test_compaction_throughput | Failure | AssertionError: assert (5.0 + 0.5) >= 5.59
+ where 5.0 = float('5')
self = <compaction_test.TestCompaction object at 0x7f46e9e362e0>
    def test_compaction_throughput(self):
        """
        Test setting compaction throughput.
        Set throughput, insert data and ensure compaction performance corresponds.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        [node1] = cluster.nodelist()
        # disableautocompaction only disables compaction for existing tables,
        # so initialize stress tables with stress first
        stress_write(node1, keycount=1)
        node1.nodetool('disableautocompaction')
        stress_write(node1, keycount=200000 * cluster.data_dir_count)
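        # cap compaction throughput at 5 MB/s (nodetool setcompactionthroughput takes MB/s; 0 disables throttling)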
threshold = "5"
node1.nodetool('setcompactionthroughput -- ' + threshold)
node1.flush()
if node1.get_cassandra_version() < '2.2':
log_file = 'system.log'
else:
log_file = 'debug.log'
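        # on 2.2+ the 'Compacted ...' completion message is emitted to debug.log rather than
        # system.log, so watch whichever file this version uses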
        mark = node1.mark_log(filename=log_file)
        node1.compact()
        matches = node1.watch_log_for('Compacted', from_mark=mark, filename=log_file)
        stringline = matches[0]
        throughput_pattern = '{}={avgthroughput:f}{units}/s'
        m = parse.search(throughput_pattern, stringline)
        avgthroughput = m.named['avgthroughput']
        found_units = m.named['units']
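        # parse.search pulled the '=<number><unit>/s' token out of the log line: the numeric
        # average throughput plus the unit suffix it was reported in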
        unit_conversion_dct = {
            "MB": 1,
            "MiB": 1,
            "KiB": 1. / 1024,
            "GiB": 1024,
            "B": 1. / (1024 * 1024),
        }
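        # the factors above normalize whatever unit the log used to MB, so the value can be
        # compared against the MB/s threshold (MB and MiB are treated as equivalent)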
        units = ['MB'] if cluster.version() < LooseVersion('3.6') else ['B', 'KiB', 'MiB', 'GiB']
        assert found_units in units
        logger.debug(avgthroughput)
        avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)
        # The throughput in the log is computed independently of the throttling, and on the output files,
        # while throttling applies to the input files; so although that throughput shouldn't in principle
        # exceed the one set, a bit of wiggle room is expected
>       assert float(threshold) + 0.5 >= avgthroughput_mb
E       AssertionError: assert (5.0 + 0.5) >= 5.59
E        +  where 5.0 = float('5')
compaction_test.py:302: AssertionError | 106.538 |
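For reference, the failing comparison reduces to plain arithmetic: the test caps compaction at 5 MB/s and allows 0.5 MB/s of wiggle room, but the parsed throughput works out to 5.59 in MB terms, so 5.5 >= 5.59 is false and the measured value exceeds the tolerance by only 0.09 MB/s. A minimal standalone sketch of that check, assuming the log reported the value in MiB (the exact unit string is not shown in the failure output):

threshold = "5"                       # MB/s passed to 'setcompactionthroughput'
avgthroughput = "5.59"                # numeric value parsed from the 'Compacted' line
found_units = "MiB"                   # assumed unit suffix; MB and MiB both map to factor 1
unit_conversion_dct = {"MB": 1, "MiB": 1, "KiB": 1. / 1024, "GiB": 1024, "B": 1. / (1024 * 1024)}

avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)   # 5.59
print(float(threshold) + 0.5 >= avgthroughput_mb)                            # False -> AssertionError in the test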