Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest_jdk11_python3.8_cythonno_x86_64_58_64

NameTestsErrorsFailuresSkippedTime(s)Time StampHost
8_cythonno_x86_64_58_64170121618.4282024-12-17T04:10:46.160645a111ef85dd77

Tests

NameStatusTypeTime(s)
test_list_permissionsSuccess32.175
test_batchlog_replay_compatibility_1Skippedported to in-JVM from 4.0 >= 5.1

/home/cassandra/cassandra-dtest/conftest.py:526: ported to in-JVM from 4.0 >= 5.1
0.346
test_compaction_throughputFailureAssertionError: assert (5.0 + 0.5) >= 5.642 + where 5.0 = float('5')

self = <compaction_test.TestCompaction object at 0x7f2a753a1520>

def test_compaction_throughput(self):
"""
Test setting compaction throughput.
Set throughput, insert data and ensure compaction performance corresponds.
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()

# disableautocompaction only disables compaction for existing tables,
# so initialize stress tables with stress first
stress_write(node1, keycount=1)
node1.nodetool('disableautocompaction')

stress_write(node1, keycount=200000 * cluster.data_dir_count)

threshold = "5"
node1.nodetool('setcompactionthroughput -- ' + threshold)

node1.flush()
if node1.get_cassandra_version() < '2.2':
log_file = 'system.log'
else:
log_file = 'debug.log'
mark = node1.mark_log(filename=log_file)
node1.compact()
matches = node1.watch_log_for('Compacted', from_mark=mark, filename=log_file)

stringline = matches[0]

throughput_pattern = '{}={avgthroughput:f}{units}/s'
m = parse.search(throughput_pattern, stringline)
avgthroughput = m.named['avgthroughput']
found_units = m.named['units']

unit_conversion_dct = {
"MB": 1,
"MiB": 1,
"KiB": 1. / 1024,
"GiB": 1024,
"B": 1. / (1024 * 1024),
}

units = ['MB'] if cluster.version() < LooseVersion('3.6') else ['B', 'KiB', 'MiB', 'GiB']
assert found_units in units

logger.debug(avgthroughput)
avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)

# The throughput in the log is computed independently from the throttling and on the output files while
# throttling is on the input files, so while that throughput shouldn't be higher than the one set in
# principle, a bit of wiggle room is expected
> assert float(threshold) + 0.5 >= avgthroughput_mb
E AssertionError: assert (5.0 + 0.5) >= 5.642
E + where 5.0 = float('5')

compaction_test.py:302: AssertionError
92.728
test_counter_consistencySuccess92.172
test_hintedhandoff_dc_reenabledSuccess84.982
test_no_base_column_in_view_pk_complex_timestamp_with_flushSuccess266.331
test_static_columns_pagingSuccess45.723
test_tracing_does_not_interfere_with_digest_calculationSuccess71.694
test_complementary_deletion_with_limit_and_rows_betweenSuccess78.636
test_manual_rebuild_indexSuccess44.766
test_sstableloader_with_failing_2iSuccess85.988
test_get_range_sliceSkipped5.1 > 4

/home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 4
0.362
test_size_estimates_multidcSuccess84.432
test_counterSuccess87.550
test_login_keeps_keyspaceSuccess35.295
test_bulk_round_trip_blogpostsSuccess66.880
test_dc_parallel_repairSuccess447.508
Properties »