Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest_jdk17_python3.8_cythonno_x86_64_58_64

NameTestsErrorsFailuresSkippedTime(s)Time StampHost
8_cythonno_x86_64_58_64170121574.7382024-12-17T04:10:21.386374b94bebfa80b4

Tests

NameStatusTypeTime(s)
test_list_permissionsSuccess28.740
test_batchlog_replay_compatibility_1Skippedported to in-JVM from 4.0 >= 5.1

/home/cassandra/cassandra-dtest/conftest.py:526: ported to in-JVM from 4.0 >= 5.1
0.431
test_compaction_throughputFailureAssertionError: assert (5.0 + 0.5) >= 5.563 + where 5.0 = float('5')

self = <compaction_test.TestCompaction object at 0x7f52d4292520>

def test_compaction_throughput(self):
"""
Test setting compaction throughput.
Set throughput, insert data and ensure compaction performance corresponds.
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()

# disableautocompaction only disables compaction for existing tables,
# so initialize stress tables with stress first
stress_write(node1, keycount=1)
node1.nodetool('disableautocompaction')

stress_write(node1, keycount=200000 * cluster.data_dir_count)

threshold = "5"
node1.nodetool('setcompactionthroughput -- ' + threshold)

node1.flush()
if node1.get_cassandra_version() < '2.2':
log_file = 'system.log'
else:
log_file = 'debug.log'
mark = node1.mark_log(filename=log_file)
node1.compact()
matches = node1.watch_log_for('Compacted', from_mark=mark, filename=log_file)

stringline = matches[0]

throughput_pattern = '{}={avgthroughput:f}{units}/s'
m = parse.search(throughput_pattern, stringline)
avgthroughput = m.named['avgthroughput']
found_units = m.named['units']

unit_conversion_dct = {
"MB": 1,
"MiB": 1,
"KiB": 1. / 1024,
"GiB": 1024,
"B": 1. / (1024 * 1024),
}

units = ['MB'] if cluster.version() < LooseVersion('3.6') else ['B', 'KiB', 'MiB', 'GiB']
assert found_units in units

logger.debug(avgthroughput)
avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)

# The throughput in the log is computed independently from the throttling and on the output files while
# throttling is on the input files, so while that throughput shouldn't be higher than the one set in
# principle, a bit of wiggle room is expected
> assert float(threshold) + 0.5 >= avgthroughput_mb
E AssertionError: assert (5.0 + 0.5) >= 5.563
E + where 5.0 = float('5')

compaction_test.py:302: AssertionError
91.004
test_counter_consistencySuccess109.563
test_hintedhandoff_dc_reenabledSuccess83.364
test_no_base_column_in_view_pk_complex_timestamp_with_flushSuccess214.270
test_static_columns_pagingSuccess42.707
test_tracing_does_not_interfere_with_digest_calculationSuccess66.401
test_complementary_deletion_with_limit_and_rows_betweenSuccess72.095
test_manual_rebuild_indexSuccess44.060
test_sstableloader_with_failing_2iSuccess76.064
test_get_range_sliceSkipped5.1 > 4

/home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 4
0.210
test_size_estimates_multidcSuccess80.686
test_counterSuccess85.908
test_login_keeps_keyspaceSuccess33.412
test_bulk_round_trip_blogpostsSuccess86.977
test_dc_parallel_repairSuccess458.089
Properties »