Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest_jdk17_python3.8_cythonno_x86_64_58_64

Name | Tests | Errors | Failures | Skipped | Time(s) | Time Stamp | Host
8_cythonno_x86_64_58_64 | 17 | 0 | 1 | 2 | 1545.835 | 2024-12-16T21:31:09.888 | 1192fe0e92473e1

Tests

Name | Status | Type | Time(s)
test_list_permissions | Success | 29.703
test_batchlog_replay_compatibility_1 | Skipped | ported to in-JVM from 4.0 >= 5.1

/home/cassandra/cassandra-dtest/conftest.py:526: ported to in-JVM from 4.0 >= 5.1
0.220
test_compaction_throughput | Failure | AssertionError: assert (5.0 + 0.5) >= 5.692 + where 5.0 = float('5')

self = <compaction_test.TestCompaction object at 0x7f77edb65520>

def test_compaction_throughput(self):
"""
Test setting compaction throughput.
Set throughput, insert data and ensure compaction performance corresponds.
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()

# disableautocompaction only disables compaction for existing tables,
# so initialize stress tables with stress first
stress_write(node1, keycount=1)
node1.nodetool('disableautocompaction')

stress_write(node1, keycount=200000 * cluster.data_dir_count)

threshold = "5"
node1.nodetool('setcompactionthroughput -- ' + threshold)

node1.flush()
if node1.get_cassandra_version() < '2.2':
log_file = 'system.log'
else:
log_file = 'debug.log'
mark = node1.mark_log(filename=log_file)
node1.compact()
matches = node1.watch_log_for('Compacted', from_mark=mark, filename=log_file)

stringline = matches[0]

throughput_pattern = '{}={avgthroughput:f}{units}/s'
m = parse.search(throughput_pattern, stringline)
avgthroughput = m.named['avgthroughput']
found_units = m.named['units']

unit_conversion_dct = {
"MB": 1,
"MiB": 1,
"KiB": 1. / 1024,
"GiB": 1024,
"B": 1. / (1024 * 1024),
}

units = ['MB'] if cluster.version() < LooseVersion('3.6') else ['B', 'KiB', 'MiB', 'GiB']
assert found_units in units

logger.debug(avgthroughput)
avgthroughput_mb = unit_conversion_dct[found_units] * float(avgthroughput)

# The throughput in the log is computed independently from the throttling and on the output files while
# throttling is on the input files, so while that throughput shouldn't be higher than the one set in
# principle, a bit of wiggle room is expected
> assert float(threshold) + 0.5 >= avgthroughput_mb
E AssertionError: assert (5.0 + 0.5) >= 5.692
E + where 5.0 = float('5')

compaction_test.py:302: AssertionError
99.021
test_counter_consistency | Success | 99.988
test_hintedhandoff_dc_reenabled | Success | 83.040
test_no_base_column_in_view_pk_complex_timestamp_with_flush | Success | 226.091
test_static_columns_paging | Success | 46.238
test_tracing_does_not_interfere_with_digest_calculation | Success | 67.991
test_complementary_deletion_with_limit_and_rows_between | Success | 71.646
test_manual_rebuild_index | Success | 45.931
test_sstableloader_with_failing_2i | Success | 78.266
test_get_range_slice | Skipped | 5.1 > 4

/home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 4
0.182
test_size_estimates_multidc | Success | 74.281
test_counter | Success | 81.277
test_login_keeps_keyspace | Success | 32.716
test_bulk_round_trip_blogposts | Success | 80.144
test_dc_parallel_repair | Success | 428.598
Properties »