Unit Test Results.

Designed for use with JUnit and Ant.

All Tests

Class | Name | Status | Type | Time(s)
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Success | | 289.851
8_cythonno_x86_64_1_8 | test_disk_balance_bootstrap | Success | | 136.961
8_cythonno_x86_64_1_8 | test_throttled_partition_update | Success | | 540.707
8_cythonno_x86_64_1_8 | test_resume_failed_replace | Success | | 172.463
8_cythonno_x86_64_1_8 | test_initial_empty_repair_tables | Skipped | hangs CI | 0.001
    /home/cassandra/cassandra-dtest/repair_tests/repair_test.py:1416: hangs CI
8_cythonno_x86_64_2_8 | test_network_topology_strategy_each_quorum | Success | | 214.024
8_cythonno_x86_64_2_8 | test_add_dc_after_mv_simple_replication | Success | | 112.673
8_cythonno_x86_64_2_8 | test_really_complex_repair | Success | | 255.595
8_cythonno_x86_64_2_8 | test_restart_failed_replace_with_reset_resume_state | Success | | 191.459
8_cythonno_x86_64_2_8 | test_repair_parent_table | Success | | 78.771
8_cythonno_x86_64_3_8 | test_simple_strategy_users | Success | | 190.799
8_cythonno_x86_64_3_8 | test_add_dc_after_mv_network_replication | Success | | 119.019
8_cythonno_x86_64_3_8 | test_replace_stopped_node | Success | | 192.257
8_cythonno_x86_64_3_8 | test_restart_failed_replace | Success | | 263.754
8_cythonno_x86_64_3_8 | test_repair_table | Success | | 96.176
8_cythonno_x86_64_4_8 | test_simple_strategy_each_quorum_users | Success | | 69.050
8_cythonno_x86_64_4_8 | test_add_node_after_mv | Success | | 69.984
8_cythonno_x86_64_4_8 | test_replace_shutdown_node | Success | | 174.724
8_cythonno_x86_64_4_8 | test_rf_collapse_gossiping_property_file_snitch_multi_dc | Skipped | 5.1 > 5.0.x | 0.452
    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 5.0.x
8_cythonno_x86_64_5_8 | test_network_topology_strategy_users | Success | | 308.181
8_cythonno_x86_64_5_8 | test_add_node_after_wide_mv_with_range_deletions | Success | | 132.027
8_cythonno_x86_64_5_8 | test_replace_stopped_node_same_address | Success | | 208.195
8_cythonno_x86_64_5_8 | test_rf_expand_gossiping_property_file_snitch_multi_dc | Skipped | 5.1 > 5.0.x | 0.279
    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 5.0.x
8_cythonno_x86_64_6_8 | test_network_topology_strategy_each_quorum_users | Success | | 94.528
8_cythonno_x86_64_6_8 | test_add_node_after_very_wide_mv | Success | | 303.191
8_cythonno_x86_64_6_8 | test_replace_first_boot | Success | | 180.989
8_cythonno_x86_64_6_8 | test_multidatacenter_local_quorum | Success | | 136.194
8_cythonno_x86_64_7_8 | test_network_topology_strategy_counters | Success | | 128.689
8_cythonno_x86_64_7_8 | test_add_write_survey_node_after_mv | Success | | 72.699
8_cythonno_x86_64_7_8 | test_replace_active_node | Success | | 89.986
8_cythonno_x86_64_7_8 | test_stop_decommission_too_few_replicas_multi_dc | Success | | 58.011
8_cythonno_x86_64_8_8 | test_network_topology_strategy_each_quorum_counters | Success | | 86.247
8_cythonno_x86_64_8_8 | test_complex_repair | Success | | 282.271
8_cythonno_x86_64_8_8 | test_replace_nonexistent_node | Success | | 82.222
8_cythonno_x86_64_8_8 | test_sstable_marking_not_intersecting_all_ranges | Success | | 91.111
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Success | | 235.303
8_cythonno_x86_64_1_8 | test_disk_balance_bootstrap | Success | | 98.091
8_cythonno_x86_64_1_8 | test_throttled_partition_update | Success | | 495.853
8_cythonno_x86_64_1_8 | test_resume_failed_replace | Success | | 198.842
8_cythonno_x86_64_1_8 | test_initial_empty_repair_tables | Skipped | hangs CI | 0.001
    /home/cassandra/cassandra-dtest/repair_tests/repair_test.py:1416: hangs CI
8_cythonno_x86_64_2_8 | test_network_topology_strategy_each_quorum | Success | | 192.104
8_cythonno_x86_64_2_8 | test_add_dc_after_mv_simple_replication | Success | | 102.201
8_cythonno_x86_64_2_8 | test_really_complex_repair | Success | | 256.305
8_cythonno_x86_64_2_8 | test_restart_failed_replace_with_reset_resume_state | Success | | 187.380
8_cythonno_x86_64_2_8 | test_repair_parent_table | Success | | 78.041
8_cythonno_x86_64_3_8 | test_simple_strategy_users | Success | | 188.848
8_cythonno_x86_64_3_8 | test_add_dc_after_mv_network_replication | Success | | 113.668
8_cythonno_x86_64_3_8 | test_replace_stopped_node | Success | | 189.640
8_cythonno_x86_64_3_8 | test_restart_failed_replace | Success | | 255.944
8_cythonno_x86_64_3_8 | test_repair_table | Success | | 78.487
8_cythonno_x86_64_4_8 | test_simple_strategy_each_quorum_users | Success | | 79.972
8_cythonno_x86_64_4_8 | test_add_node_after_mv | Success | | 77.581
8_cythonno_x86_64_4_8 | test_replace_shutdown_node | Success | | 176.604
8_cythonno_x86_64_4_8 | test_rf_collapse_gossiping_property_file_snitch_multi_dc | Skipped | 5.1 > 5.0.x | 0.503
    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 5.0.x
8_cythonno_x86_64_5_8 | test_network_topology_strategy_users | Success | | 203.335
8_cythonno_x86_64_5_8 | test_add_node_after_wide_mv_with_range_deletions | Success | | 91.472
8_cythonno_x86_64_5_8 | test_replace_stopped_node_same_address | Success | | 176.166
8_cythonno_x86_64_5_8 | test_rf_expand_gossiping_property_file_snitch_multi_dc | Skipped | 5.1 > 5.0.x | 0.478
    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 5.0.x
8_cythonno_x86_64_6_8 | test_network_topology_strategy_each_quorum_users | Success | | 133.245
8_cythonno_x86_64_6_8 | test_add_node_after_very_wide_mv | Success | | 324.972
8_cythonno_x86_64_6_8 | test_replace_first_boot | Success | | 180.371
8_cythonno_x86_64_6_8 | test_multidatacenter_local_quorum | Failure | AssertionError: too many reads hit the degraded node: 4 assert 4 <= 1 | 141.289

self = <snitch_test.TestDynamicEndpointSnitch object at 0x7eff817dbfa0>

    @pytest.mark.resource_intensive
    @since('3.10')
    def test_multidatacenter_local_quorum(self):
        '''
        @jira_ticket CASSANDRA-13074

        If we do only local datacenters reads in a multidatacenter DES setup,
        DES should take effect and route around a degraded node
        '''

        def no_cross_dc(scores, cross_dc_nodes):
            return all('/' + k.address() not in scores for k in cross_dc_nodes)

        def snitchable(scores_before, scores_after, needed_nodes):
            return all('/' + k.address() in scores_before and '/' + k.address()
                       in scores_after for k in needed_nodes)

        cluster = self.cluster
        cluster.populate([3, 3])
        coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
        # increase DES reset/update interval so we clear any cross-DC startup reads faster
        cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
                                                  'dynamic_snitch_update_interval_in_ms': 50,
                                                  'phi_convict_threshold': 12})
        # Delay reads on the degraded node by 50 milliseconds
        degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
                                      '-Dcassandra.allow_unsafe_join=true'])
        cluster.start()

        des = make_mbean('db', type='DynamicEndpointSnitch')
        read_stage = make_mbean('metrics', type='ThreadPools', path='request',
                                scope='ReadStage', name='CompletedTasks')
        session = self.patient_exclusive_cql_connection(coordinator_node)
        session.execute("CREATE KEYSPACE snitchtestks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3}")
        session.execute("CREATE TABLE snitchtestks.tbl1 (key int PRIMARY KEY) WITH speculative_retry = 'NONE'")
        read_stmt = session.prepare("SELECT * FROM snitchtestks.tbl1 where key = ?")
        read_stmt.consistency_level = ConsistencyLevel.LOCAL_QUORUM
        insert_stmt = session.prepare("INSERT INTO snitchtestks.tbl1 (key) VALUES (?)")
        insert_stmt.consistency_level = ConsistencyLevel.ALL
        with JolokiaAgent(coordinator_node) as jmx:
            with JolokiaAgent(degraded_node) as bad_jmx:
                for x in range(0, 150):
                    session.execute(insert_stmt, [x])

                cleared = False
                # Wait for a snitch reset in case any earlier
                # startup process populated cross-DC read timings
                while not cleared:
                    scores = jmx.read_attribute(des, 'Scores')
                    cleared = ('/127.0.0.1' in scores and (len(scores) == 1)) or not scores

                snitchable_count = 0

                for x in range(0, 150):
                    degraded_reads_before = bad_jmx.read_attribute(read_stage, 'Value')
                    scores_before = jmx.read_attribute(des, 'Scores')
                    assert no_cross_dc(scores_before, [node4, node5, node6]), "Cross DC scores were present: " + str(scores_before)
                    future = session.execute_async(read_stmt, [x])
                    future.result()
                    scores_after = jmx.read_attribute(des, 'Scores')
                    assert no_cross_dc(scores_after, [node4, node5, node6]), "Cross DC scores were present: " + str(scores_after)

                    if snitchable(scores_before, scores_after,
                                  [coordinator_node, healthy_node, degraded_node]):
                        snitchable_count = snitchable_count + 1
                        # If the DES correctly routed the read around the degraded node,
                        # it shouldn't have another completed read request in metrics,
                        # unless there was one right after a reset.
                        degraded_reads = bad_jmx.read_attribute(read_stage, 'Value')
                        difference = abs(degraded_reads_before - degraded_reads)
>                       assert difference <= 1, "too many reads hit the degraded node: %s" % difference
E                       AssertionError: too many reads hit the degraded node: 4
E                       assert 4 <= 1

snitch_test.py:235: AssertionError
8_cythonno_x86_64_7_8 | test_network_topology_strategy_counters | Success | | 132.912
8_cythonno_x86_64_7_8 | test_add_write_survey_node_after_mv | Success | | 65.228
8_cythonno_x86_64_7_8 | test_replace_active_node | Success | | 85.403
8_cythonno_x86_64_7_8 | test_stop_decommission_too_few_replicas_multi_dc | Success | | 53.772
8_cythonno_x86_64_8_8 | test_network_topology_strategy_each_quorum_counters | Success | | 77.382
8_cythonno_x86_64_8_8 | test_complex_repair | Success | | 265.283
8_cythonno_x86_64_8_8 | test_replace_nonexistent_node | Success | | 79.544
8_cythonno_x86_64_8_8 | test_sstable_marking_not_intersecting_all_ranges | Success | | 74.997
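
The only failure above is the second run of test_multidatacenter_local_quorum, where 4 reads reached the artificially delayed node even though the assertion tolerates at most 1, meaning the dynamic endpoint snitch did not route around it in that run. Below is a minimal sketch, not part of this report, for re-running just that test from a cassandra-dtest checkout through pytest's Python entry point. The --cassandra-dir value is an assumption and must point at a local Cassandra build, and because the test is marked resource_intensive the dtest conftest may deselect it unless resource-intensive tests are enabled in your environment.

    # Hypothetical re-run of only the failed snitch test from a cassandra-dtest checkout.
    # The --cassandra-dir path is a placeholder for a locally built Cassandra tree.
    import pytest

    exit_code = pytest.main([
        "--cassandra-dir=/path/to/cassandra",  # assumed build location
        "snitch_test.py::TestDynamicEndpointSnitch::test_multidatacenter_local_quorum",
    ])
    raise SystemExit(exit_code)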