test_failed_read_repair | Failure | Failed: DID NOT RAISE <class 'cassandra.ReadTimeout'> | 37.820 s
self = <read_repair_test.TestSpeculativeReadRepair object at 0x7f0e248ee7f0>

    @since('4.0')
    def test_failed_read_repair(self):
        """
        If none of the disagreeing nodes ack the repair mutation, the read should fail
        """
        node1, node2, node3 = self.cluster.nodelist()
        assert isinstance(node1, Node)
        assert isinstance(node2, Node)
        assert isinstance(node3, Node)
        session = self.get_cql_connection(node1, timeout=2)
        session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))
        node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
        node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
        script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
        node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])
        node3.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])
        with raises(WriteTimeout):
            session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)"))
        node2.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])
        session = self.get_cql_connection(node2)
        with StorageProxy(node2) as storage_proxy:
            assert storage_proxy.blocking_read_repair == 0
            assert storage_proxy.speculated_rr_read == 0
            assert storage_proxy.speculated_rr_write == 0
            with raises(ReadTimeout):
>               session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
E               Failed: DID NOT RAISE <class 'cassandra.ReadTimeout'>

read_repair_test.py:555: Failed
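
For reference, every statement above goes through the test module's quorum() helper. Assuming it follows the usual dtest shape, it simply pins the statement to QUORUM so the coordinator must involve two of the three replicas; a minimal sketch with the DataStax Python driver:

    from cassandra import ConsistencyLevel
    from cassandra.query import SimpleStatement

    def quorum(query_string):
        # Assumed helper shape: wrap the raw CQL so it executes at QUORUM,
        # forcing the coordinator to reconcile replicas (and issue a
        # read-repair mutation on digest mismatch) before answering.
        return SimpleStatement(query_string, consistency_level=ConsistencyLevel.QUORUM)

With regular writes and read-repair writes blocked on node2/node3 via Byteman, the final quorum read should stall waiting for the repair mutation to be acked and time out; the failure above means it returned successfully instead.
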
test_repair_validates_dc | Failure | ccmlib.node.NodeError: C* process with 22000 is terminated | 17.596 s
self = <repair_tests.repair_test.TestRepair object at 0x7f0e2457f520>

    @since('3.11')
    def test_repair_validates_dc(self):
        """
        * Set up a multi-DC cluster
        * Perform a -dc repair with a nonexistent DC, and one without the local DC
        * Assert that the repair is not triggered in either case
        """
>       cluster = self._setup_multi_dc()

repair_tests/repair_test.py:720:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
    cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f0e246cb580>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 22000 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
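
The traceback only shows that the C* process died while ccmlib was waiting for the startup log message, so the actual cause has to come from the node's own log. A small triage sketch using ccmlib's grep_log (the search patterns are illustrative, not exhaustive):

    def dump_startup_errors(node):
        # grep_log is a real ccmlib Node API: it returns (line, match)
        # tuples for lines in the node's system.log matching the regex.
        # 'Exception encountered during startup' is the message
        # CassandraDaemon logs when a node aborts while starting.
        for line, _match in node.grep_log('ERROR|Exception encountered during startup'):
            print(line.rstrip())

Running this against the dead node (or reading logs/system.log under the ccm node directory directly) usually narrows a "process ... is terminated" failure down to a port conflict, a bad config, or an exception thrown during startup.
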