test_network_topology_strategy_each_quorum_users | Failure | 24.938 s
ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f4f145374c0>
    @pytest.mark.resource_intensive
    @since("3.0")
    def test_network_topology_strategy_each_quorum_users(self):
        """
        @jira_ticket CASSANDRA-10584
        Test for multiple datacenters, users table, only the each quorum
        reads.
        """
        self.nodes = [3, 3]
        self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
        combinations = [
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
        ]
        logger.debug("Testing multiple dcs, users, each quorum reads")
>       self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
consistency_test.py:670:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
    self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
    cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f4f14464a00>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f4f144094f0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f4f144094f0>, <subprocess.Popen object at 0x7f4f14449ee0>, 0), (<ccmlib.node.Node obj... at 0x7f4f14492550>, 0), (<ccmlib.node.Node object at 0x7f4f14501ee0>, <subprocess.Popen object at 0x7f4f14492820>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4f14449ee0>
    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
        if jvm_args is None:
            jvm_args = []
        extension.pre_cluster_start(self)
        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)
        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()
                # if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)
                started.append((node, p, mark))
        if no_wait:
            time.sleep(2)  # waiting 2 seconds to check for early errors and for the pid to be set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
>                   raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
E                   ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
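
Note on triage: the NodeError above only says that the C* process exited before logging <started listening>; the actual cause is in the dead node's own system.log. Below is a minimal triage sketch that uses only the Node methods visible in the traceback (is_running(), logfilename(), name) plus the standard library; the helper name dump_startup_log is ours, not ccmlib's.

import os

def dump_startup_log(node, tail_lines=50):
    # Print the tail of a dead node's system.log to see why C* exited.
    # `node` is a ccmlib Node like the one in the traceback above.
    if node.is_running():
        print("{} is still running".format(node.name))
        return
    logfile = node.logfilename()
    if not os.path.exists(logfile):
        print("no log at {}; the process likely died before logging".format(logfile))
        return
    with open(logfile) as f:
        for line in f.readlines()[-tail_lines:]:
            print(line.rstrip())
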
test_multidatacenter_local_quorum | Failure | 134.881 s
ccmlib.node.TimeoutError: 16 Dec 2024 18:09:10 [node3] after 120.13/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
    Head: INFO [main] 2024-12-16 18:07:10,470 YamlConfigura
    Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
        at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
self = <snitch_test.TestDynamicEndpointSnitch object at 0x7f4f1445bf40>
    @pytest.mark.resource_intensive
    @since('3.10')
    def test_multidatacenter_local_quorum(self):
        '''
        @jira_ticket CASSANDRA-13074
        If we do only local datacenter reads in a multidatacenter DES setup,
        DES should take effect and route around a degraded node
        '''
        def no_cross_dc(scores, cross_dc_nodes):
            return all('/' + k.address() not in scores for k in cross_dc_nodes)

        def snitchable(scores_before, scores_after, needed_nodes):
            return all('/' + k.address() in scores_before and '/' + k.address()
                       in scores_after for k in needed_nodes)

        cluster = self.cluster
        cluster.populate([3, 3])
        coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
        # increase DES reset/update interval so we clear any cross-DC startup reads faster
        cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
                                                  'dynamic_snitch_update_interval_in_ms': 50,
                                                  'phi_convict_threshold': 12})
        # Delay reads on the degraded node by 50 milliseconds
        degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
                                      '-Dcassandra.allow_unsafe_join=true'])
>       cluster.start()
snitch_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:526: in start
    p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:922: in start
    node.watch_log_for_alive(self, from_mark=mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:686: in watch_log_for_alive
    self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
    TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
start = 1734372430.4562736, timeout = 120
msg = "Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:\n Head: INFO [main] 2024-12-16 18:07:10,470 YamlCon...activate(CassandraDaemon.java:724)\n\tat org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)\n"
node = 'node3'
    @staticmethod
    def raise_if_passed(start, timeout, msg, node=None):
        if start + timeout < time.time():
>           raise TimeoutError.create(start, timeout, msg, node)
E           ccmlib.node.TimeoutError: 16 Dec 2024 18:09:10 [node3] after 120.13/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
E           Head: INFO [main] 2024-12-16 18:07:10,470 YamlConfigura
E           Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
E               at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError
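
Note on the timeout: the wait that expired here is a poll-the-log-with-deadline loop, as the raise_if_passed frame shows. Below is a self-contained sketch of that pattern for reference; the helper wait_for_log_line is illustrative only and is not ccmlib's actual watch_log_for implementation.

import re
import time

def wait_for_log_line(path, pattern, timeout=120, poll_s=0.5):
    # Block until `pattern` matches a line in the file at `path`; raise once
    # the deadline passes, mirroring TimeoutError.raise_if_passed above.
    deadline = time.time() + timeout
    regex = re.compile(pattern)
    while time.time() < deadline:
        try:
            with open(path) as f:
                if any(regex.search(line) for line in f):
                    return
        except FileNotFoundError:
            pass  # the node may not have created its log file yet
        time.sleep(poll_s)
    raise TimeoutError("Missing: [{!r}] not found in {} after {} seconds".format(pattern, path, timeout))

Something like wait_for_log_line(node.logfilename(), r'127\.0\.0\.1:7000.* is now UP') would reproduce the check that failed above.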