Class | Name | Status | Type | Time(s) |
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAvailability object at 0x7f0c277f0280>
@pytest.mark.resource_intensive
def test_network_topology_strategy(self):
"""
Test for multiple datacenters, using network topology replication strategy.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
> self._start_cluster()
consistency_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f0c27737190>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f0c2773ca00>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7f0c2773ca00>, <subprocess.Popen object at 0x7f0c276c18e0>, 0), (<ccmlib.node.Node obj...x7f0c277a1d30>, 0), (<ccmlib.node.Node object at 0x7f0c27824820>, <subprocess.Popen object at 0x7f0c277a1970>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f0c276c18e0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 56.924 |
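Both NodeError signatures in this report come from the ccmlib startup path shown above: cluster.start() forks each JVM, then gives it a short grace period (node._wait_for_running(p, timeout_s=7)) before it even begins watching for the <started listening> log line. The sketch below is an illustrative reconstruction of that polling step, not ccmlib's actual implementation; the helper name and the 0.5 s poll interval are assumptions.

import subprocess
import time

def wait_for_running(process: subprocess.Popen, timeout_s: float = 7.0) -> bool:
    # Give the freshly forked JVM up to timeout_s seconds to stay alive.
    # poll() returns None while the child is still running.
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if process.poll() is not None:
            return False  # process exited: startup died before listening
        time.sleep(0.5)  # assumed poll interval
    return True

If this returns False, the cluster code above raises the "Node ... should be running before waiting for <started listening> log message" NodeError seen in these rows.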
8_cythonno_x86_64_1_8 | test_throttled_partition_update | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <materialized_views_test.TestMaterializedViews object at 0x7f0c27708e50>
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
@jira_ticket: CASSANDRA-13299, test breaking up a large partition when repairing the base table with an MV.
Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the number
of rows to be applied in one mutation

"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f0c2776d430>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f0c2767e910>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f0c2767e910>, <subprocess.Popen object at 0x7f0c27700d00>, 0), (<ccmlib.node.Node obj... at 0x7f0c2775cc10>, 0), (<ccmlib.node.Node object at 0x7f0c2776d1c0>, <subprocess.Popen object at 0x7f0c2775c040>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f0c27700d00>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 24.129 |
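Before any node starts, the same code path verifies that every loopback interface (for example ('127.0.0.5', 9042) above) is free via common.assert_socket_available(itf). The traceback does not show ccmlib's internals; one plausible equivalent, assuming a simple bind probe, is:

import socket

def assert_socket_available(itf):
    # itf is a (host, port) pair such as ('127.0.0.5', 9042).
    host, port = itf
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind((host, port))  # fails if another process already holds the port
    except OSError as exc:
        raise AssertionError("{}:{} is not available: {}".format(host, port, exc))
    finally:
        s.close()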
8_cythonno_x86_64_2_8 | test_network_topology_strategy_each_quorum | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAvailability object at 0x7f4c60dc7460>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, using network topology strategy, with only
EACH_QUORUM reads.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
> self._start_cluster()
consistency_test.py:405:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f4c60ca6af0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f4c60d30700>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7f4c60d30700>, <subprocess.Popen object at 0x7f4c60d30880>, 0), (<ccmlib.node.Node obj...x7f4c60cdfaf0>, 0), (<ccmlib.node.Node object at 0x7f4c60dc7250>, <subprocess.Popen object at 0x7f4c60cdfcd0>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4c60d30880>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 35.173 |
8_cythonno_x86_64_2_8 | test_add_dc_after_mv_simple_replication | Failure | ccmlib.node.NodeError: C* process with 4003 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f4c60d8f1f0>
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""
> self._add_dc_after_mv_test(1, False)
materialized_views_test.py:583:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f4c60d16130>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4003 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 11.348 |
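The "C* process with <pid> is terminated" variant is raised when node._is_pid_running() reports the JVM gone while watch_log_for is still waiting for the start message. The traceback does not show how ccmlib probes the pid; a standard POSIX approach, offered here only as an assumed equivalent, is signal 0:

import errno
import os

def is_pid_running(pid: int) -> bool:
    # Signal 0 performs the existence/permission checks without delivering
    # any signal to the target process.
    try:
        os.kill(pid, 0)
    except OSError as exc:
        if exc.errno == errno.ESRCH:  # no such process
            return False
        if exc.errno == errno.EPERM:  # exists, but owned by another user
            return True
        raise
    return True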
8_cythonno_x86_64_2_8 | test_really_complex_repair | Failure | ccmlib.node.NodeError: C* process with 6077 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f4c60d2bc40>
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
Test that materialized views are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f4c60d7ffa0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6077 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 16.042 |
8_cythonno_x86_64_3_8 | test_simple_strategy_users | Failure | ccmlib.node.NodeError: C* process with 831 is terminated
self = <consistency_test.TestAccuracy object at 0x7f4e09420ca0>
@pytest.mark.resource_intensive
def test_simple_strategy_users(self):
"""
Test for a single datacenter, users table.
"""
self.nodes = 5
self.rf = 3
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
]
logger.debug("Testing single dc, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
consistency_test.py:597:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f4e09397070>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 831 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 56.680 |
8_cythonno_x86_64_3_8 | test_add_dc_after_mv_network_replication | Failure | ccmlib.node.NodeError: C* process with 2977 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f4e093fc520>
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""
> self._add_dc_after_mv_test({'dc1': 1}, True)
materialized_views_test.py:593:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f4e08a604f0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2977 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 35.875 |
8_cythonno_x86_64_4_8 | test_add_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3633 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f5207f77760>
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:603:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f52065bb460>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3633 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 38.089 |
8_cythonno_x86_64_5_8 | test_network_topology_strategy_users | Failure | ccmlib.node.NodeError: C* process with 902 is terminated
self = <consistency_test.TestAccuracy object at 0x7ffaa5d98190>
@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
logger.debug("Testing multiple dcs, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:651:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7ffaa5caaca0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 902 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 18.004 |
8_cythonno_x86_64_5_8 | test_add_node_after_wide_mv_with_range_deletions | Failure | ccmlib.node.NodeError: C* process with 3222 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7ffaa5ce86d0>
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670
Test that wide materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:691:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7ffaa5d072b0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3222 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 10.026 |
8_cythonno_x86_64_6_8 | test_network_topology_strategy_each_quorum_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f15d6d58520>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, users table, with only EACH_QUORUM
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, users, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
consistency_test.py:670:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f15d6c85a60>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f15d6c2a550>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f15d6c2a550>, <subprocess.Popen object at 0x7f15d6c45340>, 0), (<ccmlib.node.Node obj... at 0x7f15d6cbe280>, 0), (<ccmlib.node.Node object at 0x7f15d6cb7af0>, <subprocess.Popen object at 0x7f15d6cbed90>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f15d6c45340>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 26.826 |
8_cythonno_x86_64_6_8 | test_add_node_after_very_wide_mv | Failure | ccmlib.node.NodeError: C* process with 3218 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f15d6ca9b50>
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670
Test that very wide materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:761:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f15d61436a0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3218 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 13.751 |
8_cythonno_x86_64_6_8 | test_multidatacenter_local_quorum | Failure | ccmlib.node.TimeoutError: 17 Dec 2024 01:05:08 [node3] after 120.12/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
Head: INFO [main] 2024-12-17 01:03:08,532 YamlConfigura
Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
self = <snitch_test.TestDynamicEndpointSnitch object at 0x7f15d6c7cfa0>
@pytest.mark.resource_intensive
@since('3.10')
def test_multidatacenter_local_quorum(self):
'''
@jira_ticket CASSANDRA-13074
If we do only local-datacenter reads in a multi-datacenter DES setup,
DES should take effect and route around a degraded node
'''
def no_cross_dc(scores, cross_dc_nodes):
return all('/' + k.address() not in scores for k in cross_dc_nodes)
def snitchable(scores_before, scores_after, needed_nodes):
return all('/' + k.address() in scores_before and '/' + k.address()
in scores_after for k in needed_nodes)
cluster = self.cluster
cluster.populate([3, 3])
coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
# increase DES reset/update interval so we clear any cross-DC startup reads faster
cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
'dynamic_snitch_update_interval_in_ms': 50,
'phi_convict_threshold': 12})
# Delay reads on the degraded node by 50 milliseconds
degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
'-Dcassandra.allow_unsafe_join=true'])
> cluster.start()
snitch_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:526: in start
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:922: in start
node.watch_log_for_alive(self, from_mark=mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:686: in watch_log_for_alive
self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
start = 1734397387.9138258, timeout = 120
msg = "Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:\n Head: INFO [main] 2024-12-17 01:03:08,532 YamlCon...activate(CassandraDaemon.java:724)\n\tat org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)\n"
node = 'node3'
@staticmethod
def raise_if_passed(start, timeout, msg, node=None):
if start + timeout < time.time():
> raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 17 Dec 2024 01:05:08 [node3] after 120.12/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
E Head: INFO [main] 2024-12-17 01:03:08,532 YamlConfigura
E Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
E at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError | 135.005 |
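The TimeoutError above is the third failure mode in this report: node3's JVM stayed up, but the gossip line '127.0.0.1:7000.* is now UP' never appeared in system.log within the 120-second budget. watch_log_for tails the log from a saved offset (the mark_log() / from_mark values in the start() code earlier) and matches regexes until the deadline. A minimal sketch of that tail-from-mark loop, with assumed helper name and poll interval:

import re
import time

def watch_log_for(path, pattern, from_mark=0, timeout=120.0, poll=0.5):
    # Re-read the log from the saved byte offset until the pattern shows up,
    # then return; otherwise raise once the deadline passes.
    regex = re.compile(pattern)
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with open(path, "rb") as f:
            f.seek(from_mark)
            text = f.read().decode("utf-8", errors="replace")
        if regex.search(text):
            return
        time.sleep(poll)
    raise TimeoutError("Missing: [{}] not found in {}".format(pattern, path))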
8_cythonno_x86_64_7_8 | test_network_topology_strategy_counters | Failure | ccmlib.node.NodeError: C* process with 934 is terminated
self = <consistency_test.TestAccuracy object at 0x7fccf58198e0>
@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
"""
Test for multiple datacenters, counters table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
]
logger.debug("Testing multiple dcs, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:747:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7fccf5735490>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 934 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 98.888 |
8_cythonno_x86_64_7_8 | test_add_write_survey_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3270 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7fccf5769f40>
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node in write survey mode.
"""
> session = self.prepare()
materialized_views_test.py:805:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7fccf5783c70>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3270 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 59.954 |
8_cythonno_x86_64_7_8 | test_stop_decommission_too_few_replicas_multi_dc | Failure | ccmlib.node.NodeError: C* process with 5685 is terminated
self = <topology_test.TestTopology object at 0x7fccf574ad90>
@since('3.12')
@pytest.mark.resource_intensive
def test_stop_decommission_too_few_replicas_multi_dc(self):
"""
Decommission should fail when it would result in the number of live replicas being less than
the replication factor. --force should bypass this requirement.
@jira_ticket CASSANDRA-12510
@expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
"""
# we need to ignore this error log message, which is emitted during the test,
# because the dtest framework, which reads the logs, would otherwise evaluate
# the node as erroneous and terminate it prematurely
self.fixture_dtest_setup.ignore_log_patterns = (r'.*Not enough live nodes to maintain replication factor*')
cluster = self.cluster
> cluster.populate([2, 2]).start()
topology_test.py:494:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7fccf4cc1160>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5685 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 96.143 |
8_cythonno_x86_64_8_8 | test_network_topology_strategy_each_quorum_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f91d63301c0>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, counters table, with only EACH_QUORUM
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, counters, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:766:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f91d62454f0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f91d62579a0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f91d62579a0>, <subprocess.Popen object at 0x7f91d62ec640>, 0), (<ccmlib.node.Node obj... at 0x7f91d62ecfd0>, 0), (<ccmlib.node.Node object at 0x7f91d6311dc0>, <subprocess.Popen object at 0x7f91d62804f0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f91d62ec640>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 29.446 |
8_cythonno_x86_64_8_8 | test_complex_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <materialized_views_test.TestMaterializedViews object at 0x7f91d6295460>
@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
Test that materialized views are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2090:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f91d631a730>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f91d6285d30>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f91d6285d30>, <subprocess.Popen object at 0x7f91d6282130>, 0), (<ccmlib.node.Node obj... at 0x7f91d6282460>, 0), (<ccmlib.node.Node object at 0x7f91d626c7f0>, <subprocess.Popen object at 0x7f91d62825b0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f91d6282130>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 27.242 |
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAvailability object at 0x7f0a55800130>
@pytest.mark.resource_intensive
def test_network_topology_strategy(self):
"""
Test for multiple datacenters, using network topology replication strategy.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
> self._start_cluster()
consistency_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f0a557472e0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f0a55778df0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7f0a55778df0>, <subprocess.Popen object at 0x7f0a557784c0>, 0), (<ccmlib.node.Node obj...x7f0a557785b0>, 0), (<ccmlib.node.Node object at 0x7f0a557b1b50>, <subprocess.Popen object at 0x7f0a557686d0>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f0a557784c0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 31.112 |
8_cythonno_x86_64_1_8 | test_throttled_partition_update | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <materialized_views_test.TestMaterializedViews object at 0x7f0a55718c40>
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
@jira_ticket: CASSANDRA-13299, test breaking up a large partition when repairing the base table with an MV.
Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the number
of rows to be applied in one mutation
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f0a55771730>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f0a556d2040>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f0a556d2040>, <subprocess.Popen object at 0x7f0a5574c040>, 0), (<ccmlib.node.Node obj... at 0x7f0a55714070>, 0), (<ccmlib.node.Node object at 0x7f0a55743400>, <subprocess.Popen object at 0x7f0a55714dc0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f0a5574c040>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []
extension.pre_cluster_start(self)
# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)
started.append((node, p, mark))
if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 19.241 |
8_cythonno_x86_64_2_8 | test_network_topology_strategy_each_quorum | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAvailability object at 0x7fdbeea01610>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, using network topology strategy, with only
EACH_QUORUM reads.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
> self._start_cluster()
consistency_test.py:405:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fdbee8e1ca0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fdbee96b5e0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7fdbee96b5e0>, <subprocess.Popen object at 0x7fdbee95bac0>, 0), (<ccmlib.node.Node obj...x7fdbee95b640>, 0), (<ccmlib.node.Node object at 0x7fdbee9676d0>, <subprocess.Popen object at 0x7fdbee9cafd0>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fdbee95bac0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 39.829 |
8_cythonno_x86_64_2_8 | test_add_dc_after_mv_simple_replication | Failure | ccmlib.node.NodeError: C* process with 4375 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7fdbee9ca3a0>
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""
> self._add_dc_after_mv_test(1, False)
materialized_views_test.py:583:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7fdbee952880>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4375 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 12.427 |
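The "C* process with <pid> is terminated" variant comes from a pid probe rather than a log watch. On POSIX, such a probe is typically a zero-signal kill; a sketch of the idea (not ccmlib's actual _is_pid_running):
import os

def is_pid_running(pid):
    # Signal 0 delivers nothing but still performs the existence check.
    if pid is None:
        return False
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False       # no such process
    except PermissionError:
        return True        # exists, but owned by another user
    return True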
8_cythonno_x86_64_2_8 | test_really_complex_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <materialized_views_test.TestMaterializedViews object at 0x7fdbee967df0>
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
Test that materialized views are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fdbedddcdc0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fdbedc98040>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fdbedc98040>, <subprocess.Popen object at 0x7fdbee9c4cd0>, 0), (<ccmlib.node.Node obj... at 0x7fdbee9c4bb0>, 0), (<ccmlib.node.Node object at 0x7fdbed9a57c0>, <subprocess.Popen object at 0x7fdbede26d60>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fdbee9c4cd0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 23.144 |
8_cythonno_x86_64_3_8 | test_simple_strategy_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f5c5586ed00>
@pytest.mark.resource_intensive
def test_simple_strategy_users(self):
"""
Test for a single datacenter, users table.
"""
self.nodes = 5
self.rf = 3
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
]
logger.debug("Testing single dc, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
consistency_test.py:597:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f5c5579e190>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migrat..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f5c557e5850>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f5c557e5850>, <subprocess.Popen object at 0x7f5c55798b80>, 0), (<ccmlib.node.Node obj... at 0x7f5c55798310>, 0), (<ccmlib.node.Node object at 0x7f5c5578edc0>, <subprocess.Popen object at 0x7f5c55798d60>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f5c55798b80>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 23.234 |
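The (write CL, read CL) combinations these accuracy tests iterate over all reduce to quorum arithmetic: a quorum is floor(rf/2) + 1 replicas, and EACH_QUORUM must assemble that majority in every datacenter independently. For example:
def quorum(rf):
    return rf // 2 + 1   # classic majority: floor(rf/2) + 1

def each_quorum(rf_per_dc):
    # EACH_QUORUM needs a quorum in every datacenter independently.
    return {dc: quorum(rf) for dc, rf in rf_per_dc.items()}

assert quorum(3) == 2
assert each_quorum({'dc1': 3, 'dc2': 3}) == {'dc1': 2, 'dc2': 2}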
8_cythonno_x86_64_3_8 | test_add_dc_after_mv_network_replication | Failure | ccmlib.node.NodeError: C* process with 3151 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f5c55848580>
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""
> self._add_dc_after_mv_test({'dc1': 1}, True)
materialized_views_test.py:593:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f5c557dabb0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3151 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 12.034 |
8_cythonno_x86_64_4_8 | test_add_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3722 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7fc93d160850>
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:603:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7fc93436bf10>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3722 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 11.428 |
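Every resource-intensive test here stands its cluster up through ccmlib before the scenario proper begins; a minimal reproduction sketch using ccm's documented programmatic API (the directory, cluster name, and Cassandra version below are placeholders):
from ccmlib.cluster import Cluster

# Placeholders: point these at a scratch directory and an installed version.
cluster = Cluster('/tmp/ccm-repro', 'repro', cassandra_version='4.1.0')
try:
    cluster.populate(3).start(wait_for_binary_proto=True)
    for node in cluster.nodelist():
        print(node.name, node.is_running())
finally:
    cluster.stop()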
8_cythonno_x86_64_5_8 | test_network_topology_strategy_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f353f482160>
@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
logger.debug("Testing multiple dcs, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:651:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f353f3c4880>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f353f3f1490>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f353f3f1490>, <subprocess.Popen object at 0x7f353f3d20a0>, 0), (<ccmlib.node.Node obj... at 0x7f353f3a2850>, 0), (<ccmlib.node.Node object at 0x7f353f3d70d0>, <subprocess.Popen object at 0x7f353f3a2640>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f353f3d20a0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 25.575 |
8_cythonno_x86_64_5_8 | test_add_node_after_wide_mv_with_range_deletions | Failure | ccmlib.node.NodeError: C* process with 3507 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f353f3d27c0>
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670
Test that wide materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:691:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f353e6f9460>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3507 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 10.437 |
8_cythonno_x86_64_6_8 | test_network_topology_strategy_each_quorum_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f058e960520>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, users table, exercising only EACH_QUORUM
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, users, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
consistency_test.py:670:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f058e88c9a0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f058e833520>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f058e833520>, <subprocess.Popen object at 0x7f058e84afd0>, 0), (<ccmlib.node.Node obj... at 0x7f058e8c23a0>, 0), (<ccmlib.node.Node object at 0x7f058e8b84c0>, <subprocess.Popen object at 0x7f058e8c2970>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f058e84afd0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 22.391 |
8_cythonno_x86_64_6_8 | test_add_node_after_very_wide_mv | Failure | ccmlib.node.NodeError: C* process with 3500 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f058e8aea30>
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670
Test that very wide materialized views work as expected when adding a node.
"""
> session = self.prepare()
materialized_views_test.py:761:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f058e8c4970>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3500 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 8.793 |
8_cythonno_x86_64_6_8 | test_multidatacenter_local_quorum | Failure | ccmlib.node.TimeoutError: 17 Dec 2024 01:01:46 [node3] after 120.12/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
Head: INFO [main] 2024-12-17 00:59:46,388 YamlConfigura
Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
self = <snitch_test.TestDynamicEndpointSnitch object at 0x7f058e884ee0>
@pytest.mark.resource_intensive
@since('3.10')
def test_multidatacenter_local_quorum(self):
'''
@jira_ticket CASSANDRA-13074
If we do only local-datacenter reads in a multi-datacenter DES setup,
the DES should take effect and route around a degraded node
'''
def no_cross_dc(scores, cross_dc_nodes):
return all('/' + k.address() not in scores for k in cross_dc_nodes)
def snitchable(scores_before, scores_after, needed_nodes):
return all('/' + k.address() in scores_before and '/' + k.address()
in scores_after for k in needed_nodes)
cluster = self.cluster
cluster.populate([3, 3])
coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
# increase DES reset/update interval so we clear any cross-DC startup reads faster
cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
'dynamic_snitch_update_interval_in_ms': 50,
'phi_convict_threshold': 12})
# Delay reads on the degraded node by 50 milliseconds
degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
'-Dcassandra.allow_unsafe_join=true'])
> cluster.start()
snitch_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:526: in start
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:922: in start
node.watch_log_for_alive(self, from_mark=mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:686: in watch_log_for_alive
self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
start = 1734397186.482361, timeout = 120
msg = "Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:\n Head: INFO [main] 2024-12-17 00:59:46,388 YamlCon...activate(CassandraDaemon.java:724)\n\tat org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)\n"
node = 'node3'
@staticmethod
def raise_if_passed(start, timeout, msg, node=None):
if start + timeout < time.time():
> raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 17 Dec 2024 01:01:46 [node3] after 120.12/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
E Head: INFO [main] 2024-12-17 00:59:46,388 YamlConfigura
E Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
E at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError | 131.063 |
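Unlike the 7-second liveness gate, this failure is the second phase timing out: watch_log_for tails system.log from a saved mark until a pattern such as '127.0.0.1:7000.* is now UP' appears, or 120 seconds elapse. The underlying pattern is a bounded log tail; a sketch of the idea (illustrative, not ccmlib's implementation):
import re
import time

def watch_log_for(logfile, pattern, from_mark=0, timeout=120.0):
    # Re-scan the file from the saved byte offset until the regex matches.
    regex = re.compile(pattern)
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with open(logfile, 'rb') as f:
            f.seek(from_mark)
            for raw in f:
                line = raw.decode(errors='replace')
                if regex.search(line):
                    return line
        time.sleep(0.5)
    raise TimeoutError("Missing: [{!r}] not found in {}".format(pattern, logfile))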
8_cythonno_x86_64_7_8 | test_network_topology_strategy_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7f335953a910>
@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
"""
Test for multiple datacenters, counters table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
]
logger.debug("Testing multiple dcs, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:747:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f3359485400>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f33594544c0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f33594544c0>, <subprocess.Popen object at 0x7f335949d0d0>, 0), (<ccmlib.node.Node obj... at 0x7f335949da00>, 0), (<ccmlib.node.Node object at 0x7f3359500970>, <subprocess.Popen object at 0x7f3359498310>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f335949d0d0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 49.686 |
8_cythonno_x86_64_7_8 | test_add_write_survey_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3498 is terminated
self = <materialized_views_test.TestMaterializedViews object at 0x7f335948af70>
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node in write survey mode.
"""
> session = self.prepare()
materialized_views_test.py:805:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f335953f430>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3498 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 13.185 |
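Write-survey mode, which this test exercises, is switched on per node with the standard cassandra.write_survey JVM property; with a ccmlib node handle that start looks roughly like the sketch below (assuming the node does not fully join the ring, we skip the binary-protocol wait):
# `node` is a ccmlib.node.Node freshly added to an existing cluster.
node.start(jvm_args=['-Dcassandra.write_survey=true'],
           wait_for_binary_proto=False)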
8_cythonno_x86_64_7_8 | test_stop_decommission_too_few_replicas_multi_dc | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <topology_test.TestTopology object at 0x7f3359469dc0>
@since('3.12')
@pytest.mark.resource_intensive
def test_stop_decommission_too_few_replicas_multi_dc(self):
"""
Decommission should fail when it would result in the number of live replicas being less than
the replication factor. --force should bypass this requirement.
@jira_ticket CASSANDRA-12510
@expected_errors ToolError when the number of nodes would drop below the configured replicas in NTS/SimpleStrategy
"""
# we need to ignore this error log message, which is emitted during the test,
# because the dtest framework that reads the logs would otherwise deem the
# node erroneous and terminate it prematurely
self.fixture_dtest_setup.ignore_log_patterns = (r'.*Not enough live nodes to maintain replication factor*')
cluster = self.cluster
> cluster.populate([2, 2]).start()
topology_test.py:494:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7f3358469df0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f335845e280>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7f335845e280>, <subprocess.Popen object at 0x7f3359471430>, 0), (<ccmlib.node.Node obj... at 0x7f3359469970>, 0), (<ccmlib.node.Node object at 0x7f3358952c40>, <subprocess.Popen object at 0x7f335949ff40>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f3359471430>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 24.334 |
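The ignore_log_patterns knob used above matters because the dtest framework sweeps each node's logs for ERROR lines and fails (or tears down) nodes that emit unexpected ones. A sketch of that filtering idea (names are illustrative, not dtest's actual code):
import re

IGNORE_LOG_PATTERNS = (r'.*Not enough live nodes to maintain replication factor.*',)

def unexpected_errors(error_lines):
    # Keep only ERROR lines not whitelisted by an ignore pattern.
    ignored = [re.compile(p) for p in IGNORE_LOG_PATTERNS]
    return [line for line in error_lines
            if not any(rx.search(line) for rx in ignored)]

assert unexpected_errors(
    ['ERROR Not enough live nodes to maintain replication factor 3']) == []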
8_cythonno_x86_64_8_8 | test_network_topology_strategy_each_quorum_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7fb38fcca1c0>
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, counters table, exercising only EACH_QUORUM
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, counters, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:766:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fb38fbdf4f0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb38fcdbb80>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7fb38fcdbb80>, <subprocess.Popen object at 0x7fb38fc1a2e0>, 0), (<ccmlib.node.Node obj... at 0x7fb38fc1a340>, 0), (<ccmlib.node.Node object at 0x7fb38fcabdc0>, <subprocess.Popen object at 0x7fb38fc1ac40>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb38fc1a2e0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 27.259 |
8_cythonno_x86_64_8_8 | test_complex_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <materialized_views_test.TestMaterializedViews object at 0x7fb38fc2f460>
@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
Test that materialized views are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
materialized_views_test.py:2090:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fb38fbe80d0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb38fbe86d0>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fb38fbe86d0>, <subprocess.Popen object at 0x7fb38fc2adf0>, 0), (<ccmlib.node.Node obj... at 0x7fb38fc2a040>, 0), (<ccmlib.node.Node object at 0x7fb38fcc4820>, <subprocess.Popen object at 0x7fb38fc2a0d0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb38fc2adf0>
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True, ...):
[ccmlib.cluster.Cluster.start source elided -- identical to the listing in the first failure entry above]
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 23.221 |