Unit Test Results.

Designed for use with JUnit and Ant.

All Failures

Class | Name | Status | Type | Time(s)
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAvailability object at 0x7fb1a6aa1130>

@pytest.mark.resource_intensive
def test_network_topology_strategy(self):
"""
Test for multiple datacenters, using network topology replication strategy.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

> self._start_cluster()

consistency_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb1a69e8910>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb1a69ed4f0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7fb1a69ed4f0>, <subprocess.Popen object at 0x7fb1a6a52340>, 0), (<ccmlib.node.Node obj...x7fb1a6a522e0>, 0), (<ccmlib.node.Node object at 0x7fb1a69f5910>, <subprocess.Popen object at 0x7fb1a6a1e850>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb1a6a52340>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
51.373
8_cythonno_x86_64_1_8 | test_throttled_partition_update | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7fb1a69b9d00>

@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
@jira_ticket: CASSANDRA-13299, test break up large partition when repairing base with mv.

Provide a configuable batch size(cassandra.mv.mutation.row.count=100) to trottle number
of rows to be applied in one mutation
"""

> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb1a5edf250>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb1a6ac4e50>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fb1a6ac4e50>, <subprocess.Popen object at 0x7fb1a6a29e50>, 0), (<ccmlib.node.Node obj... at 0x7fb1a6a291c0>, 0), (<ccmlib.node.Node object at 0x7fb1a6a52f70>, <subprocess.Popen object at 0x7fb1a6a29cd0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb1a6a29e50>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
21.809
8_cythonno_x86_64_2_8 | test_network_topology_strategy_each_quorum | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAvailability object at 0x7f3f2b864610>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, using network topology strategy, only
the each quorum reads.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

> self._start_cluster()

consistency_test.py:405:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f3f2b745ca0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f3f2b7cf4f0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7f3f2b7cf4f0>, <subprocess.Popen object at 0x7f3f2b77e070>, 0), (<ccmlib.node.Node obj...x7f3f2b7b53d0>, 0), (<ccmlib.node.Node object at 0x7f3f2b7ca5b0>, <subprocess.Popen object at 0x7f3f2b7b5be0>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f3f2b77e070>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
34.827
8_cythonno_x86_64_2_8 | test_add_dc_after_mv_simple_replication | Failure | ccmlib.node.NodeError: C* process with 3999 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f3f2b82d3a0>

@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634

Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""

> self._add_dc_after_mv_test(1, False)

materialized_views_test.py:583:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3f2b82dee0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3999 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.859
8_cythonno_x86_64_2_8 | test_really_complex_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7f3f2b7cadf0>

@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
Test that a materialized view are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f3f2ac14b80>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f3f2b73d9a0>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f3f2b73d9a0>, <subprocess.Popen object at 0x7f3f2b7cf310>, 0), (<ccmlib.node.Node obj... at 0x7f3f2b7cf760>, 0), (<ccmlib.node.Node object at 0x7f3f2b8230d0>, <subprocess.Popen object at 0x7f3f2b81c700>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f3f2b7cf310>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
21.687
8_cythonno_x86_64_3_8 | test_simple_strategy_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f4d4149bcd0>

@pytest.mark.resource_intensive
def test_simple_strategy_users(self):
"""
Test for a single datacenter, users table, only the each quorum reads.
"""
self.nodes = 5
self.rf = 3

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
]

logger.debug("Testing single dc, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)

consistency_test.py:597:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f4d413c5cd0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migrat..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f4d414119a0>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f4d414119a0>, <subprocess.Popen object at 0x7f4d4140aa00>, 0), (<ccmlib.node.Node obj... at 0x7f4d4140ac40>, 0), (<ccmlib.node.Node object at 0x7f4d413bcd60>, <subprocess.Popen object at 0x7f4d414abb20>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4d4140aa00>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
23.612
8_cythonno_x86_64_3_8 | test_add_dc_after_mv_network_replication | Failure | ccmlib.node.NodeError: C* process with 2904 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4d41475550>

@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634

Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""

> self._add_dc_after_mv_test({'dc1': 1}, True)

materialized_views_test.py:593:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4d41411c70>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2904 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.889
8_cythonno_x86_64_4_8 | test_add_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3488 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f0b012fe8b0>

@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978

Test that materialized views work as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:603:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f0b00755370>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3488 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.273
8_cythonno_x86_64_5_8 | test_network_topology_strategy_users | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f8db4b0f160>

@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]

logger.debug("Testing multiple dcs, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:651:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f8db4a51880>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f8db4a20d60>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f8db4a20d60>, <subprocess.Popen object at 0x7f8db4a7e070>, 0), (<ccmlib.node.Node obj... at 0x7f8db4a73eb0>, 0), (<ccmlib.node.Node object at 0x7f8db4a64220>, <subprocess.Popen object at 0x7f8db4a73850>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f8db4a7e070>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
28.633
8_cythonno_x86_64_5_8 | test_add_node_after_wide_mv_with_range_deletions | Failure | ccmlib.node.NodeError: C* process with 3212 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f8db4a5f7c0>

@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670

Test that materialized views work with wide materialized views as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:691:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8dafd85580>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3212 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.795
8_cythonno_x86_64_6_8 | test_network_topology_strategy_each_quorum_users | Failure | ccmlib.node.NodeError: C* process with 923 is terminated

self = <consistency_test.TestAccuracy object at 0x7fae6c9c1490>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
Test for a multiple datacenters, users table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]

logger.debug("Testing multiple dcs, users, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)

consistency_test.py:670:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fae6c88d580>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 923 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
64.152
8_cythonno_x86_64_6_8 | test_add_node_after_very_wide_mv | Failure | ccmlib.node.NodeError: C* process with 3256 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fae6c90e940>

@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670

Test that materialized views work with very wide materialized views as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:761:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fae6c9c1910>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3256 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
39.082
8_cythonno_x86_64_6_8 | test_multidatacenter_local_quorum | Failure | ccmlib.node.TimeoutError: 16 Dec 2024 18:18:16 [node3] after 120.27/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log: Head: INFO [main] 2024-12-16 18:16:27,808 YamlConfigura Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724) at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)

self = <snitch_test.TestDynamicEndpointSnitch object at 0x7fae6c8da520>

@pytest.mark.resource_intensive
@since('3.10')
def test_multidatacenter_local_quorum(self):
'''
@jira_ticket CASSANDRA-13074

If we do only local datacenters reads in a multidatacenter DES setup,
DES should take effect and route around a degraded node
'''

def no_cross_dc(scores, cross_dc_nodes):
return all('/' + k.address() not in scores for k in cross_dc_nodes)

def snitchable(scores_before, scores_after, needed_nodes):
return all('/' + k.address() in scores_before and '/' + k.address()
in scores_after for k in needed_nodes)

cluster = self.cluster
cluster.populate([3, 3])
coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
# increase DES reset/update interval so we clear any cross-DC startup reads faster
cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
'dynamic_snitch_update_interval_in_ms': 50,
'phi_convict_threshold': 12})
# Delay reads on the degraded node by 50 milliseconds
degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
'-Dcassandra.allow_unsafe_join=true'])
> cluster.start()

snitch_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:526: in start
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:922: in start
node.watch_log_for_alive(self, from_mark=mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:686: in watch_log_for_alive
self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

start = 1734372976.0603232, timeout = 120
msg = "Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:\n Head: INFO [main] 2024-12-16 18:16:27,808 YamlCon...activate(CassandraDaemon.java:724)\n\tat org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)\n"
node = 'node3'

@staticmethod
def raise_if_passed(start, timeout, msg, node=None):
if start + timeout < time.time():
> raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 16 Dec 2024 18:18:16 [node3] after 120.27/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
E Head: INFO [main] 2024-12-16 18:16:27,808 YamlConfigura
E Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
E at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError
184.883
8_cythonno_x86_64_7_8 | test_network_topology_strategy_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f953fb658e0>

@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
"""
Test for multiple datacenters, counters table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
]

logger.debug("Testing multiple dcs, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:747:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f953faaf3d0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f953fa7f490>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f953fa7f490>, <subprocess.Popen object at 0x7f953fac8a90>, 0), (<ccmlib.node.Node obj... at 0x7f953fac8970>, 0), (<ccmlib.node.Node object at 0x7f953faca3a0>, <subprocess.Popen object at 0x7f953fac3250>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f953fac8a90>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
29.959
8_cythonno_x86_64_7_8 | test_add_write_survey_node_after_mv | Failure | ccmlib.node.NodeError: C* process with 3218 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f953fab5f40>

@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978

Test that materialized views work as expected when adding a node in write survey mode.
"""

> session = self.prepare()

materialized_views_test.py:805:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f953fb2b370>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3218 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.882
8_cythonno_x86_64_7_8 | test_stop_decommission_too_few_replicas_multi_dc | Failure | ccmlib.node.NodeError: C* process with 5497 is terminated

self = <topology_test.TestTopology object at 0x7f953fa93d90>

@since('3.12')
@pytest.mark.resource_intensive
def test_stop_decommission_too_few_replicas_multi_dc(self):
"""
Decommission should fail when it would result in the number of live replicas being less than
the replication factor. --force should bypass this requirement.
@jira_ticket CASSANDRA-12510
@expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
"""

# we need to ignore this error log message which is emitted during the test
# because dtest framework which reads the logs would evaluate the node is errorneous and
# it would terminate it prematurely
self.fixture_dtest_setup.ignore_log_patterns = (r'.*Not enough live nodes to maintain replication factor*')

cluster = self.cluster
> cluster.populate([2, 2]).start()

topology_test.py:494:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f953ef8d250>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5497 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
26.352
8_cythonno_x86_64_8_8 | test_network_topology_strategy_each_quorum_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7fb726989160>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, counters table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]

logger.debug("Testing multiple dcs, counters, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:766:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb7268a49a0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb7268b0940>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7fb7268b0940>, <subprocess.Popen object at 0x7fb7268dc190>, 0), (<ccmlib.node.Node obj... at 0x7fb7268dc7f0>, 0), (<ccmlib.node.Node object at 0x7fb726964fd0>, <subprocess.Popen object at 0x7fb7268dc850>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb7268dc190>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
25.520
8_cythonno_x86_64_8_8 | test_complex_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7fb7268ee400>

@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
Test that a materialized view are consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2090:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb7268bb610>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb7268bbf40>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fb7268bbf40>, <subprocess.Popen object at 0x7fb7268e8640>, 0), (<ccmlib.node.Node obj... at 0x7fb7268e8760>, 0), (<ccmlib.node.Node object at 0x7fb7268a4070>, <subprocess.Popen object at 0x7fb7268523d0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb7268e8640>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
21.883
8_cythonno_x86_64_1_8 | test_network_topology_strategy | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAvailability object at 0x7fefe79b7250>

@pytest.mark.resource_intensive
def test_network_topology_strategy(self):
"""
Test for multiple datacenters, using network topology replication strategy.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

> self._start_cluster()

consistency_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fefe78fe520>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fefe79038e0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7fefe79038e0>, <subprocess.Popen object at 0x7fefe78c4040>, 0), (<ccmlib.node.Node obj...x7fefe78c41f0>, 0), (<ccmlib.node.Node object at 0x7fefe79ed040>, <subprocess.Popen object at 0x7fefe78c48b0>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fefe78c4040>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 30.841
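The NodeError failures above all share the same shape: cluster.start() polls the freshly spawned process with _wait_for_running(p, timeout_s=7) and finds it already dead before the <started listening> log line can appear. A minimal sketch for re-running one such failure in isolation, assuming a cassandra-dtest checkout with its requirements installed; -x and --log-cli-level are standard pytest options, not dtest-specific:

    # Hypothetical local reproduction; run from the cassandra-dtest checkout.
    import pytest

    pytest.main([
        "consistency_test.py::TestAvailability::test_network_topology_strategy",
        "-x",                      # stop at the first failure
        "--log-cli-level=DEBUG",   # stream dtest/ccm logging to the console
    ])

Note that the test is marked resource_intensive and populates three datacenters of three nodes each, so it needs memory and loopback aliases for nine nodes.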
Class: 8_cythonno_x86_64_1_8 | Test: test_throttled_partition_update | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7fefe78cfd60>

@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
@jira_ticket: CASSANDRA-13299, test break up large partition when repairing base with mv.

Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the number
of rows to be applied in one mutation
"""

> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fefe793b520>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fefe796b460>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fefe796b460>, <subprocess.Popen object at 0x7fefe78886a0>, 0), (<ccmlib.node.Node obj... at 0x7fefe7888a90>, 0), (<ccmlib.node.Node object at 0x7fefe78c41c0>, <subprocess.Popen object at 0x7fefe6cbe850>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fefe78886a0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 19.146
Class: 8_cythonno_x86_64_2_8 | Test: test_network_topology_strategy_each_quorum | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAvailability object at 0x7f4701ee8580>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, using network topology strategy, only
the each quorum reads.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

> self._start_cluster()

consistency_test.py:405:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f4701dc9c10>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f4701dc1610>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7f4701dc1610>, <subprocess.Popen object at 0x7f4701eab880>, 0), (<ccmlib.node.Node obj...x7f4701eab100>, 0), (<ccmlib.node.Node object at 0x7f4701e73f10>, <subprocess.Popen object at 0x7f4701df9c70>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4701eab880>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 36.962
Class: 8_cythonno_x86_64_2_8 | Test: test_add_dc_after_mv_simple_replication | Status: Failure | Type: ccmlib.node.NodeError: C* process with 4367 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4701eb2310>

@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634

Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""

> self._add_dc_after_mv_test(1, False)

materialized_views_test.py:583:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4701ea2af0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4367 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.267
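The "C* process with 4367 is terminated" variant is raised from node.py's raise_node_error_if_cassandra_process_is_terminated() while tailing the log, so the report only records that the JVM died, not why. A hedged sketch for pulling the tail of each dead node's system.log, using only ccmlib calls that already appear in these tracebacks (nodelist(), is_running(), logfilename(), name); the cluster object is assumed to come from an interactive ccm/ccmlib session:

    # Sketch only: `cluster` is assumed to be the ccmlib.cluster.Cluster of the
    # dtest cluster that just failed.
    def dump_dead_node_logs(cluster, tail_lines=50):
        for node in cluster.nodelist():
            if node.is_running():
                continue
            try:
                with open(node.logfilename()) as f:
                    tail = f.readlines()[-tail_lines:]
            except FileNotFoundError:
                tail = ["<no system.log written>\n"]
            print("==== {} ====".format(node.name))
            print("".join(tail), end="")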
Class: 8_cythonno_x86_64_2_8 | Test: test_really_complex_repair | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7f4701e4ed60>

@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f470127fe50>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f470127f070>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f470127f070>, <subprocess.Popen object at 0x7f4701eabd90>, 0), (<ccmlib.node.Node obj... at 0x7f4701241a60>, 0), (<ccmlib.node.Node object at 0x7f47014a09a0>, <subprocess.Popen object at 0x7f47012411f0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4701eabd90>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 24.135
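A cosmetic detail in these dumps: jvm_args carries one copy of -Dcassandra.migration_task_wait_in_seconds=... per node, seemingly because the same flag is appended once for each node being started. The JVM lets the last occurrence of a -D flag win, so the repeats are harmless, but they can be collapsed with an order-preserving de-duplication (a sketch, not something ccmlib does itself):

    jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=10'] * 5

    # dict.fromkeys preserves insertion order (Python 3.7+), so this keeps the
    # first copy of each distinct flag and drops the verbatim repeats.
    deduped = list(dict.fromkeys(jvm_args))
    print(deduped)   # ['-Dcassandra.migration_task_wait_in_seconds=10']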
Class: 8_cythonno_x86_64_3_8 | Test: test_simple_strategy_users | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f347272dc10>

@pytest.mark.resource_intensive
def test_simple_strategy_users(self):
"""
Test for a single datacenter, users table.
"""
self.nodes = 5
self.rf = 3

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
]

logger.debug("Testing single dc, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)

consistency_test.py:597:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f347265d0a0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migrat..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f34726a3cd0>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f34726a3cd0>, <subprocess.Popen object at 0x7f34726a83a0>, 0), (<ccmlib.node.Node obj... at 0x7f34726a80a0>, 0), (<ccmlib.node.Node object at 0x7f347264eca0>, <subprocess.Popen object at 0x7f34726a8340>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f34726a83a0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 23.186
Class: 8_cythonno_x86_64_3_8 | Test: test_add_dc_after_mv_network_replication | Status: Failure | Type: ccmlib.node.NodeError: C* process with 3155 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f3472707490>

@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634

Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""

> self._add_dc_after_mv_test({'dc1': 1}, True)

materialized_views_test.py:593:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:523: in _add_dc_after_mv_test
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3471adfca0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3155 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.546
Class: 8_cythonno_x86_64_4_8 | Test: test_add_node_after_mv | Status: Failure | Type: ccmlib.node.NodeError: C* process with 3714 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fb3a087c7f0>

@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978

Test that materialized views work as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:603:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb39ee6df70>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3714 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.017
Class: 8_cythonno_x86_64_5_8 | Test: test_network_topology_strategy_users | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7fe2581760a0>

@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]

logger.debug("Testing multiple dcs, users")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:651:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fe2580b77c0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fe25808f6d0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7fe25808f6d0>, <subprocess.Popen object at 0x7fe258095340>, 0), (<ccmlib.node.Node obj... at 0x7fe258095220>, 0), (<ccmlib.node.Node object at 0x7fe2580b79a0>, <subprocess.Popen object at 0x7fe258095e20>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fe258095340>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 42.712
Class: 8_cythonno_x86_64_5_8 | Test: test_add_node_after_wide_mv_with_range_deletions | Status: Failure | Type: ccmlib.node.NodeError: C* process with 3507 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fe2580c5700>

@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670

Test that wide materialized views work as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:691:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fe2580d85b0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3507 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 14.247
Class: 8_cythonno_x86_64_6_8 | Test: test_network_topology_strategy_each_quorum_users | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f4f145374c0>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, users table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]

logger.debug("Testing multiple dcs, users, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)

consistency_test.py:670:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f4f14464a00>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f4f144094f0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f4f144094f0>, <subprocess.Popen object at 0x7f4f14449ee0>, 0), (<ccmlib.node.Node obj... at 0x7f4f14492550>, 0), (<ccmlib.node.Node object at 0x7f4f14501ee0>, <subprocess.Popen object at 0x7f4f14492820>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f4f14449ee0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 24.938
Class: 8_cythonno_x86_64_6_8 | Test: test_add_node_after_very_wide_mv | Status: Failure | Type: ccmlib.node.NodeError: C* process with 3495 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4f14488af0>

@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670

Test that very wide materialized views work as expected when adding a node.
"""

> session = self.prepare()

materialized_views_test.py:761:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4f1449c3a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3495 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.182
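These resource-intensive tests run up to nine nodes on consecutive loopback addresses (the tracebacks show interfaces up to 127.0.0.9:9042), and cluster.start() calls common.assert_socket_available(itf) for every interface before launching anything. A standalone pre-flight check using only the standard library, for when the loopback aliases themselves are suspect; the 127.0.0.1-127.0.0.9 range and port 9042 are assumptions taken from the interfaces shown above:

    import socket

    def check_loopback_aliases(count=9, port=9042):
        """Try to bind each 127.0.0.N:port briefly; return the ones that fail."""
        failures = []
        for n in range(1, count + 1):
            addr = "127.0.0.{}".format(n)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                s.bind((addr, port))
            except OSError as exc:
                failures.append((addr, port, exc))
            finally:
                s.close()
        return failures

    print(check_loopback_aliases())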
Class: 8_cythonno_x86_64_6_8 | Test: test_multidatacenter_local_quorum | Status: Failure | Type: ccmlib.node.TimeoutError: 16 Dec 2024 18:09:10 [node3] after 120.13/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log: Head: INFO [main] 2024-12-16 18:07:10,470 YamlConfigura Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724) at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)

self = <snitch_test.TestDynamicEndpointSnitch object at 0x7f4f1445bf40>

@pytest.mark.resource_intensive
@since('3.10')
def test_multidatacenter_local_quorum(self):
'''
@jira_ticket CASSANDRA-13074

If we do only local-datacenter reads in a multidatacenter DES setup,
DES should take effect and route around a degraded node
'''

def no_cross_dc(scores, cross_dc_nodes):
return all('/' + k.address() not in scores for k in cross_dc_nodes)

def snitchable(scores_before, scores_after, needed_nodes):
return all('/' + k.address() in scores_before and '/' + k.address()
in scores_after for k in needed_nodes)

cluster = self.cluster
cluster.populate([3, 3])
coordinator_node, healthy_node, degraded_node, node4, node5, node6 = cluster.nodelist()
# increase DES reset/update interval so we clear any cross-DC startup reads faster
cluster.set_configuration_options(values={'dynamic_snitch_reset_interval_in_ms': 10000,
'dynamic_snitch_update_interval_in_ms': 50,
'phi_convict_threshold': 12})
# Delay reads on the degraded node by 50 milliseconds
degraded_node.start(jvm_args=['-Dcassandra.test.read_iteration_delay_ms=50',
'-Dcassandra.allow_unsafe_join=true'])
> cluster.start()

snitch_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:526: in start
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:922: in start
node.watch_log_for_alive(self, from_mark=mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:686: in watch_log_for_alive
self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

start = 1734372430.4562736, timeout = 120
msg = "Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:\n Head: INFO [main] 2024-12-16 18:07:10,470 YamlCon...activate(CassandraDaemon.java:724)\n\tat org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)\n"
node = 'node3'

@staticmethod
def raise_if_passed(start, timeout, msg, node=None):
if start + timeout < time.time():
> raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 16 Dec 2024 18:09:10 [node3] after 120.13/120 seconds Missing: ['127.0.0.1:7000.* is now UP'] not found in system.log:
E Head: INFO [main] 2024-12-16 18:07:10,470 YamlConfigura
E Tail: ....cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
E at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError
Time(s): 134.881
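This TimeoutError differs from the NodeError pattern above: node3 started, but watch_log_for_alive() gave up after 120 seconds waiting for the gossip line '127.0.0.1:7000.* is now UP'. When reproducing on a slow machine, the same wait can be issued by hand with a larger timeout via watch_log_for(), which already appears in the traceback; the 300-second value below is an arbitrary assumption:

    # Sketch: `node3` is assumed to be the ccmlib.node.Node for node3 of the
    # failing cluster; mark_log() and watch_log_for() both appear in the
    # tracebacks above.
    mark = node3.mark_log()
    # ... restart the peer whose UP message we are waiting for ...
    node3.watch_log_for("127.0.0.1:7000.* is now UP", from_mark=mark, timeout=300)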
Class: 8_cythonno_x86_64_7_8 | Test: test_network_topology_strategy_counters | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7fb66e8278e0>

@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
"""
Test for multiple datacenters, counters table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
]

logger.debug("Testing multiple dcs, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:747:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb66e7713d0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb66e7413a0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7fb66e7413a0>, <subprocess.Popen object at 0x7fb66e7e5a60>, 0), (<ccmlib.node.Node obj... at 0x7fb66e7e5910>, 0), (<ccmlib.node.Node object at 0x7fb66e781b80>, <subprocess.Popen object at 0x7fb66e7e5d90>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb66e7e5a60>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 48.674
Class: 8_cythonno_x86_64_7_8 | Test: test_add_write_survey_node_after_mv | Status: Failure | Type: ccmlib.node.NodeError: C* process with 3504 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fb66e777f40>

@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978

Test that materialized views work as expected when adding a node in write survey mode.
"""

> session = self.prepare()

materialized_views_test.py:805:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb66e795130>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3504 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.196
Class: 8_cythonno_x86_64_7_8 | Test: test_stop_decommission_too_few_replicas_multi_dc | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <topology_test.TestTopology object at 0x7fb66e756d90>

@since('3.12')
@pytest.mark.resource_intensive
def test_stop_decommission_too_few_replicas_multi_dc(self):
"""
Decommission should fail when it would result in the number of live replicas being less than
the replication factor. --force should bypass this requirement.
@jira_ticket CASSANDRA-12510
@expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
"""

# we need to ignore this error log message, which is emitted during the test,
# because the dtest framework that reads the logs would otherwise consider the node
# erroneous and terminate it prematurely
self.fixture_dtest_setup.ignore_log_patterns = (r'.*Not enough live nodes to maintain replication factor*')

cluster = self.cluster
> cluster.populate([2, 2]).start()

topology_test.py:494:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb66d746370>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb66d750d30>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7fb66d750d30>, <subprocess.Popen object at 0x7fb66e785f10>, 0), (<ccmlib.node.Node obj... at 0x7fb66e7919a0>, 0), (<ccmlib.node.Node object at 0x7fb66d755370>, <subprocess.Popen object at 0x7fb66e791dc0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb66e785f10>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 24.331
Class: 8_cythonno_x86_64_8_8 | Test: test_network_topology_strategy_each_quorum_counters | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <consistency_test.TestAccuracy object at 0x7f57dbd68220>

@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, counters table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])

combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]

logger.debug("Testing multiple dcs, counters, each quorum reads")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),

consistency_test.py:766:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f57dbc46130>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f57dbc7ac40>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7f57dbc7ac40>, <subprocess.Popen object at 0x7f57dbd2fb80>, 0), (<ccmlib.node.Node obj... at 0x7f57dbd2f880>, 0), (<ccmlib.node.Node object at 0x7f57dbcb3460>, <subprocess.Popen object at 0x7f57dbcaa070>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f57dbd2fb80>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 27.808
Class: 8_cythonno_x86_64_8_8 | Test: test_complex_repair | Status: Failure | Type: ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <materialized_views_test.TestMaterializedViews object at 0x7f57dbcd4430>

@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
> session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2090:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f57dbcd41c0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f57dbcd41f0>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7f57dbcd41f0>, <subprocess.Popen object at 0x7f57dbca5fd0>, 0), (<ccmlib.node.Node obj... at 0x7f57dbca5e20>, 0), (<ccmlib.node.Node object at 0x7f57dbd26130>, <subprocess.Popen object at 0x7f57dbca5580>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f57dbca5fd0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 23.495