Unit Test Results.

Designed for use with JUnit and Ant.

All Failures

Class | Name | Status | Type | Time(s)
8_cythonno_x86_64_10_64 | test_base_view_consistency_on_failure_before_mv_apply | Failure | ccmlib.node.NodeError: C* process with 8119 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7efdcc483ca0>

def test_base_view_consistency_on_failure_before_mv_apply(self):
> self._test_base_view_consistency_on_crash("before")

materialized_views_test.py:2556:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2569: in _test_base_view_consistency_on_crash
self.prepare(rf=1, install_byteman=True)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7efdca871490>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8119 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.034
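Note: every ccmlib.node.NodeError in this report comes from the same check, shown in the trace above: watch_log_for() calls raise_node_error_if_cassandra_process_is_terminated(), which raises once _is_pid_running() reports that the Cassandra process has exited. The body of _is_pid_running() is not shown in these traces; purely as an illustration of the technique (not ccmlib's actual implementation), a pid-liveness check on POSIX is commonly written with the signal-0 idiom:

import errno
import os

def is_pid_running(pid):
    # Hypothetical stand-in for ccmlib's _is_pid_running(). Sending
    # signal 0 performs existence and permission checks without
    # actually delivering a signal to the target process.
    try:
        os.kill(pid, 0)
    except OSError as e:
        if e.errno == errno.ESRCH:    # no such process
            return False
        if e.errno == errno.EPERM:    # process exists but belongs to another user
            return True
        raise
    return True

if __name__ == '__main__':
    print(is_pid_running(os.getpid()))    # True: our own process is alive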
8_cythonno_x86_64_12_64 | test_local_quorum_bootstrap | Failure | ccmlib.node.NodeError: C* process with 1140 is terminated

self = <bootstrap_test.TestBootstrap object at 0x7f3abb847dc0>

def test_local_quorum_bootstrap(self):
"""
Test that CL local_quorum works while a node is bootstrapping.
@jira_ticket CASSANDRA-8058
"""
cluster = self.cluster
cluster.set_environment_variable('CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
cluster.populate([1, 1])
> cluster.start()

bootstrap_test.py:560:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3ab99ad340>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 1140 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 9.780
8_cythonno_x86_64_15_64 | test_multi_dc_replace_with_rf1 | Failure | ccmlib.node.NodeError: C* process with 6175 is terminated

self = <replace_address_test.TestReplaceAddress object at 0x7faf2ed235b0>

def test_multi_dc_replace_with_rf1(self):
"""
Test that multi-dc replace works when rf=1 on each dc
"""
> self._setup(n=[1, 1])

replace_address_test.py:646:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
replace_address_test.py:76: in _setup
self.cluster.start(jvm_args=jvm_args)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7faf2d12a4c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6175 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.459
8_cythonno_x86_64_16_64 | test_putget_2dc_rf1 | Failure | ccmlib.node.NodeError: C* process with 4253 is terminated

self = <multidc_putget_test.TestMultiDCPutGet object at 0x7f730f934b20>

def test_putget_2dc_rf1(self):
""" Simple put-get test for 2 DC with one node each (RF=1) [catches #3539] """
cluster = self.cluster
> cluster.populate([1, 1]).start()

multidc_putget_test.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f730f8831c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4253 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 16.465
8_cythonno_x86_64_16_64 | test_cannot_restart_with_different_rack | Failure | ccmlib.node.NodeError: C* process with 7524 is terminated

self = <replication_test.TestSnitchConfigurationUpdate object at 0x7f730f8d65e0>

def test_cannot_restart_with_different_rack(self):
"""
@jira_ticket CASSANDRA-10242

Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified.
"""
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'
.format('GossipingPropertyFileSnitch')})

node1 = cluster.nodelist()[0]

with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
topo_file.write(line + os.linesep)

logger.debug("Starting node {} with rack1".format(node1.address()))
> node1.start(wait_for_binary_proto=True)

replication_test.py:614:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f730ed42b20>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7524 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.967
8_cythonno_x86_64_17_64 | test_putget_2dc_rf2 | Failure | ccmlib.node.NodeError: C* process with 3606 is terminated

self = <multidc_putget_test.TestMultiDCPutGet object at 0x7fc6a0048130>

def test_putget_2dc_rf2(self):
""" Simple put-get test for 2 DC with 2 node each (RF=2) -- tests cross-DC efficient writes """
cluster = self.cluster
> cluster.populate([2, 2]).start()

multidc_putget_test.py:25:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc69d0baa30>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3606 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 19.682
8_cythonno_x86_64_19_64 | test_dont_archive_commitlog | Failure | AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

self = <snapshot_test.TestArchiveCommitlog object at 0x7f581499ff40>

def test_dont_archive_commitlog(self):
"""
Run the archive commitlog test, but forget to add the restore commands
"""
> self.run_archive_commitlog(restore_point_in_time=False, restore_archived_commitlog=False)

snapshot_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
snapshot_test.py:303: in run_archive_commitlog
advance_to_next_cl_segment(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

session = <cassandra.cluster.Session object at 0x7f5812f1d130>
commitlog_dir = '/home/cassandra/cassandra/build/run-python-dtest.bMFOfU/dtest-lq8nplnq/test/node1/commitlogs'
keyspace_name = 'ks', table_name = 'junk_table', timeout = 180

def advance_to_next_cl_segment(session, commitlog_dir,
keyspace_name='ks', table_name='junk_table',
timeout=180):
"""
This is a hack to work around problems like CASSANDRA-11811.

The problem happens in commitlog-replaying tests, like the snapshot and CDC
tests. If we replay the first commitlog that's created, we wind up
replaying some mutations that initialize system tables, so this function
advances the node to the next CL by filling up the first one.
"""
session.execute(
'CREATE TABLE {ks}.{tab} ('
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
'e uuid, f uuid, g uuid, h uuid'
')'.format(ks=keyspace_name, tab=table_name)
)
prepared_insert = session.prepare(
'INSERT INTO {ks}.{tab} '
'(a, b, c, d, e, f, g, h) '
'VALUES ('
'uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid()'
')'.format(ks=keyspace_name, tab=table_name)
)

# record segments that we want to advance past
initial_cl_files = _files_in(commitlog_dir)

start = time.time()
stop_time = start + timeout
rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))

while _files_in(commitlog_dir) <= initial_cl_files:
elapsed = time.time() - start
rate_limited_debug_logger(' commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
> assert (
time.time() <= stop_time), ("It's been over {s}s and we haven't written a new " +
"commitlog segment. Something is wrong.").format(s=timeout)
E AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

tools/hacks.py:59: AssertionError
Time(s): 253.267
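Note: this failure and the two archive_commitlog failures below all time out inside advance_to_next_cl_segment(), which inserts rows until a new commitlog segment file appears and throttles its progress messages through get_rate_limited_function(logger.debug, 5). That helper's body is not part of the trace; a plausible sketch, assuming the usual semantics (the wrapped callable fires at most once per interval and intermediate calls are dropped), looks like this:

import time

def get_rate_limited_function(func, interval):
    # Assumed behavior, not the dtest implementation: returns a wrapper
    # that invokes func at most once every `interval` seconds.
    last_called = [float('-inf')]    # mutable cell captured by the closure

    def wrapper(*args, **kwargs):
        now = time.monotonic()
        if now - last_called[0] >= interval:
            last_called[0] = now
            return func(*args, **kwargs)
        # calls arriving inside the interval are silently dropped

    return wrapper

debug = get_rate_limited_function(print, 5)
for _ in range(3):
    debug('at most one line per 5 seconds')    # only the first call prints
    time.sleep(1)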
8_cythonno_x86_64_1_64 | test_base_column_in_view_pk_commutative_tombstone_with_flush | Failure | ccmlib.node.NodeError: C* process with 6666 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f25e3690f10>

@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
> self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)

materialized_views_test.py:1774:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1785: in _test_base_column_in_view_pk_commutative_tombstone_
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f25e3595070>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6666 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 13.381
8_cythonno_x86_64_20_64 | test_switch_data_center_startup_fails | Failure | ccmlib.node.NodeError: C* process with 8836 is terminated

self = <replication_test.TestSnitchConfigurationUpdate object at 0x7fb9a0f58a00>

def test_switch_data_center_startup_fails(self):
"""
@jira_ticket CASSANDRA-9474

Confirm that switching data centers fails to bring up the node.
"""
expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
"Please fix the snitch configuration, decommission and rebootstrap this node "
"or use the flag -Dcassandra.ignore_dc=true.")
self.fixture_dtest_setup.ignore_log_patterns = [expected_error]

cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

node = cluster.nodelist()[0]
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
topo_file.write("dc=dc9" + os.linesep)
topo_file.write("rack=rack1" + os.linesep)

> cluster.start()

replication_test.py:768:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb998f71400>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8836 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 37.966
8_cythonno_x86_64_20_64 | test_archive_commitlog_point_in_time | Failure | AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

self = <snapshot_test.TestArchiveCommitlog object at 0x7fb9a0f0e0d0>

def test_archive_commitlog_point_in_time(self):
"""
Test archive commit log with restore_point_in_time setting
"""
> self.run_archive_commitlog(restore_point_in_time=True)

snapshot_test.py:261:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
snapshot_test.py:303: in run_archive_commitlog
advance_to_next_cl_segment(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

session = <cassandra.cluster.Session object at 0x7fb9a0e8bbb0>
commitlog_dir = '/home/cassandra/cassandra/build/run-python-dtest.F1BMv1/dtest-v78dgbme/test/node1/commitlogs'
keyspace_name = 'ks', table_name = 'junk_table', timeout = 180

def advance_to_next_cl_segment(session, commitlog_dir,
keyspace_name='ks', table_name='junk_table',
timeout=180):
"""
This is a hack to work around problems like CASSANDRA-11811.

The problem happens in commitlog-replaying tests, like the snapshot and CDC
tests. If we replay the first commitlog that's created, we wind up
replaying some mutations that initialize system tables, so this function
advances the node to the next CL by filling up the first one.
"""
session.execute(
'CREATE TABLE {ks}.{tab} ('
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
'e uuid, f uuid, g uuid, h uuid'
')'.format(ks=keyspace_name, tab=table_name)
)
prepared_insert = session.prepare(
'INSERT INTO {ks}.{tab} '
'(a, b, c, d, e, f, g, h) '
'VALUES ('
'uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid()'
')'.format(ks=keyspace_name, tab=table_name)
)

# record segments that we want to advance past
initial_cl_files = _files_in(commitlog_dir)

start = time.time()
stop_time = start + timeout
rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))

while _files_in(commitlog_dir) <= initial_cl_files:
elapsed = time.time() - start
rate_limited_debug_logger(' commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
> assert (
time.time() <= stop_time), ("It's been over {s}s and we haven't written a new " +
"commitlog segment. Something is wrong.").format(s=timeout)
E AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

tools/hacks.py:59: AssertionError
Time(s): 264.289
8_cythonno_x86_64_21_64 | test_archive_commitlog_point_in_time_ln | Failure | AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

self = <snapshot_test.TestArchiveCommitlog object at 0x7fcf9a1a8880>

def test_archive_commitlog_point_in_time_ln(self):
"""
Test archive commit log with restore_point_in_time setting
"""
> self.run_archive_commitlog(restore_point_in_time=True, archive_command='ln')

snapshot_test.py:267:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
snapshot_test.py:303: in run_archive_commitlog
advance_to_next_cl_segment(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

session = <cassandra.cluster.Session object at 0x7fcf982e1520>
commitlog_dir = '/home/cassandra/cassandra/build/run-python-dtest.Cm8yOP/dtest-3qwn9cig/test/node1/commitlogs'
keyspace_name = 'ks', table_name = 'junk_table', timeout = 180

def advance_to_next_cl_segment(session, commitlog_dir,
keyspace_name='ks', table_name='junk_table',
timeout=180):
"""
This is a hack to work around problems like CASSANDRA-11811.

The problem happens in commitlog-replaying tests, like the snapshot and CDC
tests. If we replay the first commitlog that's created, we wind up
replaying some mutations that initialize system tables, so this function
advances the node to the next CL by filling up the first one.
"""
session.execute(
'CREATE TABLE {ks}.{tab} ('
'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
'e uuid, f uuid, g uuid, h uuid'
')'.format(ks=keyspace_name, tab=table_name)
)
prepared_insert = session.prepare(
'INSERT INTO {ks}.{tab} '
'(a, b, c, d, e, f, g, h) '
'VALUES ('
'uuid(), uuid(), uuid(), uuid(), '
'uuid(), uuid(), uuid(), uuid()'
')'.format(ks=keyspace_name, tab=table_name)
)

# record segments that we want to advance past
initial_cl_files = _files_in(commitlog_dir)

start = time.time()
stop_time = start + timeout
rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))

while _files_in(commitlog_dir) <= initial_cl_files:
elapsed = time.time() - start
rate_limited_debug_logger(' commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
> assert (
time.time() <= stop_time), ("It's been over {s}s and we haven't written a new " +
"commitlog segment. Something is wrong.").format(s=timeout)
E AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

tools/hacks.py:59: AssertionError
Time(s): 265.000
8_cythonno_x86_64_22_64 | test_decommission_after_drain_is_invalid | Failure | ccmlib.node.NodeError: C* process with 3554 is terminated

self = <nodetool_test.TestNodetool object at 0x7f5a235ac8e0>

def test_decommission_after_drain_is_invalid(self):
"""
@jira_ticket CASSANDRA-8741

Running a decommission after a drain should generate
an unsupported operation message and exit with an error
code (which we receive as a ToolError exception).
"""
cluster = self.cluster
> cluster.populate([3]).start()

nodetool_test.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f5a22acddf0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3554 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.886
8_cythonno_x86_64_23_64 | test_correct_dc_rack_in_nodetool_info | Failure | ccmlib.node.NodeError: C* process with 3800 is terminated

self = <nodetool_test.TestNodetool object at 0x7f8ad3fcbeb0>

def test_correct_dc_rack_in_nodetool_info(self):
"""
@jira_ticket CASSANDRA-10382

Test that nodetool info returns the correct rack and dc
"""

cluster = self.cluster
cluster.populate([2, 2])
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

for i, node in enumerate(cluster.nodelist()):
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
for line in ["dc={}".format(node.data_center), "rack=rack{}".format(i % 2)]:
snitch_file.write(line + os.linesep)

> cluster.start()

nodetool_test.py:59:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8ab3f37760>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3800 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 23.654
8_cythonno_x86_64_24_64 | test_nodetool_timeout_commands | Failure | ccmlib.node.NodeError: C* process with 3507 is terminated

self = <nodetool_test.TestNodetool object at 0x7f6c2d0e5a60>

@since('3.4')
def test_nodetool_timeout_commands(self):
"""
@jira_ticket CASSANDRA-10953

Test that nodetool gettimeout and settimeout work at a basic level
"""
cluster = self.cluster
> cluster.populate([1]).start()

nodetool_test.py:85:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f6c2c03c9a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3507 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 8.988
8_cythonno_x86_64_24_64 | test_prefer_local_reconnect_on_listen_address | Failure | ccmlib.node.NodeError: C* process with 7172 is terminated

self = <snitch_test.TestGossipingPropertyFileSnitch object at 0x7f6c2d008310>

def test_prefer_local_reconnect_on_listen_address(self):
"""
@jira_ticket CASSANDRA-9748
@jira_ticket CASSANDRA-8084

Test that it's possible to connect over the broadcast_address when
listen_on_broadcast_address=true and that GossipingPropertyFileSnitch
reconnects via the listen_address when prefer_local=true
"""

NODE1_LISTEN_ADDRESS = '127.0.0.1'
NODE1_BROADCAST_ADDRESS = '127.0.0.3'

NODE2_LISTEN_ADDRESS = '127.0.0.2'
NODE2_BROADCAST_ADDRESS = '127.0.0.4'

STORAGE_PORT = 7000

cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()

running40 = node1.get_base_cassandra_version() >= 4.0

cluster.seeds = [NODE1_BROADCAST_ADDRESS]
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
'listen_on_broadcast_address': 'true'})
node1.set_configuration_options(values={'broadcast_address': NODE1_BROADCAST_ADDRESS})
node2.auto_bootstrap = True
node2.set_configuration_options(values={'broadcast_address': NODE2_BROADCAST_ADDRESS})

for node in cluster.nodelist():
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
snitch_file.write("dc=dc1" + os.linesep)
snitch_file.write("rack=rack1" + os.linesep)
snitch_file.write("prefer_local=true" + os.linesep)

> node1.start(wait_for_binary_proto=True)

snitch_test.py:66:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f6c240ce250>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7172 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 8.843
8_cythonno_x86_64_25_64 | test_cleanup_when_no_replica_with_index | Failure | ccmlib.node.NodeError: C* process with 6623 is terminated

self = <nodetool_test.TestNodetool object at 0x7f321ef27a60>

@since('3.0')
def test_cleanup_when_no_replica_with_index(self):
> self._cleanup_when_no_replica(True)

nodetool_test.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
nodetool_test.py:125: in _cleanup_when_no_replica
self.cluster.populate([1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f321c174790>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6623 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.387
8_cythonno_x86_64_26_64 | test_cleanup_when_no_replica_without_index | Failure | ccmlib.node.NodeError: C* process with 4931 is terminated

self = <nodetool_test.TestNodetool object at 0x7f24db09bdc0>

@since('3.0')
def test_cleanup_when_no_replica_without_index(self):
> self._cleanup_when_no_replica(False)

nodetool_test.py:118:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
nodetool_test.py:125: in _cleanup_when_no_replica
self.cluster.populate([1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f24d83b92e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4931 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 18.280
8_cythonno_x86_64_27_64 | test_view_metadata_cleanup | Failure | ccmlib.node.NodeError: C* process with 5152 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fb8f4a04850>

def test_view_metadata_cleanup(self):
"""
dropping a keyspace or view should clear built_views and view_build_status
"""
> session = self.prepare(rf=2, nodes=2)

materialized_views_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb8f3e40d90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5152 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.597
8_cythonno_x86_64_27_64 | test_meaningless_notice_in_status | Failure | ccmlib.node.NodeError: C* process with 5717 is terminated

self = <nodetool_test.TestNodetool object at 0x7fb8f4948d90>

def test_meaningless_notice_in_status(self):
"""
@jira_ticket CASSANDRA-10176

nodetool status doesn't return ownership when there is more than one user keyspace
defined (since they likely have different replication settings, making ownership
meaningless in general) and shows a helpful notice as to why it does that.
This test checks that said notice is only printed if there is indeed more than
one user keyspace.
"""
cluster = self.cluster
> cluster.populate([3]).start()

nodetool_test.py:194:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb8f4bd6d30>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5717 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.824
8_cythonno_x86_64_28_64 | test_create | Failure | ccmlib.node.NodeError: C* process with 4988 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fa84b51b820>

def test_create(self):
"""Test the materialized view creation"""
> session = self.prepare(user_table=True)

materialized_views_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa84b5343d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4988 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 14.007
8_cythonno_x86_64_29_64 | test_gcgs_validation | Failure | ccmlib.node.NodeError: C* process with 4550 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f29c0c71c40>

def test_gcgs_validation(self):
"""Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
> session = self.prepare(user_table=True)

materialized_views_test.py:263:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f29c0cf28b0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4550 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 17.843
8_cythonno_x86_64_2_64 | test_base_column_in_view_pk_commutative_tombstone_without_flush | Failure | ccmlib.node.NodeError: C* process with 8332 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd3a53194c0>

@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
> self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)

materialized_views_test.py:1778:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1785: in _test_base_column_in_view_pk_commutative_tombstone_
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd3a51d6a60>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8332 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 14.453
8_cythonno_x86_64_30_64 | test_insert | Failure | ccmlib.node.NodeError: C* process with 4910 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f84decdfd90>

def test_insert(self):
"""Test basic insertions"""
> session = self.prepare(user_table=True)

materialized_views_test.py:301:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f84ddfe9d90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4910 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 9.399
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 163840 == 90112)

self = <configuration_test.TestConfiguration object at 0x7f7f24ce9520>

@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560

Test that changes to the DURABLE_WRITES option on keyspaces are
respected in subsequent writes.

This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.

After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})

cluster.populate(1).start()
durable_node = cluster.nodelist()[0]

durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)

# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

durable_session.shutdown()
cluster.stop()
cluster.clear()

cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)

# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112

configuration_test.py:104: AssertionError
Time(s): 285.194
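Note: this test asserts on commitlog_size(node) before and after writing, while the commitlog-archive tests above poll _files_in(commitlog_dir) (compared with <=, consistent with it returning a set of file names). Neither helper's body appears in these traces; the sketches below are assumptions about their behavior, with commitlog_size() simplified to take a directory path rather than a node:

import os

def _files_in(directory):
    # Assumed semantics: the set of entry names currently in the directory,
    # so that new_files <= old_files means "no new segment yet".
    return set(os.listdir(directory))

def commitlog_size(commitlog_dir):
    # Assumed semantics: total on-disk bytes across all commitlog segments.
    # (The real dtest helper takes a node object and resolves its commitlog
    # directory; this sketch takes the directory path directly.)
    return sum(os.path.getsize(os.path.join(commitlog_dir, name))
               for name in os.listdir(commitlog_dir))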
8_cythonno_x86_64_31_64 | test_delete_insert_search | Failure | ccmlib.node.NodeError: C* process with 5001 is terminated

self = <delete_insert_test.TestDeleteInsert object at 0x7f7f24c9b1f0>

def test_delete_insert_search(self):
cluster = self.cluster
> cluster.populate([2, 2]).start()

delete_insert_test.py:42:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f7f1cd55610>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5001 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 63.907
8_cythonno_x86_64_31_64 | test_populate_mv_after_insert | Failure | ccmlib.node.NodeError: C* process with 6897 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f7f24c627c0>

def test_populate_mv_after_insert(self):
"""Test that a view is OK when created with existing data"""
> session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

materialized_views_test.py:319:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f7f1cd4f0d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6897 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 34.738
8_cythonno_x86_64_32_64 | test_describecluster_more_information_three_datacenters | Failure | ccmlib.node.NodeError: C* process with 6703 is terminated

self = <nodetool_test.TestNodetool object at 0x7fc09c7cb610>

@since('4.0')
def test_describecluster_more_information_three_datacenters(self):
"""
nodetool describecluster should be more informative. It should include details
for total node count, list of datacenters, RF, number of nodes per dc, how many
are down and version(s).
@jira_ticket CASSANDRA-13853
@expected_result This test invokes nodetool describecluster and matches the output with the expected one
"""
cluster = self.cluster
> cluster.populate([1, 2, 1]).start()

/home/cassandra/cassandra-dtest/nodetool_test.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc09aa02a90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6703 is terminated

/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 20.798
8_cythonno_x86_64_33_64 | test_sjk | Failure | ccmlib.node.NodeError: C* process with 4481 is terminated

self = <nodetool_test.TestNodetool object at 0x7f64e9654640>

@since('4.0')
def test_sjk(self):
"""
Verify that SJK generally works.
"""

cluster = self.cluster
> cluster.populate([1]).start()

nodetool_test.py:457:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f64e88ccf10>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4481 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 7.412
8_cythonno_x86_64_34_64 | test_crc_check_chance | Failure | ccmlib.node.NodeError: C* process with 7963 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f2815774cd0>

def test_crc_check_chance(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
> session = self.prepare()

materialized_views_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f28142319d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7963 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.663
8_cythonno_x86_64_35_64 | test_simple_strategy_counters | Failure | ccmlib.node.NodeError: C* process with 2514 is terminated

self = <consistency_test.TestAccuracy object at 0x7f78cdd3d100>

def test_simple_strategy_counters(self):
"""
Test for a single datacenter, counters table.
"""
self.nodes = 3
self.rf = 3

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
]

logger.debug("Testing single dc, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)

consistency_test.py:698:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f78cd9c1070>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2514 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 13.172
8_cythonno_x86_64_35_64 | test_prepared_statement | Failure | ccmlib.node.NodeError: C* process with 7108 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f78cdd247f0>

def test_prepared_statement(self):
"""Test basic insertions with prepared statement"""
> session = self.prepare(user_table=True)

materialized_views_test.py:384:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f78be5b82e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7108 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.402
8_cythonno_x86_64_36_64 | test_immutable | Failure | ccmlib.node.NodeError: C* process with 7439 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f78c78e7be0>

def test_immutable(self):
"""Test that a materialized view is immutable"""
> session = self.prepare(user_table=True)

materialized_views_test.py:413:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f78c5a89370>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7439 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.637
8_cythonno_x86_64_37_64 | test_drop_mv | Failure | ccmlib.node.NodeError: C* process with 8012 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f771798b7f0>

def test_drop_mv(self):
"""Test that we can drop a view properly"""
> session = self.prepare(user_table=True)

materialized_views_test.py:437:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f7714880940>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8012 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.940
8_cythonno_x86_64_38_64 | test_drop_column | Failure | ccmlib.node.NodeError: C* process with 4748 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f0409fcaaf0>

def test_drop_column(self):
"""Test that we cannot drop a column if it is used by a MV"""
> session = self.prepare(user_table=True)

materialized_views_test.py:456:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f0408cad370>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4748 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.859
8_cythonno_x86_64_39_64 | test_drop_table | Failure | ccmlib.node.NodeError: C* process with 7889 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f0c86e34070>

def test_drop_table(self):
"""Test that we cannot drop a table without deleting its MVs first"""
> session = self.prepare(user_table=True)

materialized_views_test.py:470:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f0c86f40f10>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7889 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.125
8_cythonno_x86_64_3_64test_view_tombstoneFailureccmlib.node.NodeError: C* process with 8297 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f747542a850>

def test_view_tombstone(self):
"""
Test that a materialized view properly tombstones

@jira_ticket CASSANDRA-10261
@jira_ticket CASSANDRA-10910
"""

> self.prepare(rf=3, options={'hinted_handoff_enabled': False})

materialized_views_test.py:1838:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f746f6a18b0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8297 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.732
8_cythonno_x86_64_40_64test_disk_balance_after_boundary_change_stcsFailureFailed: Timeout >900.0s

self = <disk_balance_test.TestDiskBalance object at 0x7f27ad2fd250>

@since('3.10')
def test_disk_balance_after_boundary_change_stcs(self):
"""
@jira_ticket CASSANDRA-13948
"""
> self._disk_balance_after_boundary_change_test(lcs=False)

disk_balance_test.py:194:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
disk_balance_test.py:258: in _disk_balance_after_boundary_change_test
self._assert_balanced_after_boundary_change(node1, total_keys, lcs)
disk_balance_test.py:353: in _assert_balanced_after_boundary_change
node.stress(['read', 'n={}'.format(total_keys), "no-warmup", "cl=ALL", "-pop", "seq=1...{}".format(total_keys), "-rate", "threads=1"])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:1529: in stress
return handle_external_tool_process(p, ['stress'] + stress_options)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:2325: in handle_external_tool_process
out, err = process.communicate()
/usr/lib/python3.8/subprocess.py:1028: in communicate
stdout, stderr = self._communicate(input, endtime, timeout)
/usr/lib/python3.8/subprocess.py:1868: in _communicate
ready = selector.select(timeout)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <selectors.PollSelector object at 0x7f27ac491ee0>, timeout = None

def select(self, timeout=None):
# This is shared between poll() and epoll().
# epoll() has a different signature and handling of timeout parameter.
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
> fd_event_list = self._selector.poll(timeout)
E Failed: Timeout >900.0s

/usr/lib/python3.8/selectors.py:415: Failed
905.435
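Unlike the NodeError entries, this failure is a hang rather than a crash: handle_external_tool_process calls process.communicate() with no timeout (note timeout = None in the selector frame above), so a stuck cassandra-stress read blocks until pytest's 900 s test timeout fires. A bounded wait would surface the hang sooner and preserve the child's partial output; a sketch, where the 300 s cap is an assumed value rather than one taken from the harness:

import subprocess

def run_tool_bounded(cmd, timeout_s=300):
    # Sketch: cap the wait on an external tool so a hang raises
    # TimeoutExpired instead of consuming the whole pytest budget.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, text=True)
    try:
        out, err = proc.communicate(timeout=timeout_s)
    except subprocess.TimeoutExpired:
        proc.kill()                   # reap the child, then re-raise
        out, err = proc.communicate()
        raise
    return proc.returncode, out, err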
8_cythonno_x86_64_40_64test_clustering_columnFailureccmlib.node.NodeError: C* process with 9172 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f27ad2c0220>

def test_clustering_column(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
> session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

materialized_views_test.py:495:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f27ac2a5eb0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9172 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
66.263
8_cythonno_x86_64_41_64test_insert_during_range_movement_rf1Failureccmlib.node.NodeError: C* process with 9281 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f898be15490>

def test_insert_during_range_movement_rf1(self):
> self._base_test_insert_during_range_movement(rf=1)

materialized_views_test.py:637:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f898a9fdd90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9281 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.624
8_cythonno_x86_64_42_64test_disk_balance_after_joining_ring_stcsFailureFailed: Timeout >900.0s

self = <disk_balance_test.TestDiskBalance object at 0x7f28ae791ca0>

@since('3.10')
def test_disk_balance_after_joining_ring_stcs(self):
"""
@jira_ticket CASSANDRA-13948
"""
> self._disk_balance_after_joining_ring_test(lcs=False)

disk_balance_test.py:271:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
disk_balance_test.py:314: in _disk_balance_after_joining_ring_test
node1.stress(['write', 'n={}'.format(keys_per_flush), "no-warmup", "cl=ALL", "-pop",
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:1529: in stress
return handle_external_tool_process(p, ['stress'] + stress_options)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:2325: in handle_external_tool_process
out, err = process.communicate()
/usr/lib/python3.8/subprocess.py:1028: in communicate
stdout, stderr = self._communicate(input, endtime, timeout)
/usr/lib/python3.8/subprocess.py:1868: in _communicate
ready = selector.select(timeout)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <selectors.PollSelector object at 0x7f28ada33190>, timeout = None

def select(self, timeout=None):
# This is shared between poll() and epoll().
# epoll() has a different signature and handling of timeout parameter.
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
> fd_event_list = self._selector.poll(timeout)
E Failed: Timeout >900.0s

/usr/lib/python3.8/selectors.py:415: Failed
900.042
8_cythonno_x86_64_42_64test_insert_during_range_movement_rf2Failureccmlib.node.NodeError: C* process with 8117 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f28ae7584f0>

def test_insert_during_range_movement_rf2(self):
> self._base_test_insert_during_range_movement(rf=2)

materialized_views_test.py:640:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f28ada31fa0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8117 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
57.668
8_cythonno_x86_64_43_64test_insert_during_range_movement_rf3Failureccmlib.node.NodeError: C* process with 8792 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd29a808bb0>

def test_insert_during_range_movement_rf3(self):
> self._base_test_insert_during_range_movement(rf=3)

materialized_views_test.py:643:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd298a442b0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8792 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.378
8_cythonno_x86_64_44_64test_allow_filteringFailureccmlib.node.NodeError: C* process with 9285 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff24ed99af0>

def test_allow_filtering(self):
"""Test that allow filtering works as usual for a materialized view"""
> session = self.prepare()

materialized_views_test.py:828:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff24ee92940>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9285 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
17.581
8_cythonno_x86_64_45_64test_secondary_indexFailureccmlib.node.NodeError: C* process with 7806 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd39a7ecfa0>

def test_secondary_index(self):
"""Test that secondary indexes cannot be created on a materialized view"""
> session = self.prepare()

materialized_views_test.py:862:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd39a8fa430>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7806 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.384
8_cythonno_x86_64_45_64test_anticompaction_after_normal_repairFailureccmlib.node.NodeError: C* process with 16818 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7fd39a3ee370>

@since('2.2.1', '4')
def test_anticompaction_after_normal_repair(self):
"""
* Launch a four node, two DC cluster
* Start a normal repair
* Assert every node anticompacts
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
> cluster.populate([2, 2]).start()

repair_tests/repair_test.py:353:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd3987868e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 16818 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
16.881
8_cythonno_x86_64_46_64test_ttlFailureccmlib.node.NodeError: C* process with 7741 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fc95442ed90>

def test_ttl(self):
"""
Test that TTL works as expected for a materialized view
@expected_result The TTL is propagated properly between tables.
"""
> session = self.prepare()

materialized_views_test.py:875:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc954328310>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7741 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.690
8_cythonno_x86_64_47_64test_query_all_new_columnFailureccmlib.node.NodeError: C* process with 3420 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fe6e03ae970>

def test_query_all_new_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:896:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fe6dffa4850>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3420 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.770
8_cythonno_x86_64_48_64test_query_new_columnFailureccmlib.node.NodeError: C* process with 2963 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f97ed5c3d90>

def test_query_new_column(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:922:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f97ed5cb2e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2963 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.744
8_cythonno_x86_64_49_64test_rename_columnFailureccmlib.node.NodeError: C* process with 15932 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f2bed089160>

def test_rename_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
@expected_result The column is also renamed in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:951:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f2bec1c1040>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 15932 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.031
8_cythonno_x86_64_4_64test_simple_repair_by_baseFailureccmlib.node.NodeError: C* process with 5329 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f2662ddbbe0>

def test_simple_repair_by_base(self):
> self._simple_repair_test(repair_base=True)

materialized_views_test.py:1935:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1945: in _simple_repair_test
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f2662e39370>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5329 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.569
8_cythonno_x86_64_50_64test_rename_column_atomicityFailureccmlib.node.NodeError: C* process with 7286 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f79e380ffa0>

def test_rename_column_atomicity(self):
"""
Test that column renaming is atomically done between a table and its materialized views
@jira_ticket CASSANDRA-12952
"""
> session = self.prepare(nodes=1, user_table=True, install_byteman=True)

materialized_views_test.py:977:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f79e38aa0a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7286 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.651
8_cythonno_x86_64_51_64test_2dc_parallel_startupFailureccmlib.node.NodeError: C* process with 6120 is terminated

self = <gossip_test.TestGossip object at 0x7f4db565de20>

@since('4.0')
def test_2dc_parallel_startup(self):
"""
@jira_ticket CASSANDRA-16588
Given a 2 DC cluster, start all seed nodes in parallel followed by
all non-seed nodes in parallel.
"""
> node1, node2, node3, node4 = self._create_2dc_cluster()

gossip_test.py:135:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gossip_test.py:109: in _create_2dc_cluster
node1.start(wait_for_binary_proto=True)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4daf60ff70>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6120 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
5.494
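The gossip tests fail on a slightly different path: node.start(wait_for_binary_proto=True) blocks in watch_log_for until "Starting listening for CQL clients" appears, re-checking process liveness on every pass. The combined wait reduces to a loop like the following sketch (the names, poll interval, and default timeout are illustrative assumptions, not ccmlib's actual code):

import time

def wait_for_log_line(log_path, pattern, is_alive, timeout_s=120.0):
    # Sketch: tail a log until `pattern` appears, bailing out early if
    # the watched process dies. `is_alive` would be a PID probe like
    # the one sketched earlier; all names here are illustrative.
    deadline = time.monotonic() + timeout_s
    with open(log_path, errors="replace") as log:
        while time.monotonic() < deadline:
            line = log.readline()
            if line:
                if pattern in line:
                    return line
                continue                  # keep draining buffered lines
            if not is_alive():
                raise RuntimeError("process died before log line appeared")
            time.sleep(0.5)               # nothing new yet; poll again
    raise TimeoutError("pattern {!r} not seen in {}s".format(pattern, timeout_s))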
8_cythonno_x86_64_51_64test_lwtFailureccmlib.node.NodeError: C* process with 7263 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4db5637370>

def test_lwt(self):
"""Test that lightweight transaction behave properly with a materialized view"""
> session = self.prepare()

materialized_views_test.py:1015:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4daf75fac0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7263 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.354
8_cythonno_x86_64_52_64test_2dc_parallel_startup_one_seedFailureccmlib.node.NodeError: C* process with 7356 is terminated

self = <gossip_test.TestGossip object at 0x7f1e6e7bbfa0>

@since('4.0')
def test_2dc_parallel_startup_one_seed(self):
"""
@jira_ticket CASSANDRA-16588
Given a 2 DC cluster, start one seed node followed by all non-seed
nodes in parallel.
"""
> node1, node2, node3, node4 = self._create_2dc_cluster()

gossip_test.py:150:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gossip_test.py:109: in _create_2dc_cluster
node1.start(wait_for_binary_proto=True)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1e6c7f3340>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7356 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.467
8_cythonno_x86_64_52_64test_interrupt_build_processFailureccmlib.node.NodeError: C* process with 8518 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f1e6e7934f0>

def test_interrupt_build_process(self):
"""Test that an interrupted MV build process is resumed as it should"""

options = {'hinted_handoff_enabled': False}
if self.cluster.version() >= '4':
options['concurrent_materialized_view_builders'] = 4

> session = self.prepare(options=options, install_byteman=True)

materialized_views_test.py:1096:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1e6c6aaac0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8518 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.917
8_cythonno_x86_64_54_64test_hintedhandoff_disabledFailureccmlib.node.NodeError: C* process with 2578 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f87bd2331c0>

def test_hintedhandoff_disabled(self):
"""
Test global hinted handoff disabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': False})

hintedhandoff_test.py:107:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f87bc1353d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2578 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
9.432
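The hinted-handoff configuration failures in this report all die inside the shared _start_two_node_cluster helper, before the handoff options under test are ever exercised. Built only from the calls visible in these tracebacks, the helper's core steps reduce to the following sketch (the real body may do more):

def start_two_node_cluster(cluster, opts):
    # Sketch assembled from the calls shown above, given an
    # already-created ccmlib Cluster object.
    cluster.set_configuration_options(values=opts)  # e.g. {'hinted_handoff_enabled': False}
    cluster.populate([2]).start()                   # every run above dies inside start()
    return cluster.nodelist()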
8_cythonno_x86_64_54_64test_drop_with_stopped_buildFailureccmlib.node.NodeError: C* process with 3951 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f87bd205c40>

@since('4.0')
def test_drop_with_stopped_build(self):
"""Test that MV whose build has been stopped with `nodetool stop` can be dropped"""

> session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)

materialized_views_test.py:1210:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f87bc3fb460>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3951 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.855
8_cythonno_x86_64_55_64test_hintedhandoff_enabledFailureccmlib.node.NodeError: C* process with 4110 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f4a4cf96880>

def test_hintedhandoff_enabled(self):
"""
Test global hinted handoff enabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True})

hintedhandoff_test.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4a4c10e7c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4110 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.406
8_cythonno_x86_64_55_64test_resume_stopped_buildFailureccmlib.node.NodeError: C* process with 5486 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4a4cefafa0>

@since('4.0')
def test_resume_stopped_build(self):
"""Test that MV builds stopped with `nodetool stop` are resumed after restart"""

> session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)

materialized_views_test.py:1278:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4a4c1180d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5486 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.684
8_cythonno_x86_64_56_64test_hintedhandoff_setmaxwindowFailureccmlib.node.NodeError: C* process with 3210 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f9e6d55aa00>

@since('4.0')
def test_hintedhandoff_setmaxwindow(self):
"""
Test global hinted handoff against max_hint_window_in_ms update via nodetool
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True, "max_hint_window_in_ms": 300000})

hintedhandoff_test.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f9e6c5f96d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3210 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
9.095
8_cythonno_x86_64_56_64test_mv_with_default_ttl_with_flushFailureccmlib.node.NodeError: C* process with 4568 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f9e6d4c9430>

@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
> self._test_mv_with_default_ttl(True)

materialized_views_test.py:1333:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1344: in _test_mv_with_default_ttl
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f9e6c2f81f0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4568 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.102
8_cythonno_x86_64_56_64test_local_dc_repairFailureccmlib.node.NodeError: C* process with 12444 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7f9e6d087160>

def test_local_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -local repair on one DC
* Assert only nodes in that DC are repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:624:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f9e6c04bbb0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 12444 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
15.774
8_cythonno_x86_64_57_64test_hintedhandoff_dc_disabledFailureccmlib.node.NodeError: C* process with 5512 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f3f67d55310>

def test_hintedhandoff_dc_disabled(self):
"""
Test global hinted handoff enabled with the dc disabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})

hintedhandoff_test.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3f4c775e50>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5512 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
61.940
8_cythonno_x86_64_57_64test_mv_with_default_ttl_without_flushFailureccmlib.node.NodeError: C* process with 6948 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f3f67bf0490>

@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
> self._test_mv_with_default_ttl(False)

materialized_views_test.py:1337:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1344: in _test_mv_with_default_ttl
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3f6542c9a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6948 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
69.847
8_cythonno_x86_64_57_64test_bulk_round_trip_non_prepared_statementsFailurecassandra.OperationTimedOut: errors={'127.0.0.2:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f3f677e8430>

def test_bulk_round_trip_non_prepared_statements(self):
"""
Test bulk import with default stress import (one row per operation) and without
prepared statements.

@jira_ticket CASSANDRA-11053
"""
> self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=100000,
copy_from_options={'PREPAREDSTATEMENTS': False})

cqlsh_tests/test_cqlsh_copy.py:2475:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:2436: in _test_bulk_round_trip
num_records = create_records()
cqlsh_tests/test_cqlsh_copy.py:2409: in create_records
ret = rows_to_list(self.session.execute(count_statement))[0][0]
../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:2618: in execute
return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ResponseFuture: query='<SimpleStatement query="SELECT COUNT(*) FROM keyspace1.standard1", consistency=ALL>' request_i...9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042 coordinator_host=None>

def result(self):
"""
Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.

Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.

Example usage::

>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...

>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:")

"""
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:
> raise self._final_exception
E cassandra.OperationTimedOut: errors={'127.0.0.2:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042

../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:4894: OperationTimedOut
212.944
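This is the report's only client-side timeout: SELECT COUNT(*) over keyspace1.standard1 at CL ALL is a full-table scan, and the driver gives up first. OperationTimedOut is raised by the client, as the message's pointer to Session.execute[_async](timeout) indicates, before any server-side coordinator timeout applies. A sketch of widening that client-side bound with the DataStax Python driver, where the 120 s value is an assumption:

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(["127.0.0.1"])
session = cluster.connect()

# Per-request override for the expensive full-scan count.
count = SimpleStatement("SELECT COUNT(*) FROM keyspace1.standard1",
                        consistency_level=ConsistencyLevel.ALL)
row = session.execute(count, timeout=120.0).one()  # 120 s is an assumed value

# Or raise the session-wide default for every request on this session.
session.default_timeout = 120.0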
8_cythonno_x86_64_57_64test_dc_repairFailureccmlib.node.NodeError: C* process with 16726 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7f3f677c1550>

def test_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's
* Assert only nodes on those dcs were repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:652:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3f658cbd60>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 16726 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
68.040
8_cythonno_x86_64_58_64test_hintedhandoff_dc_reenabledFailureccmlib.node.NodeError: C* process with 4211 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f4b05d61820>

def test_hintedhandoff_dc_reenabled(self):
"""
Test global hinted handoff enabled with the dc disabled first and then re-enabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})

hintedhandoff_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4af7d54dc0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4211 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.721
8_cythonno_x86_64_58_64test_no_base_column_in_view_pk_complex_timestamp_with_flushFailureccmlib.node.NodeError: C* process with 5578 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4b05d2c760>

@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
> self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)

materialized_views_test.py:1462:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1476: in _test_no_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4af7d55fa0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5578 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.629
8_cythonno_x86_64_58_64test_size_estimates_multidcFailureccmlib.node.NodeError: C* process with 13042 is terminated

self = <topology_test.TestTopology object at 0x7f4b05a4bbb0>

@since('3.0.11')
def test_size_estimates_multidc(self):
"""
Test that primary ranges are correctly generated on
system.size_estimates for a multi-dc, multi-ks scenario
@jira_ticket CASSANDRA-9639
"""
logger.debug("Creating cluster")
cluster = self.cluster
cluster.set_configuration_options(values={'num_tokens': 2})
cluster.populate([2, 1])
node1_1, node1_2, node2_1 = cluster.nodelist()

logger.debug("Setting tokens")
node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
'-2506475074448728501,8473270337963525440',
'-3736333188524231709,8673615181726552074']
node1_1.set_configuration_options(values={'initial_token': node1_tokens})
node1_2.set_configuration_options(values={'initial_token': node2_tokens})
node2_1.set_configuration_options(values={'initial_token': node3_tokens})
cluster.set_configuration_options(values={'num_tokens': 2})

logger.debug("Starting cluster")
> cluster.start()

topology_test.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4b04868f40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 13042 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
18.558
8_cythonno_x86_64_58_64test_dc_parallel_repairFailureccmlib.node.NodeError: C* process with 18006 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7f4b058f86d0>

def test_dc_parallel_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's, with -dcpar
* Assert only nodes on those dcs were repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:682:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4af7ff12b0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 18006 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
19.826
8_cythonno_x86_64_59_64test_failed_read_repairFailureFailed: DID NOT RAISE <class 'cassandra.ReadTimeout'>

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7fb6845da910>

@since('4.0')
def test_failed_read_repair(self):
"""
If none of the disagreeing nodes ack the repair mutation, the read should fail
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)

session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])
node3.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

with raises(WriteTimeout):
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)"))

node2.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])
session = self.get_cql_connection(node2)
with StorageProxy(node2) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

with raises(ReadTimeout):
> session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
E Failed: DID NOT RAISE <class 'cassandra.ReadTimeout'>

read_repair_test.py:555: Failed
38.479
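Unlike the rest of the report, this entry is an inverted assertion rather than a crash: with writes and read-repair writes stopped on node2 and node3 via byteman, the quorum read was expected to time out, but it completed, so the raises(ReadTimeout) context manager reports DID NOT RAISE. The mechanics are plain pytest; a sketch showing both outcomes:

import pytest

def test_raises_mechanics():
    # Passes: the body raises the expected exception type.
    with pytest.raises(ZeroDivisionError):
        1 / 0
    # Fails with "DID NOT RAISE <class 'ZeroDivisionError'>":
    # the body completes normally, mirroring the failure above.
    with pytest.raises(ZeroDivisionError):
        pass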
8_cythonno_x86_64_59_64test_repair_validates_dcFailureccmlib.node.NodeError: C* process with 20508 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7fb68426b2e0>

@since('3.11')
def test_repair_validates_dc(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair with nonexistent dc and without local dc
* Assert that the repair is not triggered in either case
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:720:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb6834dceb0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 20508 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
15.560
8_cythonno_x86_64_5_64test_simple_repair_by_viewFailureccmlib.node.NodeError: C* process with 7034 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f1cd443bca0>

def test_simple_repair_by_view(self):
> self._simple_repair_test(repair_view=True)

materialized_views_test.py:1938:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1945: in _simple_repair_test
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1cd2772dc0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7034 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.634
8_cythonno_x86_64_5_64 | test_simple_rebuild | Failure | ccmlib.node.NodeError: C* process with 8832 is terminated

self = <rebuild_test.TestRebuild object at 0x7f1cd43a7040>

def test_simple_rebuild(self):
"""
@jira_ticket CASSANDRA-9119

Test rebuild from other dc works as expected.
"""

keys = 1000

cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
node1 = cluster.create_node('node1', False,
None,
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
cluster.add(node1, True, data_center='dc1')

# start node in dc1
> node1.start(wait_for_binary_proto=True)

rebuild_test.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1cd46fa430>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8832 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
7.477
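Note: the startup path failing here is a log watch: node.start() blocks in watch_log_for until "Starting listening for CQL clients" appears, checking between polls that the process is still alive. A simplified stand-in for such a watcher (assumptions: plain polling, a single log file, no rotation handling; ccmlib's real watch_log_for is richer):

import re
import time

def watch_log_for(logfile, pattern, process=None, timeout=120):
    """Poll a log file until `pattern` matches, the process dies, or the timeout elapses."""
    regex = re.compile(pattern)
    deadline = time.time() + timeout
    position = 0
    while time.time() < deadline:
        if process is not None and process.poll() is not None:
            raise RuntimeError("process terminated before the log line appeared")
        with open(logfile) as f:
            f.seek(position)   # resume from where the last poll stopped
            chunk = f.read()
            position = f.tell()
        if regex.search(chunk):
            return
        time.sleep(0.5)
    raise TimeoutError("timed out waiting for {!r}".format(pattern))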
8_cythonno_x86_64_60_64 | test_base_column_in_view_pk_complex_timestamp_with_flush | Failure | ccmlib.node.NodeError: C* process with 6193 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f9fdc332b50>

@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
> self._test_base_column_in_view_pk_complex_timestamp(flush=True)

materialized_views_test.py:1588:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1601: in _test_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f9fdc3a2b20>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6193 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.495
8_cythonno_x86_64_60_64 | test_non_replicated_ks_repair | Failure | ccmlib.node.NodeError: C* process with 20902 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7f9fdbf580a0>

@since('4.0')
def test_non_replicated_ks_repair(self):
cluster = self.cluster
> cluster.populate([2, 2]).start(wait_for_binary_proto=True)

repair_tests/repair_test.py:960:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f9fda5d7a30>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 20902 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
17.914
8_cythonno_x86_64_61_64 | test_base_column_in_view_pk_complex_timestamp_without_flush | Failure | ccmlib.node.NodeError: C* process with 4917 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f27eef5ad00>

@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
> self._test_base_column_in_view_pk_complex_timestamp(flush=False)

materialized_views_test.py:1592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1601: in _test_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f27eef55700>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4917 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.307
8_cythonno_x86_64_62_64 | test_expired_liveness_with_limit_rf1_nodes1 | Failure | ccmlib.node.NodeError: C* process with 3089 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f86242de220>

@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
> self._test_expired_liveness_with_limit(rf=1, nodes=1)

materialized_views_test.py:1718:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8623f165e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3089 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.772
8_cythonno_x86_64_62_64 | test_speculative_write | Failure | assert 0 == 1 +0 -1

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7f862425c070>

@since('4.0')
def test_speculative_write(self):
""" if one node doesn't respond to a read repair mutation, it should be sent to the remaining node """
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)

session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])

session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")

# re-enable writes on node 3, leave them off on node2
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

node1.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])
with StorageProxy(node1) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected

assert storage_proxy.blocking_read_repair == 1
assert storage_proxy.speculated_rr_read == 0
> assert storage_proxy.speculated_rr_write == 1
E assert 0 == 1
E +0
E -1

read_repair_test.py:680: AssertionError
51.161
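Note: these read-repair tests toggle byteman rules by hand: node.byteman_submit([script]) installs a rule, and node.byteman_submit(['-u', script]) removes it again (see the "re-enable writes" steps in the entries below). A hypothetical convenience wrapper, not part of the dtest code, that guarantees a rule is removed even when an assertion fails mid-test:

from contextlib import contextmanager

@contextmanager
def byteman_rule(node, script):
    """Install a byteman rule for the duration of a block, then uninstall it."""
    node.byteman_submit([script])            # install the rule
    try:
        yield
    finally:
        node.byteman_submit(['-u', script])  # uninstall, even if the body raised

# usage sketch:
# with byteman_rule(node2, mk_bman_path('read_repair/stop_writes.btm')):
#     ... exercise the cluster while writes are stopped on node2 ...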
8_cythonno_x86_64_62_64 | test_oversized_mutation | Failure | ccmlib.node.NodeError: C* process with 13360 is terminated

self = <write_failures_test.TestMultiDCWriteFailures object at 0x7f8623fda460>

def test_oversized_mutation(self):
"""
Test that multi-DC write failures return operation failed rather than a timeout.
@jira_ticket CASSANDRA-16334.
"""

cluster = self.cluster
cluster.populate([2, 2])
cluster.set_configuration_options(values={'max_mutation_size_in_kb': 128})
> cluster.start()

write_failures_test.py:261:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8623536d60>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 13360 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
19.954
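Note: for context on the limit under test, max_mutation_size_in_kb: 128 caps a single mutation at 128 KiB, and CASSANDRA-16334 is about such writes failing fast instead of timing out across DCs. Had the cluster started, a payload like the following would exceed the cap (the session object and the ks.tbl schema are illustrative, not taken from the test):

# assumes a connected cassandra-driver `session` and a table ks.tbl (k int PRIMARY KEY, v text)
oversized = 'x' * (256 * 1024)  # ~256 KiB of data, well over the 128 KiB mutation cap
session.execute("INSERT INTO ks.tbl (k, v) VALUES (%s, %s)", (0, oversized))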
8_cythonno_x86_64_63_64 | test_expired_liveness_with_limit_rf1_nodes3 | Failure | ccmlib.node.NodeError: C* process with 6169 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fe966c7e850>

@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
> self._test_expired_liveness_with_limit(rf=1, nodes=3)

materialized_views_test.py:1722:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fe9668a2e20>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6169 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.259
8_cythonno_x86_64_63_64 | test_quorum_requirement | Failure | assert 0 == 1 +0 -1

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7fe966c0a130>

@since('4.0')
def test_quorum_requirement(self):
"""
Even if we speculate on every stage, we should still only require a quorum of responses for success
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)

session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])

session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")

# re-enable writes
node2.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])

# force endpoint order
node1.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])

node2.byteman_submit([mk_bman_path('read_repair/stop_data_reads.btm')])
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node3.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

with StorageProxy(node1) as storage_proxy:
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected

assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 1
> assert storage_proxy.speculated_rr_read == 1
E assert 0 == 1
E +0
E -1

read_repair_test.py:724: AssertionError
43.402
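Note: the quorum the docstring above leans on is simple arithmetic: for a replication factor rf, a quorum is rf // 2 + 1, so with rf=3 only 2 of the 3 replicas need to respond even when every stage speculates:

def quorum_size(rf):
    # integer form of floor(rf / 2) + 1
    return rf // 2 + 1

assert quorum_size(3) == 2
assert quorum_size(5) == 3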
8_cythonno_x86_64_63_64 | test_zerocopy_streaming | Failure | ccmlib.node.NodeError: C* process with 12519 is terminated

self = <streaming_test.TestStreaming object at 0x7fe966be71f0>

@since('4.0')
def test_zerocopy_streaming(self):
> self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.eq, num_zerocopy=1, num_partial=0,
num_nodes=2, rf=2)

streaming_test.py:100:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
streaming_test.py:63: in _test_streaming
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fe9643d20d0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 12519 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.240
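Note: the streaming tests pass their assertions in as callables from the standard operator module: op_zerocopy=operator.gt asserts that the zero-copy stream count exceeds the expected value, while operator.eq asserts equality. In isolation:

import operator

assert operator.gt(2, 1)  # equivalent to 2 > 1
assert operator.eq(0, 0)  # equivalent to 0 == 0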
8_cythonno_x86_64_64_64 | test_expired_liveness_with_limit_rf3 | Failure | ccmlib.node.NodeError: C* process with 8762 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd16b2b5220>

@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
> self._test_expired_liveness_with_limit(rf=3, nodes=3)

materialized_views_test.py:1726:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd168071070>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8762 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.046
8_cythonno_x86_64_64_64 | test_quorum_requirement_on_speculated_read | Failure | assert 0 == 1 +0 -1

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7fd16b23b790>

@since('4.0')
def test_quorum_requirement_on_speculated_read(self):
"""
Even if we speculate on every stage, we should still only require a quorum of responses for success
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)

session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])

session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")

# re-enable writes
node2.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])

# force endpoint order
node1.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])

node2.byteman_submit([mk_bman_path('read_repair/stop_digest_reads.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_data_reads.btm')])
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

with StorageProxy(node1) as storage_proxy:
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected

> assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 1
E assert 0 == 1
E +0
E -1

read_repair_test.py:768: AssertionError
39.166
8_cythonno_x86_64_64_64 | test_zerocopy_streaming_no_replication | Failure | ccmlib.node.NodeError: C* process with 13314 is terminated

self = <streaming_test.TestStreaming object at 0x7fd16b210af0>

@since('4.0')
def test_zerocopy_streaming_no_replication(self):
> self._test_streaming(op_zerocopy=operator.eq, op_partial=operator.eq, num_zerocopy=0, num_partial=0, rf=1,
num_nodes=3)

streaming_test.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
streaming_test.py:63: in _test_streaming
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd1505df6a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 13314 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.505
8_cythonno_x86_64_6_64 | test_base_replica_repair | Failure | ccmlib.node.NodeError: C* process with 7784 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f046823bc10>

def test_base_replica_repair(self):
> self._base_replica_repair_test()

materialized_views_test.py:2000:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2014: in _base_replica_repair_test
self.prepare(rf=3)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f0468583520>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7784 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
15.309
8_cythonno_x86_64_6_64 | test_resumable_rebuild | Failure | ccmlib.node.NodeError: C* process with 9558 is terminated

self = <rebuild_test.TestRebuild object at 0x7f046821ffa0>

@since('2.2')
def test_resumable_rebuild(self):
"""
@jira_ticket CASSANDRA-10810

Test rebuild operation is resumable
"""
self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
r'Error while rebuilding node',
r'Streaming error occurred on session with peer 127.0.0.3',
r'Remote peer 127.0.0.3 failed stream session',
r'Streaming error occurred on session with peer 127.0.0.3:7000',
r'Remote peer /?127.0.0.3:7000 failed stream session',
r'Stream receive task .* already finished',
r'stream operation from /?127.0.0.1:.* failed'
]

cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})

# Create 2 nodes on dc1
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))

cluster.add(node1, True, data_center='dc1')
cluster.add(node2, True, data_center='dc1')

> node1.start(wait_for_binary_proto=True, jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])

rebuild_test.py:184:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f046822fc40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9558 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.973
8_cythonno_x86_64_7_64 | test_base_replica_repair_with_contention | Failure | ccmlib.node.NodeError: C* process with 5591 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff4542e3e50>

def test_base_replica_repair_with_contention(self):
"""
Test repair does not fail when there is MV lock contention
@jira_ticket CASSANDRA-12905
"""
> self._base_replica_repair_test(fail_mv_lock=True)

materialized_views_test.py:2007:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2014: in _base_replica_repair_test
self.prepare(rf=3)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff4541d6b20>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5591 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
9.965
8_cythonno_x86_64_7_64 | test_rebuild_ranges | Failure | ccmlib.node.NodeError: C* process with 7383 is terminated

self = <rebuild_test.TestRebuild object at 0x7ff45424c250>

@since('3.6')
def test_rebuild_ranges(self):
"""
@jira_ticket CASSANDRA-10406
"""
keys = 1000

cluster = self.cluster
tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.set_configuration_options(values={'num_tokens': 1})
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', tokens[0],
binary_interface=('127.0.0.1', 9042))
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.add(node1, True, data_center='dc1')
node1 = cluster.nodelist()[0]

# start node in dc1
> node1.start(wait_for_binary_proto=True)

rebuild_test.py:270:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff454170700>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7383 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.682
8_cythonno_x86_64_8_64 | test_rf_gt_nodes_multidc_should_succeed | Failure | ccmlib.node.NodeError: C* process with 1886 is terminated

self = <bootstrap_test.TestBootstrap object at 0x7f3716ebbf10>

def test_rf_gt_nodes_multidc_should_succeed(self):
"""
Validating a KS with RF > N on multi DC doesn't break bootstrap
@jira_ticket CASSANDRA-16296 CASSANDRA-16411
"""
cluster = self.cluster
cluster.set_environment_variable('CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
cluster.populate([1, 1])
> cluster.start()

bootstrap_test.py:320:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3716069e20>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 1886 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.133
8_cythonno_x86_64_9_64 | test_base_view_consistency_on_failure_after_mv_apply | Failure | ccmlib.node.NodeError: C* process with 9048 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f91e0d16580>

def test_base_view_consistency_on_failure_after_mv_apply(self):
> self._test_base_view_consistency_on_crash("after")

materialized_views_test.py:2553:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2569: in _test_base_view_consistency_on_crash
self.prepare(rf=1, install_byteman=True)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f91e0c57580>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9048 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.165
8_cythonno_x86_64_10_64 | test_base_view_consistency_on_failure_before_mv_apply | Failure | ccmlib.node.NodeError: C* process with 8615 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f12c3a2abb0>

def test_base_view_consistency_on_failure_before_mv_apply(self):
> self._test_base_view_consistency_on_crash("before")

materialized_views_test.py:2556:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2569: in _test_base_view_consistency_on_crash
self.prepare(rf=1, install_byteman=True)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f12c3827430>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8615 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.159
8_cythonno_x86_64_12_64 | test_local_quorum_bootstrap | Failure | ccmlib.node.NodeError: C* process with 1208 is terminated

self = <bootstrap_test.TestBootstrap object at 0x7f5897e55d90>

def test_local_quorum_bootstrap(self):
"""
Test that CL local_quorum works while a node is bootstrapping.
@jira_ticket CASSANDRA-8058
"""
cluster = self.cluster
cluster.set_environment_variable('CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
cluster.populate([1, 1])
> cluster.start()

bootstrap_test.py:560:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f5895fafd30>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 1208 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.051
8_cythonno_x86_64_15_64 | test_multi_dc_replace_with_rf1 | Failure | ccmlib.node.NodeError: C* process with 6534 is terminated

self = <replace_address_test.TestReplaceAddress object at 0x7fea65939700>

def test_multi_dc_replace_with_rf1(self):
"""
Test that multi-dc replace works when rf=1 on each dc
"""
> self._setup(n=[1, 1])

replace_address_test.py:646:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
replace_address_test.py:76: in _setup
self.cluster.start(jvm_args=jvm_args)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fea657b3d90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6534 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.230
8_cythonno_x86_64_16_64 | test_putget_2dc_rf1 | Failure | ccmlib.node.NodeError: C* process with 4425 is terminated

self = <multidc_putget_test.TestMultiDCPutGet object at 0x7faea8daabe0>

def test_putget_2dc_rf1(self):
""" Simple put-get test for 2 DC with one node each (RF=1) [catches #3539] """
cluster = self.cluster
> cluster.populate([1, 1]).start()

multidc_putget_test.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7faea8c98b50>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4425 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
15.013
8_cythonno_x86_64_16_64 | test_cannot_restart_with_different_rack | Failure | ccmlib.node.NodeError: C* process with 7822 is terminated

self = <replication_test.TestSnitchConfigurationUpdate object at 0x7faea8d4c6a0>

def test_cannot_restart_with_different_rack(self):
"""
@jira_ticket CASSANDRA-10242

Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified.
"""
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'
.format('GossipingPropertyFileSnitch')})

node1 = cluster.nodelist()[0]

with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
topo_file.write(line + os.linesep)

logger.debug("Starting node {} with rack1".format(node1.address()))
> node1.start(wait_for_binary_proto=True)

replication_test.py:614:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7faea8b5f640>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7822 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.049
8_cythonno_x86_64_17_64 | test_putget_2dc_rf2 | Failure | ccmlib.node.NodeError: C* process with 3854 is terminated

self = <multidc_putget_test.TestMultiDCPutGet object at 0x7ff570063070>

def test_putget_2dc_rf2(self):
""" Simple put-get test for 2 DC with 2 node each (RF=2) -- tests cross-DC efficient writes """
cluster = self.cluster
> cluster.populate([2, 2]).start()

multidc_putget_test.py:25:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff56d0feee0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3854 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
16.737
8_cythonno_x86_64_1_64 | test_base_column_in_view_pk_commutative_tombstone_with_flush | Failure | ccmlib.node.NodeError: C* process with 7710 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f8f353e1070>

@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
> self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)

materialized_views_test.py:1774:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1785: in _test_base_column_in_view_pk_commutative_tombstone_
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8f35279850>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7710 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.810
8_cythonno_x86_64_20_64 | test_switch_data_center_startup_fails | Failure | ccmlib.node.NodeError: C* process with 8972 is terminated

self = <replication_test.TestSnitchConfigurationUpdate object at 0x7f728b2c4910>

def test_switch_data_center_startup_fails(self):
"""
@jira_ticket CASSANDRA-9474

Confirm that switching data centers fails to bring up the node.
"""
expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
"Please fix the snitch configuration, decommission and rebootstrap this node "
"or use the flag -Dcassandra.ignore_dc=true.")
self.fixture_dtest_setup.ignore_log_patterns = [expected_error]

cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

node = cluster.nodelist()[0]
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
topo_file.write("dc=dc9" + os.linesep)
topo_file.write("rack=rack1" + os.linesep)

> cluster.start()

replication_test.py:768:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f728852b820>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8972 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
6.755
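Note: expected_error above is a regular expression, which is why the test registers it in ignore_log_patterns. A quick standalone check that the pattern matches the refusal a node would log (the sample line is illustrative):

import re

expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
                  "Please fix the snitch configuration, decommission and rebootstrap this node "
                  "or use the flag -Dcassandra.ignore_dc=true.")
sample = ("Cannot start node if snitch's data center (dc9) differs from previous data center (dc1). "
          "Please fix the snitch configuration, decommission and rebootstrap this node "
          "or use the flag -Dcassandra.ignore_dc=true.")
assert re.search(expected_error, sample) is not None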
8_cythonno_x86_64_22_64 | test_decommission_after_drain_is_invalid | Failure | ccmlib.node.NodeError: C* process with 3785 is terminated

self = <nodetool_test.TestNodetool object at 0x7f251d03c8e0>

def test_decommission_after_drain_is_invalid(self):
"""
@jira_ticket CASSANDRA-8741

Running a decommission after a drain should generate
an unsupported operation message and exit with an error
code (which we receive as a ToolError exception).
"""
cluster = self.cluster
> cluster.populate([3]).start()

nodetool_test.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f251ce3f970>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3785 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
9.390
8_cythonno_x86_64_23_64 | test_correct_dc_rack_in_nodetool_info | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <nodetool_test.TestNodetool object at 0x7fd700d00eb0>

def test_correct_dc_rack_in_nodetool_info(self):
"""
@jira_ticket CASSANDRA-10382

Test that nodetool info returns the correct rack and dc
"""

cluster = self.cluster
cluster.populate([2, 2])
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

for i, node in enumerate(cluster.nodelist()):
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
for line in ["dc={}".format(node.data_center), "rack=rack{}".format(i % 2)]:
snitch_file.write(line + os.linesep)

> cluster.start()

nodetool_test.py:59:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fd6f8cdcd90>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fd6f9fe3d00>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7fd6f9fe3d00>, <subprocess.Popen object at 0x7fd6f9f80c40>, 0), (<ccmlib.node.Node obj... at 0x7fd6f9f3f670>, 0), (<ccmlib.node.Node object at 0x7fd6f9e7ebb0>, <subprocess.Popen object at 0x7fd6f9de2d30>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fd6f9f80c40>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
20.692
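Note: the entry above fails one step earlier than the others: cluster.start() first gives each launched process a short window (timeout_s=7 in the code shown) to prove it is alive before it starts watching the log at all. A hypothetical equivalent of that pre-check (ccmlib's _wait_for_running may differ in detail):

import time

def wait_for_running(process, timeout_s=7):
    """Return False if the subprocess exits within the window, True otherwise."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if process.poll() is not None:  # a set returncode means the process already exited
            return False
        time.sleep(0.1)
    return True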
8_cythonno_x86_64_24_64 | test_nodetool_timeout_commands | Failure | ccmlib.node.NodeError: C* process with 3769 is terminated

self = <nodetool_test.TestNodetool object at 0x7f1164e54a30>

@since('3.4')
def test_nodetool_timeout_commands(self):
"""
@jira_ticket CASSANDRA-10953

Test that nodetool gettimeout and settimeout work at a basic level
"""
cluster = self.cluster
> cluster.populate([1]).start()

nodetool_test.py:85:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f115fdb5070>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3769 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.073
8_cythonno_x86_64_24_64 | test_prefer_local_reconnect_on_listen_address | Failure | ccmlib.node.NodeError: C* process with 7688 is terminated

self = <snitch_test.TestGossipingPropertyFileSnitch object at 0x7f1164d772e0>

def test_prefer_local_reconnect_on_listen_address(self):
"""
@jira_ticket CASSANDRA-9748
@jira_ticket CASSANDRA-8084

Test that it's possible to connect over the broadcast_address when
listen_on_broadcast_address=true and that GossipingPropertyFileSnitch
reconnects via listen_address when prefer_local=true
"""

NODE1_LISTEN_ADDRESS = '127.0.0.1'
NODE1_BROADCAST_ADDRESS = '127.0.0.3'

NODE2_LISTEN_ADDRESS = '127.0.0.2'
NODE2_BROADCAST_ADDRESS = '127.0.0.4'

STORAGE_PORT = 7000

cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()

running40 = node1.get_base_cassandra_version() >= 4.0

cluster.seeds = [NODE1_BROADCAST_ADDRESS]
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
'listen_on_broadcast_address': 'true'})
node1.set_configuration_options(values={'broadcast_address': NODE1_BROADCAST_ADDRESS})
node2.auto_bootstrap = True
node2.set_configuration_options(values={'broadcast_address': NODE2_BROADCAST_ADDRESS})

for node in cluster.nodelist():
with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
snitch_file.write("dc=dc1" + os.linesep)
snitch_file.write("rack=rack1" + os.linesep)
snitch_file.write("prefer_local=true" + os.linesep)

> node1.start(wait_for_binary_proto=True)

snitch_test.py:66:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1164cbe550>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7688 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.868
8_cythonno_x86_64_25_64 | test_cleanup_when_no_replica_with_index | Failure | ccmlib.node.NodeError: C* process with 7327 is terminated

self = <nodetool_test.TestNodetool object at 0x7f97a2d40730>

@since('3.0')
def test_cleanup_when_no_replica_with_index(self):
> self._cleanup_when_no_replica(True)

nodetool_test.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
nodetool_test.py:125: in _cleanup_when_no_replica
self.cluster.populate([1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f97a1204f70>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7327 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
19.112
8_cythonno_x86_64_26_64 | test_cleanup_when_no_replica_without_index | Failure | ccmlib.node.NodeError: C* process with 5308 is terminated

self = <nodetool_test.TestNodetool object at 0x7f454eae9c40>

@since('3.0')
def test_cleanup_when_no_replica_without_index(self):
> self._cleanup_when_no_replica(False)

nodetool_test.py:118:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
nodetool_test.py:125: in _cleanup_when_no_replica
self.cluster.populate([1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f454c6622e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5308 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.268
8_cythonno_x86_64_27_64 | test_view_metadata_cleanup | Failure | ccmlib.node.NodeError: C* process with 5549 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fc7ab870550>

def test_view_metadata_cleanup(self):
"""
Dropping a keyspace or view should clear built_views and view_build_status
"""
> session = self.prepare(rf=2, nodes=2)

materialized_views_test.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc7aad3eca0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5549 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.546
8_cythonno_x86_64_27_64 | test_meaningless_notice_in_status | Failure | ccmlib.node.NodeError: C* process with 6176 is terminated

self = <nodetool_test.TestNodetool object at 0x7fc7ab833a90>

def test_meaningless_notice_in_status(self):
"""
@jira_ticket CASSANDRA-10176

nodetool status doesn't return ownership when there is more than one user keyspace
defined (since they likely have different replication info, making ownership
meaningless in general) and shows a helpful notice as to why it does that.
This test checks that said notice is only printed if there is indeed more than
one user keyspace.
"""
cluster = self.cluster
> cluster.populate([3]).start()

nodetool_test.py:194:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc7ab8b5c40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6176 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.706
8_cythonno_x86_64_28_64 | test_create | Failure | ccmlib.node.NodeError: C* process with 5355 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f73fb7718e0>

def test_create(self):
"""Test the materialized view creation"""
> session = self.prepare(user_table=True)

materialized_views_test.py:255:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f73fb7507f0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5355 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.520
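All of the materialized-view failures in this section abort inside `prepare()`, before any view logic runs. For orientation, once `prepare()` succeeds these tests issue statements of the following general shape; the keyspace, table, and column names below are illustrative, not taken from the suite:

def create_example_view(session):
    # Illustrative CQL shape for a materialized view: every primary-key
    # column of the view must carry an IS NOT NULL restriction, and the
    # view's key must include the base table's key columns.
    session.execute("""
        CREATE MATERIALIZED VIEW ks.users_by_state AS
            SELECT * FROM ks.users
            WHERE state IS NOT NULL AND username IS NOT NULL
            PRIMARY KEY (state, username)
    """)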
8_cythonno_x86_64_29_64 | test_gcgs_validation | Failure | ccmlib.node.NodeError: C* process with 4858 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f3fe9034bb0>

def test_gcgs_validation(self):
"""Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
> session = self.prepare(user_table=True)

materialized_views_test.py:263:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3fe90d41f0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4858 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.097
8_cythonno_x86_64_2_64 | test_base_column_in_view_pk_commutative_tombstone_without_flush | Failure | ccmlib.node.NodeError: C* process with 9173 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f30d650e340>

@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
> self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)

materialized_views_test.py:1778:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1785: in _test_base_column_in_view_pk_commutative_tombstone_
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f30d47000a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9173 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
33.427
8_cythonno_x86_64_30_64 | test_insert | Failure | ccmlib.node.NodeError: C* process with 5343 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff820ad8e20>

def test_insert(self):
"""Test basic insertions"""
> session = self.prepare(user_table=True)

materialized_views_test.py:301:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff81d40b280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5343 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.908
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 163840 == 90112)

self = <configuration_test.TestConfiguration object at 0x7f8dad74e550>

@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560

Test that changes to the DURABLE_WRITES option on keyspaces are
respected in subsequent writes.

This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.

After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})

cluster.populate(1).start()
durable_node = cluster.nodelist()[0]

durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)

# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

durable_session.shutdown()
cluster.stop()
cluster.clear()

cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)

# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112

configuration_test.py:104: AssertionError
90.662
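This is the one failure in the section that is a genuine assertion rather than a startup crash: the commitlog grew from 90112 to 163840 bytes (73728 bytes, exactly 72 KiB) even though DURABLE_WRITES was false at write time. The `commitlog_size` helper is not quoted in the traceback; a plausible equivalent, assuming ccm's default node layout with a `commitlogs` directory under the node path, is:

import os

def commitlog_size_bytes(node):
    # Total on-disk size of a ccm node's commitlog segments; assumes
    # ccm's default directory layout (<node path>/commitlogs).
    commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
    return sum(os.path.getsize(os.path.join(commitlog_dir, name))
               for name in os.listdir(commitlog_dir))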
8_cythonno_x86_64_31_64 | test_delete_insert_search | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <delete_insert_test.TestDeleteInsert object at 0x7f8dad701220>

def test_delete_insert_search(self):
cluster = self.cluster
> cluster.populate([2, 2]).start()

delete_insert_test.py:42:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f8dac0b7b50>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f8dac0986a0>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7f8dac0986a0>, <subprocess.Popen object at 0x7f8dac0a94c0>, 0), (<ccmlib.node.Node obj... at 0x7f8dac1c2c10>, 0), (<ccmlib.node.Node object at 0x7f8dad3338b0>, <subprocess.Popen object at 0x7f8dac1c21f0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f8dac0a94c0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate strategy tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
21.457
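The traceback above is one of the few that quotes `Cluster.start` in full: each stopped node is launched, the pre-start log offset is recorded with `mark_log()`, and unless `no_wait` is set the cluster refuses to proceed until every process survives `_wait_for_running`. The same API drives every test in this report; a minimal standalone usage sketch (the path and Cassandra version are placeholders, and the constructor keyword follows ccm's documented programmatic API) looks like:

from ccmlib.cluster import Cluster

# Placeholders: any writable directory and a locally available version.
cluster = Cluster('/tmp/ccm', 'probe', cassandra_version='4.0.0')
cluster.populate([2, 2])                   # two DCs, two nodes each
cluster.start(wait_for_binary_proto=True)  # raises NodeError on dead PIDs

node1 = cluster.nodelist()[0]
mark = node1.mark_log()                    # offset before the next action
node1.watch_log_for('Starting listening for CQL clients',
                    from_mark=mark, timeout=60)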
8_cythonno_x86_64_31_64 | test_populate_mv_after_insert | Failure | ccmlib.node.NodeError: C* process with 7135 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f8dad6c47f0>

def test_populate_mv_after_insert(self):
"""Test that a view is OK when created with existing data"""
> session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

materialized_views_test.py:319:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8d7a76f970>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7135 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.644
8_cythonno_x86_64_32_64 | test_describecluster_more_information_three_datacenters | Failure | ccmlib.node.NodeError: C* process with 7198 is terminated

self = <nodetool_test.TestNodetool object at 0x7f251dbaa490>

@since('4.0')
def test_describecluster_more_information_three_datacenters(self):
"""
nodetool describecluster should be more informative. It should include details
on total node count, list of datacenters, RF, number of nodes per dc, how many
are down, and version(s).
@jira_ticket CASSANDRA-13853
@expected_result This test invokes nodetool describecluster and matches the output with the expected one
"""
cluster = self.cluster
> cluster.populate([1, 2, 1]).start()

/home/cassandra/cassandra-dtest/nodetool_test.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f251c568280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7198 is terminated

/home/cassandra/cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
16.649
8_cythonno_x86_64_33_64 | test_sjk | Failure | ccmlib.node.NodeError: C* process with 5358 is terminated

self = <nodetool_test.TestNodetool object at 0x7fc376390820>

@since('4.0')
def test_sjk(self):
"""
Verify that SJK generally works.
"""

cluster = self.cluster
> cluster.populate([1]).start()

nodetool_test.py:457:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc375302280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5358 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.943
8_cythonno_x86_64_34_64 | test_crc_check_chance | Failure | ccmlib.node.NodeError: C* process with 8378 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fac68e16d30>

def test_crc_check_chance(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
> session = self.prepare()

materialized_views_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fac63ec5280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8378 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.477
8_cythonno_x86_64_35_64 | test_simple_strategy_counters | Failure | ccmlib.node.NodeError: C* process with 2738 is terminated

self = <consistency_test.TestAccuracy object at 0x7fb17a23f100>

def test_simple_strategy_counters(self):
"""
Test for a single datacenter, counters table.
"""
self.nodes = 3
self.rf = 3

combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
]

logger.debug("Testing single dc, counters")
> self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)

consistency_test.py:698:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb1783cd040>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2738 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.455
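The combinations table in this test is the usual quorum-overlap matrix. The suite's validator is not shown in the traceback, but the arithmetic behind the pairs is simply that reads observe the latest write when the write and read replica sets must overlap, i.e. W + R > RF. A standalone check of that rule, with the multi-DC quorum levels collapsed to a plain quorum as the test's own comment suggests (a sketch, not the suite's code):

def replicas_required(cl: str, rf: int) -> int:
    # Acks needed per consistency level in a single DC; the multi-DC
    # quorum levels are collapsed to a plain quorum here.
    return {'ONE': 1, 'TWO': 2, 'THREE': 3,
            'QUORUM': rf // 2 + 1, 'LOCAL_QUORUM': rf // 2 + 1,
            'EACH_QUORUM': rf // 2 + 1, 'ALL': rf}[cl]

def overlaps(write_cl: str, read_cl: str, rf: int = 3) -> bool:
    # Strong consistency requires the write and read quorums to overlap.
    return replicas_required(write_cl, rf) + replicas_required(read_cl, rf) > rf

assert overlaps('QUORUM', 'QUORUM')   # 2 + 2 > 3
assert overlaps('ALL', 'ONE')         # 3 + 1 > 3
assert not overlaps('ONE', 'ONE')     # 1 + 1 <= 3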
8_cythonno_x86_64_35_64 | test_prepared_statement | Failure | ccmlib.node.NodeError: C* process with 7635 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fb17a225790>

def test_prepared_statement(self):
"""Test basic insertions with prepared statement"""
> session = self.prepare(user_table=True)

materialized_views_test.py:384:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb17a1a6c40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7635 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
19.116
8_cythonno_x86_64_36_64 | test_immutable | Failure | ccmlib.node.NodeError: C* process with 7970 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f12c8964bb0>

def test_immutable(self):
"""Test that a materialized view is immutable"""
> session = self.prepare(user_table=True)

materialized_views_test.py:413:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f12c7a5c4c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7970 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.673
8_cythonno_x86_64_37_64 | test_drop_mv | Failure | ccmlib.node.NodeError: C* process with 8633 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fec6cbc67c0>

def test_drop_mv(self):
"""Test that we can drop a view properly"""
> session = self.prepare(user_table=True)

materialized_views_test.py:437:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fec6aeb3910>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8633 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.590
8_cythonno_x86_64_38_64 | test_drop_column | Failure | ccmlib.node.NodeError: C* process with 5134 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f22783e1a30>

def test_drop_column(self):
"""Test that we cannot drop a column if it is used by a MV"""
> session = self.prepare(user_table=True)

materialized_views_test.py:456:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f2277536580>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5134 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
12.861
8_cythonno_x86_64_39_64 | test_drop_table | Failure | ccmlib.node.NodeError: C* process with 9379 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f53451a3fd0>

def test_drop_table(self):
"""Test that we cannot drop a table without deleting its MVs first"""
> session = self.prepare(user_table=True)

materialized_views_test.py:470:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f53452b7160>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9379 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.153
8_cythonno_x86_64_3_64 | test_view_tombstone | Failure | ccmlib.node.NodeError: C* process with 8857 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff3e38aa730>

def test_view_tombstone(self):
"""
Test that materialized views properly tombstone

@jira_ticket CASSANDRA-10261
@jira_ticket CASSANDRA-10910
"""

> self.prepare(rf=3, options={'hinted_handoff_enabled': False})

materialized_views_test.py:1838:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff3e366d100>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8857 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.691
8_cythonno_x86_64_40_64 | test_clustering_column | Failure | ccmlib.node.NodeError: C* process with 10821 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f19be460520>

def test_clustering_column(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
> session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

materialized_views_test.py:495:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f19bd7b8c10>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 10821 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.744
8_cythonno_x86_64_41_64 | test_insert_during_range_movement_rf1 | Failure | ccmlib.node.NodeError: C* process with 10066 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f3cf7c793d0>

def test_insert_during_range_movement_rf1(self):
> self._base_test_insert_during_range_movement(rf=1)

materialized_views_test.py:637:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f3cf689bfa0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 10066 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.938
8_cythonno_x86_64_42_64 | test_insert_during_range_movement_rf2 | Failure | ccmlib.node.NodeError: C* process with 9879 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fa06a1f41f0>

def test_insert_during_range_movement_rf2(self):
> self._base_test_insert_during_range_movement(rf=2)

materialized_views_test.py:640:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa0681b5340>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9879 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.138
8_cythonno_x86_64_43_64 | test_insert_during_range_movement_rf3 | Failure | ccmlib.node.NodeError: C* process with 9468 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fb9d2750c70>

def test_insert_during_range_movement_rf3(self):
> self._base_test_insert_during_range_movement(rf=3)

materialized_views_test.py:643:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:653: in _base_test_insert_during_range_movement
session = self.prepare(rf=rf)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb9d08d5220>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9468 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.444
8_cythonno_x86_64_44_64 | test_allow_filtering | Failure | ccmlib.node.NodeError: C* process with 9743 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff1c2fcda90>

def test_allow_filtering(self):
"""Test that allow filtering works as usual for a materialized view"""
> session = self.prepare()

materialized_views_test.py:828:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff1c30848e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9743 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.262
8_cythonno_x86_64_45_64 | test_secondary_index | Failure | ccmlib.node.NodeError: C* process with 6441 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd0ccee4d30>

def test_secondary_index(self):
"""Test that secondary indexes cannot be created on a materialized view"""
> session = self.prepare()

materialized_views_test.py:862:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd0ccfe5be0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6441 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.195
8_cythonno_x86_64_45_64 | test_anticompaction_after_normal_repair | Failure | ccmlib.node.NodeError: C* process with 17331 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7fd0ccada520>

@since('2.2.1', '4')
def test_anticompaction_after_normal_repair(self):
"""
* Launch a four node, two DC cluster
* Start a normal repair
* Assert every node anticompacts
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
logger.debug("Starting cluster..")
> cluster.populate([2, 2]).start()

repair_tests/repair_test.py:353:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd0c667bac0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 17331 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
16.418
8_cythonno_x86_64_46_64 | test_ttl | Failure | ccmlib.node.NodeError: C* process with 9753 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7febaa861e50>

def test_ttl(self):
"""
Test that TTL works as expected for a materialized view
@expected_result The TTL is propagated properly between tables.
"""
> session = self.prepare()

materialized_views_test.py:875:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7febaa8cff10>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9753 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.508
8_cythonno_x86_64_47_64 | test_query_all_new_column | Failure | ccmlib.node.NodeError: C* process with 3665 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fa3e24245b0>

def test_query_all_new_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:896:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa3e023ea60>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3665 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
9.992
8_cythonno_x86_64_48_64 | test_query_new_column | Failure | ccmlib.node.NodeError: C* process with 3183 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fa1d8049d30>

def test_query_new_column(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:922:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa1d7f83c40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3183 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.780
8_cythonno_x86_64_49_64 | test_rename_column | Failure | ccmlib.node.NodeError: C* process with 16921 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f954b234040>

def test_rename_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
@expected_result The column is also renamed in the view.
"""
> session = self.prepare(user_table=True)

materialized_views_test.py:951:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f954a28ba90>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 16921 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.076
8_cythonno_x86_64_4_64 | test_simple_repair_by_base | Failure | ccmlib.node.NodeError: C* process with 5737 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f4e4710eaf0>

def test_simple_repair_by_base(self):
> self._simple_repair_test(repair_base=True)

materialized_views_test.py:1935:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1945: in _simple_repair_test
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f4e471174c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5737 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
13.243
8_cythonno_x86_64_50_64 | test_rename_column_atomicity | Failure | ccmlib.node.NodeError: C* process with 7767 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fdb75e47c70>

def test_rename_column_atomicity(self):
"""
Test that column renaming is atomically done between a table and its materialized views
@jira_ticket CASSANDRA-12952
"""
> session = self.prepare(nodes=1, user_table=True, install_byteman=True)

materialized_views_test.py:977:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fdb75f17760>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7767 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
14.927
8_cythonno_x86_64_51_64 | test_2dc_parallel_startup | Failure | ccmlib.node.NodeError: C* process with 6419 is terminated

self = <gossip_test.TestGossip object at 0x7f66b9694cd0>

@since('4.0')
def test_2dc_parallel_startup(self):
"""
@jira_ticket CASSANDRA-16588
Given a 2 DC cluster, start all seed nodes in parallel followed by
all non-seed nodes in parallel.
"""
> node1, node2, node3, node4 = self._create_2dc_cluster()

gossip_test.py:135:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gossip_test.py:109: in _create_2dc_cluster
node1.start(wait_for_binary_proto=True)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f66b3719d60>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6419 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
4.962
8_cythonno_x86_64_51_64 | test_lwt | Failure | ccmlib.node.NodeError: C* process with 7681 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f66b966d220>

def test_lwt(self):
"""Test that lightweight transaction behave properly with a materialized view"""
> session = self.prepare()

materialized_views_test.py:1015:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f66b97673a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7681 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
8.759
8_cythonno_x86_64_52_64 | test_2dc_parallel_startup_one_seed | Failure | ccmlib.node.NodeError: C* process with 7667 is terminated

self = <gossip_test.TestGossip object at 0x7f99d2a7b1f0>

@since('4.0')
def test_2dc_parallel_startup_one_seed(self):
"""
@jira_ticket CASSANDRA-16588
Given a 2 DC cluster, start one seed node followed by all non-seed
nodes in parallel.
"""
> node1, node2, node3, node4 = self._create_2dc_cluster()

gossip_test.py:150:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
gossip_test.py:109: in _create_2dc_cluster
node1.start(wait_for_binary_proto=True)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f99d0a5d100>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7667 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
6.267
8_cythonno_x86_64_52_64 | test_interrupt_build_process | Failure | ccmlib.node.NodeError: C* process with 8930 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f99d2a4f700>

def test_interrupt_build_process(self):
"""Test that an interrupted MV build process is resumed as it should"""

options = {'hinted_handoff_enabled': False}
if self.cluster.version() >= '4':
options['concurrent_materialized_view_builders'] = 4

> session = self.prepare(options=options, install_byteman=True)

materialized_views_test.py:1096:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f99d00e5ee0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8930 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
11.363
8_cythonno_x86_64_54_64 | test_hintedhandoff_disabled | Failure | ccmlib.node.NodeError: C* process with 2973 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7fc5e0c84880>

def test_hintedhandoff_disabled(self):
"""
Test global hinted handoff disabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': False})

hintedhandoff_test.py:107:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc5dd283280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 2973 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
10.865
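The `_start_two_node_cluster` helper is only partially visible in this traceback. Given the `{'hinted_handoff_enabled': False}` argument it receives and the `populate([2]).start()` call it makes, a plausible reconstruction (an assumption, not the suite's code) is:

def start_two_node_cluster(cluster, config_options=None):
    # Plausible shape of the helper above: apply cassandra.yaml
    # overrides, then bring up a single-DC, two-node cluster.
    if config_options:
        cluster.set_configuration_options(values=config_options)
    cluster.populate([2]).start()
    return cluster.nodelist()

# e.g. node1, node2 = start_two_node_cluster(
#          cluster, {'hinted_handoff_enabled': False})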
8_cythonno_x86_64_54_64 | test_drop_with_stopped_build | Failure | ccmlib.node.NodeError: C* process with 4602 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fc5e0b61610>

@since('4.0')
def test_drop_with_stopped_build(self):
"""Test that MV whose build has been stopped with `nodetool stop` can be dropped"""

> session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)

materialized_views_test.py:1210:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc5ded635e0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4602 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 13.617

8_cythonno_x86_64_55_64 | test_hintedhandoff_enabled | Failure | ccmlib.node.NodeError: C* process with 4333 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7f0534f17af0>

def test_hintedhandoff_enabled(self):
"""
Test global hinted handoff enabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True})

hintedhandoff_test.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f05340636a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4333 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 8.985

8_cythonno_x86_64_55_64 | test_resume_stopped_build | Failure | ccmlib.node.NodeError: C* process with 5833 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f0534e7af40>

@since('4.0')
def test_resume_stopped_build(self):
"""Test that MV builds stopped with `nodetool stop` are resumed after restart"""

> session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)

materialized_views_test.py:1278:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f052ff7cbb0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5833 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.599

8_cythonno_x86_64_56_64 | test_hintedhandoff_setmaxwindow | Failure | ccmlib.node.NodeError: C* process with 3454 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7fd45ae30ee0>

@since('4.0')
def test_hintedhandoff_setmaxwindow(self):
"""
Test global hinted handoff against max_hint_window_in_ms update via nodetool
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True, "max_hint_window_in_ms": 300000})

hintedhandoff_test.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd458edc7f0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3454 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.105
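
The setmaxwindow test above only got as far as cluster startup, but what it means to exercise is the 4.0 nodetool pair setmaxhintwindow/getmaxhintwindow against a live node. A hedged sketch of that interaction, in the Node.nodetool style used throughout these ccmlib-based dtests:

    # Update max_hint_window_in_ms at runtime, then read it back.
    node1, node2 = cluster.nodelist()
    node1.nodetool('setmaxhintwindow 300000')
    node1.nodetool('getmaxhintwindow')  # expected to report 300000 ms
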
8_cythonno_x86_64_56_64 | test_mv_with_default_ttl_with_flush | Failure | ccmlib.node.NodeError: C* process with 4949 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fd45ad9d190>

@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
> self._test_mv_with_default_ttl(True)

materialized_views_test.py:1333:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1344: in _test_mv_with_default_ttl
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd458d3faf0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4949 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.051

8_cythonno_x86_64_56_64 | test_local_dc_repair | Failure | ccmlib.node.NodeError: C* process with 13542 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7fd45a958e80>

def test_local_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -local repair on one DC
* Assert only nodes in that DC are repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:624:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd459cf4310>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 13542 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 26.693
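
test_local_dc_repair and the -dc/-dcpar variants further down differ only in the flags passed to nodetool repair. A sketch of the three invocations the docstrings describe (the keyspace name ks is a placeholder):

    # The three repair scopes exercised by this family of tests.
    node1.nodetool('repair -local ks')                  # repair limited to the local DC
    node1.nodetool('repair -dc dc1 -dc dc2 ks')         # repair limited to two named DCs
    node1.nodetool('repair -dc dc1 -dc dc2 -dcpar ks')  # same, with the DCs repaired in parallel
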
8_cythonno_x86_64_57_64 | test_hintedhandoff_dc_disabled | Failure | ccmlib.node.NodeError: C* process with 5545 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7fa747bfe790>

def test_hintedhandoff_dc_disabled(self):
"""
Test global hinted handoff enabled with the dc disabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})

hintedhandoff_test.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa72c7b4490>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5545 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.143

8_cythonno_x86_64_57_64 | test_mv_with_default_ttl_without_flush | Failure | ccmlib.node.NodeError: C* process with 7042 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fa747a9b460>

@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
> self._test_mv_with_default_ttl(False)

materialized_views_test.py:1337:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1344: in _test_mv_with_default_ttl
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fa7459de940>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7042 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.871

8_cythonno_x86_64_57_64 | test_dc_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <repair_tests.repair_test.TestRepair object at 0x7fa74766a580>

def test_dc_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's
* Assert only nodes on those dcs were repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:652:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fa7476921f0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fa7440c46a0>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7fa7440c46a0>, <subprocess.Popen object at 0x7fa745cf1a30>, 0), (<ccmlib.node.Node obj... at 0x7fa7477063d0>, 0), (<ccmlib.node.Node object at 0x7fa745ba3850>, <subprocess.Popen object at 0x7fa745c8b3d0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fa745cf1a30>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 25.601

8_cythonno_x86_64_58_64 | test_hintedhandoff_dc_reenabled | Failure | ccmlib.node.NodeError: C* process with 4481 is terminated

self = <hintedhandoff_test.TestHintedHandoffConfig object at 0x7fbb6ef048e0>

def test_hintedhandoff_dc_reenabled(self):
"""
Test global hinted handoff enabled with the dc disabled first and then re-enabled
"""
> node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})

hintedhandoff_test.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
hintedhandoff_test.py:36: in _start_two_node_cluster
cluster.populate([2]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fbb6d0124c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4481 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.415

8_cythonno_x86_64_58_64 | test_no_base_column_in_view_pk_complex_timestamp_with_flush | Failure | ccmlib.node.NodeError: C* process with 5967 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fbb6eece820>

@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
> self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)

materialized_views_test.py:1462:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1476: in _test_no_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fbb6d03e250>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5967 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 11.340

8_cythonno_x86_64_58_64 | test_size_estimates_multidc | Failure | ccmlib.node.NodeError: C* process with 14111 is terminated

self = <topology_test.TestTopology object at 0x7fbb6ebebc70>

@since('3.0.11')
def test_size_estimates_multidc(self):
"""
Test that primary ranges are correctly generated on
system.size_estimates for multi-dc, multi-ks scenario
@jira_ticket CASSANDRA-9639
"""
logger.debug("Creating cluster")
cluster = self.cluster
cluster.set_configuration_options(values={'num_tokens': 2})
cluster.populate([2, 1])
node1_1, node1_2, node2_1 = cluster.nodelist()

logger.debug("Setting tokens")
node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
'-2506475074448728501,8473270337963525440',
'-3736333188524231709,8673615181726552074']
node1_1.set_configuration_options(values={'initial_token': node1_tokens})
node1_2.set_configuration_options(values={'initial_token': node2_tokens})
node2_1.set_configuration_options(values={'initial_token': node3_tokens})
cluster.set_configuration_options(values={'num_tokens': 2})

logger.debug("Starting cluster")
> cluster.start()

topology_test.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fbb6dd2f820>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 14111 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 18.487

8_cythonno_x86_64_58_64 | test_dc_parallel_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <repair_tests.repair_test.TestRepair object at 0x7fbb6ea9b7c0>

def test_dc_parallel_repair(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's, with -dcpar
* Assert only nodes on those dcs were repaired
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:682:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fbb6eea8850>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fbb6ee44760>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7fbb6ee44760>, <subprocess.Popen object at 0x7fbb6ee0aa00>, 0), (<ccmlib.node.Node obj... at 0x7fbb6ea8ea90>, 0), (<ccmlib.node.Node object at 0x7fbb6efd7d60>, <subprocess.Popen object at 0x7fbb6d030700>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fbb6ee0aa00>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 24.692

8_cythonno_x86_64_59_64 | test_failed_read_repair | Failure | Failed: DID NOT RAISE <class 'cassandra.ReadTimeout'>

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7f2bab911940>

@since('4.0')
def test_failed_read_repair(self):
"""
If none of the disagreeing nodes ack the repair mutation, the read should fail
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)

session = self.get_cql_connection(node1, timeout=2)
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])
node3.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

with raises(WriteTimeout):
session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)"))

node2.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])
session = self.get_cql_connection(node2)
with StorageProxy(node2) as storage_proxy:
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

with raises(ReadTimeout):
> session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
E Failed: DID NOT RAISE <class 'cassandra.ReadTimeout'>

read_repair_test.py:555: Failed
Time(s): 38.485

8_cythonno_x86_64_59_64 | test_repair_validates_dc | Failure | ccmlib.node.NodeError: C* process with 22079 is terminated

self = <repair_tests.repair_test.TestRepair object at 0x7f2bab5a3340>

@since('3.11')
def test_repair_validates_dc(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair with nonexistent dc and without local dc
* Assert that the repair is not triggered in either case
"""
> cluster = self._setup_multi_dc()

repair_tests/repair_test.py:720:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
repair_tests/repair_test.py:756: in _setup_multi_dc
cluster.populate([2, 1, 1]).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f2baba46910>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 22079 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 17.198

8_cythonno_x86_64_5_64 | test_simple_repair_by_view | Failure | ccmlib.node.NodeError: C* process with 7652 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7fc963e5ceb0>

def test_simple_repair_by_view(self):
> self._simple_repair_test(repair_view=True)

materialized_views_test.py:1938:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1945: in _simple_repair_test
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc9622986a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7652 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.924

8_cythonno_x86_64_5_64 | test_simple_rebuild | Failure | ccmlib.node.NodeError: C* process with 9608 is terminated

self = <rebuild_test.TestRebuild object at 0x7fc963dc5250>

def test_simple_rebuild(self):
"""
@jira_ticket CASSANDRA-9119

Test rebuild from other dc works as expected.
"""

keys = 1000

cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
node1 = cluster.create_node('node1', False,
None,
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
cluster.add(node1, True, data_center='dc1')

# start node in dc1
> node1.start(wait_for_binary_proto=True)

rebuild_test.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fc961ffc1c0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9608 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 8.244
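
test_simple_rebuild above failed before reaching the operation it is named for. For context, once a node in the destination DC is up, the rebuild step itself is a single nodetool call; a sketch with placeholder node and DC names:

    # Stream data into the new node from the named source DC (CASSANDRA-9119).
    node2.nodetool('rebuild dc1')
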
8_cythonno_x86_64_60_64 | test_base_column_in_view_pk_complex_timestamp_with_flush | Failure | ccmlib.node.NodeError: C* process with 5962 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f039ebd7970>

@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
> self._test_base_column_in_view_pk_complex_timestamp(flush=True)

materialized_views_test.py:1588:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1601: in _test_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f039eb73ac0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5962 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.970

8_cythonno_x86_64_60_64 | test_non_replicated_ks_repair | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <repair_tests.repair_test.TestRepair object at 0x7f039e7f2f40>

@since('4.0')
def test_non_replicated_ks_repair(self):
cluster = self.cluster
> cluster.populate([2, 2]).start(wait_for_binary_proto=True)

repair_tests/repair_test.py:960:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f039dd37c70>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f039deffe20>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7f039deffe20>, <subprocess.Popen object at 0x7f039ebadeb0>, 0), (<ccmlib.node.Node obj... at 0x7f039e9162e0>, 0), (<ccmlib.node.Node object at 0x7f039ecb6fa0>, <subprocess.Popen object at 0x7f039c30b2b0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f039ebadeb0>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 21.366

8_cythonno_x86_64_61_64 | test_base_column_in_view_pk_complex_timestamp_without_flush | Failure | ccmlib.node.NodeError: C* process with 5275 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f668eb24d30>

@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
> self._test_base_column_in_view_pk_complex_timestamp(flush=False)

materialized_views_test.py:1592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1601: in _test_base_column_in_view_pk_complex_timestamp
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f668eb0ac40>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5275 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 9.593

8_cythonno_x86_64_62_64 | test_expired_liveness_with_limit_rf1_nodes1 | Failure | ccmlib.node.NodeError: C* process with 3328 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f8b898f2490>

@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
> self._test_expired_liveness_with_limit(rf=1, nodes=1)

materialized_views_test.py:1718:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f8b895346a0>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 3328 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 8.535

8_cythonno_x86_64_62_64 | test_oversized_mutation | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

self = <write_failures_test.TestMultiDCWriteFailures object at 0x7f8b895ed6d0>

def test_oversized_mutation(self):
"""
Test that multi-DC write failures return operation failed rather than a timeout.
@jira_ticket CASSANDRA-16334.
"""

cluster = self.cluster
cluster.populate([2, 2])
cluster.set_configuration_options(values={'max_mutation_size_in_kb': 128})
> cluster.start()

write_failures_test.py:261:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7f8b89585eb0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7f8b827fc640>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7f8b827fc640>, <subprocess.Popen object at 0x7f8b8234a340>, 0), (<ccmlib.node.Node obj... at 0x7f8b82f72e20>, 0), (<ccmlib.node.Node object at 0x7f8b898d9100>, <subprocess.Popen object at 0x7f8b8951f310>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7f8b8234a340>

def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
wait_other_notice=True, jvm_args=None, profile_options=None,
quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
if jvm_args is None:
jvm_args = []

extension.pre_cluster_start(self)

# check whether all loopback aliases are available before starting any nodes
for node in list(self.nodes.values()):
if not node.is_running():
for itf in node.network_interfaces.values():
if itf is not None:
common.assert_socket_available(itf)

started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()

# if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

# Prior to JDK8, starting every node at once could lead to a
# nanotime collision where the RNG that generates a node's tokens
# gives identical tokens to several nodes. Thus, we stagger
# the node starts
if common.get_jdk_version() < '1.8':
time.sleep(1)

started.append((node, p, mark))

if no_wait:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
if not node._wait_for_running(p, timeout_s=7):
> raise NodeError("Node {} should be running before waiting for <started listening> log message, "
"but C* process is terminated.".format(node.name))
E ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
Time(s): 27.725
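
test_oversized_mutation never got past startup, but its premise (CASSANDRA-16334) is that with max_mutation_size_in_kb: 128 an oversized write should come back as an explicit failure rather than a timeout. A hedged sketch of that expectation, assuming an established python-driver session against the test cluster; whether the error surfaces exactly as WriteFailure is the assertion under test, not something this report confirms:

    from cassandra import WriteFailure

    # A ~256 KB value exceeds the 128 KB mutation cap, so the write should
    # fail fast instead of timing out client-side.
    blob = 'x' * (256 * 1024)
    try:
        session.execute("INSERT INTO ks.tbl (k, v) VALUES (0, %s)", (blob,))
    except WriteFailure as exc:
        print('write failed as intended:', exc)
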
8_cythonno_x86_64_63_64 | test_expired_liveness_with_limit_rf1_nodes3 | Failure | ccmlib.node.NodeError: C* process with 6959 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f1fe2c03fa0>

@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
> self._test_expired_liveness_with_limit(rf=1, nodes=3)

materialized_views_test.py:1722:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1fe282ad00>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6959 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 83.878

8_cythonno_x86_64_63_64 | test_zerocopy_streaming | Failure | ccmlib.node.NodeError: C* process with 14282 is terminated

self = <streaming_test.TestStreaming object at 0x7f1fe2b6d1f0>

@since('4.0')
def test_zerocopy_streaming(self):
> self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.eq, num_zerocopy=1, num_partial=0,
num_nodes=2, rf=2)

streaming_test.py:100:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
streaming_test.py:63: in _test_streaming
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f1fe02c8280>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 14282 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 39.356
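
Both zero-copy streaming tests assert on counts of whole-SSTable versus partial streams. In 4.0 that path is controlled by one cassandra.yaml option; a one-line sketch using the same ccmlib call these tests use for other configuration:

    # Enable full-SSTable (zero-copy) streaming (4.0+).
    cluster.set_configuration_options(values={'stream_entire_sstables': True})
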
8_cythonno_x86_64_63_64 | test_decommissioned_node_cant_rejoin | Failure | ccmlib.node.TimeoutError: timed out and did not find log entry: This node was decommissioned and will not rejoin the ring

self = <topology_test.TestTopology object at 0x7f1fe28f8220>

@since('3.0')
def test_decommissioned_node_cant_rejoin(self):
"""
@jira_ticket CASSANDRA-8801

Test that a decommissioned node can't rejoin the cluster by:

- creating a cluster,
- decommissioning a node, and
- asserting that the "decommissioned node won't rejoin" error is in the
logs for that node and
- asserting that the node is not running.
"""
rejoin_err = 'This node was decommissioned and will not rejoin the ring'
self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
rejoin_err]

self.cluster.populate(3).start()
node1, node2, node3 = self.cluster.nodelist()

logger.debug('decommissioning...')
node3.decommission(force=self.cluster.version() >= '4.0')
logger.debug('stopping...')
node3.stop()
logger.debug('attempting restart...')
node3.start(wait_other_notice=False)
timedout = False
try:
# usually takes 3 seconds, so give it a generous 15
node3.watch_log_for(rejoin_err, timeout=15)
except TimeoutError:
# TimeoutError is not very helpful to the reader of the test output;
# let that pass and move on to string assertion below
timedout = True

n3errors = node3.grep_log_for_errors()
if len(n3errors) == 0 and timedout:
> raise TimeoutError("timed out and did not find log entry: " + rejoin_err)
E ccmlib.node.TimeoutError: timed out and did not find log entry: This node was decommissioned and will not rejoin the ring

topology_test.py:425: TimeoutError
Time(s): 152.449

8_cythonno_x86_64_63_64 | test_bulk_round_trip_with_backoff | Failure | cassandra.OperationTimedOut: errors={'127.0.0.1:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f1fe2853940>

@since('3.0.5')
def test_bulk_round_trip_with_backoff(self):
"""
Test bulk import with default stress import (one row per operation) and COPY options
that exercise the new back-off policy introduced by CASSANDRA-11320.

@jira_ticket CASSANDRA-11320
"""
> self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=250000,
copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})

cqlsh_tests/test_cqlsh_copy.py:2553:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:2436: in _test_bulk_round_trip
num_records = create_records()
cqlsh_tests/test_cqlsh_copy.py:2409: in create_records
ret = rows_to_list(self.session.execute(count_statement))[0][0]
../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:2618: in execute
return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ResponseFuture: query='<SimpleStatement query="SELECT COUNT(*) FROM keyspace1.standard1", consistency=ALL>' request_i...9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042 coordinator_host=None>

def result(self):
"""
Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.

Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.

Example usage::

>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...

>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:")

"""
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:
> raise self._final_exception
E cassandra.OperationTimedOut: errors={'127.0.0.1:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042

../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:4894: OperationTimedOut
Time(s): 417.409
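
The failure above is a client-side driver timeout rather than a node crash: as the quoted driver docstring says, the deadline is set on the execution call itself. A minimal sketch of raising it for the slow COUNT(*) that timed out (the contact point and the 600-second value are placeholders):

    from cassandra.cluster import Cluster

    # Give the long-running aggregate more than the default client timeout.
    session = Cluster(['127.0.0.1']).connect()
    row = session.execute("SELECT COUNT(*) FROM keyspace1.standard1", timeout=600).one()
    print(row[0])
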
8_cythonno_x86_64_64_64 | test_expired_liveness_with_limit_rf3 | Failure | ccmlib.node.NodeError: C* process with 9591 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff8b6a600a0>

@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
> self._test_expired_liveness_with_limit(rf=3, nodes=3)

materialized_views_test.py:1726:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:1734: in _test_expired_liveness_with_limit
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff8a4677040>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 9591 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.333

8_cythonno_x86_64_64_64 | test_quorum_requirement_on_speculated_read | Failure | assert 0 == 1 +0 -1

self = <read_repair_test.TestSpeculativeReadRepair object at 0x7ff8b69e7610>

@since('4.0')
def test_quorum_requirement_on_speculated_read(self):
"""
Even if we speculate on every stage, we should still only require a quorum of responses for success
"""
node1, node2, node3 = self.cluster.nodelist()
assert isinstance(node1, Node)
assert isinstance(node2, Node)
assert isinstance(node3, Node)
session = self.get_cql_connection(node1, timeout=2)

session.execute(quorum("INSERT INTO ks.tbl (k, c, v) VALUES (1, 0, 1)"))

node2.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_writes.btm')])

session.execute("INSERT INTO ks.tbl (k, c, v) VALUES (1, 1, 2)")

# re-enable writes
node2.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])
node3.byteman_submit(['-u', mk_bman_path('read_repair/stop_writes.btm')])

# force endpoint order
node1.byteman_submit([mk_bman_path('read_repair/sorted_live_endpoints.btm')])

node2.byteman_submit([mk_bman_path('read_repair/stop_digest_reads.btm')])
node3.byteman_submit([mk_bman_path('read_repair/stop_data_reads.btm')])
script_version = '_5_1' if self.cluster.version() >= LooseVersion('5.1') else ''
node2.byteman_submit([mk_bman_path('read_repair/stop_rr_writes{}.btm'.format(script_version))])

with StorageProxy(node1) as storage_proxy:
assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 0
assert storage_proxy.blocking_read_repair == 0
assert storage_proxy.speculated_rr_read == 0
assert storage_proxy.speculated_rr_write == 0

session = self.get_cql_connection(node1)
expected = [kcv(1, 0, 1), kcv(1, 1, 2)]
results = session.execute(quorum("SELECT * FROM ks.tbl WHERE k=1"))
assert listify(results) == expected

> assert storage_proxy.get_table_metric("ks", "tbl", "SpeculativeRetries") == 1
E assert 0 == 1
E +0
E -1

read_repair_test.py:768: AssertionError
Time(s): 37.342

8_cythonno_x86_64_64_64 | test_zerocopy_streaming_no_replication | Failure | ccmlib.node.NodeError: C* process with 14476 is terminated

self = <streaming_test.TestStreaming object at 0x7ff8b69bb970>

@since('4.0')
def test_zerocopy_streaming_no_replication(self):
> self._test_streaming(op_zerocopy=operator.eq, op_partial=operator.eq, num_zerocopy=0, num_partial=0, rf=1,
num_nodes=3)

streaming_test.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
streaming_test.py:63: in _test_streaming
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff8b4cf9190>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 14476 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.178

8_cythonno_x86_64_6_64 | test_base_replica_repair | Failure | ccmlib.node.NodeError: C* process with 8571 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7ff1e7fabee0>

def test_base_replica_repair(self):
> self._base_replica_repair_test()

materialized_views_test.py:2000:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2014: in _base_replica_repair_test
self.prepare(rf=3)
materialized_views_test.py:60: in prepare
cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff1e7531220>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8571 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 32.536

8_cythonno_x86_64_6_64 | test_resumable_rebuild | Failure | ccmlib.node.NodeError: C* process with 10596 is terminated

self = <rebuild_test.TestRebuild object at 0x7ff1e7f172b0>

@since('2.2')
def test_resumable_rebuild(self):
"""
@jira_ticket CASSANDRA-10810

Test rebuild operation is resumable
"""
self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
r'Error while rebuilding node',
r'Streaming error occurred on session with peer 127.0.0.3',
r'Remote peer 127.0.0.3 failed stream session',
r'Streaming error occurred on session with peer 127.0.0.3:7000',
r'Remote peer /?127.0.0.3:7000 failed stream session',
r'Stream receive task .* already finished',
r'stream operation from /?127.0.0.1:.* failed'
]

cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})

# Create 2 nodes on dc1
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))

cluster.add(node1, True, data_center='dc1')
cluster.add(node2, True, data_center='dc1')

> node1.start(wait_for_binary_proto=True, jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])

rebuild_test.py:184:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7ff1e7ea8250>

def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 10596 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 21.683

8_cythonno_x86_64_7_64 | test_base_replica_repair_with_contention | Failure | ccmlib.node.NodeError: C* process with 5991 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f556abfc790>

    def test_base_replica_repair_with_contention(self):
        """
        Test repair does not fail when there is MV lock contention
        @jira_ticket CASSANDRA-12905
        """
>       self._base_replica_repair_test(fail_mv_lock=True)

materialized_views_test.py:2007:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2014: in _base_replica_repair_test
    self.prepare(rf=3)
materialized_views_test.py:60: in prepare
    cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f556ac0bc70>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 5991 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 10.980
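test_base_replica_repair_with_contention delegates to _base_replica_repair_test, which, per its name and docstring, repairs a base-table replica while materialized-view updates can contend for the MV lock (CASSANDRA-12905). The helper below is a guess at the shape of that flow in ccmlib terms, not the dtest's actual code; 'cluster' is assumed to be a started three-node ccmlib cluster and 'ks' a keyspace name.

def repair_base_replica(cluster, ks='ks'):
    # Take one base replica down so it misses writes...
    node1 = cluster.nodelist()[0]
    node1.stop(wait_other_notice=True)
    # ...(the test would write rows here while node1 is offline)...
    node1.start(wait_for_binary_proto=True)
    # Repair streams the missed base rows back; applying each one also
    # updates the view, which is where MV lock contention can arise.
    node1.nodetool('repair {}'.format(ks))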
8_cythonno_x86_64_7_64 | test_rebuild_ranges | Failure | ccmlib.node.NodeError: C* process with 7923 is terminated

self = <rebuild_test.TestRebuild object at 0x7f556abecca0>

    @since('3.6')
    def test_rebuild_ranges(self):
        """
        @jira_ticket CASSANDRA-10406
        """
        keys = 1000

        cluster = self.cluster
        tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})
        node1 = cluster.create_node('node1', False,
                                    ('127.0.0.1', 9160),
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', tokens[0],
                                    binary_interface=('127.0.0.1', 9042))
        node1.set_configuration_options(values={'initial_token': tokens[0]})
        cluster.add(node1, True, data_center='dc1')
        node1 = cluster.nodelist()[0]

        # start node in dc1
>       node1.start(wait_for_binary_proto=True)

rebuild_test.py:270:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
    self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
    self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f555a62e190>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 7923 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 7.373
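test_rebuild_ranges pins each node to a precomputed token via balanced_tokens_across_dcs and initial_token. One common convention for the Murmur3 partitioner is to space tokens evenly over [-2**63, 2**63) and shift each datacenter by a small constant so two rings never claim the same token; the sketch below follows that convention and only approximates, rather than reproduces, what ccmlib computes.

def balanced_tokens(num_nodes, offset=0):
    """Evenly spaced Murmur3 tokens; offset keeps DCs from colliding."""
    return [((2**64 // num_nodes) * i) - 2**63 + offset
            for i in range(num_nodes)]


# One token per DC, matching the test's single-node-per-DC ['dc1', 'dc2'] layout:
dc1_tokens = balanced_tokens(1, offset=0)  # [-9223372036854775808]
dc2_tokens = balanced_tokens(1, offset=1)  # [-9223372036854775807]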
8_cythonno_x86_64_8_64 | test_rf_gt_nodes_multidc_should_succeed | Failure | ccmlib.node.NodeError: C* process with 2199 is terminated

self = <bootstrap_test.TestBootstrap object at 0x7fabf58d62e0>

    def test_rf_gt_nodes_multidc_should_succeed(self):
        """
        Validating a KS with RF > N on multi DC doesn't break bootstrap
        @jira_ticket CASSANDRA-16296 CASSANDRA-16411
        """
        cluster = self.cluster
        cluster.set_environment_variable('CASSANDRA_TOKEN_PREGENERATION_DISABLED', 'True')
        cluster.populate([1, 1])
>       cluster.start()

bootstrap_test.py:320:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fabf403f940>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 2199 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 9.722
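The scenario behind test_rf_gt_nodes_multidc_should_succeed is a keyspace whose per-DC replication factor exceeds the node count in each DC (RF=3 against the [1, 1] topology built by cluster.populate). A minimal reproduction with the DataStax Python driver might look like the following; the keyspace name rf_gt_n and the 127.0.0.1 contact point are assumptions.

from cassandra.cluster import Cluster

session = Cluster(['127.0.0.1']).connect()
# RF=3 per DC exceeds the single node in each DC; per CASSANDRA-16296 and
# CASSANDRA-16411 this must not break a later bootstrap.
session.execute(
    "CREATE KEYSPACE IF NOT EXISTS rf_gt_n WITH replication = "
    "{'class': 'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3}"
)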
8_cythonno_x86_64_9_64 | test_base_view_consistency_on_failure_after_mv_apply | Failure | ccmlib.node.NodeError: C* process with 9732 is terminated

self = <materialized_views_test.TestMaterializedViews object at 0x7f824565d7f0>

    def test_base_view_consistency_on_failure_after_mv_apply(self):
>       self._test_base_view_consistency_on_crash("after")

materialized_views_test.py:2553:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:2569: in _test_base_view_consistency_on_crash
    self.prepare(rf=1, install_byteman=True)
materialized_views_test.py:60: in prepare
    cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f82455d5520>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 9732 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
Time(s): 12.007