Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest-large-latest_jdk11_python3.8_cythonno_x86_64_1_8

Name | Tests | Errors | Failures | Skipped | Time(s) | Time Stamp | Host
8_cythonno_x86_64_1_8 | 6 | 3 | 2 | 1 | 420.604 | 2024-12-16T17:09:07.784886 | e065e4938eea

Failures

Name | Status | Type | Time(s)
test_network_topology_strategy | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated. | 51.373

self = <consistency_test.TestAvailability object at 0x7fb1a6aa1130>

    @pytest.mark.resource_intensive
    def test_network_topology_strategy(self):
        """
        Test for multiple datacenters, using network topology replication strategy.
        """
        self.nodes = [3, 3, 3]
        self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

>       self._start_cluster()

consistency_test.py:370:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:136: in _start_cluster
    cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb1a69e8910>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migrat..._in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', '-Dcassandra.migration_task_wait_in_seconds=18', ...]
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb1a69ed4f0>, itf = ('127.0.0.9', 9042)
started = [(<ccmlib.node.Node object at 0x7fb1a69ed4f0>, <subprocess.Popen object at 0x7fb1a6a52340>, 0), (<ccmlib.node.Node obj...x7fb1a6a522e0>, 0), (<ccmlib.node.Node object at 0x7fb1a69f5910>, <subprocess.Popen object at 0x7fb1a6a1e850>, 0), ...]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb1a6a52340>

    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
        if jvm_args is None:
            jvm_args = []

        extension.pre_cluster_start(self)

        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)

        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()

                # if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)

                started.append((node, p, mark))

        if no_wait:
            time.sleep(2)  # waiting 2 seconds to check for early errors and for the pid to be set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
>                   raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
E                   ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError

test_throttled_partition_update | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated. | 21.809

self = <materialized_views_test.TestMaterializedViews object at 0x7fb1a69b9d00>

    @pytest.mark.resource_intensive
    def test_throttled_partition_update(self):
        """
        @jira_ticket: CASSANDRA-13299, test breaking up a large partition when repairing a base table with an MV.

        Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the number
        of rows to be applied in one mutation.
        """

>       session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)

materialized_views_test.py:2192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
materialized_views_test.py:60: in prepare
    cluster.start(jvm_args=['-Dcassandra.reset_bootstrap_progress=false'])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.cluster.Cluster object at 0x7fb1a5edf250>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.reset_bootstrap_progress=false', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration..._wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10', '-Dcassandra.migration_task_wait_in_seconds=10']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb1a6ac4e50>, itf = ('127.0.0.5', 9042)
started = [(<ccmlib.node.Node object at 0x7fb1a6ac4e50>, <subprocess.Popen object at 0x7fb1a6a29e50>, 0), (<ccmlib.node.Node obj... at 0x7fb1a6a291c0>, 0), (<ccmlib.node.Node object at 0x7fb1a6a52f70>, <subprocess.Popen object at 0x7fb1a6a29cd0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb1a6a29e50>

    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
        if jvm_args is None:
            jvm_args = []

        extension.pre_cluster_start(self)

        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)

        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()

                # if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)

                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)

                started.append((node, p, mark))

        if no_wait:
            time.sleep(2)  # waiting 2 seconds to check for early errors and for the pid to be set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
>                   raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
E                   ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError
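
Both failures share a root cause: in Cluster.start(), each freshly launched node must report as running within the 7-second _wait_for_running window before ccm even begins waiting for the <started listening> log line, and node1's Cassandra process died before doing so. The following is a minimal sketch of exercising the same startup path outside the test suite for local diagnosis; the workspace path, cluster name, and Cassandra version are hypothetical, and it assumes ccmlib is installed and can fetch or locate the requested version:

    import os

    from ccmlib.cluster import Cluster
    from ccmlib.node import NodeError

    # Hypothetical workspace, cluster name, and version -- adjust to your setup.
    cluster = Cluster(os.path.expanduser('~/.ccm'), 'repro', version='4.0.11')
    cluster.populate(3)  # a single-DC three-node ring; the failing tests use larger topologies

    try:
        # Same call the tests make; raises NodeError if any node's C* process
        # terminates before reporting as running (the timeout_s=7 check above).
        cluster.start(wait_for_binary_proto=True)
    except NodeError as e:
        print(e)
        for node in cluster.nodelist():
            # The node's own log is the first place to look for why the
            # process terminated during startup.
            print(node.name, 'running:', node.is_running(), 'log:', node.logfilename())

Given the @pytest.mark.resource_intensive markers and the 9- and 5-node topologies these tests request, resource exhaustion on the test host is a plausible cause, but the terminated node's log, not the pytest output above, is the authoritative source.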