test_network_topology_strategy_counters | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <consistency_test.TestAccuracy object at 0x7fb66e8278e0>
    @pytest.mark.resource_intensive
    def test_network_topology_strategy_counters(self):
        """
        Test for multiple datacenters, counters table.
        """
        self.nodes = [3, 3]
        self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
        combinations = [
            (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
            (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
            (ConsistencyLevel.ONE, ConsistencyLevel.THREE),
            (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.TWO),
            (ConsistencyLevel.TWO, ConsistencyLevel.ONE),
        ]
        logger.debug("Testing multiple dcs, counters")
>       self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations),
consistency_test.py:747:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
consistency_test.py:529: in _run_test_function_in_parallel
    self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
consistency_test.py:136: in _start_cluster
    cluster.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fb66e7713d0>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migrat..._wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12', '-Dcassandra.migration_task_wait_in_seconds=12']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb66e7413a0>, itf = ('127.0.0.6', 9042)
started = [(<ccmlib.node.Node object at 0x7fb66e7413a0>, <subprocess.Popen object at 0x7fb66e7e5a60>, 0), (<ccmlib.node.Node obj... at 0x7fb66e7e5910>, 0), (<ccmlib.node.Node object at 0x7fb66e781b80>, <subprocess.Popen object at 0x7fb66e7e5d90>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb66e7e5a60>
    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
        if jvm_args is None:
            jvm_args = []
        extension.pre_cluster_start(self)
        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)
        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()
                # if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)
                started.append((node, p, mark))
        if no_wait:
            time.sleep(2)  # waiting 2 seconds to check for early errors and for the pid to be set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
>                   raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
E                   ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 48.674 s |
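
Note on this entry: the exception is raised inside ccmlib before the test body runs. cluster.start() gives each Cassandra process 7 seconds (timeout_s=7 above) to report as running, and the node1 JVM exits before that, so the actual root cause is in node1's own system.log rather than in this traceback. A minimal triage sketch, assuming a populated ccmlib Cluster object is bound to `cluster`; the helper name dump_log_tail and the 50-line window are illustrative, not part of the report:

def dump_log_tail(cluster, node_name='node1', lines=50):
    # Illustrative helper (assumption, not from the report): print the tail of a node's
    # system.log after cluster.start() raises NodeError; the fatal startup error is usually there.
    node = cluster.nodes[node_name]          # same self.nodes mapping iterated in the frame above
    with open(node.logfilename()) as f:      # node.logfilename() also appears in the frame above
        print(''.join(f.readlines()[-lines:]))

Called as dump_log_tail(cluster) right after catching the NodeError, this prints the last lines node1 wrote before its process terminated.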
test_stop_decommission_too_few_replicas_multi_dc | Failure | ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
self = <topology_test.TestTopology object at 0x7fb66e756d90>
    @since('3.12')
    @pytest.mark.resource_intensive
    def test_stop_decommission_too_few_replicas_multi_dc(self):
        """
        Decommission should fail when it would result in the number of live replicas being less than
        the replication factor. --force should bypass this requirement.
        @jira_ticket CASSANDRA-12510
        @expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
        """
        # we need to ignore this error log message, which is emitted during the test,
        # because the dtest framework, which reads the logs, would otherwise consider the node
        # erroneous and terminate it prematurely
        self.fixture_dtest_setup.ignore_log_patterns = (r'.*Not enough live nodes to maintain replication factor*')
        cluster = self.cluster
>       cluster.populate([2, 2]).start()
topology_test.py:494:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.cluster.Cluster object at 0x7fb66d746370>, no_wait = False
verbose = False, wait_for_binary_proto = True, wait_other_notice = True
jvm_args = ['-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8', '-Dcassandra.migration_task_wait_in_seconds=8']
profile_options = None, quiet_start = False, allow_root = False
jvm_version = None, kwargs = {}
node = <ccmlib.node.Node object at 0x7fb66d750d30>, itf = ('127.0.0.4', 9042)
started = [(<ccmlib.node.Node object at 0x7fb66d750d30>, <subprocess.Popen object at 0x7fb66e785f10>, 0), (<ccmlib.node.Node obj... at 0x7fb66e7919a0>, 0), (<ccmlib.node.Node object at 0x7fb66d755370>, <subprocess.Popen object at 0x7fb66e791dc0>, 0)]
mark = 0, node_wait_for_binary_proto = False
p = <subprocess.Popen object at 0x7fb66e785f10>
    def start(self, no_wait=False, verbose=False, wait_for_binary_proto=True,
              wait_other_notice=True, jvm_args=None, profile_options=None,
              quiet_start=False, allow_root=False, jvm_version=None, **kwargs):
        if jvm_args is None:
            jvm_args = []
        extension.pre_cluster_start(self)
        # check whether all loopback aliases are available before starting any nodes
        for node in list(self.nodes.values()):
            if not node.is_running():
                for itf in node.network_interfaces.values():
                    if itf is not None:
                        common.assert_socket_available(itf)
        started = []
        for node in list(self.nodes.values()):
            if not node.is_running():
                mark = 0
                if os.path.exists(node.logfilename()):
                    mark = node.mark_log()
                # if the node is going to allocate_strategy_ tokens during start, then wait_for_binary_proto=True
                node_wait_for_binary_proto = (self.can_generate_tokens() and self.use_vnodes and node.initial_token is None)
                p = node.start(update_pid=False, jvm_args=jvm_args, jvm_version=jvm_version,
                               profile_options=profile_options, verbose=verbose, quiet_start=quiet_start,
                               allow_root=allow_root, wait_for_binary_proto=node_wait_for_binary_proto)
                # Prior to JDK8, starting every node at once could lead to a
                # nanotime collision where the RNG that generates a node's tokens
                # gives identical tokens to several nodes. Thus, we stagger
                # the node starts
                if common.get_jdk_version() < '1.8':
                    time.sleep(1)
                started.append((node, p, mark))
        if no_wait:
            time.sleep(2)  # waiting 2 seconds to check for early errors and for the pid to be set
        else:
            for node, p, mark in started:
                if not node._wait_for_running(p, timeout_s=7):
>                   raise NodeError("Node {} should be running before waiting for <started listening> log message, "
                                    "but C* process is terminated.".format(node.name))
E                   ccmlib.node.NodeError: Node node1 should be running before waiting for <started listening> log message, but C* process is terminated.
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:544: NodeError | 24.331 s |
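
Note on this entry: it fails in the same place and for the same reason as the previous one, so the decommission logic under test is never reached. For context, the behavior the docstring describes (CASSANDRA-12510) would be exercised roughly as sketched below once the 2+2 cluster is up. This is a hedged illustration, not the test's actual body; it assumes a keyspace replicated across all nodes of a datacenter so that removing one node would drop below the replication factor, and the node index and variable names are invented for the example:

from ccmlib.node import ToolError

node = cluster.nodelist()[3]             # a node whose removal would leave < RF live replicas
try:
    node.nodetool('decommission')        # expected to fail, per @expected_errors in the docstring
except ToolError:
    pass                                 # decommission is correctly refused without --force
node.nodetool('decommission --force')    # per the docstring, --force bypasses the replica check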