8_cythonno_x86_64_48_64 | test_round_trip_with_rate_file | Failure | AssertionError: assert False
+ where False = <built-in method startswith of str object at 0x7f334e4bb430>('Processed: 200000 rows;')
+ where <built-in method startswith of str object at 0x7f334e4bb430> = 'Processed: 195000 rows; Rate: 29372 rows/s; Avg. rate: 2029 rows/s'.startswith
+ and 'Processed: 200000 rows;' = <built-in method format of str object at 0x7f335d9d58f0>(200000)
+ where <built-in method format of str object at 0x7f335d9d58f0> = 'Processed: {} rows;'.format
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f335d9a5f40>

    def test_round_trip_with_rate_file(self):
        """
        Test a round trip with a large number of rows and a rate file. Make sure the rate file contains
        output statistics

        @jira_ticket CASSANDRA-9303
        """
        num_rows = 200000
        report_frequency = 0.1  # every 100 milliseconds
        stress_table = 'keyspace1.standard1'
        ratefile = self.get_temp_file()
        tempfile = self.get_temp_file()

        def check_rate_file():
            lines = [line.rstrip('\n') for line in open(ratefile.name)]
            logger.debug(lines)
            assert lines[-1].startswith('Processed: {} rows;'.format(num_rows))

        self.prepare()

        logger.debug('Running stress')
        self.node1.stress(['write', 'n={}'.format(num_rows), 'no-warmup', '-rate', 'threads=50'])

        logger.debug('Exporting to csv file: {}'.format(tempfile.name))
        self.run_cqlsh(cmds="COPY {} TO '{}' WITH RATEFILE='{}' AND REPORTFREQUENCY='{}'"
                       .format(stress_table, tempfile.name, ratefile.name, report_frequency))

        # check all records were exported
        assert num_rows == len(open(tempfile.name).readlines())

        check_rate_file()

        # clean-up
        os.unlink(ratefile.name)
        self.session.execute("TRUNCATE {}".format(stress_table))

        logger.debug('Importing from csv file: {}'.format(tempfile.name))
        self.run_cqlsh(cmds="COPY {} FROM '{}' WITH RATEFILE='{}' AND REPORTFREQUENCY='{}'"
                       .format(stress_table, tempfile.name, ratefile.name, report_frequency))

        # check all records were imported
        assert [[num_rows]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}"
                                                                 .format(stress_table)))

>       check_rate_file()
cqlsh_tests/test_cqlsh_copy.py:2139:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    def check_rate_file():
        lines = [line.rstrip('\n') for line in open(ratefile.name)]
        logger.debug(lines)
>       assert lines[-1].startswith('Processed: {} rows;'.format(num_rows))
E AssertionError: assert False
E + where False = <built-in method startswith of str object at 0x7f334e4bb430>('Processed: 200000 rows;')
E + where <built-in method startswith of str object at 0x7f334e4bb430> = 'Processed: 195000 rows; Rate: 29372 rows/s; Avg. rate: 2029 rows/s'.startswith
E + and 'Processed: 200000 rows;' = <built-in method format of str object at 0x7f335d9d58f0>(200000)
E + where <built-in method format of str object at 0x7f335d9d58f0> = 'Processed: {} rows;'.format
cqlsh_tests/test_cqlsh_copy.py:2111: AssertionError | 143.848 |
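Note on the failure: the assertion requires the very last rate-file line to report the full 200000 rows, but the last line actually observed reports 195000 rows, suggesting the final progress line had not (yet) been written when the check ran. Below is a minimal, hypothetical sketch of a more tolerant check; the helper name, the retry loop, and the 10-second polling budget are assumptions for illustration, not part of the dtest.

    # Hypothetical retry-based variant of check_rate_file(); the helper name and
    # the polling budget are assumptions, not the dtest's actual code.
    import time

    def check_rate_file_with_retry(ratefile_name, num_rows, budget=10.0):
        expected = 'Processed: {} rows;'.format(num_rows)
        deadline = time.time() + budget
        lines = []
        while time.time() < deadline:
            with open(ratefile_name) as f:
                lines = [line.rstrip('\n') for line in f]
            # Accept the final count on any line rather than only the last one,
            # since the failing run's last line reported 195000 of 200000 rows.
            if any(line.startswith(expected) for line in lines):
                return
            time.sleep(0.5)
        raise AssertionError('rate file never reported {} rows; last line: {!r}'
                             .format(num_rows, lines[-1] if lines else None))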
8_cythonno_x86_64_9_64 | test_fail_without_replace | Failure | ccmlib.node.TimeoutError: 10 Nov 2024 22:20:32 [node3] after 20.02/20 seconds Missing: ['Use cassandra.replace_address if you want to replace this node'] not found in system.log:
Head: INFO [main] 2024-11-10 22:18:32,747 YamlConfigura
Tail: ... UP
WARN [GossipTasks:1] 2024-11-10 22:20:26,392 FailureDetector.java:355 - Not marking nodes down due to local pause of 6904110547ns > 5000000000ns
self = <replace_address_test.TestReplaceAddress object at 0x7fd93008e1f0>

    @since('3.6')
    def test_fail_without_replace(self):
        """
        When starting a node from a clean slate with the same address as
        an existing down node, the node should error out even when
        auto_bootstrap = false (or the node is a seed) and tell the user
        to use replace_address.

        @jira_ticket CASSANDRA-10134
        """
        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
            r'Exception encountered during startup']

        self._setup(n=3)
        self._insert_data()
        node1, node2, node3 = self.cluster.nodelist()

        mark = None
        for auto_bootstrap in (True, False):
            logger.debug("Stopping node 3.")
            node3.stop(gently=False)

            # completely delete the data, commitlog, and saved caches
            for d in chain([os.path.join(node3.get_path(), "commitlogs")],
                           [os.path.join(node3.get_path(), "saved_caches")],
                           node3.data_directories()):
                if os.path.exists(d):
                    rmtree(d)

            node3.set_configuration_options(values={'auto_bootstrap': auto_bootstrap})
            logger.debug("Starting node 3 with auto_bootstrap = {val}".format(val=auto_bootstrap))
            node3.start(wait_other_notice=False)
>           node3.watch_log_for('Use cassandra.replace_address if you want to replace this node', from_mark=mark, timeout=20)
replace_address_test.py:398:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
start = 1731277212.5182626, timeout = 20
msg = "Missing: ['Use cassandra.replace_address if you want to replace this node'] not found in system.log:\n Head: INFO [m...-10 22:20:26,392 FailureDetector.java:355 - Not marking nodes down due to local pause of 6904110547ns > 5000000000ns\n"
node = 'node3'
    @staticmethod
    def raise_if_passed(start, timeout, msg, node=None):
        if start + timeout < time.time():
>           raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 10 Nov 2024 22:20:32 [node3] after 20.02/20 seconds Missing: ['Use cassandra.replace_address if you want to replace this node'] not found in system.log:
E Head: INFO [main] 2024-11-10 22:18:32,747 YamlConfigura
E Tail: ... UP
E WARN [GossipTasks:1] 2024-11-10 22:20:26,392 FailureDetector.java:355 - Not marking nodes down due to local pause of 6904110547ns > 5000000000ns
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError | 142.517 |
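Note on the failure: the log tail shows the FailureDetector reporting a local pause of 6904110547 ns (about 6.9 s), which consumes a large share of the 20-second watch_log_for budget before node3 reaches the startup check. One low-risk mitigation is simply giving the wait more headroom. The wrapper below is a hypothetical sketch reusing the same ccmlib watch_log_for call as the test; the helper name, the DTEST_LOG_TIMEOUT variable, and the 60-second default are assumptions, not part of the dtest.

    # Hypothetical wrapper around the same ccmlib call used by the failing test;
    # helper name, env variable, and 60 s default are assumptions.
    import os

    def watch_log_with_slack(node, pattern, from_mark=None,
                             timeout=int(os.environ.get('DTEST_LOG_TIMEOUT', '60'))):
        # Identical to node.watch_log_for in the test, just with more headroom
        # for GC/scheduling pauses on a loaded CI host.
        return node.watch_log_for(pattern, from_mark=from_mark, timeout=timeout)

    # Usage mirroring the failing line:
    # watch_log_with_slack(node3, 'Use cassandra.replace_address if you want to replace this node',
    #                      from_mark=mark)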