Class | Name | Status | Type | Time(s) |
8_cythonno_x86_64_59_64 | test_bulk_round_trip_with_timeouts | Failure | assert 100000 == 96611 (+100000, -96611) | 138.051 |
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f3811ccbfa0>
    def test_bulk_round_trip_with_timeouts(self):
        """
        Test bulk import with very short read and write timeout values; this should exercise the
        retry and back-off policies. We cannot check the counts because "SELECT COUNT(*)" could time out
        on Jenkins, making the test flaky.
        @jira_ticket CASSANDRA-9302
        """
>       self._test_bulk_round_trip(nodes=1, partitioner="murmur3", num_operations=100000,
                                   configuration_options={'range_request_timeout_in_ms': '200',
                                                          'write_request_timeout_in_ms': '100'},
                                   copy_from_options={'MAXINSERTERRORS': -1},
                                   skip_count_checks=True)
cqlsh_tests/test_cqlsh_copy.py:2514:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f3811ccbfa0>
nodes = 1, partitioner = 'murmur3', num_operations = 100000, profile = None
stress_table = 'keyspace1.standard1'
configuration_options = {'range_request_timeout_in_ms': '200', 'truncate_request_timeout_in_ms': 60000, 'write_request_timeout_in_ms': '100'}
skip_count_checks = True, copy_to_options = {}
copy_from_options = {'MAXINSERTERRORS': -1}
    def _test_bulk_round_trip(self, nodes, partitioner,
                              num_operations, profile=None,
                              stress_table='keyspace1.standard1',
                              configuration_options=None,
                              skip_count_checks=False,
                              copy_to_options=None,
                              copy_from_options=None):
        """
        Test exporting a large number of rows into a csv file.
        If skip_count_checks is True we cannot use "SELECT COUNT(*)" because it may time out, but
        it also means we can be sure that one cassandra-stress operation is one record, and hence
        num_records=num_operations.
        Perform the following:
        - create the records with cassandra-stress
        - export the records to a csv file
        - truncate the table and import the csv file
        - export the records to another csv file
        - check that the length of the two csv files is the same
        Therefore, 3 COPY operations are run in total. Return a list of tuples containing stdout and stderr
        for all 3 copy operations.
        """
        if configuration_options is None:
            configuration_options = {}
        if copy_to_options is None:
            copy_to_options = {}
        # The default truncate timeout of 10 seconds that is set in init_default_config() is not
        # enough for truncating larger tables, see CASSANDRA-11157
        if 'truncate_request_timeout_in_ms' not in configuration_options:
            configuration_options['truncate_request_timeout_in_ms'] = 60000
        self.prepare(nodes=nodes, partitioner=partitioner, configuration_options=configuration_options)
        ret = []

        def create_records():
            if not profile:
                logger.debug('Running stress without any user profile')
                self.node1.stress(['write', 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])
            else:
                logger.debug('Running stress with user profile {}'.format(profile))
                self.node1.stress(['user', 'profile={}'.format(profile), 'ops(insert=1)',
                                   'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])
            if skip_count_checks:
                return num_operations
            else:
                count_statement = SimpleStatement("SELECT COUNT(*) FROM {}".format(stress_table), consistency_level=ConsistencyLevel.ALL,
                                                  retry_policy=FlakyRetryPolicy(max_retries=3))
                ret = rows_to_list(self.session.execute(count_statement))[0][0]
                logger.debug('Generated {} records'.format(ret))
                assert ret >= num_operations, 'cassandra-stress did not import enough records'
                return ret

        def run_copy_to(filename):
            logger.debug('Exporting to csv file: {}'.format(filename.name))
            start = datetime.datetime.now()
            copy_to_cmd = "CONSISTENCY ALL; COPY {} TO '{}'".format(stress_table, filename.name)
            if copy_to_options:
                copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.items())
            logger.debug('Running {}'.format(copy_to_cmd))
            result = self.run_cqlsh(cmds=copy_to_cmd)
            ret.append(result)
            logger.debug("COPY TO took {} to export {} records".format(datetime.datetime.now() - start, num_records))

        def run_copy_from(filename):
            logger.debug('Importing from csv file: {}'.format(filename.name))
            start = datetime.datetime.now()
            copy_from_cmd = "COPY {} FROM '{}'".format(stress_table, filename.name)
            if copy_from_options:
                copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.items())
            logger.debug('Running {}'.format(copy_from_cmd))
            result = self.run_cqlsh(cmds=copy_from_cmd)
            ret.append(result)
            logger.debug("COPY FROM took {} to import {} records".format(datetime.datetime.now() - start, num_records))

        num_records = create_records()
        # Copy to the first csv file
        tempfile1 = self.get_temp_file()
        run_copy_to(tempfile1)
        # check all records generated were exported
        with io.open(tempfile1.name, encoding="utf-8", newline='') as csvfile:
            assert num_records == sum(1 for _ in csv.reader(csvfile, quotechar='"', escapechar='\\'))
        # import records from the first csv file
        logger.debug('Truncating {}...'.format(stress_table))
        self.session.execute("TRUNCATE {}".format(stress_table))
        run_copy_from(tempfile1)
        # export again to a second csv file
        tempfile2 = self.get_temp_file()
        run_copy_to(tempfile2)
        # check the length of both files is the same to ensure all exported records were imported
>       assert sum(1 for _ in open(tempfile1.name)) == sum(1 for _ in open(tempfile2.name))
E assert 100000 == 96611
E +100000
E -96611
cqlsh_tests/test_cqlsh_copy.py:2456: AssertionError
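
Why the second export comes up short: the test deliberately runs with write_request_timeout_in_ms=100
and MAXINSERTERRORS=-1 (tolerate unlimited insert errors), so COPY FROM finishes even when some
batches exhaust their retries and time out; those rows are then missing from the second export. A
minimal diagnostic sketch for locating the dropped rows by diffing the two COPY TO exports (the file
paths and key column index are hypothetical stand-ins for the test's temp files):

    import csv

    def keys(path, key_col=0):
        # Collect the key column of a COPY TO export, using the same CSV
        # dialect the test reads with (quotechar='"', escapechar='\\').
        with open(path, encoding="utf-8", newline='') as f:
            return {row[key_col] for row in csv.reader(f, quotechar='"', escapechar='\\')}

    # 'tempfile1.csv' / 'tempfile2.csv' are placeholder paths (hypothetical)
    lost = keys('tempfile1.csv') - keys('tempfile2.csv')
    print('{} rows dropped during COPY FROM'.format(len(lost)))
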
8_cythonno_x86_64_59_64 | test_bulk_round_trip_with_timeouts | Failure | assert 100000 == 98380 (+100000, -98380) | 149.645 |
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7fe9606d02b0>
Same test and traceback as the failure above: the call at cqlsh_tests/test_cqlsh_copy.py:2514 into
_test_bulk_round_trip with identical arguments; only the final file-length comparison differs:
>       assert sum(1 for _ in open(tempfile1.name)) == sum(1 for _ in open(tempfile2.name))
E assert 100000 == 98380
E +100000
E -98380
cqlsh_tests/test_cqlsh_copy.py:2456: AssertionError
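
Both runs fail the same exact-equality check at cqlsh_tests/test_cqlsh_copy.py:2456. Because the
configuration intentionally provokes timeouts and MAXINSERTERRORS=-1 tells COPY FROM to keep going
past every insert error, an exact line-count match between the two exports is inherently fragile. A
hedged sketch of a tolerance-based comparison (not the dtest's actual behavior; max_lost_fraction is
an illustrative parameter):

    def line_count(path):
        with open(path, encoding="utf-8") as f:
            return sum(1 for _ in f)

    def assert_round_trip(export1, export2, max_lost_fraction=0.05):
        # Allow a bounded number of dropped rows instead of requiring exact equality.
        n1, n2 = line_count(export1), line_count(export2)
        lost = n1 - n2
        assert lost <= n1 * max_lost_fraction, \
            'lost {} of {} rows, above the {:.0%} tolerance'.format(lost, n1, max_lost_fraction)

Under such a check the first run (96611 of 100000 rows, 3.4% lost) and the second (98380 rows, 1.6%
lost) would both pass at a 5% tolerance.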