Unit Test Results.

Designed for use with JUnit and Ant.

All Failures

Class | Name | Status | Type | Time(s)
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 94208 == 90112)

self = <configuration_test.TestConfiguration object at 0x7f9742c2d1f0>

@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560

Test that changes to the DURABLE_WRITES option on keyspaces are
respected in subsequent writes.

This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.

After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})

cluster.populate(1).start()
durable_node = cluster.nodelist()[0]

durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)

# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

durable_session.shutdown()
cluster.stop()
cluster.clear()

cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)

# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 94208 == 90112
E +94208
E -90112

configuration_test.py:104: AssertionError
Time(s): 88.689
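Both occurrences of the test_change_durable_writes failure in this report hinge on two helpers the traceback does not show: commitlog_size and write_to_trigger_fsync. A minimal sketch of what such helpers could look like follows; the commitlog directory layout and the number of inserts are illustrative assumptions, not the dtest suite's actual implementation.

import os

def commitlog_size(node):
    # Sum the sizes of every file in the node's commitlog directory
    # (the 'commitlogs' directory name is an assumption about the ccm node layout).
    commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
    return sum(os.path.getsize(os.path.join(commitlog_dir, name))
               for name in os.listdir(commitlog_dir))

def write_to_trigger_fsync(session, ks, table):
    # Insert enough rows that a batch commitlog must fsync at least once;
    # the row count here is a guess, not the suite's tuned value.
    for key in range(10000):
        session.execute("INSERT INTO {}.{} (key, a, b, c) VALUES ({}, {}, {}, {})"
                        .format(ks, table, key, 2 * key, 3 * key, 4 * key))

In this run the commitlog grew by 4096 bytes (94208 - 90112) even though the keyspace was created with DURABLE_WRITES = false, which trips the strict equality assertion.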
8_cythonno_x86_64_59_64 | test_bulk_round_trip_with_timeouts | Failure | assert 100000 == 68874

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f0715db42e0>

def test_bulk_round_trip_with_timeouts(self):
"""
Test bulk import with very short read and write timeout values; this should exercise the
retry and back-off policies. We cannot check the counts because "SELECT COUNT(*)" could time out
on Jenkins, making the test flaky.

@jira_ticket CASSANDRA-9302
"""
> self._test_bulk_round_trip(nodes=1, partitioner="murmur3", num_operations=100000,
configuration_options={'range_request_timeout_in_ms': '200',
'write_request_timeout_in_ms': '100'},
copy_from_options={'MAXINSERTERRORS': -1},
skip_count_checks=True)

cqlsh_tests/test_cqlsh_copy.py:2514:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f0715db42e0>
nodes = 1, partitioner = 'murmur3', num_operations = 100000, profile = None
stress_table = 'keyspace1.standard1'
configuration_options = {'range_request_timeout_in_ms': '200', 'truncate_request_timeout_in_ms': 60000, 'write_request_timeout_in_ms': '100'}
skip_count_checks = True, copy_to_options = {}
copy_from_options = {'MAXINSERTERRORS': -1}

def _test_bulk_round_trip(self, nodes, partitioner,
num_operations, profile=None,
stress_table='keyspace1.standard1',
configuration_options=None,
skip_count_checks=False,
copy_to_options=None,
copy_from_options=None):
"""
Test exporting a large number of rows into a csv file.

If skip_count_checks is True then it means we cannot use "SELECT COUNT(*)" as it may time out, but
it also means that we can be sure that one cassandra-stress operation is one record and hence
num_records=num_operations.

Perform the following:
- create the records with cassandra-stress
- export the records to a csv file
- truncate the table and import the csv file
- export the records to another csv file
- check that the length of the two csv files is the same

Therefore, 3 COPY operations are run in total. Return a list of tuples, containing stdout and stderr
for all 3 copy operations.
"""
if configuration_options is None:
configuration_options = {}
if copy_to_options is None:
copy_to_options = {}

# The default truncate timeout of 10 seconds that is set in init_default_config() is not
# enough for truncating larger tables, see CASSANDRA-11157
if 'truncate_request_timeout_in_ms' not in configuration_options:
configuration_options['truncate_request_timeout_in_ms'] = 60000

self.prepare(nodes=nodes, partitioner=partitioner, configuration_options=configuration_options)

ret = []

def create_records():
if not profile:
logger.debug('Running stress without any user profile')
self.node1.stress(['write', 'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])
else:
logger.debug('Running stress with user profile {}'.format(profile))
self.node1.stress(['user', 'profile={}'.format(profile), 'ops(insert=1)',
'n={} cl=ALL'.format(num_operations), 'no-warmup', '-rate', 'threads=50'])

if skip_count_checks:
return num_operations
else:
count_statement = SimpleStatement("SELECT COUNT(*) FROM {}".format(stress_table), consistency_level=ConsistencyLevel.ALL,
retry_policy=FlakyRetryPolicy(max_retries=3))
ret = rows_to_list(self.session.execute(count_statement))[0][0]
logger.debug('Generated {} records'.format(ret))
assert ret >= num_operations, 'cassandra-stress did not import enough records'
return ret

def run_copy_to(filename):
logger.debug('Exporting to csv file: {}'.format(filename.name))
start = datetime.datetime.now()
copy_to_cmd = "CONSISTENCY ALL; COPY {} TO '{}'".format(stress_table, filename.name)
if copy_to_options:
copy_to_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_to_options.items())
logger.debug('Running {}'.format(copy_to_cmd))
result = self.run_cqlsh(cmds=copy_to_cmd)
ret.append(result)
logger.debug("COPY TO took {} to export {} records".format(datetime.datetime.now() - start, num_records))

def run_copy_from(filename):
logger.debug('Importing from csv file: {}'.format(filename.name))
start = datetime.datetime.now()
copy_from_cmd = "COPY {} FROM '{}'".format(stress_table, filename.name)
if copy_from_options:
copy_from_cmd += ' WITH ' + ' AND '.join('{} = {}'.format(k, v) for k, v in copy_from_options.items())
logger.debug('Running {}'.format(copy_from_cmd))
result = self.run_cqlsh(cmds=copy_from_cmd)
ret.append(result)
logger.debug("COPY FROM took {} to import {} records".format(datetime.datetime.now() - start, num_records))

num_records = create_records()

# Copy to the first csv file
tempfile1 = self.get_temp_file()
run_copy_to(tempfile1)

# check all records generated were exported
with io.open(tempfile1.name, encoding="utf-8", newline='') as csvfile:
assert num_records == sum(1 for _ in csv.reader(csvfile, quotechar='"', escapechar='\\'))

# import records from the first csv file
logger.debug('Truncating {}...'.format(stress_table))
self.session.execute("TRUNCATE {}".format(stress_table))
run_copy_from(tempfile1)

# export again to a second csv file
tempfile2 = self.get_temp_file()
run_copy_to(tempfile2)

# check the length of both files is the same to ensure all exported records were imported
> assert sum(1 for _ in open(tempfile1.name)) == sum(1 for _ in open(tempfile2.name))
E assert 100000 == 68874
E +100000
E -68874

cqlsh_tests/test_cqlsh_copy.py:2456: AssertionError
Time(s): 161.559
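The final assertion compares the raw line counts of the two COPY TO exports: the first export contains all 100000 stress rows, but only 68874 survive the timeout-constrained COPY FROM, so 31126 rows were lost even with MAXINSERTERRORS = -1. A small diagnostic sketch along these lines could show which partitions went missing; the helper name, file paths and key column index are hypothetical, not part of the test suite.

import csv

def missing_keys(first_export, second_export, key_index=0):
    # Return keys present in the first COPY TO output but absent from the
    # second; key_index assumes the partition key is the first CSV column.
    def keys(path):
        with open(path, newline='', encoding='utf-8') as f:
            return {row[key_index] for row in csv.reader(f, quotechar='"', escapechar='\\')}
    return keys(first_export) - keys(second_export)

# Usage with placeholder paths; for this run the result should hold
# roughly 100000 - 68874 = 31126 keys.
lost = missing_keys('export_before.csv', 'export_after.csv')
print(len(lost))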
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 163840 == 90112)

self = <configuration_test.TestConfiguration object at 0x7f39cec651c0>

@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560

Test that changes to the DURABLE_WRITES option on keyspaces are
respected in subsequent writes.

This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.

After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})

cluster.populate(1).start()
durable_node = cluster.nodelist()[0]

durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)

# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

durable_session.shutdown()
cluster.stop()
cluster.clear()

cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)

# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112

configuration_test.py:104: AssertionError
Time(s): 98.087