Unit Test Results.

Designed for use with JUnit and Ant.

All Failures

Class | Name | Status | Type | Time (s)

8_cythonno_x86_64_10_64 | test_fail_when_seed | Failure | ccmlib.node.TimeoutError: 24 Nov 2024 17:13:51 [node3] after 20.05/20 seconds Missing: ['Replacing a node without bootstrapping risks invalidating consistency guarantees'] not found in system.log: Head: INFO [main] 2024-11-24 17:13:34,390 YamlConfigura Tail: ...3 ColumnFamilyStore.java:1060 - Enqueuing flush of system.local_metadata_log, Reason: INTERNALLY_FORCED, Usage: 364B (0%) on-heap, 311B (0%) off-heap

self = <replace_address_test.TestReplaceAddress object at 0x7f09ff6d97f0>

    @since('3.0')
    def test_fail_when_seed(self):
        """
        When a node is a seed replace should fail
        @jira_ticket CASSANDRA-14463
        """
        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
            r'Couldn\'t find table with id 5bc52802-de25-35ed-aeab-188eecebb090',  # system_auth
            r'Exception encountered during startup',
            r'Unknown endpoint'
        ]

        # see CASSANDRA-17611
        if self.cluster.version() < '4.0':
            self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
                r'RejectedExecutionException']

        self._setup(n=3)
        node1, node2, node3 = self.cluster.nodelist()
        self.cluster.seeds.append(node3.address())

        node3.stop(gently=False)
        mark = node3.mark_log()

        for d in chain([os.path.join(node3.get_path(), "commitlogs")],
                       [os.path.join(node3.get_path(), "saved_caches")],
                       node3.data_directories()):
            if os.path.exists(d):
                rmtree(d)

        node3.start(jvm_args=["-Dcassandra.replace_address=" + node3.address()], wait_other_notice=False)
>       node3.watch_log_for('Replacing a node without bootstrapping risks invalidating consistency guarantees', from_mark=mark, timeout=20)

replace_address_test.py:432:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:610: in watch_log_for
TimeoutError.raise_if_passed(start=start, timeout=timeout, node=self.name,
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

start = 1732468411.8734467, timeout = 20
msg = "Missing: ['Replacing a node without bootstrapping risks invalidating consistency guarantees'] not found in system.log...nqueuing flush of system.local_metadata_log, Reason: INTERNALLY_FORCED, Usage: 364B (0%) on-heap, 311B (0%) off-heap\n"
node = 'node3'

    @staticmethod
    def raise_if_passed(start, timeout, msg, node=None):
        if start + timeout < time.time():
>           raise TimeoutError.create(start, timeout, msg, node)
E ccmlib.node.TimeoutError: 24 Nov 2024 17:13:51 [node3] after 20.05/20 seconds Missing: ['Replacing a node without bootstrapping risks invalidating consistency guarantees'] not found in system.log:
E Head: INFO [main] 2024-11-24 17:13:34,390 YamlConfigura
E Tail: ...3 ColumnFamilyStore.java:1060 - Enqueuing flush of system.local_metadata_log, Reason: INTERNALLY_FORCED, Usage: 364B (0%) on-heap, 311B (0%) off-heap

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:56: TimeoutError
Time (s): 152.322

8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled (assert 163840 == 90112)
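The failure above comes from ccmlib's watch_log_for, which, per the traceback, tails the node's system.log from the offset saved by mark_log() and raises TimeoutError when the expected pattern has not appeared before the deadline. As a rough illustration of that mechanism only (not ccmlib's actual implementation; the file path handling, polling interval, and use of the built-in TimeoutError are assumptions made for the sketch):

import re
import time

def mark_log(path):
    # Analogous to node.mark_log(): remember the current end-of-file offset.
    with open(path, "rb") as f:
        f.seek(0, 2)  # seek to end of file
        return f.tell()

def watch_log_for(path, pattern, from_mark=0, timeout=20, poll_interval=0.5):
    # Poll the log from the saved offset until `pattern` shows up or the deadline passes.
    deadline = time.time() + timeout
    regex = re.compile(pattern)
    while time.time() < deadline:
        with open(path, "rb") as f:
            f.seek(from_mark)
            tail = f.read().decode("utf-8", errors="replace")
        if regex.search(tail):
            return
        time.sleep(poll_interval)
    raise TimeoutError("Missing: [%r] not found in %s" % (pattern, path))

In the failing test the replacement node started but never logged the expected "Replacing a node without bootstrapping..." warning within the 20-second window, so the poll loop above would have exhausted its deadline in the same way.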

self = <configuration_test.TestConfiguration object at 0x7f6f75c12310>

    @pytest.mark.timeout(60*30)
    def test_change_durable_writes(self):
        """
        @jira_ticket CASSANDRA-9560

        Test that changes to the DURABLE_WRITES option on keyspaces is
        respected in subsequent writes.

        This test starts by writing a dataset to a cluster and asserting that
        the commitlogs have been written to. The subsequent test depends on
        the assumption that this dataset triggers an fsync.

        After checking this assumption, the test destroys the cluster and
        creates a fresh one. Then it tests that DURABLE_WRITES is respected by:

        - creating a keyspace with DURABLE_WRITES set to false,
        - using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
        - writing a dataset to this keyspace that is known to trigger a commitlog fsync,
        - asserting that the commitlog has grown in size since the data was written.
        """
        cluster = self.cluster
        cluster.set_batch_commitlog(enabled=True, use_batch_window=cluster.version() < '5.0')
        cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})

        cluster.populate(1).start()
        durable_node = cluster.nodelist()[0]

        durable_init_size = commitlog_size(durable_node)
        durable_session = self.patient_exclusive_cql_connection(durable_node)

        # test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
        durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                                "AND DURABLE_WRITES = true")
        durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
        logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
        write_to_trigger_fsync(durable_session, 'ks', 'tab')
        logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))

        assert commitlog_size(durable_node) > durable_init_size, \
            "This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."

        durable_session.shutdown()
        cluster.stop()
        cluster.clear()

        cluster.set_batch_commitlog(enabled=True, use_batch_window=cluster.version() < '5.0')
        cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
        cluster.start()
        node = cluster.nodelist()[0]
        session = self.patient_exclusive_cql_connection(node)

        # set up a keyspace without durable writes, then alter it to use them
        session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                        "AND DURABLE_WRITES = false")
        session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
        init_size = commitlog_size(node)
        write_to_trigger_fsync(session, 'ks', 'tab')
>       assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112

configuration_test.py:104: AssertionError
Time (s): 88.615

8_cythonno_x86_64_56_64 | test_bulk_round_trip_default | Failure | cassandra.OperationTimedOut: errors={'127.0.0.1:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042
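The assertion above compares the on-disk size of the node's commitlog before and after the writes; commitlog_size and write_to_trigger_fsync are dtest helpers whose implementations are not shown in this report. A minimal sketch of that kind of measurement, assuming the helper simply sums file sizes under the node's commitlogs directory (the path below is purely illustrative):

import os

def commitlog_size(commitlog_dir):
    # Total bytes currently on disk under the commitlog directory.
    return sum(
        os.path.getsize(os.path.join(root, name))
        for root, _dirs, files in os.walk(commitlog_dir)
        for name in files
    )

# Hypothetical usage mirroring the failing assertion:
before = commitlog_size("/path/to/node1/commitlogs")
# ... write to a keyspace created with DURABLE_WRITES = false ...
after = commitlog_size("/path/to/node1/commitlogs")
assert after == before, "Commitlog grew even though durable writes were disabled"

Here the size grew from 90112 to 163840 bytes, so the test concluded that writes to the non-durable keyspace were still hitting the commitlog.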

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f054e68feb0>

    def test_bulk_round_trip_default(self):
        """
        Test bulk import with default stress import (one row per operation)

        @jira_ticket CASSANDRA-9302
        """
>       self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=100000)

cqlsh_tests/test_cqlsh_copy.py:2466:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:2436: in _test_bulk_round_trip
num_records = create_records()
cqlsh_tests/test_cqlsh_copy.py:2409: in create_records
ret = rows_to_list(self.session.execute(count_statement))[0][0]
../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:2618: in execute
return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ResponseFuture: query='<SimpleStatement query="SELECT COUNT(*) FROM keyspace1.standard1", consistency=ALL>' request_i...9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042 coordinator_host=None>

    def result(self):
        """
        Return the final result or raise an Exception if errors were
        encountered. If the final result or error has not been set
        yet, this method will block until it is set, or the timeout
        set for the request expires.

        Timeout is specified in the Session request execution functions.
        If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
        This is a client-side timeout. For more information
        about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.

        Example usage::

            >>> future = session.execute_async("SELECT * FROM mycf")
            >>> # do other stuff...

            >>> try:
            ...     rows = future.result()
            ...     for row in rows:
            ...         ...  # process results
            ... except Exception:
            ...     log.exception("Operation failed:")

        """
        self._event.wait()
        if self._final_result is not _NOT_SET:
            return ResultSet(self, self._final_result)
        else:
>           raise self._final_exception
E cassandra.OperationTimedOut: errors={'127.0.0.1:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.1:9042

../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:4894: OperationTimedOut
Time (s): 78.368

8_cythonno_x86_64_62_64 | test_bulk_round_trip_with_single_core | Failure | cassandra.OperationTimedOut: errors={'127.0.0.2:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042
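This failure is a client-side timeout: the record-count query "SELECT COUNT(*) FROM keyspace1.standard1" at consistency ALL did not return before the driver's default request timeout. As the quoted driver docstring notes, the timeout is passed per request to Session.execute / execute_async. A minimal sketch of issuing such a count with a larger client-side timeout (the 120-second value and contact point are arbitrary; this is not the dtest's own code):

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(["127.0.0.1"])  # contact point chosen for illustration
session = cluster.connect()

count_statement = SimpleStatement(
    "SELECT COUNT(*) FROM keyspace1.standard1",
    consistency_level=ConsistencyLevel.ALL,
)
# Pass a per-request timeout (in seconds) so a slow full-scan count does not
# hit the driver's default client-side timeout and raise OperationTimedOut.
row = session.execute(count_statement, timeout=120).one()
print(row[0])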

self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f6286a89760>

    def test_bulk_round_trip_with_single_core(self):
        """
        Perform a round trip on a simulated single core machine. When determining the number of cores,
        copyutil.py will return the number carried by the environment variable CQLSH_COPY_TEST_NUM_CORES if it has
        been set.

        @jira_ticket CASSANDRA-11053
        """
        os.environ['CQLSH_COPY_TEST_NUM_CORES'] = '1'
>       ret = self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=100000)

cqlsh_tests/test_cqlsh_copy.py:2539:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:2436: in _test_bulk_round_trip
num_records = create_records()
cqlsh_tests/test_cqlsh_copy.py:2409: in create_records
ret = rows_to_list(self.session.execute(count_statement))[0][0]
../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:2618: in execute
return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state, host, execute_as).result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ResponseFuture: query='<SimpleStatement query="SELECT COUNT(*) FROM keyspace1.standard1", consistency=ALL>' request_i...9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042 coordinator_host=None>

    def result(self):
        """
        Return the final result or raise an Exception if errors were
        encountered. If the final result or error has not been set
        yet, this method will block until it is set, or the timeout
        set for the request expires.

        Timeout is specified in the Session request execution functions.
        If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
        This is a client-side timeout. For more information
        about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.

        Example usage::

            >>> future = session.execute_async("SELECT * FROM mycf")
            >>> # do other stuff...

            >>> try:
            ...     rows = future.result()
            ...     for row in rows:
            ...         ...  # process results
            ... except Exception:
            ...     log.exception("Operation failed:")

        """
        self._event.wait()
        if self._final_result is not _NOT_SET:
            return ResultSet(self, self._final_result)
        else:
>           raise self._final_exception
E cassandra.OperationTimedOut: errors={'127.0.0.2:9042': 'Client request timeout. See Session.execute[_async](timeout)'}, last_host=127.0.0.2:9042

../cassandra/build/venv/src/cassandra-driver/cassandra/cluster.py:4894: OperationTimedOut
Time (s): 157.707
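The single-core variant fails with the same client-side OperationTimedOut as the default round trip; the test itself relies on cqlsh's copyutil honouring the CQLSH_COPY_TEST_NUM_CORES environment variable when deciding how many worker processes to spawn. copyutil's exact code is not reproduced in this report; a minimal sketch of that environment-variable override pattern (the function name is illustrative, not copyutil's API):

import multiprocessing
import os

def num_copy_processes():
    # Honour the test override if it is set, otherwise fall back to the real core count.
    override = os.environ.get("CQLSH_COPY_TEST_NUM_CORES")
    if override is not None:
        return int(override)
    return multiprocessing.cpu_count()

With CQLSH_COPY_TEST_NUM_CORES=1, as set at the top of the test, such a helper would report a single core regardless of the machine it runs on.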