Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest-latest_jdk11_python3.8_cythonno_x86_64_20_64

Name | Tests | Errors | Failures | Skipped | Time(s) | Time Stamp | Host
8_cythonno_x86_64_20_64 | 17 | 1 | 2 | 5 | 2030.636 | 2024-12-17T02:05:55.364477 | 72c702317532

Tests

Name | Status | Type | Time(s)
test_drop_user_case_sensitive | Success | | 77.622
test_builtin_functions_require_no_special_permissions | Success | | 66.964
test_cleanup | Success | | 540.120
test_create_lots_of_mv_concurrently | Skipped | awaiting CASSANDRA-10699 | 0.002

    /home/cassandra/cassandra-dtest/concurrent_schema_changes_test.py:244: awaiting CASSANDRA-10699

test_remote_query | Success | | 213.384
test_simple_schema | Success | | 87.682
test_use_custom_port | Success | | 201.820
test_data_change_impacting_earlier_page | Success | | 145.954
test_update_on_collection | Success | | 160.427
test_switch_data_center_startup_fails | Failure | ccmlib.node.NodeError: C* process with 8836 is terminated | 37.966

self = <replication_test.TestSnitchConfigurationUpdate object at 0x7fb9a0f58a00>

    def test_switch_data_center_startup_fails(self):
        """
        @jira_ticket CASSANDRA-9474

        Confirm that switching data centers fails to bring up the node.
        """
        expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
                          "Please fix the snitch configuration, decommission and rebootstrap this node "
                          "or use the flag -Dcassandra.ignore_dc=true.")
        self.fixture_dtest_setup.ignore_log_patterns = [expected_error]

        cluster = self.cluster
        cluster.populate(1)
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

        node = cluster.nodelist()[0]
        with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
            topo_file.write("dc=dc9" + os.linesep)
            topo_file.write("rack=rack1" + os.linesep)

>       cluster.start()

replication_test.py:768:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fb998f71400>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 8836 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError
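Note: the expected_error pattern above is registered in ignore_log_patterns so that the startup failure the test deliberately provokes is not itself flagged as an unexpected log error. A minimal sketch of how such a pattern lines up with a node log line (the sample message below is illustrative, not taken from this run; "dc1" is an assumed previous data center):

    import re

    expected_error = (r"Cannot start node if snitch's data center (.*) differs from previous data center (.*)\. "
                      "Please fix the snitch configuration, decommission and rebootstrap this node "
                      "or use the flag -Dcassandra.ignore_dc=true.")

    # Hypothetical log message of the shape the test expects; dc9 comes from
    # the cassandra-rackdc.properties written by the test, dc1 is assumed.
    sample = ("Cannot start node if snitch's data center dc9 differs from previous data center dc1. "
              "Please fix the snitch configuration, decommission and rebootstrap this node "
              "or use the flag -Dcassandra.ignore_dc=true.")

    assert re.search(expected_error, sample) is not None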
test_switch_data_center_startup_fails | Error | failed on teardown: unexpected error found in node logs | 2.023

Unexpected error found in node logs (see stdout for full details). Errors:

[node1] ERROR [main] 2024-12-17 02:31:27,397 CassandraDaemon.java:904 - Exception encountered during startup
org.apache.cassandra.exceptions.ConfigurationException: Configuration must specify either node_proximity and initial_location_provider or endpoint_snitch but not both.
    at org.apache.cassandra.config.DatabaseDescriptor.applySnitch(DatabaseDescriptor.java:1504)
    at org.apache.cassandra.config.DatabaseDescriptor.applyAll(DatabaseDescriptor.java:530)
    at org.apache.cassandra.config.DatabaseDescriptor.daemonInitialization(DatabaseDescriptor.java:281)
    at org.apache.cassandra.config.DatabaseDescriptor.daemonInitialization(DatabaseDescriptor.java:267)
    at org.apache.cassandra.service.CassandraDaemon.applyConfig(CassandraDaemon.java:781)
    at org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:724)
    at org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:882)
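Note: the teardown error above is the node refusing to start because its effective configuration appears to mix the legacy endpoint_snitch option with the newer node_proximity/initial_location_provider pair, which DatabaseDescriptor.applySnitch rejects per the exception message. A minimal sketch of that constraint in dtest terms (option values are placeholders, not real class names):

    # Rejected at startup: legacy option and newer pair both present.
    cluster.set_configuration_options(values={
        'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
        'node_proximity': '...',                # placeholder value
        'initial_location_provider': '...',     # placeholder value
    })

    # Accepted: exactly one of the two styles.
    cluster.set_configuration_options(values={
        'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'
    })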
test_archive_commitlog_point_in_time | Failure | AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong. | 264.289

self = <snapshot_test.TestArchiveCommitlog object at 0x7fb9a0f0e0d0>

    def test_archive_commitlog_point_in_time(self):
        """
        Test archive commit log with restore_point_in_time setting
        """
>       self.run_archive_commitlog(restore_point_in_time=True)

snapshot_test.py:261:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
snapshot_test.py:303: in run_archive_commitlog
    advance_to_next_cl_segment(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

session = <cassandra.cluster.Session object at 0x7fb9a0e8bbb0>
commitlog_dir = '/home/cassandra/cassandra/build/run-python-dtest.F1BMv1/dtest-v78dgbme/test/node1/commitlogs'
keyspace_name = 'ks', table_name = 'junk_table', timeout = 180

    def advance_to_next_cl_segment(session, commitlog_dir,
                                   keyspace_name='ks', table_name='junk_table',
                                   timeout=180):
        """
        This is a hack to work around problems like CASSANDRA-11811.

        The problem happens in commitlog-replaying tests, like the snapshot and CDC
        tests. If we replay the first commitlog that's created, we wind up
        replaying some mutations that initialize system tables, so this function
        advances the node to the next CL by filling up the first one.
        """
        session.execute(
            'CREATE TABLE {ks}.{tab} ('
            'a uuid PRIMARY KEY, b uuid, c uuid, d uuid, '
            'e uuid, f uuid, g uuid, h uuid'
            ')'.format(ks=keyspace_name, tab=table_name)
        )
        prepared_insert = session.prepare(
            'INSERT INTO {ks}.{tab} '
            '(a, b, c, d, e, f, g, h) '
            'VALUES ('
            'uuid(), uuid(), uuid(), uuid(), '
            'uuid(), uuid(), uuid(), uuid()'
            ')'.format(ks=keyspace_name, tab=table_name)
        )

        # record segments that we want to advance past
        initial_cl_files = _files_in(commitlog_dir)

        start = time.time()
        stop_time = start + timeout
        rate_limited_debug_logger = get_rate_limited_function(logger.debug, 5)
        logger.debug('attempting to write until we start writing to new CL segments: {}'.format(initial_cl_files))

        while _files_in(commitlog_dir) <= initial_cl_files:
            elapsed = time.time() - start
            rate_limited_debug_logger('    commitlog-advancing load step has lasted {s:.2f}s'.format(s=elapsed))
>           assert (
                time.time() <= stop_time), ("It's been over {s}s and we haven't written a new " +
                                            "commitlog segment. Something is wrong.").format(s=timeout)
E           AssertionError: It's been over 180s and we haven't written a new commitlog segment. Something is wrong.

tools/hacks.py:59: AssertionError
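Note: the loop above depends on two dtest helpers whose definitions are not shown in the trace: _files_in (its result is compared with <=, i.e. as a set, so the condition reads "no commitlog files beyond the initial ones yet") and get_rate_limited_function. A plausible minimal sketch of both, assuming the real helpers in the dtest tree differ in detail:

    import os
    import time

    def _files_in(directory):
        # Set of paths currently in the directory; `new <= initial` is then
        # a subset test meaning "no new commitlog segment has appeared".
        return {os.path.join(directory, name) for name in os.listdir(directory)}

    def get_rate_limited_function(func, limit_seconds):
        # Wrap func so it runs at most once every limit_seconds; calls that
        # arrive inside the window are silently dropped.
        last_called = [0.0]

        def rate_limited(*args, **kwargs):
            now = time.time()
            if now - last_called[0] >= limit_seconds:
                last_called[0] = now
                return func(*args, **kwargs)

        return rate_limited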
test_long_order | Skipped | 5.1 > 4 | 1.145

    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 4

test_insert_max_ttl | Skipped | 5.1 > 4 | 1.238

    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 4

test_expiration_overflow_policy_capnowarn_default_ttl | Success | | 120.324
test_copy_to | Success | | 104.398
test_datetimeformat_round_trip | Skipped | 5.1 > 3.X | 1.231

    /home/cassandra/cassandra-dtest/conftest.py:468: 5.1 > 3.X

test_force_repair_range_async_2 | Skipped | 5.1 > 4 | 1.022
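Note: the "5.1 > 4" and "5.1 > 3.X" skip reasons come from version gating in conftest.py (line 468 in this run): a test that declares a maximum supported Cassandra version is skipped when the cluster under test, here 5.1, is newer. A hypothetical distillation of that gate (helper name and exact comparison are assumptions):

    import pytest
    from distutils.version import LooseVersion

    def skip_if_too_new(current_version, max_version):
        # Skip with the bare comparison as the reason string, which is
        # exactly what shows up in this report (e.g. "5.1 > 4").
        if LooseVersion(current_version) > LooseVersion(max_version):
            pytest.skip('{} > {}'.format(current_version, max_version))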