Unit Test Results.

Designed for use with JUnit and Ant.

Class dtest-novnode_jdk17_python3.8_cythonno_x86_64_53_64

Name                     Tests  Errors  Failures  Skipped  Time(s)   Time Stamp                  Host
8_cythonno_x86_64_53_64     18       0         1        6  1719.575  2024-11-25T08:00:07.482271  cedd6250a9d7

Failures

Name             Status   Type                     Time(s)
test_functional  Failure  Failed: Timeout >900.0s  906.842
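The "Failed: Timeout >900.0s" type matches the message emitted by the pytest-timeout plugin when a test exceeds its per-test budget. A minimal sketch of how such a limit is typically declared (the plugin and the marker placement are assumptions; the dtest suite may instead set the limit globally in its pytest configuration):

    # Illustrative only: pytest-timeout usage, assuming that plugin is installed.
    # A global alternative is `timeout = 900` under [pytest] in pytest.ini.
    import pytest

    @pytest.mark.timeout(900)  # fail the test if it runs longer than 900 s
    def test_functional():
        ...

The captured traceback for the failing test follows.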

self = <global_row_key_cache_test.TestGlobalRowKeyCache object at 0x7fd3ff829520>

    def test_functional(self):
        cluster = self.cluster
        cluster.populate(3)
        node1 = cluster.nodelist()[0]

        for keycache_size in (0, 10):
            for rowcache_size in (0, 10):
                cluster.stop()
                logger.debug("Testing with keycache size of %d MB, rowcache size of %d MB " %
                             (keycache_size, rowcache_size))
                keyspace_name = 'ks_%d_%d' % (keycache_size, rowcache_size)

                # make the caches save every five seconds
                cluster.set_configuration_options(values={
                    'key_cache_size_in_mb': keycache_size,
                    'row_cache_size_in_mb': rowcache_size,
                    'row_cache_save_period': 5,
                    'key_cache_save_period': 5,
                })

                cluster.start()
                session = self.patient_cql_connection(node1)
                create_ks(session, keyspace_name, rf=3)

                session.set_keyspace(keyspace_name)
                create_cf_simple(session, 'test', "CREATE TABLE test (k int PRIMARY KEY, v1 int, v2 int)")
                create_cf_simple(session, 'test_clustering',
                                 "CREATE TABLE test_clustering (k int, v1 int, v2 int, PRIMARY KEY (k, v1))")
                create_cf_simple(session, 'test_counter', "CREATE TABLE test_counter (k int PRIMARY KEY, v1 counter)")
                create_cf_simple(session, 'test_counter_clustering',
                                 "CREATE TABLE test_counter_clustering (k int, v1 int, v2 counter, PRIMARY KEY (k, v1))")

                # insert 100 rows into each table
                for cf in ('test', 'test_clustering'):
                    execute_concurrent_with_args(
                        session, session.prepare("INSERT INTO %s (k, v1, v2) VALUES (?, ?, ?)" % (cf,)),
                        [(i, i, i) for i in range(100)])

                execute_concurrent_with_args(
                    session, session.prepare("UPDATE test_counter SET v1 = v1 + ? WHERE k = ?"),
                    [(i, i) for i in range(100)],
                    concurrency=2)

                execute_concurrent_with_args(
                    session, session.prepare("UPDATE test_counter_clustering SET v2 = v2 + ? WHERE k = ? AND v1 = ?"),
                    [(i, i, i) for i in range(100)],
                    concurrency=2)

                # flush everything to get it into sstables
                for node in cluster.nodelist():
                    node.flush()

                # update the first 10 rows in every table
                # on non-counter tables, delete the first (remaining) row each round
                num_updates = 10
                for validation_round in range(3):
                    session.execute("DELETE FROM test WHERE k = %s", (validation_round,))
                    execute_concurrent_with_args(
                        session, session.prepare("UPDATE test SET v1 = ?, v2 = ? WHERE k = ?"),
                        [(i, validation_round, i) for i in range(validation_round + 1, num_updates)])

                    session.execute("DELETE FROM test_clustering WHERE k = %s AND v1 = %s", (validation_round, validation_round))
                    execute_concurrent_with_args(
                        session, session.prepare("UPDATE test_clustering SET v2 = ? WHERE k = ? AND v1 = ?"),
                        [(validation_round, i, i) for i in range(validation_round + 1, num_updates)])

                    execute_concurrent_with_args(
                        session, session.prepare("UPDATE test_counter SET v1 = v1 + ? WHERE k = ?"),
                        [(1, i) for i in range(num_updates)],
                        concurrency=2)

                    execute_concurrent_with_args(
                        session, session.prepare("UPDATE test_counter_clustering SET v2 = v2 + ? WHERE k = ? AND v1 = ?"),
                        [(1, i, i) for i in range(num_updates)],
                        concurrency=2)

                    self._validate_values(session, num_updates, validation_round)

                session.shutdown()

                # let the data be written to the row/key caches.
                logger.debug("Letting caches be saved to disk")
                time.sleep(10)
                logger.debug("Stopping cluster")
>               cluster.stop()

global_row_key_cache_test.py:104:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:578: in stop
    if not node.stop(wait=wait, signal_event=signal_event, **kwargs):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7fd3fd8def40>, wait = True
wait_other_notice = False, signal_event = <Signals.SIGTERM: 15>, kwargs = {}
still_running = True, wait_time_sec = 4, i = 2

    def stop(self, wait=True, wait_other_notice=False, signal_event=signal.SIGTERM, **kwargs):
        """
        Stop the node.
        - wait: if True (the default), wait for the Cassandra process to be
          really dead. Otherwise return after having sent the kill signal.
        - wait_other_notice: return only when the other live nodes of the
          cluster have marked this node has dead.
        - signal_event: Signal event to send to Cassandra; default is to
          let Cassandra clean up and shut down properly (SIGTERM [15])
        - Optional:
            + gently: Let Cassandra clean up and shut down properly; unless
              false perform a 'kill -9' which shuts down faster.
        """
        if self.is_running():
            if wait_other_notice:
                marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_live() and node is not self]

            if common.is_win():
                # Just taskkill the instance, don't bother trying to shut it down gracefully.
                # Node recovery should prevent data loss from hard shutdown.
                # We have recurring issues with nodes not stopping / releasing files in the CI
                # environment so it makes more sense just to murder it hard since there's
                # really little downside.

                # We want the node to flush its data before shutdown as some tests rely on small writes being present.
                # The default Periodic sync at 10 ms may not have flushed data yet, causing tests to fail.
                # This is not a hard requirement, however, so we swallow any exceptions this may throw and kill anyway.
                if signal_event is signal.SIGTERM:
                    try:
                        self.flush()
                    except:
                        common.warning("Failed to flush node: {0} on shutdown.".format(self.name))
                        pass

                os.system("taskkill /F /PID " + str(self.pid))
                if self._find_pid_on_windows():
                    common.warning("Failed to terminate node: {0} with pid: {1}".format(self.name, self.pid))
            else:
                # Determine if the signal event should be updated to keep API compatibility
                if 'gently' in kwargs and kwargs['gently'] is False:
                    signal_event = signal.SIGKILL

                os.kill(self.pid, signal_event)

            if wait_other_notice:
                for node, mark in marks:
                    node.watch_log_for_death(self, from_mark=mark)
            else:
                time.sleep(.1)

            still_running = self.is_running()
            if still_running and wait:
                wait_time_sec = 1
                for i in xrange(0, 7):
                    # we'll double the wait time each try and cassandra should
                    # not take more than 1 minute to shutdown
>                   time.sleep(wait_time_sec)
E                   Failed: Timeout >900.0s

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:997: Failed
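The loop at the bottom of node.py is a doubling backoff: it sleeps, rechecks liveness, and doubles the wait on each of its seven tries. A standalone sketch of that pattern (illustrative only, not ccmlib's exact code; note the source shows xrange, which is not a Python 3 builtin, so ccmlib presumably imports a compatibility alias, since this run is on Python 3.8):

    import time

    def wait_for_shutdown(is_running, tries=7):
        """Poll until the process dies, doubling the wait each try.
        Worst case: 1 + 2 + 4 + ... + 64 = 127 seconds of sleeping."""
        wait_time_sec = 1
        for _ in range(tries):
            time.sleep(wait_time_sec)
            if not is_running():
                return True
            wait_time_sec *= 2
        return False

The locals above (wait_time_sec = 4, i = 2) show the node had survived SIGTERM into the third try of this wait when the suite-level 900 s budget expired mid-sleep.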
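For completeness, the stop() docstring above documents three knobs: wait, wait_other_notice, and the gently kwarg, which swaps SIGTERM for SIGKILL when false. A hedged usage sketch built from the ccmlib calls already visible in this report (the cluster path and Cassandra version are placeholders, not values from this run):

    # Illustrative ccmlib usage; assumes ccm is installed and the named
    # Cassandra version is available locally.
    from ccmlib.cluster import Cluster

    cluster = Cluster('/tmp/ccm', 'stop_demo', cassandra_version='4.1.5')  # placeholders
    cluster.populate(1).start()
    node = cluster.nodelist()[0]

    node.stop(wait=True)       # default: SIGTERM, then the doubling wait shown above
    # node.stop(gently=False)  # per the docstring: 'kill -9' (SIGKILL), faster but harsher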