Class | Name | Status | Type | Time(s) |
8_cythonno_x86_64_16_64 | test_putget_2dc_rf1 | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
> s.bind(sockaddr)
E OSError: [Errno 98] Address already in use
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError
During handling of the above exception, another exception occurred:
self = <multidc_putget_test.TestMultiDCPutGet object at 0x7f243d8c5c40>
def test_putget_2dc_rf1(self):
""" Simple put-get test for 2 DC with one node each (RF=1) [catches #3539] """
cluster = self.cluster
> cluster.populate([1, 1]).start()
multidc_putget_test.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(sockaddr)
s.close()
return True
except socket.error as msg:
s.close()
addr, port = itf
> raise UnavailableSocketError(
"Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
addr, port, msg))
E ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 7.702 |
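All of the UnavailableSocketError rows in this split trace back to the same probe in ccmlib.common.assert_socket_available. A minimal standalone sketch of that probe (stdlib socket only; the probe function name and its defaults are chosen here for illustration, with the address mirroring the failing one in the log) can be run before a retry to confirm whether 127.0.0.2:7000 has been freed:

```python
import socket

# Standalone re-run of the availability check that ccmlib performs before
# starting a cluster. Returns True if the address/port can be bound.
def probe(addr="127.0.0.2", port=7000):
    family, socktype, _, _, sockaddr = socket.getaddrinfo(
        addr, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
    s = socket.socket(family, socktype)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind(sockaddr)
        return True          # free: a new dtest cluster should be able to bind it
    except OSError as e:
        print(f"{addr}:{port} unavailable: {e}")
        return False         # busy: a node from an earlier run is likely still holding it
    finally:
        s.close()

if __name__ == "__main__":
    probe()
```

Note that errno 98 (EADDRINUSE) generally indicates a leftover process still bound to the port rather than a missing alias; the "add the loopback alias" hint in the message applies mainly to hosts where 127.0.0.2 is not routed to the loopback interface by default.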
8_cythonno_x86_64_16_64 | test_per_partition_limit_paging | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
> s.bind(sockaddr)
E OSError: [Errno 98] Address already in use
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError
During handling of the above exception, another exception occurred:
self = <paging_test.TestPagingData object at 0x7f243d8a2c70>
@since('3.6')
def test_per_partition_limit_paging(self):
"""
Test paging with per partition limit queries.
@jira_ticket CASSANDRA-11535
"""
> session = self.prepare(row_factory=tuple_factory)
paging_test.py:2645:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
paging_test.py:33: in prepare
cluster.populate(3).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(sockaddr)
s.close()
return True
except socket.error as msg:
s.close()
addr, port = itf
> raise UnavailableSocketError(
"Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
addr, port, msg))
E ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.842 |
8_cythonno_x86_64_16_64 | test_update_on_skinny_table | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
> s.bind(sockaddr)
E OSError: [Errno 98] Address already in use
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError
During handling of the above exception, another exception occurred:
self = <replica_side_filtering_test.TestSecondaryIndexes object at 0x7f243d835460>
def test_update_on_skinny_table(self):
> self._prepare_cluster(
create_table="CREATE TABLE t (k int PRIMARY KEY, v text)",
create_index="CREATE INDEX ON t(v)",
both_nodes=["INSERT INTO t(k, v) VALUES (0, 'old')"],
only_node1=["UPDATE t SET v = 'new' WHERE k = 0"])
replica_side_filtering_test.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
replica_side_filtering_test.py:38: in _prepare_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(sockaddr)
s.close()
return True
except socket.error as msg:
s.close()
addr, port = itf
> raise UnavailableSocketError(
"Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
addr, port, msg))
E ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.293 |
8_cythonno_x86_64_16_64 | test_cannot_restart_with_different_rack | Failure | ccmlib.node.NodeError: C* process with 4801 is terminated
self = <replication_test.TestSnitchConfigurationUpdate object at 0x7f243d867640>
def test_cannot_restart_with_different_rack(self):
"""
@jira_ticket CASSANDRA-10242
Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified.
"""
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'
.format('GossipingPropertyFileSnitch')})
node1 = cluster.nodelist()[0]
with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
topo_file.write(line + os.linesep)
logger.debug("Starting node {} with rack1".format(node1.address()))
> node1.start(wait_for_binary_proto=True)
replication_test.py:614:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f243d835c40>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 4801 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 28.417 |
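The NodeError rows mean the Cassandra JVM exited before the watched "Starting listening for CQL clients" line appeared, so the actual startup error is normally in the node's own system.log rather than in this report. A hedged triage sketch follows, assuming ccm's default ~/.ccm/<cluster>/<node>/logs layout (dtest runs may place clusters elsewhere); the helper name and the cluster/node arguments are hypothetical:

```python
import pathlib

# Print the last lines of a ccm node's system.log to see why the process
# terminated during startup. Path assumes the default ~/.ccm layout.
def tail_system_log(cluster="test", node="node1", lines=40):
    log = pathlib.Path.home() / ".ccm" / cluster / node / "logs" / "system.log"
    if not log.exists():
        print(f"no log file at {log}")
        return
    for line in log.read_text(errors="replace").splitlines()[-lines:]:
        print(line)

if __name__ == "__main__":
    tail_system_log()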
8_cythonno_x86_64_16_64 | test_snapshot_and_restore_dropping_a_column | Failure | ccmlib.node.NodeError: C* process with 5193 is terminated
self = <snapshot_test.TestSnapshot object at 0x7f243d817850>
@since('3.11')
def test_snapshot_and_restore_dropping_a_column(self):
"""
@jira_ticket CASSANDRA-13276
Can't load snapshots of tables with dropped columns.
"""
cluster = self.cluster
> cluster.populate(1).start()
snapshot_test.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f243c467310>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 5193 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 28.031 |
8_cythonno_x86_64_16_64 | test_default_keyspaces_exist | Failure | ccmlib.node.NodeError: C* process with 6009 is terminated
self = <cqlsh_tests.test_cqlsh.TestCqlsh object at 0x7f243d763f40>
@since('4.0')
def test_default_keyspaces_exist(self):
self.cluster.populate(1)
> self.cluster.start()
cqlsh_tests/test_cqlsh.py:1046:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f243d831a00>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6009 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 25.956 |
8_cythonno_x86_64_16_64 | test_writing_use_header | Failure | ccmlib.node.NodeError: C* process with 6400 is terminated
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f243d6ce0d0>
def test_writing_use_header(self):
"""
Test that COPY can write a CSV with a header by:
- creating and populating a table,
- exporting the contents of the table to a CSV file using COPY WITH
HEADER = true
- checking that the contents of the CSV file are the written values plus
the header.
"""
> self.prepare()
cqlsh_tests/test_cqlsh_copy.py:605:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:116: in prepare
self.cluster.populate(nodes, tokens=tokens).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f243d826d00>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 6400 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 28.005 |
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled
assert 163840 == 90112
+163840
-90112
self = <configuration_test.TestConfiguration object at 0x7fba30e064c0>
@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560
Test that changes to the DURABLE_WRITES option on keyspaces is
respected in subsequent writes.
This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.
After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:
- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.populate(1).start()
durable_node = cluster.nodelist()[0]
durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)
# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."
durable_session.shutdown()
cluster.stop()
cluster.clear()
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)
# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112
configuration_test.py:104: AssertionError | 91.772 |
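For context on the failed assertion: the test measures commitlog size before and after writing to a keyspace created with DURABLE_WRITES = false and expects no growth, but here the measurement rose from 90112 to 163840 bytes (a 73728-byte increase). The commitlog_size(node) helper is not shown in the log; a minimal sketch of the kind of measurement it presumably performs is below, where the function name, the node_dir parameter, and the "commitlogs" directory name are assumptions based on ccm's node layout:

```python
import os

# Presumed shape of the measurement: total on-disk size of a node's
# commitlog segment files under its ccm node directory.
def commitlog_size_bytes(node_dir):
    cl_dir = os.path.join(node_dir, "commitlogs")
    return sum(
        entry.stat().st_size
        for entry in os.scandir(cl_dir)
        if entry.is_file()
    )
```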
8_cythonno_x86_64_16_64 | test_per_partition_limit_paging | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
> s.bind(sockaddr)
E OSError: [Errno 98] Address already in use
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError
During handling of the above exception, another exception occurred:
self = <paging_test.TestPagingData object at 0x7f15d3ecec10>
@since('3.6')
def test_per_partition_limit_paging(self):
"""
Test paging with per partition limit queries.
@jira_ticket CASSANDRA-11535
"""
> session = self.prepare(row_factory=tuple_factory)
paging_test.py:2645:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
paging_test.py:33: in prepare
cluster.populate(3).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(sockaddr)
s.close()
return True
except socket.error as msg:
s.close()
addr, port = itf
> raise UnavailableSocketError(
"Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
addr, port, msg))
E ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.999 |
8_cythonno_x86_64_16_64 | test_update_on_skinny_table | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
> s.bind(sockaddr)
E OSError: [Errno 98] Address already in use
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError
During handling of the above exception, another exception occurred:
self = <replica_side_filtering_test.TestSecondaryIndexes object at 0x7f15d3e61400>
def test_update_on_skinny_table(self):
> self._prepare_cluster(
create_table="CREATE TABLE t (k int PRIMARY KEY, v text)",
create_index="CREATE INDEX ON t(v)",
both_nodes=["INSERT INTO t(k, v) VALUES (0, 'old')"],
only_node1=["UPDATE t SET v = 'new' WHERE k = 0"])
replica_side_filtering_test.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
replica_side_filtering_test.py:38: in _prepare_cluster
cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
itf = ('127.0.0.2', 7000)
def assert_socket_available(itf):
info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if not info:
raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
(family, socktype, proto, canonname, sockaddr) = info[0]
s = socket.socket(family, socktype)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind(sockaddr)
s.close()
return True
except socket.error as msg:
s.close()
addr, port = itf
> raise UnavailableSocketError(
"Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
addr, port, msg))
E ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.566 |
8_cythonno_x86_64_16_64 | test_cannot_restart_with_different_rack | Failure | ccmlib.node.NodeError: C* process with 7018 is terminated
self = <replication_test.TestSnitchConfigurationUpdate object at 0x7f15d3e935e0>
def test_cannot_restart_with_different_rack(self):
"""
@jira_ticket CASSANDRA-10242
Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified.
"""
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'
.format('GossipingPropertyFileSnitch')})
node1 = cluster.nodelist()[0]
with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
topo_file.write(line + os.linesep)
logger.debug("Starting node {} with rack1".format(node1.address()))
> node1.start(wait_for_binary_proto=True)
replication_test.py:614:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f15d320f8e0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7018 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.660 |
8_cythonno_x86_64_16_64 | test_snapshot_and_restore_dropping_a_column | Failure | ccmlib.node.NodeError: C* process with 7444 is terminated
self = <snapshot_test.TestSnapshot object at 0x7f15d3e437f0>
@since('3.11')
def test_snapshot_and_restore_dropping_a_column(self):
"""
@jira_ticket CASSANDRA-13276
Can't load snapshots of tables with dropped columns.
"""
cluster = self.cluster
> cluster.populate(1).start()
snapshot_test.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f15d3cbbfd0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 7444 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.088 |
8_cythonno_x86_64_16_64 | test_default_keyspaces_exist | Failure | ccmlib.node.NodeError: C* process with 8333 is terminated
self = <cqlsh_tests.test_cqlsh.TestCqlsh object at 0x7f15d3d8fee0>
@since('4.0')
def test_default_keyspaces_exist(self):
self.cluster.populate(1)
> self.cluster.start()
cqlsh_tests/test_cqlsh.py:1046:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f15d31554f0>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8333 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 25.537 |
8_cythonno_x86_64_16_64 | test_writing_use_header | Failure | ccmlib.node.NodeError: C* process with 8757 is terminated
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f15d3cfb070>
def test_writing_use_header(self):
"""
Test that COPY can write a CSV with a header by:
- creating and populating a table,
- exporting the contents of the table to a CSV file using COPY WITH
HEADER = true
- checking that the contents of the CSV file are the written values plus
the header.
"""
> self.prepare()
cqlsh_tests/test_cqlsh_copy.py:605:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:116: in prepare
self.cluster.populate(nodes, tokens=tokens).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <ccmlib.node.Node object at 0x7f15d0063700>
def raise_node_error_if_cassandra_process_is_terminated(self):
if not self._is_pid_running():
msg = "C* process with {pid} is terminated".format(pid=self.pid)
common.debug(msg)
> raise NodeError(msg)
E ccmlib.node.NodeError: C* process with 8757 is terminated
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.360 |
8_cythonno_x86_64_31_64 | test_change_durable_writes | Failure | AssertionError: Commitlog was written with durable writes disabled
assert 163840 == 90112
+163840
-90112
self = <configuration_test.TestConfiguration object at 0x7faa628d15b0>
@pytest.mark.timeout(60*30)
def test_change_durable_writes(self):
"""
@jira_ticket CASSANDRA-9560
Test that changes to the DURABLE_WRITES option on keyspaces is
respected in subsequent writes.
This test starts by writing a dataset to a cluster and asserting that
the commitlogs have been written to. The subsequent test depends on
the assumption that this dataset triggers an fsync.
After checking this assumption, the test destroys the cluster and
creates a fresh one. Then it tests that DURABLE_WRITES is respected by:
- creating a keyspace with DURABLE_WRITES set to false,
- using ALTER KEYSPACE to set its DURABLE_WRITES option to true,
- writing a dataset to this keyspace that is known to trigger a commitlog fsync,
- asserting that the commitlog has grown in size since the data was written.
"""
cluster = self.cluster
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.populate(1).start()
durable_node = cluster.nodelist()[0]
durable_init_size = commitlog_size(durable_node)
durable_session = self.patient_exclusive_cql_connection(durable_node)
# test assumption that write_to_trigger_fsync actually triggers a commitlog fsync
durable_session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = true")
durable_session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
write_to_trigger_fsync(durable_session, 'ks', 'tab')
logger.debug('commitlog size diff = ' + str(commitlog_size(durable_node) - durable_init_size))
assert commitlog_size(durable_node) > durable_init_size, \
"This test will not work in this environment; write_to_trigger_fsync does not trigger fsync."
durable_session.shutdown()
cluster.stop()
cluster.clear()
cluster.set_batch_commitlog(enabled=True, use_batch_window = cluster.version() < '5.0')
cluster.set_configuration_options(values={'commitlog_segment_size_in_mb': 1})
cluster.start()
node = cluster.nodelist()[0]
session = self.patient_exclusive_cql_connection(node)
# set up a keyspace without durable writes, then alter it to use them
session.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
"AND DURABLE_WRITES = false")
session.execute('CREATE TABLE ks.tab (key int PRIMARY KEY, a int, b int, c int)')
init_size = commitlog_size(node)
write_to_trigger_fsync(session, 'ks', 'tab')
> assert commitlog_size(node) == init_size, "Commitlog was written with durable writes disabled"
E AssertionError: Commitlog was written with durable writes disabled
E assert 163840 == 90112
E +163840
E -90112
configuration_test.py:104: AssertionError | 104.677 |