Name | Status | Error type, message, and traceback | Time (s) |
test_per_partition_limit_paging | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)

    def assert_socket_available(itf):
        info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if not info:
            raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
        (family, socktype, proto, canonname, sockaddr) = info[0]
        s = socket.socket(family, socktype)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
>           s.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError

During handling of the above exception, another exception occurred:

self = <paging_test.TestPagingData object at 0x7f15d3ecec10>

    @since('3.6')
    def test_per_partition_limit_paging(self):
        """
        Test paging with per partition limit queries.
        @jira_ticket CASSANDRA-11535
        """
>       session = self.prepare(row_factory=tuple_factory)

paging_test.py:2645:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
paging_test.py:33: in prepare
    cluster.populate(3).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
    common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

itf = ('127.0.0.2', 7000)

    def assert_socket_available(itf):
        info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if not info:
            raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
        (family, socktype, proto, canonname, sockaddr) = info[0]
        s = socket.socket(family, socktype)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind(sockaddr)
            s.close()
            return True
        except socket.error as msg:
            s.close()
            addr, port = itf
>           raise UnavailableSocketError(
                "Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
                    addr, port, msg))
E           ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.999 |
test_update_on_skinny_table | Failure | ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias
itf = ('127.0.0.2', 7000)

    def assert_socket_available(itf):
        info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if not info:
            raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
        (family, socktype, proto, canonname, sockaddr) = info[0]
        s = socket.socket(family, socktype)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
>           s.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:631: OSError

During handling of the above exception, another exception occurred:

self = <replica_side_filtering_test.TestSecondaryIndexes object at 0x7f15d3e61400>

    def test_update_on_skinny_table(self):
>       self._prepare_cluster(
            create_table="CREATE TABLE t (k int PRIMARY KEY, v text)",
            create_index="CREATE INDEX ON t(v)",
            both_nodes=["INSERT INTO t(k, v) VALUES (0, 'old')"],
            only_node1=["UPDATE t SET v = 'new' WHERE k = 0"])

replica_side_filtering_test.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
replica_side_filtering_test.py:38: in _prepare_cluster
    cluster.start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:515: in start
    common.assert_socket_available(itf)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

itf = ('127.0.0.2', 7000)

    def assert_socket_available(itf):
        info = socket.getaddrinfo(itf[0], itf[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if not info:
            raise UnavailableSocketError("Failed to get address info for [%s]:%s" % itf)
        (family, socktype, proto, canonname, sockaddr) = info[0]
        s = socket.socket(family, socktype)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind(sockaddr)
            s.close()
            return True
        except socket.error as msg:
            s.close()
            addr, port = itf
>           raise UnavailableSocketError(
                "Inet address %s:%s is not available: %s; a cluster may already be running or you may need to add the loopback alias" % (
                    addr, port, msg))
E           ccmlib.common.UnavailableSocketError: Inet address 127.0.0.2:7000 is not available: [Errno 98] Address already in use; a cluster may already be running or you may need to add the loopback alias

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/common.py:637: UnavailableSocketError | 5.566 |
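Note: both UnavailableSocketError failures above come from the same pre-start check. Before launching a node, ccmlib's assert_socket_available (quoted in the tracebacks) tries to bind the node's storage interface, here 127.0.0.2:7000, and raises when the bind fails with Errno 98 (address already in use), which usually means a previous cluster or stray Cassandra process still holds that address. The sketch below reproduces the same kind of probe for the loopback addresses a multi-node ccm cluster uses; the helper name probe_loopback_addresses, the address count, and the port list are assumptions for illustration, not part of ccmlib.

    import socket

    def probe_loopback_addresses(count=3, ports=(7000, 9042)):
        """Try to bind 127.0.0.1 .. 127.0.0.<count> on each port, mirroring the
        bind check ccmlib performs before starting a cluster (assumed helper,
        for local diagnosis only). Returns (addr, port, error) tuples that failed."""
        busy = []
        for i in range(1, count + 1):
            for port in ports:
                addr = '127.0.0.%d' % i
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                try:
                    s.bind((addr, port))
                except OSError as exc:
                    # EADDRINUSE (Errno 98 on Linux): something is already listening,
                    # e.g. a leftover node from an earlier run.
                    # EADDRNOTAVAIL: the loopback alias itself is missing, which is
                    # typical on macOS where extra 127.0.0.x aliases must be added.
                    busy.append((addr, port, exc))
                finally:
                    s.close()
        return busy

    if __name__ == '__main__':
        for addr, port, exc in probe_loopback_addresses():
            print('%s:%d unavailable: %s' % (addr, port, exc))

On Linux every 127.0.0.x address is normally bindable out of the box, so Errno 98 here points at a stale cluster or leftover CassandraDaemon process rather than a missing alias; on macOS the extra loopback aliases generally have to be added first (e.g. sudo ifconfig lo0 alias 127.0.0.2).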
test_cannot_restart_with_different_rack | Failure | ccmlib.node.NodeError: C* process with 7018 is terminated
self = <replication_test.TestSnitchConfigurationUpdate object at 0x7f15d3e935e0>

    def test_cannot_restart_with_different_rack(self):
        """
        @jira_ticket CASSANDRA-10242
        Test that we cannot restart with a different rack if '-Dcassandra.ignore_rack=true' is not specified.
        """
        cluster = self.cluster
        cluster.populate(1)
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.{}'
                                                  .format('GossipingPropertyFileSnitch')})
        node1 = cluster.nodelist()[0]
        with open(os.path.join(node1.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as topo_file:
            for line in ["dc={}".format(node1.data_center), "rack=rack1"]:
                topo_file.write(line + os.linesep)
        logger.debug("Starting node {} with rack1".format(node1.address()))
>       node1.start(wait_for_binary_proto=True)

replication_test.py:614:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:928: in start
    self.wait_for_binary_interface(from_mark=self.mark)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:711: in wait_for_binary_interface
    self.watch_log_for("Starting listening for CQL clients", **kwargs)
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f15d320f8e0>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 7018 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.660 |
test_snapshot_and_restore_dropping_a_column | Failure | ccmlib.node.NodeError: C* process with 7444 is terminated
self = <snapshot_test.TestSnapshot object at 0x7f15d3e437f0>

    @since('3.11')
    def test_snapshot_and_restore_dropping_a_column(self):
        """
        @jira_ticket CASSANDRA-13276
        Can't load snapshots of tables with dropped columns.
        """
        cluster = self.cluster
>       cluster.populate(1).start()

snapshot_test.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f15d3cbbfd0>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 7444 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.088 |
test_default_keyspaces_exist | Failure | ccmlib.node.NodeError: C* process with 8333 is terminated
self = <cqlsh_tests.test_cqlsh.TestCqlsh object at 0x7f15d3d8fee0>

    @since('4.0')
    def test_default_keyspaces_exist(self):
        self.cluster.populate(1)
>       self.cluster.start()

cqlsh_tests/test_cqlsh.py:1046:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f15d31554f0>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 8333 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 25.537 |
test_writing_use_header | Failure | ccmlib.node.NodeError: C* process with 8757 is terminated
self = <cqlsh_tests.test_cqlsh_copy.TestCqlshCopy object at 0x7f15d3cfb070>

    def test_writing_use_header(self):
        """
        Test that COPY can write a CSV with a header by:
        - creating and populating a table,
        - exporting the contents of the table to a CSV file using COPY WITH
          HEADER = true
        - checking that the contents of the CSV file are the written values plus
          the header.
        """
>       self.prepare()

cqlsh_tests/test_cqlsh_copy.py:605:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cqlsh_tests/test_cqlsh_copy.py:116: in prepare
    self.cluster.populate(nodes, tokens=tokens).start()
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/cluster.py:550: in start
    node.watch_log_for(start_message, timeout=timeout, process=p, verbose=verbose, from_mark=mark,
../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:608: in watch_log_for
    self.raise_node_error_if_cassandra_process_is_terminated()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ccmlib.node.Node object at 0x7f15d0063700>

    def raise_node_error_if_cassandra_process_is_terminated(self):
        if not self._is_pid_running():
            msg = "C* process with {pid} is terminated".format(pid=self.pid)
            common.debug(msg)
>           raise NodeError(msg)
E           ccmlib.node.NodeError: C* process with 8757 is terminated

../cassandra/build/venv/lib/python3.8/site-packages/ccmlib/node.py:692: NodeError | 27.360 |
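Note: the four NodeError failures above share a different mechanism. The node process launches, but while ccmlib waits for the "Starting listening for CQL clients" log line it keeps checking that the Cassandra pid is still alive, and raises NodeError as soon as it is not, meaning the JVM died during startup. The sketch below is a minimal liveness check of that kind using signal 0; it illustrates the general technique only, not ccmlib's actual _is_pid_running implementation, and the helper name is assumed.

    import errno
    import os

    def is_pid_running(pid):
        """Return True if a process with this pid currently exists (POSIX).
        Sending signal 0 performs the existence/permission check without
        actually delivering a signal to the process."""
        try:
            os.kill(pid, 0)
        except OSError as exc:
            if exc.errno == errno.ESRCH:   # no such process: it has terminated
                return False
            if exc.errno == errno.EPERM:   # process exists but belongs to another user
                return True
            raise
        return True

Because the pid check only reports that the process is gone, the actual cause of these four failures is not in the pytest output; it is usually recorded in the node's own logs (system.log, debug.log, and the captured stdout/stderr under the ccm cluster directory), typically as a configuration error or exception logged just before the JVM exited.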