desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Return a built-in datetime.time (nanosecond precision truncated to micros).'
| def time(self):
| return datetime.time(hour=self.hour, minute=self.minute, second=self.second, microsecond=(self.nanosecond // Time.MICRO))
|
'Initializer value can be:
- integer_type: absolute days from epoch (1970, 1, 1). Can be negative.
- datetime.date: built-in date
- string_type: a string time of the form "yyyy-mm-dd"'
| def __init__(self, value):
| if isinstance(value, six.integer_types):
self.days_from_epoch = value
elif isinstance(value, (datetime.date, datetime.datetime)):
self._from_timetuple(value.timetuple())
elif isinstance(value, six.string_types):
self._from_datestring(value)
else:
raise TypeError('Date ... |
'Absolute seconds from epoch (can be negative)'
| @property
def seconds(self):
| return (self.days_from_epoch * Date.DAY)
|
'Return a built-in datetime.date for Dates falling in the years [datetime.MINYEAR, datetime.MAXYEAR]
ValueError is raised for Dates outside this range.'
| def date(self):
| try:
dt = datetime_from_timestamp(self.seconds)
return datetime.date(dt.year, dt.month, dt.day)
except Exception:
raise ValueError(('%r exceeds ranges for built-in datetime.date' % self))
|
'internal-only; no checks are done because this entry is populated on cluster init'
| @property
def default(self):
| return self.profiles[EXEC_PROFILE_DEFAULT]
|
'When :attr:`~.Cluster.protocol_version` is 2 or higher, this should
be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
such as :class:`~.PlainTextAuthProvider`.
When :attr:`~.Cluster.protocol_version` is 1, this should be
a function that accepts one argument, the IP address of a node,
and returns a... | @property
def auth_provider(self):
| return self._auth_provider
|
'An instance of :class:`.policies.LoadBalancingPolicy` or
one of its subclasses.
.. versionchanged:: 2.6.0
Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`).
when using CPython (where the murmur3 extension is available). :class:`~.DCAwareRoundRobinPolicy`
otherwise. Default local DC will be c... | @property
def load_balancing_policy(self):
| return self._load_balancing_policy
|
'A default :class:`.policies.RetryPolicy` instance to use for all
:class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy`
explicitly set.'
| @property
def default_retry_policy(self):
| return self._default_retry_policy
|
'Flag indicating whether internal schema metadata is updated.
When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This
can be used to speed initial connection, and reduce load on client and server during operation. Turning this off
gives away token aware reques... | @property
def schema_metadata_enabled(self):
| return self.control_connection._schema_meta_enabled
|
'Flag indicating whether internal token metadata is updated.
When disabled, the driver does not query node token information on connect, or on topology change events. This
can be used to speed initial connection, and reduce load on client and server during operation. It is most useful
in large clusters using vnodes, wh... | @property
def token_metadata_enabled(self):
| return self.control_connection._token_meta_enabled
|
'``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
establishing connection pools or refreshing metadata.
Any of the mutable Cluster attributes may be set as keyword arguments to the constructor.'
| def __init__(self, contact_points=['127.0.0.1'], port=9042, compression=True, auth_provider=None, load_balancing_policy=None, reconnection_policy=None, default_retry_policy=None, conviction_policy_factory=None, metrics_enabled=False, connection_class=None, ssl_options=None, sockopts=None, cql_version=None, protocol_ver... | if (contact_points is not None):
if isinstance(contact_points, six.string_types):
raise TypeError('contact_points should not be a string, it should be a sequence (e.g. list) of strings')
if (None in contact_points):
raise ValueError('... |
'Registers a class to use to represent a particular user-defined type.
Query parameters for this user-defined type will be assumed to be
instances of `klass`. Result sets for this user-defined type will
be instances of `klass`. If no class is registered for a user-defined
type, a namedtuple will be used for result se... | def register_user_type(self, keyspace, user_type, klass):
| if (self.protocol_version < 3):
log.warning('User Type serialization is only supported in native protocol version 3+ (%d in use). CQL encoding for simple statements will still work, but named tuples will be returned when ... |
'Adds an :class:`.ExecutionProfile` to the cluster. This makes it available for use by ``name`` in :meth:`.Session.execute`
and :meth:`.Session.execute_async`. This method will raise if the profile already exists.
Normally profiles will be injected at cluster initialization via ``Cluster(execution_profiles)``. This met... | def add_execution_profile(self, name, profile, pool_wait_timeout=5):
| if (not isinstance(profile, ExecutionProfile)):
raise TypeError('profile must be an instance of ExecutionProfile')
if (self._config_mode == _ConfigMode.LEGACY):
raise ValueError('Cannot add execution profiles when legacy parameters are set explicitly.... |
'Sets a threshold for concurrent requests per connection, below which
connections will be considered for disposal (down to core connections;
see :meth:`~Cluster.set_core_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.'
| def set_min_requests_per_connection(self, host_distance, min_requests):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_min_requests_per_connection() only has an effect when using protocol_version 1 or 2.')
if ((min_requests < 0) or (min_requests > 126) or (min_requests >= self._max_requests_per_connection[host_distance... |
'Sets a threshold for concurrent requests per connection, above which new
connections will be created to a host (up to max connections;
see :meth:`~Cluster.set_max_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.'
| def set_max_requests_per_connection(self, host_distance, max_requests):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_max_requests_per_connection() only has an effect when using protocol_version 1 or 2.')
if ((max_requests < 1) or (max_requests > 127) or (max_requests <= self._min_requests_per_connection[host_distance... |
'Gets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.'
| def get_core_connections_per_host(self, host_distance):
| return self._core_connections_per_host[host_distance]
|
'Sets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
Protocol version 1 and 2 are limited in the number of concurrent
requests they can send pe... | def set_core_connections_per_host(self, host_distance, core_connections):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_core_connections_per_host() only has an effect when using protocol_version 1 or 2.')
old = self._core_connections_per_host[host_distance]
self._core_connections_per_host[host_distance] = core_conne... |
'Gets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.'
| def get_max_connections_per_host(self, host_distance):
| return self._max_connections_per_host[host_distance]
|
'Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there ... | def set_max_connections_per_host(self, host_distance, max_connections):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_max_connections_per_host() only has an effect when using protocol_version 1 or 2.')
self._max_connections_per_host[host_distance] = max_connections
|
'Called to create a new connection with proper configuration.
Intended for internal use only.'
| def connection_factory(self, address, *args, **kwargs):
| kwargs = self._make_connection_kwargs(address, kwargs)
return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs)
|
'Creates and returns a new :class:`~.Session` object. If `keyspace`
is specified, that keyspace will be the default keyspace for
operations on the ``Session``.'
| def connect(self, keyspace=None, wait_for_all_pools=False):
| with self._lock:
if self.is_shutdown:
raise DriverException('Cluster is already shut down')
if (not self._is_setup):
log.debug('Connecting to cluster, contact points: %s; protocol version: %s', self.contact_points, self.protocol_version)
... |
'Closes all sessions and connection associated with this Cluster.
To ensure all connections are properly closed, **you should always
call shutdown() on a Cluster instance when you are done with it**.
Once shutdown, a Cluster should not be used for any purpose.'
| def shutdown(self):
| with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
if self._idle_heartbeat:
self._idle_heartbeat.stop()
self.scheduler.shutdown()
self.control_connection.shutdown()
for session in self.sessions:
session.shutdown()
se... |
'Intended for internal use only.'
| def on_up(self, host):
| if self.is_shutdown:
return
log.debug('Waiting to acquire lock for handling up status of node %s', host)
with host.lock:
if host._currently_handling_node_up:
log.debug('Another thread is already handling up status of node %... |
'Intended for internal use only.'
| @run_in_executor
def on_down(self, host, is_host_addition, expect_host_to_be_down=False):
| if self.is_shutdown:
return
with host.lock:
was_up = host.is_up
if (self._discount_down_events and (self.profile_manager.distance(host) != HostDistance.IGNORED)):
connected = False
for session in self.sessions:
pool_states = session.get_pool_state(... |
'Called when adding initial contact points and when the control
connection subsequently discovers a new node.
Returns a Host instance, and a flag indicating whether it was new in
the metadata.
Intended for internal use only.'
| def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True):
| (host, new) = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack))
if (new and signal):
log.info('New Cassandra host %r discovered', host)
self.on_add(host, refresh_nodes)
return (host, new)
|
'Called when the control connection observes that a node has left the
ring. Intended for internal use only.'
| def remove_host(self, host):
| if (host and self.metadata.remove_host(host)):
log.info('Cassandra host %s removed', host)
self.on_remove(host)
|
'Adds a :class:`cassandra.policies.HostStateListener` subclass instance to
the list of listeners to be notified when a host is added, removed,
marked up, or marked down.'
| def register_listener(self, listener):
| with self._listener_lock:
self._listeners.add(listener)
|
'Removes a registered listener.'
| def unregister_listener(self, listener):
| with self._listener_lock:
self._listeners.remove(listener)
|
'If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.'
| def _ensure_core_connections(self):
| for session in self.sessions:
for pool in tuple(session._pools.values()):
pool.ensure_core_connections()
|
'Returns the control connection host metadata.'
| def get_control_connection_host(self):
| connection = self.control_connection._connection
host = (connection.host if connection else None)
return (self.metadata.get_host(host) if host else None)
|
'Synchronously refresh all schema metadata.
By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`
and :attr:`~.Cluster.control_connection_timeout`.
Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.
Setting max_schema_agreement_w... | def refresh_schema_metadata(self, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Schema metadata was not refreshed. See log for details.')
|
'Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication
and durability settings. It does not refresh tables, types, etc. contained in the keyspace.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Keyspace metadata was not refreshed. See log for details.')
|
'Synchronously refresh table metadata. This applies to a table, and any triggers or indexes attached
to the table.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Table metadata was not refreshed. See log for details.')
|
'Synchronously refresh materialized view metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('View metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined type metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Type metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined function metadata.
``function`` is a :class:`cassandra.UserFunctionDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Function metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined aggregate metadata.
``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Aggregate metadata was not refreshed. See log for details.')
|
'Synchronously refresh the node list and token metadata
`force_token_rebuild` can be used to rebuild the token map metadata, even if no new nodes are discovered.
An Exception is raised if node refresh fails for any reason.'
| def refresh_nodes(self, force_token_rebuild=False):
| if (not self.control_connection.refresh_node_list_and_token_map(force_token_rebuild)):
raise DriverException('Node list was not refreshed. See log for details.')
|
'*Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh ... | def set_meta_refresh_enabled(self, enabled):
| self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled
|
'The format to return row results in. By default, each
returned row will be a named tuple. You can alternatively
use any of the following:
- :func:`cassandra.query.tuple_factory` - return a result row as a tuple
- :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple
- :func:`cassandra.qu... | @property
def row_factory(self):
| return self._row_factory
|
'A default timeout, measured in seconds, for queries executed through
:meth:`.execute()` or :meth:`.execute_async()`. This default may be
overridden with the `timeout` parameter for either of those methods.
Setting this to :const:`None` will cause no timeouts to be set by default.
Please see :meth:`.ResponseFuture.res... | @property
def default_timeout(self):
| return self._default_timeout
|
'The default :class:`~ConsistencyLevel` for operations executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.consistency_level` on individual statements.
.. versionadded:: 1.2.0
.. versionchanged:: 3.0.0
default changed from ONE to LOCAL_ONE'
| @property
def default_consistency_level(self):
| return self._default_consistency_level
|
'The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.serial_consistency_level` on individual statements.
Only valid for ``protocol_version >= 2``.'
| @property
def default_serial_consistency_level(self):
| return self._default_serial_consistency_level
|
'Execute the given query and synchronously wait for the response.
If an error is encountered while executing the query, an Exception
will be raised.
`query` may be a query string or an instance of :class:`cassandra.query.Statement`.
`parameters` may be a sequence or dict of parameters to bind. If a
sequence is used, `... | def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state).result()
|
'Execute the given query and return a :class:`~.ResponseFuture` object
which callbacks may be attached to for asynchronous response
delivery. You may also call :meth:`~.ResponseFuture.result()`
on the :class:`.ResponseFuture` to synchronously block for results at
any time.
See :meth:`Session.execute` for parameter def... | def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state)
future._protocol_handler = self.client_protocol_handler
self._on_request(future)
future.send_request()
return future
|
'Returns the ResponseFuture before calling send_request() on it'
| def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| prepared_statement = None
if isinstance(query, six.string_types):
query = SimpleStatement(query)
elif isinstance(query, PreparedStatement):
query = query.bind(parameters)
if (self.cluster._config_mode == _ConfigMode.LEGACY):
if (execution_profile is not EXEC_PROFILE_DEFAULT):
... |
'Returns a clone of the ``ep`` profile. ``kwargs`` can be specified to update attributes
of the returned profile.
This is a shallow clone, so any objects referenced by the profile are shared. This means Load Balancing Policy
is maintained by inclusion in the active profiles. It also means updating any other rich objec... | def execution_profile_clone_update(self, ep, **kwargs):
| clone = copy(self._get_execution_profile(ep))
for (attr, value) in kwargs.items():
setattr(clone, attr, value)
return clone
|
'Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_futur... | def add_request_init_listener(self, fn, *args, **kwargs):
| self._request_init_callbacks.append((fn, args, kwargs))
|
'Removes a callback and arguments from the list.
See :meth:`.Session.add_request_init_listener`.'
| def remove_request_init_listener(self, fn, *args, **kwargs):
| self._request_init_callbacks.remove((fn, args, kwargs))
|
'Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
instance which can be used as follows::
>>> session = cluster.connect("mykeyspace")
>>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
>>> prepared = session.prepare(query)
>>> session.execute(prepared, (user.id, user.name, ... | def prepare(self, query, custom_payload=None):
| message = PrepareMessage(query=query)
future = ResponseFuture(self, message, query=None, timeout=self.default_timeout)
try:
future.send_request()
(query_id, bind_metadata, pk_indexes, result_metadata) = future.result()
except Exception:
log.exception('Error preparing query:... |
'Prepare the given query on all hosts, excluding ``excluded_host``.
Intended for internal use only.'
| def prepare_on_all_hosts(self, query, excluded_host):
| futures = []
for host in tuple(self._pools.keys()):
if ((host != excluded_host) and host.is_up):
future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout)
try:
request_id = future._query(host)
except Exception:
... |
'Close all connections. ``Session`` instances should not be used
for any purpose after being shutdown.'
| def shutdown(self):
| with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
for future in self._initial_connect_futures:
future.cancel()
wait_futures(self._initial_connect_futures)
for pool in tuple(self._pools.values()):
pool.shutdown()
|
'For internal use only.'
| def add_or_renew_pool(self, host, is_host_addition):
| distance = self._profile_manager.distance(host)
if (distance == HostDistance.IGNORED):
return None
def run_add_or_renew_pool():
try:
if (self._protocol_version >= 3):
new_pool = HostConnection(host, distance, self)
else:
new_pool = Host... |
'When the set of live nodes change, the loadbalancer will change its
mind on host distances. It might change it on the node that came/left
but also on other nodes (for instance, if a node dies, another
previously ignored node may be now considered).
This method ensures that all hosts for which a pool should exist
have ... | def update_created_pools(self):
| futures = set()
for host in self.cluster.metadata.all_hosts():
distance = self._profile_manager.distance(host)
pool = self._pools.get(host)
future = None
if ((not pool) or pool.is_shutdown):
if ((distance != HostDistance.IGNORED) and (host.is_up in (True, None))):
... |
'Called by the parent Cluster instance when a node is marked down.
Only intended for internal use.'
| def on_down(self, host):
| future = self.remove_pool(host)
if future:
future.add_done_callback((lambda f: self.update_created_pools()))
|
'Internal'
| def on_remove(self, host):
| self.on_down(host)
|
'Set the default keyspace for all queries made through this Session.
This operation blocks until complete.'
| def set_keyspace(self, keyspace):
| self.execute(('USE %s' % (protect_name(keyspace),)))
|
'Asynchronously sets the keyspace on all pools. When all
pools have set all of their connections, `callback` will be
called with a dictionary of all errors that occurred, keyed
by the `Host` that they occurred against.'
| def _set_keyspace_for_all_pools(self, keyspace, callback):
| with self._lock:
self.keyspace = keyspace
remaining_callbacks = set(self._pools.values())
errors = {}
if (not remaining_callbacks):
callback(errors)
return
def pool_finished_setting_keyspace(pool, host_errors):
remaining_callbacks.remove(pool)
if host_erro... |
'Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.'
| def user_type_registered(self, keyspace, user_type, klass):
| try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(('Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,)))
try:
type_meta = ks_meta.user_types[user_type]
except Ke... |
'Internal'
| def submit(self, fn, *args, **kwargs):
| if (not self.is_shutdown):
return self.cluster.executor.submit(fn, *args, **kwargs)
|
'Replace existing connection (if there is one) and close it.'
| def _set_new_connection(self, conn):
| with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug('[control connection] Closing old connection %r, replacing with %r', old, conn)
old.close()
|
'Tries to connect to each host in the query plan until one succeeds
or every attempt fails. If successful, a new Connection will be
returned. Otherwise, :exc:`NoHostAvailable` will be raised
with an "errors" arg that is a dict mapping host addresses
to the exception that was raised when an attempt was made to open
a c... | def _reconnect_internal(self):
| errors = {}
for host in self._cluster._default_load_balancing_policy.make_query_plan():
try:
return self._try_connect(host)
except ConnectionException as exc:
errors[host.address] = exc
log.warning('[control connection] Error connecting to %s:',... |
'Creates a new Connection, registers for pushed events, and refreshes
node/token and schema metadata.'
| def _try_connect(self, host):
| log.debug('[control connection] Opening new connection to %s', host)
while True:
try:
connection = self._cluster.connection_factory(host.address, is_control_connection=True)
if self._is_shutdown:
connection.close()
raise DriverExc... |
'Called by the _ControlReconnectionHandler when a new connection
is successfully created. Clears out the _reconnection_handler on
this ControlConnection.'
| def _get_and_set_reconnection_handler(self, new_handler):
| with self._reconnection_lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
|
'Used to mitigate refreshes for nodes that are already known.
Some versions of the server send superfluous NEW_NODE messages in addition to UP events.'
| def _refresh_nodes_if_not_up(self, addr):
| host = self._cluster.metadata.get_host(addr)
if ((not host) or (not host.is_up)):
self.refresh_node_list_and_token_map()
|
'Internal'
| def send_request(self, error_no_hosts=True):
| for host in self.query_plan:
req_id = self._query(host)
if (req_id is not None):
self._req_id = req_id
return True
if ((self.timeout is not None) and ((time.time() - self._start_time) > self.timeout)):
self._on_timeout()
return True
if erro... |
'Returns :const:`True` if there are more pages left in the
query results, :const:`False` otherwise. This should only
be checked after the first page has been returned.
.. versionadded:: 2.0.0'
| @property
def has_more_pages(self):
| return (self._paging_state is not None)
|
'Warnings returned from the server, if any. This will only be
set for protocol_version 4+.
Warnings may be returned for such things as oversized batches,
or too many tombstones in slice queries.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
O... | @property
def warnings(self):
| if (not self._event.is_set()):
raise DriverException('warnings cannot be retrieved before ResponseFuture is finalized')
return self._warnings
|
'The custom payload returned from the server, if any. This will only be
set by Cassandra servers implementing a custom QueryHandler, and only
for protocol_version 4+.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may throw if the... | @property
def custom_payload(self):
| if (not self._event.is_set()):
raise DriverException('custom_payload cannot be retrieved before ResponseFuture is finalized')
return self._custom_payload
|
'If there are more pages left in the query result, this asynchronously
starts fetching the next page. If there are no pages left, :exc:`.QueryExhausted`
is raised. Also see :attr:`.has_more_pages`.
This should only be called after the first page has been returned.
.. versionadded:: 2.0.0'
| def start_fetching_next_page(self):
| if (not self._paging_state):
raise QueryExhausted()
self._make_query_plan()
self.message.paging_state = self._paging_state
self._event.clear()
self._final_result = _NOT_SET
self._final_exception = None
self._start_timer()
self.send_request()
|
'Handle the response to our attempt to prepare a statement.
If it succeeded, run the original query again against the same host.'
| def _execute_after_prepare(self, host, connection, pool, response):
| if pool:
pool.return_connection(connection)
if self._final_exception:
return
if isinstance(response, ResultMessage):
if (response.kind == RESULT_KIND_PREPARED):
if self.prepared_statement:
(_, _, _, result_metadata) = response.results
self.... |
'Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassand... | def result(self):
| self._event.wait()
if (self._final_result is not _NOT_SET):
return ResultSet(self, self._final_result)
else:
raise self._final_exception
|
'Returns the trace session ids for this future, if tracing was enabled (does not fetch trace data).'
| def get_query_trace_ids(self):
| return [trace.trace_id for trace in self._query_traces]
|
'Fetches and returns the query trace of the last response, or `None` if tracing was
not enabled.
Note that this may raise an exception if there are problems retrieving the trace
details from Cassandra. If the trace is not available after `max_wait`,
:exc:`cassandra.query.TraceUnavailable` will be raised.
If the Respons... | def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
| if ((self._final_result is _NOT_SET) and (self._final_exception is None)):
raise TraceUnavailable('Trace information was not available. The ResponseFuture is not done.')
if self._query_traces:
return self._get_query_trace((len(self._query_traces) - 1), max_wait, query_... |
'Fetches and returns the query traces for all query pages, if tracing was enabled.
See note in :meth:`~.get_query_trace` regarding possible exceptions.'
| def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
| if self._query_traces:
return [self._get_query_trace(i, max_wait_per, query_cl) for i in range(len(self._query_traces))]
return []
|
'Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the ope... | def add_callback(self, fn, *args, **kwargs):
| run_now = False
with self._callback_lock:
self._callbacks.append((fn, args, kwargs))
if (self._final_result is not _NOT_SET):
run_now = True
if run_now:
fn(self._final_result, *args, **kwargs)
return self
|
'Like :meth:`.add_callback()`, but handles error cases.
An Exception instance will be passed as the first positional argument
to `fn`.'
| def add_errback(self, fn, *args, **kwargs):
| run_now = False
with self._callback_lock:
self._errbacks.append((fn, args, kwargs))
if self._final_exception:
run_now = True
if run_now:
fn(self._final_exception, *args, **kwargs)
return self
|
'A convenient combination of :meth:`.add_callback()` and
:meth:`.add_errback()`.
Example usage::
>>> session = cluster.connect()
>>> query = "SELECT * FROM mycf"
>>> future = session.execute_async(query)
>>> def log_results(results, level=\'debug\'):
... for row in results:
... log.log(level, "Result: %s", ... | def add_callbacks(self, callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_kwargs=None):
| self.add_callback(callback, *callback_args, **(callback_kwargs or {}))
self.add_errback(errback, *errback_args, **(errback_kwargs or {}))
|
@property
def has_more_pages(self):
    """True if the last response indicated more pages; False otherwise."""
    future = self.response_future
    return future.has_more_pages
|
@property
def current_rows(self):
    """The list of current page rows.

    May be empty if the result was empty, or this is the last page.
    """
    rows = self._current_rows
    return rows if rows else []
|
def fetch_next_page(self):
    """Manually, synchronously fetch the next page.

    Supplied for manually retrieving pages and inspecting
    :meth:`~.current_page`. It is not necessary to call this when iterating
    through results; paging happens implicitly in iteration.
    """
    future = self.response_future
    if not future.has_more_pages:
        # Nothing left to fetch; present an empty page.
        self._current_rows = []
        return
    future.start_fetching_next_page()
    self._current_rows = future.result()._current_rows
|
def get_query_trace(self, max_wait_sec=None):
    """Get the last query trace from the associated future.

    See :meth:`.ResponseFuture.get_query_trace` for details.
    """
    future = self.response_future
    return future.get_query_trace(max_wait_sec)
|
def get_all_query_traces(self, max_wait_sec_per=None):
    """Get all query traces from the associated future.

    See :meth:`.ResponseFuture.get_all_query_traces` for details.
    """
    future = self.response_future
    return future.get_all_query_traces(max_wait_sec_per)
|
'For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request.
Only valid when one of the internal row factories is in use.'
| @property
def was_applied(self):
| if (self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory)):
raise RuntimeError(('Cannot determine LWT result with row factory %s' % (self.response_future.row_factsory,)))
if (len(self.current_rows) != 1):
raise RuntimeError(('LWT resul... |
@property
def paging_state(self):
    """Server paging state of the query. Can be `None` if the query was not paged.

    The driver treats paging state as opaque, but it may contain primary key
    data, so applications may want to avoid sending this to untrusted parties.
    """
    future = self.response_future
    return future._paging_state
|
def get_stats(self):
    """Return the metrics for the registered cluster instance."""
    all_stats = scales.getStats()
    return all_stats[self.stats_name]
|
'Set the metrics stats name.
The stats_name is a string used to access the metrics through scales: scales.getStats()[<stats_name>]
Default is \'cassandra-<num>\'.'
| def set_stats_name(self, stats_name):
| if (self.stats_name == stats_name):
return
if (stats_name in scales._Stats.stats):
raise ValueError('"{0}" already exists in stats.'.format(stats_name))
stats = scales._Stats.stats[self.stats_name]
del scales._Stats.stats[self.stats_name]
self.stats_name = stats_name
... |
def __init__(self, query_string, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None, is_idempotent=False):
    """`query_string` should be a literal CQL statement with the exception
    of parameter placeholders that will be filled through the
    `parameters` argument of :meth:`.Session.execute()`.

    See :class:`Statement` attributes for a description of the other
    parameters.
    """
    self._query_string = query_string
    # All remaining options are handled by the base Statement initializer.
    Statement.__init__(self, retry_policy, consistency_level, routing_key,
                       serial_consistency_level, fetch_size, keyspace,
                       custom_payload, is_idempotent)
|
def bind(self, values):
    """Create and return a :class:`BoundStatement` instance using `values`.

    See :meth:`BoundStatement.bind` for rules on input ``values``.
    """
    bound = BoundStatement(self)
    return bound.bind(values)
|
'`prepared_statement` should be an instance of :class:`PreparedStatement`.
See :class:`Statement` attributes for a description of the other parameters.'
| def __init__(self, prepared_statement, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None):
| self.prepared_statement = prepared_statement
self.consistency_level = prepared_statement.consistency_level
self.serial_consistency_level = prepared_statement.serial_consistency_level
self.fetch_size = prepared_statement.fetch_size
self.custom_payload = prepared_statement.custom_payload
self.is_i... |
'Binds a sequence of values for the prepared statement parameters
and returns this instance. Note that `values` *must* be:
* a sequence, even if you are only binding one value, or
* a dict that relates 1-to-1 between dict keys and columns
.. versionchanged:: 2.6.0
:data:`~.UNSET_VALUE` was introduced. These can be bou... | def bind(self, values):
| if (values is None):
values = ()
proto_version = self.prepared_statement.protocol_version
col_meta = self.prepared_statement.column_metadata
if isinstance(values, dict):
values_dict = values
values = []
for col in col_meta:
try:
values.append(v... |
def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, consistency_level=None, serial_consistency_level=None, session=None, custom_payload=None):
    """`batch_type` specifies the :class:`.BatchType` for the batch operation.
    Defaults to :attr:`.BatchType.LOGGED`.

    `retry_policy` should be a :class:`~.RetryPolicy` instance for
    controlling retries on the operation.

    `consistency_level` should be a :class:`~.ConsistencyLevel` value
    to be used for all operations in the batch.
    """
    self._session = session
    self._statements_and_parameters = []
    self.batch_type = batch_type
    # Shared statement options are handled by the base initializer.
    Statement.__init__(self, retry_policy=retry_policy,
                       consistency_level=consistency_level,
                       serial_consistency_level=serial_consistency_level,
                       custom_payload=custom_payload)
|
def clear(self):
    """Convenience method to clear a batch statement for reuse.

    *Note:* it should not be used concurrently with uncompleted execution
    futures executing the same ``BatchStatement``.
    """
    # Clear in place so external references to the list remain valid.
    self._statements_and_parameters[:] = []
    self.keyspace = None
    self.routing_key = None
    payload = self.custom_payload
    if payload:
        payload.clear()
|
'Adds a :class:`.Statement` and optional sequence of parameters
to be used with the statement to the batch.
Like with other statements, parameters must be a sequence, even
if there is only one item.'
| def add(self, statement, parameters=None):
| if isinstance(statement, six.string_types):
if parameters:
encoder = (Encoder() if (self._session is None) else self._session.encoder)
statement = bind_params(statement, parameters, encoder)
self._add_statement_and_params(False, statement, ())
elif isinstance(statement, P... |
'Adds a sequence of :class:`.Statement` objects and a matching sequence
of parameters to the batch. Statement and parameter sequences must be of equal length or
one will be truncated. :const:`None` can be used in the parameters position where they are needed.'
def add_all(self, statements, parameters):
    """Add a sequence of :class:`.Statement` objects and a matching sequence
    of parameters to the batch.

    Statement and parameter sequences must be of equal length or one will
    be truncated (``zip`` stops at the shorter sequence). :const:`None` can
    be used in the parameters position where they are needed.
    """
    for stmt, params in zip(statements, parameters):
        self.add(stmt, params)
|
'Retrieves the actual tracing details from Cassandra and populates the
attributes of this instance. Because tracing details are stored
asynchronously by Cassandra, this may need to retry the session
detail fetch. If the trace is still not available after `max_wait`
seconds, :exc:`.TraceUnavailable` will be raised; if... | def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
| attempt = 0
start = time.time()
while True:
time_spent = (time.time() - start)
if ((max_wait is not None) and (time_spent >= max_wait)):
raise TraceUnavailable(('Trace information was not available within %f seconds. Consider raising Session.max_trac... |
def new_authenticator(self, host):
    """Return a new authenticator instance for `host`.

    Implementations of this class should return a new instance
    of :class:`~.Authenticator` or one of its subclasses.
    """
    # Abstract: concrete provider subclasses must override this.
    raise NotImplementedError()
|
def initial_response(self):
    """Return a message to send to the server to initiate the SASL handshake.

    :const:`None` may be returned to send an empty message.
    """
    # Default: no initial payload is sent.
    return None
|
def evaluate_challenge(self, challenge):
    """Called when the server sends a challenge message.

    Generally, this method should return :const:`None` when authentication
    is complete from a client perspective. Otherwise, a string should be
    returned.
    """
    # Abstract: subclasses implement mechanism-specific challenge handling.
    raise NotImplementedError()
|
def on_authentication_success(self, token):
    """Called when the server indicates that authentication was successful.

    Depending on the authentication mechanism, `token` may be :const:`None`
    or a string.
    """
    # No-op by default; subclasses may use `token` to finalize the session.
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.