desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def get_changed_columns(self):
    """Return the names of columns modified since instantiation or the last save."""
    changed = []
    for column_name, value_manager in self._values.items():
        if value_manager.changed:
            changed.append(column_name)
    return changed
def in_(self, item):
    """Return a WhereClause using CQL's ``IN`` operator.

    Use where you would typically reach for Python's ``in`` operator.
    """
    column = six.text_type(self)
    return WhereClause(column, InOperator(), item)
def contains_(self, item):
    """Return a WhereClause using CQL's ``CONTAINS`` operator."""
    column = six.text_type(self)
    return WhereClause(column, ContainsOperator(), item)
':param batch_type: (optional) One of batch type values available through BatchType enum :type batch_type: str or None :param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied to the batch conditional. :type timestamp: datetime or timedelta or None :param consistency: (optional) ...
def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, timeout=conn.NOT_SET, connection=None):
self.queries = [] self.batch_type = batch_type if ((timestamp is not None) and (not isinstance(timestamp, (datetime, timedelta)))): raise CQLEngineException('timestamp object must be an instance of datetime') self.timestamp = timestamp self._consistency = consistency ...
def add_callback(self, fn, *args, **kwargs):
    """Register ``fn(*args, **kwargs)`` to run after the batch executes.

    A batch can hold multiple callbacks; they run only on batch success
    (if the batch never executes, no callback runs).

    :param fn: callable to invoke
    :raises ValueError: if ``fn`` is not callable
    """
    if not callable(fn):
        raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn)))
    self._callbacks.append((fn, args, kwargs))
def _select_fields(self):
    """Return the fields to select; the base implementation selects none."""
    return []
def _select_query(self):
    """Build a SelectStatement from the queryset's current filters and options."""
    if self._where:
        # WHERE clauses must be validated before being turned into a statement
        self._validate_select_where()
    return SelectStatement(
        self.column_family_name,
        fields=self._select_fields(),
        where=self._where,
        order_by=self._order,
        limit=self._limit,
        allow_filtering=self._allow_filtering,
        distinct_fields=self._distinct_fields,
        fetch_size=self._fetch_size,
    )
def _fill_result_cache(self):
    """Page through all results (1000 rows at a time), then record the row count."""
    watermark = 0
    try:
        while True:
            watermark += 1000
            # Raises StopIteration once the result set is exhausted
            self._fill_result_cache_to_idx(watermark)
    except StopIteration:
        pass
    self._count = len(self._result_cache)
def _get_result_constructor(self):
    """Return a callable used to instantiate query results; subclasses must override."""
    raise NotImplementedError
'Set a batch object to run the query on. Note: running a select query with a batch object will raise an exception'
def batch(self, batch_obj):
if self._connection: raise CQLEngineException('Cannot specify the connection on model in batch mode.') if ((batch_obj is not None) and (not isinstance(batch_obj, BatchQuery))): raise CQLEngineException('batch_obj must be a BatchQuery instance or None'...
def all(self):
    """Return a queryset matching all rows (an independent deep copy of this one).

    .. code-block:: python

        for user in User.objects().all():
            print(user)
    """
    return copy.deepcopy(self)
def consistency(self, consistency):
    """Return a copy of this queryset with the given consistency level set.

    See :class:`.ConsistencyLevel`.
    """
    clone = copy.deepcopy(self)
    clone._consistency = consistency
    return clone
def _parse_filter_arg(self, arg):
    """Split a filter argument of the form ``<colname>__<op>``.

    :returns: ``(colname, op)`` tuple; ``op`` is None when no suffix is present
    :raises QueryException: if the argument cannot be parsed
    """
    parts = arg.rsplit('__', 1)
    if len(parts) == 1:
        return arg, None
    if len(parts) == 2:
        # 'pk__token' is a token filter keyword, not a column/operator pair
        return (arg, None) if arg == 'pk__token' else (parts[0], parts[1])
    raise QueryException("Can't parse '{0}'".format(arg))
'Adds IF statements to queryset'
def iff(self, *args, **kwargs):
if len([x for x in kwargs.values() if (x is None)]): raise CQLEngineException('None values on iff are not allowed') clone = copy.deepcopy(self) for operator in args: if (not isinstance(operator, ConditionalClause)): raise QueryException('{0} is not a ...
'Adds WHERE arguments to the queryset, returning a new queryset See :ref:`retrieving-objects-with-filters` Returns a QuerySet filtered on the keyword arguments'
def filter(self, *args, **kwargs):
if len([x for x in kwargs.values() if (x is None)]): raise CQLEngineException('None values on filter are not allowed') clone = copy.deepcopy(self) for operator in args: if (not isinstance(operator, WhereClause)): raise QueryException('{0} is not a va...
'Returns a single instance matching this query, optionally with additional filter kwargs. See :ref:`retrieving-objects-with-filters` Returns a single object matching the QuerySet. .. code-block:: python user = User.get(id=1) If no objects are matched, a :class:`~.DoesNotExist` exception is raised. If more than one obje...
def get(self, *args, **kwargs):
if (args or kwargs): return self.filter(*args, **kwargs).get() self._execute_query() try: self[1] raise self.model.MultipleObjectsReturned('Multiple objects found') except IndexError: pass try: obj = self[0] except IndexError: raise self.mode...
'Sets the column(s) to be used for ordering Default order is ascending, prepend a \'-\' to any column name for descending *Note: column names must be a clustering key* .. code-block:: python from uuid import uuid1,uuid4 class Comment(Model): photo_id = UUID(primary_key=True) comment_id = TimeUUID(primary_key=True, defa...
def order_by(self, *colnames):
if (len(colnames) == 0): clone = copy.deepcopy(self) clone._order = [] return clone conditions = [] for colname in colnames: conditions.append('"{0}" {1}'.format(*self._get_ordering_condition(colname))) clone = copy.deepcopy(self) clone._order.extend(conditions) ...
'Returns the number of rows matched by this query. *Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*'
def count(self):
if self._batch: raise CQLEngineException('Only inserts, updates, and deletes are available in batch mode') if (self._count is None): query = self._select_query() query.count = True result = self._execute(query) count_row = result[0].popitem() ...
def distinct(self, distinct_fields=None):
    """Return a copy of this queryset selecting DISTINCT rows.

    ``distinct_fields`` defaults to the model's partition key fields when
    omitted; the fields given must be partition key columns or static columns.
    """
    clone = copy.deepcopy(self)
    if distinct_fields:
        clone._distinct_fields = distinct_fields
    else:
        clone._distinct_fields = [col.column_name for col in self.model._partition_keys.values()]
    return clone
def limit(self, v):
    """Return a copy of this queryset limited to ``v`` rows.

    Pass 0 or None to disable. Note CQL's implicit default limit of 10,000
    applies to queries with no explicit limit.

    :raises TypeError: if ``v`` is not an integer
    :raises QueryException: if ``v`` is negative
    """
    if v is None:
        v = 0
    if not isinstance(v, six.integer_types):
        raise TypeError
    # NOTE: the equality short-circuit is checked before the sign check,
    # matching existing behavior for a repeated negative value.
    if v == self._limit:
        return self
    if v < 0:
        raise QueryException('Negative limit is not allowed')
    clone = copy.deepcopy(self)
    clone._limit = v
    return clone
def fetch_size(self, v):
    """Return a copy of this queryset with the driver fetch size set.

    The driver's default fetch size is 5000.

    :raises TypeError: if ``v`` is not an integer
    :raises QueryException: if ``v`` is less than 1
    """
    if not isinstance(v, six.integer_types):
        raise TypeError
    if v == self._fetch_size:
        return self
    if v < 1:
        raise QueryException('fetch size less than 1 is not allowed')
    clone = copy.deepcopy(self)
    clone._fetch_size = v
    return clone
def allow_filtering(self):
    """Return a copy of this queryset with ALLOW FILTERING enabled.

    Querying on a clustering key without also constraining the partition key
    is usually unwise — use with care.
    """
    clone = copy.deepcopy(self)
    clone._allow_filtering = True
    return clone
def only(self, fields):
    """Return a queryset that loads only the given fields."""
    return self._only_or_defer('only', fields)
def defer(self, fields):
    """Return a queryset that skips loading the given fields."""
    return self._only_or_defer('defer', fields)
'Deletes the contents of a query'
def delete(self):
partition_keys = set((x.db_field_name for x in self.model._partition_keys.values())) if (partition_keys - set((c.field for c in self._where))): raise QueryException('The partition key must be defined on delete queries') dq = DeleteStatement(self.column_family_name, where=self...
def timeout(self, timeout):
    """Return a copy of this queryset with the query timeout set.

    :param timeout: timeout for the query in seconds, or None
    :type timeout: float or None
    """
    clone = copy.deepcopy(self)
    clone._timeout = timeout
    return clone
'Change the context on-the-fly of the Model class (keyspace, connection)'
def using(self, keyspace=None, connection=None):
if (connection and self._batch): raise CQLEngineException('Cannot specify a connection on model in batch mode.') clone = copy.deepcopy(self) if keyspace: from cassandra.cqlengine.models import _clone_model_class clone.model = _clone_model_class(self.model, {'_...
def _get_result_constructor(self):
    """Return the callable used to instantiate query results (plain ResultObject rows)."""
    return ResultObject
'Checks that a filterset will not create invalid select statement'
def _validate_select_where(self):
equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if (isinstance(w.operator, EqualsOperator) and (not isinstance(w.value, Token)))] token_comparison = any([w for w in self._where if isinstance(w.value, Token)]) if ((not any(((w.primary_key or w.index) for w in equal_ops))) and (no...
def _get_result_constructor(self):
    """Return the callable turning a result row into the configured output shape."""
    if not self._values_list:
        # Full model instances
        return self.model._construct_instance
    if self._flat_values_list:
        # values_list(..., flat=True): a single bare value per row
        key = self._only_fields[0]
        return lambda row: row[key]
    # values_list(...): a list of the selected fields per row
    return lambda row: [row[f] for f in self._only_fields]
'Instructs the query set to return tuples, not model instance'
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False) if kwargs: raise TypeError(('Unexpected keyword arguments to values_list: %s' % (kwargs.keys(),))) if (flat and (len(fields) > 1)): raise TypeError("'flat' is not valid when values_list is called with more than ...
def ttl(self, ttl):
    """Return a copy of this queryset with a TTL (seconds) applied to modified data.

    Running a select query with a ttl value set will raise an exception.
    """
    clone = copy.deepcopy(self)
    clone._ttl = ttl
    return clone
def timestamp(self, timestamp):
    """Return a copy of this queryset with a custom write timestamp."""
    clone = copy.deepcopy(self)
    clone._timestamp = timestamp
    return clone
def if_not_exists(self):
    """Return a copy applying IF NOT EXISTS to the insertion.

    A LWTException is raised at execution time if the insert is not applied.

    :raises IfNotExistsWithCounterColumn: for models with counter columns
    """
    if self.model._has_counter:
        raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns')
    clone = copy.deepcopy(self)
    clone._if_not_exists = True
    return clone
def if_exists(self):
    """Return a copy applying IF EXISTS to the update or delete.

    A LWTException is raised at execution time if the operation is not applied.

    :raises IfExistsWithCounterColumn: for models with counter columns
    """
    if self.model._has_counter:
        raise IfExistsWithCounterColumn('if_exists cannot be used with tables containing counter columns')
    clone = copy.deepcopy(self)
    clone._if_exists = True
    return clone
'Performs an update on the row selected by the queryset. Include values to update in the update like so: .. code-block:: python Model.objects(key=n).update(value=\'x\') Passing in updates for columns which are not part of the model will raise a ValidationError. Per column validation will be performed, but instance leve...
def update(self, **values):
if (not values): return nulled_columns = set() updated_columns = set() us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for (name, val) in values.items(): (col_name, c...
'executes a delete query to remove columns that have changed to null'
def _delete_null_columns(self, conditionals=None):
ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists) deleted_fields = False static_only = True for (_, v) in self.instance._values.items(): col = v.column if v.deleted: ds.add_field(col.db_field_name) deleted_fields =...
'updates a row. This is a blind update call. All validation and cleaning needs to happen prior to calling this.'
def update(self):
if (self.instance is None): raise CQLEngineException('DML Query intance attribute is None') assert (type(self.instance) == self.model) null_clustering_key = (False if (len(self.instance._clustering_keys) == 0) else True) static_changed_only = True statement = UpdateStatement(s...
'Creates / updates a row. This is a blind insert call. All validation and cleaning needs to happen prior to calling this.'
def save(self):
if (self.instance is None): raise CQLEngineException('DML Query intance attribute is None') assert (type(self.instance) == self.model) nulled_fields = set() if (self.instance._has_counter or self.instance._can_update()): if self.instance._has_counter: warn("'cr...
'Deletes one instance'
def delete(self):
if (self.instance is None): raise CQLEngineException('DML Query instance attribute is None') ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists) for (name, col) in self.model._primary_keys.items(): ...
'Setup the connection'
def setup(self):
global cluster, session if (('username' in self.cluster_options) or ('password' in self.cluster_options)): raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider") if self.lazy_connect: return self.cluster...
def __init__(self, value):
    """:param value: the time to create bounding time uuid from
    :type value: datetime
    :raises ValidationError: if ``value`` is not a datetime instance
    """
    if not isinstance(value, datetime):
        raise ValidationError('datetime instance is required')
    super(TimeUUIDQueryFunction, self).__init__(value)
def keys(self):
    """Return a list of column IDs."""
    return list(self)
def values(self):
    """Return a list of column values."""
    return [self[key] for key in self]
def items(self):
    """Return a list of ``(column_id, value)`` tuples."""
    return [(key, self[key]) for key in self]
'Returns the type name if it\'s been defined otherwise, it creates it from the class name'
@classmethod def type_name(cls):
if cls.__type_name__: type_name = cls.__type_name__.lower() else: camelcase = re.compile('([a-z])([A-Z])') ccase = (lambda s: camelcase.sub((lambda v: '{0}_{1}'.format(v.group(1), v.group(2))), s)) type_name = ccase(cls.__name__) type_name = type_name[(-48):] type...
def validate(self):
    """Clean and validate every field value, applying column defaults for unset fields."""
    for name, field in self._fields.items():
        value = getattr(self, name)
        # Fall back to the column default only when the value was never set explicitly
        if value is None and not self._values[name].explicit and field.has_default:
            value = field.get_default()
        setattr(self, name, field.validate(value))
def __get__(self, obj, model):
    """:rtype: ModelQuerySet

    :raises CQLEngineException: when queried through an abstract model
    """
    if model.__abstract__:
        raise CQLEngineException('cannot execute queries against abstract models')
    return SimpleQuerySet(obj)
def __call__(self, *args, **kwargs):
    """Hint to IDEs that calling this is OK; never actually invoked.

    :rtype: ModelQuerySet
    """
    raise NotImplementedError
def _get_column(self):
    """:rtype: NamedColumn

    A named column acts as its own column descriptor.
    """
    return self
def column_family_name(self, include_keyspace=True):
    """Return the table name, optionally qualified with its keyspace."""
    if not include_keyspace:
        return self.name
    return '{0}.{1}'.format(self.keyspace, self.name)
def _get_column(self, name):
    """Return the column matching the given name.

    :rtype: Column
    """
    return self.column(name)
def table(self, name):
    """Return a NamedTable descriptor for ``name`` within this keyspace."""
    return NamedTable(self.name, name)
@property
def datacenter(self):
    """The datacenter the node is in."""
    return self._datacenter
@property
def rack(self):
    """The rack the node is in."""
    return self._rack
def set_location_info(self, datacenter, rack):
    """Record the node's datacenter and rack.

    Intended for internal use only (the control connection periodically
    checks the ring topology).
    """
    self._datacenter = datacenter
    self._rack = rack
def get_and_set_reconnection_handler(self, new_handler):
    """Atomically swap in ``new_handler``, returning the previous handler.

    Intended for internal use only.
    """
    with self.lock:
        previous = self._reconnection_handler
        self._reconnection_handler = new_handler
        return previous
def try_reconnect(self):
    """Attempt to open and return a new Connection; raise on failure.

    Subclasses must implement this.
    """
    raise NotImplementedError()
def on_reconnection(self, connection):
    """Hook invoked when a new Connection opens successfully; default is a no-op."""
    pass
def on_exception(self, exc, next_delay):
    """Decide whether to keep retrying after a failed connection attempt.

    ``exc`` is the exception raised; ``next_delay`` is the wait in seconds
    before the next attempt. Returns False (stop retrying) only for
    authentication failures.
    """
    return not isinstance(exc, AuthenticationFailed)
'Asynchronously sets the keyspace for all connections. When all connections have been set, `callback` will be called with two arguments: this pool, and a list of any errors that occurred.'
def _set_keyspace_for_all_conns(self, keyspace, callback):
remaining_callbacks = set(self._connections) errors = [] if (not remaining_callbacks): callback(self, errors) return def connection_finished_setting_keyspace(conn, error): self.return_connection(conn) remaining_callbacks.remove(conn) if error: errors.a...
def export_schema_as_string(self):
    """Return human-readable CQL that recreates the entire schema."""
    return '\n\n'.join(ks.export_as_string() for ks in self.keyspaces.values())
'Rebuild our view of the topology from fresh rows from the system topology tables. For internal use only.'
def rebuild_token_map(self, partitioner, token_map):
self.partitioner = partitioner if partitioner.endswith('RandomPartitioner'): token_class = MD5Token elif partitioner.endswith('Murmur3Partitioner'): token_class = Murmur3Token elif partitioner.endswith('ByteOrderedPartitioner'): token_class = BytesToken else: self.tok...
def get_replicas(self, keyspace, key):
    """Return the :class:`.Host` replicas for a given partition key.

    Returns an empty list when no token map is available or the key
    cannot be tokenized.
    """
    token_map = self.token_map
    if not token_map:
        return []
    try:
        return token_map.get_replicas(keyspace, token_map.token_class.from_key(key))
    except NoMurmur3:
        return []
def add_or_return_host(self, host):
    """Track ``host`` by address, returning ``(host, new)``.

    ``host`` is the Host instance already known for the address (or the one
    just added) and ``new`` indicates whether it was newly added.
    """
    with self._hosts_lock:
        try:
            return self._hosts[host.address], False
        except KeyError:
            self._hosts[host.address] = host
            return host, True
def all_hosts(self):
    """Return a snapshot list of all known :class:`.Host` instances in the cluster."""
    with self._hosts_lock:
        return list(self._hosts.values())
def export_for_schema(self):
    """Return replication options suitable for a CREATE KEYSPACE statement.

    NOTE(review): with a populated options map this returns a dict of strings,
    otherwise a CQL map literal string — inconsistent return types, preserved
    here for compatibility with existing callers.
    """
    if self.options_map:
        return {str(key): str(value) for key, value in self.options_map.items()}
    return "{'class': '%s'}" % (self.name,)
def export_for_schema(self):
    """Return replication options suitable for a CREATE KEYSPACE statement."""
    return "{'class': 'SimpleStrategy', 'replication_factor': '%d'}" % (self.replication_factor,)
def export_for_schema(self):
    """Return replication options suitable for a CREATE KEYSPACE statement.

    Datacenters are emitted in sorted order for deterministic output.
    """
    parts = ["{'class': 'NetworkTopologyStrategy'"]
    for dc, repl_factor in sorted(self.dc_replication_factors.items()):
        parts.append(", '%s': '%d'" % (dc, repl_factor))
    parts.append('}')
    return ''.join(parts)
def export_for_schema(self):
    """Return replication options suitable for a CREATE KEYSPACE statement."""
    return "{'class': 'LocalStrategy'}"
'Returns a CQL query string that can be used to recreate the entire keyspace, including user-defined types and tables.'
def export_as_string(self):
cql = '\n\n'.join((((([(self.as_cql_query() + ';')] + self.user_type_strings()) + [f.export_as_string() for f in self.functions.values()]) + [a.export_as_string() for a in self.aggregates.values()]) + [t.export_as_string() for t in self.tables.values()])) if self._exc_info: import traceback ret ...
def as_cql_query(self):
    """Return CQL recreating just this keyspace (no user-defined types or tables)."""
    stmt = 'CREATE KEYSPACE %s WITH replication = %s ' % (
        protect_name(self.name), self.replication_strategy.export_for_schema())
    durable = 'true' if self.durable_writes else 'false'
    return stmt + (' AND durable_writes = %s' % durable)
'Returns a CQL query that can be used to recreate this type. If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable.'
def as_cql_query(self, formatted=False):
ret = ('CREATE TYPE %s.%s (%s' % (protect_name(self.keyspace), protect_name(self.name), ('\n' if formatted else ''))) if formatted: field_join = ',\n' padding = ' ' else: field_join = ', ' padding = '' fields = [] for (field_name, field_type...
'Returns a CQL query that can be used to recreate this aggregate. If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable.'
def as_cql_query(self, formatted=False):
sep = ('\n ' if formatted else ' ') keyspace = protect_name(self.keyspace) name = protect_name(self.name) type_list = ', '.join(self.argument_types) state_func = protect_name(self.state_func) state_type = self.state_type ret = ('CREATE AGGREGATE %(keyspace)s.%(name...
'Returns a CQL query that can be used to recreate this function. If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable.'
def as_cql_query(self, formatted=False):
sep = ('\n ' if formatted else ' ') keyspace = protect_name(self.keyspace) name = protect_name(self.name) arg_list = ', '.join([('%s %s' % (protect_name(n), t)) for (n, t) in zip(self.argument_names, self.argument_types)]) typ = self.return_type lang = self.language b...
@property
def primary_key(self):
    """The :class:`.ColumnMetadata` components of this table's primary key
    (partition key columns followed by clustering key columns)."""
    return self.partition_key + self.clustering_key
@property
def is_cql_compatible(self):
    """Whether this table can be represented as CQL in an export."""
    comparator = getattr(self, 'comparator', None)
    if not comparator:
        return True
    # Compact-storage tables with clustering columns and extra non-key columns
    # cannot round-trip through CQL
    incompatible = (self.is_compact_storage
                    and len(self.columns) > len(self.primary_key) + 1
                    and len(self.clustering_key) >= 1)
    return not incompatible
'Returns a string of CQL queries that can be used to recreate this table along with all indexes on it. The returned string is formatted to be human readable.'
def export_as_string(self):
if self._exc_info: import traceback ret = ('/*\nWarning: Table %s.%s is incomplete because of an error processing metadata.\n' % (self.keyspace_name, self.name)) for line in traceback.format_exception(*self._exc_info): ret += line ret += ('\n...
'Returns a CQL query that can be used to recreate this table (index creations are not included). If `formatted` is set to :const:`True`, extra whitespace will be added to make the query human readable.'
def as_cql_query(self, formatted=False):
ret = ('CREATE TABLE %s.%s (%s' % (protect_name(self.keyspace_name), protect_name(self.name), ('\n' if formatted else ''))) if formatted: column_join = ',\n' padding = ' ' else: column_join = ', ' padding = '' columns = [] for col in self.co...
@classmethod
def after_table_cql(cls, ext_key, ext_blob):
    """Produce CQL/DDL to follow the table definition, including the requisite
    terminating semicolon(s). The base implementation emits nothing."""
    pass
'Returns a CQL query that can be used to recreate this index.'
def as_cql_query(self):
options = dict(self.index_options) index_target = options.pop('target') if (self.kind != 'CUSTOM'): return ('CREATE INDEX %s ON %s.%s (%s)' % (protect_name(self.name), protect_name(self.keyspace_name), protect_name(self.table_name), index_target)) else: class_name = option...
def export_as_string(self):
    """Return a CQL query string that recreates this index."""
    return self.as_cql_query() + ';'
'Get a set of :class:`.Host` instances representing all of the replica nodes for a given :class:`.Token`.'
def get_replicas(self, keyspace, token):
tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None) if (tokens_to_hosts is None): self.rebuild_keyspace(keyspace, build_if_absent=True) tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None) if tokens_to_hosts: point = bisect_right(self.ring, token) if ...
@classmethod
def from_string(cls, token_string):
    """Build a token from the server's string representation of its value."""
    return cls(int(token_string))
def __init__(self, token):
    """``token`` is an int or string representing the token value."""
    self.value = int(token)
@classmethod
def from_string(cls, token_string):
    """Build a token from the server's hex string representation."""
    if isinstance(token_string, six.text_type):
        # unhexlify requires a byte string
        token_string = token_string.encode('ascii')
    return cls(unhexlify(token_string))
def _build_table_options(self, row):
    """Collect the recognized non-schema table options (e.g. caching settings)
    from a system-table row, renaming the legacy 'local_read_repair_chance'
    key to 'dclocal_read_repair_chance'."""
    options = {opt: row.get(opt) for opt in self.recognized_table_options if opt in row}
    if 'local_read_repair_chance' in options:
        options['dclocal_read_repair_chance'] = options.pop('local_read_repair_chance')
    return options
def _build_table_options(self, row):
    """Collect the recognized non-schema table options (e.g. caching settings)
    present in ``row``."""
    return {opt: row.get(opt) for opt in self.recognized_table_options if opt in row}
'Returns a CQL query that can be used to recreate this function. If `formatted` is set to :const:`True`, extra whitespace will be added to make the query more readable.'
def as_cql_query(self, formatted=False):
sep = ('\n ' if formatted else ' ') keyspace = protect_name(self.keyspace_name) name = protect_name(self.name) selected_cols = ('*' if self.include_all_columns else ', '.join((protect_name(col.name) for col in self.columns.values()))) base_table = protect_name(self.base_table_na...
def _next_timestamp(self, now, last):
    """Return the next timestamp given the current time and the last value issued.

    If the clock has advanced past ``last``, use ``now``; otherwise possibly
    warn and issue ``last + 1`` to preserve strict monotonicity. Intended for
    internal and testing use only.

    :param int now: integer used as the current time (microseconds)
    :param int last: the previously issued timestamp
    """
    if now > last:
        self.last = now
        return now
    self._maybe_warn(now=now)
    self.last = last + 1
    return self.last
def __call__(self):
    """Return the next monotonic timestamp; defers to _next_timestamp under the lock."""
    with self.lock:
        now_us = int(time.time() * 1000000.0)
        return self._next_timestamp(now=now_us, last=self.last)
@classmethod
def from_binary(cls, byts, protocol_version):
    """Deserialize ``byts`` into a value.

    Unlike deserialize(), passing None (or, for most types, an empty payload)
    may yield None rather than a value.
    """
    if byts is None:
        return None
    if len(byts) == 0 and not cls.empty_binary_ok:
        return EMPTY if cls.support_empty_values else None
    return cls.deserialize(byts, protocol_version)
@classmethod
def to_binary(cls, val, protocol_version):
    """Serialize ``val`` into a bytestring.

    See the serialize() method for details. This method differs in that if
    None is passed in, the result is the empty bytestring.

    Fix: return ``b''`` rather than ``''`` for None — serialize() produces
    bytes, so under Python 3 returning str here would leak a str into a
    bytes pipeline.
    """
    return b'' if val is None else cls.serialize(val, protocol_version)
@staticmethod
def deserialize(byts, protocol_version):
    """Deserialize a bytestring per this type's protocol.

    Does not create an instance of this class; it returns a value appropriate
    to go inside one. Base types pass the bytes through unchanged.
    """
    return byts
@staticmethod
def serialize(val, protocol_version):
    """Serialize a value per this type's protocol and return the bytestring.

    Base types pass the value through unchanged.
    """
    return val
@classmethod
def cass_parameterized_type_with(cls, subtypes, full=False):
    """Return Cassandra's name for this type, optionally fully qualified,
    parameterized with the given subtype classes.

    Example: ``LongType.cass_parameterized_type_with([], full=True)`` yields
    the fully-qualified Cassandra name.
    """
    cname = cls.cassname
    if full and '.' not in cname:
        cname = apache_cassandra_type_prefix + cname
    if not subtypes:
        return cname
    params = ', '.join(styp.cass_parameterized_type(full=full) for styp in subtypes)
    return '%s(%s)' % (cname, params)
'Given a set of other CassandraTypes, create a new subtype of this type using them as parameters. This is how composite types are constructed. >>> MapType.apply_parameters([DateType, BooleanType]) <class \'cassandra.cqltypes.MapType(DateType, BooleanType)\'> `subtypes` will be a sequence of CassandraTypes. If provided...
@classmethod def apply_parameters(cls, subtypes, names=None):
if ((cls.num_subtypes != 'UNKNOWN') and (len(subtypes) != cls.num_subtypes)): raise ValueError(('%s types require %d subtypes (%d given)' % (cls.typename, cls.num_subtypes, len(subtypes)))) newname = cls.cass_parameterized_type_with(subtypes) if (six.PY2 and isinstance(newname, uni...
@classmethod
def cql_parameterized_type(cls):
    """Return the CQL specifier for this type, with any parameters in
    standard CQL ``<>`` notation."""
    if not cls.subtypes:
        return cls.typename
    inner = ', '.join(styp.cql_parameterized_type() for styp in cls.subtypes)
    return '%s<%s>' % (cls.typename, inner)
@classmethod
def cass_parameterized_type(cls, full=False):
    """Return the Cassandra type specifier for this type, with any parameters
    in the standard ``()`` notation."""
    return cls.cass_parameterized_type_with(cls.subtypes, full=full)
@classmethod
def cql_parameterized_type(cls):
    """Return a quoted Cassandra type string; Composites have no CQL notation,
    so the full parameterized Cassandra name is used instead."""
    full_name = cls.cass_parameterized_type(full=True)
    return "'%s'" % (full_name,)