| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
yt-project/unyt | unyt/unit_object.py | Unit.simplify | def simplify(self):
"""Return a new equivalent unit object with a simplified unit expression
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
"""
expr = self.expr
self.expr = _cancel_mul(expr, self.registry)
return self | python | def simplify(self):
"""Return a new equivalent unit object with a simplified unit expression
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
"""
expr = self.expr
self.expr = _cancel_mul(expr, self.registry)
return self | [
"def",
"simplify",
"(",
"self",
")",
":",
"expr",
"=",
"self",
".",
"expr",
"self",
".",
"expr",
"=",
"_cancel_mul",
"(",
"expr",
",",
"self",
".",
"registry",
")",
"return",
"self"
] | Return a new equivalent unit object with a simplified unit expression
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m | [
"Return",
"a",
"new",
"equivalent",
"unit",
"object",
"with",
"a",
"simplified",
"unit",
"expression"
] | 7a4eafc229f83784f4c63d639aee554f9a6b1ca0 | https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/unit_object.py#L681-L691 | train |
yt-project/unyt | unyt/_parsing.py | _auto_positive_symbol | def _auto_positive_symbol(tokens, local_dict, global_dict):
"""
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
"""
result = []
tokens.append((None, None)) # so zip traverses all t... | python | def _auto_positive_symbol(tokens, local_dict, global_dict):
"""
Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol
"""
result = []
tokens.append((None, None)) # so zip traverses all t... | [
"def",
"_auto_positive_symbol",
"(",
"tokens",
",",
"local_dict",
",",
"global_dict",
")",
":",
"result",
"=",
"[",
"]",
"tokens",
".",
"append",
"(",
"(",
"None",
",",
"None",
")",
")",
"# so zip traverses all tokens",
"for",
"tok",
",",
"nextTok",
"in",
... | Inserts calls to ``Symbol`` for undefined variables.
Passes in positive=True as a keyword argument.
Adapted from sympy.sympy.parsing.sympy_parser.auto_symbol | [
"Inserts",
"calls",
"to",
"Symbol",
"for",
"undefined",
"variables",
".",
"Passes",
"in",
"positive",
"=",
"True",
"as",
"a",
"keyword",
"argument",
".",
"Adapted",
"from",
"sympy",
".",
"sympy",
".",
"parsing",
".",
"sympy_parser",
".",
"auto_symbol"
] | 7a4eafc229f83784f4c63d639aee554f9a6b1ca0 | https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/_parsing.py#L25-L68 | train |
kz26/PyExcelerate | pyexcelerate/Range.py | Range.intersection | def intersection(self, range):
"""
Calculates the intersection with another range object
"""
if self.worksheet != range.worksheet:
# Different worksheet
return None
start = (max(self._start[0], range._start[0]),
max(self._start[1], range._start[1]))
... | python | def intersection(self, range):
"""
Calculates the intersection with another range object
"""
if self.worksheet != range.worksheet:
# Different worksheet
return None
start = (max(self._start[0], range._start[0]),
max(self._start[1], range._start[1]))
... | [
"def",
"intersection",
"(",
"self",
",",
"range",
")",
":",
"if",
"self",
".",
"worksheet",
"!=",
"range",
".",
"worksheet",
":",
"# Different worksheet",
"return",
"None",
"start",
"=",
"(",
"max",
"(",
"self",
".",
"_start",
"[",
"0",
"]",
",",
"rang... | Calculates the intersection with another range object | [
"Calculates",
"the",
"intersection",
"with",
"another",
"range",
"object"
] | 247406dc41adc7e94542bcbf04589f1e5fdf8c51 | https://github.com/kz26/PyExcelerate/blob/247406dc41adc7e94542bcbf04589f1e5fdf8c51/pyexcelerate/Range.py#L140-L153 | train |
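The intersection logic documented in the record above reduces to a rectangle intersection on (row, column) pairs. Below is a minimal self-contained sketch of that idea, independent of PyExcelerate's Range and worksheet objects; the end-coordinate handling is assumed from the docstring, since the record's code is truncated.

```python
def intersect(start_a, end_a, start_b, end_b):
    """Intersect two inclusive (row, col) rectangles; return None if they are disjoint."""
    start = (max(start_a[0], start_b[0]), max(start_a[1], start_b[1]))
    end = (min(end_a[0], end_b[0]), min(end_a[1], end_b[1]))
    if start[0] > end[0] or start[1] > end[1]:
        return None
    return start, end

print(intersect((1, 1), (5, 5), (3, 3), (8, 8)))  # ((3, 3), (5, 5))
print(intersect((1, 1), (2, 2), (5, 5), (6, 6)))  # None -> no overlap
```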
harlowja/fasteners | fasteners/process_lock.py | interprocess_locked | def interprocess_locked(path):
"""Acquires & releases a interprocess lock around call into
decorated function."""
lock = InterProcessLock(path)
def decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
re... | python | def interprocess_locked(path):
"""Acquires & releases a interprocess lock around call into
decorated function."""
lock = InterProcessLock(path)
def decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
with lock:
return f(*args, **kwargs)
re... | [
"def",
"interprocess_locked",
"(",
"path",
")",
":",
"lock",
"=",
"InterProcessLock",
"(",
"path",
")",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")... | Acquires & releases a interprocess lock around call into
decorated function. | [
"Acquires",
"&",
"releases",
"a",
"interprocess",
"lock",
"around",
"call",
"into",
"decorated",
"function",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L265-L280 | train |
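A minimal usage sketch for the decorator in the record above; the lock-file path and the decorated function are illustrative, not part of the fasteners source.

```python
import fasteners

@fasteners.interprocess_locked('/tmp/demo.lock')  # any writable path works
def append_to_log():
    # Only one process at a time executes this body.
    with open('/tmp/demo.log', 'a') as f:
        f.write('tick\n')

if __name__ == '__main__':
    append_to_log()
```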
harlowja/fasteners | fasteners/process_lock.py | _InterProcessLock.acquire | def acquire(self, blocking=True,
delay=DELAY_INCREMENT, max_delay=MAX_DELAY,
timeout=None):
"""Attempt to acquire the given lock.
:param blocking: whether to wait forever to try to acquire the lock
:type blocking: bool
:param delay: when blocking this is ... | python | def acquire(self, blocking=True,
delay=DELAY_INCREMENT, max_delay=MAX_DELAY,
timeout=None):
"""Attempt to acquire the given lock.
:param blocking: whether to wait forever to try to acquire the lock
:type blocking: bool
:param delay: when blocking this is ... | [
"def",
"acquire",
"(",
"self",
",",
"blocking",
"=",
"True",
",",
"delay",
"=",
"DELAY_INCREMENT",
",",
"max_delay",
"=",
"MAX_DELAY",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"delay",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Delay must be greater... | Attempt to acquire the given lock.
:param blocking: whether to wait forever to try to acquire the lock
:type blocking: bool
:param delay: when blocking this is the delay time in seconds that
will be added after each failed acquisition
:type delay: int/float
... | [
"Attempt",
"to",
"acquire",
"the",
"given",
"lock",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L130-L171 | train |
harlowja/fasteners | fasteners/process_lock.py | _InterProcessLock.release | def release(self):
"""Release the previously acquired lock."""
if not self.acquired:
raise threading.ThreadError("Unable to release an unacquired"
" lock")
try:
self.unlock()
except IOError:
self.logger.exception... | python | def release(self):
"""Release the previously acquired lock."""
if not self.acquired:
raise threading.ThreadError("Unable to release an unacquired"
" lock")
try:
self.unlock()
except IOError:
self.logger.exception... | [
"def",
"release",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"acquired",
":",
"raise",
"threading",
".",
"ThreadError",
"(",
"\"Unable to release an unacquired\"",
"\" lock\"",
")",
"try",
":",
"self",
".",
"unlock",
"(",
")",
"except",
"IOError",
":",... | Release the previously acquired lock. | [
"Release",
"the",
"previously",
"acquired",
"lock",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/process_lock.py#L187-L207 | train |
harlowja/fasteners | fasteners/_utils.py | canonicalize_path | def canonicalize_path(path):
"""Canonicalizes a potential path.
Returns a binary string encoded into filesystem encoding.
"""
if isinstance(path, six.binary_type):
return path
if isinstance(path, six.text_type):
return _fsencode(path)
else:
return canonicalize_path(str(p... | python | def canonicalize_path(path):
"""Canonicalizes a potential path.
Returns a binary string encoded into filesystem encoding.
"""
if isinstance(path, six.binary_type):
return path
if isinstance(path, six.text_type):
return _fsencode(path)
else:
return canonicalize_path(str(p... | [
"def",
"canonicalize_path",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"path",
"if",
"isinstance",
"(",
"path",
",",
"six",
".",
"text_type",
")",
":",
"return",
"_fsencode",
"(",
"path",
... | Canonicalizes a potential path.
Returns a binary string encoded into filesystem encoding. | [
"Canonicalizes",
"a",
"potential",
"path",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/_utils.py#L47-L57 | train |
harlowja/fasteners | fasteners/lock.py | read_locked | def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object th... | python | def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object th... | [
"def",
"read_locked",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"attr_name",
"=",
"kwargs",
".",
"get",
"(",
"'lock'",
",",
"'_lock'",
")",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrappe... | Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to. | [
"Acquires",
"&",
"releases",
"a",
"read",
"lock",
"around",
"call",
"into",
"decorated",
"method",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L38-L66 | train |
harlowja/fasteners | fasteners/lock.py | write_locked | def write_locked(*args, **kwargs):
"""Acquires & releases a write lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance ... | python | def write_locked(*args, **kwargs):
"""Acquires & releases a write lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance ... | [
"def",
"write_locked",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"attr_name",
"=",
"kwargs",
".",
"get",
"(",
"'lock'",
",",
"'_lock'",
")",
"@",
"six",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapp... | Acquires & releases a write lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock` object) in the instance object this
decorator is attached to. | [
"Acquires",
"&",
"releases",
"a",
"write",
"lock",
"around",
"call",
"into",
"decorated",
"method",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L69-L97 | train |
harlowja/fasteners | fasteners/lock.py | ReaderWriterLock.is_writer | def is_writer(self, check_pending=True):
"""Returns if the caller is the active writer or a pending writer."""
me = self._current_thread()
if self._writer == me:
return True
if check_pending:
return me in self._pending_writers
else:
return Fals... | python | def is_writer(self, check_pending=True):
"""Returns if the caller is the active writer or a pending writer."""
me = self._current_thread()
if self._writer == me:
return True
if check_pending:
return me in self._pending_writers
else:
return Fals... | [
"def",
"is_writer",
"(",
"self",
",",
"check_pending",
"=",
"True",
")",
":",
"me",
"=",
"self",
".",
"_current_thread",
"(",
")",
"if",
"self",
".",
"_writer",
"==",
"me",
":",
"return",
"True",
"if",
"check_pending",
":",
"return",
"me",
"in",
"self"... | Returns if the caller is the active writer or a pending writer. | [
"Returns",
"if",
"the",
"caller",
"is",
"the",
"active",
"writer",
"or",
"a",
"pending",
"writer",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L136-L144 | train |
harlowja/fasteners | fasteners/lock.py | ReaderWriterLock.owner | def owner(self):
"""Returns whether the lock is locked by a writer or reader."""
if self._writer is not None:
return self.WRITER
if self._readers:
return self.READER
return None | python | def owner(self):
"""Returns whether the lock is locked by a writer or reader."""
if self._writer is not None:
return self.WRITER
if self._readers:
return self.READER
return None | [
"def",
"owner",
"(",
"self",
")",
":",
"if",
"self",
".",
"_writer",
"is",
"not",
"None",
":",
"return",
"self",
".",
"WRITER",
"if",
"self",
".",
"_readers",
":",
"return",
"self",
".",
"READER",
"return",
"None"
] | Returns whether the lock is locked by a writer or reader. | [
"Returns",
"whether",
"the",
"lock",
"is",
"locked",
"by",
"a",
"writer",
"or",
"reader",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L147-L153 | train |
harlowja/fasteners | fasteners/lock.py | ReaderWriterLock.read_lock | def read_lock(self):
"""Context manager that grants a read lock.
Will wait until no active or pending writers.
Raises a ``RuntimeError`` if a pending writer tries to acquire
a read lock.
"""
me = self._current_thread()
if me in self._pending_writers:
... | python | def read_lock(self):
"""Context manager that grants a read lock.
Will wait until no active or pending writers.
Raises a ``RuntimeError`` if a pending writer tries to acquire
a read lock.
"""
me = self._current_thread()
if me in self._pending_writers:
... | [
"def",
"read_lock",
"(",
"self",
")",
":",
"me",
"=",
"self",
".",
"_current_thread",
"(",
")",
"if",
"me",
"in",
"self",
".",
"_pending_writers",
":",
"raise",
"RuntimeError",
"(",
"\"Writer %s can not acquire a read lock\"",
"\" while waiting for the write lock\"",
... | Context manager that grants a read lock.
Will wait until no active or pending writers.
Raises a ``RuntimeError`` if a pending writer tries to acquire
a read lock. | [
"Context",
"manager",
"that",
"grants",
"a",
"read",
"lock",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L161-L202 | train |
harlowja/fasteners | fasteners/lock.py | ReaderWriterLock.write_lock | def write_lock(self):
"""Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
""... | python | def write_lock(self):
"""Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
""... | [
"def",
"write_lock",
"(",
"self",
")",
":",
"me",
"=",
"self",
".",
"_current_thread",
"(",
")",
"i_am_writer",
"=",
"self",
".",
"is_writer",
"(",
"check_pending",
"=",
"False",
")",
"if",
"self",
".",
"is_reader",
"(",
")",
"and",
"not",
"i_am_writer",... | Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock. | [
"Context",
"manager",
"that",
"grants",
"a",
"write",
"lock",
"."
] | 8f3bbab0204a50037448a8fad7a6bf12eb1a2695 | https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/lock.py#L205-L238 | train |
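The ReaderWriterLock records above combine into the usual usage pattern; a small sketch with illustrative thread bodies follows.

```python
import threading
import fasteners

rw_lock = fasteners.ReaderWriterLock()
shared = {'value': 0}

def writer():
    with rw_lock.write_lock():   # exclusive access, served in FIFO order
        shared['value'] += 1

def reader(results):
    with rw_lock.read_lock():    # may run concurrently with other readers
        results.append(shared['value'])

results = []
threads = [threading.Thread(target=writer)]
threads += [threading.Thread(target=reader, args=(results,)) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)
```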
bfontaine/freesms | freesms/__init__.py | FreeClient.send_sms | def send_sms(self, text, **kw):
"""
Send an SMS. Since Free only allows us to send SMSes to ourselves you
don't have to provide your phone number.
"""
params = {
'user': self._user,
'pass': self._passwd,
'msg': text
}
kw.setde... | python | def send_sms(self, text, **kw):
"""
Send an SMS. Since Free only allows us to send SMSes to ourselves you
don't have to provide your phone number.
"""
params = {
'user': self._user,
'pass': self._passwd,
'msg': text
}
kw.setde... | [
"def",
"send_sms",
"(",
"self",
",",
"text",
",",
"*",
"*",
"kw",
")",
":",
"params",
"=",
"{",
"'user'",
":",
"self",
".",
"_user",
",",
"'pass'",
":",
"self",
".",
"_passwd",
",",
"'msg'",
":",
"text",
"}",
"kw",
".",
"setdefault",
"(",
"\"veri... | Send an SMS. Since Free only allows us to send SMSes to ourselves you
don't have to provide your phone number. | [
"Send",
"an",
"SMS",
".",
"Since",
"Free",
"only",
"allows",
"us",
"to",
"send",
"SMSes",
"to",
"ourselves",
"you",
"don",
"t",
"have",
"to",
"provide",
"your",
"phone",
"number",
"."
] | 64b3df222a852f313bd80afd9a7280b584fe31e1 | https://github.com/bfontaine/freesms/blob/64b3df222a852f313bd80afd9a7280b584fe31e1/freesms/__init__.py#L63-L82 | train |
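A usage sketch for the record above. The constructor keyword names are assumed from the `_user`/`_passwd` attributes in the snippet, the return value is assumed to be a requests-style response, and the credentials shown are placeholders.

```python
from freesms import FreeClient

client = FreeClient(user='12345678', passwd='my-free-api-key')  # placeholder credentials
resp = client.send_sms('Nightly backup finished')
print(resp.status_code)  # 200 when the message was accepted (assumed requests-style response)
```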
scrapinghub/exporters | exporters/writers/filebase_base_writer.py | FilebaseBaseWriter.create_filebase_name | def create_filebase_name(self, group_info, extension='gz', file_name=None):
"""
Return tuple of resolved destination folder name and file name
"""
dirname = self.filebase.formatted_dirname(groups=group_info)
if not file_name:
file_name = self.filebase.prefix_template ... | python | def create_filebase_name(self, group_info, extension='gz', file_name=None):
"""
Return tuple of resolved destination folder name and file name
"""
dirname = self.filebase.formatted_dirname(groups=group_info)
if not file_name:
file_name = self.filebase.prefix_template ... | [
"def",
"create_filebase_name",
"(",
"self",
",",
"group_info",
",",
"extension",
"=",
"'gz'",
",",
"file_name",
"=",
"None",
")",
":",
"dirname",
"=",
"self",
".",
"filebase",
".",
"formatted_dirname",
"(",
"groups",
"=",
"group_info",
")",
"if",
"not",
"f... | Return tuple of resolved destination folder name and file name | [
"Return",
"tuple",
"of",
"resolved",
"destination",
"folder",
"name",
"and",
"file",
"name"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/filebase_base_writer.py#L145-L152 | train |
scrapinghub/exporters | exporters/writers/aggregation_stats_writer.py | AggregationStatsWriter.write_batch | def write_batch(self, batch):
"""
Receives the batch and writes it. This method is usually called from a manager.
"""
for item in batch:
for key in item:
self.aggregated_info['occurrences'][key] += 1
self.increment_written_items()
if se... | python | def write_batch(self, batch):
"""
Receives the batch and writes it. This method is usually called from a manager.
"""
for item in batch:
for key in item:
self.aggregated_info['occurrences'][key] += 1
self.increment_written_items()
if se... | [
"def",
"write_batch",
"(",
"self",
",",
"batch",
")",
":",
"for",
"item",
"in",
"batch",
":",
"for",
"key",
"in",
"item",
":",
"self",
".",
"aggregated_info",
"[",
"'occurrences'",
"]",
"[",
"key",
"]",
"+=",
"1",
"self",
".",
"increment_written_items",
... | Receives the batch and writes it. This method is usually called from a manager. | [
"Receives",
"the",
"batch",
"and",
"writes",
"it",
".",
"This",
"method",
"is",
"usually",
"called",
"from",
"a",
"manager",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/aggregation_stats_writer.py#L18-L29 | train |
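The per-key occurrence counting performed in the record above can be reproduced stand-alone with `collections.Counter`; a small sketch with made-up items:

```python
from collections import Counter

occurrences = Counter()
batch = [{'name': 'a', 'url': 'x'}, {'name': 'b'}, {'price': 3}]
for item in batch:
    occurrences.update(item.keys())  # one increment per key present in the item
print(occurrences)  # Counter({'name': 2, 'url': 1, 'price': 1})
```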
scrapinghub/exporters | exporters/writers/aggregation_stats_writer.py | AggregationStatsWriter._get_aggregated_info | def _get_aggregated_info(self):
"""
Keeps track of aggregated info in a dictionary called self.aggregated_info
"""
agg_results = {}
for key in self.aggregated_info['occurrences']:
agg_results[key] = {
'occurrences': self.aggregated_info['occurrences'].... | python | def _get_aggregated_info(self):
"""
Keeps track of aggregated info in a dictionary called self.aggregated_info
"""
agg_results = {}
for key in self.aggregated_info['occurrences']:
agg_results[key] = {
'occurrences': self.aggregated_info['occurrences'].... | [
"def",
"_get_aggregated_info",
"(",
"self",
")",
":",
"agg_results",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"aggregated_info",
"[",
"'occurrences'",
"]",
":",
"agg_results",
"[",
"key",
"]",
"=",
"{",
"'occurrences'",
":",
"self",
".",
"aggregated... | Keeps track of aggregated info in a dictionary called self.aggregated_info | [
"Keeps",
"track",
"of",
"aggregated",
"info",
"in",
"a",
"dictionary",
"called",
"self",
".",
"aggregated_info"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/aggregation_stats_writer.py#L31-L42 | train |
scrapinghub/exporters | exporters/writers/cloudsearch_writer.py | create_document_batches | def create_document_batches(jsonlines, id_field, max_batch_size=CLOUDSEARCH_MAX_BATCH_SIZE):
"""Create batches in expected AWS Cloudsearch format, limiting the
byte size per batch according to given max_batch_size
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html
"""... | python | def create_document_batches(jsonlines, id_field, max_batch_size=CLOUDSEARCH_MAX_BATCH_SIZE):
"""Create batches in expected AWS Cloudsearch format, limiting the
byte size per batch according to given max_batch_size
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html
"""... | [
"def",
"create_document_batches",
"(",
"jsonlines",
",",
"id_field",
",",
"max_batch_size",
"=",
"CLOUDSEARCH_MAX_BATCH_SIZE",
")",
":",
"batch",
"=",
"[",
"]",
"fixed_initial_size",
"=",
"2",
"def",
"create_entry",
"(",
"line",
")",
":",
"try",
":",
"record",
... | Create batches in expected AWS Cloudsearch format, limiting the
byte size per batch according to given max_batch_size
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/preparing-data.html | [
"Create",
"batches",
"in",
"expected",
"AWS",
"Cloudsearch",
"format",
"limiting",
"the",
"byte",
"size",
"per",
"batch",
"according",
"to",
"given",
"max_batch_size"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/cloudsearch_writer.py#L14-L45 | train |
scrapinghub/exporters | exporters/writers/cloudsearch_writer.py | CloudSearchWriter._post_document_batch | def _post_document_batch(self, batch):
"""
Send a batch to Cloudsearch endpoint
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html
""" # noqa
target_batch = '/2013-01-01/documents/batch'
url = self.endpoint_url + target_batch
... | python | def _post_document_batch(self, batch):
"""
Send a batch to Cloudsearch endpoint
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html
""" # noqa
target_batch = '/2013-01-01/documents/batch'
url = self.endpoint_url + target_batch
... | [
"def",
"_post_document_batch",
"(",
"self",
",",
"batch",
")",
":",
"# noqa",
"target_batch",
"=",
"'/2013-01-01/documents/batch'",
"url",
"=",
"self",
".",
"endpoint_url",
"+",
"target_batch",
"return",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"... | Send a batch to Cloudsearch endpoint
See: http://docs.aws.amazon.com/cloudsearch/latest/developerguide/submitting-doc-requests.html | [
"Send",
"a",
"batch",
"to",
"Cloudsearch",
"endpoint"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/cloudsearch_writer.py#L97-L105 | train |
scrapinghub/exporters | exporters/writers/fs_writer.py | FSWriter._create_path_if_not_exist | def _create_path_if_not_exist(self, path):
"""
Creates a folders path if it doesn't exist
"""
if path and not os.path.exists(path):
os.makedirs(path) | python | def _create_path_if_not_exist(self, path):
"""
Creates a folders path if it doesn't exist
"""
if path and not os.path.exists(path):
os.makedirs(path) | [
"def",
"_create_path_if_not_exist",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")"
] | Creates a folders path if it doesn't exist | [
"Creates",
"a",
"folders",
"path",
"if",
"it",
"doesn",
"t",
"exist"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/fs_writer.py#L26-L31 | train |
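A note on the record above: on Python 3 the same guard can be written with `exist_ok`, which also avoids the race between the existence check and the `mkdir` call. The path below is illustrative.

```python
import os

os.makedirs('/tmp/exporters-demo/output', exist_ok=True)  # no error if the directories already exist
```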
scrapinghub/exporters | exporters/writers/s3_writer.py | S3Writer.close | def close(self):
"""
Called to clean all possible tmp files created during the process.
"""
if self.read_option('save_pointer'):
self._update_last_pointer()
super(S3Writer, self).close() | python | def close(self):
"""
Called to clean all possible tmp files created during the process.
"""
if self.read_option('save_pointer'):
self._update_last_pointer()
super(S3Writer, self).close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"read_option",
"(",
"'save_pointer'",
")",
":",
"self",
".",
"_update_last_pointer",
"(",
")",
"super",
"(",
"S3Writer",
",",
"self",
")",
".",
"close",
"(",
")"
] | Called to clean all possible tmp files created during the process. | [
"Called",
"to",
"clean",
"all",
"possible",
"tmp",
"files",
"created",
"during",
"the",
"process",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/s3_writer.py#L207-L213 | train |
scrapinghub/exporters | exporters/utils.py | get_boto_connection | def get_boto_connection(aws_access_key_id, aws_secret_access_key, region=None, bucketname=None,
host=None):
"""
Connection parameters must be different only if bucket name has a period
"""
m = _AWS_ACCESS_KEY_ID_RE.match(aws_access_key_id)
if m is None or m.group() != aws_acce... | python | def get_boto_connection(aws_access_key_id, aws_secret_access_key, region=None, bucketname=None,
host=None):
"""
Connection parameters must be different only if bucket name has a period
"""
m = _AWS_ACCESS_KEY_ID_RE.match(aws_access_key_id)
if m is None or m.group() != aws_acce... | [
"def",
"get_boto_connection",
"(",
"aws_access_key_id",
",",
"aws_secret_access_key",
",",
"region",
"=",
"None",
",",
"bucketname",
"=",
"None",
",",
"host",
"=",
"None",
")",
":",
"m",
"=",
"_AWS_ACCESS_KEY_ID_RE",
".",
"match",
"(",
"aws_access_key_id",
")",
... | Conection parameters must be different only if bucket name has a period | [
"Conection",
"parameters",
"must",
"be",
"different",
"only",
"if",
"bucket",
"name",
"has",
"a",
"period"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/utils.py#L115-L140 | train |
scrapinghub/exporters | exporters/utils.py | maybe_cast_list | def maybe_cast_list(value, types):
"""
Try to coerce list values into more specific list subclasses in types.
"""
if not isinstance(value, list):
return value
if type(types) not in (list, tuple):
types = (types,)
for list_type in types:
if issubclass(list_type, list):
... | python | def maybe_cast_list(value, types):
"""
Try to coerce list values into more specific list subclasses in types.
"""
if not isinstance(value, list):
return value
if type(types) not in (list, tuple):
types = (types,)
for list_type in types:
if issubclass(list_type, list):
... | [
"def",
"maybe_cast_list",
"(",
"value",
",",
"types",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"return",
"value",
"if",
"type",
"(",
"types",
")",
"not",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"types",
"=",
"(",... | Try to coerce list values into more specific list subclasses in types. | [
"Try",
"to",
"coerce",
"list",
"values",
"into",
"more",
"specific",
"list",
"subclasses",
"in",
"types",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/utils.py#L143-L159 | train |
scrapinghub/exporters | exporters/iterio.py | iterate_chunks | def iterate_chunks(file, chunk_size):
"""
Iterate chunks of size chunk_size from a file-like object
"""
chunk = file.read(chunk_size)
while chunk:
yield chunk
chunk = file.read(chunk_size) | python | def iterate_chunks(file, chunk_size):
"""
Iterate chunks of size chunk_size from a file-like object
"""
chunk = file.read(chunk_size)
while chunk:
yield chunk
chunk = file.read(chunk_size) | [
"def",
"iterate_chunks",
"(",
"file",
",",
"chunk_size",
")",
":",
"chunk",
"=",
"file",
".",
"read",
"(",
"chunk_size",
")",
"while",
"chunk",
":",
"yield",
"chunk",
"chunk",
"=",
"file",
".",
"read",
"(",
"chunk_size",
")"
] | Iterate chunks of size chunk_size from a file-like object | [
"Iterate",
"chunks",
"of",
"size",
"chunk_size",
"from",
"a",
"file",
"-",
"like",
"object"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L15-L22 | train |
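The generator in the record above is small enough to exercise directly; a runnable sketch using an in-memory file:

```python
import io

def iterate_chunks(file, chunk_size):
    # Same logic as the record above: keep reading fixed-size chunks until EOF.
    chunk = file.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file.read(chunk_size)

buf = io.BytesIO(b'abcdefghij')
print(list(iterate_chunks(buf, 4)))  # [b'abcd', b'efgh', b'ij']
```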
scrapinghub/exporters | exporters/iterio.py | IterIO.unshift | def unshift(self, chunk):
"""
Pushes a chunk of data back into the internal buffer. This is useful
in certain situations where a stream is being consumed by code that
needs to "un-consume" some amount of data that it has optimistically
pulled out of the source, so that the data c... | python | def unshift(self, chunk):
"""
Pushes a chunk of data back into the internal buffer. This is useful
in certain situations where a stream is being consumed by code that
needs to "un-consume" some amount of data that it has optimistically
pulled out of the source, so that the data c... | [
"def",
"unshift",
"(",
"self",
",",
"chunk",
")",
":",
"if",
"chunk",
":",
"self",
".",
"_pos",
"-=",
"len",
"(",
"chunk",
")",
"self",
".",
"_unconsumed",
".",
"append",
"(",
"chunk",
")"
] | Pushes a chunk of data back into the internal buffer. This is useful
in certain situations where a stream is being consumed by code that
needs to "un-consume" some amount of data that it has optimistically
pulled out of the source, so that the data can be passed on to some
other party. | [
"Pushes",
"a",
"chunk",
"of",
"data",
"back",
"into",
"the",
"internal",
"buffer",
".",
"This",
"is",
"useful",
"in",
"certain",
"situations",
"where",
"a",
"stream",
"is",
"being",
"consumed",
"by",
"code",
"that",
"needs",
"to",
"un",
"-",
"consume",
"... | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L46-L56 | train |
scrapinghub/exporters | exporters/iterio.py | IterIO.readline | def readline(self):
"""
Read until a new-line character is encountered
"""
line = ""
n_pos = -1
try:
while n_pos < 0:
line += self.next_chunk()
n_pos = line.find('\n')
except StopIteration:
pass
if n... | python | def readline(self):
"""
Read until a new-line character is encountered
"""
line = ""
n_pos = -1
try:
while n_pos < 0:
line += self.next_chunk()
n_pos = line.find('\n')
except StopIteration:
pass
if n... | [
"def",
"readline",
"(",
"self",
")",
":",
"line",
"=",
"\"\"",
"n_pos",
"=",
"-",
"1",
"try",
":",
"while",
"n_pos",
"<",
"0",
":",
"line",
"+=",
"self",
".",
"next_chunk",
"(",
")",
"n_pos",
"=",
"line",
".",
"find",
"(",
"'\\n'",
")",
"except",... | Read until a new-line character is encountered | [
"Read",
"until",
"a",
"new",
"-",
"line",
"character",
"is",
"encountered"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L110-L126 | train |
scrapinghub/exporters | exporters/iterio.py | IterIO.close | def close(self):
"""
Disable all operations and close the underlying file-like object, if any
"""
if callable(getattr(self._file, 'close', None)):
self._iterator.close()
self._iterator = None
self._unconsumed = None
self.closed = True | python | def close(self):
"""
Disable all operations and close the underlying file-like object, if any
"""
if callable(getattr(self._file, 'close', None)):
self._iterator.close()
self._iterator = None
self._unconsumed = None
self.closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"callable",
"(",
"getattr",
"(",
"self",
".",
"_file",
",",
"'close'",
",",
"None",
")",
")",
":",
"self",
".",
"_iterator",
".",
"close",
"(",
")",
"self",
".",
"_iterator",
"=",
"None",
"self",
".",
... | Disable al operations and close the underlying file-like object, if any | [
"Disable",
"al",
"operations",
"and",
"close",
"the",
"underlying",
"file",
"-",
"like",
"object",
"if",
"any"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/iterio.py#L148-L156 | train |
scrapinghub/exporters | exporters/persistence/pickle_persistence.py | PicklePersistence.configuration_from_uri | def configuration_from_uri(uri, uri_regex):
"""
returns a configuration object.
"""
file_path = re.match(uri_regex, uri).groups()[0]
with open(file_path) as f:
configuration = pickle.load(f)['configuration']
configuration = yaml.safe_load(configuration)
... | python | def configuration_from_uri(uri, uri_regex):
"""
returns a configuration object.
"""
file_path = re.match(uri_regex, uri).groups()[0]
with open(file_path) as f:
configuration = pickle.load(f)['configuration']
configuration = yaml.safe_load(configuration)
... | [
"def",
"configuration_from_uri",
"(",
"uri",
",",
"uri_regex",
")",
":",
"file_path",
"=",
"re",
".",
"match",
"(",
"uri_regex",
",",
"uri",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"with",
"open",
"(",
"file_path",
")",
"as",
"f",
":",
"configur... | returns a configuration object. | [
"returns",
"a",
"configuration",
"object",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/pickle_persistence.py#L77-L88 | train |
scrapinghub/exporters | exporters/write_buffers/base.py | WriteBuffer.buffer | def buffer(self, item):
"""
Receive an item and write it.
"""
key = self.get_key_from_item(item)
if not self.grouping_info.is_first_file_item(key):
self.items_group_files.add_item_separator_to_file(key)
self.grouping_info.ensure_group_info(key)
self.it... | python | def buffer(self, item):
"""
Receive an item and write it.
"""
key = self.get_key_from_item(item)
if not self.grouping_info.is_first_file_item(key):
self.items_group_files.add_item_separator_to_file(key)
self.grouping_info.ensure_group_info(key)
self.it... | [
"def",
"buffer",
"(",
"self",
",",
"item",
")",
":",
"key",
"=",
"self",
".",
"get_key_from_item",
"(",
"item",
")",
"if",
"not",
"self",
".",
"grouping_info",
".",
"is_first_file_item",
"(",
"key",
")",
":",
"self",
".",
"items_group_files",
".",
"add_i... | Receive an item and write it. | [
"Receive",
"an",
"item",
"and",
"write",
"it",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/write_buffers/base.py#L29-L37 | train |
scrapinghub/exporters | exporters/persistence/alchemy_persistence.py | BaseAlchemyPersistence.parse_persistence_uri | def parse_persistence_uri(cls, persistence_uri):
"""Parse a database URI and the persistence state ID from
the given persistence URI
"""
regex = cls.persistence_uri_re
match = re.match(regex, persistence_uri)
if not match:
raise ValueError("Couldn't parse pers... | python | def parse_persistence_uri(cls, persistence_uri):
"""Parse a database URI and the persistence state ID from
the given persistence URI
"""
regex = cls.persistence_uri_re
match = re.match(regex, persistence_uri)
if not match:
raise ValueError("Couldn't parse pers... | [
"def",
"parse_persistence_uri",
"(",
"cls",
",",
"persistence_uri",
")",
":",
"regex",
"=",
"cls",
".",
"persistence_uri_re",
"match",
"=",
"re",
".",
"match",
"(",
"regex",
",",
"persistence_uri",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
... | Parse a database URI and the persistence state ID from
the given persistence URI | [
"Parse",
"a",
"database",
"URI",
"and",
"the",
"persistence",
"state",
"ID",
"from",
"the",
"given",
"persistence",
"URI"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/alchemy_persistence.py#L104-L122 | train |
scrapinghub/exporters | exporters/persistence/alchemy_persistence.py | BaseAlchemyPersistence.configuration_from_uri | def configuration_from_uri(cls, persistence_uri):
"""
Return a configuration object.
"""
db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri)
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
D... | python | def configuration_from_uri(cls, persistence_uri):
"""
Return a configuration object.
"""
db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri)
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
D... | [
"def",
"configuration_from_uri",
"(",
"cls",
",",
"persistence_uri",
")",
":",
"db_uri",
",",
"persistence_state_id",
"=",
"cls",
".",
"parse_persistence_uri",
"(",
"persistence_uri",
")",
"engine",
"=",
"create_engine",
"(",
"db_uri",
")",
"Base",
".",
"metadata"... | Return a configuration object. | [
"Return",
"a",
"configuration",
"object",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/persistence/alchemy_persistence.py#L125-L140 | train |
scrapinghub/exporters | exporters/readers/fs_reader.py | FSReader._get_input_files | def _get_input_files(cls, input_specification):
"""Get list of input files according to input definition.
Input definition can be:
- str: specifying a filename
- list of str: specifying list a of filenames
- dict with "dir" and optional "pattern" parameters: specifying the
... | python | def _get_input_files(cls, input_specification):
"""Get list of input files according to input definition.
Input definition can be:
- str: specifying a filename
- list of str: specifying list a of filenames
- dict with "dir" and optional "pattern" parameters: specifying the
... | [
"def",
"_get_input_files",
"(",
"cls",
",",
"input_specification",
")",
":",
"if",
"isinstance",
"(",
"input_specification",
",",
"(",
"basestring",
",",
"dict",
")",
")",
":",
"input_specification",
"=",
"[",
"input_specification",
"]",
"elif",
"not",
"isinstan... | Get list of input files according to input definition.
Input definition can be:
- str: specifying a filename
- list of str: specifying list a of filenames
- dict with "dir" and optional "pattern" parameters: specifying the
toplevel directory under which input files will be so... | [
"Get",
"list",
"of",
"input",
"files",
"according",
"to",
"input",
"definition",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/fs_reader.py#L59-L103 | train |
scrapinghub/exporters | exporters/readers/kafka_random_reader.py | KafkaRandomReader.consume_messages | def consume_messages(self, batchsize):
""" Get messages batch from the reservoir """
if not self._reservoir:
self.finished = True
return
for msg in self._reservoir[:batchsize]:
yield msg
self._reservoir = self._reservoir[batchsize:] | python | def consume_messages(self, batchsize):
""" Get messages batch from the reservoir """
if not self._reservoir:
self.finished = True
return
for msg in self._reservoir[:batchsize]:
yield msg
self._reservoir = self._reservoir[batchsize:] | [
"def",
"consume_messages",
"(",
"self",
",",
"batchsize",
")",
":",
"if",
"not",
"self",
".",
"_reservoir",
":",
"self",
".",
"finished",
"=",
"True",
"return",
"for",
"msg",
"in",
"self",
".",
"_reservoir",
"[",
":",
"batchsize",
"]",
":",
"yield",
"m... | Get messages batch from the reservoir | [
"Get",
"messages",
"batch",
"from",
"the",
"reservoir"
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/kafka_random_reader.py#L127-L134 | train |
scrapinghub/exporters | exporters/readers/kafka_random_reader.py | KafkaRandomReader.decompress_messages | def decompress_messages(self, offmsgs):
""" Decompress pre-defined compressed fields for each message.
Msgs should be unpacked before this step. """
for offmsg in offmsgs:
yield offmsg.message.key, self.decompress_fun(offmsg.message.value) | python | def decompress_messages(self, offmsgs):
""" Decompress pre-defined compressed fields for each message.
Msgs should be unpacked before this step. """
for offmsg in offmsgs:
yield offmsg.message.key, self.decompress_fun(offmsg.message.value) | [
"def",
"decompress_messages",
"(",
"self",
",",
"offmsgs",
")",
":",
"for",
"offmsg",
"in",
"offmsgs",
":",
"yield",
"offmsg",
".",
"message",
".",
"key",
",",
"self",
".",
"decompress_fun",
"(",
"offmsg",
".",
"message",
".",
"value",
")"
] | Decompress pre-defined compressed fields for each message.
Msgs should be unpacked before this step. | [
"Decompress",
"pre",
"-",
"defined",
"compressed",
"fields",
"for",
"each",
"message",
".",
"Msgs",
"should",
"be",
"unpacked",
"before",
"this",
"step",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/readers/kafka_random_reader.py#L136-L141 | train |
scrapinghub/exporters | exporters/filters/base_filter.py | BaseFilter.filter_batch | def filter_batch(self, batch):
"""
Receives the batch, filters it, and returns it.
"""
for item in batch:
if self.filter(item):
yield item
else:
self.set_metadata('filtered_out',
self.get_metadata('... | python | def filter_batch(self, batch):
"""
Receives the batch, filters it, and returns it.
"""
for item in batch:
if self.filter(item):
yield item
else:
self.set_metadata('filtered_out',
self.get_metadata('... | [
"def",
"filter_batch",
"(",
"self",
",",
"batch",
")",
":",
"for",
"item",
"in",
"batch",
":",
"if",
"self",
".",
"filter",
"(",
"item",
")",
":",
"yield",
"item",
"else",
":",
"self",
".",
"set_metadata",
"(",
"'filtered_out'",
",",
"self",
".",
"ge... | Receives the batch, filters it, and returns it. | [
"Receives",
"the",
"batch",
"filters",
"it",
"and",
"returns",
"it",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/filters/base_filter.py#L24-L36 | train |
scrapinghub/exporters | exporters/writers/base_writer.py | BaseWriter.write_batch | def write_batch(self, batch):
"""
Buffer a batch of items to be written and update internal counters.
Calling this method doesn't guarantee that all items have been written.
To ensure everything has been written you need to call flush().
"""
for item in batch:
... | python | def write_batch(self, batch):
"""
Buffer a batch of items to be written and update internal counters.
Calling this method doesn't guarantee that all items have been written.
To ensure everything has been written you need to call flush().
"""
for item in batch:
... | [
"def",
"write_batch",
"(",
"self",
",",
"batch",
")",
":",
"for",
"item",
"in",
"batch",
":",
"self",
".",
"write_buffer",
".",
"buffer",
"(",
"item",
")",
"key",
"=",
"self",
".",
"write_buffer",
".",
"get_key_from_item",
"(",
"item",
")",
"if",
"self... | Buffer a batch of items to be written and update internal counters.
Calling this method doesn't guarantee that all items have been written.
To ensure everything has been written you need to call flush(). | [
"Buffer",
"a",
"batch",
"of",
"items",
"to",
"be",
"written",
"and",
"update",
"internal",
"counters",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L103-L116 | train |
scrapinghub/exporters | exporters/writers/base_writer.py | BaseWriter._check_items_limit | def _check_items_limit(self):
"""
Raise ItemsLimitReached if the writer reached the configured items limit.
"""
if self.items_limit and self.items_limit == self.get_metadata('items_count'):
raise ItemsLimitReached('Finishing job after items_limit reached:'
... | python | def _check_items_limit(self):
"""
Raise ItemsLimitReached if the writer reached the configured items limit.
"""
if self.items_limit and self.items_limit == self.get_metadata('items_count'):
raise ItemsLimitReached('Finishing job after items_limit reached:'
... | [
"def",
"_check_items_limit",
"(",
"self",
")",
":",
"if",
"self",
".",
"items_limit",
"and",
"self",
".",
"items_limit",
"==",
"self",
".",
"get_metadata",
"(",
"'items_count'",
")",
":",
"raise",
"ItemsLimitReached",
"(",
"'Finishing job after items_limit reached:'... | Raise ItemsLimitReached if the writer reached the configured items limit. | [
"Raise",
"ItemsLimitReached",
"if",
"the",
"writer",
"reached",
"the",
"configured",
"items",
"limit",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L118-L124 | train |
scrapinghub/exporters | exporters/writers/base_writer.py | BaseWriter.flush | def flush(self):
"""
Ensure all remaining buffers are written.
"""
for key in self.grouping_info.keys():
if self._should_flush(key):
self._write_current_buffer_for_group_key(key) | python | def flush(self):
"""
Ensure all remaining buffers are written.
"""
for key in self.grouping_info.keys():
if self._should_flush(key):
self._write_current_buffer_for_group_key(key) | [
"def",
"flush",
"(",
"self",
")",
":",
"for",
"key",
"in",
"self",
".",
"grouping_info",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"_should_flush",
"(",
"key",
")",
":",
"self",
".",
"_write_current_buffer_for_group_key",
"(",
"key",
")"
] | Ensure all remaining buffers are written. | [
"Ensure",
"all",
"remaining",
"buffers",
"are",
"written",
"."
] | c9fb01db1771ada4672bbffd67cb46e1f7802ab9 | https://github.com/scrapinghub/exporters/blob/c9fb01db1771ada4672bbffd67cb46e1f7802ab9/exporters/writers/base_writer.py#L129-L135 | train |
opendatateam/udata | udata/assets.py | has_manifest | def has_manifest(app, filename='manifest.json'):
'''Verify the existence of a JSON assets manifest'''
try:
return pkg_resources.resource_exists(app, filename)
except ImportError:
return os.path.isabs(filename) and os.path.exists(filename) | python | def has_manifest(app, filename='manifest.json'):
'''Verify the existence of a JSON assets manifest'''
try:
return pkg_resources.resource_exists(app, filename)
except ImportError:
return os.path.isabs(filename) and os.path.exists(filename) | [
"def",
"has_manifest",
"(",
"app",
",",
"filename",
"=",
"'manifest.json'",
")",
":",
"try",
":",
"return",
"pkg_resources",
".",
"resource_exists",
"(",
"app",
",",
"filename",
")",
"except",
"ImportError",
":",
"return",
"os",
".",
"path",
".",
"isabs",
... | Verify the existance of a JSON assets manifest | [
"Verify",
"the",
"existance",
"of",
"a",
"JSON",
"assets",
"manifest"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L19-L24 | train |
opendatateam/udata | udata/assets.py | register_manifest | def register_manifest(app, filename='manifest.json'):
'''Register an assets json manifest'''
if current_app.config.get('TESTING'):
return # Do not spend time here when testing
if not has_manifest(app, filename):
msg = '{filename} not found for {app}'.format(**locals())
raise ValueEr... | python | def register_manifest(app, filename='manifest.json'):
'''Register an assets json manifest'''
if current_app.config.get('TESTING'):
return # Do not spend time here when testing
if not has_manifest(app, filename):
msg = '{filename} not found for {app}'.format(**locals())
raise ValueEr... | [
"def",
"register_manifest",
"(",
"app",
",",
"filename",
"=",
"'manifest.json'",
")",
":",
"if",
"current_app",
".",
"config",
".",
"get",
"(",
"'TESTING'",
")",
":",
"return",
"# Do not spend time here when testing",
"if",
"not",
"has_manifest",
"(",
"app",
","... | Register an assets json manifest | [
"Register",
"an",
"assets",
"json",
"manifest"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L27-L36 | train |
opendatateam/udata | udata/assets.py | load_manifest | def load_manifest(app, filename='manifest.json'):
'''Load an assets json manifest'''
if os.path.isabs(filename):
path = filename
else:
path = pkg_resources.resource_filename(app, filename)
with io.open(path, mode='r', encoding='utf8') as stream:
data = json.load(stream)
_regi... | python | def load_manifest(app, filename='manifest.json'):
'''Load an assets json manifest'''
if os.path.isabs(filename):
path = filename
else:
path = pkg_resources.resource_filename(app, filename)
with io.open(path, mode='r', encoding='utf8') as stream:
data = json.load(stream)
_regi... | [
"def",
"load_manifest",
"(",
"app",
",",
"filename",
"=",
"'manifest.json'",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"filename",
")",
":",
"path",
"=",
"filename",
"else",
":",
"path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"a... | Load an assets json manifest | [
"Load",
"an",
"assets",
"json",
"manifest"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L39-L48 | train |
opendatateam/udata | udata/assets.py | from_manifest | def from_manifest(app, filename, raw=False, **kwargs):
'''
Get the path to a static file for a given app entry of a given type.
:param str app: The application key to which is tied this manifest
:param str filename: the original filename (without hash)
:param bool raw: if True, doesn't add prefix t... | python | def from_manifest(app, filename, raw=False, **kwargs):
'''
Get the path to a static file for a given app entry of a given type.
:param str app: The application key to which is tied this manifest
:param str filename: the original filename (without hash)
:param bool raw: if True, doesn't add prefix t... | [
"def",
"from_manifest",
"(",
"app",
",",
"filename",
",",
"raw",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"cfg",
"=",
"current_app",
".",
"config",
"if",
"current_app",
".",
"config",
".",
"get",
"(",
"'TESTING'",
")",
":",
"return",
"# Do not ... | Get the path to a static file for a given app entry of a given type.
:param str app: The application key to which is tied this manifest
:param str filename: the original filename (without hash)
:param bool raw: if True, doesn't add prefix to the manifest
:return: the resolved file path from manifest
... | [
"Get",
"the",
"path",
"to",
"a",
"static",
"file",
"for",
"a",
"given",
"app",
"entry",
"of",
"a",
"given",
"type",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L58-L85 | train |
opendatateam/udata | udata/assets.py | cdn_for | def cdn_for(endpoint, **kwargs):
'''
Get a CDN URL for static assets.
Do not use as a replacement for all flask.url_for calls
as it is only meant for CDN assets URLs.
(There is some extra round trip whose cost is justified
by the CDN assets performance improvements)
'''
if current_app.con... | python | def cdn_for(endpoint, **kwargs):
'''
Get a CDN URL for static assets.
Do not use as a replacement for all flask.url_for calls
as it is only meant for CDN assets URLs.
(There is some extra round trip whose cost is justified
by the CDN assets performance improvements)
'''
if current_app.con... | [
"def",
"cdn_for",
"(",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"current_app",
".",
"config",
"[",
"'CDN_DOMAIN'",
"]",
":",
"if",
"not",
"current_app",
".",
"config",
".",
"get",
"(",
"'CDN_DEBUG'",
")",
":",
"kwargs",
".",
"pop",
"(",
"... | Get a CDN URL for a static assets.
Do not use as a replacement for all flask.url_for calls
as it is only meant for CDN assets URLs.
(There is some extra round trip whose cost is justified
by the CDN assets performance improvements) | [
"Get",
"a",
"CDN",
"URL",
"for",
"a",
"static",
"assets",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/assets.py#L92-L105 | train |
opendatateam/udata | udata/models/queryset.py | UDataQuerySet.get_or_create | def get_or_create(self, write_concern=None, auto_save=True,
*q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist.
Returns a tuple of ``(object, created)``, where ``object`` is
the retrieved or created object and ``created`` is a boolean
speci... | python | def get_or_create(self, write_concern=None, auto_save=True,
*q_objs, **query):
"""Retrieve unique object or create, if it doesn't exist.
Returns a tuple of ``(object, created)``, where ``object`` is
the retrieved or created object and ``created`` is a boolean
speci... | [
"def",
"get_or_create",
"(",
"self",
",",
"write_concern",
"=",
"None",
",",
"auto_save",
"=",
"True",
",",
"*",
"q_objs",
",",
"*",
"*",
"query",
")",
":",
"defaults",
"=",
"query",
".",
"pop",
"(",
"'defaults'",
",",
"{",
"}",
")",
"try",
":",
"d... | Retrieve unique object or create, if it doesn't exist.
Returns a tuple of ``(object, created)``, where ``object`` is
the retrieved or created object and ``created`` is a boolean
specifying whether a new object was created.
Taken back from:
https://github.com/MongoEngine/mongoe... | [
"Retrieve",
"unique",
"object",
"or",
"create",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/queryset.py#L50-L73 | train |
opendatateam/udata | udata/models/queryset.py | UDataQuerySet.generic_in | def generic_in(self, **kwargs):
'''Bypass buggy GenericReferenceField querying issue'''
query = {}
for key, value in kwargs.items():
if not value:
continue
# Optimize query for when there is only one value
if isinstance(value, (list, tuple)) an... | python | def generic_in(self, **kwargs):
'''Bypass buggy GenericReferenceField querying issue'''
query = {}
for key, value in kwargs.items():
if not value:
continue
# Optimize query for when there is only one value
if isinstance(value, (list, tuple)) an... | [
"def",
"generic_in",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"value",
":",
"continue",
"# Optimize query for when there is only one value"... | Bypass buggy GenericReferenceField querying issue | [
"Bypass",
"buggy",
"GenericReferenceField",
"querying",
"issue"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/queryset.py#L75-L98 | train |
opendatateam/udata | udata/core/issues/notifications.py | issues_notifications | def issues_notifications(user):
'''Notify user about open issues'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = issues_for(user).only('id', 'title', 'created', 'subject')
# Do not dereference subject (so it... | python | def issues_notifications(user):
'''Notify user about open issues'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = issues_for(user).only('id', 'title', 'created', 'subject')
# Do not dereference subject (so it... | [
"def",
"issues_notifications",
"(",
"user",
")",
":",
"notifications",
"=",
"[",
"]",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
"qs",
"=",
"issues_for",
"(",
"user",
")",
".",
"only",
"(",
"'id'",
","... | Notify user about open issues | [
"Notify",
"user",
"about",
"open",
"issues"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/issues/notifications.py#L15-L35 | train |
opendatateam/udata | udata/features/identicon/backends.py | get_config | def get_config(key):
'''
Get an identicon configuration parameter.
Precedence order is:
- application config (`udata.cfg`)
- theme config
- default
'''
key = 'AVATAR_{0}'.format(key.upper())
local_config = current_app.config.get(key)
return local_config or getattr(th... | python | def get_config(key):
'''
Get an identicon configuration parameter.
Precedence order is:
- application config (`udata.cfg`)
- theme config
- default
'''
key = 'AVATAR_{0}'.format(key.upper())
local_config = current_app.config.get(key)
return local_config or getattr(th... | [
"def",
"get_config",
"(",
"key",
")",
":",
"key",
"=",
"'AVATAR_{0}'",
".",
"format",
"(",
"key",
".",
"upper",
"(",
")",
")",
"local_config",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"key",
")",
"return",
"local_config",
"or",
"getattr",
"(... | Get an identicon configuration parameter.
Precedence order is:
- application config (`udata.cfg`)
- theme config
- default | [
"Get",
"an",
"identicon",
"configuration",
"parameter",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L41-L52 | train |
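
The precedence described in `get_config` (application config first, then theme config, then a built-in default) is a plain chained fallback. A self-contained sketch with dicts standing in for `current_app.config`, the theme object and the defaults (all three dicts and their values are invented for the example):

```python
APP_CONFIG = {'AVATAR_PROVIDER': 'internal'}        # stands in for current_app.config
THEME_CONFIG = {'AVATAR_PROVIDER': 'adorable', 'AVATAR_SIZE': 32}
DEFAULTS = {'AVATAR_PROVIDER': 'internal', 'AVATAR_SIZE': 100}


def get_config(key):
    key = 'AVATAR_{0}'.format(key.upper())
    # Application config wins, then the theme, then the default.
    return APP_CONFIG.get(key) or THEME_CONFIG.get(key) or DEFAULTS.get(key)


assert get_config('provider') == 'internal'  # taken from the app config
assert get_config('size') == 32              # falls back to the theme config
```
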
opendatateam/udata | udata/features/identicon/backends.py | get_provider | def get_provider():
'''Get the current provider from config'''
name = get_config('provider')
available = entrypoints.get_all('udata.avatars')
if name not in available:
raise ValueError('Unknown avatar provider: {0}'.format(name))
return available[name] | python | def get_provider():
'''Get the current provider from config'''
name = get_config('provider')
available = entrypoints.get_all('udata.avatars')
if name not in available:
raise ValueError('Unknown avatar provider: {0}'.format(name))
return available[name] | [
"def",
"get_provider",
"(",
")",
":",
"name",
"=",
"get_config",
"(",
"'provider'",
")",
"available",
"=",
"entrypoints",
".",
"get_all",
"(",
"'udata.avatars'",
")",
"if",
"name",
"not",
"in",
"available",
":",
"raise",
"ValueError",
"(",
"'Unknown avatar pro... | Get the current provider from config | [
"Get",
"the",
"current",
"provider",
"from",
"config"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L59-L65 | train |
opendatateam/udata | udata/features/identicon/backends.py | generate_pydenticon | def generate_pydenticon(identifier, size):
'''
Use pydenticon to generate an identicon image.
All parameters are extracted from configuration.
'''
blocks_size = get_internal_config('size')
foreground = get_internal_config('foreground')
background = get_internal_config('background')
gener... | python | def generate_pydenticon(identifier, size):
'''
Use pydenticon to generate an identicon image.
All parameters are extracted from configuration.
'''
blocks_size = get_internal_config('size')
foreground = get_internal_config('foreground')
background = get_internal_config('background')
gener... | [
"def",
"generate_pydenticon",
"(",
"identifier",
",",
"size",
")",
":",
"blocks_size",
"=",
"get_internal_config",
"(",
"'size'",
")",
"foreground",
"=",
"get_internal_config",
"(",
"'foreground'",
")",
"background",
"=",
"get_internal_config",
"(",
"'background'",
... | Use pydenticon to generate an identicon image.
All parameters are extracted from configuration. | [
"Use",
"pydenticon",
"to",
"generate",
"an",
"identicon",
"image",
".",
"All",
"parameters",
"are",
"extracted",
"from",
"configuration",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L80-L100 | train |
opendatateam/udata | udata/features/identicon/backends.py | adorable | def adorable(identifier, size):
'''
Adorable Avatars provider
Simply redirect to the external API.
See: http://avatars.adorable.io/
'''
url = ADORABLE_AVATARS_URL.format(identifier=identifier, size=size)
return redirect(url) | python | def adorable(identifier, size):
'''
Adorable Avatars provider
Simply redirect to the external API.
See: http://avatars.adorable.io/
'''
url = ADORABLE_AVATARS_URL.format(identifier=identifier, size=size)
return redirect(url) | [
"def",
"adorable",
"(",
"identifier",
",",
"size",
")",
":",
"url",
"=",
"ADORABLE_AVATARS_URL",
".",
"format",
"(",
"identifier",
"=",
"identifier",
",",
"size",
"=",
"size",
")",
"return",
"redirect",
"(",
"url",
")"
] | Adorable Avatars provider
Simply redirect to the external API.
See: http://avatars.adorable.io/ | [
"Adorable",
"Avatars",
"provider"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/identicon/backends.py#L116-L125 | train |
opendatateam/udata | udata/core/dataset/commands.py | licenses | def licenses(source=DEFAULT_LICENSE_FILE):
'''Feed the licenses from a JSON file'''
if source.startswith('http'):
json_licenses = requests.get(source).json()
else:
with open(source) as fp:
json_licenses = json.load(fp)
if len(json_licenses):
log.info('Dropping existi... | python | def licenses(source=DEFAULT_LICENSE_FILE):
'''Feed the licenses from a JSON file'''
if source.startswith('http'):
json_licenses = requests.get(source).json()
else:
with open(source) as fp:
json_licenses = json.load(fp)
if len(json_licenses):
log.info('Dropping existi... | [
"def",
"licenses",
"(",
"source",
"=",
"DEFAULT_LICENSE_FILE",
")",
":",
"if",
"source",
".",
"startswith",
"(",
"'http'",
")",
":",
"json_licenses",
"=",
"requests",
".",
"get",
"(",
"source",
")",
".",
"json",
"(",
")",
"else",
":",
"with",
"open",
"... | Feed the licenses from a JSON file | [
"Feed",
"the",
"licenses",
"from",
"a",
"JSON",
"file"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/commands.py#L30-L64 | train |
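
The `licenses` command starts by loading its JSON source either over HTTP or from a local path. That loading step, isolated as a hedged sketch (the `load_licenses` name is invented; the `requests.get(...).json()` call is standard `requests` usage):

```python
import json

import requests


def load_licenses(source):
    """Return the parsed JSON payload from an HTTP URL or a local file path."""
    if source.startswith('http'):
        return requests.get(source).json()
    with open(source) as fp:
        return json.load(fp)
```
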
opendatateam/udata | udata/core/spatial/forms.py | ZonesField.fetch_objects | def fetch_objects(self, geoids):
'''
Custom object retrieval.
Zones are resolved from their identifier
instead of the default bulk fetch by ID.
'''
zones = []
no_match = []
for geoid in geoids:
zone = GeoZone.objects.resolve(geoid)
... | python | def fetch_objects(self, geoids):
'''
Custom object retrieval.
Zones are resolved from their identifier
instead of the default bulk fetch by ID.
'''
zones = []
no_match = []
for geoid in geoids:
zone = GeoZone.objects.resolve(geoid)
... | [
"def",
"fetch_objects",
"(",
"self",
",",
"geoids",
")",
":",
"zones",
"=",
"[",
"]",
"no_match",
"=",
"[",
"]",
"for",
"geoid",
"in",
"geoids",
":",
"zone",
"=",
"GeoZone",
".",
"objects",
".",
"resolve",
"(",
"geoid",
")",
"if",
"zone",
":",
"zon... | Custom object retrieval.
Zones are resolved from their identifier
instead of the default bulk fetch by ID. | [
"Custom",
"object",
"retrieval",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/forms.py#L34-L55 | train |
opendatateam/udata | tasks_helpers.py | lrun | def lrun(command, *args, **kwargs):
'''Run a local command from project root'''
return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs) | python | def lrun(command, *args, **kwargs):
'''Run a local command from project root'''
return run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs) | [
"def",
"lrun",
"(",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"run",
"(",
"'cd {0} && {1}'",
".",
"format",
"(",
"ROOT",
",",
"command",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Run a local command from project root | [
"Run",
"a",
"local",
"command",
"from",
"project",
"root"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/tasks_helpers.py#L37-L39 | train |
opendatateam/udata | udata/harvest/backends/dcat.py | DcatBackend.initialize | def initialize(self):
'''List all datasets for a given ...'''
fmt = guess_format(self.source.url)
# if format can't be guessed from the url
# we fallback on the declared Content-Type
if not fmt:
response = requests.head(self.source.url)
mime_type = respons... | python | def initialize(self):
'''List all datasets for a given ...'''
fmt = guess_format(self.source.url)
# if format can't be guessed from the url
# we fallback on the declared Content-Type
if not fmt:
response = requests.head(self.source.url)
mime_type = respons... | [
"def",
"initialize",
"(",
"self",
")",
":",
"fmt",
"=",
"guess_format",
"(",
"self",
".",
"source",
".",
"url",
")",
"# if format can't be guessed from the url",
"# we fallback on the declared Content-Type",
"if",
"not",
"fmt",
":",
"response",
"=",
"requests",
".",... | List all datasets for a given ... | [
"List",
"all",
"datasets",
"for",
"a",
"given",
"..."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/dcat.py#L51-L67 | train |
opendatateam/udata | udata/commands/worker.py | get_tasks | def get_tasks():
'''Get a list of known tasks with their routing queue'''
return {
name: get_task_queue(name, cls)
for name, cls in celery.tasks.items()
# Exclude celery internal tasks
if not name.startswith('celery.')
# Exclude udata test tasks
and not name.start... | python | def get_tasks():
'''Get a list of known tasks with their routing queue'''
return {
name: get_task_queue(name, cls)
for name, cls in celery.tasks.items()
# Exclude celery internal tasks
if not name.startswith('celery.')
# Exclude udata test tasks
and not name.start... | [
"def",
"get_tasks",
"(",
")",
":",
"return",
"{",
"name",
":",
"get_task_queue",
"(",
"name",
",",
"cls",
")",
"for",
"name",
",",
"cls",
"in",
"celery",
".",
"tasks",
".",
"items",
"(",
")",
"# Exclude celery internal tasks",
"if",
"not",
"name",
".",
... | Get a list of known tasks with their routing queue | [
"Get",
"a",
"list",
"of",
"known",
"tasks",
"with",
"their",
"routing",
"queue"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L97-L106 | train |
opendatateam/udata | udata/commands/worker.py | tasks | def tasks():
'''Display registered tasks with their queue'''
tasks = get_tasks()
longest = max(tasks.keys(), key=len)
size = len(longest)
for name, queue in sorted(tasks.items()):
print('* {0}: {1}'.format(name.ljust(size), queue)) | python | def tasks():
'''Display registered tasks with their queue'''
tasks = get_tasks()
longest = max(tasks.keys(), key=len)
size = len(longest)
for name, queue in sorted(tasks.items()):
print('* {0}: {1}'.format(name.ljust(size), queue)) | [
"def",
"tasks",
"(",
")",
":",
"tasks",
"=",
"get_tasks",
"(",
")",
"longest",
"=",
"max",
"(",
"tasks",
".",
"keys",
"(",
")",
",",
"key",
"=",
"len",
")",
"size",
"=",
"len",
"(",
"longest",
")",
"for",
"name",
",",
"queue",
"in",
"sorted",
"... | Display registered tasks with their queue | [
"Display",
"registered",
"tasks",
"with",
"their",
"queue"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L110-L116 | train |
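
The `tasks` command above aligns its output by padding every task name to the width of the longest one. The same trick, shown stand-alone with made-up task names and queues:

```python
tasks = {'harvest': 'low', 'send-mail': 'default', 'purge-datasets': 'low'}

size = len(max(tasks.keys(), key=len))   # width of the longest name
for name, queue in sorted(tasks.items()):
    print('* {0}: {1}'.format(name.ljust(size), queue))
```
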
opendatateam/udata | udata/commands/worker.py | status | def status(queue, munin, munin_config):
"""List queued tasks aggregated by name"""
if munin_config:
return status_print_config(queue)
queues = get_queues(queue)
for queue in queues:
status_print_queue(queue, munin=munin)
if not munin:
print('-' * 40) | python | def status(queue, munin, munin_config):
"""List queued tasks aggregated by name"""
if munin_config:
return status_print_config(queue)
queues = get_queues(queue)
for queue in queues:
status_print_queue(queue, munin=munin)
if not munin:
print('-' * 40) | [
"def",
"status",
"(",
"queue",
",",
"munin",
",",
"munin_config",
")",
":",
"if",
"munin_config",
":",
"return",
"status_print_config",
"(",
"queue",
")",
"queues",
"=",
"get_queues",
"(",
"queue",
")",
"for",
"queue",
"in",
"queues",
":",
"status_print_queu... | List queued tasks aggregated by name | [
"List",
"queued",
"tasks",
"aggregated",
"by",
"name"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/worker.py#L125-L133 | train |
opendatateam/udata | udata/forms/fields.py | FieldHelper.pre_validate | def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | python | def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | [
"def",
"pre_validate",
"(",
"self",
",",
"form",
")",
":",
"for",
"preprocessor",
"in",
"self",
".",
"_preprocessors",
":",
"preprocessor",
"(",
"form",
",",
"self",
")",
"super",
"(",
"FieldHelper",
",",
"self",
")",
".",
"pre_validate",
"(",
"form",
")... | Calls preprocessors before pre_validation | [
"Calls",
"preprocessors",
"before",
"pre_validation"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L55-L59 | train |
opendatateam/udata | udata/forms/fields.py | EmptyNone.process_formdata | def process_formdata(self, valuelist):
'''Replace empty values by None'''
super(EmptyNone, self).process_formdata(valuelist)
self.data = self.data or None | python | def process_formdata(self, valuelist):
'''Replace empty values by None'''
super(EmptyNone, self).process_formdata(valuelist)
self.data = self.data or None | [
"def",
"process_formdata",
"(",
"self",
",",
"valuelist",
")",
":",
"super",
"(",
"EmptyNone",
",",
"self",
")",
".",
"process_formdata",
"(",
"valuelist",
")",
"self",
".",
"data",
"=",
"self",
".",
"data",
"or",
"None"
] | Replace empty values by None | [
"Replace",
"empty",
"values",
"by",
"None"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L67-L70 | train |
opendatateam/udata | udata/forms/fields.py | ModelList.fetch_objects | def fetch_objects(self, oids):
'''
This method is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize the objects retrieval.
'''
objects = self.model.objects.in_bulk(oid... | python | def fetch_objects(self, oids):
'''
This method is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize the objects retrieval.
'''
objects = self.model.objects.in_bulk(oid... | [
"def",
"fetch_objects",
"(",
"self",
",",
"oids",
")",
":",
"objects",
"=",
"self",
".",
"model",
".",
"objects",
".",
"in_bulk",
"(",
"oids",
")",
"if",
"len",
"(",
"objects",
".",
"keys",
"(",
")",
")",
"!=",
"len",
"(",
"oids",
")",
":",
"non_... | This methods is used to fetch models
from a list of identifiers.
Default implementation performs a bulk query on identifiers.
Override this method to customize the objects retrieval. | [
"This",
"methods",
"is",
"used",
"to",
"fetch",
"models",
"from",
"a",
"list",
"of",
"identifiers",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L487-L503 | train |
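
Both `fetch_objects` implementations above resolve a list of identifiers and then report the ones that did not match anything. A minimal sketch of that bulk-lookup-plus-missing-ids check, with a dict standing in for `Model.objects.in_bulk()` (the `DB` contents are invented):

```python
DB = {1: 'dataset-a', 2: 'dataset-b'}  # stand-in for Model.objects.in_bulk(oids)


def fetch_objects(oids):
    found = {oid: DB[oid] for oid in oids if oid in DB}
    if len(found) != len(oids):
        missing = ', '.join(str(oid) for oid in oids if oid not in found)
        raise ValueError('Unknown identifiers: ' + missing)
    return [found[oid] for oid in oids]


assert fetch_objects([2, 1]) == ['dataset-b', 'dataset-a']
```
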
opendatateam/udata | udata/forms/fields.py | NestedModelList.validate | def validate(self, form, extra_validators=tuple()):
'''Perform validation only if data has been submitted'''
if not self.has_data:
return True
if self.is_list_data:
if not isinstance(self._formdata[self.name], (list, tuple)):
return False
return su... | python | def validate(self, form, extra_validators=tuple()):
'''Perform validation only if data has been submitted'''
if not self.has_data:
return True
if self.is_list_data:
if not isinstance(self._formdata[self.name], (list, tuple)):
return False
return su... | [
"def",
"validate",
"(",
"self",
",",
"form",
",",
"extra_validators",
"=",
"tuple",
"(",
")",
")",
":",
"if",
"not",
"self",
".",
"has_data",
":",
"return",
"True",
"if",
"self",
".",
"is_list_data",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
... | Perform validation only if data has been submitted | [
"Perform",
"validation",
"only",
"if",
"data",
"has",
"been",
"submitted"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L531-L538 | train |
opendatateam/udata | udata/forms/fields.py | NestedModelList._add_entry | def _add_entry(self, formdata=None, data=unset_value, index=None):
'''
Fill the form with previous data if necessary to handle partial update
'''
if formdata:
prefix = '-'.join((self.name, str(index)))
basekey = '-'.join((prefix, '{0}'))
idkey = baseke... | python | def _add_entry(self, formdata=None, data=unset_value, index=None):
'''
Fill the form with previous data if necessary to handle partial update
'''
if formdata:
prefix = '-'.join((self.name, str(index)))
basekey = '-'.join((prefix, '{0}'))
idkey = baseke... | [
"def",
"_add_entry",
"(",
"self",
",",
"formdata",
"=",
"None",
",",
"data",
"=",
"unset_value",
",",
"index",
"=",
"None",
")",
":",
"if",
"formdata",
":",
"prefix",
"=",
"'-'",
".",
"join",
"(",
"(",
"self",
".",
"name",
",",
"str",
"(",
"index",... | Fill the form with previous data if necessary to handle partial update | [
"Fill",
"the",
"form",
"with",
"previous",
"data",
"if",
"necessary",
"to",
"handle",
"partial",
"update"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L564-L587 | train |
opendatateam/udata | udata/forms/fields.py | ExtrasField.parse | def parse(self, data):
'''Parse fields and store individual errors'''
self.field_errors = {}
return dict(
(k, self._parse_value(k, v)) for k, v in data.items()
) | python | def parse(self, data):
'''Parse fields and store individual errors'''
self.field_errors = {}
return dict(
(k, self._parse_value(k, v)) for k, v in data.items()
) | [
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"field_errors",
"=",
"{",
"}",
"return",
"dict",
"(",
"(",
"k",
",",
"self",
".",
"_parse_value",
"(",
"k",
",",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
... | Parse fields and store individual errors | [
"Parse",
"fields",
"and",
"store",
"individual",
"errors"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/forms/fields.py#L733-L738 | train |
opendatateam/udata | udata/core/metrics/commands.py | update | def update(site=False, organizations=False, users=False, datasets=False,
reuses=False):
'''Update all metrics for the current date'''
do_all = not any((site, organizations, users, datasets, reuses))
if do_all or site:
log.info('Update site metrics')
update_site_metrics()
if ... | python | def update(site=False, organizations=False, users=False, datasets=False,
reuses=False):
'''Update all metrics for the current date'''
do_all = not any((site, organizations, users, datasets, reuses))
if do_all or site:
log.info('Update site metrics')
update_site_metrics()
if ... | [
"def",
"update",
"(",
"site",
"=",
"False",
",",
"organizations",
"=",
"False",
",",
"users",
"=",
"False",
",",
"datasets",
"=",
"False",
",",
"reuses",
"=",
"False",
")",
":",
"do_all",
"=",
"not",
"any",
"(",
"(",
"site",
",",
"organizations",
","... | Update all metrics for the current date | [
"Update",
"all",
"metrics",
"for",
"the",
"current",
"date"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L37-L66 | train |
opendatateam/udata | udata/core/metrics/commands.py | list | def list():
'''List all known metrics'''
for cls, metrics in metric_catalog.items():
echo(white(cls.__name__))
for metric in metrics.keys():
echo('> {0}'.format(metric)) | python | def list():
'''List all known metrics'''
for cls, metrics in metric_catalog.items():
echo(white(cls.__name__))
for metric in metrics.keys():
echo('> {0}'.format(metric)) | [
"def",
"list",
"(",
")",
":",
"for",
"cls",
",",
"metrics",
"in",
"metric_catalog",
".",
"items",
"(",
")",
":",
"echo",
"(",
"white",
"(",
"cls",
".",
"__name__",
")",
")",
"for",
"metric",
"in",
"metrics",
".",
"keys",
"(",
")",
":",
"echo",
"(... | List all known metrics | [
"List",
"all",
"known",
"metrics"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/commands.py#L70-L76 | train |
opendatateam/udata | udata/api/commands.py | json_to_file | def json_to_file(data, filename, pretty=False):
'''Dump JSON data to a file'''
kwargs = dict(indent=4) if pretty else {}
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
dump = json.dumps(api.__schema__, **kwargs)
with open(filename, 'wb') as f:
... | python | def json_to_file(data, filename, pretty=False):
'''Dump JSON data to a file'''
kwargs = dict(indent=4) if pretty else {}
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
dump = json.dumps(api.__schema__, **kwargs)
with open(filename, 'wb') as f:
... | [
"def",
"json_to_file",
"(",
"data",
",",
"filename",
",",
"pretty",
"=",
"False",
")",
":",
"kwargs",
"=",
"dict",
"(",
"indent",
"=",
"4",
")",
"if",
"pretty",
"else",
"{",
"}",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
"... | Dump JSON data to a file | [
"Dump",
"JSON",
"data",
"to",
"a",
"file"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L24-L32 | train |
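
One detail worth flagging in the `json_to_file` record above: the body serializes `api.__schema__` rather than the `data` argument it receives, so the helper is less generic than its docstring suggests. A hedged sketch of what the docstring describes (a generic dump of `data`, creating parent directories on demand):

```python
import json
import os


def json_to_file(data, filename, pretty=False):
    """Dump *data* as JSON into *filename*, creating parent directories."""
    kwargs = dict(indent=4) if pretty else {}
    dirname = os.path.dirname(filename)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    with open(filename, 'w') as f:
        f.write(json.dumps(data, **kwargs))
```
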
opendatateam/udata | udata/api/commands.py | postman | def postman(filename, pretty, urlvars, swagger):
'''Dump the API as a Postman collection'''
data = api.as_postman(urlvars=urlvars, swagger=swagger)
json_to_file(data, filename, pretty) | python | def postman(filename, pretty, urlvars, swagger):
'''Dump the API as a Postman collection'''
data = api.as_postman(urlvars=urlvars, swagger=swagger)
json_to_file(data, filename, pretty) | [
"def",
"postman",
"(",
"filename",
",",
"pretty",
",",
"urlvars",
",",
"swagger",
")",
":",
"data",
"=",
"api",
".",
"as_postman",
"(",
"urlvars",
"=",
"urlvars",
",",
"swagger",
"=",
"swagger",
")",
"json_to_file",
"(",
"data",
",",
"filename",
",",
"... | Dump the API as a Postman collection | [
"Dump",
"the",
"API",
"as",
"a",
"Postman",
"collection"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/api/commands.py#L49-L52 | train |
opendatateam/udata | udata/core/badges/tasks.py | notify_badge_added_certified | def notify_badge_added_certified(sender, kind=''):
'''
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
'''
if kind == CERTIFIED and isinstan... | python | def notify_badge_added_certified(sender, kind=''):
'''
Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded.
'''
if kind == CERTIFIED and isinstan... | [
"def",
"notify_badge_added_certified",
"(",
"sender",
",",
"kind",
"=",
"''",
")",
":",
"if",
"kind",
"==",
"CERTIFIED",
"and",
"isinstance",
"(",
"sender",
",",
"Organization",
")",
":",
"recipients",
"=",
"[",
"member",
".",
"user",
"for",
"member",
"in"... | Send an email when a `CERTIFIED` badge is added to an `Organization`
Parameters
----------
sender
The object that emitted the event.
kind: str
The kind of `Badge` object awarded. | [
"Send",
"an",
"email",
"when",
"a",
"CERTIFIED",
"badge",
"is",
"added",
"to",
"an",
"Organization"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/tasks.py#L27-L50 | train |
opendatateam/udata | udata/core/discussions/notifications.py | discussions_notifications | def discussions_notifications(user):
'''Notify user about open discussions'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = discussions_for(user).only('id', 'created', 'title', 'subject')
# Do not dereference... | python | def discussions_notifications(user):
'''Notify user about open discussions'''
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = discussions_for(user).only('id', 'created', 'title', 'subject')
# Do not dereference... | [
"def",
"discussions_notifications",
"(",
"user",
")",
":",
"notifications",
"=",
"[",
"]",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
"qs",
"=",
"discussions_for",
"(",
"user",
")",
".",
"only",
"(",
"'i... | Notify user about open discussions | [
"Notify",
"user",
"about",
"open",
"discussions"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/discussions/notifications.py#L15-L35 | train |
opendatateam/udata | udata/tracking.py | send_signal | def send_signal(signal, request, user, **kwargs):
'''Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
'''
params = {
'user_ip': request.remote_addr
}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id
... | python | def send_signal(signal, request, user, **kwargs):
'''Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance.
'''
params = {
'user_ip': request.remote_addr
}
params.update(kwargs)
if user.is_authenticated:
params['uid'] = user.id
... | [
"def",
"send_signal",
"(",
"signal",
",",
"request",
",",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'user_ip'",
":",
"request",
".",
"remote_addr",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"if",
"user",
".",
"is_authentic... | Generic method to send signals to Piwik
given that we always have to compute IP and UID for instance. | [
"Generic",
"method",
"to",
"send",
"signals",
"to",
"Piwik"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tracking.py#L5-L16 | train |
opendatateam/udata | udata/core/organization/notifications.py | membership_request_notifications | def membership_request_notifications(user):
'''Notify user about pending membership requests'''
orgs = [o for o in user.organizations if o.is_admin(user)]
notifications = []
for org in orgs:
for request in org.pending_requests:
notifications.append((request.created, {
... | python | def membership_request_notifications(user):
'''Notify user about pending membership requests'''
orgs = [o for o in user.organizations if o.is_admin(user)]
notifications = []
for org in orgs:
for request in org.pending_requests:
notifications.append((request.created, {
... | [
"def",
"membership_request_notifications",
"(",
"user",
")",
":",
"orgs",
"=",
"[",
"o",
"for",
"o",
"in",
"user",
".",
"organizations",
"if",
"o",
".",
"is_admin",
"(",
"user",
")",
"]",
"notifications",
"=",
"[",
"]",
"for",
"org",
"in",
"orgs",
":",... | Notify user about pending membership requests | [
"Notify",
"user",
"about",
"pending",
"membership",
"requests"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/organization/notifications.py#L13-L30 | train |
opendatateam/udata | udata/harvest/commands.py | validate | def validate(identifier):
'''Validate a source given its identifier'''
source = actions.validate_source(identifier)
log.info('Source %s (%s) has been validated', source.slug, str(source.id)) | python | def validate(identifier):
'''Validate a source given its identifier'''
source = actions.validate_source(identifier)
log.info('Source %s (%s) has been validated', source.slug, str(source.id)) | [
"def",
"validate",
"(",
"identifier",
")",
":",
"source",
"=",
"actions",
".",
"validate_source",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Source %s (%s) has been validated'",
",",
"source",
".",
"slug",
",",
"str",
"(",
"source",
".",
"id",
")",
... | Validate a source given its identifier | [
"Validate",
"a",
"source",
"given",
"its",
"identifier"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L48-L51 | train |
opendatateam/udata | udata/harvest/commands.py | delete | def delete(identifier):
'''Delete a harvest source'''
log.info('Deleting source "%s"', identifier)
actions.delete_source(identifier)
log.info('Deleted source "%s"', identifier) | python | def delete(identifier):
'''Delete a harvest source'''
log.info('Deleting source "%s"', identifier)
actions.delete_source(identifier)
log.info('Deleted source "%s"', identifier) | [
"def",
"delete",
"(",
"identifier",
")",
":",
"log",
".",
"info",
"(",
"'Deleting source \"%s\"'",
",",
"identifier",
")",
"actions",
".",
"delete_source",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Deleted source \"%s\"'",
",",
"identifier",
")"
] | Delete a harvest source | [
"Delete",
"a",
"harvest",
"source"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L55-L59 | train |
opendatateam/udata | udata/harvest/commands.py | sources | def sources(scheduled=False):
'''List all harvest sources'''
sources = actions.list_sources()
if scheduled:
sources = [s for s in sources if s.periodic_task]
if sources:
for source in sources:
msg = '{source.name} ({source.backend}): {cron}'
if source.periodic_tas... | python | def sources(scheduled=False):
'''List all harvest sources'''
sources = actions.list_sources()
if scheduled:
sources = [s for s in sources if s.periodic_task]
if sources:
for source in sources:
msg = '{source.name} ({source.backend}): {cron}'
if source.periodic_tas... | [
"def",
"sources",
"(",
"scheduled",
"=",
"False",
")",
":",
"sources",
"=",
"actions",
".",
"list_sources",
"(",
")",
"if",
"scheduled",
":",
"sources",
"=",
"[",
"s",
"for",
"s",
"in",
"sources",
"if",
"s",
".",
"periodic_task",
"]",
"if",
"sources",
... | List all harvest sources | [
"List",
"all",
"harvest",
"sources"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L65-L81 | train |
opendatateam/udata | udata/harvest/commands.py | backends | def backends():
'''List available backends'''
log.info('Available backends:')
for backend in actions.list_backends():
log.info('%s (%s)', backend.name, backend.display_name or backend.name) | python | def backends():
'''List available backends'''
log.info('Available backends:')
for backend in actions.list_backends():
log.info('%s (%s)', backend.name, backend.display_name or backend.name) | [
"def",
"backends",
"(",
")",
":",
"log",
".",
"info",
"(",
"'Available backends:'",
")",
"for",
"backend",
"in",
"actions",
".",
"list_backends",
"(",
")",
":",
"log",
".",
"info",
"(",
"'%s (%s)'",
",",
"backend",
".",
"name",
",",
"backend",
".",
"di... | List available backends | [
"List",
"available",
"backends"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L85-L89 | train |
opendatateam/udata | udata/harvest/commands.py | schedule | def schedule(identifier, **kwargs):
'''Schedule a harvest job to run periodically'''
source = actions.schedule(identifier, **kwargs)
msg = 'Scheduled {source.name} with the following crontab: {cron}'
log.info(msg.format(source=source, cron=source.periodic_task.crontab)) | python | def schedule(identifier, **kwargs):
'''Schedule a harvest job to run periodically'''
source = actions.schedule(identifier, **kwargs)
msg = 'Scheduled {source.name} with the following crontab: {cron}'
log.info(msg.format(source=source, cron=source.periodic_task.crontab)) | [
"def",
"schedule",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"actions",
".",
"schedule",
"(",
"identifier",
",",
"*",
"*",
"kwargs",
")",
"msg",
"=",
"'Scheduled {source.name} with the following crontab: {cron}'",
"log",
".",
"info",
... | Schedule a harvest job to run periodically | [
"Schedule",
"a",
"harvest",
"job",
"to",
"run",
"periodically"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L120-L124 | train |
opendatateam/udata | udata/harvest/commands.py | unschedule | def unschedule(identifier):
'''Unschedule a periodical harvest job'''
source = actions.unschedule(identifier)
log.info('Unscheduled harvest source "%s"', source.name) | python | def unschedule(identifier):
'''Unschedule a periodical harvest job'''
source = actions.unschedule(identifier)
log.info('Unscheduled harvest source "%s"', source.name) | [
"def",
"unschedule",
"(",
"identifier",
")",
":",
"source",
"=",
"actions",
".",
"unschedule",
"(",
"identifier",
")",
"log",
".",
"info",
"(",
"'Unscheduled harvest source \"%s\"'",
",",
"source",
".",
"name",
")"
] | Unschedule a periodical harvest job | [
"Unschedule",
"a",
"periodical",
"harvest",
"job"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L129-L132 | train |
opendatateam/udata | udata/harvest/commands.py | attach | def attach(domain, filename):
'''
Attach existing datasets to their harvest remote id
Mapping between identifiers should be in FILENAME CSV file.
'''
log.info('Attaching datasets for domain %s', domain)
result = actions.attach(domain, filename)
log.info('Attached %s datasets to %s', result.... | python | def attach(domain, filename):
'''
Attach existing datasets to their harvest remote id
Mapping between identifiers should be in FILENAME CSV file.
'''
log.info('Attaching datasets for domain %s', domain)
result = actions.attach(domain, filename)
log.info('Attached %s datasets to %s', result.... | [
"def",
"attach",
"(",
"domain",
",",
"filename",
")",
":",
"log",
".",
"info",
"(",
"'Attaching datasets for domain %s'",
",",
"domain",
")",
"result",
"=",
"actions",
".",
"attach",
"(",
"domain",
",",
"filename",
")",
"log",
".",
"info",
"(",
"'Attached ... | Attach existing datasets to their harvest remote id
Mapping between identifiers should be in FILENAME CSV file. | [
"Attach",
"existing",
"datasets",
"to",
"their",
"harvest",
"remote",
"id"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/commands.py#L146-L154 | train |
opendatateam/udata | udata/features/transfer/actions.py | request_transfer | def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different than the current owner')
transfer = Transfer.objects.creat... | python | def request_transfer(subject, recipient, comment):
'''Initiate a transfer request'''
TransferPermission(subject).test()
if recipient == (subject.organization or subject.owner):
raise ValueError(
'Recipient should be different than the current owner')
transfer = Transfer.objects.creat... | [
"def",
"request_transfer",
"(",
"subject",
",",
"recipient",
",",
"comment",
")",
":",
"TransferPermission",
"(",
"subject",
")",
".",
"test",
"(",
")",
"if",
"recipient",
"==",
"(",
"subject",
".",
"organization",
"or",
"subject",
".",
"owner",
")",
":",
... | Initiate a transfer request | [
"Initiate",
"a",
"transfer",
"request"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L19-L31 | train |
opendatateam/udata | udata/features/transfer/actions.py | accept_transfer | def accept_transfer(transfer, comment=None):
'''Accept an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'accepted'
transfer.response_comment = comment
t... | python | def accept_transfer(transfer, comment=None):
'''Accept an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'accepted'
transfer.response_comment = comment
t... | [
"def",
"accept_transfer",
"(",
"transfer",
",",
"comment",
"=",
"None",
")",
":",
"TransferResponsePermission",
"(",
"transfer",
")",
".",
"test",
"(",
")",
"transfer",
".",
"responded",
"=",
"datetime",
".",
"now",
"(",
")",
"transfer",
".",
"responder",
... | Accept an incoming transfer request | [
"Accept",
"an",
"incoming",
"a",
"transfer",
"request"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L35-L54 | train |
opendatateam/udata | udata/features/transfer/actions.py | refuse_transfer | def refuse_transfer(transfer, comment=None):
'''Refuse an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'refused'
transfer.response_comment = comment
tr... | python | def refuse_transfer(transfer, comment=None):
'''Refuse an incoming transfer request'''
TransferResponsePermission(transfer).test()
transfer.responded = datetime.now()
transfer.responder = current_user._get_current_object()
transfer.status = 'refused'
transfer.response_comment = comment
tr... | [
"def",
"refuse_transfer",
"(",
"transfer",
",",
"comment",
"=",
"None",
")",
":",
"TransferResponsePermission",
"(",
"transfer",
")",
".",
"test",
"(",
")",
"transfer",
".",
"responded",
"=",
"datetime",
".",
"now",
"(",
")",
"transfer",
".",
"responder",
... | Refuse an incoming transfer request | [
"Refuse",
"an",
"incoming",
"a",
"transfer",
"request"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/transfer/actions.py#L58-L68 | train |
opendatateam/udata | udata/core/metrics/models.py | WithMetrics.clean | def clean(self):
'''Fill metrics with defaults on create'''
if not self.metrics:
self.metrics = dict(
(name, spec.default)
for name, spec in (metric_catalog.get(self.__class__, {})
.items()))
return supe... | python | def clean(self):
'''Fill metrics with defaults on create'''
if not self.metrics:
self.metrics = dict(
(name, spec.default)
for name, spec in (metric_catalog.get(self.__class__, {})
.items()))
return supe... | [
"def",
"clean",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"metrics",
":",
"self",
".",
"metrics",
"=",
"dict",
"(",
"(",
"name",
",",
"spec",
".",
"default",
")",
"for",
"name",
",",
"spec",
"in",
"(",
"metric_catalog",
".",
"get",
"(",
"s... | Fill metrics with defaults on create | [
"Fill",
"metrics",
"with",
"defaults",
"on",
"create"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/models.py#L44-L51 | train |
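
`WithMetrics.clean` only seeds `self.metrics` with the per-class defaults registered in a catalog. The same default-seeding step sketched without mongoengine (the catalog contents and the `Dataset` class below are invented for the example):

```python
# {class name: {metric name: default value}}, invented for illustration.
METRIC_CATALOG = {'Dataset': {'views': 0, 'followers': 0}}


class Dataset(object):
    def __init__(self):
        self.metrics = None

    def clean(self):
        # Fill metrics with defaults only when nothing is set yet.
        if not self.metrics:
            self.metrics = dict(METRIC_CATALOG.get(type(self).__name__, {}))
        return self


assert Dataset().clean().metrics == {'views': 0, 'followers': 0}
```
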
opendatateam/udata | udata/core/site/rdf.py | build_catalog | def build_catalog(site, datasets, format=None):
'''Build the DCAT catalog for this site'''
site_url = url_for('site.home_redirect', _external=True)
catalog_url = url_for('site.rdf_catalog', _external=True)
graph = Graph(namespace_manager=namespace_manager)
catalog = graph.resource(URIRef(catalog_url... | python | def build_catalog(site, datasets, format=None):
'''Build the DCAT catalog for this site'''
site_url = url_for('site.home_redirect', _external=True)
catalog_url = url_for('site.rdf_catalog', _external=True)
graph = Graph(namespace_manager=namespace_manager)
catalog = graph.resource(URIRef(catalog_url... | [
"def",
"build_catalog",
"(",
"site",
",",
"datasets",
",",
"format",
"=",
"None",
")",
":",
"site_url",
"=",
"url_for",
"(",
"'site.home_redirect'",
",",
"_external",
"=",
"True",
")",
"catalog_url",
"=",
"url_for",
"(",
"'site.rdf_catalog'",
",",
"_external",... | Build the DCAT catalog for this site | [
"Build",
"the",
"DCAT",
"catalog",
"for",
"this",
"site"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/site/rdf.py#L15-L68 | train |
opendatateam/udata | udata/patch_flask_security.py | sendmail_proxy | def sendmail_proxy(subject, email, template, **context):
"""Cast the lazy_gettext'ed subject to string before passing to Celery"""
sendmail.delay(subject.value, email, template, **context) | python | def sendmail_proxy(subject, email, template, **context):
"""Cast the lazy_gettext'ed subject to string before passing to Celery"""
sendmail.delay(subject.value, email, template, **context) | [
"def",
"sendmail_proxy",
"(",
"subject",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")",
":",
"sendmail",
".",
"delay",
"(",
"subject",
".",
"value",
",",
"email",
",",
"template",
",",
"*",
"*",
"context",
")"
] | Cast the lazy_gettext'ed subject to string before passing to Celery | [
"Cast",
"the",
"lazy_gettext",
"ed",
"subject",
"to",
"string",
"before",
"passing",
"to",
"Celery"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/patch_flask_security.py#L18-L20 | train |
opendatateam/udata | udata/commands/static.py | collect | def collect(path, no_input):
'''Collect static files'''
if exists(path):
msg = '"%s" directory already exists and will be erased'
log.warning(msg, path)
if not no_input:
click.confirm('Are you sure?', abort=True)
log.info('Deleting static directory "%s"', path)
... | python | def collect(path, no_input):
'''Collect static files'''
if exists(path):
msg = '"%s" directory already exists and will be erased'
log.warning(msg, path)
if not no_input:
click.confirm('Are you sure?', abort=True)
log.info('Deleting static directory "%s"', path)
... | [
"def",
"collect",
"(",
"path",
",",
"no_input",
")",
":",
"if",
"exists",
"(",
"path",
")",
":",
"msg",
"=",
"'\"%s\" directory already exists and will be erased'",
"log",
".",
"warning",
"(",
"msg",
",",
"path",
")",
"if",
"not",
"no_input",
":",
"click",
... | Collect static files | [
"Collect",
"static",
"files"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/static.py#L24-L59 | train |
opendatateam/udata | udata/harvest/notifications.py | validate_harvester_notifications | def validate_harvester_notifications(user):
'''Notify admins about pending harvester validation'''
if not user.sysadmin:
return []
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = HarvestSource.objects(v... | python | def validate_harvester_notifications(user):
'''Notify admins about pending harvester validation'''
if not user.sysadmin:
return []
notifications = []
# Only fetch required fields for notification serialization
# Greatly improve performances and memory usage
qs = HarvestSource.objects(v... | [
"def",
"validate_harvester_notifications",
"(",
"user",
")",
":",
"if",
"not",
"user",
".",
"sysadmin",
":",
"return",
"[",
"]",
"notifications",
"=",
"[",
"]",
"# Only fetch required fields for notification serialization",
"# Greatly improve performances and memory usage",
... | Notify admins about pending harvester validation | [
"Notify",
"admins",
"about",
"pending",
"harvester",
"validation"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/notifications.py#L14-L32 | train |
opendatateam/udata | udata/harvest/backends/__init__.py | get | def get(app, name):
'''Get a backend given its name'''
backend = get_all(app).get(name)
if not backend:
msg = 'Harvest backend "{0}" is not registered'.format(name)
raise EntrypointError(msg)
return backend | python | def get(app, name):
'''Get a backend given its name'''
backend = get_all(app).get(name)
if not backend:
msg = 'Harvest backend "{0}" is not registered'.format(name)
raise EntrypointError(msg)
return backend | [
"def",
"get",
"(",
"app",
",",
"name",
")",
":",
"backend",
"=",
"get_all",
"(",
"app",
")",
".",
"get",
"(",
"name",
")",
"if",
"not",
"backend",
":",
"msg",
"=",
"'Harvest backend \"{0}\" is not registered'",
".",
"format",
"(",
"name",
")",
"raise",
... | Get a backend given its name | [
"Get",
"a",
"backend",
"given",
"its",
"name"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/__init__.py#L7-L13 | train |
opendatateam/udata | udata/core/topic/views.py | TopicSearchMixin.search | def search(self):
'''Override search to match on topic tags'''
s = super(TopicSearchMixin, self).search()
s = s.filter('bool', should=[
Q('term', tags=tag) for tag in self.topic.tags
])
return s | python | def search(self):
'''Override search to match on topic tags'''
s = super(TopicSearchMixin, self).search()
s = s.filter('bool', should=[
Q('term', tags=tag) for tag in self.topic.tags
])
return s | [
"def",
"search",
"(",
"self",
")",
":",
"s",
"=",
"super",
"(",
"TopicSearchMixin",
",",
"self",
")",
".",
"search",
"(",
")",
"s",
"=",
"s",
".",
"filter",
"(",
"'bool'",
",",
"should",
"=",
"[",
"Q",
"(",
"'term'",
",",
"tags",
"=",
"tag",
")... | Override search to match on topic tags | [
"Override",
"search",
"to",
"match",
"on",
"topic",
"tags"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/topic/views.py#L25-L31 | train |
opendatateam/udata | udata/core/reuse/models.py | Reuse.clean | def clean(self):
'''Auto populate urlhash from url'''
if not self.urlhash or 'url' in self._get_changed_fields():
self.urlhash = hash_url(self.url)
super(Reuse, self).clean() | python | def clean(self):
'''Auto populate urlhash from url'''
if not self.urlhash or 'url' in self._get_changed_fields():
self.urlhash = hash_url(self.url)
super(Reuse, self).clean() | [
"def",
"clean",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"urlhash",
"or",
"'url'",
"in",
"self",
".",
"_get_changed_fields",
"(",
")",
":",
"self",
".",
"urlhash",
"=",
"hash_url",
"(",
"self",
".",
"url",
")",
"super",
"(",
"Reuse",
",",
"... | Auto populate urlhash from url | [
"Auto",
"populate",
"urlhash",
"from",
"url"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/reuse/models.py#L126-L130 | train |
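
`Reuse.clean` keeps a `urlhash` field derived from `url`. The `hash_url` helper is not part of this dump, so the sketch below substitutes a SHA-1 digest as an explicit assumption; only the recompute-when-needed logic mirrors the record above:

```python
import hashlib


def hash_url(url):
    # Assumption: stand-in for udata's hash_url, which may use another scheme.
    return hashlib.sha1(url.encode('utf-8')).hexdigest()


class Reuse(object):
    def __init__(self, url):
        self.url = url
        self.urlhash = None

    def clean(self):
        # Recompute the hash when it is missing or when the URL changed.
        if not self.urlhash or self.urlhash != hash_url(self.url):
            self.urlhash = hash_url(self.url)
        return self


reuse = Reuse('https://example.org/reuse').clean()
assert reuse.urlhash == hash_url(reuse.url)
```
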
opendatateam/udata | udata/commands/serve.py | serve | def serve(info, host, port, reload, debugger, eager_loading, with_threads):
'''
Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at a... | python | def serve(info, host, port, reload, debugger, eager_loading, with_threads):
'''
Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at a... | [
"def",
"serve",
"(",
"info",
",",
"host",
",",
"port",
",",
"reload",
",",
"debugger",
",",
"eager_loading",
",",
"with_threads",
")",
":",
"# Werkzeug logger is special and is required",
"# with this configuration for development server",
"logger",
"=",
"logging",
".",... | Runs a local udata development server.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments.
By default it will not support any sort of concurrency at all
to simplify debugging.
This can be changed with the --with-threads option whi... | [
"Runs",
"a",
"local",
"udata",
"development",
"server",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/serve.py#L38-L77 | train |
opendatateam/udata | udata/core/dataset/forms.py | enforce_filetype_file | def enforce_filetype_file(form, field):
'''Only allowed domains in resource.url when filetype is file'''
if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
return
domain = urlparse(field.data).netloc
allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
allowed_... | python | def enforce_filetype_file(form, field):
'''Only allowed domains in resource.url when filetype is file'''
if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE:
return
domain = urlparse(field.data).netloc
allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS']
allowed_... | [
"def",
"enforce_filetype_file",
"(",
"form",
",",
"field",
")",
":",
"if",
"form",
".",
"_fields",
".",
"get",
"(",
"'filetype'",
")",
".",
"data",
"!=",
"RESOURCE_FILETYPE_FILE",
":",
"return",
"domain",
"=",
"urlparse",
"(",
"field",
".",
"data",
")",
... | Only allowed domains in resource.url when filetype is file | [
"Only",
"allowed",
"domains",
"in",
"resource",
".",
"url",
"when",
"filetype",
"is",
"file"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L36-L51 | train |
opendatateam/udata | udata/core/dataset/forms.py | map_legacy_frequencies | def map_legacy_frequencies(form, field):
''' Map legacy frequencies to new ones'''
if field.data in LEGACY_FREQUENCIES:
field.data = LEGACY_FREQUENCIES[field.data] | python | def map_legacy_frequencies(form, field):
''' Map legacy frequencies to new ones'''
if field.data in LEGACY_FREQUENCIES:
field.data = LEGACY_FREQUENCIES[field.data] | [
"def",
"map_legacy_frequencies",
"(",
"form",
",",
"field",
")",
":",
"if",
"field",
".",
"data",
"in",
"LEGACY_FREQUENCIES",
":",
"field",
".",
"data",
"=",
"LEGACY_FREQUENCIES",
"[",
"field",
".",
"data",
"]"
] | Map legacy frequencies to new ones | [
"Map",
"legacy",
"frequencies",
"to",
"new",
"ones"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/forms.py#L101-L104 | train |
opendatateam/udata | udata/core/user/models.py | User.resources_availability | def resources_availability(self):
"""Return the percentage of availability for resources."""
# Flatten the list.
availabilities = list(
chain(
*[org.check_availability() for org in self.organizations]
)
)
# Filter out the unknown
av... | python | def resources_availability(self):
"""Return the percentage of availability for resources."""
# Flatten the list.
availabilities = list(
chain(
*[org.check_availability() for org in self.organizations]
)
)
# Filter out the unknown
av... | [
"def",
"resources_availability",
"(",
"self",
")",
":",
"# Flatten the list.",
"availabilities",
"=",
"list",
"(",
"chain",
"(",
"*",
"[",
"org",
".",
"check_availability",
"(",
")",
"for",
"org",
"in",
"self",
".",
"organizations",
"]",
")",
")",
"# Filter ... | Return the percentage of availability for resources. | [
"Return",
"the",
"percentage",
"of",
"availability",
"for",
"resources",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L128-L142 | train |
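
`resources_availability` flattens the per-organization availability lists with `itertools.chain`, drops the unknown entries, and (past the truncation, presumably) averages what remains. That flatten-filter-average step, sketched with made-up percentages:

```python
from itertools import chain

per_org = [[100, 80, None], [60, 100]]   # made-up percentages; None = unknown

availabilities = [a for a in chain(*per_org) if a is not None]
average = round(sum(availabilities) / len(availabilities)) if availabilities else 0
assert average == 85
```
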
opendatateam/udata | udata/core/user/models.py | User.datasets_org_count | def datasets_org_count(self):
"""Return the number of datasets of user's organizations."""
from udata.models import Dataset # Circular imports.
return sum(Dataset.objects(organization=org).visible().count()
for org in self.organizations) | python | def datasets_org_count(self):
"""Return the number of datasets of user's organizations."""
from udata.models import Dataset # Circular imports.
return sum(Dataset.objects(organization=org).visible().count()
for org in self.organizations) | [
"def",
"datasets_org_count",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Dataset",
"# Circular imports.",
"return",
"sum",
"(",
"Dataset",
".",
"objects",
"(",
"organization",
"=",
"org",
")",
".",
"visible",
"(",
")",
".",
"count",
"... | Return the number of datasets of user's organizations. | [
"Return",
"the",
"number",
"of",
"datasets",
"of",
"user",
"s",
"organizations",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L145-L149 | train |
opendatateam/udata | udata/core/user/models.py | User.followers_org_count | def followers_org_count(self):
"""Return the number of followers of user's organizations."""
from udata.models import Follow # Circular imports.
return sum(Follow.objects(following=org).count()
for org in self.organizations) | python | def followers_org_count(self):
"""Return the number of followers of user's organizations."""
from udata.models import Follow # Circular imports.
return sum(Follow.objects(following=org).count()
for org in self.organizations) | [
"def",
"followers_org_count",
"(",
"self",
")",
":",
"from",
"udata",
".",
"models",
"import",
"Follow",
"# Circular imports.",
"return",
"sum",
"(",
"Follow",
".",
"objects",
"(",
"following",
"=",
"org",
")",
".",
"count",
"(",
")",
"for",
"org",
"in",
... | Return the number of followers of user's organizations. | [
"Return",
"the",
"number",
"of",
"followers",
"of",
"user",
"s",
"organizations",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/models.py#L152-L156 | train |
opendatateam/udata | udata/core/badges/models.py | BadgeMixin.get_badge | def get_badge(self, kind):
''' Get a badge given its kind if present'''
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None | python | def get_badge(self, kind):
''' Get a badge given its kind if present'''
candidates = [b for b in self.badges if b.kind == kind]
return candidates[0] if candidates else None | [
"def",
"get_badge",
"(",
"self",
",",
"kind",
")",
":",
"candidates",
"=",
"[",
"b",
"for",
"b",
"in",
"self",
".",
"badges",
"if",
"b",
".",
"kind",
"==",
"kind",
"]",
"return",
"candidates",
"[",
"0",
"]",
"if",
"candidates",
"else",
"None"
] | Get a badge given its kind if present | [
"Get",
"a",
"badge",
"given",
"its",
"kind",
"if",
"present"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L54-L57 | train |
opendatateam/udata | udata/core/badges/models.py | BadgeMixin.add_badge | def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.f... | python | def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.f... | [
"def",
"add_badge",
"(",
"self",
",",
"kind",
")",
":",
"badge",
"=",
"self",
".",
"get_badge",
"(",
"kind",
")",
"if",
"badge",
":",
"return",
"badge",
"if",
"kind",
"not",
"in",
"getattr",
"(",
"self",
",",
"'__badges__'",
",",
"{",
"}",
")",
":"... | Perform an atomic prepend for a new badge | [
"Perform",
"an",
"atomic",
"prepend",
"for",
"a",
"new",
"badge"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L59-L83 | train |
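The code column of this record is cut off after the validation error, so only the check-then-add flow and the "atomic prepend" docstring are visible. The sketch below mirrors that visible flow with an in-memory list; it is not the udata implementation, and the MongoDB update that performs the actual atomic prepend is not reproduced here.

class BadgeHolder(object):
    # Allowed kinds, standing in for the class-level __badges__ mapping.
    __badges__ = {"featured": "Featured", "certified": "Certified"}

    def __init__(self):
        self.badges = []

    def get_badge(self, kind):
        candidates = [b for b in self.badges if b == kind]
        return candidates[0] if candidates else None

    def add_badge(self, kind):
        # Return the existing badge if there is one, refuse unknown kinds,
        # otherwise prepend the new badge to the list.
        existing = self.get_badge(kind)
        if existing:
            return existing
        if kind not in self.__badges__:
            raise ValueError('Unknown badge type: {kind}'.format(kind=kind))
        self.badges.insert(0, kind)
        return kind

holder = BadgeHolder()
holder.add_badge("featured")
print(holder.badges)  # ['featured']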
opendatateam/udata | udata/core/badges/models.py | BadgeMixin.remove_badge | def remove_badge(self, kind):
'''Perform an atomic removal for a given badge'''
self.update(__raw__={
'$pull': {
'badges': {'kind': kind}
}
})
self.reload()
on_badge_removed.send(self, kind=kind)
post_save.send(self.__class__, docum... | python | def remove_badge(self, kind):
'''Perform an atomic removal for a given badge'''
self.update(__raw__={
'$pull': {
'badges': {'kind': kind}
}
})
self.reload()
on_badge_removed.send(self, kind=kind)
post_save.send(self.__class__, docum... | [
"def",
"remove_badge",
"(",
"self",
",",
"kind",
")",
":",
"self",
".",
"update",
"(",
"__raw__",
"=",
"{",
"'$pull'",
":",
"{",
"'badges'",
":",
"{",
"'kind'",
":",
"kind",
"}",
"}",
"}",
")",
"self",
".",
"reload",
"(",
")",
"on_badge_removed",
"... | Perform an atomic removal for a given badge | [
"Perform",
"an",
"atomic",
"removal",
"for",
"a",
"given",
"badge"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L85-L94 | train |
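remove_badge issues a raw $pull update, reloads the document and fires the on_badge_removed and post_save signals. The fragment below shows the same $pull operator sent directly with pymongo, purely to illustrate what the raw update does; the client, database, collection and document id are placeholders, and a reachable MongoDB instance is assumed.

from pymongo import MongoClient

client = MongoClient()                          # assumes a local MongoDB instance
collection = client["example_db"]["datasets"]   # placeholder database/collection names

# Same shape as the raw update in the record: drop every embedded badge
# whose kind matches, in a single server-side update.
collection.update_one(
    {"_id": "placeholder-document-id"},
    {"$pull": {"badges": {"kind": "featured"}}},
)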
opendatateam/udata | udata/core/badges/models.py | BadgeMixin.toggle_badge | def toggle_badge(self, kind):
'''Toggle a badge given its kind'''
badge = self.get_badge(kind)
if badge:
return self.remove_badge(kind)
else:
return self.add_badge(kind) | python | def toggle_badge(self, kind):
'''Toggle a badge given its kind'''
badge = self.get_badge(kind)
if badge:
return self.remove_badge(kind)
else:
return self.add_badge(kind) | [
"def",
"toggle_badge",
"(",
"self",
",",
"kind",
")",
":",
"badge",
"=",
"self",
".",
"get_badge",
"(",
"kind",
")",
"if",
"badge",
":",
"return",
"self",
".",
"remove_badge",
"(",
"kind",
")",
"else",
":",
"return",
"self",
".",
"add_badge",
"(",
"k... | Toggle a bdage given its kind | [
"Toggle",
"a",
"bdage",
"given",
"its",
"kind"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L96-L102 | train |
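toggle_badge removes the badge when it is already present and adds it otherwise. The same toggle over a plain set, as a quick self-contained illustration (the set stands in for the embedded badge list; the kinds are invented):

def toggle_badge(kinds, kind):
    # kinds is a plain set standing in for the document's badge list.
    if kind in kinds:
        kinds.discard(kind)  # mirrors remove_badge
    else:
        kinds.add(kind)      # mirrors add_badge
    return kinds

kinds = {"certified"}
print(sorted(toggle_badge(kinds, "featured")))  # ['certified', 'featured']
print(sorted(toggle_badge(kinds, "featured")))  # ['certified']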
opendatateam/udata | udata/core/badges/models.py | BadgeMixin.badge_label | def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | python | def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | [
"def",
"badge_label",
"(",
"self",
",",
"badge",
")",
":",
"kind",
"=",
"badge",
".",
"kind",
"if",
"isinstance",
"(",
"badge",
",",
"Badge",
")",
"else",
"badge",
"return",
"self",
".",
"__badges__",
"[",
"kind",
"]"
] | Display the badge label for a given kind | [
"Display",
"the",
"badge",
"label",
"for",
"a",
"given",
"kind"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L104-L107 | train |
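badge_label accepts either a Badge document or a bare kind string and resolves it through the class-level __badges__ mapping. A self-contained version of that lookup, with invented kinds and labels:

class Badge(object):
    def __init__(self, kind):
        self.kind = kind

def badge_label(badge, labels):
    # Accept either a Badge instance or a bare kind string, like the record above.
    kind = badge.kind if isinstance(badge, Badge) else badge
    return labels[kind]

labels = {"featured": "Featured", "certified": "Certified"}
print(badge_label(Badge("featured"), labels))  # Featured
print(badge_label("certified", labels))        # Certified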