Schema: sentence1 (string, lengths 52 to 3.87M); sentence2 (string, lengths 1 to 47.2k); label (string, 1 class).
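The rows below pair a function definition (sentence1) with its extracted docstring (sentence2) under a single entailment label. A minimal sketch of how such rows could be consumed, assuming a plain list of dicts (the loading mechanism is hypothetical; only the three-column schema above is taken from the stats):

    rows = [
        {
            "sentence1": "def unique(iterable): ...",  # full function source
            "sentence2": "Returns a list copy in which each item occurs only once (in-order).",
            "label": "entailment",  # the only class in this split
        },
    ]

    for row in rows:
        assert row["label"] == "entailment"
        print(row["sentence2"][:60])  # preview the docstring side of the pair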
def get_plugins() -> Dict[str, pkg_resources.EntryPoint]: """ Get all available plugins for unidown. :return: dict of plugin names to entry points :rtype: Dict[str, ~pkg_resources.EntryPoint] """ return {entry.name: entry for entry in pkg_resources.iter_entry_points('unidown.plugin')}
Get all available plugins for unidown. :return: dict of plugin names to entry points :rtype: Dict[str, ~pkg_resources.EntryPoint]
entailment
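A usage sketch for get_plugins() above; the plugin name 'example' is hypothetical and resolves only if a matching 'unidown.plugin' entry point is installed:

    from typing import Dict

    import pkg_resources

    def get_plugins() -> Dict[str, pkg_resources.EntryPoint]:
        # Map every registered 'unidown.plugin' entry point by name.
        return {entry.name: entry for entry in pkg_resources.iter_entry_points('unidown.plugin')}

    plugins = get_plugins()
    if 'example' in plugins:  # hypothetical plugin name
        plugin_class = plugins['example'].load()  # import and return the registered object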
def _equation_of_time(t): """ Find the difference between apparent and mean solar time Parameters ---------- t : `~astropy.time.Time` times (array) Returns ---------- ret1 : `~astropy.units.Quantity` the equation of time """ # Julian centuries since J2000.0 ...
Find the difference between apparent and mean solar time Parameters ---------- t : `~astropy.time.Time` times (array) Returns ---------- ret1 : `~astropy.units.Quantity` the equation of time
entailment
def _astropy_time_from_LST(t, LST, location, prev_next): """ Convert a Local Sidereal Time to an astropy Time object. The local time is related to the LST through the RA of the Sun. This routine uses this relationship to convert an LST to an astropy time object. Returns ------- ret1 : `...
Convert a Local Sidereal Time to an astropy Time object. The local time is related to the LST through the RA of the Sun. This routine uses this relationship to convert an LST to an astropy time object. Returns ------- ret1 : `~astropy.time.Time` time corresponding to LST
entailment
def _rise_set_trig(t, target, location, prev_next, rise_set): """ Crude time at next rise/set of ``target`` using spherical trig. This method is ~15 times faster than `_calcriseset`, and inherently does *not* take the atmosphere into account. The time returned should not be used in calculations; t...
Crude time at next rise/set of ``target`` using spherical trig. This method is ~15 times faster than `_calcriseset`, and inherently does *not* take the atmosphere into account. The time returned should not be used in calculations; the purpose of this routine is to supply a guess to `_calcriseset`. ...
entailment
def calc_riseset(t, target_name, location, prev_next, rise_set, horizon): """ Time at next rise/set of ``target``. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initia...
Time at next rise/set of ``target``. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including ...
entailment
def _horiz_cross(t, alt, rise_set, horizon=0*u.degree): """ Find time ``t`` when values in array ``alt`` go from negative to positive or positive to negative (excluding endpoints). ``return_limits`` will return the nearest times to the zero-crossing. Parameters ---------- t : `~astropy.time.Time` ...
Find time ``t`` when values in array ``alt`` go from negative to positive or positive to negative (excluding endpoints). ``return_limits`` will return the nearest times to the zero-crossing. Parameters ---------- t : `~astropy.time.Time` Grid of times alt : `~astropy.units.Quantity` Grid of...
entailment
def _two_point_interp(times, altitudes, horizon=0*u.deg): """ Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between ...
Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between altitudes : array of `~astropy.units.Quantity` Two altitu...
entailment
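A sketch of the linear interpolation _two_point_interp() describes, using plain floats rather than astropy Time/Quantity objects (an assumption; the real routine operates on those types):

    def two_point_interp(t0, t1, alt0, alt1, horizon=0.0):
        # Line through (t0, alt0) and (t1, alt1); solve for altitude == horizon.
        slope = (alt1 - alt0) / (t1 - t0)
        return t0 + (horizon - alt0) / slope

    # Altitude rising from -1 deg to +1 deg over one time unit crosses at the midpoint.
    assert two_point_interp(0.0, 1.0, -1.0, 1.0) == 0.5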
def init_dirs(main_dir: Path, logfilepath: Path): """ Initialize the main directories. :param main_dir: main directory :type main_dir: ~pathlib.Path :param logfilepath: log file :type logfilepath: ~pathlib.Path """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH ...
Initialize the main directories. :param main_dir: main directory :type main_dir: ~pathlib.Path :param logfilepath: log file :type logfilepath: ~pathlib.Path
entailment
def reset(): """ Reset all dynamic variables to the default values. """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH, USING_CORES, LOG_LEVEL, DISABLE_TQDM, \ SAVE_STATE_VERSION MAIN_DIR = Path('./') TEMP_DIR = MAIN_DIR.joinpath(Path('temp/')) DOWNLOAD_DIR = MAI...
Reset all dynamic variables to the default values.
entailment
def check_dirs(): """ Check that the directories, if they exist, are in fact directories. :raises FileExistsError: if a file exists but is not a directory """ dirs = [MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR] for directory in dirs: if directory.exists() and not directory.is_dir(): raise FileExistsEr...
Check that the directories, if they exist, are in fact directories. :raises FileExistsError: if a file exists but is not a directory
entailment
def parse_hstring(hs): """ Parse a single item from the telescope server into name, value, comment. """ # split the string on = and /, also stripping whitespace and annoying quotes name, value, comment = yield_three( [val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))] ...
Parse a single item from the telescope server into name, value, comment.
entailment
def create_header_from_telpars(telpars): """ Create a list of FITS header items from GTC telescope pars. The GTC telescope server gives a list of strings describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescop...
Create a list of FITS header items from GTC telescope pars. The GTC telescope server gives a list of strings describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams
entailment
def add_gtc_header_table_row(t, telpars): """ Add a row with current values to the GTC table Arguments --------- t : `~astropy.table.Table` The table to append the row to telpars : list list returned by server call to getTelescopeParams """ now = Time.now().mjd hdr = create_...
Add a row with current values to the GTC table Arguments --------- t : `~astropy.table.Table` The table to append the row to telpars : list list returned by server call to getTelescopeParams
entailment
def from_protobuf(cls, proto: PluginInfoProto) -> PluginInfo: """ Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.plugin_info_pb2.PluginInfoProto :return: the PluginInfo :rtype: ~unidown.plugin.plugin_info.PluginInfo ...
Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.plugin_info_pb2.PluginInfoProto :return: the PluginInfo :rtype: ~unidown.plugin.plugin_info.PluginInfo :raises ValueError: name of PluginInfo does not exist or is empty inside the p...
entailment
def to_protobuf(self) -> PluginInfoProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.link_item_pb2.PluginInfoProto """ proto = PluginInfoProto() proto.name = self.name proto.version = str(self.version) p...
Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.link_item_pb2.PluginInfoProto
entailment
def from_protobuf(cls, proto: SaveStateProto) -> SaveState: """ Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto :return: the SaveState ...
Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto :return: the SaveState :rtype: ~unidown.plugin.save_state.SaveState :raises ValueError: vers...
entailment
def to_protobuf(self) -> SaveStateProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto """ result = SaveStateProto() result.version = str(self.version) result.last_update.CopyFrom(date...
Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto
entailment
def definite_article(word, gender=MALE, role=SUBJECT): """ Returns the definite article (der/die/das/die) for a given word. """ return article_definite.get((gender[:1].lower(), role[:3].lower()))
Returns the definite article (der/die/das/die) for a given word.
entailment
def indefinite_article(word, gender=MALE, role=SUBJECT): """ Returns the indefinite article (ein) for a given word. """ return article_indefinite.get((gender[:1].lower(), role[:3].lower()))
Returns the indefinite article (ein) for a given word.
entailment
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word. """ return function == DEFINITE \ and definite_article(word, gender, role) \ or indefinite_article(word, gender, role)
Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
entailment
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns a string with the article + the word. """ return "%s %s" % (_article(word, article, gender, role), word)
Returns a string with the article + the word.
entailment
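A reduced sketch of the lookup behind the four article helpers above, restricted to the nominative (subject) role; the full tables in the source also cover the accusative, dative and genitive:

    article_definite = {("m", "sub"): "der", ("f", "sub"): "die",
                        ("n", "sub"): "das", ("p", "sub"): "die"}
    article_indefinite = {("m", "sub"): "ein", ("f", "sub"): "eine",
                          ("n", "sub"): "ein"}

    def referenced(word, gender="male", role="subject"):
        # Key on the first letter of the gender and the first three of the role,
        # mirroring the gender[:1].lower()/role[:3].lower() lookups above.
        return "%s %s" % (article_indefinite[(gender[:1].lower(), role[:3].lower())], word)

    assert referenced("Hund") == "ein Hund"
    assert referenced("Katze", gender="female") == "eine Katze"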
def gender(word, pos=NOUN): """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote). Returns None for words that are not nouns. """ w = word.lower() if pos == NOUN: # Default rules (baseline = 32%). if w.endswith(gender_masculine): return MASCULINE ...
Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote). Returns None for words that are not nouns.
entailment
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the plural of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if pos == NOUN: for a, b i...
Returns the plural of a given word. The inflection is based on probability rather than gender and role.
entailment
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the singular of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if word in singular: r...
Returns the singular of a given word. The inflection is based on probability rather than gender and role.
entailment
def attributive(adjective, gender=MALE, role=SUBJECT, article=None): """ For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative...
For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive).
entailment
def predicative(adjective): """ Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculi...
Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The pre...
entailment
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given (inflected) adjective. """ b = predicative(adjective) # groß => großt, schön => schönst if suffix == SUPERLATIVE and b.endswith(("s", u"ß")): suffix = suffix[1:] # große => großere, sch...
Returns the comparative or superlative form of the given (inflected) adjective.
entailment
def find_lemma(self, verb): """ Returns the base form of the given inflected verb, using a rule-based approach. """ v = verb.lower() # Common prefixes: be-finden and emp-finden probably inflect like finden. if not (v.startswith("ge") and v.endswith("t")): # Probably gerund. ...
Returns the base form of the given inflected verb, using a rule-based approach.
entailment
def find_lexeme(self, verb): """ For a regular verb (base form), returns the forms using a rule-based approach. """ v = verb.lower() # Stem = infinitive minus -en, -ln, -rn. b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v))) # Split common prefixes. ...
For a regular verb (base form), returns the forms using a rule-based approach.
entailment
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ tenses = _Verbs.tenses(self, verb, parse) if len(tenses) == 0: # auswirkte => wirkte aus for prefix in prefix_separable: if verb.startswith(...
Returns a list of possible tenses for the given inflected verb.
entailment
def _get_words_from_dataset(dataset): """Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string or a list of tokens. """ # Words may be either a string or a list of tokens. Return an iterator # of tokens ac...
Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string or a list of tokens.
entailment
def basic_extractor(document, train_set): """A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list ...
A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``.
entailment
def contains_extractor(document): """A basic document feature extractor that returns a dict of words that the document contains.""" tokens = _get_document_tokens(document) features = dict((u'contains({0})'.format(w), True) for w in tokens) return features
A basic document feature extractor that returns a dict of words that the document contains.
entailment
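A usage sketch for contains_extractor() above; the tokenizer below stands in for _get_document_tokens(), which is assumed to lower-case and split the text:

    def _get_document_tokens(document):
        return set(document.lower().split())  # assumption: simple whitespace tokens

    def contains_extractor(document):
        tokens = _get_document_tokens(document)
        return dict(('contains({0})'.format(w), True) for w in tokens)

    features = contains_extractor("I feel happy")
    assert features == {'contains(i)': True, 'contains(feel)': True, 'contains(happy)': True}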
def _read_data(self, dataset, format=None): """Reads a data file and returns an iterable that can be used as testing or training data.""" # Attempt to detect file format if "format" isn't specified if not format: format_class = formats.detect(dataset) else: ...
Reads a data file and returns an iterable that can be used as testing or training data.
entailment
def extract_features(self, text): """Extracts features from a body of text. :rtype: dictionary of features """ # Feature extractor may take one or two arguments try: return self.feature_extractor(text, self.train_set) except (TypeError, AttributeError): ...
Extracts features from a body of text. :rtype: dictionary of features
entailment
def train(self, *args, **kwargs): """Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in ...
Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of ...
entailment
def classify(self, text): """Classifies the text. :param str text: A string of text. """ text_features = self.extract_features(text) return self.classifier.classify(text_features)
Classifies the text. :param str text: A string of text.
entailment
def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, wil...
Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format.
entailment
def update(self, new_data, *args, **kwargs): '''Update the classifier with new training data and re-train the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``. ''' self.train_set += new_data self.train_features = [(self...
Update the classifier with new training data and re-train the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``.
entailment
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> pro...
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' ...
entailment
def train(self, *args, **kwargs): """Train the classifier with labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to al...
Train the classifier with labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``tra...
entailment
def update(self, new_positive_data=None, new_unlabeled_data=None, positive_prob_prior=0.5, *args, **kwargs): '''Update the classifier with new data and re-train the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_da...
Update the classifier with new data and re-train the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings.
entailment
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_di...
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' ...
entailment
def lemmatize(self, text): """Return a list of (lemma, tag) tuples. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] parsed_sentences = self._parse_text(text) _lemmalist = [] for s in ...
Return a list of (lemma, tag) tuples. :param str text: A string.
entailment
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param...
Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string.
entailment
def _match(string, pattern): """ Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression. """ p = pattern try: if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-...
Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression.
entailment
def unique(iterable): """ Returns a list copy in which each item occurs only once (in-order). """ seen = set() return [x for x in iterable if x not in seen and not seen.add(x)]
Returns a list copy in which each item occurs only once (in-order).
entailment
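The comprehension in unique() leans on the fact that set.add() returns None: `not seen.add(x)` is always True, and short-circuiting means the add only runs for unseen items. A quick check:

    def unique(iterable):
        seen = set()
        # `x not in seen` guards the add; `not seen.add(x)` is always True.
        return [x for x in iterable if x not in seen and not seen.add(x)]

    assert unique([3, 1, 3, 2, 1]) == [3, 1, 2]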
def product(*args, **kwargs): """ Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")] ...
Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")]
entailment
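The behaviour documented for product() matches the standard library's itertools.product, shown here for comparison:

    from itertools import product

    assert list(product("cat", repeat=2)) == [
        ("c", "c"), ("c", "a"), ("c", "t"),
        ("a", "c"), ("a", "a"), ("a", "t"),
        ("t", "c"), ("t", "a"), ("t", "t"),
    ]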
def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """ # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?")) # defines a sequence where constraint A and B are optional: # [("A?", "B?", "C"), ("B?", "C"...
Returns all possible variations of a sequence with optional items.
entailment
def compile(pattern, *args, **kwargs): """ Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts). """ id, p = repr(pattern) + repr(args), pattern if id in _cache and not kwargs:...
Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts).
entailment
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it.
entailment
def match(pattern, sentence, *args, **kwargs): """ Returns the first match found in the given sentence, or None. """ return compile(pattern, *args, **kwargs).match(sentence)
Returns the first match found in the given sentence, or None.
entailment
def search(pattern, sentence, *args, **kwargs): """ Returns a list of all matches found in the given sentence. """ return compile(pattern, *args, **kwargs).search(sentence)
Returns a list of all matches found in the given sentence.
entailment
def push(self, kv): """ Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict. """ if kv[0] in self: self.__delitem__(kv[0]) self.__setitem__(kv[0], kv[1])
Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict.
entailment
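A sketch of push() layered on collections.OrderedDict (an assumption; the original class is only known to be dict-like with insertion order). Re-pushing an existing key moves it to the most recently inserted position:

    from collections import OrderedDict

    class RecencyDict(OrderedDict):
        def push(self, kv):
            if kv[0] in self:
                del self[kv[0]]  # drop the old slot so the key is re-inserted fresh
            self[kv[0]] = kv[1]

    d = RecencyDict()
    d.push(("a", 1)); d.push(("b", 2)); d.push(("a", 3))
    assert list(d.items()) == [("b", 2), ("a", 3)]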
def append(self, term, type=None, value=None): """ Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200") """ term = self._normalize(term) ...
Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200")
entailment
def classify(self, term, **kwargs): """ Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers. """ term = self._normalize(term) if dict.__contains__(self, term): re...
Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers.
entailment
def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """ def dfs(term, recursive=False, visited={}, **kwargs): if term in visited: # Break on cyclic relations....
Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root.
entailment
def value(self, term, **kwargs): """ Returns the value of the given term ("many" => "50-200") """ term = self._normalize(term) if term in self._values: return self._values[term] for classifier in self.classifiers: v = classifier.value(term, **kwargs) ...
Returns the value of the given term ("many" => "50-200")
entailment
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ def...
Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS ...
entailment
def match(self, word): """ Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. ...
Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constra...
entailment
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") ...
Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in [].
entailment
def scan(self, string): """ Returns True if search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it. """ # In the following example, first scan the string for "good" and "bad": # p = Pattern.fromstring("good|bad NN"...
Returns True if search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it.
entailment
def search(self, sentence): """ Returns a list of all matches found in the given sentence. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": a=[]; [a.extend(self.search(s)) for s in sen...
Returns a list of all matches found in the given sentence.
entailment
def match(self, sentence, start=0, _v=None, _u=None): """ Returns the first match found in the given sentence, or None. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": return find(lam...
Returns the first match found in the given sentence, or None.
entailment
def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """ if word.index in self._map1: return self._map1[word.index]
Returns the constraint that matches the given Word, or None.
entailment
def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """ a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
Returns a list of constraints that match the given Chunk.
entailment
def constituents(self, constraint=None): """ Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index. """ # Select only words th...
Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index.
entailment
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", S...
Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big bl...
entailment
def analyze(self, text): """Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated ...
Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) b...
entailment
def stts2universal(token, tag): """ Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP """ if tag in ("KON", "KOUI", "KOUS", "KOKOM"): return (token, CONJ) if tag in ("PTKZU", "PTKNEG", "PTKVZ", "PTKANT"): return (token, PRT) if tag in ("PDF", "PDAT"...
Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP
entailment
def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT", "JJ")): lemma = pred...
Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list.
entailment
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token)
Returns a parsed Text from the given parsed string.
entailment
def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) ...
Returns a list of (token, tag)-tuples from the given string.
entailment
def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, top=top, frequency=parser.frequency)
Returns a sorted list of keywords in the given string.
entailment
def sent_tokenize(text, tokenizer=None): """Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. ...
Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org...
entailment
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools ch...
Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator).
entailment
def tokenize(self, text, include_punc=True, nested=False): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as ...
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False.
entailment
def sent_tokenize(self, text, **kwargs): """NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries. """ ...
NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries.
entailment
def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the follow...
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don...
entailment
def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. ...
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by ...
entailment
def tokenize(self, text, include_punc=True, **kwargs): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. """ return self.tokenizer.word_tokenize(text, inc...
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True.
entailment
def parse(self, text): """Parses the text. Keyword arguments of ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string. """ #: Do not pro...
Parses the text. Keyword arguments of ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string.
entailment
def extract(self, text): """Return a list of noun phrases (strings) for a body of text. :param str text: A string. """ _extracted = [] if text.strip() == "": return _extracted parsed_sentences = self._parse_text(text) for s in parsed_sentences: ...
Return a list of noun phrases (strings) for a body of text. :param str text: A string.
entailment
def _filter_extracted(self, extracted_list): """Filter insignificant words for key noun phrase extraction: determiners, relative pronouns, reflexive pronouns. In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered ...
Filter insignificant words for key noun phrase extraction: determiners, relative pronouns, reflexive pronouns. In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) ...
entailment
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/') :par...
Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/') :param str text: A string.
entailment
def tag(self, sentence, tokenize=True): """Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False``, the string has to be tokenized beforehand (space-separated string). """ #: Do not process empty ...
Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False``, the string has to be tokenized beforehand (space-separated string).
entailment
def decode_string(v, encoding="utf-8"): """Returns the given value as a Unicode string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, binary_type): for e in encoding: try: ...
Returns the given value as a Unicode string (if possible).
entailment
def encode_string(v, encoding="utf-8"): """Returns the given value as a Python byte string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, unicode): for e in encoding: try: ...
Returns the given value as a Python byte string (if possible).
entailment
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can b...
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
entailment
def translate(self, from_lang=None, to="de"): """Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``) """ if from_lang is None: from_lang = self.translator.detect(self.string) return self.translator.translate(se...
Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``)
entailment
def lemmatize(self): """Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property. """ _lemmatizer = PatternParserLem...
Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property.
entailment
def tokenize(self, tokenizer=None): """Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer. """ t = tokenizer if tokenizer is not None else self.tokenizer return WordList(t...
Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer.
entailment
def noun_phrases(self): """Returns a list of noun phrases for this blob.""" return WordList([phrase.strip() for phrase in self.np_extractor.extract(self.raw) if len(phrase.split()) > 1])
Returns a list of noun phrases for this blob.
entailment
def pos_tags(self): """Returns a list of tuples of the form (word, POS tag). Example: :: [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'), ('Thursday', 'NNP'), ('morning', 'NN')] :rtype: list of tuples """ return [(Word(...
Returns a list of tuples of the form (word, POS tag). Example: :: [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'), ('Thursday', 'NNP'), ('morning', 'NN')] :rtype: list of tuples
entailment
def word_counts(self): """Dictionary of word frequencies in this text.""" counts = defaultdict(int) stripped_words = [lowerstrip(word) for word in self.words] for word in stripped_words: counts[word] += 1 return counts
Dictionary of word frequencies in this text.
entailment
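A self-contained version of the word_counts property above; the inline lower/strip stands in for lowerstrip(), which is assumed to lower-case and strip surrounding punctuation:

    from collections import defaultdict

    def word_counts(words):
        counts = defaultdict(int)
        for word in (w.lower().strip() for w in words):  # stand-in for lowerstrip()
            counts[word] += 1
        return counts

    assert word_counts(["The", "cat", "the"])["the"] == 2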
def dict(self): """The dict representation of this sentence.""" return { 'raw': self.raw, 'start_index': self.start_index, 'end_index': self.end_index, 'stripped': self.stripped, 'noun_phrases': self.noun_phrases, 'polarity': self.p...
The dict representation of this sentence.
entailment
def words(self): """Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens. """ return WordList( word_tokenize...
Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens.
entailment
def sentiment(self): """Return a tuple of the form (polarity, subjectivity) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Senti...
Return a tuple of the form (polarity, subjectivity) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=...
entailment
def to_json(self, *args, **kwargs): """Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 (``textblob``) """ return json.dumps(self.serialized, *args, **kwargs)
Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 (``textblob``)
entailment
def _create_sentence_objects(self): """Returns a list of Sentence objects from the raw text.""" sentence_objects = [] sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer) char_index = 0 # Keeps track of character index within the blob for sent in sentences: ...
Returns a list of Sentence objects from the raw text.
entailment