code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
l = [] E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"}) for annotationtype, set in self.annotations: label = None #Find the 'label' for the declarations dynamically (aka...
def xmldeclarations(self)
Internal method to generate XML nodes for all declarations
3.918028
3.828393
1.023413
l = [] for annotationtype, set in self.annotations: label = None #Find the 'label' for the declarations dynamically (aka: AnnotationType --> String) for key, value in vars(AnnotationType).items(): if value == annotationtype: ...
def jsondeclarations(self)
Return all declarations in a form ready to be serialised to JSON. Returns: list of dict
5.013553
5.123099
0.978617
self.pendingvalidation() E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"}) attribs = {} attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id #if self.version: ...
def xml(self)
Serialise the document to XML. Returns: lxml.etree.Element See also: :meth:`Document.xmlstring`
4.15994
4.183474
0.994375
self.pendingvalidation() jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations() } if self.version: jsondoc['version'] = self.version else: jsondoc['version'] = FOLIAVERSION jsondoc['generator'] = 'pynlpl.formats.folia-v...
def json(self)
Serialise the document to a ``dict`` ready for serialisation to JSON. Example:: import json jsondoc = json.dumps(doc.json())
7.851553
7.921583
0.99116
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"}) elements = [] if self.metadatatype == "native": if isinstance(self.metadata, NativeMetaData): for key, value in self....
def xmlmetadata(self)
Internal method to serialize metadata to XML
2.635773
2.61233
1.008974
self.metadata = ElementTree.tostring(node, xml_declaration=False, pretty_print=True, encoding='utf-8') else: self.metadata = ElementTree.tostring(node, encoding='utf-8') n = node.xpath('imdi:Session/imdi:Title', namespaces=ns) if n and n[0].text: self._title = n[0].text ...
def setimdi(self, node): #OBSOLETE ns = {'imdi': 'http://www.mpi.nl/IMDI/Schema/IMDI'} self.metadatatype = MetaDataType.IMDI if LXE
OBSOLETE
1.884285
1.86903
1.008162
if (sys.version > '3' and not isinstance(set,str)) or (sys.version < '3' and not isinstance(set,(str,unicode))): raise ValueError("Set parameter for declare() must be a string") if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE i...
def declare(self, annotationtype, set, **kwargs)
Declare a new annotation type to be used in the document. Keyword arguments can be used to set defaults for any annotation of this type and set. Arguments: annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation`...
2.33462
2.340233
0.997601
if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE return ( (annotationtype,set) in self.annotations) or (set in self.alias_set and self.alias_set[set] and (annotationtype, self.alias_set[set]) in self.annotations )
def declared(self, annotationtype, set)
Checks if the annotation type is present (i.e. declared) in the document. Arguments: annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType....
4.291375
4.896905
0.876344
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE try: return list(self.annotationdefaults[annotationtype].keys())[0] except KeyError: raise NoDefaultError except IndexError: ...
def defaultset(self, annotationtype)
Obtain the default set for the specified annotation type. Arguments: annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``. ...
5.508179
4.490763
1.226558
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE if not set: set = self.defaultset(annotationtype) try: return self.annotationdefaults[annotationtype][set]['annotator'] except KeyError: ...
def defaultannotator(self, annotationtype, set=None)
Obtain the default annotator for the specified annotation type and set. Arguments: annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.PO...
5.30407
4.196514
1.263923
if not (value is None): if (self.metadatatype == "native"): self.metadata['title'] = value else: self._title = value if (self.metadatatype == "native"): if 'title' in self.metadata: return self.metadata['title']...
def title(self, value=None)
Get or set the document's title from/in the metadata No arguments: Get the document's title from metadata Argument: Set the document's title in metadata
2.877156
2.896225
0.993416
if not (value is None): if (self.metadatatype == "native"): self.metadata['date'] = value else: self._date = value if (self.metadatatype == "native"): if 'date' in self.metadata: return self.metadata['date'] ...
def date(self, value=None)
Get or set the document's date from/in the metadata. No arguments: Get the document's date from metadata Argument: Set the document's date in metadata
2.998534
2.909766
1.030507
if not (value is None): if (self.metadatatype == "native"): self.metadata['publisher'] = value else: self._publisher = value if (self.metadatatype == "native"): if 'publisher' in self.metadata: return self.metad...
def publisher(self, value=None)
No arguments: Get the document's publisher from metadata Argument: Set the document's publisher in metadata
2.960265
2.717883
1.08918
if not (value is None): if (self.metadatatype == "native"): self.metadata['license'] = value else: self._license = value if (self.metadatatype == "native"): if 'license' in self.metadata: return self.metadata['l...
def license(self, value=None)
No arguments: Get the document's license from metadata Argument: Set the document's license in metadata
2.952225
2.726532
1.082777
if not (value is None): if (self.metadatatype == "native"): self.metadata['language'] = value else: self._language = value if self.metadatatype == "native": if 'language' in self.metadata: return self.metadata['...
def language(self, value=None)
No arguments: Get the document's language (ISO-639-3) from metadata Argument: Set the document's language (ISO-639-3) in metadata
3.051805
2.833565
1.07702
if 'type' in node.attrib: self.metadatatype = node.attrib['type'] else: #no type specified, default to native self.metadatatype = "native" if 'src' in node.attrib: self.metadata = ExternalMetaData(node.attrib['src']) elif self.me...
def parsemetadata(self, node)
Internal method to parse metadata
3.4689
3.450739
1.005263
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr) if warnonly is None and self and self.version: warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5 if self.textvalidation: ...
def pendingvalidation(self, warnonly=None)
Perform any pending validations Parameters: warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5) Returns: bool
6.249314
5.690001
1.098298
if self.mode == Mode.MEMORY: for t in self.data: if Class.__name__ == 'Text': yield t else: for e in t.select(Class,set,recursive,ignore): yield e
def select(self, Class, set=None, recursive=True, ignore=True)
See :meth:`AbstractElement.select`
5.078865
4.814737
1.054858
if self.mode == Mode.MEMORY: s = 0 for t in self.data: s += sum( 1 for e in t.select(Class,recursive,True ) ) return s
def count(self, Class, set=None, recursive=True,ignore=True)
See :meth:`AbstractElement.count`
8.376006
7.993842
1.047807
if index is None: return self.select(Paragraph) else: if index < 0: index = sum(t.count(Paragraph) for t in self.data) + index for t in self.data: for i,e in enumerate(t.select(Paragraph)) : if i == index: ...
def paragraphs(self, index = None)
Return a generator of all paragraphs found in the document. If an index is specified, return the n'th paragraph only (starting at 0)
3.667985
3.669436
0.999605
if index is None: return self.select(Sentence,None,True,[Quote]) else: if index < 0: index = sum(t.count(Sentence,None,True,[Quote]) for t in self.data) + index for t in self.data: for i,e in enumerate(t.select(Sentence,None,Tr...
def sentences(self, index = None)
Return a generator of all sentence found in the document. Except for sentences in quotes. If an index is specified, return the n'th sentence only (starting at 0)
4.06069
3.785156
1.072793
#backward compatibility, old versions didn't have cls as first argument, so if a boolean is passed first we interpret it as the 2nd: if cls is True or cls is False: retaintokenisation = cls cls = 'current' s = "" for c in self.data: if s: s ...
def text(self, cls='current', retaintokenisation=False)
Returns the text of the entire document (returns a unicode instance) See also: :meth:`AbstractElement.text`
5.648828
5.998115
0.941767
if not nextstate in processedstates: self._states(nextstate, processedstates) for _, nextstate in state.transitions: if not nextstate in processedstates: self._states(nextstate, processedstates) return processedstates
def _states(self, state, processedstates=[]): #pylint: disable=dangerous-default-value processedstates.append(state) for nextstate in state.epsilon
Iterate over all states in no particular order
3.301553
2.324134
1.420552
if 'debug' in kwargs: if 'currentdebug' in kwargs: if kwargs['currentdebug'] < kwargs['debug']: return False else: return False #no currentdebug passed, assuming no debug mode and thus skipping message s = "[" + datetime.datetime.now().strftime("%Y-%...
def log(msg, **kwargs)
Generic log method. Will prepend timestamp. Keyword arguments: system - Name of the system/module indent - Integer denoting the desired level of indentation streams - List of streams to output to stream - Stream to output to (singleton version of streams)
3.350699
3.146059
1.065047
finalsolution = None bestscore = None for solution in self: if bestscore == None: bestscore = solution.score() finalsolution = solution elif self.minimize: score = solution.score() if score < bestsco...
def searchbest(self)
Returns the single best result (if multiple have the same score, the first match is returned)
2.211629
2.082187
1.062166
solutions = PriorityQueue([], lambda x: x.score, self.minimize, length=n, blockworse=False, blockequal=False,duplicates=False) for solution in self: solutions.append(solution) return solutions
def searchtop(self,n=10)
Return the top n best resulta (or possibly less if not enough is found)
17.739511
15.993224
1.109189
solutions = deque([], n) for solution in self: solutions.append(solution) return solutions
def searchlast(self,n=10)
Return the last n results (or possibly less if not found). Note that the last results are not necessarily the best ones! Depending on the search type.
9.863415
8.865089
1.112613
if not isinstance(lemma,unicode): lemma = unicode(lemma,'utf-8') http, resp, content = self.connect() params = "" fragment = "" path = "cdb_syn" if self.debug: printf( "cornettodb/views/query_remote_syn_lemma: db_opt: %s" % path ) ...
def get_syn_ids_by_lemma(self, lemma)
Returns a list of synset IDs based on a lemma
3.831795
3.827914
1.001014
http, resp, content = self.connect() params = "" fragment = "" path = "cdb_syn" if self.debug: printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path ) # output_opt: plain, html, xml # 'xml' is actually xhtml (with markup), bu...
def get_synset_xml(self,syn_id)
call cdb_syn with synset identifier -> returns the synset xml;
3.823879
3.731251
1.024825
root = self.get_synset_xml(syn_id) elem_synonyms = root.find( ".//synonyms" ) lus = [] for elem_synonym in elem_synonyms: synonym_str = elem_synonym.get( "c_lu_id-previewtext" ) # get "c_lu_id-previewtext" attribute # synonym_str ends with ":<nu...
def get_lus_from_synset(self, syn_id)
Returns a list of (word, lu_id) tuples given a synset ID
4.913917
4.496503
1.092831
if not lemma: return self.get_lus_from_synset(syn_id) #alias if not isinstance(lemma,unicode): lemma = unicode(lemma,'utf-8') root = self.get_synset_xml(syn_id) elem_synonyms = root.find( ".//synonyms" ) lu_id = None synonyms = [] ...
def get_lu_from_synset(self, syn_id, lemma = None)
Returns (lu_id, synonyms=[(word, lu_id)] ) tuple given a synset ID and a lemma
3.56635
3.245774
1.098767
l = [] for word_id, senses,distance in self: for sense, confidence in senses: if not sense in l: l.append(sense) if bestonly: break return l
def senses(self, bestonly=False)
Returns a list of all predicted senses
5.897015
5.76519
1.022866
if isinstance(input_data, list) or isinstance(input_data, tuple): input_data = " ".join(input_data) input_data = u(input_data, source_encoding) #decode (or preferably do this in an earlier stage) input_data = input_data.strip(' \t\n') s = input_data.encode(self.s...
def process(self,input_data, source_encoding="utf-8", return_unicode = True, oldfrog=False)
Receives input_data in the form of a str or unicode object, passes this to the server, with proper consideration for the encodings, and returns the Frog output as a list of tuples: (word,pos,lemma,morphology), each of these is a proper unicode object unless return_unicode is set to False, in which case raw strings will...
3.473105
3.278679
1.0593
alignment = [] cursor = 0 for inputword in inputwords: if len(outputwords) > cursor and outputwords[cursor] == inputword: alignment.append(cursor) cursor += 1 elif len(outputwords) > cursor+1 and outputwords[cursor+1] == inputword:...
def align(self,inputwords, outputwords)
For each inputword, provides the index of the outputword
1.918264
1.880726
1.019959
needle = tuple(needle) haystack = tuple(haystack) solutions = [] #equality check if needle == haystack: return [(needle, 2)] if allowpartial: minl =1 else: minl = len(needle) for l in range(minl,min(len(needle), len(haystack))+1): #print "LEFT-DEBU...
def calculate_overlap(haystack, needle, allowpartial=True)
Calculate the overlap between two sequences. Yields (overlap, placement) tuples (multiple because there may be multiple overlaps!). The former is the part of the sequence that overlaps, and the latter is -1 if the overlap is on the left side, 0 if it is a subset, 1 if it overlaps on the right side, 2 if its an identica...
3.628165
3.38038
1.073301
for i,regexp in list(enumerate(regexps)): if isstring(regexp): regexps[i] = re.compile(regexp) tokens = [] begin = 0 for i, c in enumerate(text): if begin > i: continue elif i == begin: m = False for regexp in regexps: ...
def tokenize(text, regexps=TOKENIZERRULES)
Tokenizes a string and returns a list of tokens :param text: The text to tokenise :type text: string :param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_) :type regexps: Tuple/list of regular expressions to use in tokenisation ...
3.367037
3.65463
0.921307
begin = 0 for i, token in enumerate(tokens): if is_end_of_sentence(tokens, i): yield tokens[begin:i+1] begin = i+1 if begin <= len(tokens)-1: yield tokens[begin:]
def split_sentences(tokens)
Split sentences (based on tokenised data), returns sentences as a list of lists of tokens, each sentence is a list of tokens
2.250202
2.464463
0.91306
if sys.version < '3': if isinstance(s,unicode): return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore') else: return unicodedata.normalize('NFKD', unicode(s,encoding)).encode('ASCII', 'ignore') else: if isinstance(s,bytes): s = str(s,encoding) ...
def strip_accents(s, encoding= 'utf-8')
Strip characters with diacritics and return a flat ascii representation
2.175481
2.043353
1.064662
assert maxdist >= 2 tokens = list(tokens) if maxdist > len(tokens): maxdist = len(tokens) l = len(tokens) for i in range(0,l - 1): for permutation in permutations(tokens[i:i+maxdist]): if permutation != tuple(tokens[i:i+maxdist]): newtokens = tokens[:...
def swap(tokens, maxdist=2)
Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations.
2.659284
2.508583
1.060074
if isinstance(keyword,tuple) and isinstance(keyword,list): l = len(keyword) else: keyword = (keyword,) l = 1 n = l + contextsize*2 focuspos = contextsize + 1 for ngram in Windower(tokens,n,None,None): if ngram[focuspos:focuspos+l] == keyword: yield ng...
def find_keyword_in_context(tokens, keyword, contextsize=1)
Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list
4.206357
3.814266
1.102796
e = self.data[self.start] self.start += 1 if self.start > 5 and self.start > len(self.data)//2: self.data = self.data[self.start:] self.start = 0 return e
def pop(self)
Retrieve the next element in line, this will remove it from the queue
3.089326
2.879076
1.073027
f = self.f(item) if callable(f): score = f() else: score = f if not self.duplicates: for s, i in self.data: if s == score and item == i: #item is a duplicate, don't add it return False ...
def append(self, item)
Adds an item to the priority queue (in the right place), returns True if successfull, False if the item was blocked (because of a bad score)
3.064399
2.851216
1.074769
if self.minimize: return self.data.pop(0)[1] else: return self.data.pop()[1]
def pop(self)
Retrieve the next element in line, this will remove it from the queue
4.76139
4.386494
1.085466
if self.minimize: return self.data[i][0] else: return self.data[(-1 * i) - 1][0]
def score(self, i)
Return the score for item x (cheap lookup), Item 0 is always the best item
4.858691
4.349029
1.11719
if self.minimize: self.data = self.data[:n] else: self.data = self.data[-1 * n:]
def prune(self, n)
prune all but the first (=best) n items
4.535503
3.570599
1.270236
self.data = random.sample(self.data, n)
def randomprune(self,n)
prune down to n items at random, disregarding their score
6.315843
4.429971
1.425707
if retainequalscore: if self.minimize: f = lambda x: x[0] <= score else: f = lambda x: x[0] >= score else: if self.minimize: f = lambda x: x[0] < score else: f = lambda x: x[0] > scor...
def prunebyscore(self, score, retainequalscore=False)
Deletes all items below/above a certain score from the queue, depending on whether minimize is True or False. Note: It is recommended (more efficient) to use blockworse=True / blockequal=True instead! Preventing the addition of 'worse' items.
1.951658
1.718258
1.135835
if not isinstance(item, Tree): return ValueError("Can only append items of type Tree") if not self.children: self.children = [] item.parent = self self.children.append(item)
def append(self, item)
Add an item to the Tree
3.773808
3.287379
1.147969
if self.children: return sum( ( c.size() for c in self.children.values() ) ) + 1 else: return 1
def size(self)
Size is number of nodes under the trie, including the current node
3.820153
3.063506
1.246987
if self.children: if not maxdepth or (maxdepth and _depth < maxdepth): for key, child in self.children.items(): if child.leaf(): yield child else: for results in child.walk(leavesonly, ma...
def walk(self, leavesonly=True, maxdepth=None, _depth = 0)
Depth-first search, walking through trie, returning all encounterd nodes (by default only leaves)
2.485551
2.29437
1.083326
prevp = 0 prevs = 0 sentence = []; sentence_id = "" for word, id, pos, lemma in iter(self): try: doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0] if ((p != prevp) or (s != prevs)) and...
def sentences(self)
Iterate over all sentences (sentence_id, sentence) in the document, sentence is a list of 4-tuples (word,id,pos,lemma)
2.918547
2.57253
1.134505
prevp = 0 partext = [] for word, id, pos, lemma in iter(self): doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0] if prevp != p and partext: yield ( doc_id + "." + ptype + "." + prevp , " ".join(partex...
def paragraphs(self, with_id = False)
Extracts paragraphs, returns list of plain-text(!) paragraphs
4.553851
4.568027
0.996897
#TODO: download XSD from web if self.inline: xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines())))) xmlschema.assertValid(self.tree) #return xmlschema.validate(self) else: xm...
def validate(self, formats_dir="../formats/")
checks if the document is valid
5.072318
4.844937
1.046932
global namespaces return self.tree.xpath(expression, namespaces=namespaces)
def xpath(self, expression)
Executes an xpath expression using the correct namespaces
8.032832
6.176901
1.300463
targetwords = [] for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])): if word: subwords = word.split("_") for w in subwords: #split multiword expressions targetwords.append( (w, lemma, postag, i, le...
def align(self, referencewords, datatuple)
align the reference sentence with the tagged data
3.151972
3.120832
1.009978
if self.mainsetcache: return self.mainsetcache set_uri = self.get_set_uri() for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen ?setempty WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ...
def mainset(self)
Returns information regarding the set
3.059313
3.016162
1.014307
if subset_id in self.subsetcache: return self.subsetcache[subset_id] set_uri = self.get_set_uri(subset_id) for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?s...
def subset(self, subset_id)
Returns information regarding the set
2.764326
2.658463
1.039821
classes = self.classes(set_uri_or_id, nestedhierarchy) for classid in self.classorder(classes): yield classes[classid]
def orderedclasses(self, set_uri_or_id=None, nestedhierarchy=False)
Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`
3.808137
3.303462
1.152771
if set_uri_or_id and set_uri_or_id.startswith(('http://','https://')): set_uri = set_uri_or_id else: set_uri = self.get_set_uri(set_uri_or_id) assert set_uri is not None classes= {} uri2idmap = {} for row in self.graph.query("SELECT ?cla...
def classes(self, set_uri_or_id=None, nestedhierarchy=False)
Returns a dictionary of classes for the specified (sub)set (if None, default, the main set is selected)
2.240683
2.209358
1.014178
return [ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items() if 'seqnr' in classitem) , key=lambda pair: pair[1]['seqnr'] )] + \ [ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items...
def classorder(self,classes)
Return a list of class IDs in order for presentational purposes: order is determined first and foremost by explicit ordering, else alphabetically by label or as a last resort by class ID
2.586968
2.322355
1.113942
self.lexer = ply.lex.lex(object=self, **kwargs)
def build(self, **kwargs)
Build the lexer.
7.763836
3.624951
2.141777
if tree.tag == 'identifier': return tree.attrib['name'] if tree.tag in ('string', 'boolean'): return tree.text if tree.tag == 'number': return tree.attrib['value'] if tree.tag in ('property', 'object'): return make_varname(_xpath_one(tree, '*')) if tree.tag.e...
def make_varname(tree)
<left> tree </left>
2.37267
2.384996
0.994832
auth_string = os.environ.get(env_prefix + 'WSGI_AUTH_CREDENTIALS') if not auth_string: return {} result = {} for credentials in auth_string.split('|'): username, password = credentials.split(':', 1) result[username] = password return result
def _users_from_environ(env_prefix='')
Environment value via `user:password|user2:password2`
2.826556
2.423023
1.166541
paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS') if not paths: return [] return paths.split(';')
def _exclude_paths_from_environ(env_prefix='')
Environment value via `/login;/register`
4.075587
3.329314
1.224152
paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS') if not paths: return [] return paths.split(';')
def _include_paths_from_environ(env_prefix='')
Environment value via `/login;/register`
4.870564
3.64868
1.334884
if self._is_request_in_include_path(request): if self._is_request_in_exclude_path(request): return True else: auth = request.authorization if auth and auth[0] == 'Basic': credentials = b64decode(auth[1]).decode(...
def is_authorized(self, request)
Check if the user is authenticated for the given request. The include_paths and exclude_paths are first checked. If authentication is required then the Authorization HTTP header is checked against the credentials.
2.511236
2.270371
1.106091
response = HTTPUnauthorized() response.www_authenticate = ('Basic', {'realm': self._realm}) return response(environ, start_response)
def _login(self, environ, start_response)
Send a login response back to the client.
4.958553
4.522533
1.096411
if self._include_paths: for path in self._include_paths: if request.path.startswith(path): return True return False else: return True
def _is_request_in_include_path(self, request)
Check if the request path is in the `_include_paths` list. If no specific include paths are given then we assume that authentication is required for all paths.
2.327203
1.963186
1.185421
if self._exclude_paths: for path in self._exclude_paths: if request.path.startswith(path): return True return False else: return False
def _is_request_in_exclude_path(self, request)
Check if the request path is in the `_exclude_paths` list
2.300144
1.800388
1.277583
prompt_kwargs = prompt_kwargs or {} defaults = { "history": InMemoryHistory(), "completer": ClickCompleter(group), "message": u"> ", } for key in defaults: default_value = defaults[key] if key not in prompt_kwargs: prompt_kwargs[key] = default_v...
def bootstrap_prompt(prompt_kwargs, group)
Bootstrap prompt_toolkit kwargs or use user defined values. :param prompt_kwargs: The user specified prompt kwargs.
3.263362
3.729604
0.874989
# parent should be available, but we're not going to bother if not group_ctx = old_ctx.parent or old_ctx group = group_ctx.command isatty = sys.stdin.isatty() # Delete the REPL command from those available, as we don't want to allow # nesting REPLs (note: pass `None` to `pop` as we don't w...
def repl( # noqa: C901 old_ctx, prompt_kwargs=None, allow_system_commands=True, allow_internal_commands=True, )
Start an interactive shell. All subcommands are available in it. :param old_ctx: The current Click context. :param prompt_kwargs: Parameters passed to :py:func:`prompt_toolkit.shortcuts.prompt`. If stdin is not a TTY, no prompt will be printed, but only commands read from stdin.
3.390891
3.410112
0.994363
group.command(name=name)(click.pass_context(repl))
def register_repl(group, name="repl")
Register :func:`repl()` as sub-command *name* of *group*.
9.57271
7.680469
1.246371
if command.startswith(":"): target = _get_registered_target(command[1:], default=None) if target: return target()
def handle_internal_commands(command)
Run repl-internal commands. Repl-internal commands are all commands starting with ":".
7.274225
7.26974
1.000617
D = self._initialize(X) for i in range(self.max_iter): gamma = self._transform(D, X) e = np.linalg.norm(X - gamma.dot(D)) if e < self.tol: break D, gamma = self._update_dict(X, D, gamma) self.components_ = D return...
def fit(self, X)
Parameters ---------- X: shape = [n_samples, n_features]
3.544769
3.859171
0.918531
''' Given a function to map from an ID to an underlying object, and a function to map from an underlying object to the concrete GraphQLObjectType it corresponds to, constructs a `Node` interface that objects can implement, and a field config for a `node` root field. If the type_resolver is omit...
def node_definitions(id_fetcher, type_resolver=None, id_resolver=None)
Given a function to map from an ID to an underlying object, and a function to map from an underlying object to the concrete GraphQLObjectType it corresponds to, constructs a `Node` interface that objects can implement, and a field config for a `node` root field. If the type_resolver is omitted, object ...
3.925619
1.697994
2.311916
''' Takes the "global ID" created by toGlobalID, and retuns the type name and ID used to create it. ''' unbased_global_id = unbase64(global_id) _type, _id = unbased_global_id.split(':', 1) return _type, _id
def from_global_id(global_id)
Takes the "global ID" created by toGlobalID, and retuns the type name and ID used to create it.
6.228088
2.547439
2.444843
''' Creates the configuration for an id field on a node, using `to_global_id` to construct the ID from the provided typename. The type-specific ID is fetcher by calling id_fetcher on the object, or if not provided, by accessing the `id` property on the object. ''' return GraphQLField( ...
def global_id_field(type_name, id_fetcher=None)
Creates the configuration for an id field on a node, using `to_global_id` to construct the ID from the provided typename. The type-specific ID is fetcher by calling id_fetcher on the object, or if not provided, by accessing the `id` property on the object.
4.841914
1.954889
2.476823
''' A simple function that accepts an array and connection arguments, and returns a connection object for use in GraphQL. It uses array offsets as pagination, so pagination will only work if the array is static. ''' _len = len(data) return connection_from_list_slice( data, ar...
def connection_from_list(data, args=None, **kwargs)
A simple function that accepts an array and connection arguments, and returns a connection object for use in GraphQL. It uses array offsets as pagination, so pagination will only work if the array is static.
6.283294
2.41208
2.604928
''' A version of `connectionFromArray` that takes a promised array, and returns a promised connection. ''' return data_promise.then(lambda data: connection_from_list(data, args, **kwargs))
def connection_from_promised_list(data_promise, args=None, **kwargs)
A version of `connectionFromArray` that takes a promised array, and returns a promised connection.
5.727461
3.1263
1.832025
''' Given a slice (subset) of an array, returns a connection object for use in GraphQL. This function is similar to `connectionFromArray`, but is intended for use cases where you know the cardinality of the connection, consider it too large to materialize the entire array, and instead wish pass ...
def connection_from_list_slice(list_slice, args=None, connection_type=None, edge_type=None, pageinfo_type=None, slice_start=0, list_length=0, list_slice_length=None)
Given a slice (subset) of an array, returns a connection object for use in GraphQL. This function is similar to `connectionFromArray`, but is intended for use cases where you know the cardinality of the connection, consider it too large to materialize the entire array, and instead wish pass in a slice o...
2.433667
1.763791
1.379794
''' Return the cursor associated with an object in an array. ''' if _object not in data: return None offset = data.index(_object) return offset_to_cursor(offset)
def cursor_for_object_in_connection(data, _object)
Return the cursor associated with an object in an array.
5.384243
3.68172
1.462426
''' Given an optional cursor and a default offset, returns the offset to use; if the cursor contains a valid offset, that will be used, otherwise it will be the default. ''' if not is_str(cursor): return default_offset offset = cursor_to_offset(cursor) try: return int(of...
def get_offset_with_default(cursor=None, default_offset=0)
Given an optional cursor and a default offset, returns the offset to use; if the cursor contains a valid offset, that will be used, otherwise it will be the default.
3.98127
2.254083
1.766248
edges = [{'source': s, 'target': t} for s, t in data] nodes = force_directed_layout.run(edges, iterations, force_strength, dampening, max_velocity, max_distance, is_3d) return {'edges': edges, 'nodes': nodes}
def generate(data, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True)
Runs a force-directed algorithm on a graph, returning a data structure. Args: data: An adjacency list of tuples (ie. [(1,2),...]) iterations: (Optional) Number of FDL iterations to run in coordinate generation force_strength: (Optional) Strength of Coulomb and Hooke forces ...
2.898435
2.881119
1.00601
return json.dumps(obj, sort_keys=True, separators=(',', ':'), cls=CustomEncoder)
def compress(obj)
Outputs json without whitespace.
3.540116
2.901616
1.22005
return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)
def dumps(obj)
Outputs json with formatting edits + object handling.
3.234043
2.917868
1.108358
s = super(CustomEncoder, self).encode(obj) # If uncompressed, postprocess for formatting if len(s.splitlines()) > 1: s = self.postprocess(s) return s
def encode(self, obj)
Fired for every object.
6.178361
6.042747
1.022443
is_compressing, is_hash, compressed, spaces = False, False, [], 0 for row in json_string.split('\n'): if is_compressing: if (row[:spaces + 5] == ' ' * (spaces + 4) + ('"' if is_hash else '{')): compressed.append(row.rstrip(...
def postprocess(self, json_string)
Displays each entry on its own line.
4.149051
3.930341
1.055646
# Get a list of node ids from the edge data nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges) # Convert to a data-storing object and initialize some values d = 3 if is_3d else 2 nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes} # Repeat n tim...
def run(edges, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True)
Runs a force-directed-layout algorithm on the input graph. iterations - Number of FDL iterations to run in coordinate generation force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes) dampening - Multiplier to reduce force applied to nodes...
3.911408
3.815891
1.025032
# Get relevant positional data delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])] distance = sqrt(sum(d ** 2 for d in delta)) # If the deltas are too small, use random values to keep things moving if distance < 0.1: delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)] ...
def _coulomb(n1, n2, k, r)
Calculates Coulomb forces and updates node data.
3.300904
3.084999
1.069985
logger.debug("started") context.clear() logger.info(f"Context wiped. New context size: {len(context)}") logger.debug("done")
def run_step(context)
Wipe the entire context. Args: Context is a dictionary or dictionary-like. Does not require any specific keys in context.
7.736632
7.097061
1.090118
assert context_arg, ("pipeline must be invoked with context arg set. For " "this json parser you're looking for something " "like: " "pypyr pipelinename './myjsonfile.json'") logger.debug("starting") # open the json file on disk...
def get_parsed_context(context_arg)
Parse input context string and returns context as dictionary.
7.504802
7.484313
1.002737
logger.debug("started") context.assert_key_has_value(key='pathCheck', caller=__name__) paths_to_check = context['pathCheck'] if not paths_to_check: raise KeyInContextHasNoValueError("context['pathCheck'] must have a " f"value for {__name__}.") ...
def run_step(context)
pypyr step that checks if a file or directory path exists. Args: context: pypyr.context.Context. Mandatory. The following context key must exist - pathCheck. str/path-like or list of str/paths. Path to file on disk to check. All input...
3.674255
2.722688
1.349496
logger.debug("started") context.assert_child_key_has_value('fileWriteJson', 'path', __name__) out_path = context.get_formatted_string(context['fileWriteJson']['path']) # doing it like this to safeguard against accidentally dumping all context # with potentially sensitive values in it to disk i...
def run_step(context)
Write payload out to json file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteJson - path. mandatory. path-like. Write output file to here. Will create directories in path for you. ...
4.586438
3.717387
1.23378
logger.debug("started") (pipeline_name, use_parent_context, pipe_arg, skip_parse, raise_error, loader, ) = get_arguments(context) try: if use_parent_context: logger.info(f"pyping {pipeline_name}, using parent context.") pipelinerunner.load...
def run_step(context)
Run another pipeline from this step. The parent pipeline is the current, executing pipeline. The invoked, or child pipeline is the pipeline you are calling from this step. Args: context: dictionary-like pypyr.context.Context. context is mandatory. Uses the following context keys i...
4.030962
3.306196
1.219214
context.assert_key_has_value(key='pype', caller=__name__) pype = context.get_formatted('pype') try: pipeline_name = pype['name'] if pipeline_name is None: raise KeyInContextHasNoValueError( "pypyr.steps.pype ['pype']['name'] exists but is empty.") excep...
def get_arguments(context)
Parse arguments for pype from context and assign default values. Args: context: pypyr.context.Context. context is mandatory. Returns: tuple (pipeline_name, #str use_parent_context, #bool pipe_arg, #str skip_parse, #bool raise_error #b...
3.854171
2.327638
1.655829
logger.debug("starting") # look for name.yaml in the pipelines/ sub-directory logger.debug(f"current directory is {working_directory}") # looking for {cwd}/pipelines/[pipeline_name].yaml pipeline_path = os.path.abspath(os.path.join( working_directory, 'pipelines', pipe...
def get_pipeline_path(pipeline_name, working_directory)
Look for the pipeline in the various places it could be. First checks the cwd. Then checks pypyr/pipelines dir. Args: pipeline_name: string. Name of pipeline to find working_directory: string. Path in which to look for pipeline_name.yaml Returns: Absolute path to the pipeline_name...
2.47594
2.286536
1.082835
logger.debug("starting") pipeline_path = get_pipeline_path( pipeline_name=pipeline_name, working_directory=working_dir) logger.debug(f"Trying to open pipeline at path {pipeline_path}") try: with open(pipeline_path) as yaml_file: pipeline_definition = pypyr.yaml...
def get_pipeline_definition(pipeline_name, working_dir)
Open and parse the pipeline definition yaml. Parses pipeline yaml and returns dictionary representing the pipeline. pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name: string. Name of pipeline. This will be the file-name of the pipelin...
3.545114
3.777203
0.938555
return representer.represent_scalar(cls.yaml_tag, node.value)
def to_yaml(cls, representer, node)
How to serialize this class back to yaml.
3.442521
2.885491
1.193045
if self.value: return expressions.eval_string(self.value, context) else: # Empty input raises cryptic EOF syntax err, this more human # friendly raise ValueError('!py string expression is empty. It must be a ' 'valid p...
def get_value(self, context)
Run python eval on the input string.
23.730183
17.44957
1.359929
logger.debug("starting") # Loop decorators only evaluated once, not for every step repeat # execution. foreach = context.get_formatted_iterable(self.foreach_items) foreach_length = len(foreach) logger.info(f"foreach decorator will loop {foreach_length} times."...
def foreach_loop(self, context)
Run step once for each item in foreach_items. On each iteration, the invoked step can use context['i'] to get the current iterator value. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
8.789891
7.262051
1.210387