_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q0 | AbstractElement.settext | train | def settext(self, text, cls='current'):
"""Set the text for this element.
Arguments:
text (str): The text
cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be | python | {
"resource": ""
} |
q1 | AbstractElement.setdocument | train | def setdocument(self, doc):
"""Associate a document with this element.
Arguments:
doc (:class:`Document`): A document
Each element must be associated with a FoLiA document.
"""
assert isinstance(doc, Document)
if not self.doc:
self.doc = doc
if self.id:
if self.id in doc:
| python | {
"resource": ""
} |
q2 | AbstractElement.addable | train | def addable(Class, parent, set=None, raiseexceptions=True):
"""Tests whether a new element of this class can be added to the parent.
This method is mostly for internal use.
This will use the ``OCCURRENCES`` property, but may be overridden by subclasses for more customised behaviour.
Parameters:
parent (:class:`AbstractElement`): The element that is being added to
set (str or None): The set
raiseexceptions (bool): Raise an exception if the element can't be added?
Returns:
bool
Raises:
ValueError
"""
if not parent.__class__.accepts(Class, raiseexceptions, parent):
return False
if Class.OCCURRENCES > 0:
#check if the parent doesn't have too many already
count = parent.count(Class,None,True,[True, AbstractStructureElement]) #never descend into embedded structure annotation
| python | {
"resource": ""
} |
q3 | AbstractElement.postappend | train | def postappend(self):
"""This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
| python | {
"resource": ""
} |
q4 | AbstractElement.updatetext | train | def updatetext(self):
"""Recompute textual value based on the text content of the children. Only supported on elements that are a ``TEXTCONTAINER``"""
if self.TEXTCONTAINER:
| python | {
"resource": ""
} |
q5 | AbstractElement.ancestors | train | def ancestors(self, Class=None):
"""Generator yielding all ancestors of this element, effectively back-tracing its path to the root element. A tuple of multiple classes may be specified.
Arguments:
*Class: The class or classes (:class:`AbstractElement` or subclasses). Not instances!
Yields:
elements (instances derived from :class:`AbstractElement`)
"""
e = self
while e:
if e.parent:
| python | {
"resource": ""
} |
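A hedged usage sketch for the ancestor traversal above (the input file name is a placeholder, and ``doc.words(0)`` / ``folia.Sentence`` are assumptions based on the pynlpl FoLiA API, not part of this row)::

    from pynlpl.formats import folia

    doc = folia.Document(file="example.folia.xml")   # placeholder input document
    word = doc.words(0)                              # first word in the document
    # walk back towards the root, printing each ancestor's type
    for ancestor in word.ancestors():
        print(type(ancestor).__name__)
    # or restrict to a particular class:
    sentence = next(word.ancestors(folia.Sentence))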
q6 | AbstractElement.ancestor | train | def ancestor(self, *Classes):
"""Find the most immediate ancestor of the specified type, multiple classes may be specified.
Arguments:
*Classes: The possible classes (:class:`AbstractElement` | python | {
"resource": ""
} |
q7 | AbstractElement.json | train | def json(self, attribs=None, recurse=True, ignorelist=False):
"""Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON.
Example::
import json
json.dumps(word.json())
Returns:
dict
"""
jsonnode = {}
jsonnode['type'] = self.XMLTAG
if self.id:
jsonnode['id'] = self.id
if self.set:
jsonnode['set'] = self.set
if self.cls:
jsonnode['class'] = self.cls
if self.annotator:
jsonnode['annotator'] = self.annotator
if self.annotatortype:
if self.annotatortype == AnnotatorType.AUTO:
jsonnode['annotatortype'] = "auto"
elif self.annotatortype == AnnotatorType.MANUAL:
jsonnode['annotatortype'] = "manual"
if self.confidence is not None:
jsonnode['confidence'] = self.confidence
if self.n:
jsonnode['n'] = self.n
if self.auth:
jsonnode['auth'] = self.auth
if self.datetime:
jsonnode['datetime'] = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
if recurse: #pylint: disable=too-many-nested-blocks
jsonnode['children'] = []
if self.TEXTCONTAINER:
| python | {
"resource": ""
} |
q8 | AbstractElement.xmlstring | train | def xmlstring(self, pretty_print=False):
"""Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children"""
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if sys.version < '3':
if isinstance(s, str):
| python | {
"resource": ""
} |
q9 | AbstractElement.select | train | def select(self, Class, set=None, recursive=True, ignore=True, node=None): #pylint: disable=bad-classmethod-argument,redefined-builtin
"""Select child elements of the specified class.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
recursive (bool): Select recursively? Descending into child elements? Defaults to ``True``.
ignore: A list of Classes to ignore. If set to ``True`` instead of a list, all non-authoritative elements will be skipped (this is the default behaviour and corresponds to the following elements: :class:`Alternative`, :class:`AlternativeLayer`, :class:`Suggestion`, and :class:`folia.Original`; these elements and anything contained within them are never *authoritative*). You may also include the boolean True as a member of a list if you want to skip additional tags alongside the predefined non-authoritative ones.
* ``node``: Reserved for internal usage, used in recursion.
Yields:
Elements (instances derived from :class:`AbstractElement`)
Example::
for sense in text.select(folia.Sense, 'cornetto', True, [folia.Original, folia.Suggestion, folia.Alternative] ):
..
""" | python | {
"resource": ""
} |
q10 | AbstractElement.getmetadata | train | def getmetadata(self, key=None):
"""Get the metadata that applies to this element, automatically inherited from parent elements"""
if self.metadata:
d = self.doc.submetadata[self.metadata]
elif self.parent:
d = self.parent.getmetadata()
elif self.doc: | python | {
"resource": ""
} |
q11 | AbstractElement.getindex | train | def getindex(self, child, recursive=True, ignore=True):
"""Get the index at which an element occurs, recursive by default!
Returns:
int
"""
#breadth first search
for i, c in enumerate(self.data):
if c is child:
return i
if recursive: #pylint: disable=too-many-nested-blocks
for i, c in enumerate(self.data):
if ignore is True:
try:
if not c.auth:
continue
except AttributeError:
#not all elements have auth attribute..
pass
elif ignore: #list
doignore = False
for e in ignore:
if e is True:
try:
if not c.auth:
doignore =True
break
except AttributeError:
| python | {
"resource": ""
} |
q12 | AbstractElement.precedes | train | def precedes(self, other):
"""Returns a boolean indicating whether this element precedes the other element"""
try:
ancestor = next(commonancestors(AbstractElement, self, other))
except StopIteration:
raise Exception("Elements share no common ancestor")
#now we just do a depth first search and see who comes first
def callback(e):
if e is self:
return True
| python | {
"resource": ""
} |
q13 | AbstractElement.depthfirstsearch | train | def depthfirstsearch(self, function):
"""Generic depth first search algorithm using a callback function, continues as long as the callback function returns None"""
result = function(self)
if result is not None:
| python | {
"resource": ""
} |
q14 | AbstractElement.next | train | def next(self, Class=True, scope=True, reverse=False):
"""Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off ``AbstractElement``, may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence, Paragraph, Division, Event, ListItem, Caption), set to ``None`` to not constrain at all.
"""
if Class is True: Class = self.__class__
if scope is True: scope = STRUCTURESCOPE
structural = Class is not None and issubclass(Class,AbstractStructureElement)
if reverse:
order = reversed
descendindex = -1
else:
order = lambda x: x #pylint: disable=redefined-variable-type
descendindex = 0
child = self
parent = self.parent
while parent: #pylint: disable=too-many-nested-blocks
if len(parent) > 1:
returnnext = False
for e in order(parent):
if e is child:
#we found the current item, next item will be the one to return
returnnext = True
elif returnnext and e.auth and not isinstance(e,AbstractAnnotationLayer) and (not structural or (structural and (not isinstance(e,(AbstractTokenAnnotation,TextContent)) ) )):
if structural and isinstance(e,Correction):
if not list(e.select(AbstractStructureElement)): #skip-over non-structural correction
continue
if Class is None or (isinstance(Class,tuple) | python | {
"resource": ""
} |
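A short, hedged sketch of structural navigation with ``next()`` (assumes a ``folia.Word`` instance ``word`` from a parsed document, as in the earlier sketch)::

    # next word of the same class, never crossing a sentence boundary
    nextword = word.next(folia.Word, [folia.Sentence])
    if nextword is None:
        print("word is the last word of its sentence")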
q15 | AbstractElement.previous | train | def previous(self, Class=True, scope=True):
"""Returns the previous element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
| python | {
"resource": ""
} |
q16 | AbstractElement.remove | train | def remove(self, child):
"""Removes the child element"""
if not isinstance(child, AbstractElement):
raise ValueError("Expected AbstractElement, got " + str(type(child)))
if child.parent == self:
child.parent = None
| python | {
"resource": ""
} |
q17 | AllowTokenAnnotation.hasannotation | train | def hasannotation(self,Class,set=None):
"""Returns an integer indicating whether such as annotation exists, and if so, how many. | python | {
"resource": ""
} |
q18 | AllowTokenAnnotation.annotation | train | def annotation(self, type, set=None):
"""Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
| python | {
"resource": ""
} |
q19 | AbstractStructureElement.hasannotationlayer | train | def hasannotationlayer(self, annotationtype=None,set=None):
"""Does the | python | {
"resource": ""
} |
q20 | TextContent.getreference | train | def getreference(self, validate=True):
"""Returns and validates the Text Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for textcontent not found!")
| python | {
"resource": ""
} |
q21 | PhonContent.getreference | train | def getreference(self, validate=True):
"""Return and validate the Phonetic Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for phonetic content not found!")
| python | {
"resource": ""
} |
q22 | Word.findspans | train | def findspans(self, type,set=None):
"""Yields span annotation elements of the specified type that include this word.
Arguments:
type: The annotation type; can be passed as any member of :class:`AnnotationType`, or as the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class.
set (str or None): Constrain by set
Example::
for chunk in word.findspans(folia.Chunk):
print(" Chunk class=", chunk.cls, " words=")
for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part)
print(word2, end="")
print()
Yields:
Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`)
"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
e = self
while True:
if not e.parent: break
| python | {
"resource": ""
} |
q23 | AbstractSpanAnnotation.setspan | train | def setspan(self, *args):
"""Sets the span of the span element anew, erases all data inside.
Arguments:
*args: Instances of :class:`Word`, :class:`Morpheme` or | python | {
"resource": ""
} |
q24 | AbstractSpanAnnotation._helper_wrefs | train | def _helper_wrefs(self, targets, recurse=True):
"""Internal helper function"""
for c in self:
if isinstance(c,Word) or isinstance(c,Morpheme) or isinstance(c, Phoneme):
targets.append(c)
elif isinstance(c,WordReference):
try:
targets.append(self.doc[c.id]) #try to resolve
except KeyError:
targets.append(c) #add unresolved
elif isinstance(c, AbstractSpanAnnotation) and recurse:
#recursion
c._helper_wrefs(targets) #pylint: disable=protected-access
| python | {
"resource": ""
} |
q25 | AbstractSpanAnnotation.wrefs | train | def wrefs(self, index = None, recurse=True):
"""Returns a list of word references, these can be Words but also Morphemes or Phonemes.
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
| python | {
"resource": ""
} |
q26 | AbstractSpanAnnotation.copychildren | train | def copychildren(self, newdoc=None, idsuffix=""):
"""Generator creating a deep copy of the children of this element. If idsuffix is a string, if set to True, a random idsuffix will be generated including a random 32-bit hash"""
if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children
| python | {
"resource": ""
} |
q27 | AbstractAnnotationLayer.alternatives | train | def alternatives(self, Class=None, set=None):
"""Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
* ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
Generator over Alternative elements
"""
for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']): #pylint: disable=too-many-nested-blocks
if Class is None:
yield e
elif len(e) >= 1: #child elements?
for e2 in e:
try:
if isinstance(e2, Class): | python | {
"resource": ""
} |
q28 | AbstractAnnotationLayer.findspan | train | def findspan(self, *words):
"""Returns the span element which spans over the specified words or morphemes.
See also:
:meth:`Word.findspans`
"""
| python | {
"resource": ""
} |
q29 | Correction.hasnew | train | def hasnew(self,allowempty=False):
"""Does the correction define new corrected annotations?"""
for e in self.select(New,None,False, False):
| python | {
"resource": ""
} |
q30 | Correction.hasoriginal | train | def hasoriginal(self,allowempty=False):
"""Does the correction record the old annotations prior to correction?"""
for e in self.select(Original,None,False, False):
| python | {
"resource": ""
} |
q31 | Correction.hassuggestions | train | def hassuggestions(self,allowempty=False):
"""Does the correction propose suggestions for correction?"""
for e in self.select(Suggestion,None,False, False):
| python | {
"resource": ""
} |
q32 | Correction.new | train | def new(self,index = None):
"""Get the new corrected annotation.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
| python | {
"resource": ""
} |
q33 | Correction.original | train | def original(self,index=None):
"""Get the old annotation prior to correction.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
| python | {
"resource": ""
} |
q34 | Correction.suggestions | train | def suggestions(self,index=None):
"""Get suggestions for correction.
Yields:
:class:`Suggestion` elements that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulates the suggested | python | {
"resource": ""
} |
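A minimal, hedged sketch tying the Correction accessors above together (assumes a ``folia.Correction`` instance ``correction`` selected from a parsed document)::

    if correction.hasnew():
        print("corrected annotation:", correction.new())
    if correction.hasoriginal():
        print("original annotation:", correction.original())
    for suggestion in correction.suggestions():
        print("suggested class:", suggestion.cls)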
q35 | Morpheme.findspans | train | def findspans(self, type,set=None):
"""Find span annotation of the specified type that include this word"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
| python | {
"resource": ""
} |
q36 | Pattern.resolve | train | def resolve(self,size, distribution):
"""Resolve a variable sized pattern to all patterns of a certain fixed size"""
if not self.variablesize():
raise Exception("Can only resize patterns with * wildcards")
nrofwildcards = 0
for x in self.sequence:
if x == '*':
nrofwildcards += 1
assert (len(distribution) == nrofwildcards)
wildcardnr = 0
newsequence = []
for x in self.sequence:
if x == '*':
| python | {
"resource": ""
} |
q37 | Document.load | train | def load(self, filename):
"""Load a FoLiA XML file.
Argument:
filename (str): The file to load
"""
#if LXE and self.mode != Mode.XPATH:
# #workaround for xml:id problem (disabled)
# #f = open(filename)
| python | {
"resource": ""
} |
q38 | Document.items | train | def items(self):
"""Returns a depth-first flat list of all items in the document"""
l = []
| python | {
"resource": ""
} |
q39 | Document.save | train | def save(self, filename=None):
"""Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename
if not filename:
raise Exception("No filename specified")
if filename[-4:].lower() == '.bz2':
f = bz2.BZ2File(filename,'wb')
f.write(self.xmlstring().encode('utf-8'))
f.close()
elif filename[-3:].lower() == '.gz':
| python | {
"resource": ""
} |
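A hedged usage sketch for ``save()``; note that the compression format is chosen purely by file extension (file names are placeholders)::

    doc.save("output.folia.xml")        # plain XML
    doc.save("output.folia.xml.bz2")    # bzip2-compressed
    doc.save()                          # re-save to the file the document was loaded from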
q40 | Document.xmldeclarations | train | def xmldeclarations(self):
"""Internal method to generate XML nodes for all declarations"""
l = []
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
attribs = {}
if set and set != 'undefined':
attribs['{' + NSFOLIA + '}set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
| python | {
"resource": ""
} |
q41 | Document.jsondeclarations | train | def jsondeclarations(self):
"""Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
"""
l = []
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
jsonnode = {'annotationtype': label.lower()}
if set and set != 'undefined':
jsonnode['set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
| python | {
"resource": ""
} |
q42 | Document.xml | train | def xml(self):
"""Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring`
"""
self.pendingvalidation()
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"})
attribs = {}
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
#if self.version:
# attribs['version'] = self.version
#else:
attribs['version'] = FOLIAVERSION
attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
metadataattribs = {}
metadataattribs['{' + NSFOLIA + '}type'] = | python | {
"resource": ""
} |
q43 | Document.json | train | def json(self):
"""Serialise the document to a ``dict`` ready for serialisation to JSON.
Example::
import json
jsondoc = json.dumps(doc.json())
"""
self.pendingvalidation()
jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations() }
if self.version:
jsondoc['version'] = self.version
| python | {
"resource": ""
} |
q44 | Document.xmlmetadata | train | def xmlmetadata(self):
"""Internal method to serialize metadata to XML"""
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
elements = []
if self.metadatatype == "native":
if isinstance(self.metadata, NativeMetaData):
for key, value in self.metadata.items():
elements.append(E.meta(value,id=key) )
else:
if isinstance(self.metadata, ForeignData):
#in-document
m = self.metadata
while m is not None:
elements.append(m.xml())
m = m.next
for metadata_id, submetadata in self.submetadata.items():
subelements = []
attribs = {
"{http://www.w3.org/XML/1998/namespace}id": metadata_id,
"type": self.submetadatatype[metadata_id] }
if isinstance(submetadata, NativeMetaData):
| python | {
"resource": ""
} |
q45 | Document.declare | train | def declare(self, annotationtype, set, **kwargs):
"""Declare a new annotation type to be used in the document.
Keyword arguments can be used to set defaults for any annotation of this type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Keyword Arguments:
annotator (str): Sets a default annotator
annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
datetime (datetime.datetime): Sets the default datetime
alias (str): Defines an alias that may be used in the set attribute of elements instead of the full set name
Example::
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)
"""
if (sys.version > '3' and not isinstance(set,str)) or (sys.version < '3' and not isinstance(set,(str,unicode))):
raise ValueError("Set parameter for declare() must be a string")
if inspect.isclass(annotationtype):
annotationtype = annotationtype.ANNOTATIONTYPE
if annotationtype in self.alias_set and set in self.alias_set[annotationtype]:
raise ValueError("Set " + set + " conflicts with alias, may not be equal!")
if not (annotationtype, set) in self.annotations:
self.annotations.append( (annotationtype,set) )
if set and self.loadsetdefinitions and not set in self.setdefinitions:
if set[:7] == "http://" or set[:8] == "https://" | python | {
"resource": ""
} |
q46 | Document.defaultset | train | def defaultset(self, annotationtype):
"""Obtain the default set for the specified annotation type.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
Returns:
the set (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
"""
| python | {
"resource": ""
} |
q47 | Document.defaultannotator | train | def defaultannotator(self, annotationtype, set=None):
"""Obtain the default annotator for the specified annotation type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Returns:
the default annotator (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple | python | {
"resource": ""
} |
q48 | Document.parsemetadata | train | def parsemetadata(self, node):
"""Internal method to parse metadata"""
if 'type' in node.attrib:
self.metadatatype = node.attrib['type']
else:
#no type specified, default to native
self.metadatatype = "native"
if 'src' in node.attrib:
self.metadata = ExternalMetaData(node.attrib['src'])
elif self.metadatatype == "native":
self.metadata = NativeMetaData()
else:
self.metadata = None #may be set below to ForeignData
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}annotations':
self.parsexmldeclarations(subnode)
elif subnode.tag == '{' + NSFOLIA + '}meta':
if self.metadatatype == "native":
if subnode.text:
self.metadata[subnode.attrib['id']] = subnode.text
else:
raise MetaDataError("Encountered a meta element but metadata type is not native!")
elif subnode.tag == '{' + NSFOLIA + '}provenance':
#forward compatibility with FoLiA 2.0; ignore provenance
print("WARNING: Ignoring provenance data. Use foliapy instead of pynlpl.formats.folia for FoLiA v2.0 compatibility!",file=sys.stderr)
pass
elif subnode.tag == '{' + NSFOLIA + '}foreign-data':
if self.metadatatype == "native":
| python | {
"resource": ""
} |
q49 | Document.pendingvalidation | train | def pendingvalidation(self, warnonly=None):
"""Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr)
if warnonly is None and self and self.version:
warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
if self.textvalidation:
while self.offsetvalidationbuffer:
structureelement, textclass = self.offsetvalidationbuffer.pop()
if self.debug: print("[PyNLPl FoLiA DEBUG] Performing offset validation on " + repr(structureelement) + " textclass " + textclass,file=stderr)
#validate offsets
tc = structureelement.textcontent(textclass)
if tc.offset is not None:
| python | {
"resource": ""
} |
q50 | Document.paragraphs | train | def paragraphs(self, index = None):
"""Return a generator of all paragraphs found in the document.
If an index is specified, return the n'th paragraph only (starting at 0)"""
if index is None:
| python | {
"resource": ""
} |
q51 | Document.sentences | train | def sentences(self, index = None):
"""Return a generator of all sentence found in the document. Except for sentences in quotes.
If an index is specified, return the n'th sentence only (starting at 0)"""
if index is None:
return self.select(Sentence,None,True,[Quote])
| python | {
"resource": ""
} |
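A hedged iteration sketch based on the two generators above (``paragraph.sentences()`` and ``sentence.text()`` are assumptions based on the pynlpl FoLiA API)::

    for paragraph in doc.paragraphs():
        for sentence in paragraph.sentences():
            print(sentence.text())
    first_sentence = doc.sentences(0)   # n'th sentence by index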
q52 | NFA._states | train | def _states(self, state, processedstates=[]): #pylint: disable=dangerous-default-value
"""Iterate over all states in no particular order"""
processedstates.append(state)
for nextstate in state.epsilon:
if not nextstate in processedstates:
self._states(nextstate, | python | {
"resource": ""
} |
q53 | log | train | def log(msg, **kwargs):
"""Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
"""
if 'debug' in kwargs:
if 'currentdebug' in kwargs:
if kwargs['currentdebug'] < kwargs['debug']:
return False
else:
return False #no currentdebug passed, assuming no debug mode and thus skipping message
s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
if 'system' in | python | {
"resource": ""
} |
q54 | CornettoClient.get_syn_ids_by_lemma | train | def get_syn_ids_by_lemma(self, lemma):
"""Returns a list of synset IDs based on a lemma"""
if not isinstance(lemma,unicode):
lemma = unicode(lemma,'utf-8')
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: db_opt: %s" % path )
query_opt = "dict_search"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: query_opt: %s" % query_opt )
qdict = {}
qdict[ "action" ] = "queryList"
qdict[ "word" ] = lemma.encode('utf-8')
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) )
dict_list = []
dict_list = eval( content ) | python | {
"resource": ""
} |
q55 | CornettoClient.get_synset_xml | train | def get_synset_xml(self,syn_id):
"""
call cdb_syn with synset identifier -> returns the synset xml;
"""
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
# output_opt: plain, html, xml
# 'xml' is actually xhtml (with markup), but it is not valid xml!
# 'plain' is actually valid xml (without markup)
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + | python | {
"resource": ""
} |
q56 | WSDSystemOutput.senses | train | def senses(self, bestonly=False):
"""Returns a list of all predicted senses"""
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if | python | {
"resource": ""
} |
q57 | FrogClient.align | train | def align(self,inputwords, outputwords):
"""For each inputword, provides the index of the outputword"""
alignment = []
cursor = 0
for inputword in inputwords:
if len(outputwords) > cursor and outputwords[cursor] == inputword:
alignment.append(cursor)
cursor | python | {
"resource": ""
} |
q58 | tokenize | train | def tokenize(text, regexps=TOKENIZERRULES):
"""Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
"""
for i,regexp in list(enumerate(regexps)):
if isstring(regexp):
regexps[i] = re.compile(regexp)
tokens = []
begin = 0
for i, c in enumerate(text):
if begin > i:
continue
elif i == begin:
m = False
for regexp in regexps:
m = regexp.findall(text[i:i+300])
if m:
tokens.append(m[0])
begin = i + len(m[0])
break
if m: continue
if c in string.punctuation or c in WHITESPACE:
prev = text[i-1] if i > 0 else ""
next = text[i+1] if i < len(text)-1 else ""
if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
#punctuation in between numbers, keep as one token
pass
elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
#quote in between chars, keep...
pass
| python | {
"resource": ""
} |
q59 | strip_accents | train | def strip_accents(s, encoding= 'utf-8'):
"""Strip characters with diacritics and return a flat ascii representation"""
if sys.version < '3':
if isinstance(s,unicode):
return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
else:
| python | {
"resource": ""
} |
q60 | swap | train | def swap(tokens, maxdist=2):
"""Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations."""
assert maxdist >= 2
tokens = list(tokens)
if maxdist > len(tokens):
maxdist = len(tokens)
l = len(tokens)
for i in range(0,l - 1):
| python | {
"resource": ""
} |
q61 | find_keyword_in_context | train | def find_keyword_in_context(tokens, keyword, contextsize=1):
"""Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or | python | {
"resource": ""
} |
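The row above is truncated; purely as a concept illustration, a minimal keyword-in-context search over a token list could look like this (a generic sketch, not pynlpl's actual implementation)::

    def kwic(tokens, keyword, contextsize=1):
        """Yield (left context, keyword, right context) for each match."""
        for i, token in enumerate(tokens):
            if token == keyword:
                left = tuple(tokens[max(0, i - contextsize):i])
                right = tuple(tokens[i + 1:i + 1 + contextsize])
                yield left, token, right

    tokens = "to be or not to be".split()
    for left, keyword, right in kwic(tokens, "be"):
        print(left, keyword, right)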
q62 | PriorityQueue.randomprune | train | def randomprune(self,n):
"""prune down to n items at random, disregarding their score"""
| python | {
"resource": ""
} |
q63 | Tree.append | train | def append(self, item):
"""Add an item to the Tree"""
if not isinstance(item, Tree):
raise ValueError("Can only append items of type Tree")
if | python | {
"resource": ""
} |
q64 | Trie.size | train | def size(self):
"""Size is number of nodes under the trie, including the current node"""
if self.children:
| python | {
"resource": ""
} |
q65 | CorpusDocumentX.validate | train | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else: | python | {
"resource": ""
} |
q66 | CorpusDocumentX.xpath | train | def xpath(self, expression):
"""Executes an xpath expression using the correct namespaces"""
global namespaces
| python | {
"resource": ""
} |
q67 | Taggerdata.align | train | def align(self, referencewords, datatuple):
"""align the reference sentence with the tagged data"""
targetwords = []
for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])):
if word:
subwords = word.split("_")
for w in subwords: #split multiword expressions
targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
referencewords = [ w.lower() for w in referencewords ] | python | {
"resource": ""
} |
q68 | CustomLexer.build | train | def build(self, **kwargs):
"""Build the lexer."""
| python | {
"resource": ""
} |
q69 | BasicAuth.is_authorized | train | def is_authorized(self, request):
"""Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials.
"""
if self._is_request_in_include_path(request):
if self._is_request_in_exclude_path(request):
return True
else:
auth = request.authorization
| python | {
"resource": ""
} |
q70 | BasicAuth._login | train | def _login(self, environ, start_response):
"""Send a login response back to the client."""
| python | {
"resource": ""
} |
q71 | BasicAuth._is_request_in_include_path | train | def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
| python | {
"resource": ""
} |
q72 | BasicAuth._is_request_in_exclude_path | train | def _is_request_in_exclude_path(self, request):
"""Check if the request path is in the `_exclude_paths` list"""
if self._exclude_paths:
| python | {
"resource": ""
} |
q73 | bootstrap_prompt | train | def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
| python | {
"resource": ""
} |
q74 | repl | train | def repl( # noqa: C901
old_ctx,
prompt_kwargs=None,
allow_system_commands=True,
allow_internal_commands=True,
):
"""
Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin.
"""
# parent should be available, but we're not going to bother if not
group_ctx = old_ctx.parent or old_ctx
group = group_ctx.command
isatty = sys.stdin.isatty()
# Delete the REPL command from those available, as we don't want to allow
# nesting REPLs (note: pass `None` to `pop` as we don't want to error if
# REPL command already not present for some reason).
repl_command_name = old_ctx.command.name
if isinstance(group_ctx.command, click.CommandCollection):
available_commands = {
cmd_name: cmd_obj
for source in group_ctx.command.sources
for cmd_name, cmd_obj in source.commands.items()
}
else:
available_commands = group_ctx.command.commands
available_commands.pop(repl_command_name, None)
prompt_kwargs = bootstrap_prompt(prompt_kwargs, group)
if isatty:
def get_command():
return prompt(**prompt_kwargs)
else:
get_command = sys.stdin.readline
while True:
try:
command = get_command()
except KeyboardInterrupt:
continue
except EOFError:
break
if not command:
if isatty:
continue
else:
break
if allow_system_commands and dispatch_repl_commands(command):
continue
if | python | {
"resource": ""
} |
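A hedged sketch of wiring ``repl()`` into a click group (the ``click_repl`` import path and the group-without-subcommand pattern are assumptions, not taken from this row)::

    import click
    from click_repl import repl   # assumed import path

    @click.group(invoke_without_command=True)
    @click.pass_context
    def cli(ctx):
        # drop into the interactive shell when no subcommand is given
        if ctx.invoked_subcommand is None:
            repl(ctx)

    @cli.command()
    def hello():
        click.echo("hello")

    if __name__ == "__main__":
        cli()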
q75 | handle_internal_commands | train | def handle_internal_commands(command):
"""Run repl-internal commands.
Repl-internal commands are all commands starting with ":".
"""
if command.startswith(":"):
| python | {
"resource": ""
} |
q76 | node_definitions | train | def node_definitions(id_fetcher, type_resolver=None, id_resolver=None):
'''
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method.
'''
node_interface = GraphQLInterfaceType(
'Node',
description='An object with an ID',
fields=lambda: OrderedDict((
('id', GraphQLField(
GraphQLNonNull(GraphQLID),
description='The id of the object.',
resolver=id_resolver,
)),
)),
resolve_type=type_resolver | python | {
"resource": ""
} |
q77 | from_global_id | train | def from_global_id(global_id):
'''
Takes the "global ID" created by toGlobalID, and returns the type name and ID
used to create it.
'''
| python | {
"resource": ""
} |
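A brief round-trip sketch of the global-ID convention described above (assumes the usual Relay scheme of base64-encoding ``"TypeName:id"``; the top-level import path is an assumption)::

    from graphql_relay import to_global_id, from_global_id

    global_id = to_global_id('User', '42')             # base64("User:42")
    assert from_global_id(global_id) == ('User', '42')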
q78 | global_id_field | train | def global_id_field(type_name, id_fetcher=None):
'''
Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetched
by calling id_fetcher on the object, or if not provided, by accessing the `id`
property on the object.
'''
return GraphQLField(
GraphQLNonNull(GraphQLID), | python | {
"resource": ""
} |
q79 | connection_from_list | train | def connection_from_list(data, args=None, **kwargs):
'''
A simple function that accepts an array and connection arguments, and returns
a connection object for use in GraphQL. It uses array offsets | python | {
"resource": ""
} |
q80 | connection_from_promised_list | train | def connection_from_promised_list(data_promise, args=None, **kwargs):
'''
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
| python | {
"resource": ""
} |
q81 | cursor_for_object_in_connection | train | def cursor_for_object_in_connection(data, _object):
'''
Return the cursor associated with an object in an array.
'''
if _object not in data: | python | {
"resource": ""
} |
q82 | get_offset_with_default | train | def get_offset_with_default(cursor=None, default_offset=0):
'''
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
'''
if not is_str(cursor):
| python | {
"resource": ""
} |
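The helpers above revolve around offset-based cursors; conceptually a cursor is just a base64-encoded array offset. A generic sketch of that convention (not the library's exact code)::

    from base64 import b64decode, b64encode

    PREFIX = 'arrayconnection:'   # prefix conventionally used for array-connection cursors

    def offset_to_cursor(offset):
        return b64encode(f'{PREFIX}{offset}'.encode('utf-8')).decode('ascii')

    def cursor_to_offset(cursor):
        decoded = b64decode(cursor).decode('utf-8')
        return int(decoded[len(PREFIX):])

    assert cursor_to_offset(offset_to_cursor(3)) == 3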
q83 | generate | train | def generate(data, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed algorithm on a graph, returning a data structure.
Args:
data: An adjacency list of tuples (ie. [(1,2),...])
iterations: (Optional) Number of FDL iterations to run in coordinate
generation
force_strength: (Optional) Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening: (Optional) Multiplier to reduce force applied to nodes
max_velocity: (Optional) Maximum distance a node can move in one step
max_distance: (Optional) The maximum inter-node distance considered
is_3d: (Optional) Generates three-dimensional coordinates
Outputs | python | {
"resource": ""
} |
q84 | compress | train | def compress(obj):
"""Outputs json without whitespace."""
return json.dumps(obj, sort_keys=True, | python | {
"resource": ""
} |
q85 | dumps | train | def dumps(obj):
"""Outputs json with formatting edits + object handling."""
| python | {
"resource": ""
} |
q86 | CustomEncoder.encode | train | def encode(self, obj):
"""Fired for every object."""
s = super(CustomEncoder, self).encode(obj)
# If uncompressed, postprocess for formatting
| python | {
"resource": ""
} |
q87 | CustomEncoder.postprocess | train | def postprocess(self, json_string):
"""Displays each entry on its own line."""
is_compressing, is_hash, compressed, spaces = False, False, [], 0
for row in json_string.split('\n'):
if is_compressing:
if (row[:spaces + 5] == ' ' * (spaces + 4) +
('"' if is_hash else '{')):
compressed.append(row.rstrip())
elif (len(row) > spaces and row[:spaces] == ' ' * spaces and
re.match('[\]\}],?', row[spaces:].rstrip())):
compressed.append(row.rstrip())
is_compressing = False
else:
compressed[-1] += ' ' + row.strip()
else:
compressed.append(row.rstrip()) | python | {
"resource": ""
} |
q88 | run | train | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
| python | {
"resource": ""
} |
q89 | _coulomb | train | def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
| python | {
"resource": ""
} |
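The row above is cut off before the actual force update; as a generic illustration of the inverse-square repulsion step it describes (the update rule itself is an assumption, not the library's code; note the snippet stores positions under the 'velocity' key)::

    from math import sqrt

    def coulomb_repulsion(n1, n2, k, max_distance):
        """Apply a Coulomb-style repulsive force between two nodes, in place."""
        delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
        distance = max(sqrt(sum(d ** 2 for d in delta)), 0.1)
        if distance < max_distance:
            force = (k / distance) ** 2          # repulsion falls off with distance squared
            for i, d in enumerate(delta):
                n1['force'][i] -= force * d / distance
                n2['force'][i] += force * d / distance

    a = {'velocity': [0.0, 0.0], 'force': [0.0, 0.0]}
    b = {'velocity': [1.0, 0.0], 'force': [0.0, 0.0]}
    coulomb_repulsion(a, b, k=5.0, max_distance=50)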
q90 | run_step | train | def run_step(context):
"""Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context.
"""
logger.debug("started")
| python | {
"resource": ""
} |
q91 | run_step | train | def run_step(context):
"""pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, it's going to be 1. If you specified multiple
in paths or a glob expression that found more than 1 result, well, take a
guess.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathCheck missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None.
"""
logger.debug("started")
context.assert_key_has_value(key='pathCheck', caller=__name__)
paths_to_check = context['pathCheck']
if not paths_to_check:
raise KeyInContextHasNoValueError("context['pathCheck'] must have a "
| python | {
"resource": ""
} |
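A minimal, hedged sketch of driving this step directly from Python (the ``pypyr.steps.pathcheck`` module name is an assumption, and the file path is a placeholder)::

    from pypyr.context import Context
    import pypyr.steps.pathcheck as pathcheck   # assumed module for the step above

    context = Context({'pathCheck': './myfile.txt'})
    pathcheck.run_step(context)
    out = context['pathCheckOut']['./myfile.txt']
    print(out['exists'], out['count'], out['found'])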
q92 | run_step | train | def run_step(context):
"""Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteJson']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
| python | {
"resource": ""
} |
q93 | run_step | train | def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) = get_arguments(context)
| python | {
"resource": ""
} |
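A hedged sketch of the context shape the pype step expects, built directly in Python (key names follow the docstring above; the pipeline name is a placeholder)::

    from pypyr.context import Context

    context = Context({
        'pype': {
            'name': 'child-pipeline',     # looks for ./pipelines/child-pipeline.yaml
            'useParentContext': True,
            'skipParse': True,
            'raiseError': True,
            'pipeArg': 'key=value',       # only used when skipParse is False
        }
    })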
q94 | get_arguments | train | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is | python | {
"resource": ""
} |
q95 | get_pipeline_path | train | def get_pipeline_path(pipeline_name, working_directory):
"""Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
"""
logger.debug("starting")
# look for name.yaml in the pipelines/ sub-directory
logger.debug(f"current directory is {working_directory}")
# looking for {cwd}/pipelines/[pipeline_name].yaml
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: | python | {
"resource": ""
} |
q96 | get_pipeline_definition | train | def get_pipeline_definition(pipeline_name, working_dir):
"""Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
| python | {
"resource": ""
} |
q97 | SpecialTagDirective.to_yaml | train | def to_yaml(cls, representer, node):
"""How to serialize this class back to yaml."""
| python | {
"resource": ""
} |
q98 | PyString.get_value | train | def get_value(self, context):
"""Run python eval on the input string."""
if self.value:
return expressions.eval_string(self.value, context)
else:
# Empty input raises cryptic EOF syntax err, this more human
| python | {
"resource": ""
} |
q99 | Step.foreach_loop | train | def foreach_loop(self, context):
"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
| python | {
"resource": ""
} |