Dataset columns (one row per function):

| column | type |
|---|---|
| repository_name | string (length 5–67) |
| func_path_in_repository | string (length 4–234) |
| func_name | string (length 0–314) |
| whole_func_string | string (length 52–3.87M) |
| language | string (6 classes) |
| func_code_string | string (length 52–3.87M) |
| func_code_tokens | sequence |
| func_documentation_string | string (length 1–47.2k) |
| func_documentation_tokens | sequence |
| split_name | string (1 class) |
| func_code_url | string (length 85–339) |
| parameters | sequence |
| question | string (length 9–114) |
| answer | sequence |
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.save | language: python

```python
def save(self, filename=None):
    """Save the document to file.

    Arguments:
        * filename (str): The filename to save to. If not set (``None``, default),
          saves to the same file as loaded from.
    """
    if not filename:
        filename = self.filename
    if not filename:
        raise Exception("No filename specified")
    if filename[-4:].lower() == '.bz2':
        f = bz2.BZ2File(filename, 'wb')
        f.write(self.xmlstring().encode('utf-8'))
        f.close()
    elif filename[-3:].lower() == '.gz':
        f = gzip.GzipFile(filename, 'wb')  #pylint: disable=redefined-variable-type
        f.write(self.xmlstring().encode('utf-8'))
        f.close()
    else:
        f = io.open(filename, 'w', encoding='utf-8')
        f.write(self.xmlstring())
        f.close()
```
"""Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename
if not filename:
raise Exception("No filename specified")
if filename[-4:].lower() == '.bz2':
f = bz2.BZ2File(filename,'wb')
f.write(self.xmlstring().encode('utf-8'))
f.close()
elif filename[-3:].lower() == '.gz':
f = gzip.GzipFile(filename,'wb') #pylint: disable=redefined-variable-type
f.write(self.xmlstring().encode('utf-8'))
f.close()
else:
f = io.open(filename,'w',encoding='utf-8')
f.write(self.xmlstring())
f.close() | [
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"filename",
"if",
"not",
"filename",
":",
"raise",
"Exception",
"(",
"\"No filename specified\"",
")",
"if",
"filename",
"[",
"-",
"4",
":",
"]",
".",
"lower",
"(",
")",
"==",
"'.bz2'",
":",
"f",
"=",
"bz2",
".",
"BZ2File",
"(",
"filename",
",",
"'wb'",
")",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"elif",
"filename",
"[",
"-",
"3",
":",
"]",
".",
"lower",
"(",
")",
"==",
"'.gz'",
":",
"f",
"=",
"gzip",
".",
"GzipFile",
"(",
"filename",
",",
"'wb'",
")",
"#pylint: disable=redefined-variable-type",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"else",
":",
"f",
"=",
"io",
".",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
")",
"f",
".",
"close",
"(",
")"
documentation: Save the document to file. Arguments: * filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6547-L6568
parameters: filename
question: What does this function do?
answer: Save the document to file.
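A minimal usage sketch (not part of the dataset row); the input filename and the ``folia.Document`` loading call are assumptions for illustration:

```python
from pynlpl.formats import folia

doc = folia.Document(file="example.folia.xml")  # hypothetical input file
doc.save()                         # writes back to the file it was loaded from
doc.save("example.folia.xml.gz")   # a .gz extension selects gzip compression
```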
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.append | language: python

```python
def append(self, text):
    """Add a text (or speech) to the document:

    Example 1::

        doc.append(folia.Text)

    Example 2::

        doc.append( folia.Text(doc, id='example.text') )

    Example 3::

        doc.append(folia.Speech)
    """
    if text is Text:
        text = Text(self, id=self.id + '.text.' + str(len(self.data)+1))
    elif text is Speech:
        text = Speech(self, id=self.id + '.speech.' + str(len(self.data)+1))  #pylint: disable=redefined-variable-type
    else:
        assert isinstance(text, Text) or isinstance(text, Speech)
    self.data.append(text)
    return text
```
"""Add a text (or speech) to the document:
Example 1::
doc.append(folia.Text)
Example 2::
doc.append( folia.Text(doc, id='example.text') )
Example 3::
doc.append(folia.Speech)
"""
if text is Text:
text = Text(self, id=self.id + '.text.' + str(len(self.data)+1) )
elif text is Speech:
text = Speech(self, id=self.id + '.speech.' + str(len(self.data)+1) ) #pylint: disable=redefined-variable-type
else:
assert isinstance(text, Text) or isinstance(text, Speech)
self.data.append(text)
return text | [
"def",
"append",
"(",
"self",
",",
"text",
")",
":",
"if",
"text",
"is",
"Text",
":",
"text",
"=",
"Text",
"(",
"self",
",",
"id",
"=",
"self",
".",
"id",
"+",
"'.text.'",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"data",
")",
"+",
"1",
")",
")",
"elif",
"text",
"is",
"Speech",
":",
"text",
"=",
"Speech",
"(",
"self",
",",
"id",
"=",
"self",
".",
"id",
"+",
"'.speech.'",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"data",
")",
"+",
"1",
")",
")",
"#pylint: disable=redefined-variable-type",
"else",
":",
"assert",
"isinstance",
"(",
"text",
",",
"Text",
")",
"or",
"isinstance",
"(",
"text",
",",
"Speech",
")",
"self",
".",
"data",
".",
"append",
"(",
"text",
")",
"return",
"text"
documentation: Add a text (or speech) to the document. Example 1: ``doc.append(folia.Text)``. Example 2: ``doc.append(folia.Text(doc, id='example.text'))``. Example 3: ``doc.append(folia.Speech)``.
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6621-L6643
parameters: text
question: What does this function do?
answer: Add a text (or speech) to the document.
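A sketch of the auto-id behaviour visible in the body above; the document id ``example`` is an assumption:

```python
from pynlpl.formats import folia

doc = folia.Document(id="example")  # a new, empty document
text = doc.append(folia.Text)       # pass the class, an instance is created
assert text.id == "example.text.1"  # id derived from the document id and position
```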
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.xmldeclarations | language: python

```python
def xmldeclarations(self):
    """Internal method to generate XML nodes for all declarations"""
    l = []
    E = ElementMaker(namespace="http://ilk.uvt.nl/folia", nsmap={None: "http://ilk.uvt.nl/folia", 'xml': "http://www.w3.org/XML/1998/namespace"})
    for annotationtype, set in self.annotations:
        label = None
        #Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
        for key, value in vars(AnnotationType).items():
            if value == annotationtype:
                label = key
                break
        #gather attribs
        if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
            #this is the implicit TextContent declaration, no need to output it explicitly
            continue
        attribs = {}
        if set and set != 'undefined':
            attribs['{' + NSFOLIA + '}set'] = set
        for key, value in self.annotationdefaults[annotationtype][set].items():
            if key == 'annotatortype':
                if value == AnnotatorType.MANUAL:
                    attribs['{' + NSFOLIA + '}' + key] = 'manual'
                elif value == AnnotatorType.AUTO:
                    attribs['{' + NSFOLIA + '}' + key] = 'auto'
            elif key == 'datetime':
                attribs['{' + NSFOLIA + '}' + key] = value.strftime("%Y-%m-%dT%H:%M:%S")  #proper iso-formatting
            elif value:
                attribs['{' + NSFOLIA + '}' + key] = value
        if label:
            l.append(makeelement(E, '{' + NSFOLIA + '}' + label.lower() + '-annotation', **attribs))
        else:
            raise Exception("Invalid annotation type")
    return l
```
"""Internal method to generate XML nodes for all declarations"""
l = []
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
attribs = {}
if set and set != 'undefined':
attribs['{' + NSFOLIA + '}set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
attribs['{' + NSFOLIA + '}' + key] = 'manual'
elif value == AnnotatorType.AUTO:
attribs['{' + NSFOLIA + '}' + key] = 'auto'
elif key == 'datetime':
attribs['{' + NSFOLIA + '}' + key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
attribs['{' + NSFOLIA + '}' + key] = value
if label:
l.append( makeelement(E,'{' + NSFOLIA + '}' + label.lower() + '-annotation', **attribs) )
else:
raise Exception("Invalid annotation type")
return l | [
"def",
"xmldeclarations",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"None",
":",
"\"http://ilk.uvt.nl/folia\"",
",",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
"}",
")",
"for",
"annotationtype",
",",
"set",
"in",
"self",
".",
"annotations",
":",
"label",
"=",
"None",
"#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"AnnotationType",
")",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"annotationtype",
":",
"label",
"=",
"key",
"break",
"#gather attribs",
"if",
"(",
"annotationtype",
"==",
"AnnotationType",
".",
"TEXT",
"or",
"annotationtype",
"==",
"AnnotationType",
".",
"PHON",
")",
"and",
"set",
"==",
"'undefined'",
"and",
"len",
"(",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
")",
"==",
"0",
":",
"#this is the implicit TextContent declaration, no need to output it explicitly",
"continue",
"attribs",
"=",
"{",
"}",
"if",
"set",
"and",
"set",
"!=",
"'undefined'",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}set'",
"]",
"=",
"set",
"for",
"key",
",",
"value",
"in",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'annotatortype'",
":",
"if",
"value",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"'manual'",
"elif",
"value",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"'auto'",
"elif",
"key",
"==",
"'datetime'",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"value",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"#proper iso-formatting",
"elif",
"value",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"value",
"if",
"label",
":",
"l",
".",
"append",
"(",
"makeelement",
"(",
"E",
",",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"label",
".",
"lower",
"(",
")",
"+",
"'-annotation'",
",",
"*",
"*",
"attribs",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid annotation type\"",
")",
"return",
"l"
documentation: Internal method to generate XML nodes for all declarations
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6653-L6690
parameters: (none)
question: What does this function do?
answer: Internal method to generate XML nodes for all declarations
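This method feeds the annotations block produced by ``Document.xml()`` (a later entry); a sketch of inspecting its output directly, assuming ``doc`` is a loaded ``folia.Document``:

```python
from lxml import etree

for node in doc.xmldeclarations():  # one element per declared annotation type/set
    print(etree.tostring(node, pretty_print=True).decode('utf-8'))
```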
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.jsondeclarations | language: python

```python
def jsondeclarations(self):
    """Return all declarations in a form ready to be serialised to JSON.

    Returns:
        list of dict
    """
    l = []
    for annotationtype, set in self.annotations:
        label = None
        #Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
        for key, value in vars(AnnotationType).items():
            if value == annotationtype:
                label = key
                break
        #gather attribs
        if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
            #this is the implicit TextContent declaration, no need to output it explicitly
            continue
        jsonnode = {'annotationtype': label.lower()}
        if set and set != 'undefined':
            jsonnode['set'] = set
        for key, value in self.annotationdefaults[annotationtype][set].items():
            if key == 'annotatortype':
                if value == AnnotatorType.MANUAL:
                    jsonnode[key] = 'manual'
                elif value == AnnotatorType.AUTO:
                    jsonnode[key] = 'auto'
            elif key == 'datetime':
                jsonnode[key] = value.strftime("%Y-%m-%dT%H:%M:%S")  #proper iso-formatting
            elif value:
                jsonnode[key] = value
        if label:
            l.append(jsonnode)
        else:
            raise Exception("Invalid annotation type")
    return l
```
"""Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
"""
l = []
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
jsonnode = {'annotationtype': label.lower()}
if set and set != 'undefined':
jsonnode['set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
jsonnode[key] = 'manual'
elif value == AnnotatorType.AUTO:
jsonnode[key] = 'auto'
elif key == 'datetime':
jsonnode[key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
jsonnode[key] = value
if label:
l.append( jsonnode )
else:
raise Exception("Invalid annotation type")
return l | [
"def",
"jsondeclarations",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"for",
"annotationtype",
",",
"set",
"in",
"self",
".",
"annotations",
":",
"label",
"=",
"None",
"#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"AnnotationType",
")",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"annotationtype",
":",
"label",
"=",
"key",
"break",
"#gather attribs",
"if",
"(",
"annotationtype",
"==",
"AnnotationType",
".",
"TEXT",
"or",
"annotationtype",
"==",
"AnnotationType",
".",
"PHON",
")",
"and",
"set",
"==",
"'undefined'",
"and",
"len",
"(",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
")",
"==",
"0",
":",
"#this is the implicit TextContent declaration, no need to output it explicitly",
"continue",
"jsonnode",
"=",
"{",
"'annotationtype'",
":",
"label",
".",
"lower",
"(",
")",
"}",
"if",
"set",
"and",
"set",
"!=",
"'undefined'",
":",
"jsonnode",
"[",
"'set'",
"]",
"=",
"set",
"for",
"key",
",",
"value",
"in",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'annotatortype'",
":",
"if",
"value",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"'manual'",
"elif",
"value",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"'auto'",
"elif",
"key",
"==",
"'datetime'",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"value",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"#proper iso-formatting",
"elif",
"value",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"value",
"if",
"label",
":",
"l",
".",
"append",
"(",
"jsonnode",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid annotation type\"",
")",
"return",
"l"
documentation: Return all declarations in a form ready to be serialised to JSON. Returns: list of dict
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6692-L6731
parameters: (none)
question: What does this function do?
answer: Return all declarations in a form ready to be serialised to JSON.
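A sketch of serialising the returned list, following the same pattern the ``Document.json()`` docstring documents below; ``doc`` is assumed to be a loaded document:

```python
import json

print(json.dumps(doc.jsondeclarations(), indent=2))
```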
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.xml | language: python

```python
def xml(self):
    """Serialise the document to XML.

    Returns:
        lxml.etree.Element

    See also:
        :meth:`Document.xmlstring`
    """
    self.pendingvalidation()
    E = ElementMaker(namespace="http://ilk.uvt.nl/folia", nsmap={'xml': "http://www.w3.org/XML/1998/namespace", 'xlink': "http://www.w3.org/1999/xlink"})
    attribs = {}
    attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
    #if self.version:
    #    attribs['version'] = self.version
    #else:
    attribs['version'] = FOLIAVERSION
    attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
    metadataattribs = {}
    metadataattribs['{' + NSFOLIA + '}type'] = self.metadatatype
    if isinstance(self.metadata, ExternalMetaData):
        metadataattribs['{' + NSFOLIA + '}src'] = self.metadata.url
    e = E.FoLiA(
        E.metadata(
            E.annotations(
                *self.xmldeclarations()
            ),
            *self.xmlmetadata(),
            **metadataattribs
        ), **attribs)
    for text in self.data:
        e.append(text.xml())
    return e
```
"""Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring`
"""
self.pendingvalidation()
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"})
attribs = {}
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
#if self.version:
# attribs['version'] = self.version
#else:
attribs['version'] = FOLIAVERSION
attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
metadataattribs = {}
metadataattribs['{' + NSFOLIA + '}type'] = self.metadatatype
if isinstance(self.metadata, ExternalMetaData):
metadataattribs['{' + NSFOLIA + '}src'] = self.metadata.url
e = E.FoLiA(
E.metadata(
E.annotations(
*self.xmldeclarations()
),
*self.xmlmetadata(),
**metadataattribs
)
, **attribs)
for text in self.data:
e.append(text.xml())
return e | [
"def",
"xml",
"(",
"self",
")",
":",
"self",
".",
"pendingvalidation",
"(",
")",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
",",
"'xlink'",
":",
"\"http://www.w3.org/1999/xlink\"",
"}",
")",
"attribs",
"=",
"{",
"}",
"attribs",
"[",
"'{http://www.w3.org/XML/1998/namespace}id'",
"]",
"=",
"self",
".",
"id",
"#if self.version:",
"# attribs['version'] = self.version",
"#else:",
"attribs",
"[",
"'version'",
"]",
"=",
"FOLIAVERSION",
"attribs",
"[",
"'generator'",
"]",
"=",
"'pynlpl.formats.folia-v'",
"+",
"LIBVERSION",
"metadataattribs",
"=",
"{",
"}",
"metadataattribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}type'",
"]",
"=",
"self",
".",
"metadatatype",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"ExternalMetaData",
")",
":",
"metadataattribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}src'",
"]",
"=",
"self",
".",
"metadata",
".",
"url",
"e",
"=",
"E",
".",
"FoLiA",
"(",
"E",
".",
"metadata",
"(",
"E",
".",
"annotations",
"(",
"*",
"self",
".",
"xmldeclarations",
"(",
")",
")",
",",
"*",
"self",
".",
"xmlmetadata",
"(",
")",
",",
"*",
"*",
"metadataattribs",
")",
",",
"*",
"*",
"attribs",
")",
"for",
"text",
"in",
"self",
".",
"data",
":",
"e",
".",
"append",
"(",
"text",
".",
"xml",
"(",
")",
")",
"return",
"e"
documentation: Serialise the document to XML. Returns: lxml.etree.Element. See also: :meth:`Document.xmlstring`
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6733-L6773
parameters: (none)
question: What does this function do?
answer: Serialise the document to XML.
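A sketch of turning the returned element into serialised XML; the lxml calls are standard, but the surrounding setup (a loaded ``doc``) is assumed:

```python
from lxml import etree

root = doc.xml()  # lxml.etree element for the whole document
xmlbytes = etree.tostring(root, xml_declaration=True, encoding='utf-8', pretty_print=True)
```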
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.json | language: python

```python
def json(self):
    """Serialise the document to a ``dict`` ready for serialisation to JSON.

    Example::

        import json
        jsondoc = json.dumps(doc.json())
    """
    self.pendingvalidation()
    jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations()}
    if self.version:
        jsondoc['version'] = self.version
    else:
        jsondoc['version'] = FOLIAVERSION
    jsondoc['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
    for text in self.data:
        jsondoc['children'].append(text.json())
    return jsondoc
```
return jsondoc | python | def json(self):
"""Serialise the document to a ``dict`` ready for serialisation to JSON.
Example::
import json
jsondoc = json.dumps(doc.json())
"""
self.pendingvalidation()
jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations() }
if self.version:
jsondoc['version'] = self.version
else:
jsondoc['version'] = FOLIAVERSION
jsondoc['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
for text in self.data:
jsondoc['children'].append(text.json())
return jsondoc | [
"def",
"json",
"(",
"self",
")",
":",
"self",
".",
"pendingvalidation",
"(",
")",
"jsondoc",
"=",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'children'",
":",
"[",
"]",
",",
"'declarations'",
":",
"self",
".",
"jsondeclarations",
"(",
")",
"}",
"if",
"self",
".",
"version",
":",
"jsondoc",
"[",
"'version'",
"]",
"=",
"self",
".",
"version",
"else",
":",
"jsondoc",
"[",
"'version'",
"]",
"=",
"FOLIAVERSION",
"jsondoc",
"[",
"'generator'",
"]",
"=",
"'pynlpl.formats.folia-v'",
"+",
"LIBVERSION",
"for",
"text",
"in",
"self",
".",
"data",
":",
"jsondoc",
"[",
"'children'",
"]",
".",
"append",
"(",
"text",
".",
"json",
"(",
")",
")",
"return",
"jsondoc"
documentation: Serialise the document to a ``dict`` ready for serialisation to JSON. Example: ``import json; jsondoc = json.dumps(doc.json())``
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6775-L6794
parameters: (none)
question: What does this function do?
answer: Serialise the document to a ``dict`` ready for serialisation to JSON.
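Extending the docstring's own example to write the JSON to disk; the output path is hypothetical:

```python
import json

with open("example.folia.json", "w", encoding="utf-8") as f:
    json.dump(doc.json(), f, indent=2)
```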
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.xmlmetadata | language: python

```python
def xmlmetadata(self):
    """Internal method to serialize metadata to XML"""
    E = ElementMaker(namespace="http://ilk.uvt.nl/folia", nsmap={None: "http://ilk.uvt.nl/folia", 'xml': "http://www.w3.org/XML/1998/namespace"})
    elements = []
    if self.metadatatype == "native":
        if isinstance(self.metadata, NativeMetaData):
            for key, value in self.metadata.items():
                elements.append(E.meta(value, id=key))
    else:
        if isinstance(self.metadata, ForeignData):
            #in-document
            m = self.metadata
            while m is not None:
                elements.append(m.xml())
                m = m.next
    for metadata_id, submetadata in self.submetadata.items():
        subelements = []
        attribs = {
            "{http://www.w3.org/XML/1998/namespace}id": metadata_id,
            "type": self.submetadatatype[metadata_id]}
        if isinstance(submetadata, NativeMetaData):
            for key, value in submetadata.items():
                subelements.append(E.meta(value, id=key))
        elif isinstance(submetadata, ExternalMetaData):
            attribs['src'] = submetadata.url
        elif isinstance(submetadata, ForeignData):
            #in-document
            m = submetadata
            while m is not None:
                subelements.append(m.xml())
                m = m.next
        elements.append(E.submetadata(*subelements, **attribs))
    return elements
```
"""Internal method to serialize metadata to XML"""
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
elements = []
if self.metadatatype == "native":
if isinstance(self.metadata, NativeMetaData):
for key, value in self.metadata.items():
elements.append(E.meta(value,id=key) )
else:
if isinstance(self.metadata, ForeignData):
#in-document
m = self.metadata
while m is not None:
elements.append(m.xml())
m = m.next
for metadata_id, submetadata in self.submetadata.items():
subelements = []
attribs = {
"{http://www.w3.org/XML/1998/namespace}id": metadata_id,
"type": self.submetadatatype[metadata_id] }
if isinstance(submetadata, NativeMetaData):
for key, value in submetadata.items():
subelements.append(E.meta(value,id=key) )
elif isinstance(submetadata, ExternalMetaData):
attribs['src'] = submetadata.url
elif isinstance(submetadata, ForeignData):
#in-document
m = submetadata
while m is not None:
subelements.append(m.xml())
m = m.next
elements.append( E.submetadata(*subelements, **attribs))
return elements | [
"def",
"xmlmetadata",
"(",
"self",
")",
":",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"None",
":",
"\"http://ilk.uvt.nl/folia\"",
",",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
"}",
")",
"elements",
"=",
"[",
"]",
"if",
"self",
".",
"metadatatype",
"==",
"\"native\"",
":",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"NativeMetaData",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"metadata",
".",
"items",
"(",
")",
":",
"elements",
".",
"append",
"(",
"E",
".",
"meta",
"(",
"value",
",",
"id",
"=",
"key",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"ForeignData",
")",
":",
"#in-document",
"m",
"=",
"self",
".",
"metadata",
"while",
"m",
"is",
"not",
"None",
":",
"elements",
".",
"append",
"(",
"m",
".",
"xml",
"(",
")",
")",
"m",
"=",
"m",
".",
"next",
"for",
"metadata_id",
",",
"submetadata",
"in",
"self",
".",
"submetadata",
".",
"items",
"(",
")",
":",
"subelements",
"=",
"[",
"]",
"attribs",
"=",
"{",
"\"{http://www.w3.org/XML/1998/namespace}id\"",
":",
"metadata_id",
",",
"\"type\"",
":",
"self",
".",
"submetadatatype",
"[",
"metadata_id",
"]",
"}",
"if",
"isinstance",
"(",
"submetadata",
",",
"NativeMetaData",
")",
":",
"for",
"key",
",",
"value",
"in",
"submetadata",
".",
"items",
"(",
")",
":",
"subelements",
".",
"append",
"(",
"E",
".",
"meta",
"(",
"value",
",",
"id",
"=",
"key",
")",
")",
"elif",
"isinstance",
"(",
"submetadata",
",",
"ExternalMetaData",
")",
":",
"attribs",
"[",
"'src'",
"]",
"=",
"submetadata",
".",
"url",
"elif",
"isinstance",
"(",
"submetadata",
",",
"ForeignData",
")",
":",
"#in-document",
"m",
"=",
"submetadata",
"while",
"m",
"is",
"not",
"None",
":",
"subelements",
".",
"append",
"(",
"m",
".",
"xml",
"(",
")",
")",
"m",
"=",
"m",
".",
"next",
"elements",
".",
"append",
"(",
"E",
".",
"submetadata",
"(",
"*",
"subelements",
",",
"*",
"*",
"attribs",
")",
")",
"return",
"elements"
documentation: Internal method to serialize metadata to XML
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6796-L6828
parameters: (none)
question: What does this function do?
answer: Internal method to serialize metadata to XML
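A sketch of native metadata flowing into this serialisation; the key/value pair is hypothetical, and ``NativeMetaData`` is assumed to be dict-like (as its ``items()`` use in the body above suggests):

```python
from lxml import etree

doc.metadata['title'] = "Example document"  # hypothetical metadata field
for node in doc.xmlmetadata():              # yields <meta id="..."> elements
    print(etree.tostring(node).decode('utf-8'))
```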
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.parsexmldeclarations | language: python

```python
def parsexmldeclarations(self, node):
    """Internal method to parse XML declarations"""
    if self.debug >= 1:
        print("[PyNLPl FoLiA DEBUG] Processing Annotation Declarations", file=stderr)
    self.declareprocessed = True
    for subnode in node:  #pylint: disable=too-many-nested-blocks
        if not isinstance(subnode.tag, str): continue
        if subnode.tag[:25] == '{' + NSFOLIA + '}' and subnode.tag[-11:] == '-annotation':
            prefix = subnode.tag[25:][:-11]
            type = None
            if prefix.upper() in vars(AnnotationType):
                type = vars(AnnotationType)[prefix.upper()]
            else:
                raise Exception("Unknown declaration: " + subnode.tag)
            if 'set' in subnode.attrib and subnode.attrib['set']:
                set = subnode.attrib['set']
            else:
                set = 'undefined'
            if (type, set) in self.annotations:
                if type == AnnotationType.TEXT:
                    #explicit Text declaration, remove the implicit declaration:
                    a = []
                    for t, s in self.annotations:
                        if not (t == AnnotationType.TEXT and s == 'undefined'):
                            a.append((t, s))
                    self.annotations = a
                #raise ValueError("Double declaration of " + subnode.tag + ", set '" + set + "' + is already declared") //doubles are okay says Ko
            else:
                self.annotations.append((type, set))
            #Load set definition
            if set and self.loadsetdefinitions and set not in self.setdefinitions:
                if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
                    try:
                        self.setdefinitions[set] = SetDefinition(set, verbose=self.verbose)  #will raise exception on error
                    except DeepValidationError:
                        print("WARNING: Set " + set + " could not be downloaded, ignoring!", file=sys.stderr)  #warning and ignore
            #Set defaults
            if type in self.annotationdefaults and set in self.annotationdefaults[type]:
                #handle duplicate. If ambiguous: remove defaults
                if 'annotator' in subnode.attrib:
                    if not ('annotator' in self.annotationdefaults[type][set]):
                        self.annotationdefaults[type][set]['annotator'] = subnode.attrib['annotator']
                    elif self.annotationdefaults[type][set]['annotator'] != subnode.attrib['annotator']:
                        del self.annotationdefaults[type][set]['annotator']
                if 'annotatortype' in subnode.attrib:
                    if not ('annotatortype' in self.annotationdefaults[type][set]):
                        self.annotationdefaults[type][set]['annotatortype'] = subnode.attrib['annotatortype']
                    elif self.annotationdefaults[type][set]['annotatortype'] != subnode.attrib['annotatortype']:
                        del self.annotationdefaults[type][set]['annotatortype']
            else:
                defaults = {}
                if 'annotator' in subnode.attrib:
                    defaults['annotator'] = subnode.attrib['annotator']
                if 'annotatortype' in subnode.attrib:
                    if subnode.attrib['annotatortype'] == 'auto':
                        defaults['annotatortype'] = AnnotatorType.AUTO
                    else:
                        defaults['annotatortype'] = AnnotatorType.MANUAL
                if 'datetime' in subnode.attrib:
                    if isinstance(subnode.attrib['datetime'], datetime):
                        defaults['datetime'] = subnode.attrib['datetime']
                    else:
                        defaults['datetime'] = parse_datetime(subnode.attrib['datetime'])
                if not type in self.annotationdefaults:
                    self.annotationdefaults[type] = {}
                self.annotationdefaults[type][set] = defaults
            if 'external' in subnode.attrib and subnode.attrib['external']:
                if self.debug >= 1:
                    print("[PyNLPl FoLiA DEBUG] Loading external document: " + subnode.attrib['external'], file=stderr)
                if not type in self.standoffdocs:
                    self.standoffdocs[type] = {}
                self.standoffdocs[type][set] = {}
                #check if it is already loaded, if multiple references are made to the same doc we reuse the instance
                standoffdoc = None
                for t in self.standoffdocs:
                    for s in self.standoffdocs[t]:
                        for source in self.standoffdocs[t][s]:
                            if source == subnode.attrib['external']:
                                standoffdoc = self.standoffdocs[t][s]
                                break
                        if standoffdoc: break
                    if standoffdoc: break
                if not standoffdoc:
                    if subnode.attrib['external'][:7] == 'http://' or subnode.attrib['external'][:8] == 'https://':
                        #document is remote, download (in memory)
                        try:
                            f = urlopen(subnode.attrib['external'])
                        except:
                            raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
                        try:
                            content = u(f.read())
                        except IOError:
                            raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
                        f.close()
                        standoffdoc = Document(string=content, parentdoc=self, setdefinitions=self.setdefinitions)
                    elif os.path.exists(subnode.attrib['external']):
                        #document is on disk:
                        standoffdoc = Document(file=subnode.attrib['external'], parentdoc=self, setdefinitions=self.setdefinitions)
                    else:
                        #document not found
                        raise DeepValidationError("Unable to find standoff document: " + subnode.attrib['external'])
                self.standoffdocs[type][set][subnode.attrib['external']] = standoffdoc
                standoffdoc.parentdoc = self
            if self.debug >= 1:
                print("[PyNLPl FoLiA DEBUG] Found declared annotation " + subnode.tag + ". Defaults: " + repr(defaults), file=stderr)
```
"""Internal method to parse XML declarations"""
if self.debug >= 1:
print("[PyNLPl FoLiA DEBUG] Processing Annotation Declarations",file=stderr)
self.declareprocessed = True
for subnode in node: #pylint: disable=too-many-nested-blocks
if not isinstance(subnode.tag, str): continue
if subnode.tag[:25] == '{' + NSFOLIA + '}' and subnode.tag[-11:] == '-annotation':
prefix = subnode.tag[25:][:-11]
type = None
if prefix.upper() in vars(AnnotationType):
type = vars(AnnotationType)[prefix.upper()]
else:
raise Exception("Unknown declaration: " + subnode.tag)
if 'set' in subnode.attrib and subnode.attrib['set']:
set = subnode.attrib['set']
else:
set = 'undefined'
if (type,set) in self.annotations:
if type == AnnotationType.TEXT:
#explicit Text declaration, remove the implicit declaration:
a = []
for t,s in self.annotations:
if not (t == AnnotationType.TEXT and s == 'undefined'):
a.append( (t,s) )
self.annotations = a
#raise ValueError("Double declaration of " + subnode.tag + ", set '" + set + "' + is already declared") //doubles are okay says Ko
else:
self.annotations.append( (type, set) )
#Load set definition
if set and self.loadsetdefinitions and set not in self.setdefinitions:
if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
try:
self.setdefinitions[set] = SetDefinition(set,verbose=self.verbose) #will raise exception on error
except DeepValidationError:
print("WARNING: Set " + set + " could not be downloaded, ignoring!",file=sys.stderr) #warning and ignore
#Set defaults
if type in self.annotationdefaults and set in self.annotationdefaults[type]:
#handle duplicate. If ambiguous: remove defaults
if 'annotator' in subnode.attrib:
if not ('annotator' in self.annotationdefaults[type][set]):
self.annotationdefaults[type][set]['annotator'] = subnode.attrib['annotator']
elif self.annotationdefaults[type][set]['annotator'] != subnode.attrib['annotator']:
del self.annotationdefaults[type][set]['annotator']
if 'annotatortype' in subnode.attrib:
if not ('annotatortype' in self.annotationdefaults[type][set]):
self.annotationdefaults[type][set]['annotatortype'] = subnode.attrib['annotatortype']
elif self.annotationdefaults[type][set]['annotatortype'] != subnode.attrib['annotatortype']:
del self.annotationdefaults[type][set]['annotatortype']
else:
defaults = {}
if 'annotator' in subnode.attrib:
defaults['annotator'] = subnode.attrib['annotator']
if 'annotatortype' in subnode.attrib:
if subnode.attrib['annotatortype'] == 'auto':
defaults['annotatortype'] = AnnotatorType.AUTO
else:
defaults['annotatortype'] = AnnotatorType.MANUAL
if 'datetime' in subnode.attrib:
if isinstance(subnode.attrib['datetime'], datetime):
defaults['datetime'] = subnode.attrib['datetime']
else:
defaults['datetime'] = parse_datetime(subnode.attrib['datetime'])
if not type in self.annotationdefaults:
self.annotationdefaults[type] = {}
self.annotationdefaults[type][set] = defaults
if 'external' in subnode.attrib and subnode.attrib['external']:
if self.debug >= 1:
print("[PyNLPl FoLiA DEBUG] Loading external document: " + subnode.attrib['external'],file=stderr)
if not type in self.standoffdocs:
self.standoffdocs[type] = {}
self.standoffdocs[type][set] = {}
#check if it is already loaded, if multiple references are made to the same doc we reuse the instance
standoffdoc = None
for t in self.standoffdocs:
for s in self.standoffdocs[t]:
for source in self.standoffdocs[t][s]:
if source == subnode.attrib['external']:
standoffdoc = self.standoffdocs[t][s]
break
if standoffdoc: break
if standoffdoc: break
if not standoffdoc:
if subnode.attrib['external'][:7] == 'http://' or subnode.attrib['external'][:8] == 'https://':
#document is remote, download (in memory)
try:
f = urlopen(subnode.attrib['external'])
except:
raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
try:
content = u(f.read())
except IOError:
raise DeepValidationError("Unable to download standoff document: " + subnode.attrib['external'])
f.close()
standoffdoc = Document(string=content, parentdoc=self, setdefinitions=self.setdefinitions)
elif os.path.exists(subnode.attrib['external']):
#document is on disk:
standoffdoc = Document(file=subnode.attrib['external'], parentdoc=self, setdefinitions=self.setdefinitions)
else:
#document not found
raise DeepValidationError("Unable to find standoff document: " + subnode.attrib['external'])
self.standoffdocs[type][set][subnode.attrib['external']] = standoffdoc
standoffdoc.parentdoc = self
if self.debug >= 1:
print("[PyNLPl FoLiA DEBUG] Found declared annotation " + subnode.tag + ". Defaults: " + repr(defaults),file=stderr) | [
"def",
"parsexmldeclarations",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"debug",
">=",
"1",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Processing Annotation Declarations\"",
",",
"file",
"=",
"stderr",
")",
"self",
".",
"declareprocessed",
"=",
"True",
"for",
"subnode",
"in",
"node",
":",
"#pylint: disable=too-many-nested-blocks",
"if",
"not",
"isinstance",
"(",
"subnode",
".",
"tag",
",",
"str",
")",
":",
"continue",
"if",
"subnode",
".",
"tag",
"[",
":",
"25",
"]",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"and",
"subnode",
".",
"tag",
"[",
"-",
"11",
":",
"]",
"==",
"'-annotation'",
":",
"prefix",
"=",
"subnode",
".",
"tag",
"[",
"25",
":",
"]",
"[",
":",
"-",
"11",
"]",
"type",
"=",
"None",
"if",
"prefix",
".",
"upper",
"(",
")",
"in",
"vars",
"(",
"AnnotationType",
")",
":",
"type",
"=",
"vars",
"(",
"AnnotationType",
")",
"[",
"prefix",
".",
"upper",
"(",
")",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknown declaration: \"",
"+",
"subnode",
".",
"tag",
")",
"if",
"'set'",
"in",
"subnode",
".",
"attrib",
"and",
"subnode",
".",
"attrib",
"[",
"'set'",
"]",
":",
"set",
"=",
"subnode",
".",
"attrib",
"[",
"'set'",
"]",
"else",
":",
"set",
"=",
"'undefined'",
"if",
"(",
"type",
",",
"set",
")",
"in",
"self",
".",
"annotations",
":",
"if",
"type",
"==",
"AnnotationType",
".",
"TEXT",
":",
"#explicit Text declaration, remove the implicit declaration:",
"a",
"=",
"[",
"]",
"for",
"t",
",",
"s",
"in",
"self",
".",
"annotations",
":",
"if",
"not",
"(",
"t",
"==",
"AnnotationType",
".",
"TEXT",
"and",
"s",
"==",
"'undefined'",
")",
":",
"a",
".",
"append",
"(",
"(",
"t",
",",
"s",
")",
")",
"self",
".",
"annotations",
"=",
"a",
"#raise ValueError(\"Double declaration of \" + subnode.tag + \", set '\" + set + \"' + is already declared\") //doubles are okay says Ko",
"else",
":",
"self",
".",
"annotations",
".",
"append",
"(",
"(",
"type",
",",
"set",
")",
")",
"#Load set definition",
"if",
"set",
"and",
"self",
".",
"loadsetdefinitions",
"and",
"set",
"not",
"in",
"self",
".",
"setdefinitions",
":",
"if",
"set",
"[",
":",
"7",
"]",
"==",
"\"http://\"",
"or",
"set",
"[",
":",
"8",
"]",
"==",
"\"https://\"",
"or",
"set",
"[",
":",
"6",
"]",
"==",
"\"ftp://\"",
":",
"try",
":",
"self",
".",
"setdefinitions",
"[",
"set",
"]",
"=",
"SetDefinition",
"(",
"set",
",",
"verbose",
"=",
"self",
".",
"verbose",
")",
"#will raise exception on error",
"except",
"DeepValidationError",
":",
"print",
"(",
"\"WARNING: Set \"",
"+",
"set",
"+",
"\" could not be downloaded, ignoring!\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"#warning and ignore",
"#Set defaults",
"if",
"type",
"in",
"self",
".",
"annotationdefaults",
"and",
"set",
"in",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
":",
"#handle duplicate. If ambiguous: remove defaults",
"if",
"'annotator'",
"in",
"subnode",
".",
"attrib",
":",
"if",
"not",
"(",
"'annotator'",
"in",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
")",
":",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotator'",
"]",
"=",
"subnode",
".",
"attrib",
"[",
"'annotator'",
"]",
"elif",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotator'",
"]",
"!=",
"subnode",
".",
"attrib",
"[",
"'annotator'",
"]",
":",
"del",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotator'",
"]",
"if",
"'annotatortype'",
"in",
"subnode",
".",
"attrib",
":",
"if",
"not",
"(",
"'annotatortype'",
"in",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
")",
":",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotatortype'",
"]",
"=",
"subnode",
".",
"attrib",
"[",
"'annotatortype'",
"]",
"elif",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotatortype'",
"]",
"!=",
"subnode",
".",
"attrib",
"[",
"'annotatortype'",
"]",
":",
"del",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"'annotatortype'",
"]",
"else",
":",
"defaults",
"=",
"{",
"}",
"if",
"'annotator'",
"in",
"subnode",
".",
"attrib",
":",
"defaults",
"[",
"'annotator'",
"]",
"=",
"subnode",
".",
"attrib",
"[",
"'annotator'",
"]",
"if",
"'annotatortype'",
"in",
"subnode",
".",
"attrib",
":",
"if",
"subnode",
".",
"attrib",
"[",
"'annotatortype'",
"]",
"==",
"'auto'",
":",
"defaults",
"[",
"'annotatortype'",
"]",
"=",
"AnnotatorType",
".",
"AUTO",
"else",
":",
"defaults",
"[",
"'annotatortype'",
"]",
"=",
"AnnotatorType",
".",
"MANUAL",
"if",
"'datetime'",
"in",
"subnode",
".",
"attrib",
":",
"if",
"isinstance",
"(",
"subnode",
".",
"attrib",
"[",
"'datetime'",
"]",
",",
"datetime",
")",
":",
"defaults",
"[",
"'datetime'",
"]",
"=",
"subnode",
".",
"attrib",
"[",
"'datetime'",
"]",
"else",
":",
"defaults",
"[",
"'datetime'",
"]",
"=",
"parse_datetime",
"(",
"subnode",
".",
"attrib",
"[",
"'datetime'",
"]",
")",
"if",
"not",
"type",
"in",
"self",
".",
"annotationdefaults",
":",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"=",
"{",
"}",
"self",
".",
"annotationdefaults",
"[",
"type",
"]",
"[",
"set",
"]",
"=",
"defaults",
"if",
"'external'",
"in",
"subnode",
".",
"attrib",
"and",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
":",
"if",
"self",
".",
"debug",
">=",
"1",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Loading external document: \"",
"+",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
",",
"file",
"=",
"stderr",
")",
"if",
"not",
"type",
"in",
"self",
".",
"standoffdocs",
":",
"self",
".",
"standoffdocs",
"[",
"type",
"]",
"=",
"{",
"}",
"self",
".",
"standoffdocs",
"[",
"type",
"]",
"[",
"set",
"]",
"=",
"{",
"}",
"#check if it is already loaded, if multiple references are made to the same doc we reuse the instance",
"standoffdoc",
"=",
"None",
"for",
"t",
"in",
"self",
".",
"standoffdocs",
":",
"for",
"s",
"in",
"self",
".",
"standoffdocs",
"[",
"t",
"]",
":",
"for",
"source",
"in",
"self",
".",
"standoffdocs",
"[",
"t",
"]",
"[",
"s",
"]",
":",
"if",
"source",
"==",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
":",
"standoffdoc",
"=",
"self",
".",
"standoffdocs",
"[",
"t",
"]",
"[",
"s",
"]",
"break",
"if",
"standoffdoc",
":",
"break",
"if",
"standoffdoc",
":",
"break",
"if",
"not",
"standoffdoc",
":",
"if",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
"[",
":",
"7",
"]",
"==",
"'http://'",
"or",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
"[",
":",
"8",
"]",
"==",
"'https://'",
":",
"#document is remote, download (in memory)",
"try",
":",
"f",
"=",
"urlopen",
"(",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
")",
"except",
":",
"raise",
"DeepValidationError",
"(",
"\"Unable to download standoff document: \"",
"+",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
")",
"try",
":",
"content",
"=",
"u",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"IOError",
":",
"raise",
"DeepValidationError",
"(",
"\"Unable to download standoff document: \"",
"+",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
")",
"f",
".",
"close",
"(",
")",
"standoffdoc",
"=",
"Document",
"(",
"string",
"=",
"content",
",",
"parentdoc",
"=",
"self",
",",
"setdefinitions",
"=",
"self",
".",
"setdefinitions",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
")",
":",
"#document is on disk:",
"standoffdoc",
"=",
"Document",
"(",
"file",
"=",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
",",
"parentdoc",
"=",
"self",
",",
"setdefinitions",
"=",
"self",
".",
"setdefinitions",
")",
"else",
":",
"#document not found",
"raise",
"DeepValidationError",
"(",
"\"Unable to find standoff document: \"",
"+",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
")",
"self",
".",
"standoffdocs",
"[",
"type",
"]",
"[",
"set",
"]",
"[",
"subnode",
".",
"attrib",
"[",
"'external'",
"]",
"]",
"=",
"standoffdoc",
"standoffdoc",
".",
"parentdoc",
"=",
"self",
"if",
"self",
".",
"debug",
">=",
"1",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Found declared annotation \"",
"+",
"subnode",
".",
"tag",
"+",
"\". Defaults: \"",
"+",
"repr",
"(",
"defaults",
")",
",",
"file",
"=",
"stderr",
")"
documentation: Internal method to parse XML declarations
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6833-L6948
parameters: node
question: What does this function do?
answer: Internal method to parse XML declarations
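A sketch of inspecting the state this parser populates during loading; the input path is hypothetical, and ``doc.annotations`` / ``doc.annotationdefaults`` are the attributes the body above fills:

```python
from pynlpl.formats import folia

doc = folia.Document(file="example.folia.xml")  # hypothetical input file
for annotationtype, annotationset in doc.annotations:
    defaults = doc.annotationdefaults.get(annotationtype, {}).get(annotationset, {})
    print(annotationtype, annotationset, defaults)
```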
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.declare | language: python

```python
def declare(self, annotationtype, set, **kwargs):
    """Declare a new annotation type to be used in the document.

    Keyword arguments can be used to set defaults for any annotation of this type and set.

    Arguments:
        annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
        set (str): the set, should formally be a URL pointing to the set definition

    Keyword Arguments:
        annotator (str): Sets a default annotator
        annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
        datetime (datetime.datetime): Sets the default datetime
        alias (str): Defines an alias that may be used in the set attribute of elements instead of the full set name

    Example::

        doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)
    """
    if (sys.version > '3' and not isinstance(set, str)) or (sys.version < '3' and not isinstance(set, (str, unicode))):
        raise ValueError("Set parameter for declare() must be a string")
    if inspect.isclass(annotationtype):
        annotationtype = annotationtype.ANNOTATIONTYPE
    if annotationtype in self.alias_set and set in self.alias_set[annotationtype]:
        raise ValueError("Set " + set + " conflicts with alias, may not be equal!")
    if not (annotationtype, set) in self.annotations:
        self.annotations.append((annotationtype, set))
        if set and self.loadsetdefinitions and not set in self.setdefinitions:
            if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
                self.setdefinitions[set] = SetDefinition(set, verbose=self.verbose)  #will raise exception on error
    if not annotationtype in self.annotationdefaults:
        self.annotationdefaults[annotationtype] = {}
    self.annotationdefaults[annotationtype][set] = kwargs
    if 'alias' in kwargs:
        if annotationtype in self.set_alias and set in self.set_alias[annotationtype] and self.set_alias[annotationtype][set] != kwargs['alias']:
            raise ValueError("Redeclaring set " + set + " with another alias ('" + kwargs['alias'] + "') is not allowed!")
        if annotationtype in self.alias_set and kwargs['alias'] in self.alias_set[annotationtype] and self.alias_set[annotationtype][kwargs['alias']] != set:
            raise ValueError("Redeclaring alias " + kwargs['alias'] + " with another set ('" + set + "') is not allowed!")
        if annotationtype in self.set_alias and kwargs['alias'] in self.set_alias[annotationtype]:
            raise ValueError("Alias " + kwargs['alias'] + " conflicts with set name, may not be equal!")
        if annotationtype not in self.alias_set:
            self.alias_set[annotationtype] = {}
        if annotationtype not in self.set_alias:
            self.set_alias[annotationtype] = {}
        self.alias_set[annotationtype][kwargs['alias']] = set
        self.set_alias[annotationtype][set] = kwargs['alias']
```
"""Declare a new annotation type to be used in the document.
Keyword arguments can be used to set defaults for any annotation of this type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Keyword Arguments:
annotator (str): Sets a default annotator
annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
datetime (datetime.datetime): Sets the default datetime
alias (str): Defines alias that may be used in set attribute of elements instead of the full set name
Example::
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)
"""
if (sys.version > '3' and not isinstance(set,str)) or (sys.version < '3' and not isinstance(set,(str,unicode))):
raise ValueError("Set parameter for declare() must be a string")
if inspect.isclass(annotationtype):
annotationtype = annotationtype.ANNOTATIONTYPE
if annotationtype in self.alias_set and set in self.alias_set[annotationtype]:
raise ValueError("Set " + set + " conflicts with alias, may not be equal!")
if not (annotationtype, set) in self.annotations:
self.annotations.append( (annotationtype,set) )
if set and self.loadsetdefinitions and not set in self.setdefinitions:
if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
self.setdefinitions[set] = SetDefinition(set,verbose=self.verbose) #will raise exception on error
if not annotationtype in self.annotationdefaults:
self.annotationdefaults[annotationtype] = {}
self.annotationdefaults[annotationtype][set] = kwargs
if 'alias' in kwargs:
if annotationtype in self.set_alias and set in self.set_alias[annotationtype] and self.set_alias[annotationtype][set] != kwargs['alias']:
raise ValueError("Redeclaring set " + set + " with another alias ('"+kwargs['alias']+"') is not allowed!")
if annotationtype in self.alias_set and kwargs['alias'] in self.alias_set[annotationtype] and self.alias_set[annotationtype][kwargs['alias']] != set:
raise ValueError("Redeclaring alias " + kwargs['alias'] + " with another set ('"+set+"') is not allowed!")
if annotationtype in self.set_alias and kwargs['alias'] in self.set_alias[annotationtype]:
raise ValueError("Alias " + kwargs['alias'] + " conflicts with set name, may not be equal!")
if annotationtype not in self.alias_set:
self.alias_set[annotationtype] = {}
if annotationtype not in self.set_alias:
self.set_alias[annotationtype] = {}
self.alias_set[annotationtype][kwargs['alias']] = set
self.set_alias[annotationtype][set] = kwargs['alias'] | [
"def",
"declare",
"(",
"self",
",",
"annotationtype",
",",
"set",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"sys",
".",
"version",
">",
"'3'",
"and",
"not",
"isinstance",
"(",
"set",
",",
"str",
")",
")",
"or",
"(",
"sys",
".",
"version",
"<",
"'3'",
"and",
"not",
"isinstance",
"(",
"set",
",",
"(",
"str",
",",
"unicode",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Set parameter for declare() must be a string\"",
")",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"if",
"annotationtype",
"in",
"self",
".",
"alias_set",
"and",
"set",
"in",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
":",
"raise",
"ValueError",
"(",
"\"Set \"",
"+",
"set",
"+",
"\" conflicts with alias, may not be equal!\"",
")",
"if",
"not",
"(",
"annotationtype",
",",
"set",
")",
"in",
"self",
".",
"annotations",
":",
"self",
".",
"annotations",
".",
"append",
"(",
"(",
"annotationtype",
",",
"set",
")",
")",
"if",
"set",
"and",
"self",
".",
"loadsetdefinitions",
"and",
"not",
"set",
"in",
"self",
".",
"setdefinitions",
":",
"if",
"set",
"[",
":",
"7",
"]",
"==",
"\"http://\"",
"or",
"set",
"[",
":",
"8",
"]",
"==",
"\"https://\"",
"or",
"set",
"[",
":",
"6",
"]",
"==",
"\"ftp://\"",
":",
"self",
".",
"setdefinitions",
"[",
"set",
"]",
"=",
"SetDefinition",
"(",
"set",
",",
"verbose",
"=",
"self",
".",
"verbose",
")",
"#will raise exception on error",
"if",
"not",
"annotationtype",
"in",
"self",
".",
"annotationdefaults",
":",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"=",
"kwargs",
"if",
"'alias'",
"in",
"kwargs",
":",
"if",
"annotationtype",
"in",
"self",
".",
"set_alias",
"and",
"set",
"in",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"and",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"!=",
"kwargs",
"[",
"'alias'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Redeclaring set \"",
"+",
"set",
"+",
"\" with another alias ('\"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\"') is not allowed!\"",
")",
"if",
"annotationtype",
"in",
"self",
".",
"alias_set",
"and",
"kwargs",
"[",
"'alias'",
"]",
"in",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"and",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"[",
"kwargs",
"[",
"'alias'",
"]",
"]",
"!=",
"set",
":",
"raise",
"ValueError",
"(",
"\"Redeclaring alias \"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\" with another set ('\"",
"+",
"set",
"+",
"\"') is not allowed!\"",
")",
"if",
"annotationtype",
"in",
"self",
".",
"set_alias",
"and",
"kwargs",
"[",
"'alias'",
"]",
"in",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
":",
"raise",
"ValueError",
"(",
"\"Alias \"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\" conflicts with set name, may not be equal!\"",
")",
"if",
"annotationtype",
"not",
"in",
"self",
".",
"alias_set",
":",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"if",
"annotationtype",
"not",
"in",
"self",
".",
"set_alias",
":",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"[",
"kwargs",
"[",
"'alias'",
"]",
"]",
"=",
"set",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"=",
"kwargs",
"[",
"'alias'",
"]"
documentation: Declare a new annotation type to be used in the document. Keyword arguments can be used to set defaults for any annotation of this type and set. Arguments: annotationtype (the annotation class, e.g. :class:`PosAnnotation`, or a member of :class:`AnnotationType` such as ``AnnotationType.POS``); set (str, formally a URL pointing to the set definition). Keyword arguments: annotator (str), annotatortype (``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``), datetime (datetime.datetime), alias (str, usable in the set attribute of elements instead of the full set name). Example: ``doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)``
split: train | url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6972-L7018
parameters: annotationtype, set, kwargs
question: What does this function do?
answer: Declare a new annotation type to be used in the document.
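A sketch of the documented ``alias`` keyword; the set URL and alias are hypothetical:

```python
from pynlpl.formats import folia

doc = folia.Document(id="example")
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set',
            annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO,
            alias="brown")  # elements may then refer to the set as "brown"
```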
repository: proycon/pynlpl | path: pynlpl/formats/folia.py | function: Document.declared | language: python

```python
def declared(self, annotationtype, set):
    """Checks if the annotation type is present (i.e. declared) in the document.

    Arguments:
        annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
        set (str): the set, should formally be a URL pointing to the set definition (aliases are also supported)

    Example::

        if doc.declared(folia.PosAnnotation, 'http://some/path/brown-tag-set'):
            ..

    Returns:
        bool
    """
    if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
    return ((annotationtype, set) in self.annotations) or (set in self.alias_set and self.alias_set[set] and (annotationtype, self.alias_set[set]) in self.annotations)
```
"""Checks if the annotation type is present (i.e. declared) in the document.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition (aliases are also supported)
Example::
if doc.declared(folia.PosAnnotation, 'http://some/path/brown-tag-set'):
..
Returns:
bool
"""
if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
return ( (annotationtype,set) in self.annotations) or (set in self.alias_set and self.alias_set[set] and (annotationtype, self.alias_set[set]) in self.annotations ) | [
"def",
"declared",
"(",
"self",
",",
"annotationtype",
",",
"set",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"return",
"(",
"(",
"annotationtype",
",",
"set",
")",
"in",
"self",
".",
"annotations",
")",
"or",
"(",
"set",
"in",
"self",
".",
"alias_set",
"and",
"self",
".",
"alias_set",
"[",
"set",
"]",
"and",
"(",
"annotationtype",
",",
"self",
".",
"alias_set",
"[",
"set",
"]",
")",
"in",
"self",
".",
"annotations",
")"
] | Checks if the annotation type is present (i.e. declared) in the document.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition (aliases are also supported)
Example::
if doc.declared(folia.PosAnnotation, 'http://some/path/brown-tag-set'):
..
Returns:
bool | [
"Checks",
"if",
"the",
"annotation",
"type",
"is",
"present",
"(",
"i",
".",
"e",
".",
"declared",
")",
"in",
"the",
"document",
"."
] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7020-L7036 | [
"annotationtype",
"set"
] | What does this function do? | [
"Checks",
"if",
"the",
"annotation",
"type",
"is",
"present",
"(",
"i",
".",
"e",
".",
"declared",
")",
"in",
"the",
"document",
"."
] |
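
A follow-up sketch for declared(), reusing the `doc` from the sketch above; a defensive-declaration pattern:

    if not doc.declared(folia.PosAnnotation, 'http://some/path/brown-tag-set'):
        doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set')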
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.defaultset
language: python
whole_func_string:

    def defaultset(self, annotationtype):
        """Obtain the default set for the specified annotation type.

        Arguments:
            annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.

        Returns:
            the set (str)

        Raises:
            :class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
        """
        if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
        try:
            return list(self.annotationdefaults[annotationtype].keys())[0]
        except KeyError:
            raise NoDefaultError
        except IndexError:
            raise NoDefaultError

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7039-L7058
parameters: annotationtype
question: What does this function do?
answer: Obtain the default set for the specified annotation type.
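
A short sketch for defaultset(); NoDefaultError is raised when the type was never declared or when several sets compete:

    try:
        posset = doc.defaultset(folia.AnnotationType.POS)
    except folia.NoDefaultError:
        posset = None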
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.defaultannotator
language: python
whole_func_string:

    def defaultannotator(self, annotationtype, set=None):
        """Obtain the default annotator for the specified annotation type and set.

        Arguments:
            annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
            set (str): the set, should formally be a URL pointing to the set definition

        Returns:
            the annotator (str)

        Raises:
            :class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
        """
        if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
        if not set: set = self.defaultset(annotationtype)
        try:
            return self.annotationdefaults[annotationtype][set]['annotator']
        except KeyError:
            raise NoDefaultError

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7061-L7080
parameters: annotationtype, set
question: What does this function do?
answer: Obtain the default annotator for the specified annotation type and set.
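
A companion sketch for defaultannotator(); omitting the set falls back on defaultset(), so the same NoDefaultError caveat applies:

    try:
        annotator = doc.defaultannotator(folia.AnnotationType.POS)
    except folia.NoDefaultError:
        annotator = None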
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.title
language: python
whole_func_string:

    def title(self, value=None):
        """Get or set the document's title from/in the metadata

        No arguments: Get the document's title from metadata
        Argument: Set the document's title in metadata
        """
        if not (value is None):
            if (self.metadatatype == "native"):
                self.metadata['title'] = value
            else:
                self._title = value
        if (self.metadatatype == "native"):
            if 'title' in self.metadata:
                return self.metadata['title']
            else:
                return None
        else:
            return self._title

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7127-L7144
parameters: value
question: What does this function do?
answer: Get or set the document's title from/in the metadata
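
A sketch of the getter/setter convention used by this accessor (the date, publisher, license and language rows below follow the same pattern):

    doc.title("A Tale of Two Cities")  # one argument: write the metadata field
    print(doc.title())                 # no arguments: read it back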
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.date
language: python
whole_func_string:

    def date(self, value=None):
        """Get or set the document's date from/in the metadata.

        No arguments: Get the document's date from metadata
        Argument: Set the document's date in metadata
        """
        if not (value is None):
            if (self.metadatatype == "native"):
                self.metadata['date'] = value
            else:
                self._date = value
        if (self.metadatatype == "native"):
            if 'date' in self.metadata:
                return self.metadata['date']
            else:
                return None
        else:
            return self._date

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7146-L7163
parameters: value
question: What does this function do?
answer: Get or set the document's date from/in the metadata.
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.publisher
language: python
whole_func_string:

    def publisher(self, value=None):
        """No arguments: Get the document's publisher from metadata
        Argument: Set the document's publisher in metadata
        """
        if not (value is None):
            if (self.metadatatype == "native"):
                self.metadata['publisher'] = value
            else:
                self._publisher = value
        if (self.metadatatype == "native"):
            if 'publisher' in self.metadata:
                return self.metadata['publisher']
            else:
                return None
        else:
            return self._publisher

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7165-L7180
parameters: value
question: What does this function do?
answer: No arguments: Get the document's publisher from metadata. Argument: Set the document's publisher in metadata.
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.license
language: python
whole_func_string:

    def license(self, value=None):
        """No arguments: Get the document's license from metadata
        Argument: Set the document's license in metadata
        """
        if not (value is None):
            if (self.metadatatype == "native"):
                self.metadata['license'] = value
            else:
                self._license = value
        if (self.metadatatype == "native"):
            if 'license' in self.metadata:
                return self.metadata['license']
            else:
                return None
        else:
            return self._license

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7182-L7197
parameters: value
question: What does this function do?
answer: No arguments: Get the document's license from metadata. Argument: Set the document's license in metadata.
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.language
language: python
whole_func_string:

    def language(self, value=None):
        """No arguments: Get the document's language (ISO-639-3) from metadata
        Argument: Set the document's language (ISO-639-3) in metadata
        """
        if not (value is None):
            if (self.metadatatype == "native"):
                self.metadata['language'] = value
            else:
                self._language = value
        if self.metadatatype == "native":
            if 'language' in self.metadata:
                return self.metadata['language']
            else:
                return None
        else:
            return self._language

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7199-L7214
parameters: value
question: What does this function do?
answer: No arguments: Get the document's language (ISO-639-3) from metadata. Argument: Set the document's language (ISO-639-3) in metadata.
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.parsemetadata
language: python
whole_func_string:

    def parsemetadata(self, node):
        """Internal method to parse metadata"""
        if 'type' in node.attrib:
            self.metadatatype = node.attrib['type']
        else:
            #no type specified, default to native
            self.metadatatype = "native"

        if 'src' in node.attrib:
            self.metadata = ExternalMetaData(node.attrib['src'])
        elif self.metadatatype == "native":
            self.metadata = NativeMetaData()
        else:
            self.metadata = None #may be set below to ForeignData

        for subnode in node:
            if subnode.tag == '{' + NSFOLIA + '}annotations':
                self.parsexmldeclarations(subnode)
            elif subnode.tag == '{' + NSFOLIA + '}meta':
                if self.metadatatype == "native":
                    if subnode.text:
                        self.metadata[subnode.attrib['id']] = subnode.text
                else:
                    raise MetaDataError("Encountered a meta element but metadata type is not native!")
            elif subnode.tag == '{' + NSFOLIA + '}provenance':
                #forward compatibility with FoLiA 2.0; ignore provenance
                print("WARNING: Ignoring provenance data. Use foliapy instead of pynlpl.formats.folia for FoLiA v2.0 compatibility!",file=sys.stderr)
                pass
            elif subnode.tag == '{' + NSFOLIA + '}foreign-data':
                if self.metadatatype == "native":
                    raise MetaDataError("Encountered a foreign-data element but metadata type is native!")
                elif self.metadata is not None:
                    #multiple foreign-data elements, chain:
                    e = self.metadata
                    while e.next is not None:
                        e = e.next
                    e.next = ForeignData(self, node=subnode)
                else:
                    self.metadata = ForeignData(self, node=subnode)
            elif subnode.tag == '{' + NSFOLIA + '}submetadata':
                self.parsesubmetadata(subnode)
            elif subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT': #backward-compatibility for old IMDI without foreign-key
                E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
                self.metadatatype = "imdi"
                self.metadata = ForeignData(self, node=subnode)

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7216-L7261
parameters: node
question: What does this function do?
answer: Internal method to parse metadata
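
A hand-written illustration of the metadata XML this internal method consumes (it normally runs during document parsing; the element content here is made up, and `doc` is the document from the earlier sketches):

    from lxml import etree

    node = etree.fromstring(
        '<metadata xmlns="http://ilk.uvt.nl/folia" type="native">'
        '<meta id="title">Example</meta>'
        '</metadata>')
    doc.parsemetadata(node)
    print(doc.metadata['title'])  # -> Example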
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.select
language: python
whole_func_string:

    def select(self, Class, set=None, recursive=True, ignore=True):
        """See :meth:`AbstractElement.select`"""
        if self.mode == Mode.MEMORY:
            for t in self.data:
                if Class.__name__ == 'Text':
                    yield t
                else:
                    for e in t.select(Class,set,recursive,ignore):
                        yield e

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7427-L7435
parameters: Class, set, recursive, ignore
question: What does this function do?
answer: See :meth:`AbstractElement.select`
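
A minimal iteration sketch for select(), assuming a document loaded from disk (the path is hypothetical):

    from pynlpl.formats import folia

    doc = folia.Document(file="/tmp/example.folia.xml")
    for word in doc.select(folia.Word):
        print(word.text())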
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.count
language: python
whole_func_string:

    def count(self, Class, set=None, recursive=True, ignore=True):
        """See :meth:`AbstractElement.count`"""
        if self.mode == Mode.MEMORY:
            s = 0
            for t in self.data:
                s += sum( 1 for e in t.select(Class,recursive,True ) )
            return s

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7437-L7443
parameters: Class, set, recursive, ignore
question: What does this function do?
answer: See :meth:`AbstractElement.count`
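
A one-line sketch for count(). Note that, as the body above shows, the extra filters are not forwarded: it calls t.select(Class, recursive, True), so `recursive` lands in the `set` slot of the element-level select():

    nwords = doc.count(folia.Word)  # number of Word elements in the document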
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.paragraphs
language: python
whole_func_string:

    def paragraphs(self, index=None):
        """Return a generator of all paragraphs found in the document.

        If an index is specified, return the n'th paragraph only (starting at 0)"""
        if index is None:
            return self.select(Paragraph)
        else:
            if index < 0:
                index = sum(t.count(Paragraph) for t in self.data) + index
            for t in self.data:
                for i,e in enumerate(t.select(Paragraph)):
                    if i == index:
                        return e
            raise IndexError

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7445-L7458
parameters: index
question: What does this function do?
answer: Return a generator of all paragraphs found in the document.
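
A short sketch for paragraphs(); without an index it returns a generator, with an index it returns a single element:

    first = doc.paragraphs(0)
    for paragraph in doc.paragraphs():
        print(paragraph.text())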
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.sentences
language: python
whole_func_string:

    def sentences(self, index=None):
        """Return a generator of all sentences found in the document, except for sentences in quotes.

        If an index is specified, return the n'th sentence only (starting at 0)"""
        if index is None:
            return self.select(Sentence,None,True,[Quote])
        else:
            if index < 0:
                index = sum(t.count(Sentence,None,True,[Quote]) for t in self.data) + index
            for t in self.data:
                for i,e in enumerate(t.select(Sentence,None,True,[Quote])):
                    if i == index:
                        return e
            raise IndexError

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7460-L7473
parameters: index
question: What does this function do?
answer: Return a generator of all sentences found in the document, except for sentences in quotes.
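
The same access pattern works for sentences(); negative indices count from the end:

    last = doc.sentences(-1)
    for sentence in doc.sentences():
        print(sentence.text())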
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/formats/folia.py
func_name: Document.text
language: python
whole_func_string:

    def text(self, cls='current', retaintokenisation=False):
        """Returns the text of the entire document (returns a unicode instance)

        See also:
            :meth:`AbstractElement.text`
        """
        #backward compatibility, old versions didn't have cls as first argument, so if a boolean is passed first we interpret it as the 2nd:
        if cls is True or cls is False:
            retaintokenisation = cls
            cls = 'current'

        s = ""
        for c in self.data:
            if s: s += "\n\n\n"
            try:
                s += c.text(cls, retaintokenisation)
            except NoSuchText:
                continue
        return s

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7493-L7512
parameters: cls, retaintokenisation
question: What does this function do?
answer: Returns the text of the entire document (returns a unicode instance)
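
A sketch for text(); the boolean check at the top keeps old call sites working that passed retaintokenisation as the first positional argument:

    print(doc.text())                         # detokenised text of the 'current' text class
    print(doc.text(retaintokenisation=True))  # keep the original tokenisation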
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/fsa.py
func_name: NFA._states
language: python
whole_func_string:

    def _states(self, state, processedstates=[]): #pylint: disable=dangerous-default-value
        """Iterate over all states in no particular order"""
        processedstates.append(state)
        for nextstate in state.epsilon:
            if not nextstate in processedstates:
                self._states(nextstate, processedstates)
        for _, nextstate in state.transitions:
            if not nextstate in processedstates:
                self._states(nextstate, processedstates)
        return processedstates

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/fsa.py#L97-L109
parameters: state, processedstates
question: What does this function do?
answer: Iterate over all states in no particular order
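
A cautious sketch for this internal helper; `nfa` and its initial-state attribute are assumptions here, and a fresh list is passed explicitly because the mutable default argument would otherwise be shared between calls:

    states = nfa._states(nfa.initialstate, [])
    print(len(states))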
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/common.py
func_name: log
language: python
whole_func_string:

    def log(msg, **kwargs):
        """Generic log method. Will prepend timestamp.

        Keyword arguments:
          system  - Name of the system/module
          indent  - Integer denoting the desired level of indentation
          streams - List of streams to output to
          stream  - Stream to output to (singleton version of streams)
        """
        if 'debug' in kwargs:
            if 'currentdebug' in kwargs:
                if kwargs['currentdebug'] < kwargs['debug']:
                    return False
            else:
                return False #no currentdebug passed, assuming no debug mode and thus skipping message

        s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
        if 'system' in kwargs:
            s += "[" + system + "] "

        if 'indent' in kwargs:
            s += ("\t" * int(kwargs['indent']))

        s += u(msg)

        if s[-1] != '\n':
            s += '\n'

        if 'streams' in kwargs:
            streams = kwargs['streams']
        elif 'stream' in kwargs:
            streams = [kwargs['stream']]
        else:
            streams = [stderr]

        for stream in streams:
            stream.write(s)
        return s

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/common.py#L98-L136
parameters: msg, kwargs
question: What does this function do?
answer: Generic log method. Will prepend timestamp.
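
A small sketch for log(). Note that passing system="..." would raise a NameError in this version, because the body reads the bare name `system` instead of kwargs['system'], so the sketch omits it:

    from sys import stdout
    from pynlpl.common import log

    log("Processing started", stream=stdout, indent=1)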
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/search.py
func_name: AbstractSearch.searchbest
language: python
whole_func_string:

    def searchbest(self):
        """Returns the single best result (if multiple have the same score, the first match is returned)"""
        finalsolution = None
        bestscore = None
        for solution in self:
            if bestscore == None:
                bestscore = solution.score()
                finalsolution = solution
            elif self.minimize:
                score = solution.score()
                if score < bestscore:
                    bestscore = score
                    finalsolution = solution
            elif not self.minimize:
                score = solution.score()
                if score > bestscore:
                    bestscore = score
                    finalsolution = solution
        return finalsolution

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/search.py#L243-L261
parameters: []
question: What does this function do?
answer: Returns the single best result (if multiple have the same score, the first match is returned)
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/search.py
func_name: AbstractSearch.searchtop
language: python
whole_func_string:

    def searchtop(self, n=10):
        """Return the top n best results (or possibly less if not enough is found)"""
        solutions = PriorityQueue([], lambda x: x.score, self.minimize, length=n, blockworse=False, blockequal=False, duplicates=False)
        for solution in self:
            solutions.append(solution)
        return solutions

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/search.py#L263-L268
parameters: n
question: What does this function do?
answer: Return the top n best results (or possibly less if not enough is found)
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/search.py
func_name: AbstractSearch.searchlast
language: python
whole_func_string:

    def searchlast(self, n=10):
        """Return the last n results (or possibly less if not found). Note that the last results are not necessarily the best ones! Depending on the search type."""
        solutions = deque([], n)
        for solution in self:
            solutions.append(solution)
        return solutions

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/search.py#L270-L275
parameters: n
question: What does this function do?
answer: Return the last n results (or possibly less if not found). Note that the last results are not necessarily the best ones! Depending on the search type.
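
A combined sketch for the three search methods in the rows above; `searcher` stands for an instance of some concrete AbstractSearch subclass (an assumption, since the class itself is abstract):

    best = searcher.searchbest()   # single best-scoring solution
    top = searcher.searchtop(5)    # PriorityQueue with the 5 best solutions
    last = searcher.searchlast(5)  # deque with the 5 most recently visited solutions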
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/clients/cornetto.py
func_name: CornettoClient.get_syn_ids_by_lemma
language: python
whole_func_string:

    def get_syn_ids_by_lemma(self, lemma):
        """Returns a list of synset IDs based on a lemma"""
        if not isinstance(lemma,unicode):
            lemma = unicode(lemma,'utf-8')

        http, resp, content = self.connect()

        params = ""
        fragment = ""
        path = "cdb_syn"
        if self.debug:
            printf( "cornettodb/views/query_remote_syn_lemma: db_opt: %s" % path )
        query_opt = "dict_search"
        if self.debug:
            printf( "cornettodb/views/query_remote_syn_lemma: query_opt: %s" % query_opt )

        qdict = {}
        qdict[ "action" ] = "queryList"
        qdict[ "word" ] = lemma.encode('utf-8')
        query = urllib.urlencode( qdict )

        db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
        db_url = urlparse.urlunparse( db_url_tuple )
        if self.debug:
            printf( "db_url: %s" % db_url )

        resp, content = http.request( db_url, "GET" )
        if self.debug:
            printf( "resp:\n%s" % resp )
            printf( "content:\n%s" % content )
            # printf( "content is of type: %s" % type( content ) )

        dict_list = []
        dict_list = eval( content ) # string to list

        synsets = []
        items = len( dict_list )
        if self.debug:
            printf( "items: %d" % items )

        # syn dict: like lu dict, but without pos: part-of-speech
        for dict in dict_list:
            if self.debug:
                printf( dict )
            seq_nr = dict[ "seq_nr" ] # sense number
            value = dict[ "value" ]   # lexical unit identifier
            form = dict[ "form" ]     # lemma
            label = dict[ "label" ]   # label to be shown
            if self.debug:
                printf( "seq_nr: %s" % seq_nr )
                printf( "value: %s" % value )
                printf( "form: %s" % form )
                printf( "label: %s" % label )
            if value != "":
                synsets.append( value )

        return synsets

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L96-L160
parameters: lemma
question: What does this function do?
answer: Returns a list of synset IDs based on a lemma
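
A Python 2 sketch for the Cornetto client (the module uses `unicode` and the Python 2 urllib/urlparse APIs); the default constructor and a reachable Cornetto server are assumptions:

    from pynlpl.clients.cornetto import CornettoClient

    client = CornettoClient()
    syn_ids = client.get_syn_ids_by_lemma("fiets")
    print(syn_ids)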
repository_name: proycon/pynlpl
func_path_in_repository: pynlpl/clients/cornetto.py
func_name: CornettoClient.get_synset_xml
language: python
whole_func_string:

    def get_synset_xml(self,syn_id):
        """
        call cdb_syn with synset identifier -> returns the synset xml;
        """
        http, resp, content = self.connect()

        params = ""
        fragment = ""
        path = "cdb_syn"
        if self.debug:
            printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )

        # output_opt: plain, html, xml
        # 'xml' is actually xhtml (with markup), but it is not valid xml!
        # 'plain' is actually valid xml (without markup)
        output_opt = "plain"
        if self.debug:
            printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )

        action = "runQuery"
        if self.debug:
            printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
            printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )

        qdict = {}
        qdict[ "action" ] = action
        qdict[ "query" ] = syn_id
        qdict[ "outtype" ] = output_opt
        query = urllib.urlencode( qdict )

        db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
        db_url = urlparse.urlunparse( db_url_tuple )
        if self.debug:
            printf( "db_url: %s" % db_url )

        resp, content = http.request( db_url, "GET" )
        if self.debug:
            printf( "resp:\n%s" % resp )
            # printf( "content:\n%s" % content )
            # printf( "content is of type: %s" % type( content ) ) #<type 'str'>

        xml_data = eval( content )
        return etree.fromstring( xml_data )

split_name: train
func_code_url: https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L227-L272
parameters: syn_id
question: What does this function do?
answer: call cdb_syn with synset identifier -> returns the synset xml;
proycon/pynlpl | pynlpl/clients/cornetto.py | CornettoClient.get_lus_from_synset | python | train | parameters: (syn_id) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L275-L288

def get_lus_from_synset(self, syn_id):
    """Returns a list of (word, lu_id) tuples given a synset ID"""
    root = self.get_synset_xml(syn_id)
    elem_synonyms = root.find( ".//synonyms" )
    lus = []
    for elem_synonym in elem_synonyms:
        synonym_str = elem_synonym.get( "c_lu_id-previewtext" )  # get "c_lu_id-previewtext" attribute
        # synonym_str ends with ":<num>"
        synonym = synonym_str.split( ':' )[ 0 ].strip()
        lus.append( (synonym, elem_synonym.get( "c_lu_id") ) )
    return lus
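Usage sketch (not part of the row: it assumes a reachable Cornetto server; the constructor form and the synset id "d_n-35099" are illustrative assumptions):

client = CornettoClient()   # assumed default connection settings; requires a running Cornetto server
for word, lu_id in client.get_lus_from_synset("d_n-35099"):   # hypothetical synset id
    print(word, lu_id)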
proycon/pynlpl | pynlpl/clients/cornetto.py | CornettoClient.get_lu_from_synset | python | train | parameters: (syn_id, lemma) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L291-L317

def get_lu_from_synset(self, syn_id, lemma = None):
    """Returns (lu_id, synonyms=[(word, lu_id)] ) tuple given a synset ID and a lemma"""
    if not lemma:
        return self.get_lus_from_synset(syn_id) #alias
    if not isinstance(lemma,unicode):
        lemma = unicode(lemma,'utf-8')
    root = self.get_synset_xml(syn_id)
    elem_synonyms = root.find( ".//synonyms" )
    lu_id = None
    synonyms = []
    for elem_synonym in elem_synonyms:
        synonym_str = elem_synonym.get( "c_lu_id-previewtext" )  # get "c_lu_id-previewtext" attribute
        # synonym_str ends with ":<num>"
        synonym = synonym_str.split( ':' )[ 0 ].strip()
        if synonym != lemma:
            synonyms.append( (synonym, elem_synonym.get("c_lu_id")) )
            if self.debug:
                printf( "synonym add: %s" % synonym )
        else:
            lu_id = elem_synonym.get( "c_lu_id" )  # get "c_lu_id" attribute
            if self.debug:
                printf( "lu_id: %s" % lu_id )
                printf( "synonym skip lemma: %s" % synonym )
    return lu_id, synonyms
proycon/pynlpl | pynlpl/formats/dutchsemcor.py | WSDSystemOutput.senses | python | train | parameters: (bestonly) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/dutchsemcor.py#L139-L147

def senses(self, bestonly=False):
    """Returns a list of all predicted senses"""
    l = []
    for word_id, senses, distance in self:
        for sense, confidence in senses:
            if not sense in l: l.append(sense)
            if bestonly:
                break
    return l
proycon/pynlpl | pynlpl/clients/frogclient.py | FrogClient.process | python | train | parameters: (input_data, source_encoding, return_unicode, oldfrog) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/frogclient.py#L40-L98

def process(self, input_data, source_encoding="utf-8", return_unicode=True, oldfrog=False):
    """Receives input_data in the form of a str or unicode object, passes this to the server, with proper consideration for the encodings, and returns the Frog output as a list of tuples: (word,pos,lemma,morphology), each of these is a proper unicode object unless return_unicode is set to False, in which case raw strings will be returned. Return_unicode is no longer optional, it is fixed to True, parameter is still there only for backwards-compatibility."""
    if isinstance(input_data, list) or isinstance(input_data, tuple):
        input_data = " ".join(input_data)

    input_data = u(input_data, source_encoding) #decode (or preferably do this in an earlier stage)
    input_data = input_data.strip(' \t\n')

    s = input_data.encode(self.server_encoding) + b'\r\n'
    if not oldfrog: s += b'EOT\r\n'
    self.socket.sendall(s) #send to socket in desired encoding

    output = []
    done = False
    while not done:
        data = b""
        while not data.endswith(b'\n'):
            moredata = self.socket.recv(self.BUFSIZE)
            if not moredata: break
            data += moredata
        data = u(data, self.server_encoding)

        for line in data.strip(' \t\r\n').split('\n'):
            if line == "READY":
                done = True
                break
            elif line:
                line = line.split('\t') #split on tab
                if len(line) > 4 and line[0].isdigit(): #first column is token number
                    if line[0] == '1' and output:
                        if self.returnall:
                            output.append( (None,None,None,None,None,None,None,None) )
                        else:
                            output.append( (None,None,None,None) )
                    fields = line[1:]
                    parse1 = parse2 = ner = chunk = ""
                    word, lemma, morph, pos = fields[0:4]
                    if len(fields) > 5:
                        ner = fields[5]
                    if len(fields) > 6:
                        chunk = fields[6]
                    if len(fields) >= 8:
                        parse1 = fields[7]
                        parse2 = fields[8]
                    if len(fields) < 5:
                        raise Exception("Can't process response line from Frog: ", repr(line), " got unexpected number of fields ", str(len(fields) + 1))

                    if self.returnall:
                        output.append( (word,lemma,morph,pos,ner,chunk,parse1,parse2) )
                    else:
                        output.append( (word,lemma,morph,pos) )
    return output
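Usage sketch (not part of the row: it requires a Frog server already listening; host, port and the constructor form are assumptions):

client = FrogClient('localhost', 12345)   # assumed connection details
for word, lemma, morph, pos in client.process("Dit is een test."):
    print(word, pos)   # sentence boundaries show up as (None, None, None, None) tuples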
proycon/pynlpl | pynlpl/clients/frogclient.py | FrogClient.align | python | train | parameters: (inputwords, outputwords) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/frogclient.py#L115-L129

def align(self, inputwords, outputwords):
    """For each inputword, provides the index of the outputword"""
    alignment = []
    cursor = 0
    for inputword in inputwords:
        if len(outputwords) > cursor and outputwords[cursor] == inputword:
            alignment.append(cursor)
            cursor += 1
        elif len(outputwords) > cursor+1 and outputwords[cursor+1] == inputword:
            alignment.append(cursor+1)
            cursor += 2
        else:
            alignment.append(None)
            cursor += 1
    return alignment
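A worked trace of the alignment logic (comment-only, since constructing a FrogClient opens a socket to a live server):

# align(["a", "b", "c"], ["a", "x", "b", "c"]) -> [0, 2, 3]
# "a" matches at cursor 0; "b" is found one position ahead (index 2), so the
# cursor advances by two; "c" then matches at index 3. A word found at neither
# position yields None and advances the cursor by one.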
proycon/pynlpl | pynlpl/textprocessors.py | calculate_overlap | python | train | parameters: (haystack, needle, allowpartial) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L228-L262

def calculate_overlap(haystack, needle, allowpartial=True):
    """Calculate the overlap between two sequences. Yields (overlap, placement) tuples (multiple because there may be multiple overlaps!). The former is the part of the sequence that overlaps, and the latter is -1 if the overlap is on the left side, 0 if it is a subset, 1 if it overlaps on the right side, 2 if its an identical match"""
    needle = tuple(needle)
    haystack = tuple(haystack)
    solutions = []

    #equality check
    if needle == haystack:
        return [(needle, 2)]

    if allowpartial:
        minl = 1
    else:
        minl = len(needle)

    for l in range(minl, min(len(needle), len(haystack))+1):
        #print "LEFT-DEBUG", l,":", needle[-l:], " vs ", haystack[:l]
        #print "RIGHT-DEBUG", l,":", needle[:l], " vs ", haystack[-l:]

        #Search for overlap left (including partial overlap!)
        if needle[-l:] == haystack[:l]:
            #print "LEFT MATCH"
            solutions.append( (needle[-l:], -1) )

        #Search for overlap right (including partial overlap!)
        if needle[:l] == haystack[-l:]:
            #print "RIGHT MATCH"
            solutions.append( (needle[:l], 1) )

    if len(needle) <= len(haystack):
        options = list(iter(Windower(haystack, len(needle), beginmarker=None, endmarker=None)))
        for option in options[1:-1]:
            if option == needle:
                #print "SUBSET MATCH"
                solutions.append( (needle, 0) )
    return solutions
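A small worked example (expected value derived from the code above; assumes pynlpl is installed):

from pynlpl.textprocessors import calculate_overlap

print(calculate_overlap(("a", "b", "c", "d"), ("c", "d", "e")))
# expected: [(('c', 'd'), 1)] -- the needle's prefix matches the haystack's
# tail, i.e. the needle overlaps on the right side of the haystack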
proycon/pynlpl | pynlpl/textprocessors.py | tokenize | python | train | parameters: (text, regexps) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L317-L386

def tokenize(text, regexps=TOKENIZERRULES):
    """Tokenizes a string and returns a list of tokens

    :param text: The text to tokenise
    :type text: string
    :param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
    :type regexps: Tuple/list of regular expressions to use in tokenisation
    :rtype: Returns a list of tokens

    Examples:

    >>> for token in tokenize("This is a test."):
    ...    print(token)
    This
    is
    a
    test
    .
    """
    for i, regexp in list(enumerate(regexps)):
        if isstring(regexp):
            regexps[i] = re.compile(regexp)

    tokens = []
    begin = 0
    for i, c in enumerate(text):
        if begin > i:
            continue
        elif i == begin:
            m = False
            for regexp in regexps:
                m = regexp.findall(text[i:i+300])
                if m:
                    tokens.append(m[0])
                    begin = i + len(m[0])
                    break
            if m: continue

        if c in string.punctuation or c in WHITESPACE:
            prev = text[i-1] if i > 0 else ""
            next = text[i+1] if i < len(text)-1 else ""
            if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
                #punctuation in between numbers, keep as one token
                pass
            elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
                #quote in between chars, keep...
                pass
            elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together
                continue
            elif c == '\r' and prev == '\n':
                #ignore
                begin = i+1
                continue
            else:
                token = text[begin:i]
                if token: tokens.append(token)
                if c not in WHITESPACE:
                    tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too
                begin = i + 1 #set the begin cursor

    if begin <= len(text) - 1:
        token = text[begin:]
        tokens.append(token)

    return tokens
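A short example of the special cases (expected output derived from the rules above; assumes pynlpl is installed):

from pynlpl.textprocessors import tokenize

print(tokenize("He paid 3.50, didn't he?"))
# expected: ['He', 'paid', '3.50', ',', "didn't", 'he', '?']
# the period between digits and the quote between letters do not split tokens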
proycon/pynlpl | pynlpl/textprocessors.py | split_sentences | python | train | parameters: (tokens) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L403-L411

def split_sentences(tokens):
    """Split sentences (based on tokenised data), returns sentences as a list of lists of tokens, each sentence is a list of tokens"""
    begin = 0
    for i, token in enumerate(tokens):
        if is_end_of_sentence(tokens, i):
            yield tokens[begin:i+1]
            begin = i+1
    if begin <= len(tokens)-1:
        yield tokens[begin:]
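Usage sketch (the exact boundaries depend on is_end_of_sentence, so the output shown is an assumption):

from pynlpl.textprocessors import tokenize, split_sentences

for sentence in split_sentences(tokenize("This is one. This is two.")):
    print(sentence)
# assumed output: ['This', 'is', 'one', '.'] and ['This', 'is', 'two', '.']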
proycon/pynlpl | pynlpl/textprocessors.py | strip_accents | python | train | parameters: (s, encoding) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L415-L424

def strip_accents(s, encoding= 'utf-8'):
    """Strip characters with diacritics and return a flat ascii representation"""
    if sys.version < '3':
        if isinstance(s,unicode):
            return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
        else:
            return unicodedata.normalize('NFKD', unicode(s,encoding)).encode('ASCII', 'ignore')
    else:
        if isinstance(s,bytes): s = str(s,encoding)
        return str(unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore'),'ascii')
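Example (assumes pynlpl is installed; on Python 3 the result is a plain str):

from pynlpl.textprocessors import strip_accents

print(strip_accents("café naïve"))   # -> cafe naive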
proycon/pynlpl | pynlpl/textprocessors.py | swap | python | train | parameters: (tokens, maxdist) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L426-L441

def swap(tokens, maxdist=2):
    """Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations."""
    assert maxdist >= 2
    tokens = list(tokens)
    if maxdist > len(tokens):
        maxdist = len(tokens)
    l = len(tokens)
    for i in range(0, l - 1):
        for permutation in permutations(tokens[i:i+maxdist]):
            if permutation != tuple(tokens[i:i+maxdist]):
                newtokens = tokens[:i]
                newtokens += permutation
                newtokens += tokens[i+maxdist:]
                yield newtokens
        if maxdist == len(tokens):
            break
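Worked example (output checked against the generator logic above; assumes pynlpl is installed):

from pynlpl.textprocessors import swap

for variant in swap(["a", "b", "c"], maxdist=2):
    print(variant)
# ['b', 'a', 'c']
# ['a', 'c', 'b']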
proycon/pynlpl | pynlpl/textprocessors.py | find_keyword_in_context | python | train | parameters: (tokens, keyword, contextsize) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L444-L455

def find_keyword_in_context(tokens, keyword, contextsize=1):
    """Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list"""
    if isinstance(keyword,tuple) and isinstance(keyword,list):  # note: this condition is always False; 'or' was presumably intended
        l = len(keyword)
    else:
        keyword = (keyword,)
        l = 1
    n = l + contextsize*2
    focuspos = contextsize + 1
    for ngram in Windower(tokens, n, None, None):
        if ngram[focuspos:focuspos+l] == keyword:
            yield ngram[:focuspos], ngram[focuspos:focuspos+l], ngram[focuspos+l+1:]
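A trace of the code as written (comment-only; the ngrams come from Windower):

# find_keyword_in_context(["a", "b", "c", "d"], "c", contextsize=1)
# n = 3 and focuspos = 2, so the keyword is tested in the *last* slot of each
# trigram; the single match yields (("a","b"), ("c",), ()) -- the asymmetric
# context follows from focuspos = contextsize + 1 rather than contextsize.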
proycon/pynlpl | pynlpl/datatypes.py | FIFOQueue.pop | python | train | parameters: () | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L65-L72

def pop(self):
    """Retrieve the next element in line, this will remove it from the queue"""
    e = self.data[self.start]
    self.start += 1
    if self.start > 5 and self.start > len(self.data)//2:
        self.data = self.data[self.start:]
        self.start = 0
    return e
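Usage sketch (the constructor form is an assumption inferred from self.data and self.start):

q = FIFOQueue([10, 20, 30])   # assumed: initialised with a list
print(q.pop())   # 10 -- popping advances an internal cursor; the list is only
                 # compacted once the consumed prefix passes half its length,
                 # which makes pops amortised O(1) without per-pop deletions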
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.append | python | train | parameters: (item) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L97-L142

def append(self, item):
    """Adds an item to the priority queue (in the right place), returns True if successfull, False if the item was blocked (because of a bad score)"""
    f = self.f(item)
    if callable(f):
        score = f()
    else:
        score = f

    if not self.duplicates:
        for s, i in self.data:
            if s == score and item == i:
                #item is a duplicate, don't add it
                return False

    if self.length and len(self.data) == self.length:
        #Fixed-length priority queue, abort when queue is full and new item scores worst than worst scoring item.
        if self.minimize:
            worstscore = self.data[-1][0]
            if score >= worstscore:
                return False
        else:
            worstscore = self.data[0][0]
            if score <= worstscore:
                return False

    if self.blockworse and self.bestscore != None:
        if self.minimize:
            if score > self.bestscore:
                return False
        else:
            if score < self.bestscore:
                return False
    if self.blockequal and self.bestscore != None:
        if self.bestscore == score:
            return False
    if (self.bestscore == None) or (self.minimize and score < self.bestscore) or (not self.minimize and score > self.bestscore):
        self.bestscore = score

    bisect.insort(self.data, (score, item))

    if self.length:
        #fixed length queue: queue is now too long, delete worst items
        while len(self.data) > self.length:
            if self.minimize:
                del self.data[-1]
            else:
                del self.data[0]
    return True
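Usage sketch (constructor arguments are assumptions inferred from the attributes this method reads: f, minimize, length, duplicates, blockworse, blockequal):

pq = PriorityQueue([], f=lambda x: x[1], minimize=True, length=3, duplicates=False)
pq.append(("b", 0.2))   # True  -- lowest score so far, becomes self.bestscore
pq.append(("a", 0.5))   # True
pq.append(("a", 0.5))   # False -- same score and item, rejected as a duplicate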
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.pop | python | train | parameters: () | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L173-L178

def pop(self):
    """Retrieve the next element in line, this will remove it from the queue"""
    if self.minimize:
        return self.data.pop(0)[1]
    else:
        return self.data.pop()[1]
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.score | python | train | parameters: (i) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L181-L186

def score(self, i):
    """Return the score for item x (cheap lookup), Item 0 is always the best item"""
    if self.minimize:
        return self.data[i][0]
    else:
        return self.data[(-1 * i) - 1][0]
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.prune | python | train | parameters: (n) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L188-L193

def prune(self, n):
    """prune all but the first (=best) n items"""
    if self.minimize:
        self.data = self.data[:n]
    else:
        self.data = self.data[-1 * n:]
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.randomprune | python | train | parameters: (n) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L196-L198

def randomprune(self, n):
    """prune down to n items at random, disregarding their score"""
    self.data = random.sample(self.data, n)
proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.prunebyscore | python | train | parameters: (score, retainequalscore) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L205-L217

def prunebyscore(self, score, retainequalscore=False):
    """Deletes all items below/above a certain score from the queue, depending on whether minimize is True or False. Note: It is recommended (more efficient) to use blockworse=True / blockequal=True instead! Preventing the addition of 'worse' items."""
    if retainequalscore:
        if self.minimize:
            f = lambda x: x[0] <= score
        else:
            f = lambda x: x[0] >= score
    else:
        if self.minimize:
            f = lambda x: x[0] < score
        else:
            f = lambda x: x[0] > score
    self.data = filter(f, self.data)
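One portability note: on Python 3, filter() returns a lazy iterator, so after this call self.data would no longer support the indexing and bisect.insort used elsewhere in the class. A list-materialising variant keeps the same semantics:

# Python 3-safe equivalent of the final line (sketch, not the upstream code):
#     self.data = [x for x in self.data if f(x)]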
proycon/pynlpl | pynlpl/datatypes.py | Tree.append | python | train | parameters: (item) | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L261-L267

def append(self, item):
    """Add an item to the Tree"""
    if not isinstance(item, Tree):
        return ValueError("Can only append items of type Tree")  # note: returns the error instead of raising it; 'raise' was presumably intended
    if not self.children: self.children = []
    item.parent = self
    self.children.append(item)
proycon/pynlpl | pynlpl/datatypes.py | Trie.size | python | train | parameters: () | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L361-L366

def size(self):
    """Size is number of nodes under the trie, including the current node"""
    if self.children:
        return sum( ( c.size() for c in self.children.values() ) ) + 1
    else:
        return 1
proycon/pynlpl | pynlpl/datatypes.py | Trie.walk | def walk(self, leavesonly=True, maxdepth=None, _depth = 0):
"""Depth-first search, walking through trie, returning all encounterd nodes (by default only leaves)"""
if self.children:
if not maxdepth or (maxdepth and _depth < maxdepth):
for key, child in self.children.items():
if child.leaf():
yield child
else:
for results in child.walk(leavesonly, maxdepth, _depth + 1):
yield results | python | def walk(self, leavesonly=True, maxdepth=None, _depth = 0):
"""Depth-first search, walking through trie, returning all encounterd nodes (by default only leaves)"""
if self.children:
if not maxdepth or (maxdepth and _depth < maxdepth):
for key, child in self.children.items():
if child.leaf():
yield child
else:
for results in child.walk(leavesonly, maxdepth, _depth + 1):
yield results | [
"def", "walk", "(", "self", ",", "leavesonly", "=", "True", ",", "maxdepth", "=", "None", ",", "_depth", "=", "0", ")", ":", "if", "self", ".", "children", ":", "if", "not", "maxdepth", "or", "(", "maxdepth", "and", "_depth", "<", "maxdepth", ")", ":", "for", "key", ",", "child", "in", "self", ".", "children", ".", "items", "(", ")", ":", "if", "child", ".", "leaf", "(", ")", ":", "yield", "child", "else", ":", "for", "results", "in", "child", ".", "walk", "(", "leavesonly", ",", "maxdepth", ",", "_depth", "+", "1", ")", ":", "yield", "results"
] | Depth-first search, walking through trie, returning all encounterd nodes (by default only leaves) | ["Depth", "-", "first", "search", "walking", "through", "trie", "returning", "all", "encounterd", "nodes", "(", "by", "default", "only", "leaves", ")"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L392-L401 | ["leavesonly", "maxdepth", "_depth"] | What does this function do? | ["Depth", "-", "first", "search", "walking", "through", "trie", "returning", "all", "encounterd", "nodes", "(", "by", "default", "only", "leaves", ")"]
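A hedged sketch of consuming the Trie.walk generator above; Trie() construction and population are assumptions, as these records only show size() and walk():

from pynlpl.datatypes import Trie

trie = Trie()   # assumed no-argument constructor
# ... populate the trie here (the population API is not shown in these records) ...
for node in trie.walk(maxdepth=3):   # depth-first; yields leaf nodes by default
    print(node)
print(trie.size())   # number of nodes, including the current (root) node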
proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocument.sentences | def sentences(self):
"""Iterate over all sentences (sentence_id, sentence) in the document, sentence is a list of 4-tuples (word,id,pos,lemma)"""
prevp = 0
prevs = 0
sentence = [];
sentence_id = ""
for word, id, pos, lemma in iter(self):
try:
doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if ((p != prevp) or (s != prevs)) and sentence:
yield sentence_id, sentence
sentence = []
sentence_id = doc_id + '.' + ptype + '.' + str(p) + '.s.' + str(s)
prevp = p
except IndexError:
doc_id, s, w = re.findall('([\w\d-]+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if s != prevs and sentence:
yield sentence_id, sentence
sentence = []
sentence_id = doc_id + '.s.' + str(s)
sentence.append( (word,id,pos,lemma) )
prevs = s
if sentence:
yield sentence_id, sentence | python | def sentences(self):
"""Iterate over all sentences (sentence_id, sentence) in the document, sentence is a list of 4-tuples (word,id,pos,lemma)"""
prevp = 0
prevs = 0
sentence = [];
sentence_id = ""
for word, id, pos, lemma in iter(self):
try:
doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if ((p != prevp) or (s != prevs)) and sentence:
yield sentence_id, sentence
sentence = []
sentence_id = doc_id + '.' + ptype + '.' + str(p) + '.s.' + str(s)
prevp = p
except IndexError:
doc_id, s, w = re.findall('([\w\d-]+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if s != prevs and sentence:
yield sentence_id, sentence
sentence = []
sentence_id = doc_id + '.s.' + str(s)
sentence.append( (word,id,pos,lemma) )
prevs = s
if sentence:
yield sentence_id, sentence | [
"def", "sentences", "(", "self", ")", ":", "prevp", "=", "0", "prevs", "=", "0", "sentence", "=", "[", "]", "sentence_id", "=", "\"\"", "for", "word", ",", "id", ",", "pos", ",", "lemma", "in", "iter", "(", "self", ")", ":", "try", ":", "doc_id", ",", "ptype", ",", "p", ",", "s", ",", "w", "=", "re", ".", "findall", "(", "'([\\w\\d-]+)\\.(p|head)\\.(\\d+)\\.s\\.(\\d+)\\.w\\.(\\d+)'", ",", "id", ")", "[", "0", "]", "if", "(", "(", "p", "!=", "prevp", ")", "or", "(", "s", "!=", "prevs", ")", ")", "and", "sentence", ":", "yield", "sentence_id", ",", "sentence", "sentence", "=", "[", "]", "sentence_id", "=", "doc_id", "+", "'.'", "+", "ptype", "+", "'.'", "+", "str", "(", "p", ")", "+", "'.s.'", "+", "str", "(", "s", ")", "prevp", "=", "p", "except", "IndexError", ":", "doc_id", ",", "s", ",", "w", "=", "re", ".", "findall", "(", "'([\\w\\d-]+)\\.s\\.(\\d+)\\.w\\.(\\d+)'", ",", "id", ")", "[", "0", "]", "if", "s", "!=", "prevs", "and", "sentence", ":", "yield", "sentence_id", ",", "sentence", "sentence", "=", "[", "]", "sentence_id", "=", "doc_id", "+", "'.s.'", "+", "str", "(", "s", ")", "sentence", ".", "append", "(", "(", "word", ",", "id", ",", "pos", ",", "lemma", ")", ")", "prevs", "=", "s", "if", "sentence", ":", "yield", "sentence_id", ",", "sentence"
] | Iterate over all sentences (sentence_id, sentence) in the document, sentence is a list of 4-tuples (word,id,pos,lemma) | ["Iterate", "over", "all", "sentences", "(", "sentence_id", "sentence", ")", "in", "the", "document", "sentence", "is", "a", "list", "of", "4", "-", "tuples", "(", "word", "id", "pos", "lemma", ")"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L85-L108 | [] | What does this function do? | ["Iterate", "over", "all", "sentences", "(", "sentence_id", "sentence", ")", "in", "the", "document", "sentence", "is", "a", "list", "of", "4", "-", "tuples", "(", "word", "id", "pos", "lemma", ")"]
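A hedged usage sketch for CorpusDocument.sentences; the CorpusDocument(filename) constructor is an assumption not shown in the record:

from pynlpl.formats.sonar import CorpusDocument

doc = CorpusDocument("WR-P-E-A-0000000001.xml")   # assumed constructor argument
for sentence_id, sentence in doc.sentences():
    # each sentence is a list of (word, id, pos, lemma) 4-tuples
    print(sentence_id, " ".join(word for word, wid, pos, lemma in sentence))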
proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocument.paragraphs | def paragraphs(self, with_id = False):
"""Extracts paragraphs, returns list of plain-text(!) paragraphs"""
prevp = 0
partext = []
for word, id, pos, lemma in iter(self):
doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if prevp != p and partext:
yield ( doc_id + "." + ptype + "." + prevp , " ".join(partext) )
partext = []
partext.append(word)
prevp = p
if partext:
yield (doc_id + "." + ptype + "." + prevp, " ".join(partext) ) | python | def paragraphs(self, with_id = False):
"""Extracts paragraphs, returns list of plain-text(!) paragraphs"""
prevp = 0
partext = []
for word, id, pos, lemma in iter(self):
doc_id, ptype, p, s, w = re.findall('([\w\d-]+)\.(p|head)\.(\d+)\.s\.(\d+)\.w\.(\d+)',id)[0]
if prevp != p and partext:
yield ( doc_id + "." + ptype + "." + prevp , " ".join(partext) )
partext = []
partext.append(word)
prevp = p
if partext:
yield (doc_id + "." + ptype + "." + prevp, " ".join(partext) ) | [
"def", "paragraphs", "(", "self", ",", "with_id", "=", "False", ")", ":", "prevp", "=", "0", "partext", "=", "[", "]", "for", "word", ",", "id", ",", "pos", ",", "lemma", "in", "iter", "(", "self", ")", ":", "doc_id", ",", "ptype", ",", "p", ",", "s", ",", "w", "=", "re", ".", "findall", "(", "'([\\w\\d-]+)\\.(p|head)\\.(\\d+)\\.s\\.(\\d+)\\.w\\.(\\d+)'", ",", "id", ")", "[", "0", "]", "if", "prevp", "!=", "p", "and", "partext", ":", "yield", "(", "doc_id", "+", "\".\"", "+", "ptype", "+", "\".\"", "+", "prevp", ",", "\" \"", ".", "join", "(", "partext", ")", ")", "partext", "=", "[", "]", "partext", ".", "append", "(", "word", ")", "prevp", "=", "p", "if", "partext", ":", "yield", "(", "doc_id", "+", "\".\"", "+", "ptype", "+", "\".\"", "+", "prevp", ",", "\" \"", ".", "join", "(", "partext", ")", ")"
] | Extracts paragraphs, returns list of plain-text(!) paragraphs | ["Extracts", "paragraphs", "returns", "list", "of", "plain", "-", "text", "(", "!", ")", "paragraphs"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L110-L122 | ["with_id"] | What does this function do? | ["Extracts", "paragraphs", "returns", "list", "of", "plain", "-", "text", "(", "!", ")", "paragraphs"]
proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocumentX.validate | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | python | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | [
"def", "validate", "(", "self", ",", "formats_dir", "=", "\"../formats/\"", ")", ":", "#TODO: download XSD from web", "if", "self", ".", "inline", ":", "xmlschema", "=", "ElementTree", ".", "XMLSchema", "(", "ElementTree", ".", "parse", "(", "StringIO", "(", "\"\\n\"", ".", "join", "(", "open", "(", "formats_dir", "+", "\"dcoi-dsc.xsd\"", ")", ".", "readlines", "(", ")", ")", ")", ")", ")", "xmlschema", ".", "assertValid", "(", "self", ".", "tree", ")", "#return xmlschema.validate(self)", "else", ":", "xmlschema", "=", "ElementTree", ".", "XMLSchema", "(", "ElementTree", ".", "parse", "(", "StringIO", "(", "\"\\n\"", ".", "join", "(", "open", "(", "formats_dir", "+", "\"dutchsemcor-standalone.xsd\"", ")", ".", "readlines", "(", ")", ")", ")", ")", ")", "xmlschema", ".", "assertValid", "(", "self", ".", "tree", ")"
] | checks if the document is valid | ["checks", "if", "the", "document", "is", "valid"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L235-L244 | ["formats_dir"] | What does this function do? | ["checks", "if", "the", "document", "is", "valid"]
proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocumentX.xpath | def xpath(self, expression):
"""Executes an xpath expression using the correct namespaces"""
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | python | def xpath(self, expression):
"""Executes an xpath expression using the correct namespaces"""
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | [
"def", "xpath", "(", "self", ",", "expression", ")", ":", "global", "namespaces", "return", "self", ".", "tree", ".", "xpath", "(", "expression", ",", "namespaces", "=", "namespaces", ")"
] | Executes an xpath expression using the correct namespaces | ["Executes", "an", "xpath", "expression", "using", "the", "correct", "namespaces"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L247-L250 | ["expression"] | What does this function do? | ["Executes", "an", "xpath", "expression", "using", "the", "correct", "namespaces"]
proycon/pynlpl | pynlpl/formats/taggerdata.py | Taggerdata.align | def align(self, referencewords, datatuple):
"""align the reference sentence with the tagged data"""
targetwords = []
for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])):
if word:
subwords = word.split("_")
for w in subwords: #split multiword expressions
targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
referencewords = [ w.lower() for w in referencewords ]
alignment = []
for i, referenceword in enumerate(referencewords):
found = False
best = 0
distance = 999999
for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords):
if referenceword == targetword and abs(i-j) < distance:
found = True
best = j
distance = abs(i-j)
if found:
alignment.append(targetwords[best])
else:
alignment.append((None,None,None,None,False)) #no alignment found
return alignment | python | def align(self, referencewords, datatuple):
"""align the reference sentence with the tagged data"""
targetwords = []
for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])):
if word:
subwords = word.split("_")
for w in subwords: #split multiword expressions
targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
referencewords = [ w.lower() for w in referencewords ]
alignment = []
for i, referenceword in enumerate(referencewords):
found = False
best = 0
distance = 999999
for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords):
if referenceword == targetword and abs(i-j) < distance:
found = True
best = j
distance = abs(i-j)
if found:
alignment.append(targetwords[best])
else:
alignment.append((None,None,None,None,False)) #no alignment found
return alignment | [
"def", "align", "(", "self", ",", "referencewords", ",", "datatuple", ")", ":", "targetwords", "=", "[", "]", "for", "i", ",", "(", "word", ",", "lemma", ",", "postag", ")", "in", "enumerate", "(", "zip", "(", "datatuple", "[", "0", "]", ",", "datatuple", "[", "1", "]", ",", "datatuple", "[", "2", "]", ")", ")", ":", "if", "word", ":", "subwords", "=", "word", ".", "split", "(", "\"_\"", ")", "for", "w", "in", "subwords", ":", "#split multiword expressions", "targetwords", ".", "append", "(", "(", "w", ",", "lemma", ",", "postag", ",", "i", ",", "len", "(", "subwords", ")", ">", "1", ")", ")", "#word, lemma, pos, index, multiword? ", "referencewords", "=", "[", "w", ".", "lower", "(", ")", "for", "w", "in", "referencewords", "]", "alignment", "=", "[", "]", "for", "i", ",", "referenceword", "in", "enumerate", "(", "referencewords", ")", ":", "found", "=", "False", "best", "=", "0", "distance", "=", "999999", "for", "j", ",", "(", "targetword", ",", "lemma", ",", "pos", ",", "index", ",", "multiword", ")", "in", "enumerate", "(", "targetwords", ")", ":", "if", "referenceword", "==", "targetword", "and", "abs", "(", "i", "-", "j", ")", "<", "distance", ":", "found", "=", "True", "best", "=", "j", "distance", "=", "abs", "(", "i", "-", "j", ")", "if", "found", ":", "alignment", ".", "append", "(", "targetwords", "[", "best", "]", ")", "else", ":", "alignment", ".", "append", "(", "(", "None", ",", "None", ",", "None", ",", "None", ",", "False", ")", ")", "#no alignment found ", "return", "alignment"
] | align the reference sentence with the tagged data | ["align", "the", "reference", "sentence", "with", "the", "tagged", "data"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/taggerdata.py#L99-L125 | ["referencewords", "datatuple"] | What does this function do? | ["align", "the", "reference", "sentence", "with", "the", "tagged", "data"]
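A hedged sketch for Taggerdata.align; `td` stands for an already-constructed Taggerdata instance (construction is not shown in this record), and the "of_the" token illustrates the underscore splitting of multiword expressions done above:

referencewords = ["most", "of", "the", "time"]
datatuple = (
    ["most", "of_the", "time"],   # words, one of them a multiword unit
    ["most", "of_the", "time"],   # lemmas
    ["ADV", "PREP", "N"],         # pos tags
)
alignment = td.align(referencewords, datatuple)   # td: assumed Taggerdata instance
# one (word, lemma, pos, index, multiword) tuple per reference word;
# (None, None, None, None, False) marks reference words with no match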
proycon/pynlpl | pynlpl/formats/foliaset.py | SetDefinition.mainset | def mainset(self):
"""Returns information regarding the set"""
if self.mainsetcache:
return self.mainsetcache
set_uri = self.get_set_uri()
for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen ?setempty WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } OPTIONAL { ?seturi fsd:empty ?setempty } FILTER NOT EXISTS { ?y skos:member ?seturi . ?y rdf:type skos:Collection } }"):
self.mainsetcache = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen), 'empty': bool(row.setempty) }
return self.mainsetcache
raise DeepValidationError("Unable to find main set (set_uri=" + str(set_uri)+"), this should not happen") | python | def mainset(self):
"""Returns information regarding the set"""
if self.mainsetcache:
return self.mainsetcache
set_uri = self.get_set_uri()
for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen ?setempty WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } OPTIONAL { ?seturi fsd:empty ?setempty } FILTER NOT EXISTS { ?y skos:member ?seturi . ?y rdf:type skos:Collection } }"):
self.mainsetcache = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen), 'empty': bool(row.setempty) }
return self.mainsetcache
raise DeepValidationError("Unable to find main set (set_uri=" + str(set_uri)+"), this should not happen") | [
"def", "mainset", "(", "self", ")", ":", "if", "self", ".", "mainsetcache", ":", "return", "self", ".", "mainsetcache", "set_uri", "=", "self", ".", "get_set_uri", "(", ")", "for", "row", "in", "self", ".", "graph", ".", "query", "(", "\"SELECT ?seturi ?setid ?setlabel ?setopen ?setempty WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } OPTIONAL { ?seturi fsd:empty ?setempty } FILTER NOT EXISTS { ?y skos:member ?seturi . ?y rdf:type skos:Collection } }\"", ")", ":", "self", ".", "mainsetcache", "=", "{", "'uri'", ":", "str", "(", "row", ".", "seturi", ")", ",", "'id'", ":", "str", "(", "row", ".", "setid", ")", ",", "'label'", ":", "str", "(", "row", ".", "setlabel", ")", "if", "row", ".", "setlabel", "else", "\"\"", ",", "'open'", ":", "bool", "(", "row", ".", "setopen", ")", ",", "'empty'", ":", "bool", "(", "row", ".", "setempty", ")", "}", "return", "self", ".", "mainsetcache", "raise", "DeepValidationError", "(", "\"Unable to find main set (set_uri=\"", "+", "str", "(", "set_uri", ")", "+", "\"), this should not happen\"", ")"
] | Returns information regarding the set | ["Returns", "information", "regarding", "the", "set"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/foliaset.py#L351-L359 | [] | What does this function do? | ["Returns", "information", "regarding", "the", "set"]
proycon/pynlpl | pynlpl/formats/foliaset.py | SetDefinition.subset | def subset(self, subset_id):
"""Returns information regarding the set"""
if subset_id in self.subsetcache:
return self.subsetcache[subset_id]
set_uri = self.get_set_uri(subset_id)
for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } FILTER (?seturi = <" + str(set_uri)+">) }"):
self.subsetcache[str(row.setid)] = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen) }
return self.subsetcache[str(row.setid)]
raise DeepValidationError("Unable to find subset (set_uri=" + str(set_uri)+")") | python | def subset(self, subset_id):
"""Returns information regarding the set"""
if subset_id in self.subsetcache:
return self.subsetcache[subset_id]
set_uri = self.get_set_uri(subset_id)
for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } FILTER (?seturi = <" + str(set_uri)+">) }"):
self.subsetcache[str(row.setid)] = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen) }
return self.subsetcache[str(row.setid)]
raise DeepValidationError("Unable to find subset (set_uri=" + str(set_uri)+")") | [
"def", "subset", "(", "self", ",", "subset_id", ")", ":", "if", "subset_id", "in", "self", ".", "subsetcache", ":", "return", "self", ".", "subsetcache", "[", "subset_id", "]", "set_uri", "=", "self", ".", "get_set_uri", "(", "subset_id", ")", "for", "row", "in", "self", ".", "graph", ".", "query", "(", "\"SELECT ?seturi ?setid ?setlabel ?setopen WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } FILTER (?seturi = <\"", "+", "str", "(", "set_uri", ")", "+", "\">) }\"", ")", ":", "self", ".", "subsetcache", "[", "str", "(", "row", ".", "setid", ")", "]", "=", "{", "'uri'", ":", "str", "(", "row", ".", "seturi", ")", ",", "'id'", ":", "str", "(", "row", ".", "setid", ")", ",", "'label'", ":", "str", "(", "row", ".", "setlabel", ")", "if", "row", ".", "setlabel", "else", "\"\"", ",", "'open'", ":", "bool", "(", "row", ".", "setopen", ")", "}", "return", "self", ".", "subsetcache", "[", "str", "(", "row", ".", "setid", ")", "]", "raise", "DeepValidationError", "(", "\"Unable to find subset (set_uri=\"", "+", "str", "(", "set_uri", ")", "+", "\")\"", ")"
] | Returns information regarding the set | ["Returns", "information", "regarding", "the", "set"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/foliaset.py#L361-L369 | ["subset_id"] | What does this function do? | ["Returns", "information", "regarding", "the", "set"]
proycon/pynlpl | pynlpl/formats/foliaset.py | SetDefinition.orderedclasses | def orderedclasses(self, set_uri_or_id=None, nestedhierarchy=False):
"""Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`"""
classes = self.classes(set_uri_or_id, nestedhierarchy)
for classid in self.classorder(classes):
yield classes[classid] | python | def orderedclasses(self, set_uri_or_id=None, nestedhierarchy=False):
"""Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`"""
classes = self.classes(set_uri_or_id, nestedhierarchy)
for classid in self.classorder(classes):
yield classes[classid] | [
"def", "orderedclasses", "(", "self", ",", "set_uri_or_id", "=", "None", ",", "nestedhierarchy", "=", "False", ")", ":", "classes", "=", "self", ".", "classes", "(", "set_uri_or_id", ",", "nestedhierarchy", ")", "for", "classid", "in", "self", ".", "classorder", "(", "classes", ")", ":", "yield", "classes", "[", "classid", "]"
] | Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder` | ["Higher", "-", "order", "generator", "function", "that", "yields", "class", "information", "in", "the", "right", "order", "combines", "calls", "to", ":", "meth", ":", "SetDefinition", ".", "classes", "and", ":", "meth", ":", "SetDefinition", ".", "classorder"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/foliaset.py#L371-L375 | ["set_uri_or_id", "nestedhierarchy"] | What does this function do? | ["Higher", "-", "order", "generator", "function", "that", "yields", "class", "information", "in", "the", "right", "order", "combines", "calls", "to", ":", "meth", ":", "SetDefinition", ".", "classes", "and", ":", "meth", ":", "SetDefinition", ".", "classorder"]
proycon/pynlpl | pynlpl/formats/foliaset.py | SetDefinition.classes | def classes(self, set_uri_or_id=None, nestedhierarchy=False):
"""Returns a dictionary of classes for the specified (sub)set (if None, default, the main set is selected)"""
if set_uri_or_id and set_uri_or_id.startswith(('http://','https://')):
set_uri = set_uri_or_id
else:
set_uri = self.get_set_uri(set_uri_or_id)
assert set_uri is not None
classes= {}
uri2idmap = {}
for row in self.graph.query("SELECT ?classuri ?classid ?classlabel ?parentclass ?seqnr WHERE { ?classuri rdf:type skos:Concept ; skos:notation ?classid. <" + str(set_uri) + "> skos:member ?classuri . OPTIONAL { ?classuri skos:prefLabel ?classlabel } OPTIONAL { ?classuri skos:narrower ?parentclass } OPTIONAL { ?classuri fsd:sequenceNumber ?seqnr } }"):
classinfo = {'uri': str(row.classuri), 'id': str(row.classid),'label': str(row.classlabel) if row.classlabel else "" }
if nestedhierarchy:
uri2idmap[str(row.classuri)] = str(row.classid)
if row.parentclass:
classinfo['parentclass'] = str(row.parentclass) #uri
if row.seqnr:
classinfo['seqnr'] = int(row.seqnr)
classes[str(row.classid)] = classinfo
if nestedhierarchy:
#build hierarchy
removekeys = []
for classid, classinfo in classes.items():
if 'parentclass' in classinfo:
removekeys.append(classid)
parentclassid = uri2idmap[classinfo['parentclass']]
if 'subclasses' not in classes[parentclassid]:
classes[parentclassid]['subclasses'] = {}
classes[parentclassid]['subclasses'][classid] = classinfo
for key in removekeys:
del classes[key]
return classes | python | def classes(self, set_uri_or_id=None, nestedhierarchy=False):
"""Returns a dictionary of classes for the specified (sub)set (if None, default, the main set is selected)"""
if set_uri_or_id and set_uri_or_id.startswith(('http://','https://')):
set_uri = set_uri_or_id
else:
set_uri = self.get_set_uri(set_uri_or_id)
assert set_uri is not None
classes= {}
uri2idmap = {}
for row in self.graph.query("SELECT ?classuri ?classid ?classlabel ?parentclass ?seqnr WHERE { ?classuri rdf:type skos:Concept ; skos:notation ?classid. <" + str(set_uri) + "> skos:member ?classuri . OPTIONAL { ?classuri skos:prefLabel ?classlabel } OPTIONAL { ?classuri skos:narrower ?parentclass } OPTIONAL { ?classuri fsd:sequenceNumber ?seqnr } }"):
classinfo = {'uri': str(row.classuri), 'id': str(row.classid),'label': str(row.classlabel) if row.classlabel else "" }
if nestedhierarchy:
uri2idmap[str(row.classuri)] = str(row.classid)
if row.parentclass:
classinfo['parentclass'] = str(row.parentclass) #uri
if row.seqnr:
classinfo['seqnr'] = int(row.seqnr)
classes[str(row.classid)] = classinfo
if nestedhierarchy:
#build hierarchy
removekeys = []
for classid, classinfo in classes.items():
if 'parentclass' in classinfo:
removekeys.append(classid)
parentclassid = uri2idmap[classinfo['parentclass']]
if 'subclasses' not in classes[parentclassid]:
classes[parentclassid]['subclasses'] = {}
classes[parentclassid]['subclasses'][classid] = classinfo
for key in removekeys:
del classes[key]
return classes | [
"def", "classes", "(", "self", ",", "set_uri_or_id", "=", "None", ",", "nestedhierarchy", "=", "False", ")", ":", "if", "set_uri_or_id", "and", "set_uri_or_id", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "set_uri", "=", "set_uri_or_id", "else", ":", "set_uri", "=", "self", ".", "get_set_uri", "(", "set_uri_or_id", ")", "assert", "set_uri", "is", "not", "None", "classes", "=", "{", "}", "uri2idmap", "=", "{", "}", "for", "row", "in", "self", ".", "graph", ".", "query", "(", "\"SELECT ?classuri ?classid ?classlabel ?parentclass ?seqnr WHERE { ?classuri rdf:type skos:Concept ; skos:notation ?classid. <\"", "+", "str", "(", "set_uri", ")", "+", "\"> skos:member ?classuri . OPTIONAL { ?classuri skos:prefLabel ?classlabel } OPTIONAL { ?classuri skos:narrower ?parentclass } OPTIONAL { ?classuri fsd:sequenceNumber ?seqnr } }\"", ")", ":", "classinfo", "=", "{", "'uri'", ":", "str", "(", "row", ".", "classuri", ")", ",", "'id'", ":", "str", "(", "row", ".", "classid", ")", ",", "'label'", ":", "str", "(", "row", ".", "classlabel", ")", "if", "row", ".", "classlabel", "else", "\"\"", "}", "if", "nestedhierarchy", ":", "uri2idmap", "[", "str", "(", "row", ".", "classuri", ")", "]", "=", "str", "(", "row", ".", "classid", ")", "if", "row", ".", "parentclass", ":", "classinfo", "[", "'parentclass'", "]", "=", "str", "(", "row", ".", "parentclass", ")", "#uri", "if", "row", ".", "seqnr", ":", "classinfo", "[", "'seqnr'", "]", "=", "int", "(", "row", ".", "seqnr", ")", "classes", "[", "str", "(", "row", ".", "classid", ")", "]", "=", "classinfo", "if", "nestedhierarchy", ":", "#build hierarchy", "removekeys", "=", "[", "]", "for", "classid", ",", "classinfo", "in", "classes", ".", "items", "(", ")", ":", "if", "'parentclass'", "in", "classinfo", ":", "removekeys", ".", "append", "(", "classid", ")", "parentclassid", "=", "uri2idmap", "[", "classinfo", "[", "'parentclass'", "]", "]", "if", "'subclasses'", "not", "in", "classes", "[", "parentclassid", "]", ":", "classes", "[", "parentclassid", "]", "[", "'subclasses'", "]", "=", "{", "}", "classes", "[", "parentclassid", "]", "[", "'subclasses'", "]", "[", "classid", "]", "=", "classinfo", "for", "key", "in", "removekeys", ":", "del", "classes", "[", "key", "]", "return", "classes"
] | Returns a dictionary of classes for the specified (sub)set (if None, default, the main set is selected) | ["Returns", "a", "dictionary", "of", "classes", "for", "the", "specified", "(", "sub", ")", "set", "(", "if", "None", "default", "the", "main", "set", "is", "selected", ")"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/foliaset.py#L381-L414 | ["set_uri_or_id", "nestedhierarchy"] | What does this function do? | ["Returns", "a", "dictionary", "of", "classes", "for", "the", "specified", "(", "sub", ")", "set", "(", "if", "None", "default", "the", "main", "set", "is", "selected", ")"]
proycon/pynlpl | pynlpl/formats/foliaset.py | SetDefinition.classorder | def classorder(self,classes):
"""Return a list of class IDs in order for presentational purposes: order is determined first and foremost by explicit ordering, else alphabetically by label or as a last resort by class ID"""
return [ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items() if 'seqnr' in classitem) , key=lambda pair: pair[1]['seqnr'] )] + \
[ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items() if 'seqnr' not in classitem) , key=lambda pair: pair[1]['label'] if 'label' in pair[1] else pair[1]['id']) ] | python | def classorder(self,classes):
"""Return a list of class IDs in order for presentational purposes: order is determined first and foremost by explicit ordering, else alphabetically by label or as a last resort by class ID"""
return [ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items() if 'seqnr' in classitem) , key=lambda pair: pair[1]['seqnr'] )] + \
[ classid for classid, classitem in sorted( ((classid, classitem) for classid, classitem in classes.items() if 'seqnr' not in classitem) , key=lambda pair: pair[1]['label'] if 'label' in pair[1] else pair[1]['id']) ] | [
"def", "classorder", "(", "self", ",", "classes", ")", ":", "return", "[", "classid", "for", "classid", ",", "classitem", "in", "sorted", "(", "(", "(", "classid", ",", "classitem", ")", "for", "classid", ",", "classitem", "in", "classes", ".", "items", "(", ")", "if", "'seqnr'", "in", "classitem", ")", ",", "key", "=", "lambda", "pair", ":", "pair", "[", "1", "]", "[", "'seqnr'", "]", ")", "]", "+", "[", "classid", "for", "classid", ",", "classitem", "in", "sorted", "(", "(", "(", "classid", ",", "classitem", ")", "for", "classid", ",", "classitem", "in", "classes", ".", "items", "(", ")", "if", "'seqnr'", "not", "in", "classitem", ")", ",", "key", "=", "lambda", "pair", ":", "pair", "[", "1", "]", "[", "'label'", "]", "if", "'label'", "in", "pair", "[", "1", "]", "else", "pair", "[", "1", "]", "[", "'id'", "]", ")", "]"
] | Return a list of class IDs in order for presentational purposes: order is determined first and foremost by explicit ordering, else alphabetically by label or as a last resort by class ID | ["Return", "a", "list", "of", "class", "IDs", "in", "order", "for", "presentational", "purposes", ":", "order", "is", "determined", "first", "and", "foremost", "by", "explicit", "ordering", "else", "alphabetically", "by", "label", "or", "as", "a", "last", "resort", "by", "class", "ID"] | train | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/foliaset.py#L416-L419 | ["classes"] | What does this function do? | ["Return", "a", "list", "of", "class", "IDs", "in", "order", "for", "presentational", "purposes", ":", "order", "is", "determined", "first", "and", "foremost", "by", "explicit", "ordering", "else", "alphabetically", "by", "label", "or", "as", "a", "last", "resort", "by", "class", "ID"]
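A small illustration of the ordering rule that classorder implements above, on hand-made class info (explicitly sequenced classes first, then the rest alphabetically by label):

classes = {
    "n": {"id": "n", "label": "noun"},
    "v": {"id": "v", "label": "verb", "seqnr": 1},
    "a": {"id": "a", "label": "adjective"},
}
# "v" comes first (explicit seqnr), then "a" and "n" sorted by label:
# classorder(classes) -> ["v", "a", "n"]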
scrapinghub/js2xml | js2xml/lexer.py | CustomLexer.build | def build(self, **kwargs):
"""Build the lexer."""
self.lexer = ply.lex.lex(object=self, **kwargs) | python | def build(self, **kwargs):
"""Build the lexer."""
self.lexer = ply.lex.lex(object=self, **kwargs) | [
"def", "build", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "lexer", "=", "ply", ".", "lex", ".", "lex", "(", "object", "=", "self", ",", "*", "*", "kwargs", ")"
] | Build the lexer. | ["Build", "the", "lexer", "."] | train | https://github.com/scrapinghub/js2xml/blob/d01b79e1a82de157deffcc1a22f4e0b6bfa07715/js2xml/lexer.py#L74-L76 | ["kwargs"] | What does this function do? | ["Build", "the", "lexer", "."]
scrapinghub/js2xml | js2xml/utils/vars.py | make_varname | def make_varname(tree):
"""
<left> tree </left>
"""
if tree.tag == 'identifier':
return tree.attrib['name']
if tree.tag in ('string', 'boolean'):
return tree.text
if tree.tag == 'number':
return tree.attrib['value']
if tree.tag in ('property', 'object'):
return make_varname(_xpath_one(tree, '*'))
if tree.tag.endswith('accessor'):
kind = tree.tag[:-len('accessor')]
obj = make_varname(_xpath_one(tree, 'object'))
prop = make_varname(_xpath_one(tree, 'property'))
if kind == 'dot':
fmt = '%s.%s'
elif kind == 'bracket':
fmt = '%s[%s]'
else:
raise ValueError("Unknown accessor: %s" % tree.tag)
return fmt % (obj, prop)
raise ValueError("Unknown tag: %s" % tree.tag) | python | def make_varname(tree):
"""
<left> tree </left>
"""
if tree.tag == 'identifier':
return tree.attrib['name']
if tree.tag in ('string', 'boolean'):
return tree.text
if tree.tag == 'number':
return tree.attrib['value']
if tree.tag in ('property', 'object'):
return make_varname(_xpath_one(tree, '*'))
if tree.tag.endswith('accessor'):
kind = tree.tag[:-len('accessor')]
obj = make_varname(_xpath_one(tree, 'object'))
prop = make_varname(_xpath_one(tree, 'property'))
if kind == 'dot':
fmt = '%s.%s'
elif kind == 'bracket':
fmt = '%s[%s]'
else:
raise ValueError("Unknown accessor: %s" % tree.tag)
return fmt % (obj, prop)
raise ValueError("Unknown tag: %s" % tree.tag) | [
"def", "make_varname", "(", "tree", ")", ":", "if", "tree", ".", "tag", "==", "'identifier'", ":", "return", "tree", ".", "attrib", "[", "'name'", "]", "if", "tree", ".", "tag", "in", "(", "'string'", ",", "'boolean'", ")", ":", "return", "tree", ".", "text", "if", "tree", ".", "tag", "==", "'number'", ":", "return", "tree", ".", "attrib", "[", "'value'", "]", "if", "tree", ".", "tag", "in", "(", "'property'", ",", "'object'", ")", ":", "return", "make_varname", "(", "_xpath_one", "(", "tree", ",", "'*'", ")", ")", "if", "tree", ".", "tag", ".", "endswith", "(", "'accessor'", ")", ":", "kind", "=", "tree", ".", "tag", "[", ":", "-", "len", "(", "'accessor'", ")", "]", "obj", "=", "make_varname", "(", "_xpath_one", "(", "tree", ",", "'object'", ")", ")", "prop", "=", "make_varname", "(", "_xpath_one", "(", "tree", ",", "'property'", ")", ")", "if", "kind", "==", "'dot'", ":", "fmt", "=", "'%s.%s'", "elif", "kind", "==", "'bracket'", ":", "fmt", "=", "'%s[%s]'", "else", ":", "raise", "ValueError", "(", "\"Unknown accessor: %s\"", "%", "tree", ".", "tag", ")", "return", "fmt", "%", "(", "obj", ",", "prop", ")", "raise", "ValueError", "(", "\"Unknown tag: %s\"", "%", "tree", ".", "tag", ")"
] | <left> tree </left> | ["<left", ">", "tree", "<", "/", "left", ">"] | train | https://github.com/scrapinghub/js2xml/blob/d01b79e1a82de157deffcc1a22f4e0b6bfa07715/js2xml/utils/vars.py#L55-L83 | ["tree"] | What does this function do? | ["<left", ">", "tree", "<", "/", "left", ">"]
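A runnable sketch for make_varname, feeding it a hand-built element in the accessor shape the function expects; the import path is inferred from the file path in the record:

from lxml import etree
from js2xml.utils.vars import make_varname

xml = ('<bracketaccessor>'
       '<object><identifier name="items"/></object>'
       '<property><number value="0"/></property>'
       '</bracketaccessor>')
print(make_varname(etree.fromstring(xml)))   # -> items[0]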
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | _users_from_environ | def _users_from_environ(env_prefix=''):
"""Environment value via `user:password|user2:password2`"""
auth_string = os.environ.get(env_prefix + 'WSGI_AUTH_CREDENTIALS')
if not auth_string:
return {}
result = {}
for credentials in auth_string.split('|'):
username, password = credentials.split(':', 1)
result[username] = password
return result | python | def _users_from_environ(env_prefix=''):
"""Environment value via `user:password|user2:password2`"""
auth_string = os.environ.get(env_prefix + 'WSGI_AUTH_CREDENTIALS')
if not auth_string:
return {}
result = {}
for credentials in auth_string.split('|'):
username, password = credentials.split(':', 1)
result[username] = password
return result | [
"def", "_users_from_environ", "(", "env_prefix", "=", "''", ")", ":", "auth_string", "=", "os", ".", "environ", ".", "get", "(", "env_prefix", "+", "'WSGI_AUTH_CREDENTIALS'", ")", "if", "not", "auth_string", ":", "return", "{", "}", "result", "=", "{", "}", "for", "credentials", "in", "auth_string", ".", "split", "(", "'|'", ")", ":", "username", ",", "password", "=", "credentials", ".", "split", "(", "':'", ",", "1", ")", "result", "[", "username", "]", "=", "password", "return", "result"
] | Environment value via `user:password|user2:password2` | ["Environment", "value", "via", "user", ":", "password|user2", ":", "password2"] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L102-L112 | ["env_prefix"] | What does this function do? | ["Environment", "value", "via", "user", ":", "password|user2", ":", "password2"]
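A runnable sketch of the credential format parsed above; the module import name is inferred from the src/wsgi_basic_auth.py file path:

import os
from wsgi_basic_auth import _users_from_environ

os.environ["WSGI_AUTH_CREDENTIALS"] = "alice:secret|bob:hunter2"
assert _users_from_environ() == {"alice": "secret", "bob": "hunter2"}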
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | _exclude_paths_from_environ | def _exclude_paths_from_environ(env_prefix=''):
"""Environment value via `/login;/register`"""
paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')
if not paths:
return []
return paths.split(';') | python | def _exclude_paths_from_environ(env_prefix=''):
"""Environment value via `/login;/register`"""
paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')
if not paths:
return []
return paths.split(';') | [
"def", "_exclude_paths_from_environ", "(", "env_prefix", "=", "''", ")", ":", "paths", "=", "os", ".", "environ", ".", "get", "(", "env_prefix", "+", "'WSGI_AUTH_EXCLUDE_PATHS'", ")", "if", "not", "paths", ":", "return", "[", "]", "return", "paths", ".", "split", "(", "';'", ")"
] | Environment value via `/login;/register` | ["Environment", "value", "via", "/", "login", ";", "/", "register"] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L115-L120 | ["env_prefix"] | What does this function do? | ["Environment", "value", "via", "/", "login", ";", "/", "register"]
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | _include_paths_from_environ | def _include_paths_from_environ(env_prefix=''):
"""Environment value via `/login;/register`"""
paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')
if not paths:
return []
return paths.split(';') | python | def _include_paths_from_environ(env_prefix=''):
"""Environment value via `/login;/register`"""
paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')
if not paths:
return []
return paths.split(';') | [
"def", "_include_paths_from_environ", "(", "env_prefix", "=", "''", ")", ":", "paths", "=", "os", ".", "environ", ".", "get", "(", "env_prefix", "+", "'WSGI_AUTH_PATHS'", ")", "if", "not", "paths", ":", "return", "[", "]", "return", "paths", ".", "split", "(", "';'", ")"
] | Environment value via `/login;/register` | ["Environment", "value", "via", "/", "login", ";", "/", "register"] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L123-L128 | ["env_prefix"] | What does this function do? | ["Environment", "value", "via", "/", "login", ";", "/", "register"]
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth.is_authorized | def is_authorized(self, request):
"""Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials.
"""
if self._is_request_in_include_path(request):
if self._is_request_in_exclude_path(request):
return True
else:
auth = request.authorization
if auth and auth[0] == 'Basic':
credentials = b64decode(auth[1]).decode('UTF-8')
username, password = credentials.split(':', 1)
return self._users.get(username) == password
else:
return False
else:
return True | python | def is_authorized(self, request):
"""Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials.
"""
if self._is_request_in_include_path(request):
if self._is_request_in_exclude_path(request):
return True
else:
auth = request.authorization
if auth and auth[0] == 'Basic':
credentials = b64decode(auth[1]).decode('UTF-8')
username, password = credentials.split(':', 1)
return self._users.get(username) == password
else:
return False
else:
return True | [
"def", "is_authorized", "(", "self", ",", "request", ")", ":", "if", "self", ".", "_is_request_in_include_path", "(", "request", ")", ":", "if", "self", ".", "_is_request_in_exclude_path", "(", "request", ")", ":", "return", "True", "else", ":", "auth", "=", "request", ".", "authorization", "if", "auth", "and", "auth", "[", "0", "]", "==", "'Basic'", ":", "credentials", "=", "b64decode", "(", "auth", "[", "1", "]", ")", ".", "decode", "(", "'UTF-8'", ")", "username", ",", "password", "=", "credentials", ".", "split", "(", "':'", ",", "1", ")", "return", "self", ".", "_users", ".", "get", "(", "username", ")", "==", "password", "else", ":", "return", "False", "else", ":", "return", "True"
] | Check if the user is authenticated for the given request. The include_paths and exclude_paths are first checked. If authentication is required then the Authorization HTTP header is checked against the credentials. | ["Check", "if", "the", "user", "is", "authenticated", "for", "the", "given", "request", "."] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L48-L68 | ["request"] | What does this function do? | ["Check", "if", "the", "user", "is", "authenticated", "for", "the", "given", "request", "."]
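A sketch of the Authorization header that is_authorized decodes above; `middleware` stands for an assumed, already-constructed BasicAuth instance, and the webob-style Request matches the request.authorization attribute used in the code:

from base64 import b64encode
from webob import Request

token = b64encode(b"alice:secret").decode("ascii")
request = Request.blank("/admin", headers={"Authorization": "Basic " + token})
allowed = middleware.is_authorized(request)   # middleware: assumed BasicAuth instance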
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._login | def _login(self, environ, start_response):
"""Send a login response back to the client."""
response = HTTPUnauthorized()
response.www_authenticate = ('Basic', {'realm': self._realm})
return response(environ, start_response) | python | def _login(self, environ, start_response):
"""Send a login response back to the client."""
response = HTTPUnauthorized()
response.www_authenticate = ('Basic', {'realm': self._realm})
return response(environ, start_response) | [
"def", "_login", "(", "self", ",", "environ", ",", "start_response", ")", ":", "response", "=", "HTTPUnauthorized", "(", ")", "response", ".", "www_authenticate", "=", "(", "'Basic'", ",", "{", "'realm'", ":", "self", ".", "_realm", "}", ")", "return", "response", "(", "environ", ",", "start_response", ")"
] | Send a login response back to the client. | ["Send", "a", "login", "response", "back", "to", "the", "client", "."] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L70-L74 | ["environ", "start_response"] | What does this function do? | ["Send", "a", "login", "response", "back", "to", "the", "client", "."]
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._is_request_in_include_path | def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True | python | def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True | [
"def", "_is_request_in_include_path", "(", "self", ",", "request", ")", ":", "if", "self", ".", "_include_paths", ":", "for", "path", "in", "self", ".", "_include_paths", ":", "if", "request", ".", "path", ".", "startswith", "(", "path", ")", ":", "return", "True", "return", "False", "else", ":", "return", "True"
] | Check if the request path is in the `_include_paths` list. If no specific include paths are given then we assume that authentication is required for all paths. | ["Check", "if", "the", "request", "path", "is", "in", "the", "_include_paths", "list", "."] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L76-L89 | ["request"] | What does this function do? | ["Check", "if", "the", "request", "path", "is", "in", "the", "_include_paths", "list", "."]
mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._is_request_in_exclude_path | def _is_request_in_exclude_path(self, request):
"""Check if the request path is in the `_exclude_paths` list"""
if self._exclude_paths:
for path in self._exclude_paths:
if request.path.startswith(path):
return True
return False
else:
return False | python | def _is_request_in_exclude_path(self, request):
"""Check if the request path is in the `_exclude_paths` list"""
if self._exclude_paths:
for path in self._exclude_paths:
if request.path.startswith(path):
return True
return False
else:
return False | [
"def", "_is_request_in_exclude_path", "(", "self", ",", "request", ")", ":", "if", "self", ".", "_exclude_paths", ":", "for", "path", "in", "self", ".", "_exclude_paths", ":", "if", "request", ".", "path", ".", "startswith", "(", "path", ")", ":", "return", "True", "return", "False", "else", ":", "return", "False"
] | Check if the request path is in the `_exclude_paths` list | ["Check", "if", "the", "request", "path", "is", "in", "the", "_exclude_paths", "list"] | train | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L91-L99 | ["request"] | What does this function do? | ["Check", "if", "the", "request", "path", "is", "in", "the", "_exclude_paths", "list"]
click-contrib/click-repl | click_repl/__init__.py | bootstrap_prompt | def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs | python | def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs | [
"def", "bootstrap_prompt", "(", "prompt_kwargs", ",", "group", ")", ":", "prompt_kwargs", "=", "prompt_kwargs", "or", "{", "}", "defaults", "=", "{", "\"history\"", ":", "InMemoryHistory", "(", ")", ",", "\"completer\"", ":", "ClickCompleter", "(", "group", ")", ",", "\"message\"", ":", "u\"> \"", ",", "}", "for", "key", "in", "defaults", ":", "default_value", "=", "defaults", "[", "key", "]", "if", "key", "not", "in", "prompt_kwargs", ":", "prompt_kwargs", "[", "key", "]", "=", "default_value", "return", "prompt_kwargs"
] | Bootstrap prompt_toolkit kwargs or use user defined values. :param prompt_kwargs: The user specified prompt kwargs. | ["Bootstrap", "prompt_toolkit", "kwargs", "or", "use", "user", "defined", "values", "."] | train | https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L146-L165 | ["prompt_kwargs", "group"] | What does this function do? | ["Bootstrap", "prompt_toolkit", "kwargs", "or", "use", "user", "defined", "values", "."]
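A small sketch showing that user-supplied kwargs win over the defaults that bootstrap_prompt fills in:

import click
from click_repl import bootstrap_prompt

group = click.Group()
kwargs = bootstrap_prompt({"message": u"repl> "}, group)
assert kwargs["message"] == u"repl> "                   # user value kept
assert "history" in kwargs and "completer" in kwargs    # defaults filled in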
click-contrib/click-repl | click_repl/__init__.py | repl | def repl( # noqa: C901
old_ctx,
prompt_kwargs=None,
allow_system_commands=True,
allow_internal_commands=True,
):
"""
Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin.
"""
# parent should be available, but we're not going to bother if not
group_ctx = old_ctx.parent or old_ctx
group = group_ctx.command
isatty = sys.stdin.isatty()
# Delete the REPL command from those available, as we don't want to allow
# nesting REPLs (note: pass `None` to `pop` as we don't want to error if
# REPL command already not present for some reason).
repl_command_name = old_ctx.command.name
if isinstance(group_ctx.command, click.CommandCollection):
available_commands = {
cmd_name: cmd_obj
for source in group_ctx.command.sources
for cmd_name, cmd_obj in source.commands.items()
}
else:
available_commands = group_ctx.command.commands
available_commands.pop(repl_command_name, None)
prompt_kwargs = bootstrap_prompt(prompt_kwargs, group)
if isatty:
def get_command():
return prompt(**prompt_kwargs)
else:
get_command = sys.stdin.readline
while True:
try:
command = get_command()
except KeyboardInterrupt:
continue
except EOFError:
break
if not command:
if isatty:
continue
else:
break
if allow_system_commands and dispatch_repl_commands(command):
continue
if allow_internal_commands:
try:
result = handle_internal_commands(command)
if isinstance(result, six.string_types):
click.echo(result)
continue
except ExitReplException:
break
try:
args = shlex.split(command)
except ValueError as e:
click.echo("{}: {}".format(type(e).__name__, e))
continue
try:
with group.make_context(None, args, parent=group_ctx) as ctx:
group.invoke(ctx)
ctx.exit()
except click.ClickException as e:
e.show()
except ClickExit:
pass
except SystemExit:
pass
except ExitReplException:
break | python | def repl( # noqa: C901
old_ctx,
prompt_kwargs=None,
allow_system_commands=True,
allow_internal_commands=True,
):
"""
Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin.
"""
# parent should be available, but we're not going to bother if not
group_ctx = old_ctx.parent or old_ctx
group = group_ctx.command
isatty = sys.stdin.isatty()
# Delete the REPL command from those available, as we don't want to allow
# nesting REPLs (note: pass `None` to `pop` as we don't want to error if
# REPL command already not present for some reason).
repl_command_name = old_ctx.command.name
if isinstance(group_ctx.command, click.CommandCollection):
available_commands = {
cmd_name: cmd_obj
for source in group_ctx.command.sources
for cmd_name, cmd_obj in source.commands.items()
}
else:
available_commands = group_ctx.command.commands
available_commands.pop(repl_command_name, None)
prompt_kwargs = bootstrap_prompt(prompt_kwargs, group)
if isatty:
def get_command():
return prompt(**prompt_kwargs)
else:
get_command = sys.stdin.readline
while True:
try:
command = get_command()
except KeyboardInterrupt:
continue
except EOFError:
break
if not command:
if isatty:
continue
else:
break
if allow_system_commands and dispatch_repl_commands(command):
continue
if allow_internal_commands:
try:
result = handle_internal_commands(command)
if isinstance(result, six.string_types):
click.echo(result)
continue
except ExitReplException:
break
try:
args = shlex.split(command)
except ValueError as e:
click.echo("{}: {}".format(type(e).__name__, e))
continue
try:
with group.make_context(None, args, parent=group_ctx) as ctx:
group.invoke(ctx)
ctx.exit()
except click.ClickException as e:
e.show()
except ClickExit:
pass
except SystemExit:
pass
except ExitReplException:
break | [
"def", "repl", "(", "# noqa: C901", "old_ctx", ",", "prompt_kwargs", "=", "None", ",", "allow_system_commands", "=", "True", ",", "allow_internal_commands", "=", "True", ",", ")", ":", "# parent should be available, but we're not going to bother if not", "group_ctx", "=", "old_ctx", ".", "parent", "or", "old_ctx", "group", "=", "group_ctx", ".", "command", "isatty", "=", "sys", ".", "stdin", ".", "isatty", "(", ")", "# Delete the REPL command from those available, as we don't want to allow", "# nesting REPLs (note: pass `None` to `pop` as we don't want to error if", "# REPL command already not present for some reason).", "repl_command_name", "=", "old_ctx", ".", "command", ".", "name", "if", "isinstance", "(", "group_ctx", ".", "command", ",", "click", ".", "CommandCollection", ")", ":", "available_commands", "=", "{", "cmd_name", ":", "cmd_obj", "for", "source", "in", "group_ctx", ".", "command", ".", "sources", "for", "cmd_name", ",", "cmd_obj", "in", "source", ".", "commands", ".", "items", "(", ")", "}", "else", ":", "available_commands", "=", "group_ctx", ".", "command", ".", "commands", "available_commands", ".", "pop", "(", "repl_command_name", ",", "None", ")", "prompt_kwargs", "=", "bootstrap_prompt", "(", "prompt_kwargs", ",", "group", ")", "if", "isatty", ":", "def", "get_command", "(", ")", ":", "return", "prompt", "(", "*", "*", "prompt_kwargs", ")", "else", ":", "get_command", "=", "sys", ".", "stdin", ".", "readline", "while", "True", ":", "try", ":", "command", "=", "get_command", "(", ")", "except", "KeyboardInterrupt", ":", "continue", "except", "EOFError", ":", "break", "if", "not", "command", ":", "if", "isatty", ":", "continue", "else", ":", "break", "if", "allow_system_commands", "and", "dispatch_repl_commands", "(", "command", ")", ":", "continue", "if", "allow_internal_commands", ":", "try", ":", "result", "=", "handle_internal_commands", "(", "command", ")", "if", "isinstance", "(", "result", ",", "six", ".", "string_types", ")", ":", "click", ".", "echo", "(", "result", ")", "continue", "except", "ExitReplException", ":", "break", "try", ":", "args", "=", "shlex", ".", "split", "(", "command", ")", "except", "ValueError", "as", "e", ":", "click", ".", "echo", "(", "\"{}: {}\"", ".", "format", "(", "type", "(", "e", ")", ".", "__name__", ",", "e", ")", ")", "continue", "try", ":", "with", "group", ".", "make_context", "(", "None", ",", "args", ",", "parent", "=", "group_ctx", ")", "as", "ctx", ":", "group", ".", "invoke", "(", "ctx", ")", "ctx", ".", "exit", "(", ")", "except", "click", ".", "ClickException", "as", "e", ":", "e", ".", "show", "(", ")", "except", "ClickExit", ":", "pass", "except", "SystemExit", ":", "pass", "except", "ExitReplException", ":", "break"
] | Start an interactive shell. All subcommands are available in it. :param old_ctx: The current Click context. :param prompt_kwargs: Parameters passed to :py:func:`prompt_toolkit.shortcuts.prompt`. If stdin is not a TTY, no prompt will be printed, but only commands read from stdin. | ["Start", "an", "interactive", "shell", ".", "All", "subcommands", "are", "available", "in", "it", "."] | train | https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L168-L257 | ["", "prompt_kwargs", "allow_system_commands", "allow_internal_commands", ""] | What does this function do? | ["Start", "an", "interactive", "shell", ".", "All", "subcommands", "are", "available", "in", "it", "."]

repository_name: click-contrib/click-repl
func_path_in_repository: click_repl/__init__.py
func_name: register_repl
language: python
func_code_string:
    def register_repl(group, name="repl"):
        """Register :func:`repl()` as sub-command *name* of *group*."""
        group.command(name=name)(click.pass_context(repl))
func_documentation_string: Register :func:`repl()` as sub-command *name* of *group*.
split_name: train
func_code_url: https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L260-L262
parameters: group, name
question: What does this function do?
answer: Register :func:`repl()` as sub-command *name* of *group*.
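
A minimal usage sketch for register_repl follows. The CLI group and the hello command are illustrative, not part of the record; it assumes click and click-repl are installed:

    import click
    from click_repl import register_repl

    @click.group()
    def cli():
        """Hypothetical example CLI group."""

    @cli.command()
    def hello():
        click.echo("Hello!")

    register_repl(cli)  # adds a `repl` sub-command to the group

    if __name__ == "__main__":
        cli()  # `python cli.py repl` now drops into the interactive shell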

repository_name: click-contrib/click-repl
func_path_in_repository: click_repl/__init__.py
func_name: handle_internal_commands
language: python
func_code_string:
    def handle_internal_commands(command):
        """Run repl-internal commands.

        Repl-internal commands are all commands starting with ":".
        """
        if command.startswith(":"):
            target = _get_registered_target(command[1:], default=None)
            if target:
                return target()
func_documentation_string: Run repl-internal commands. Repl-internal commands are all commands starting with ":".
split_name: train
func_code_url: https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L283-L292
parameters: command
question: What does this function do?
answer: Run repl-internal commands.
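
A short behavior sketch for handle_internal_commands. Whether a given internal command such as ":help" is registered by default depends on the click-repl version, so treat that call as an assumption:

    from click_repl import handle_internal_commands

    # Input without a leading ":" is ignored by this helper (returns None),
    # so it falls through to normal CLI dispatch in the repl loop.
    assert handle_internal_commands("ls -la") is None

    # A ":"-prefixed command is looked up in the internal registry; if a
    # target is registered, its return value (help text here) is returned.
    result = handle_internal_commands(":help")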

repository_name: nel215/ksvd
func_path_in_repository: ksvd/__init__.py
func_name: ApproximateKSVD.fit
language: python
func_code_string:
    def fit(self, X):
        """
        Parameters
        ----------
        X: shape = [n_samples, n_features]
        """
        D = self._initialize(X)
        for i in range(self.max_iter):
            gamma = self._transform(D, X)
            e = np.linalg.norm(X - gamma.dot(D))
            if e < self.tol:
                break
            D, gamma = self._update_dict(X, D, gamma)
        self.components_ = D
        return self
func_documentation_string: Parameters ---------- X: shape = [n_samples, n_features]
split_name: train
func_code_url: https://github.com/nel215/ksvd/blob/917f65a9f15ee7689253b27f0107a9773cf837f6/ksvd/__init__.py#L67-L82
parameters: X
question: What does this function do?
answer: Parameters ---------- X: shape = [n_samples, n_features]
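
A minimal usage sketch for ApproximateKSVD.fit. The data and the n_components value are illustrative; fit() returning self enables the chained attribute access shown:

    import numpy as np
    from ksvd import ApproximateKSVD

    X = np.random.randn(100, 20)              # 100 toy signals, 20 features each

    aksvd = ApproximateKSVD(n_components=32)
    dictionary = aksvd.fit(X).components_     # learned dictionary, shape (32, 20)
    gamma = aksvd.transform(X)                # sparse codes; X is approximately gamma.dot(dictionary)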

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/node/node.py
func_name: node_definitions
language: python
func_code_string:
    def node_definitions(id_fetcher, type_resolver=None, id_resolver=None):
        '''
        Given a function to map from an ID to an underlying object, and a function
        to map from an underlying object to the concrete GraphQLObjectType it
        corresponds to, constructs a `Node` interface that objects can implement,
        and a field config for a `node` root field.

        If the type_resolver is omitted, object resolution on the interface will be
        handled with the `isTypeOf` method on object types, as with any GraphQL
        interface without a provided `resolveType` method.
        '''
        node_interface = GraphQLInterfaceType(
            'Node',
            description='An object with an ID',
            fields=lambda: OrderedDict((
                ('id', GraphQLField(
                    GraphQLNonNull(GraphQLID),
                    description='The id of the object.',
                    resolver=id_resolver,
                )),
            )),
            resolve_type=type_resolver
        )
        node_field = GraphQLField(
            node_interface,
            description='Fetches an object given its ID',
            args=OrderedDict((
                ('id', GraphQLArgument(
                    GraphQLNonNull(GraphQLID),
                    description='The ID of an object'
                )),
            )),
            resolver=lambda obj, args, *_: id_fetcher(args.get('id'), *_)
        )
        return node_interface, node_field
func_documentation_string: Given a function to map from an ID to an underlying object, and a function to map from an underlying object to the concrete GraphQLObjectType it corresponds to, constructs a `Node` interface that objects can implement, and a field config for a `node` root field. If the type_resolver is omitted, object resolution on the interface will be handled with the `isTypeOf` method on object types, as with any GraphQL interface without a provided `resolveType` method.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L15-L49
parameters: id_fetcher, type_resolver, id_resolver
question: What does this function do?
answer: Given a function to map from an ID to an underlying object, and a function to map from an underlying object to the concrete GraphQLObjectType it corresponds to, constructs a `Node` interface that objects can implement, and a field config for a `node` root field.
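
A usage sketch for node_definitions, matching the resolver signatures of this (older) graphql-relay version; the in-memory photo store is hypothetical:

    from graphql_relay.node.node import node_definitions, from_global_id

    photos = {'1': {'id': '1', 'width': 400}}  # hypothetical data store

    def get_node(global_id, *_):
        # Map a global ID back to the underlying object.
        type_name, local_id = from_global_id(global_id)
        if type_name == 'Photo':
            return photos.get(local_id)

    node_interface, node_field = node_definitions(get_node)
    # node_field goes on the query root; node_interface is listed in the
    # `interfaces` of every object type that implements Node.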

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/node/node.py
func_name: from_global_id
language: python
func_code_string:
    def from_global_id(global_id):
        '''
        Takes the "global ID" created by toGlobalID, and returns the type name and ID
        used to create it.
        '''
        unbased_global_id = unbase64(global_id)
        _type, _id = unbased_global_id.split(':', 1)
        return _type, _id
func_documentation_string: Takes the "global ID" created by toGlobalID, and returns the type name and ID used to create it.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L60-L67
parameters: global_id
question: What does this function do?
answer: Takes the "global ID" created by toGlobalID, and returns the type name and ID used to create it.
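
A round-trip sketch for from_global_id with its counterpart to_global_id from the same module:

    from graphql_relay.node.node import to_global_id, from_global_id

    global_id = to_global_id('User', '42')          # base64 of "User:42"
    type_name, local_id = from_global_id(global_id)
    assert (type_name, local_id) == ('User', '42')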

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/node/node.py
func_name: global_id_field
language: python
func_code_string:
    def global_id_field(type_name, id_fetcher=None):
        '''
        Creates the configuration for an id field on a node, using `to_global_id` to
        construct the ID from the provided typename. The type-specific ID is fetched
        by calling id_fetcher on the object, or if not provided, by accessing the `id`
        property on the object.
        '''
        return GraphQLField(
            GraphQLNonNull(GraphQLID),
            description='The ID of an object',
            resolver=lambda obj, args, context, info: to_global_id(
                type_name or info.parent_type.name,
                id_fetcher(obj, context, info) if id_fetcher else obj.id
            )
        )
func_documentation_string: Creates the configuration for an id field on a node, using `to_global_id` to construct the ID from the provided typename. The type-specific ID is fetched by calling id_fetcher on the object, or if not provided, by accessing the `id` property on the object.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L70-L84
parameters: type_name, id_fetcher
question: What does this function do?
answer: Creates the configuration for an id field on a node, using `to_global_id` to construct the ID from the provided typename. The type-specific ID is fetched by calling id_fetcher on the object, or if not provided, by accessing the `id` property on the object.

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/connection/arrayconnection.py
func_name: connection_from_list
language: python
func_code_string:
    def connection_from_list(data, args=None, **kwargs):
        '''
        A simple function that accepts an array and connection arguments, and returns
        a connection object for use in GraphQL. It uses array offsets as pagination,
        so pagination will only work if the array is static.
        '''
        _len = len(data)
        return connection_from_list_slice(
            data,
            args,
            slice_start=0,
            list_length=_len,
            list_slice_length=_len,
            **kwargs
        )
func_documentation_string: A simple function that accepts an array and connection arguments, and returns a connection object for use in GraphQL. It uses array offsets as pagination, so pagination will only work if the array is static.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L7-L21
parameters: data, args, kwargs
question: What does this function do?
answer: A simple function that accepts an array and connection arguments, and returns a connection object for use in GraphQL. It uses array offsets as pagination, so pagination will only work if the array is static.
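
A usage sketch for connection_from_list; the letters list and argument values are illustrative:

    from graphql_relay.connection.arrayconnection import connection_from_list

    letters = ['A', 'B', 'C', 'D', 'E']
    connection = connection_from_list(letters, args={'first': 2})
    # connection.edges holds two edges ('A' and 'B'), each with an
    # offset-based cursor; connection.page_info.has_next_page is True.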

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/connection/arrayconnection.py
func_name: connection_from_promised_list
language: python
func_code_string:
    def connection_from_promised_list(data_promise, args=None, **kwargs):
        '''
        A version of `connectionFromArray` that takes a promised array, and returns a
        promised connection.
        '''
        return data_promise.then(lambda data: connection_from_list(data, args, **kwargs))
func_documentation_string: A version of `connectionFromArray` that takes a promised array, and returns a promised connection.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L24-L29
parameters: data_promise, args, kwargs
question: What does this function do?
answer: A version of `connectionFromArray` that takes a promised array, and returns a promised connection.

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/connection/arrayconnection.py
func_name: connection_from_list_slice
language: python
func_code_string:
    def connection_from_list_slice(list_slice, args=None, connection_type=None,
                                   edge_type=None, pageinfo_type=None,
                                   slice_start=0, list_length=0, list_slice_length=None):
        '''
        Given a slice (subset) of an array, returns a connection object for use in
        GraphQL.

        This function is similar to `connectionFromArray`, but is intended for use
        cases where you know the cardinality of the connection, consider it too large
        to materialize the entire array, and instead wish to pass in a slice of the
        total result large enough to cover the range specified in `args`.
        '''
        connection_type = connection_type or Connection
        edge_type = edge_type or Edge
        pageinfo_type = pageinfo_type or PageInfo

        args = args or {}

        before = args.get('before')
        after = args.get('after')
        first = args.get('first')
        last = args.get('last')
        if list_slice_length is None:
            list_slice_length = len(list_slice)
        slice_end = slice_start + list_slice_length
        before_offset = get_offset_with_default(before, list_length)
        after_offset = get_offset_with_default(after, -1)

        start_offset = max(
            slice_start - 1,
            after_offset,
            -1
        ) + 1
        end_offset = min(
            slice_end,
            before_offset,
            list_length
        )
        if isinstance(first, int):
            end_offset = min(
                end_offset,
                start_offset + first
            )
        if isinstance(last, int):
            start_offset = max(
                start_offset,
                end_offset - last
            )

        # If supplied slice is too large, trim it down before mapping over it.
        _slice = list_slice[
            max(start_offset - slice_start, 0):
            list_slice_length - (slice_end - end_offset)
        ]
        edges = [
            edge_type(
                node=node,
                cursor=offset_to_cursor(start_offset + i)
            )
            for i, node in enumerate(_slice)
        ]

        first_edge_cursor = edges[0].cursor if edges else None
        last_edge_cursor = edges[-1].cursor if edges else None
        lower_bound = after_offset + 1 if after else 0
        upper_bound = before_offset if before else list_length

        return connection_type(
            edges=edges,
            page_info=pageinfo_type(
                start_cursor=first_edge_cursor,
                end_cursor=last_edge_cursor,
                has_previous_page=isinstance(last, int) and start_offset > lower_bound,
                has_next_page=isinstance(first, int) and end_offset < upper_bound
            )
        )
func_documentation_string: Given a slice (subset) of an array, returns a connection object for use in GraphQL. This function is similar to `connectionFromArray`, but is intended for use cases where you know the cardinality of the connection, consider it too large to materialize the entire array, and instead wish to pass in a slice of the total result large enough to cover the range specified in `args`.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L32-L107
parameters: list_slice, args, connection_type, edge_type, pageinfo_type, slice_start, list_length, list_slice_length
question: What does this function do?
answer: Given a slice (subset) of an array, returns a connection object for use in GraphQL. This function is similar to `connectionFromArray`, but is intended for use cases where you know the cardinality of the connection, consider it too large to materialize the entire array, and instead wish to pass in a slice of the total result large enough to cover the range specified in `args`.
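
A usage sketch for connection_from_list_slice. The scenario is hypothetical: the full result set has 1000 rows but only rows 100-109 were fetched, e.g. with a LIMIT/OFFSET query:

    from graphql_relay.connection.arrayconnection import connection_from_list_slice

    page = ['row-%d' % i for i in range(100, 110)]
    connection = connection_from_list_slice(
        page,
        args={'first': 5},
        slice_start=100,              # offset of page[0] within the full list
        list_length=1000,             # cardinality of the full result
        list_slice_length=len(page),
    )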

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/connection/arrayconnection.py
func_name: cursor_for_object_in_connection
language: python
func_code_string:
    def cursor_for_object_in_connection(data, _object):
        '''
        Return the cursor associated with an object in an array.
        '''
        if _object not in data:
            return None

        offset = data.index(_object)
        return offset_to_cursor(offset)
func_documentation_string: Return the cursor associated with an object in an array.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L134-L142
parameters: data, _object
question: What does this function do?
answer: Return the cursor associated with an object in an array.
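
A usage sketch for cursor_for_object_in_connection:

    from graphql_relay.connection.arrayconnection import (
        cursor_for_object_in_connection)

    letters = ['A', 'B', 'C']
    cursor = cursor_for_object_in_connection(letters, 'B')   # cursor for offset 1
    missing = cursor_for_object_in_connection(letters, 'Z')  # None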

repository_name: graphql-python/graphql-relay-py
func_path_in_repository: graphql_relay/connection/arrayconnection.py
func_name: get_offset_with_default
language: python
func_code_string:
    def get_offset_with_default(cursor=None, default_offset=0):
        '''
        Given an optional cursor and a default offset, returns the offset
        to use; if the cursor contains a valid offset, that will be used,
        otherwise it will be the default.
        '''
        if not is_str(cursor):
            return default_offset

        offset = cursor_to_offset(cursor)
        try:
            return int(offset)
        except:
            return default_offset
func_documentation_string: Given an optional cursor and a default offset, returns the offset to use; if the cursor contains a valid offset, that will be used, otherwise it will be the default.
split_name: train
func_code_url: https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L145-L158
parameters: cursor, default_offset
question: What does this function do?
answer: Given an optional cursor and a default offset, returns the offset to use; if the cursor contains a valid offset, that will be used, otherwise it will be the default.
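
A usage sketch for get_offset_with_default, paired with offset_to_cursor from the same module:

    from graphql_relay.connection.arrayconnection import (
        get_offset_with_default, offset_to_cursor)

    assert get_offset_with_default(offset_to_cursor(7)) == 7
    assert get_offset_with_default(None, default_offset=3) == 3  # non-string falls back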

repository_name: patrickfuller/jgraph
func_path_in_repository: python/notebook.py
func_name: draw
language: python
func_code_string:
    def draw(data, size=(600, 400), node_size=2.0, edge_size=0.25,
             default_node_color=0x5bc0de, default_edge_color=0xaaaaaa, z=100,
             shader='basic', optimize=True, directed=True, display_html=True,
             show_save=False):
        """Draws an interactive 3D visualization of the inputted graph.

        Args:
            data: Either an adjacency list of tuples (ie. [(1,2),...]) or object
            size: (Optional) Dimensions of visualization, in pixels
            node_size: (Optional) Defaults to 2.0
            edge_size: (Optional) Defaults to 0.25
            default_node_color: (Optional) If loading data without specified
                'color' properties, this will be used. Default is 0x5bc0de
            default_edge_color: (Optional) If loading data without specified
                'color' properties, this will be used. Default is 0xaaaaaa
            z: (Optional) Starting z position of the camera. Default is 100.
            shader: (Optional) Specifies shading algorithm to use. Can be 'toon',
                'basic', 'phong', or 'lambert'. Default is 'basic'.
            optimize: (Optional) Runs a force-directed layout algorithm on the
                graph. Default True.
            directed: (Optional) Includes arrows on edges to indicate direction.
                Default True.
            display_html: If True (default), embed the html in an IPython display.
                If False, return the html as a string.
            show_save: If True, displays a save icon for rendering graph as an
                image.

        Inputting an adjacency list into `data` results in a 'default' graph type.
        For more customization, use the more expressive object format.
        """
        # Catch errors on string-based input before getting js involved
        shader_options = ['toon', 'basic', 'phong', 'lambert']
        if shader not in shader_options:
            raise Exception('Invalid shader! Please use one of: ' +
                            ', '.join(shader_options))

        if isinstance(default_edge_color, int):
            default_edge_color = hex(default_edge_color)
        if isinstance(default_node_color, int):
            default_node_color = hex(default_node_color)

        # Guess the input format and handle accordingly
        if isinstance(data, list):
            graph = json_formatter.dumps(generate(data, iterations=1))
        elif isinstance(data, dict):
            # Convert color hex to string for json handling
            for node_key in data['nodes']:
                node = data['nodes'][node_key]
                if 'color' in node and isinstance(node['color'], int):
                    node['color'] = hex(node['color'])
            for edge in data['edges']:
                if 'color' in edge and isinstance(edge['color'], int):
                    edge['color'] = hex(edge['color'])
            graph = json_formatter.dumps(data)
        else:
            # Support both files and strings
            try:
                with open(data) as in_file:
                    graph = in_file.read()
            except:
                graph = data

        div_id = uuid.uuid4()
        html = '''<div id="graph-%(id)s"></div>
               <script type="text/javascript">
               require.config({baseUrl: '/',
                               paths: {jgraph: ['%(local)s', '%(remote)s']}});
               require(['jgraph'], function () {
                   var $d = $('#graph-%(id)s');
                   $d.width(%(w)d); $d.height(%(h)d);
                   $d.jgraph = jQuery.extend({}, jgraph);
                   $d.jgraph.create($d, {nodeSize: %(node_size)f,
                                         edgeSize: %(edge_size)f,
                                         defaultNodeColor: '%(node_color)s',
                                         defaultEdgeColor: '%(edge_color)s',
                                         shader: '%(shader)s',
                                         z: %(z)d,
                                         runOptimization: %(optimize)s,
                                         directed: %(directed)s,
                                         showSave: %(show_save)s});
                   $d.jgraph.draw(%(graph)s);

                   $d.resizable({
                       aspectRatio: %(w)d / %(h)d,
                       resize: function (evt, ui) {
                           $d.jgraph.renderer.setSize(ui.size.width,
                                                      ui.size.height);
                       }
                   });
               });
               </script>''' % dict(id=div_id, local=local_path[:-3],
                                   remote=remote_path[:-3], w=size[0], h=size[1],
                                   node_size=node_size, edge_size=edge_size,
                                   node_color=default_node_color,
                                   edge_color=default_edge_color, shader=shader,
                                   z=z, graph=graph,
                                   optimize='true' if optimize else 'false',
                                   directed='true' if directed else 'false',
                                   show_save='true' if show_save else 'false')

        # Execute js and display the results in a div (see script for more)
        if display_html:
            display(HTML(html))
        else:
            return html
func_documentation_string: Draws an interactive 3D visualization of the inputted graph. (Argument descriptions as in the docstring above.)
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/notebook.py#L29-L133
parameters: data, size
question: What does this function do?
answer: Draws an interactive 3D visualization of the inputted graph.
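
A minimal usage sketch for draw, assuming the published package is importable as jgraph (per the project README) and the cell runs in an IPython/Jupyter notebook; the adjacency list is illustrative:

    import jgraph

    jgraph.draw([(1, 2), (2, 3), (3, 4), (4, 1), (4, 5), (5, 2)])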

repository_name: patrickfuller/jgraph
func_path_in_repository: python/notebook.py
func_name: generate
language: python
func_code_string:
    def generate(data, iterations=1000, force_strength=5.0, dampening=0.01,
                 max_velocity=2.0, max_distance=50, is_3d=True):
        """Runs a force-directed algorithm on a graph, returning a data structure.

        Args:
            data: An adjacency list of tuples (ie. [(1,2),...])
            iterations: (Optional) Number of FDL iterations to run in coordinate
                generation
            force_strength: (Optional) Strength of Coulomb and Hooke forces
                (edit this to scale the distance between nodes)
            dampening: (Optional) Multiplier to reduce force applied to nodes
            max_velocity: (Optional) Maximum distance a node can move in one step
            max_distance: (Optional) The maximum inter-node distance considered
            is_3d: (Optional) Generates three-dimensional coordinates

        Outputs a json-serializable Python object. To visualize, pass the output to
        `jgraph.draw(...)`.
        """
        edges = [{'source': s, 'target': t} for s, t in data]
        nodes = force_directed_layout.run(edges, iterations, force_strength,
                                          dampening, max_velocity, max_distance,
                                          is_3d)
        return {'edges': edges, 'nodes': nodes}
func_documentation_string: Runs a force-directed algorithm on a graph, returning a data structure. (Argument descriptions as in the docstring above.)
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/notebook.py#L136-L159
parameters: data, iterations, force_strength, dampening, max_velocity, max_distance, is_3d
question: What does this function do?
answer: Runs a force-directed algorithm on a graph, returning a data structure.
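
A usage sketch for generate, precomputing a layout and passing the cached result to draw as the docstring suggests; the graph is illustrative:

    import jgraph

    graph = jgraph.generate([(1, 2), (2, 3), (3, 1)], iterations=1000)
    jgraph.draw(graph)  # dict input carries the precomputed coordinates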

repository_name: patrickfuller/jgraph
func_path_in_repository: python/json_formatter.py
func_name: compress
language: python
func_code_string:
    def compress(obj):
        """Outputs json without whitespace."""
        return json.dumps(obj, sort_keys=True, separators=(',', ':'),
                          cls=CustomEncoder)
func_documentation_string: Outputs json without whitespace.
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L18-L21
parameters: obj
question: What does this function do?
answer: Outputs json without whitespace.

repository_name: patrickfuller/jgraph
func_path_in_repository: python/json_formatter.py
func_name: dumps
language: python
func_code_string:
    def dumps(obj):
        """Outputs json with formatting edits + object handling."""
        return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)
func_documentation_string: Outputs json with formatting edits + object handling.
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L24-L26
parameters: obj
question: What does this function do?
answer: Outputs json with formatting edits + object handling.
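
A usage sketch for compress and dumps together. This assumes the script runs from the repository's python/ directory so the json_formatter module is importable; the graph object is illustrative:

    from json_formatter import compress, dumps

    graph = {'nodes': {'a': {'location': [0, 0, 0]}},
             'edges': [{'source': 'a', 'target': 'a'}]}

    print(compress(graph))  # single-line json with no whitespace
    print(dumps(graph))     # indented json with per-entry line folding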

repository_name: patrickfuller/jgraph
func_path_in_repository: python/json_formatter.py
func_name: CustomEncoder.encode
language: python
func_code_string:
    def encode(self, obj):
        """Fired for every object."""
        s = super(CustomEncoder, self).encode(obj)
        # If uncompressed, postprocess for formatting
        if len(s.splitlines()) > 1:
            s = self.postprocess(s)
        return s
func_documentation_string: Fired for every object.
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L31-L37
parameters: obj
question: What does this function do?
answer: Fired for every object.

repository_name: patrickfuller/jgraph
func_path_in_repository: python/json_formatter.py
func_name: CustomEncoder.postprocess
language: python
func_code_string:
    def postprocess(self, json_string):
        """Displays each entry on its own line."""
        is_compressing, is_hash, compressed, spaces = False, False, [], 0
        for row in json_string.split('\n'):
            if is_compressing:
                if (row[:spaces + 5] == ' ' * (spaces + 4) +
                        ('"' if is_hash else '{')):
                    compressed.append(row.rstrip())
                elif (len(row) > spaces and row[:spaces] == ' ' * spaces and
                        re.match('[\]\}],?', row[spaces:].rstrip())):
                    compressed.append(row.rstrip())
                    is_compressing = False
                else:
                    compressed[-1] += ' ' + row.strip()
            else:
                compressed.append(row.rstrip())
                if any(a in row for a in ['edges', 'nodes']):
                    # Fix to handle issues that arise with empty lists
                    if '[]' in row:
                        continue
                    spaces = sum(1 for _ in takewhile(str.isspace, row))
                    is_compressing, is_hash = True, '{' in row
        return '\n'.join(compressed)
func_documentation_string: Displays each entry on its own line.
split_name: train
func_code_url: https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L39-L61
parameters: json_string
question: What does this function do?
answer: Displays each entry on its own line.
patrickfuller/jgraph | python/force_directed_layout.py | run | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | python | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | [
"def",
"run",
"(",
"edges",
",",
"iterations",
"=",
"1000",
",",
"force_strength",
"=",
"5.0",
",",
"dampening",
"=",
"0.01",
",",
"max_velocity",
"=",
"2.0",
",",
"max_distance",
"=",
"50",
",",
"is_3d",
"=",
"True",
")",
":",
"# Get a list of node ids from the edge data",
"nodes",
"=",
"set",
"(",
"e",
"[",
"'source'",
"]",
"for",
"e",
"in",
"edges",
")",
"|",
"set",
"(",
"e",
"[",
"'target'",
"]",
"for",
"e",
"in",
"edges",
")",
"# Convert to a data-storing object and initialize some values",
"d",
"=",
"3",
"if",
"is_3d",
"else",
"2",
"nodes",
"=",
"{",
"n",
":",
"{",
"'velocity'",
":",
"[",
"0.0",
"]",
"*",
"d",
",",
"'force'",
":",
"[",
"0.0",
"]",
"*",
"d",
"}",
"for",
"n",
"in",
"nodes",
"}",
"# Repeat n times (is there a more Pythonic way to do this?)",
"for",
"_",
"in",
"repeat",
"(",
"None",
",",
"iterations",
")",
":",
"# Add in Coulomb-esque node-node repulsive forces",
"for",
"node1",
",",
"node2",
"in",
"combinations",
"(",
"nodes",
".",
"values",
"(",
")",
",",
"2",
")",
":",
"_coulomb",
"(",
"node1",
",",
"node2",
",",
"force_strength",
",",
"max_distance",
")",
"# And Hooke-esque edge spring forces",
"for",
"edge",
"in",
"edges",
":",
"_hooke",
"(",
"nodes",
"[",
"edge",
"[",
"'source'",
"]",
"]",
",",
"nodes",
"[",
"edge",
"[",
"'target'",
"]",
"]",
",",
"force_strength",
"*",
"edge",
".",
"get",
"(",
"'size'",
",",
"1",
")",
",",
"max_distance",
")",
"# Move by resultant force",
"for",
"node",
"in",
"nodes",
".",
"values",
"(",
")",
":",
"# Constrain the force to the bounds specified by input parameter",
"force",
"=",
"[",
"_constrain",
"(",
"dampening",
"*",
"f",
",",
"-",
"max_velocity",
",",
"max_velocity",
")",
"for",
"f",
"in",
"node",
"[",
"'force'",
"]",
"]",
"# Update velocities and reset force",
"node",
"[",
"'velocity'",
"]",
"=",
"[",
"v",
"+",
"dv",
"for",
"v",
",",
"dv",
"in",
"zip",
"(",
"node",
"[",
"'velocity'",
"]",
",",
"force",
")",
"]",
"node",
"[",
"'force'",
"]",
"=",
"[",
"0",
"]",
"*",
"d",
"# Clean and return",
"for",
"node",
"in",
"nodes",
".",
"values",
"(",
")",
":",
"del",
"node",
"[",
"'force'",
"]",
"node",
"[",
"'location'",
"]",
"=",
"node",
"[",
"'velocity'",
"]",
"del",
"node",
"[",
"'velocity'",
"]",
"# Even if it's 2D, let's specify three dimensions",
"if",
"not",
"is_3d",
":",
"node",
"[",
"'location'",
"]",
"+=",
"[",
"0.0",
"]",
"return",
"nodes"
] | Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions | [
"Runs",
"a",
"force",
"-",
"directed",
"-",
"layout",
"algorithm",
"on",
"the",
"input",
"graph",
"."
] | train | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/force_directed_layout.py#L10-L59 | [
"edges",
"iterations",
"force_strength",
"dampening",
"max_velocity",
"max_distance",
"is_3d"
] | What does this function do? | [
"Runs",
"a",
"force",
"-",
"directed",
"-",
"layout",
"algorithm",
"on",
"the",
"input",
"graph",
"."
] |
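
A minimal usage sketch for the run function above, assuming the module is importable under the file name shown in the path field; the triangle graph and iteration count are illustrative, not from the original repository:

from force_directed_layout import run  # hypothetical import path

# Edge dicts need 'source' and 'target'; optional 'size' scales the spring force.
edges = [
    {'source': 'a', 'target': 'b'},
    {'source': 'b', 'target': 'c', 'size': 2},
    {'source': 'c', 'target': 'a'},
]

positions = run(edges, iterations=200, is_3d=False)
for node_id, data in positions.items():
    # each node maps to {'location': [x, y, z]}; z is padded with 0.0 in 2D mode
    print(node_id, data['location'])
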
patrickfuller/jgraph | python/force_directed_layout.py | _coulomb | def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
# If the distance isn't huge (i.e. Coulomb is negligible), calculate
if distance < r:
force = (k / distance) ** 2
n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)] | python | def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
# If the distance isn't huge (i.e. Coulomb is negligible), calculate
if distance < r:
force = (k / distance) ** 2
n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)] | [
"def",
"_coulomb",
"(",
"n1",
",",
"n2",
",",
"k",
",",
"r",
")",
":",
"# Get relevant positional data",
"delta",
"=",
"[",
"x2",
"-",
"x1",
"for",
"x1",
",",
"x2",
"in",
"zip",
"(",
"n1",
"[",
"'velocity'",
"]",
",",
"n2",
"[",
"'velocity'",
"]",
")",
"]",
"distance",
"=",
"sqrt",
"(",
"sum",
"(",
"d",
"**",
"2",
"for",
"d",
"in",
"delta",
")",
")",
"# If the deltas are too small, use random values to keep things moving",
"if",
"distance",
"<",
"0.1",
":",
"delta",
"=",
"[",
"uniform",
"(",
"0.1",
",",
"0.2",
")",
"for",
"_",
"in",
"repeat",
"(",
"None",
",",
"3",
")",
"]",
"distance",
"=",
"sqrt",
"(",
"sum",
"(",
"d",
"**",
"2",
"for",
"d",
"in",
"delta",
")",
")",
"# If the distance isn't huge (ie. Coulomb is negligible), calculate",
"if",
"distance",
"<",
"r",
":",
"force",
"=",
"(",
"k",
"/",
"distance",
")",
"**",
"2",
"n1",
"[",
"'force'",
"]",
"=",
"[",
"f",
"-",
"force",
"*",
"d",
"for",
"f",
",",
"d",
"in",
"zip",
"(",
"n1",
"[",
"'force'",
"]",
",",
"delta",
")",
"]",
"n2",
"[",
"'force'",
"]",
"=",
"[",
"f",
"+",
"force",
"*",
"d",
"for",
"f",
",",
"d",
"in",
"zip",
"(",
"n2",
"[",
"'force'",
"]",
",",
"delta",
")",
"]"
] | Calculates Coulomb forces and updates node data. | [
"Calculates",
"Coulomb",
"forces",
"and",
"updates",
"node",
"data",
"."
] | train | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/force_directed_layout.py#L62-L77 | [
"n1",
"n2",
"k",
"r"
] | What does this function do? | [
"Calculates",
"Coulomb",
"forces",
"and",
"updates",
"node",
"data",
"."
] |
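
To see the Coulomb update in isolation: two nodes 1.0 apart with force strength k=5 receive equal and opposite force contributions of magnitude (k/distance)^2 = 25. A sketch, assuming _coulomb is importable from the same module; note the function reads positions out of the 'velocity' key, just as run above does:

from force_directed_layout import _coulomb  # hypothetical import path

n1 = {'velocity': [0.0, 0.0, 0.0], 'force': [0.0, 0.0, 0.0]}
n2 = {'velocity': [1.0, 0.0, 0.0], 'force': [0.0, 0.0, 0.0]}

_coulomb(n1, n2, k=5.0, r=50)
print(n1['force'])  # [-25.0, 0.0, 0.0] -- pushed away from n2
print(n2['force'])  # [25.0, 0.0, 0.0]  -- pushed away from n1
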
pypyr/pypyr-cli | pypyr/steps/contextclearall.py | run_step | def run_step(context):
"""Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context.
"""
logger.debug("started")
context.clear()
logger.info(f"Context wiped. New context size: {len(context)}")
logger.debug("done") | python | def run_step(context):
"""Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context.
"""
logger.debug("started")
context.clear()
logger.info(f"Context wiped. New context size: {len(context)}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"clear",
"(",
")",
"logger",
".",
"info",
"(",
"f\"Context wiped. New context size: {len(context)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context. | [
"Wipe",
"the",
"entire",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextclearall.py#L8-L20 | [
"context"
] | What does this function do? | [
"Wipe",
"the",
"entire",
"context",
"."
] |
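
A sketch of the step's effect, relying only on pypyr.context.Context being dict-like; the keys are illustrative:

from pypyr.context import Context
import pypyr.steps.contextclearall as contextclearall

context = Context({'key1': 'value1', 'key2': 'value2'})
contextclearall.run_step(context)
assert len(context) == 0  # everything wiped
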
pypyr/pypyr-cli | pypyr/parser/jsonfile.py | get_parsed_context | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
assert context_arg, ("pipeline must be invoked with context arg set. For "
"this json parser you're looking for something "
"like: "
"pypyr pipelinename './myjsonfile.json'")
logger.debug("starting")
# open the json file on disk so that you can initialize the dictionary
logger.debug(f"attempting to open file: {context_arg}")
with open(context_arg) as json_file:
payload = json.load(json_file)
logger.debug(f"json file loaded into context. Count: {len(payload)}")
logger.debug("done")
return payload | python | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
assert context_arg, ("pipeline must be invoked with context arg set. For "
"this json parser you're looking for something "
"like: "
"pypyr pipelinename './myjsonfile.json'")
logger.debug("starting")
# open the json file on disk so that you can initialize the dictionary
logger.debug(f"attempting to open file: {context_arg}")
with open(context_arg) as json_file:
payload = json.load(json_file)
logger.debug(f"json file loaded into context. Count: {len(payload)}")
logger.debug("done")
return payload | [
"def",
"get_parsed_context",
"(",
"context_arg",
")",
":",
"assert",
"context_arg",
",",
"(",
"\"pipeline must be invoked with context arg set. For \"",
"\"this json parser you're looking for something \"",
"\"like: \"",
"\"pypyr pipelinename './myjsonfile.json'\"",
")",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# open the json file on disk so that you can initialize the dictionary",
"logger",
".",
"debug",
"(",
"f\"attempting to open file: {context_arg}\"",
")",
"with",
"open",
"(",
"context_arg",
")",
"as",
"json_file",
":",
"payload",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"logger",
".",
"debug",
"(",
"f\"json file loaded into context. Count: {len(payload)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"payload"
] | Parse input context string and return context as dictionary. | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/parser/jsonfile.py#L10-L24 | [
"context_arg"
] | What does this function do? | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] |
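
A round-trip sketch: write a small json file, then parse it the way the pypyr cli would when handed the path as the context arg. The file name and keys are illustrative:

import json
import pypyr.parser.jsonfile as jsonfile

with open('myjsonfile.json', 'w') as f:
    json.dump({'env': 'dev', 'retries': 3}, f)

context = jsonfile.get_parsed_context('myjsonfile.json')
print(context['retries'])  # 3
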
pypyr/pypyr-cli | pypyr/steps/pathcheck.py | run_step | def run_step(context):
"""pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, count is 1. If you specified multiple
in paths or a glob expression, count is the total number of matches
found across all of them.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathCheck missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None.
"""
logger.debug("started")
context.assert_key_has_value(key='pathCheck', caller=__name__)
paths_to_check = context['pathCheck']
if not paths_to_check:
raise KeyInContextHasNoValueError("context['pathCheck'] must have a "
f"value for {__name__}.")
# pathsToCheck can be a string or a list in case there are multiple paths
if isinstance(paths_to_check, list):
check_me = paths_to_check
else:
# assuming it's a str/path at this point
check_me = [paths_to_check]
out = {}
total_found = 0
for path in check_me:
logger.debug(f"checking path: {path}")
formatted_path = context.get_formatted_string(path)
found_paths = pypyr.utils.filesystem.get_glob(formatted_path)
no_of_paths = len(found_paths)
out[path] = {
'exists': no_of_paths > 0,
'count': no_of_paths,
'found': found_paths
}
total_found = total_found + no_of_paths
context['pathCheckOut'] = out
logger.info(f'checked {len(out)} path(s) and found {total_found}')
logger.debug("done") | python | def run_step(context):
"""pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, count is 1. If you specified multiple
in paths or a glob expression, count is the total number of matches
found across all of them.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathCheck missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None.
"""
logger.debug("started")
context.assert_key_has_value(key='pathCheck', caller=__name__)
paths_to_check = context['pathCheck']
if not paths_to_check:
raise KeyInContextHasNoValueError("context['pathCheck'] must have a "
f"value for {__name__}.")
# pathsToCheck can be a string or a list in case there are multiple paths
if isinstance(paths_to_check, list):
check_me = paths_to_check
else:
# assuming it's a str/path at this point
check_me = [paths_to_check]
out = {}
total_found = 0
for path in check_me:
logger.debug(f"checking path: {path}")
formatted_path = context.get_formatted_string(path)
found_paths = pypyr.utils.filesystem.get_glob(formatted_path)
no_of_paths = len(found_paths)
out[path] = {
'exists': no_of_paths > 0,
'count': no_of_paths,
'found': found_paths
}
total_found = total_found + no_of_paths
context['pathCheckOut'] = out
logger.info(f'checked {len(out)} path(s) and found {total_found}')
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'pathCheck'",
",",
"caller",
"=",
"__name__",
")",
"paths_to_check",
"=",
"context",
"[",
"'pathCheck'",
"]",
"if",
"not",
"paths_to_check",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"\"context['pathCheck'] must have a \"",
"f\"value for {__name__}.\"",
")",
"# pathsToCheck can be a string or a list in case there are multiple paths",
"if",
"isinstance",
"(",
"paths_to_check",
",",
"list",
")",
":",
"check_me",
"=",
"paths_to_check",
"else",
":",
"# assuming it's a str/path at this point",
"check_me",
"=",
"[",
"paths_to_check",
"]",
"out",
"=",
"{",
"}",
"total_found",
"=",
"0",
"for",
"path",
"in",
"check_me",
":",
"logger",
".",
"debug",
"(",
"f\"checking path: {path}\"",
")",
"formatted_path",
"=",
"context",
".",
"get_formatted_string",
"(",
"path",
")",
"found_paths",
"=",
"pypyr",
".",
"utils",
".",
"filesystem",
".",
"get_glob",
"(",
"formatted_path",
")",
"no_of_paths",
"=",
"len",
"(",
"found_paths",
")",
"out",
"[",
"path",
"]",
"=",
"{",
"'exists'",
":",
"no_of_paths",
">",
"0",
",",
"'count'",
":",
"no_of_paths",
",",
"'found'",
":",
"found_paths",
"}",
"total_found",
"=",
"total_found",
"+",
"no_of_paths",
"context",
"[",
"'pathCheckOut'",
"]",
"=",
"out",
"logger",
".",
"info",
"(",
"f'checked {len(out)} path(s) and found {total_found}'",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, count is 1. If you specified multiple
in paths or a glob expression, count is the total number of matches
found across all of them.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathCheck missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None. | [
"pypyr",
"step",
"that",
"checks",
"if",
"a",
"file",
"or",
"directory",
"path",
"exists",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pathcheck.py#L10-L83 | [
"context"
] | What does this function do? | [
"pypyr",
"step",
"that",
"checks",
"if",
"a",
"file",
"or",
"directory",
"path",
"exists",
"."
] |
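
A minimal sketch of invoking the step directly; the file path and glob are illustrative inputs:

from pypyr.context import Context
import pypyr.steps.pathcheck as pathcheck

context = Context({'pathCheck': ['./myfile.txt', './logs/*.log']})
pathcheck.run_step(context)

out = context['pathCheckOut']
if out['./myfile.txt']['exists']:
    print(f"found {out['./myfile.txt']['count']} file(s)")
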
pypyr/pypyr-cli | pypyr/steps/filewritejson.py | run_step | def run_step(context):
"""Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write the output file
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteJson']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteJson']
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteJson']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | python | def run_step(context):
"""Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write the output file
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteJson']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteJson']
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteJson']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_child_key_has_value",
"(",
"'fileWriteJson'",
",",
"'path'",
",",
"__name__",
")",
"out_path",
"=",
"context",
".",
"get_formatted_string",
"(",
"context",
"[",
"'fileWriteJson'",
"]",
"[",
"'path'",
"]",
")",
"# doing it like this to safeguard against accidentally dumping all context",
"# with potentially sensitive values in it to disk if payload exists but is",
"# None.",
"is_payload_specified",
"=",
"'payload'",
"in",
"context",
"[",
"'fileWriteJson'",
"]",
"logger",
".",
"debug",
"(",
"f\"opening destination file for writing: {out_path}\"",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_path",
")",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"out_path",
",",
"'w'",
")",
"as",
"outfile",
":",
"if",
"is_payload_specified",
":",
"payload",
"=",
"context",
"[",
"'fileWriteJson'",
"]",
"[",
"'payload'",
"]",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"payload",
")",
"else",
":",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"context",
")",
"json",
".",
"dump",
"(",
"formatted_iterable",
",",
"outfile",
",",
"indent",
"=",
"2",
",",
"ensure_ascii",
"=",
"False",
")",
"logger",
".",
"info",
"(",
"f\"formatted context content and wrote to {out_path}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write the output file
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None. | [
"Write",
"payload",
"out",
"to",
"json",
"file",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filewritejson.py#L10-L53 | [
"context"
] | What does this function do? | [
"Write",
"payload",
"out",
"to",
"json",
"file",
"."
] |
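
A sketch writing only the payload key, with a {key} formatting expression resolved from context; the path and keys are illustrative:

from pypyr.context import Context
import pypyr.steps.filewritejson as filewritejson

context = Context({
    'env': 'dev',
    'fileWriteJson': {
        'path': 'out/{env}/settings.json',  # formats to out/dev/settings.json
        'payload': {'environment': '{env}', 'debug': True},
    },
})
filewritejson.run_step(context)  # creates out/dev/ and writes the json
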
pypyr/pypyr-cli | pypyr/steps/pype.py | run_step | def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) = get_arguments(context)
try:
if use_parent_context:
logger.info(f"pyping {pipeline_name}, using parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=context,
parse_input=not skip_parse,
loader=loader
)
else:
logger.info(f"pyping {pipeline_name}, without parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
working_dir=context.working_dir,
parse_input=not skip_parse,
loader=loader
)
logger.info(f"pyped {pipeline_name}.")
except Exception as ex_info:
# yes, yes, don't catch Exception. Have to, though, in order to swallow
# errs if !raise_error
logger.error(f"Something went wrong pyping {pipeline_name}. "
f"{type(ex_info).__name__}: {ex_info}")
if raise_error:
logger.debug("Raising original exception to caller.")
raise
else:
logger.debug(
f"raiseError is False. Swallowing error in {pipeline_name}.")
logger.debug("done") | python | def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) = get_arguments(context)
try:
if use_parent_context:
logger.info(f"pyping {pipeline_name}, using parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=context,
parse_input=not skip_parse,
loader=loader
)
else:
logger.info(f"pyping {pipeline_name}, without parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
working_dir=context.working_dir,
parse_input=not skip_parse,
loader=loader
)
logger.info(f"pyped {pipeline_name}.")
except Exception as ex_info:
# yes, yes, don't catch Exception. Have to, though, in order to swallow
# errs if !raise_error
logger.error(f"Something went wrong pyping {pipeline_name}. "
f"{type(ex_info).__name__}: {ex_info}")
if raise_error:
logger.debug("Raising original exception to caller.")
raise
else:
logger.debug(
f"raiseError is False. Swallowing error in {pipeline_name}.")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"(",
"pipeline_name",
",",
"use_parent_context",
",",
"pipe_arg",
",",
"skip_parse",
",",
"raise_error",
",",
"loader",
",",
")",
"=",
"get_arguments",
"(",
"context",
")",
"try",
":",
"if",
"use_parent_context",
":",
"logger",
".",
"info",
"(",
"f\"pyping {pipeline_name}, using parent context.\"",
")",
"pipelinerunner",
".",
"load_and_run_pipeline",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"pipe_arg",
",",
"context",
"=",
"context",
",",
"parse_input",
"=",
"not",
"skip_parse",
",",
"loader",
"=",
"loader",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"pyping {pipeline_name}, without parent context.\"",
")",
"pipelinerunner",
".",
"load_and_run_pipeline",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"pipe_arg",
",",
"working_dir",
"=",
"context",
".",
"working_dir",
",",
"parse_input",
"=",
"not",
"skip_parse",
",",
"loader",
"=",
"loader",
")",
"logger",
".",
"info",
"(",
"f\"pyped {pipeline_name}.\"",
")",
"except",
"Exception",
"as",
"ex_info",
":",
"# yes, yes, don't catch Exception. Have to, though, in order to swallow",
"# errs if !raise_error",
"logger",
".",
"error",
"(",
"f\"Something went wrong pyping {pipeline_name}. \"",
"f\"{type(ex_info).__name__}: {ex_info}\"",
")",
"if",
"raise_error",
":",
"logger",
".",
"debug",
"(",
"\"Raising original exception to caller.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"raiseError is False. Swallowing error in {pipeline_name}.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty. | [
"Run",
"another",
"pipeline",
"from",
"this",
"step",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pype.py#L10-L93 | [
"context"
] | What does this function do? | [
"Run",
"another",
"pipeline",
"from",
"this",
"step",
"."
] |
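
A sketch of calling a child pipeline with the parent's context; 'child-pipeline' is a placeholder and its yaml must exist on disk. Normally the pypyr runner sets working_dir on the context; it is set by hand here only to make the snippet self-contained:

from pypyr.context import Context
import pypyr.steps.pype as pype

context = Context({
    'pype': {
        'name': 'child-pipeline',
        'useParentContext': True,  # share this context with the child
        'raiseError': False,       # log child errors, carry on with next step
    },
})
context.working_dir = '.'
pype.run_step(context)
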
pypyr/pypyr-cli | pypyr/steps/pype.py | get_arguments | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error, #bool
loader #str or None
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | python | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error, #bool
loader #str or None
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | [
"def",
"get_arguments",
"(",
"context",
")",
":",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'pype'",
",",
"caller",
"=",
"__name__",
")",
"pype",
"=",
"context",
".",
"get_formatted",
"(",
"'pype'",
")",
"try",
":",
"pipeline_name",
"=",
"pype",
"[",
"'name'",
"]",
"if",
"pipeline_name",
"is",
"None",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"\"pypyr.steps.pype ['pype']['name'] exists but is empty.\"",
")",
"except",
"KeyError",
"as",
"err",
":",
"raise",
"KeyNotInContextError",
"(",
"\"pypyr.steps.pype missing 'name' in the 'pype' context item. \"",
"\"You need to specify the pipeline name to run another \"",
"\"pipeline.\"",
")",
"from",
"err",
"use_parent_context",
"=",
"pype",
".",
"get",
"(",
"'useParentContext'",
",",
"True",
")",
"pipe_arg",
"=",
"pype",
".",
"get",
"(",
"'pipeArg'",
",",
"None",
")",
"skip_parse",
"=",
"pype",
".",
"get",
"(",
"'skipParse'",
",",
"True",
")",
"raise_error",
"=",
"pype",
".",
"get",
"(",
"'raiseError'",
",",
"True",
")",
"loader",
"=",
"pype",
".",
"get",
"(",
"'loader'",
",",
"None",
")",
"return",
"(",
"pipeline_name",
",",
"use_parent_context",
",",
"pipe_arg",
",",
"skip_parse",
",",
"raise_error",
",",
"loader",
",",
")"
] | Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error, #bool
loader #str or None
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None. | [
"Parse",
"arguments",
"for",
"pype",
"from",
"context",
"and",
"assign",
"default",
"values",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pype.py#L96-L143 | [
"context"
] | What does this function do? | [
"Parse",
"arguments",
"for",
"pype",
"from",
"context",
"and",
"assign",
"default",
"values",
"."
] |
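
The defaults are easiest to see by calling get_arguments with only the mandatory name key; a sketch with a placeholder pipeline name:

from pypyr.context import Context
import pypyr.steps.pype as pype

context = Context({'pype': {'name': 'child-pipeline'}})
name, use_parent, pipe_arg, skip_parse, raise_error, loader = (
    pype.get_arguments(context))
print(name)         # child-pipeline
print(use_parent)   # True (default)
print(pipe_arg)     # None
print(skip_parse)   # True (default)
print(raise_error)  # True (default)
print(loader)       # None
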
pypyr/pypyr-cli | pypyr/pypeloaders/fileloader.py | get_pipeline_path | def get_pipeline_path(pipeline_name, working_directory):
"""Look for the pipeline in the various places it could be.
First checks {working_dir}/pipelines. Then checks {pypyr install dir}/pipelines.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
"""
logger.debug("starting")
# look for name.yaml in the pipelines/ sub-directory
logger.debug(f"current directory is {working_directory}")
# looking for {cwd}/pipelines/[pipeline_name].yaml
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: {pypyr_dir}")
pipeline_path = os.path.abspath(os.path.join(
pypyr_dir,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
raise PipelineNotFoundError(f"{pipeline_name}.yaml not found in "
f"either "
f"{working_directory}/pipelines "
f"or {pypyr_dir}/pipelines")
logger.debug("done")
return pipeline_path | python | def get_pipeline_path(pipeline_name, working_directory):
"""Look for the pipeline in the various places it could be.
First checks {working_dir}/pipelines. Then checks {pypyr install dir}/pipelines.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
"""
logger.debug("starting")
# look for name.yaml in the pipelines/ sub-directory
logger.debug(f"current directory is {working_directory}")
# looking for {cwd}/pipelines/[pipeline_name].yaml
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: {pypyr_dir}")
pipeline_path = os.path.abspath(os.path.join(
pypyr_dir,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
raise PipelineNotFoundError(f"{pipeline_name}.yaml not found in "
f"either "
f"{working_directory}/pipelines "
f"or {pypyr_dir}/pipelines")
logger.debug("done")
return pipeline_path | [
"def",
"get_pipeline_path",
"(",
"pipeline_name",
",",
"working_directory",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# look for name.yaml in the pipelines/ sub-directory",
"logger",
".",
"debug",
"(",
"f\"current directory is {working_directory}\"",
")",
"# looking for {cwd}/pipelines/[pipeline_name].yaml",
"pipeline_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"working_directory",
",",
"'pipelines'",
",",
"pipeline_name",
"+",
"'.yaml'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pipeline_path",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Found {pipeline_path}\"",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"{pipeline_name} not found in current \"",
"\"directory/pipelines folder. Looking in pypyr install \"",
"\"directory instead.\"",
")",
"pypyr_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
")",
"logger",
".",
"debug",
"(",
"f\"pypyr installation directory is: {pypyr_dir}\"",
")",
"pipeline_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pypyr_dir",
",",
"'pipelines'",
",",
"pipeline_name",
"+",
"'.yaml'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pipeline_path",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Found {pipeline_path}\"",
")",
"else",
":",
"raise",
"PipelineNotFoundError",
"(",
"f\"{pipeline_name}.yaml not found in \"",
"f\"either \"",
"f\"{working_directory}/pipelines \"",
"f\"or {pypyr_dir}/pipelines\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"pipeline_path"
] | Look for the pipeline in the various places it could be.
First checks {working_dir}/pipelines. Then checks {pypyr install dir}/pipelines.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines. | [
"Look",
"for",
"the",
"pipeline",
"in",
"the",
"various",
"places",
"it",
"could",
"be",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pypeloaders/fileloader.py#L11-L61 | [
"pipeline_name",
"working_directory"
] | What does this function do? | [
"Look",
"for",
"the",
"pipeline",
"in",
"the",
"various",
"places",
"it",
"could",
"be",
"."
] |
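
A sketch of the lookup, with a placeholder pipeline name; PipelineNotFoundError comes from pypyr.errors:

import os
from pypyr.errors import PipelineNotFoundError
from pypyr.pypeloaders.fileloader import get_pipeline_path

try:
    path = get_pipeline_path('my-pipeline', os.getcwd())
    print(f"will load {path}")
except PipelineNotFoundError as err:
    print(err)  # names both directories that were searched
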
pypyr/pypyr-cli | pypyr/pypeloaders/fileloader.py | get_pipeline_definition | def get_pipeline_definition(pipeline_name, working_dir):
"""Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs.
"""
logger.debug("starting")
pipeline_path = get_pipeline_path(
pipeline_name=pipeline_name,
working_directory=working_dir)
logger.debug(f"Trying to open pipeline at path {pipeline_path}")
try:
with open(pipeline_path) as yaml_file:
pipeline_definition = pypyr.yaml.get_pipeline_yaml(
yaml_file)
logger.debug(
f"found {len(pipeline_definition)} stages in pipeline.")
except FileNotFoundError:
logger.error(
"The pipeline doesn't exist. Looking for a file here: "
f"{pipeline_name}.yaml in the /pipelines sub directory.")
raise
logger.debug("pipeline definition loaded")
logger.debug("done")
return pipeline_definition | python | def get_pipeline_definition(pipeline_name, working_dir):
"""Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs.
"""
logger.debug("starting")
pipeline_path = get_pipeline_path(
pipeline_name=pipeline_name,
working_directory=working_dir)
logger.debug(f"Trying to open pipeline at path {pipeline_path}")
try:
with open(pipeline_path) as yaml_file:
pipeline_definition = pypyr.yaml.get_pipeline_yaml(
yaml_file)
logger.debug(
f"found {len(pipeline_definition)} stages in pipeline.")
except FileNotFoundError:
logger.error(
"The pipeline doesn't exist. Looking for a file here: "
f"{pipeline_name}.yaml in the /pipelines sub directory.")
raise
logger.debug("pipeline definition loaded")
logger.debug("done")
return pipeline_definition | [
"def",
"get_pipeline_definition",
"(",
"pipeline_name",
",",
"working_dir",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"pipeline_path",
"=",
"get_pipeline_path",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"working_directory",
"=",
"working_dir",
")",
"logger",
".",
"debug",
"(",
"f\"Trying to open pipeline at path {pipeline_path}\"",
")",
"try",
":",
"with",
"open",
"(",
"pipeline_path",
")",
"as",
"yaml_file",
":",
"pipeline_definition",
"=",
"pypyr",
".",
"yaml",
".",
"get_pipeline_yaml",
"(",
"yaml_file",
")",
"logger",
".",
"debug",
"(",
"f\"found {len(pipeline_definition)} stages in pipeline.\"",
")",
"except",
"FileNotFoundError",
":",
"logger",
".",
"error",
"(",
"\"The pipeline doesn't exist. Looking for a file here: \"",
"f\"{pipeline_name}.yaml in the /pipelines sub directory.\"",
")",
"raise",
"logger",
".",
"debug",
"(",
"\"pipeline definition loaded\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"pipeline_definition"
] | Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs. | [
"Open",
"and",
"parse",
"the",
"pipeline",
"definition",
"yaml",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pypeloaders/fileloader.py#L64-L107 | [
"pipeline_name",
"working_dir"
] | What does this function do? | [
"Open",
"and",
"parse",
"the",
"pipeline",
"definition",
"yaml",
"."
] |
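
A sketch loading and inspecting a pipeline, assuming ./pipelines/my-pipeline.yaml exists relative to the working dir:

import os
from pypyr.pypeloaders.fileloader import get_pipeline_definition

pipeline = get_pipeline_definition('my-pipeline', os.getcwd())
print(list(pipeline))  # typically the stage keys, e.g. ['context_parser', 'steps']
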
pypyr/pypyr-cli | pypyr/dsl.py | SpecialTagDirective.to_yaml | def to_yaml(cls, representer, node):
"""How to serialize this class back to yaml."""
return representer.represent_scalar(cls.yaml_tag, node.value) | python | def to_yaml(cls, representer, node):
"""How to serialize this class back to yaml."""
return representer.represent_scalar(cls.yaml_tag, node.value) | [
"def",
"to_yaml",
"(",
"cls",
",",
"representer",
",",
"node",
")",
":",
"return",
"representer",
".",
"represent_scalar",
"(",
"cls",
".",
"yaml_tag",
",",
"node",
".",
"value",
")"
] | How to serialize this class back to yaml. | [
"How",
"to",
"serialize",
"this",
"class",
"back",
"to",
"yaml",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L55-L57 | [
"cls",
"representer",
"node"
] | What does this function do? | [
"How",
"to",
"serialize",
"this",
"class",
"back",
"to",
"yaml",
"."
] |
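
A hedged sketch of the serialization path: ruamel.yaml dispatches to to_yaml for classes registered via register_class. PyString (pypyr's !py tag directive) is assumed here to be a concrete SpecialTagDirective subclass that takes the scalar value in its constructor:

import sys
from ruamel.yaml import YAML
from pypyr.dsl import PyString  # assumed concrete subclass

yaml = YAML()
yaml.register_class(PyString)
yaml.dump({'run': PyString('1 + 1')}, sys.stdout)  # emits: run: !py 1 + 1
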