repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition | idx
---|---|---|---|---|---|---|---|---|---|---|---|---
christophertbrown/bioscripts | ctbBio/nr_fasta.py | append_index_id | def append_index_id(id, ids):
"""
add index to id to make it unique wrt ids
"""
index = 1
mod = '%s_%s' % (id, index)
while mod in ids:
index += 1
mod = '%s_%s' % (id, index)
ids.append(mod)
return mod, ids | python | def append_index_id(id, ids):
"""
add index to id to make it unique wrt ids
"""
index = 1
mod = '%s_%s' % (id, index)
while mod in ids:
index += 1
mod = '%s_%s' % (id, index)
ids.append(mod)
return mod, ids | [
"def",
"append_index_id",
"(",
"id",
",",
"ids",
")",
":",
"index",
"=",
"1",
"mod",
"=",
"'%s_%s'",
"%",
"(",
"id",
",",
"index",
")",
"while",
"mod",
"in",
"ids",
":",
"index",
"+=",
"1",
"mod",
"=",
"'%s_%s'",
"%",
"(",
"id",
",",
"index",
")",
"ids",
".",
"append",
"(",
"mod",
")",
"return",
"mod",
",",
"ids"
] | add index to id to make it unique wrt ids | [
"add",
"index",
"to",
"id",
"to",
"make",
"it",
"unique",
"wrt",
"ids"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/nr_fasta.py#L11-L21 | train | 200 |
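A minimal usage sketch for `append_index_id` above (the definition is copied in so the snippet runs standalone; the id `seqA` is illustrative): each call appends the lowest `_<n>` suffix not already present in `ids`.

```python
def append_index_id(id, ids):
    index = 1
    mod = '%s_%s' % (id, index)
    while mod in ids:
        index += 1
        mod = '%s_%s' % (id, index)
    ids.append(mod)
    return mod, ids

ids = []
for _ in range(3):
    new, ids = append_index_id('seqA', ids)
    print(new)
# seqA_1
# seqA_2
# seqA_3
```

Note the linear `mod in ids` scan: with many duplicate ids, a `set` would keep membership checks O(1), at the cost of losing insertion order.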
christophertbrown/bioscripts | ctbBio/nr_fasta.py | de_rep | def de_rep(fastas, append_index, return_original = False):
"""
de-replicate fastas based on sequence names
"""
ids = []
for fasta in fastas:
for seq in parse_fasta(fasta):
header = seq[0].split('>')[1].split()
id = header[0]
if id not in ids:
ids.append(id)
if return_original is True:
yield [header, seq]
else:
yield seq
elif append_index == True:
new, ids = append_index_id(id, ids)
if return_original is True:
yield [header, ['>%s %s' % (new, ' '.join(header[1::])), seq[1]]]
else:
yield ['>%s %s' % (new, ' '.join(header[1::])), seq[1]] | python | def de_rep(fastas, append_index, return_original = False):
"""
de-replicate fastas based on sequence names
"""
ids = []
for fasta in fastas:
for seq in parse_fasta(fasta):
header = seq[0].split('>')[1].split()
id = header[0]
if id not in ids:
ids.append(id)
if return_original is True:
yield [header, seq]
else:
yield seq
elif append_index == True:
new, ids = append_index_id(id, ids)
if return_original is True:
yield [header, ['>%s %s' % (new, ' '.join(header[1::])), seq[1]]]
else:
yield ['>%s %s' % (new, ' '.join(header[1::])), seq[1]] | [
"def",
"de_rep",
"(",
"fastas",
",",
"append_index",
",",
"return_original",
"=",
"False",
")",
":",
"ids",
"=",
"[",
"]",
"for",
"fasta",
"in",
"fastas",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fasta",
")",
":",
"header",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"id",
"=",
"header",
"[",
"0",
"]",
"if",
"id",
"not",
"in",
"ids",
":",
"ids",
".",
"append",
"(",
"id",
")",
"if",
"return_original",
"is",
"True",
":",
"yield",
"[",
"header",
",",
"seq",
"]",
"else",
":",
"yield",
"seq",
"elif",
"append_index",
"==",
"True",
":",
"new",
",",
"ids",
"=",
"append_index_id",
"(",
"id",
",",
"ids",
")",
"if",
"return_original",
"is",
"True",
":",
"yield",
"[",
"header",
",",
"[",
"'>%s %s'",
"%",
"(",
"new",
",",
"' '",
".",
"join",
"(",
"header",
"[",
"1",
":",
":",
"]",
")",
")",
",",
"seq",
"[",
"1",
"]",
"]",
"]",
"else",
":",
"yield",
"[",
"'>%s %s'",
"%",
"(",
"new",
",",
"' '",
".",
"join",
"(",
"header",
"[",
"1",
":",
":",
"]",
")",
")",
",",
"seq",
"[",
"1",
"]",
"]"
] | de-replicate fastas based on sequence names | [
"de",
"-",
"replicate",
"fastas",
"based",
"on",
"sequence",
"names"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/nr_fasta.py#L23-L43 | train | 201 |
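The header-parsing convention `de_rep` relies on, shown in isolation (the header string is made up): strip the leading `>`, split on whitespace, and treat the first token as the record id used for de-replication.

```python
header = '>contig_1 length=1500 coverage=12x'.split('>')[1].split()
record_id = header[0]               # 'contig_1'
description = ' '.join(header[1:])  # 'length=1500 coverage=12x'
print(record_id, '|', description)
```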
e-dard/postcodes | postcodes.py | get | def get(postcode):
"""
Request data associated with `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:returns: a dict of the nearest postcode's data or None if no
postcode data is found.
"""
postcode = quote(postcode.replace(' ', ''))
url = '%s/postcode/%s.json' % (END_POINT, postcode)
return _get_json_resp(url) | python | def get(postcode):
"""
Request data associated with `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:returns: a dict of the nearest postcode's data or None if no
postcode data is found.
"""
postcode = quote(postcode.replace(' ', ''))
url = '%s/postcode/%s.json' % (END_POINT, postcode)
return _get_json_resp(url) | [
"def",
"get",
"(",
"postcode",
")",
":",
"postcode",
"=",
"quote",
"(",
"postcode",
".",
"replace",
"(",
"' '",
",",
"''",
")",
")",
"url",
"=",
"'%s/postcode/%s.json'",
"%",
"(",
"END_POINT",
",",
"postcode",
")",
"return",
"_get_json_resp",
"(",
"url",
")"
] | Request data associated with `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:returns: a dict of the nearest postcode's data or None if no
postcode data is found. | [
"Request",
"data",
"associated",
"with",
"postcode",
"."
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L22-L34 | train | 202 |
e-dard/postcodes | postcodes.py | get_from_postcode | def get_from_postcode(postcode, distance):
"""
Request all postcode data within `distance` miles of `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:param distance: distance in miles to `postcode`.
:returns: a list of dicts containing postcode data within the
specified distance or `None` if `postcode` is not valid.
"""
postcode = quote(postcode.replace(' ', ''))
return _get_from(distance, 'postcode=%s' % postcode) | python | def get_from_postcode(postcode, distance):
"""
Request all postcode data within `distance` miles of `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:param distance: distance in miles to `postcode`.
:returns: a list of dicts containing postcode data within the
specified distance or `None` if `postcode` is not valid.
"""
postcode = quote(postcode.replace(' ', ''))
return _get_from(distance, 'postcode=%s' % postcode) | [
"def",
"get_from_postcode",
"(",
"postcode",
",",
"distance",
")",
":",
"postcode",
"=",
"quote",
"(",
"postcode",
".",
"replace",
"(",
"' '",
",",
"''",
")",
")",
"return",
"_get_from",
"(",
"distance",
",",
"'postcode=%s'",
"%",
"postcode",
")"
] | Request all postcode data within `distance` miles of `postcode`.
:param postcode: the postcode to search for. The postcode may
contain spaces (they will be removed).
:param distance: distance in miles to `postcode`.
:returns: a list of dicts containing postcode data within the
specified distance or `None` if `postcode` is not valid. | [
"Request",
"all",
"postcode",
"data",
"within",
"distance",
"miles",
"of",
"postcode",
"."
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L56-L69 | train | 203 |
e-dard/postcodes | postcodes.py | PostCoder._check_point | def _check_point(self, lat, lng):
""" Checks if latitude and longitude correct """
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | python | def _check_point(self, lat, lng):
""" Checks if latitude and longitude correct """
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | [
"def",
"_check_point",
"(",
"self",
",",
"lat",
",",
"lng",
")",
":",
"if",
"abs",
"(",
"lat",
")",
">",
"90",
"or",
"abs",
"(",
"lng",
")",
">",
"180",
":",
"msg",
"=",
"\"Illegal lat and/or lng, (%s, %s) provided.\"",
"%",
"(",
"lat",
",",
"lng",
")",
"raise",
"IllegalPointException",
"(",
"msg",
")"
] | Checks if latitude and longitude correct | [
"Checks",
"if",
"latitude",
"and",
"longitude",
"correct"
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L123-L127 | train | 204 |
e-dard/postcodes | postcodes.py | PostCoder._lookup | def _lookup(self, skip_cache, fun, *args, **kwargs):
"""
Checks for cached responses, before requesting from
web-service
"""
if args not in self.cache or skip_cache:
self.cache[args] = fun(*args, **kwargs)
return self.cache[args] | python | def _lookup(self, skip_cache, fun, *args, **kwargs):
"""
Checks for cached responses, before requesting from
web-service
"""
if args not in self.cache or skip_cache:
self.cache[args] = fun(*args, **kwargs)
return self.cache[args] | [
"def",
"_lookup",
"(",
"self",
",",
"skip_cache",
",",
"fun",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"not",
"in",
"self",
".",
"cache",
"or",
"skip_cache",
":",
"self",
".",
"cache",
"[",
"args",
"]",
"=",
"fun",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"cache",
"[",
"args",
"]"
] | Checks for cached responses, before requesting from
web-service | [
"Checks",
"for",
"cached",
"responses",
"before",
"requesting",
"from",
"web",
"-",
"service"
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L129-L136 | train | 205 |
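The memoization pattern behind `_lookup` above, distilled into a standalone sketch (the class and function names here are illustrative, not part of the postcodes API): positional arguments key the cache, and `skip_cache=True` forces a fresh call.

```python
class Cached(object):
    def __init__(self):
        self.cache = {}

    def _lookup(self, skip_cache, fun, *args):
        # re-query only on a cache miss or an explicit skip_cache
        if args not in self.cache or skip_cache:
            self.cache[args] = fun(*args)
        return self.cache[args]

calls = []
def fetch(postcode):
    calls.append(postcode)
    return {'postcode': postcode}

c = Cached()
c._lookup(False, fetch, 'N1 9GU')
c._lookup(False, fetch, 'N1 9GU')  # served from cache
c._lookup(True, fetch, 'N1 9GU')   # skip_cache re-fetches
print(len(calls))                  # 2
```

One caveat carried over from the original: `**kwargs` are passed through to `fun` but excluded from the cache key, so calls differing only in keyword arguments would share a cache entry.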
e-dard/postcodes | postcodes.py | PostCoder.get_nearest | def get_nearest(self, lat, lng, skip_cache=False):
"""
Calls `postcodes.get_nearest` but checks correctness of `lat`
and `long`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a dict of the nearest postcode's data.
"""
lat, lng = float(lat), float(lng)
self._check_point(lat, lng)
return self._lookup(skip_cache, get_nearest, lat, lng) | python | def get_nearest(self, lat, lng, skip_cache=False):
"""
Calls `postcodes.get_nearest` but checks correctness of `lat`
and `long`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a dict of the nearest postcode's data.
"""
lat, lng = float(lat), float(lng)
self._check_point(lat, lng)
return self._lookup(skip_cache, get_nearest, lat, lng) | [
"def",
"get_nearest",
"(",
"self",
",",
"lat",
",",
"lng",
",",
"skip_cache",
"=",
"False",
")",
":",
"lat",
",",
"lng",
"=",
"float",
"(",
"lat",
")",
",",
"float",
"(",
"lng",
")",
"self",
".",
"_check_point",
"(",
"lat",
",",
"lng",
")",
"return",
"self",
".",
"_lookup",
"(",
"skip_cache",
",",
"get_nearest",
",",
"lat",
",",
"lng",
")"
] | Calls `postcodes.get_nearest` but checks correctness of `lat`
and `long`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a dict of the nearest postcode's data. | [
"Calls",
"postcodes",
".",
"get_nearest",
"but",
"checks",
"correctness",
"of",
"lat",
"and",
"long",
"and",
"by",
"default",
"utilises",
"a",
"local",
"cache",
"."
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L152-L167 | train | 206 |
e-dard/postcodes | postcodes.py | PostCoder.get_from_postcode | def get_from_postcode(self, postcode, distance, skip_cache=False):
"""
Calls `postcodes.get_from_postcode` but checks correctness of
`distance`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
distance = float(distance)
if distance < 0:
raise IllegalDistanceException("Distance must not be negative")
# remove spaces and change case here due to caching
postcode = postcode.lower().replace(' ', '')
return self._lookup(skip_cache, get_from_postcode, postcode,
float(distance)) | python | def get_from_postcode(self, postcode, distance, skip_cache=False):
"""
Calls `postcodes.get_from_postcode` but checks correctness of
`distance`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
distance = float(distance)
if distance < 0:
raise IllegalDistanceException("Distance must not be negative")
# remove spaces and change case here due to caching
postcode = postcode.lower().replace(' ', '')
return self._lookup(skip_cache, get_from_postcode, postcode,
float(distance)) | [
"def",
"get_from_postcode",
"(",
"self",
",",
"postcode",
",",
"distance",
",",
"skip_cache",
"=",
"False",
")",
":",
"distance",
"=",
"float",
"(",
"distance",
")",
"if",
"distance",
"<",
"0",
":",
"raise",
"IllegalDistanceException",
"(",
"\"Distance must not be negative\"",
")",
"# remove spaces and change case here due to caching",
"postcode",
"=",
"postcode",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"return",
"self",
".",
"_lookup",
"(",
"skip_cache",
",",
"get_from_postcode",
",",
"postcode",
",",
"float",
"(",
"distance",
")",
")"
] | Calls `postcodes.get_from_postcode` but checks correctness of
`distance`, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance. | [
"Calls",
"postcodes",
".",
"get_from_postcode",
"but",
"checks",
"correctness",
"of",
"distance",
"and",
"by",
"default",
"utilises",
"a",
"local",
"cache",
"."
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L169-L189 | train | 207 |
e-dard/postcodes | postcodes.py | PostCoder.get_from_geo | def get_from_geo(self, lat, lng, distance, skip_cache=False):
"""
Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
# remove spaces and change case here due to caching
lat, lng, distance = float(lat), float(lng), float(distance)
if distance < 0:
raise IllegalDistanceException("Distance must not be negative")
self._check_point(lat, lng)
return self._lookup(skip_cache, get_from_geo, lat, lng, distance) | python | def get_from_geo(self, lat, lng, distance, skip_cache=False):
"""
Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance.
"""
# remove spaces and change case here due to caching
lat, lng, distance = float(lat), float(lng), float(distance)
if distance < 0:
raise IllegalDistanceException("Distance must not be negative")
self._check_point(lat, lng)
return self._lookup(skip_cache, get_from_geo, lat, lng, distance) | [
"def",
"get_from_geo",
"(",
"self",
",",
"lat",
",",
"lng",
",",
"distance",
",",
"skip_cache",
"=",
"False",
")",
":",
"# remove spaces and change case here due to caching",
"lat",
",",
"lng",
",",
"distance",
"=",
"float",
"(",
"lat",
")",
",",
"float",
"(",
"lng",
")",
",",
"float",
"(",
"distance",
")",
"if",
"distance",
"<",
"0",
":",
"raise",
"IllegalDistanceException",
"(",
"\"Distance must not be negative\"",
")",
"self",
".",
"_check_point",
"(",
"lat",
",",
"lng",
")",
"return",
"self",
".",
"_lookup",
"(",
"skip_cache",
",",
"get_from_geo",
",",
"lat",
",",
"lng",
",",
"distance",
")"
] | Calls `postcodes.get_from_geo` but checks the correctness of
all arguments, and by default utilises a local cache.
:param skip_cache: optional argument specifying whether to skip
the cache and make an explicit request.
:raises IllegalPointException: if the latitude or longitude
are out of bounds.
:returns: a list of dicts containing postcode data within the
specified distance. | [
"Calls",
"postcodes",
".",
"get_from_geo",
"but",
"checks",
"the",
"correctness",
"of",
"all",
"arguments",
"and",
"by",
"default",
"utilises",
"a",
"local",
"cache",
"."
] | d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005 | https://github.com/e-dard/postcodes/blob/d63c47b4ecd765bc2e4e6ba34bc0b8a796f44005/postcodes.py#L191-L210 | train | 208 |
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | insertions_from_masked | def insertions_from_masked(seq):
"""
get coordinates of insertions from insertion-masked sequence
"""
insertions = []
prev = True
for i, base in enumerate(seq):
if base.isupper() and prev is True:
insertions.append([])
prev = False
elif base.islower():
insertions[-1].append(i)
prev = True
return [[min(i), max(i)] for i in insertions if i != []] | python | def insertions_from_masked(seq):
"""
get coordinates of insertions from insertion-masked sequence
"""
insertions = []
prev = True
for i, base in enumerate(seq):
if base.isupper() and prev is True:
insertions.append([])
prev = False
elif base.islower():
insertions[-1].append(i)
prev = True
return [[min(i), max(i)] for i in insertions if i != []] | [
"def",
"insertions_from_masked",
"(",
"seq",
")",
":",
"insertions",
"=",
"[",
"]",
"prev",
"=",
"True",
"for",
"i",
",",
"base",
"in",
"enumerate",
"(",
"seq",
")",
":",
"if",
"base",
".",
"isupper",
"(",
")",
"and",
"prev",
"is",
"True",
":",
"insertions",
".",
"append",
"(",
"[",
"]",
")",
"prev",
"=",
"False",
"elif",
"base",
".",
"islower",
"(",
")",
":",
"insertions",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"i",
")",
"prev",
"=",
"True",
"return",
"[",
"[",
"min",
"(",
"i",
")",
",",
"max",
"(",
"i",
")",
"]",
"for",
"i",
"in",
"insertions",
"if",
"i",
"!=",
"[",
"]",
"]"
] | get coordinates of insertions from insertion-masked sequence | [
"get",
"coordinates",
"of",
"insertions",
"from",
"insertion",
"-",
"masked",
"sequence"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L15-L28 | train | 209 |
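A worked example for `insertions_from_masked` above (definition copied in so it runs standalone): lowercase runs mark insertions, and the function returns their inclusive `[start, end]` coordinates.

```python
def insertions_from_masked(seq):
    insertions = []
    prev = True
    for i, base in enumerate(seq):
        if base.isupper() and prev is True:
            insertions.append([])
            prev = False
        elif base.islower():
            insertions[-1].append(i)
            prev = True
    return [[min(i), max(i)] for i in insertions if i != []]

# two lowercase insertions, at positions 4-7 and 12-13
print(insertions_from_masked('ACGTacgtACGTggACGT'))  # [[4, 7], [12, 13]]
```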
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | seq_info | def seq_info(names, id2names, insertions, sequences):
"""
get insertion information from header
"""
seqs = {} # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
for name in names:
id = id2names[name]
gene = name.split('fromHMM::', 1)[0].rsplit(' ', 1)[1]
model = name.split('fromHMM::', 1)[1].split('=', 1)[1].split()[0]
i_gene_pos = insertions[id] # coordinates of each insertion wrt gene
i_model_pos = name.split('fromHMM::', 1)[1].split('model-pos(ins-len)=')[1].split()[0].split(';') # model overlap
i_info = []
for i, ins in enumerate(i_gene_pos):
model_pos = i_model_pos[i].split('-')[1].split('(')[0]
length = i_model_pos[i].split('(')[1].split(')')[0]
iheader = '>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s'\
% (id, (i + 1), (i + 1), ins[0], ins[1], model_pos)
iseq = sequences[id][1][ins[0]:(ins[1] + 1)]
iseq = [iheader, iseq]
info = [ins, model_pos, length, iseq, [], []]
i_info.append(info)
seqs[id] = [gene, model, i_info]
return seqs | python | def seq_info(names, id2names, insertions, sequences):
"""
get insertion information from header
"""
seqs = {} # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]
for name in names:
id = id2names[name]
gene = name.split('fromHMM::', 1)[0].rsplit(' ', 1)[1]
model = name.split('fromHMM::', 1)[1].split('=', 1)[1].split()[0]
i_gene_pos = insertions[id] # coordinates of each insertion wrt gene
i_model_pos = name.split('fromHMM::', 1)[1].split('model-pos(ins-len)=')[1].split()[0].split(';') # model overlap
i_info = []
for i, ins in enumerate(i_gene_pos):
model_pos = i_model_pos[i].split('-')[1].split('(')[0]
length = i_model_pos[i].split('(')[1].split(')')[0]
iheader = '>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s'\
% (id, (i + 1), (i + 1), ins[0], ins[1], model_pos)
iseq = sequences[id][1][ins[0]:(ins[1] + 1)]
iseq = [iheader, iseq]
info = [ins, model_pos, length, iseq, [], []]
i_info.append(info)
seqs[id] = [gene, model, i_info]
return seqs | [
"def",
"seq_info",
"(",
"names",
",",
"id2names",
",",
"insertions",
",",
"sequences",
")",
":",
"seqs",
"=",
"{",
"}",
"# seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]",
"for",
"name",
"in",
"names",
":",
"id",
"=",
"id2names",
"[",
"name",
"]",
"gene",
"=",
"name",
".",
"split",
"(",
"'fromHMM::'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"' '",
",",
"1",
")",
"[",
"1",
"]",
"model",
"=",
"name",
".",
"split",
"(",
"'fromHMM::'",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"i_gene_pos",
"=",
"insertions",
"[",
"id",
"]",
"# coordinates of each insertion wrt gene",
"i_model_pos",
"=",
"name",
".",
"split",
"(",
"'fromHMM::'",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'model-pos(ins-len)='",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"';'",
")",
"# model overlap",
"i_info",
"=",
"[",
"]",
"for",
"i",
",",
"ins",
"in",
"enumerate",
"(",
"i_gene_pos",
")",
":",
"model_pos",
"=",
"i_model_pos",
"[",
"i",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'('",
")",
"[",
"0",
"]",
"length",
"=",
"i_model_pos",
"[",
"i",
"]",
".",
"split",
"(",
"'('",
")",
"[",
"1",
"]",
".",
"split",
"(",
"')'",
")",
"[",
"0",
"]",
"iheader",
"=",
"'>%s_%s insertion::seq=%s type=insertion strand=n/a gene-pos=%s-%s model-pos=%s'",
"%",
"(",
"id",
",",
"(",
"i",
"+",
"1",
")",
",",
"(",
"i",
"+",
"1",
")",
",",
"ins",
"[",
"0",
"]",
",",
"ins",
"[",
"1",
"]",
",",
"model_pos",
")",
"iseq",
"=",
"sequences",
"[",
"id",
"]",
"[",
"1",
"]",
"[",
"ins",
"[",
"0",
"]",
":",
"(",
"ins",
"[",
"1",
"]",
"+",
"1",
")",
"]",
"iseq",
"=",
"[",
"iheader",
",",
"iseq",
"]",
"info",
"=",
"[",
"ins",
",",
"model_pos",
",",
"length",
",",
"iseq",
",",
"[",
"]",
",",
"[",
"]",
"]",
"i_info",
".",
"append",
"(",
"info",
")",
"seqs",
"[",
"id",
"]",
"=",
"[",
"gene",
",",
"model",
",",
"i_info",
"]",
"return",
"seqs"
] | get insertion information from header | [
"get",
"insertion",
"information",
"from",
"header"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L64-L86 | train | 210 |
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | check_overlap | def check_overlap(pos, ins, thresh):
"""
make sure thresh % feature is contained within insertion
"""
ins_pos = ins[0]
ins_len = ins[2]
ol = overlap(ins_pos, pos)
feat_len = pos[1] - pos[0] + 1
# print float(ol) / float(feat_len)
if float(ol) / float(feat_len) >= thresh:
return True
return False | python | def check_overlap(pos, ins, thresh):
"""
make sure thresh % feature is contained within insertion
"""
ins_pos = ins[0]
ins_len = ins[2]
ol = overlap(ins_pos, pos)
feat_len = pos[1] - pos[0] + 1
# print float(ol) / float(feat_len)
if float(ol) / float(feat_len) >= thresh:
return True
return False | [
"def",
"check_overlap",
"(",
"pos",
",",
"ins",
",",
"thresh",
")",
":",
"ins_pos",
"=",
"ins",
"[",
"0",
"]",
"ins_len",
"=",
"ins",
"[",
"2",
"]",
"ol",
"=",
"overlap",
"(",
"ins_pos",
",",
"pos",
")",
"feat_len",
"=",
"pos",
"[",
"1",
"]",
"-",
"pos",
"[",
"0",
"]",
"+",
"1",
"# print float(ol) / float(feat_len)",
"if",
"float",
"(",
"ol",
")",
"/",
"float",
"(",
"feat_len",
")",
">=",
"thresh",
":",
"return",
"True",
"return",
"False"
] | make sure thresh % feature is contained within insertion | [
"make",
"sure",
"thresh",
"%",
"feature",
"is",
"contained",
"within",
"insertion"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L91-L102 | train | 211 |
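A runnable sketch of the containment test in `check_overlap` above, distilled (the unused `ins_len` line is dropped). The `overlap` helper is imported elsewhere in the script and not shown in this row, so a plausible stand-in is assumed here: the count of positions shared by two inclusive `[start, end]` ranges.

```python
def overlap(a, b):
    # assumed behavior: shared positions between two inclusive ranges
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

def check_overlap(pos, ins, thresh):
    ol = overlap(ins[0], pos)       # ins[0] holds the insertion's gene coordinates
    feat_len = pos[1] - pos[0] + 1
    return float(ol) / float(feat_len) >= thresh

ins = [[100, 400]]                           # insertion spanning gene positions 100-400
print(check_overlap([150, 350], ins, 0.75))  # True: feature fully contained
print(check_overlap([300, 500], ins, 0.75))  # False: only ~50% contained
```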
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | max_insertion | def max_insertion(seqs, gene, domain):
"""
length of largest insertion
"""
seqs = [i[2] for i in list(seqs.values()) if i[2] != [] and i[0] == gene and i[1] == domain]
lengths = []
for seq in seqs:
for ins in seq:
lengths.append(int(ins[2]))
if lengths == []:
return 100
return max(lengths) | python | def max_insertion(seqs, gene, domain):
"""
length of largest insertion
"""
seqs = [i[2] for i in list(seqs.values()) if i[2] != [] and i[0] == gene and i[1] == domain]
lengths = []
for seq in seqs:
for ins in seq:
lengths.append(int(ins[2]))
if lengths == []:
return 100
return max(lengths) | [
"def",
"max_insertion",
"(",
"seqs",
",",
"gene",
",",
"domain",
")",
":",
"seqs",
"=",
"[",
"i",
"[",
"2",
"]",
"for",
"i",
"in",
"list",
"(",
"seqs",
".",
"values",
"(",
")",
")",
"if",
"i",
"[",
"2",
"]",
"!=",
"[",
"]",
"and",
"i",
"[",
"0",
"]",
"==",
"gene",
"and",
"i",
"[",
"1",
"]",
"==",
"domain",
"]",
"lengths",
"=",
"[",
"]",
"for",
"seq",
"in",
"seqs",
":",
"for",
"ins",
"in",
"seq",
":",
"lengths",
".",
"append",
"(",
"int",
"(",
"ins",
"[",
"2",
"]",
")",
")",
"if",
"lengths",
"==",
"[",
"]",
":",
"return",
"100",
"return",
"max",
"(",
"lengths",
")"
] | length of largest insertion | [
"length",
"of",
"largest",
"insertion"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L305-L316 | train | 212 |
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | model_length | def model_length(gene, domain):
"""
get length of model
"""
if gene == '16S':
domain2max = {'E_coli_K12': int(1538), 'bacteria': int(1689), 'archaea': int(1563), 'eukarya': int(2652)}
return domain2max[domain]
elif gene == '23S':
domain2max = {'E_coli_K12': int(2903), 'bacteria': int(3146), 'archaea': int(3774), 'eukarya': int(9079)}
return domain2max[domain]
else:
        print('# length unknown for gene: %s, domain: %s' % (gene, domain), file=sys.stderr)
exit() | python | def model_length(gene, domain):
"""
get length of model
"""
if gene == '16S':
domain2max = {'E_coli_K12': int(1538), 'bacteria': int(1689), 'archaea': int(1563), 'eukarya': int(2652)}
return domain2max[domain]
elif gene == '23S':
domain2max = {'E_coli_K12': int(2903), 'bacteria': int(3146), 'archaea': int(3774), 'eukarya': int(9079)}
return domain2max[domain]
else:
        print('# length unknown for gene: %s, domain: %s' % (gene, domain), file=sys.stderr)
exit() | [
"def",
"model_length",
"(",
"gene",
",",
"domain",
")",
":",
"if",
"gene",
"==",
"'16S'",
":",
"domain2max",
"=",
"{",
"'E_coli_K12'",
":",
"int",
"(",
"1538",
")",
",",
"'bacteria'",
":",
"int",
"(",
"1689",
")",
",",
"'archaea'",
":",
"int",
"(",
"1563",
")",
",",
"'eukarya'",
":",
"int",
"(",
"2652",
")",
"}",
"return",
"domain2max",
"[",
"domain",
"]",
"elif",
"gene",
"==",
"'23S'",
":",
"domain2max",
"=",
"{",
"'E_coli_K12'",
":",
"int",
"(",
"2903",
")",
",",
"'bacteria'",
":",
"int",
"(",
"3146",
")",
",",
"'archaea'",
":",
"int",
"(",
"3774",
")",
",",
"'eukarya'",
":",
"int",
"(",
"9079",
")",
"}",
"return",
"domain2max",
"[",
"domain",
"]",
"else",
":",
"print",
"(",
"sys",
".",
"stderr",
",",
"'# length unknown for gene: %s, domain: %s'",
"%",
"(",
"gene",
",",
"domain",
")",
")",
"exit",
"(",
")"
] | get length of model | [
"get",
"length",
"of",
"model"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L318-L330 | train | 213 |
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | setup_markers | def setup_markers(seqs):
"""
setup unique marker for every orf annotation
- change size if necessary
"""
family2marker = {} # family2marker[family] = [marker, size]
markers = cycle(['^', 'p', '*', '+', 'x', 'd', '|', 'v', '>', '<', '8'])
size = 60
families = []
for seq in list(seqs.values()):
for insertion in seq[2]:
for family in list(insertion[-1].values()):
if family not in families:
families.append(family)
for family in families:
marker = next(markers)
if marker == '^':
size = size * 0.5
family2marker[family] = [marker, size]
return family2marker | python | def setup_markers(seqs):
"""
setup unique marker for every orf annotation
- change size if necessary
"""
family2marker = {} # family2marker[family] = [marker, size]
markers = cycle(['^', 'p', '*', '+', 'x', 'd', '|', 'v', '>', '<', '8'])
size = 60
families = []
for seq in list(seqs.values()):
for insertion in seq[2]:
for family in list(insertion[-1].values()):
if family not in families:
families.append(family)
for family in families:
marker = next(markers)
if marker == '^':
size = size * 0.5
family2marker[family] = [marker, size]
return family2marker | [
"def",
"setup_markers",
"(",
"seqs",
")",
":",
"family2marker",
"=",
"{",
"}",
"# family2marker[family] = [marker, size]",
"markers",
"=",
"cycle",
"(",
"[",
"'^'",
",",
"'p'",
",",
"'*'",
",",
"'+'",
",",
"'x'",
",",
"'d'",
",",
"'|'",
",",
"'v'",
",",
"'>'",
",",
"'<'",
",",
"'8'",
"]",
")",
"size",
"=",
"60",
"families",
"=",
"[",
"]",
"for",
"seq",
"in",
"list",
"(",
"seqs",
".",
"values",
"(",
")",
")",
":",
"for",
"insertion",
"in",
"seq",
"[",
"2",
"]",
":",
"for",
"family",
"in",
"list",
"(",
"insertion",
"[",
"-",
"1",
"]",
".",
"values",
"(",
")",
")",
":",
"if",
"family",
"not",
"in",
"families",
":",
"families",
".",
"append",
"(",
"family",
")",
"for",
"family",
"in",
"families",
":",
"marker",
"=",
"next",
"(",
"markers",
")",
"if",
"marker",
"==",
"'^'",
":",
"size",
"=",
"size",
"*",
"0.5",
"family2marker",
"[",
"family",
"]",
"=",
"[",
"marker",
",",
"size",
"]",
"return",
"family2marker"
] | setup unique marker for every orf annotation
- change size if necessary | [
"setup",
"unique",
"marker",
"for",
"every",
"orf",
"annotation",
"-",
"change",
"size",
"if",
"necessary"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L332-L351 | train | 214 |
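How the `itertools.cycle` in `setup_markers` above behaves: once the marker list is exhausted it wraps around, which is why the code halves `size` each time `'^'` comes back up, so reused shapes remain visually distinguishable.

```python
from itertools import cycle

markers = cycle(['^', 'p', '*'])
print([next(markers) for _ in range(5)])  # ['^', 'p', '*', '^', 'p']
```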
christophertbrown/bioscripts | ctbBio/rRNA_insertions.py | plot_by_gene_and_domain | def plot_by_gene_and_domain(name, seqs, tax, id2name):
"""
plot insertions for each gene and domain
"""
for gene in set([seq[0] for seq in list(seqs.values())]):
for domain in set([seq[1] for seq in list(seqs.values())]):
plot_insertions(name, seqs, gene, domain, tax, id2name) | python | def plot_by_gene_and_domain(name, seqs, tax, id2name):
"""
plot insertions for each gene and domain
"""
for gene in set([seq[0] for seq in list(seqs.values())]):
for domain in set([seq[1] for seq in list(seqs.values())]):
plot_insertions(name, seqs, gene, domain, tax, id2name) | [
"def",
"plot_by_gene_and_domain",
"(",
"name",
",",
"seqs",
",",
"tax",
",",
"id2name",
")",
":",
"for",
"gene",
"in",
"set",
"(",
"[",
"seq",
"[",
"0",
"]",
"for",
"seq",
"in",
"list",
"(",
"seqs",
".",
"values",
"(",
")",
")",
"]",
")",
":",
"for",
"domain",
"in",
"set",
"(",
"[",
"seq",
"[",
"1",
"]",
"for",
"seq",
"in",
"list",
"(",
"seqs",
".",
"values",
"(",
")",
")",
"]",
")",
":",
"plot_insertions",
"(",
"name",
",",
"seqs",
",",
"gene",
",",
"domain",
",",
"tax",
",",
"id2name",
")"
] | plot insertions for each gene and domain | [
"plot",
"insertions",
"for",
"each",
"gene",
"and",
"domain"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions.py#L551-L557 | train | 215 |
christophertbrown/bioscripts | ctbBio/neto.py | get_descriptions | def get_descriptions(fastas):
"""
get the description for each ORF
"""
id2desc = {}
for fasta in fastas:
for seq in parse_fasta(fasta):
header = seq[0].split('>')[1].split(' ')
id = header[0]
if len(header) > 1:
desc = ' '.join(header[1:])
else:
desc = 'n/a'
length = float(len([i for i in seq[1].strip() if i != '*']))
id2desc[id] = [fasta, desc, length]
return id2desc | python | def get_descriptions(fastas):
"""
get the description for each ORF
"""
id2desc = {}
for fasta in fastas:
for seq in parse_fasta(fasta):
header = seq[0].split('>')[1].split(' ')
id = header[0]
if len(header) > 1:
desc = ' '.join(header[1:])
else:
desc = 'n/a'
length = float(len([i for i in seq[1].strip() if i != '*']))
id2desc[id] = [fasta, desc, length]
return id2desc | [
"def",
"get_descriptions",
"(",
"fastas",
")",
":",
"id2desc",
"=",
"{",
"}",
"for",
"fasta",
"in",
"fastas",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fasta",
")",
":",
"header",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"' '",
")",
"id",
"=",
"header",
"[",
"0",
"]",
"if",
"len",
"(",
"header",
")",
">",
"1",
":",
"desc",
"=",
"' '",
".",
"join",
"(",
"header",
"[",
"1",
":",
"]",
")",
"else",
":",
"desc",
"=",
"'n/a'",
"length",
"=",
"float",
"(",
"len",
"(",
"[",
"i",
"for",
"i",
"in",
"seq",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"i",
"!=",
"'*'",
"]",
")",
")",
"id2desc",
"[",
"id",
"]",
"=",
"[",
"fasta",
",",
"desc",
",",
"length",
"]",
"return",
"id2desc"
] | get the description for each ORF | [
"get",
"the",
"description",
"for",
"each",
"ORF"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L37-L52 | train | 216 |
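The length bookkeeping in `get_descriptions` above, in isolation (the sequence is made up): stop-codon characters (`*`) anywhere in the sequence are excluded from the ORF length.

```python
seq = 'MSTKQ*'
length = float(len([c for c in seq.strip() if c != '*']))
print(length)  # 5.0
```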
christophertbrown/bioscripts | ctbBio/neto.py | print_genome_matrix | def print_genome_matrix(hits, fastas, id2desc, file_name):
"""
optimize later? slow ...
should combine with calculate_threshold module
"""
out = open(file_name, 'w')
fastas = sorted(fastas)
print('## percent identity between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
average = '-'
else:
average = numpy.average([hits[fasta][other][i][3] for i in hits[fasta][other]])
line.append(str(average))
print('\t'.join(line), file=out)
print('', file=out)
print('## percent of orfs that are orthologous between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
percent = '-'
else:
orthologs = float(len(hits[fasta][other]))
orfs = float(len([i for i in id2desc if id2desc[i][0] == fasta]))
percent = float(orthologs / orfs) * 100
line.append(str(percent))
print('\t'.join(line), file=out) | python | def print_genome_matrix(hits, fastas, id2desc, file_name):
"""
optimize later? slow ...
should combine with calculate_threshold module
"""
out = open(file_name, 'w')
fastas = sorted(fastas)
print('## percent identity between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
average = '-'
else:
average = numpy.average([hits[fasta][other][i][3] for i in hits[fasta][other]])
line.append(str(average))
print('\t'.join(line), file=out)
print('', file=out)
print('## percent of orfs that are orthologous between genomes', file=out)
print('# - \t %s' % ('\t'.join(fastas)), file=out)
for fasta in fastas:
line = [fasta]
for other in fastas:
if other == fasta:
percent = '-'
else:
orthologs = float(len(hits[fasta][other]))
orfs = float(len([i for i in id2desc if id2desc[i][0] == fasta]))
percent = float(orthologs / orfs) * 100
line.append(str(percent))
print('\t'.join(line), file=out) | [
"def",
"print_genome_matrix",
"(",
"hits",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
")",
":",
"out",
"=",
"open",
"(",
"file_name",
",",
"'w'",
")",
"fastas",
"=",
"sorted",
"(",
"fastas",
")",
"print",
"(",
"'## percent identity between genomes'",
",",
"file",
"=",
"out",
")",
"print",
"(",
"'# - \\t %s'",
"%",
"(",
"'\\t'",
".",
"join",
"(",
"fastas",
")",
")",
",",
"file",
"=",
"out",
")",
"for",
"fasta",
"in",
"fastas",
":",
"line",
"=",
"[",
"fasta",
"]",
"for",
"other",
"in",
"fastas",
":",
"if",
"other",
"==",
"fasta",
":",
"average",
"=",
"'-'",
"else",
":",
"average",
"=",
"numpy",
".",
"average",
"(",
"[",
"hits",
"[",
"fasta",
"]",
"[",
"other",
"]",
"[",
"i",
"]",
"[",
"3",
"]",
"for",
"i",
"in",
"hits",
"[",
"fasta",
"]",
"[",
"other",
"]",
"]",
")",
"line",
".",
"append",
"(",
"str",
"(",
"average",
")",
")",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"line",
")",
",",
"file",
"=",
"out",
")",
"print",
"(",
"''",
",",
"file",
"=",
"out",
")",
"print",
"(",
"'## percent of orfs that are orthologous between genomes'",
",",
"file",
"=",
"out",
")",
"print",
"(",
"'# - \\t %s'",
"%",
"(",
"'\\t'",
".",
"join",
"(",
"fastas",
")",
")",
",",
"file",
"=",
"out",
")",
"for",
"fasta",
"in",
"fastas",
":",
"line",
"=",
"[",
"fasta",
"]",
"for",
"other",
"in",
"fastas",
":",
"if",
"other",
"==",
"fasta",
":",
"percent",
"=",
"'-'",
"else",
":",
"orthologs",
"=",
"float",
"(",
"len",
"(",
"hits",
"[",
"fasta",
"]",
"[",
"other",
"]",
")",
")",
"orfs",
"=",
"float",
"(",
"len",
"(",
"[",
"i",
"for",
"i",
"in",
"id2desc",
"if",
"id2desc",
"[",
"i",
"]",
"[",
"0",
"]",
"==",
"fasta",
"]",
")",
")",
"percent",
"=",
"float",
"(",
"orthologs",
"/",
"orfs",
")",
"*",
"100",
"line",
".",
"append",
"(",
"str",
"(",
"percent",
")",
")",
"print",
"(",
"'\\t'",
".",
"join",
"(",
"line",
")",
",",
"file",
"=",
"out",
")"
] | optimize later? slow ...
should combine with calculate_threshold module | [
"optimize",
"later?",
"slow",
"...",
"should",
"combine",
"with",
"calculate_threshold",
"module"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L140-L171 | train | 217 |
christophertbrown/bioscripts | ctbBio/neto.py | self_compare | def self_compare(fastas, id2desc, algorithm):
"""
compare genome to self to get the best possible bit score for each ORF
"""
for fasta in fastas:
blast = open(search(fasta, fasta, method = algorithm, alignment = 'local'))
for hit in best_blast(blast, 1):
id, bit = hit[0].split()[0], float(hit[-1])
id2desc[id].append(bit)
return id2desc | python | def self_compare(fastas, id2desc, algorithm):
"""
compare genome to self to get the best possible bit score for each ORF
"""
for fasta in fastas:
blast = open(search(fasta, fasta, method = algorithm, alignment = 'local'))
for hit in best_blast(blast, 1):
id, bit = hit[0].split()[0], float(hit[-1])
id2desc[id].append(bit)
return id2desc | [
"def",
"self_compare",
"(",
"fastas",
",",
"id2desc",
",",
"algorithm",
")",
":",
"for",
"fasta",
"in",
"fastas",
":",
"blast",
"=",
"open",
"(",
"search",
"(",
"fasta",
",",
"fasta",
",",
"method",
"=",
"algorithm",
",",
"alignment",
"=",
"'local'",
")",
")",
"for",
"hit",
"in",
"best_blast",
"(",
"blast",
",",
"1",
")",
":",
"id",
",",
"bit",
"=",
"hit",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
",",
"float",
"(",
"hit",
"[",
"-",
"1",
"]",
")",
"id2desc",
"[",
"id",
"]",
".",
"append",
"(",
"bit",
")",
"return",
"id2desc"
] | compare genome to self to get the best possible bit score for each ORF | [
"compare",
"genome",
"to",
"self",
"to",
"get",
"the",
"best",
"possible",
"bit",
"score",
"for",
"each",
"ORF"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L240-L249 | train | 218 |
christophertbrown/bioscripts | ctbBio/neto.py | calc_thresholds | def calc_thresholds(rbh, file_name, thresholds = [False, False, False, False], stdevs = 2):
"""
    if thresholds are not specified, calculate based on the distribution of normalized bit scores
"""
calc_threshold = thresholds[-1]
norm_threshold = {}
for pair in itertools.permutations([i for i in rbh], 2):
if pair[0] not in norm_threshold:
norm_threshold[pair[0]] = {}
norm_threshold[pair[0]][pair[1]] = {}
out = open(file_name, 'w')
print('#### summary of rbh comparisons\n', file=out)
comparisons = []
for genome in rbh:
for compare in rbh[genome]:
pair = ''.join(sorted([genome, compare]))
if pair in comparisons:
continue
comparisons.append(pair)
scores = {'percent identity': [], 'e-value': [], 'bit score': [], 'normalized bit score': [], 'alignment length fraction': []}
print('### blast between %s and %s\n' % (genome, compare), file=out)
for id in rbh[genome][compare]:
pident, length_fraction, e, bit, norm_bit = rbh[genome][compare][id][3:]
scores['percent identity'].append(pident)
scores['alignment length fraction'].append(length_fraction)
scores['e-value'].append(e)
scores['bit score'].append(bit)
scores['normalized bit score'].append(norm_bit)
if calc_threshold is True:
norms = scores['normalized bit score']
average = numpy.average(norms)
std = numpy.std(norms)
normal_thresh = average - (std * stdevs)
print('## average normalized bit score: %s' % average, file=out)
print('## standard deviation of normalized bit scores: %s' % std, file=out)
print('## normalized bit score threshold set to: %s\n' % (normal_thresh), file=out)
norm_threshold[genome][compare], norm_threshold[compare][genome] = normal_thresh, normal_thresh
for score in scores:
print('## %s' % (score), file=out)
if len(scores[score]) > 0:
print('## average: %s' % numpy.average(scores[score]), file=out)
# hist = histogram(scores[score], [])
# for line in hist:
# print >> out, line
print('', file=out)
out.close()
if calc_threshold is True:
return thresholds[0:-1] + [norm_threshold]
else:
return thresholds | python | def calc_thresholds(rbh, file_name, thresholds = [False, False, False, False], stdevs = 2):
"""
    if thresholds are not specified, calculate based on the distribution of normalized bit scores
"""
calc_threshold = thresholds[-1]
norm_threshold = {}
for pair in itertools.permutations([i for i in rbh], 2):
if pair[0] not in norm_threshold:
norm_threshold[pair[0]] = {}
norm_threshold[pair[0]][pair[1]] = {}
out = open(file_name, 'w')
print('#### summary of rbh comparisons\n', file=out)
comparisons = []
for genome in rbh:
for compare in rbh[genome]:
pair = ''.join(sorted([genome, compare]))
if pair in comparisons:
continue
comparisons.append(pair)
scores = {'percent identity': [], 'e-value': [], 'bit score': [], 'normalized bit score': [], 'alignment length fraction': []}
print('### blast between %s and %s\n' % (genome, compare), file=out)
for id in rbh[genome][compare]:
pident, length_fraction, e, bit, norm_bit = rbh[genome][compare][id][3:]
scores['percent identity'].append(pident)
scores['alignment length fraction'].append(length_fraction)
scores['e-value'].append(e)
scores['bit score'].append(bit)
scores['normalized bit score'].append(norm_bit)
if calc_threshold is True:
norms = scores['normalized bit score']
average = numpy.average(norms)
std = numpy.std(norms)
normal_thresh = average - (std * stdevs)
print('## average normalized bit score: %s' % average, file=out)
print('## standard deviation of normalized bit scores: %s' % std, file=out)
print('## normalized bit score threshold set to: %s\n' % (normal_thresh), file=out)
norm_threshold[genome][compare], norm_threshold[compare][genome] = normal_thresh, normal_thresh
for score in scores:
print('## %s' % (score), file=out)
if len(scores[score]) > 0:
print('## average: %s' % numpy.average(scores[score]), file=out)
# hist = histogram(scores[score], [])
# for line in hist:
# print >> out, line
print('', file=out)
out.close()
if calc_threshold is True:
return thresholds[0:-1] + [norm_threshold]
else:
return thresholds | [
"def",
"calc_thresholds",
"(",
"rbh",
",",
"file_name",
",",
"thresholds",
"=",
"[",
"False",
",",
"False",
",",
"False",
",",
"False",
"]",
",",
"stdevs",
"=",
"2",
")",
":",
"calc_threshold",
"=",
"thresholds",
"[",
"-",
"1",
"]",
"norm_threshold",
"=",
"{",
"}",
"for",
"pair",
"in",
"itertools",
".",
"permutations",
"(",
"[",
"i",
"for",
"i",
"in",
"rbh",
"]",
",",
"2",
")",
":",
"if",
"pair",
"[",
"0",
"]",
"not",
"in",
"norm_threshold",
":",
"norm_threshold",
"[",
"pair",
"[",
"0",
"]",
"]",
"=",
"{",
"}",
"norm_threshold",
"[",
"pair",
"[",
"0",
"]",
"]",
"[",
"pair",
"[",
"1",
"]",
"]",
"=",
"{",
"}",
"out",
"=",
"open",
"(",
"file_name",
",",
"'w'",
")",
"print",
"(",
"'#### summary of rbh comparisons\\n'",
",",
"file",
"=",
"out",
")",
"comparisons",
"=",
"[",
"]",
"for",
"genome",
"in",
"rbh",
":",
"for",
"compare",
"in",
"rbh",
"[",
"genome",
"]",
":",
"pair",
"=",
"''",
".",
"join",
"(",
"sorted",
"(",
"[",
"genome",
",",
"compare",
"]",
")",
")",
"if",
"pair",
"in",
"comparisons",
":",
"continue",
"comparisons",
".",
"append",
"(",
"pair",
")",
"scores",
"=",
"{",
"'percent identity'",
":",
"[",
"]",
",",
"'e-value'",
":",
"[",
"]",
",",
"'bit score'",
":",
"[",
"]",
",",
"'normalized bit score'",
":",
"[",
"]",
",",
"'alignment length fraction'",
":",
"[",
"]",
"}",
"print",
"(",
"'### blast between %s and %s\\n'",
"%",
"(",
"genome",
",",
"compare",
")",
",",
"file",
"=",
"out",
")",
"for",
"id",
"in",
"rbh",
"[",
"genome",
"]",
"[",
"compare",
"]",
":",
"pident",
",",
"length_fraction",
",",
"e",
",",
"bit",
",",
"norm_bit",
"=",
"rbh",
"[",
"genome",
"]",
"[",
"compare",
"]",
"[",
"id",
"]",
"[",
"3",
":",
"]",
"scores",
"[",
"'percent identity'",
"]",
".",
"append",
"(",
"pident",
")",
"scores",
"[",
"'alignment length fraction'",
"]",
".",
"append",
"(",
"length_fraction",
")",
"scores",
"[",
"'e-value'",
"]",
".",
"append",
"(",
"e",
")",
"scores",
"[",
"'bit score'",
"]",
".",
"append",
"(",
"bit",
")",
"scores",
"[",
"'normalized bit score'",
"]",
".",
"append",
"(",
"norm_bit",
")",
"if",
"calc_threshold",
"is",
"True",
":",
"norms",
"=",
"scores",
"[",
"'normalized bit score'",
"]",
"average",
"=",
"numpy",
".",
"average",
"(",
"norms",
")",
"std",
"=",
"numpy",
".",
"std",
"(",
"norms",
")",
"normal_thresh",
"=",
"average",
"-",
"(",
"std",
"*",
"stdevs",
")",
"print",
"(",
"'## average normalized bit score: %s'",
"%",
"average",
",",
"file",
"=",
"out",
")",
"print",
"(",
"'## standard deviation of normalized bit scores: %s'",
"%",
"std",
",",
"file",
"=",
"out",
")",
"print",
"(",
"'## normalized bit score threshold set to: %s\\n'",
"%",
"(",
"normal_thresh",
")",
",",
"file",
"=",
"out",
")",
"norm_threshold",
"[",
"genome",
"]",
"[",
"compare",
"]",
",",
"norm_threshold",
"[",
"compare",
"]",
"[",
"genome",
"]",
"=",
"normal_thresh",
",",
"normal_thresh",
"for",
"score",
"in",
"scores",
":",
"print",
"(",
"'## %s'",
"%",
"(",
"score",
")",
",",
"file",
"=",
"out",
")",
"if",
"len",
"(",
"scores",
"[",
"score",
"]",
")",
">",
"0",
":",
"print",
"(",
"'## average: %s'",
"%",
"numpy",
".",
"average",
"(",
"scores",
"[",
"score",
"]",
")",
",",
"file",
"=",
"out",
")",
"# hist = histogram(scores[score], [])",
"# for line in hist:",
"# print >> out, line",
"print",
"(",
"''",
",",
"file",
"=",
"out",
")",
"out",
".",
"close",
"(",
")",
"if",
"calc_threshold",
"is",
"True",
":",
"return",
"thresholds",
"[",
"0",
":",
"-",
"1",
"]",
"+",
"[",
"norm_threshold",
"]",
"else",
":",
"return",
"thresholds"
] | if thresholds are not specified, calculate based on the distribution of normalized bit scores | [
"if",
"thresholds",
"are",
"not",
"specififed",
"calculate",
"based",
"on",
"the",
"distribution",
"of",
"normalized",
"bit",
"scores"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L303-L352 | train | 219 |
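The cutoff rule inside `calc_thresholds` above, in isolation (the scores are made up): the normalized-bit-score threshold is the mean minus `stdevs` standard deviations, 2 by default, so one low-scoring outlier widens the cutoff rather than rejecting typical pairs.

```python
import numpy

norms = [0.92, 0.88, 0.95, 0.60, 0.90]
average = numpy.average(norms)
std = numpy.std(norms)
normal_thresh = average - (std * 2)
print(round(normal_thresh, 3))  # ~0.596; pairs scoring below this are filtered out
```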
christophertbrown/bioscripts | ctbBio/neto.py | neto | def neto(fastas, algorithm = 'usearch', e = 0.01, bit = 40, length = .65, norm_bit = False):
"""
make and split a rbh network
"""
thresholds = [e, bit, length, norm_bit]
id2desc = get_descriptions(fastas)
# get [fasta, description, length] for ORF id
id2desc = self_compare(fastas, id2desc, algorithm)
# get best possible bit score for each ORF
# (comparing with itself) [fasta, description, length, bestbit]
hits = compare_genomes(fastas, id2desc, algorithm)
# pair wise genome comparisons {genome: {id: [match_type = 'rbh' or 'fbh', scores]}}
calc_thresholds(hits, file_name = 'fbh.scores.summary.txt')
rbh_network(id2desc, hits, file_name = 'fbh.network.edges.txt')
hits, rbh = find_rbh(hits, id2desc)
# remove hits that are not reciprocal best blast hits
thresholds = calc_thresholds(rbh, 'rbh.scores.summary.txt', thresholds)
# print rbh score summary to rbh_score_summary.txt and
# calculate normalized bit score cutoff for each pair of
# genomes, if desired
g = rbh_network(id2desc, rbh, file_name = 'rbh.network.edges.txt')
filtered_g, filtered_rbh = rbh_network(id2desc, rbh, 'rbh.filtered.network.edges.txt', thresholds)
calc_thresholds(filtered_rbh, file_name = 'rbh.filtered.scores.summary.txt')
print_summary(filtered_g, fastas, id2desc, file_name = 'rbh.filtered.network.nodes.txt')
print_network_matrix(filtered_g, fastas, id2desc, file_name = 'rbh.filtered.network.matrix.txt')
print_genome_matrix(filtered_rbh, fastas, id2desc, file_name = 'rbh.filtered.network.genome_matrix.txt')
split_g = split_network(filtered_g, id2desc, file_name = 'rbh.filtered.split.network.edges.txt')
print_summary(split_g, fastas, id2desc, file_name = 'rbh.filtered.split.network.nodes.txt')
print_network_matrix(split_g, fastas, id2desc, file_name = 'rbh.filtered.split.network.matrix.txt')
return split_g | python | def neto(fastas, algorithm = 'usearch', e = 0.01, bit = 40, length = .65, norm_bit = False):
"""
make and split a rbh network
"""
thresholds = [e, bit, length, norm_bit]
id2desc = get_descriptions(fastas)
# get [fasta, description, length] for ORF id
id2desc = self_compare(fastas, id2desc, algorithm)
# get best possible bit score for each ORF
# (comparing with itself) [fasta, description, length, bestbit]
hits = compare_genomes(fastas, id2desc, algorithm)
# pair wise genome comparisons {genome: {id: [match_type = 'rbh' or 'fbh', scores]}}
calc_thresholds(hits, file_name = 'fbh.scores.summary.txt')
rbh_network(id2desc, hits, file_name = 'fbh.network.edges.txt')
hits, rbh = find_rbh(hits, id2desc)
# remove hits that are not reciprocal best blast hits
thresholds = calc_thresholds(rbh, 'rbh.scores.summary.txt', thresholds)
# print rbh score summary to rbh_score_summary.txt and
# calculate normalized bit score cutoff for each pair of
# genomes, if desired
g = rbh_network(id2desc, rbh, file_name = 'rbh.network.edges.txt')
filtered_g, filtered_rbh = rbh_network(id2desc, rbh, 'rbh.filtered.network.edges.txt', thresholds)
calc_thresholds(filtered_rbh, file_name = 'rbh.filtered.scores.summary.txt')
print_summary(filtered_g, fastas, id2desc, file_name = 'rbh.filtered.network.nodes.txt')
print_network_matrix(filtered_g, fastas, id2desc, file_name = 'rbh.filtered.network.matrix.txt')
print_genome_matrix(filtered_rbh, fastas, id2desc, file_name = 'rbh.filtered.network.genome_matrix.txt')
split_g = split_network(filtered_g, id2desc, file_name = 'rbh.filtered.split.network.edges.txt')
print_summary(split_g, fastas, id2desc, file_name = 'rbh.filtered.split.network.nodes.txt')
print_network_matrix(split_g, fastas, id2desc, file_name = 'rbh.filtered.split.network.matrix.txt')
return split_g | [
"def",
"neto",
"(",
"fastas",
",",
"algorithm",
"=",
"'usearch'",
",",
"e",
"=",
"0.01",
",",
"bit",
"=",
"40",
",",
"length",
"=",
".65",
",",
"norm_bit",
"=",
"False",
")",
":",
"thresholds",
"=",
"[",
"e",
",",
"bit",
",",
"length",
",",
"norm_bit",
"]",
"id2desc",
"=",
"get_descriptions",
"(",
"fastas",
")",
"# get [fasta, description, length] for ORF id",
"id2desc",
"=",
"self_compare",
"(",
"fastas",
",",
"id2desc",
",",
"algorithm",
")",
"# get best possible bit score for each ORF ",
"# (comparing with itself) [fasta, description, length, bestbit]",
"hits",
"=",
"compare_genomes",
"(",
"fastas",
",",
"id2desc",
",",
"algorithm",
")",
"# pair wise genome comparisons {genome: {id: [match_type = 'rbh' or 'fbh', scores]}}",
"calc_thresholds",
"(",
"hits",
",",
"file_name",
"=",
"'fbh.scores.summary.txt'",
")",
"rbh_network",
"(",
"id2desc",
",",
"hits",
",",
"file_name",
"=",
"'fbh.network.edges.txt'",
")",
"hits",
",",
"rbh",
"=",
"find_rbh",
"(",
"hits",
",",
"id2desc",
")",
"# remove hits that are not reciprocal best blast hits",
"thresholds",
"=",
"calc_thresholds",
"(",
"rbh",
",",
"'rbh.scores.summary.txt'",
",",
"thresholds",
")",
"# print rbh score summary to rbh_score_summary.txt and",
"# calculate normalized bit score cutoff for each pair of",
"# genomes, if desired",
"g",
"=",
"rbh_network",
"(",
"id2desc",
",",
"rbh",
",",
"file_name",
"=",
"'rbh.network.edges.txt'",
")",
"filtered_g",
",",
"filtered_rbh",
"=",
"rbh_network",
"(",
"id2desc",
",",
"rbh",
",",
"'rbh.filtered.network.edges.txt'",
",",
"thresholds",
")",
"calc_thresholds",
"(",
"filtered_rbh",
",",
"file_name",
"=",
"'rbh.filtered.scores.summary.txt'",
")",
"print_summary",
"(",
"filtered_g",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.network.nodes.txt'",
")",
"print_network_matrix",
"(",
"filtered_g",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.network.matrix.txt'",
")",
"print_genome_matrix",
"(",
"filtered_rbh",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.network.genome_matrix.txt'",
")",
"split_g",
"=",
"split_network",
"(",
"filtered_g",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.split.network.edges.txt'",
")",
"print_summary",
"(",
"split_g",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.split.network.nodes.txt'",
")",
"print_network_matrix",
"(",
"split_g",
",",
"fastas",
",",
"id2desc",
",",
"file_name",
"=",
"'rbh.filtered.split.network.matrix.txt'",
")",
"return",
"split_g"
] | make and split a rbh network | [
"make",
"and",
"split",
"a",
"rbh",
"network"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/neto.py#L416-L445 | train | 220 |
consbio/gis-metadata-parser | gis_metadata/iso_metadata_parser.py | IsoParser._parse_raster_info | def _parse_raster_info(self, prop=RASTER_INFO):
""" Collapses multiple dimensions into a single raster_info complex struct """
raster_info = {}.fromkeys(_iso_definitions[prop], u'')
# Ensure conversion of lists to newlines is in place
raster_info['dimensions'] = get_default_for_complex_sub(
prop=prop,
subprop='dimensions',
value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
xpath=self._data_map['_ri_num_dims']
)
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
dimension_type = dimension['type'].lower()
if dimension_type == 'vertical':
raster_info['vertical_count'] = dimension['size']
elif dimension_type == 'column':
raster_info['column_count'] = dimension['size']
raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
elif dimension_type == 'row':
raster_info['row_count'] = dimension['size']
raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
return raster_info if any(raster_info[k] for k in raster_info) else {} | python | def _parse_raster_info(self, prop=RASTER_INFO):
""" Collapses multiple dimensions into a single raster_info complex struct """
raster_info = {}.fromkeys(_iso_definitions[prop], u'')
# Ensure conversion of lists to newlines is in place
raster_info['dimensions'] = get_default_for_complex_sub(
prop=prop,
subprop='dimensions',
value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
xpath=self._data_map['_ri_num_dims']
)
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
dimension_type = dimension['type'].lower()
if dimension_type == 'vertical':
raster_info['vertical_count'] = dimension['size']
elif dimension_type == 'column':
raster_info['column_count'] = dimension['size']
raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
elif dimension_type == 'row':
raster_info['row_count'] = dimension['size']
raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
return raster_info if any(raster_info[k] for k in raster_info) else {} | [
"def",
"_parse_raster_info",
"(",
"self",
",",
"prop",
"=",
"RASTER_INFO",
")",
":",
"raster_info",
"=",
"{",
"}",
".",
"fromkeys",
"(",
"_iso_definitions",
"[",
"prop",
"]",
",",
"u''",
")",
"# Ensure conversion of lists to newlines is in place",
"raster_info",
"[",
"'dimensions'",
"]",
"=",
"get_default_for_complex_sub",
"(",
"prop",
"=",
"prop",
",",
"subprop",
"=",
"'dimensions'",
",",
"value",
"=",
"parse_property",
"(",
"self",
".",
"_xml_tree",
",",
"None",
",",
"self",
".",
"_data_map",
",",
"'_ri_num_dims'",
")",
",",
"xpath",
"=",
"self",
".",
"_data_map",
"[",
"'_ri_num_dims'",
"]",
")",
"xpath_root",
"=",
"self",
".",
"_get_xroot_for",
"(",
"prop",
")",
"xpath_map",
"=",
"self",
".",
"_data_structures",
"[",
"prop",
"]",
"for",
"dimension",
"in",
"parse_complex_list",
"(",
"self",
".",
"_xml_tree",
",",
"xpath_root",
",",
"xpath_map",
",",
"RASTER_DIMS",
")",
":",
"dimension_type",
"=",
"dimension",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"dimension_type",
"==",
"'vertical'",
":",
"raster_info",
"[",
"'vertical_count'",
"]",
"=",
"dimension",
"[",
"'size'",
"]",
"elif",
"dimension_type",
"==",
"'column'",
":",
"raster_info",
"[",
"'column_count'",
"]",
"=",
"dimension",
"[",
"'size'",
"]",
"raster_info",
"[",
"'x_resolution'",
"]",
"=",
"u' '",
".",
"join",
"(",
"dimension",
"[",
"k",
"]",
"for",
"k",
"in",
"[",
"'value'",
",",
"'units'",
"]",
")",
".",
"strip",
"(",
")",
"elif",
"dimension_type",
"==",
"'row'",
":",
"raster_info",
"[",
"'row_count'",
"]",
"=",
"dimension",
"[",
"'size'",
"]",
"raster_info",
"[",
"'y_resolution'",
"]",
"=",
"u' '",
".",
"join",
"(",
"dimension",
"[",
"k",
"]",
"for",
"k",
"in",
"[",
"'value'",
",",
"'units'",
"]",
")",
".",
"strip",
"(",
")",
"return",
"raster_info",
"if",
"any",
"(",
"raster_info",
"[",
"k",
"]",
"for",
"k",
"in",
"raster_info",
")",
"else",
"{",
"}"
] | Collapses multiple dimensions into a single raster_info complex struct | [
"Collapses",
"multiple",
"dimensions",
"into",
"a",
"single",
"raster_info",
"complex",
"struct"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L472-L502 | train | 221 |
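The record above collapses parsed ISO dimension entries into one flat raster_info dict. A minimal sketch of that collapsing step, run on assumed dimension records shaped like the output of the record's parse_complex_list call:

# Assumed input: dicts like those parse_complex_list would yield for RASTER_DIMS
dimensions = [
    {'type': 'row', 'size': '1024', 'value': '30', 'units': 'meters'},
    {'type': 'column', 'size': '2048', 'value': '30', 'units': 'meters'},
    {'type': 'vertical', 'size': '10', 'value': '', 'units': ''},
]

raster_info = {'dimensions': '3'}
for dim in dimensions:
    kind = dim['type'].lower()
    if kind == 'vertical':
        raster_info['vertical_count'] = dim['size']
    elif kind == 'column':
        raster_info['column_count'] = dim['size']
        raster_info['x_resolution'] = ' '.join(dim[k] for k in ('value', 'units')).strip()
    elif kind == 'row':
        raster_info['row_count'] = dim['size']
        raster_info['y_resolution'] = ' '.join(dim[k] for k in ('value', 'units')).strip()

print(raster_info)  # includes row_count='1024' and y_resolution='30 meters'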
consbio/gis-metadata-parser | gis_metadata/iso_metadata_parser.py | IsoParser._update_raster_info | def _update_raster_info(self, **update_props):
""" Derives multiple dimensions from a single raster_info complex struct """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props.pop('values')
# Update number of dimensions at raster_info root (applies to all dimensions below)
xroot, xpath = None, self._data_map['_ri_num_dims']
raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]
# Derive vertical, longitude, and latitude dimensions from raster_info
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
v_dimension = {}
if values.get('vertical_count'):
v_dimension = v_dimension.fromkeys(xpath_map, u'')
v_dimension['type'] = 'vertical'
v_dimension['size'] = values.get('vertical_count', u'')
x_dimension = {}
if values.get('column_count') or values.get('x_resolution'):
x_dimension = x_dimension.fromkeys(xpath_map, u'')
x_dimension['type'] = 'column'
x_dimension['size'] = values.get('column_count', u'')
x_dimension['value'] = values.get('x_resolution', u'')
y_dimension = {}
if values.get('row_count') or values.get('y_resolution'):
y_dimension = y_dimension.fromkeys(xpath_map, u'')
y_dimension['type'] = 'row'
y_dimension['size'] = values.get('row_count', u'')
y_dimension['value'] = values.get('y_resolution', u'')
# Update derived dimensions as complex list, and append affected elements for return
update_props['prop'] = RASTER_DIMS
update_props['values'] = [v_dimension, x_dimension, y_dimension]
raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
return raster_info | python | def _update_raster_info(self, **update_props):
""" Derives multiple dimensions from a single raster_info complex struct """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props.pop('values')
# Update number of dimensions at raster_info root (applies to all dimensions below)
xroot, xpath = None, self._data_map['_ri_num_dims']
raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]
# Derive vertical, longitude, and latitude dimensions from raster_info
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
v_dimension = {}
if values.get('vertical_count'):
v_dimension = v_dimension.fromkeys(xpath_map, u'')
v_dimension['type'] = 'vertical'
v_dimension['size'] = values.get('vertical_count', u'')
x_dimension = {}
if values.get('column_count') or values.get('x_resolution'):
x_dimension = x_dimension.fromkeys(xpath_map, u'')
x_dimension['type'] = 'column'
x_dimension['size'] = values.get('column_count', u'')
x_dimension['value'] = values.get('x_resolution', u'')
y_dimension = {}
if values.get('row_count') or values.get('y_resolution'):
y_dimension = y_dimension.fromkeys(xpath_map, u'')
y_dimension['type'] = 'row'
y_dimension['size'] = values.get('row_count', u'')
y_dimension['value'] = values.get('y_resolution', u'')
# Update derived dimensions as complex list, and append affected elements for return
update_props['prop'] = RASTER_DIMS
update_props['values'] = [v_dimension, x_dimension, y_dimension]
raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
return raster_info | [
"def",
"_update_raster_info",
"(",
"self",
",",
"*",
"*",
"update_props",
")",
":",
"tree_to_update",
"=",
"update_props",
"[",
"'tree_to_update'",
"]",
"prop",
"=",
"update_props",
"[",
"'prop'",
"]",
"values",
"=",
"update_props",
".",
"pop",
"(",
"'values'",
")",
"# Update number of dimensions at raster_info root (applies to all dimensions below)",
"xroot",
",",
"xpath",
"=",
"None",
",",
"self",
".",
"_data_map",
"[",
"'_ri_num_dims'",
"]",
"raster_info",
"=",
"[",
"update_property",
"(",
"tree_to_update",
",",
"xroot",
",",
"xpath",
",",
"prop",
",",
"values",
".",
"get",
"(",
"'dimensions'",
",",
"u''",
")",
")",
"]",
"# Derive vertical, longitude, and latitude dimensions from raster_info",
"xpath_root",
"=",
"self",
".",
"_get_xroot_for",
"(",
"prop",
")",
"xpath_map",
"=",
"self",
".",
"_data_structures",
"[",
"prop",
"]",
"v_dimension",
"=",
"{",
"}",
"if",
"values",
".",
"get",
"(",
"'vertical_count'",
")",
":",
"v_dimension",
"=",
"v_dimension",
".",
"fromkeys",
"(",
"xpath_map",
",",
"u''",
")",
"v_dimension",
"[",
"'type'",
"]",
"=",
"'vertical'",
"v_dimension",
"[",
"'size'",
"]",
"=",
"values",
".",
"get",
"(",
"'vertical_count'",
",",
"u''",
")",
"x_dimension",
"=",
"{",
"}",
"if",
"values",
".",
"get",
"(",
"'column_count'",
")",
"or",
"values",
".",
"get",
"(",
"'x_resolution'",
")",
":",
"x_dimension",
"=",
"x_dimension",
".",
"fromkeys",
"(",
"xpath_map",
",",
"u''",
")",
"x_dimension",
"[",
"'type'",
"]",
"=",
"'column'",
"x_dimension",
"[",
"'size'",
"]",
"=",
"values",
".",
"get",
"(",
"'column_count'",
",",
"u''",
")",
"x_dimension",
"[",
"'value'",
"]",
"=",
"values",
".",
"get",
"(",
"'x_resolution'",
",",
"u''",
")",
"y_dimension",
"=",
"{",
"}",
"if",
"values",
".",
"get",
"(",
"'row_count'",
")",
"or",
"values",
".",
"get",
"(",
"'y_resolution'",
")",
":",
"y_dimension",
"=",
"y_dimension",
".",
"fromkeys",
"(",
"xpath_map",
",",
"u''",
")",
"y_dimension",
"[",
"'type'",
"]",
"=",
"'row'",
"y_dimension",
"[",
"'size'",
"]",
"=",
"values",
".",
"get",
"(",
"'row_count'",
",",
"u''",
")",
"y_dimension",
"[",
"'value'",
"]",
"=",
"values",
".",
"get",
"(",
"'y_resolution'",
",",
"u''",
")",
"# Update derived dimensions as complex list, and append affected elements for return",
"update_props",
"[",
"'prop'",
"]",
"=",
"RASTER_DIMS",
"update_props",
"[",
"'values'",
"]",
"=",
"[",
"v_dimension",
",",
"x_dimension",
",",
"y_dimension",
"]",
"raster_info",
"+=",
"update_complex_list",
"(",
"xpath_root",
"=",
"xpath_root",
",",
"xpath_map",
"=",
"xpath_map",
",",
"*",
"*",
"update_props",
")",
"return",
"raster_info"
] | Derives multiple dimensions from a single raster_info complex struct | [
"Derives",
"multiple",
"dimensions",
"from",
"a",
"single",
"raster_info",
"complex",
"struct"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L622-L666 | train | 222 |
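Conversely, a sketch of the fan-out performed above: one flat raster_info dict becomes per-dimension structs. The keys tuple is a stand-in for the record's xpath_map:

values = {
    'dimensions': '2',
    'row_count': '1024', 'y_resolution': '30 meters',
    'column_count': '2048', 'x_resolution': '30 meters',
}

keys = ('type', 'size', 'value')  # assumed subset of the real xpath_map keys
x_dim = dict.fromkeys(keys, '')
x_dim.update(type='column', size=values['column_count'], value=values['x_resolution'])
y_dim = dict.fromkeys(keys, '')
y_dim.update(type='row', size=values['row_count'], value=values['y_resolution'])
v_dim = {}  # no vertical_count in values, so no vertical struct is emitted

print([d for d in (v_dim, x_dim, y_dim) if d])  # only the column and row structs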
consbio/gis-metadata-parser | gis_metadata/iso_metadata_parser.py | IsoParser._trim_xpath | def _trim_xpath(self, xpath, prop):
""" Removes primitive type tags from an XPATH """
xroot = self._get_xroot_for(prop)
if xroot is None and isinstance(xpath, string_types):
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in _iso_tag_primitives:
xroot = XPATH_DELIM.join(xtags[:-1])
return xroot | python | def _trim_xpath(self, xpath, prop):
""" Removes primitive type tags from an XPATH """
xroot = self._get_xroot_for(prop)
if xroot is None and isinstance(xpath, string_types):
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in _iso_tag_primitives:
xroot = XPATH_DELIM.join(xtags[:-1])
return xroot | [
"def",
"_trim_xpath",
"(",
"self",
",",
"xpath",
",",
"prop",
")",
":",
"xroot",
"=",
"self",
".",
"_get_xroot_for",
"(",
"prop",
")",
"if",
"xroot",
"is",
"None",
"and",
"isinstance",
"(",
"xpath",
",",
"string_types",
")",
":",
"xtags",
"=",
"xpath",
".",
"split",
"(",
"XPATH_DELIM",
")",
"if",
"xtags",
"[",
"-",
"1",
"]",
"in",
"_iso_tag_primitives",
":",
"xroot",
"=",
"XPATH_DELIM",
".",
"join",
"(",
"xtags",
"[",
":",
"-",
"1",
"]",
")",
"return",
"xroot"
] | Removes primitive type tags from an XPATH | [
"Removes",
"primitive",
"type",
"tags",
"from",
"an",
"XPATH"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/iso_metadata_parser.py#L691-L702 | train | 223 |
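A sketch of the trimming rule above, with assumed stand-ins for the module's XPATH_DELIM and _iso_tag_primitives:

XPATH_DELIM = '/'  # stand-in for the module constant
_iso_tag_primitives = {'gco:CharacterString', 'gco:Integer', 'gco:Decimal'}  # assumed subset

xpath = 'identificationInfo/MD_DataIdentification/abstract/gco:CharacterString'
xtags = xpath.split(XPATH_DELIM)
# Drop the trailing primitive tag to recover the element root
xroot = XPATH_DELIM.join(xtags[:-1]) if xtags[-1] in _iso_tag_primitives else None
print(xroot)  # identificationInfo/MD_DataIdentification/abstract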
scottrice/pysteam | pysteam/shortcuts.py | shortcut_app_id | def shortcut_app_id(shortcut):
"""
Generates the app id for a given shortcut. Steam uses app ids as a unique
identifier for games, but since shortcuts don't have a canonical serverside
representation they need to be generated on the fly. The important part
about this function is that it will generate the same app id as Steam does
for a given shortcut
"""
algorithm = Crc(width = 32, poly = 0x04C11DB7, reflect_in = True, xor_in = 0xffffffff, reflect_out = True, xor_out = 0xffffffff)
crc_input = ''.join([shortcut.exe,shortcut.name])
high_32 = algorithm.bit_by_bit(crc_input) | 0x80000000
full_64 = (high_32 << 32) | 0x02000000
return str(full_64) | python | def shortcut_app_id(shortcut):
"""
Generates the app id for a given shortcut. Steam uses app ids as a unique
identifier for games, but since shortcuts don't have a canonical serverside
representation they need to be generated on the fly. The important part
about this function is that it will generate the same app id as Steam does
for a given shortcut
"""
algorithm = Crc(width = 32, poly = 0x04C11DB7, reflect_in = True, xor_in = 0xffffffff, reflect_out = True, xor_out = 0xffffffff)
crc_input = ''.join([shortcut.exe,shortcut.name])
high_32 = algorithm.bit_by_bit(crc_input) | 0x80000000
full_64 = (high_32 << 32) | 0x02000000
return str(full_64) | [
"def",
"shortcut_app_id",
"(",
"shortcut",
")",
":",
"algorithm",
"=",
"Crc",
"(",
"width",
"=",
"32",
",",
"poly",
"=",
"0x04C11DB7",
",",
"reflect_in",
"=",
"True",
",",
"xor_in",
"=",
"0xffffffff",
",",
"reflect_out",
"=",
"True",
",",
"xor_out",
"=",
"0xffffffff",
")",
"crc_input",
"=",
"''",
".",
"join",
"(",
"[",
"shortcut",
".",
"exe",
",",
"shortcut",
".",
"name",
"]",
")",
"high_32",
"=",
"algorithm",
".",
"bit_by_bit",
"(",
"crc_input",
")",
"|",
"0x80000000",
"full_64",
"=",
"(",
"high_32",
"<<",
"32",
")",
"|",
"0x02000000",
"return",
"str",
"(",
"full_64",
")"
] | Generates the app id for a given shortcut. Steam uses app ids as a unique
identifier for games, but since shortcuts don't have a canonical serverside
representation they need to be generated on the fly. The important part
about this function is that it will generate the same app id as Steam does
for a given shortcut | [
"Generates",
"the",
"app",
"id",
"for",
"a",
"given",
"shortcut",
".",
"Steam",
"uses",
"app",
"ids",
"as",
"a",
"unique",
"identifier",
"for",
"games",
"but",
"since",
"shortcuts",
"dont",
"have",
"a",
"canonical",
"serverside",
"representation",
"they",
"need",
"to",
"be",
"generated",
"on",
"the",
"fly",
".",
"The",
"important",
"part",
"about",
"this",
"function",
"is",
"that",
"it",
"will",
"generate",
"the",
"same",
"app",
"id",
"as",
"Steam",
"does",
"for",
"a",
"given",
"shortcut"
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/shortcuts.py#L9-L21 | train | 224 |
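The Crc parameters above (poly 0x04C11DB7, reflected in/out, init and xor-out 0xffffffff) are those of standard CRC-32, so zlib.crc32 from the standard library can sketch the same derivation. The exe/name values and the UTF-8 encoding are assumptions:

import zlib

exe, name = '"/usr/bin/dolphin-emu"', 'Dolphin'  # made-up shortcut fields
crc_input = ''.join([exe, name]).encode('utf-8')
# Force the top bit of the CRC, then shift into the high dword of a 64-bit id
high_32 = (zlib.crc32(crc_input) & 0xffffffff) | 0x80000000
full_64 = (high_32 << 32) | 0x02000000
print(str(full_64))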
mkouhei/bootstrap-py | bootstrap_py/vcs.py | VCS._config | def _config(self):
"""Execute git config."""
cfg_wr = self.repo.config_writer()
cfg_wr.add_section('user')
cfg_wr.set_value('user', 'name', self.metadata.author)
cfg_wr.set_value('user', 'email', self.metadata.email)
cfg_wr.release() | python | def _config(self):
"""Execute git config."""
cfg_wr = self.repo.config_writer()
cfg_wr.add_section('user')
cfg_wr.set_value('user', 'name', self.metadata.author)
cfg_wr.set_value('user', 'email', self.metadata.email)
cfg_wr.release() | [
"def",
"_config",
"(",
"self",
")",
":",
"cfg_wr",
"=",
"self",
".",
"repo",
".",
"config_writer",
"(",
")",
"cfg_wr",
".",
"add_section",
"(",
"'user'",
")",
"cfg_wr",
".",
"set_value",
"(",
"'user'",
",",
"'name'",
",",
"self",
".",
"metadata",
".",
"author",
")",
"cfg_wr",
".",
"set_value",
"(",
"'user'",
",",
"'email'",
",",
"self",
".",
"metadata",
".",
"email",
")",
"cfg_wr",
".",
"release",
"(",
")"
] | Execute git config. | [
"Execute",
"git",
"config",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L35-L41 | train | 225 |
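A hedged sketch of the same GitPython calls outside the wrapper class; the path and identity values are made up, and it assumes a fresh repository whose config has no [user] section yet (add_section raises a DuplicateSectionError otherwise):

import git  # GitPython, the library the VCS wrapper above builds on

repo = git.Repo.init('/tmp/bootstrap-py-demo')  # fresh repo for the example
cfg = repo.config_writer()
cfg.add_section('user')
cfg.set_value('user', 'name', 'Jane Doe')
cfg.set_value('user', 'email', 'jane@example.org')
cfg.release()  # flush and unlock the config file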
mkouhei/bootstrap-py | bootstrap_py/vcs.py | VCS._remote_add | def _remote_add(self):
"""Execute git remote add."""
self.repo.create_remote(
'origin',
'git@github.com:{username}/{repo}.git'.format(
username=self.metadata.username,
repo=self.metadata.name)) | python | def _remote_add(self):
"""Execute git remote add."""
self.repo.create_remote(
'origin',
'git@github.com:{username}/{repo}.git'.format(
username=self.metadata.username,
repo=self.metadata.name)) | [
"def",
"_remote_add",
"(",
"self",
")",
":",
"self",
".",
"repo",
".",
"create_remote",
"(",
"'origin'",
",",
"'git@github.com:{username}/{repo}.git'",
".",
"format",
"(",
"username",
"=",
"self",
".",
"metadata",
".",
"username",
",",
"repo",
"=",
"self",
".",
"metadata",
".",
"name",
")",
")"
] | Execute git remote add. | [
"Execute",
"git",
"remote",
"add",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/vcs.py#L47-L53 | train | 226 |
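A sketch of the remote URL the method above builds, with assumed metadata values:

username, name = 'mkouhei', 'bootstrap-py'  # assumed metadata values
url = 'git@github.com:{username}/{repo}.git'.format(username=username, repo=name)
print(url)  # git@github.com:mkouhei/bootstrap-py.git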
deep-compute/basescript | basescript/basescript.py | BaseScript.start | def start(self):
'''
Starts execution of the script
'''
# invoke the appropriate sub-command as requested from command-line
try:
self.args.func()
except SystemExit as e:
if e.code != 0:
raise
except KeyboardInterrupt:
self.log.warning("exited via keyboard interrupt")
except:
self.log.exception("exited start function")
# set exit code so we know it did not end successfully
# TODO different exit codes based on signals ?
finally:
self._flush_metrics_q.put(None, block=True)
self._flush_metrics_q.put(None, block=True, timeout=1)
self.log.debug("exited_successfully") | python | def start(self):
'''
Starts execution of the script
'''
# invoke the appropriate sub-command as requested from command-line
try:
self.args.func()
except SystemExit as e:
if e.code != 0:
raise
except KeyboardInterrupt:
self.log.warning("exited via keyboard interrupt")
except:
self.log.exception("exited start function")
# set exit code so we know it did not end successfully
# TODO different exit codes based on signals ?
finally:
self._flush_metrics_q.put(None, block=True)
self._flush_metrics_q.put(None, block=True, timeout=1)
self.log.debug("exited_successfully") | [
"def",
"start",
"(",
"self",
")",
":",
"# invoke the appropriate sub-command as requested from command-line",
"try",
":",
"self",
".",
"args",
".",
"func",
"(",
")",
"except",
"SystemExit",
"as",
"e",
":",
"if",
"e",
".",
"code",
"!=",
"0",
":",
"raise",
"except",
"KeyboardInterrupt",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"exited via keyboard interrupt\"",
")",
"except",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"exited start function\"",
")",
"# set exit code so we know it did not end successfully",
"# TODO different exit codes based on signals ?",
"finally",
":",
"self",
".",
"_flush_metrics_q",
".",
"put",
"(",
"None",
",",
"block",
"=",
"True",
")",
"self",
".",
"_flush_metrics_q",
".",
"put",
"(",
"None",
",",
"block",
"=",
"True",
",",
"timeout",
"=",
"1",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"exited_successfully\"",
")"
] | Starts execution of the script | [
"Starts",
"execution",
"of",
"the",
"script"
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/basescript.py#L67-L87 | train | 227 |
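A minimal sketch of the queue-sentinel shutdown pattern used in the finally block above; the names are illustrative, not basescript's API:

import queue
import threading

flush_q = queue.Queue()

def flusher():
    while True:
        item = flush_q.get()
        if item is None:  # sentinel received: stop flushing and exit
            break
        print('flushing', item)

worker = threading.Thread(target=flusher)
worker.start()
flush_q.put({'metric': 1})
flush_q.put(None, block=True)  # same sentinel handoff as the finally block above
worker.join()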
deep-compute/basescript | basescript/basescript.py | BaseScript.define_baseargs | def define_baseargs(self, parser):
'''
Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None
'''
parser.add_argument('--name', default=sys.argv[0],
help='Name to identify this instance')
parser.add_argument('--log-level', default=None,
help='Logging level as picked from the logging module')
parser.add_argument('--log-format', default=None,
# TODO add more formats
choices=("json", "pretty",),
help=("Force the format of the logs. By default, if the "
"command is from a terminal, print colorful logs. "
"Otherwise print json."),
)
parser.add_argument('--log-file', default=None,
help='Writes logs to log file if specified, default: %(default)s',
)
parser.add_argument('--quiet', default=False, action="store_true",
help='if true, does not print logs to stderr, default: %(default)s',
)
parser.add_argument('--metric-grouping-interval', default=None, type=int,
help='To group metrics based on a time interval, e.g. 10 (i.e. 10 sec)',
)
parser.add_argument('--debug', default=False, action="store_true",
help='To run the code in debug mode',
) | python | def define_baseargs(self, parser):
'''
Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None
'''
parser.add_argument('--name', default=sys.argv[0],
help='Name to identify this instance')
parser.add_argument('--log-level', default=None,
help='Logging level as picked from the logging module')
parser.add_argument('--log-format', default=None,
# TODO add more formats
choices=("json", "pretty",),
help=("Force the format of the logs. By default, if the "
"command is from a terminal, print colorful logs. "
"Otherwise print json."),
)
parser.add_argument('--log-file', default=None,
help='Writes logs to log file if specified, default: %(default)s',
)
parser.add_argument('--quiet', default=False, action="store_true",
help='if true, does not print logs to stderr, default: %(default)s',
)
parser.add_argument('--metric-grouping-interval', default=None, type=int,
help='To group metrics based on a time interval, e.g. 10 (i.e. 10 sec)',
)
parser.add_argument('--debug', default=False, action="store_true",
help='To run the code in debug mode',
) | [
"def",
"define_baseargs",
"(",
"self",
",",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'--name'",
",",
"default",
"=",
"sys",
".",
"argv",
"[",
"0",
"]",
",",
"help",
"=",
"'Name to identify this instance'",
")",
"parser",
".",
"add_argument",
"(",
"'--log-level'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Logging level as picked from the logging module'",
")",
"parser",
".",
"add_argument",
"(",
"'--log-format'",
",",
"default",
"=",
"None",
",",
"# TODO add more formats",
"choices",
"=",
"(",
"\"json\"",
",",
"\"pretty\"",
",",
")",
",",
"help",
"=",
"(",
"\"Force the format of the logs. By default, if the \"",
"\"command is from a terminal, print colorful logs. \"",
"\"Otherwise print json.\"",
")",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--log-file'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Writes logs to log file if specified, default: %(default)s'",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--quiet'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'if true, does not print logs to stderr, default: %(default)s'",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--metric-grouping-interval'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'To group metrics based on time interval ex:10 i.e;(10 sec)'",
",",
")",
"parser",
".",
"add_argument",
"(",
"'--debug'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'To run the code in debug mode'",
",",
")"
] | Define basic command-line arguments required by the script.
@parser is a parser object created using the `argparse` module.
returns: None | [
"Define",
"basic",
"command",
"-",
"line",
"arguments",
"required",
"by",
"the",
"script",
"."
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/basescript.py#L123-L151 | train | 228 |
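A hedged sketch parsing a subset of the base flags above with a bare argparse parser, outside the BaseScript class:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--log-level', default=None)
parser.add_argument('--log-format', default=None, choices=('json', 'pretty'))
parser.add_argument('--quiet', default=False, action='store_true')
parser.add_argument('--metric-grouping-interval', default=None, type=int)

args = parser.parse_args(['--log-format', 'json', '--metric-grouping-interval', '10'])
print(args.log_format, args.metric_grouping_interval, args.quiet)  # json 10 False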
elbow-jason/Uno-deprecated | uno/parser/source_coder.py | SourceCoder.cleanup_payload | def cleanup_payload(self, payload):
"""
Basically, turns payload that looks like ' \\n ' to ''. In the
calling function, if this function returns '' no object is added
for that payload.
"""
p = payload.replace('\n', '')
p = p.rstrip()
p = p.lstrip()
return p | python | def cleanup_payload(self, payload):
"""
Basically, turns payload that looks like ' \\n ' to ''. In the
calling function, if this function returns '' no object is added
for that payload.
"""
p = payload.replace('\n', '')
p = p.rstrip()
p = p.lstrip()
return p | [
"def",
"cleanup_payload",
"(",
"self",
",",
"payload",
")",
":",
"p",
"=",
"payload",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"p",
"=",
"p",
".",
"rstrip",
"(",
")",
"p",
"=",
"p",
".",
"lstrip",
"(",
")",
"return",
"p"
] | Basically, turns payload that looks like ' \\n ' to ''. In the
calling function, if this function returns '' no object is added
for that payload. | [
"Basically",
"turns",
"payload",
"that",
"looks",
"like",
"\\\\",
"n",
"to",
".",
"In",
"the",
"calling",
"function",
"if",
"this",
"function",
"returns",
"no",
"object",
"is",
"added",
"for",
"that",
"payload",
"."
] | 4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4 | https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/parser/source_coder.py#L73-L82 | train | 229 |
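The replace/rstrip/lstrip sequence above is equivalent to one replace followed by strip(); a quick check with an assumed payload:

payload = '  \n  keep this  \n '
p = payload.replace('\n', '')
assert p.rstrip().lstrip() == p.strip() == 'keep this'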
consbio/gis-metadata-parser | gis_metadata/utils.py | get_default_for | def get_default_for(prop, value):
""" Ensures complex property types have the correct default values """
prop = prop.strip('_') # Handle alternate props (leading underscores)
val = reduce_value(value) # Filtering of value happens here
if prop in _COMPLEX_LISTS:
return wrap_value(val)
elif prop in _COMPLEX_STRUCTS:
return val or {}
else:
return u'' if val is None else val | python | def get_default_for(prop, value):
""" Ensures complex property types have the correct default values """
prop = prop.strip('_') # Handle alternate props (leading underscores)
val = reduce_value(value) # Filtering of value happens here
if prop in _COMPLEX_LISTS:
return wrap_value(val)
elif prop in _COMPLEX_STRUCTS:
return val or {}
else:
return u'' if val is None else val | [
"def",
"get_default_for",
"(",
"prop",
",",
"value",
")",
":",
"prop",
"=",
"prop",
".",
"strip",
"(",
"'_'",
")",
"# Handle alternate props (leading underscores)",
"val",
"=",
"reduce_value",
"(",
"value",
")",
"# Filtering of value happens here",
"if",
"prop",
"in",
"_COMPLEX_LISTS",
":",
"return",
"wrap_value",
"(",
"val",
")",
"elif",
"prop",
"in",
"_COMPLEX_STRUCTS",
":",
"return",
"val",
"or",
"{",
"}",
"else",
":",
"return",
"u''",
"if",
"val",
"is",
"None",
"else",
"val"
] | Ensures complex property types have the correct default values | [
"Ensures",
"complex",
"property",
"types",
"have",
"the",
"correct",
"default",
"values"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L223-L234 | train | 230 |
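A self-contained sketch of the dispatch above, with stand-in registries and simplified wrap/reduce behavior (the real sets and helpers live in gis_metadata.utils):

_COMPLEX_LISTS = {'attributes'}      # stand-in registries; the real sets
_COMPLEX_STRUCTS = {'bounding_box'}  # live in gis_metadata.utils

def default_for(prop, value):
    prop = prop.strip('_')  # alternate props keep their primary default
    if prop in _COMPLEX_LISTS:
        return [] if value is None else [value]  # simplified wrap_value
    if prop in _COMPLEX_STRUCTS:
        return value or {}
    return '' if value is None else value

print(default_for('_attributes', None))   # []
print(default_for('bounding_box', None))  # {}
print(repr(default_for('title', None)))   # ''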
consbio/gis-metadata-parser | gis_metadata/utils.py | update_property | def update_property(tree_to_update, xpath_root, xpaths, prop, values, supported=None):
"""
Either update the tree the default way, or call the custom updater
Default Way: Existing values in the tree are overwritten. If xpaths contains a single path,
then each value is written to the tree at that path. If xpaths contains a list of xpaths,
then the values corresponding to each xpath index are written to their respective locations.
In either case, empty values are ignored.
:param tree_to_update: the XML tree compatible with element_utils to be updated
:param xpath_root: the XPATH location shared by all the xpaths passed in
:param xpaths: a string or a list of strings representing the XPATH location(s) to which to write values
:param prop: the name of the property of the parser containing the value(s) with which to update the tree
:param values: a single value, or a list of values to write to the specified XPATHs
:see: ParserProperty for more on custom updaters
:return: a list of all elements updated by this operation
"""
if supported and prop.startswith('_') and prop.strip('_') in supported:
values = u'' # Remove alternate elements: write values only to primary location
else:
values = get_default_for(prop, values) # Enforce defaults as required per property
if not xpaths:
return []
elif not isinstance(xpaths, ParserProperty):
return _update_property(tree_to_update, xpath_root, xpaths, values)
else:
# Call ParserProperty.set_prop without xpath_root (managed internally)
return xpaths.set_prop(tree_to_update=tree_to_update, prop=prop, values=values) | python | def update_property(tree_to_update, xpath_root, xpaths, prop, values, supported=None):
"""
Either update the tree the default way, or call the custom updater
Default Way: Existing values in the tree are overwritten. If xpaths contains a single path,
then each value is written to the tree at that path. If xpaths contains a list of xpaths,
then the values corresponding to each xpath index are written to their respective locations.
In either case, empty values are ignored.
:param tree_to_update: the XML tree compatible with element_utils to be updated
:param xpath_root: the XPATH location shared by all the xpaths passed in
:param xpaths: a string or a list of strings representing the XPATH location(s) to which to write values
:param prop: the name of the property of the parser containing the value(s) with which to update the tree
:param values: a single value, or a list of values to write to the specified XPATHs
:see: ParserProperty for more on custom updaters
:return: a list of all elements updated by this operation
"""
if supported and prop.startswith('_') and prop.strip('_') in supported:
values = u'' # Remove alternate elements: write values only to primary location
else:
values = get_default_for(prop, values) # Enforce defaults as required per property
if not xpaths:
return []
elif not isinstance(xpaths, ParserProperty):
return _update_property(tree_to_update, xpath_root, xpaths, values)
else:
# Call ParserProperty.set_prop without xpath_root (managed internally)
return xpaths.set_prop(tree_to_update=tree_to_update, prop=prop, values=values) | [
"def",
"update_property",
"(",
"tree_to_update",
",",
"xpath_root",
",",
"xpaths",
",",
"prop",
",",
"values",
",",
"supported",
"=",
"None",
")",
":",
"if",
"supported",
"and",
"prop",
".",
"startswith",
"(",
"'_'",
")",
"and",
"prop",
".",
"strip",
"(",
"'_'",
")",
"in",
"supported",
":",
"values",
"=",
"u''",
"# Remove alternate elements: write values only to primary location",
"else",
":",
"values",
"=",
"get_default_for",
"(",
"prop",
",",
"values",
")",
"# Enforce defaults as required per property",
"if",
"not",
"xpaths",
":",
"return",
"[",
"]",
"elif",
"not",
"isinstance",
"(",
"xpaths",
",",
"ParserProperty",
")",
":",
"return",
"_update_property",
"(",
"tree_to_update",
",",
"xpath_root",
",",
"xpaths",
",",
"values",
")",
"else",
":",
"# Call ParserProperty.set_prop without xpath_root (managed internally)",
"return",
"xpaths",
".",
"set_prop",
"(",
"tree_to_update",
"=",
"tree_to_update",
",",
"prop",
"=",
"prop",
",",
"values",
"=",
"values",
")"
] | Either update the tree the default way, or call the custom updater
Default Way: Existing values in the tree are overwritten. If xpaths contains a single path,
then each value is written to the tree at that path. If xpaths contains a list of xpaths,
then the values corresponding to each xpath index are written to their respective locations.
In either case, empty values are ignored.
:param tree_to_update: the XML tree compatible with element_utils to be updated
:param xpath_root: the XPATH location shared by all the xpaths passed in
:param xpaths: a string or a list of strings representing the XPATH location(s) to which to write values
:param prop: the name of the property of the parser containing the value(s) with which to update the tree
:param values: a single value, or a list of values to write to the specified XPATHs
:see: ParserProperty for more on custom updaters
:return: a list of all elements updated by this operation | [
"Either",
"update",
"the",
"tree",
"the",
"default",
"way",
"or",
"call",
"the",
"custom",
"updater"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L392-L423 | train | 231 |
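A sketch of the alternate-property rule above: a leading-underscore prop whose primary form is in supported gets an empty value (so its elements are removed), while all others keep theirs. The supported tuple is assumed:

supported = ('abstract', 'title')  # assumed parser-supported props
for prop in ('_abstract', 'abstract', '_custom'):
    values = '' if prop.startswith('_') and prop.strip('_') in supported else 'some text'
    print(prop, repr(values))
# _abstract '' / abstract 'some text' / _custom 'some text'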
consbio/gis-metadata-parser | gis_metadata/utils.py | _update_property | def _update_property(tree_to_update, xpath_root, xpaths, values):
"""
Default update operation for a single parser property. If xpaths contains one xpath,
then one element per value will be inserted at that location in the tree_to_update;
otherwise, the number of values must match the number of xpaths.
"""
# Inner function to update a specific XPATH with the values provided
def update_element(elem, idx, root, path, vals):
""" Internal helper function to encapsulate single item update """
has_root = bool(root and len(path) > len(root) and path.startswith(root))
path, attr = get_xpath_tuple(path) # 'path/@attr' to ('path', 'attr')
if attr:
removed = [get_element(elem, path)]
remove_element_attributes(removed[0], attr)
elif not has_root:
removed = wrap_value(remove_element(elem, path))
else:
path = get_xpath_branch(root, path)
removed = [] if idx != 0 else [remove_element(e, path, True) for e in get_elements(elem, root)]
if not vals:
return removed
items = []
for i, val in enumerate(wrap_value(vals)):
elem_to_update = elem
if has_root:
elem_to_update = insert_element(elem, (i + idx), root)
val = val.decode('utf-8') if not isinstance(val, string_types) else val
if not attr:
items.append(insert_element(elem_to_update, i, path, val))
elif path:
items.append(insert_element(elem_to_update, i, path, **{attr: val}))
else:
set_element_attributes(elem_to_update, **{attr: val})
items.append(elem_to_update)
return items
# Code to update each of the XPATHs with each of the values
xpaths = reduce_value(xpaths)
values = filter_empty(values)
if isinstance(xpaths, string_types):
return update_element(tree_to_update, 0, xpath_root, xpaths, values)
else:
each = []
for index, xpath in enumerate(xpaths):
value = values[index] if values else None
each.extend(update_element(tree_to_update, index, xpath_root, xpath, value))
return each | python | def _update_property(tree_to_update, xpath_root, xpaths, values):
"""
Default update operation for a single parser property. If xpaths contains one xpath,
then one element per value will be inserted at that location in the tree_to_update;
otherwise, the number of values must match the number of xpaths.
"""
# Inner function to update a specific XPATH with the values provided
def update_element(elem, idx, root, path, vals):
""" Internal helper function to encapsulate single item update """
has_root = bool(root and len(path) > len(root) and path.startswith(root))
path, attr = get_xpath_tuple(path) # 'path/@attr' to ('path', 'attr')
if attr:
removed = [get_element(elem, path)]
remove_element_attributes(removed[0], attr)
elif not has_root:
removed = wrap_value(remove_element(elem, path))
else:
path = get_xpath_branch(root, path)
removed = [] if idx != 0 else [remove_element(e, path, True) for e in get_elements(elem, root)]
if not vals:
return removed
items = []
for i, val in enumerate(wrap_value(vals)):
elem_to_update = elem
if has_root:
elem_to_update = insert_element(elem, (i + idx), root)
val = val.decode('utf-8') if not isinstance(val, string_types) else val
if not attr:
items.append(insert_element(elem_to_update, i, path, val))
elif path:
items.append(insert_element(elem_to_update, i, path, **{attr: val}))
else:
set_element_attributes(elem_to_update, **{attr: val})
items.append(elem_to_update)
return items
# Code to update each of the XPATHs with each of the values
xpaths = reduce_value(xpaths)
values = filter_empty(values)
if isinstance(xpaths, string_types):
return update_element(tree_to_update, 0, xpath_root, xpaths, values)
else:
each = []
for index, xpath in enumerate(xpaths):
value = values[index] if values else None
each.extend(update_element(tree_to_update, index, xpath_root, xpath, value))
return each | [
"def",
"_update_property",
"(",
"tree_to_update",
",",
"xpath_root",
",",
"xpaths",
",",
"values",
")",
":",
"# Inner function to update a specific XPATH with the values provided",
"def",
"update_element",
"(",
"elem",
",",
"idx",
",",
"root",
",",
"path",
",",
"vals",
")",
":",
"\"\"\" Internal helper function to encapsulate single item update \"\"\"",
"has_root",
"=",
"bool",
"(",
"root",
"and",
"len",
"(",
"path",
")",
">",
"len",
"(",
"root",
")",
"and",
"path",
".",
"startswith",
"(",
"root",
")",
")",
"path",
",",
"attr",
"=",
"get_xpath_tuple",
"(",
"path",
")",
"# 'path/@attr' to ('path', 'attr')",
"if",
"attr",
":",
"removed",
"=",
"[",
"get_element",
"(",
"elem",
",",
"path",
")",
"]",
"remove_element_attributes",
"(",
"removed",
"[",
"0",
"]",
",",
"attr",
")",
"elif",
"not",
"has_root",
":",
"removed",
"=",
"wrap_value",
"(",
"remove_element",
"(",
"elem",
",",
"path",
")",
")",
"else",
":",
"path",
"=",
"get_xpath_branch",
"(",
"root",
",",
"path",
")",
"removed",
"=",
"[",
"]",
"if",
"idx",
"!=",
"0",
"else",
"[",
"remove_element",
"(",
"e",
",",
"path",
",",
"True",
")",
"for",
"e",
"in",
"get_elements",
"(",
"elem",
",",
"root",
")",
"]",
"if",
"not",
"vals",
":",
"return",
"removed",
"items",
"=",
"[",
"]",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"wrap_value",
"(",
"vals",
")",
")",
":",
"elem_to_update",
"=",
"elem",
"if",
"has_root",
":",
"elem_to_update",
"=",
"insert_element",
"(",
"elem",
",",
"(",
"i",
"+",
"idx",
")",
",",
"root",
")",
"val",
"=",
"val",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"not",
"isinstance",
"(",
"val",
",",
"string_types",
")",
"else",
"val",
"if",
"not",
"attr",
":",
"items",
".",
"append",
"(",
"insert_element",
"(",
"elem_to_update",
",",
"i",
",",
"path",
",",
"val",
")",
")",
"elif",
"path",
":",
"items",
".",
"append",
"(",
"insert_element",
"(",
"elem_to_update",
",",
"i",
",",
"path",
",",
"*",
"*",
"{",
"attr",
":",
"val",
"}",
")",
")",
"else",
":",
"set_element_attributes",
"(",
"elem_to_update",
",",
"*",
"*",
"{",
"attr",
":",
"val",
"}",
")",
"items",
".",
"append",
"(",
"elem_to_update",
")",
"return",
"items",
"# Code to update each of the XPATHs with each of the values",
"xpaths",
"=",
"reduce_value",
"(",
"xpaths",
")",
"values",
"=",
"filter_empty",
"(",
"values",
")",
"if",
"isinstance",
"(",
"xpaths",
",",
"string_types",
")",
":",
"return",
"update_element",
"(",
"tree_to_update",
",",
"0",
",",
"xpath_root",
",",
"xpaths",
",",
"values",
")",
"else",
":",
"each",
"=",
"[",
"]",
"for",
"index",
",",
"xpath",
"in",
"enumerate",
"(",
"xpaths",
")",
":",
"value",
"=",
"values",
"[",
"index",
"]",
"if",
"values",
"else",
"None",
"each",
".",
"extend",
"(",
"update_element",
"(",
"tree_to_update",
",",
"index",
",",
"xpath_root",
",",
"xpath",
",",
"value",
")",
")",
"return",
"each"
] | Default update operation for a single parser property. If xpaths contains one xpath,
then one element per value will be inserted at that location in the tree_to_update;
otherwise, the number of values must match the number of xpaths. | [
"Default",
"update",
"operation",
"for",
"a",
"single",
"parser",
"property",
".",
"If",
"xpaths",
"contains",
"one",
"xpath",
"then",
"one",
"element",
"per",
"value",
"will",
"be",
"inserted",
"at",
"that",
"location",
"in",
"the",
"tree_to_update",
";",
"otherwise",
"the",
"number",
"of",
"values",
"must",
"match",
"the",
"number",
"of",
"xpaths",
"."
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L426-L486 | train | 232 |
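A sketch of the 'path/@attr' convention handled above, with a minimal assumed stand-in for get_xpath_tuple:

def xpath_tuple(xpath):  # assumed behavior of get_xpath_tuple
    head, _, attr = xpath.partition('/@')
    return head, (attr or None)

print(xpath_tuple('onlink/@href'))  # ('onlink', 'href')
print(xpath_tuple('abstract'))      # ('abstract', None)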
consbio/gis-metadata-parser | gis_metadata/utils.py | validate_complex | def validate_complex(prop, value, xpath_map=None):
""" Default validation for single complex data structure """
if value is not None:
validate_type(prop, value, dict)
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for complex_prop, complex_val in iteritems(value):
complex_key = '.'.join((prop, complex_prop))
if complex_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
validate_type(complex_key, complex_val, (string_types, list)) | python | def validate_complex(prop, value, xpath_map=None):
""" Default validation for single complex data structure """
if value is not None:
validate_type(prop, value, dict)
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for complex_prop, complex_val in iteritems(value):
complex_key = '.'.join((prop, complex_prop))
if complex_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
validate_type(complex_key, complex_val, (string_types, list)) | [
"def",
"validate_complex",
"(",
"prop",
",",
"value",
",",
"xpath_map",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"validate_type",
"(",
"prop",
",",
"value",
",",
"dict",
")",
"if",
"prop",
"in",
"_complex_definitions",
":",
"complex_keys",
"=",
"_complex_definitions",
"[",
"prop",
"]",
"else",
":",
"complex_keys",
"=",
"{",
"}",
"if",
"xpath_map",
"is",
"None",
"else",
"xpath_map",
"for",
"complex_prop",
",",
"complex_val",
"in",
"iteritems",
"(",
"value",
")",
":",
"complex_key",
"=",
"'.'",
".",
"join",
"(",
"(",
"prop",
",",
"complex_prop",
")",
")",
"if",
"complex_prop",
"not",
"in",
"complex_keys",
":",
"_validation_error",
"(",
"prop",
",",
"None",
",",
"value",
",",
"(",
"'keys: {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"complex_keys",
")",
")",
")",
")",
"validate_type",
"(",
"complex_key",
",",
"complex_val",
",",
"(",
"string_types",
",",
"list",
")",
")"
] | Default validation for single complex data structure | [
"Default",
"validation",
"for",
"single",
"complex",
"data",
"structure"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L572-L589 | train | 233 |
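A hedged sketch of the key check above, passing an explicit xpath_map instead of relying on the module registry; the keys and values are made up:

xpath_map = {'name': 'cntorgp/cntorg', 'url': 'onlink'}  # assumed declared keys
good = {'name': 'USGS', 'url': 'https://example.org'}
bad = {'names': 'USGS'}

print(set(good) - set(xpath_map))  # set(): all keys declared, passes
print(set(bad) - set(xpath_map))   # {'names'}: would trigger _validation_error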
consbio/gis-metadata-parser | gis_metadata/utils.py | validate_complex_list | def validate_complex_list(prop, value, xpath_map=None):
""" Default validation for Attribute Details data structure """
if value is not None:
validate_type(prop, value, (dict, list))
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for idx, complex_struct in enumerate(wrap_value(value)):
cs_idx = prop + '[' + str(idx) + ']'
validate_type(cs_idx, complex_struct, dict)
for cs_prop, cs_val in iteritems(complex_struct):
cs_key = '.'.join((cs_idx, cs_prop))
if cs_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
if not isinstance(cs_val, list):
validate_type(cs_key, cs_val, (string_types, list))
else:
for list_idx, list_val in enumerate(cs_val):
list_prop = cs_key + '[' + str(list_idx) + ']'
validate_type(list_prop, list_val, string_types) | python | def validate_complex_list(prop, value, xpath_map=None):
""" Default validation for Attribute Details data structure """
if value is not None:
validate_type(prop, value, (dict, list))
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = {} if xpath_map is None else xpath_map
for idx, complex_struct in enumerate(wrap_value(value)):
cs_idx = prop + '[' + str(idx) + ']'
validate_type(cs_idx, complex_struct, dict)
for cs_prop, cs_val in iteritems(complex_struct):
cs_key = '.'.join((cs_idx, cs_prop))
if cs_prop not in complex_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
if not isinstance(cs_val, list):
validate_type(cs_key, cs_val, (string_types, list))
else:
for list_idx, list_val in enumerate(cs_val):
list_prop = cs_key + '[' + str(list_idx) + ']'
validate_type(list_prop, list_val, string_types) | [
"def",
"validate_complex_list",
"(",
"prop",
",",
"value",
",",
"xpath_map",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"validate_type",
"(",
"prop",
",",
"value",
",",
"(",
"dict",
",",
"list",
")",
")",
"if",
"prop",
"in",
"_complex_definitions",
":",
"complex_keys",
"=",
"_complex_definitions",
"[",
"prop",
"]",
"else",
":",
"complex_keys",
"=",
"{",
"}",
"if",
"xpath_map",
"is",
"None",
"else",
"xpath_map",
"for",
"idx",
",",
"complex_struct",
"in",
"enumerate",
"(",
"wrap_value",
"(",
"value",
")",
")",
":",
"cs_idx",
"=",
"prop",
"+",
"'['",
"+",
"str",
"(",
"idx",
")",
"+",
"']'",
"validate_type",
"(",
"cs_idx",
",",
"complex_struct",
",",
"dict",
")",
"for",
"cs_prop",
",",
"cs_val",
"in",
"iteritems",
"(",
"complex_struct",
")",
":",
"cs_key",
"=",
"'.'",
".",
"join",
"(",
"(",
"cs_idx",
",",
"cs_prop",
")",
")",
"if",
"cs_prop",
"not",
"in",
"complex_keys",
":",
"_validation_error",
"(",
"prop",
",",
"None",
",",
"value",
",",
"(",
"'keys: {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"complex_keys",
")",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"cs_val",
",",
"list",
")",
":",
"validate_type",
"(",
"cs_key",
",",
"cs_val",
",",
"(",
"string_types",
",",
"list",
")",
")",
"else",
":",
"for",
"list_idx",
",",
"list_val",
"in",
"enumerate",
"(",
"cs_val",
")",
":",
"list_prop",
"=",
"cs_key",
"+",
"'['",
"+",
"str",
"(",
"list_idx",
")",
"+",
"']'",
"validate_type",
"(",
"list_prop",
",",
"list_val",
",",
"string_types",
")"
] | Default validation for Attribute Details data structure | [
"Default",
"validation",
"for",
"Attribute",
"Details",
"data",
"structure"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L592-L618 | train | 234 |
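A sketch of the indexed error-path naming built above (prop[idx].key[list_idx]), printed for assumed data:

prop, value = 'attributes', [{'aliases': ['elev', 'z']}]
for idx, struct in enumerate(value):
    for key, vals in struct.items():
        for list_idx, item in enumerate(vals):
            print('{0}[{1}].{2}[{3}] = {4}'.format(prop, idx, key, list_idx, item))
# attributes[0].aliases[0] = elev
# attributes[0].aliases[1] = z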
consbio/gis-metadata-parser | gis_metadata/utils.py | validate_dates | def validate_dates(prop, value, xpath_map=None):
""" Default validation for Date Types data structure """
if value is not None:
validate_type(prop, value, dict)
date_keys = set(value)
if date_keys:
if DATE_TYPE not in date_keys or DATE_VALUES not in date_keys:
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = _complex_definitions[DATES] if xpath_map is None else xpath_map
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
date_type = value[DATE_TYPE]
if date_type not in DATE_TYPES:
_validation_error('dates.type', None, date_type, DATE_TYPES)
date_vals = value[DATE_VALUES]
validate_type('dates.values', date_vals, list)
dates_len = len(date_vals)
if date_type == DATE_TYPE_MISSING and dates_len != 0:
_validation_error('len(dates.values)', None, dates_len, 0)
if date_type == DATE_TYPE_SINGLE and dates_len != 1:
_validation_error('len(dates.values)', None, dates_len, 1)
if date_type == DATE_TYPE_RANGE and dates_len != 2:
_validation_error('len(dates.values)', None, dates_len, 2)
if date_type == DATE_TYPE_MULTIPLE and dates_len < 2:
_validation_error('len(dates.values)', None, dates_len, 'at least two')
for idx, date in enumerate(date_vals):
date_key = 'dates.value[' + str(idx) + ']'
validate_type(date_key, date, string_types) | python | def validate_dates(prop, value, xpath_map=None):
""" Default validation for Date Types data structure """
if value is not None:
validate_type(prop, value, dict)
date_keys = set(value)
if date_keys:
if DATE_TYPE not in date_keys or DATE_VALUES not in date_keys:
if prop in _complex_definitions:
complex_keys = _complex_definitions[prop]
else:
complex_keys = _complex_definitions[DATES] if xpath_map is None else xpath_map
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(complex_keys))))
date_type = value[DATE_TYPE]
if date_type not in DATE_TYPES:
_validation_error('dates.type', None, date_type, DATE_TYPES)
date_vals = value[DATE_VALUES]
validate_type('dates.values', date_vals, list)
dates_len = len(date_vals)
if date_type == DATE_TYPE_MISSING and dates_len != 0:
_validation_error('len(dates.values)', None, dates_len, 0)
if date_type == DATE_TYPE_SINGLE and dates_len != 1:
_validation_error('len(dates.values)', None, dates_len, 1)
if date_type == DATE_TYPE_RANGE and dates_len != 2:
_validation_error('len(dates.values)', None, dates_len, 2)
if date_type == DATE_TYPE_MULTIPLE and dates_len < 2:
_validation_error('len(dates.values)', None, dates_len, 'at least two')
for idx, date in enumerate(date_vals):
date_key = 'dates.value[' + str(idx) + ']'
validate_type(date_key, date, string_types) | [
"def",
"validate_dates",
"(",
"prop",
",",
"value",
",",
"xpath_map",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"validate_type",
"(",
"prop",
",",
"value",
",",
"dict",
")",
"date_keys",
"=",
"set",
"(",
"value",
")",
"if",
"date_keys",
":",
"if",
"DATE_TYPE",
"not",
"in",
"date_keys",
"or",
"DATE_VALUES",
"not",
"in",
"date_keys",
":",
"if",
"prop",
"in",
"_complex_definitions",
":",
"complex_keys",
"=",
"_complex_definitions",
"[",
"prop",
"]",
"else",
":",
"complex_keys",
"=",
"_complex_definitions",
"[",
"DATES",
"]",
"if",
"xpath_map",
"is",
"None",
"else",
"xpath_map",
"_validation_error",
"(",
"prop",
",",
"None",
",",
"value",
",",
"(",
"'keys: {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"complex_keys",
")",
")",
")",
")",
"date_type",
"=",
"value",
"[",
"DATE_TYPE",
"]",
"if",
"date_type",
"not",
"in",
"DATE_TYPES",
":",
"_validation_error",
"(",
"'dates.type'",
",",
"None",
",",
"date_type",
",",
"DATE_TYPES",
")",
"date_vals",
"=",
"value",
"[",
"DATE_VALUES",
"]",
"validate_type",
"(",
"'dates.values'",
",",
"date_vals",
",",
"list",
")",
"dates_len",
"=",
"len",
"(",
"date_vals",
")",
"if",
"date_type",
"==",
"DATE_TYPE_MISSING",
"and",
"dates_len",
"!=",
"0",
":",
"_validation_error",
"(",
"'len(dates.values)'",
",",
"None",
",",
"dates_len",
",",
"0",
")",
"if",
"date_type",
"==",
"DATE_TYPE_SINGLE",
"and",
"dates_len",
"!=",
"1",
":",
"_validation_error",
"(",
"'len(dates.values)'",
",",
"None",
",",
"dates_len",
",",
"1",
")",
"if",
"date_type",
"==",
"DATE_TYPE_RANGE",
"and",
"dates_len",
"!=",
"2",
":",
"_validation_error",
"(",
"'len(dates.values)'",
",",
"None",
",",
"dates_len",
",",
"2",
")",
"if",
"date_type",
"==",
"DATE_TYPE_MULTIPLE",
"and",
"dates_len",
"<",
"2",
":",
"_validation_error",
"(",
"'len(dates.values)'",
",",
"None",
",",
"dates_len",
",",
"'at least two'",
")",
"for",
"idx",
",",
"date",
"in",
"enumerate",
"(",
"date_vals",
")",
":",
"date_key",
"=",
"'dates.value['",
"+",
"str",
"(",
"idx",
")",
"+",
"']'",
"validate_type",
"(",
"date_key",
",",
"date",
",",
"string_types",
")"
] | Default validation for Date Types data structure | [
"Default",
"validation",
"for",
"Date",
"Types",
"data",
"structure"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L621-L663 | train | 235 |
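The per-type length rules above, restated as a small check; the 'missing'/'single'/'range'/'multiple' strings are assumed to match the module's DATE_TYPE_* constants:

expected = {'missing': 0, 'single': 1, 'range': 2}  # 'multiple' needs >= 2

dates = {'type': 'range', 'values': ['2001-01-01', '2010-12-31']}
want = expected.get(dates['type'])
ok = len(dates['values']) >= 2 if dates['type'] == 'multiple' else len(dates['values']) == want
print(ok)  # True: a range carries exactly two values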
consbio/gis-metadata-parser | gis_metadata/utils.py | validate_process_steps | def validate_process_steps(prop, value):
""" Default validation for Process Steps data structure """
if value is not None:
validate_type(prop, value, (dict, list))
procstep_keys = set(_complex_definitions[prop])
for idx, procstep in enumerate(wrap_value(value)):
ps_idx = prop + '[' + str(idx) + ']'
validate_type(ps_idx, procstep, dict)
for ps_prop, ps_val in iteritems(procstep):
ps_key = '.'.join((ps_idx, ps_prop))
if ps_prop not in procstep_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(procstep_keys))))
if ps_prop != 'sources':
validate_type(ps_key, ps_val, string_types)
else:
validate_type(ps_key, ps_val, (string_types, list))
for src_idx, src_val in enumerate(wrap_value(ps_val)):
src_key = ps_key + '[' + str(src_idx) + ']'
validate_type(src_key, src_val, string_types) | python | def validate_process_steps(prop, value):
""" Default validation for Process Steps data structure """
if value is not None:
validate_type(prop, value, (dict, list))
procstep_keys = set(_complex_definitions[prop])
for idx, procstep in enumerate(wrap_value(value)):
ps_idx = prop + '[' + str(idx) + ']'
validate_type(ps_idx, procstep, dict)
for ps_prop, ps_val in iteritems(procstep):
ps_key = '.'.join((ps_idx, ps_prop))
if ps_prop not in procstep_keys:
_validation_error(prop, None, value, ('keys: {0}'.format(','.join(procstep_keys))))
if ps_prop != 'sources':
validate_type(ps_key, ps_val, string_types)
else:
validate_type(ps_key, ps_val, (string_types, list))
for src_idx, src_val in enumerate(wrap_value(ps_val)):
src_key = ps_key + '[' + str(src_idx) + ']'
validate_type(src_key, src_val, string_types) | [
"def",
"validate_process_steps",
"(",
"prop",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"validate_type",
"(",
"prop",
",",
"value",
",",
"(",
"dict",
",",
"list",
")",
")",
"procstep_keys",
"=",
"set",
"(",
"_complex_definitions",
"[",
"prop",
"]",
")",
"for",
"idx",
",",
"procstep",
"in",
"enumerate",
"(",
"wrap_value",
"(",
"value",
")",
")",
":",
"ps_idx",
"=",
"prop",
"+",
"'['",
"+",
"str",
"(",
"idx",
")",
"+",
"']'",
"validate_type",
"(",
"ps_idx",
",",
"procstep",
",",
"dict",
")",
"for",
"ps_prop",
",",
"ps_val",
"in",
"iteritems",
"(",
"procstep",
")",
":",
"ps_key",
"=",
"'.'",
".",
"join",
"(",
"(",
"ps_idx",
",",
"ps_prop",
")",
")",
"if",
"ps_prop",
"not",
"in",
"procstep_keys",
":",
"_validation_error",
"(",
"prop",
",",
"None",
",",
"value",
",",
"(",
"'keys: {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"procstep_keys",
")",
")",
")",
")",
"if",
"ps_prop",
"!=",
"'sources'",
":",
"validate_type",
"(",
"ps_key",
",",
"ps_val",
",",
"string_types",
")",
"else",
":",
"validate_type",
"(",
"ps_key",
",",
"ps_val",
",",
"(",
"string_types",
",",
"list",
")",
")",
"for",
"src_idx",
",",
"src_val",
"in",
"enumerate",
"(",
"wrap_value",
"(",
"ps_val",
")",
")",
":",
"src_key",
"=",
"ps_key",
"+",
"'['",
"+",
"str",
"(",
"src_idx",
")",
"+",
"']'",
"validate_type",
"(",
"src_key",
",",
"src_val",
",",
"string_types",
")"
] | Default validation for Process Steps data structure | [
"Default",
"validation",
"for",
"Process",
"Steps",
"data",
"structure"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L666-L691 | train | 236 |
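A hedged sketch of a process step that satisfies the checks above; str stands in for the py2/py3 string_types tuple:

step = {'description': 'Digitized from source maps', 'sources': ['map A', 'map B']}
for key, val in step.items():
    if key == 'sources':
        assert isinstance(val, (str, list))  # sources may be a string or a list
        for src in (val if isinstance(val, list) else [val]):
            assert isinstance(src, str)
    else:
        assert isinstance(val, str)  # every other key must hold a string
print('valid process step')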
consbio/gis-metadata-parser | gis_metadata/utils.py | validate_type | def validate_type(prop, value, expected):
""" Default validation for all types """
# Validate on expected type(s), but ignore None: defaults handled elsewhere
if value is not None and not isinstance(value, expected):
_validation_error(prop, type(value).__name__, None, expected) | python | def validate_type(prop, value, expected):
""" Default validation for all types """
# Validate on expected type(s), but ignore None: defaults handled elsewhere
if value is not None and not isinstance(value, expected):
_validation_error(prop, type(value).__name__, None, expected) | [
"def",
"validate_type",
"(",
"prop",
",",
"value",
",",
"expected",
")",
":",
"# Validate on expected type(s), but ignore None: defaults handled elsewhere",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"expected",
")",
":",
"_validation_error",
"(",
"prop",
",",
"type",
"(",
"value",
")",
".",
"__name__",
",",
"None",
",",
"expected",
")"
] | Default validation for all types | [
"Default",
"validation",
"for",
"all",
"types"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L710-L715 | train | 237 |
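A usage sketch of the rule above: None always passes (defaults are handled elsewhere), wrong types would raise via _validation_error:

def type_ok(value, expected):
    return value is None or isinstance(value, expected)

print(type_ok(None, str))        # True: None is deferred to defaults
print(type_ok('abstract', str))  # True
print(type_ok(42, (str, list)))  # False: _validation_error would raise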
consbio/gis-metadata-parser | gis_metadata/utils.py | _validation_error | def _validation_error(prop, prop_type, prop_value, expected):
""" Default validation for updated properties """
if prop_type is None:
attrib = 'value'
assigned = prop_value
else:
attrib = 'type'
assigned = prop_type
raise ValidationError(
'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}',
attrib=attrib, prop=prop, assigned=assigned, expected=expected,
invalid={prop: prop_value} if attrib == 'value' else {}
) | python | def _validation_error(prop, prop_type, prop_value, expected):
""" Default validation for updated properties """
if prop_type is None:
attrib = 'value'
assigned = prop_value
else:
attrib = 'type'
assigned = prop_type
raise ValidationError(
'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}',
attrib=attrib, prop=prop, assigned=assigned, expected=expected,
invalid={prop: prop_value} if attrib == 'value' else {}
) | [
"def",
"_validation_error",
"(",
"prop",
",",
"prop_type",
",",
"prop_value",
",",
"expected",
")",
":",
"if",
"prop_type",
"is",
"None",
":",
"attrib",
"=",
"'value'",
"assigned",
"=",
"prop_value",
"else",
":",
"attrib",
"=",
"'type'",
"assigned",
"=",
"prop_type",
"raise",
"ValidationError",
"(",
"'Invalid property {attrib} for {prop}:\\n\\t{attrib}: {assigned}\\n\\texpected: {expected}'",
",",
"attrib",
"=",
"attrib",
",",
"prop",
"=",
"prop",
",",
"assigned",
"=",
"assigned",
",",
"expected",
"=",
"expected",
",",
"invalid",
"=",
"{",
"prop",
":",
"prop_value",
"}",
"if",
"attrib",
"==",
"'value'",
"else",
"{",
"}",
")"
] | Default validation for updated properties | [
"Default",
"validation",
"for",
"updated",
"properties"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L718-L732 | train | 238 |
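A sketch of the message the helper above formats for the value-mismatch case, with assumed inputs:

attrib, prop, assigned, expected = 'value', 'dates.type', 'weekly', ('single', 'range')
print('Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}'.format(
    attrib=attrib, prop=prop, assigned=assigned, expected=expected))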
consbio/gis-metadata-parser | gis_metadata/utils.py | ParserProperty.get_prop | def get_prop(self, prop):
""" Calls the getter with no arguments and returns its value """
if self._parser is None:
raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
return self._parser(prop) if prop else self._parser() | python | def get_prop(self, prop):
""" Calls the getter with no arguments and returns its value """
if self._parser is None:
raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
return self._parser(prop) if prop else self._parser() | [
"def",
"get_prop",
"(",
"self",
",",
"prop",
")",
":",
"if",
"self",
".",
"_parser",
"is",
"None",
":",
"raise",
"ConfigurationError",
"(",
"'Cannot call ParserProperty.\"get_prop\" with no parser configured'",
")",
"return",
"self",
".",
"_parser",
"(",
"prop",
")",
"if",
"prop",
"else",
"self",
".",
"_parser",
"(",
")"
] | Calls the getter with no arguments and returns its value | [
"Calls",
"the",
"getter",
"with",
"no",
"arguments",
"and",
"returns",
"its",
"value"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L765-L771 | train | 239 |
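To illustrate the guard in get_prop without depending on the real class, here is a self-contained stub that mirrors only the logic shown above; the stub's constructor is an invention, not the library's API:

class ConfigurationError(Exception):
    pass

class ParserPropertyStub:
    def __init__(self, parser=None):
        self._parser = parser  # the callable get_prop delegates to

    def get_prop(self, prop):
        if self._parser is None:
            raise ConfigurationError('Cannot call ParserProperty."get_prop" with no parser configured')
        return self._parser(prop) if prop else self._parser()

prop = ParserPropertyStub(parser=lambda p=None: {'title': 'demo'}.get(p, 'default'))
print(prop.get_prop('title'))  # -> 'demo'
print(prop.get_prop(''))       # falsy prop: parser is called with no arguments -> 'default'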
disqus/nydus | nydus/db/backends/memcache.py | can_group_commands | def can_group_commands(command, next_command):
"""
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
"""
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
# TODO: support multi commands
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
# if the shared args (key, or key/value) do not match, we cannot group
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
# If the keyword arguments do not match (e.g. key_prefix, or timeout on set)
# then we cannot group
if command.get_kwargs() != next_command.get_kwargs():
return False
return True | python | def can_group_commands(command, next_command):
"""
Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same?
"""
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
# TODO: support multi commands
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
# if the shared args (key, or key/value) do not match, we cannot group
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
# If the keyword arguments do not match (e.g. key_prefix, or timeout on set)
# then we cannot group
if command.get_kwargs() != next_command.get_kwargs():
return False
return True | [
"def",
"can_group_commands",
"(",
"command",
",",
"next_command",
")",
":",
"multi_capable_commands",
"=",
"(",
"'get'",
",",
"'set'",
",",
"'delete'",
")",
"if",
"next_command",
"is",
"None",
":",
"return",
"False",
"name",
"=",
"command",
".",
"get_name",
"(",
")",
"# TODO: support multi commands",
"if",
"name",
"not",
"in",
"multi_capable_commands",
":",
"return",
"False",
"if",
"name",
"!=",
"next_command",
".",
"get_name",
"(",
")",
":",
"return",
"False",
"# if the shared args (key, or key/value) do not match, we cannot group",
"if",
"grouped_args_for_command",
"(",
"command",
")",
"!=",
"grouped_args_for_command",
"(",
"next_command",
")",
":",
"return",
"False",
"# If the keyword arguments do not much (e.g. key_prefix, or timeout on set)",
"# then we cannot group",
"if",
"command",
".",
"get_kwargs",
"(",
")",
"!=",
"next_command",
".",
"get_kwargs",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same? | [
"Returns",
"a",
"boolean",
"representing",
"whether",
"these",
"commands",
"can",
"be",
"grouped",
"together",
"or",
"not",
"."
] | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/backends/memcache.py#L97-L135 | train | 240 |
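A hedged sketch of the predicate in use; FakeCommand is a minimal duck-typed stand-in for nydus's command objects, with its shape inferred from the calls above rather than from documented API:

from nydus.db.backends.memcache import can_group_commands

class FakeCommand:
    """Minimal stand-in exposing the methods can_group_commands calls."""
    def __init__(self, name, args, kwargs=None):
        self._name, self._args, self._kwargs = name, list(args), dict(kwargs or {})
    def get_name(self):
        return self._name
    def get_args(self):
        return self._args
    def get_kwargs(self):
        return self._kwargs

a = FakeCommand('set', ['key1', 'value1'], {'time': 60})
b = FakeCommand('set', ['key2', 'value2'], {'time': 60})
c = FakeCommand('set', ['key3', 'value3'], {'time': 300})

print(can_group_commands(a, b))  # True: same verb, matching kwargs
print(can_group_commands(a, c))  # False: the timeout kwarg differs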
christophertbrown/bioscripts | ctbBio/rp16.py | find_databases | def find_databases(databases):
"""
define ribosomal proteins and location of curated databases
"""
# 16 ribosomal proteins in their expected order
proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14',
'S17', 'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10']
# curated databases
protein_databases = {
'L14': 'rpL14_JGI_MDM.filtered.faa',
'L15': 'rpL15_JGI_MDM.filtered.faa',
'L16': 'rpL16_JGI_MDM.filtered.faa',
'L18': 'rpL18_JGI_MDM.filtered.faa',
'L22': 'rpL22_JGI_MDM.filtered.faa',
'L24': 'rpL24_JGI_MDM.filtered.faa',
'L2': 'rpL2_JGI_MDM.filtered.faa',
'L3': 'rpL3_JGI_MDM.filtered.faa',
'L4': 'rpL4_JGI_MDM.filtered.faa',
'L5': 'rpL5_JGI_MDM.filtered.faa',
'L6': 'rpL6_JGI_MDM.filtered.faa',
'S10': 'rpS10_JGI_MDM.filtered.faa',
'S17': 'rpS17_JGI_MDM.filtered.faa',
'S19': 'rpS19_JGI_MDM.filtered.faa',
'S3': 'rpS3_JGI_MDM.filtered.faa',
'S8': 'rpS8_JGI_MDM.filtered.faa'}
protein_databases = {key: '%s/%s' % (databases, database) \
for key, database in list(protein_databases.items())}
return proteins, protein_databases | python | def find_databases(databases):
"""
define ribosomal proteins and location of curated databases
"""
# 16 ribosomal proteins in their expected order
proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14',
'S17', 'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10']
# curated databases
protein_databases = {
'L14': 'rpL14_JGI_MDM.filtered.faa',
'L15': 'rpL15_JGI_MDM.filtered.faa',
'L16': 'rpL16_JGI_MDM.filtered.faa',
'L18': 'rpL18_JGI_MDM.filtered.faa',
'L22': 'rpL22_JGI_MDM.filtered.faa',
'L24': 'rpL24_JGI_MDM.filtered.faa',
'L2': 'rpL2_JGI_MDM.filtered.faa',
'L3': 'rpL3_JGI_MDM.filtered.faa',
'L4': 'rpL4_JGI_MDM.filtered.faa',
'L5': 'rpL5_JGI_MDM.filtered.faa',
'L6': 'rpL6_JGI_MDM.filtered.faa',
'S10': 'rpS10_JGI_MDM.filtered.faa',
'S17': 'rpS17_JGI_MDM.filtered.faa',
'S19': 'rpS19_JGI_MDM.filtered.faa',
'S3': 'rpS3_JGI_MDM.filtered.faa',
'S8': 'rpS8_JGI_MDM.filtered.faa'}
protein_databases = {key: '%s/%s' % (databases, database) \
for key, database in list(protein_databases.items())}
return proteins, protein_databases | [
"def",
"find_databases",
"(",
"databases",
")",
":",
"# 16 ribosomal proteins in their expected order",
"proteins",
"=",
"[",
"'L15'",
",",
"'L18'",
",",
"'L6'",
",",
"'S8'",
",",
"'L5'",
",",
"'L24'",
",",
"'L14'",
",",
"'S17'",
",",
"'L16'",
",",
"'S3'",
",",
"'L22'",
",",
"'S19'",
",",
"'L2'",
",",
"'L4'",
",",
"'L3'",
",",
"'S10'",
"]",
"# curated databases",
"protein_databases",
"=",
"{",
"'L14'",
":",
"'rpL14_JGI_MDM.filtered.faa'",
",",
"'L15'",
":",
"'rpL15_JGI_MDM.filtered.faa'",
",",
"'L16'",
":",
"'rpL16_JGI_MDM.filtered.faa'",
",",
"'L18'",
":",
"'rpL18_JGI_MDM.filtered.faa'",
",",
"'L22'",
":",
"'rpL22_JGI_MDM.filtered.faa'",
",",
"'L24'",
":",
"'rpL24_JGI_MDM.filtered.faa'",
",",
"'L2'",
":",
"'rpL2_JGI_MDM.filtered.faa'",
",",
"'L3'",
":",
"'rpL3_JGI_MDM.filtered.faa'",
",",
"'L4'",
":",
"'rpL4_JGI_MDM.filtered.faa'",
",",
"'L5'",
":",
"'rpL5_JGI_MDM.filtered.faa'",
",",
"'L6'",
":",
"'rpL6_JGI_MDM.filtered.faa'",
",",
"'S10'",
":",
"'rpS10_JGI_MDM.filtered.faa'",
",",
"'S17'",
":",
"'rpS17_JGI_MDM.filtered.faa'",
",",
"'S19'",
":",
"'rpS19_JGI_MDM.filtered.faa'",
",",
"'S3'",
":",
"'rpS3_JGI_MDM.filtered.faa'",
",",
"'S8'",
":",
"'rpS8_JGI_MDM.filtered.faa'",
"}",
"protein_databases",
"=",
"{",
"key",
":",
"'%s/%s'",
"%",
"(",
"databases",
",",
"database",
")",
"for",
"key",
",",
"database",
"in",
"list",
"(",
"protein_databases",
".",
"items",
"(",
")",
")",
"}",
"return",
"proteins",
",",
"protein_databases"
] | define ribosomal proteins and location of curated databases | [
"define",
"ribosomal",
"proteins",
"and",
"location",
"of",
"curated",
"databases"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L21-L48 | train | 241 |
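Example call for the record above; the directory argument is a placeholder, and the import assumes the package installs under ctbBio as the file path suggests:

from ctbBio.rp16 import find_databases

proteins, dbs = find_databases('/opt/rp16/databases')
print(proteins[0])  # 'L15' -- first protein in the expected syntenic order
print(dbs['S3'])    # '/opt/rp16/databases/rpS3_JGI_MDM.filtered.faa'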
christophertbrown/bioscripts | ctbBio/rp16.py | find_next | def find_next(start, stop, i2hits):
"""
which protein has the best hit, the one to the 'right' or to the 'left'?
"""
if start not in i2hits and stop in i2hits:
index = stop
elif stop not in i2hits and start in i2hits:
index = start
elif start not in i2hits and stop not in i2hits:
index = choice([start, stop])
i2hits[index] = [[False]]
else:
A, B = i2hits[start][0], i2hits[stop][0]
if B[10] <= A[10]:
index = stop
else:
index = start
if index == start:
nstart = start - 1
nstop = stop
else:
nstop = stop + 1
nstart = start
match = i2hits[index][0]
rp = match[-1]
return index, nstart, nstop, rp, match | python | def find_next(start, stop, i2hits):
"""
which protein has the best hit, the one to the 'right' or to the 'left'?
"""
if start not in i2hits and stop in i2hits:
index = stop
elif stop not in i2hits and start in i2hits:
index = start
elif start not in i2hits and stop not in i2hits:
index = choice([start, stop])
i2hits[index] = [[False]]
else:
A, B = i2hits[start][0], i2hits[stop][0]
if B[10] <= A[10]:
index = stop
else:
index = start
if index == start:
nstart = start - 1
nstop = stop
else:
nstop = stop + 1
nstart = start
match = i2hits[index][0]
rp = match[-1]
return index, nstart, nstop, rp, match | [
"def",
"find_next",
"(",
"start",
",",
"stop",
",",
"i2hits",
")",
":",
"if",
"start",
"not",
"in",
"i2hits",
"and",
"stop",
"in",
"i2hits",
":",
"index",
"=",
"stop",
"elif",
"stop",
"not",
"in",
"i2hits",
"and",
"start",
"in",
"i2hits",
":",
"index",
"=",
"start",
"elif",
"start",
"not",
"in",
"i2hits",
"and",
"stop",
"not",
"in",
"i2hits",
":",
"index",
"=",
"choice",
"(",
"[",
"start",
",",
"stop",
"]",
")",
"i2hits",
"[",
"index",
"]",
"=",
"[",
"[",
"False",
"]",
"]",
"else",
":",
"A",
",",
"B",
"=",
"i2hits",
"[",
"start",
"]",
"[",
"0",
"]",
",",
"i2hits",
"[",
"stop",
"]",
"[",
"0",
"]",
"if",
"B",
"[",
"10",
"]",
"<=",
"A",
"[",
"10",
"]",
":",
"index",
"=",
"stop",
"else",
":",
"index",
"=",
"start",
"if",
"index",
"==",
"start",
":",
"nstart",
"=",
"start",
"-",
"1",
"nstop",
"=",
"stop",
"else",
":",
"nstop",
"=",
"stop",
"+",
"1",
"nstart",
"=",
"start",
"match",
"=",
"i2hits",
"[",
"index",
"]",
"[",
"0",
"]",
"rp",
"=",
"match",
"[",
"-",
"1",
"]",
"return",
"index",
",",
"nstart",
",",
"nstop",
",",
"rp",
",",
"match"
] | which protein has the best hit, the one to the 'right' or to the 'left'? | [
"which",
"protein",
"has",
"the",
"best",
"hit",
"the",
"one",
"to",
"the",
"right",
"or",
"to",
"the",
"left?"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L77-L102 | train | 242 |
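A synthetic sketch of find_next; only row[10] (the e-value) and row[-1] (the protein name) matter to the code above, so the rest of each hit row is made-up tabular filler, and the import path is assumed from the record's file path:

from ctbBio.rp16 import find_next

left_hit  = ['g3', 'rpL2_db', 90.0, 200, 0, 0, 1, 200, 1, 200, 1e-50, 300.0, 'L2']
right_hit = ['g5', 'rpL4_db', 85.0, 180, 0, 0, 1, 180, 1, 180, 1e-40, 250.0, 'L4']
i2hits = {3: [left_hit], 5: [right_hit]}

index, nstart, nstop, rp, match = find_next(3, 5, i2hits)
print(index, rp)      # 3 L2 -- the left hit wins on the smaller e-value
print(nstart, nstop)  # 2 5  -- the window extends one gene further left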
christophertbrown/bioscripts | ctbBio/rp16.py | find_ribosomal | def find_ribosomal(rps, scaffolds, s2rp, min_hits, max_hits_rp, max_errors):
"""
determine which hits represent real ribosomal proteins, identify each in syntenic block
max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold
"""
for scaffold, proteins in list(s2rp.items()):
# for each scaffold, get best hits for each rp
hits = {p: [i for i in sorted(hits, key = itemgetter(10))][0:max_hits_rp]
for p, hits in list(proteins.items()) if len(hits) > 0}
# skip if fewer than min_hits RPs are identified
if len(hits) < min_hits:
continue
best = sorted([hit[0] + [p]
for p, hit in list(hits.items())], key = itemgetter(10))[0]
block = find_block(rps, scaffolds[scaffold], hits, best, max_errors)
if (len(block) - 1) >= min_hits:
yield scaffold, block | python | def find_ribosomal(rps, scaffolds, s2rp, min_hits, max_hits_rp, max_errors):
"""
determine which hits represent real ribosomal proteins, identify each in syntenic block
max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold
"""
for scaffold, proteins in list(s2rp.items()):
# for each scaffold, get best hits for each rp
hits = {p: [i for i in sorted(hits, key = itemgetter(10))][0:max_hits_rp]
for p, hits in list(proteins.items()) if len(hits) > 0}
# skip if fewer than min_hits RPs are identified
if len(hits) < min_hits:
continue
best = sorted([hit[0] + [p]
for p, hit in list(hits.items())], key = itemgetter(10))[0]
block = find_block(rps, scaffolds[scaffold], hits, best, max_errors)
if (len(block) - 1) >= min_hits:
yield scaffold, block | [
"def",
"find_ribosomal",
"(",
"rps",
",",
"scaffolds",
",",
"s2rp",
",",
"min_hits",
",",
"max_hits_rp",
",",
"max_errors",
")",
":",
"for",
"scaffold",
",",
"proteins",
"in",
"list",
"(",
"s2rp",
".",
"items",
"(",
")",
")",
":",
"# for each scaffold, get best hits for each rp",
"hits",
"=",
"{",
"p",
":",
"[",
"i",
"for",
"i",
"in",
"sorted",
"(",
"hits",
",",
"key",
"=",
"itemgetter",
"(",
"10",
")",
")",
"]",
"[",
"0",
":",
"max_hits_rp",
"]",
"for",
"p",
",",
"hits",
"in",
"list",
"(",
"proteins",
".",
"items",
"(",
")",
")",
"if",
"len",
"(",
"hits",
")",
">",
"0",
"}",
"# skip if fewer than min_hits RPs are identified",
"if",
"len",
"(",
"hits",
")",
"<",
"min_hits",
":",
"continue",
"best",
"=",
"sorted",
"(",
"[",
"hit",
"[",
"0",
"]",
"+",
"[",
"p",
"]",
"for",
"p",
",",
"hit",
"in",
"list",
"(",
"hits",
".",
"items",
"(",
")",
")",
"]",
",",
"key",
"=",
"itemgetter",
"(",
"10",
")",
")",
"[",
"0",
"]",
"block",
"=",
"find_block",
"(",
"rps",
",",
"scaffolds",
"[",
"scaffold",
"]",
",",
"hits",
",",
"best",
",",
"max_errors",
")",
"if",
"(",
"len",
"(",
"block",
")",
"-",
"1",
")",
">=",
"min_hits",
":",
"yield",
"scaffold",
",",
"block"
] | determine which hits represent real ribosomal proteins, identify each in syntenic block
max_hits_rp = maximum number of hits to consider per ribosomal protein per scaffold | [
"determine",
"which",
"hits",
"represent",
"real",
"ribosomal",
"proteins",
"identify",
"each",
"in",
"syntenic",
"block",
"max_hits_rp",
"=",
"maximum",
"number",
"of",
"hits",
"to",
"consider",
"per",
"ribosomal",
"protein",
"per",
"scaffold"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rp16.py#L134-L150 | train | 243 |
smdabdoub/phylotoast | bin/filter_rep_set.py | filter_rep_set | def filter_rep_set(inF, otuSet):
"""
Parse the rep set file and remove all sequences not associated with unique
OTUs.
:@type inF: file
:@param inF: The representative sequence set
:@rtype: list
:@return: The set of sequences associated with unique OTUs
"""
seqs = []
for record in SeqIO.parse(inF, "fasta"):
if record.id in otuSet:
seqs.append(record)
return seqs | python | def filter_rep_set(inF, otuSet):
"""
Parse the rep set file and remove all sequences not associated with unique
OTUs.
:@type inF: file
:@param inF: The representative sequence set
:@rtype: list
:@return: The set of sequences associated with unique OTUs
"""
seqs = []
for record in SeqIO.parse(inF, "fasta"):
if record.id in otuSet:
seqs.append(record)
return seqs | [
"def",
"filter_rep_set",
"(",
"inF",
",",
"otuSet",
")",
":",
"seqs",
"=",
"[",
"]",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"inF",
",",
"\"fasta\"",
")",
":",
"if",
"record",
".",
"id",
"in",
"otuSet",
":",
"seqs",
".",
"append",
"(",
"record",
")",
"return",
"seqs"
] | Parse the rep set file and remove all sequences not associated with unique
OTUs.
:@type inF: file
:@param inF: The representative sequence set
:@rtype: list
:@return: The set of sequences associated with unique OTUs | [
"Parse",
"the",
"rep",
"set",
"file",
"and",
"remove",
"all",
"sequences",
"not",
"associated",
"with",
"unique",
"OTUs",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/filter_rep_set.py#L32-L47 | train | 244 |
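Usage sketch for the record above; requires Biopython, assumes the bin script's function is importable, and the file names are placeholders:

from Bio import SeqIO
from filter_rep_set import filter_rep_set  # assumes the bin script is on the path

otu_ids = {'OTU_1', 'OTU_7', 'OTU_42'}
with open('rep_set.fasta') as fasta:
    kept = filter_rep_set(fasta, otu_ids)
SeqIO.write(kept, 'rep_set.filtered.fasta', 'fasta')
print('kept %d sequences' % len(kept))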
consbio/gis-metadata-parser | gis_metadata/arcgis_metadata_parser.py | ArcGISParser._update_report_item | def _update_report_item(self, **update_props):
""" Update the text for each element at the configured path if attribute matches """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = wrap_value(update_props['values'])
xroot = self._get_xroot_for(prop)
attr_key = 'type'
attr_val = u''
if prop == 'attribute_accuracy':
attr_val = 'DQQuanAttAcc'
elif prop == 'dataset_completeness':
attr_val = 'DQCompOm'
# Clear (make empty) all elements of the appropriate type
for elem in get_elements(tree_to_update, xroot):
if get_element_attributes(elem).get(attr_key) == attr_val:
clear_element(elem)
# Remove all empty elements, including those previously cleared
remove_empty_element(tree_to_update, xroot)
# Insert elements with correct attributes for each new value
attrs = {attr_key: attr_val}
updated = []
for idx, value in enumerate(values):
elem = insert_element(tree_to_update, idx, xroot, **attrs)
updated.append(insert_element(elem, idx, 'measDesc', value))
return updated | python | def _update_report_item(self, **update_props):
""" Update the text for each element at the configured path if attribute matches """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = wrap_value(update_props['values'])
xroot = self._get_xroot_for(prop)
attr_key = 'type'
attr_val = u''
if prop == 'attribute_accuracy':
attr_val = 'DQQuanAttAcc'
elif prop == 'dataset_completeness':
attr_val = 'DQCompOm'
# Clear (make empty) all elements of the appropriate type
for elem in get_elements(tree_to_update, xroot):
if get_element_attributes(elem).get(attr_key) == attr_val:
clear_element(elem)
# Remove all empty elements, including those previously cleared
remove_empty_element(tree_to_update, xroot)
# Insert elements with correct attributes for each new value
attrs = {attr_key: attr_val}
updated = []
for idx, value in enumerate(values):
elem = insert_element(tree_to_update, idx, xroot, **attrs)
updated.append(insert_element(elem, idx, 'measDesc', value))
return updated | [
"def",
"_update_report_item",
"(",
"self",
",",
"*",
"*",
"update_props",
")",
":",
"tree_to_update",
"=",
"update_props",
"[",
"'tree_to_update'",
"]",
"prop",
"=",
"update_props",
"[",
"'prop'",
"]",
"values",
"=",
"wrap_value",
"(",
"update_props",
"[",
"'values'",
"]",
")",
"xroot",
"=",
"self",
".",
"_get_xroot_for",
"(",
"prop",
")",
"attr_key",
"=",
"'type'",
"attr_val",
"=",
"u''",
"if",
"prop",
"==",
"'attribute_accuracy'",
":",
"attr_val",
"=",
"'DQQuanAttAcc'",
"elif",
"prop",
"==",
"'dataset_completeness'",
":",
"attr_val",
"=",
"'DQCompOm'",
"# Clear (make empty) all elements of the appropriate type",
"for",
"elem",
"in",
"get_elements",
"(",
"tree_to_update",
",",
"xroot",
")",
":",
"if",
"get_element_attributes",
"(",
"elem",
")",
".",
"get",
"(",
"attr_key",
")",
"==",
"attr_val",
":",
"clear_element",
"(",
"elem",
")",
"# Remove all empty elements, including those previously cleared",
"remove_empty_element",
"(",
"tree_to_update",
",",
"xroot",
")",
"# Insert elements with correct attributes for each new value",
"attrs",
"=",
"{",
"attr_key",
":",
"attr_val",
"}",
"updated",
"=",
"[",
"]",
"for",
"idx",
",",
"value",
"in",
"enumerate",
"(",
"values",
")",
":",
"elem",
"=",
"insert_element",
"(",
"tree_to_update",
",",
"idx",
",",
"xroot",
",",
"*",
"*",
"attrs",
")",
"updated",
".",
"append",
"(",
"insert_element",
"(",
"elem",
",",
"idx",
",",
"'measDesc'",
",",
"value",
")",
")",
"return",
"updated"
] | Update the text for each element at the configured path if attribute matches | [
"Update",
"the",
"text",
"for",
"each",
"element",
"at",
"the",
"configured",
"path",
"if",
"attribute",
"matches"
] | 59eefb2e51cd4d8cc3e94623a2167499ca9ef70f | https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/arcgis_metadata_parser.py#L407-L440 | train | 245 |
adafruit/Adafruit_Python_VCNL40xx | Adafruit_VCNL40xx/VCNL40xx.py | VCNL4010._clear_interrupt | def _clear_interrupt(self, intbit):
"""Clear the specified interrupt bit in the interrupt status register.
"""
int_status = self._device.readU8(VCNL4010_INTSTAT);
int_status &= ~intbit;
self._device.write8(VCNL4010_INTSTAT, int_status); | python | def _clear_interrupt(self, intbit):
"""Clear the specified interrupt bit in the interrupt status register.
"""
int_status = self._device.readU8(VCNL4010_INTSTAT);
int_status &= ~intbit;
self._device.write8(VCNL4010_INTSTAT, int_status); | [
"def",
"_clear_interrupt",
"(",
"self",
",",
"intbit",
")",
":",
"int_status",
"=",
"self",
".",
"_device",
".",
"readU8",
"(",
"VCNL4010_INTSTAT",
")",
"int_status",
"&=",
"~",
"intbit",
"self",
".",
"_device",
".",
"write8",
"(",
"VCNL4010_INTSTAT",
",",
"int_status",
")"
] | Clear the specified interrupt bit in the interrupt status register. | [
"Clear",
"the",
"specified",
"interrupt",
"bit",
"in",
"the",
"interrupt",
"status",
"register",
"."
] | f88ec755fd23017028b6dec1be0607ff4a018e10 | https://github.com/adafruit/Adafruit_Python_VCNL40xx/blob/f88ec755fd23017028b6dec1be0607ff4a018e10/Adafruit_VCNL40xx/VCNL40xx.py#L123-L128 | train | 246 |
skojaku/core-periphery-detection | cpalgorithm/Rombach.py | SimAlg.move | def move(self):
"""Swaps two nodes"""
a = random.randint(0, len(self.state) - 1)
b = random.randint(0, len(self.state) - 1)
self.state[[a,b]] = self.state[[b,a]] | python | def move(self):
"""Swaps two nodes"""
a = random.randint(0, len(self.state) - 1)
b = random.randint(0, len(self.state) - 1)
self.state[[a,b]] = self.state[[b,a]] | [
"def",
"move",
"(",
"self",
")",
":",
"a",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"state",
")",
"-",
"1",
")",
"b",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"state",
")",
"-",
"1",
")",
"self",
".",
"state",
"[",
"[",
"a",
",",
"b",
"]",
"]",
"=",
"self",
".",
"state",
"[",
"[",
"b",
",",
"a",
"]",
"]"
] | Swaps two nodes | [
"Swaps",
"two",
"nodes"
] | d724e6441066622506ddb54d81ee9a1cfd15f766 | https://github.com/skojaku/core-periphery-detection/blob/d724e6441066622506ddb54d81ee9a1cfd15f766/cpalgorithm/Rombach.py#L19-L23 | train | 247 |
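The double-bracket swap above only works because state is a NumPy array; a self-contained restatement of the move (not the real annealer class) makes that visible:

import random
import numpy as np

class MoveDemo:
    """Stand-in with the same move(); state must be an ndarray for fancy indexing."""
    def __init__(self, state):
        self.state = np.asarray(state)
    def move(self):
        a = random.randint(0, len(self.state) - 1)
        b = random.randint(0, len(self.state) - 1)
        self.state[[a, b]] = self.state[[b, a]]  # exchange two positions in one assignment

demo = MoveDemo([0, 1, 2, 3, 4])
demo.move()
print(demo.state)  # same labels, two positions (possibly the same one) swapped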
wbond/certbuilder | certbuilder/__init__.py | CertificateBuilder.self_signed | def self_signed(self, value):
"""
A bool - if the certificate should be self-signed.
"""
self._self_signed = bool(value)
if self._self_signed:
self._issuer = None | python | def self_signed(self, value):
"""
A bool - if the certificate should be self-signed.
"""
self._self_signed = bool(value)
if self._self_signed:
self._issuer = None | [
"def",
"self_signed",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_self_signed",
"=",
"bool",
"(",
"value",
")",
"if",
"self",
".",
"_self_signed",
":",
"self",
".",
"_issuer",
"=",
"None"
] | A bool - if the certificate should be self-signed. | [
"A",
"bool",
"-",
"if",
"the",
"certificate",
"should",
"be",
"self",
"-",
"signed",
"."
] | 969dae884fa7f73988bbf1dcbec4fb51e234a3c5 | https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L122-L130 | train | 248 |
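An end-to-end sketch following certbuilder's README pattern (oscrypto key generation plus a subject dict); treat the exact constructor shape as an assumption:

from oscrypto import asymmetric
from certbuilder import CertificateBuilder

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
builder = CertificateBuilder({'common_name': 'example.local'}, public_key)
builder.self_signed = True  # the setter above also clears any previously set issuer
certificate = builder.build(private_key)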
wbond/certbuilder | certbuilder/__init__.py | CertificateBuilder._get_crl_url | def _get_crl_url(self, distribution_points):
"""
Grabs the first URL out of an asn1crypto.x509.CRLDistributionPoints
object
:param distribution_points:
The x509.CRLDistributionPoints object to pull the URL out of
:return:
A unicode string or None
"""
if distribution_points is None:
return None
for distribution_point in distribution_points:
name = distribution_point['distribution_point']
if name.name == 'full_name' and name.chosen[0].name == 'uniform_resource_identifier':
return name.chosen[0].chosen.native
return None | python | def _get_crl_url(self, distribution_points):
"""
Grabs the first URL out of an asn1crypto.x509.CRLDistributionPoints
object
:param distribution_points:
The x509.CRLDistributionPoints object to pull the URL out of
:return:
A unicode string or None
"""
if distribution_points is None:
return None
for distribution_point in distribution_points:
name = distribution_point['distribution_point']
if name.name == 'full_name' and name.chosen[0].name == 'uniform_resource_identifier':
return name.chosen[0].chosen.native
return None | [
"def",
"_get_crl_url",
"(",
"self",
",",
"distribution_points",
")",
":",
"if",
"distribution_points",
"is",
"None",
":",
"return",
"None",
"for",
"distribution_point",
"in",
"distribution_points",
":",
"name",
"=",
"distribution_point",
"[",
"'distribution_point'",
"]",
"if",
"name",
".",
"name",
"==",
"'full_name'",
"and",
"name",
".",
"chosen",
"[",
"0",
"]",
".",
"name",
"==",
"'uniform_resource_identifier'",
":",
"return",
"name",
".",
"chosen",
"[",
"0",
"]",
".",
"chosen",
".",
"native",
"return",
"None"
] | Grabs the first URL out of an asn1crypto.x509.CRLDistributionPoints
object
:param distribution_points:
The x509.CRLDistributionPoints object to pull the URL out of
:return:
A unicode string or None | [
"Grabs",
"the",
"first",
"URL",
"out",
"of",
"a",
"asn1crypto",
".",
"x509",
".",
"CRLDistributionPoints",
"object"
] | 969dae884fa7f73988bbf1dcbec4fb51e234a3c5 | https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L544-L564 | train | 249 |
wbond/certbuilder | certbuilder/__init__.py | CertificateBuilder.ocsp_no_check | def ocsp_no_check(self, value):
"""
A bool - if the certificate should have the OCSP no check extension.
Only applicable to certificates created for signing OCSP responses.
Such certificates should normally be issued for a very short period of
time since they are effectively whitelisted by clients.
"""
if value is None:
self._ocsp_no_check = None
else:
self._ocsp_no_check = bool(value) | python | def ocsp_no_check(self, value):
"""
A bool - if the certificate should have the OCSP no check extension.
Only applicable to certificates created for signing OCSP responses.
Such certificates should normally be issued for a very short period of
time since they are effectively whitelisted by clients.
"""
if value is None:
self._ocsp_no_check = None
else:
self._ocsp_no_check = bool(value) | [
"def",
"ocsp_no_check",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"self",
".",
"_ocsp_no_check",
"=",
"None",
"else",
":",
"self",
".",
"_ocsp_no_check",
"=",
"bool",
"(",
"value",
")"
] | A bool - if the certificate should have the OCSP no check extension.
Only applicable to certificates created for signing OCSP responses.
Such certificates should normally be issued for a very short period of
time since they are effectively whitelisted by clients. | [
"A",
"bool",
"-",
"if",
"the",
"certificate",
"should",
"have",
"the",
"OCSP",
"no",
"check",
"extension",
".",
"Only",
"applicable",
"to",
"certificates",
"created",
"for",
"signing",
"OCSP",
"responses",
".",
"Such",
"certificates",
"should",
"normally",
"be",
"issued",
"for",
"a",
"very",
"short",
"period",
"of",
"time",
"since",
"they",
"are",
"effectively",
"whitelisted",
"by",
"clients",
"."
] | 969dae884fa7f73988bbf1dcbec4fb51e234a3c5 | https://github.com/wbond/certbuilder/blob/969dae884fa7f73988bbf1dcbec4fb51e234a3c5/certbuilder/__init__.py#L688-L699 | train | 250 |
tell-k/django-modelsdoc | modelsdoc/templatetags/modelsdoc_tags.py | emptylineless | def emptylineless(parser, token):
"""
Removes empty line.
Example usage::
{% emptylineless %}
test1
test2
test3
{% endemptylineless %}
This example would return this HTML::
test1
test2
test3
"""
nodelist = parser.parse(('endemptylineless',))
parser.delete_first_token()
return EmptylinelessNode(nodelist) | python | def emptylineless(parser, token):
"""
Removes empty line.
Example usage::
{% emptylineless %}
test1
test2
test3
{% endemptylineless %}
This example would return this HTML::
test1
test2
test3
"""
nodelist = parser.parse(('endemptylineless',))
parser.delete_first_token()
return EmptylinelessNode(nodelist) | [
"def",
"emptylineless",
"(",
"parser",
",",
"token",
")",
":",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"'endemptylineless'",
",",
")",
")",
"parser",
".",
"delete_first_token",
"(",
")",
"return",
"EmptylinelessNode",
"(",
"nodelist",
")"
] | Removes empty line.
Example usage::
{% emptylineless %}
test1
test2
test3
{% endemptylineless %}
This example would return this HTML::
test1
test2
test3 | [
"Removes",
"empty",
"line",
"."
] | c9d336e76251feb142347b3a41365430d3365436 | https://github.com/tell-k/django-modelsdoc/blob/c9d336e76251feb142347b3a41365430d3365436/modelsdoc/templatetags/modelsdoc_tags.py#L31-L54 | train | 251 |
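A hedged sketch rendering the tag through Django's template engine directly; it assumes Django settings are configured and the modelsdoc tag library is registered:

from django.template import Context, Template

tpl = Template(
    '{% load modelsdoc_tags %}'
    '{% emptylineless %}\n\nline1\n\n\nline2\n{% endemptylineless %}'
)
print(tpl.render(Context({})))  # blank lines between line1 and line2 are removed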
justquick/python-varnish | varnish.py | http_purge_url | def http_purge_url(url):
"""
Do an HTTP PURGE of the given asset.
The URL is run through urlparse and must point to the varnish instance not the varnishadm
"""
url = urlparse(url)
connection = HTTPConnection(url.hostname, url.port or 80)
path = url.path or '/'
connection.request('PURGE', '%s?%s' % (path, url.query) if url.query else path, '',
{'Host': '%s:%s' % (url.hostname, url.port) if url.port else url.hostname})
response = connection.getresponse()
if response.status != 200:
logging.error('Purge failed with status: %s' % response.status)
return response | python | def http_purge_url(url):
"""
Do an HTTP PURGE of the given asset.
The URL is run through urlparse and must point to the varnish instance not the varnishadm
"""
url = urlparse(url)
connection = HTTPConnection(url.hostname, url.port or 80)
path = url.path or '/'
connection.request('PURGE', '%s?%s' % (path, url.query) if url.query else path, '',
{'Host': '%s:%s' % (url.hostname, url.port) if url.port else url.hostname})
response = connection.getresponse()
if response.status != 200:
logging.error('Purge failed with status: %s' % response.status)
return response | [
"def",
"http_purge_url",
"(",
"url",
")",
":",
"url",
"=",
"urlparse",
"(",
"url",
")",
"connection",
"=",
"HTTPConnection",
"(",
"url",
".",
"hostname",
",",
"url",
".",
"port",
"or",
"80",
")",
"path",
"=",
"url",
".",
"path",
"or",
"'/'",
"connection",
".",
"request",
"(",
"'PURGE'",
",",
"'%s?%s'",
"%",
"(",
"path",
",",
"url",
".",
"query",
")",
"if",
"url",
".",
"query",
"else",
"path",
",",
"''",
",",
"{",
"'Host'",
":",
"'%s:%s'",
"%",
"(",
"url",
".",
"hostname",
",",
"url",
".",
"port",
")",
"if",
"url",
".",
"port",
"else",
"url",
".",
"hostname",
"}",
")",
"response",
"=",
"connection",
".",
"getresponse",
"(",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"logging",
".",
"error",
"(",
"'Purge failed with status: %s'",
"%",
"response",
".",
"status",
")",
"return",
"response"
] | Do an HTTP PURGE of the given asset.
The URL is run through urlparse and must point to the varnish instance not the varnishadm | [
"Do",
"an",
"HTTP",
"PURGE",
"of",
"the",
"given",
"asset",
".",
"The",
"URL",
"is",
"run",
"through",
"urlparse",
"and",
"must",
"point",
"to",
"the",
"varnish",
"instance",
"not",
"the",
"varnishadm"
] | 8f114c74898e6c5ade2ce49c8b595040bd150465 | https://github.com/justquick/python-varnish/blob/8f114c74898e6c5ade2ce49c8b595040bd150465/varnish.py#L47-L60 | train | 252 |
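Example purge for the record above; the host and port are placeholders and must point at Varnish's HTTP listener, per the docstring, and the module name is assumed from the repo's varnish.py:

from varnish import http_purge_url

response = http_purge_url('http://varnish.internal:6081/static/app.css?v=3')
print(response.status)  # 200 on a successful purge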
justquick/python-varnish | varnish.py | run | def run(addr, *commands, **kwargs):
"""
Non-threaded batch command runner returning output results
"""
results = []
handler = VarnishHandler(addr, **kwargs)
for cmd in commands:
if isinstance(cmd, tuple) and len(cmd)>1:
results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])
else:
results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))
break
handler.close()
return results | python | def run(addr, *commands, **kwargs):
"""
Non-threaded batch command runner returning output results
"""
results = []
handler = VarnishHandler(addr, **kwargs)
for cmd in commands:
if isinstance(cmd, tuple) and len(cmd)>1:
results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])
else:
results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))
break
handler.close()
return results | [
"def",
"run",
"(",
"addr",
",",
"*",
"commands",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"[",
"]",
"handler",
"=",
"VarnishHandler",
"(",
"addr",
",",
"*",
"*",
"kwargs",
")",
"for",
"cmd",
"in",
"commands",
":",
"if",
"isinstance",
"(",
"cmd",
",",
"tuple",
")",
"and",
"len",
"(",
"cmd",
")",
">",
"1",
":",
"results",
".",
"extend",
"(",
"[",
"getattr",
"(",
"handler",
",",
"c",
"[",
"0",
"]",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")",
"(",
"*",
"c",
"[",
"1",
":",
"]",
")",
"for",
"c",
"in",
"cmd",
"]",
")",
"else",
":",
"results",
".",
"append",
"(",
"getattr",
"(",
"handler",
",",
"cmd",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")",
"(",
"*",
"commands",
"[",
"1",
":",
"]",
")",
")",
"break",
"handler",
".",
"close",
"(",
")",
"return",
"results"
] | Non-threaded batch command runner returning output results | [
"Non",
"-",
"threaded",
"batch",
"command",
"runner",
"returning",
"output",
"results"
] | 8f114c74898e6c5ade2ce49c8b595040bd150465 | https://github.com/justquick/python-varnish/blob/8f114c74898e6c5ade2ce49c8b595040bd150465/varnish.py#L289-L302 | train | 253 |
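Batch sketch against varnishadm's management port; the address is a placeholder and the secret kwarg (forwarded to VarnishHandler) is an assumption about that constructor:

from varnish import run

results = run('127.0.0.1:6082', 'ping', secret='/etc/varnish/secret')
print(results)  # one entry per command; here the reply to 'ping'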
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown.add_stylesheets | def add_stylesheets(self, *css_files):
"""add stylesheet files in HTML head"""
for css_file in css_files:
self.main_soup.style.append(self._text_file(css_file)) | python | def add_stylesheets(self, *css_files):
"""add stylesheet files in HTML head"""
for css_file in css_files:
self.main_soup.style.append(self._text_file(css_file)) | [
"def",
"add_stylesheets",
"(",
"self",
",",
"*",
"css_files",
")",
":",
"for",
"css_file",
"in",
"css_files",
":",
"self",
".",
"main_soup",
".",
"style",
".",
"append",
"(",
"self",
".",
"_text_file",
"(",
"css_file",
")",
")"
] | add stylesheet files in HTML head | [
"add",
"stylesheet",
"files",
"in",
"HTML",
"head"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L43-L46 | train | 254 |
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown.add_javascripts | def add_javascripts(self, *js_files):
"""add javascripts files in HTML body"""
# create the script tag if don't exists
if self.main_soup.script is None:
script_tag = self.main_soup.new_tag('script')
self.main_soup.body.append(script_tag)
for js_file in js_files:
self.main_soup.script.append(self._text_file(js_file)) | python | def add_javascripts(self, *js_files):
"""add javascripts files in HTML body"""
# create the script tag if don't exists
if self.main_soup.script is None:
script_tag = self.main_soup.new_tag('script')
self.main_soup.body.append(script_tag)
for js_file in js_files:
self.main_soup.script.append(self._text_file(js_file)) | [
"def",
"add_javascripts",
"(",
"self",
",",
"*",
"js_files",
")",
":",
"# create the script tag if don't exists",
"if",
"self",
".",
"main_soup",
".",
"script",
"is",
"None",
":",
"script_tag",
"=",
"self",
".",
"main_soup",
".",
"new_tag",
"(",
"'script'",
")",
"self",
".",
"main_soup",
".",
"body",
".",
"append",
"(",
"script_tag",
")",
"for",
"js_file",
"in",
"js_files",
":",
"self",
".",
"main_soup",
".",
"script",
".",
"append",
"(",
"self",
".",
"_text_file",
"(",
"js_file",
")",
")"
] | add javascripts files in HTML body | [
"add",
"javascripts",
"files",
"in",
"HTML",
"body"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L48-L56 | train | 255 |
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown.export | def export(self):
"""return the object in a file"""
with open(self.export_url, 'w', encoding='utf-8') as file:
file.write(self.build())
if self.open_browser:
webbrowser.open_new_tab(self.export_url) | python | def export(self):
"""return the object in a file"""
with open(self.export_url, 'w', encoding='utf-8') as file:
file.write(self.build())
if self.open_browser:
webbrowser.open_new_tab(self.export_url) | [
"def",
"export",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"export_url",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"self",
".",
"build",
"(",
")",
")",
"if",
"self",
".",
"open_browser",
":",
"webbrowser",
".",
"open_new_tab",
"(",
"self",
".",
"export_url",
")"
] | return the object in a file | [
"return",
"the",
"object",
"in",
"a",
"file"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L58-L64 | train | 256 |
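An end-to-end sketch; the attribute names (markdown_text, export_url, open_browser) are read off the methods in these records rather than documented API, so treat the setup as an assumption:

from SuperMarkdown.SuperMarkdown import SuperMarkdown  # import path per the repo layout

doc = SuperMarkdown()
doc.markdown_text = '# Title\n\nSome **markdown** content.'
doc.add_stylesheets('style.css')  # appended into the <style> tag in the head
doc.export_url = 'out.html'
doc.open_browser = False
doc.export()  # writes build() output to out.html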
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown.build | def build(self):
"""convert Markdown text as html. return the html file as string"""
markdown_html = markdown.markdown(self.markdown_text, extensions=[
TocExtension(), 'fenced_code', 'markdown_checklist.extension',
'markdown.extensions.tables'])
markdown_soup = BeautifulSoup(markdown_html, 'html.parser')
# include jquery & mermaid.js only if there are Mermaid graphs
if markdown_soup.find('code', attrs={'class': 'mermaid'}):
self._add_mermaid_js()
# search in markdown html for Dot graphs & replace each with its .svg result
for dot_tag in markdown_soup.find_all('code', attrs={'class': 'dotgraph'}):
grap_svg = self._text_to_graphiz(dot_tag.string)
graph_soup = BeautifulSoup(grap_svg, 'html.parser')
dot_tag.parent.replaceWith(graph_soup)
self.main_soup.body.append(markdown_soup)
return self.main_soup.prettify() | python | def build(self):
"""convert Markdown text as html. return the html file as string"""
markdown_html = markdown.markdown(self.markdown_text, extensions=[
TocExtension(), 'fenced_code', 'markdown_checklist.extension',
'markdown.extensions.tables'])
markdown_soup = BeautifulSoup(markdown_html, 'html.parser')
# include jquery & mermaid.js only if there are Mermaid graphs
if markdown_soup.find('code', attrs={'class': 'mermaid'}):
self._add_mermaid_js()
# search in markdown html for Dot graphs & replace each with its .svg result
for dot_tag in markdown_soup.find_all('code', attrs={'class': 'dotgraph'}):
grap_svg = self._text_to_graphiz(dot_tag.string)
graph_soup = BeautifulSoup(grap_svg, 'html.parser')
dot_tag.parent.replaceWith(graph_soup)
self.main_soup.body.append(markdown_soup)
return self.main_soup.prettify() | [
"def",
"build",
"(",
"self",
")",
":",
"markdown_html",
"=",
"markdown",
".",
"markdown",
"(",
"self",
".",
"markdown_text",
",",
"extensions",
"=",
"[",
"TocExtension",
"(",
")",
",",
"'fenced_code'",
",",
"'markdown_checklist.extension'",
",",
"'markdown.extensions.tables'",
"]",
")",
"markdown_soup",
"=",
"BeautifulSoup",
"(",
"markdown_html",
",",
"'html.parser'",
")",
"# include jquery & mermaid.js only if there are Mermaid graph",
"if",
"markdown_soup",
".",
"find",
"(",
"'code'",
",",
"attrs",
"=",
"{",
"'class'",
":",
"'mermaid'",
"}",
")",
":",
"self",
".",
"_add_mermaid_js",
"(",
")",
"# search in markdown html if there are Dot Graph & replace it with .svg result",
"for",
"dot_tag",
"in",
"markdown_soup",
".",
"find_all",
"(",
"'code'",
",",
"attrs",
"=",
"{",
"'class'",
":",
"'dotgraph'",
"}",
")",
":",
"grap_svg",
"=",
"self",
".",
"_text_to_graphiz",
"(",
"dot_tag",
".",
"string",
")",
"graph_soup",
"=",
"BeautifulSoup",
"(",
"grap_svg",
",",
"'html.parser'",
")",
"dot_tag",
".",
"parent",
".",
"replaceWith",
"(",
"graph_soup",
")",
"self",
".",
"main_soup",
".",
"body",
".",
"append",
"(",
"markdown_soup",
")",
"return",
"self",
".",
"main_soup",
".",
"prettify",
"(",
")"
] | convert Markdown text as html. return the html file as string | [
"convert",
"Markdown",
"text",
"as",
"html",
".",
"return",
"the",
"html",
"file",
"as",
"string"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L66-L84 | train | 257 |
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown._text_file | def _text_file(self, url):
"""return the content of a file"""
try:
with open(url, 'r', encoding='utf-8') as file:
return file.read()
except FileNotFoundError:
print('File `{}` not found'.format(url))
sys.exit(0) | python | def _text_file(self, url):
"""return the content of a file"""
try:
with open(url, 'r', encoding='utf-8') as file:
return file.read()
except FileNotFoundError:
print('File `{}` not found'.format(url))
sys.exit(0) | [
"def",
"_text_file",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"with",
"open",
"(",
"url",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file",
":",
"return",
"file",
".",
"read",
"(",
")",
"except",
"FileNotFoundError",
":",
"print",
"(",
"'File `{}` not found'",
".",
"format",
"(",
"url",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | return the content of a file | [
"return",
"the",
"content",
"of",
"a",
"file"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L86-L93 | train | 258 |
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown._text_to_graphiz | def _text_to_graphiz(self, text):
"""create a graphviz graph from text"""
dot = Source(text, format='svg')
return dot.pipe().decode('utf-8') | python | def _text_to_graphiz(self, text):
"""create a graphviz graph from text"""
dot = Source(text, format='svg')
return dot.pipe().decode('utf-8') | [
"def",
"_text_to_graphiz",
"(",
"self",
",",
"text",
")",
":",
"dot",
"=",
"Source",
"(",
"text",
",",
"format",
"=",
"'svg'",
")",
"return",
"dot",
".",
"pipe",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")"
] | create a graphviz graph from text | [
"create",
"a",
"graphviz",
"graph",
"from",
"text"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L95-L98 | train | 259 |
madeindjs/Super-Markdown | SuperMarkdown/SuperMarkdown.py | SuperMarkdown._add_mermaid_js | def _add_mermaid_js(self):
"""add js libraries and css files of mermaid js_file"""
self.add_javascripts('{}/js/jquery-1.11.3.min.js'.format(self.resources_path))
self.add_javascripts('{}/js/mermaid.min.js'.format(self.resources_path))
self.add_stylesheets('{}/css/mermaid.css'.format(self.resources_path))
self.main_soup.script.append('mermaid.initialize({startOnLoad:true });') | python | def _add_mermaid_js(self):
"""add js libraries and css files of mermaid js_file"""
self.add_javascripts('{}/js/jquery-1.11.3.min.js'.format(self.resources_path))
self.add_javascripts('{}/js/mermaid.min.js'.format(self.resources_path))
self.add_stylesheets('{}/css/mermaid.css'.format(self.resources_path))
self.main_soup.script.append('mermaid.initialize({startOnLoad:true });') | [
"def",
"_add_mermaid_js",
"(",
"self",
")",
":",
"self",
".",
"add_javascripts",
"(",
"'{}/js/jquery-1.11.3.min.js'",
".",
"format",
"(",
"self",
".",
"resources_path",
")",
")",
"self",
".",
"add_javascripts",
"(",
"'{}/js/mermaid.min.js'",
".",
"format",
"(",
"self",
".",
"resources_path",
")",
")",
"self",
".",
"add_stylesheets",
"(",
"'{}/css/mermaid.css'",
".",
"format",
"(",
"self",
".",
"resources_path",
")",
")",
"self",
".",
"main_soup",
".",
"script",
".",
"append",
"(",
"'mermaid.initialize({startOnLoad:true });'",
")"
] | add js libraries and css files of mermaid js_file | [
"add",
"js",
"libraries",
"and",
"css",
"files",
"of",
"mermaid",
"js_file"
] | fe2da746afa6a27aaaad27a2db1dca234f802eb0 | https://github.com/madeindjs/Super-Markdown/blob/fe2da746afa6a27aaaad27a2db1dca234f802eb0/SuperMarkdown/SuperMarkdown.py#L100-L105 | train | 260 |
paul-wolf/strgen | strgen/__init__.py | StringGenerator.getCharacterSet | def getCharacterSet(self):
'''Get a character set with individual members or ranges.
Current index is on '[', the start of the character set.
'''
chars = u''
c = None
cnt = 1
start = 0
while True:
escaped_slash = False
c = self.next()
# print "pattern : ", self.pattern
# print "C : ", c
# print "Slash : ", c == u'\\'
# print 'chars : ', chars
# print 'index : ', self.index
# print 'last : ', self.last()
# print 'lookahead : ', self.lookahead()
if self.lookahead() == u'-' and not c == u'\\':
f = c
self.next() # skip hyphen
c = self.next() # get far range
if not c or (c in self.meta_chars):
raise StringGenerator.SyntaxError(u"unexpected end of class range")
chars += self.getCharacterRange(f, c)
elif c == u'\\':
if self.lookahead() in self.meta_chars:
c = self.next()
chars += c
continue
elif self.lookahead() in self.string_code:
c = self.next()
chars += self.string_code[c]
elif c and c not in self.meta_chars:
chars += c
if c == u']':
if self.lookahead() == u'{':
[start, cnt] = self.getQuantifier()
else:
start = -1
cnt = 1
break
if c and c in self.meta_chars and not self.last() == u"\\":
raise StringGenerator.SyntaxError(u"Un-escaped character in class definition: %s" % c)
if not c:
break
return StringGenerator.CharacterSet(chars, start, cnt) | python | def getCharacterSet(self):
'''Get a character set with individual members or ranges.
Current index is on '[', the start of the character set.
'''
chars = u''
c = None
cnt = 1
start = 0
while True:
escaped_slash = False
c = self.next()
# print "pattern : ", self.pattern
# print "C : ", c
# print "Slash : ", c == u'\\'
# print 'chars : ', chars
# print 'index : ', self.index
# print 'last : ', self.last()
# print 'lookahead : ', self.lookahead()
if self.lookahead() == u'-' and not c == u'\\':
f = c
self.next() # skip hyphen
c = self.next() # get far range
if not c or (c in self.meta_chars):
raise StringGenerator.SyntaxError(u"unexpected end of class range")
chars += self.getCharacterRange(f, c)
elif c == u'\\':
if self.lookahead() in self.meta_chars:
c = self.next()
chars += c
continue
elif self.lookahead() in self.string_code:
c = self.next()
chars += self.string_code[c]
elif c and c not in self.meta_chars:
chars += c
if c == u']':
if self.lookahead() == u'{':
[start, cnt] = self.getQuantifier()
else:
start = -1
cnt = 1
break
if c and c in self.meta_chars and not self.last() == u"\\":
raise StringGenerator.SyntaxError(u"Un-escaped character in class definition: %s" % c)
if not c:
break
return StringGenerator.CharacterSet(chars, start, cnt) | [
"def",
"getCharacterSet",
"(",
"self",
")",
":",
"chars",
"=",
"u''",
"c",
"=",
"None",
"cnt",
"=",
"1",
"start",
"=",
"0",
"while",
"True",
":",
"escaped_slash",
"=",
"False",
"c",
"=",
"self",
".",
"next",
"(",
")",
"# print \"pattern : \", self.pattern",
"# print \"C : \", c",
"# print \"Slash : \", c == u'\\\\'",
"# print 'chars : ', chars",
"# print 'index : ', self.index",
"# print 'last : ', self.last()",
"# print 'lookahead : ', self.lookahead()",
"if",
"self",
".",
"lookahead",
"(",
")",
"==",
"u'-'",
"and",
"not",
"c",
"==",
"u'\\\\'",
":",
"f",
"=",
"c",
"self",
".",
"next",
"(",
")",
"# skip hyphen",
"c",
"=",
"self",
".",
"next",
"(",
")",
"# get far range",
"if",
"not",
"c",
"or",
"(",
"c",
"in",
"self",
".",
"meta_chars",
")",
":",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"unexpected end of class range\"",
")",
"chars",
"+=",
"self",
".",
"getCharacterRange",
"(",
"f",
",",
"c",
")",
"elif",
"c",
"==",
"u'\\\\'",
":",
"if",
"self",
".",
"lookahead",
"(",
")",
"in",
"self",
".",
"meta_chars",
":",
"c",
"=",
"self",
".",
"next",
"(",
")",
"chars",
"+=",
"c",
"continue",
"elif",
"self",
".",
"lookahead",
"(",
")",
"in",
"self",
".",
"string_code",
":",
"c",
"=",
"self",
".",
"next",
"(",
")",
"chars",
"+=",
"self",
".",
"string_code",
"[",
"c",
"]",
"elif",
"c",
"and",
"c",
"not",
"in",
"self",
".",
"meta_chars",
":",
"chars",
"+=",
"c",
"if",
"c",
"==",
"u']'",
":",
"if",
"self",
".",
"lookahead",
"(",
")",
"==",
"u'{'",
":",
"[",
"start",
",",
"cnt",
"]",
"=",
"self",
".",
"getQuantifier",
"(",
")",
"else",
":",
"start",
"=",
"-",
"1",
"cnt",
"=",
"1",
"break",
"if",
"c",
"and",
"c",
"in",
"self",
".",
"meta_chars",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u\"\\\\\"",
":",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Un-escaped character in class definition: %s\"",
"%",
"c",
")",
"if",
"not",
"c",
":",
"break",
"return",
"StringGenerator",
".",
"CharacterSet",
"(",
"chars",
",",
"start",
",",
"cnt",
")"
] | Get a character set with individual members or ranges.
Current index is on '[', the start of the character set. | [
"Get",
"a",
"character",
"set",
"with",
"individual",
"members",
"or",
"ranges",
"."
] | ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f | https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L368-L419 | train | 261 |
paul-wolf/strgen | strgen/__init__.py | StringGenerator.getLiteral | def getLiteral(self):
'''Get a sequence of non-special characters.'''
# we are on the first non-special character
chars = u''
c = self.current()
while True:
if c and c == u"\\":
c = self.next()
if c:
chars += c
continue
elif not c or (c in self.meta_chars):
break
else:
chars += c
if self.lookahead() and self.lookahead() in self.meta_chars:
break
c = self.next()
return StringGenerator.Literal(chars) | python | def getLiteral(self):
'''Get a sequence of non-special characters.'''
# we are on the first non-special character
chars = u''
c = self.current()
while True:
if c and c == u"\\":
c = self.next()
if c:
chars += c
continue
elif not c or (c in self.meta_chars):
break
else:
chars += c
if self.lookahead() and self.lookahead() in self.meta_chars:
break
c = self.next()
return StringGenerator.Literal(chars) | [
"def",
"getLiteral",
"(",
"self",
")",
":",
"# we are on the first non-special character",
"chars",
"=",
"u''",
"c",
"=",
"self",
".",
"current",
"(",
")",
"while",
"True",
":",
"if",
"c",
"and",
"c",
"==",
"u\"\\\\\"",
":",
"c",
"=",
"self",
".",
"next",
"(",
")",
"if",
"c",
":",
"chars",
"+=",
"c",
"continue",
"elif",
"not",
"c",
"or",
"(",
"c",
"in",
"self",
".",
"meta_chars",
")",
":",
"break",
"else",
":",
"chars",
"+=",
"c",
"if",
"self",
".",
"lookahead",
"(",
")",
"and",
"self",
".",
"lookahead",
"(",
")",
"in",
"self",
".",
"meta_chars",
":",
"break",
"c",
"=",
"self",
".",
"next",
"(",
")",
"return",
"StringGenerator",
".",
"Literal",
"(",
"chars",
")"
] | Get a sequence of non-special characters. | [
"Get",
"a",
"sequence",
"of",
"non",
"-",
"special",
"characters",
"."
] | ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f | https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L421-L439 | train | 262 |
paul-wolf/strgen | strgen/__init__.py | StringGenerator.getSequence | def getSequence(self, level=0):
'''Get a sequence of nodes.'''
seq = []
op = ''
left_operand = None
right_operand = None
sequence_closed = False
while True:
c = self.next()
if not c:
break
if c and c not in self.meta_chars:
seq.append(self.getLiteral())
elif c and c == u'$' and self.lookahead() == u'{':
seq.append(self.getSource())
elif c == u'[' and not self.last() == u'\\':
seq.append(self.getCharacterSet())
elif c == u'(' and not self.last() == u'\\':
seq.append(self.getSequence(level + 1))
elif c == u')' and not self.last() == u'\\':
# end of this sequence
if level == 0:
# there should be no parens here
raise StringGenerator.SyntaxError(u"Extra closing parenthesis")
sequence_closed = True
break
elif c == u'|' and not self.last() == u'\\':
op = c
elif c == u'&' and not self.last() == u'\\':
op = c
else:
if c in self.meta_chars and not self.last() == u"\\":
raise StringGenerator.SyntaxError(u"Un-escaped special character: %s" % c)
#print( op,len(seq) )
if op and not left_operand:
if not seq or len(seq) < 1:
raise StringGenerator.SyntaxError(u"Operator: %s with no left operand" % op)
left_operand = seq.pop()
elif op and len(seq) >= 1 and left_operand:
right_operand = seq.pop()
#print( "popped: [%s] %s:%s"%( op, left_operand, right_operand) )
if op == u'|':
seq.append(StringGenerator.SequenceOR([left_operand, right_operand]))
elif op == u'&':
seq.append(StringGenerator.SequenceAND([left_operand, right_operand]))
op = u''
left_operand = None
right_operand = None
# check for syntax errors
if op:
raise StringGenerator.SyntaxError(u"Operator: %s with no right operand" % op)
if level > 0 and not sequence_closed:
# it means we are finishing a non-first-level sequence without closing parens
raise StringGenerator.SyntaxError(u"Missing closing parenthesis")
return StringGenerator.Sequence(seq) | python | def getSequence(self, level=0):
'''Get a sequence of nodes.'''
seq = []
op = ''
left_operand = None
right_operand = None
sequence_closed = False
while True:
c = self.next()
if not c:
break
if c and c not in self.meta_chars:
seq.append(self.getLiteral())
elif c and c == u'$' and self.lookahead() == u'{':
seq.append(self.getSource())
elif c == u'[' and not self.last() == u'\\':
seq.append(self.getCharacterSet())
elif c == u'(' and not self.last() == u'\\':
seq.append(self.getSequence(level + 1))
elif c == u')' and not self.last() == u'\\':
# end of this sequence
if level == 0:
# there should be no parens here
raise StringGenerator.SyntaxError(u"Extra closing parenthesis")
sequence_closed = True
break
elif c == u'|' and not self.last() == u'\\':
op = c
elif c == u'&' and not self.last() == u'\\':
op = c
else:
if c in self.meta_chars and not self.last() == u"\\":
raise StringGenerator.SyntaxError(u"Un-escaped special character: %s" % c)
#print( op,len(seq) )
if op and not left_operand:
if not seq or len(seq) < 1:
raise StringGenerator.SyntaxError(u"Operator: %s with no left operand" % op)
left_operand = seq.pop()
elif op and len(seq) >= 1 and left_operand:
right_operand = seq.pop()
#print( "popped: [%s] %s:%s"%( op, left_operand, right_operand) )
if op == u'|':
seq.append(StringGenerator.SequenceOR([left_operand, right_operand]))
elif op == u'&':
seq.append(StringGenerator.SequenceAND([left_operand, right_operand]))
op = u''
left_operand = None
right_operand = None
# check for syntax errors
if op:
raise StringGenerator.SyntaxError(u"Operator: %s with no right operand" % op)
if level > 0 and not sequence_closed:
# it means we are finishing a non-first-level sequence without closing parens
raise StringGenerator.SyntaxError(u"Missing closing parenthesis")
return StringGenerator.Sequence(seq) | [
"def",
"getSequence",
"(",
"self",
",",
"level",
"=",
"0",
")",
":",
"seq",
"=",
"[",
"]",
"op",
"=",
"''",
"left_operand",
"=",
"None",
"right_operand",
"=",
"None",
"sequence_closed",
"=",
"False",
"while",
"True",
":",
"c",
"=",
"self",
".",
"next",
"(",
")",
"if",
"not",
"c",
":",
"break",
"if",
"c",
"and",
"c",
"not",
"in",
"self",
".",
"meta_chars",
":",
"seq",
".",
"append",
"(",
"self",
".",
"getLiteral",
"(",
")",
")",
"elif",
"c",
"and",
"c",
"==",
"u'$'",
"and",
"self",
".",
"lookahead",
"(",
")",
"==",
"u'{'",
":",
"seq",
".",
"append",
"(",
"self",
".",
"getSource",
"(",
")",
")",
"elif",
"c",
"==",
"u'['",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u'\\\\'",
":",
"seq",
".",
"append",
"(",
"self",
".",
"getCharacterSet",
"(",
")",
")",
"elif",
"c",
"==",
"u'('",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u'\\\\'",
":",
"seq",
".",
"append",
"(",
"self",
".",
"getSequence",
"(",
"level",
"+",
"1",
")",
")",
"elif",
"c",
"==",
"u')'",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u'\\\\'",
":",
"# end of this sequence",
"if",
"level",
"==",
"0",
":",
"# there should be no parens here",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Extra closing parenthesis\"",
")",
"sequence_closed",
"=",
"True",
"break",
"elif",
"c",
"==",
"u'|'",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u'\\\\'",
":",
"op",
"=",
"c",
"elif",
"c",
"==",
"u'&'",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u'\\\\'",
":",
"op",
"=",
"c",
"else",
":",
"if",
"c",
"in",
"self",
".",
"meta_chars",
"and",
"not",
"self",
".",
"last",
"(",
")",
"==",
"u\"\\\\\"",
":",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Un-escaped special character: %s\"",
"%",
"c",
")",
"#print( op,len(seq) )",
"if",
"op",
"and",
"not",
"left_operand",
":",
"if",
"not",
"seq",
"or",
"len",
"(",
"seq",
")",
"<",
"1",
":",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Operator: %s with no left operand\"",
"%",
"op",
")",
"left_operand",
"=",
"seq",
".",
"pop",
"(",
")",
"elif",
"op",
"and",
"len",
"(",
"seq",
")",
">=",
"1",
"and",
"left_operand",
":",
"right_operand",
"=",
"seq",
".",
"pop",
"(",
")",
"#print( \"popped: [%s] %s:%s\"%( op, left_operand, right_operand) )",
"if",
"op",
"==",
"u'|'",
":",
"seq",
".",
"append",
"(",
"StringGenerator",
".",
"SequenceOR",
"(",
"[",
"left_operand",
",",
"right_operand",
"]",
")",
")",
"elif",
"op",
"==",
"u'&'",
":",
"seq",
".",
"append",
"(",
"StringGenerator",
".",
"SequenceAND",
"(",
"[",
"left_operand",
",",
"right_operand",
"]",
")",
")",
"op",
"=",
"u''",
"left_operand",
"=",
"None",
"right_operand",
"=",
"None",
"# check for syntax errors",
"if",
"op",
":",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Operator: %s with no right operand\"",
"%",
"op",
")",
"if",
"level",
">",
"0",
"and",
"not",
"sequence_closed",
":",
"# it means we are finishing a non-first-level sequence without closing parens",
"raise",
"StringGenerator",
".",
"SyntaxError",
"(",
"u\"Missing closing parenthesis\"",
")",
"return",
"StringGenerator",
".",
"Sequence",
"(",
"seq",
")"
] | Get a sequence of nodes. | [
"Get",
"a",
"sequence",
"of",
"nodes",
"."
] | ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f | https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L441-L501 | train | 263 |
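A minimal usage sketch for the parser above. Per the node names in the code, '|' builds a SequenceOR (one operand is chosen when rendering) and '&' builds a SequenceAND; the exact render semantics of SequenceAND are assumed from its name, not shown in this record.

    from strgen import StringGenerator

    # '(foo|bar)' parses to a SequenceOR node: one operand is chosen per render
    print(StringGenerator('(foo|bar)').render())

    # '[0-9]{4}&[a-z]{4}' parses to a SequenceAND node combining both operands
    print(StringGenerator('[0-9]{4}&[a-z]{4}').render())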
paul-wolf/strgen | strgen/__init__.py | StringGenerator.dump | def dump(self, **kwargs):
import sys
'''Print the parse tree and then call render for an example.'''
if not self.seq:
self.seq = self.getSequence()
print("StringGenerator version: %s" % (__version__))
print("Python version: %s" % sys.version)
# this doesn't work anymore in p3
# print("Random method provider class: %s" % randint.im_class.__name__)
self.seq.dump()
return self.render(**kwargs) | python | def dump(self, **kwargs):
import sys
'''Print the parse tree and then call render for an example.'''
if not self.seq:
self.seq = self.getSequence()
print("StringGenerator version: %s" % (__version__))
print("Python version: %s" % sys.version)
# this doesn't work anymore in p3
# print("Random method provider class: %s" % randint.im_class.__name__)
self.seq.dump()
return self.render(**kwargs) | [
"def",
"dump",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"sys",
"if",
"not",
"self",
".",
"seq",
":",
"self",
".",
"seq",
"=",
"self",
".",
"getSequence",
"(",
")",
"print",
"(",
"\"StringGenerator version: %s\"",
"%",
"(",
"__version__",
")",
")",
"print",
"(",
"\"Python version: %s\"",
"%",
"sys",
".",
"version",
")",
"# this doesn't work anymore in p3",
"# print(\"Random method provider class: %s\" % randint.im_class.__name__)",
"self",
".",
"seq",
".",
"dump",
"(",
")",
"return",
"self",
".",
"render",
"(",
"*",
"*",
"kwargs",
")"
] | Print the parse tree and then call render for an example. | [
"Print",
"the",
"parse",
"tree",
"and",
"then",
"call",
"render",
"for",
"an",
"example",
"."
] | ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f | https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L521-L531 | train | 264 |
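A short sketch of calling dump(); per the code above it prints the StringGenerator and Python versions plus the parse tree, then returns one rendered example.

    from strgen import StringGenerator

    example = StringGenerator('[0-9]{8}').dump()  # prints versions and the parse tree
    print(example)                                # one rendered 8-digit string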
paul-wolf/strgen | strgen/__init__.py | StringGenerator.render_list | def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
'''Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==1000
'''
rendered_list = []
i = 0
total_attempts = 0
while True:
if i >= cnt:
break
if total_attempts > cnt * self.unique_attempts_factor:
raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
s = self.render(**kwargs)
if unique:
if not s in rendered_list:
rendered_list.append(s)
i += 1
else:
rendered_list.append(s)
i += 1
total_attempts += 1
# Optionally trigger the progress indicator to inform others about our progress
if progress_callback and callable(progress_callback):
progress_callback(i, cnt)
return rendered_list | python | def render_list(self, cnt, unique=False, progress_callback=None, **kwargs):
'''Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==1000
'''
rendered_list = []
i = 0
total_attempts = 0
while True:
if i >= cnt:
break
if total_attempts > cnt * self.unique_attempts_factor:
raise StringGenerator.UniquenessError(u"couldn't satisfy uniqueness")
s = self.render(**kwargs)
if unique:
if not s in rendered_list:
rendered_list.append(s)
i += 1
else:
rendered_list.append(s)
i += 1
total_attempts += 1
# Optionally trigger the progress indicator to inform others about our progress
if progress_callback and callable(progress_callback):
progress_callback(i, cnt)
return rendered_list | [
"def",
"render_list",
"(",
"self",
",",
"cnt",
",",
"unique",
"=",
"False",
",",
"progress_callback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"rendered_list",
"=",
"[",
"]",
"i",
"=",
"0",
"total_attempts",
"=",
"0",
"while",
"True",
":",
"if",
"i",
">=",
"cnt",
":",
"break",
"if",
"total_attempts",
">",
"cnt",
"*",
"self",
".",
"unique_attempts_factor",
":",
"raise",
"StringGenerator",
".",
"UniquenessError",
"(",
"u\"couldn't satisfy uniqueness\"",
")",
"s",
"=",
"self",
".",
"render",
"(",
"*",
"*",
"kwargs",
")",
"if",
"unique",
":",
"if",
"not",
"s",
"in",
"rendered_list",
":",
"rendered_list",
".",
"append",
"(",
"s",
")",
"i",
"+=",
"1",
"else",
":",
"rendered_list",
".",
"append",
"(",
"s",
")",
"i",
"+=",
"1",
"total_attempts",
"+=",
"1",
"# Optionally trigger the progress indicator to inform others about our progress",
"if",
"progress_callback",
"and",
"callable",
"(",
"progress_callback",
")",
":",
"progress_callback",
"(",
"i",
",",
"cnt",
")",
"return",
"rendered_list"
] | Return a list of generated strings.
Args:
cnt (int): length of list
unique (bool): whether to make entries unique
Returns:
list.
We keep track of total attempts because a template may
specify something impossible to attain, like [1-9]{} with cnt==1000 | [
"Return",
"a",
"list",
"of",
"generated",
"strings",
"."
] | ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f | https://github.com/paul-wolf/strgen/blob/ca1a1484bed5a31dc9ceaef1ab62dd5582cc0d9f/strgen/__init__.py#L533-L570 | train | 265 |
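A usage sketch for render_list using only the names visible in this record: the progress callback fires inside the loop, and UniquenessError is raised once attempts exceed cnt * unique_attempts_factor.

    from strgen import StringGenerator

    def report(done, total):
        print('%d/%d rendered' % (done, total))

    gen = StringGenerator('[0-9]{6}')
    try:
        # ten unique six-digit strings, with progress reported along the way
        codes = gen.render_list(10, unique=True, progress_callback=report)
    except StringGenerator.UniquenessError:
        codes = []  # the template could not yield enough distinct strings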
seperman/s3utils | s3utils/s3utils.py | S3utils.connect | def connect(self):
"""
Establish the connection. This is done automatically for you.
        If you lose the connection, you can manually run this to reconnect.
"""
self.conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)
self.bucket = self.conn.get_bucket(self.AWS_STORAGE_BUCKET_NAME)
self.k = Key(self.bucket) | python | def connect(self):
"""
Establish the connection. This is done automatically for you.
        If you lose the connection, you can manually run this to reconnect.
"""
self.conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL)
self.bucket = self.conn.get_bucket(self.AWS_STORAGE_BUCKET_NAME)
self.k = Key(self.bucket) | [
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"conn",
"=",
"boto",
".",
"connect_s3",
"(",
"self",
".",
"AWS_ACCESS_KEY_ID",
",",
"self",
".",
"AWS_SECRET_ACCESS_KEY",
",",
"debug",
"=",
"self",
".",
"S3UTILS_DEBUG_LEVEL",
")",
"self",
".",
"bucket",
"=",
"self",
".",
"conn",
".",
"get_bucket",
"(",
"self",
".",
"AWS_STORAGE_BUCKET_NAME",
")",
"self",
".",
"k",
"=",
"Key",
"(",
"self",
".",
"bucket",
")"
] | Establish the connection. This is done automatically for you.
        If you lose the connection, you can manually run this to reconnect. | [
"Establish",
"the",
"connection",
".",
"This",
"is",
"done",
"automatically",
"for",
"you",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L145-L155 | train | 266 |
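A construction sketch; the keyword arguments follow the example in this library's own ll() docstring further on, and connect() only needs to be called by hand after a dropped connection.

    from s3utils import S3utils

    s3utils = S3utils(
        AWS_ACCESS_KEY_ID='your access key',
        AWS_SECRET_ACCESS_KEY='your secret key',
        AWS_STORAGE_BUCKET_NAME='your bucket name',
        S3UTILS_DEBUG_LEVEL=1,
    )
    s3utils.connect()  # normally implicit; re-run manually to reconnect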
seperman/s3utils | s3utils/s3utils.py | S3utils.connect_cloudfront | def connect_cloudfront(self):
"Connect to Cloud Front. This is done automatically for you when needed."
self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL) | python | def connect_cloudfront(self):
"Connect to Cloud Front. This is done automatically for you when needed."
self.conn_cloudfront = connect_cloudfront(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, debug=self.S3UTILS_DEBUG_LEVEL) | [
"def",
"connect_cloudfront",
"(",
"self",
")",
":",
"self",
".",
"conn_cloudfront",
"=",
"connect_cloudfront",
"(",
"self",
".",
"AWS_ACCESS_KEY_ID",
",",
"self",
".",
"AWS_SECRET_ACCESS_KEY",
",",
"debug",
"=",
"self",
".",
"S3UTILS_DEBUG_LEVEL",
")"
] | Connect to Cloud Front. This is done automatically for you when needed. | [
"Connect",
"to",
"Cloud",
"Front",
".",
"This",
"is",
"done",
"automatically",
"for",
"you",
"when",
"needed",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L166-L168 | train | 267 |
seperman/s3utils | s3utils/s3utils.py | S3utils.mkdir | def mkdir(self, target_folder):
"""
Create a folder on S3.
Examples
--------
>>> s3utils.mkdir("path/to/my_folder")
Making directory: path/to/my_folder
"""
self.printv("Making directory: %s" % target_folder)
self.k.key = re.sub(r"^/|/$", "", target_folder) + "/"
self.k.set_contents_from_string('')
self.k.close() | python | def mkdir(self, target_folder):
"""
Create a folder on S3.
Examples
--------
>>> s3utils.mkdir("path/to/my_folder")
Making directory: path/to/my_folder
"""
self.printv("Making directory: %s" % target_folder)
self.k.key = re.sub(r"^/|/$", "", target_folder) + "/"
self.k.set_contents_from_string('')
self.k.close() | [
"def",
"mkdir",
"(",
"self",
",",
"target_folder",
")",
":",
"self",
".",
"printv",
"(",
"\"Making directory: %s\"",
"%",
"target_folder",
")",
"self",
".",
"k",
".",
"key",
"=",
"re",
".",
"sub",
"(",
"r\"^/|/$\"",
",",
"\"\"",
",",
"target_folder",
")",
"+",
"\"/\"",
"self",
".",
"k",
".",
"set_contents_from_string",
"(",
"''",
")",
"self",
".",
"k",
".",
"close",
"(",
")"
] | Create a folder on S3.
Examples
--------
>>> s3utils.mkdir("path/to/my_folder")
Making directory: path/to/my_folder | [
"Create",
"a",
"folder",
"on",
"S3",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L171-L183 | train | 268 |
seperman/s3utils | s3utils/s3utils.py | S3utils.rm | def rm(self, path):
"""
Delete the path and anything under the path.
Example
-------
>>> s3utils.rm("path/to/file_or_folder")
"""
list_of_files = list(self.ls(path))
if list_of_files:
if len(list_of_files) == 1:
self.bucket.delete_key(list_of_files[0])
else:
self.bucket.delete_keys(list_of_files)
self.printv("Deleted: %s" % list_of_files)
else:
logger.error("There was nothing to remove under %s", path) | python | def rm(self, path):
"""
Delete the path and anything under the path.
Example
-------
>>> s3utils.rm("path/to/file_or_folder")
"""
list_of_files = list(self.ls(path))
if list_of_files:
if len(list_of_files) == 1:
self.bucket.delete_key(list_of_files[0])
else:
self.bucket.delete_keys(list_of_files)
self.printv("Deleted: %s" % list_of_files)
else:
logger.error("There was nothing to remove under %s", path) | [
"def",
"rm",
"(",
"self",
",",
"path",
")",
":",
"list_of_files",
"=",
"list",
"(",
"self",
".",
"ls",
"(",
"path",
")",
")",
"if",
"list_of_files",
":",
"if",
"len",
"(",
"list_of_files",
")",
"==",
"1",
":",
"self",
".",
"bucket",
".",
"delete_key",
"(",
"list_of_files",
"[",
"0",
"]",
")",
"else",
":",
"self",
".",
"bucket",
".",
"delete_keys",
"(",
"list_of_files",
")",
"self",
".",
"printv",
"(",
"\"Deleted: %s\"",
"%",
"list_of_files",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"There was nothing to remove under %s\"",
",",
"path",
")"
] | Delete the path and anything under the path.
Example
-------
>>> s3utils.rm("path/to/file_or_folder") | [
"Delete",
"the",
"path",
"and",
"anything",
"under",
"the",
"path",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L186-L204 | train | 269 |
seperman/s3utils | s3utils/s3utils.py | S3utils.__put_key | def __put_key(self, local_file, target_file, acl='public-read', del_after_upload=False, overwrite=True, source="filename"):
"""Copy a file to s3."""
action_word = "moving" if del_after_upload else "copying"
try:
self.k.key = target_file # setting the path (key) of file in the container
if source == "filename":
# grabs the contents from local_file address. Note that it loads the whole file into memory
self.k.set_contents_from_filename(local_file, self.AWS_HEADERS)
elif source == "fileobj":
self.k.set_contents_from_file(local_file, self.AWS_HEADERS)
elif source == "string":
self.k.set_contents_from_string(local_file, self.AWS_HEADERS)
else:
raise Exception("%s is not implemented as a source." % source)
self.k.set_acl(acl) # setting the file permissions
self.k.close() # not sure if it is needed. Somewhere I read it is recommended.
self.printv("%s %s to %s" % (action_word, local_file, target_file))
# if it is supposed to delete the local file after uploading
if del_after_upload and source == "filename":
try:
os.remove(local_file)
except:
                logger.error("Unable to delete the file: %s", local_file, exc_info=True)
return True
except:
logger.error("Error in writing to %s", target_file, exc_info=True)
return False | python | def __put_key(self, local_file, target_file, acl='public-read', del_after_upload=False, overwrite=True, source="filename"):
"""Copy a file to s3."""
action_word = "moving" if del_after_upload else "copying"
try:
self.k.key = target_file # setting the path (key) of file in the container
if source == "filename":
# grabs the contents from local_file address. Note that it loads the whole file into memory
self.k.set_contents_from_filename(local_file, self.AWS_HEADERS)
elif source == "fileobj":
self.k.set_contents_from_file(local_file, self.AWS_HEADERS)
elif source == "string":
self.k.set_contents_from_string(local_file, self.AWS_HEADERS)
else:
raise Exception("%s is not implemented as a source." % source)
self.k.set_acl(acl) # setting the file permissions
self.k.close() # not sure if it is needed. Somewhere I read it is recommended.
self.printv("%s %s to %s" % (action_word, local_file, target_file))
# if it is supposed to delete the local file after uploading
if del_after_upload and source == "filename":
try:
os.remove(local_file)
except:
                logger.error("Unable to delete the file: %s", local_file, exc_info=True)
return True
except:
logger.error("Error in writing to %s", target_file, exc_info=True)
return False | [
"def",
"__put_key",
"(",
"self",
",",
"local_file",
",",
"target_file",
",",
"acl",
"=",
"'public-read'",
",",
"del_after_upload",
"=",
"False",
",",
"overwrite",
"=",
"True",
",",
"source",
"=",
"\"filename\"",
")",
":",
"action_word",
"=",
"\"moving\"",
"if",
"del_after_upload",
"else",
"\"copying\"",
"try",
":",
"self",
".",
"k",
".",
"key",
"=",
"target_file",
"# setting the path (key) of file in the container",
"if",
"source",
"==",
"\"filename\"",
":",
"# grabs the contents from local_file address. Note that it loads the whole file into memory",
"self",
".",
"k",
".",
"set_contents_from_filename",
"(",
"local_file",
",",
"self",
".",
"AWS_HEADERS",
")",
"elif",
"source",
"==",
"\"fileobj\"",
":",
"self",
".",
"k",
".",
"set_contents_from_file",
"(",
"local_file",
",",
"self",
".",
"AWS_HEADERS",
")",
"elif",
"source",
"==",
"\"string\"",
":",
"self",
".",
"k",
".",
"set_contents_from_string",
"(",
"local_file",
",",
"self",
".",
"AWS_HEADERS",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"%s is not implemented as a source.\"",
"%",
"source",
")",
"self",
".",
"k",
".",
"set_acl",
"(",
"acl",
")",
"# setting the file permissions",
"self",
".",
"k",
".",
"close",
"(",
")",
"# not sure if it is needed. Somewhere I read it is recommended.",
"self",
".",
"printv",
"(",
"\"%s %s to %s\"",
"%",
"(",
"action_word",
",",
"local_file",
",",
"target_file",
")",
")",
"# if it is supposed to delete the local file after uploading",
"if",
"del_after_upload",
"and",
"source",
"==",
"\"filename\"",
":",
"try",
":",
"os",
".",
"remove",
"(",
"local_file",
")",
"except",
":",
"logger",
".",
"error",
"(",
"\"Unable to delete the file: \"",
",",
"local_file",
",",
"exc_info",
"=",
"True",
")",
"return",
"True",
"except",
":",
"logger",
".",
"error",
"(",
"\"Error in writing to %s\"",
",",
"target_file",
",",
"exc_info",
"=",
"True",
")",
"return",
"False"
] | Copy a file to s3. | [
"Copy",
"a",
"file",
"to",
"s3",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L207-L238 | train | 270 |
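__put_key is private API; below is a rough sketch of the boto pattern it wraps, using only the calls that appear in the record above. bucket stands for an already-opened boto Bucket here.

    from boto.s3.key import Key

    k = Key(bucket)                                   # bucket: an open boto Bucket (assumed)
    k.key = 'target/path/on/s3'                       # destination key in the container
    k.set_contents_from_filename('/local/file.txt')   # or set_contents_from_file / _from_string
    k.set_acl('public-read')                          # same default ACL as __put_key
    k.close()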
seperman/s3utils | s3utils/s3utils.py | S3utils.cp | def cp(self, local_path, target_path, acl='public-read',
del_after_upload=False, overwrite=True, invalidate=False):
"""
Copy a file or folder from local to s3.
Parameters
----------
local_path : string
            Path to a file or folder. To copy only the contents of a folder, add /* at the end of the folder name
target_path : string
Target path on S3 bucket.
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
del_after_upload : boolean, optional
delete the local file after uploading. This is effectively like moving the file.
You can use s3utils.mv instead of s3utils.cp to move files from local to S3.
It basically sets this flag to True.
default = False
overwrite : boolean, optional
overwrites files on S3 if set to True. Default is True
invalidate : boolean, optional
invalidates the CDN (a.k.a Distribution) cache if the file already exists on S3
default = False
            Note that invalidation might take up to 15 minutes to take place. It is easier and faster to use a cache buster
            to grab the latest version of your file on the CDN than to use invalidation.
**Returns**
Nothing on success but it will return what went wrong if something fails.
Examples
--------
>>> s3utils.cp("path/to/folder","/test/")
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When overwrite is set to False, it returns the file(s) that were already existing on s3 and were not overwritten.
>>> s3utils.cp("/tmp/test3.txt", "test3.txt", overwrite=False)
ERROR:root:test3.txt already exist. Not overwriting.
>>> {'existing_files': {'test3.txt'}}
>>> # To overwrite the files on S3 and invalidate the CDN (cloudfront) cache so the new file goes on CDN:
>>> s3utils.cp("path/to/folder","/test/", invalidate=True)
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When file does not exist, it returns a dictionary of what went wrong.
>>> s3utils.cp("/tmp/does_not_exist", "somewhere")
ERROR:root:trying to upload to s3 but file doesn't exist: /tmp/does_not_exist
>>> {'file_does_not_exist': '/tmp/does_not_exist'}
"""
result = None
if overwrite:
list_of_files = []
else:
list_of_files = self.ls(folder=target_path, begin_from_file="", num=-1, get_grants=False, all_grant_data=False)
# copying the contents of the folder and not folder itself
if local_path.endswith("/*"):
local_path = local_path[:-2]
target_path = re.sub(r"^/|/$", "", target_path) # Amazon S3 doesn't let the name to begin with /
# copying folder too
else:
local_base_name = os.path.basename(local_path)
local_path = re.sub(r"/$", "", local_path)
target_path = re.sub(r"^/", "", target_path)
if not target_path.endswith(local_base_name):
target_path = os.path.join(target_path, local_base_name)
if os.path.exists(local_path):
result = self.__find_files_and_copy(local_path, target_path, acl, del_after_upload, overwrite, invalidate, list_of_files)
else:
result = {'file_does_not_exist': local_path}
logger.error("trying to upload to s3 but file doesn't exist: %s" % local_path)
return result | python | def cp(self, local_path, target_path, acl='public-read',
del_after_upload=False, overwrite=True, invalidate=False):
"""
Copy a file or folder from local to s3.
Parameters
----------
local_path : string
            Path to a file or folder. To copy only the contents of a folder, add /* at the end of the folder name
target_path : string
Target path on S3 bucket.
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
del_after_upload : boolean, optional
delete the local file after uploading. This is effectively like moving the file.
You can use s3utils.mv instead of s3utils.cp to move files from local to S3.
It basically sets this flag to True.
default = False
overwrite : boolean, optional
overwrites files on S3 if set to True. Default is True
invalidate : boolean, optional
invalidates the CDN (a.k.a Distribution) cache if the file already exists on S3
default = False
            Note that invalidation might take up to 15 minutes to take place. It is easier and faster to use a cache buster
            to grab the latest version of your file on the CDN than to use invalidation.
**Returns**
Nothing on success but it will return what went wrong if something fails.
Examples
--------
>>> s3utils.cp("path/to/folder","/test/")
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When overwrite is set to False, it returns the file(s) that were already existing on s3 and were not overwritten.
>>> s3utils.cp("/tmp/test3.txt", "test3.txt", overwrite=False)
ERROR:root:test3.txt already exist. Not overwriting.
>>> {'existing_files': {'test3.txt'}}
>>> # To overwrite the files on S3 and invalidate the CDN (cloudfront) cache so the new file goes on CDN:
>>> s3utils.cp("path/to/folder","/test/", invalidate=True)
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When file does not exist, it returns a dictionary of what went wrong.
>>> s3utils.cp("/tmp/does_not_exist", "somewhere")
ERROR:root:trying to upload to s3 but file doesn't exist: /tmp/does_not_exist
>>> {'file_does_not_exist': '/tmp/does_not_exist'}
"""
result = None
if overwrite:
list_of_files = []
else:
list_of_files = self.ls(folder=target_path, begin_from_file="", num=-1, get_grants=False, all_grant_data=False)
# copying the contents of the folder and not folder itself
if local_path.endswith("/*"):
local_path = local_path[:-2]
target_path = re.sub(r"^/|/$", "", target_path) # Amazon S3 doesn't let the name to begin with /
# copying folder too
else:
local_base_name = os.path.basename(local_path)
local_path = re.sub(r"/$", "", local_path)
target_path = re.sub(r"^/", "", target_path)
if not target_path.endswith(local_base_name):
target_path = os.path.join(target_path, local_base_name)
if os.path.exists(local_path):
result = self.__find_files_and_copy(local_path, target_path, acl, del_after_upload, overwrite, invalidate, list_of_files)
else:
result = {'file_does_not_exist': local_path}
logger.error("trying to upload to s3 but file doesn't exist: %s" % local_path)
return result | [
"def",
"cp",
"(",
"self",
",",
"local_path",
",",
"target_path",
",",
"acl",
"=",
"'public-read'",
",",
"del_after_upload",
"=",
"False",
",",
"overwrite",
"=",
"True",
",",
"invalidate",
"=",
"False",
")",
":",
"result",
"=",
"None",
"if",
"overwrite",
":",
"list_of_files",
"=",
"[",
"]",
"else",
":",
"list_of_files",
"=",
"self",
".",
"ls",
"(",
"folder",
"=",
"target_path",
",",
"begin_from_file",
"=",
"\"\"",
",",
"num",
"=",
"-",
"1",
",",
"get_grants",
"=",
"False",
",",
"all_grant_data",
"=",
"False",
")",
"# copying the contents of the folder and not folder itself",
"if",
"local_path",
".",
"endswith",
"(",
"\"/*\"",
")",
":",
"local_path",
"=",
"local_path",
"[",
":",
"-",
"2",
"]",
"target_path",
"=",
"re",
".",
"sub",
"(",
"r\"^/|/$\"",
",",
"\"\"",
",",
"target_path",
")",
"# Amazon S3 doesn't let the name to begin with /",
"# copying folder too",
"else",
":",
"local_base_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"local_path",
")",
"local_path",
"=",
"re",
".",
"sub",
"(",
"r\"/$\"",
",",
"\"\"",
",",
"local_path",
")",
"target_path",
"=",
"re",
".",
"sub",
"(",
"r\"^/\"",
",",
"\"\"",
",",
"target_path",
")",
"if",
"not",
"target_path",
".",
"endswith",
"(",
"local_base_name",
")",
":",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_path",
",",
"local_base_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_path",
")",
":",
"result",
"=",
"self",
".",
"__find_files_and_copy",
"(",
"local_path",
",",
"target_path",
",",
"acl",
",",
"del_after_upload",
",",
"overwrite",
",",
"invalidate",
",",
"list_of_files",
")",
"else",
":",
"result",
"=",
"{",
"'file_does_not_exist'",
":",
"local_path",
"}",
"logger",
".",
"error",
"(",
"\"trying to upload to s3 but file doesn't exist: %s\"",
"%",
"local_path",
")",
"return",
"result"
] | Copy a file or folder from local to s3.
Parameters
----------
local_path : string
            Path to a file or folder. To copy only the contents of a folder, add /* at the end of the folder name
target_path : string
Target path on S3 bucket.
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
del_after_upload : boolean, optional
delete the local file after uploading. This is effectively like moving the file.
You can use s3utils.mv instead of s3utils.cp to move files from local to S3.
It basically sets this flag to True.
default = False
overwrite : boolean, optional
overwrites files on S3 if set to True. Default is True
invalidate : boolean, optional
invalidates the CDN (a.k.a Distribution) cache if the file already exists on S3
default = False
            Note that invalidation might take up to 15 minutes to take place. It is easier and faster to use a cache buster
            to grab the latest version of your file on the CDN than to use invalidation.
**Returns**
Nothing on success but it will return what went wrong if something fails.
Examples
--------
>>> s3utils.cp("path/to/folder","/test/")
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When overwrite is set to False, it returns the file(s) that were already existing on s3 and were not overwritten.
>>> s3utils.cp("/tmp/test3.txt", "test3.txt", overwrite=False)
ERROR:root:test3.txt already exist. Not overwriting.
>>> {'existing_files': {'test3.txt'}}
>>> # To overwrite the files on S3 and invalidate the CDN (cloudfront) cache so the new file goes on CDN:
>>> s3utils.cp("path/to/folder","/test/", invalidate=True)
copying /path/to/myfolder/test2.txt to test/myfolder/test2.txt
copying /path/to/myfolder/test.txt to test/myfolder/test.txt
copying /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
copying /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
>>> # When file does not exist, it returns a dictionary of what went wrong.
>>> s3utils.cp("/tmp/does_not_exist", "somewhere")
ERROR:root:trying to upload to s3 but file doesn't exist: /tmp/does_not_exist
>>> {'file_does_not_exist': '/tmp/does_not_exist'} | [
"Copy",
"a",
"file",
"or",
"folder",
"from",
"local",
"to",
"s3",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L240-L336 | train | 271 |
seperman/s3utils | s3utils/s3utils.py | S3utils.mv | def mv(self, local_file, target_file, acl='public-read', overwrite=True, invalidate=False):
"""
Similar to Linux mv command.
        Moves the file to S3 and deletes the local copy
It is basically s3utils.cp that has del_after_upload=True
Examples
--------
>>> s3utils.mv("path/to/folder","/test/")
moving /path/to/myfolder/test2.txt to test/myfolder/test2.txt
moving /path/to/myfolder/test.txt to test/myfolder/test.txt
moving /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
moving /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
**Returns:**
Nothing on success, otherwise what went wrong.
Return type:
dict
"""
self.cp(local_file, target_file, acl=acl, del_after_upload=True, overwrite=overwrite, invalidate=invalidate) | python | def mv(self, local_file, target_file, acl='public-read', overwrite=True, invalidate=False):
"""
Similar to Linux mv command.
        Moves the file to S3 and deletes the local copy
It is basically s3utils.cp that has del_after_upload=True
Examples
--------
>>> s3utils.mv("path/to/folder","/test/")
moving /path/to/myfolder/test2.txt to test/myfolder/test2.txt
moving /path/to/myfolder/test.txt to test/myfolder/test.txt
moving /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
moving /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
**Returns:**
Nothing on success, otherwise what went wrong.
Return type:
dict
"""
self.cp(local_file, target_file, acl=acl, del_after_upload=True, overwrite=overwrite, invalidate=invalidate) | [
"def",
"mv",
"(",
"self",
",",
"local_file",
",",
"target_file",
",",
"acl",
"=",
"'public-read'",
",",
"overwrite",
"=",
"True",
",",
"invalidate",
"=",
"False",
")",
":",
"self",
".",
"cp",
"(",
"local_file",
",",
"target_file",
",",
"acl",
"=",
"acl",
",",
"del_after_upload",
"=",
"True",
",",
"overwrite",
"=",
"overwrite",
",",
"invalidate",
"=",
"invalidate",
")"
] | Similar to Linux mv command.
        Moves the file to S3 and deletes the local copy
It is basically s3utils.cp that has del_after_upload=True
Examples
--------
>>> s3utils.mv("path/to/folder","/test/")
moving /path/to/myfolder/test2.txt to test/myfolder/test2.txt
moving /path/to/myfolder/test.txt to test/myfolder/test.txt
moving /path/to/myfolder/hoho/photo.JPG to test/myfolder/hoho/photo.JPG
moving /path/to/myfolder/hoho/haha/ff to test/myfolder/hoho/haha/ff
**Returns:**
Nothing on success, otherwise what went wrong.
Return type:
dict | [
"Similar",
"to",
"Linux",
"mv",
"command",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L483-L507 | train | 272 |
seperman/s3utils | s3utils/s3utils.py | S3utils.cp_cropduster_image | def cp_cropduster_image(self, the_image_path, del_after_upload=False, overwrite=False, invalidate=False):
"""
Deal with saving cropduster images to S3. Cropduster is a Django library for resizing editorial images.
        S3utils was originally written to put cropduster images on an S3 bucket.
Extra Items in your Django Settings
-----------------------------------
MEDIA_ROOT : string
Django media root.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket.
S3_ROOT_BASE : string
S3 media root base. This will be the root folder in S3.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket.
"""
local_file = os.path.join(settings.MEDIA_ROOT, the_image_path)
# only try to upload things if the origin cropduster file exists (so it is not already uploaded to the CDN)
if os.path.exists(local_file):
the_image_crops_path = os.path.splitext(the_image_path)[0]
the_image_crops_path_full_path = os.path.join(settings.MEDIA_ROOT, the_image_crops_path)
self.cp(local_path=local_file,
target_path=os.path.join(settings.S3_ROOT_BASE, the_image_path),
del_after_upload=del_after_upload,
overwrite=overwrite,
invalidate=invalidate,
)
self.cp(local_path=the_image_crops_path_full_path + "/*",
target_path=os.path.join(settings.S3_ROOT_BASE, the_image_crops_path),
del_after_upload=del_after_upload,
overwrite=overwrite,
invalidate=invalidate,
) | python | def cp_cropduster_image(self, the_image_path, del_after_upload=False, overwrite=False, invalidate=False):
"""
Deal with saving cropduster images to S3. Cropduster is a Django library for resizing editorial images.
        S3utils was originally written to put cropduster images on an S3 bucket.
Extra Items in your Django Settings
-----------------------------------
MEDIA_ROOT : string
Django media root.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket.
S3_ROOT_BASE : string
S3 media root base. This will be the root folder in S3.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket.
"""
local_file = os.path.join(settings.MEDIA_ROOT, the_image_path)
# only try to upload things if the origin cropduster file exists (so it is not already uploaded to the CDN)
if os.path.exists(local_file):
the_image_crops_path = os.path.splitext(the_image_path)[0]
the_image_crops_path_full_path = os.path.join(settings.MEDIA_ROOT, the_image_crops_path)
self.cp(local_path=local_file,
target_path=os.path.join(settings.S3_ROOT_BASE, the_image_path),
del_after_upload=del_after_upload,
overwrite=overwrite,
invalidate=invalidate,
)
self.cp(local_path=the_image_crops_path_full_path + "/*",
target_path=os.path.join(settings.S3_ROOT_BASE, the_image_crops_path),
del_after_upload=del_after_upload,
overwrite=overwrite,
invalidate=invalidate,
) | [
"def",
"cp_cropduster_image",
"(",
"self",
",",
"the_image_path",
",",
"del_after_upload",
"=",
"False",
",",
"overwrite",
"=",
"False",
",",
"invalidate",
"=",
"False",
")",
":",
"local_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"MEDIA_ROOT",
",",
"the_image_path",
")",
"# only try to upload things if the origin cropduster file exists (so it is not already uploaded to the CDN)",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_file",
")",
":",
"the_image_crops_path",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"the_image_path",
")",
"[",
"0",
"]",
"the_image_crops_path_full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"MEDIA_ROOT",
",",
"the_image_crops_path",
")",
"self",
".",
"cp",
"(",
"local_path",
"=",
"local_file",
",",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"S3_ROOT_BASE",
",",
"the_image_path",
")",
",",
"del_after_upload",
"=",
"del_after_upload",
",",
"overwrite",
"=",
"overwrite",
",",
"invalidate",
"=",
"invalidate",
",",
")",
"self",
".",
"cp",
"(",
"local_path",
"=",
"the_image_crops_path_full_path",
"+",
"\"/*\"",
",",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"S3_ROOT_BASE",
",",
"the_image_crops_path",
")",
",",
"del_after_upload",
"=",
"del_after_upload",
",",
"overwrite",
"=",
"overwrite",
",",
"invalidate",
"=",
"invalidate",
",",
")"
] | Deal with saving cropduster images to S3. Cropduster is a Django library for resizing editorial images.
        S3utils was originally written to put cropduster images on an S3 bucket.
Extra Items in your Django Settings
-----------------------------------
MEDIA_ROOT : string
Django media root.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket.
S3_ROOT_BASE : string
S3 media root base. This will be the root folder in S3.
            Currently it is ONLY used in the cp_cropduster_image method,
            NOT in any other method, as this library was originally made to put Django cropduster images on an S3 bucket. | [
"Deal",
"with",
"saving",
"cropduster",
"images",
"to",
"S3",
".",
"Cropduster",
"is",
"a",
"Django",
"library",
"for",
"resizing",
"editorial",
"images",
".",
"S3utils",
"was",
"originally",
"written",
"to",
"put",
"cropduster",
"images",
"on",
"S3",
"bucket",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L510-L551 | train | 273 |
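A hedged configuration sketch for the two Django settings this method reads; the values below are illustrative, not taken from the source.

    # settings.py -- setting names from the docstring above, values invented
    MEDIA_ROOT = '/srv/project/media'   # where cropduster keeps the local crops
    S3_ROOT_BASE = 'media'              # root folder to use on the S3 bucket

    # elsewhere, with an S3utils instance already constructed:
    s3utils.cp_cropduster_image('img/photo.jpg', overwrite=True)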
seperman/s3utils | s3utils/s3utils.py | S3utils.chmod | def chmod(self, target_file, acl='public-read'):
"""
sets permissions for a file on S3
Parameters
----------
target_file : string
Path to file on S3
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
Examples
--------
>>> s3utils.chmod("path/to/file","private")
"""
self.k.key = target_file # setting the path (key) of file in the container
self.k.set_acl(acl) # setting the file permissions
self.k.close() | python | def chmod(self, target_file, acl='public-read'):
"""
sets permissions for a file on S3
Parameters
----------
target_file : string
Path to file on S3
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
Examples
--------
>>> s3utils.chmod("path/to/file","private")
"""
self.k.key = target_file # setting the path (key) of file in the container
self.k.set_acl(acl) # setting the file permissions
self.k.close() | [
"def",
"chmod",
"(",
"self",
",",
"target_file",
",",
"acl",
"=",
"'public-read'",
")",
":",
"self",
".",
"k",
".",
"key",
"=",
"target_file",
"# setting the path (key) of file in the container",
"self",
".",
"k",
".",
"set_acl",
"(",
"acl",
")",
"# setting the file permissions",
"self",
".",
"k",
".",
"close",
"(",
")"
] | sets permissions for a file on S3
Parameters
----------
target_file : string
Path to file on S3
acl : string, optional
File permissions on S3. Default is public-read
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
                - public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
Examples
--------
>>> s3utils.chmod("path/to/file","private") | [
"sets",
"permissions",
"for",
"a",
"file",
"on",
"S3"
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L582-L610 | train | 274 |
seperman/s3utils | s3utils/s3utils.py | S3utils.ll | def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
"""
Get the list of files and permissions from S3.
This is similar to LL (ls -lah) in Linux: List of files with permissions.
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file : string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
            starting the listing from a certain file and fetching a certain num (number) of files.
all_grant_data : Boolean, optional
More detailed file permission data will be returned.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> import json
>>> # We use json.dumps to print the results more readable:
>>> my_folder_stuff = s3utils.ll("/test/")
>>> print(json.dumps(my_folder_stuff, indent=2))
{
"test/myfolder/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/em/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/.DS_Store": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/haha/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/haha/ff": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/photo.JPG": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
}
"""
return self.ls(folder=folder, begin_from_file=begin_from_file, num=num, get_grants=True, all_grant_data=all_grant_data) | python | def ll(self, folder="", begin_from_file="", num=-1, all_grant_data=False):
"""
Get the list of files and permissions from S3.
This is similar to LL (ls -lah) in Linux: List of files with permissions.
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file : string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
            starting the listing from a certain file and fetching a certain num (number) of files.
all_grant_data : Boolean, optional
More detailed file permission data will be returned.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> import json
>>> # We use json.dumps to print the results more readable:
>>> my_folder_stuff = s3utils.ll("/test/")
>>> print(json.dumps(my_folder_stuff, indent=2))
{
"test/myfolder/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/em/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/.DS_Store": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/haha/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/haha/ff": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/photo.JPG": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
}
"""
return self.ls(folder=folder, begin_from_file=begin_from_file, num=num, get_grants=True, all_grant_data=all_grant_data) | [
"def",
"ll",
"(",
"self",
",",
"folder",
"=",
"\"\"",
",",
"begin_from_file",
"=",
"\"\"",
",",
"num",
"=",
"-",
"1",
",",
"all_grant_data",
"=",
"False",
")",
":",
"return",
"self",
".",
"ls",
"(",
"folder",
"=",
"folder",
",",
"begin_from_file",
"=",
"begin_from_file",
",",
"num",
"=",
"num",
",",
"get_grants",
"=",
"True",
",",
"all_grant_data",
"=",
"all_grant_data",
")"
] | Get the list of files and permissions from S3.
This is similar to LL (ls -lah) in Linux: List of files with permissions.
Parameters
----------
folder : string
Path to file on S3
num: integer, optional
number of results to return, by default it returns all results.
begin_from_file : string, optional
which file to start from on S3.
            This is useful in case you are iterating over lists of files and you need to page the result by
            starting the listing from a certain file and fetching a certain num (number) of files.
all_grant_data : Boolean, optional
More detailed file permission data will be returned.
Examples
--------
>>> from s3utils import S3utils
>>> s3utils = S3utils(
... AWS_ACCESS_KEY_ID = 'your access key',
... AWS_SECRET_ACCESS_KEY = 'your secret key',
... AWS_STORAGE_BUCKET_NAME = 'your bucket name',
... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose
... )
>>> import json
>>> # We use json.dumps to print the results more readable:
>>> my_folder_stuff = s3utils.ll("/test/")
>>> print(json.dumps(my_folder_stuff, indent=2))
{
"test/myfolder/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/em/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/.DS_Store": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/haha/": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
}
],
"test/myfolder/hoho/haha/ff": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
"test/myfolder/hoho/photo.JPG": [
{
"name": "owner's name",
"permission": "FULL_CONTROL"
},
{
"name": null,
"permission": "READ"
}
],
} | [
"Get",
"the",
"list",
"of",
"files",
"and",
"permissions",
"from",
"S3",
"."
] | aea41388a023dcf1e95588402077e31097514cf1 | https://github.com/seperman/s3utils/blob/aea41388a023dcf1e95588402077e31097514cf1/s3utils/s3utils.py#L669-L764 | train | 275 |
uktrade/directory-signature-auth | sigauth/helpers.py | get_path | def get_path(url):
"""
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str
"""
url = urlsplit(url)
path = url.path
if url.query:
path += "?{}".format(url.query)
return path | python | def get_path(url):
"""
Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str
"""
url = urlsplit(url)
path = url.path
if url.query:
path += "?{}".format(url.query)
return path | [
"def",
"get_path",
"(",
"url",
")",
":",
"url",
"=",
"urlsplit",
"(",
"url",
")",
"path",
"=",
"url",
".",
"path",
"if",
"url",
".",
"query",
":",
"path",
"+=",
"\"?{}\"",
".",
"format",
"(",
"url",
".",
"query",
")",
"return",
"path"
] | Get the path from a given url, including the querystring.
Args:
url (str)
Returns:
str | [
"Get",
"the",
"path",
"from",
"a",
"given",
"url",
"including",
"the",
"querystring",
"."
] | 1a1b1e887b25a938133d7bcc146d3fecf1079313 | https://github.com/uktrade/directory-signature-auth/blob/1a1b1e887b25a938133d7bcc146d3fecf1079313/sigauth/helpers.py#L79-L94 | train | 276 |
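A usage sketch for get_path; the import path mirrors this repo's layout (sigauth/helpers.py), and the expected values follow directly from urlsplit.

    from sigauth.helpers import get_path

    get_path('https://example.com/api/items/?page=2')  # -> '/api/items/?page=2'
    get_path('https://example.com/healthcheck')        # -> '/healthcheck'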
vkruoso/receita-tools | receita/tools/build.py | Build.run | def run(self):
"""Reads data from disk and generates CSV files."""
# Try to create the directory
if not os.path.exists(self.output):
try:
os.mkdir(self.output)
except:
print 'failed to create output directory %s' % self.output
# Be sure it is a directory
if not os.path.isdir(self.output):
print 'invalid output directory %s' % self.output
sys.exit(1)
# Create the CSV handlers
visitors = [
_CompaniesCSV(self.output),
_ActivitiesCSV(self.output),
_ActivitiesSeenCSV(self.output),
_QSACSV(self.output),
]
# Run by each company populating the CSV files
for path in glob.glob(os.path.join(self.input, '*.json')):
with open(path, 'r') as f:
try:
data = json.load(f, encoding='utf-8')
except ValueError:
continue
for visitor in visitors:
visitor.visit(data) | python | def run(self):
"""Reads data from disk and generates CSV files."""
# Try to create the directory
if not os.path.exists(self.output):
try:
os.mkdir(self.output)
except:
print 'failed to create output directory %s' % self.output
# Be sure it is a directory
if not os.path.isdir(self.output):
print 'invalid output directory %s' % self.output
sys.exit(1)
# Create the CSV handlers
visitors = [
_CompaniesCSV(self.output),
_ActivitiesCSV(self.output),
_ActivitiesSeenCSV(self.output),
_QSACSV(self.output),
]
# Run by each company populating the CSV files
for path in glob.glob(os.path.join(self.input, '*.json')):
with open(path, 'r') as f:
try:
data = json.load(f, encoding='utf-8')
except ValueError:
continue
for visitor in visitors:
visitor.visit(data) | [
"def",
"run",
"(",
"self",
")",
":",
"# Try to create the directory",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"output",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"self",
".",
"output",
")",
"except",
":",
"print",
"'failed to create output directory %s'",
"%",
"self",
".",
"output",
"# Be sure it is a directory",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"output",
")",
":",
"print",
"'invalid output directory %s'",
"%",
"self",
".",
"output",
"sys",
".",
"exit",
"(",
"1",
")",
"# Create the CSV handlers",
"visitors",
"=",
"[",
"_CompaniesCSV",
"(",
"self",
".",
"output",
")",
",",
"_ActivitiesCSV",
"(",
"self",
".",
"output",
")",
",",
"_ActivitiesSeenCSV",
"(",
"self",
".",
"output",
")",
",",
"_QSACSV",
"(",
"self",
".",
"output",
")",
",",
"]",
"# Run by each company populating the CSV files",
"for",
"path",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"input",
",",
"'*.json'",
")",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"data",
"=",
"json",
".",
"load",
"(",
"f",
",",
"encoding",
"=",
"'utf-8'",
")",
"except",
"ValueError",
":",
"continue",
"for",
"visitor",
"in",
"visitors",
":",
"visitor",
".",
"visit",
"(",
"data",
")"
] | Reads data from disk and generates CSV files. | [
"Reads",
"data",
"from",
"disk",
"and",
"generates",
"CSV",
"files",
"."
] | fd62a252c76541c9feac6470b9048b31348ffe86 | https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/build.py#L144-L175 | train | 277 |
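Build.run only expects each visitor to expose a visit(data) method that receives every successfully decoded company JSON document; a minimal hypothetical visitor is sketched below (the real _CompaniesCSV and friends are not shown in this record).

    class _CountVisitor(object):
        """Hypothetical visitor: counts the company records it is shown."""

        def __init__(self):
            self.seen = 0

        def visit(self, data):
            self.seen += 1  # data is one decoded company document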
marrow/mongo | marrow/mongo/core/index.py | Index.process_fields | def process_fields(self, fields):
"""Process a list of simple string field definitions and assign their order based on prefix."""
result = []
strip = ''.join(self.PREFIX_MAP)
for field in fields:
direction = self.PREFIX_MAP['']
if field[0] in self.PREFIX_MAP:
direction = self.PREFIX_MAP[field[0]]
field = field.lstrip(strip)
result.append((field, direction))
return result | python | def process_fields(self, fields):
"""Process a list of simple string field definitions and assign their order based on prefix."""
result = []
strip = ''.join(self.PREFIX_MAP)
for field in fields:
direction = self.PREFIX_MAP['']
if field[0] in self.PREFIX_MAP:
direction = self.PREFIX_MAP[field[0]]
field = field.lstrip(strip)
result.append((field, direction))
return result | [
"def",
"process_fields",
"(",
"self",
",",
"fields",
")",
":",
"result",
"=",
"[",
"]",
"strip",
"=",
"''",
".",
"join",
"(",
"self",
".",
"PREFIX_MAP",
")",
"for",
"field",
"in",
"fields",
":",
"direction",
"=",
"self",
".",
"PREFIX_MAP",
"[",
"''",
"]",
"if",
"field",
"[",
"0",
"]",
"in",
"self",
".",
"PREFIX_MAP",
":",
"direction",
"=",
"self",
".",
"PREFIX_MAP",
"[",
"field",
"[",
"0",
"]",
"]",
"field",
"=",
"field",
".",
"lstrip",
"(",
"strip",
")",
"result",
".",
"append",
"(",
"(",
"field",
",",
"direction",
")",
")",
"return",
"result"
] | Process a list of simple string field definitions and assign their order based on prefix. | [
"Process",
"a",
"list",
"of",
"simple",
"string",
"field",
"definitions",
"and",
"assign",
"their",
"order",
"based",
"on",
"prefix",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/index.py#L60-L75 | train | 278 |
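A standalone sketch of the prefix convention process_fields implements, assuming a conventional PREFIX_MAP in which a bare name means ascending and '-' means descending; the real map is not shown in this record.

    PREFIX_MAP = {'': 1, '-': -1}  # assumed: '' -> ascending, '-' -> descending

    def process_fields(fields):
        result = []
        strip = ''.join(PREFIX_MAP)  # the prefix characters to strip, here just '-'
        for field in fields:
            direction = PREFIX_MAP['']
            if field[0] in PREFIX_MAP:
                direction = PREFIX_MAP[field[0]]
            field = field.lstrip(strip)
            result.append((field, direction))
        return result

    print(process_fields(['name', '-created']))  # [('name', 1), ('created', -1)]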
svartalf/python-2gis | dgis/__init__.py | API.search_in_rubric | def search_in_rubric(self, **kwargs):
"""Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/
"""
point = kwargs.pop('point', False)
if point:
kwargs['point'] = '%s,%s' % point
bound = kwargs.pop('bound', False)
if bound:
kwargs['bound[point1]'] = bound[0]
kwargs['bound[point2]'] = bound[1]
filters = kwargs.pop('filters', False)
if filters:
for k, v in filters.items():
kwargs['filters[%s]' % k] = v
return self._search_in_rubric(**kwargs) | python | def search_in_rubric(self, **kwargs):
"""Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/
"""
point = kwargs.pop('point', False)
if point:
kwargs['point'] = '%s,%s' % point
bound = kwargs.pop('bound', False)
if bound:
kwargs['bound[point1]'] = bound[0]
kwargs['bound[point2]'] = bound[1]
filters = kwargs.pop('filters', False)
if filters:
for k, v in filters.items():
kwargs['filters[%s]' % k] = v
return self._search_in_rubric(**kwargs) | [
"def",
"search_in_rubric",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"point",
"=",
"kwargs",
".",
"pop",
"(",
"'point'",
",",
"False",
")",
"if",
"point",
":",
"kwargs",
"[",
"'point'",
"]",
"=",
"'%s,%s'",
"%",
"point",
"bound",
"=",
"kwargs",
".",
"pop",
"(",
"'bound'",
",",
"False",
")",
"if",
"bound",
":",
"kwargs",
"[",
"'bound[point1]'",
"]",
"=",
"bound",
"[",
"0",
"]",
"kwargs",
"[",
"'bound[point2]'",
"]",
"=",
"bound",
"[",
"1",
"]",
"filters",
"=",
"kwargs",
".",
"pop",
"(",
"'filters'",
",",
"False",
")",
"if",
"filters",
":",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
":",
"kwargs",
"[",
"'filters[%s]'",
"%",
"k",
"]",
"=",
"v",
"return",
"self",
".",
"_search_in_rubric",
"(",
"*",
"*",
"kwargs",
")"
] | Firms search in rubric
http://api.2gis.ru/doc/firms/searches/searchinrubric/ | [
"Firms",
"search",
"in",
"rubric"
] | 6eccd6073c99494b7abf20b38a5455cbd55d6420 | https://github.com/svartalf/python-2gis/blob/6eccd6073c99494b7abf20b38a5455cbd55d6420/dgis/__init__.py#L89-L109 | train | 279 |
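A call sketch showing how the wrapper flattens its kwargs before delegating to _search_in_rubric; the constructor signature and the 'what' parameter name are assumptions, not confirmed by this record.

    api = API(key='your 2GIS API key')      # hypothetical constructor

    results = api.search_in_rubric(
        what='coffee',                      # passed straight through (assumed name)
        point=(54.98, 82.89),               # becomes point='54.98,82.89'
        bound=('54.9,82.8', '55.1,83.0'),   # becomes bound[point1] / bound[point2]
        filters={'worktime': 'now'},        # becomes filters[worktime]='now'
    )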
tonybaloney/retox | retox/ui.py | RetoxRefreshMixin.refresh | def refresh(self):
'''
Refresh the list and the screen
'''
self._screen.force_update()
self._screen.refresh()
self._update(1) | python | def refresh(self):
'''
Refresh the list and the screen
'''
self._screen.force_update()
self._screen.refresh()
self._update(1) | [
"def",
"refresh",
"(",
"self",
")",
":",
"self",
".",
"_screen",
".",
"force_update",
"(",
")",
"self",
".",
"_screen",
".",
"refresh",
"(",
")",
"self",
".",
"_update",
"(",
"1",
")"
] | Refresh the list and the screen | [
"Refresh",
"the",
"list",
"and",
"the",
"screen"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L54-L60 | train | 280 |
tonybaloney/retox | retox/ui.py | VirtualEnvironmentFrame.start | def start(self, activity, action):
'''
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._start_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self.refresh() | python | def start(self, activity, action):
'''
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._start_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self.refresh() | [
"def",
"start",
"(",
"self",
",",
"activity",
",",
"action",
")",
":",
"try",
":",
"self",
".",
"_start_action",
"(",
"activity",
",",
"action",
")",
"except",
"ValueError",
":",
"retox_log",
".",
"debug",
"(",
"\"Could not find action %s in env %s\"",
"%",
"(",
"activity",
",",
"self",
".",
"name",
")",
")",
"self",
".",
"refresh",
"(",
")"
] | Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action` | [
"Mark",
"an",
"action",
"as",
"started"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L233-L247 | train | 281 |
tonybaloney/retox | retox/ui.py | VirtualEnvironmentFrame.stop | def stop(self, activity, action):
'''
Mark a task as completed
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._remove_running_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self._mark_action_completed(activity, action)
self.refresh() | python | def stop(self, activity, action):
'''
Mark a task as completed
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._remove_running_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self._mark_action_completed(activity, action)
self.refresh() | [
"def",
"stop",
"(",
"self",
",",
"activity",
",",
"action",
")",
":",
"try",
":",
"self",
".",
"_remove_running_action",
"(",
"activity",
",",
"action",
")",
"except",
"ValueError",
":",
"retox_log",
".",
"debug",
"(",
"\"Could not find action %s in env %s\"",
"%",
"(",
"activity",
",",
"self",
".",
"name",
")",
")",
"self",
".",
"_mark_action_completed",
"(",
"activity",
",",
"action",
")",
"self",
".",
"refresh",
"(",
")"
] | Mark a task as completed
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action` | [
"Mark",
"a",
"task",
"as",
"completed"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L249-L264 | train | 282 |
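start() and stop() pair up into a simple task lifecycle. A minimal sketch of how a session might drive a frame; the frame constructor signature is an assumption (it is not shown in this excerpt), and a stub stands in for tox.session.Action.

# Lifecycle sketch; VirtualEnvironmentFrame's constructor args are assumed.
class StubAction(object):
    """Stand-in for tox.session.Action."""
    activity = 'runtests'

frame = VirtualEnvironmentFrame(screen, 'py36')  # assumed signature
action = StubAction()
frame.start('runtests', action)   # appears in the running-task view
# ... subprocess runs ...
frame.stop('runtests', action)    # moved to the completed view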
tonybaloney/retox | retox/ui.py | VirtualEnvironmentFrame.finish | def finish(self, status):
'''
Move laggard tasks over
        :param status: The virtualenv exit status
        :type status: ``str``

'''
retox_log.info("Completing %s with status %s" % (self.name, status))
result = Screen.COLOUR_GREEN if not status else Screen.COLOUR_RED
self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, result)
for item in list(self._task_view.options):
self._task_view.options.remove(item)
self._completed_view.options.append(item)
self.refresh() | python | def finish(self, status):
'''
Move laggard tasks over
        :param status: The virtualenv exit status
        :type status: ``str``
'''
retox_log.info("Completing %s with status %s" % (self.name, status))
result = Screen.COLOUR_GREEN if not status else Screen.COLOUR_RED
self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, result)
for item in list(self._task_view.options):
self._task_view.options.remove(item)
self._completed_view.options.append(item)
self.refresh() | [
"def",
"finish",
"(",
"self",
",",
"status",
")",
":",
"retox_log",
".",
"info",
"(",
"\"Completing %s with status %s\"",
"%",
"(",
"self",
".",
"name",
",",
"status",
")",
")",
"result",
"=",
"Screen",
".",
"COLOUR_GREEN",
"if",
"not",
"status",
"else",
"Screen",
".",
"COLOUR_RED",
"self",
".",
"palette",
"[",
"'title'",
"]",
"=",
"(",
"Screen",
".",
"COLOUR_WHITE",
",",
"Screen",
".",
"A_BOLD",
",",
"result",
")",
"for",
"item",
"in",
"list",
"(",
"self",
".",
"_task_view",
".",
"options",
")",
":",
"self",
".",
"_task_view",
".",
"options",
".",
"remove",
"(",
"item",
")",
"self",
".",
"_completed_view",
".",
"options",
".",
"append",
"(",
"item",
")",
"self",
".",
"refresh",
"(",
")"
] | Move laggard tasks over
:param status: The virtualenv exit status
:type status: ``str`` | [
"Move",
"laggard",
"tasks",
"over"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L266-L279 | train | 283 |
tonybaloney/retox | retox/ui.py | VirtualEnvironmentFrame.reset | def reset(self):
'''
Reset the frame between jobs
'''
self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE)
self._completed_view.options = []
self._task_view.options = []
self.refresh() | python | def reset(self):
'''
Reset the frame between jobs
'''
self.palette['title'] = (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE)
self._completed_view.options = []
self._task_view.options = []
self.refresh() | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"palette",
"[",
"'title'",
"]",
"=",
"(",
"Screen",
".",
"COLOUR_WHITE",
",",
"Screen",
".",
"A_BOLD",
",",
"Screen",
".",
"COLOUR_BLUE",
")",
"self",
".",
"_completed_view",
".",
"options",
"=",
"[",
"]",
"self",
".",
"_task_view",
".",
"options",
"=",
"[",
"]",
"self",
".",
"refresh",
"(",
")"
] | Reset the frame between jobs | [
"Reset",
"the",
"frame",
"between",
"jobs"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L281-L288 | train | 284 |
ryukinix/decorating | decorating/decorator.py | Decorator.default_arguments | def default_arguments(cls):
"""Returns the available kwargs of the called class"""
func = cls.__init__
args = func.__code__.co_varnames
defaults = func.__defaults__
index = -len(defaults)
return {k: v for k, v in zip(args[index:], defaults)} | python | def default_arguments(cls):
"""Returns the available kwargs of the called class"""
func = cls.__init__
args = func.__code__.co_varnames
defaults = func.__defaults__
index = -len(defaults)
return {k: v for k, v in zip(args[index:], defaults)} | [
"def",
"default_arguments",
"(",
"cls",
")",
":",
"func",
"=",
"cls",
".",
"__init__",
"args",
"=",
"func",
".",
"__code__",
".",
"co_varnames",
"defaults",
"=",
"func",
".",
"__defaults__",
"index",
"=",
"-",
"len",
"(",
"defaults",
")",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"args",
"[",
"index",
":",
"]",
",",
"defaults",
")",
"}"
] | Returns the available kwargs of the called class | [
"Returns",
"the",
"available",
"kwargs",
"of",
"the",
"called",
"class"
] | df78c3f87800205701704c0bc0fb9b6bb908ba7e | https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L134-L140 | train | 285 |
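The zip trick works because __defaults__ aligns with the tail of the positional parameters listed in co_varnames. A self-contained demonstration; Spinner is a made-up class used only to illustrate:

# Demonstrating the co_varnames/__defaults__ alignment used above.
class Spinner(object):
    def __init__(self, message='loading', delay=0.1):
        self.message = message
        self.delay = delay

func = Spinner.__init__
args = func.__code__.co_varnames   # ('self', 'message', 'delay')
defaults = func.__defaults__       # ('loading', 0.1)
print({k: v for k, v in zip(args[-len(defaults):], defaults)})
# {'message': 'loading', 'delay': 0.1}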
ryukinix/decorating | decorating/decorator.py | Decorator.recreate | def recreate(cls, *args, **kwargs):
"""Recreate the class based in your args, multiple uses"""
cls.check_arguments(kwargs)
first_is_callable = True if any(args) and callable(args[0]) else False
signature = cls.default_arguments()
allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
if (any(allowed_arguments) or any(args)) and not first_is_callable:
if any(args) and not first_is_callable:
return cls(args[0], **allowed_arguments)
elif any(allowed_arguments):
return cls(**allowed_arguments)
return cls.instances[-1] if any(cls.instances) else cls() | python | def recreate(cls, *args, **kwargs):
"""Recreate the class based in your args, multiple uses"""
cls.check_arguments(kwargs)
first_is_callable = True if any(args) and callable(args[0]) else False
signature = cls.default_arguments()
allowed_arguments = {k: v for k, v in kwargs.items() if k in signature}
if (any(allowed_arguments) or any(args)) and not first_is_callable:
if any(args) and not first_is_callable:
return cls(args[0], **allowed_arguments)
elif any(allowed_arguments):
return cls(**allowed_arguments)
return cls.instances[-1] if any(cls.instances) else cls() | [
"def",
"recreate",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"check_arguments",
"(",
"kwargs",
")",
"first_is_callable",
"=",
"True",
"if",
"any",
"(",
"args",
")",
"and",
"callable",
"(",
"args",
"[",
"0",
"]",
")",
"else",
"False",
"signature",
"=",
"cls",
".",
"default_arguments",
"(",
")",
"allowed_arguments",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
"in",
"signature",
"}",
"if",
"(",
"any",
"(",
"allowed_arguments",
")",
"or",
"any",
"(",
"args",
")",
")",
"and",
"not",
"first_is_callable",
":",
"if",
"any",
"(",
"args",
")",
"and",
"not",
"first_is_callable",
":",
"return",
"cls",
"(",
"args",
"[",
"0",
"]",
",",
"*",
"*",
"allowed_arguments",
")",
"elif",
"any",
"(",
"allowed_arguments",
")",
":",
"return",
"cls",
"(",
"*",
"*",
"allowed_arguments",
")",
"return",
"cls",
".",
"instances",
"[",
"-",
"1",
"]",
"if",
"any",
"(",
"cls",
".",
"instances",
")",
"else",
"cls",
"(",
")"
] | Recreate the class based on your args, multiple uses | [
"Recreate",
"the",
"class",
"based",
"in",
"your",
"args",
"multiple",
"uses"
] | df78c3f87800205701704c0bc0fb9b6bb908ba7e | https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L143-L155 | train | 286 |
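Tracing the branches above gives a behavior sketch. MySpinner stands in for a concrete Decorator subclass whose __init__ accepts message='...'; that instances register themselves in cls.instances at construction is an assumption inferred from the class attribute usage.

# Behavior sketch for recreate(); MySpinner is hypothetical.
a = MySpinner.recreate(message='working')  # known kwarg, no callable: fresh instance
b = MySpinner.recreate()                   # nothing usable passed: newest instance
assert b is MySpinner.instances[-1]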
ryukinix/decorating | decorating/decorator.py | Decorator.check_arguments | def check_arguments(cls, passed):
"""Put warnings of arguments whose can't be handle by the class"""
defaults = list(cls.default_arguments().keys())
template = ("Pass arg {argument:!r} in {cname:!r}, can be a typo? "
"Supported key arguments: {defaults}")
fails = []
for arg in passed:
if arg not in defaults:
warn(template.format(argument=arg,
cname=cls.__name__,
defaults=defaults))
fails.append(arg)
return any(fails) | python | def check_arguments(cls, passed):
"""Put warnings of arguments whose can't be handle by the class"""
defaults = list(cls.default_arguments().keys())
template = ("Pass arg {argument:!r} in {cname:!r}, can be a typo? "
"Supported key arguments: {defaults}")
fails = []
for arg in passed:
if arg not in defaults:
warn(template.format(argument=arg,
cname=cls.__name__,
defaults=defaults))
fails.append(arg)
return any(fails) | [
"def",
"check_arguments",
"(",
"cls",
",",
"passed",
")",
":",
"defaults",
"=",
"list",
"(",
"cls",
".",
"default_arguments",
"(",
")",
".",
"keys",
"(",
")",
")",
"template",
"=",
"(",
"\"Pass arg {argument:!r} in {cname:!r}, can be a typo? \"",
"\"Supported key arguments: {defaults}\"",
")",
"fails",
"=",
"[",
"]",
"for",
"arg",
"in",
"passed",
":",
"if",
"arg",
"not",
"in",
"defaults",
":",
"warn",
"(",
"template",
".",
"format",
"(",
"argument",
"=",
"arg",
",",
"cname",
"=",
"cls",
".",
"__name__",
",",
"defaults",
"=",
"defaults",
")",
")",
"fails",
".",
"append",
"(",
"arg",
")",
"return",
"any",
"(",
"fails",
")"
] | Emit warnings for arguments that can't be handled by the class | [
"Put",
"warnings",
"of",
"arguments",
"whose",
"can",
"t",
"be",
"handle",
"by",
"the",
"class"
] | df78c3f87800205701704c0bc0fb9b6bb908ba7e | https://github.com/ryukinix/decorating/blob/df78c3f87800205701704c0bc0fb9b6bb908ba7e/decorating/decorator.py#L158-L171 | train | 287 |
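Since check_arguments routes through warn (assumed here to be warnings.warn), its effect can be observed with the standard warning machinery. MySpinner is again a hypothetical subclass with keyword defaults.

# Warning demo; assumes warn is warnings.warn and that MySpinner
# defines keyword defaults such as message/delay.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    had_typo = MySpinner.check_arguments({'mesage': 'hi'})  # typo on purpose
assert had_typo
assert 'can be a typo?' in str(caught[0].message)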
ovnicraft/suds2 | suds/builder.py | Builder.process | def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.multi_occurrence():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:]) | python | def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.multi_occurrence():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:]) | [
"def",
"process",
"(",
"self",
",",
"data",
",",
"type",
",",
"history",
")",
":",
"if",
"type",
"in",
"history",
":",
"return",
"if",
"type",
".",
"enum",
"(",
")",
":",
"return",
"history",
".",
"append",
"(",
"type",
")",
"resolved",
"=",
"type",
".",
"resolve",
"(",
")",
"value",
"=",
"None",
"if",
"type",
".",
"multi_occurrence",
"(",
")",
":",
"value",
"=",
"[",
"]",
"else",
":",
"if",
"len",
"(",
"resolved",
")",
">",
"0",
":",
"if",
"resolved",
".",
"mixed",
"(",
")",
":",
"value",
"=",
"Factory",
".",
"property",
"(",
"resolved",
".",
"name",
")",
"md",
"=",
"value",
".",
"__metadata__",
"md",
".",
"sxtype",
"=",
"resolved",
"else",
":",
"value",
"=",
"Factory",
".",
"object",
"(",
"resolved",
".",
"name",
")",
"md",
"=",
"value",
".",
"__metadata__",
"md",
".",
"sxtype",
"=",
"resolved",
"md",
".",
"ordering",
"=",
"self",
".",
"ordering",
"(",
"resolved",
")",
"setattr",
"(",
"data",
",",
"type",
".",
"name",
",",
"value",
")",
"if",
"value",
"is",
"not",
"None",
":",
"data",
"=",
"value",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"self",
".",
"add_attributes",
"(",
"data",
",",
"resolved",
")",
"for",
"child",
",",
"ancestry",
"in",
"resolved",
".",
"children",
"(",
")",
":",
"if",
"self",
".",
"skip_child",
"(",
"child",
",",
"ancestry",
")",
":",
"continue",
"self",
".",
"process",
"(",
"data",
",",
"child",
",",
"history",
"[",
":",
"]",
")"
] | process the specified type then process its children | [
"process",
"the",
"specified",
"type",
"then",
"process",
"its",
"children"
] | e5b540792206a41efc22f5d5b9cfac2dbe7a7992 | https://github.com/ovnicraft/suds2/blob/e5b540792206a41efc22f5d5b9cfac2dbe7a7992/suds/builder.py#L60-L90 | train | 288 |
ovnicraft/suds2 | suds/builder.py | Builder.skip_child | def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False | python | def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False | [
"def",
"skip_child",
"(",
"self",
",",
"child",
",",
"ancestry",
")",
":",
"if",
"child",
".",
"any",
"(",
")",
":",
"return",
"True",
"for",
"x",
"in",
"ancestry",
":",
"if",
"x",
".",
"choice",
"(",
")",
":",
"return",
"True",
"return",
"False"
] | get whether or not to skip the specified child | [
"get",
"whether",
"or",
"not",
"to",
"skip",
"the",
"specified",
"child"
] | e5b540792206a41efc22f5d5b9cfac2dbe7a7992 | https://github.com/ovnicraft/suds2/blob/e5b540792206a41efc22f5d5b9cfac2dbe7a7992/suds/builder.py#L99-L105 | train | 289 |
nephila/django-knocker | knocker/signals.py | active_knocks | def active_knocks(obj):
"""
Checks whether knocks are enabled for the model given as argument
:param obj: model instance
    :return: True if knocks are active
"""
if not hasattr(_thread_locals, 'knock_enabled'):
return True
return _thread_locals.knock_enabled.get(obj.__class__, True) | python | def active_knocks(obj):
"""
Checks whether knocks are enabled for the model given as argument
:param obj: model instance
    :return: True if knocks are active
"""
if not hasattr(_thread_locals, 'knock_enabled'):
return True
return _thread_locals.knock_enabled.get(obj.__class__, True) | [
"def",
"active_knocks",
"(",
"obj",
")",
":",
"if",
"not",
"hasattr",
"(",
"_thread_locals",
",",
"'knock_enabled'",
")",
":",
"return",
"True",
"return",
"_thread_locals",
".",
"knock_enabled",
".",
"get",
"(",
"obj",
".",
"__class__",
",",
"True",
")"
] | Checks whether knocks are enabled for the model given as argument
:param obj: model instance
:return: True if knocks are active | [
"Checks",
"whether",
"knocks",
"are",
"enabled",
"for",
"the",
"model",
"given",
"as",
"argument"
] | d25380d43a1f91285f1581dcf9db8510fe87f354 | https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/signals.py#L34-L43 | train | 290 |
nephila/django-knocker | knocker/signals.py | pause_knocks | def pause_knocks(obj):
"""
Context manager to suspend sending knocks for the given model
:param obj: model instance
"""
if not hasattr(_thread_locals, 'knock_enabled'):
_thread_locals.knock_enabled = {}
obj.__class__._disconnect()
_thread_locals.knock_enabled[obj.__class__] = False
yield
_thread_locals.knock_enabled[obj.__class__] = True
obj.__class__._connect() | python | def pause_knocks(obj):
"""
Context manager to suspend sending knocks for the given model
:param obj: model instance
"""
if not hasattr(_thread_locals, 'knock_enabled'):
_thread_locals.knock_enabled = {}
obj.__class__._disconnect()
_thread_locals.knock_enabled[obj.__class__] = False
yield
_thread_locals.knock_enabled[obj.__class__] = True
obj.__class__._connect() | [
"def",
"pause_knocks",
"(",
"obj",
")",
":",
"if",
"not",
"hasattr",
"(",
"_thread_locals",
",",
"'knock_enabled'",
")",
":",
"_thread_locals",
".",
"knock_enabled",
"=",
"{",
"}",
"obj",
".",
"__class__",
".",
"_disconnect",
"(",
")",
"_thread_locals",
".",
"knock_enabled",
"[",
"obj",
".",
"__class__",
"]",
"=",
"False",
"yield",
"_thread_locals",
".",
"knock_enabled",
"[",
"obj",
".",
"__class__",
"]",
"=",
"True",
"obj",
".",
"__class__",
".",
"_connect",
"(",
")"
] | Context manager to suspend sending knocks for the given model
:param obj: model instance | [
"Context",
"manager",
"to",
"suspend",
"sending",
"knocks",
"for",
"the",
"given",
"model"
] | d25380d43a1f91285f1581dcf9db8510fe87f354 | https://github.com/nephila/django-knocker/blob/d25380d43a1f91285f1581dcf9db8510fe87f354/knocker/signals.py#L47-L59 | train | 291 |
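The bare yield suggests pause_knocks is wrapped with contextlib.contextmanager higher up in the module; assuming that, the two helpers combine as below. Post is a hypothetical knock-enabled model.

# Usage sketch; Post is hypothetical and the contextmanager wrapping of
# pause_knocks is assumed from the bare yield above.
post = Post.objects.get(pk=1)
assert active_knocks(post)           # enabled by default
with pause_knocks(post):
    assert not active_knocks(post)   # suppressed inside the block
    post.save()                      # no knock broadcast for this save
assert active_knocks(post)           # restored on exit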
tonybaloney/retox | retox/reporter.py | RetoxReporter._loopreport | def _loopreport(self):
'''
        Loop over the running actions, reporting progress
'''
while 1:
eventlet.sleep(0.2)
ac2popenlist = {}
for action in self.session._actions:
for popen in action._popenlist:
if popen.poll() is None:
lst = ac2popenlist.setdefault(action.activity, [])
lst.append(popen)
if not action._popenlist and action in self._actionmayfinish:
super(RetoxReporter, self).logaction_finish(action)
self._actionmayfinish.remove(action)
self.screen.draw_next_frame(repeat=False) | python | def _loopreport(self):
'''
        Loop over the running actions, reporting progress
'''
while 1:
eventlet.sleep(0.2)
ac2popenlist = {}
for action in self.session._actions:
for popen in action._popenlist:
if popen.poll() is None:
lst = ac2popenlist.setdefault(action.activity, [])
lst.append(popen)
if not action._popenlist and action in self._actionmayfinish:
super(RetoxReporter, self).logaction_finish(action)
self._actionmayfinish.remove(action)
self.screen.draw_next_frame(repeat=False) | [
"def",
"_loopreport",
"(",
"self",
")",
":",
"while",
"1",
":",
"eventlet",
".",
"sleep",
"(",
"0.2",
")",
"ac2popenlist",
"=",
"{",
"}",
"for",
"action",
"in",
"self",
".",
"session",
".",
"_actions",
":",
"for",
"popen",
"in",
"action",
".",
"_popenlist",
":",
"if",
"popen",
".",
"poll",
"(",
")",
"is",
"None",
":",
"lst",
"=",
"ac2popenlist",
".",
"setdefault",
"(",
"action",
".",
"activity",
",",
"[",
"]",
")",
"lst",
".",
"append",
"(",
"popen",
")",
"if",
"not",
"action",
".",
"_popenlist",
"and",
"action",
"in",
"self",
".",
"_actionmayfinish",
":",
"super",
"(",
"RetoxReporter",
",",
"self",
")",
".",
"logaction_finish",
"(",
"action",
")",
"self",
".",
"_actionmayfinish",
".",
"remove",
"(",
"action",
")",
"self",
".",
"screen",
".",
"draw_next_frame",
"(",
"repeat",
"=",
"False",
")"
] | Loop over the running actions, reporting progress | [
"Loop",
"over",
"the",
"report",
"progress"
] | 4635e31001d2ac083423f46766249ac8daca7c9c | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/reporter.py#L49-L65 | train | 292 |
yejianye/mdmail | mdmail/api.py | send | def send(email, subject=None,
from_email=None, to_email=None,
cc=None, bcc=None, reply_to=None,
smtp=None):
"""Send markdown email
Args:
email (str/obj): A markdown string or EmailContent object
subject (str): subject line
from_email (str): sender email address
to_email (str/list): recipient email addresses
cc (str/list): CC email addresses (string or a list)
bcc (str/list): BCC email addresses (string or a list)
reply_to (str): Reply-to email address
smtp (dict): SMTP configuration (dict)
Schema of smtp dict:
host (str): SMTP server host. Default: localhost
port (int): SMTP server port. Default: 25
tls (bool): Use TLS. Default: False
ssl (bool): Use SSL. Default: False
            user (str): SMTP login user. Default empty
            password (str): SMTP login password. Default empty
"""
if is_string(email):
email = EmailContent(email)
from_email = sanitize_email_address(from_email or email.headers.get('from'))
to_email = sanitize_email_address(to_email or email.headers.get('to'))
cc = sanitize_email_address(cc or email.headers.get('cc'))
bcc = sanitize_email_address(bcc or email.headers.get('bcc'))
reply_to = sanitize_email_address(reply_to or email.headers.get('reply-to'))
message_args = {
'html': email.html,
'text': email.text,
'subject': (subject or email.headers.get('subject', '')),
'mail_from': from_email,
'mail_to': to_email
}
if cc:
message_args['cc'] = cc
if bcc:
message_args['bcc'] = bcc
if reply_to:
message_args['headers'] = {'reply-to': reply_to}
message = emails.Message(**message_args)
for filename, data in email.inline_images:
message.attach(filename=filename, content_disposition='inline', data=data)
message.send(smtp=smtp) | python | def send(email, subject=None,
from_email=None, to_email=None,
cc=None, bcc=None, reply_to=None,
smtp=None):
"""Send markdown email
Args:
email (str/obj): A markdown string or EmailContent object
subject (str): subject line
from_email (str): sender email address
to_email (str/list): recipient email addresses
cc (str/list): CC email addresses (string or a list)
bcc (str/list): BCC email addresses (string or a list)
reply_to (str): Reply-to email address
smtp (dict): SMTP configuration (dict)
Schema of smtp dict:
host (str): SMTP server host. Default: localhost
port (int): SMTP server port. Default: 25
tls (bool): Use TLS. Default: False
ssl (bool): Use SSL. Default: False
            user (str): SMTP login user. Default empty
            password (str): SMTP login password. Default empty
"""
if is_string(email):
email = EmailContent(email)
from_email = sanitize_email_address(from_email or email.headers.get('from'))
to_email = sanitize_email_address(to_email or email.headers.get('to'))
cc = sanitize_email_address(cc or email.headers.get('cc'))
bcc = sanitize_email_address(bcc or email.headers.get('bcc'))
reply_to = sanitize_email_address(reply_to or email.headers.get('reply-to'))
message_args = {
'html': email.html,
'text': email.text,
'subject': (subject or email.headers.get('subject', '')),
'mail_from': from_email,
'mail_to': to_email
}
if cc:
message_args['cc'] = cc
if bcc:
message_args['bcc'] = bcc
if reply_to:
message_args['headers'] = {'reply-to': reply_to}
message = emails.Message(**message_args)
for filename, data in email.inline_images:
message.attach(filename=filename, content_disposition='inline', data=data)
message.send(smtp=smtp) | [
"def",
"send",
"(",
"email",
",",
"subject",
"=",
"None",
",",
"from_email",
"=",
"None",
",",
"to_email",
"=",
"None",
",",
"cc",
"=",
"None",
",",
"bcc",
"=",
"None",
",",
"reply_to",
"=",
"None",
",",
"smtp",
"=",
"None",
")",
":",
"if",
"is_string",
"(",
"email",
")",
":",
"email",
"=",
"EmailContent",
"(",
"email",
")",
"from_email",
"=",
"sanitize_email_address",
"(",
"from_email",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'from'",
")",
")",
"to_email",
"=",
"sanitize_email_address",
"(",
"to_email",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'to'",
")",
")",
"cc",
"=",
"sanitize_email_address",
"(",
"cc",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'cc'",
")",
")",
"bcc",
"=",
"sanitize_email_address",
"(",
"bcc",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'bcc'",
")",
")",
"reply_to",
"=",
"sanitize_email_address",
"(",
"reply_to",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'reply-to'",
")",
")",
"message_args",
"=",
"{",
"'html'",
":",
"email",
".",
"html",
",",
"'text'",
":",
"email",
".",
"text",
",",
"'subject'",
":",
"(",
"subject",
"or",
"email",
".",
"headers",
".",
"get",
"(",
"'subject'",
",",
"''",
")",
")",
",",
"'mail_from'",
":",
"from_email",
",",
"'mail_to'",
":",
"to_email",
"}",
"if",
"cc",
":",
"message_args",
"[",
"'cc'",
"]",
"=",
"cc",
"if",
"bcc",
":",
"message_args",
"[",
"'bcc'",
"]",
"=",
"bcc",
"if",
"reply_to",
":",
"message_args",
"[",
"'headers'",
"]",
"=",
"{",
"'reply-to'",
":",
"reply_to",
"}",
"message",
"=",
"emails",
".",
"Message",
"(",
"*",
"*",
"message_args",
")",
"for",
"filename",
",",
"data",
"in",
"email",
".",
"inline_images",
":",
"message",
".",
"attach",
"(",
"filename",
"=",
"filename",
",",
"content_disposition",
"=",
"'inline'",
",",
"data",
"=",
"data",
")",
"message",
".",
"send",
"(",
"smtp",
"=",
"smtp",
")"
] | Send markdown email
Args:
email (str/obj): A markdown string or EmailContent object
subject (str): subject line
from_email (str): sender email address
to_email (str/list): recipient email addresses
cc (str/list): CC email addresses (string or a list)
bcc (str/list): BCC email addresses (string or a list)
reply_to (str): Reply-to email address
smtp (dict): SMTP configuration (dict)
Schema of smtp dict:
host (str): SMTP server host. Default: localhost
port (int): SMTP server port. Default: 25
tls (bool): Use TLS. Default: False
ssl (bool): Use SSL. Default: False
user (str): SMTP login user. Default empty
password (str): SMTP login password. Default empty | [
"Send",
"markdown",
"email"
] | ef03da8d5836b5ae0a4ad8c44f2fe4936a896644 | https://github.com/yejianye/mdmail/blob/ef03da8d5836b5ae0a4ad8c44f2fe4936a896644/mdmail/api.py#L11-L63 | train | 293 |
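A minimal call; that send() is re-exported at the package level is an assumption. The smtp dict follows the schema spelled out in the docstring above.

# Minimal send; package-level re-export of send() is an assumption.
import mdmail

body = """Hi team,

This **markdown** body becomes the HTML part, with a plain-text fallback.
"""

mdmail.send(
    body,
    subject='Weekly report',
    from_email='reports@example.com',
    to_email='team@example.com',
    smtp={'host': 'localhost', 'port': 25},
)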
marrow/mongo | marrow/mongo/core/field/date.py | Date._process_tz | def _process_tz(self, dt, naive, tz):
"""Process timezone casting and conversion."""
def _tz(t):
if t in (None, 'naive'):
return t
if t == 'local':
if __debug__ and not localtz:
raise ValueError("Requested conversion to local timezone, but `localtz` not installed.")
t = localtz
if not isinstance(t, tzinfo):
if __debug__ and not localtz:
raise ValueError("The `pytz` package must be installed to look up timezone: " + repr(t))
t = get_tz(t)
if not hasattr(t, 'normalize') and get_tz: # Attempt to handle non-pytz tzinfo.
t = get_tz(t.tzname(dt))
return t
naive = _tz(naive)
tz = _tz(tz)
if not dt.tzinfo and naive:
if hasattr(naive, 'localize'):
dt = naive.localize(dt)
else:
dt = dt.replace(tzinfo=naive)
if not tz:
return dt
if hasattr(tz, 'normalize'):
dt = tz.normalize(dt.astimezone(tz))
elif tz == 'naive':
dt = dt.replace(tzinfo=None)
else:
dt = dt.astimezone(tz) # Warning: this might not always be entirely correct!
return dt | python | def _process_tz(self, dt, naive, tz):
"""Process timezone casting and conversion."""
def _tz(t):
if t in (None, 'naive'):
return t
if t == 'local':
if __debug__ and not localtz:
raise ValueError("Requested conversion to local timezone, but `localtz` not installed.")
t = localtz
if not isinstance(t, tzinfo):
if __debug__ and not localtz:
raise ValueError("The `pytz` package must be installed to look up timezone: " + repr(t))
t = get_tz(t)
if not hasattr(t, 'normalize') and get_tz: # Attempt to handle non-pytz tzinfo.
t = get_tz(t.tzname(dt))
return t
naive = _tz(naive)
tz = _tz(tz)
if not dt.tzinfo and naive:
if hasattr(naive, 'localize'):
dt = naive.localize(dt)
else:
dt = dt.replace(tzinfo=naive)
if not tz:
return dt
if hasattr(tz, 'normalize'):
dt = tz.normalize(dt.astimezone(tz))
elif tz == 'naive':
dt = dt.replace(tzinfo=None)
else:
dt = dt.astimezone(tz) # Warning: this might not always be entirely correct!
return dt | [
"def",
"_process_tz",
"(",
"self",
",",
"dt",
",",
"naive",
",",
"tz",
")",
":",
"def",
"_tz",
"(",
"t",
")",
":",
"if",
"t",
"in",
"(",
"None",
",",
"'naive'",
")",
":",
"return",
"t",
"if",
"t",
"==",
"'local'",
":",
"if",
"__debug__",
"and",
"not",
"localtz",
":",
"raise",
"ValueError",
"(",
"\"Requested conversion to local timezone, but `localtz` not installed.\"",
")",
"t",
"=",
"localtz",
"if",
"not",
"isinstance",
"(",
"t",
",",
"tzinfo",
")",
":",
"if",
"__debug__",
"and",
"not",
"localtz",
":",
"raise",
"ValueError",
"(",
"\"The `pytz` package must be installed to look up timezone: \"",
"+",
"repr",
"(",
"t",
")",
")",
"t",
"=",
"get_tz",
"(",
"t",
")",
"if",
"not",
"hasattr",
"(",
"t",
",",
"'normalize'",
")",
"and",
"get_tz",
":",
"# Attempt to handle non-pytz tzinfo.",
"t",
"=",
"get_tz",
"(",
"t",
".",
"tzname",
"(",
"dt",
")",
")",
"return",
"t",
"naive",
"=",
"_tz",
"(",
"naive",
")",
"tz",
"=",
"_tz",
"(",
"tz",
")",
"if",
"not",
"dt",
".",
"tzinfo",
"and",
"naive",
":",
"if",
"hasattr",
"(",
"naive",
",",
"'localize'",
")",
":",
"dt",
"=",
"naive",
".",
"localize",
"(",
"dt",
")",
"else",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"naive",
")",
"if",
"not",
"tz",
":",
"return",
"dt",
"if",
"hasattr",
"(",
"tz",
",",
"'normalize'",
")",
":",
"dt",
"=",
"tz",
".",
"normalize",
"(",
"dt",
".",
"astimezone",
"(",
"tz",
")",
")",
"elif",
"tz",
"==",
"'naive'",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"else",
":",
"dt",
"=",
"dt",
".",
"astimezone",
"(",
"tz",
")",
"# Warning: this might not always be entirely correct!",
"return",
"dt"
] | Process timezone casting and conversion. | [
"Process",
"timezone",
"casting",
"and",
"conversion",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/field/date.py#L59-L102 | train | 294 |
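The localize-then-normalize dance above matches stock pytz usage; a standalone illustration of the same two steps (requires pytz):

# Stock pytz equivalent of the naive-to-target conversion performed above.
from datetime import datetime
import pytz

dt = datetime(2020, 1, 1, 12, 0)                    # naive wall-clock time
eastern, utc = pytz.timezone('US/Eastern'), pytz.utc

localized = eastern.localize(dt)                    # 12:00 EST (-05:00)
converted = utc.normalize(localized.astimezone(utc))
print(converted.isoformat())                        # 2020-01-01T17:00:00+00:00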
marrow/mongo | marrow/mongo/core/document.py | Document._prepare_defaults | def _prepare_defaults(self):
"""Trigger assignment of default values."""
for name, field in self.__fields__.items():
if field.assign:
getattr(self, name) | python | def _prepare_defaults(self):
"""Trigger assignment of default values."""
for name, field in self.__fields__.items():
if field.assign:
getattr(self, name) | [
"def",
"_prepare_defaults",
"(",
"self",
")",
":",
"for",
"name",
",",
"field",
"in",
"self",
".",
"__fields__",
".",
"items",
"(",
")",
":",
"if",
"field",
".",
"assign",
":",
"getattr",
"(",
"self",
",",
"name",
")"
] | Trigger assignment of default values. | [
"Trigger",
"assignment",
"of",
"default",
"values",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L71-L76 | train | 295 |
marrow/mongo | marrow/mongo/core/document.py | Document.from_mongo | def from_mongo(cls, doc):
"""Convert data coming in from the MongoDB wire driver into a Document instance."""
if doc is None: # To support simplified iterative use, None should return None.
return None
if isinstance(doc, Document): # No need to perform processing on existing Document instances.
return doc
if cls.__type_store__ and cls.__type_store__ in doc: # Instantiate specific class mentioned in the data.
cls = load(doc[cls.__type_store__], 'marrow.mongo.document')
# Prepare a new instance in such a way that changes to the instance will be reflected in the originating doc.
instance = cls(_prepare_defaults=False) # Construct an instance, but delay default value processing.
instance.__data__ = doc # I am Popeye of Borg (pattern); you will be askimilgrated.
instance._prepare_defaults() # pylint:disable=protected-access -- deferred default value processing.
return instance | python | def from_mongo(cls, doc):
"""Convert data coming in from the MongoDB wire driver into a Document instance."""
if doc is None: # To support simplified iterative use, None should return None.
return None
if isinstance(doc, Document): # No need to perform processing on existing Document instances.
return doc
if cls.__type_store__ and cls.__type_store__ in doc: # Instantiate specific class mentioned in the data.
cls = load(doc[cls.__type_store__], 'marrow.mongo.document')
# Prepare a new instance in such a way that changes to the instance will be reflected in the originating doc.
instance = cls(_prepare_defaults=False) # Construct an instance, but delay default value processing.
instance.__data__ = doc # I am Popeye of Borg (pattern); you will be askimilgrated.
instance._prepare_defaults() # pylint:disable=protected-access -- deferred default value processing.
return instance | [
"def",
"from_mongo",
"(",
"cls",
",",
"doc",
")",
":",
"if",
"doc",
"is",
"None",
":",
"# To support simplified iterative use, None should return None.",
"return",
"None",
"if",
"isinstance",
"(",
"doc",
",",
"Document",
")",
":",
"# No need to perform processing on existing Document instances.",
"return",
"doc",
"if",
"cls",
".",
"__type_store__",
"and",
"cls",
".",
"__type_store__",
"in",
"doc",
":",
"# Instantiate specific class mentioned in the data.",
"cls",
"=",
"load",
"(",
"doc",
"[",
"cls",
".",
"__type_store__",
"]",
",",
"'marrow.mongo.document'",
")",
"# Prepare a new instance in such a way that changes to the instance will be reflected in the originating doc.",
"instance",
"=",
"cls",
"(",
"_prepare_defaults",
"=",
"False",
")",
"# Construct an instance, but delay default value processing.",
"instance",
".",
"__data__",
"=",
"doc",
"# I am Popeye of Borg (pattern); you will be askimilgrated.",
"instance",
".",
"_prepare_defaults",
"(",
")",
"# pylint:disable=protected-access -- deferred default value processing.",
"return",
"instance"
] | Convert data coming in from the MongoDB wire driver into a Document instance. | [
"Convert",
"data",
"coming",
"in",
"from",
"the",
"MongoDB",
"wire",
"driver",
"into",
"a",
"Document",
"instance",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L81-L98 | train | 296 |
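Usage sketch; MyDocument and the PyMongo collection are assumptions, but the None and pass-through short circuits come straight from the code above.

# Round-trip sketch: wire dicts in, Document instances out.
raw = collection.find_one()                   # plain dict from PyMongo (assumed)
doc = MyDocument.from_mongo(raw)              # wraps, and shares, that dict
assert MyDocument.from_mongo(doc) is doc      # instances pass through untouched
assert MyDocument.from_mongo(None) is None    # convenient when iterating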
marrow/mongo | marrow/mongo/core/document.py | Document.pop | def pop(self, name, default=SENTINEL):
"""Retrieve and remove a value from the backing store, optionally with a default."""
if default is SENTINEL:
return self.__data__.pop(name)
return self.__data__.pop(name, default) | python | def pop(self, name, default=SENTINEL):
"""Retrieve and remove a value from the backing store, optionally with a default."""
if default is SENTINEL:
return self.__data__.pop(name)
return self.__data__.pop(name, default) | [
"def",
"pop",
"(",
"self",
",",
"name",
",",
"default",
"=",
"SENTINEL",
")",
":",
"if",
"default",
"is",
"SENTINEL",
":",
"return",
"self",
".",
"__data__",
".",
"pop",
"(",
"name",
")",
"return",
"self",
".",
"__data__",
".",
"pop",
"(",
"name",
",",
"default",
")"
] | Retrieve and remove a value from the backing store, optionally with a default. | [
"Retrieve",
"and",
"remove",
"a",
"value",
"from",
"the",
"backing",
"store",
"optionally",
"with",
"a",
"default",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/document.py#L246-L252 | train | 297 |
marrow/mongo | marrow/mongo/query/query.py | Q._op | def _op(self, operation, other, *allowed):
"""A basic operation operating on a single value."""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._op(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
        # Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
if other is not None:
other = f.transformer.foreign(other, (f, self._document))
return Filter({self._name: {operation: other}}) | python | def _op(self, operation, other, *allowed):
"""A basic operation operating on a single value."""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._op(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
        # Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(self=self, op=operation))
if other is not None:
other = f.transformer.foreign(other, (f, self._document))
return Filter({self._name: {operation: other}}) | [
"def",
"_op",
"(",
"self",
",",
"operation",
",",
"other",
",",
"*",
"allowed",
")",
":",
"f",
"=",
"self",
".",
"_field",
"if",
"self",
".",
"_combining",
":",
"# We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).",
"return",
"reduce",
"(",
"self",
".",
"_combining",
",",
"(",
"q",
".",
"_op",
"(",
"operation",
",",
"other",
",",
"*",
"allowed",
")",
"for",
"q",
"in",
"f",
")",
")",
"# pylint:disable=protected-access",
"# Optimize this away in production; diagnosic aide.",
"if",
"__debug__",
"and",
"_complex_safety_check",
"(",
"f",
",",
"{",
"operation",
"}",
"|",
"set",
"(",
"allowed",
")",
")",
":",
"# pragma: no cover",
"raise",
"NotImplementedError",
"(",
"\"{self!r} does not allow {op} comparison.\"",
".",
"format",
"(",
"self",
"=",
"self",
",",
"op",
"=",
"operation",
")",
")",
"if",
"other",
"is",
"not",
"None",
":",
"other",
"=",
"f",
".",
"transformer",
".",
"foreign",
"(",
"other",
",",
"(",
"f",
",",
"self",
".",
"_document",
")",
")",
"return",
"Filter",
"(",
"{",
"self",
".",
"_name",
":",
"{",
"operation",
":",
"other",
"}",
"}",
")"
] | A basic operation operating on a single value. | [
"A",
"basic",
"operation",
"operating",
"on",
"a",
"single",
"value",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L154-L170 | train | 298 |
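A hedged sketch of the fragment a single-value comparison yields. The document and field classes are assumptions (imports elided), and the routing of > through _op('$gt', ...) is inferred from the operator naming, not verified.

# Assumed field/document classes; illustrates the emitted filter shape.
class Account(Document):
    age = Number()   # hypothetical field declaration

q = Account.age > 18      # inferred to route through _op('$gt', 18, ...)
# dict(q) == {'age': {'$gt': 18}}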
marrow/mongo | marrow/mongo/query/query.py | Q._iop | def _iop(self, operation, other, *allowed):
"""An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution.
"""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
        # Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(
self=self, op=operation))
def _t(o):
for value in o:
yield None if value is None else f.transformer.foreign(value, (f, self._document))
other = other if len(other) > 1 else other[0]
values = list(_t(other))
return Filter({self._name: {operation: values}}) | python | def _iop(self, operation, other, *allowed):
"""An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution.
"""
f = self._field
if self._combining: # We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).
return reduce(self._combining,
(q._iop(operation, other, *allowed) for q in f)) # pylint:disable=protected-access
        # Optimize this away in production; diagnostic aid.
if __debug__ and _complex_safety_check(f, {operation} | set(allowed)): # pragma: no cover
raise NotImplementedError("{self!r} does not allow {op} comparison.".format(
self=self, op=operation))
def _t(o):
for value in o:
yield None if value is None else f.transformer.foreign(value, (f, self._document))
other = other if len(other) > 1 else other[0]
values = list(_t(other))
return Filter({self._name: {operation: values}}) | [
"def",
"_iop",
"(",
"self",
",",
"operation",
",",
"other",
",",
"*",
"allowed",
")",
":",
"f",
"=",
"self",
".",
"_field",
"if",
"self",
".",
"_combining",
":",
"# We are a field-compound query fragment, e.g. (Foo.bar & Foo.baz).",
"return",
"reduce",
"(",
"self",
".",
"_combining",
",",
"(",
"q",
".",
"_iop",
"(",
"operation",
",",
"other",
",",
"*",
"allowed",
")",
"for",
"q",
"in",
"f",
")",
")",
"# pylint:disable=protected-access",
"# Optimize this away in production; diagnosic aide.",
"if",
"__debug__",
"and",
"_complex_safety_check",
"(",
"f",
",",
"{",
"operation",
"}",
"|",
"set",
"(",
"allowed",
")",
")",
":",
"# pragma: no cover",
"raise",
"NotImplementedError",
"(",
"\"{self!r} does not allow {op} comparison.\"",
".",
"format",
"(",
"self",
"=",
"self",
",",
"op",
"=",
"operation",
")",
")",
"def",
"_t",
"(",
"o",
")",
":",
"for",
"value",
"in",
"o",
":",
"yield",
"None",
"if",
"value",
"is",
"None",
"else",
"f",
".",
"transformer",
".",
"foreign",
"(",
"value",
",",
"(",
"f",
",",
"self",
".",
"_document",
")",
")",
"other",
"=",
"other",
"if",
"len",
"(",
"other",
")",
">",
"1",
"else",
"other",
"[",
"0",
"]",
"values",
"=",
"list",
"(",
"_t",
"(",
"other",
")",
")",
"return",
"Filter",
"(",
"{",
"self",
".",
"_name",
":",
"{",
"operation",
":",
"values",
"}",
"}",
")"
] | An iterative operation operating on multiple values.
Consumes iterators to construct a concrete list at time of execution. | [
"An",
"iterative",
"operation",
"operating",
"on",
"multiple",
"values",
".",
"Consumes",
"iterators",
"to",
"construct",
"a",
"concrete",
"list",
"at",
"time",
"of",
"execution",
"."
] | 2066dc73e281b8a46cb5fc965267d6b8e1b18467 | https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L172-L196 | train | 299 |
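Calling the helper directly shows the eager consumption; this continues the hypothetical Account document sketched after the _op record above.

# Direct call sketch, reusing the assumed Account document.
q = Account.age._iop('$in', (18, 19, 20))
# len(other) > 1, so each value is transformed and collected eagerly;
# a single iterable argument would instead be unwrapped via other[0] and drained.
# dict(q) == {'age': {'$in': [18, 19, 20]}}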