column                  type            range / values
hexsha                  stringlengths   40 – 40
repo                    stringlengths   5 – 121
path                    stringlengths   4 – 227
license                 sequence
language                stringclasses   1 value
identifier              stringlengths   1 – 107
return_type             stringlengths   2 – 237
original_string         stringlengths   75 – 13.4k
original_docstring      stringlengths   13 – 12.9k
docstring               stringlengths   13 – 2.57k
docstring_tokens        sequence
code                    stringlengths   23 – 1.88k
code_tokens             sequence
short_docstring         stringlengths   1 – 1.32k
short_docstring_tokens  sequence
comment                 sequence
parameters              list
docstring_params        dict
code_with_imports       stringlengths   23 – 1.88k
idxs                    int64           0 – 611k
cluster                 int64           0 – 1.02k
e474fbafcc2a622702fc14ec787cca84d3abda75
RensselaerRocketSociety/Watney
lambda_function.py
[ "MIT" ]
Python
construct_json
<not_specific>
def construct_json(**kwargs):
    '''
    Given keyword arguments, construct a JSON string based on that.

    Example:

        construct_json(text='\'Hello\', "world"')

    should return

        {
            "text": "'Hello', \"world\""
        }
    '''
    return json.dumps(kwargs)
Given keyword arguments, construct a JSON string based on that. Example: construct_json(text='\'Hello\', "world"') should return { "text": "'Hello', \"world\"" }
Given keyword arguments, construct a JSON string based on that.
[ "Given", "keyword", "arguments", "construct", "a", "JSON", "string", "based", "on", "that", "." ]
def construct_json(**kwargs):
    return json.dumps(kwargs)
[ "def", "construct_json", "(", "**", "kwargs", ")", ":", "return", "json", ".", "dumps", "(", "kwargs", ")" ]
Given keyword arguments, construct a JSON string based on that.
[ "Given", "keyword", "arguments", "construct", "a", "JSON", "string", "based", "on", "that", "." ]
[ "'''\n Given keyword arguments, construct a JSON string based on that.\n\n Example:\n \n construct_json(text='\\'Hello\\', \"world\"') \n \n should return \n\n {\n \"text\": \"'Hello', \\\"world\\\"\"\n }\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
import json

def construct_json(**kwargs):
    return json.dumps(kwargs)
338
430
ae708334d5527ab235a4b9ecea38ba47d0f4ffd5
samuelcolvin/em2
em2/contacts.py
[ "MIT" ]
Python
add_contacts
null
async def add_contacts(conns: Connections, conv_id: int, actor_id: int):
    """
    Every participant in the conversation should be a contact of an actor in that conversation, they might have been in
    the conversation before or the actor might just have added them, either way they should be a contact of the actor.
    """
    await conns.main.execute(
        """
        insert into contacts (owner, profile_user)
        (
          select $2, user_id from participants
          where conv = $1 and user_id != $2 and removal_action_id is null
        )
        on conflict (owner, profile_user) do nothing
        """,
        conv_id,
        actor_id,
    )
Every participant in the conversation should be a contact of an actor in that conversation, they might have been in the conversation before or the actor might just have added them, either way they should be a contact of the actor.
Every participant in the conversation should be a contact of an actor in that conversation, they might have been in the conversation before or the actor might just have added them, either way they should be a contact of the actor.
[ "Every", "participant", "in", "the", "conversation", "should", "be", "a", "contact", "of", "an", "actor", "in", "that", "conversation", "they", "might", "have", "been", "in", "the", "conversation", "before", "or", "the", "actor", "might", "just", "have", "added", "them", "either", "way", "they", "should", "be", "a", "contact", "of", "the", "actor", "." ]
async def add_contacts(conns: Connections, conv_id: int, actor_id: int):
    await conns.main.execute(
        """
        insert into contacts (owner, profile_user)
        (
          select $2, user_id from participants
          where conv = $1 and user_id != $2 and removal_action_id is null
        )
        on conflict (owner, profile_user) do nothing
        """,
        conv_id,
        actor_id,
    )
[ "async", "def", "add_contacts", "(", "conns", ":", "Connections", ",", "conv_id", ":", "int", ",", "actor_id", ":", "int", ")", ":", "await", "conns", ".", "main", ".", "execute", "(", "\"\"\"\n insert into contacts (owner, profile_user)\n (\n select $2, user_id from participants\n where conv = $1 and user_id != $2 and removal_action_id is null\n )\n on conflict (owner, profile_user) do nothing\n \"\"\"", ",", "conv_id", ",", "actor_id", ",", ")" ]
Every participant in the conversation should be a contact of an actor in that conversation, they might have been in the conversation before or the actor might just have added them, either way they should be a contact of the actor.
[ "Every", "participant", "in", "the", "conversation", "should", "be", "a", "contact", "of", "an", "actor", "in", "that", "conversation", "they", "might", "have", "been", "in", "the", "conversation", "before", "or", "the", "actor", "might", "just", "have", "added", "them", "either", "way", "they", "should", "be", "a", "contact", "of", "the", "actor", "." ]
[ "\"\"\"\n Every participant in the conversation should be a contact of an actor in that conversation, they might have been in\n the conversation before or the actor might just have added them, either way they should be a contact of the actor.\n \"\"\"" ]
[ { "param": "conns", "type": "Connections" }, { "param": "conv_id", "type": "int" }, { "param": "actor_id", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "conns", "type": "Connections", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "conv_id", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "actor_id", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
async def add_contacts(conns: Connections, conv_id: int, actor_id: int):
    await conns.main.execute(
        """
        insert into contacts (owner, profile_user)
        (
          select $2, user_id from participants
          where conv = $1 and user_id != $2 and removal_action_id is null
        )
        on conflict (owner, profile_user) do nothing
        """,
        conv_id,
        actor_id,
    )
339
142
bab7ec19615bc5a7884c732979ec203f8a185bc0
stevommmm/nagios_nfs_verbose
check_nfs_verbose.py
[ "MIT" ]
Python
diff_stats
null
def diff_stats(old_ms, new_ms):
    '''Diff each op-stat between our new mountstats and historical version
    Alert if we have any major_timeouts

    The only ops we really care about are:
    - Mountstats.GETATTR.major_timeouts
    - Mountstats.ACCESS.major_timeouts
    '''
    for attr in [x for x in new_ms.__slots__ if x.isupper()]:
        if hasattr(new_ms, attr) and hasattr(old_ms, attr):
            if getattr(new_ms, attr).major_timeouts > getattr(old_ms, attr).major_timeouts:
                yield "%s [%d]" % (attr, getattr(new_ms, attr).major_timeouts - getattr(old_ms, attr).major_timeouts)
Diff each op-stat between our new mountstats and historical version Alert if we have any major_timeouts The only ops we really care about are: - Mountstats.GETATTR.major_timeouts - Mountstats.ACCESS.major_timeouts
Diff each op-stat between our new mountstats and historical version Alert if we have any major_timeouts The only ops we really care about are: Mountstats.GETATTR.major_timeouts Mountstats.ACCESS.major_timeouts
[ "Diff", "each", "op", "-", "stat", "between", "our", "new", "mountstats", "and", "historical", "version", "Alert", "if", "we", "have", "any", "major_timeouts", "The", "only", "ops", "we", "really", "care", "about", "are", ":", "Mountstats", ".", "GETATTR", ".", "major_timeouts", "Mountstats", ".", "ACCESS", ".", "major_timeouts" ]
def diff_stats(old_ms, new_ms):
    for attr in [x for x in new_ms.__slots__ if x.isupper()]:
        if hasattr(new_ms, attr) and hasattr(old_ms, attr):
            if getattr(new_ms, attr).major_timeouts > getattr(old_ms, attr).major_timeouts:
                yield "%s [%d]" % (attr, getattr(new_ms, attr).major_timeouts - getattr(old_ms, attr).major_timeouts)
[ "def", "diff_stats", "(", "old_ms", ",", "new_ms", ")", ":", "for", "attr", "in", "[", "x", "for", "x", "in", "new_ms", ".", "__slots__", "if", "x", ".", "isupper", "(", ")", "]", ":", "if", "hasattr", "(", "new_ms", ",", "attr", ")", "and", "hasattr", "(", "old_ms", ",", "attr", ")", ":", "if", "getattr", "(", "new_ms", ",", "attr", ")", ".", "major_timeouts", ">", "getattr", "(", "old_ms", ",", "attr", ")", ".", "major_timeouts", ":", "yield", "\"%s [%d]\"", "%", "(", "attr", ",", "getattr", "(", "new_ms", ",", "attr", ")", ".", "major_timeouts", "-", "getattr", "(", "old_ms", ",", "attr", ")", ".", "major_timeouts", ")" ]
Diff each op-stat between our new mountstats and historical version Alert if we have any major_timeouts
[ "Diff", "each", "op", "-", "stat", "between", "our", "new", "mountstats", "and", "historical", "version", "Alert", "if", "we", "have", "any", "major_timeouts" ]
[ "'''Diff each op-stat between our new mountstats and historical version\n\tAlert if we have any major_timeouts\n\n\tThe only ops we really care about are:\n\t- Mountstats.GETATTR.major_timeouts\n\t- Mountstats.ACCESS.major_timeouts\n\t'''" ]
[ { "param": "old_ms", "type": null }, { "param": "new_ms", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "old_ms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "new_ms", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def diff_stats(old_ms, new_ms):
    for attr in [x for x in new_ms.__slots__ if x.isupper()]:
        if hasattr(new_ms, attr) and hasattr(old_ms, attr):
            if getattr(new_ms, attr).major_timeouts > getattr(old_ms, attr).major_timeouts:
                yield "%s [%d]" % (attr, getattr(new_ms, attr).major_timeouts - getattr(old_ms, attr).major_timeouts)
340
170
1abd83880dcf3885c701b5afd39b49cd36ad6127
grandrew/taskjuggler-python
taskjuggler_python/juggler.py
[ "MIT" ]
Python
to_tj3time
<not_specific>
def to_tj3time(dt):
    """
    Convert python date or datetime object to TJ3 format

    """
    return dt.isoformat().replace("T", "-").split(".")[0].split("+")[0]
Convert python date or datetime object to TJ3 format
Convert python date or datetime object to TJ3 format
[ "Convert", "python", "date", "or", "datetime", "object", "to", "TJ3", "format" ]
def to_tj3time(dt):
    return dt.isoformat().replace("T", "-").split(".")[0].split("+")[0]
[ "def", "to_tj3time", "(", "dt", ")", ":", "return", "dt", ".", "isoformat", "(", ")", ".", "replace", "(", "\"T\"", ",", "\"-\"", ")", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "split", "(", "\"+\"", ")", "[", "0", "]" ]
Convert python date or datetime object to TJ3 format
[ "Convert", "python", "date", "or", "datetime", "object", "to", "TJ3", "format" ]
[ "\"\"\"\n Convert python date or datetime object to TJ3 format\n \n \"\"\"" ]
[ { "param": "dt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def to_tj3time(dt):
    return dt.isoformat().replace("T", "-").split(".")[0].split("+")[0]
341
8
3b87723ece85756a641fa386f493eb7727864168
gabrielecker/ru-telegram-bot
bot/commands.py
[ "MIT" ]
Python
start
<not_specific>
def start():
    """
    This static method returns the message for users '/start' call in chats
    """
    return 'Bot para remover a necessidade de acessar o site do RU para v\
er o cardápio todos os dias, coleta informações diretamente do site.'
This static method returns the message for users '/start' call in chats
This static method returns the message for users '/start' call in chats
[ "This", "static", "method", "returns", "the", "message", "for", "users", "'", "/", "start", "'", "call", "in", "chats" ]
def start():
    return 'Bot para remover a necessidade de acessar o site do RU para v\
er o cardápio todos os dias, coleta informações diretamente do site.'
[ "def", "start", "(", ")", ":", "return", "'Bot para remover a necessidade de acessar o site do RU para v\\\ner o cardápio todos os dias, coleta informações diretamente do site.'" ]
This static method returns the message for users '/start' call in chats
[ "This", "static", "method", "returns", "the", "message", "for", "users", "'", "/", "start", "'", "call", "in", "chats" ]
[ "\"\"\"\n This static method returns the message for users '/start' call in chats\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def start():
    return 'Bot para remover a necessidade de acessar o site do RU para v\
er o cardápio todos os dias, coleta informações diretamente do site.'
343
795
8ff48738243bdfb16ec4037c361008ed1a2c54d8
colindean/peru
peru/compat.py
[ "MIT" ]
Python
makedirs
null
def makedirs(path):
    '''os.makedirs() has an exist_ok param, but it still throws errors when the
    path exists with non-default permissions. This isn't fixed until 3.4.
    Pathlib won't be getting an exist_ok param until 3.5.'''
    path = str(path)  # compatibility with pathlib
    # Use isdir to avoid silently returning if the path exists but isn't a dir.
    if not os.path.isdir(path):
        os.makedirs(path)
os.makedirs() has an exist_ok param, but it still throws errors when the path exists with non-default permissions. This isn't fixed until 3.4. Pathlib won't be getting an exist_ok param until 3.5.
os.makedirs() has an exist_ok param, but it still throws errors when the path exists with non-default permissions. This isn't fixed until 3.4. Pathlib won't be getting an exist_ok param until 3.5.
[ "os", ".", "makedirs", "()", "has", "an", "exist_ok", "param", "but", "it", "still", "throws", "errors", "when", "the", "path", "exists", "with", "non", "-", "default", "permissions", ".", "This", "isn", "'", "t", "fixed", "until", "3", ".", "4", ".", "Pathlib", "won", "'", "t", "be", "getting", "an", "exist_ok", "param", "until", "3", ".", "5", "." ]
def makedirs(path):
    path = str(path)
    if not os.path.isdir(path):
        os.makedirs(path)
[ "def", "makedirs", "(", "path", ")", ":", "path", "=", "str", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
os.makedirs() has an exist_ok param, but it still throws errors when the path exists with non-default permissions.
[ "os", ".", "makedirs", "()", "has", "an", "exist_ok", "param", "but", "it", "still", "throws", "errors", "when", "the", "path", "exists", "with", "non", "-", "default", "permissions", "." ]
[ "'''os.makedirs() has an exist_ok param, but it still throws errors when the\n path exists with non-default permissions. This isn't fixed until 3.4.\n Pathlib won't be getting an exist_ok param until 3.5.'''", "# compatibility with pathlib", "# Use isdir to avoid silently returning if the path exists but isn't a dir." ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def makedirs(path):
    path = str(path)
    if not os.path.isdir(path):
        os.makedirs(path)
344
487
d02c086f8040179c55c86b486266b8e1378012fd
shawncal/ELL
tools/importers/CNTK/lib/cntk_layers.py
[ "MIT" ]
Python
convert_cntk_layers_to_ell_layers
<not_specific>
def convert_cntk_layers_to_ell_layers(layersToConvert):
    """Walks a list of CNTK layers and returns a list of ELL Layer objects"""
    ellLayers = []
    for layerObject in layersToConvert:
        layerObject.process(ellLayers)
    return ellLayers
Walks a list of CNTK layers and returns a list of ELL Layer objects
Walks a list of CNTK layers and returns a list of ELL Layer objects
[ "Walks", "a", "list", "of", "CNTK", "layers", "and", "returns", "a", "list", "of", "ELL", "Layer", "objects" ]
def convert_cntk_layers_to_ell_layers(layersToConvert):
    ellLayers = []
    for layerObject in layersToConvert:
        layerObject.process(ellLayers)
    return ellLayers
[ "def", "convert_cntk_layers_to_ell_layers", "(", "layersToConvert", ")", ":", "ellLayers", "=", "[", "]", "for", "layerObject", "in", "layersToConvert", ":", "layerObject", ".", "process", "(", "ellLayers", ")", "return", "ellLayers" ]
Walks a list of CNTK layers and returns a list of ELL Layer objects
[ "Walks", "a", "list", "of", "CNTK", "layers", "and", "returns", "a", "list", "of", "ELL", "Layer", "objects" ]
[ "\"\"\"Walks a list of CNTK layers and returns a list of ELL Layer objects\"\"\"" ]
[ { "param": "layersToConvert", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "layersToConvert", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_cntk_layers_to_ell_layers(layersToConvert):
    ellLayers = []
    for layerObject in layersToConvert:
        layerObject.process(ellLayers)
    return ellLayers
345
610
b373c169c971a78f866caac5a106962b1741b069
pastelmind/d2animdata
src/d2animdata.py
[ "MIT" ]
Python
_sort_records_by_cof_name
None
def _sort_records_by_cof_name(records: List[Record]) -> None:
    """Sorts a list of Records in-place by COF name in ascending order.

    :param records: List of `Record`s to sort in-place.
    """
    records.sort(key=lambda record: record.cof_name)
Sorts a list of Records in-place by COF name in ascending order. :param records: List of `Record`s to sort in-place.
Sorts a list of Records in-place by COF name in ascending order.
[ "Sorts", "a", "list", "of", "Records", "in", "-", "place", "by", "COF", "name", "in", "ascending", "order", "." ]
def _sort_records_by_cof_name(records: List[Record]) -> None:
    records.sort(key=lambda record: record.cof_name)
[ "def", "_sort_records_by_cof_name", "(", "records", ":", "List", "[", "Record", "]", ")", "->", "None", ":", "records", ".", "sort", "(", "key", "=", "lambda", "record", ":", "record", ".", "cof_name", ")" ]
Sorts a list of Records in-place by COF name in ascending order.
[ "Sorts", "a", "list", "of", "Records", "in", "-", "place", "by", "COF", "name", "in", "ascending", "order", "." ]
[ "\"\"\"Sorts a list of Records in-place by COF name in ascending order.\n\n :param records: List of `Record`s to sort in-place.\n \"\"\"" ]
[ { "param": "records", "type": "List[Record]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "records", "type": "List[Record]", "docstring": "List of `Record`s to sort in-place.", "docstring_tokens": [ "List", "of", "`", "Record", "`", "s", "to", "sort", "in", "-", "place", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _sort_records_by_cof_name(records: List[Record]) -> None:
    records.sort(key=lambda record: record.cof_name)
346
593
e4fbee9407ddf21cc5c4ef3fba3f03068e6af1ea
mrubio-chavarria/project_2
models.py
[ "MIT" ]
Python
dilation_formula
<not_specific>
def dilation_formula(layer_index, dilation_base):
    """
    DESCRIPTION:
    A formula that tells you the dilation based on the base and the layer
    index.
    :param dilation_base: [int] the base to compute the dilation, when no
    dilation, base = 1.
    :param layer_index: [int] the level of the layer.
    """
    return (dilation_base ** layer_index)
DESCRIPTION: A formula that tells you the dilation based on the base and the layer index. :param dilation_base: [int] the base to compute the dilation, when no dilation, base = 1. :param layer_index: [int] the level of the layer.
A formula that tells you the dilation based on the base and the layer index.
[ "A", "formula", "that", "tells", "you", "the", "dilation", "based", "on", "the", "base", "and", "the", "layer", "index", "." ]
def dilation_formula(layer_index, dilation_base):
    return (dilation_base ** layer_index)
[ "def", "dilation_formula", "(", "layer_index", ",", "dilation_base", ")", ":", "return", "(", "dilation_base", "**", "layer_index", ")" ]
DESCRIPTION: A formula that tells you the dilation based on the base and the layer index.
[ "DESCRIPTION", ":", "A", "formula", "that", "tells", "you", "the", "dilation", "based", "on", "the", "base", "and", "the", "layer", "index", "." ]
[ "\"\"\"\n DESCRIPTION:\n A formula that tells you the dilation based on the base and the layer\n index.\n :param dilation_base: [int] the base to compute the dilation, when no \n dilation, base = 1.\n :param layer_index: [int] the level of the layer.\n \"\"\"" ]
[ { "param": "layer_index", "type": null }, { "param": "dilation_base", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "layer_index", "type": null, "docstring": "[int] the level of the layer.", "docstring_tokens": [ "[", "int", "]", "the", "level", "of", "the", "layer", "." ], "default": null, "is_optional": null }, { "identifier": "dilation_base", "type": null, "docstring": "[int] the base to compute the dilation, when no\ndilation, base = 1.", "docstring_tokens": [ "[", "int", "]", "the", "base", "to", "compute", "the", "dilation", "when", "no", "dilation", "base", "=", "1", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def dilation_formula(layer_index, dilation_base):
    return (dilation_base ** layer_index)
347
885
f0e523a9e94cad4f5f11a8a9a09af89cf1202dd1
JuPeg/outputformat
outputformat/tools.py
[ "MIT" ]
Python
hsv2rgb
<not_specific>
def hsv2rgb(hsv_tuple):
    """Converts HSV floats to 8-bit RGB."""
    h = hsv_tuple[0]
    s = hsv_tuple[1]
    v = hsv_tuple[2]
    rgb_tuple = tuple(int(round(i * 255)) for i in colorsys.hsv_to_rgb(h, s, v))
    return rgb_tuple
Converts HSV floats to 8-bit RGB.
Converts HSV floats to 8-bit RGB.
[ "Converts", "HSV", "floats", "to", "8", "-", "bit", "RGB", "." ]
def hsv2rgb(hsv_tuple):
    h = hsv_tuple[0]
    s = hsv_tuple[1]
    v = hsv_tuple[2]
    rgb_tuple = tuple(int(round(i * 255)) for i in colorsys.hsv_to_rgb(h, s, v))
    return rgb_tuple
[ "def", "hsv2rgb", "(", "hsv_tuple", ")", ":", "h", "=", "hsv_tuple", "[", "0", "]", "s", "=", "hsv_tuple", "[", "1", "]", "v", "=", "hsv_tuple", "[", "2", "]", "rgb_tuple", "=", "tuple", "(", "int", "(", "round", "(", "i", "*", "255", ")", ")", "for", "i", "in", "colorsys", ".", "hsv_to_rgb", "(", "h", ",", "s", ",", "v", ")", ")", "return", "rgb_tuple" ]
Converts HSV floats to 8-bit RGB.
[ "Converts", "HSV", "floats", "to", "8", "-", "bit", "RGB", "." ]
[ "\"\"\"Converts HSV floats to 8-bit RGB.\"\"\"" ]
[ { "param": "hsv_tuple", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "hsv_tuple", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import colorsys

def hsv2rgb(hsv_tuple):
    h = hsv_tuple[0]
    s = hsv_tuple[1]
    v = hsv_tuple[2]
    rgb_tuple = tuple(int(round(i * 255)) for i in colorsys.hsv_to_rgb(h, s, v))
    return rgb_tuple
348
28
08bb71ecdbbc421a9bca351f0905347ee2bbcd8b
kprussing/scons-contrib
sconscontrib/SCons/Tool/ocaml/__init__.py
[ "MIT" ]
Python
norm_suffix
<not_specific>
def norm_suffix(f, suffix):
    """
    Add a suffix if not present
    """
    p = str(f)
    e = p[-len(suffix) :]
    if e != suffix:
        p = p + suffix
    return p
Add a suffix if not present
Add a suffix if not present
[ "Add", "a", "suffix", "if", "not", "present" ]
def norm_suffix(f, suffix):
    p = str(f)
    e = p[-len(suffix) :]
    if e != suffix:
        p = p + suffix
    return p
[ "def", "norm_suffix", "(", "f", ",", "suffix", ")", ":", "p", "=", "str", "(", "f", ")", "e", "=", "p", "[", "-", "len", "(", "suffix", ")", ":", "]", "if", "e", "!=", "suffix", ":", "p", "=", "p", "+", "suffix", "return", "p" ]
Add a suffix if not present
[ "Add", "a", "suffix", "if", "not", "present" ]
[ "\"\"\"\n Add a suffix if not present\n \"\"\"" ]
[ { "param": "f", "type": null }, { "param": "suffix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "f", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def norm_suffix(f, suffix):
    p = str(f)
    e = p[-len(suffix) :]
    if e != suffix:
        p = p + suffix
    return p
349
531
fb043209fefff91fb8dbe9e3d9ec3bc700f8226a
DataONEorg/d1_python
gmn/src/d1_gmn/tests/gmn_direct.py
[ "Apache-2.0" ]
Python
_get_resp_dict
<not_specific>
def _get_resp_dict(response):
    """Log return status of a django.http.response.HttpResponse and arrange the response
    into a dict of items generally more convenient to work with from tests."""
    body_str = (
        "".join(response.streaming_content) if response.streaming else response.content
    )
    is_ok = response.status_code in (200,)
    if not is_ok:
        logging.warning(
            'Request returned unexpected status code. status_code={} body="{}"'.format(
                response.status_code, body_str
            )
        )
    else:
        logging.info("Request successful. status_code={}".format(response.status_code))
    return {
        "is_ok": is_ok,
        "status_code_int": response.status_code,
        "header_dict": dict(list(response.items())),
        "body_str": body_str,
        "response": response,
    }
Log return status of a django.http.response.HttpResponse and arrange the response into a dict of items generally more convenient to work with from tests.
Log return status of a django.http.response.HttpResponse and arrange the response into a dict of items generally more convenient to work with from tests.
[ "Log", "return", "status", "of", "a", "django", ".", "http", ".", "response", ".", "HttpResponse", "and", "arrange", "the", "response", "into", "a", "dict", "of", "items", "generally", "more", "convenient", "to", "work", "with", "from", "tests", "." ]
def _get_resp_dict(response):
    body_str = (
        "".join(response.streaming_content) if response.streaming else response.content
    )
    is_ok = response.status_code in (200,)
    if not is_ok:
        logging.warning(
            'Request returned unexpected status code. status_code={} body="{}"'.format(
                response.status_code, body_str
            )
        )
    else:
        logging.info("Request successful. status_code={}".format(response.status_code))
    return {
        "is_ok": is_ok,
        "status_code_int": response.status_code,
        "header_dict": dict(list(response.items())),
        "body_str": body_str,
        "response": response,
    }
[ "def", "_get_resp_dict", "(", "response", ")", ":", "body_str", "=", "(", "\"\"", ".", "join", "(", "response", ".", "streaming_content", ")", "if", "response", ".", "streaming", "else", "response", ".", "content", ")", "is_ok", "=", "response", ".", "status_code", "in", "(", "200", ",", ")", "if", "not", "is_ok", ":", "logging", ".", "warning", "(", "'Request returned unexpected status code. status_code={} body=\"{}\"'", ".", "format", "(", "response", ".", "status_code", ",", "body_str", ")", ")", "else", ":", "logging", ".", "info", "(", "\"Request successful. status_code={}\"", ".", "format", "(", "response", ".", "status_code", ")", ")", "return", "{", "\"is_ok\"", ":", "is_ok", ",", "\"status_code_int\"", ":", "response", ".", "status_code", ",", "\"header_dict\"", ":", "dict", "(", "list", "(", "response", ".", "items", "(", ")", ")", ")", ",", "\"body_str\"", ":", "body_str", ",", "\"response\"", ":", "response", ",", "}" ]
Log return status of a django.http.response.HttpResponse and arrange the response into a dict of items generally more convenient to work with from tests.
[ "Log", "return", "status", "of", "a", "django", ".", "http", ".", "response", ".", "HttpResponse", "and", "arrange", "the", "response", "into", "a", "dict", "of", "items", "generally", "more", "convenient", "to", "work", "with", "from", "tests", "." ]
[ "\"\"\"Log return status of a django.http.response.HttpResponse and arrange the response\n into a dict of items generally more convenient to work with from tests.\"\"\"" ]
[ { "param": "response", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "response", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging

def _get_resp_dict(response):
    body_str = (
        "".join(response.streaming_content) if response.streaming else response.content
    )
    is_ok = response.status_code in (200,)
    if not is_ok:
        logging.warning(
            'Request returned unexpected status code. status_code={} body="{}"'.format(
                response.status_code, body_str
            )
        )
    else:
        logging.info("Request successful. status_code={}".format(response.status_code))
    return {
        "is_ok": is_ok,
        "status_code_int": response.status_code,
        "header_dict": dict(list(response.items())),
        "body_str": body_str,
        "response": response,
    }
350
943
9622b29548aa7a6b8fca77fd81ff19b9db19f806
mjuarezm/trees
code/forests.py
[ "Apache-2.0" ]
Python
split_data
<not_specific>
def split_data(data, label=0, length=50):
    'Take a large text and divide it into chunks'
    strings = [data[i:i+length] for i in range(0, len(data) - length, length)]
    random.shuffle(strings)
    strings = [(s, label) for s in strings]
    test = strings[:len(strings) * 10 / 100]
    training = strings[len(strings) * 10 / 100:]
    return test, training
Take a large text and divide it into chunks
Take a large text and divide it into chunks
[ "Take", "a", "large", "text", "and", "divide", "it", "into", "chunks" ]
def split_data(data, label=0, length=50):
    strings = [data[i:i+length] for i in range(0, len(data) - length, length)]
    random.shuffle(strings)
    strings = [(s, label) for s in strings]
    test = strings[:len(strings) * 10 / 100]
    training = strings[len(strings) * 10 / 100:]
    return test, training
[ "def", "split_data", "(", "data", ",", "label", "=", "0", ",", "length", "=", "50", ")", ":", "strings", "=", "[", "data", "[", "i", ":", "i", "+", "length", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", ")", "-", "length", ",", "length", ")", "]", "random", ".", "shuffle", "(", "strings", ")", "strings", "=", "[", "(", "s", ",", "label", ")", "for", "s", "in", "strings", "]", "test", "=", "strings", "[", ":", "len", "(", "strings", ")", "*", "10", "/", "100", "]", "training", "=", "strings", "[", "len", "(", "strings", ")", "*", "10", "/", "100", ":", "]", "return", "test", ",", "training" ]
Take a large text and divide it into chunks
[ "Take", "a", "large", "text", "and", "divide", "it", "into", "chunks" ]
[ "'Take a large text and divide it into chunks'" ]
[ { "param": "data", "type": null }, { "param": "label", "type": null }, { "param": "length", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "label", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "length", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import random

def split_data(data, label=0, length=50):
    strings = [data[i:i+length] for i in range(0, len(data) - length, length)]
    random.shuffle(strings)
    strings = [(s, label) for s in strings]
    test = strings[:len(strings) * 10 / 100]
    training = strings[len(strings) * 10 / 100:]
    return test, training
351
191
7deac36b545ab9f95fe9266637491cb7c3902e26
korpling/graphANNIS-python
graphannis/util.py
[ "Apache-2.0" ]
Python
node_name_from_match
<not_specific>
def node_name_from_match(match):
    """ Takes a match identifier (which includes the matched annotation name) and returns the node name. This can take a single string or a list of strings as argument.

    >>> m = node_name_from_match("tiger::cat::topcorpus/subcorpus/doc1#n2")
    >>> m == "topcorpus/subcorpus/doc1#n2"
    True

    """
    if isinstance(match, str):
        elements = re.split('::', match, 3)
        if len(elements) == 3:
            return elements[2]
        elif len(elements) == 2:
            return elements[1]
        else:
            return elements[0]
    elif isinstance(match, list):
        result = []
        for m in match:
            result.append(node_name_from_match(m))
        return result
    else:
        return None
Takes a match identifier (which includes the matched annotation name) and returns the node name. This can take a single string or a list of strings as argument. >>> m = node_name_from_match("tiger::cat::topcorpus/subcorpus/doc1#n2") >>> m == "topcorpus/subcorpus/doc1#n2" True
Takes a match identifier (which includes the matched annotation name) and returns the node name. This can take a single string or a list of strings as argument.
[ "Takes", "a", "match", "identifier", "(", "which", "includes", "the", "matched", "annotation", "name", ")", "and", "returns", "the", "node", "name", ".", "This", "can", "take", "a", "single", "string", "or", "a", "list", "of", "strings", "as", "argument", "." ]
def node_name_from_match(match):
    if isinstance(match, str):
        elements = re.split('::', match, 3)
        if len(elements) == 3:
            return elements[2]
        elif len(elements) == 2:
            return elements[1]
        else:
            return elements[0]
    elif isinstance(match, list):
        result = []
        for m in match:
            result.append(node_name_from_match(m))
        return result
    else:
        return None
[ "def", "node_name_from_match", "(", "match", ")", ":", "if", "isinstance", "(", "match", ",", "str", ")", ":", "elements", "=", "re", ".", "split", "(", "'::'", ",", "match", ",", "3", ")", "if", "len", "(", "elements", ")", "==", "3", ":", "return", "elements", "[", "2", "]", "elif", "len", "(", "elements", ")", "==", "2", ":", "return", "elements", "[", "1", "]", "else", ":", "return", "elements", "[", "0", "]", "elif", "isinstance", "(", "match", ",", "list", ")", ":", "result", "=", "[", "]", "for", "m", "in", "match", ":", "result", ".", "append", "(", "node_name_from_match", "(", "m", ")", ")", "return", "result", "else", ":", "return", "None" ]
Takes a match identifier (which includes the matched annotation name) and returns the node name.
[ "Takes", "a", "match", "identifier", "(", "which", "includes", "the", "matched", "annotation", "name", ")", "and", "returns", "the", "node", "name", "." ]
[ "\"\"\" Takes a match identifier (which includes the matched annotation name) and returns the node name. This can take a single string or a list of strings as argument. \n \n >>> m = node_name_from_match(\"tiger::cat::topcorpus/subcorpus/doc1#n2\")\n >>> m == \"topcorpus/subcorpus/doc1#n2\"\n True\n\n \"\"\"" ]
[ { "param": "match", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "match", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def node_name_from_match(match):
    if isinstance(match, str):
        elements = re.split('::', match, 3)
        if len(elements) == 3:
            return elements[2]
        elif len(elements) == 2:
            return elements[1]
        else:
            return elements[0]
    elif isinstance(match, list):
        result = []
        for m in match:
            result.append(node_name_from_match(m))
        return result
    else:
        return None
352
152
fd91fb09110983df9589a3d20175483d1abec5b4
matt-gardner/openreview-expertise
expertise/utils/utils.py
[ "MIT" ]
Python
content_to_text
<not_specific>
def content_to_text(content, fields=['title', 'abstract', 'fulltext']):
    '''
    Given a dict "content", such as the content field in an OpenReview record,
    return a string that is the concatenation of the fields in "fields".

    Example:

    >>> sample_note_content = {
    ...     'title': 'My Title',
    ...     'abstract': 'This is the abstract of my paper.'
    ... }
    >>> content_to_text(sample_note_content)
    'My Title This is the abstract of my paper.'

    '''
    return ' '.join([content.get(field, '') for field in fields])
Given a dict "content", such as the content field in an OpenReview record, return a string that is the concatenation of the fields in "fields". Example: >>> sample_note_content = { ... 'title': 'My Title', ... 'abstract': 'This is the abstract of my paper.' ... } >>> content_to_text(sample_note_content) 'My Title This is the abstract of my paper.'
Given a dict "content", such as the content field in an OpenReview record, return a string that is the concatenation of the fields in "fields".
[ "Given", "a", "dict", "\"", "content", "\"", "such", "as", "the", "content", "field", "in", "an", "OpenReview", "record", "return", "a", "string", "that", "is", "the", "concatenation", "of", "the", "fields", "in", "\"", "fields", "\"", "." ]
def content_to_text(content, fields=['title', 'abstract', 'fulltext']):
    return ' '.join([content.get(field, '') for field in fields])
[ "def", "content_to_text", "(", "content", ",", "fields", "=", "[", "'title'", ",", "'abstract'", ",", "'fulltext'", "]", ")", ":", "return", "' '", ".", "join", "(", "[", "content", ".", "get", "(", "field", ",", "''", ")", "for", "field", "in", "fields", "]", ")" ]
Given a dict "content", such as the content field in an OpenReview record, return a string that is the concatenation of the fields in "fields".
[ "Given", "a", "dict", "\"", "content", "\"", "such", "as", "the", "content", "field", "in", "an", "OpenReview", "record", "return", "a", "string", "that", "is", "the", "concatenation", "of", "the", "fields", "in", "\"", "fields", "\"", "." ]
[ "'''\n Given a dict \"content\", such as the content field in an OpenReview record,\n return a string that is the concatenation of the fields in \"fields\".\n\n Example:\n\n >>> sample_note_content = {\n ... 'title': 'My Title',\n ... 'abstract': 'This is the abstract of my paper.'\n ... }\n >>> content_to_text(sample_note_content)\n 'My Title This is the abstract of my paper.'\n\n '''" ]
[ { "param": "content", "type": null }, { "param": "fields", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "content", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fields", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [ { "identifier": "examples", "docstring": null, "docstring_tokens": [ "None" ] } ] }
def content_to_text(content, fields=['title', 'abstract', 'fulltext']):
    return ' '.join([content.get(field, '') for field in fields])
353
48
fd499cacf2af731d0ff922245e1ed7e85e649cee
fashandge/deja
src/experiment/utilities.py
[ "MIT" ]
Python
tokenize_string
<not_specific>
def tokenize_string(string, vocabulary):
    '''tokenize a string that is a concatenation of words
    without spaces into a list of words
    '''
    words = []
    length = len(string)
    start = 0
    while start < length:
        found = False
        for stop in range(length, start, -1):
            word = string[start:stop]
            if word in vocabulary:
                words.append(word)
                found = True
                start = stop
                break
        if not found:
            words.append(string[start:])
            break
    return words
tokenize a string that is a concatenation of words without spaces into a list of words
tokenize a string that is a concatenation of words without spaces into a list of words
[ "tokenize", "a", "string", "that", "is", "a", "concatenation", "of", "words", "without", "spaces", "into", "a", "list", "of", "words" ]
def tokenize_string(string, vocabulary):
    words = []
    length = len(string)
    start = 0
    while start < length:
        found = False
        for stop in range(length, start, -1):
            word = string[start:stop]
            if word in vocabulary:
                words.append(word)
                found = True
                start = stop
                break
        if not found:
            words.append(string[start:])
            break
    return words
[ "def", "tokenize_string", "(", "string", ",", "vocabulary", ")", ":", "words", "=", "[", "]", "length", "=", "len", "(", "string", ")", "start", "=", "0", "while", "start", "<", "length", ":", "found", "=", "False", "for", "stop", "in", "range", "(", "length", ",", "start", ",", "-", "1", ")", ":", "word", "=", "string", "[", "start", ":", "stop", "]", "if", "word", "in", "vocabulary", ":", "words", ".", "append", "(", "word", ")", "found", "=", "True", "start", "=", "stop", "break", "if", "not", "found", ":", "words", ".", "append", "(", "string", "[", "start", ":", "]", ")", "break", "return", "words" ]
tokenize a string that is a concatenation of words without spaces into a list of words
[ "tokenize", "a", "string", "that", "is", "a", "concatenation", "of", "words", "without", "spaces", "into", "a", "list", "of", "words" ]
[ "'''tokenize a string that is a concatenation of words\n without spaces into a list of words\n '''" ]
[ { "param": "string", "type": null }, { "param": "vocabulary", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "vocabulary", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def tokenize_string(string, vocabulary):
    words = []
    length = len(string)
    start = 0
    while start < length:
        found = False
        for stop in range(length, start, -1):
            word = string[start:stop]
            if word in vocabulary:
                words.append(word)
                found = True
                start = stop
                break
        if not found:
            words.append(string[start:])
            break
    return words
354
670
caa69d2503ec743b0a3c1008aaffd141c95446d7
Einhard6176/FlightDelayPredictor
EDA/functions.py
[ "MIT" ]
Python
calculateSpeed
<not_specific>
def calculateSpeed(df):
    '''
    Returns column with speed values in MPH
    '''
    df['speed'] = df.distance/df.air_time*60
    return df
Returns column with speed values in MPH
Returns column with speed values in MPH
[ "Returns", "column", "with", "speed", "values", "in", "MPH" ]
def calculateSpeed(df):
    df['speed'] = df.distance/df.air_time*60
    return df
[ "def", "calculateSpeed", "(", "df", ")", ":", "df", "[", "'speed'", "]", "=", "df", ".", "distance", "/", "df", ".", "air_time", "*", "60", "return", "df" ]
Returns column with speed values in MPH
[ "Returns", "column", "with", "speed", "values", "in", "MPH" ]
[ "'''\n Returns column with speed values in MPH\n '''" ]
[ { "param": "df", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def calculateSpeed(df):
    df['speed'] = df.distance/df.air_time*60
    return df
355
247
f384f77e865bcd0ade438da8857f1126259d5b78
Nitin-Mane/openvino
model-optimizer/mo/utils/versions_checker.py
[ "Apache-2.0" ]
Python
check_python_version
<not_specific>
def check_python_version():
    """
    Checks python version to be greater or equal than 3.4
    :return: exit code (1 - error, None - successful)
    """
    if sys.version_info < (3, 4):
        print('Python version should be of version 3.4 or newer')
        return 1
Checks python version to be greater or equal than 3.4 :return: exit code (1 - error, None - successful)
Checks python version to be greater or equal than 3.4
[ "Checks", "python", "version", "to", "be", "greater", "or", "equal", "than", "3", ".", "4" ]
def check_python_version():
    if sys.version_info < (3, 4):
        print('Python version should be of version 3.4 or newer')
        return 1
[ "def", "check_python_version", "(", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "4", ")", ":", "print", "(", "'Python version should be of version 3.4 or newer'", ")", "return", "1" ]
Checks python version to be greater or equal than 3.4
[ "Checks", "python", "version", "to", "be", "greater", "or", "equal", "than", "3", ".", "4" ]
[ "\"\"\"\n Checks python version to be greater or equal than 3.4\n :return: exit code (1 - error, None - successful)\n \"\"\"" ]
[]
{ "returns": [ { "docstring": "exit code (1 - error, None - successful)", "docstring_tokens": [ "exit", "code", "(", "1", "-", "error", "None", "-", "successful", ")" ], "type": null } ], "raises": [], "params": [], "outlier_params": [], "others": [] }
import sys

def check_python_version():
    if sys.version_info < (3, 4):
        print('Python version should be of version 3.4 or newer')
        return 1
356
728
63c38991678744f9ec00a90b685e88933077614c
Numerics88/n88tools
tests/regression/config_regression.py
[ "MIT" ]
Python
run_call
<not_specific>
def run_call(command):
    '''Returns true if call succeeds, false otherwise'''
    try:
        subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        return False
    except OSError as e:
        return False
    return True
Returns true if call succeeds, false otherwise
Returns true if call succeeds, false otherwise
[ "Returns", "true", "if", "call", "succeeds", "false", "otherwise" ]
def run_call(command):
    try:
        subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        return False
    except OSError as e:
        return False
    return True
[ "def", "run_call", "(", "command", ")", ":", "try", ":", "subprocess", ".", "check_output", "(", "command", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "return", "False", "except", "OSError", "as", "e", ":", "return", "False", "return", "True" ]
Returns true if call succeeds, false otherwise
[ "Returns", "true", "if", "call", "succeeds", "false", "otherwise" ]
[ "'''Returns true if call succeeds, false otherwise'''" ]
[ { "param": "command", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "command", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import subprocess

def run_call(command):
    try:
        subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        return False
    except OSError as e:
        return False
    return True
357
911
f5a8db0af479cf3cfe5bb9c2328760b223ec4711
JordanDekker/ContainR
app/core/preprocessing/parser.py
[ "MIT" ]
Python
handle_zip
<not_specific>
def handle_zip(fw_file, rv_file, uuid):
    """Unzip forward and reverse FASTQ files if zipped.

    Args:
        fw_file: Original (zipped) forward FASTQ file.
        rv_file: Original (zipped) reverse FASTQ file.
        uuid: Unique identifier.

    Returns:
        fw_file: Unzipped forward files.
        rv_file: Unzipped reverse files.

    """
    if fw_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(fw_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/fw_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/fw_file/"):
            for file in files:
                fw_file = os.path.join(root, file)
    if rv_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(rv_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/rv_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/rv_file/"):
            for file in files:
                rv_file = os.path.join(root, file)
    return fw_file, rv_file
Unzip forward and reverse FASTQ files if zipped. Args: fw_file: Original (zipped) forward FASTQ file. rv_file: Original (zipped) reverse FASTQ file. uuid: Unique identifier. Returns: fw_file: Unzipped forward files. rv_file: Unzipped reverse files.
Unzip forward and reverse FASTQ files if zipped.
[ "Unzip", "forward", "and", "reverse", "FASTQ", "files", "if", "zipped", "." ]
def handle_zip(fw_file, rv_file, uuid):
    if fw_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(fw_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/fw_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/fw_file/"):
            for file in files:
                fw_file = os.path.join(root, file)
    if rv_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(rv_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/rv_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/rv_file/"):
            for file in files:
                rv_file = os.path.join(root, file)
    return fw_file, rv_file
[ "def", "handle_zip", "(", "fw_file", ",", "rv_file", ",", "uuid", ")", ":", "if", "fw_file", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "in", "[", "'zip'", ",", "'gz'", "]", ":", "with", "zipfile", ".", "ZipFile", "(", "fw_file", ",", "'r'", ")", "as", "fw_zip", ":", "fw_zip", ".", "extractall", "(", "\"data/\"", "+", "uuid", "+", "\"/fw_file/\"", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "\"data/\"", "+", "uuid", "+", "\"/fw_file/\"", ")", ":", "for", "file", "in", "files", ":", "fw_file", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", "if", "rv_file", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "in", "[", "'zip'", ",", "'gz'", "]", ":", "with", "zipfile", ".", "ZipFile", "(", "rv_file", ",", "'r'", ")", "as", "fw_zip", ":", "fw_zip", ".", "extractall", "(", "\"data/\"", "+", "uuid", "+", "\"/rv_file/\"", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "\"data/\"", "+", "uuid", "+", "\"/rv_file/\"", ")", ":", "for", "file", "in", "files", ":", "rv_file", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", "return", "fw_file", ",", "rv_file" ]
Unzip forward and reverse FASTQ files if zipped.
[ "Unzip", "forward", "and", "reverse", "FASTQ", "files", "if", "zipped", "." ]
[ "\"\"\"Unzip forward and reverse FASTQ files if zipped.\n \n Args:\n fw_file: Original (zipped) forward FASTQ file.\n rv_file: Original (zipped) reverse FASTQ file.\n uuid: Unique identifier.\n \n Returns:\n fw_file: Unzipped forward files.\n rv_file: Unzipped reverse files.\n\n \"\"\"" ]
[ { "param": "fw_file", "type": null }, { "param": "rv_file", "type": null }, { "param": "uuid", "type": null } ]
{ "returns": [ { "docstring": "Unzipped forward files.\nrv_file: Unzipped reverse files.", "docstring_tokens": [ "Unzipped", "forward", "files", ".", "rv_file", ":", "Unzipped", "reverse", "files", "." ], "type": "fw_file" } ], "raises": [], "params": [ { "identifier": "fw_file", "type": null, "docstring": "Original (zipped) forward FASTQ file.", "docstring_tokens": [ "Original", "(", "zipped", ")", "forward", "FASTQ", "file", "." ], "default": null, "is_optional": null }, { "identifier": "rv_file", "type": null, "docstring": "Original (zipped) reverse FASTQ file.", "docstring_tokens": [ "Original", "(", "zipped", ")", "reverse", "FASTQ", "file", "." ], "default": null, "is_optional": null }, { "identifier": "uuid", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import zipfile
import os

def handle_zip(fw_file, rv_file, uuid):
    if fw_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(fw_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/fw_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/fw_file/"):
            for file in files:
                fw_file = os.path.join(root, file)
    if rv_file.rsplit('.', 1)[1] in ['zip', 'gz']:
        with zipfile.ZipFile(rv_file, 'r') as fw_zip:
            fw_zip.extractall("data/"+uuid+"/rv_file/")
        for root, dirs, files in os.walk("data/"+uuid+"/rv_file/"):
            for file in files:
                rv_file = os.path.join(root, file)
    return fw_file, rv_file
358
64
155a57823471fe4255531cb1046c6a5c99f9da50
sebhaner/OpenSfM
opensfm/matching.py
[ "BSD-2-Clause" ]
Python
_non_static_matches
<not_specific>
def _non_static_matches(p1: np.ndarray, p2: np.ndarray, matches):
    """Remove matches with same position in both images.

    That should remove matches on that are likely belong to rig occluders,
    watermarks or dust, but not discard entirely static images.
    """
    threshold = 0.001
    res = []
    for match in matches:
        d = p1[match[0]] - p2[match[1]]
        if d[0] ** 2 + d[1] ** 2 >= threshold ** 2:
            res.append(match)
    static_ratio_threshold = 0.85
    static_ratio_removed = 1 - len(res) / max(len(matches), 1)
    if static_ratio_removed > static_ratio_threshold:
        return matches
    else:
        return res
Remove matches with same position in both images. That should remove matches on that are likely belong to rig occluders, watermarks or dust, but not discard entirely static images.
Remove matches with same position in both images. That should remove matches on that are likely belong to rig occluders, watermarks or dust, but not discard entirely static images.
[ "Remove", "matches", "with", "same", "position", "in", "both", "images", ".", "That", "should", "remove", "matches", "on", "that", "are", "likely", "belong", "to", "rig", "occluders", "watermarks", "or", "dust", "but", "not", "discard", "entirely", "static", "images", "." ]
def _non_static_matches(p1: np.ndarray, p2: np.ndarray, matches):
    threshold = 0.001
    res = []
    for match in matches:
        d = p1[match[0]] - p2[match[1]]
        if d[0] ** 2 + d[1] ** 2 >= threshold ** 2:
            res.append(match)
    static_ratio_threshold = 0.85
    static_ratio_removed = 1 - len(res) / max(len(matches), 1)
    if static_ratio_removed > static_ratio_threshold:
        return matches
    else:
        return res
[ "def", "_non_static_matches", "(", "p1", ":", "np", ".", "ndarray", ",", "p2", ":", "np", ".", "ndarray", ",", "matches", ")", ":", "threshold", "=", "0.001", "res", "=", "[", "]", "for", "match", "in", "matches", ":", "d", "=", "p1", "[", "match", "[", "0", "]", "]", "-", "p2", "[", "match", "[", "1", "]", "]", "if", "d", "[", "0", "]", "**", "2", "+", "d", "[", "1", "]", "**", "2", ">=", "threshold", "**", "2", ":", "res", ".", "append", "(", "match", ")", "static_ratio_threshold", "=", "0.85", "static_ratio_removed", "=", "1", "-", "len", "(", "res", ")", "/", "max", "(", "len", "(", "matches", ")", ",", "1", ")", "if", "static_ratio_removed", ">", "static_ratio_threshold", ":", "return", "matches", "else", ":", "return", "res" ]
Remove matches with same position in both images.
[ "Remove", "matches", "with", "same", "position", "in", "both", "images", "." ]
[ "\"\"\"Remove matches with same position in both images.\n\n That should remove matches on that are likely belong to rig occluders,\n watermarks or dust, but not discard entirely static images.\n \"\"\"" ]
[ { "param": "p1", "type": "np.ndarray" }, { "param": "p2", "type": "np.ndarray" }, { "param": "matches", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "p1", "type": "np.ndarray", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "p2", "type": "np.ndarray", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "matches", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _non_static_matches(p1: np.ndarray, p2: np.ndarray, matches):
    threshold = 0.001
    res = []
    for match in matches:
        d = p1[match[0]] - p2[match[1]]
        if d[0] ** 2 + d[1] ** 2 >= threshold ** 2:
            res.append(match)
    static_ratio_threshold = 0.85
    static_ratio_removed = 1 - len(res) / max(len(matches), 1)
    if static_ratio_removed > static_ratio_threshold:
        return matches
    else:
        return res
359
301
893cba8594f8231c0cf0ba968433627093cc9630
rezoo/chainer_computational_cost
chainer_computational_cost/cost_calculators/array.py
[ "MIT" ]
Python
calc_get_item
<not_specific>
def calc_get_item(func, in_data, **kwargs):
    """[GetItem](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.get_item.html)

    Extract part of an array. This operation is zero FLOPs.
    Most of tensor libraries have view feature, which doesn't actually create
    a new array unless necessary, but this is not considered in
    chainer-computational-cost.
    Memory read runs only for the necessary elements, so both memory
    read and write are same as the size of output tensor.

    | Item   | Value |
    |:-------|:------|
    | FLOPs  | $$ 0 $$ |
    | mread  | $$ \| y \| $$ |
    | mwrite | $$ \| y \| $$ |
    | params | `slices`: list of slices, a slice is an int or a tuple with 3 elements |
    """  # NOQA
    x, = in_data
    y = x[func.slices]
    slices = [(s.start, s.stop, s.step) if type(s) is slice else s
              for s in func.slices]
    return (0, y.size, y.size, {'slices': slices})
[GetItem](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.get_item.html) Extract part of an array. This operation is zero FLOPs. Most of tensor libraries have view feature, which doesn't actually create a new array unless necessary, but this is not considered in chainer-computational-cost. Memory read runs only for the necessary elements, so both memory read and write are same as the size of output tensor. | Item | Value | |:-------|:------| | FLOPs | $$ 0 $$ | | mread | $$ \| y \| $$ | | mwrite | $$ \| y \| $$ | | params | `slices`: list of slices, a slice is an int or a tuple with 3 elements |
[GetItem] Extract part of an array. This operation is zero FLOPs. Most of tensor libraries have view feature, which doesn't actually create a new array unless necessary, but this is not considered in chainer-computational-cost. Memory read runs only for the necessary elements, so both memory read and write are same as the size of output tensor.
[ "[", "GetItem", "]", "Extract", "part", "of", "an", "array", ".", "This", "operation", "is", "zero", "FLOPs", ".", "Most", "of", "tensor", "libraries", "have", "view", "feature", "which", "doesn", "'", "t", "actually", "create", "a", "new", "array", "unless", "necessary", "but", "this", "is", "not", "considered", "in", "chainer", "-", "computational", "-", "cost", ".", "Memory", "read", "runs", "only", "for", "the", "necessary", "elements", "so", "both", "memory", "read", "and", "write", "are", "same", "as", "the", "size", "of", "output", "tensor", "." ]
def calc_get_item(func, in_data, **kwargs):
    x, = in_data
    y = x[func.slices]
    slices = [(s.start, s.stop, s.step) if type(s) is slice else s
              for s in func.slices]
    return (0, y.size, y.size, {'slices': slices})
[ "def", "calc_get_item", "(", "func", ",", "in_data", ",", "**", "kwargs", ")", ":", "x", ",", "=", "in_data", "y", "=", "x", "[", "func", ".", "slices", "]", "slices", "=", "[", "(", "s", ".", "start", ",", "s", ".", "stop", ",", "s", ".", "step", ")", "if", "type", "(", "s", ")", "is", "slice", "else", "s", "for", "s", "in", "func", ".", "slices", "]", "return", "(", "0", ",", "y", ".", "size", ",", "y", ".", "size", ",", "{", "'slices'", ":", "slices", "}", ")" ]
[GetItem](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.get_item.html) Extract part of an array.
[ "[", "GetItem", "]", "(", "https", ":", "//", "docs", ".", "chainer", ".", "org", "/", "en", "/", "v4", ".", "3", ".", "0", "/", "reference", "/", "generated", "/", "chainer", ".", "functions", ".", "get_item", ".", "html", ")", "Extract", "part", "of", "an", "array", "." ]
[ "\"\"\"[GetItem](https://docs.chainer.org/en/v4.3.0/reference/generated/chainer.functions.get_item.html)\n\n Extract part of an array. This operation is zero FLOPs.\n Most of tensor libraries have view feature, which doesn't actually create\n a new array unless necessary, but this is not considered in\n chainer-computational-cost.\n Memory read runs only for the necessary elements, so both memory\n read and write are same as the size of output tensor.\n\n | Item | Value |\n |:-------|:------|\n | FLOPs | $$ 0 $$ |\n | mread | $$ \\| y \\| $$ |\n | mwrite | $$ \\| y \\| $$ |\n | params | `slices`: list of slices, a slice is an int or a tuple with 3 elements |\n \"\"\"", "# NOQA" ]
[ { "param": "func", "type": null }, { "param": "in_data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "func", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "in_data", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def calc_get_item(func, in_data, **kwargs): x, = in_data y = x[func.slices] slices = [(s.start, s.stop, s.step) if type(s) is slice else s for s in func.slices] return (0, y.size, y.size, {'slices': slices})
360
703
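Because calc_get_item only reads func.slices and the input array, it can be exercised without chainer itself; the SimpleNamespace stand-in below is an assumption for the demo, not the real FunctionNode API:

import numpy as np
from types import SimpleNamespace

func = SimpleNamespace(slices=(slice(0, 2, None), 3))  # hypothetical GetItem node
x = np.zeros((4, 5), dtype=np.float32)
print(calc_get_item(func, (x,)))
# (0, 2, 2, {'slices': [(0, 2, None), 3]})  (zero FLOPs, reads/writes |y| = 2)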
f9ff1381dca4e32553f27282b35a5217f8cdbdbb
MaksimFelchuck/ci-py-lib
lib-utils/git_scm.py
[ "MIT" ]
Python
download_repository
<not_specific>
def download_repository(repository_url, repository_dir, ref): '''Download a git repository and check out the specified ref Args: repository_url: repository URL string repository_dir: local folder name ref: branch name or commit hash Returns: 0: on success 1: on failure ''' if subprocess.run(['git', 'clone', repository_url, repository_dir], check=True).returncode != 0: return 1 os.chdir(repository_dir) if subprocess.run(['git', 'checkout', ref], check=True).returncode != 0: return 1 return 0
Download a git repository and check out the specified ref Args: repository_url: repository URL string repository_dir: local folder name ref: branch name or commit hash Returns: 0: on success 1: on failure
Download git repository and checkout to specified ref
[ "Download", "git", "repository", "and", "checkout", "to", "specified", "ref" ]
def download_repository(repository_url, repository_dir, ref): if subprocess.run(['git', 'clone', repository_url, repository_dir], check=True).returncode != 0: return 1 os.chdir(repository_dir) if subprocess.run(['git', 'checkout', ref], check=True).returncode != 0: return 1 return 0
[ "def", "download_repository", "(", "repository_url", ",", "repository_dir", ",", "ref", ")", ":", "if", "subprocess", ".", "run", "(", "[", "'git'", ",", "'clone'", ",", "repository_url", ",", "repository_dir", "]", ",", "check", "=", "True", ")", ".", "returncode", "!=", "0", ":", "return", "1", "os", ".", "chdir", "(", "repository_dir", ")", "if", "subprocess", ".", "run", "(", "[", "'git'", ",", "'checkout'", ",", "ref", "]", ",", "check", "=", "True", ")", ".", "returncode", "!=", "0", ":", "return", "1", "return", "0" ]
Download git repository and checkout to specified ref
[ "Download", "git", "repository", "and", "checkout", "to", "specified", "ref" ]
[ "'''Download git repository and checkout to specified ref\n\n Args:\n repository_url: Long string repository url\n repository_dir: short string folder name\n ref_name: branch name or commit hash (by default = master)\n\n Returns:\n 0: on success\n -1: if fail\n '''" ]
[ { "param": "repository_url", "type": null }, { "param": "repository_dir", "type": null }, { "param": "ref", "type": null } ]
{ "returns": [ { "docstring": "on success\n1: if fail", "docstring_tokens": [ "on", "success", "1", ":", "if", "fail" ], "type": "0" } ], "raises": [], "params": [ { "identifier": "repository_url", "type": null, "docstring": "Long string repository url", "docstring_tokens": [ "Long", "string", "repository", "url" ], "default": null, "is_optional": null }, { "identifier": "repository_dir", "type": null, "docstring": "short string folder name", "docstring_tokens": [ "short", "string", "folder", "name" ], "default": null, "is_optional": null }, { "identifier": "ref", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [ { "identifier": "ref_name", "type": null, "docstring": "branch name or commit hash (by default = master)", "docstring_tokens": [ "branch", "name", "or", "commit", "hash", "(", "by", "default", "=", "master", ")" ], "default": null, "is_optional": null } ], "others": [] }
import subprocess import os def download_repository(repository_url, repository_dir, ref): if subprocess.run(['git', 'clone', repository_url, repository_dir], check=True).returncode != 0: return 1 os.chdir(repository_dir) if subprocess.run(['git', 'checkout', ref], check=True).returncode != 0: return 1 return 0
361
493
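A hedged call sketch (the URL, directory, and ref below are placeholders, and the function chdirs into the checkout as a side effect):

status = download_repository(
    'https://github.com/example/project.git',  # placeholder repository URL
    'project',                                 # local checkout directory
    'main')                                    # branch name or commit hash
print('ok' if status == 0 else 'failed')

Note that because check=True is passed to subprocess.run, a failed clone or checkout raises subprocess.CalledProcessError before the non-zero return path is ever reached, so callers may want a try/except around the call.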
fbed579aab02c28a20e132336ce36720f6ecbbb8
dravec-ky/mayaCityGen
src/mayaCityGen.py
[ "MIT" ]
Python
dotproduct
<not_specific>
def dotproduct(a,b): """Returns the dot product of two 2D vectors starting at origin. a - [float,float], b - [float,float] return - (float) """ u = (a[0]*b[0]) + (a[1]*b[1]) return u
Returns the dot product of two 2D vectors starting at origin. a - [float,float], b - [float,float] return - (float)
Returns the dot product of two 2D vectors starting at origin. (float)
[ "Returns", "the", "dot", "product", "of", "two", "2D", "vectors", "starting", "at", "origin", ".", "(", "float", ")" ]
def dotproduct(a,b): u = (a[0]*b[0]) + (a[1]*b[1]) return u
[ "def", "dotproduct", "(", "a", ",", "b", ")", ":", "u", "=", "(", "a", "[", "0", "]", "*", "b", "[", "0", "]", ")", "+", "(", "a", "[", "1", "]", "*", "b", "[", "1", "]", ")", "return", "u" ]
Returns the dot product of two 2D vectors starting at origin.
[ "Returns", "the", "dot", "product", "of", "two", "2D", "vectors", "starting", "at", "origin", "." ]
[ "\"\"\"Returns the dot product of two 2D vectors starting at origin.\r\n\r\n a - [float,float], b - [float,float]\r\n\r\n return - (float)\r\n \"\"\"" ]
[ { "param": "a", "type": null }, { "param": "b", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "a", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "b", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def dotproduct(a,b): u = (a[0]*b[0]) + (a[1]*b[1]) return u
363
800
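A one-line check of the arithmetic:

print(dotproduct([1.0, 2.0], [3.0, 4.0]))  # 1*3 + 2*4 = 11.0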
260fb63b9ef8adf236229ae1d945a09c946b6159
jramcast/rasa_core
rasa_core/channels/channel.py
[ "Apache-2.0" ]
Python
button_to_string
<not_specific>
def button_to_string(button, idx=0): """Create a string representation of a button.""" return "{idx}: {title} ({val})".format( idx=idx + 1, title=button.get('title', ''), val=button.get('payload', ''))
Create a string representation of a button.
Create a string representation of a button.
[ "Create", "a", "string", "representation", "of", "a", "button", "." ]
def button_to_string(button, idx=0): return "{idx}: {title} ({val})".format( idx=idx + 1, title=button.get('title', ''), val=button.get('payload', ''))
[ "def", "button_to_string", "(", "button", ",", "idx", "=", "0", ")", ":", "return", "\"{idx}: {title} ({val})\"", ".", "format", "(", "idx", "=", "idx", "+", "1", ",", "title", "=", "button", ".", "get", "(", "'title'", ",", "''", ")", ",", "val", "=", "button", ".", "get", "(", "'payload'", ",", "''", ")", ")" ]
Create a string representation of a button.
[ "Create", "a", "string", "representation", "of", "a", "button", "." ]
[ "\"\"\"Create a string representation of a button.\"\"\"" ]
[ { "param": "button", "type": null }, { "param": "idx", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "button", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "idx", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def button_to_string(button, idx=0): return "{idx}: {title} ({val})".format( idx=idx + 1, title=button.get('title', ''), val=button.get('payload', ''))
364
873
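Sample output, using a made-up button payload in the Rasa style:

button = {'title': 'Yes', 'payload': '/affirm'}
print(button_to_string(button))         # 1: Yes (/affirm)
print(button_to_string(button, idx=1))  # 2: Yes (/affirm)  (idx is zero-based)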
a0a1df43a44f24446d84c85f55fcc9ad37bfc21e
mriverrose/SEC10KTables
edgar_functions.py
[ "MIT" ]
Python
make_timestamp
<not_specific>
def make_timestamp(): """Create a timestamp to insert into each dataframe.""" now = datetime.now() dtString = now.strftime("%Y-%m-%d %H:%M:%S") return dtString
Create a timestamp to insert into each dataframe.
Create a timestamp to insert into each dataframe.
[ "Create", "a", "timestamp", "to", "insert", "into", "each", "dataframe", "." ]
def make_timestamp(): now = datetime.now() dtString = now.strftime("%Y-%m-%d %H:%M:%S") return dtString
[ "def", "make_timestamp", "(", ")", ":", "now", "=", "datetime", ".", "now", "(", ")", "dtString", "=", "now", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "return", "dtString" ]
Create a timestamp to insert into each dataframe.
[ "Create", "a", "timestamp", "to", "insert", "into", "each", "dataframe", "." ]
[ "\"\"\"Create a timestamp to insert into each dataframe.\"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
from datetime import datetime def make_timestamp(): now = datetime.now() dtString = now.strftime("%Y-%m-%d %H:%M:%S") return dtString
365
576
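A quick call shows the output shape:

print(make_timestamp())  # e.g. '2024-05-01 09:30:00' (current local time, second resolution)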
02d04ba9eaa445e144a50299432cab0037c63538
opencinemac/vtc-py
vtc/_framerate.py
[ "MIT" ]
Python
_infer_ntsc
bool
def _infer_ntsc(value: fractions.Fraction, ntsc: Optional[bool]) -> bool: """ _infer_ntsc looks at the parsed fraction value and infers if it should be considered NTSC. """ # If no explicit ntsc value was set, assume that values with a denominator of # 1001 are ntsc. if ntsc is None: if value.denominator == 1001: ntsc = True else: ntsc = False else: ntsc = ntsc return ntsc
_infer_ntsc looks at the parsed fraction value and infers if it should be considered NTSC.
_infer_ntsc looks at the parsed fraction value and infers if it should be considered NTSC.
[ "_infer_ntsc", "looks", "at", "the", "parsed", "fraction", "value", "and", "infers", "if", "it", "should", "be", "considered", "NTSC", "." ]
def _infer_ntsc(value: fractions.Fraction, ntsc: Optional[bool]) -> bool: if ntsc is None: if value.denominator == 1001: ntsc = True else: ntsc = False else: ntsc = ntsc return ntsc
[ "def", "_infer_ntsc", "(", "value", ":", "fractions", ".", "Fraction", ",", "ntsc", ":", "Optional", "[", "bool", "]", ")", "->", "bool", ":", "if", "ntsc", "is", "None", ":", "if", "value", ".", "denominator", "==", "1001", ":", "ntsc", "=", "True", "else", ":", "ntsc", "=", "False", "else", ":", "ntsc", "=", "ntsc", "return", "ntsc" ]
_infer_ntsc looks at the parsed fraction value and infers if it should be considered NTSC.
[ "_infer_ntsc", "looks", "at", "the", "parsed", "fraction", "value", "and", "infers", "if", "it", "should", "be", "considered", "NTSC", "." ]
[ "\"\"\"\n _infer_ntsc looks at the parsed fraction value and infers if it should be considered\n NTSC.\n \"\"\"", "# If no explicit ntsc value was set, assume that values with a denominator of", "# 1001 are ntsc." ]
[ { "param": "value", "type": "fractions.Fraction" }, { "param": "ntsc", "type": "Optional[bool]" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": "fractions.Fraction", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ntsc", "type": "Optional[bool]", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _infer_ntsc(value: fractions.Fraction, ntsc: Optional[bool]) -> bool: if ntsc is None: if value.denominator == 1001: ntsc = True else: ntsc = False else: ntsc = ntsc return ntsc
366
455
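A few checks of the inference rule:

import fractions

print(_infer_ntsc(fractions.Fraction(30000, 1001), None))  # True (29.97-style NTSC rate)
print(_infer_ntsc(fractions.Fraction(24, 1), None))        # False
print(_infer_ntsc(fractions.Fraction(24, 1), True))        # True (an explicit flag wins)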
c187f01c5b1dce2fe09491ae6d2c8a50655b76fb
chrisjsewell/pyScss
scss/selector.py
[ "MIT" ]
Python
_weave_conflicting_selectors
null
def _weave_conflicting_selectors(prefixes, a, b, suffix=()): """Part of the selector merge algorithm above. Not useful on its own. Pay no attention to the man behind the curtain. """ # OK, what this actually does: given a list of selector chains, two # "conflicting" selector chains, and an optional suffix, return a new list # of chains like this: # prefix[0] + a + b + suffix, # prefix[0] + b + a + suffix, # prefix[1] + a + b + suffix, # ... # In other words, this just appends a new chain to each of a list of given # chains, except that the new chain might be the superposition of two # other incompatible chains. both = a and b for prefix in prefixes: yield prefix + a + b + suffix if both: # Only use both orderings if there's an actual conflict! yield prefix + b + a + suffix
Part of the selector merge algorithm above. Not useful on its own. Pay no attention to the man behind the curtain.
Part of the selector merge algorithm above. Not useful on its own. Pay no attention to the man behind the curtain.
[ "Part", "of", "the", "selector", "merge", "algorithm", "above", ".", "Not", "useful", "on", "its", "own", ".", "Pay", "no", "attention", "to", "the", "man", "behind", "the", "curtain", "." ]
def _weave_conflicting_selectors(prefixes, a, b, suffix=()): both = a and b for prefix in prefixes: yield prefix + a + b + suffix if both: yield prefix + b + a + suffix
[ "def", "_weave_conflicting_selectors", "(", "prefixes", ",", "a", ",", "b", ",", "suffix", "=", "(", ")", ")", ":", "both", "=", "a", "and", "b", "for", "prefix", "in", "prefixes", ":", "yield", "prefix", "+", "a", "+", "b", "+", "suffix", "if", "both", ":", "yield", "prefix", "+", "b", "+", "a", "+", "suffix" ]
Part of the selector merge algorithm above.
[ "Part", "of", "the", "selector", "merge", "algorithm", "above", "." ]
[ "\"\"\"Part of the selector merge algorithm above. Not useful on its own. Pay\n no attention to the man behind the curtain.\n \"\"\"", "# OK, what this actually does: given a list of selector chains, two", "# \"conflicting\" selector chains, and an optional suffix, return a new list", "# of chains like this:", "# prefix[0] + a + b + suffix,", "# prefix[0] + b + a + suffix,", "# prefix[1] + a + b + suffix,", "# ...", "# In other words, this just appends a new chain to each of a list of given", "# chains, except that the new chain might be the superposition of two", "# other incompatible chains.", "# Only use both orderings if there's an actual conflict!" ]
[ { "param": "prefixes", "type": null }, { "param": "a", "type": null }, { "param": "b", "type": null }, { "param": "suffix", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "prefixes", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "a", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "b", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _weave_conflicting_selectors(prefixes, a, b, suffix=()): both = a and b for prefix in prefixes: yield prefix + a + b + suffix if both: yield prefix + b + a + suffix
367
13
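A small illustration with made-up selector chains (tuples of strings stand in for parsed selector elements):

print(list(_weave_conflicting_selectors([('.p',)], ('.a',), ('.b',))))
# [('.p', '.a', '.b'), ('.p', '.b', '.a')]  (both orderings, since a and b conflict)

print(list(_weave_conflicting_selectors([('.p',)], ('.a',), ())))
# [('.p', '.a')]  (one side empty, so only one ordering)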
3dd10c787113fc5d06677637a837feca948424c7
allaparthi/monorail
appengine/monorail/framework/csv_helpers.py
[ "BSD-3-Clause" ]
Python
EscapeCSV
<not_specific>
def EscapeCSV(s): """Return a version of string S that is safe as part of a CSV file.""" if s is None: return '' if isinstance(s, types.StringTypes): s = s.strip().replace('"', '""') # Prefix any formula cells because some spreadsheets have built-in # formula functions that can actually have side-effects on the user's # computer. if s.startswith(('=', '-', '+', '@')): s = "'" + s return s
Return a version of string S that is safe as part of a CSV file.
Return a version of string S that is safe as part of a CSV file.
[ "Return", "a", "version", "of", "string", "S", "that", "is", "safe", "as", "part", "of", "a", "CSV", "file", "." ]
def EscapeCSV(s): if s is None: return '' if isinstance(s, types.StringTypes): s = s.strip().replace('"', '""') if s.startswith(('=', '-', '+', '@')): s = "'" + s return s
[ "def", "EscapeCSV", "(", "s", ")", ":", "if", "s", "is", "None", ":", "return", "''", "if", "isinstance", "(", "s", ",", "types", ".", "StringTypes", ")", ":", "s", "=", "s", ".", "strip", "(", ")", ".", "replace", "(", "'\"'", ",", "'\"\"'", ")", "if", "s", ".", "startswith", "(", "(", "'='", ",", "'-'", ",", "'+'", ",", "'@'", ")", ")", ":", "s", "=", "\"'\"", "+", "s", "return", "s" ]
Return a version of string S that is safe as part of a CSV file.
[ "Return", "a", "version", "of", "string", "S", "that", "is", "safe", "as", "part", "of", "a", "CSV", "file", "." ]
[ "\"\"\"Return a version of string S that is safe as part of a CSV file.\"\"\"", "# Prefix any formula cells because some spreadsheets have built-in", "# formila functions that can actually have side-effects on the user's", "# computer." ]
[ { "param": "s", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "s", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import types def EscapeCSV(s): if s is None: return '' if isinstance(s, types.StringTypes): s = s.strip().replace('"', '""') if s.startswith(('=', '-', '+', '@')): s = "'" + s return s
368
295
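This helper targets Python 2: types.StringTypes does not exist on Python 3, where isinstance(s, str) would be the equivalent check. Under Python 2 the expected behavior is:

print(EscapeCSV('say "hi"'))     # say ""hi""  (quotes doubled)
print(EscapeCSV('=SUM(A1:A2)'))  # '=SUM(A1:A2)  (formula prefixed with a quote)
print(EscapeCSV(None))           # empty string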
f9643c2275cb136bbd34c6ab242540173a2cf270
nkrishnaswami/dspl
tools/dspltools/packages/dspllib/validation/xml_validation.py
[ "BSD-3-Clause" ]
Python
GetErrorLineNumber
<not_specific>
def GetErrorLineNumber(error_string): """Parse out the line number from a minixsv error message. Args: error_string: String returned by minixsv exception Returns: Integer line number on which error was detected """ line_match = re.search(': line ([0-9]+)', error_string) return int(line_match.group(1))
Parse out the line number from a minixsv error message. Args: error_string: String returned by minixsv exception Returns: Integer line number on which error was detected
Parse out the line number from a minixsv error message.
[ "Parse", "out", "the", "line", "number", "from", "a", "minixsv", "error", "message", "." ]
def GetErrorLineNumber(error_string): line_match = re.search(': line ([0-9]+)', error_string) return int(line_match.group(1))
[ "def", "GetErrorLineNumber", "(", "error_string", ")", ":", "line_match", "=", "re", ".", "search", "(", "': line ([0-9]+)'", ",", "error_string", ")", "return", "int", "(", "line_match", ".", "group", "(", "1", ")", ")" ]
Parse out the line number from a minixsv error message.
[ "Parse", "out", "the", "line", "number", "from", "a", "minixsv", "error", "message", "." ]
[ "\"\"\"Parse out the line number from a minixsv error message.\n\n Args:\n error_string: String returned by minixsv exception\n\n Returns:\n Integer line number on which error was detected\n \"\"\"" ]
[ { "param": "error_string", "type": null } ]
{ "returns": [ { "docstring": "Integer line number on which error was detected", "docstring_tokens": [ "Integer", "line", "number", "on", "which", "error", "was", "detected" ], "type": null } ], "raises": [], "params": [ { "identifier": "error_string", "type": null, "docstring": "String returned by minixsv exception", "docstring_tokens": [ "String", "returned", "by", "minixsv", "exception" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def GetErrorLineNumber(error_string): line_match = re.search(': line ([0-9]+)', error_string) return int(line_match.group(1))
369
154
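A made-up message in the minixsv style shows the extraction:

msg = 'SchemaError: value does not match pattern: line 42, column 7'
print(GetErrorLineNumber(msg))  # 42

Messages without a ': line N' fragment would leave line_match as None and raise AttributeError, so callers should only pass minixsv exception strings.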
407a74fa3e877fc676b731e2da712a0ee09eb36f
Harish-developments/jspy
jspy.py
[ "MIT" ]
Python
__py_if_condition
<not_specific>
def __py_if_condition(statements,lineno): """returns a valid python if statement""" if_condition = statements[statements.find('(')+1:statements.find(')')] return statements[:statements.find('if')]\ +f"if {if_condition}:\n".replace('&&',' and ').replace('||',' or ').replace('!',' not ').replace('===','==').replace('{','').replace('/','//')
returns a valid python if statement
returns a valid python if statement
[ "returns", "a", "valid", "python", "if", "statement" ]
def __py_if_condition(statements,lineno): if_condition = statements[statements.find('(')+1:statements.find(')')] return statements[:statements.find('if')]\ +f"if {if_condition}:\n".replace('&&',' and ').replace('||',' or ').replace('!',' not ').replace('===','==').replace('{','').replace('/','//')
[ "def", "__py_if_condition", "(", "statements", ",", "lineno", ")", ":", "if_condition", "=", "statements", "[", "statements", ".", "find", "(", "'('", ")", "+", "1", ":", "statements", ".", "find", "(", "')'", ")", "]", "return", "statements", "[", ":", "statements", ".", "find", "(", "'if'", ")", "]", "+", "f\"if {if_condition}:\\n\"", ".", "replace", "(", "'&&'", ",", "' and '", ")", ".", "replace", "(", "'||'", ",", "' or '", ")", ".", "replace", "(", "'!'", ",", "' not '", ")", ".", "replace", "(", "'==='", ",", "'=='", ")", ".", "replace", "(", "'{'", ",", "''", ")", ".", "replace", "(", "'/'", ",", "'//'", ")" ]
returns a valid python if statement
[ "returns", "a", "valid", "python", "if", "statement" ]
[ "\"\"\"returns a valid python if statement\"\"\"" ]
[ { "param": "statements", "type": null }, { "param": "lineno", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "statements", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "lineno", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def __py_if_condition(statements,lineno): if_condition = statements[statements.find('(')+1:statements.find(')')] return statements[:statements.find('if')]\ +f"if {if_condition}:\n".replace('&&',' and ').replace('||',' or ').replace('!',' not ').replace('===','==').replace('{','').replace('/','//')
370
406
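The translation is a naive string substitution, so it handles simple conditions like the one below (a bare '!=' would be mangled by the '!' replacement):

print(repr(__py_if_condition('if (a && b) {', 1)))
# 'if a  and  b:\n'  (the doubled spaces come from the straight '&&' substitution)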
1b3186c99a60818dc9d24b438538877520aa1347
Z2PackDev/bands-inspect
tests/conftest.py
[ "Apache-2.0" ]
Python
compare_data
<not_specific>
def compare_data(request, test_name, scope="session"): # pylint: disable=unused-argument,redefined-outer-name """Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function.""" def inner(compare_fct, data, tag=None): full_name = test_name + (tag or '') # get rid of json-specific quirks # store as string because I cannot add the decoder to the pytest cache data_str = json.dumps(data) data = json.loads(data_str) val = json.loads(request.config.cache.get(full_name, 'null')) if val is None: request.config.cache.set(full_name, data_str) raise ValueError('Reference data does not exist.') assert compare_fct(val, data) return inner
Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function.
Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function.
[ "Returns", "a", "function", "which", "either", "saves", "some", "data", "to", "a", "file", "or", "(", "if", "that", "file", "exists", "already", ")", "compares", "it", "to", "pre", "-", "existing", "data", "using", "a", "given", "comparison", "function", "." ]
def compare_data(request, test_name, scope="session"): def inner(compare_fct, data, tag=None): full_name = test_name + (tag or '') data_str = json.dumps(data) data = json.loads(data_str) val = json.loads(request.config.cache.get(full_name, 'null')) if val is None: request.config.cache.set(full_name, data_str) raise ValueError('Reference data does not exist.') assert compare_fct(val, data) return inner
[ "def", "compare_data", "(", "request", ",", "test_name", ",", "scope", "=", "\"session\"", ")", ":", "def", "inner", "(", "compare_fct", ",", "data", ",", "tag", "=", "None", ")", ":", "full_name", "=", "test_name", "+", "(", "tag", "or", "''", ")", "data_str", "=", "json", ".", "dumps", "(", "data", ")", "data", "=", "json", ".", "loads", "(", "data_str", ")", "val", "=", "json", ".", "loads", "(", "request", ".", "config", ".", "cache", ".", "get", "(", "full_name", ",", "'null'", ")", ")", "if", "val", "is", "None", ":", "request", ".", "config", ".", "cache", ".", "set", "(", "full_name", ",", "data_str", ")", "raise", "ValueError", "(", "'Reference data does not exist.'", ")", "assert", "compare_fct", "(", "val", ",", "data", ")", "return", "inner" ]
Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function.
[ "Returns", "a", "function", "which", "either", "saves", "some", "data", "to", "a", "file", "or", "(", "if", "that", "file", "exists", "already", ")", "compares", "it", "to", "pre", "-", "existing", "data", "using", "a", "given", "comparison", "function", "." ]
[ "# pylint: disable=unused-argument,redefined-outer-name", "\"\"\"Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function.\"\"\"", "# get rid of json-specific quirks", "# store as string because I cannot add the decoder to the pytest cache" ]
[ { "param": "request", "type": null }, { "param": "test_name", "type": null }, { "param": "scope", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "request", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "test_name", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "scope", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def compare_data(request, test_name, scope="session"): def inner(compare_fct, data, tag=None): full_name = test_name + (tag or '') data_str = json.dumps(data) data = json.loads(data_str) val = json.loads(request.config.cache.get(full_name, 'null')) if val is None: request.config.cache.set(full_name, data_str) raise ValueError('Reference data does not exist.') assert compare_fct(val, data) return inner
371
480
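Outside a pytest run, the request fixture can be faked with a minimal cache object; FakeCache and the SimpleNamespace wiring below are assumptions for the demo, not pytest API:

from types import SimpleNamespace

class FakeCache:  # stand-in for pytest's request.config.cache
    def __init__(self):
        self._store = {}
    def get(self, key, default):
        return self._store.get(key, default)
    def set(self, key, value):
        self._store[key] = value

request = SimpleNamespace(config=SimpleNamespace(cache=FakeCache()))
check = compare_data(request, 'test_demo')
try:
    check(lambda ref, new: ref == new, {'x': 1})  # first run stores the reference...
except ValueError:
    pass                                          # ...and raises, by design
check(lambda ref, new: ref == new, {'x': 1})      # second run compares and passes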
98433d154c009b04a48f5d9aabb7107f5e08386a
tang88888888/naiveproxy
src/tools/grit/grit/util.py
[ "BSD-3-Clause" ]
Python
_GetCommonBaseDirectory
<not_specific>
def _GetCommonBaseDirectory(*args): """Returns the common prefix directory for the given paths Args: The list of paths (at least one of which should be a directory) """ prefix = os.path.commonprefix(args) # prefix is a character-by-character prefix (i.e. it does not end # on a directory bound, so this code fixes that) # if the prefix ends with the separator, then it is perfect. if len(prefix) > 0 and prefix[-1] == os.path.sep: return prefix # We need to loop through all paths or else we can get # tripped up by "c:\a" and "c:\abc". The common prefix # is "c:\a" which is a directory and looks good with # respect to the first directory but it is clear that # it isn't a common directory when the second path is # examined. for path in args: assert len(path) >= len(prefix) # If the prefix is the same length as the path, # then the prefix must be a directory (since one # of the arguments should be a directory). if path == prefix: continue # if the character after the prefix in the path # is the separator, then the prefix appears to be a # valid directory as well for the given path if path[len(prefix)] == os.path.sep: continue # Otherwise, the prefix is not a directory, so it needs # to be shortened to be one index_sep = prefix.rfind(os.path.sep) # We use "index_sep + 1" because it includes the final sep # and it handles the case when the index_sep is -1 as well prefix = prefix[:index_sep + 1] # At this point we backed up to a directory bound which is # common to all paths, so we can quit going through all of # the paths. break return prefix
Returns the common prefix directory for the given paths Args: The list of paths (at least one of which should be a directory)
Returns the common prefix directory for the given paths Args: The list of paths (at least one of which should be a directory)
[ "Returns", "the", "common", "prefix", "directory", "for", "the", "given", "paths", "Args", ":", "The", "list", "of", "paths", "(", "at", "least", "one", "of", "which", "should", "be", "a", "directory", ")" ]
def _GetCommonBaseDirectory(*args): prefix = os.path.commonprefix(args) if len(prefix) > 0 and prefix[-1] == os.path.sep: return prefix for path in args: assert len(path) >= len(prefix) if path == prefix: continue if path[len(prefix)] == os.path.sep: continue index_sep = prefix.rfind(os.path.sep) prefix = prefix[:index_sep + 1] break return prefix
[ "def", "_GetCommonBaseDirectory", "(", "*", "args", ")", ":", "prefix", "=", "os", ".", "path", ".", "commonprefix", "(", "args", ")", "if", "len", "(", "prefix", ")", ">", "0", "and", "prefix", "[", "-", "1", "]", "==", "os", ".", "path", ".", "sep", ":", "return", "prefix", "for", "path", "in", "args", ":", "assert", "len", "(", "path", ")", ">=", "len", "(", "prefix", ")", "if", "path", "==", "prefix", ":", "continue", "if", "path", "[", "len", "(", "prefix", ")", "]", "==", "os", ".", "path", ".", "sep", ":", "continue", "index_sep", "=", "prefix", ".", "rfind", "(", "os", ".", "path", ".", "sep", ")", "prefix", "=", "prefix", "[", ":", "index_sep", "+", "1", "]", "break", "return", "prefix" ]
Returns the common prefix directory for the given paths Args: The list of paths (at least one of which should be a directory)
[ "Returns", "the", "common", "prefix", "directory", "for", "the", "given", "paths", "Args", ":", "The", "list", "of", "paths", "(", "at", "least", "one", "of", "which", "should", "be", "a", "directory", ")" ]
[ "\"\"\"Returns the common prefix directory for the given paths\n\n Args:\n The list of paths (at least one of which should be a directory)\n \"\"\"", "# prefix is a character-by-character prefix (i.e. it does not end", "# on a directory bound, so this code fixes that)", "# if the prefix ends with the separator, then it is prefect.", "# We need to loop through all paths or else we can get", "# tripped up by \"c:\\a\" and \"c:\\abc\". The common prefix", "# is \"c:\\a\" which is a directory and looks good with", "# respect to the first directory but it is clear that", "# isn't a common directory when the second path is", "# examined.", "# If the prefix the same length as the path,", "# then the prefix must be a directory (since one", "# of the arguements should be a directory).", "# if the character after the prefix in the path", "# is the separator, then the prefix appears to be a", "# valid a directory as well for the given path", "# Otherwise, the prefix is not a directory, so it needs", "# to be shortened to be one", "# The use \"index_sep + 1\" because it includes the final sep", "# and it handles the case when the index_sep is -1 as well", "# At this point we backed up to a directory bound which is", "# common to all paths, so we can quit going through all of", "# the paths." ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import os def _GetCommonBaseDirectory(*args): prefix = os.path.commonprefix(args) if len(prefix) > 0 and prefix[-1] == os.path.sep: return prefix for path in args: assert len(path) >= len(prefix) if path == prefix: continue if path[len(prefix)] == os.path.sep: continue index_sep = prefix.rfind(os.path.sep) prefix = prefix[:index_sep + 1] break return prefix
372
509
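Two POSIX-style examples (os.path.sep is '/' there); the character-level prefix '/srv/ab' gets trimmed back to a directory bound:

print(_GetCommonBaseDirectory('/srv/abc/file.txt', '/srv/ab'))  # /srv/
print(_GetCommonBaseDirectory('/srv/app/', '/srv/app/logs'))    # /srv/app/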
a9cbf5a752099be378dd9c6e203e37bb366f5a97
jitterjuice/towerlib
towerlib/entities/core.py
[ "MIT" ]
Python
validate_json
<not_specific>
def validate_json(value): """Validates that the provided value is a valid json.""" try: json.loads(value) return True except ValueError: return False
Validates that the provided value is a valid json.
Validates that the provided value is a valid json.
[ "Validates", "that", "the", "provided", "value", "is", "a", "valid", "json", "." ]
def validate_json(value): try: json.loads(value) return True except ValueError: return False
[ "def", "validate_json", "(", "value", ")", ":", "try", ":", "json", ".", "loads", "(", "value", ")", "return", "True", "except", "ValueError", ":", "return", "False" ]
Validates that the provided value is a valid json.
[ "Validates", "that", "the", "provided", "value", "is", "a", "valid", "json", "." ]
[ "\"\"\"Validates that the provided value is a valid json.\"\"\"" ]
[ { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import json def validate_json(value): try: json.loads(value) return True except ValueError: return False
374
179
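Two quick checks:

print(validate_json('{"name": "tower"}'))  # True
print(validate_json('{broken'))            # False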
169bdf3fb18db94df4c2311d9c6ec1ab1568d895
shad7/tvdbapi_client
tvdbapi_client/options.py
[ "MIT" ]
Python
_make_opt_list
<not_specific>
def _make_opt_list(opts, group): """Generate a list of tuple containing group, options :param opts: option lists associated with a group :type opts: list :param group: name of an option group :type group: str :return: a list of (group_name, opts) tuples :rtype: list """ import copy import itertools _opts = [(group, list(itertools.chain(*opts)))] return [(g, copy.deepcopy(o)) for g, o in _opts]
Generate a list of tuple containing group, options :param opts: option lists associated with a group :type opts: list :param group: name of an option group :type group: str :return: a list of (group_name, opts) tuples :rtype: list
Generate a list of tuple containing group, options
[ "Generate", "a", "list", "of", "tuple", "containing", "group", "options" ]
def _make_opt_list(opts, group): import copy import itertools _opts = [(group, list(itertools.chain(*opts)))] return [(g, copy.deepcopy(o)) for g, o in _opts]
[ "def", "_make_opt_list", "(", "opts", ",", "group", ")", ":", "import", "copy", "import", "itertools", "_opts", "=", "[", "(", "group", ",", "list", "(", "itertools", ".", "chain", "(", "*", "opts", ")", ")", ")", "]", "return", "[", "(", "g", ",", "copy", ".", "deepcopy", "(", "o", ")", ")", "for", "g", ",", "o", "in", "_opts", "]" ]
Generate a list of tuple containing group, options
[ "Generate", "a", "list", "of", "tuple", "containing", "group", "options" ]
[ "\"\"\"Generate a list of tuple containing group, options\n\n :param opts: option lists associated with a group\n :type opts: list\n :param group: name of an option group\n :type group: str\n :return: a list of (group_name, opts) tuples\n :rtype: list\n \"\"\"" ]
[ { "param": "opts", "type": null }, { "param": "group", "type": null } ]
{ "returns": [ { "docstring": "a list of (group_name, opts) tuples", "docstring_tokens": [ "a", "list", "of", "(", "group_name", "opts", ")", "tuples" ], "type": "list" } ], "raises": [], "params": [ { "identifier": "opts", "type": null, "docstring": "option lists associated with a group", "docstring_tokens": [ "option", "lists", "associated", "with", "a", "group" ], "default": null, "is_optional": null }, { "identifier": "group", "type": null, "docstring": "name of an option group", "docstring_tokens": [ "name", "of", "an", "option", "group" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import copy import itertools def _make_opt_list(opts, group): import copy import itertools _opts = [(group, list(itertools.chain(*opts)))] return [(g, copy.deepcopy(o)) for g, o in _opts]
376
191
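Plain strings stand in for oslo.config Opt objects here, since the helper only chains and deep-copies whatever the inner lists hold:

opts = [['opt_a', 'opt_b'], ['opt_c']]
print(_make_opt_list(opts, 'tvdb'))  # [('tvdb', ['opt_a', 'opt_b', 'opt_c'])]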
493f44505676a6b3afab2088fcf53b369d3ae4ac
onealbao/LDax
dax/XnatUtils.py
[ "MIT" ]
Python
extract_exp
<not_specific>
def extract_exp(expression, full_regex=False): """Extract the expression with or without full_regex. :param expression: string to filter :param full_regex: using full regex :return: regex Object from re package """ if not full_regex: exp = fnmatch.translate(expression) else: exp = expression return re.compile(exp)
Extract the expression with or without full_regex. :param expression: string to filter :param full_regex: using full regex :return: regex Object from re package
Extract the expression with or without full_regex.
[ "Extract", "the", "expression", "with", "or", "without", "full_regex", "." ]
def extract_exp(expression, full_regex=False): if not full_regex: exp = fnmatch.translate(expression) else: exp = expression return re.compile(exp)
[ "def", "extract_exp", "(", "expression", ",", "full_regex", "=", "False", ")", ":", "if", "not", "full_regex", ":", "exp", "=", "fnmatch", ".", "translate", "(", "expression", ")", "else", ":", "exp", "=", "expression", "return", "re", ".", "compile", "(", "exp", ")" ]
Extract the expression with or without full_regex.
[ "Extract", "the", "expression", "with", "or", "without", "full_regex", "." ]
[ "\"\"\"Extract the expression with or without full_regex.\n\n :param expression: string to filter\n :param full_regex: using full regex\n :return: regex Object from re package\n \"\"\"" ]
[ { "param": "expression", "type": null }, { "param": "full_regex", "type": null } ]
{ "returns": [ { "docstring": "regex Object from re package", "docstring_tokens": [ "regex", "Object", "from", "re", "package" ], "type": null } ], "raises": [], "params": [ { "identifier": "expression", "type": null, "docstring": "string to filter", "docstring_tokens": [ "string", "to", "filter" ], "default": null, "is_optional": null }, { "identifier": "full_regex", "type": null, "docstring": "using full regex", "docstring_tokens": [ "using", "full", "regex" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import fnmatch import re def extract_exp(expression, full_regex=False): if not full_regex: exp = fnmatch.translate(expression) else: exp = expression return re.compile(exp)
377
251
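Glob patterns are translated via fnmatch, while full_regex=True compiles the expression as-is:

pattern = extract_exp('proc_*')
print(bool(pattern.match('proc_fmri')))   # True
print(bool(pattern.match('assessor_x')))  # False

raw = extract_exp(r'^proc_\d+$', full_regex=True)
print(bool(raw.match('proc_123')))        # True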
aca95e0e54e5ca1a68e0599cbacc7db0b39e9970
NURDspace/jsonbot
jsb/plugs/common/topic.py
[ "MIT" ]
Python
checktopicmode
<not_specific>
def checktopicmode(bot, ievent): """ callback for change in channel topic mode """ chan = ievent.channel mode = ievent.chan.data.mode if mode and 't' in mode: if chan not in bot.state['opchan']: ievent.reply("i'm not op on %s" % chan) return 0 return 1
callback for change in channel topic mode
callback for change in channel topic mode
[ "callback", "for", "change", "in", "channel", "topic", "mode" ]
def checktopicmode(bot, ievent): chan = ievent.channel mode = ievent.chan.data.mode if mode and 't' in mode: if chan not in bot.state['opchan']: ievent.reply("i'm not op on %s" % chan) return 0 return 1
[ "def", "checktopicmode", "(", "bot", ",", "ievent", ")", ":", "chan", "=", "ievent", ".", "channel", "mode", "=", "ievent", ".", "chan", ".", "data", ".", "mode", "if", "mode", "and", "'t'", "in", "mode", ":", "if", "chan", "not", "in", "bot", ".", "state", "[", "'opchan'", "]", ":", "ievent", ".", "reply", "(", "\"i'm not op on %s\"", "%", "chan", ")", "return", "0", "return", "1" ]
callback for change in channel topic mode
[ "callback", "for", "change", "in", "channel", "topic", "mode" ]
[ "\"\"\" callback for change in channel topic mode \"\"\"" ]
[ { "param": "bot", "type": null }, { "param": "ievent", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "bot", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ievent", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def checktopicmode(bot, ievent): chan = ievent.channel mode = ievent.chan.data.mode if mode and 't' in mode: if chan not in bot.state['opchan']: ievent.reply("i'm not op on %s" % chan) return 0 return 1
378
606
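SimpleNamespace stand-ins for the bot and event objects are enough to exercise the check (these mocks are assumptions for the demo, not the jsonbot API):

from types import SimpleNamespace

ievent = SimpleNamespace(
    channel='#chan',
    chan=SimpleNamespace(data=SimpleNamespace(mode='nt')),
    reply=print)
bot = SimpleNamespace(state={'opchan': ['#chan']})
print(checktopicmode(bot, ievent))  # 1  (channel is +t, but the bot has op there)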
1ce74aa4b4c1f68d80f10d1537b4ca5114a8a559
DavidNemeskey/bert
my_run_pretraining.py
[ "Apache-2.0" ]
Python
read_command_file
<not_specific>
def read_command_file(cmd_file): """ Reads the command line file and returns the command, as well as the output directory. """ with open(cmd_file) as inf: cmd = inf.readline().strip() output_dir = re.search(r'--output_dir\s*=\s*([^ ]+)', cmd).group(1).rstrip('/') return cmd, output_dir
Reads the command line file and returns the command, as well as the output directory.
Reads the command line file and returns the command, as well as the output directory.
[ "Reads", "the", "command", "line", "file", "and", "returns", "the", "command", "as", "well", "as", "the", "output", "directory", "." ]
def read_command_file(cmd_file): with open(cmd_file) as inf: cmd = inf.readline().strip() output_dir = re.search(r'--output_dir\s*=\s*([^ ]+)', cmd).group(1).rstrip('/') return cmd, output_dir
[ "def", "read_command_file", "(", "cmd_file", ")", ":", "with", "open", "(", "cmd_file", ")", "as", "inf", ":", "cmd", "=", "inf", ".", "readline", "(", ")", ".", "strip", "(", ")", "output_dir", "=", "re", ".", "search", "(", "r'--output_dir\\s*=\\s*([^ ]+)'", ",", "cmd", ")", ".", "group", "(", "1", ")", ".", "rstrip", "(", "'/'", ")", "return", "cmd", ",", "output_dir" ]
Reads the command line file and returns the command, as well as the output directory.
[ "Reads", "the", "command", "line", "file", "and", "returns", "the", "command", "as", "well", "as", "the", "output", "directory", "." ]
[ "\"\"\"\n Reads the command line file and returns the command, as well as the output\n directory.\n \"\"\"" ]
[ { "param": "cmd_file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cmd_file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def read_command_file(cmd_file): with open(cmd_file) as inf: cmd = inf.readline().strip() output_dir = re.search(r'--output_dir\s*=\s*([^ ]+)', cmd).group(1).rstrip('/') return cmd, output_dir
379
345
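A self-contained check using a temporary command file (the command line is a made-up sample):

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('python run_pretraining.py --output_dir=/tmp/bert_out/ --num_steps=100\n')
cmd, output_dir = read_command_file(f.name)
print(output_dir)  # /tmp/bert_out  (trailing slash stripped)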
be2c86febfcc236b7423366664c39cc848358e09
wotchin/openGauss-server
src/manager/om/script/local/LocalCollect.py
[ "MulanPSL-1.0" ]
Python
appendCommand
null
def appendCommand(cmds, newCommand, dataFileName): """ function: make up the commands into the array input : cmds, newCommand, dataFileName output: NA """ # Execute the command and output to the specified file cmds.append("echo '\n************************************\n* " \ "%s \n" \ "************************************' >> %s 2>&1" % \ (newCommand, dataFileName)) cmds.append("%s >> %s 2>&1" % (newCommand, dataFileName))
function: make up the commands into the array input : cmds, newCommand, dataFileName output: NA
make up the commands into the array input : cmds, newCommand, dataFileName output: NA
[ "make", "up", "the", "commands", "into", "the", "array", "input", ":", "cmds", "newCommand", "dataFileName", "output", ":", "NA" ]
def appendCommand(cmds, newCommand, dataFileName): cmds.append("echo '\n************************************\n* " \ "%s \n" \ "************************************' >> %s 2>&1" % \ (newCommand, dataFileName)) cmds.append("%s >> %s 2>&1" % (newCommand, dataFileName))
[ "def", "appendCommand", "(", "cmds", ",", "newCommand", ",", "dataFileName", ")", ":", "cmds", ".", "append", "(", "\"echo '\\n************************************\\n* \"", "\"%s \\n\"", "\"************************************' >> %s 2>&1\"", "%", "(", "newCommand", ",", "dataFileName", ")", ")", "cmds", ".", "append", "(", "\"%s >> %s 2>&1\"", "%", "(", "newCommand", ",", "dataFileName", ")", ")" ]
function: make up the commands into the array input : cmds, newCommand, dataFileName output: NA
[ "function", ":", "make", "up", "the", "commands", "into", "the", "array", "input", ":", "cmds", "newCommand", "dataFileName", "output", ":", "NA" ]
[ "\"\"\"\n function: make up the commands into the array\n input : cmds, newCommand, dataFileName\n output: NA\n \"\"\"", "# Execute the command and output to the specified file" ]
[ { "param": "cmds", "type": null }, { "param": "newCommand", "type": null }, { "param": "dataFileName", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cmds", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "newCommand", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dataFileName", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def appendCommand(cmds, newCommand, dataFileName): cmds.append("echo '\n************************************\n* " \ "%s \n" \ "************************************' >> %s 2>&1" % \ (newCommand, dataFileName)) cmds.append("%s >> %s 2>&1" % (newCommand, dataFileName))
380
705
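A quick look at what gets appended (paths are placeholders):

cmds = []
appendCommand(cmds, 'df -h', '/tmp/collect.log')
print(cmds[1])  # df -h >> /tmp/collect.log 2>&1  (cmds[0] is the echo banner)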
a842aaebf1e559ea74544dd1e8ed592c82211f11
stefanpf/py2gc
py2gc/parsers.py
[ "MIT" ]
Python
parse_path
<not_specific>
def parse_path(path): ''' Currently just returns the given path if it exists, or the default if it doesn't. ''' print('Parsing path...') parsed_path = str(path) if os.path.isdir(path): return parsed_path else: print('''Cannot parse path to credentials. Will try to get credentials from default path $home/.py2gc/.''')
Currently just returns the given path if it exists, or the default if it doesn't.
Currently just returns the given path if it exists, or the default if it doesn't.
[ "Currently", "just", "returns", "the", "given", "path", "if", "it", "exists", "or", "the", "default", "if", "it", "doesn", "'", "t", "." ]
def parse_path(path): print('Parsing path...') parsed_path = str(path) if os.path.isdir(path): return parsed_path else: print('''Cannot parse path to credentials. Will try to get credentials from default path $home/.py2gc/.''')
[ "def", "parse_path", "(", "path", ")", ":", "print", "(", "'Parsing path...'", ")", "parsed_path", "=", "str", "(", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "parsed_path", "else", ":", "print", "(", "'''Cannot parse path to credentials. Will try to get credentials\n from default path $home/.py2gc/.'''", ")" ]
Currently just returns the given path if it exists, or the default if it doesn't.
[ "Currently", "just", "returns", "the", "given", "path", "if", "it", "exists", "or", "the", "default", "if", "it", "doesn", "'", "t", "." ]
[ "'''\n Currently just returns the given path if it exists,\n or the default if it doesn't.\n '''" ]
[ { "param": "path", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "path", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def parse_path(path): print('Parsing path...') parsed_path = str(path) if os.path.isdir(path): return parsed_path else: print('''Cannot parse path to credentials. Will try to get credentials from default path $home/.py2gc/.''')
381
791
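Two illustrative calls:

import os

print(parse_path(os.path.expanduser('~')))  # prints 'Parsing path...' and returns the path
print(parse_path('/no/such/dir'))           # prints the default-path notice and returns None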
384a5ad37bf2d4d4d4760fff465e584c6818f342
nwthomas/code-challenges
src/daily-coding-problem/medium/synonyms/synonyms.py
[ "MIT" ]
Python
are_deeply_linked_synonyms
<not_specific>
def are_deeply_linked_synonyms(reference_dict, first_word, second_word): """Takes in two words with a reference dictionary and iteratively checks if the words (with any in between) are synonyms""" if first_word not in reference_dict or second_word not in reference_dict: return False tracker = {} stack = [word for word in reference_dict[first_word]] while len(stack) > 0: current_word = stack.pop() if current_word in tracker: continue else: tracker[current_word] = True current_word_synonyms = reference_dict[current_word] for word in current_word_synonyms: if word == second_word: return True elif word not in tracker: stack.append(word) return False
Takes in two words with a reference dictionary and iteratively checks if the words (with any in between) are synonyms
Takes in two words with a reference dictionary and iteratively checks if the words (with any in between) are synonyms
[ "Takes", "in", "two", "words", "with", "a", "reference", "dictionary", "and", "iteratively", "checks", "if", "the", "words", "(", "with", "any", "in", "between", ")", "are", "synonyms" ]
def are_deeply_linked_synonyms(reference_dict, first_word, second_word): if first_word not in reference_dict or second_word not in reference_dict: return False tracker = {} stack = [word for word in reference_dict[first_word]] while len(stack) > 0: current_word = stack.pop() if current_word in tracker: continue else: tracker[current_word] = True current_word_synonyms = reference_dict[current_word] for word in current_word_synonyms: if word == second_word: return True elif word not in tracker: stack.append(word) return False
[ "def", "are_deeply_linked_synonyms", "(", "reference_dict", ",", "first_word", ",", "second_word", ")", ":", "if", "first_word", "not", "in", "reference_dict", "or", "second_word", "not", "in", "reference_dict", ":", "return", "False", "tracker", "=", "{", "}", "stack", "=", "[", "word", "for", "word", "in", "reference_dict", "[", "first_word", "]", "]", "while", "len", "(", "stack", ")", ">", "0", ":", "current_word", "=", "stack", ".", "pop", "(", ")", "if", "current_word", "in", "tracker", ":", "continue", "else", ":", "tracker", "[", "current_word", "]", "=", "True", "current_word_synonyms", "=", "reference_dict", "[", "current_word", "]", "for", "word", "in", "current_word_synonyms", ":", "if", "word", "==", "second_word", ":", "return", "True", "elif", "word", "not", "in", "tracker", ":", "stack", ".", "append", "(", "word", ")", "return", "False" ]
Takes in two words with a reference dictionary and iteratively checks if the words (with any in between) are synonyms
[ "Takes", "in", "two", "words", "with", "a", "reference", "dictionary", "and", "iteratively", "checks", "if", "the", "words", "(", "with", "any", "in", "between", ")", "are", "synonyms" ]
[ "\"\"\"Takes in two words with a reference dictionary and iteratively checks if the words (with any in between) are synonyms\"\"\"" ]
[ { "param": "reference_dict", "type": null }, { "param": "first_word", "type": null }, { "param": "second_word", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "reference_dict", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "first_word", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "second_word", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def are_deeply_linked_synonyms(reference_dict, first_word, second_word): if first_word not in reference_dict or second_word not in reference_dict: return False tracker = {} stack = [word for word in reference_dict[first_word]] while len(stack) > 0: current_word = stack.pop() if current_word in tracker: continue else: tracker[current_word] = True current_word_synonyms = reference_dict[current_word] for word in current_word_synonyms: if word == second_word: return True elif word not in tracker: stack.append(word) return False
382
771
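A small made-up synonym graph shows the transitive lookup:

synonyms = {
    'big':   ['large'],
    'large': ['big', 'huge'],
    'huge':  ['large'],
    'tiny':  ['small'],
    'small': ['tiny'],
}
print(are_deeply_linked_synonyms(synonyms, 'big', 'huge'))  # True (big -> large -> huge)
print(are_deeply_linked_synonyms(synonyms, 'big', 'tiny'))  # False (disconnected)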
989cc6d9b0528fe28c3ee840fd81ed62515687b7
aguai/docook
lib/mistletoe/mistletoe/block_token.py
[ "MIT" ]
Python
parse_align
<not_specific>
def parse_align(column): """ Helper function; returns align option from cell content. Returns: None if align = left; 0 if align = center; 1 if align = right. """ if column[:4] == ':---' and column[-4:] == '---:': return 0 if column[-4:] == '---:': return 1 return None
Helper function; returns align option from cell content. Returns: None if align = left; 0 if align = center; 1 if align = right.
Helper function; returns align option from cell content.
[ "Helper", "function", ";", "returns", "align", "option", "from", "cell", "content", "." ]
def parse_align(column): if column[:4] == ':---' and column[-4:] == '---:': return 0 if column[-4:] == '---:': return 1 return None
[ "def", "parse_align", "(", "column", ")", ":", "if", "column", "[", ":", "4", "]", "==", "':---'", "and", "column", "[", "-", "4", ":", "]", "==", "'---:'", ":", "return", "0", "if", "column", "[", "-", "4", ":", "]", "==", "'---:'", ":", "return", "1", "return", "None" ]
Helper function; returns align option from cell content.
[ "Helper", "function", ";", "returns", "align", "option", "from", "cell", "content", "." ]
[ "\"\"\"\n Helper function; returns align option from cell content.\n\n Returns:\n None if align = left;\n 0 if align = center;\n 1 if align = right.\n \"\"\"" ]
[ { "param": "column", "type": null } ]
{ "returns": [ { "docstring": "None if align = left;\n0 if align = center;\n1 if align = right.", "docstring_tokens": [ "None", "if", "align", "=", "left", ";", "0", "if", "align", "=", "center", ";", "1", "if", "align", "=", "right", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "column", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def parse_align(column): if column[:4] == ':---' and column[-4:] == '---:': return 0 if column[-4:] == '---:': return 1 return None
383
883
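The three alignment cases:

print(parse_align(':---:'))  # 0    (center)
print(parse_align('---:'))   # 1    (right)
print(parse_align(':---'))   # None (left, the default)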
b27e441c5c2a5d36dd0abee478f6ed4e6e8b3ffe
HtutLynn/Knowledge_Distillation_Pytorch
customs.py
[ "MIT" ]
Python
compute_param_count
<not_specific>
def compute_param_count(model): """ Compute the total number of trainable parameters in the model :param model: Neural Network model, constructed in pytorch framework :return param_count: Total number of trainable parameters in a network """ param_count = 0 for param in model.parameters(): if param.requires_grad: param_count += param.numel() return param_count
Compute the total number of trainable parameters in the model :param model: Neural Network model, constructed in pytorch framework :return param_count: Total number of trainable parameters in a network
Compute the total number of trainable parameters in the model
[ "Compute", "the", "total", "number", "of", "trainable", "parameters", "in", "the", "model" ]
def compute_param_count(model): param_count = 0 for param in model.parameters(): if param.requires_grad: param_count += param.numel() return param_count
[ "def", "compute_param_count", "(", "model", ")", ":", "param_count", "=", "0", "for", "param", "in", "model", ".", "parameters", "(", ")", ":", "if", "param", ".", "requires_grad", ":", "param_count", "+=", "param", ".", "numel", "(", ")", "return", "param_count" ]
Compute the total number of trainable parameters in the model
[ "Compute", "the", "total", "number", "of", "trainable", "parameters", "in", "the", "model" ]
[ "\"\"\"\n Compute the total number of trainable parameters in the model\n :param model: Neural Network model, constructed in pytorch framework\n :return param_count: Total number of trainable parameters in a network\n \"\"\"" ]
[ { "param": "model", "type": null } ]
{ "returns": [ { "docstring": "Total number of trainable parameters in a network", "docstring_tokens": [ "Total", "number", "of", "trainable", "parameters", "in", "a", "network" ], "type": "param_count" } ], "raises": [], "params": [ { "identifier": "model", "type": null, "docstring": "Neural Network model, constructed in pytorch framework", "docstring_tokens": [ "Neural", "Network", "model", "constructed", "in", "pytorch", "framework" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_param_count(model): param_count = 0 for param in model.parameters(): if param.requires_grad: param_count += param.numel() return param_count
384
216
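Assuming PyTorch is available, a tiny model makes the count easy to verify:

import torch.nn as nn

model = nn.Linear(10, 2)           # 10*2 weights + 2 biases
print(compute_param_count(model))  # 22
model.bias.requires_grad = False   # frozen parameters are excluded
print(compute_param_count(model))  # 20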
4cd44412f948c71a3866de82c3b210354cbe1358
rreddykeap/properties-translator
src/utils/proptrans.py
[ "MIT" ]
Python
need_translation
<not_specific>
def need_translation(file_name, src): """ Checks if a translation is needed for the file with the specified name. :param file_name: name of the file :param src: source language :return: True if file needs to be translated else False """ file_name = os.path.splitext(file_name)[0] if file_name.endswith('_' + src) or src == 'auto': return True return False
Checks if a translation is needed for the file with the specified name. :param file_name: name of the file :param src: source language :return: True if file needs to be translated else False
Checks if a translation is needed for the file with the specified name.
[ "Checks", "if", "a", "translation", "is", "needed", "for", "the", "file", "with", "the", "specified", "name", "." ]
def need_translation(file_name, src): file_name = os.path.splitext(file_name)[0] if file_name.endswith('_' + src) or src == 'auto': return True return False
[ "def", "need_translation", "(", "file_name", ",", "src", ")", ":", "file_name", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "[", "0", "]", "if", "file_name", ".", "endswith", "(", "'_'", "+", "src", ")", "or", "src", "==", "'auto'", ":", "return", "True", "return", "False" ]
Checks if a translation is needed for the file with the specified name.
[ "Checks", "if", "a", "translation", "is", "needed", "for", "the", "file", "with", "the", "specified", "name", "." ]
[ "\"\"\"\n Checks if a translation is needed for the file with the specified name.\n\n :param file_name: name of the file\n :param src: source language\n :return: True if file needs to be translated else False\n \"\"\"" ]
[ { "param": "file_name", "type": null }, { "param": "src", "type": null } ]
{ "returns": [ { "docstring": "True if file needs to be translated else False", "docstring_tokens": [ "True", "if", "file", "needs", "to", "be", "translated", "else", "False" ], "type": null } ], "raises": [], "params": [ { "identifier": "file_name", "type": null, "docstring": "name of the file", "docstring_tokens": [ "name", "of", "the", "file" ], "default": null, "is_optional": null }, { "identifier": "src", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def need_translation(file_name, src): file_name = os.path.splitext(file_name)[0] if file_name.endswith('_' + src) or src == 'auto': return True return False
385
976
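A quick sanity check of need_translation above; the file names and the 'auto' sentinel value are illustrative:

import os

def need_translation(file_name, src):
    file_name = os.path.splitext(file_name)[0]
    if file_name.endswith('_' + src) or src == 'auto':
        return True
    return False

print(need_translation('messages_en.properties', 'en'))  # True: stem ends with '_en'
print(need_translation('messages_fr.properties', 'en'))  # False: wrong source suffix
print(need_translation('messages.properties', 'auto'))   # True: 'auto' matches any file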
720a9e8387460cd47cb65d8dc57cec71f5f88371
team-oss/dspg20oss
ossPy/singleFuncs/eraseFromColumn.py
[ "MIT" ]
Python
eraseFromColumn
<not_specific>
def eraseFromColumn(inputColumn,eraseList): """iteratively delete regex query matches from input list Keyword arguments: inputColumn -- a column from a pandas dataframe, this will be the set of target words/entries that deletions will be made from eraseList -- a column containing strings (regex expressions) which will be deleted from the inputColumn, in an iterative fashion """ import pandas as pd import re for index, row in eraseList.iterrows(): curReplaceVal=row[0] currentRegexExpression=curReplaceVal + '(?i)' inputColumn.replace(regex=currentRegexExpression, value='', inplace=True) return inputColumn
iteratively delete regex query matches from input list Keyword arguments: inputColumn -- a column from a pandas dataframe, this will be the set of target words/entries that deletions will be made from eraseList -- a column containing strings (regex expressions) which will be deleted from the inputColumn, in an iterative fashion
iteratively delete regex query matches from input list Keyword arguments: inputColumn -- a column from a pandas dataframe, this will be the set of target words/entries that deletions will be made from eraseList -- a column containing strings (regex expressions) which will be deleted from the inputColumn, in an iterative fashion
[ "iteratively", "delete", "regex", "query", "matches", "from", "input", "list", "Keyword", "arguments", ":", "inputColumn", "--", "a", "column", "from", "a", "pandas", "dataframe", "this", "will", "be", "the", "set", "of", "target", "words", "/", "entries", "that", "deletions", "will", "be", "made", "from", "eraseList", "--", "a", "column", "containing", "strings", "(", "regex", "expressions", ")", "which", "will", "be", "deleted", "from", "the", "inputColumn", "in", "an", "iterative", "fashion" ]
def eraseFromColumn(inputColumn,eraseList):
    import pandas as pd
    import re
    for index, row in eraseList.iterrows():
        curReplaceVal=row[0]
        currentRegexExpression='(?i)' + curReplaceVal
        inputColumn.replace(regex=currentRegexExpression, value='', inplace=True)
    return inputColumn
[ "def", "eraseFromColumn", "(", "inputColumn", ",", "eraseList", ")", ":", "import", "pandas", "as", "pd", "import", "re", "for", "index", ",", "row", "in", "eraseList", ".", "iterrows", "(", ")", ":", "curReplaceVal", "=", "row", "[", "0", "]", "currentRegexExpression", "=", "'(?i)'", "+", "curReplaceVal", "inputColumn", ".", "replace", "(", "regex", "=", "currentRegexExpression", ",", "value", "=", "''", ",", "inplace", "=", "True", ")", "return", "inputColumn" ]
iteratively delete regex query matches from input list Keyword arguments: inputColumn -- a column from a pandas dataframe, this will be the set of target words/entries that deletions will be made from eraseList -- a column containing strings (regex expressions) which will be deleted from the inputColumn, in an iterative fashion
[ "iteratively", "delete", "regex", "query", "matches", "from", "input", "list", "Keyword", "arguments", ":", "inputColumn", "--", "a", "column", "from", "a", "pandas", "dataframe", "this", "will", "be", "the", "set", "of", "target", "words", "/", "entries", "that", "deletions", "will", "be", "made", "from", "eraseList", "--", "a", "column", "containing", "strings", "(", "regex", "expressions", ")", "which", "will", "be", "deleted", "from", "the", "inputColumn", "in", "an", "iterative", "fashion" ]
[ "\"\"\"iteratively delete regex query matches from input list\n \n Keyword arguments:\n inputColumn -- a column from a pandas dataframe, this will be the set of\n target words/entries that deletions will be made from\n eraseList -- a column containing strings (regex expressions) which will be\n deleted from the inputColumn, in an iterative fashion\n \"\"\"" ]
[ { "param": "inputColumn", "type": null }, { "param": "eraseList", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "inputColumn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "eraseList", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def eraseFromColumn(inputColumn,eraseList):
    import pandas as pd
    import re
    for index, row in eraseList.iterrows():
        curReplaceVal=row[0]
        currentRegexExpression='(?i)' + curReplaceVal
        inputColumn.replace(regex=currentRegexExpression, value='', inplace=True)
    return inputColumn
386
977
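A short usage sketch for eraseFromColumn above, assuming pandas; the sample frames are illustrative. The inline (?i) flag is prepended here, since recent versions of Python's re module reject inline flags that are not at the start of the pattern:

import pandas as pd

def eraseFromColumn(inputColumn, eraseList):
    for index, row in eraseList.iterrows():
        currentRegexExpression = '(?i)' + row[0]
        inputColumn.replace(regex=currentRegexExpression, value='', inplace=True)
    return inputColumn

names = pd.Series(['Foo Corp', 'Bar INC', 'baz corp'])
erase = pd.DataFrame(['corp', 'inc'])
print(eraseFromColumn(names, erase).tolist())  # ['Foo ', 'Bar ', 'baz ']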
54c39e64b7db3f58f24d80fc776b582c2c72cf83
elifesciences/builder
src/utils.py
[ "MIT" ]
Python
walk_nested_struct
<not_specific>
def walk_nested_struct(val, fn): "walks a potentially nested structure, calling `fn` on each value it encounters" if isinstance(val, dict): return {key: walk_nested_struct(i, fn) for key, i in val.items()} if isinstance(val, list): return [walk_nested_struct(i, fn) for i in val] return fn(val)
walks a potentially nested structure, calling `fn` on each value it encounters
walks a potentially nested structure, calling `fn` on each value it encounters
[ "walks", "a", "potentially", "nested", "structure", "calling", "`", "fn", "`", "on", "each", "value", "it", "encounters" ]
def walk_nested_struct(val, fn): if isinstance(val, dict): return {key: walk_nested_struct(i, fn) for key, i in val.items()} if isinstance(val, list): return [walk_nested_struct(i, fn) for i in val] return fn(val)
[ "def", "walk_nested_struct", "(", "val", ",", "fn", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "return", "{", "key", ":", "walk_nested_struct", "(", "i", ",", "fn", ")", "for", "key", ",", "i", "in", "val", ".", "items", "(", ")", "}", "if", "isinstance", "(", "val", ",", "list", ")", ":", "return", "[", "walk_nested_struct", "(", "i", ",", "fn", ")", "for", "i", "in", "val", "]", "return", "fn", "(", "val", ")" ]
walks a potentially nested structure, calling `fn` on each value it encounters
[ "walks", "a", "potentially", "nested", "structure", "calling", "`", "fn", "`", "on", "each", "value", "it", "encounters" ]
[ "\"walks a potentially nested structure, calling `fn` on each value it encounters\"" ]
[ { "param": "val", "type": null }, { "param": "fn", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "val", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "fn", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def walk_nested_struct(val, fn): if isinstance(val, dict): return {key: walk_nested_struct(i, fn) for key, i in val.items()} if isinstance(val, list): return [walk_nested_struct(i, fn) for i in val] return fn(val)
388
133
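walk_nested_struct above is easiest to see on a small mixed structure; the sample value and the doubling callback are illustrative:

def walk_nested_struct(val, fn):
    if isinstance(val, dict):
        return {key: walk_nested_struct(i, fn) for key, i in val.items()}
    if isinstance(val, list):
        return [walk_nested_struct(i, fn) for i in val]
    return fn(val)

cfg = {'name': 'a', 'items': [1, 2, {'x': 3}]}
# Only leaf values are transformed; dict keys and container shapes are preserved.
print(walk_nested_struct(cfg, lambda v: v * 2))
# {'name': 'aa', 'items': [2, 4, {'x': 6}]}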
49790c95d8368c7bff99417246b3459cb82ef6cb
Sagyam/Major-Project-Backend
server/majorProject/helper/linear_solver.py
[ "MIT" ]
Python
isOverDetermined
<not_specific>
def isOverDetermined(equations): ''' Checks if the equations does not contain z or Z. ''' for eq in equations: if 'z' in eq or 'Z' in eq: return False return True
Checks if the equations does not contain z or Z.
Checks if the equations do not contain z or Z.
[ "Checks", "if", "the", "equations", "do", "not", "contain", "z", "or", "Z", "." ]
def isOverDetermined(equations): for eq in equations: if 'z' in eq or 'Z' in eq: return False return True
[ "def", "isOverDetermined", "(", "equations", ")", ":", "for", "eq", "in", "equations", ":", "if", "'z'", "in", "eq", "or", "'Z'", "in", "eq", ":", "return", "False", "return", "True" ]
Checks if the equations do not contain z or Z.
[ "Checks", "if", "the", "equations", "do", "not", "contain", "z", "or", "Z", "." ]
[ "'''\n Checks if the equations does not contain z or Z.\n '''" ]
[ { "param": "equations", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "equations", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def isOverDetermined(equations): for eq in equations: if 'z' in eq or 'Z' in eq: return False return True
389
526
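A small check of isOverDetermined above; the equation strings are illustrative. The test is purely lexical: a system is treated as over-determined when no equation mentions the third unknown z:

def isOverDetermined(equations):
    for eq in equations:
        if 'z' in eq or 'Z' in eq:
            return False
    return True

print(isOverDetermined(['x + y = 3', '2x - y = 0', 'x - y = 1']))  # True: no z anywhere
print(isOverDetermined(['x + y + z = 3', 'x - z = 1']))            # False: z appears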
2eeec80c46a420e53df2b8428c292dd5eff0e5d7
jite21/python-list
LinkedList.py
[ "MIT" ]
Python
traverse
<not_specific>
def traverse(graph, q): ''' static method to traverse graphs, BFS and DFS, based on object passed in q if q is stack then DFS if q in queus then BFS Using duck-typing here. ''' visited = set() s = '' visit = 0 q.add(visit) visited.add(visit) while len(q): visit = q.top() s += str(visit) + ' ' q.remove() for i in graph.getedge(visit): if i.value not in visited: visited.add(i.value) q.add(i.value) return s
static method to traverse graphs, BFS and DFS, based on object passed in q if q is stack then DFS if q in queus then BFS Using duck-typing here.
static method to traverse graphs, BFS and DFS, based on the object passed in q: if q is a stack then DFS, if q is a queue then BFS. Using duck-typing here.
[ "static", "method", "to", "traverse", "graphs", "BFS", "and", "DFS", "based", "on", "the", "object", "passed", "in", "q", ":", "if", "q", "is", "a", "stack", "then", "DFS", "if", "q", "is", "a", "queue", "then", "BFS", ".", "Using", "duck", "-", "typing", "here", "." ]
def traverse(graph, q): visited = set() s = '' visit = 0 q.add(visit) visited.add(visit) while len(q): visit = q.top() s += str(visit) + ' ' q.remove() for i in graph.getedge(visit): if i.value not in visited: visited.add(i.value) q.add(i.value) return s
[ "def", "traverse", "(", "graph", ",", "q", ")", ":", "visited", "=", "set", "(", ")", "s", "=", "''", "visit", "=", "0", "q", ".", "add", "(", "visit", ")", "visited", ".", "add", "(", "visit", ")", "while", "len", "(", "q", ")", ":", "visit", "=", "q", ".", "top", "(", ")", "s", "+=", "str", "(", "visit", ")", "+", "' '", "q", ".", "remove", "(", ")", "for", "i", "in", "graph", ".", "getedge", "(", "visit", ")", ":", "if", "i", ".", "value", "not", "in", "visited", ":", "visited", ".", "add", "(", "i", ".", "value", ")", "q", ".", "add", "(", "i", ".", "value", ")", "return", "s" ]
static method to traverse graphs, BFS and DFS, based on the object passed in q: if q is a stack then DFS, if q is a queue then BFS
[ "static", "method", "to", "traverse", "graphs", "BFS", "and", "DFS", "based", "on", "the", "object", "passed", "in", "q", ":", "if", "q", "is", "a", "stack", "then", "DFS", "if", "q", "is", "a", "queue", "then", "BFS" ]
[ "'''\n static method to traverse graphs, BFS and DFS, based on object passed in q\n if q is stack then DFS\n if q in queus then BFS\n \n Using duck-typing here.\n '''" ]
[ { "param": "graph", "type": null }, { "param": "q", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "q", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def traverse(graph, q): visited = set() s = '' visit = 0 q.add(visit) visited.add(visit) while len(q): visit = q.top() s += str(visit) + ' ' q.remove() for i in graph.getedge(visit): if i.value not in visited: visited.add(i.value) q.add(i.value) return s
392
152
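traverse above only assumes a container with add/top/remove/len and a graph whose getedge returns objects carrying a value attribute. The minimal Queue, Edge and Graph classes below are hypothetical stand-ins for whatever the surrounding module defines; a FIFO container yields a breadth-first walk:

from collections import deque

def traverse(graph, q):
    visited = set()
    s = ''
    visit = 0
    q.add(visit)
    visited.add(visit)
    while len(q):
        visit = q.top()
        s += str(visit) + ' '
        q.remove()
        for i in graph.getedge(visit):
            if i.value not in visited:
                visited.add(i.value)
                q.add(i.value)
    return s

class Queue:
    # FIFO container with the duck-typed interface traverse() expects.
    def __init__(self):
        self._items = deque()
    def add(self, item):
        self._items.append(item)
    def top(self):
        return self._items[0]
    def remove(self):
        self._items.popleft()
    def __len__(self):
        return len(self._items)

class Edge:
    def __init__(self, value):
        self.value = value

class Graph:
    def __init__(self, adjacency):
        self._adj = adjacency
    def getedge(self, node):
        return [Edge(n) for n in self._adj.get(node, [])]

g = Graph({0: [1, 2], 1: [3], 2: [3], 3: []})
print(traverse(g, Queue()))  # '0 1 2 3 ' (breadth-first order from node 0)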
741de537d52f53a939824d47908b040042cfdd50
CharlesGovea/Alice
alice_yacc.py
[ "MIT" ]
Python
p_unary
null
def p_unary(p): ''' unary : postfix | MINUS neuralgic_opr postfix neuralgic_unary ''' if len(p) > 2: p[0] = p[3] else: p[0] = p[1]
unary : postfix | MINUS neuralgic_opr postfix neuralgic_unary
unary : postfix | MINUS neuralgic_opr postfix neuralgic_unary
[ "unary", ":", "postfix", "|", "MINUS", "neuralgic_opr", "postfix", "neuralgic_unary" ]
def p_unary(p): if len(p) > 2: p[0] = p[3] else: p[0] = p[1]
[ "def", "p_unary", "(", "p", ")", ":", "if", "len", "(", "p", ")", ">", "2", ":", "p", "[", "0", "]", "=", "p", "[", "3", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
unary : postfix | MINUS neuralgic_opr postfix neuralgic_unary
[ "unary", ":", "postfix", "|", "MINUS", "neuralgic_opr", "postfix", "neuralgic_unary" ]
[ "'''\n unary : postfix\n | MINUS neuralgic_opr postfix neuralgic_unary\n '''" ]
[ { "param": "p", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "p", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def p_unary(p): if len(p) > 2: p[0] = p[3] else: p[0] = p[1]
393
627
0fd076f9f5a0d229c98336e9b019948f8a94113c
isms/circlemud-world-parser
src/utils.py
[ "MIT" ]
Python
split_on_vnums
null
def split_on_vnums(file_text): """ function specifically to split the file on lines in the form of a vnum (e.g. the entire line is something like '#1234'). this is important because lines within entries can (and do) start with '#' """ split_re = r"""^\#(\d+)""" split_pattern = re.compile(split_re, re.MULTILINE) pieces = iter(split_pattern.split(file_text)) next(pieces) # burn the next one, we won't use it while pieces: vnum = next(pieces) text = next(pieces) yield ''.join((vnum, text))
function specifically to split the file on lines in the form of a vnum (e.g. the entire line is something like '#1234'). this is important because lines within entries can (and do) start with '#'
function specifically to split the file on lines in the form of a vnum (e.g. the entire line is something like '#1234'). this is important because lines within entries can (and do) start with '#'
[ "function", "specifically", "to", "split", "the", "file", "on", "lines", "in", "the", "form", "of", "a", "vnum", "(", "e", ".", "g", ".", "the", "entire", "line", "is", "something", "like", "'", "#1234", "'", ")", ".", "this", "is", "important", "because", "lines", "within", "entries", "can", "(", "and", "do", ")", "start", "with", "'", "#", "'" ]
def split_on_vnums(file_text):
    split_re = r"""^\#(\d+)"""
    split_pattern = re.compile(split_re, re.MULTILINE)
    pieces = split_pattern.split(file_text)
    for vnum, text in zip(pieces[1::2], pieces[2::2]):
        yield ''.join((vnum, text))
[ "def", "split_on_vnums", "(", "file_text", ")", ":", "split_re", "=", "r\"\"\"^\\#(\\d+)\"\"\"", "split_pattern", "=", "re", ".", "compile", "(", "split_re", ",", "re", ".", "MULTILINE", ")", "pieces", "=", "split_pattern", ".", "split", "(", "file_text", ")", "for", "vnum", ",", "text", "in", "zip", "(", "pieces", "[", "1", ":", ":", "2", "]", ",", "pieces", "[", "2", ":", ":", "2", "]", ")", ":", "yield", "''", ".", "join", "(", "(", "vnum", ",", "text", ")", ")" ]
function specifically to split the file on lines in the form of a vnum (e.g. the entire line is something like '#1234').
[ "function", "specifically", "to", "split", "the", "file", "on", "lines", "in", "the", "form", "of", "a", "vnum", "(", "e", ".", "g", ".", "the", "entire", "line", "is", "something", "like", "'", "#1234", "'", ")", "." ]
[ "\"\"\"\n function specifically to split the file on lines in the form\n of a vnum (e.g. the entire line is something like '#1234'). this is\n important because lines within entries can (and do) start with '#'\n \"\"\"", "# burn the next one, we won't use it" ]
[ { "param": "file_text", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file_text", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re

def split_on_vnums(file_text):
    split_re = r"""^\#(\d+)"""
    split_pattern = re.compile(split_re, re.MULTILINE)
    pieces = split_pattern.split(file_text)
    for vnum, text in zip(pieces[1::2], pieces[2::2]):
        yield ''.join((vnum, text))
394
324
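A usage sketch for split_on_vnums above, shown in the zip-based form, which avoids the StopIteration-inside-a-generator pattern that newer Python versions turn into a RuntimeError (PEP 479); the world-file snippet is illustrative:

import re

def split_on_vnums(file_text):
    split_re = r"""^\#(\d+)"""
    split_pattern = re.compile(split_re, re.MULTILINE)
    pieces = split_pattern.split(file_text)
    for vnum, text in zip(pieces[1::2], pieces[2::2]):
        yield ''.join((vnum, text))

sample = "#100\nA room~\ndesc with #hash inside\n#101\nAnother room~\n"
# The mid-line '#hash' does not trigger a split; only line-leading '#<digits>' does.
print(list(split_on_vnums(sample)))
# ['100\nA room~\ndesc with #hash inside\n', '101\nAnother room~\n']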
59098807a5888f1d7bb73a9e4a6475e2d323c67c
DamienIrving/ocean-analysis
modules/general_io.py
[ "MIT" ]
Python
check_depth_coordinate
<not_specific>
def check_depth_coordinate(cube): """Check (and fix if needed) the depth coordinate""" aux_coords = [coord.name() for coord in cube.aux_coords] if 'depth' in aux_coords: cube.remove_coord('depth') for coord in cube.dim_coords: name = coord.name() var_name = coord.var_name if (name == 'lev') or (var_name == 'lev') or (var_name == 'olevel'): coord.standard_name = 'depth' coord.var_name = 'lev' return cube
Check (and fix if needed) the depth coordinate
Check (and fix if needed) the depth coordinate
[ "Check", "(", "and", "fix", "if", "needed", ")", "the", "depth", "coordinate" ]
def check_depth_coordinate(cube): aux_coords = [coord.name() for coord in cube.aux_coords] if 'depth' in aux_coords: cube.remove_coord('depth') for coord in cube.dim_coords: name = coord.name() var_name = coord.var_name if (name == 'lev') or (var_name == 'lev') or (var_name == 'olevel'): coord.standard_name = 'depth' coord.var_name = 'lev' return cube
[ "def", "check_depth_coordinate", "(", "cube", ")", ":", "aux_coords", "=", "[", "coord", ".", "name", "(", ")", "for", "coord", "in", "cube", ".", "aux_coords", "]", "if", "'depth'", "in", "aux_coords", ":", "cube", ".", "remove_coord", "(", "'depth'", ")", "for", "coord", "in", "cube", ".", "dim_coords", ":", "name", "=", "coord", ".", "name", "(", ")", "var_name", "=", "coord", ".", "var_name", "if", "(", "name", "==", "'lev'", ")", "or", "(", "var_name", "==", "'lev'", ")", "or", "(", "var_name", "==", "'olevel'", ")", ":", "coord", ".", "standard_name", "=", "'depth'", "coord", ".", "var_name", "=", "'lev'", "return", "cube" ]
Check (and fix if needed) the depth coordinate
[ "Check", "(", "and", "fix", "if", "needed", ")", "the", "depth", "coordinate" ]
[ "\"\"\"Check (and fix if needed) the depth coordinate\"\"\"" ]
[ { "param": "cube", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cube", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_depth_coordinate(cube): aux_coords = [coord.name() for coord in cube.aux_coords] if 'depth' in aux_coords: cube.remove_coord('depth') for coord in cube.dim_coords: name = coord.name() var_name = coord.var_name if (name == 'lev') or (var_name == 'lev') or (var_name == 'olevel'): coord.standard_name = 'depth' coord.var_name = 'lev' return cube
395
22
538444484c45d02ecf10efd7ba24a03f51965a03
joanvaquer/SDV
sdv/metadata/table.py
[ "MIT" ]
Python
from_dict
<not_specific>
def from_dict(cls, metadata_dict): """Load a Table from a metadata dict. Args: metadata_dict (dict): Dict metadata to load. """ instance = cls() instance._fields_metadata = copy.deepcopy(metadata_dict['fields']) instance._constraints = copy.deepcopy(metadata_dict.get('constraints', [])) instance._model_kwargs = copy.deepcopy(metadata_dict.get('model_kwargs')) return instance
Load a Table from a metadata dict. Args: metadata_dict (dict): Dict metadata to load.
Load a Table from a metadata dict.
[ "Load", "a", "Table", "from", "a", "metadata", "dict", "." ]
def from_dict(cls, metadata_dict): instance = cls() instance._fields_metadata = copy.deepcopy(metadata_dict['fields']) instance._constraints = copy.deepcopy(metadata_dict.get('constraints', [])) instance._model_kwargs = copy.deepcopy(metadata_dict.get('model_kwargs')) return instance
[ "def", "from_dict", "(", "cls", ",", "metadata_dict", ")", ":", "instance", "=", "cls", "(", ")", "instance", ".", "_fields_metadata", "=", "copy", ".", "deepcopy", "(", "metadata_dict", "[", "'fields'", "]", ")", "instance", ".", "_constraints", "=", "copy", ".", "deepcopy", "(", "metadata_dict", ".", "get", "(", "'constraints'", ",", "[", "]", ")", ")", "instance", ".", "_model_kwargs", "=", "copy", ".", "deepcopy", "(", "metadata_dict", ".", "get", "(", "'model_kwargs'", ")", ")", "return", "instance" ]
Load a Table from a metadata dict.
[ "Load", "a", "Table", "from", "a", "metadata", "dict", "." ]
[ "\"\"\"Load a Table from a metadata dict.\n\n Args:\n metadata_dict (dict):\n Dict metadata to load.\n \"\"\"" ]
[ { "param": "cls", "type": null }, { "param": "metadata_dict", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "metadata_dict", "type": null, "docstring": "Dict metadata to load.", "docstring_tokens": [ "Dict", "metadata", "to", "load", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import copy def from_dict(cls, metadata_dict): instance = cls() instance._fields_metadata = copy.deepcopy(metadata_dict['fields']) instance._constraints = copy.deepcopy(metadata_dict.get('constraints', [])) instance._model_kwargs = copy.deepcopy(metadata_dict.get('model_kwargs')) return instance
397
852
faa92c25daa3a05e4b8b1b2bd51019f2e620817c
eleijonmarck/spot-checking-ml
spot.py
[ "MIT" ]
Python
define_models
<not_specific>
def define_models(): """ Defines and intantiates the models we want to evaluate {name : pipeline-object} :return: dict - scikit learn pipelines """ models = {} return models
Defines and intantiates the models we want to evaluate {name : pipeline-object} :return: dict - scikit learn pipelines
Defines and instantiates the models we want to evaluate {name : pipeline-object}
[ "Defines", "and", "instantiates", "the", "models", "we", "want", "to", "evaluate", "{", "name", ":", "pipeline", "-", "object", "}" ]
def define_models(): models = {} return models
[ "def", "define_models", "(", ")", ":", "models", "=", "{", "}", "return", "models" ]
Defines and instantiates the models we want to evaluate
[ "Defines", "and", "instantiates", "the", "models", "we", "want", "to", "evaluate" ]
[ "\"\"\"\n Defines and intantiates the models we want to\n evaluate\n\n {name : pipeline-object}\n :return: dict - scikit learn pipelines\n \"\"\"" ]
[]
{ "returns": [ { "docstring": "scikit learn pipelines", "docstring_tokens": [ "scikit", "learn", "pipelines" ], "type": null } ], "raises": [], "params": [], "outlier_params": [], "others": [] }
def define_models(): models = {} return models
398
625
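define_models above is just a stub; one plausible way to populate it, assuming scikit-learn is available and keeping the {name: pipeline} convention from the docstring (the chosen model and key are illustrative):

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

def define_models():
    models = {}
    # Each entry pairs a display name with a ready-to-fit pipeline.
    models['logreg'] = Pipeline([('scale', StandardScaler()),
                                 ('clf', LogisticRegression())])
    return models

print(sorted(define_models()))  # ['logreg']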
6909ef8333e5b8a3c50ab3ab4530d304b7bfeed6
artorious/python3_dojo
derivative.py
[ "CNRI-Python" ]
Python
fun
<not_specific>
def fun(x): """ (function) -> float <x> is the function to differentiate """ return 3 * x ** 2 + 5
(function) -> float <x> is the function to differentiate
(function) -> float <x> is the function to differentiate
[ "(", "function", ")", "-", ">", "float", "<x", ">", "is", "the", "function", "to", "differentiate" ]
def fun(x): return 3 * x ** 2 + 5
[ "def", "fun", "(", "x", ")", ":", "return", "3", "*", "x", "**", "2", "+", "5" ]
(function) -> float <x> is the function to differentiate
[ "(", "function", ")", "-", ">", "float", "<x", ">", "is", "the", "function", "to", "differentiate" ]
[ "\"\"\" (function) -> float\n <x> is the function to differentiate\n \"\"\"" ]
[ { "param": "x", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "x", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def fun(x): return 3 * x ** 2 + 5
400
399
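The record above only defines the target function; the central-difference helper below is not part of the record and is sketched purely to show how such a function is typically differentiated numerically:

def fun(x):
    return 3 * x ** 2 + 5

def derivative(f, x, h=1e-6):
    # Symmetric difference quotient approximates f'(x).
    return (f(x + h) - f(x - h)) / (2 * h)

print(derivative(fun, 2.0))  # ~12.0, since d/dx (3*x**2 + 5) = 6x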
4759402af82769811290c7daa73c1c19ebca474c
xieck13/nilmtk-dl
disaggregate/cnn_rnn.py
[ "MIT" ]
Python
lr_schedule
<not_specific>
def lr_schedule(epoch): """Learning Rate Schedule Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. Called automatically every epoch as part of callbacks during training. # Arguments epoch (int): The number of epochs # Returns lr (float32): learning rate """ lr = 1e-5 if epoch > 30: lr *= 0.5e-3 elif epoch > 20: lr *= 1e-3 elif epoch > 10: lr *= 1e-2 elif epoch > 5: lr *= 1e-1 print('Learning rate: ', lr) return lr
Learning Rate Schedule Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. Called automatically every epoch as part of callbacks during training. # Arguments epoch (int): The number of epochs # Returns lr (float32): learning rate
Learning Rate Schedule Learning rate is scheduled to be reduced after 5, 10, 20 and 30 epochs. Called automatically every epoch as part of callbacks during training. Arguments epoch (int): The number of epochs Returns lr (float32): learning rate
[ "Learning", "Rate", "Schedule", "Learning", "rate", "is", "scheduled", "to", "be", "reduced", "after", "5", "10", "20", "and", "30", "epochs", ".", "Called", "automatically", "every", "epoch", "as", "part", "of", "callbacks", "during", "training", ".", "Arguments", "epoch", "(", "int", ")", ":", "The", "number", "of", "epochs", "Returns", "lr", "(", "float32", ")", ":", "learning", "rate" ]
def lr_schedule(epoch): lr = 1e-5 if epoch > 30: lr *= 0.5e-3 elif epoch > 20: lr *= 1e-3 elif epoch > 10: lr *= 1e-2 elif epoch > 5: lr *= 1e-1 print('Learning rate: ', lr) return lr
[ "def", "lr_schedule", "(", "epoch", ")", ":", "lr", "=", "1e-5", "if", "epoch", ">", "30", ":", "lr", "*=", "0.5e-3", "elif", "epoch", ">", "20", ":", "lr", "*=", "1e-3", "elif", "epoch", ">", "10", ":", "lr", "*=", "1e-2", "elif", "epoch", ">", "5", ":", "lr", "*=", "1e-1", "print", "(", "'Learning rate: '", ",", "lr", ")", "return", "lr" ]
Learning Rate Schedule Learning rate is scheduled to be reduced after 5, 10, 20 and 30 epochs.
[ "Learning", "Rate", "Schedule", "Learning", "rate", "is", "scheduled", "to", "be", "reduced", "after", "5", "10", "20", "and", "30", "epochs", "." ]
[ "\"\"\"Learning Rate Schedule\n\n Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.\n Called automatically every epoch as part of callbacks during training.\n\n # Arguments\n epoch (int): The number of epochs\n\n # Returns\n lr (float32): learning rate\n \"\"\"" ]
[ { "param": "epoch", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "epoch", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def lr_schedule(epoch): lr = 1e-5 if epoch > 30: lr *= 0.5e-3 elif epoch > 20: lr *= 1e-3 elif epoch > 10: lr *= 1e-2 elif epoch > 5: lr *= 1e-1 print('Learning rate: ', lr) return lr
401
62
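Evaluating lr_schedule above at a few epochs shows the decay steps at 5, 10, 20 and 30 epochs; in a Keras setup a function like this would typically be handed to a LearningRateScheduler callback:

def lr_schedule(epoch):
    lr = 1e-5
    if epoch > 30:
        lr *= 0.5e-3
    elif epoch > 20:
        lr *= 1e-3
    elif epoch > 10:
        lr *= 1e-2
    elif epoch > 5:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr

for epoch in (0, 6, 11, 21, 31):
    lr_schedule(epoch)
# Returned rates: 1e-05, 1e-06, 1e-07, 1e-08, 5e-09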
161aec5a3c1979936bd9271422d57a028c0b8171
quantifiedcode-bot/Haul
haul/finders/pipeline/html.py
[ "MIT" ]
Python
a_href_finder
<not_specific>
def a_href_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs): """ Find image URL in <a>'s href attribute """ now_finder_image_urls = [] for a in soup.find_all('a'): href = a.get('href', None) if href: href = str(href) if filter(href.lower().endswith, ('.jpg', '.jpeg', '.gif', '.png')): if (href not in finder_image_urls) and \ (href not in now_finder_image_urls): now_finder_image_urls.append(href) output = {} output['finder_image_urls'] = finder_image_urls + now_finder_image_urls return output
Find image URL in <a>'s href attribute
Find image URL in <a>'s href attribute
[ "Find", "image", "URL", "in", "<a", ">", "'", "s", "href", "attribute" ]
def a_href_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs):
    now_finder_image_urls = []

    for a in soup.find_all('a'):
        href = a.get('href', None)
        if href:
            href = str(href)
            if any(href.lower().endswith(ext) for ext in ('.jpg', '.jpeg', '.gif', '.png')):
                if (href not in finder_image_urls) and \
                   (href not in now_finder_image_urls):
                    now_finder_image_urls.append(href)

    output = {}
    output['finder_image_urls'] = finder_image_urls + now_finder_image_urls

    return output
[ "def", "a_href_finder", "(", "pipeline_index", ",", "soup", ",", "finder_image_urls", "=", "[", "]", ",", "*", "args", ",", "**", "kwargs", ")", ":", "now_finder_image_urls", "=", "[", "]", "for", "a", "in", "soup", ".", "find_all", "(", "'a'", ")", ":", "href", "=", "a", ".", "get", "(", "'href'", ",", "None", ")", "if", "href", ":", "href", "=", "str", "(", "href", ")", "if", "any", "(", "href", ".", "lower", "(", ")", ".", "endswith", "(", "ext", ")", "for", "ext", "in", "(", "'.jpg'", ",", "'.jpeg'", ",", "'.gif'", ",", "'.png'", ")", ")", ":", "if", "(", "href", "not", "in", "finder_image_urls", ")", "and", "(", "href", "not", "in", "now_finder_image_urls", ")", ":", "now_finder_image_urls", ".", "append", "(", "href", ")", "output", "=", "{", "}", "output", "[", "'finder_image_urls'", "]", "=", "finder_image_urls", "+", "now_finder_image_urls", "return", "output" ]
Find image URL in <a>'s href attribute
[ "Find", "image", "URL", "in", "<a", ">", "'", "s", "href", "attribute" ]
[ "\"\"\"\n Find image URL in <a>'s href attribute\n \"\"\"" ]
[ { "param": "pipeline_index", "type": null }, { "param": "soup", "type": null }, { "param": "finder_image_urls", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "pipeline_index", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "soup", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "finder_image_urls", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def a_href_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs):
    now_finder_image_urls = []

    for a in soup.find_all('a'):
        href = a.get('href', None)
        if href:
            href = str(href)
            if any(href.lower().endswith(ext) for ext in ('.jpg', '.jpeg', '.gif', '.png')):
                if (href not in finder_image_urls) and \
                   (href not in now_finder_image_urls):
                    now_finder_image_urls.append(href)

    output = {}
    output['finder_image_urls'] = finder_image_urls + now_finder_image_urls

    return output
402
834
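A run of a_href_finder above on a tiny document, assuming BeautifulSoup (bs4) is installed; the HTML snippet and the unused pipeline_index value are illustrative. The any() extension check mirrors the Python 3 form of the code above, since on Python 3 a bare filter() object is always truthy:

from bs4 import BeautifulSoup

def a_href_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs):
    now_finder_image_urls = []
    for a in soup.find_all('a'):
        href = a.get('href', None)
        if href:
            href = str(href)
            if any(href.lower().endswith(ext) for ext in ('.jpg', '.jpeg', '.gif', '.png')):
                if (href not in finder_image_urls) and \
                   (href not in now_finder_image_urls):
                    now_finder_image_urls.append(href)
    output = {}
    output['finder_image_urls'] = finder_image_urls + now_finder_image_urls
    return output

html = '<a href="http://example.com/cat.JPG">cat</a><a href="/page.html">page</a>'
soup = BeautifulSoup(html, 'html.parser')
print(a_href_finder(0, soup)['finder_image_urls'])  # ['http://example.com/cat.JPG']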
7de0bb394baa69322bb525c799ca3fd33e6cf6c1
MaastrichtU-IDS/translator-openpredict
openpredict/openpredict_model.py
[ "MIT" ]
Python
multimetric_score
<not_specific>
def multimetric_score(estimator, X_test, y_test, scorers): """Return a dict of score for multimetric scoring :param estimator: Estimator :param X_test: X test :param y_test: Y test :param scorers: Dict of scorers :return: Multimetric scores """ scores = {} for name, scorer in scorers.items(): if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: # e.g. unwrap memmapped scalars score = score.item() except ValueError: # non-scalar? pass scores[name] = score if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) " "instead. (scorer=%s)" % (str(score), type(score), name)) return scores
Return a dict of score for multimetric scoring :param estimator: Estimator :param X_test: X test :param y_test: Y test :param scorers: Dict of scorers :return: Multimetric scores
Return a dict of score for multimetric scoring
[ "Return", "a", "dict", "of", "score", "for", "multimetric", "scoring" ]
def multimetric_score(estimator, X_test, y_test, scorers): scores = {} for name, scorer in scorers.items(): if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: score = score.item() except ValueError: pass scores[name] = score if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) " "instead. (scorer=%s)" % (str(score), type(score), name)) return scores
[ "def", "multimetric_score", "(", "estimator", ",", "X_test", ",", "y_test", ",", "scorers", ")", ":", "scores", "=", "{", "}", "for", "name", ",", "scorer", "in", "scorers", ".", "items", "(", ")", ":", "if", "y_test", "is", "None", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ")", "else", ":", "score", "=", "scorer", "(", "estimator", ",", "X_test", ",", "y_test", ")", "if", "hasattr", "(", "score", ",", "'item'", ")", ":", "try", ":", "score", "=", "score", ".", "item", "(", ")", "except", "ValueError", ":", "pass", "scores", "[", "name", "]", "=", "score", "if", "not", "isinstance", "(", "score", ",", "numbers", ".", "Number", ")", ":", "raise", "ValueError", "(", "\"scoring must return a number, got %s (%s) \"", "\"instead. (scorer=%s)\"", "%", "(", "str", "(", "score", ")", ",", "type", "(", "score", ")", ",", "name", ")", ")", "return", "scores" ]
Return a dict of score for multimetric scoring
[ "Return", "a", "dict", "of", "score", "for", "multimetric", "scoring" ]
[ "\"\"\"Return a dict of score for multimetric scoring\n\n :param estimator: Estimator\n :param X_test: X test\n :param y_test: Y test\n :param scorers: Dict of scorers\n :return: Multimetric scores\n \"\"\"", "# e.g. unwrap memmapped scalars", "# non-scalar?" ]
[ { "param": "estimator", "type": null }, { "param": "X_test", "type": null }, { "param": "y_test", "type": null }, { "param": "scorers", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "estimator", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "X_test", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "y_test", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "scorers", "type": null, "docstring": "Dict of scorers", "docstring_tokens": [ "Dict", "of", "scorers" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import numbers def multimetric_score(estimator, X_test, y_test, scorers): scores = {} for name, scorer in scorers.items(): if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if hasattr(score, 'item'): try: score = score.item() except ValueError: pass scores[name] = score if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) " "instead. (scorer=%s)" % (str(score), type(score), name)) return scores
403
27
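multimetric_score above accepts any scorer callables; the toy estimator and accuracy scorer below are hypothetical stand-ins for scikit-learn objects:

import numbers

def multimetric_score(estimator, X_test, y_test, scorers):
    scores = {}
    for name, scorer in scorers.items():
        if y_test is None:
            score = scorer(estimator, X_test)
        else:
            score = scorer(estimator, X_test, y_test)
        if hasattr(score, 'item'):
            try:
                score = score.item()
            except ValueError:
                pass
        scores[name] = score
        if not isinstance(score, numbers.Number):
            raise ValueError("scoring must return a number, got %s (%s) "
                             "instead. (scorer=%s)" % (str(score), type(score), name))
    return scores

def accuracy(estimator, X, y):
    # Fraction of inputs where the estimator's prediction matches the label.
    preds = [estimator(x) for x in X]
    return sum(p == t for p, t in zip(preds, y)) / len(y)

scores = multimetric_score(lambda x: x > 0, [1, -1, 2], [True, False, False],
                           {'acc': accuracy})
print(scores)  # {'acc': 0.6666666666666666}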
a0d5dd86967aba05281a89fc87e312fe977be123
bgporter/wastebook
post.py
[ "MIT" ]
Python
ExtractTags
<not_specific>
def ExtractTags(txt): ''' given input text, look for #tag instances in it, and return a list of those tags without the leading '#'. If there are no tags, we return an empty list. Tags must start and end with an alphanumeric, and can contain dashes and underscores. Before returning the list, we: - remove dupes - sort the tags alphabetically ''' TAG_PATTERN = re.compile(r'(?:^|\s)#([a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9])') tagList = TAG_PATTERN.findall(txt) return sorted(list(set([t.lower() for t in tagList])))
given input text, look for #tag instances in it, and return a list of those tags without the leading '#'. If there are no tags, we return an empty list. Tags must start and end with an alphanumeric, and can contain dashes and underscores. Before returning the list, we: - remove dupes - sort the tags alphabetically
given input text, look for #tag instances in it, and return a list of those tags without the leading '#'. If there are no tags, we return an empty list. Tags must start and end with an alphanumeric, and can contain dashes and underscores. Before returning the list, we: remove dupes sort the tags alphabetically
[ "given", "input", "text", "look", "for", "#tag", "instances", "in", "it", "and", "return", "a", "list", "of", "those", "tags", "without", "the", "leading", "'", "#", "'", ".", "If", "there", "are", "no", "tags", "we", "return", "an", "empty", "list", ".", "Tags", "must", "start", "and", "end", "with", "an", "alphanumeric", "and", "can", "contain", "dashes", "and", "underscores", ".", "Before", "returning", "the", "list", "we", ":", "remove", "dupes", "sort", "the", "tags", "alphabetically" ]
def ExtractTags(txt): TAG_PATTERN = re.compile(r'(?:^|\s)#([a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9])') tagList = TAG_PATTERN.findall(txt) return sorted(list(set([t.lower() for t in tagList])))
[ "def", "ExtractTags", "(", "txt", ")", ":", "TAG_PATTERN", "=", "re", ".", "compile", "(", "r'(?:^|\\s)#([a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9])'", ")", "tagList", "=", "TAG_PATTERN", ".", "findall", "(", "txt", ")", "return", "sorted", "(", "list", "(", "set", "(", "[", "t", ".", "lower", "(", ")", "for", "t", "in", "tagList", "]", ")", ")", ")" ]
given input text, look for #tag instances in it, and return a list of those tags without the leading '#'.
[ "given", "input", "text", "look", "for", "#tag", "instances", "in", "it", "and", "return", "a", "list", "of", "those", "tags", "without", "the", "leading", "'", "#", "'", "." ]
[ "''' given input text, look for #tag instances in it, and return a list \n of those tags without the leading '#'. If there are no tags, we return\n an empty list.\n\n Tags must start and end with an alphanumeric, and can contain dashes \n and underscores.\n\n Before returning the list, we:\n - remove dupes\n - sort the tags alphabetically\n '''" ]
[ { "param": "txt", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "txt", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def ExtractTags(txt): TAG_PATTERN = re.compile(r'(?:^|\s)#([a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9])') tagList = TAG_PATTERN.findall(txt) return sorted(list(set([t.lower() for t in tagList])))
405
69
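ExtractTags above in action; the sample text is illustrative. Note the pattern only matches tags of at least two characters and dedupes case-insensitively:

import re

def ExtractTags(txt):
    TAG_PATTERN = re.compile(r'(?:^|\s)#([a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9])')
    tagList = TAG_PATTERN.findall(txt)
    return sorted(list(set([t.lower() for t in tagList])))

print(ExtractTags('Notes on #Python and #unit-tests, plus #python again'))
# ['python', 'unit-tests']: duplicates collapse, the trailing comma is excluded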
b7a5ab1a84072daedacf8cab436642c6f23f935d
SirAnthony/chinoka
mqtt/graphite/config.py
[ "MIT" ]
Python
usage
null
def usage(prog): """ Print command line help. Takes an application name to display. """ # Won't strip indentation. logging.info(""" Usage: %s [options] Options: -v, --version show program's version number and exit -h, --help show this help message and exit -c, --config <file> use different config file -m, --map <file> use custom mapping file """, prog)
Print command line help. Takes an application name to display.
Print command line help. Takes an application name to display.
[ "Print", "command", "line", "help", ".", "Takes", "an", "application", "name", "to", "display", "." ]
def usage(prog): logging.info(""" Usage: %s [options] Options: -v, --version show program's version number and exit -h, --help show this help message and exit -c, --config <file> use different config file -m, --map <file> use custom mapping file """, prog)
[ "def", "usage", "(", "prog", ")", ":", "logging", ".", "info", "(", "\"\"\"\n Usage: %s [options]\n Options:\n -v, --version show program's version number and exit\n -h, --help show this help message and exit\n -c, --config <file> use different config file\n -m, --map <file> use custom mapping file\n \"\"\"", ",", "prog", ")" ]
Print command line help.
[ "Print", "command", "line", "help", "." ]
[ "\"\"\"\n Print command line help.\n Takes an application name to display.\n \"\"\"", "# Won't strip indentation." ]
[ { "param": "prog", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "prog", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging def usage(prog): logging.info(""" Usage: %s [options] Options: -v, --version show program's version number and exit -h, --help show this help message and exit -c, --config <file> use different config file -m, --map <file> use custom mapping file """, prog)
406
147
8bff04856a0c4af3c0d1c76418267a396433f7bf
matichorvat/hsst
hsst/ruleapplication/mapping_rules.py
[ "MIT" ]
Python
find_mapping_coverages
<not_specific>
def find_mapping_coverages(mapping_node_list, reference_coverage, coverages): """ Find mapping coverages by searching coverages for the ones with 1 in exactly the places mapping requires it to be and nowhere else. e.g. mapping is in node_id 2, reference coverage is [0,1,2,3], mapping coverages are [0,0,1,X0], [0,0,1,0] but not [0,1,1,0] or [0,1,X0,0] :param mapping_node_list: List of node ids corresponding to a single mapping :param reference_coverage: List of node ids in order which coverages are constructed :param coverages: List of coverages obtained by running rule application :return: List of coverages corresponding to the give mapping """ # mapping node positions in reference mapping_node_positions = [reference_coverage.index(node_id) for node_id in mapping_node_list] mapping_coverages = [] # Search rule application coverages that have 1 in exactly those coverages and nowhere else for coverage in coverages: if not all([True if coverage[index] == '1' else False for index in mapping_node_positions]): continue if sum(1 for node in coverage if node == '1') != len(mapping_node_positions): continue mapping_coverages.append(coverage) return mapping_coverages
Find mapping coverages by searching coverages for the ones with 1 in exactly the places mapping requires it to be and nowhere else. e.g. mapping is in node_id 2, reference coverage is [0,1,2,3], mapping coverages are [0,0,1,X0], [0,0,1,0] but not [0,1,1,0] or [0,1,X0,0] :param mapping_node_list: List of node ids corresponding to a single mapping :param reference_coverage: List of node ids in order which coverages are constructed :param coverages: List of coverages obtained by running rule application :return: List of coverages corresponding to the give mapping
Find mapping coverages by searching coverages for the ones with 1 in exactly the places mapping requires it to be and nowhere else.
[ "Find", "mapping", "coverages", "by", "searching", "coverages", "for", "the", "ones", "with", "1", "in", "exactly", "the", "places", "mapping", "requires", "it", "to", "be", "and", "nowhere", "else", "." ]
def find_mapping_coverages(mapping_node_list, reference_coverage, coverages): mapping_node_positions = [reference_coverage.index(node_id) for node_id in mapping_node_list] mapping_coverages = [] for coverage in coverages: if not all([True if coverage[index] == '1' else False for index in mapping_node_positions]): continue if sum(1 for node in coverage if node == '1') != len(mapping_node_positions): continue mapping_coverages.append(coverage) return mapping_coverages
[ "def", "find_mapping_coverages", "(", "mapping_node_list", ",", "reference_coverage", ",", "coverages", ")", ":", "mapping_node_positions", "=", "[", "reference_coverage", ".", "index", "(", "node_id", ")", "for", "node_id", "in", "mapping_node_list", "]", "mapping_coverages", "=", "[", "]", "for", "coverage", "in", "coverages", ":", "if", "not", "all", "(", "[", "True", "if", "coverage", "[", "index", "]", "==", "'1'", "else", "False", "for", "index", "in", "mapping_node_positions", "]", ")", ":", "continue", "if", "sum", "(", "1", "for", "node", "in", "coverage", "if", "node", "==", "'1'", ")", "!=", "len", "(", "mapping_node_positions", ")", ":", "continue", "mapping_coverages", ".", "append", "(", "coverage", ")", "return", "mapping_coverages" ]
Find mapping coverages by searching coverages for the ones with 1 in exactly the places mapping requires it to be and nowhere else.
[ "Find", "mapping", "coverages", "by", "searching", "coverages", "for", "the", "ones", "with", "1", "in", "exactly", "the", "places", "mapping", "requires", "it", "to", "be", "and", "nowhere", "else", "." ]
[ "\"\"\"\n Find mapping coverages by searching coverages for the ones with 1 in exactly the places mapping requires it to be and nowhere else.\n e.g. mapping is in node_id 2, reference coverage is [0,1,2,3], mapping coverages are [0,0,1,X0], [0,0,1,0] but not [0,1,1,0] or [0,1,X0,0]\n :param mapping_node_list: List of node ids corresponding to a single mapping\n :param reference_coverage: List of node ids in order which coverages are constructed\n :param coverages: List of coverages obtained by running rule application\n :return: List of coverages corresponding to the give mapping\n \"\"\"", "# mapping node positions in reference", "# Search rule application coverages that have 1 in exactly those coverages and nowhere else" ]
[ { "param": "mapping_node_list", "type": null }, { "param": "reference_coverage", "type": null }, { "param": "coverages", "type": null } ]
{ "returns": [ { "docstring": "List of coverages corresponding to the given mapping", "docstring_tokens": [ "List", "of", "coverages", "corresponding", "to", "the", "given", "mapping" ], "type": null } ], "raises": [], "params": [ { "identifier": "mapping_node_list", "type": null, "docstring": "List of node ids corresponding to a single mapping", "docstring_tokens": [ "List", "of", "node", "ids", "corresponding", "to", "a", "single", "mapping" ], "default": null, "is_optional": null }, { "identifier": "reference_coverage", "type": null, "docstring": "List of node ids in order which coverages are constructed", "docstring_tokens": [ "List", "of", "node", "ids", "in", "order", "which", "coverages", "are", "constructed" ], "default": null, "is_optional": null }, { "identifier": "coverages", "type": null, "docstring": "List of coverages obtained by running rule application", "docstring_tokens": [ "List", "of", "coverages", "obtained", "by", "running", "rule", "application" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def find_mapping_coverages(mapping_node_list, reference_coverage, coverages): mapping_node_positions = [reference_coverage.index(node_id) for node_id in mapping_node_list] mapping_coverages = [] for coverage in coverages: if not all([True if coverage[index] == '1' else False for index in mapping_node_positions]): continue if sum(1 for node in coverage if node == '1') != len(mapping_node_positions): continue mapping_coverages.append(coverage) return mapping_coverages
407
720
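find_mapping_coverages above filters bit-string coverages; a tiny worked case, where the coverage strings are illustrative:

def find_mapping_coverages(mapping_node_list, reference_coverage, coverages):
    mapping_node_positions = [reference_coverage.index(node_id) for node_id in mapping_node_list]
    mapping_coverages = []
    for coverage in coverages:
        if not all([True if coverage[index] == '1' else False for index in mapping_node_positions]):
            continue
        if sum(1 for node in coverage if node == '1') != len(mapping_node_positions):
            continue
        mapping_coverages.append(coverage)
    return mapping_coverages

reference = [0, 1, 2, 3]
coverages = ['0010', '0110', '1010']
# Only '0010' has a 1 at node 2 and nowhere else.
print(find_mapping_coverages([2], reference, coverages))  # ['0010']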
129b9ade7601af9dc5557dc9ef55b7cb41e9518f
boothlinux/poker-takehome
functions.py
[ "MIT" ]
Python
is_pair
<not_specific>
def is_pair(hand): """ This functions takes the hand (list) and returns true if it has one pair """ pair_count = 0 for card in hand: if hand.count(card) is 2: pair_count = pair_count + 1 if pair_count == 2: return True
This functions takes the hand (list) and returns true if it has one pair
This function takes the hand (list) and returns True if it has exactly one pair
[ "This", "function", "takes", "the", "hand", "(", "list", ")", "and", "returns", "True", "if", "it", "has", "exactly", "one", "pair" ]
def is_pair(hand):
    pair_count = 0
    for card in hand:
        if hand.count(card) == 2:
            pair_count = pair_count + 1
    if pair_count == 2:
        return True
[ "def", "is_pair", "(", "hand", ")", ":", "pair_count", "=", "0", "for", "card", "in", "hand", ":", "if", "hand", ".", "count", "(", "card", ")", "==", "2", ":", "pair_count", "=", "pair_count", "+", "1", "if", "pair_count", "==", "2", ":", "return", "True" ]
This function takes the hand (list) and returns True if it has exactly one pair
[ "This", "function", "takes", "the", "hand", "(", "list", ")", "and", "returns", "True", "if", "it", "has", "exactly", "one", "pair" ]
[ "\"\"\"\n This functions takes the hand (list) and returns true if it has one pair\n \"\"\"" ]
[ { "param": "hand", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "hand", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def is_pair(hand):
    pair_count = 0
    for card in hand:
        if hand.count(card) == 2:
            pair_count = pair_count + 1
    if pair_count == 2:
        return True
408
166
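is_pair above returns True only for exactly one pair and falls through to None otherwise, which is worth seeing explicitly; the hands are illustrative:

def is_pair(hand):
    pair_count = 0
    for card in hand:
        if hand.count(card) == 2:
            pair_count = pair_count + 1
    if pair_count == 2:
        return True

print(is_pair(['K', 'K', '7', '2', '9']))  # True: each king is counted once, so pair_count == 2
print(is_pair(['K', 'K', '7', '7', '9']))  # None (falsy): two pairs give pair_count == 4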
3f99a5c3f145e85f0bbbf4e883b6b1f8a3251dc5
6tudent/pyemf
pyemf.py
[ "BSD-3-Clause-No-Nuclear-Warranty" ]
Python
_round4
<not_specific>
def _round4(num): """Round to the nearest multiple of 4 greater than or equal to the given number. EMF records are required to be aligned to 4 byte boundaries.""" return ((num+3)//4)*4
Round to the nearest multiple of 4 greater than or equal to the given number. EMF records are required to be aligned to 4 byte boundaries.
Round to the nearest multiple of 4 greater than or equal to the given number. EMF records are required to be aligned to 4 byte boundaries.
[ "Round", "to", "the", "nearest", "multiple", "of", "4", "greater", "than", "or", "equal", "to", "the", "given", "number", ".", "EMF", "records", "are", "required", "to", "be", "aligned", "to", "4", "byte", "boundaries", "." ]
def _round4(num): return ((num+3)//4)*4
[ "def", "_round4", "(", "num", ")", ":", "return", "(", "(", "num", "+", "3", ")", "//", "4", ")", "*", "4" ]
Round to the nearest multiple of 4 greater than or equal to the given number.
[ "Round", "to", "the", "nearest", "multiple", "of", "4", "greater", "than", "or", "equal", "to", "the", "given", "number", "." ]
[ "\"\"\"Round to the nearest multiple of 4 greater than or equal to the\n given number. EMF records are required to be aligned to 4 byte\n boundaries.\"\"\"" ]
[ { "param": "num", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "num", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _round4(num): return ((num+3)//4)*4
409
816
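_round4 above is plain ceiling-to-a-multiple arithmetic; a few spot checks:

def _round4(num):
    return ((num+3)//4)*4

for n in (0, 1, 4, 5, 13):
    print(n, '->', _round4(n))  # 0->0, 1->4, 4->4, 5->8, 13->16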
2d7949fa807eb841d79798368397c19a968060ee
zipated/src
third_party/catapult/third_party/gsutil/gslib/commands/perfdiag.py
[ "BSD-3-Clause" ]
Python
_GenerateFileData
null
def _GenerateFileData(fp, file_size=0, random_ratio=100, max_unique_random_bytes=5242883): """Writes data into a file like object. Args: fp: A file like object to write the data to. file_size: The amount of data to write to the file. random_ratio: The percentage of randomly generated data to write. This can be any number between 0 and 100 (inclusive), with 0 producing uniform data, and 100 producing random data. max_unique_random_bytes: The maximum number of bytes to generate pseudo-randomly before beginning to repeat bytes. The default was chosen as the next prime larger than 5 MiB. """ # Normalize the ratio. random_ratio /= 100.0 random_bytes = os.urandom(min(file_size, max_unique_random_bytes)) total_bytes_written = 0 while total_bytes_written < file_size: num_bytes = min(max_unique_random_bytes, file_size - total_bytes_written) # Calculate the amount of random and sequential data to write and # resolve rounding errors by adjusting the random bytes to write. num_bytes_seq = int(num_bytes * (1 - random_ratio)) num_bytes_random = num_bytes - num_bytes_seq fp.write(random_bytes[:num_bytes_random]) fp.write(b'x' * num_bytes_seq) total_bytes_written += num_bytes
Writes data into a file like object. Args: fp: A file like object to write the data to. file_size: The amount of data to write to the file. random_ratio: The percentage of randomly generated data to write. This can be any number between 0 and 100 (inclusive), with 0 producing uniform data, and 100 producing random data. max_unique_random_bytes: The maximum number of bytes to generate pseudo-randomly before beginning to repeat bytes. The default was chosen as the next prime larger than 5 MiB.
Writes data into a file like object.
[ "Writes", "data", "into", "a", "file", "like", "object", "." ]
def _GenerateFileData(fp, file_size=0, random_ratio=100, max_unique_random_bytes=5242883): random_ratio /= 100.0 random_bytes = os.urandom(min(file_size, max_unique_random_bytes)) total_bytes_written = 0 while total_bytes_written < file_size: num_bytes = min(max_unique_random_bytes, file_size - total_bytes_written) num_bytes_seq = int(num_bytes * (1 - random_ratio)) num_bytes_random = num_bytes - num_bytes_seq fp.write(random_bytes[:num_bytes_random]) fp.write(b'x' * num_bytes_seq) total_bytes_written += num_bytes
[ "def", "_GenerateFileData", "(", "fp", ",", "file_size", "=", "0", ",", "random_ratio", "=", "100", ",", "max_unique_random_bytes", "=", "5242883", ")", ":", "random_ratio", "/=", "100.0", "random_bytes", "=", "os", ".", "urandom", "(", "min", "(", "file_size", ",", "max_unique_random_bytes", ")", ")", "total_bytes_written", "=", "0", "while", "total_bytes_written", "<", "file_size", ":", "num_bytes", "=", "min", "(", "max_unique_random_bytes", ",", "file_size", "-", "total_bytes_written", ")", "num_bytes_seq", "=", "int", "(", "num_bytes", "*", "(", "1", "-", "random_ratio", ")", ")", "num_bytes_random", "=", "num_bytes", "-", "num_bytes_seq", "fp", ".", "write", "(", "random_bytes", "[", ":", "num_bytes_random", "]", ")", "fp", ".", "write", "(", "b'x'", "*", "num_bytes_seq", ")", "total_bytes_written", "+=", "num_bytes" ]
Writes data into a file like object.
[ "Writes", "data", "into", "a", "file", "like", "object", "." ]
[ "\"\"\"Writes data into a file like object.\n\n Args:\n fp: A file like object to write the data to.\n file_size: The amount of data to write to the file.\n random_ratio: The percentage of randomly generated data to write. This can\n be any number between 0 and 100 (inclusive), with 0 producing uniform\n data, and 100 producing random data.\n max_unique_random_bytes: The maximum number of bytes to generate\n pseudo-randomly before beginning to repeat\n bytes. The default was chosen as the next prime\n larger than 5 MiB.\n \"\"\"", "# Normalize the ratio.", "# Calculate the amount of random and sequential data to write and", "# resolve rounding errors by adjusting the random bytes to write." ]
[ { "param": "fp", "type": null }, { "param": "file_size", "type": null }, { "param": "random_ratio", "type": null }, { "param": "max_unique_random_bytes", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "fp", "type": null, "docstring": "A file like object to write the data to.", "docstring_tokens": [ "A", "file", "like", "object", "to", "write", "the", "data", "to", "." ], "default": null, "is_optional": null }, { "identifier": "file_size", "type": null, "docstring": "The amount of data to write to the file.", "docstring_tokens": [ "The", "amount", "of", "data", "to", "write", "to", "the", "file", "." ], "default": null, "is_optional": null }, { "identifier": "random_ratio", "type": null, "docstring": "The percentage of randomly generated data to write. This can\nbe any number between 0 and 100 (inclusive), with 0 producing uniform\ndata, and 100 producing random data.", "docstring_tokens": [ "The", "percentage", "of", "randomly", "generated", "data", "to", "write", ".", "This", "can", "be", "any", "number", "between", "0", "and", "100", "(", "inclusive", ")", "with", "0", "producing", "uniform", "data", "and", "100", "producing", "random", "data", "." ], "default": null, "is_optional": null }, { "identifier": "max_unique_random_bytes", "type": null, "docstring": "The maximum number of bytes to generate\npseudo-randomly before beginning to repeat\nbytes. The default was chosen as the next prime\nlarger than 5 MiB.", "docstring_tokens": [ "The", "maximum", "number", "of", "bytes", "to", "generate", "pseudo", "-", "randomly", "before", "beginning", "to", "repeat", "bytes", ".", "The", "default", "was", "chosen", "as", "the", "next", "prime", "larger", "than", "5", "MiB", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def _GenerateFileData(fp, file_size=0, random_ratio=100, max_unique_random_bytes=5242883): random_ratio /= 100.0 random_bytes = os.urandom(min(file_size, max_unique_random_bytes)) total_bytes_written = 0 while total_bytes_written < file_size: num_bytes = min(max_unique_random_bytes, file_size - total_bytes_written) num_bytes_seq = int(num_bytes * (1 - random_ratio)) num_bytes_random = num_bytes - num_bytes_seq fp.write(random_bytes[:num_bytes_random]) fp.write(b'x' * num_bytes_seq) total_bytes_written += num_bytes
410
82
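_GenerateFileData above writes to any file-like object, so an in-memory buffer is enough to exercise it; the sizes below are illustrative:

import io
import os

def _GenerateFileData(fp, file_size=0, random_ratio=100,
                      max_unique_random_bytes=5242883):
    random_ratio /= 100.0
    random_bytes = os.urandom(min(file_size, max_unique_random_bytes))
    total_bytes_written = 0
    while total_bytes_written < file_size:
        num_bytes = min(max_unique_random_bytes, file_size - total_bytes_written)
        num_bytes_seq = int(num_bytes * (1 - random_ratio))
        num_bytes_random = num_bytes - num_bytes_seq
        fp.write(random_bytes[:num_bytes_random])
        fp.write(b'x' * num_bytes_seq)
        total_bytes_written += num_bytes

buf = io.BytesIO()
_GenerateFileData(buf, file_size=16, random_ratio=50)
data = buf.getvalue()
print(len(data))              # 16
print(data.count(b'x') >= 8)  # True: at least the 8 sequential filler bytes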
fc66633f2455e1a12c1c67d1419b9b97b698a313
dsysoev/fun-with-tensorflow
timeseries/smoothing.py
[ "MIT" ]
Python
weighted_average
<not_specific>
def weighted_average(series, weights):
    """ calculate list with weighted moving average values

    weights - list of weights for averaging
    """
    weights_sum = sum(weights)
    # normalize the weights and reverse it
    weigths_norm = [weight / weights_sum for weight in reversed(weights)]
    num_weights = len(weights)
    # set NaN for the first undefined values
    lst = [float('NaN')] * (num_weights - 1)
    for pos in range(num_weights, len(series) + 1):
        # calculate value for current time frame
        value = sum([v * w for v, w in zip(series[pos-num_weights:pos], weigths_norm)])
        lst.append(value)
    return lst
calculate list with weighted moving average values weights - list of weights for averaging
calculate list with weighted moving average values weights - list of weights for averaging
[ "calculate", "list", "with", "weighted", "moving", "average", "values", "weights", "-", "list", "of", "weights", "for", "averaging" ]
def weighted_average(series, weights): weights_sum = sum(weights) weigths_norm = [weight / weights_sum for weight in reversed(weights)] num_weights = len(weights) lst = [float('NaN')] * (num_weights - 1) for pos in range(num_weights, len(series) + 1): value = sum([v * w for v, w in zip(series[pos-num_weights:pos], weigths_norm)]) lst.append(value) return lst
[ "def", "weighted_average", "(", "series", ",", "weights", ")", ":", "weights_sum", "=", "sum", "(", "weights", ")", "weigths_norm", "=", "[", "weight", "/", "weights_sum", "for", "weight", "in", "reversed", "(", "weights", ")", "]", "num_weights", "=", "len", "(", "weights", ")", "lst", "=", "[", "float", "(", "'NaN'", ")", "]", "*", "(", "num_weights", "-", "1", ")", "for", "pos", "in", "range", "(", "num_weights", ",", "len", "(", "series", ")", "+", "1", ")", ":", "value", "=", "sum", "(", "[", "v", "*", "w", "for", "v", ",", "w", "in", "zip", "(", "series", "[", "pos", "-", "num_weights", ":", "pos", "]", ",", "weigths_norm", ")", "]", ")", "lst", ".", "append", "(", "value", ")", "return", "lst" ]
calculate list with weighted moving average values weights - list of weights for averaging
[ "calculate", "list", "with", "weighted", "moving", "average", "values", "weights", "-", "list", "of", "weights", "for", "averaging" ]
[ "\"\"\" calculate list with weighted moving average values\n\n    weights - list of weights for averaging\n    \"\"\"", "# normalize the weights and reverse it", "# set NaN for the first undefined values", "# calculate value for current time frame" ]
[ { "param": "series", "type": null }, { "param": "weights", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "series", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "weights", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def weighted_average(series, weights): weights_sum = sum(weights) weigths_norm = [weight / weights_sum for weight in reversed(weights)] num_weights = len(weights) lst = [float('NaN')] * (num_weights - 1) for pos in range(num_weights, len(series) + 1): value = sum([v * w for v, w in zip(series[pos-num_weights:pos], weigths_norm)]) lst.append(value) return lst
411
659
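Illustrative usage of weighted_average as defined in this record (my example, not from the repo). Two behaviors are visible in the code: the first len(weights) - 1 entries are NaN padding, and because the weights are reversed before zipping, weights[0] is applied to the newest sample in each window:

print(weighted_average([1, 2, 3, 4, 5], [1, 1, 1]))
# [nan, nan, 2.0, 3.0, 4.0] -- equal weights reduce to a simple moving average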
a4fb7ed0f5b98337029e0b4c7e10490b1c2606a8
dunnkers/client
wandb/vendor/prompt_toolkit/layout/utils.py
[ "MIT" ]
Python
split_lines
null
def split_lines(tokenlist): """ Take a single list of (Token, text) tuples and yield one such list for each line. Just like str.split, this will yield at least one item. :param tokenlist: List of (token, text) or (token, text, mouse_handler) tuples. """ line = [] for item in tokenlist: # For (token, text) tuples. if len(item) == 2: token, string = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part)) yield line line = [] line.append((token, parts[-1])) # Note that parts[-1] can be empty, and that's fine. It happens # in the case of [(Token.SetCursorPosition, '')]. # For (token, text, mouse_handler) tuples. # I know, partly copy/paste, but understandable and more efficient # than many tests. else: token, string, mouse_handler = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part, mouse_handler)) yield line line = [] line.append((token, parts[-1], mouse_handler)) # Always yield the last line, even when this is an empty line. This ensures # that when `tokenlist` ends with a newline character, an additional empty # line is yielded. (Otherwise, there's no way to differentiate between the # cases where `tokenlist` does and doesn't end with a newline.) yield line
Take a single list of (Token, text) tuples and yield one such list for each line. Just like str.split, this will yield at least one item. :param tokenlist: List of (token, text) or (token, text, mouse_handler) tuples.
Take a single list of (Token, text) tuples and yield one such list for each line. Just like str.split, this will yield at least one item.
[ "Take", "a", "single", "list", "of", "(", "Token", "text", ")", "tuples", "and", "yield", "one", "such", "list", "for", "each", "line", ".", "Just", "like", "str", ".", "split", "this", "will", "yield", "at", "least", "one", "item", "." ]
def split_lines(tokenlist): line = [] for item in tokenlist: if len(item) == 2: token, string = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part)) yield line line = [] line.append((token, parts[-1])) else: token, string, mouse_handler = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part, mouse_handler)) yield line line = [] line.append((token, parts[-1], mouse_handler)) yield line
[ "def", "split_lines", "(", "tokenlist", ")", ":", "line", "=", "[", "]", "for", "item", "in", "tokenlist", ":", "if", "len", "(", "item", ")", "==", "2", ":", "token", ",", "string", "=", "item", "parts", "=", "string", ".", "split", "(", "'\\n'", ")", "for", "part", "in", "parts", "[", ":", "-", "1", "]", ":", "if", "part", ":", "line", ".", "append", "(", "(", "token", ",", "part", ")", ")", "yield", "line", "line", "=", "[", "]", "line", ".", "append", "(", "(", "token", ",", "parts", "[", "-", "1", "]", ")", ")", "else", ":", "token", ",", "string", ",", "mouse_handler", "=", "item", "parts", "=", "string", ".", "split", "(", "'\\n'", ")", "for", "part", "in", "parts", "[", ":", "-", "1", "]", ":", "if", "part", ":", "line", ".", "append", "(", "(", "token", ",", "part", ",", "mouse_handler", ")", ")", "yield", "line", "line", "=", "[", "]", "line", ".", "append", "(", "(", "token", ",", "parts", "[", "-", "1", "]", ",", "mouse_handler", ")", ")", "yield", "line" ]
Take a single list of (Token, text) tuples and yield one such list for each line.
[ "Take", "a", "single", "list", "of", "(", "Token", "text", ")", "tuples", "and", "yield", "one", "such", "list", "for", "each", "line", "." ]
[ "\"\"\"\n Take a single list of (Token, text) tuples and yield one such list for each\n line. Just like str.split, this will yield at least one item.\n\n :param tokenlist: List of (token, text) or (token, text, mouse_handler)\n tuples.\n \"\"\"", "# For (token, text) tuples.", "# Note that parts[-1] can be empty, and that's fine. It happens", "# in the case of [(Token.SetCursorPosition, '')].", "# For (token, text, mouse_handler) tuples.", "# I know, partly copy/paste, but understandable and more efficient", "# than many tests.", "# Always yield the last line, even when this is an empty line. This ensures", "# that when `tokenlist` ends with a newline character, an additional empty", "# line is yielded. (Otherwise, there's no way to differentiate between the", "# cases where `tokenlist` does and doesn't end with a newline.)" ]
[ { "param": "tokenlist", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "tokenlist", "type": null, "docstring": "List of (token, text) or (token, text, mouse_handler)\ntuples.", "docstring_tokens": [ "List", "of", "(", "token", "text", ")", "or", "(", "token", "text", "mouse_handler", ")", "tuples", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def split_lines(tokenlist): line = [] for item in tokenlist: if len(item) == 2: token, string = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part)) yield line line = [] line.append((token, parts[-1])) else: token, string, mouse_handler = item parts = string.split('\n') for part in parts[:-1]: if part: line.append((token, part, mouse_handler)) yield line line = [] line.append((token, parts[-1], mouse_handler)) yield line
412
600
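A minimal run of split_lines from this record (my example). Each (token, text) tuple is broken at newline characters, and a trailing newline would yield one extra, empty line:

tokens = [('T', 'foo\nbar'), ('T', '!')]
print(list(split_lines(tokens)))
# [[('T', 'foo')], [('T', 'bar'), ('T', '!')]]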
df427d58ef84be05535375800c3a58bfd5474c17
spiperac/bctf
apps/administration/utils.py
[ "MIT" ]
Python
check_docker
<not_specific>
def check_docker(): """ Checks if docker is installed. """ return True if shutil.which('docker') else False
Checks if docker is installed.
Checks if docker is installed.
[ "Checks", "if", "docker", "is", "installed", "." ]
def check_docker(): return True if shutil.which('docker') else False
[ "def", "check_docker", "(", ")", ":", "return", "True", "if", "shutil", ".", "which", "(", "'docker'", ")", "else", "False" ]
Checks if docker is installed.
[ "Checks", "if", "docker", "is", "installed", "." ]
[ "\"\"\"\n Checks if docker is installed.\n \"\"\"" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
import shutil def check_docker(): return True if shutil.which('docker') else False
413
293
425a6e622b676a6ec90bb1362fc863d5bc040308
snassef/aqueduct
civis/imports/socrata_helpers.py
[ "Apache-2.0" ]
Python
Merge
<not_specific>
def Merge(dict1, dict2):
    """
    Merges two dicts and returns a dict.
    """
    res = {**dict1, **dict2}
    return res
Merges two dicts and returns a dict.
Merges two dicts and returns a dict.
[ "Merges", "two", "dicts", "and", "returns", "a", "dict", "." ]
def Merge(dict1, dict2): res = {**dict1, **dict2} return res
[ "def", "Merge", "(", "dict1", ",", "dict2", ")", ":", "res", "=", "{", "**", "dict1", ",", "**", "dict2", "}", "return", "res" ]
Merges two dicts and returns a dict.
[ "Merges", "two", "dicts", "and", "returns", "a", "dict", "." ]
[ "\"\"\"\n    Merges two dicts and returns a dict.\n    \"\"\"" ]
[ { "param": "dict1", "type": null }, { "param": "dict2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "dict1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "dict2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def Merge(dict1, dict2): res = {**dict1, **dict2} return res
414
451
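Hypothetical call showing the {**dict1, **dict2} merge idiom used above; keys from dict2 win on collisions:

print(Merge({'a': 1, 'b': 2}, {'b': 3}))  # {'a': 1, 'b': 3}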
26b86de77121bf1b8c931518ae5f4a380a40c1bd
python-packages/vanity_app
src/vanity_app/vanity_app/utils.py
[ "MIT" ]
Python
flatten_params
<not_specific>
def flatten_params(params): """ Convenience method used by rjones' PyPI OAuth1 code """ flattened = [] for k, v in params.items(): if isinstance(v, list): for v in v: if v: flattened.append((k, v)) elif v: flattened.append((k, v)) return flattened
Convenience method used by rjones' PyPI OAuth1 code
Convenience method used by rjones' PyPI OAuth1 code
[ "Convenience", "method", "used", "by", "rjones", "'", "PyPI", "OAuth1", "code" ]
def flatten_params(params): flattened = [] for k, v in params.items(): if isinstance(v, list): for v in v: if v: flattened.append((k, v)) elif v: flattened.append((k, v)) return flattened
[ "def", "flatten_params", "(", "params", ")", ":", "flattened", "=", "[", "]", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "for", "v", "in", "v", ":", "if", "v", ":", "flattened", ".", "append", "(", "(", "k", ",", "v", ")", ")", "elif", "v", ":", "flattened", ".", "append", "(", "(", "k", ",", "v", ")", ")", "return", "flattened" ]
Convenience method used by rjones' PyPI OAuth1 code
[ "Convenience", "method", "used", "by", "rjones", "'", "PyPI", "OAuth1", "code" ]
[ "\"\"\"\n Convenience method used by rjones' PyPI OAuth1 code\n \"\"\"" ]
[ { "param": "params", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "params", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def flatten_params(params): flattened = [] for k, v in params.items(): if isinstance(v, list): for v in v: if v: flattened.append((k, v)) elif v: flattened.append((k, v)) return flattened
415
356
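Illustrative input for flatten_params (mine, not from the repo): list values are expanded into repeated (key, value) pairs, and falsy values are dropped:

params = {'classifiers': ['Alpha', '', 'Beta'], 'name': 'pkg', 'license': None}
print(flatten_params(params))
# [('classifiers', 'Alpha'), ('classifiers', 'Beta'), ('name', 'pkg')]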
e9122b3509c67221f7c1e287a8008aedaef96198
stgeke/pyJac-v2
pyjac/utils.py
[ "MIT" ]
Python
enum_to_string
<not_specific>
def enum_to_string(enum): """ Convenience method that converts an IntEnum/Enum to string Parameters ---------- enum: Enum The enum to convert Returns ------- name: str The stringified enum """ enum = str(enum) return enum[enum.index('.') + 1:]
Convenience method that converts an IntEnum/Enum to string Parameters ---------- enum: Enum The enum to convert Returns ------- name: str The stringified enum
Convenience method that converts an IntEnum/Enum to string Parameters Enum The enum to convert Returns str The stringified enum
[ "Convenience", "method", "that", "converts", "an", "IntEnum", "/", "Enum", "to", "string", "Parameters", "Enum", "The", "enum", "to", "convert", "Returns", "str", "The", "stringified", "enum" ]
def enum_to_string(enum): enum = str(enum) return enum[enum.index('.') + 1:]
[ "def", "enum_to_string", "(", "enum", ")", ":", "enum", "=", "str", "(", "enum", ")", "return", "enum", "[", "enum", ".", "index", "(", "'.'", ")", "+", "1", ":", "]" ]
Convenience method that converts an IntEnum/Enum to string Parameters
[ "Convenience", "method", "that", "converts", "an", "IntEnum", "/", "Enum", "to", "string", "Parameters" ]
[ "\"\"\"\n Convenience method that converts an IntEnum/Enum to string\n\n Parameters\n ----------\n enum: Enum\n The enum to convert\n\n Returns\n -------\n name: str\n The stringified enum\n \"\"\"" ]
[ { "param": "enum", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "enum", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def enum_to_string(enum): enum = str(enum) return enum[enum.index('.') + 1:]
416
251
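A self-contained check of enum_to_string (my example): str() on an Enum member gives 'ClassName.MEMBER', and the slice keeps everything after the dot:

import enum

class Color(enum.Enum):
    RED = 1

print(enum_to_string(Color.RED))  # RED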
22de3d8540e18ae321214f1d73bc3bee9dbabfd7
graceshaw/uwsgiconf
uwsgiconf/uwsgi_stub.py
[ "BSD-3-Clause" ]
Python
metric_dec
bool
def metric_dec(key: str, value: int = 1) -> bool: """Decrements the specified metric key value by the specified value. :param key: :param value: """ return False
Decrements the specified metric key value by the specified value. :param key: :param value:
Decrements the specified metric key value by the specified value.
[ "Decrements", "the", "specified", "metric", "key", "value", "by", "the", "specified", "value", "." ]
def metric_dec(key: str, value: int = 1) -> bool: return False
[ "def", "metric_dec", "(", "key", ":", "str", ",", "value", ":", "int", "=", "1", ")", "->", "bool", ":", "return", "False" ]
Decrements the specified metric key value by the specified value.
[ "Decrements", "the", "specified", "metric", "key", "value", "by", "the", "specified", "value", "." ]
[ "\"\"\"Decrements the specified metric key value by the specified value.\n\n :param key:\n\n :param value:\n\n \"\"\"" ]
[ { "param": "key", "type": "str" }, { "param": "value", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "key", "type": "str", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "value", "type": "int", "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def metric_dec(key: str, value: int = 1) -> bool: return False
417
485
e48f1dda432c5bb356bf71024034c4905c169be5
ahmedkareem999/MITx-6.00.1x
isIn.py
[ "MIT" ]
Python
isIn
<not_specific>
def isIn(char, aStr):
    '''
    char: a single character
    aStr: an alphabetized string
    
    returns: True if char is in aStr; False otherwise
    '''
    # Your code here
    val1 = len(aStr)//2
    if aStr == "":
        return False
    elif len(aStr) <=1 and char != aStr[0]:
        return False
    elif char == aStr[val1]:
        return True
    elif char < aStr[val1]:
        val2 = aStr[:val1]
        return isIn(char,val2)
    elif char > aStr[val1]:
        val3 = aStr[val1:]
        return isIn(char, val3)
char: a single character aStr: an alphabetized string returns: True if char is in aStr; False otherwise
a single character aStr: an alphabetized string True if char is in aStr; False otherwise
[ "a", "single", "character", "aStr", ":", "an", "alphabetized", "string", "True", "if", "char", "is", "in", "aStr", ";", "False", "otherwise" ]
def isIn(char, aStr):
    val1 = len(aStr)//2
    if aStr == "":
        return False
    elif len(aStr) <=1 and char != aStr[0]:
        return False
    elif char == aStr[val1]:
        return True
    elif char < aStr[val1]:
        val2 = aStr[:val1]
        return isIn(char,val2)
    elif char > aStr[val1]:
        val3 = aStr[val1:]
        return isIn(char, val3)
[ "def", "isIn", "(", "char", ",", "aStr", ")", ":", "val1", "=", "len", "(", "aStr", ")", "//", "2", "if", "aStr", "==", "\"\"", ":", "return", "False", "elif", "len", "(", "aStr", ")", "<=", "1", "and", "char", "!=", "aStr", "[", "0", "]", ":", "return", "False", "elif", "char", "==", "aStr", "[", "val1", "]", ":", "return", "True", "elif", "char", "<", "aStr", "[", "val1", "]", ":", "val2", "=", "aStr", "[", ":", "val1", "]", "return", "isIn", "(", "char", ",", "val2", ")", "elif", "char", ">", "aStr", "[", "val1", "]", ":", "val3", "=", "aStr", "[", "val1", ":", "]", "return", "isIn", "(", "char", ",", "val3", ")", "elif", "char", "!=", "val2", "and", "char", "!=", "val3", ":", "return", "False" ]
char: a single character aStr: an alphabetized string
[ "char", ":", "a", "single", "character", "aStr", ":", "an", "alphabetized", "string" ]
[ "'''\n char: a single character\n aStr: an alphabetized string\n \n returns: True if char is in aStr; False otherwise\n '''", "# Your code here" ]
[ { "param": "char", "type": null }, { "param": "aStr", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "char", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "aStr", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def isIn(char, aStr):
    val1 = len(aStr)//2
    if aStr == "":
        return False
    elif len(aStr) <=1 and char != aStr[0]:
        return False
    elif char == aStr[val1]:
        return True
    elif char < aStr[val1]:
        val2 = aStr[:val1]
        return isIn(char,val2)
    elif char > aStr[val1]:
        val3 = aStr[val1:]
        return isIn(char, val3)
418
583
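Quick demonstration of the binary search above (my example); each recursive call halves the alphabetized string. Note the trailing elif in the original source compared char against names defined only in other branches and could never execute, so it is dropped in the cleaned-up code above:

print(isIn('c', 'abcde'))  # True  -- midpoint hit on the first call
print(isIn('q', 'abcde'))  # False -- recurses on ever-shorter right halves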
5f32aef4f913fa9d09c2e9d898570d0776a576c9
jlmartinnc/PythonStdioGames
src/gamesbyexample/parkingvalet.py
[ "Python-2.0" ]
Python
hasWon
<not_specific>
def hasWon(board): """Return True if the 'A' car has reached the right edge.""" # The puzzle is solved when the 'A' car reaches the right edge. for y in range(board['height']): if board[(board['width'] - 1, y)] == 'A': return True return False
Return True if the 'A' car has reached the right edge.
Return True if the 'A' car has reached the right edge.
[ "Return", "True", "if", "the", "'", "A", "'", "car", "has", "reached", "the", "right", "edge", "." ]
def hasWon(board): for y in range(board['height']): if board[(board['width'] - 1, y)] == 'A': return True return False
[ "def", "hasWon", "(", "board", ")", ":", "for", "y", "in", "range", "(", "board", "[", "'height'", "]", ")", ":", "if", "board", "[", "(", "board", "[", "'width'", "]", "-", "1", ",", "y", ")", "]", "==", "'A'", ":", "return", "True", "return", "False" ]
Return True if the 'A' car has reached the right edge.
[ "Return", "True", "if", "the", "'", "A", "'", "car", "has", "reached", "the", "right", "edge", "." ]
[ "\"\"\"Return True if the 'A' car has reached the right edge.\"\"\"", "# The puzzle is solved when the 'A' car reaches the right edge." ]
[ { "param": "board", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "board", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def hasWon(board): for y in range(board['height']): if board[(board['width'] - 1, y)] == 'A': return True return False
419
728
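An illustrative board for hasWon (mine). The function only inspects the rightmost column, so a minimal dict with the 'width'/'height' keys and the right-edge cells is enough:

board = {'width': 3, 'height': 2, (2, 0): '.', (2, 1): 'A'}
print(hasWon(board))  # True -- 'A' sits on the right edge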
691bbca4c37aa6dfa95dd54a2aa3d519c36bfc9b
klen/euler
003/solution.py
[ "MIT" ]
Python
prime_factors
null
def prime_factors(num): """ Get all prime factors for number. """ dd = 2 while num > 1: while num % dd == 0: yield dd num /= dd dd = dd + 1
Get all prime factors for number.
Get all prime factors for number.
[ "Get", "all", "prime", "factors", "for", "number", "." ]
def prime_factors(num): dd = 2 while num > 1: while num % dd == 0: yield dd num /= dd dd = dd + 1
[ "def", "prime_factors", "(", "num", ")", ":", "dd", "=", "2", "while", "num", ">", "1", ":", "while", "num", "%", "dd", "==", "0", ":", "yield", "dd", "num", "/=", "dd", "dd", "=", "dd", "+", "1" ]
Get all prime factors for number.
[ "Get", "all", "prime", "factors", "for", "number", "." ]
[ "\"\"\" Get all prime factors for number. \"\"\"" ]
[ { "param": "num", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "num", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def prime_factors(num): dd = 2 while num > 1: while num % dd == 0: yield dd num /= dd dd = dd + 1
420
23
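Example run of prime_factors (mine). This repo entry is Project Euler problem 3, where the answer is the largest factor:

print(list(prime_factors(13195)))        # [5, 7, 13, 29]
print(max(prime_factors(600851475143)))  # 6857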
63325c1c505f9b52dbc8ec019d46dd1fcce92f02
Kushmaniar/clover
clover_simulation/src/clover_simulation/world.py
[ "MIT" ]
Python
save_world
<not_specific>
def save_world(world, file): ''' Save the world to file-like object ''' return world.write(file, encoding='unicode')
Save the world to file-like object
Save the world to file-like object
[ "Save", "the", "world", "to", "file", "-", "like", "object" ]
def save_world(world, file): return world.write(file, encoding='unicode')
[ "def", "save_world", "(", "world", ",", "file", ")", ":", "return", "world", ".", "write", "(", "file", ",", "encoding", "=", "'unicode'", ")" ]
Save the world to file-like object
[ "Save", "the", "world", "to", "file", "-", "like", "object" ]
[ "'''\n Save the world to file-like object\n '''" ]
[ { "param": "world", "type": null }, { "param": "file", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "world", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "file", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def save_world(world, file): return world.write(file, encoding='unicode')
421
391
4092e8f3a5362f34410668538d095bf2fb41be83
ahstewart/OASIS
OasisPy/initialize.py
[ "MIT" ]
Python
create
null
def create(location):
    '''
    creates a data, template, psf, sources, and residual directory
    '''
    dirs = ["data", "templates", "residuals", "sources", "psf"]
    for d in dirs:
        if os.path.exists("%s/%s" % (location, d)) == False:
            os.system("mkdir %s/%s" % (location, d))
            print("-> %s directory created in %s\n" % (d, location))
creates a data, template, psf, sources, and residual directory
creates a data, template, psf, sources, and residual directory
[ "creates", "a", "data", "template", "psf", "sources", "and", "residual", "directory" ]
def create(location):
    dirs = ["data", "templates", "residuals", "sources", "psf"]
    for d in dirs:
        if os.path.exists("%s/%s" % (location, d)) == False:
            os.system("mkdir %s/%s" % (location, d))
            print("-> %s directory created in %s\n" % (d, location))
[ "def", "create", "(", "location", ")", ":", "dirs", "=", "[", "\"data\"", ",", "\"templates\"", ",", "\"residuals\"", ",", "\"sources\"", ",", "\"psf\"", ",", "\"sources\"", "]", "for", "d", "in", "dirs", ":", "if", "os", ".", "path", ".", "exists", "(", "\"%s/%s\"", "%", "(", "location", ",", "d", ")", ")", "==", "False", ":", "os", ".", "system", "(", "\"mkdir %s/%s\"", "%", "(", "location", ",", "d", ")", ")", "print", "(", "\"-> %s directory created in %s\\n\"", "%", "(", "d", ",", "location", ")", ")" ]
creates a data, template, psf, sources, and residual directory
[ "creates", "a", "data", "template", "psf", "sources", "and", "residual", "directory" ]
[ "'''\n creates a data, template, psf, sources, and residual directory\n '''" ]
[ { "param": "location", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "location", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os

def create(location):
    dirs = ["data", "templates", "residuals", "sources", "psf"]
    for d in dirs:
        if os.path.exists("%s/%s" % (location, d)) == False:
            os.system("mkdir %s/%s" % (location, d))
            print("-> %s directory created in %s\n" % (d, location))
422
987
cb56ca1b5612ab68ab8fa1c8f65ce9a97e9a3d63
dbmi-bgm/cgap-portal
src/encoded/inheritance_mode.py
[ "MIT" ]
Python
compute_cmphet_inheritance_modes
<not_specific>
def compute_cmphet_inheritance_modes(cmphet): """ Collects and summarizes Compound het caller results """ inheritance_modes_set = set() inheritance_modes = [] if cmphet is not None: for entry in cmphet: phase = entry.get('comhet_phase') impact = entry.get('comhet_impact_gene') s = 'Compound Het (%s/%s)' % (phase, impact.lower()) inheritance_modes_set.add(s) inheritance_modes = list(inheritance_modes_set) inheritance_modes.sort() return inheritance_modes
Collects and summarizes Compound het caller results
Collects and summarizes Compound het caller results
[ "Collects", "and", "summarizes", "Compound", "het", "caller", "results" ]
def compute_cmphet_inheritance_modes(cmphet): inheritance_modes_set = set() inheritance_modes = [] if cmphet is not None: for entry in cmphet: phase = entry.get('comhet_phase') impact = entry.get('comhet_impact_gene') s = 'Compound Het (%s/%s)' % (phase, impact.lower()) inheritance_modes_set.add(s) inheritance_modes = list(inheritance_modes_set) inheritance_modes.sort() return inheritance_modes
[ "def", "compute_cmphet_inheritance_modes", "(", "cmphet", ")", ":", "inheritance_modes_set", "=", "set", "(", ")", "inheritance_modes", "=", "[", "]", "if", "cmphet", "is", "not", "None", ":", "for", "entry", "in", "cmphet", ":", "phase", "=", "entry", ".", "get", "(", "'comhet_phase'", ")", "impact", "=", "entry", ".", "get", "(", "'comhet_impact_gene'", ")", "s", "=", "'Compound Het (%s/%s)'", "%", "(", "phase", ",", "impact", ".", "lower", "(", ")", ")", "inheritance_modes_set", ".", "add", "(", "s", ")", "inheritance_modes", "=", "list", "(", "inheritance_modes_set", ")", "inheritance_modes", ".", "sort", "(", ")", "return", "inheritance_modes" ]
Collects and summarizes Compound het caller results
[ "Collects", "and", "summarizes", "Compound", "het", "caller", "results" ]
[ "\"\"\" Collects and summarizes Compound het caller results \"\"\"" ]
[ { "param": "cmphet", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "cmphet", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def compute_cmphet_inheritance_modes(cmphet): inheritance_modes_set = set() inheritance_modes = [] if cmphet is not None: for entry in cmphet: phase = entry.get('comhet_phase') impact = entry.get('comhet_impact_gene') s = 'Compound Het (%s/%s)' % (phase, impact.lower()) inheritance_modes_set.add(s) inheritance_modes = list(inheritance_modes_set) inheritance_modes.sort() return inheritance_modes
423
557
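Hypothetical input for compute_cmphet_inheritance_modes (field names taken from the code above); duplicate phase/impact pairs collapse via the set:

calls = [
    {'comhet_phase': 'Phased', 'comhet_impact_gene': 'STRONG'},
    {'comhet_phase': 'Phased', 'comhet_impact_gene': 'STRONG'},
]
print(compute_cmphet_inheritance_modes(calls))
# ['Compound Het (Phased/strong)']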
230ebf4fbb1c3e6e0bbdadb5d6727cf5b8f11f93
suomesta/ken3
checker/errortest/errortest.py
[ "MIT" ]
Python
filenames
<not_specific>
def filenames(targets, exclude):
    """Pick cpp files from current directory.

    param[in] targets: appointed targets file names.
                       if None, pick all files with extension '.cpp'
                       should be string-list or None
    param[in] exclude: appointed file names to be excluded.
                       should be string-list or None
    return target file names in string-list
    """
    if targets:
        names = list(targets)
    else:
        names = [i for i in glob.glob('*.cpp')]
        names += [i for i in glob.glob('*/*.cpp')]
    if exclude:
        names = [i for i in names if i not in exclude]
    return sorted(names)
Pick cpp files from current directory. param[in] targets: appointed targets file names. if None, pick all files with extension '.cpp' should be string-list or None param[in] exclude: appointed file names to be excluded. should be string-list or None return target file names in string-list
Pick cpp files from current directory. param[in] targets: appointed targets file names. if None, pick all files with extension '.cpp' should be string-list or None param[in] exclude: appointed file names to be excluded. should be string-list or None return target file names in string-list
[ "Pick", "cpp", "files", "from", "current", "directory", ".", "param", "[", "in", "]", "targets", ":", "appointed", "targets", "file", "names", ".", "if", "None", "pick", "all", "files", "with", "extension", "'", ".", "cpp", "'", "should", "be", "string", "-", "list", "or", "None", "param", "[", "in", "]", "exclude", ":", "appointed", "file", "names", "to", "be", "excluded", ".", "should", "be", "string", "-", "list", "or", "None", "return", "target", "file", "names", "in", "string", "-", "list" ]
def filenames(targets, exclude): if targets: names = list(targets) else: names = [i for i in glob.glob('*.cpp')] names += [i for i in glob.glob('*/*.cpp')] if exclude: names = [i for i in names if i not in exclude] return sorted(names)
[ "def", "filenames", "(", "targets", ",", "exclude", ")", ":", "if", "targets", ":", "names", "=", "list", "(", "targets", ")", "else", ":", "names", "=", "[", "i", "for", "i", "in", "glob", ".", "glob", "(", "'*.cpp'", ")", "]", "names", "+=", "[", "i", "for", "i", "in", "glob", ".", "glob", "(", "'*/*.cpp'", ")", "]", "if", "exclude", ":", "names", "=", "[", "i", "for", "i", "in", "names", "if", "i", "not", "in", "exclude", "]", "return", "sorted", "(", "names", ")" ]
Pick cpp files from current directory.
[ "Pick", "cpp", "files", "from", "current", "directory", "." ]
[ "\"\"\"Pick cpp files from current directory.\n\n param[in] targets: appointed targets file names.\n if None, pick all files with extension '.cpp'\n should be string-list or None\n param[in] exclude: appointed file names to be excluded.\n should be string-list or None\n return targer file names in string-list\n \"\"\"" ]
[ { "param": "targets", "type": null }, { "param": "exclude", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "targets", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "exclude", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import glob def filenames(targets, exclude): if targets: names = list(targets) else: names = [i for i in glob.glob('*.cpp')] names += [i for i in glob.glob('*/*.cpp')] if exclude: names = [i for i in names if i not in exclude] return sorted(names)
424
978
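Illustrative calls to filenames (my example; the file names are made up). With explicit targets no globbing happens; with None it scans the working directory:

print(filenames(['src/b.cpp', 'src/a.cpp'], exclude=['src/b.cpp']))  # ['src/a.cpp']
print(filenames(None, None))  # every *.cpp and */*.cpp under the cwd, sorted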
11d85f36dfd1258ff9f241077c986ef780555a53
pyantonio-coder/crypto
cryptology/tools.py
[ "BSD-3-Clause" ]
Python
playfair_encrypt_col_pair
<not_specific>
def playfair_encrypt_col_pair(grid, pair):
    """if two letters are in the same col,
    this is the method to encrypt 

    Args:
        grid ([list]): [list of the playfair grid]
        pair ([str]): [pair of strings in form e.g "AB"]

    Returns:
        [first_encrypted_letter,
        second_encrypted_letter]: [Returns encrypted letters as a tuple]
    """
    pos_in_list_1=grid.index(pair[0])
    pos_in_list_2=grid.index(pair[1])
    if pos_in_list_1 >19:
        first_encrypted_letter=grid[(pos_in_list_1-20)]
    else:
        first_encrypted_letter=grid[(pos_in_list_1+5)]
    if pos_in_list_2 >19:
        second_encrypted_letter=grid[(pos_in_list_2-20)]
    else:
        second_encrypted_letter=grid[(pos_in_list_2+5)]
    return first_encrypted_letter,second_encrypted_letter
if two letters are in the same col, this is the method to encrypt Args: grid ([list]): [list of the playfair grid] pair ([str]): [pair of strings in form e.g "AB"] Returns: [first_encrypted_letter, second_encrypted_letter]: [Returns encrypted letters as a tuple]
if two letters are in the same col, this is the method to encrypt
[ "if", "two", "letters", "are", "in", "the", "same", "col", "this", "is", "the", "method", "to", "encrypt" ]
def playfair_encrypt_col_pair(grid, pair): pos_in_list_1=grid.index(pair[0]) pos_in_list_2=grid.index(pair[1]) if pos_in_list_1 >19: first_encrypted_letter=grid[(pos_in_list_1-20)] else: first_encrypted_letter=grid[(pos_in_list_1+5)] if pos_in_list_2 >19: second_encrypted_letter=grid[(pos_in_list_2-20)] else: second_encrypted_letter=grid[(pos_in_list_2+5)] return first_encrypted_letter,second_encrypted_letter
[ "def", "playfair_encrypt_col_pair", "(", "grid", ",", "pair", ")", ":", "pos_in_list_1", "=", "grid", ".", "index", "(", "pair", "[", "0", "]", ")", "pos_in_list_2", "=", "grid", ".", "index", "(", "pair", "[", "1", "]", ")", "if", "pos_in_list_1", ">", "19", ":", "first_encrypted_letter", "=", "grid", "[", "(", "pos_in_list_1", "-", "20", ")", "]", "else", ":", "first_encrypted_letter", "=", "grid", "[", "(", "pos_in_list_1", "+", "5", ")", "]", "if", "pos_in_list_2", ">", "19", ":", "second_encrypted_letter", "=", "grid", "[", "(", "pos_in_list_2", "-", "20", ")", "]", "else", ":", "second_encrypted_letter", "=", "grid", "[", "(", "pos_in_list_2", "+", "5", ")", "]", "return", "first_encrypted_letter", ",", "second_encrypted_letter" ]
if two letters are in the same col, this is the method to encrypt
[ "if", "two", "letters", "are", "in", "the", "same", "col", "this", "is", "the", "method", "to", "encrypt" ]
[ "\"\"\"if two letters are in the same col,\n    this is the method to encrypt \n\n    Args:\n        grid ([list]): [list of the playfair grid]\n        pair ([str]): [pair of strings in form e.g \"AB\"]\n\n    Returns:\n        [first_encrypted_letter,\n        second_encrypted_letter]: [Returns encrypted letters as a tuple]\n    \"\"\"" ]
[ { "param": "grid", "type": null }, { "param": "pair", "type": null } ]
{ "returns": [ { "docstring": "[Returns encrypted letters as a tuple]", "docstring_tokens": [ "[", "Returns", "encrypted", "letters", "as", "a", "tuple", "]" ], "type": "[first_decrpyted_letter,\nsecond_decrpyted_letter]" } ], "raises": [], "params": [ { "identifier": "grid", "type": null, "docstring": "[list of the playfair grid]", "docstring_tokens": [ "[", "list", "of", "the", "playfair", "grid", "]" ], "default": null, "is_optional": false }, { "identifier": "pair", "type": null, "docstring": "[pair of strings in form e.g \"AB\"]", "docstring_tokens": [ "[", "pair", "of", "strings", "in", "form", "e", ".", "g", "\"", "AB", "\"", "]" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def playfair_encrypt_col_pair(grid, pair): pos_in_list_1=grid.index(pair[0]) pos_in_list_2=grid.index(pair[1]) if pos_in_list_1 >19: first_encrypted_letter=grid[(pos_in_list_1-20)] else: first_encrypted_letter=grid[(pos_in_list_1+5)] if pos_in_list_2 >19: second_encrypted_letter=grid[(pos_in_list_2-20)] else: second_encrypted_letter=grid[(pos_in_list_2+5)] return first_encrypted_letter,second_encrypted_letter
425
96
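A sketch of the column rule above on a standard 5x5 Playfair square. The grid here assumes the usual I/J merge; the repo's actual grid construction isn't shown in this record:

grid = list('ABCDEFGHIKLMNOPQRSTUVWXYZ')
print(playfair_encrypt_col_pair(grid, 'AF'))  # ('F', 'L') -- each letter moves one row down
print(playfair_encrypt_col_pair(grid, 'VW'))  # ('A', 'B') -- the bottom row wraps to the top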
450b4eba7a889b24a6d040fea2e53b2828858542
stan-v/wsn-simulation
node.py
[ "MIT" ]
Python
calc_toa_ble
<not_specific>
def calc_toa_ble(PL=10, SF=0): ''' BLE has a protocol which sends 14 bytes of overhead along with the actual payload. The information is modulated on a 1 MBit/s speed, which gives a Time on air of 1 microsecond per bit or 8 microsecond per byte/octet. ''' amount_of_bytes = 14 + PL T = amount_of_bytes*8/10**6 return T
BLE has a protocol which sends 14 bytes of overhead along with the actual payload. The information is modulated on a 1 MBit/s speed, which gives a Time on air of 1 microsecond per bit or 8 microsecond per byte/octet.
BLE has a protocol which sends 14 bytes of overhead along with the actual payload. The information is modulated on a 1 MBit/s speed, which gives a Time on air of 1 microsecond per bit or 8 microsecond per byte/octet.
[ "BLE", "has", "a", "protocol", "which", "sends", "14", "bytes", "of", "overhead", "along", "with", "the", "actual", "payload", ".", "The", "information", "is", "modulated", "on", "a", "1", "MBit", "/", "s", "speed", "which", "gives", "a", "Time", "on", "air", "of", "1", "microsecond", "per", "bit", "or", "8", "microsecond", "per", "byte", "/", "octet", "." ]
def calc_toa_ble(PL=10, SF=0): amount_of_bytes = 14 + PL T = amount_of_bytes*8/10**6 return T
[ "def", "calc_toa_ble", "(", "PL", "=", "10", ",", "SF", "=", "0", ")", ":", "amount_of_bytes", "=", "14", "+", "PL", "T", "=", "amount_of_bytes", "*", "8", "/", "10", "**", "6", "return", "T" ]
BLE has a protocol which sends 14 bytes of overhead along with the actual payload.
[ "BLE", "has", "a", "protocol", "which", "sends", "14", "bytes", "of", "overhead", "along", "with", "the", "actual", "payload", "." ]
[ "'''\r\n BLE has a protocol which sends 14 bytes of overhead along with the actual\r\n payload. The information is modulated on a 1 MBit/s speed, which gives a \r\n Time on air of 1 microsecond per bit or 8 microsecond per byte/octet. \r\n '''" ]
[ { "param": "PL", "type": null }, { "param": "SF", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "PL", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "SF", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def calc_toa_ble(PL=10, SF=0): amount_of_bytes = 14 + PL T = amount_of_bytes*8/10**6 return T
426
209
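Worked numbers for calc_toa_ble: with the default 10-byte payload, (14 + 10) bytes * 8 bits at 1 Mbit/s gives 192 microseconds:

print(calc_toa_ble(PL=10))  # 0.000192 (seconds)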
4b4f1d434b98c446c7e6a755868ba6c092294fa1
igor-93/ml_utils
timeseries/stock_indicators.py
[ "BSD-2-Clause" ]
Python
macd
<not_specific>
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"):
    """Moving Average Convergence Divergence
    Parameters:
    -----------
    df : DataFrame
        Input dataframe.
    ewa_short : int
        Exponentially weighted average time-window for a short time-span.
        A common choice for the short time-window is 12 intervals.
    ewa_long : int
        Exponentially weighted average time-window for a longer time-span.
        A common choice for the long time-window is 26 intervals.
    ewa_signal : int
        Time-window for the EWA of the difference between long and short
        averages.
    price_col : str
        Column name in `df` used for defining the current indicator (e.g. "open",
        "close", etc.)
    Returns:
    --------
    macd_hist : Series
        MACD line minus its signal line for the time series, returned in a
        tuple together with the label 'stationary'.
    """
    ewa_short = int(ewa_short)
    ewa_long = int(ewa_long)
    ewa_signal = int(ewa_signal)
    ewa12 = df[price_col].ewm(span=ewa_short).mean()
    ewa26 = df[price_col].ewm(span=ewa_long).mean()
    macd_ts = ewa12 - ewa26
    signal_line = macd_ts.ewm(span=ewa_signal).mean()
    return macd_ts - signal_line, 'stationary'
Moving Average Convergence Divergence Parameters: ----------- df : DataFrame Input dataframe. ewa_short : int Exponentially weighted average time-window for a short time-span. A common choice for the short time-window is 12 intervals. ewa_long : int Exponentially weighted average time-window for a longer time-span. A common choice for the long time-window is 26 intervals. ewa_signal : int Time-window for the EWA of the difference between long and short averages. price_col : str Column name in `df` used for defining the current indicator (e.g. "open", "close", etc.) Returns: -------- macd_hist : Series MACD line minus its signal line for the time series, returned in a tuple together with the label 'stationary'.
Moving Average Convergence Divergence Parameters. df : DataFrame Input dataframe. ewa_short : int Exponentially weighted average time-window for a short time-span. A common choice for the short time-window is 12 intervals. ewa_long : int Exponentially weighted average time-window for a longer time-span. A common choice for the long time-window is 26 intervals. ewa_signal : int Time-window for the EWA of the difference between long and short averages. price_col : str Column name in `df` used for defining the current indicator Returns. macd_hist : Series MACD line minus its signal line for the time series, returned in a tuple together with the label 'stationary'.
[ "Moving", "Average", "Convergence", "Divergence", "Parameters", ".", "df", ":", "DataFrame", "Input", "dataframe", ".", "ewa_short", ":", "int", "Exponentially", "weighted", "average", "time", "-", "window", "for", "a", "short", "time", "-", "span", ".", "A", "common", "choice", "for", "the", "short", "time", "-", "window", "is", "12", "intervals", ".", "ewa_long", ":", "int", "Exponentially", "weighted", "average", "time", "-", "window", "for", "a", "longer", "time", "-", "span", ".", "A", "common", "choice", "for", "the", "long", "time", "-", "window", "is", "26", "intervals", ".", "ewa_signal", ":", "int", "Time", "-", "window", "for", "the", "EWA", "of", "the", "difference", "between", "long", "and", "short", "averages", ".", "price_col", ":", "str", "Column", "name", "in", "`", "df", "`", "used", "for", "defining", "the", "current", "indicator", "Returns", ".", "macd_hist", ":", "Series", "MACD", "line", "minus", "its", "signal", "line", "for", "the", "time", "series", "returned", "in", "a", "tuple", "together", "with", "the", "label", "'", "stationary", "'", "." ]
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"): ewa_short = int(ewa_short) ewa_long = int(ewa_long) ewa_signal = int(ewa_signal) ewa12 = df[price_col].ewm(span=ewa_short).mean() ewa26 = df[price_col].ewm(span=ewa_long).mean() macd_ts = ewa12 - ewa26 signal_line = macd_ts.ewm(span=ewa_signal).mean() return macd_ts - signal_line, 'stationary'
[ "def", "macd", "(", "df", ",", "ewa_short", ",", "ewa_long", ",", "ewa_signal", ",", "price_col", "=", "\"adj_close\"", ")", ":", "ewa_short", "=", "int", "(", "ewa_short", ")", "ewa_long", "=", "int", "(", "ewa_long", ")", "ewa_signal", "=", "int", "(", "ewa_signal", ")", "ewa12", "=", "df", "[", "price_col", "]", ".", "ewm", "(", "span", "=", "ewa_short", ")", ".", "mean", "(", ")", "ewa26", "=", "df", "[", "price_col", "]", ".", "ewm", "(", "span", "=", "ewa_long", ")", ".", "mean", "(", ")", "macd_ts", "=", "ewa12", "-", "ewa26", "signal_line", "=", "macd_ts", ".", "ewm", "(", "span", "=", "ewa_signal", ")", ".", "mean", "(", ")", "return", "macd_ts", "-", "signal_line", ",", "'stationary'" ]
Moving Average Convergence Divergence Parameters:
[ "Moving", "Average", "Convergence", "Divergence", "Parameters", ":" ]
[ "\"\"\"Moving Average Convergence Divergence\n Parameters:\n -----------\n df : DataFrame\n Input dataframe.\n ewa_short : int\n Exponentially weighted average time-window for a short time-span.\n A common choice for the short time-window is 12 intervals.\n ewa_long : int\n Exponentially weighted average time-window for a longer time-span.\n A common choice for the long time-window is 26 intervals.\n ewa_signal : int\n Time-window for the EWA of the difference between long and short\n averages.\n price_col : str\n Column name in `df` used for defining the current indicator (e.g. \"open\",\n \"close\", etc.)\n Returns:\n --------\n macd_ts : Series\n Moving average convergence-divergence indicator for the time series.\n \"\"\"" ]
[ { "param": "df", "type": null }, { "param": "ewa_short", "type": null }, { "param": "ewa_long", "type": null }, { "param": "ewa_signal", "type": null }, { "param": "price_col", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "df", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ewa_short", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ewa_long", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "ewa_signal", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "price_col", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"): ewa_short = int(ewa_short) ewa_long = int(ewa_long) ewa_signal = int(ewa_signal) ewa12 = df[price_col].ewm(span=ewa_short).mean() ewa26 = df[price_col].ewm(span=ewa_long).mean() macd_ts = ewa12 - ewa26 signal_line = macd_ts.ewm(span=ewa_signal).mean() return macd_ts - signal_line, 'stationary'
427
861
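A hedged usage sketch for macd. pandas is assumed (the code calls DataFrame.ewm), and the prices below are made up. Note from the code that the function returns a tuple: the MACD histogram series plus the label 'stationary':

import pandas as pd

df = pd.DataFrame({'adj_close': [10, 11, 12, 11, 13, 14, 13, 15, 16, 15]})
hist, kind = macd(df, ewa_short=3, ewa_long=6, ewa_signal=2)
print(kind)           # 'stationary'
print(hist.iloc[-1])  # histogram value for the most recent bar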
387bab85d398c1b63a768cddc905268628528d4c
kkkkv/tgnms
optimizer_service/optimizer_service/optimizations/graph.py
[ "MIT" ]
Python
remove_low_uptime_links
None
def remove_low_uptime_links(
    graph: nx.Graph, active_links: Dict, link_uptime_threshold: float
) -> None:
    """Removes links with an average uptime below 'link_uptime_threshold' from 'graph'."""
    # Snapshot the edge view so edges can be removed while iterating.
    for edge in list(graph.edges):
        link_attributes = graph.get_edge_data(*edge)
        link_name = link_attributes.get("name")
        uptime = active_links.get(link_name, 0)
        if uptime < link_uptime_threshold:
            logging.debug(
                f"Removing {link_name}: Uptime ({uptime}) is less than {link_uptime_threshold}."
            )
            graph.remove_edge(*edge)
Removes links with an average uptime below 'link_uptime_threshold' from 'graph'.
Removes links with an average uptime below 'link_uptime_threshold' from 'graph'.
[ "Removes", "links", "with", "an", "average", "uptime", "below", "'", "link_uptime_threshold", "'", "from", "'", "graph", "'", "." ]
def remove_low_uptime_links(
    graph: nx.Graph, active_links: Dict, link_uptime_threshold: float
) -> None:
    for edge in list(graph.edges):
        link_attributes = graph.get_edge_data(*edge)
        link_name = link_attributes.get("name")
        uptime = active_links.get(link_name, 0)
        if uptime < link_uptime_threshold:
            logging.debug(
                f"Removing {link_name}: Uptime ({uptime}) is less than {link_uptime_threshold}."
            )
            graph.remove_edge(*edge)
[ "def", "remove_low_uptime_links", "(", "graph", ":", "nx", ".", "Graph", ",", "active_links", ":", "Dict", ",", "link_uptime_threshold", ":", "float", ")", "->", "None", ":", "for", "edge", "in", "graph", ".", "edges", ":", "link_attributes", "=", "graph", ".", "get_edge_data", "(", "*", "edge", ")", "link_name", "=", "link_attributes", ".", "get", "(", "\"name\"", ")", "uptime", "=", "active_links", ".", "get", "(", "link_name", ",", "0", ")", "if", "uptime", "<", "link_uptime_threshold", ":", "logging", ".", "debug", "(", "f\"Removing {link_name}: Uptime ({uptime}) is less than {link_uptime_threshold}.\"", ")", "graph", ".", "remove_edge", "(", "*", "edge", ")" ]
Removes links with an average uptime below 'link_uptime_threshold' from 'graph'.
[ "Removes", "links", "with", "an", "average", "uptime", "below", "'", "link_uptime_threshold", "'", "from", "'", "graph", "'", "." ]
[ "\"\"\"Removes links with an average uptime below 'link_uptime_threshold' from 'graph'.\"\"\"" ]
[ { "param": "graph", "type": "nx.Graph" }, { "param": "active_links", "type": "Dict" }, { "param": "link_uptime_threshold", "type": "float" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "graph", "type": "nx.Graph", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "active_links", "type": "Dict", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "link_uptime_threshold", "type": "float", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import logging
from typing import Dict

import networkx as nx

def remove_low_uptime_links(
    graph: nx.Graph, active_links: Dict, link_uptime_threshold: float
) -> None:
    for edge in list(graph.edges):
        link_attributes = graph.get_edge_data(*edge)
        link_name = link_attributes.get("name")
        uptime = active_links.get(link_name, 0)
        if uptime < link_uptime_threshold:
            logging.debug(
                f"Removing {link_name}: Uptime ({uptime}) is less than {link_uptime_threshold}."
            )
            graph.remove_edge(*edge)
428
418
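Minimal networkx usage for the function above (my example; the edge names are invented). The original source iterated the live edge view while removing edges, which can raise RuntimeError in networkx; with the edge view snapshotted via list() as in the cleaned-up code above, pruning during iteration is safe:

import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b', name='link-ab')
g.add_edge('b', 'c', name='link-bc')
remove_low_uptime_links(g, {'link-ab': 0.99, 'link-bc': 0.2}, link_uptime_threshold=0.9)
print(list(g.edges))  # [('a', 'b')] -- link-bc fell below the uptime threshold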
8db2a3b3e21c3ff2b0194c67a24acc363a88470d
UpyExplorer/upy-error
upy_error/base.py
[ "MIT" ]
Python
format_exception
<not_specific>
def format_exception(error): """Return the exception in readable form Args: error (class): Exception Returns: str: Traceback Error """ exception_list = traceback.format_stack() exception_list = exception_list[:-2] exception_list.extend(traceback.format_tb(sys.exc_info()[2])) exception_list.extend(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])) exception_str = "Traceback (most recent call last):\n" exception_str += "".join(exception_list) exception_str = exception_str[:-1] return exception_str
Return the exception in readable form Args: error (class): Exception Returns: str: Traceback Error
Return the exception in readable form
[ "Return", "the", "exception", "in", "readable", "form" ]
def format_exception(error): exception_list = traceback.format_stack() exception_list = exception_list[:-2] exception_list.extend(traceback.format_tb(sys.exc_info()[2])) exception_list.extend(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])) exception_str = "Traceback (most recent call last):\n" exception_str += "".join(exception_list) exception_str = exception_str[:-1] return exception_str
[ "def", "format_exception", "(", "error", ")", ":", "exception_list", "=", "traceback", ".", "format_stack", "(", ")", "exception_list", "=", "exception_list", "[", ":", "-", "2", "]", "exception_list", ".", "extend", "(", "traceback", ".", "format_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", ")", "exception_list", ".", "extend", "(", "traceback", ".", "format_exception_only", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", ")", "exception_str", "=", "\"Traceback (most recent call last):\\n\"", "exception_str", "+=", "\"\"", ".", "join", "(", "exception_list", ")", "exception_str", "=", "exception_str", "[", ":", "-", "1", "]", "return", "exception_str" ]
Return the exception in readable form
[ "Return", "the", "exception", "in", "readable", "form" ]
[ "\"\"\"Return the exception in readable form\n Args:\n error (class): Exception\n Returns:\n str: Traceback Error\n \"\"\"" ]
[ { "param": "error", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": "str" } ], "raises": [], "params": [ { "identifier": "error", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import sys import traceback def format_exception(error): exception_list = traceback.format_stack() exception_list = exception_list[:-2] exception_list.extend(traceback.format_tb(sys.exc_info()[2])) exception_list.extend(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])) exception_str = "Traceback (most recent call last):\n" exception_str += "".join(exception_list) exception_str = exception_str[:-1] return exception_str
429
809
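Illustrative call (mine): format_exception reads the live exception via sys.exc_info(), so it must run inside an except block, and the error argument itself is never used by the body:

try:
    1 / 0
except ZeroDivisionError as err:
    print(format_exception(err))  # 'Traceback (most recent call last): ...'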
e16597e3131fe21f99a4452f8c411c7ab0737c6a
inomuh/indoor_localization
src/indoor_localization/positioning_node.py
[ "Apache-2.0" ]
Python
add_noise
<not_specific>
def add_noise(denominator):
    """
    This function adds some noise to the denominator to prevent the ZeroDivisionError
    while calculating the pre_tag_z, the pre_tag_y and the pre_tag_x values.
    """
    if (denominator >= -0.01) and (denominator <= 0):
        denominator -= random.uniform(0, 0.1)
    elif (denominator >= 0) and (denominator <= 0.01):
        denominator += random.uniform(0, 0.1)
    return denominator
This function adds some noise to the denominator to prevent the ZeroDivisionError while calculating the pre_tag_z, the pre_tag_y and the pre_tag_x values.
This function adds some noise to the denominator to prevent the ZeroDivisionError while calculating the pre_tag_z, the pre_tag_y and the pre_tag_x values.
[ "This", "function", "adds", "some", "noise", "to", "the", "denominator", "to", "prevent", "the", "ZeroDivisionError", "while", "calculating", "the", "pre_tag_z", "the", "pre_tag_y", "and", "the", "pre_tag_x", "values", "." ]
def add_noise(denominator): if (denominator >= -0.01) and (denominator <= 0): denominator -= random.uniform(0, 0.1) elif (denominator >= 0) and (denominator <= 0.01): denominator += random.uniform(0, 0.1) return denominator
[ "def", "add_noise", "(", "denominator", ")", ":", "if", "(", "denominator", ">=", "-", "0.01", ")", "and", "(", "denominator", "<=", "0", ")", ":", "denominator", "-=", "random", ".", "uniform", "(", "0", ",", "0.1", ")", "elif", "(", "denominator", ">=", "0", ")", "and", "(", "denominator", "<=", "0.01", ")", ":", "denominator", "+=", "random", ".", "uniform", "(", "0", ",", "0.1", ")", "return", "denominator" ]
This function adds some noise to the denominator to prevent the ZeroDivisionError while calculating the pre_tag_z, the pre_tag_y and the pre_tag_x values.
[ "This", "function", "adds", "some", "noise", "to", "the", "denominator", "to", "prevent", "the", "ZeroDivisionError", "while", "calculating", "the", "pre_tag_z", "the", "pre_tag_y", "and", "the", "pre_tag_x", "values", "." ]
[ "\"\"\"\n    This function adds some noise to the denominator to prevent the ZeroDivisionError\n    while calculating the pre_tag_z, the pre_tag_y and the pre_tag_x values.\n    \"\"\"" ]
[ { "param": "denominator", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "denominator", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import random def add_noise(denominator): if (denominator >= -0.01) and (denominator <= 0): denominator -= random.uniform(0, 0.1) elif (denominator >= 0) and (denominator <= 0.01): denominator += random.uniform(0, 0.1) return denominator
430
432
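Numeric intuition for add_noise (my example; output varies with random.uniform): values already clear of zero pass through, values in the danger zone get pushed away from it:

print(add_noise(0.5))     # 0.5 -- outside [-0.01, 0.01], unchanged
print(add_noise(0.005))   # nudged up, roughly into (0.005, 0.105]
print(add_noise(-0.005))  # nudged down, roughly into [-0.105, -0.005]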
a49702faea3f8a66804bbbb4b2090eff2ca9f885
Edinburgh-Genome-Foundry/BandWitch
bandwitch/DigestionProblem/SeparatingDigestionProblem.py
[ "MIT" ]
Python
_score_to_color
<not_specific>
def _score_to_color(score, maxi=0.1): """Transform a similarity score to a green/red color. Parameters ---------- score Value between 0 (perfect similarity, green) and 1 (red). maxi Value of the score above which everything appears completely red. Below this value the color goes progressively from red to green in 0. """ return ( max(0, min(1, score / maxi)), min(1, max(0, 1 - score / maxi)), 0, 0.5, )
Transform a similarity score to a green/red color. Parameters ---------- score Value between 0 (perfect similarity, green) and 1 (red). maxi Value of the score above which everything appears completely red. Below this value the color goes progressively from red to green in 0.
Transform a similarity score to a green/red color. Parameters score Value between 0 (perfect similarity, green) and 1 (red). maxi Value of the score above which everything appears completely red. Below this value the color goes progressively from red to green in 0.
[ "Transform", "a", "similarity", "score", "to", "a", "green", "/", "red", "color", ".", "Parameters", "score", "Value", "between", "0", "(", "perfect", "similarity", "green", ")", "and", "1", "(", "red", ")", ".", "maxi", "Value", "of", "the", "score", "above", "which", "everything", "appears", "completely", "red", ".", "Below", "this", "value", "the", "color", "goes", "progressively", "from", "red", "to", "green", "in", "0", "." ]
def _score_to_color(score, maxi=0.1): return ( max(0, min(1, score / maxi)), min(1, max(0, 1 - score / maxi)), 0, 0.5, )
[ "def", "_score_to_color", "(", "score", ",", "maxi", "=", "0.1", ")", ":", "return", "(", "max", "(", "0", ",", "min", "(", "1", ",", "score", "/", "maxi", ")", ")", ",", "min", "(", "1", ",", "max", "(", "0", ",", "1", "-", "score", "/", "maxi", ")", ")", ",", "0", ",", "0.5", ",", ")" ]
Transform a similarity score to a green/red color.
[ "Transform", "a", "similarity", "score", "to", "a", "green", "/", "red", "color", "." ]
[ "\"\"\"Transform a similarity score to a green/red color.\n\n Parameters\n ----------\n score\n Value between 0 (perfect similarity, green) and 1 (red).\n\n maxi\n Value of the score above which everything appears completely red.\n Below this value the color goes progressively from red to green in 0.\n \"\"\"" ]
[ { "param": "score", "type": null }, { "param": "maxi", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "score", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "maxi", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _score_to_color(score, maxi=0.1): return ( max(0, min(1, score / maxi)), min(1, max(0, 1 - score / maxi)), 0, 0.5, )
431
220
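Spot checks for _score_to_color (mine): the output is an RGBA tuple with fixed 0.5 alpha, sliding from green to red as the score approaches maxi:

print(_score_to_color(0.0))   # (0, 1, 0, 0.5)    -- perfect similarity, green
print(_score_to_color(0.05))  # (0.5, 0.5, 0, 0.5)
print(_score_to_color(0.2))   # (1, 0, 0, 0.5)    -- clamped to full red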
86ab0518fd4c0743ea0af1dd4809bdce6edd0e4b
valentinogeron/RAMP
RAMP/ramp.py
[ "BSD-2-Clause" ]
Python
comma_seperated_to_list
<not_specific>
def comma_seperated_to_list(ctx, param, value):
    """
    Converts a comma separated string into a list.
    """
    if value is None:
        return []
    else:
        items = value.split(',')
        return list(set(items))
Converts a comma separated string into a list.
Converts a comma separated string into a list.
[ "Converts", "a", "comma", "separated", "string", "into", "a", "list", "." ]
def comma_seperated_to_list(ctx, param, value): if value is None: return [] else: items = value.split(',') return list(set(items))
[ "def", "comma_seperated_to_list", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "[", "]", "else", ":", "items", "=", "value", ".", "split", "(", "','", ")", "return", "list", "(", "set", "(", "items", ")", ")" ]
Converts a comma seperated string into a list.
[ "Converts", "a", "comma", "seperated", "string", "into", "a", "list", "." ]
[ "\"\"\"\n Converts a comma seperated string into a list.\n \"\"\"" ]
[ { "param": "ctx", "type": null }, { "param": "param", "type": null }, { "param": "value", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "ctx", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "param", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "value", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def comma_seperated_to_list(ctx, param, value): if value is None: return [] else: items = value.split(',') return list(set(items))
432
188
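This is a click-style option callback, but it can be exercised directly, since `ctx` and `param` are unused (a sketch assuming the definition above is in scope). Note that the `set()` round-trip deduplicates but discards ordering:

print(comma_seperated_to_list(None, None, None))    # []
items = comma_seperated_to_list(None, None, "a,b,a")
print(sorted(items))                                # ['a', 'b'] (raw list order is unspecified)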
1e61993ae8e335287792a99de45ad4497c8703f1
jfirebaugh/rules_rust
rust/private/utils.bzl
[ "Apache-2.0" ]
Python
_replace_all
<not_specific>
def _replace_all(string, substitutions): """Replaces occurrences of the given patterns in `string`. There are a few reasons this looks complicated: * The substitutions are performed with some priority, i.e. patterns that are listed first in `substitutions` are higher priority than patterns that are listed later. * We also take pains to avoid doing replacements that overlap with each other, since overlaps invalidate pattern matches. * To avoid hairy offset invalidation, we apply the substitutions right-to-left. * To avoid the "_quote" -> "_quotequote_" rule introducing new pattern matches later in the string during decoding, we take the leftmost replacement, in cases of overlap. (Note that no rule can induce new pattern matches *earlier* in the string.) (E.g. "_quotedot_" encodes to "_quotequote_dot_". Note that "_quotequote_" and "_dot_" both occur in this string, and overlap.). Args: string (string): the string in which the replacements should be performed. substitutions: the list of patterns and replacements to apply. Returns: A string with the appropriate substitutions performed. """ # Find the highest-priority pattern matches for each string index, going # left-to-right and skipping indices that are already involved in a # pattern match. plan = {} matched_indices_set = {} for pattern_start in range(len(string)): if pattern_start in matched_indices_set: continue for (pattern, replacement) in substitutions: if not string.startswith(pattern, pattern_start): continue length = len(pattern) plan[pattern_start] = (length, replacement) matched_indices_set.update([(pattern_start + i, True) for i in range(length)]) break # Execute the replacement plan, working from right to left. for pattern_start in sorted(plan.keys(), reverse = True): length, replacement = plan[pattern_start] after_pattern = pattern_start + length string = string[:pattern_start] + replacement + string[after_pattern:] return string
Replaces occurrences of the given patterns in `string`. There are a few reasons this looks complicated: * The substitutions are performed with some priority, i.e. patterns that are listed first in `substitutions` are higher priority than patterns that are listed later. * We also take pains to avoid doing replacements that overlap with each other, since overlaps invalidate pattern matches. * To avoid hairy offset invalidation, we apply the substitutions right-to-left. * To avoid the "_quote" -> "_quotequote_" rule introducing new pattern matches later in the string during decoding, we take the leftmost replacement, in cases of overlap. (Note that no rule can induce new pattern matches *earlier* in the string.) (E.g. "_quotedot_" encodes to "_quotequote_dot_". Note that "_quotequote_" and "_dot_" both occur in this string, and overlap.). Args: string (string): the string in which the replacements should be performed. substitutions: the list of patterns and replacements to apply. Returns: A string with the appropriate substitutions performed.
Replaces occurrences of the given patterns in `string`. There are a few reasons this looks complicated: The substitutions are performed with some priority, i.e. patterns that are listed first in `substitutions` are higher priority than patterns that are listed later. We also take pains to avoid doing replacements that overlap with each other, since overlaps invalidate pattern matches. To avoid hairy offset invalidation, we apply the substitutions right-to-left. To avoid the "_quote" -> "_quotequote_" rule introducing new pattern matches later in the string during decoding, we take the leftmost replacement, in cases of overlap. (Note that no rule can induce new pattern matches *earlier* in the string.) .
[ "Replaces", "occurrences", "of", "the", "given", "patterns", "in", "`", "string", "`", ".", "There", "are", "a", "few", "reasons", "this", "looks", "complicated", ":", "The", "substitutions", "are", "performed", "with", "some", "priority", "i", ".", "e", ".", "patterns", "that", "are", "listed", "first", "in", "`", "substitutions", "`", "are", "higher", "priority", "than", "patterns", "that", "are", "listed", "later", ".", "We", "also", "take", "pains", "to", "avoid", "doing", "replacements", "that", "overlap", "with", "each", "other", "since", "overlaps", "invalidate", "pattern", "matches", ".", "To", "avoid", "hairy", "offset", "invalidation", "we", "apply", "the", "substitutions", "right", "-", "to", "-", "left", ".", "To", "avoid", "the", "\"", "_quote", "\"", "-", ">", "\"", "_quotequote_", "\"", "rule", "introducing", "new", "pattern", "matches", "later", "in", "the", "string", "during", "decoding", "we", "take", "the", "leftmost", "replacement", "in", "cases", "of", "overlap", ".", "(", "Note", "that", "no", "rule", "can", "induce", "new", "pattern", "matches", "*", "earlier", "*", "in", "the", "string", ".", ")", "." ]
def _replace_all(string, substitutions): plan = {} matched_indices_set = {} for pattern_start in range(len(string)): if pattern_start in matched_indices_set: continue for (pattern, replacement) in substitutions: if not string.startswith(pattern, pattern_start): continue length = len(pattern) plan[pattern_start] = (length, replacement) matched_indices_set.update([(pattern_start + i, True) for i in range(length)]) break for pattern_start in sorted(plan.keys(), reverse = True): length, replacement = plan[pattern_start] after_pattern = pattern_start + length string = string[:pattern_start] + replacement + string[after_pattern:] return string
[ "def", "_replace_all", "(", "string", ",", "substitutions", ")", ":", "plan", "=", "{", "}", "matched_indices_set", "=", "{", "}", "for", "pattern_start", "in", "range", "(", "len", "(", "string", ")", ")", ":", "if", "pattern_start", "in", "matched_indices_set", ":", "continue", "for", "(", "pattern", ",", "replacement", ")", "in", "substitutions", ":", "if", "not", "string", ".", "startswith", "(", "pattern", ",", "pattern_start", ")", ":", "continue", "length", "=", "len", "(", "pattern", ")", "plan", "[", "pattern_start", "]", "=", "(", "length", ",", "replacement", ")", "matched_indices_set", ".", "update", "(", "[", "(", "pattern_start", "+", "i", ",", "True", ")", "for", "i", "in", "range", "(", "length", ")", "]", ")", "break", "for", "pattern_start", "in", "sorted", "(", "plan", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "length", ",", "replacement", "=", "plan", "[", "pattern_start", "]", "after_pattern", "=", "pattern_start", "+", "length", "string", "=", "string", "[", ":", "pattern_start", "]", "+", "replacement", "+", "string", "[", "after_pattern", ":", "]", "return", "string" ]
Replaces occurrences of the given patterns in `string`.
[ "Replaces", "occurrences", "of", "the", "given", "patterns", "in", "`", "string", "`", "." ]
[ "\"\"\"Replaces occurrences of the given patterns in `string`.\n\n There are a few reasons this looks complicated:\n * The substitutions are performed with some priority, i.e. patterns that are\n listed first in `substitutions` are higher priority than patterns that are\n listed later.\n * We also take pains to avoid doing replacements that overlap with each\n other, since overlaps invalidate pattern matches.\n * To avoid hairy offset invalidation, we apply the substitutions\n right-to-left.\n * To avoid the \"_quote\" -> \"_quotequote_\" rule introducing new pattern\n matches later in the string during decoding, we take the leftmost\n replacement, in cases of overlap. (Note that no rule can induce new\n pattern matches *earlier* in the string.) (E.g. \"_quotedot_\" encodes to\n \"_quotequote_dot_\". Note that \"_quotequote_\" and \"_dot_\" both occur in\n this string, and overlap.).\n\n Args:\n string (string): the string in which the replacements should be performed.\n substitutions: the list of patterns and replacements to apply.\n\n Returns:\n A string with the appropriate substitutions performed.\n \"\"\"", "# Find the highest-priority pattern matches for each string index, going", "# left-to-right and skipping indices that are already involved in a", "# pattern match.", "# Execute the replacement plan, working from right to left." ]
[ { "param": "string", "type": null }, { "param": "substitutions", "type": null } ]
{ "returns": [ { "docstring": "A string with the appropriate substitutions performed.", "docstring_tokens": [ "A", "string", "with", "the", "appropriate", "substitutions", "performed", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "string", "type": null, "docstring": "the string in which the replacements should be performed.", "docstring_tokens": [ "the", "string", "in", "which", "the", "replacements", "should", "be", "performed", "." ], "default": null, "is_optional": false }, { "identifier": "substitutions", "type": null, "docstring": "the list of patterns and replacements to apply.", "docstring_tokens": [ "the", "list", "of", "patterns", "and", "replacements", "to", "apply", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _replace_all(string, substitutions): plan = {} matched_indices_set = {} for pattern_start in range(len(string)): if pattern_start in matched_indices_set: continue for (pattern, replacement) in substitutions: if not string.startswith(pattern, pattern_start): continue length = len(pattern) plan[pattern_start] = (length, replacement) matched_indices_set.update([(pattern_start + i, True) for i in range(length)]) break for pattern_start in sorted(plan.keys(), reverse = True): length, replacement = plan[pattern_start] after_pattern = pattern_start + length string = string[:pattern_start] + replacement + string[after_pattern:] return string
433
158
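Although this lives in a Bazel `.bzl` file (Starlark), the body also runs as plain Python, so the overlap rule from the docstring can be traced directly (a sketch assuming the definition above is in scope):

subs = [("_quotequote_", "_quote"), ("_dot_", ".")]
# "_quotequote_" matches at index 0 and claims indices 0-11, so the overlapping
# "_dot_" match starting at index 11 is skipped: the leftmost replacement wins.
print(_replace_all("_quotequote_dot_", subs))  # "_quotedot_"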
8e77f12a9d529c15d0b0df96ddb782b424bed341
noahjgreen295/feature_engine
feature_engine/dataframe_checks.py
[ "BSD-3-Clause" ]
Python
_check_X_matches_training_df
None
def _check_X_matches_training_df(X: pd.DataFrame, reference: int) -> None: """ Checks that DataFrame to transform has the same number of columns that the DataFrame used with the fit() method. Parameters ---------- X : Pandas DataFrame The df to be checked reference : int The number of columns in the dataframe that was used with the fit() method. Raises ------ ValueError If the number of columns does not match. Returns ------- None """ if X.shape[1] != reference: raise ValueError( "The number of columns in this dataset is different from the one used to " "fit this transformer (when using the fit() method)." ) return None
Checks that DataFrame to transform has the same number of columns that the DataFrame used with the fit() method. Parameters ---------- X : Pandas DataFrame The df to be checked reference : int The number of columns in the dataframe that was used with the fit() method. Raises ------ ValueError If the number of columns does not match. Returns ------- None
Checks that DataFrame to transform has the same number of columns that the DataFrame used with the fit() method. Parameters X : Pandas DataFrame The df to be checked reference : int The number of columns in the dataframe that was used with the fit() method. Raises ValueError If the number of columns does not match. Returns None
[ "Checks", "that", "DataFrame", "to", "transform", "has", "the", "same", "number", "of", "columns", "that", "the", "DataFrame", "used", "with", "the", "fit", "()", "method", ".", "Parameters", "X", ":", "Pandas", "DataFrame", "The", "df", "to", "be", "checked", "reference", ":", "int", "The", "number", "of", "columns", "in", "the", "dataframe", "that", "was", "used", "with", "the", "fit", "()", "method", ".", "Raises", "ValueError", "If", "the", "number", "of", "columns", "does", "not", "match", ".", "Returns", "None" ]
def _check_X_matches_training_df(X: pd.DataFrame, reference: int) -> None: if X.shape[1] != reference: raise ValueError( "The number of columns in this dataset is different from the one used to " "fit this transformer (when using the fit() method)." ) return None
[ "def", "_check_X_matches_training_df", "(", "X", ":", "pd", ".", "DataFrame", ",", "reference", ":", "int", ")", "->", "None", ":", "if", "X", ".", "shape", "[", "1", "]", "!=", "reference", ":", "raise", "ValueError", "(", "\"The number of columns in this dataset is different from the one used to \"", "\"fit this transformer (when using the fit() method).\"", ")", "return", "None" ]
Checks that DataFrame to transform has the same number of columns that the DataFrame used with the fit() method.
[ "Checks", "that", "DataFrame", "to", "transform", "has", "the", "same", "number", "of", "columns", "that", "the", "DataFrame", "used", "with", "the", "fit", "()", "method", "." ]
[ "\"\"\"\n Checks that DataFrame to transform has the same number of columns that the\n DataFrame used with the fit() method.\n\n Parameters\n ----------\n X : Pandas DataFrame\n The df to be checked\n reference : int\n The number of columns in the dataframe that was used with the fit() method.\n\n Raises\n ------\n ValueError\n If the number of columns does not match.\n\n Returns\n -------\n None\n \"\"\"" ]
[ { "param": "X", "type": "pd.DataFrame" }, { "param": "reference", "type": "int" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "X", "type": "pd.DataFrame", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "reference", "type": "int", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def _check_X_matches_training_df(X: pd.DataFrame, reference: int) -> None: if X.shape[1] != reference: raise ValueError( "The number of columns in this dataset is different from the one used to " "fit this transformer (when using the fit() method)." ) return None
434
685
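A minimal sketch of the guard in action (assumes pandas is installed and the definition above is in scope):

import pandas as pd

X = pd.DataFrame({"a": [1], "b": [2]})
_check_X_matches_training_df(X, reference=2)    # matches the 2 training columns: returns None
# _check_X_matches_training_df(X, reference=3)  # would raise ValueError: column counts differ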
a1cea4c505c018427cfc66b5f3abf5dda1ff6556
quantmatt/nitrade-trading-software
Build/Release/python_scripts/data_builder.py
[ "Apache-2.0" ]
Python
add_additional_features
<not_specific>
def add_additional_features(asset_data, features, save_path=None, verbose=True): """ Can add in more features to the data if already processed, saves calculating all features again Args: asset_data ((str, pd.DataFrame) or (str, pd.DataFrame)[]): a single or list of tuples (asset name, bar data) containing the bar data features (str[]): a string list of features to calculate save_path (str): Path to save the bar data with features. A placeholder {ASSET} that will be substituted with the asset name verbose (bool): True if progress printing to console is desired Returns: (str, pd.DataFrame): a single tuple of (asset name, dataframe) of the bar data if a single asset was passed or an array of (asset name, dataframes) if an array of assets was passed """ import pandas as pd #if a single dataframe is passed just put it in a single item list if type(asset_data) == tuple: asset_data = [asset_data] elif type(asset_data) != list: raise ValueError('asset_data must be a (str, pandas.DataFrame) or a list of (str, pandas.Dataframe).') feature_bars = [] for asset, data in asset_data: #add a new feature - this line is temporary and can be change to #whatever calcualting is required data["feature"] = 0 feature_bars.append((asset, data)) if save_path is not None: if verbose: print("Saving {0}...".format(asset)) save_location = save_path.replace("{ASSET}", asset) data.to_csv(save_location) #if there was just one dataFrame just return a single dataframe, otherwise #return the list of dataframes, one for each asset if len(feature_bars) == 1: return feature_bars[0] else: return feature_bars
Can add in more features to the data if already processed, saves calculating all features again Args: asset_data ((str, pd.DataFrame) or (str, pd.DataFrame)[]): a single or list of tuples (asset name, bar data) containing the bar data features (str[]): a string list of features to calculate save_path (str): Path to save the bar data with features. A placeholder {ASSET} that will be substituted with the asset name verbose (bool): True if progress printing to console is desired Returns: (str, pd.DataFrame): a single tuple of (asset name, dataframe) of the bar data if a single asset was passed or an array of (asset name, dataframes) if an array of assets was passed
Can add in more features to the data if already processed, saves calculating all features again
[ "Can", "add", "in", "more", "features", "to", "the", "data", "if", "already", "processed", "saves", "calculating", "all", "features", "again" ]
def add_additional_features(asset_data, features, save_path=None, verbose=True): import pandas as pd if type(asset_data) == tuple: asset_data = [asset_data] elif type(asset_data) != list: raise ValueError('asset_data must be a (str, pandas.DataFrame) or a list of (str, pandas.Dataframe).') feature_bars = [] for asset, data in asset_data: data["feature"] = 0 feature_bars.append((asset, data)) if save_path is not None: if verbose: print("Saving {0}...".format(asset)) save_location = save_path.replace("{ASSET}", asset) data.to_csv(save_location) if len(feature_bars) == 1: return feature_bars[0] else: return feature_bars
[ "def", "add_additional_features", "(", "asset_data", ",", "features", ",", "save_path", "=", "None", ",", "verbose", "=", "True", ")", ":", "import", "pandas", "as", "pd", "if", "type", "(", "asset_data", ")", "==", "tuple", ":", "asset_data", "=", "[", "asset_data", "]", "elif", "type", "(", "asset_data", ")", "!=", "list", ":", "raise", "ValueError", "(", "'asset_data must be a (str, pandas.DataFrame) or a list of (str, pandas.Dataframe).'", ")", "feature_bars", "=", "[", "]", "for", "asset", ",", "data", "in", "asset_data", ":", "data", "[", "\"feature\"", "]", "=", "0", "feature_bars", ".", "append", "(", "(", "asset", ",", "data", ")", ")", "if", "save_path", "is", "not", "None", ":", "if", "verbose", ":", "print", "(", "\"Saving {0}...\"", ".", "format", "(", "asset", ")", ")", "save_location", "=", "save_path", ".", "replace", "(", "\"{ASSET}\"", ",", "asset", ")", "data", ".", "to_csv", "(", "save_location", ")", "if", "len", "(", "feature_bars", ")", "==", "1", ":", "return", "feature_bars", "[", "0", "]", "else", ":", "return", "feature_bars" ]
Can add in more features to the data if already processed, saves calculating all features again
[ "Can", "add", "in", "more", "features", "to", "the", "data", "if", "already", "processed", "saves", "calculating", "all", "features", "again" ]
[ "\"\"\" Can add in more features to the data if already processed, saves \n calculating all features again\n \n Args:\n asset_data ((str, pd.DataFrame) or (str, pd.DataFrame)[]): a single \n or list of tuples (asset name, bar data) containing the bar data\n features (str[]): a string list of features to calculate\n save_path (str): Path to save the bar data with features. A \n placeholder {ASSET} that will be substituted with the asset name\n verbose (bool): True if progress printing to console is desired\n \n \n Returns:\n (str, pd.DataFrame): a single tuple of (asset name, dataframe) of the \n bar data if a single asset was passed or an array of (asset name, dataframes)\n if an array of assets was passed\n \n \"\"\"", "#if a single dataframe is passed just put it in a single item list", "#add a new feature - this line is temporary and can be change to ", "#whatever calcualting is required", "#if there was just one dataFrame just return a single dataframe, otherwise ", "#return the list of dataframes, one for each asset" ]
[ { "param": "asset_data", "type": null }, { "param": "features", "type": null }, { "param": "save_path", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [ { "docstring": null, "docstring_tokens": [ "None" ], "type": null } ], "raises": [], "params": [ { "identifier": "asset_data", "type": null, "docstring": "a single\nor list of tuples (asset name, bar data) containing the bar data", "docstring_tokens": [ "a", "single", "or", "list", "of", "tuples", "(", "asset", "name", "bar", "data", ")", "containing", "the", "bar", "data" ], "default": null, "is_optional": false }, { "identifier": "features", "type": null, "docstring": "a string list of features to calculate", "docstring_tokens": [ "a", "string", "list", "of", "features", "to", "calculate" ], "default": null, "is_optional": false }, { "identifier": "save_path", "type": null, "docstring": "Path to save the bar data with features. A\nplaceholder {ASSET} that will be substituted with the asset name", "docstring_tokens": [ "Path", "to", "save", "the", "bar", "data", "with", "features", ".", "A", "placeholder", "{", "ASSET", "}", "that", "will", "be", "substituted", "with", "the", "asset", "name" ], "default": null, "is_optional": false }, { "identifier": "verbose", "type": null, "docstring": "True if progress printing to console is desired", "docstring_tokens": [ "True", "if", "progress", "printing", "to", "console", "is", "desired" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def add_additional_features(asset_data, features, save_path=None, verbose=True): import pandas as pd if type(asset_data) == tuple: asset_data = [asset_data] elif type(asset_data) != list: raise ValueError('asset_data must be a (str, pandas.DataFrame) or a list of (str, pandas.Dataframe).') feature_bars = [] for asset, data in asset_data: data["feature"] = 0 feature_bars.append((asset, data)) if save_path is not None: if verbose: print("Saving {0}...".format(asset)) save_location = save_path.replace("{ASSET}", asset) data.to_csv(save_location) if len(feature_bars) == 1: return feature_bars[0] else: return feature_bars
435
539
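A sketch of the single-asset path (assumes pandas and the definition above are in scope). Per the source's own "temporary" comment, this placeholder implementation accepts `features` without consuming it and simply zero-fills one new column:

import pandas as pd

bars = ("EURUSD", pd.DataFrame({"close": [1.1, 1.2]}))  # illustrative asset name and data
asset, enriched = add_additional_features(bars, features=["feature"])
print(asset, enriched.columns.tolist())                 # EURUSD ['close', 'feature']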
34f3be64ca9c7626da8bd021c650d2a479fb9102
dsambugaro/TSP_genetic_algorithm
Bilbo.py
[ "MIT" ]
Python
alternativeCrossover
<not_specific>
def alternativeCrossover(parent_1, parent_2): ''' Returns a child created with an alternative crossover ''' child = list(parent_1) cut = random.randrange(0,len(parent_1)-1) for i in range(cut, len(child)): if parent_1[i] not in parent_2[cut:] and parent_2[i] not in child[:cut]: child[child.index(parent_2[i])] = child[i] child[i] = parent_2[i] return child
Returns a child created with an alternative crossover
Returns a child created with an alternative crossover
[ "Returns", "a", "child", "created", "with", "an", "alternative", "crossover" ]
def alternativeCrossover(parent_1, parent_2): child = list(parent_1) cut = random.randrange(0,len(parent_1)-1) for i in range(cut, len(child)): if parent_1[i] not in parent_2[cut:] and parent_2[i] not in child[:cut]: child[child.index(parent_2[i])] = child[i] child[i] = parent_2[i] return child
[ "def", "alternativeCrossover", "(", "parent_1", ",", "parent_2", ")", ":", "child", "=", "list", "(", "parent_1", ")", "cut", "=", "random", ".", "randrange", "(", "0", ",", "len", "(", "parent_1", ")", "-", "1", ")", "for", "i", "in", "range", "(", "cut", ",", "len", "(", "child", ")", ")", ":", "if", "parent_1", "[", "i", "]", "not", "in", "parent_2", "[", "cut", ":", "]", "and", "parent_2", "[", "i", "]", "not", "in", "child", "[", ":", "cut", "]", ":", "child", "[", "child", ".", "index", "(", "parent_2", "[", "i", "]", ")", "]", "=", "child", "[", "i", "]", "child", "[", "i", "]", "=", "parent_2", "[", "i", "]", "return", "child" ]
Returns a child created with an alternative crossover
[ "Returns", "a", "child", "created", "with", "an", "alternative", "crossover" ]
[ "'''\n Returns a child created with an alternative crossover\n '''" ]
[ { "param": "parent_1", "type": null }, { "param": "parent_2", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "parent_1", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "parent_2", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import random def alternativeCrossover(parent_1, parent_2): child = list(parent_1) cut = random.randrange(0,len(parent_1)-1) for i in range(cut, len(child)): if parent_1[i] not in parent_2[cut:] and parent_2[i] not in child[:cut]: child[child.index(parent_2[i])] = child[i] child[i] = parent_2[i] return child
436
364
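Because the body swaps positions inside `child` rather than overwriting them, the child is always a permutation of the parent tour; a sketch (assumes `import random` and the definition above):

import random

random.seed(0)                             # illustrative seed for reproducibility
parent_1 = [0, 1, 2, 3, 4]
parent_2 = [4, 3, 2, 1, 0]
child = alternativeCrossover(parent_1, parent_2)
print(sorted(child) == sorted(parent_1))   # True: still a valid TSP tour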
e00685998ae336089328842d142228c5a311f3d3
jbn/vaquero
vaquero/util.py
[ "MIT" ]
Python
nth
<not_specific>
def nth(items, i): """ Get the nth item from an iterable. Warning: It consumes from a generator. :param items: an iterable :return: the first in the iterable """ for j, item in enumerate(items): if j == i: return item
Get the nth item from an iterable. Warning: It consumes from a generator. :param items: an iterable :return: the first in the iterable
Get the nth item from an iterable. Warning: It consumes from a generator.
[ "Get", "the", "nth", "item", "from", "an", "iterable", ".", "Warning", ":", "It", "consumes", "from", "a", "generator", "." ]
def nth(items, i): for j, item in enumerate(items): if j == i: return item
[ "def", "nth", "(", "items", ",", "i", ")", ":", "for", "j", ",", "item", "in", "enumerate", "(", "items", ")", ":", "if", "j", "==", "i", ":", "return", "item" ]
Get the nth item from an iterable.
[ "Get", "the", "nth", "item", "from", "an", "iterable", "." ]
[ "\"\"\"\n Get the nth item from an iterable.\n\n Warning: It consumes from a generator.\n\n :param items: an iterable\n :return: the first in the iterable\n \"\"\"" ]
[ { "param": "items", "type": null }, { "param": "i", "type": null } ]
{ "returns": [ { "docstring": "the first in the iterable", "docstring_tokens": [ "the", "first", "in", "the", "iterable" ], "type": null } ], "raises": [], "params": [ { "identifier": "items", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": null }, { "identifier": "i", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def nth(items, i): for j, item in enumerate(items): if j == i: return item
437
526
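The docstring's warning is easy to demonstrate: each call re-enumerates from the iterable's current position, so a generator keeps advancing between calls (a sketch assuming the definition above):

gen = (x * x for x in range(10))
print(nth(gen, 3))   # 9  -- consumes 0, 1, 4, 9
print(nth(gen, 0))   # 16 -- not 0: enumeration restarts on the already-advanced generator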
8fd3ee6e325022722b10964e06763d64cc2b34fe
blumt/GerryChain
gerrychain/graph/graph.py
[ "BSD-3-Clause" ]
Python
convert_geometries_to_geojson
null
def convert_geometries_to_geojson(data): """Convert geometry attributes in a NetworkX adjacency data object to GeoJSON, so that they can be serialized. Mutates the ``data`` object. Does nothing if no geometry attributes are found. :param data: an adjacency data object (returned by :func:`networkx.readwrite.json_graph.adjacency_data`) """ for node in data["nodes"]: for key in node: # having a ``__geo_interface__``` property identifies the object # as being a ``shapely`` geometry object if hasattr(node[key], "__geo_interface__"): # The ``__geo_interface__`` property is essentially GeoJSON. # This is what :func:`geopandas.GeoSeries.to_json` uses under # the hood. node[key] = node[key].__geo_interface__
Convert geometry attributes in a NetworkX adjacency data object to GeoJSON, so that they can be serialized. Mutates the ``data`` object. Does nothing if no geometry attributes are found. :param data: an adjacency data object (returned by :func:`networkx.readwrite.json_graph.adjacency_data`)
Convert geometry attributes in a NetworkX adjacency data object to GeoJSON, so that they can be serialized. Mutates the ``data`` object. Does nothing if no geometry attributes are found.
[ "Convert", "geometry", "attributes", "in", "a", "NetworkX", "adjacency", "data", "object", "to", "GeoJSON", "so", "that", "they", "can", "be", "serialized", ".", "Mutates", "the", "`", "`", "data", "`", "`", "object", ".", "Does", "nothing", "if", "no", "geometry", "attributes", "are", "found", "." ]
def convert_geometries_to_geojson(data): for node in data["nodes"]: for key in node: if hasattr(node[key], "__geo_interface__"): node[key] = node[key].__geo_interface__
[ "def", "convert_geometries_to_geojson", "(", "data", ")", ":", "for", "node", "in", "data", "[", "\"nodes\"", "]", ":", "for", "key", "in", "node", ":", "if", "hasattr", "(", "node", "[", "key", "]", ",", "\"__geo_interface__\"", ")", ":", "node", "[", "key", "]", "=", "node", "[", "key", "]", ".", "__geo_interface__" ]
Convert geometry attributes in a NetworkX adjacency data object to GeoJSON, so that they can be serialized.
[ "Convert", "geometry", "attributes", "in", "a", "NetworkX", "adjacency", "data", "object", "to", "GeoJSON", "so", "that", "they", "can", "be", "serialized", "." ]
[ "\"\"\"Convert geometry attributes in a NetworkX adjacency data object\n to GeoJSON, so that they can be serialized. Mutates the ``data`` object.\n\n Does nothing if no geometry attributes are found.\n\n :param data: an adjacency data object (returned by\n :func:`networkx.readwrite.json_graph.adjacency_data`)\n \"\"\"", "# having a ``__geo_interface__``` property identifies the object", "# as being a ``shapely`` geometry object", "# The ``__geo_interface__`` property is essentially GeoJSON.", "# This is what :func:`geopandas.GeoSeries.to_json` uses under", "# the hood." ]
[ { "param": "data", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "data", "type": null, "docstring": "an adjacency data object (returned by\n:func:`networkx.readwrite.json_graph.adjacency_data`)", "docstring_tokens": [ "an", "adjacency", "data", "object", "(", "returned", "by", ":", "func", ":", "`", "networkx", ".", "readwrite", ".", "json_graph", ".", "adjacency_data", "`", ")" ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def convert_geometries_to_geojson(data): for node in data["nodes"]: for key in node: if hasattr(node[key], "__geo_interface__"): node[key] = node[key].__geo_interface__
438
143
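A sketch of the in-place mutation (assumes shapely is installed and the definition above is in scope):

from shapely.geometry import Point

data = {"nodes": [{"id": 1, "geometry": Point(0.0, 0.0)}]}
convert_geometries_to_geojson(data)
print(data["nodes"][0]["geometry"])   # GeoJSON-like dict: {'type': 'Point', 'coordinates': (0.0, 0.0)}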
b1e83a1ec6fc57ffe4228cf1246a9a1dda9b2f1f
purplediane/shortbread
tests/test_shortbread.py
[ "MIT" ]
Python
check_four_doz_plain
null
def check_four_doz_plain(result): """Helper function for output of four dozen cookie recipe.""" assert 'Shortbread Cookies - makes approx. 4 doz.' in result.output assert '4 cups flour' in result.output assert '1 cup sugar' in result.output assert '2 cups salted butter' in result.output assert '1 teaspoon vanilla (optional)' in result.output
Helper function for output of four dozen cookie recipe.
Helper function for output of four dozen cookie recipe.
[ "Helper", "function", "for", "output", "of", "four", "dozen", "cookie", "recipe", "." ]
def check_four_doz_plain(result): assert 'Shortbread Cookies - makes approx. 4 doz.' in result.output assert '4 cups flour' in result.output assert '1 cup sugar' in result.output assert '2 cups salted butter' in result.output assert '1 teaspoon vanilla (optional)' in result.output
[ "def", "check_four_doz_plain", "(", "result", ")", ":", "assert", "'Shortbread Cookies - makes approx. 4 doz.'", "in", "result", ".", "output", "assert", "'4 cups flour'", "in", "result", ".", "output", "assert", "'1 cup sugar'", "in", "result", ".", "output", "assert", "'2 cups salted butter'", "in", "result", ".", "output", "assert", "'1 teaspoon vanilla (optional)'", "in", "result", ".", "output" ]
Helper function for output of four dozen cookie recipe.
[ "Helper", "function", "for", "output", "of", "four", "dozen", "cookie", "recipe", "." ]
[ "\"\"\"Helper function for output of four dozen cookie recipe.\"\"\"" ]
[ { "param": "result", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "result", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_four_doz_plain(result): assert 'Shortbread Cookies - makes approx. 4 doz.' in result.output assert '4 cups flour' in result.output assert '1 cup sugar' in result.output assert '2 cups salted butter' in result.output assert '1 teaspoon vanilla (optional)' in result.output
439
724
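In the real test suite `result` comes from click's `CliRunner.invoke`; for a standalone sketch, any object exposing an `output` string is enough (the text below mirrors the expected recipe output):

from types import SimpleNamespace

fake_result = SimpleNamespace(output=(
    "Shortbread Cookies - makes approx. 4 doz.\n"
    "4 cups flour\n1 cup sugar\n2 cups salted butter\n"
    "1 teaspoon vanilla (optional)\n"))
check_four_doz_plain(fake_result)   # all five assertions pass silently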
d1e6681cd5231d3a2b5e2a239a8f8e87280593d3
Sephta/time-series-modeling
transformation_tree/tree.py
[ "MIT" ]
Python
load_tree
<not_specific>
def load_tree(file: str): """Loads tree as python object from specified file path""" with open(file, 'rb') as handle: tree = pickle.load(handle) return tree
Loads tree as python object from specified file path
Loads tree as python object from specified file path
[ "Loads", "tree", "as", "python", "object", "from", "specified", "file", "path" ]
def load_tree(file: str): with open(file, 'rb') as handle: tree = pickle.load(handle) return tree
[ "def", "load_tree", "(", "file", ":", "str", ")", ":", "with", "open", "(", "file", ",", "'rb'", ")", "as", "handle", ":", "tree", "=", "pickle", ".", "load", "(", "handle", ")", "return", "tree" ]
Loads tree as python object from specified file path
[ "Loads", "tree", "as", "python", "object", "from", "specified", "file", "path" ]
[ "\"\"\"Loads tree as python object from specified file path\"\"\"" ]
[ { "param": "file", "type": "str" } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "file", "type": "str", "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import pickle def load_tree(file: str): with open(file, 'rb') as handle: tree = pickle.load(handle) return tree
440
515
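A pickle round-trip sketch (the file name is hypothetical; assumes `import pickle` and the definition above):

import pickle

with open("tree.pkl", "wb") as handle:    # hypothetical path
    pickle.dump({"root": []}, handle)     # any picklable tree object works
print(load_tree("tree.pkl"))              # {'root': []}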
35bd00dff5214f7993e5de8418e39979b321422a
PointKernel/TiMemory
scripts/type-generator/generate-native-extern.py
[ "MIT" ]
Python
generate_extern
<not_specific>
def generate_extern(component, key, alias, suffix, specify_comp=True): """ This function generates a type trait label for C++ """ if specify_comp: return "TIMEMORY_{}_EXTERN_{}({}, {})".format( key.upper(), suffix.upper(), alias, "::tim::component::{}".format(component)) else: return "TIMEMORY_{}_EXTERN_{}({})".format( key.upper(), suffix.upper(), alias)
This function generates a type trait label for C++
This function generates a type trait label for C
[ "This", "function", "generates", "a", "type", "trait", "label", "for", "C" ]
def generate_extern(component, key, alias, suffix, specify_comp=True): if specify_comp: return "TIMEMORY_{}_EXTERN_{}({}, {})".format( key.upper(), suffix.upper(), alias, "::tim::component::{}".format(component)) else: return "TIMEMORY_{}_EXTERN_{}({})".format( key.upper(), suffix.upper(), alias)
[ "def", "generate_extern", "(", "component", ",", "key", ",", "alias", ",", "suffix", ",", "specify_comp", "=", "True", ")", ":", "if", "specify_comp", ":", "return", "\"TIMEMORY_{}_EXTERN_{}({}, {})\"", ".", "format", "(", "key", ".", "upper", "(", ")", ",", "suffix", ".", "upper", "(", ")", ",", "alias", ",", "\"::tim::component::{}\"", ".", "format", "(", "component", ")", ")", "else", ":", "return", "\"TIMEMORY_{}_EXTERN_{}({})\"", ".", "format", "(", "key", ".", "upper", "(", ")", ",", "suffix", ".", "upper", "(", ")", ",", "alias", ")" ]
This function generates a type trait label for C++
[ "This", "function", "generates", "a", "type", "trait", "label", "for", "C", "++" ]
[ "\"\"\"\n This function generates a type trait label for C++\n \"\"\"" ]
[ { "param": "component", "type": null }, { "param": "key", "type": null }, { "param": "alias", "type": null }, { "param": "suffix", "type": null }, { "param": "specify_comp", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "component", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "key", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "alias", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "suffix", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "specify_comp", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def generate_extern(component, key, alias, suffix, specify_comp=True): if specify_comp: return "TIMEMORY_{}_EXTERN_{}({}, {})".format( key.upper(), suffix.upper(), alias, "::tim::component::{}".format(component)) else: return "TIMEMORY_{}_EXTERN_{}({})".format( key.upper(), suffix.upper(), alias)
441
228
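A sketch with illustrative arguments (the component, key, alias, and suffix values below are made up for demonstration, not taken from timemory itself):

print(generate_extern("wall_clock", "declare", "wall_clock_t", "template"))
# TIMEMORY_DECLARE_EXTERN_TEMPLATE(wall_clock_t, ::tim::component::wall_clock)
print(generate_extern("wall_clock", "declare", "wall_clock_t", "template", specify_comp=False))
# TIMEMORY_DECLARE_EXTERN_TEMPLATE(wall_clock_t)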
bea0dbc56a41b696f25dbcad67a27f8b96a4cd67
biocad/leadoptmap
contribute/graph.py
[ "BSD-3-Clause" ]
Python
cutoff_graph
<not_specific>
def cutoff_graph( g, simi_cutoff ) : """ Generates a new graph by cutting off the similarity scores. @type g: C{networkx.Graph} @param g: Original graph @type simi_cutoff: C{float} @param simi_cutoff: Similarity cutoff @return: A new graph """ g = copy.deepcopy( g ) edges_to_be_deleted = [] for e in g.edges() : if (g[e[0]][e[1]]["similarity"] < simi_cutoff) : edges_to_be_deleted.append( e ) g.remove_edges_from( edges_to_be_deleted ) return g
Generates a new graph by cutting off the similarity scores. @type g: C{networkx.Graph} @param g: Original graph @type simi_cutoff: C{float} @param simi_cutoff: Similarity cutoff @return: A new graph
Generates a new graph by cutting off the similarity scores.
[ "Generates", "a", "new", "graph", "by", "cutting", "off", "the", "similarity", "scores", "." ]
def cutoff_graph( g, simi_cutoff ) : g = copy.deepcopy( g ) edges_to_be_deleted = [] for e in g.edges() : if (g[e[0]][e[1]]["similarity"] < simi_cutoff) : edges_to_be_deleted.append( e ) g.remove_edges_from( edges_to_be_deleted ) return g
[ "def", "cutoff_graph", "(", "g", ",", "simi_cutoff", ")", ":", "g", "=", "copy", ".", "deepcopy", "(", "g", ")", "edges_to_be_deleted", "=", "[", "]", "for", "e", "in", "g", ".", "edges", "(", ")", ":", "if", "(", "g", "[", "e", "[", "0", "]", "]", "[", "e", "[", "1", "]", "]", "[", "\"similarity\"", "]", "<", "simi_cutoff", ")", ":", "edges_to_be_deleted", ".", "append", "(", "e", ")", "g", ".", "remove_edges_from", "(", "edges_to_be_deleted", ")", "return", "g" ]
Generates a new graph by cutting off the similarity scores.
[ "Generates", "a", "new", "graph", "by", "cutting", "off", "the", "similarity", "scores", "." ]
[ "\"\"\"\n Generates a new graph by cutting off the similarity scores.\n\n @type g: C{networkx.Graph}\n @param g: Original graph\n @type simi_cutoff: C{float}\n @param simi_cutoff: Similarity cutoff\n \n @return: A new graph\n \"\"\"" ]
[ { "param": "g", "type": null }, { "param": "simi_cutoff", "type": null } ]
{ "returns": [ { "docstring": "A new graph", "docstring_tokens": [ "A", "new", "graph" ], "type": null } ], "raises": [], "params": [ { "identifier": "g", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false }, { "identifier": "simi_cutoff", "type": null, "docstring": null, "docstring_tokens": [ "None" ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
import copy def cutoff_graph( g, simi_cutoff ) : g = copy.deepcopy( g ) edges_to_be_deleted = [] for e in g.edges() : if (g[e[0]][e[1]]["similarity"] < simi_cutoff) : edges_to_be_deleted.append( e ) g.remove_edges_from( edges_to_be_deleted ) return g
442
934
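A sketch showing that the deep copy leaves the input graph untouched (assumes networkx, `import copy`, and the definition above):

import networkx as nx

g = nx.Graph()
g.add_edge("a", "b", similarity=0.9)
g.add_edge("b", "c", similarity=0.2)
pruned = cutoff_graph(g, simi_cutoff=0.5)
print(list(pruned.edges()))   # [('a', 'b')] -- the low-similarity edge is dropped
print(g.number_of_edges())    # 2 -- the original graph is unchanged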
856e7f70af58fb3968d4c8a1f54500fdfc4f1731
stackdump/factom-hackathon
bitwrap_io/config.py
[ "MIT" ]
Python
options
<not_specific>
def options(opts=None): """ return options as dict from env vars """ if opts is None: opts = {} def _opt(optkey, key, default): if optkey not in opts: opts[optkey] = os.environ.get(key, default) _opt('pg-host', 'POSTGRES_HOST', '127.0.0.1') _opt('pg-username', 'POSTGRES_USER', 'bitwrap') _opt('pg-database', 'POSTGRES_DB', 'bitwrap') _opt('pg-password', 'POSTGRES_PASS', 'bitwrap') _opt('listen-port', 'LISTEN_PORT', '8080') _opt('listen-ip', 'LISTEN_IP', '0.0.0.0') return opts
return options as dict from env vars
return options as dict from env vars
[ "return", "options", "as", "dict", "from", "env", "vars" ]
def options(opts=None): if opts is None: opts = {} def _opt(optkey, key, default): if optkey not in opts: opts[optkey] = os.environ.get(key, default) _opt('pg-host', 'POSTGRES_HOST', '127.0.0.1') _opt('pg-username', 'POSTGRES_USER', 'bitwrap') _opt('pg-database', 'POSTGRES_DB', 'bitwrap') _opt('pg-password', 'POSTGRES_PASS', 'bitwrap') _opt('listen-port', 'LISTEN_PORT', '8080') _opt('listen-ip', 'LISTEN_IP', '0.0.0.0') return opts
[ "def", "options", "(", "opts", "=", "None", ")", ":", "if", "opts", "is", "None", ":", "opts", "=", "{", "}", "def", "_opt", "(", "optkey", ",", "key", ",", "default", ")", ":", "if", "optkey", "not", "in", "opts", ":", "opts", "[", "optkey", "]", "=", "os", ".", "environ", ".", "get", "(", "key", ",", "default", ")", "_opt", "(", "'pg-host'", ",", "'POSTGRES_HOST'", ",", "'127.0.0.1'", ")", "_opt", "(", "'pg-username'", ",", "'POSTGRES_USER'", ",", "'bitwrap'", ")", "_opt", "(", "'pg-database'", ",", "'POSTGRES_DB'", ",", "'bitwrap'", ")", "_opt", "(", "'pg-password'", ",", "'POSTGRES_PASS'", ",", "'bitwrap'", ")", "_opt", "(", "'listen-port'", ",", "'LISTEN_PORT'", ",", "'8080'", ")", "_opt", "(", "'listen-ip'", ",", "'LISTEN_IP'", ",", "'0.0.0.0'", ")", "return", "opts" ]
return options as dict from env vars
[ "return", "options", "as", "dict", "from", "env", "vars" ]
[ "\"\"\" return options as dict from env vars \"\"\"" ]
[ { "param": "opts", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "opts", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import os def options(opts=None): if opts is None: opts = {} def _opt(optkey, key, default): if optkey not in opts: opts[optkey] = os.environ.get(key, default) _opt('pg-host', 'POSTGRES_HOST', '127.0.0.1') _opt('pg-username', 'POSTGRES_USER', 'bitwrap') _opt('pg-database', 'POSTGRES_DB', 'bitwrap') _opt('pg-password', 'POSTGRES_PASS', 'bitwrap') _opt('listen-port', 'LISTEN_PORT', '8080') _opt('listen-ip', 'LISTEN_IP', '0.0.0.0') return opts
443
679
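A sketch of the precedence: keys already present in `opts` win, then environment variables, then the hard-coded defaults (the host value below is illustrative):

import os

os.environ["POSTGRES_HOST"] = "db.internal"   # illustrative env override
opts = options({"listen-port": "9090"})       # explicit key beats env and default
print(opts["pg-host"], opts["listen-port"], opts["pg-username"])
# db.internal 9090 bitwrap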
0f6ed79790a112be9c4f21a9605d914a0fa8aafd
InnovativeInventor/retworkx
retworkx/__init__.py
[ "Apache-2.0" ]
Python
is_subgraph_isomorphic
null
def is_subgraph_isomorphic( first, second, node_matcher=None, edge_matcher=None, id_order=False, induced=True, call_limit=None, ): """Determine if 2 graphs are subgraph isomorphic This checks if 2 graphs are subgraph isomorphic both structurally and also comparing the node and edge data using the provided matcher functions. The matcher functions take in 2 data objects and will compare them. Since there is an ambiguity in the term 'subgraph', do note that we check for an node-induced subgraph if argument `induced` is set to `True`. If it is set to `False`, we check for a non induced subgraph, meaning the second graph can have fewer edges than the subgraph of the first. By default it's `True`. A simple example that checks if they're just equal would be:: graph_a = retworkx.PyGraph() graph_b = retworkx.PyGraph() retworkx.is_subgraph_isomorphic(graph_a, graph_b, lambda x, y: x == y) :param first: The first graph to compare. Can either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`. :param second: The second graph to compare. Can either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`. It should be the same type as the first graph. :param callable node_matcher: A python callable object that takes 2 positional one for each node data object. If the return of this function evaluates to True then the nodes passed to it are viewed as matching. :param callable edge_matcher: A python callable object that takes 2 positional one for each edge data object. If the return of this function evaluates to True then the edges passed to it are viewed as matching. :param bool id_order: If set to ``True`` this function will match the nodes in order specified by their ids. Otherwise it will default to a heuristic matching order based on [VF2]_ paper. :param bool induced: If set to ``True`` this function will check the existence of a node-induced subgraph of first isomorphic to second graph. Default: ``True``. :param int call_limit: An optional bound on the number of states that VF2 algorithm visits while searching for a solution. If it exceeds this limit, the algorithm will stop and return ``False``. :returns: ``True`` if there is a subgraph of `first` isomorphic to `second` , ``False`` if there is not. :rtype: bool """ raise TypeError("Invalid Input Type %s for graph" % type(first))
Determine if 2 graphs are subgraph isomorphic This checks if 2 graphs are subgraph isomorphic both structurally and also comparing the node and edge data using the provided matcher functions. The matcher functions take in 2 data objects and will compare them. Since there is an ambiguity in the term 'subgraph', do note that we check for an node-induced subgraph if argument `induced` is set to `True`. If it is set to `False`, we check for a non induced subgraph, meaning the second graph can have fewer edges than the subgraph of the first. By default it's `True`. A simple example that checks if they're just equal would be:: graph_a = retworkx.PyGraph() graph_b = retworkx.PyGraph() retworkx.is_subgraph_isomorphic(graph_a, graph_b, lambda x, y: x == y) :param first: The first graph to compare. Can either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`. :param second: The second graph to compare. Can either be a :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`. It should be the same type as the first graph. :param callable node_matcher: A python callable object that takes 2 positional one for each node data object. If the return of this function evaluates to True then the nodes passed to it are viewed as matching. :param callable edge_matcher: A python callable object that takes 2 positional one for each edge data object. If the return of this function evaluates to True then the edges passed to it are viewed as matching. :param bool id_order: If set to ``True`` this function will match the nodes in order specified by their ids. Otherwise it will default to a heuristic matching order based on [VF2]_ paper. :param bool induced: If set to ``True`` this function will check the existence of a node-induced subgraph of first isomorphic to second graph. Default: ``True``. :param int call_limit: An optional bound on the number of states that VF2 algorithm visits while searching for a solution. If it exceeds this limit, the algorithm will stop and return ``False``. :returns: ``True`` if there is a subgraph of `first` isomorphic to `second` , ``False`` if there is not. :rtype: bool
Determine if 2 graphs are subgraph isomorphic This checks if 2 graphs are subgraph isomorphic both structurally and also comparing the node and edge data using the provided matcher functions. The matcher functions take in 2 data objects and will compare them. Since there is an ambiguity in the term 'subgraph', do note that we check for an node-induced subgraph if argument `induced` is set to `True`. If it is set to `False`, we check for a non induced subgraph, meaning the second graph can have fewer edges than the subgraph of the first. By default it's `True`. A simple example that checks if they're just equal would be:.
[ "Determine", "if", "2", "graphs", "are", "subgraph", "isomorphic", "This", "checks", "if", "2", "graphs", "are", "subgraph", "isomorphic", "both", "structurally", "and", "also", "comparing", "the", "node", "and", "edge", "data", "using", "the", "provided", "matcher", "functions", ".", "The", "matcher", "functions", "take", "in", "2", "data", "objects", "and", "will", "compare", "them", ".", "Since", "there", "is", "an", "ambiguity", "in", "the", "term", "'", "subgraph", "'", "do", "note", "that", "we", "check", "for", "an", "node", "-", "induced", "subgraph", "if", "argument", "`", "induced", "`", "is", "set", "to", "`", "True", "`", ".", "If", "it", "is", "set", "to", "`", "False", "`", "we", "check", "for", "a", "non", "induced", "subgraph", "meaning", "the", "second", "graph", "can", "have", "fewer", "edges", "than", "the", "subgraph", "of", "the", "first", ".", "By", "default", "it", "'", "s", "`", "True", "`", ".", "A", "simple", "example", "that", "checks", "if", "they", "'", "re", "just", "equal", "would", "be", ":", "." ]
def is_subgraph_isomorphic( first, second, node_matcher=None, edge_matcher=None, id_order=False, induced=True, call_limit=None, ): raise TypeError("Invalid Input Type %s for graph" % type(first))
[ "def", "is_subgraph_isomorphic", "(", "first", ",", "second", ",", "node_matcher", "=", "None", ",", "edge_matcher", "=", "None", ",", "id_order", "=", "False", ",", "induced", "=", "True", ",", "call_limit", "=", "None", ",", ")", ":", "raise", "TypeError", "(", "\"Invalid Input Type %s for graph\"", "%", "type", "(", "first", ")", ")" ]
Determine if 2 graphs are subgraph isomorphic This checks if 2 graphs are subgraph isomorphic both structurally and also comparing the node and edge data using the provided matcher functions.
[ "Determine", "if", "2", "graphs", "are", "subgraph", "isomorphic", "This", "checks", "if", "2", "graphs", "are", "subgraph", "isomorphic", "both", "structurally", "and", "also", "comparing", "the", "node", "and", "edge", "data", "using", "the", "provided", "matcher", "functions", "." ]
[ "\"\"\"Determine if 2 graphs are subgraph isomorphic\n\n This checks if 2 graphs are subgraph isomorphic both structurally and also\n comparing the node and edge data using the provided matcher functions.\n The matcher functions take in 2 data objects and will compare them.\n Since there is an ambiguity in the term 'subgraph', do note that we check\n for an node-induced subgraph if argument `induced` is set to `True`. If it is\n set to `False`, we check for a non induced subgraph, meaning the second graph\n can have fewer edges than the subgraph of the first. By default it's `True`. A\n simple example that checks if they're just equal would be::\n\n graph_a = retworkx.PyGraph()\n graph_b = retworkx.PyGraph()\n retworkx.is_subgraph_isomorphic(graph_a, graph_b,\n lambda x, y: x == y)\n\n\n :param first: The first graph to compare. Can either be a\n :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`.\n :param second: The second graph to compare. Can either be a\n :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`.\n It should be the same type as the first graph.\n :param callable node_matcher: A python callable object that takes 2\n positional one for each node data object. If the return of this\n function evaluates to True then the nodes passed to it are viewed\n as matching.\n :param callable edge_matcher: A python callable object that takes 2\n positional one for each edge data object. If the return of this\n function evaluates to True then the edges passed to it are viewed\n as matching.\n :param bool id_order: If set to ``True`` this function will match the nodes\n in order specified by their ids. Otherwise it will default to a heuristic\n matching order based on [VF2]_ paper.\n :param bool induced: If set to ``True`` this function will check the existence\n of a node-induced subgraph of first isomorphic to second graph.\n Default: ``True``.\n :param int call_limit: An optional bound on the number of states that VF2\n algorithm visits while searching for a solution. If it exceeds this limit,\n the algorithm will stop and return ``False``.\n\n :returns: ``True`` if there is a subgraph of `first` isomorphic to `second`\n , ``False`` if there is not.\n :rtype: bool\n \"\"\"" ]
[ { "param": "first", "type": null }, { "param": "second", "type": null }, { "param": "node_matcher", "type": null }, { "param": "edge_matcher", "type": null }, { "param": "id_order", "type": null }, { "param": "induced", "type": null }, { "param": "call_limit", "type": null } ]
{ "returns": [ { "docstring": "``True`` if there is a subgraph of `first` isomorphic to `second`\n, ``False`` if there is not.", "docstring_tokens": [ "`", "`", "True", "`", "`", "if", "there", "is", "a", "subgraph", "of", "`", "first", "`", "isomorphic", "to", "`", "second", "`", "`", "`", "False", "`", "`", "if", "there", "is", "not", "." ], "type": "bool" } ], "raises": [], "params": [ { "identifier": "first", "type": null, "docstring": "The first graph to compare. Can either be a\n:class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`.", "docstring_tokens": [ "The", "first", "graph", "to", "compare", ".", "Can", "either", "be", "a", ":", "class", ":", "`", "~retworkx", ".", "PyGraph", "`", "or", ":", "class", ":", "`", "~retworkx", ".", "PyDiGraph", "`", "." ], "default": null, "is_optional": null }, { "identifier": "second", "type": null, "docstring": "The second graph to compare. Can either be a\n:class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`.\nIt should be the same type as the first graph.", "docstring_tokens": [ "The", "second", "graph", "to", "compare", ".", "Can", "either", "be", "a", ":", "class", ":", "`", "~retworkx", ".", "PyGraph", "`", "or", ":", "class", ":", "`", "~retworkx", ".", "PyDiGraph", "`", ".", "It", "should", "be", "the", "same", "type", "as", "the", "first", "graph", "." ], "default": null, "is_optional": null }, { "identifier": "node_matcher", "type": null, "docstring": "A python callable object that takes 2\npositional one for each node data object. If the return of this\nfunction evaluates to True then the nodes passed to it are viewed\nas matching.", "docstring_tokens": [ "A", "python", "callable", "object", "that", "takes", "2", "positional", "one", "for", "each", "node", "data", "object", ".", "If", "the", "return", "of", "this", "function", "evaluates", "to", "True", "then", "the", "nodes", "passed", "to", "it", "are", "viewed", "as", "matching", "." ], "default": null, "is_optional": false }, { "identifier": "edge_matcher", "type": null, "docstring": "A python callable object that takes 2\npositional one for each edge data object. If the return of this\nfunction evaluates to True then the edges passed to it are viewed\nas matching.", "docstring_tokens": [ "A", "python", "callable", "object", "that", "takes", "2", "positional", "one", "for", "each", "edge", "data", "object", ".", "If", "the", "return", "of", "this", "function", "evaluates", "to", "True", "then", "the", "edges", "passed", "to", "it", "are", "viewed", "as", "matching", "." ], "default": null, "is_optional": false }, { "identifier": "id_order", "type": null, "docstring": "If set to ``True`` this function will match the nodes\nin order specified by their ids. Otherwise it will default to a heuristic\nmatching order based on [VF2]_ paper.", "docstring_tokens": [ "If", "set", "to", "`", "`", "True", "`", "`", "this", "function", "will", "match", "the", "nodes", "in", "order", "specified", "by", "their", "ids", ".", "Otherwise", "it", "will", "default", "to", "a", "heuristic", "matching", "order", "based", "on", "[", "VF2", "]", "_", "paper", "." 
], "default": null, "is_optional": false }, { "identifier": "induced", "type": null, "docstring": "If set to ``True`` this function will check the existence\nof a node-induced subgraph of first isomorphic to second graph.", "docstring_tokens": [ "If", "set", "to", "`", "`", "True", "`", "`", "this", "function", "will", "check", "the", "existence", "of", "a", "node", "-", "induced", "subgraph", "of", "first", "isomorphic", "to", "second", "graph", "." ], "default": null, "is_optional": false }, { "identifier": "call_limit", "type": null, "docstring": "An optional bound on the number of states that VF2\nalgorithm visits while searching for a solution. If it exceeds this limit,\nthe algorithm will stop and return ``False``.", "docstring_tokens": [ "An", "optional", "bound", "on", "the", "number", "of", "states", "that", "VF2", "algorithm", "visits", "while", "searching", "for", "a", "solution", ".", "If", "it", "exceeds", "this", "limit", "the", "algorithm", "will", "stop", "and", "return", "`", "`", "False", "`", "`", "." ], "default": null, "is_optional": false } ], "outlier_params": [], "others": [] }
def is_subgraph_isomorphic( first, second, node_matcher=None, edge_matcher=None, id_order=False, induced=True, call_limit=None, ): raise TypeError("Invalid Input Type %s for graph" % type(first))
445
544
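A hedged usage sketch for the `is_subgraph_isomorphic` record above. It assumes the `retworkx` package is installed; the graphs, node payloads, and the `node_matcher` lambda are invented for illustration, and the call uses only parameters documented in the record's `docstring_params`.

```python
import retworkx

# Build a 4-cycle as the larger graph. A fresh PyGraph assigns node indices 0..3.
first = retworkx.PyGraph()
first.add_nodes_from(["a", "b", "c", "d"])
first.add_edges_from([(0, 1, None), (1, 2, None), (2, 3, None), (3, 0, None)])

# A single edge as the candidate subgraph (node indices 0..1).
second = retworkx.PyGraph()
second.add_nodes_from(["x", "y"])
second.add_edges_from([(0, 1, None)])

# True: any edge of the cycle is a node-induced subgraph isomorphic to `second`.
print(retworkx.is_subgraph_isomorphic(first, second))

# With a node_matcher that compares payloads, "x"/"y" never match "a".."d",
# so the same structural check now fails.
print(retworkx.is_subgraph_isomorphic(
    first, second, node_matcher=lambda n1, n2: n1 == n2))  # False
```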
1ad35fc8373760c197d3bd8dba0fcb0a89fdd040
scuc/GoPro-Concat-Automation
gopro_concat.py
[ "MIT" ]
Python
print_intro
<not_specific>
def print_intro(): ''' Get the user input for starting values (Source, Output, Downconvert) and begin the concat process. ''' open_msg = f"\n\ ================================================================\n \ GoPro Automation Script, version 0.2 \n \ This script will take a collection of chaptered GoPro video files \n \ and stitch them together with the option to also downconvert them \n \ to a 10Mbit file. \n \ ================================================================\n" print(open_msg) source_path = str(input("Source path: ")) output_path = str(input("Output path: ")) down_convert = str(input("Downconvert the GoPro files to 10Mbit [Yes/No]: ")) while True: if down_convert.lower() == 'yes': down_convert = True break elif down_convert.lower() == 'no': down_convert = False break else: print(f"{down_convert} is not a valid choice.") down_convert = str(input("Please select Yes or No (Y/N): ")) continue return [source_path, output_path, down_convert]
Get the user input for starting values (Source, Output, Downconvert) and begin the concat process.
Get the user input for starting values (Source, Output, Downconvert) and begin the concat process.
[ "Get", "the", "user", "input", "for", "starting", "values", "(", "Source", "Output", "Downconvert", ")", "and", "begin", "the", "concat", "process", "." ]
def print_intro(): open_msg = f"\n\ ================================================================\n \ GoPro Automation Script, version 0.2 \n \ This script will take a collection of chaptered GoPro video files \n \ and stitch them together with the option to also downconvert them \n \ to a 10Mbit file. \n \ ================================================================\n" print(open_msg) source_path = str(input("Source path: ")) output_path = str(input("Output path: ")) down_convert = str(input("Downconvert the GoPro files to 10Mbit [Yes/No]: ")) while True: if down_convert.lower() == 'yes': down_convert = True break elif down_convert.lower() == 'no': down_convert = False break else: print(f"{down_convert} is not a valid choice.") down_convert = str(input("Please select Yes or No (Y/N): ")) continue return [source_path, output_path, down_convert]
[ "def", "print_intro", "(", ")", ":", "open_msg", "=", "f\"\\n\\\r\n ================================================================\\n \\\r\n GoPro Automation Script, version 0.2 \\n \\\r\n This script will take a collection of chaptered GoPro video files \\n \\\r\n and stitch them together with the option to also downconvert them \\n \\\r\n to a 10Mbit file. \\n \\\r\n ================================================================\\n\"", "print", "(", "open_msg", ")", "source_path", "=", "str", "(", "input", "(", "\"Source path: \"", ")", ")", "output_path", "=", "str", "(", "input", "(", "\"Output path: \"", ")", ")", "down_convert", "=", "str", "(", "input", "(", "\"Downconvert the GoPro files to 10Mbit [Yes/No]: \"", ")", ")", "while", "True", ":", "if", "down_convert", ".", "lower", "(", ")", "==", "'yes'", ":", "down_convert", "=", "True", "break", "elif", "down_convert", ".", "lower", "(", ")", "==", "'no'", ":", "down_convert", "=", "False", "break", "else", ":", "print", "(", "f\"{down_convert} is not a valid choice.\"", ")", "down_convert", "=", "str", "(", "input", "(", "\"Please select Yes or No (Y/N): \"", ")", ")", "continue", "return", "[", "source_path", ",", "output_path", ",", "down_convert", "]" ]
Get the user input for starting values (Source, Output, Downconvert) and begin the concat process.
[ "Get", "the", "user", "input", "for", "starting", "values", "(", "Source", "Output", "Downconvert", ")", "and", "begin", "the", "concat", "process", "." ]
[ "'''\r\n Get the user input for starting values (Source, Output, Downconvert)\r\n and begin the concat process.\r\n '''" ]
[]
{ "returns": [], "raises": [], "params": [], "outlier_params": [], "others": [] }
def print_intro(): open_msg = f"\n\ ================================================================\n \ GoPro Automation Script, version 0.2 \n \ This script will take a collection of chaptered GoPro video files \n \ and stitch them together with the option to also downconvert them \n \ to a 10Mbit file. \n \ ================================================================\n" print(open_msg) source_path = str(input("Source path: ")) output_path = str(input("Output path: ")) down_convert = str(input("Downconvert the GoPro files to 10Mbit [Yes/No]: ")) while True: if down_convert.lower() == 'yes': down_convert = True break elif down_convert.lower() == 'no': down_convert = False break else: print(f"{down_convert} is not a valid choice.") down_convert = str(input("Please select Yes or No (Y/N): ")) continue return [source_path, output_path, down_convert]
446
489
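Since `print_intro` returns a three-element list `[source_path, output_path, down_convert]`, a caller can unpack it directly. The sketch below is illustrative only: the function blocks on `input()`, so the values shown in comments assume a user typed them at the prompts, and the example paths are invented.

```python
# Hypothetical invocation: print_intro() prompts on stdin, so this only runs
# interactively. The variable names mirror the record's return order.
source_path, output_path, down_convert = print_intro()
print(source_path, output_path, down_convert)
# e.g. '/media/gopro' '/tmp/out' True  (if the user answered "yes")
```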
7bf3f7e197f0f0a4e6481eb4681c63649883ecdc
conan25216/pypi_mal
malware/dark-magic-0.1.2/dark_magic/__main__.py
[ "Unlicense" ]
Python
check_valid_lp
<not_specific>
def check_valid_lp(var_signs, A, directions, bvals, max_or_min, cvals, verbose): ''' Checks if an LP is valid. By valid we mean that the number of constraints and variables match in all four files. ''' num_constraints, num_vars = A.shape if len(var_signs) != num_vars: return False, 'Number of variables in A and x file do not match.' if len(directions) != num_constraints or len(bvals) != num_constraints: return False, 'Number of constraints in A and b do not match.' if len(cvals) != num_vars: return False, 'Number of variables in A and c file do not match.' return True, ''
Checks if an LP is valid. By valid we mean that the number of constraints and variables match in all four files.
Checks if an LP is valid. By valid we mean that the number of constraints and variables match in all four files.
def check_valid_lp(var_signs, A, directions, bvals, max_or_min, cvals, verbose): num_constraints, num_vars = A.shape if len(var_signs) != num_vars: return False, 'Number of variables in A and x file do not match.' if len(directions) != num_constraints or len(bvals) != num_constraints: return False, 'Number of constraints in A and b do not match.' if len(cvals) != num_vars: return False, 'Number of variables in A and c file do not match.' return True, ''
[ "def", "check_valid_lp", "(", "var_signs", ",", "A", ",", "directions", ",", "bvals", ",", "max_or_min", ",", "cvals", ",", "verbose", ")", ":", "num_constraints", ",", "num_vars", "=", "A", ".", "shape", "if", "len", "(", "var_signs", ")", "!=", "num_vars", ":", "return", "False", ",", "'Number of variables in A and x file do not match.'", "if", "len", "(", "directions", ")", "!=", "num_constraints", "or", "len", "(", "bvals", ")", "!=", "num_constraints", ":", "return", "False", ",", "'Number of constraints in A and b do not match.'", "if", "len", "(", "cvals", ")", "!=", "num_vars", ":", "return", "False", ",", "'Number of variables in A and c file do not match.'", "return", "True", ",", "''" ]
Checks if an LP is valid.
[ "Checks", "if", "an", "LP", "is", "valid", "." ]
[ "'''\n Checks if an LP is valid. By valid we mean that the number of constraints\n and variables match in all four files.\n '''" ]
[ { "param": "var_signs", "type": null }, { "param": "A", "type": null }, { "param": "directions", "type": null }, { "param": "bvals", "type": null }, { "param": "max_or_min", "type": null }, { "param": "cvals", "type": null }, { "param": "verbose", "type": null } ]
{ "returns": [], "raises": [], "params": [ { "identifier": "var_signs", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "A", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "directions", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "bvals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "max_or_min", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "cvals", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null }, { "identifier": "verbose", "type": null, "docstring": null, "docstring_tokens": [], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
def check_valid_lp(var_signs, A, directions, bvals, max_or_min, cvals, verbose): num_constraints, num_vars = A.shape if len(var_signs) != num_vars: return False, 'Number of variables in A and x file do not match.' if len(directions) != num_constraints or len(bvals) != num_constraints: return False, 'Number of constraints in A and b do not match.' if len(cvals) != num_vars: return False, 'Number of variables in A and c file do not match.' return True, ''
447
744
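A minimal sketch of calling `check_valid_lp`, assuming `A` is a NumPy array (the function reads `A.shape`). The sign and direction encodings below are invented placeholders; the function only compares lengths, so their exact contents do not matter.

```python
import numpy as np

# 2 constraints, 3 variables: every file's dimensions agree.
A = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
var_signs = ['>=0', '>=0', 'free']   # one entry per variable
directions = ['<=', '<=']            # one entry per constraint
bvals = [10.0, 20.0]
cvals = [1.0, 1.0, 1.0]

ok, msg = check_valid_lp(var_signs, A, directions, bvals, 'max', cvals, verbose=False)
print(ok, msg)   # True ''

# A mismatched b vector trips the constraint-count check.
ok, msg = check_valid_lp(var_signs, A, directions, [10.0], 'max', cvals, verbose=False)
print(ok, msg)   # False 'Number of constraints in A and b do not match.'
```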
53692e5e806f1fd4536ddd48dcc9d09a09c52ba5
amyhemmeter/baseline
python/baseline/utils.py
[ "Apache-2.0" ]
Python
convert_iobes_to_bio
<not_specific>
def convert_iobes_to_bio(seq): """Convert a sequence of IOBES tags to BIO tags :param seq: `List[str]` The list of IOBES tags. :returns: `List[str]` The list of BIO tags. """ # Use re over `.replace` to make sure it only acts on the beginning of the token. return list(map(lambda x: re.sub(r'^S-', 'B-', re.sub(r'^E-', 'I-', x)), seq))
Convert a sequence of IOBES tags to BIO tags :param seq: `List[str]` The list of IOBES tags. :returns: `List[str]` The list of BIO tags.
Convert a sequence of IOBES tags to BIO tags
[ "Convert", "a", "sequence", "of", "IOBES", "tags", "to", "BIO", "tags" ]
def convert_iobes_to_bio(seq): return list(map(lambda x: re.sub(r'^S-', 'B-', re.sub(r'^E-', 'I-', x)), seq))
[ "def", "convert_iobes_to_bio", "(", "seq", ")", ":", "return", "list", "(", "map", "(", "lambda", "x", ":", "re", ".", "sub", "(", "r'^S-'", ",", "'B-'", ",", "re", ".", "sub", "(", "r'^E-'", ",", "'I-'", ",", "x", ")", ")", ",", "seq", ")", ")" ]
Convert a sequence of IOBES tags to BIO tags
[ "Convert", "a", "sequence", "of", "IOBES", "tags", "to", "BIO", "tags" ]
[ "\"\"\"Convert a sequence of IOBES tags to BIO tags\n\n :param seq: `List[str]` The list of IOBES tags.\n\n :returns: `List[str]` The list of BIO tags.\n \"\"\"", "# Use re over `.replace` to make sure it only acts on the beginning of the token." ]
[ { "param": "seq", "type": null } ]
{ "returns": [ { "docstring": "`List[str]` The list of BIO tags.", "docstring_tokens": [ "`", "List", "[", "str", "]", "`", "The", "list", "of", "BIO", "tags", "." ], "type": null } ], "raises": [], "params": [ { "identifier": "seq", "type": null, "docstring": "`List[str]` The list of IOBES tags.", "docstring_tokens": [ "`", "List", "[", "str", "]", "`", "The", "list", "of", "IOBES", "tags", "." ], "default": null, "is_optional": null } ], "outlier_params": [], "others": [] }
import re def convert_iobes_to_bio(seq): return list(map(lambda x: re.sub(r'^S-', 'B-', re.sub(r'^E-', 'I-', x)), seq))
448
735
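A usage sketch for `convert_iobes_to_bio` (it needs `re`, as in the record's `code_with_imports` field): a leading `S-` becomes `B-` and a leading `E-` becomes `I-`, while `O`, `B-`, and `I-` tags pass through unchanged. The tag sequence here is invented for illustration.

```python
import re  # required by convert_iobes_to_bio

tags = ['O', 'S-PER', 'B-LOC', 'I-LOC', 'E-LOC']
print(convert_iobes_to_bio(tags))
# -> ['O', 'B-PER', 'B-LOC', 'I-LOC', 'I-LOC']
```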