Dataset columns:

repository_name             stringlengths    7 to 55
func_path_in_repository     stringlengths    4 to 223
func_name                   stringlengths    1 to 134
whole_func_string           stringlengths    75 to 104k
language                    stringclasses    1 value (all rows: python)
func_code_string            stringlengths    75 to 104k
func_code_tokens            sequencelengths  19 to 28.4k
func_documentation_string   stringlengths    1 to 46.9k
func_documentation_tokens   sequencelengths  1 to 1.97k
split_name                  stringclasses    1 value (all rows: train)
func_code_url               stringlengths    87 to 315
BBVA/data-refinery
datarefinery/tuple/Formats.py
csv_to_map
def csv_to_map(fields, delimiter=','):
    """
    Convert csv to dict

    :param delimiter:
    :param fields:
    :return:
    """
    def _csv_to_list(csv_input):
        """
        Util function to overcome the use of files by in-memory io buffer

        :param csv_input:
        :return:
        """
        io_file = io.StringIO(csv_input)
        return next(csv.reader(io_file, delimiter=delimiter))

    def _app(current_tuple, e=None):
        if current_tuple is None or len(current_tuple) == 0:
            return None, "no input"
        csv_list = _csv_to_list(current_tuple)
        if len(csv_list) != len(fields):
            e = {"input": "unexpected number of fields {} obtained {} expected".format(len(csv_list), len(fields))}
            return None, e
        return {k: v for (k, v) in zip(fields, csv_list)}, e

    if fields is None or len(fields) == 0:
        return fixed_input(None, "no fields provided, cannot proceed without order")
    return _app
python
def csv_to_map(fields, delimiter=','):
    """
    Convert csv to dict

    :param delimiter:
    :param fields:
    :return:
    """
    def _csv_to_list(csv_input):
        """
        Util function to overcome the use of files by in-memory io buffer

        :param csv_input:
        :return:
        """
        io_file = io.StringIO(csv_input)
        return next(csv.reader(io_file, delimiter=delimiter))

    def _app(current_tuple, e=None):
        if current_tuple is None or len(current_tuple) == 0:
            return None, "no input"
        csv_list = _csv_to_list(current_tuple)
        if len(csv_list) != len(fields):
            e = {"input": "unexpected number of fields {} obtained {} expected".format(len(csv_list), len(fields))}
            return None, e
        return {k: v for (k, v) in zip(fields, csv_list)}, e

    if fields is None or len(fields) == 0:
        return fixed_input(None, "no fields provided, cannot proceed without order")
    return _app
[ "def", "csv_to_map", "(", "fields", ",", "delimiter", "=", "','", ")", ":", "def", "_csv_to_list", "(", "csv_input", ")", ":", "\"\"\"\n Util function to overcome the use of files by in-memory io buffer\n\n :param csv_input:\n :return:\n \"\"\"", "io_file", "=", "io", ".", "StringIO", "(", "csv_input", ")", "return", "next", "(", "csv", ".", "reader", "(", "io_file", ",", "delimiter", "=", "delimiter", ")", ")", "def", "_app", "(", "current_tuple", ",", "e", "=", "None", ")", ":", "if", "current_tuple", "is", "None", "or", "len", "(", "current_tuple", ")", "==", "0", ":", "return", "None", ",", "\"no input\"", "csv_list", "=", "_csv_to_list", "(", "current_tuple", ")", "if", "len", "(", "csv_list", ")", "!=", "len", "(", "fields", ")", ":", "e", "=", "{", "\"input\"", ":", "\"unexpected number of fields {} obtained {} expected\"", ".", "format", "(", "len", "(", "csv_list", ")", ",", "len", "(", "fields", ")", ")", "}", "return", "None", ",", "e", "return", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "zip", "(", "fields", ",", "csv_list", ")", "}", ",", "e", "if", "fields", "is", "None", "or", "len", "(", "fields", ")", "==", "0", ":", "return", "fixed_input", "(", "None", ",", "\"no fields provided, cannot proceed without order\"", ")", "return", "_app" ]
Convert csv to dict :param delimiter: :param fields: :return:
[ "Convert", "csv", "to", "dict" ]
train
https://github.com/BBVA/data-refinery/blob/4ff19186ac570269f64a245ad6297cf882c70aa4/datarefinery/tuple/Formats.py#L34-L63
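A minimal usage sketch for the row above (hedged: it assumes the module-level io and csv imports and the fixed_input helper that Formats.py provides):

parse = csv_to_map(["name", "age"])
print(parse("alice,30"))  # ({'name': 'alice', 'age': '30'}, None)
print(parse("alice"))     # (None, {'input': 'unexpected number of fields 1 obtained 2 expected'})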
BBVA/data-refinery
datarefinery/tuple/Formats.py
map_to_csv
def map_to_csv(fields, delimiter=","):
    """
    Convert dict to csv

    :param fields:
    :return:
    """
    def _list_to_csv(l):
        """
        Util function to overcome the use of files by in-memory io buffer

        :param l:
        :return:
        """
        io_file = io.StringIO()
        writer = csv.writer(io_file, quoting=csv.QUOTE_NONNUMERIC, lineterminator='', delimiter=delimiter)
        writer.writerow(l)
        return io_file.getvalue()

    def _app(current_tuple, e=None):
        if e is not None:
            return None, e
        csv_list = []
        for f in fields:
            if f in current_tuple:
                csv_list.append(current_tuple[f])
            else:
                # e is always None here (the early return above handles the other
                # case); the original called e.update() on None, which raises
                # AttributeError, so initialize the dict first.
                e = {}
                e.update({"output": "expected field {} not found".format(f)})
                return None, e
        return _list_to_csv(csv_list), e

    if fields is None or len(fields) == 0:
        return fixed_input(None, "no fields provided, cannot proceed without order")
    return _app
python
def map_to_csv(fields, delimiter=","):
    """
    Convert dict to csv

    :param fields:
    :return:
    """
    def _list_to_csv(l):
        """
        Util function to overcome the use of files by in-memory io buffer

        :param l:
        :return:
        """
        io_file = io.StringIO()
        writer = csv.writer(io_file, quoting=csv.QUOTE_NONNUMERIC, lineterminator='', delimiter=delimiter)
        writer.writerow(l)
        return io_file.getvalue()

    def _app(current_tuple, e=None):
        if e is not None:
            return None, e
        csv_list = []
        for f in fields:
            if f in current_tuple:
                csv_list.append(current_tuple[f])
            else:
                # e is always None here (the early return above handles the other
                # case); the original called e.update() on None, which raises
                # AttributeError, so initialize the dict first.
                e = {}
                e.update({"output": "expected field {} not found".format(f)})
                return None, e
        return _list_to_csv(csv_list), e

    if fields is None or len(fields) == 0:
        return fixed_input(None, "no fields provided, cannot proceed without order")
    return _app
[ "def", "map_to_csv", "(", "fields", ",", "delimiter", "=", "\",\"", ")", ":", "def", "_list_to_csv", "(", "l", ")", ":", "\"\"\"\n Util function to overcome the use of files by in-memory io buffer\n\n :param l:\n :return:\n \"\"\"", "io_file", "=", "io", ".", "StringIO", "(", ")", "writer", "=", "csv", ".", "writer", "(", "io_file", ",", "quoting", "=", "csv", ".", "QUOTE_NONNUMERIC", ",", "lineterminator", "=", "''", ",", "delimiter", "=", "delimiter", ")", "writer", ".", "writerow", "(", "l", ")", "return", "io_file", ".", "getvalue", "(", ")", "def", "_app", "(", "current_tuple", ",", "e", "=", "None", ")", ":", "if", "e", "is", "not", "None", ":", "return", "None", ",", "e", "csv_list", "=", "[", "]", "for", "f", "in", "fields", ":", "if", "f", "in", "current_tuple", ":", "csv_list", ".", "append", "(", "current_tuple", "[", "f", "]", ")", "else", ":", "e", ".", "update", "(", "{", "\"output\"", ":", "\"expected field {} not found\"", ".", "format", "(", "f", ")", "}", ")", "return", "None", ",", "e", "return", "_list_to_csv", "(", "csv_list", ")", ",", "e", "if", "fields", "is", "None", "or", "len", "(", "fields", ")", "==", "0", ":", "return", "fixed_input", "(", "None", ",", "\"no fields provided, cannot proceed without order\"", ")", "return", "_app" ]
Convert dict to csv :param fields: :return:
[ "Convert", "dict", "to", "csv" ]
train
https://github.com/BBVA/data-refinery/blob/4ff19186ac570269f64a245ad6297cf882c70aa4/datarefinery/tuple/Formats.py#L66-L99
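A matching sketch for map_to_csv (same assumptions as above; note that csv.QUOTE_NONNUMERIC quotes strings but leaves numbers bare):

serialize = map_to_csv(["name", "age"])
print(serialize({"name": "alice", "age": 30}))  # ('"alice",30', None)
print(serialize({"name": "alice"}))             # (None, {'output': 'expected field age not found'})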
deshima-dev/decode
decode/core/array/decorators.py
xarrayfunc
def xarrayfunc(func):
    """Make a function compatible with xarray.DataArray.

    This function is intended to be used as a decorator like::

        >>> @dc.xarrayfunc
        >>> def func(array):
        ...     # do something
        ...     return newarray
        >>>
        >>> result = func(array)

    Args:
        func (function): Function to be wrapped. The first argument
            of the function must be an array to be processed.

    Returns:
        wrapper (function): Wrapped function.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if any(isinstance(arg, xr.DataArray) for arg in args):
            newargs = []
            for arg in args:
                if isinstance(arg, xr.DataArray):
                    newargs.append(arg.values)
                else:
                    newargs.append(arg)

            return dc.full_like(args[0], func(*newargs, **kwargs))
        else:
            return func(*args, **kwargs)

    return wrapper
python
def xarrayfunc(func):
    """Make a function compatible with xarray.DataArray.

    This function is intended to be used as a decorator like::

        >>> @dc.xarrayfunc
        >>> def func(array):
        ...     # do something
        ...     return newarray
        >>>
        >>> result = func(array)

    Args:
        func (function): Function to be wrapped. The first argument
            of the function must be an array to be processed.

    Returns:
        wrapper (function): Wrapped function.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if any(isinstance(arg, xr.DataArray) for arg in args):
            newargs = []
            for arg in args:
                if isinstance(arg, xr.DataArray):
                    newargs.append(arg.values)
                else:
                    newargs.append(arg)

            return dc.full_like(args[0], func(*newargs, **kwargs))
        else:
            return func(*args, **kwargs)

    return wrapper
[ "def", "xarrayfunc", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "any", "(", "isinstance", "(", "arg", ",", "xr", ".", "DataArray", ")", "for", "arg", "in", "args", ")", ":", "newargs", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "xr", ".", "DataArray", ")", ":", "newargs", ".", "append", "(", "arg", ".", "values", ")", "else", ":", "newargs", ".", "append", "(", "arg", ")", "return", "dc", ".", "full_like", "(", "args", "[", "0", "]", ",", "func", "(", "*", "newargs", ",", "*", "*", "kwargs", ")", ")", "else", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Make a function compatible with xarray.DataArray. This function is intended to be used as a decorator like:: >>> @dc.xarrayfunc >>> def func(array): ... # do something ... return newarray >>> >>> result = func(array) Args: func (function): Function to be wrapped. The first argument of the function must be an array to be processed. Returns: wrapper (function): Wrapped function.
[ "Make", "a", "function", "compatible", "with", "xarray", ".", "DataArray", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/array/decorators.py#L30-L63
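A hypothetical sketch of the decorator above; the function name double is made up, and it assumes decode is imported as dc with a full_like that rewraps an array of values, as the wrapper requires:

import numpy as np
import xarray as xr

@xarrayfunc
def double(array):
    return array * 2  # receives a plain np.ndarray even when called with a DataArray

da = xr.DataArray(np.arange(3))
result = double(da)  # values are unwrapped, doubled, then rewrapped via dc.full_like(da, ...)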
deshima-dev/decode
decode/core/array/decorators.py
chunk
def chunk(*argnames, concatfunc=None):
    """Make a function compatible with multicore chunk processing.

    This function is intended to be used as a decorator like::

        >>> @dc.chunk('array')
        >>> def func(array):
        ...     # do something
        ...     return newarray
        >>>
        >>> result = func(array, timechunk=10)

    or you can set a global chunk parameter outside the function::

        >>> timechunk = 10
        >>> result = func(array)

    """
    def _chunk(func):
        depth = [s.function for s in stack()].index('<module>')
        f_globals = getframe(depth).f_globals

        # original (unwrapped) function
        orgname = '_original_' + func.__name__
        orgfunc = dc.utils.copy_function(func, orgname)
        f_globals[orgname] = orgfunc

        @wraps(func)
        def wrapper(*args, **kwargs):
            depth = [s.function for s in stack()].index('<module>')
            f_globals = getframe(depth).f_globals

            # parse args and kwargs
            params = signature(func).parameters
            for i, (key, val) in enumerate(params.items()):
                if not val.kind == Parameter.POSITIONAL_OR_KEYWORD:
                    break

                try:
                    kwargs.update({key: args[i]})
                except IndexError:
                    kwargs.setdefault(key, val.default)

            # n_chunks and n_processes
            n_chunks = DEFAULT_N_CHUNKS
            n_processes = MAX_WORKERS

            if argnames:
                length = len(kwargs[argnames[0]])

                if 'numchunk' in kwargs:
                    n_chunks = kwargs.pop('numchunk')
                elif 'timechunk' in kwargs:
                    n_chunks = round(length / kwargs.pop('timechunk'))
                elif 'numchunk' in f_globals:
                    n_chunks = f_globals['numchunk']
                elif 'timechunk' in f_globals:
                    n_chunks = round(length / f_globals['timechunk'])

            if 'n_processes' in kwargs:
                n_processes = kwargs.pop('n_processes')
            elif 'n_processes' in f_globals:
                n_processes = f_globals['n_processes']

            # make chunked args
            chunks = {}
            for name in argnames:
                arg = kwargs.pop(name)
                try:
                    chunks.update({name: np.array_split(arg, n_chunks)})
                except TypeError:
                    chunks.update({name: np.tile(arg, n_chunks)})

            # run the function
            futures = []
            results = []
            with dc.utils.one_thread_per_process(), Pool(n_processes) as p:
                for i in range(n_chunks):
                    chunk = {key: val[i] for key, val in chunks.items()}
                    futures.append(p.submit(orgfunc, **{**chunk, **kwargs}))

                for future in futures:
                    results.append(future.result())

            # make an output
            if concatfunc is not None:
                return concatfunc(results)

            try:
                return xr.concat(results, 't')
            except TypeError:
                return np.concatenate(results, 0)

        return wrapper

    return _chunk
python
def chunk(*argnames, concatfunc=None):
    """Make a function compatible with multicore chunk processing.

    This function is intended to be used as a decorator like::

        >>> @dc.chunk('array')
        >>> def func(array):
        ...     # do something
        ...     return newarray
        >>>
        >>> result = func(array, timechunk=10)

    or you can set a global chunk parameter outside the function::

        >>> timechunk = 10
        >>> result = func(array)

    """
    def _chunk(func):
        depth = [s.function for s in stack()].index('<module>')
        f_globals = getframe(depth).f_globals

        # original (unwrapped) function
        orgname = '_original_' + func.__name__
        orgfunc = dc.utils.copy_function(func, orgname)
        f_globals[orgname] = orgfunc

        @wraps(func)
        def wrapper(*args, **kwargs):
            depth = [s.function for s in stack()].index('<module>')
            f_globals = getframe(depth).f_globals

            # parse args and kwargs
            params = signature(func).parameters
            for i, (key, val) in enumerate(params.items()):
                if not val.kind == Parameter.POSITIONAL_OR_KEYWORD:
                    break

                try:
                    kwargs.update({key: args[i]})
                except IndexError:
                    kwargs.setdefault(key, val.default)

            # n_chunks and n_processes
            n_chunks = DEFAULT_N_CHUNKS
            n_processes = MAX_WORKERS

            if argnames:
                length = len(kwargs[argnames[0]])

                if 'numchunk' in kwargs:
                    n_chunks = kwargs.pop('numchunk')
                elif 'timechunk' in kwargs:
                    n_chunks = round(length / kwargs.pop('timechunk'))
                elif 'numchunk' in f_globals:
                    n_chunks = f_globals['numchunk']
                elif 'timechunk' in f_globals:
                    n_chunks = round(length / f_globals['timechunk'])

            if 'n_processes' in kwargs:
                n_processes = kwargs.pop('n_processes')
            elif 'n_processes' in f_globals:
                n_processes = f_globals['n_processes']

            # make chunked args
            chunks = {}
            for name in argnames:
                arg = kwargs.pop(name)
                try:
                    chunks.update({name: np.array_split(arg, n_chunks)})
                except TypeError:
                    chunks.update({name: np.tile(arg, n_chunks)})

            # run the function
            futures = []
            results = []
            with dc.utils.one_thread_per_process(), Pool(n_processes) as p:
                for i in range(n_chunks):
                    chunk = {key: val[i] for key, val in chunks.items()}
                    futures.append(p.submit(orgfunc, **{**chunk, **kwargs}))

                for future in futures:
                    results.append(future.result())

            # make an output
            if concatfunc is not None:
                return concatfunc(results)

            try:
                return xr.concat(results, 't')
            except TypeError:
                return np.concatenate(results, 0)

        return wrapper

    return _chunk
[ "def", "chunk", "(", "*", "argnames", ",", "concatfunc", "=", "None", ")", ":", "def", "_chunk", "(", "func", ")", ":", "depth", "=", "[", "s", ".", "function", "for", "s", "in", "stack", "(", ")", "]", ".", "index", "(", "'<module>'", ")", "f_globals", "=", "getframe", "(", "depth", ")", ".", "f_globals", "# original (unwrapped) function", "orgname", "=", "'_original_'", "+", "func", ".", "__name__", "orgfunc", "=", "dc", ".", "utils", ".", "copy_function", "(", "func", ",", "orgname", ")", "f_globals", "[", "orgname", "]", "=", "orgfunc", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "depth", "=", "[", "s", ".", "function", "for", "s", "in", "stack", "(", ")", "]", ".", "index", "(", "'<module>'", ")", "f_globals", "=", "getframe", "(", "depth", ")", ".", "f_globals", "# parse args and kwargs", "params", "=", "signature", "(", "func", ")", ".", "parameters", "for", "i", ",", "(", "key", ",", "val", ")", "in", "enumerate", "(", "params", ".", "items", "(", ")", ")", ":", "if", "not", "val", ".", "kind", "==", "Parameter", ".", "POSITIONAL_OR_KEYWORD", ":", "break", "try", ":", "kwargs", ".", "update", "(", "{", "key", ":", "args", "[", "i", "]", "}", ")", "except", "IndexError", ":", "kwargs", ".", "setdefault", "(", "key", ",", "val", ".", "default", ")", "# n_chunks and n_processes", "n_chunks", "=", "DEFAULT_N_CHUNKS", "n_processes", "=", "MAX_WORKERS", "if", "argnames", ":", "length", "=", "len", "(", "kwargs", "[", "argnames", "[", "0", "]", "]", ")", "if", "'numchunk'", "in", "kwargs", ":", "n_chunks", "=", "kwargs", ".", "pop", "(", "'numchunk'", ")", "elif", "'timechunk'", "in", "kwargs", ":", "n_chunks", "=", "round", "(", "length", "/", "kwargs", ".", "pop", "(", "'timechunk'", ")", ")", "elif", "'numchunk'", "in", "f_globals", ":", "n_chunks", "=", "f_globals", "[", "'numchunk'", "]", "elif", "'timechunk'", "in", "f_globals", ":", "n_chunks", "=", "round", "(", "length", "/", "f_globals", "[", "'timechunk'", "]", ")", "if", "'n_processes'", "in", "kwargs", ":", "n_processes", "=", "kwargs", ".", "pop", "(", "'n_processes'", ")", "elif", "'n_processes'", "in", "f_globals", ":", "n_processes", "=", "f_globals", "[", "'n_processes'", "]", "# make chunked args", "chunks", "=", "{", "}", "for", "name", "in", "argnames", ":", "arg", "=", "kwargs", ".", "pop", "(", "name", ")", "try", ":", "chunks", ".", "update", "(", "{", "name", ":", "np", ".", "array_split", "(", "arg", ",", "n_chunks", ")", "}", ")", "except", "TypeError", ":", "chunks", ".", "update", "(", "{", "name", ":", "np", ".", "tile", "(", "arg", ",", "n_chunks", ")", "}", ")", "# run the function", "futures", "=", "[", "]", "results", "=", "[", "]", "with", "dc", ".", "utils", ".", "one_thread_per_process", "(", ")", ",", "Pool", "(", "n_processes", ")", "as", "p", ":", "for", "i", "in", "range", "(", "n_chunks", ")", ":", "chunk", "=", "{", "key", ":", "val", "[", "i", "]", "for", "key", ",", "val", "in", "chunks", ".", "items", "(", ")", "}", "futures", ".", "append", "(", "p", ".", "submit", "(", "orgfunc", ",", "*", "*", "{", "*", "*", "chunk", ",", "*", "*", "kwargs", "}", ")", ")", "for", "future", "in", "futures", ":", "results", ".", "append", "(", "future", ".", "result", "(", ")", ")", "# make an output", "if", "concatfunc", "is", "not", "None", ":", "return", "concatfunc", "(", "results", ")", "try", ":", "return", "xr", ".", "concat", "(", "results", ",", "'t'", ")", "except", "TypeError", ":", "return", "np", ".", "concatenate", "(", "results", ",", ",", "0", ")", "return", "wrapper", "return", "_chunk" ]
Make a function compatible with multicore chunk processing. This function is intended to be used as a decorator like:: >>> @dc.chunk('array') >>> def func(array): ... # do something ... return newarray >>> >>> result = func(array, timechunk=10) or you can set a global chunk parameter outside the function:: >>> timechunk = 10 >>> result = func(array)
[ "Make", "a", "function", "compatible", "with", "multicore", "chunk", "processing", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/array/decorators.py#L66-L160
deshima-dev/decode
decode/core/__init__.py
BaseAccessor.scalarcoords
def scalarcoords(self):
    """A dictionary of values that don't label any axes (point-like)."""
    return {k: v.values for k, v in self.coords.items() if v.dims == ()}
python
def scalarcoords(self):
    """A dictionary of values that don't label any axes (point-like)."""
    return {k: v.values for k, v in self.coords.items() if v.dims == ()}
[ "def", "scalarcoords", "(", "self", ")", ":", "return", "{", "k", ":", "v", ".", "values", "for", "k", ",", "v", "in", "self", ".", "coords", ".", "items", "(", ")", "if", "v", ".", "dims", "==", "(", ")", "}" ]
A dictionary of values that don't label any axes (point-like).
[ "A", "dictionary", "of", "values", "that", "don", "t", "label", "any", "axes", "(", "point", "-", "like", ")", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/__init__.py#L22-L24
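For context, a standalone illustration of the dims == () test this property relies on, using plain xarray (no decode required):

import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros(3), dims='t',
                  coords={'t': np.arange(3), 'obs_id': 42})  # 'obs_id' labels no axis
scalar = {k: v.values for k, v in da.coords.items() if v.dims == ()}
# scalar == {'obs_id': array(42)}; 't' is excluded because it labels the 't' dimension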
facelessuser/pyspelling
pyspelling/filters/markdown.py
MarkdownFilter.setup
def setup(self):
    """Setup."""

    extensions = []
    extension_configs = {}
    for item in self.config['markdown_extensions']:
        if isinstance(item, str):
            extensions.append(item)
        else:
            k, v = list(item.items())[0]
            extensions.append(k)
            if v is not None:
                extension_configs[k] = v

    self.markdown = markdown.Markdown(extensions=extensions, extension_configs=extension_configs)
python
def setup(self):
    """Setup."""

    extensions = []
    extension_configs = {}
    for item in self.config['markdown_extensions']:
        if isinstance(item, str):
            extensions.append(item)
        else:
            k, v = list(item.items())[0]
            extensions.append(k)
            if v is not None:
                extension_configs[k] = v

    self.markdown = markdown.Markdown(extensions=extensions, extension_configs=extension_configs)
[ "def", "setup", "(", "self", ")", ":", "extensions", "=", "[", "]", "extension_configs", "=", "{", "}", "for", "item", "in", "self", ".", "config", "[", "'markdown_extensions'", "]", ":", "if", "isinstance", "(", "item", ",", "str", ")", ":", "extensions", ".", "append", "(", "item", ")", "else", ":", "k", ",", "v", "=", "list", "(", "item", ".", "items", "(", ")", ")", "[", "0", "]", "extensions", ".", "append", "(", "k", ")", "if", "v", "is", "not", "None", ":", "extension_configs", "[", "k", "]", "=", "v", "self", ".", "markdown", "=", "markdown", ".", "Markdown", "(", "extensions", "=", "extensions", ",", "extension_configs", "=", "extension_configs", ")" ]
Setup.
[ "Setup", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/markdown.py#L23-L36
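The markdown_extensions option parsed by setup() accepts either bare extension names or single-key name-to-config mappings. A hypothetical config snippet showing both shapes (the extension names are illustrative, not pyspelling defaults):

config = {
    'markdown_extensions': [
        'markdown.extensions.smarty',                      # plain name, no config
        {'markdown.extensions.toc': {'permalink': True}},  # name -> config mapping
    ]
}
# setup() splits this into the extensions / extension_configs
# arguments of markdown.Markdown.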
facelessuser/pyspelling
pyspelling/filters/markdown.py
MarkdownFilter._filter
def _filter(self, text):
    """Filter markdown."""

    self.markdown.reset()
    return self.markdown.convert(text)
python
def _filter(self, text):
    """Filter markdown."""

    self.markdown.reset()
    return self.markdown.convert(text)
[ "def", "_filter", "(", "self", ",", "text", ")", ":", "self", ".", "markdown", ".", "reset", "(", ")", "return", "self", ".", "markdown", ".", "convert", "(", "text", ")" ]
Filter markdown.
[ "Filter", "markdown", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/markdown.py#L45-L49
runfalk/spans
benchmark.py
format_sec
def format_sec(s):
    """
    Format seconds in a more human readable way. It supports units down
    to nanoseconds.

    :param s: Float of seconds to format
    :return: String second representation, like 12.4 us
    """
    prefixes = ["", "m", "u", "n"]

    unit = 0
    while s < 1 and unit + 1 < len(prefixes):
        s *= 1000
        unit += 1

    return "{:.1f} {}s".format(s, prefixes[unit])
python
def format_sec(s):
    """
    Format seconds in a more human readable way. It supports units down
    to nanoseconds.

    :param s: Float of seconds to format
    :return: String second representation, like 12.4 us
    """
    prefixes = ["", "m", "u", "n"]

    unit = 0
    while s < 1 and unit + 1 < len(prefixes):
        s *= 1000
        unit += 1

    return "{:.1f} {}s".format(s, prefixes[unit])
[ "def", "format_sec", "(", "s", ")", ":", "prefixes", "=", "[", "\"\"", ",", "\"m\"", ",", "\"u\"", ",", "\"n\"", "]", "unit", "=", "0", "while", "s", "<", "1", "and", "unit", "+", "1", "<", "len", "(", "prefixes", ")", ":", "s", "*=", "1000", "unit", "+=", "1", "return", "\"{:.1f} {}s\"", ".", "format", "(", "s", ",", "prefixes", "[", "unit", "]", ")" ]
Format seconds in a more human readable way. It supports units down to nanoseconds. :param s: Float of seconds to format :return: String second representation, like 12.4 us
[ "Format", "seconds", "in", "a", "more", "human", "readable", "way", ".", "It", "supports", "units", "down", "to", "nanoseconds", "." ]
train
https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/benchmark.py#L6-L22
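Worked examples of format_sec's scaling loop (each division by 1000 steps one prefix down the list):

print(format_sec(1.5))      # '1.5 s'
print(format_sec(1.24e-5))  # '12.4 us'
print(format_sec(3e-9))     # '3.0 ns'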
racker/rackspace-monitoring
rackspace_monitoring/drivers/rackspace.py
RackspaceMonitoringDriver.create_entity
def create_entity(self, **kwargs):
    """
    kwargs expected:
        'who': kwargs.get('who')
        'why': kwargs.get('why')
        'uri': kwargs.get('uri')
        'ip_addresses': kwargs.get('ip_addresses', {})
        'label': kwargs.get('label')
        'agent_id': kwargs.get('agent_id')
        'metadata': kwargs.get('extra', {})
    """
    data = kwargs
    headers = kwargs.get('headers', {})
    if 'headers' in data:
        del data['headers']
    return self._create("/entities", data=data, coerce=self.get_entity,
                        headers=headers)
python
def create_entity(self, **kwargs):
    """
    kwargs expected:
        'who': kwargs.get('who')
        'why': kwargs.get('why')
        'uri': kwargs.get('uri')
        'ip_addresses': kwargs.get('ip_addresses', {})
        'label': kwargs.get('label')
        'agent_id': kwargs.get('agent_id')
        'metadata': kwargs.get('extra', {})
    """
    data = kwargs
    headers = kwargs.get('headers', {})
    if 'headers' in data:
        del data['headers']
    return self._create("/entities", data=data, coerce=self.get_entity,
                        headers=headers)
[ "def", "create_entity", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "kwargs", "headers", "=", "kwargs", ".", "get", "(", "'headers'", ",", "{", "}", ")", "if", "'headers'", "in", "data", ":", "del", "data", "[", "'headers'", "]", "return", "self", ".", "_create", "(", "\"/entities\"", ",", "data", "=", "data", ",", "coerce", "=", "self", ".", "get_entity", ",", "headers", "=", "headers", ")" ]
kwargs expected: 'who': kwargs.get('who') 'why': kwargs.get('why') 'uri': kwargs.get('uri') 'ip_addresses': kwargs.get('ip_addresses', {}) 'label': kwargs.get('label') 'agent_id': kwargs.get('agent_id') 'metadata': kwargs.get('extra', {})
[ "kwargs", "expected", ":", "who", ":", "kwargs", ".", "get", "(", "who", ")", "why", ":", "kwargs", ".", "get", "(", "why", ")", "uri", ":", "kwargs", ".", "get", "(", "uri", ")", "ip_addresses", ":", "kwargs", ".", "get", "(", "ip_addresses", "{}", ")", "label", ":", "kwargs", ".", "get", "(", "label", ")", "agent_id", ":", "kwargs", ".", "get", "(", "agent_id", ")", "metadata", ":", "kwargs", ".", "get", "(", "extra", "{}", ")" ]
train
https://github.com/racker/rackspace-monitoring/blob/8a9929e5fd51826c0a392e21bc55acb2aefe54f7/rackspace_monitoring/drivers/rackspace.py#L846-L861
mrcagney/make_gtfs
make_gtfs/cli.py
make_gtfs
def make_gtfs(source_path, target_path, buffer, ndigits):
    """
    Create a GTFS feed from the files in the directory SOURCE_PATH.
    See the project README for a description of the required source
    files. Save the feed to the file or directory TARGET_PATH.
    If the target path ends in '.zip', then write the feed as a zip
    archive. Otherwise assume the path is a directory, and write the
    feed as a collection of CSV files to that directory, creating the
    directory if it does not exist.

    If a stops file is present, then search within ``buffer`` meters
    on the traffic side of trip paths for stops.

    Round all decimals to ndigits decimal places.

    All distances in the resulting GTFS feed will be in kilometers.
    """
    pfeed = pf.read_protofeed(source_path)
    feed = m.build_feed(pfeed, buffer=buffer)
    gt.write_gtfs(feed, target_path, ndigits=ndigits)
python
def make_gtfs(source_path, target_path, buffer, ndigits):
    """
    Create a GTFS feed from the files in the directory SOURCE_PATH.
    See the project README for a description of the required source
    files. Save the feed to the file or directory TARGET_PATH.
    If the target path ends in '.zip', then write the feed as a zip
    archive. Otherwise assume the path is a directory, and write the
    feed as a collection of CSV files to that directory, creating the
    directory if it does not exist.

    If a stops file is present, then search within ``buffer`` meters
    on the traffic side of trip paths for stops.

    Round all decimals to ndigits decimal places.

    All distances in the resulting GTFS feed will be in kilometers.
    """
    pfeed = pf.read_protofeed(source_path)
    feed = m.build_feed(pfeed, buffer=buffer)
    gt.write_gtfs(feed, target_path, ndigits=ndigits)
[ "def", "make_gtfs", "(", "source_path", ",", "target_path", ",", "buffer", ",", "ndigits", ")", ":", "pfeed", "=", "pf", ".", "read_protofeed", "(", "source_path", ")", "feed", "=", "m", ".", "build_feed", "(", "pfeed", ",", "buffer", "=", "buffer", ")", "gt", ".", "write_gtfs", "(", "feed", ",", "target_path", ",", "ndigits", "=", "ndigits", ")" ]
Create a GTFS feed from the files in the directory SOURCE_PATH. See the project README for a description of the required source files. Save the feed to the file or directory TARGET_PATH. If the target path ends in '.zip', then write the feed as a zip archive. Otherwise assume the path is a directory, and write the feed as a collection of CSV files to that directory, creating the directory if it does not exist. If a stops file is present, then search within ``buffer`` meters on the traffic side of trip paths for stops. Round all decimals to ndigits decimal places. All distances in the resulting GTFS feed will be in kilometers.
[ "Create", "a", "GTFS", "feed", "from", "the", "files", "in", "the", "directory", "SOURCE_PATH", ".", "See", "the", "project", "README", "for", "a", "description", "of", "the", "required", "source", "files", ".", "Save", "the", "feed", "to", "the", "file", "or", "directory", "TARGET_PATH", ".", "If", "the", "target", "path", "ends", "in", ".", "zip", "then", "write", "the", "feed", "as", "a", "zip", "archive", ".", "Otherwise", "assume", "the", "path", "is", "a", "directory", "and", "write", "the", "feed", "as", "a", "collection", "of", "CSV", "files", "to", "that", "directory", "creating", "the", "directory", "if", "it", "does", "not", "exist", "." ]
train
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/cli.py#L18-L37
deshima-dev/decode
decode/utils/ndarray/functions.py
psd
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):
    """Calculate power spectrum density of data.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        ndivide (int): Do averaging (split data into ndivide,
            get psd of each, and average them).
        ax (matplotlib.axes): Axis you want to plot on.
        doplot (bool): Plot how averaging works.
        overlap_half (bool): Split data to half-overlapped regions.

    Returns:
        vk (np.ndarray): Frequency.
        psd (np.ndarray): PSD
    """
    logger = getLogger('decode.utils.ndarray.psd')

    if overlap_half:
        step = int(len(data) / (ndivide + 1))
        size = step * 2
    else:
        step = int(len(data) / ndivide)
        size = step

    if bin(len(data)).count('1') != 1:
        logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))
    size = int(len(data) / ndivide)
    if bin(size).count('1') != 1.:
        if overlap_half:
            logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))
        else:
            logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))

    psd = np.zeros(size)
    T = (size - 1) * dt
    vs = 1 / dt
    vk_ = fftfreq(size, dt)
    vk = vk_[np.where(vk_ >= 0)]

    for i in range(ndivide):
        d = data[i * step:i * step + size]
        if window is None:
            w = np.ones(size)
            corr = 1.0
        else:
            w = window(size)
            corr = np.mean(w**2)
        psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr

    return vk, psd[:len(vk)] / ndivide
python
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):
    """Calculate power spectrum density of data.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        ndivide (int): Do averaging (split data into ndivide,
            get psd of each, and average them).
        ax (matplotlib.axes): Axis you want to plot on.
        doplot (bool): Plot how averaging works.
        overlap_half (bool): Split data to half-overlapped regions.

    Returns:
        vk (np.ndarray): Frequency.
        psd (np.ndarray): PSD
    """
    logger = getLogger('decode.utils.ndarray.psd')

    if overlap_half:
        step = int(len(data) / (ndivide + 1))
        size = step * 2
    else:
        step = int(len(data) / ndivide)
        size = step

    if bin(len(data)).count('1') != 1:
        logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))
    size = int(len(data) / ndivide)
    if bin(size).count('1') != 1.:
        if overlap_half:
            logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))
        else:
            logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))

    psd = np.zeros(size)
    T = (size - 1) * dt
    vs = 1 / dt
    vk_ = fftfreq(size, dt)
    vk = vk_[np.where(vk_ >= 0)]

    for i in range(ndivide):
        d = data[i * step:i * step + size]
        if window is None:
            w = np.ones(size)
            corr = 1.0
        else:
            w = window(size)
            corr = np.mean(w**2)
        psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr

    return vk, psd[:len(vk)] / ndivide
[ "def", "psd", "(", "data", ",", "dt", ",", "ndivide", "=", "1", ",", "window", "=", "hanning", ",", "overlap_half", "=", "False", ")", ":", "logger", "=", "getLogger", "(", "'decode.utils.ndarray.psd'", ")", "if", "overlap_half", ":", "step", "=", "int", "(", "len", "(", "data", ")", "/", "(", "ndivide", "+", "1", ")", ")", "size", "=", "step", "*", "2", "else", ":", "step", "=", "int", "(", "len", "(", "data", ")", "/", "ndivide", ")", "size", "=", "step", "if", "bin", "(", "len", "(", "data", ")", ")", ".", "count", "(", "'1'", ")", "!=", "1", ":", "logger", ".", "warning", "(", "'warning: length of data is not power of 2: {}'", ".", "format", "(", "len", "(", "data", ")", ")", ")", "size", "=", "int", "(", "len", "(", "data", ")", "/", "ndivide", ")", "if", "bin", "(", "size", ")", ".", "count", "(", "'1'", ")", "!=", "1.", ":", "if", "overlap_half", ":", "logger", ".", "warning", "(", "'warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'", ".", "format", "(", "size", ")", ")", "else", ":", "logger", ".", "warning", "(", "'warning: (length of data) / ndivide is not power of 2: {}'", ".", "format", "(", "size", ")", ")", "psd", "=", "np", ".", "zeros", "(", "size", ")", "T", "=", "(", "size", "-", "1", ")", "*", "dt", "vs", "=", "1", "/", "dt", "vk_", "=", "fftfreq", "(", "size", ",", "dt", ")", "vk", "=", "vk_", "[", "np", ".", "where", "(", "vk_", ">=", "0", ")", "]", "for", "i", "in", "range", "(", "ndivide", ")", ":", "d", "=", "data", "[", "i", "*", "step", ":", "i", "*", "step", "+", "size", "]", "if", "window", "is", "None", ":", "w", "=", "np", ".", "ones", "(", "size", ")", "corr", "=", "1.0", "else", ":", "w", "=", "window", "(", "size", ")", "corr", "=", "np", ".", "mean", "(", "w", "**", "2", ")", "psd", "=", "psd", "+", "2", "*", "(", "np", ".", "abs", "(", "fft", "(", "d", "*", "w", ")", ")", ")", "**", "2", "/", "size", "*", "dt", "/", "corr", "return", "vk", ",", "psd", "[", ":", "len", "(", "vk", ")", "]", "/", "ndivide" ]
Calculate power spectrum density of data. Args: data (np.ndarray): Input data. dt (float): Time between each data. ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them). ax (matplotlib.axes): Axis you want to plot on. doplot (bool): Plot how averaging works. overlap_half (bool): Split data to half-overlapped regions. Returns: vk (np.ndarray): Frequency. psd (np.ndarray): PSD
[ "Calculate", "power", "spectrum", "density", "of", "data", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/utils/ndarray/functions.py#L20-L68
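A quick sanity check of psd (hedged: assumes numpy plus the hanning, fft, and fftfreq names that functions.py imports; the 50 Hz tone is an arbitrary test signal):

import numpy as np
from numpy import hanning
from numpy.fft import fft, fftfreq

dt = 1e-3                          # 1 kHz sampling
t = np.arange(4096) * dt
data = np.sin(2 * np.pi * 50 * t)  # 50 Hz tone
vk, p = psd(data, dt, ndivide=4)   # size = 1024 per chunk, a power of 2
print(vk[np.argmax(p)])            # peak lands near 50 Hz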
deshima-dev/decode
decode/utils/ndarray/functions.py
allan_variance
def allan_variance(data, dt, tmax=10):
    """Calculate Allan variance.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        tmax (float): Maximum time.

    Returns:
        vk (np.ndarray): Frequency.
        allanvar (np.ndarray): Allan variance.
    """
    allanvar = []
    nmax = len(data) if len(data) < tmax / dt else int(tmax / dt)
    for i in range(1, nmax+1):
        databis = data[len(data) % i:]
        y = databis.reshape(len(data)//i, i).mean(axis=1)
        allanvar.append(((y[1:] - y[:-1])**2).mean() / 2)
    return dt * np.arange(1, nmax+1), np.array(allanvar)
python
def allan_variance(data, dt, tmax=10):
    """Calculate Allan variance.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        tmax (float): Maximum time.

    Returns:
        vk (np.ndarray): Frequency.
        allanvar (np.ndarray): Allan variance.
    """
    allanvar = []
    nmax = len(data) if len(data) < tmax / dt else int(tmax / dt)
    for i in range(1, nmax+1):
        databis = data[len(data) % i:]
        y = databis.reshape(len(data)//i, i).mean(axis=1)
        allanvar.append(((y[1:] - y[:-1])**2).mean() / 2)
    return dt * np.arange(1, nmax+1), np.array(allanvar)
[ "def", "allan_variance", "(", "data", ",", "dt", ",", "tmax", "=", "10", ")", ":", "allanvar", "=", "[", "]", "nmax", "=", "len", "(", "data", ")", "if", "len", "(", "data", ")", "<", "tmax", "/", "dt", "else", "int", "(", "tmax", "/", "dt", ")", "for", "i", "in", "range", "(", "1", ",", "nmax", "+", "1", ")", ":", "databis", "=", "data", "[", "len", "(", "data", ")", "%", "i", ":", "]", "y", "=", "databis", ".", "reshape", "(", "len", "(", "data", ")", "//", "i", ",", "i", ")", ".", "mean", "(", "axis", "=", "1", ")", "allanvar", ".", "append", "(", "(", "(", "y", "[", "1", ":", "]", "-", "y", "[", ":", "-", "1", "]", ")", "**", "2", ")", ".", "mean", "(", ")", "/", "2", ")", "return", "dt", "*", "np", ".", "arange", "(", "1", ",", "nmax", "+", "1", ")", ",", "np", ".", "array", "(", "allanvar", ")" ]
Calculate Allan variance. Args: data (np.ndarray): Input data. dt (float): Time between each data. tmax (float): Maximum time. Returns: vk (np.ndarray): Frequency. allanvar (np.ndarray): Allan variance.
[ "Calculate", "Allan", "variance", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/utils/ndarray/functions.py#L71-L89
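A sketch exercising allan_variance on white noise (assumption: only numpy is needed; for unit-variance white noise the Allan variance should fall off roughly as dt/tau):

import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal(10000)
tau, avar = allan_variance(data, dt=0.1, tmax=5)  # nmax = tmax/dt = 50 averaging lengths
print(avar[0])   # close to 1.0 for unit-variance noise
print(avar[-1])  # roughly avar[0] / 50, i.e. the 1/tau falloff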
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.discover
def discover(cls, path, depth="0"):
    """Discover a list of collections under the given ``path``.

    If ``depth`` is "0", only the actual object under ``path`` is
    returned.

    If ``depth`` is anything but "0", it is considered as "1" and
    direct children are included in the result.

    The ``path`` is relative.

    The root collection "/" must always exist.

    """
    # Path should already be sanitized
    attributes = _get_attributes_from_path(path)

    try:
        if len(attributes) == 3:
            # If an item, create a collection for the item.
            item = attributes.pop()
            path = "/".join(attributes)

            collection = cls(path, _is_principal(path))
            yield collection.get(item)

            return

        collection = cls(path, _is_principal(path))
    except api.exceptions.DoesNotExist:
        return

    yield collection

    if depth == "0":
        return

    if len(attributes) == 0:
        yield cls(posixpath.join(path, cls.user), principal=True)
    elif len(attributes) == 1:
        for journal in cls.etesync.list():
            if journal.collection.TYPE in (api.AddressBook.TYPE, api.Calendar.TYPE, api.TaskList.TYPE):
                yield cls(posixpath.join(path, journal.uid), principal=False)
    elif len(attributes) == 2:
        for item in collection.list():
            yield collection.get(item)
    elif len(attributes) > 2:
        raise RuntimeError("Found more than one attribute. Shouldn't happen")
python
def discover(cls, path, depth="0"):
    """Discover a list of collections under the given ``path``.

    If ``depth`` is "0", only the actual object under ``path`` is
    returned.

    If ``depth`` is anything but "0", it is considered as "1" and
    direct children are included in the result.

    The ``path`` is relative.

    The root collection "/" must always exist.

    """
    # Path should already be sanitized
    attributes = _get_attributes_from_path(path)

    try:
        if len(attributes) == 3:
            # If an item, create a collection for the item.
            item = attributes.pop()
            path = "/".join(attributes)

            collection = cls(path, _is_principal(path))
            yield collection.get(item)

            return

        collection = cls(path, _is_principal(path))
    except api.exceptions.DoesNotExist:
        return

    yield collection

    if depth == "0":
        return

    if len(attributes) == 0:
        yield cls(posixpath.join(path, cls.user), principal=True)
    elif len(attributes) == 1:
        for journal in cls.etesync.list():
            if journal.collection.TYPE in (api.AddressBook.TYPE, api.Calendar.TYPE, api.TaskList.TYPE):
                yield cls(posixpath.join(path, journal.uid), principal=False)
    elif len(attributes) == 2:
        for item in collection.list():
            yield collection.get(item)
    elif len(attributes) > 2:
        raise RuntimeError("Found more than one attribute. Shouldn't happen")
[ "def", "discover", "(", "cls", ",", "path", ",", "depth", "=", "\"0\"", ")", ":", "# Path should already be sanitized", "attributes", "=", "_get_attributes_from_path", "(", "path", ")", "try", ":", "if", "len", "(", "attributes", ")", "==", "3", ":", "# If an item, create a collection for the item.", "item", "=", "attributes", ".", "pop", "(", ")", "path", "=", "\"/\"", ".", "join", "(", "attributes", ")", "collection", "=", "cls", "(", "path", ",", "_is_principal", "(", "path", ")", ")", "yield", "collection", ".", "get", "(", "item", ")", "return", "collection", "=", "cls", "(", "path", ",", "_is_principal", "(", "path", ")", ")", "except", "api", ".", "exceptions", ".", "DoesNotExist", ":", "return", "yield", "collection", "if", "depth", "==", "\"0\"", ":", "return", "if", "len", "(", "attributes", ")", "==", "0", ":", "yield", "cls", "(", "posixpath", ".", "join", "(", "path", ",", "cls", ".", "user", ")", ",", "principal", "=", "True", ")", "elif", "len", "(", "attributes", ")", "==", "1", ":", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "if", "journal", ".", "collection", ".", "TYPE", "in", "(", "api", ".", "AddressBook", ".", "TYPE", ",", "api", ".", "Calendar", ".", "TYPE", ",", "api", ".", "TaskList", ".", "TYPE", ")", ":", "yield", "cls", "(", "posixpath", ".", "join", "(", "path", ",", "journal", ".", "uid", ")", ",", "principal", "=", "False", ")", "elif", "len", "(", "attributes", ")", "==", "2", ":", "for", "item", "in", "collection", ".", "list", "(", ")", ":", "yield", "collection", ".", "get", "(", "item", ")", "elif", "len", "(", "attributes", ")", ">", "2", ":", "raise", "RuntimeError", "(", "\"Found more than one attribute. Shouldn't happen\"", ")" ]
Discover a list of collections under the given ``path``. If ``depth`` is "0", only the actual object under ``path`` is returned. If ``depth`` is anything but "0", it is considered as "1" and direct children are included in the result. The ``path`` is relative. The root collection "/" must always exist.
[ "Discover", "a", "list", "of", "collections", "under", "the", "given", "path", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L185-L232
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.etag
def etag(self):
    """Encoded as quoted-string (see RFC 2616)."""
    if self.is_fake:
        return

    entry = None
    for entry in self.journal.list():
        pass

    return entry.uid if entry is not None else hashlib.sha256(b"").hexdigest()
python
def etag(self):
    """Encoded as quoted-string (see RFC 2616)."""
    if self.is_fake:
        return

    entry = None
    for entry in self.journal.list():
        pass

    return entry.uid if entry is not None else hashlib.sha256(b"").hexdigest()
[ "def", "etag", "(", "self", ")", ":", "if", "self", ".", "is_fake", ":", "return", "entry", "=", "None", "for", "entry", "in", "self", ".", "journal", ".", "list", "(", ")", ":", "pass", "return", "entry", ".", "uid", "if", "entry", "is", "not", "None", "else", "hashlib", ".", "sha256", "(", "b\"\"", ")", ".", "hexdigest", "(", ")" ]
Encoded as quoted-string (see RFC 2616).
[ "Encoded", "as", "quoted", "-", "string", "(", "see", "RFC", "2616", ")", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L235-L244
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.create_collection
def create_collection(cls, href, collection=None, props=None):
    """Create a collection.

    If the collection already exists and neither ``collection`` nor
    ``props`` are set, this method shouldn't do anything. Otherwise
    the existing collection must be replaced.

    ``collection`` is a list of vobject components.

    ``props`` are metadata values for the collection.

    ``props["tag"]`` is the type of collection (VCALENDAR or
    VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from
    the collection.

    """
    # Path should already be sanitized
    attributes = _get_attributes_from_path(href)
    if len(attributes) <= 1:
        raise PrincipalNotAllowedError

    # Try to infer tag
    if not props:
        props = {}
    if not props.get("tag") and collection:
        props["tag"] = collection[0].name

    # Try first getting the collection if exists, or create a new one otherwise.
    try:
        self = cls(href, principal=False, tag=props.get("tag"))
    except api.exceptions.DoesNotExist:
        user_path = posixpath.join('/', cls.user)
        collection_name = hashlib.sha256(str(time.time()).encode()).hexdigest()
        sane_path = posixpath.join(user_path, collection_name)

        if props.get("tag") == "VCALENDAR":
            inst = api.Calendar.create(cls.etesync, collection_name, None)
        elif props.get("tag") == "VADDRESSBOOK":
            inst = api.AddressBook.create(cls.etesync, collection_name, None)
        else:
            raise RuntimeError("Bad tag.")

        inst.save()
        self = cls(sane_path, principal=False)

    self.set_meta(props)

    if collection:
        if props.get("tag") == "VCALENDAR":
            collection, = collection
            items = []
            for content in ("vevent", "vtodo", "vjournal"):
                items.extend(getattr(collection, "%s_list" % content, []))
            items_by_uid = groupby(sorted(items, key=get_uid), get_uid)

            vobject_items = {}
            for uid, items in items_by_uid:
                new_collection = vobject.iCalendar()
                for item in items:
                    new_collection.add(item)
                href = self._find_available_file_name(vobject_items.get)
                vobject_items[href] = new_collection

            self.upload_all_nonatomic(vobject_items)
        elif props.get("tag") == "VADDRESSBOOK":
            vobject_items = {}
            for card in collection:
                href = self._find_available_file_name(vobject_items.get)
                vobject_items[href] = card

            self.upload_all_nonatomic(vobject_items)

    return self
python
def create_collection(cls, href, collection=None, props=None):
    """Create a collection.

    If the collection already exists and neither ``collection`` nor
    ``props`` are set, this method shouldn't do anything. Otherwise
    the existing collection must be replaced.

    ``collection`` is a list of vobject components.

    ``props`` are metadata values for the collection.

    ``props["tag"]`` is the type of collection (VCALENDAR or
    VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from
    the collection.

    """
    # Path should already be sanitized
    attributes = _get_attributes_from_path(href)
    if len(attributes) <= 1:
        raise PrincipalNotAllowedError

    # Try to infer tag
    if not props:
        props = {}
    if not props.get("tag") and collection:
        props["tag"] = collection[0].name

    # Try first getting the collection if exists, or create a new one otherwise.
    try:
        self = cls(href, principal=False, tag=props.get("tag"))
    except api.exceptions.DoesNotExist:
        user_path = posixpath.join('/', cls.user)
        collection_name = hashlib.sha256(str(time.time()).encode()).hexdigest()
        sane_path = posixpath.join(user_path, collection_name)

        if props.get("tag") == "VCALENDAR":
            inst = api.Calendar.create(cls.etesync, collection_name, None)
        elif props.get("tag") == "VADDRESSBOOK":
            inst = api.AddressBook.create(cls.etesync, collection_name, None)
        else:
            raise RuntimeError("Bad tag.")

        inst.save()
        self = cls(sane_path, principal=False)

    self.set_meta(props)

    if collection:
        if props.get("tag") == "VCALENDAR":
            collection, = collection
            items = []
            for content in ("vevent", "vtodo", "vjournal"):
                items.extend(getattr(collection, "%s_list" % content, []))
            items_by_uid = groupby(sorted(items, key=get_uid), get_uid)

            vobject_items = {}
            for uid, items in items_by_uid:
                new_collection = vobject.iCalendar()
                for item in items:
                    new_collection.add(item)
                href = self._find_available_file_name(vobject_items.get)
                vobject_items[href] = new_collection

            self.upload_all_nonatomic(vobject_items)
        elif props.get("tag") == "VADDRESSBOOK":
            vobject_items = {}
            for card in collection:
                href = self._find_available_file_name(vobject_items.get)
                vobject_items[href] = card

            self.upload_all_nonatomic(vobject_items)

    return self
[ "def", "create_collection", "(", "cls", ",", "href", ",", "collection", "=", "None", ",", "props", "=", "None", ")", ":", "# Path should already be sanitized", "attributes", "=", "_get_attributes_from_path", "(", "href", ")", "if", "len", "(", "attributes", ")", "<=", "1", ":", "raise", "PrincipalNotAllowedError", "# Try to infer tag", "if", "not", "props", ":", "props", "=", "{", "}", "if", "not", "props", ".", "get", "(", "\"tag\"", ")", "and", "collection", ":", "props", "[", "\"tag\"", "]", "=", "collection", "[", "0", "]", ".", "name", "# Try first getting the collection if exists, or create a new one otherwise.", "try", ":", "self", "=", "cls", "(", "href", ",", "principal", "=", "False", ",", "tag", "=", "props", ".", "get", "(", "\"tag\"", ")", ")", "except", "api", ".", "exceptions", ".", "DoesNotExist", ":", "user_path", "=", "posixpath", ".", "join", "(", "'/'", ",", "cls", ".", "user", ")", "collection_name", "=", "hashlib", ".", "sha256", "(", "str", "(", "time", ".", "time", "(", ")", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "sane_path", "=", "posixpath", ".", "join", "(", "user_path", ",", "collection_name", ")", "if", "props", ".", "get", "(", "\"tag\"", ")", "==", "\"VCALENDAR\"", ":", "inst", "=", "api", ".", "Calendar", ".", "create", "(", "cls", ".", "etesync", ",", "collection_name", ",", "None", ")", "elif", "props", ".", "get", "(", "\"tag\"", ")", "==", "\"VADDRESSBOOK\"", ":", "inst", "=", "api", ".", "AddressBook", ".", "create", "(", "cls", ".", "etesync", ",", "collection_name", ",", "None", ")", "else", ":", "raise", "RuntimeError", "(", "\"Bad tag.\"", ")", "inst", ".", "save", "(", ")", "self", "=", "cls", "(", "sane_path", ",", "principal", "=", "False", ")", "self", ".", "set_meta", "(", "props", ")", "if", "collection", ":", "if", "props", ".", "get", "(", "\"tag\"", ")", "==", "\"VCALENDAR\"", ":", "collection", ",", "=", "collection", "items", "=", "[", "]", "for", "content", "in", "(", "\"vevent\"", ",", "\"vtodo\"", ",", "\"vjournal\"", ")", ":", "items", ".", "extend", "(", "getattr", "(", "collection", ",", "\"%s_list\"", "%", "content", ",", "[", "]", ")", ")", "items_by_uid", "=", "groupby", "(", "sorted", "(", "items", ",", "key", "=", "get_uid", ")", ",", "get_uid", ")", "vobject_items", "=", "{", "}", "for", "uid", ",", "items", "in", "items_by_uid", ":", "new_collection", "=", "vobject", ".", "iCalendar", "(", ")", "for", "item", "in", "items", ":", "new_collection", ".", "add", "(", "item", ")", "href", "=", "self", ".", "_find_available_file_name", "(", "vobject_items", ".", "get", ")", "vobject_items", "[", "href", "]", "=", "new_collection", "self", ".", "upload_all_nonatomic", "(", "vobject_items", ")", "elif", "props", ".", "get", "(", "\"tag\"", ")", "==", "\"VADDRESSBOOK\"", ":", "vobject_items", "=", "{", "}", "for", "card", "in", "collection", ":", "href", "=", "self", ".", "_find_available_file_name", "(", "vobject_items", ".", "get", ")", "vobject_items", "[", "href", "]", "=", "card", "self", ".", "upload_all_nonatomic", "(", "vobject_items", ")", "return", "self" ]
Create a collection. If the collection already exists and neither ``collection`` nor ``props`` are set, this method shouldn't do anything. Otherwise the existing collection must be replaced. ``collection`` is a list of vobject components. ``props`` are metadata values for the collection. ``props["tag"]`` is the type of collection (VCALENDAR or VADDRESSBOOK). If the key ``tag`` is missing, it is guessed from the collection.
[ "Create", "a", "collection", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L247-L319
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.sync
def sync(self, old_token=None):
    """Get the current sync token and changed items for synchronization.

    ``old_token`` an old sync token which is used as the base of the
    delta update. If sync token is missing, all items are returned.
    ValueError is raised for invalid or old tokens.

    """
    # FIXME: Actually implement
    token = "http://radicale.org/ns/sync/%s" % self.etag.strip("\"")
    if old_token:
        raise ValueError("Sync token are not supported (you can ignore this warning)")
    return token, self.list()
python
def sync(self, old_token=None):
    """Get the current sync token and changed items for synchronization.

    ``old_token`` an old sync token which is used as the base of the
    delta update. If sync token is missing, all items are returned.
    ValueError is raised for invalid or old tokens.

    """
    # FIXME: Actually implement
    token = "http://radicale.org/ns/sync/%s" % self.etag.strip("\"")
    if old_token:
        raise ValueError("Sync token are not supported (you can ignore this warning)")
    return token, self.list()
[ "def", "sync", "(", "self", ",", "old_token", "=", "None", ")", ":", "# FIXME: Actually implement", "token", "=", "\"http://radicale.org/ns/sync/%s\"", "%", "self", ".", "etag", ".", "strip", "(", "\"\\\"\"", ")", "if", "old_token", ":", "raise", "ValueError", "(", "\"Sync token are not supported (you can ignore this warning)\"", ")", "return", "token", ",", "self", ".", "list", "(", ")" ]
Get the current sync token and changed items for synchronization. ``old_token`` an old sync token which is used as the base of the delta update. If sync token is missing, all items are returned. ValueError is raised for invalid or old tokens.
[ "Get", "the", "current", "sync", "token", "and", "changed", "items", "for", "synchronization", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L321-L332
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.list
def list(self):
    """List collection items."""
    if self.is_fake:
        return

    for item in self.collection.list():
        yield item.uid + self.content_suffix
python
def list(self):
    """List collection items."""
    if self.is_fake:
        return

    for item in self.collection.list():
        yield item.uid + self.content_suffix
[ "def", "list", "(", "self", ")", ":", "if", "self", ".", "is_fake", ":", "return", "for", "item", "in", "self", ".", "collection", ".", "list", "(", ")", ":", "yield", "item", ".", "uid", "+", "self", ".", "content_suffix" ]
List collection items.
[ "List", "collection", "items", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L334-L340
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.get
def get(self, href):
    """Fetch a single item."""
    if self.is_fake:
        return

    uid = _trim_suffix(href, ('.ics', '.ical', '.vcf'))
    etesync_item = self.collection.get(uid)
    if etesync_item is None:
        return None
    try:
        item = vobject.readOne(etesync_item.content)
    except Exception as e:
        raise RuntimeError("Failed to parse item %r in %r" %
                           (href, self.path)) from e
    # FIXME: Make this sensible
    last_modified = time.strftime(
        "%a, %d %b %Y %H:%M:%S GMT",
        time.gmtime(time.time()))
    return EteSyncItem(self, item, href, last_modified=last_modified, etesync_item=etesync_item)
python
def get(self, href):
    """Fetch a single item."""
    if self.is_fake:
        return

    uid = _trim_suffix(href, ('.ics', '.ical', '.vcf'))
    etesync_item = self.collection.get(uid)
    if etesync_item is None:
        return None
    try:
        item = vobject.readOne(etesync_item.content)
    except Exception as e:
        raise RuntimeError("Failed to parse item %r in %r" %
                           (href, self.path)) from e
    # FIXME: Make this sensible
    last_modified = time.strftime(
        "%a, %d %b %Y %H:%M:%S GMT",
        time.gmtime(time.time()))
    return EteSyncItem(self, item, href, last_modified=last_modified, etesync_item=etesync_item)
[ "def", "get", "(", "self", ",", "href", ")", ":", "if", "self", ".", "is_fake", ":", "return", "uid", "=", "_trim_suffix", "(", "href", ",", "(", "'.ics'", ",", "'.ical'", ",", "'.vcf'", ")", ")", "etesync_item", "=", "self", ".", "collection", ".", "get", "(", "uid", ")", "if", "etesync_item", "is", "None", ":", "return", "None", "try", ":", "item", "=", "vobject", ".", "readOne", "(", "etesync_item", ".", "content", ")", "except", "Exception", "as", "e", ":", "raise", "RuntimeError", "(", "\"Failed to parse item %r in %r\"", "%", "(", "href", ",", "self", ".", "path", ")", ")", "from", "e", "# FIXME: Make this sensible", "last_modified", "=", "time", ".", "strftime", "(", "\"%a, %d %b %Y %H:%M:%S GMT\"", ",", "time", ".", "gmtime", "(", "time", ".", "time", "(", ")", ")", ")", "return", "EteSyncItem", "(", "self", ",", "item", ",", "href", ",", "last_modified", "=", "last_modified", ",", "etesync_item", "=", "etesync_item", ")" ]
Fetch a single item.
[ "Fetch", "a", "single", "item", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L342-L361
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.upload
def upload(self, href, vobject_item):
    """Upload a new or replace an existing item."""
    if self.is_fake:
        return

    content = vobject_item.serialize()
    try:
        item = self.get(href)
        etesync_item = item.etesync_item
        etesync_item.content = content
    except api.exceptions.DoesNotExist:
        etesync_item = self.collection.get_content_class().create(self.collection, content)
    etesync_item.save()
    return self.get(href)
python
def upload(self, href, vobject_item):
    """Upload a new or replace an existing item."""
    if self.is_fake:
        return

    content = vobject_item.serialize()
    try:
        item = self.get(href)
        etesync_item = item.etesync_item
        etesync_item.content = content
    except api.exceptions.DoesNotExist:
        etesync_item = self.collection.get_content_class().create(self.collection, content)
    etesync_item.save()
    return self.get(href)
[ "def", "upload", "(", "self", ",", "href", ",", "vobject_item", ")", ":", "if", "self", ".", "is_fake", ":", "return", "content", "=", "vobject_item", ".", "serialize", "(", ")", "try", ":", "item", "=", "self", ".", "get", "(", "href", ")", "etesync_item", "=", "item", ".", "etesync_item", "etesync_item", ".", "content", "=", "content", "except", "api", ".", "exceptions", ".", "DoesNotExist", ":", "etesync_item", "=", "self", ".", "collection", ".", "get_content_class", "(", ")", ".", "create", "(", "self", ".", "collection", ",", "content", ")", "etesync_item", ".", "save", "(", ")", "return", "self", ".", "get", "(", "href", ")" ]
Upload a new or replace an existing item.
[ "Upload", "a", "new", "or", "replace", "an", "existing", "item", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L363-L378
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.delete
def delete(self, href=None): """Delete an item. When ``href`` is ``None``, delete the collection. """ if self.is_fake: return if href is None: self.collection.delete() return item = self.get(href) if item is None: raise ComponentNotFoundError(href) item.etesync_item.delete()
python
def delete(self, href=None): """Delete an item. When ``href`` is ``None``, delete the collection. """ if self.is_fake: return if href is None: self.collection.delete() return item = self.get(href) if item is None: raise ComponentNotFoundError(href) item.etesync_item.delete()
[ "def", "delete", "(", "self", ",", "href", "=", "None", ")", ":", "if", "self", ".", "is_fake", ":", "return", "if", "href", "is", "None", ":", "self", ".", "collection", ".", "delete", "(", ")", "return", "item", "=", "self", ".", "get", "(", "href", ")", "if", "item", "is", "None", ":", "raise", "ComponentNotFoundError", "(", "href", ")", "item", ".", "etesync_item", ".", "delete", "(", ")" ]
Delete an item. When ``href`` is ``None``, delete the collection.
[ "Delete", "an", "item", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L380-L397
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.get_meta
def get_meta(self, key=None): """Get metadata value for collection.""" if self.is_fake: return {} if key == "tag": return self.tag elif key is None: ret = {} for key in self.journal.info.keys(): ret[key] = self.meta_mappings.map_get(self.journal.info, key)[1] return ret else: key, value = self.meta_mappings.map_get(self.journal.info, key) return value
python
def get_meta(self, key=None): """Get metadata value for collection.""" if self.is_fake: return {} if key == "tag": return self.tag elif key is None: ret = {} for key in self.journal.info.keys(): ret[key] = self.meta_mappings.map_get(self.journal.info, key)[1] return ret else: key, value = self.meta_mappings.map_get(self.journal.info, key) return value
[ "def", "get_meta", "(", "self", ",", "key", "=", "None", ")", ":", "if", "self", ".", "is_fake", ":", "return", "{", "}", "if", "key", "==", "\"tag\"", ":", "return", "self", ".", "tag", "elif", "key", "is", "None", ":", "ret", "=", "{", "}", "for", "key", "in", "self", ".", "journal", ".", "info", ".", "keys", "(", ")", ":", "ret", "[", "key", "]", "=", "self", ".", "meta_mappings", ".", "map_get", "(", "self", ".", "journal", ".", "info", ",", "key", ")", "[", "1", "]", "return", "ret", "else", ":", "key", ",", "value", "=", "self", ".", "meta_mappings", ".", "map_get", "(", "self", ".", "journal", ".", "info", ",", "key", ")", "return", "value" ]
Get metadata value for collection.
[ "Get", "metadata", "value", "for", "collection", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L399-L413
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.set_meta
def set_meta(self, _props): """Set metadata values for collection.""" if self.is_fake: return props = {} for key, value in _props.items(): key, value = self.meta_mappings.map_set(key, value) props[key] = value # Pop out tag which we don't want props.pop("tag", None) self.journal.update_info({}) self.journal.update_info(props) self.journal.save()
python
def set_meta(self, _props): """Set metadata values for collection.""" if self.is_fake: return props = {} for key, value in _props.items(): key, value = self.meta_mappings.map_set(key, value) props[key] = value # Pop out tag which we don't want props.pop("tag", None) self.journal.update_info({}) self.journal.update_info(props) self.journal.save()
[ "def", "set_meta", "(", "self", ",", "_props", ")", ":", "if", "self", ".", "is_fake", ":", "return", "props", "=", "{", "}", "for", "key", ",", "value", "in", "_props", ".", "items", "(", ")", ":", "key", ",", "value", "=", "self", ".", "meta_mappings", ".", "map_set", "(", "key", ",", "value", ")", "props", "[", "key", "]", "=", "value", "# Pop out tag which we don't want", "props", ".", "pop", "(", "\"tag\"", ",", "None", ")", "self", ".", "journal", ".", "update_info", "(", "{", "}", ")", "self", ".", "journal", ".", "update_info", "(", "props", ")", "self", ".", "journal", ".", "save", "(", ")" ]
Set metadata values for collection.
[ "Set", "metadata", "values", "for", "collection", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L415-L430
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.set_meta_all
def set_meta_all(self, props): """Set metadata values for collection. ``props`` a dict with values for properties. """ delta_props = self.get_meta() for key in delta_props.keys(): if key not in props: delta_props[key] = None delta_props.update(props) self.set_meta(delta_props)
python
def set_meta_all(self, props): """Set metadata values for collection. ``props`` a dict with values for properties. """ delta_props = self.get_meta() for key in delta_props.keys(): if key not in props: delta_props[key] = None delta_props.update(props) self.set_meta(delta_props)
[ "def", "set_meta_all", "(", "self", ",", "props", ")", ":", "delta_props", "=", "self", ".", "get_meta", "(", ")", "for", "key", "in", "delta_props", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "props", ":", "delta_props", "[", "key", "]", "=", "None", "delta_props", ".", "update", "(", "props", ")", "self", ".", "set_meta", "(", "delta_props", ")" ]
Set metadata values for collection. ``props`` a dict with values for properties.
[ "Set", "metadata", "values", "for", "collection", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L433-L444
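The delta computation in `set_meta_all` can be read in isolation: every key present in the current metadata but absent from the incoming props is explicitly nulled, then the new values win. A small runnable sketch of that merge (names are illustrative):

def merge_props(current, new_props):
    # Keys missing from new_props are reset to None before the update,
    # so set_meta sees a full replacement rather than a partial patch.
    delta = dict(current)
    for key in delta:
        if key not in new_props:
            delta[key] = None
    delta.update(new_props)
    return delta

print(merge_props({'displayname': 'Old', 'color': '#fff'}, {'displayname': 'New'}))
# {'displayname': 'New', 'color': None}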
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.last_modified
def last_modified(self): """Get the HTTP-datetime of when the collection was modified.""" # FIXME: Make this sensible last_modified = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(time.time())) return last_modified
python
def last_modified(self): """Get the HTTP-datetime of when the collection was modified.""" # FIXME: Make this sensible last_modified = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(time.time())) return last_modified
[ "def", "last_modified", "(", "self", ")", ":", "# FIXME: Make this sensible", "last_modified", "=", "time", ".", "strftime", "(", "\"%a, %d %b %Y %H:%M:%S GMT\"", ",", "time", ".", "gmtime", "(", "time", ".", "time", "(", ")", ")", ")", "return", "last_modified" ]
Get the HTTP-datetime of when the collection was modified.
[ "Get", "the", "HTTP", "-", "datetime", "of", "when", "the", "collection", "was", "modified", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L447-L453
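The `FIXME` notes that the timestamp is simply "now" rendered as an HTTP-date. For what it's worth, the stdlib can produce the same format without a hand-written strftime pattern; a sketch (not what the module itself uses):

import time
from email.utils import formatdate

# usegmt=True yields the HTTP-date style ending in 'GMT', matching the
# "%a, %d %b %Y %H:%M:%S GMT" pattern used above.
print(formatdate(time.time(), usegmt=True))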
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.serialize
def serialize(self): """Get the unicode string representing the whole collection.""" import datetime items = [] time_begin = datetime.datetime.now() for href in self.list(): items.append(self.get(href).item) time_end = datetime.datetime.now() self.logger.info( "Collection read %d items in %s sec from %s", len(items), (time_end - time_begin).total_seconds(), self.path) if self.get_meta("tag") == "VCALENDAR": collection = vobject.iCalendar() for item in items: for content in ("vevent", "vtodo", "vjournal"): if content in item.contents: for item_part in getattr(item, "%s_list" % content): collection.add(item_part) break return collection.serialize() elif self.get_meta("tag") == "VADDRESSBOOK": return "".join([item.serialize() for item in items]) return ""
python
def serialize(self): """Get the unicode string representing the whole collection.""" import datetime items = [] time_begin = datetime.datetime.now() for href in self.list(): items.append(self.get(href).item) time_end = datetime.datetime.now() self.logger.info( "Collection read %d items in %s sec from %s", len(items), (time_end - time_begin).total_seconds(), self.path) if self.get_meta("tag") == "VCALENDAR": collection = vobject.iCalendar() for item in items: for content in ("vevent", "vtodo", "vjournal"): if content in item.contents: for item_part in getattr(item, "%s_list" % content): collection.add(item_part) break return collection.serialize() elif self.get_meta("tag") == "VADDRESSBOOK": return "".join([item.serialize() for item in items]) return ""
[ "def", "serialize", "(", "self", ")", ":", "import", "datetime", "items", "=", "[", "]", "time_begin", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "for", "href", "in", "self", ".", "list", "(", ")", ":", "items", ".", "append", "(", "self", ".", "get", "(", "href", ")", ".", "item", ")", "time_end", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Collection read %d items in %s sec from %s\"", ",", "len", "(", "items", ")", ",", "(", "time_end", "-", "time_begin", ")", ".", "total_seconds", "(", ")", ",", "self", ".", "path", ")", "if", "self", ".", "get_meta", "(", "\"tag\"", ")", "==", "\"VCALENDAR\"", ":", "collection", "=", "vobject", ".", "iCalendar", "(", ")", "for", "item", "in", "items", ":", "for", "content", "in", "(", "\"vevent\"", ",", "\"vtodo\"", ",", "\"vjournal\"", ")", ":", "if", "content", "in", "item", ".", "contents", ":", "for", "item_part", "in", "getattr", "(", "item", ",", "\"%s_list\"", "%", "content", ")", ":", "collection", ".", "add", "(", "item_part", ")", "break", "return", "collection", ".", "serialize", "(", ")", "elif", "self", ".", "get_meta", "(", "\"tag\"", ")", "==", "\"VADDRESSBOOK\"", ":", "return", "\"\"", ".", "join", "(", "[", "item", ".", "serialize", "(", ")", "for", "item", "in", "items", "]", ")", "return", "\"\"" ]
Get the unicode string representing the whole collection.
[ "Get", "the", "unicode", "string", "representing", "the", "whole", "collection", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L455-L477
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
Collection.acquire_lock
def acquire_lock(cls, mode, user=None): """Set a context manager to lock the whole storage. ``mode`` must either be "r" for shared access or "w" for exclusive access. ``user`` is the name of the logged in user or empty. """ if not user: return with EteSyncCache.lock: cls.user = user cls.etesync = cls._get_etesync_for_user(cls.user) if cls._should_sync(): cls._mark_sync() cls.etesync.get_or_create_user_info(force_fetch=True) cls.etesync.sync_journal_list() for journal in cls.etesync.list(): cls.etesync.pull_journal(journal.uid) yield if cls.etesync.journal_list_is_dirty(): cls.etesync.sync_journal_list() for journal in cls.etesync.list(): if cls.etesync.journal_is_dirty(journal.uid): cls.etesync.sync_journal(journal.uid) cls.etesync = None cls.user = None
python
def acquire_lock(cls, mode, user=None): """Set a context manager to lock the whole storage. ``mode`` must either be "r" for shared access or "w" for exclusive access. ``user`` is the name of the logged in user or empty. """ if not user: return with EteSyncCache.lock: cls.user = user cls.etesync = cls._get_etesync_for_user(cls.user) if cls._should_sync(): cls._mark_sync() cls.etesync.get_or_create_user_info(force_fetch=True) cls.etesync.sync_journal_list() for journal in cls.etesync.list(): cls.etesync.pull_journal(journal.uid) yield if cls.etesync.journal_list_is_dirty(): cls.etesync.sync_journal_list() for journal in cls.etesync.list(): if cls.etesync.journal_is_dirty(journal.uid): cls.etesync.sync_journal(journal.uid) cls.etesync = None cls.user = None
[ "def", "acquire_lock", "(", "cls", ",", "mode", ",", "user", "=", "None", ")", ":", "if", "not", "user", ":", "return", "with", "EteSyncCache", ".", "lock", ":", "cls", ".", "user", "=", "user", "cls", ".", "etesync", "=", "cls", ".", "_get_etesync_for_user", "(", "cls", ".", "user", ")", "if", "cls", ".", "_should_sync", "(", ")", ":", "cls", ".", "_mark_sync", "(", ")", "cls", ".", "etesync", ".", "get_or_create_user_info", "(", "force_fetch", "=", "True", ")", "cls", ".", "etesync", ".", "sync_journal_list", "(", ")", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "cls", ".", "etesync", ".", "pull_journal", "(", "journal", ".", "uid", ")", "yield", "if", "cls", ".", "etesync", ".", "journal_list_is_dirty", "(", ")", ":", "cls", ".", "etesync", ".", "sync_journal_list", "(", ")", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "if", "cls", ".", "etesync", ".", "journal_is_dirty", "(", "journal", ".", "uid", ")", ":", "cls", ".", "etesync", ".", "sync_journal", "(", "journal", ".", "uid", ")", "cls", ".", "etesync", "=", "None", "cls", ".", "user", "=", "None" ]
Set a context manager to lock the whole storage. ``mode`` must either be "r" for shared access or "w" for exclusive access. ``user`` is the name of the logged in user or empty.
[ "Set", "a", "context", "manager", "to", "lock", "the", "whole", "storage", "." ]
train
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L505-L536
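Structurally, `acquire_lock` is a generator meant to be driven as a context manager: everything before the `yield` runs on entry (pull), everything after on exit (push), all under one global lock. A stripped-down sketch of that shape, with the EteSync calls elided:

from contextlib import contextmanager
from threading import Lock

_lock = Lock()

@contextmanager
def acquire(user):
    with _lock:
        # entry: fetch remote state here (sync_journal_list / pull_journal)
        yield
        # exit: push any dirty journals back here

with acquire('user@example.com'):
    pass  # storage operations happen while the lock is held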
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.validate_options
def validate_options(self, k, v): """Validate options.""" super().validate_options(k, v) if k == 'charset_size': if v not in (1, 2, 4): raise ValueError("{}: '{}' is an unsupported charset size".format(self.__class__.__name__, v)) elif k == 'wide_charset_size': if v not in (2, 4): raise ValueError("{}: '{}' is an unsupported wide charset size".format(self.__class__.__name__, v)) elif k in ('exec_charset', 'wide_exec_charset'): # See if parsing fails. self.get_encoding_name(v) elif k == 'string_types': if RE_VALID_STRING_TYPES.match(v) is None: raise ValueError("{}: '{}' does not define valid string types".format(self.__class__.__name__, v))
python
def validate_options(self, k, v): """Validate options.""" super().validate_options(k, v) if k == 'charset_size': if v not in (1, 2, 4): raise ValueError("{}: '{}' is an unsupported charset size".format(self.__class__.__name__, v)) elif k == 'wide_charset_size': if v not in (2, 4): raise ValueError("{}: '{}' is an unsupported wide charset size".format(self.__class__.__name__, v)) elif k in ('exec_charset', 'wide_exec_charset'): # See if parsing fails. self.get_encoding_name(v) elif k == 'string_types': if RE_VALID_STRING_TYPES.match(v) is None: raise ValueError("{}: '{}' does not define valid string types".format(self.__class__.__name__, v))
[ "def", "validate_options", "(", "self", ",", "k", ",", "v", ")", ":", "super", "(", ")", ".", "validate_options", "(", "k", ",", "v", ")", "if", "k", "==", "'charset_size'", ":", "if", "v", "not", "in", "(", "1", ",", "2", ",", "4", ")", ":", "raise", "ValueError", "(", "\"{}: '{}' is an unsupported charset size\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "v", ")", ")", "elif", "k", "==", "'wide_charset_size'", ":", "if", "v", "not", "in", "(", "2", ",", "4", ")", ":", "raise", "ValueError", "(", "\"{}: '{}' is an unsupported wide charset size\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "v", ")", ")", "elif", "k", "in", "(", "'exec_charset'", ",", "'wide_exec_charset'", ")", ":", "# See if parsing fails.", "self", ".", "get_encoding_name", "(", "v", ")", "elif", "k", "==", "'string_types'", ":", "if", "RE_VALID_STRING_TYPES", ".", "match", "(", "v", ")", "is", "None", ":", "raise", "ValueError", "(", "\"{}: '{}' does not define valid string types\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "v", ")", ")" ]
Validate options.
[ "Validate", "options", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L130-L145
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.match_string
def match_string(self, stype): """Match string type.""" return not (stype - self.string_types) or bool(stype & self.wild_string_types)
python
def match_string(self, stype): """Match string type.""" return not (stype - self.string_types) or bool(stype & self.wild_string_types)
[ "def", "match_string", "(", "self", ",", "stype", ")", ":", "return", "not", "(", "stype", "-", "self", ".", "string_types", ")", "or", "bool", "(", "stype", "&", "self", ".", "wild_string_types", ")" ]
Match string type.
[ "Match", "string", "type", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L169-L172
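Since string types are sets of flag characters here, `match_string` is plain set algebra: accept when the flags are a subset of the allowed types, or when any flag lands in the wildcard set. The same predicate, exercised standalone:

def match(stype, allowed, wild):
    # Subset test, short-circuited by a wildcard intersection.
    return not (stype - allowed) or bool(stype & wild)

print(match({'r', 'u'}, {'r', 'u', 'l'}, set()))  # True: subset of allowed
print(match({'b'}, {'r'}, {'b'}))                 # True: wildcard hit
print(match({'b'}, {'r'}, set()))                 # False: 'b' not allowed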
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.get_encoding_name
def get_encoding_name(self, name): """Get encoding name.""" name = codecs.lookup( filters.PYTHON_ENCODING_NAMES.get(name, name).lower() ).name if name.startswith(('utf-32', 'utf-16')): name = name[:6] if CURRENT_ENDIAN == BIG_ENDIAN: name += '-be' else: name += '-le' if name == 'utf-8-sig': name = 'utf-8' return name
python
def get_encoding_name(self, name): """Get encoding name.""" name = codecs.lookup( filters.PYTHON_ENCODING_NAMES.get(name, name).lower() ).name if name.startswith(('utf-32', 'utf-16')): name = name[:6] if CURRENT_ENDIAN == BIG_ENDIAN: name += '-be' else: name += '-le' if name == 'utf-8-sig': name = 'utf-8' return name
[ "def", "get_encoding_name", "(", "self", ",", "name", ")", ":", "name", "=", "codecs", ".", "lookup", "(", "filters", ".", "PYTHON_ENCODING_NAMES", ".", "get", "(", "name", ",", "name", ")", ".", "lower", "(", ")", ")", ".", "name", "if", "name", ".", "startswith", "(", "(", "'utf-32'", ",", "'utf-16'", ")", ")", ":", "name", "=", "name", "[", ":", "6", "]", "if", "CURRENT_ENDIAN", "==", "BIG_ENDIAN", ":", "name", "+=", "'-be'", "else", ":", "name", "+=", "'-le'", "if", "name", "==", "'utf-8-sig'", ":", "name", "=", "'utf-8'", "return", "name" ]
Get encoding name.
[ "Get", "encoding", "name", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L179-L195
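Most of `get_encoding_name` is delegation to `codecs.lookup`, which canonicalizes aliases; the method then pins byte order onto UTF-16/32 names and folds `utf-8-sig` into plain `utf-8`. A self-contained approximation (it skips the filter module's own alias table):

import codecs
import sys

def normalize(name):
    name = codecs.lookup(name.lower()).name
    if name.startswith(('utf-32', 'utf-16')):
        # Pin the host byte order, as the filter does with CURRENT_ENDIAN.
        name = name[:6] + ('-be' if sys.byteorder == 'big' else '-le')
    return 'utf-8' if name == 'utf-8-sig' else name

print(normalize('UTF16'))      # utf-16-le on little-endian hosts
print(normalize('utf-8-sig'))  # utf-8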
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.setup
def setup(self): """Setup.""" self.blocks = self.config['block_comments'] self.lines = self.config['line_comments'] self.group_comments = self.config['group_comments'] self.prefix = self.config['prefix'] self.generic_mode = self.config['generic_mode'] self.strings = self.config['strings'] self.trigraphs = self.config['trigraphs'] self.decode_escapes = self.config['decode_escapes'] self.charset_size = self.config['charset_size'] self.wide_charset_size = self.config['wide_charset_size'] self.exec_charset = self.get_encoding_name(self.config['exec_charset']) self.wide_exec_charset = self.get_encoding_name(self.config['wide_exec_charset']) self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types']) if not self.generic_mode: self.pattern = RE_CPP
python
def setup(self): """Setup.""" self.blocks = self.config['block_comments'] self.lines = self.config['line_comments'] self.group_comments = self.config['group_comments'] self.prefix = self.config['prefix'] self.generic_mode = self.config['generic_mode'] self.strings = self.config['strings'] self.trigraphs = self.config['trigraphs'] self.decode_escapes = self.config['decode_escapes'] self.charset_size = self.config['charset_size'] self.wide_charset_size = self.config['wide_charset_size'] self.exec_charset = self.get_encoding_name(self.config['exec_charset']) self.wide_exec_charset = self.get_encoding_name(self.config['wide_exec_charset']) self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types']) if not self.generic_mode: self.pattern = RE_CPP
[ "def", "setup", "(", "self", ")", ":", "self", ".", "blocks", "=", "self", ".", "config", "[", "'block_comments'", "]", "self", ".", "lines", "=", "self", ".", "config", "[", "'line_comments'", "]", "self", ".", "group_comments", "=", "self", ".", "config", "[", "'group_comments'", "]", "self", ".", "prefix", "=", "self", ".", "config", "[", "'prefix'", "]", "self", ".", "generic_mode", "=", "self", ".", "config", "[", "'generic_mode'", "]", "self", ".", "strings", "=", "self", ".", "config", "[", "'strings'", "]", "self", ".", "trigraphs", "=", "self", ".", "config", "[", "'trigraphs'", "]", "self", ".", "decode_escapes", "=", "self", ".", "config", "[", "'decode_escapes'", "]", "self", ".", "charset_size", "=", "self", ".", "config", "[", "'charset_size'", "]", "self", ".", "wide_charset_size", "=", "self", ".", "config", "[", "'wide_charset_size'", "]", "self", ".", "exec_charset", "=", "self", ".", "get_encoding_name", "(", "self", ".", "config", "[", "'exec_charset'", "]", ")", "self", ".", "wide_exec_charset", "=", "self", ".", "get_encoding_name", "(", "self", ".", "config", "[", "'wide_exec_charset'", "]", ")", "self", ".", "string_types", ",", "self", ".", "wild_string_types", "=", "self", ".", "eval_string_type", "(", "self", ".", "config", "[", "'string_types'", "]", ")", "if", "not", "self", ".", "generic_mode", ":", "self", ".", "pattern", "=", "RE_CPP" ]
Setup.
[ "Setup", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L197-L214
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_block
def evaluate_block(self, groups): """Evaluate block comments.""" if self.blocks: self.block_comments.append([groups['block'][2:-2], self.line_num, self.current_encoding])
python
def evaluate_block(self, groups): """Evaluate block comments.""" if self.blocks: self.block_comments.append([groups['block'][2:-2], self.line_num, self.current_encoding])
[ "def", "evaluate_block", "(", "self", ",", "groups", ")", ":", "if", "self", ".", "blocks", ":", "self", ".", "block_comments", ".", "append", "(", "[", "groups", "[", "'block'", "]", "[", "2", ":", "-", "2", "]", ",", "self", ".", "line_num", ",", "self", ".", "current_encoding", "]", ")" ]
Evaluate block comments.
[ "Evaluate", "block", "comments", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L216-L220
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_inline_tail
def evaluate_inline_tail(self, groups): """Evaluate inline comments at the tail of source code.""" if self.lines: self.line_comments.append([groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding])
python
def evaluate_inline_tail(self, groups): """Evaluate inline comments at the tail of source code.""" if self.lines: self.line_comments.append([groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding])
[ "def", "evaluate_inline_tail", "(", "self", ",", "groups", ")", ":", "if", "self", ".", "lines", ":", "self", ".", "line_comments", ".", "append", "(", "[", "groups", "[", "'line'", "]", "[", "2", ":", "]", ".", "replace", "(", "'\\\\\\n'", ",", "''", ")", ",", "self", ".", "line_num", ",", "self", ".", "current_encoding", "]", ")" ]
Evaluate inline comments at the tail of source code.
[ "Evaluate", "inline", "comments", "at", "the", "tail", "of", "source", "code", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L222-L226
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_inline
def evaluate_inline(self, groups): """Evaluate inline comments on their own lines.""" # Consecutive lines with only comments with same leading whitespace # will be captured as a single block. if self.lines: if ( self.group_comments and self.line_num == self.prev_line + 1 and groups['leading_space'] == self.leading ): self.line_comments[-1][0] += '\n' + groups['line'][2:].replace('\\\n', '') else: self.line_comments.append( [groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding] ) self.leading = groups['leading_space'] self.prev_line = self.line_num
python
def evaluate_inline(self, groups): """Evaluate inline comments on their own lines.""" # Consecutive lines with only comments with same leading whitespace # will be captured as a single block. if self.lines: if ( self.group_comments and self.line_num == self.prev_line + 1 and groups['leading_space'] == self.leading ): self.line_comments[-1][0] += '\n' + groups['line'][2:].replace('\\\n', '') else: self.line_comments.append( [groups['line'][2:].replace('\\\n', ''), self.line_num, self.current_encoding] ) self.leading = groups['leading_space'] self.prev_line = self.line_num
[ "def", "evaluate_inline", "(", "self", ",", "groups", ")", ":", "# Consecutive lines with only comments with same leading whitespace", "# will be captured as a single block.", "if", "self", ".", "lines", ":", "if", "(", "self", ".", "group_comments", "and", "self", ".", "line_num", "==", "self", ".", "prev_line", "+", "1", "and", "groups", "[", "'leading_space'", "]", "==", "self", ".", "leading", ")", ":", "self", ".", "line_comments", "[", "-", "1", "]", "[", "0", "]", "+=", "'\\n'", "+", "groups", "[", "'line'", "]", "[", "2", ":", "]", ".", "replace", "(", "'\\\\\\n'", ",", "''", ")", "else", ":", "self", ".", "line_comments", ".", "append", "(", "[", "groups", "[", "'line'", "]", "[", "2", ":", "]", ".", "replace", "(", "'\\\\\\n'", ",", "''", ")", ",", "self", ".", "line_num", ",", "self", ".", "current_encoding", "]", ")", "self", ".", "leading", "=", "groups", "[", "'leading_space'", "]", "self", ".", "prev_line", "=", "self", ".", "line_num" ]
Evaluate inline comments on their own lines.
[ "Evaluate", "inline", "comments", "on", "their", "own", "lines", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L228-L245
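The grouping rule in `evaluate_inline` is easy to check in isolation: consecutive comment-only lines with identical leading whitespace collapse into one record. A sketch over (line_no, indent, text) tuples:

def group(records):
    grouped = []
    prev_line, prev_indent = None, None
    for line_no, indent, text in records:
        # An adjacent line with matching indentation extends the last group.
        if grouped and line_no == prev_line + 1 and indent == prev_indent:
            grouped[-1][0] += '\n' + text
        else:
            grouped.append([text, line_no])
        prev_line, prev_indent = line_no, indent
    return grouped

print(group([(1, '  ', 'first'), (2, '  ', 'second'), (4, '', 'third')]))
# [['first\nsecond', 1], ['third', 4]]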
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_unicode
def evaluate_unicode(self, value): """Evaluate Unicode.""" if value.startswith('u8'): length = 1 value = value[3:-1] encoding = 'utf-8' elif value.startswith('u'): length = 2 value = value[2:-1] encoding = 'utf-16' else: length = 4 value = value[2:-1] encoding = 'utf-32' def replace_unicode(m): """Replace Unicode.""" groups = m.groupdict() esc = m.group(0) value = esc if groups.get('special'): # Handle basic string escapes. value = BACK_SLASH_TRANSLATION[esc] elif groups.get('char') or groups.get('oct'): # Handle character escapes. integer = int(esc[2:], 16) if groups.get('char') else int(esc[1:], 8) if ( (length < 2 and integer <= 0xFF) or (length < 4 and integer <= 0xFFFF) or (length >= 4 and integer <= 0x10FFFF) ): try: value = chr(integer) except Exception: value = ' ' return value return self.norm_nl(RE_UESC.sub(replace_unicode, value).replace('\x00', '\n')), encoding
python
def evaluate_unicode(self, value): """Evaluate Unicode.""" if value.startswith('u8'): length = 1 value = value[3:-1] encoding = 'utf-8' elif value.startswith('u'): length = 2 value = value[2:-1] encoding = 'utf-16' else: length = 4 value = value[2:-1] encoding = 'utf-32' def replace_unicode(m): """Replace Unicode.""" groups = m.groupdict() esc = m.group(0) value = esc if groups.get('special'): # Handle basic string escapes. value = BACK_SLASH_TRANSLATION[esc] elif groups.get('char') or groups.get('oct'): # Handle character escapes. integer = int(esc[2:], 16) if groups.get('char') else int(esc[1:], 8) if ( (length < 2 and integer <= 0xFF) or (length < 4 and integer <= 0xFFFF) or (length >= 4 and integer <= 0x10FFFF) ): try: value = chr(integer) except Exception: value = ' ' return value return self.norm_nl(RE_UESC.sub(replace_unicode, value).replace('\x00', '\n')), encoding
[ "def", "evaluate_unicode", "(", "self", ",", "value", ")", ":", "if", "value", ".", "startswith", "(", "'u8'", ")", ":", "length", "=", "1", "value", "=", "value", "[", "3", ":", "-", "1", "]", "encoding", "=", "'utf-8'", "elif", "value", ".", "startswith", "(", "'u'", ")", ":", "length", "=", "2", "value", "=", "value", "[", "2", ":", "-", "1", "]", "encoding", "=", "'utf-16'", "else", ":", "length", "=", "4", "value", "=", "value", "[", "2", ":", "-", "1", "]", "encoding", "=", "'utf-32'", "def", "replace_unicode", "(", "m", ")", ":", "\"\"\"Replace Unicode.\"\"\"", "groups", "=", "m", ".", "groupdict", "(", ")", "esc", "=", "m", ".", "group", "(", "0", ")", "value", "=", "esc", "if", "groups", ".", "get", "(", "'special'", ")", ":", "# Handle basic string escapes.", "value", "=", "BACK_SLASH_TRANSLATION", "[", "esc", "]", "elif", "groups", ".", "get", "(", "'char'", ")", "or", "groups", ".", "get", "(", "'oct'", ")", ":", "# Handle character escapes.", "integer", "=", "int", "(", "esc", "[", "2", ":", "]", ",", "16", ")", "if", "groups", ".", "get", "(", "'char'", ")", "else", "int", "(", "esc", "[", "1", ":", "]", ",", "8", ")", "if", "(", "(", "length", "<", "2", "and", "integer", "<=", "0xFF", ")", "or", "(", "length", "<", "4", "and", "integer", "<=", "0xFFFF", ")", "or", "(", "length", ">=", "4", "and", "integer", "<=", "0x10FFFF", ")", ")", ":", "try", ":", "value", "=", "chr", "(", "integer", ")", "except", "Exception", ":", "value", "=", "' '", "return", "value", "return", "self", ".", "norm_nl", "(", "RE_UESC", ".", "sub", "(", "replace_unicode", ",", "value", ")", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")", ")", ",", "encoding" ]
Evaluate Unicode.
[ "Evaluate", "Unicode", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L247-L286
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_normal
def evaluate_normal(self, value): """Evaluate normal string.""" if value.startswith('L'): size = self.wide_charset_size encoding = self.wide_exec_charset value = value[2:-1] pack = BYTE_STORE[size | CURRENT_ENDIAN] else: size = self.charset_size encoding = self.exec_charset value = value[1:-1] pack = BYTE_STORE[size | CURRENT_ENDIAN] max_value = 2 ** (size * 8) - 1 def replace(m): """Replace.""" groups = m.groupdict() esc = m.group(0) value = esc if groups.get('special'): # Handle basic string escapes. value = BACK_SLASH_TRANSLATION[esc] elif groups.get('char'): # Handle hex/Unicode character escapes if value.startswith('\\x'): values = [int(x, 16) for x in value[2:].split('\\x')] for i, v in enumerate(values): if v <= max_value: values[i] = struct.pack(pack, v) else: values[i] = b' ' value = b''.join(values).decode(encoding, errors='replace') else: integer = int(value[2:], 16) value = chr(integer).encode(encoding, errors='replace').decode(encoding) elif groups.get('oct'): # Handle octal escapes. values = [int(x, 8) for x in value[1:].split('\\')] for i, v in enumerate(values): if v <= max_value: values[i] = struct.pack(pack, v) else: values[i] = b' ' value = b''.join(values).decode(encoding, errors='replace') return value return self.norm_nl(RE_ESC.sub(replace, value)).replace('\x00', '\n'), encoding
python
def evaluate_normal(self, value): """Evaluate normal string.""" if value.startswith('L'): size = self.wide_charset_size encoding = self.wide_exec_charset value = value[2:-1] pack = BYTE_STORE[size | CURRENT_ENDIAN] else: size = self.charset_size encoding = self.exec_charset value = value[1:-1] pack = BYTE_STORE[size | CURRENT_ENDIAN] max_value = 2 ** (size * 8) - 1 def replace(m): """Replace.""" groups = m.groupdict() esc = m.group(0) value = esc if groups.get('special'): # Handle basic string escapes. value = BACK_SLASH_TRANSLATION[esc] elif groups.get('char'): # Handle hex/Unicode character escapes if value.startswith('\\x'): values = [int(x, 16) for x in value[2:].split('\\x')] for i, v in enumerate(values): if v <= max_value: values[i] = struct.pack(pack, v) else: values[i] = b' ' value = b''.join(values).decode(encoding, errors='replace') else: integer = int(value[2:], 16) value = chr(integer).encode(encoding, errors='replace').decode(encoding) elif groups.get('oct'): # Handle octal escapes. values = [int(x, 8) for x in value[1:].split('\\')] for i, v in enumerate(values): if v <= max_value: values[i] = struct.pack(pack, v) else: values[i] = b' ' value = b''.join(values).decode(encoding, errors='replace') return value return self.norm_nl(RE_ESC.sub(replace, value)).replace('\x00', '\n'), encoding
[ "def", "evaluate_normal", "(", "self", ",", "value", ")", ":", "if", "value", ".", "startswith", "(", "'L'", ")", ":", "size", "=", "self", ".", "wide_charset_size", "encoding", "=", "self", ".", "wide_exec_charset", "value", "=", "value", "[", "2", ":", "-", "1", "]", "pack", "=", "BYTE_STORE", "[", "size", "|", "CURRENT_ENDIAN", "]", "else", ":", "size", "=", "self", ".", "charset_size", "encoding", "=", "self", ".", "exec_charset", "value", "=", "value", "[", "1", ":", "-", "1", "]", "pack", "=", "BYTE_STORE", "[", "size", "|", "CURRENT_ENDIAN", "]", "max_value", "=", "2", "**", "(", "size", "*", "8", ")", "-", "1", "def", "replace", "(", "m", ")", ":", "\"\"\"Replace.\"\"\"", "groups", "=", "m", ".", "groupdict", "(", ")", "esc", "=", "m", ".", "group", "(", "0", ")", "value", "=", "esc", "if", "groups", ".", "get", "(", "'special'", ")", ":", "# Handle basic string escapes.", "value", "=", "BACK_SLASH_TRANSLATION", "[", "esc", "]", "elif", "groups", ".", "get", "(", "'char'", ")", ":", "# Handle hex/Unicode character escapes", "if", "value", ".", "startswith", "(", "'\\\\x'", ")", ":", "values", "=", "[", "int", "(", "x", ",", "16", ")", "for", "x", "in", "value", "[", "2", ":", "]", ".", "split", "(", "'\\\\x'", ")", "]", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ":", "if", "v", "<=", "max_value", ":", "values", "[", "i", "]", "=", "struct", ".", "pack", "(", "pack", ",", "v", ")", "else", ":", "values", "[", "i", "]", "=", "b' '", "value", "=", "b''", ".", "join", "(", "values", ")", ".", "decode", "(", "encoding", ",", "errors", "=", "'replace'", ")", "else", ":", "integer", "=", "int", "(", "value", "[", "2", ":", "]", ",", "16", ")", "value", "=", "chr", "(", "integer", ")", ".", "encode", "(", "encoding", ",", "errors", "=", "'replace'", ")", ".", "decode", "(", "encoding", ")", "elif", "groups", ".", "get", "(", "'oct'", ")", ":", "# Handle octal escapes.", "values", "=", "[", "int", "(", "x", ",", "8", ")", "for", "x", "in", "value", "[", "1", ":", "]", ".", "split", "(", "'\\\\'", ")", "]", "for", "i", ",", "v", "in", "enumerate", "(", "values", ")", ":", "if", "v", "<=", "max_value", ":", "values", "[", "i", "]", "=", "struct", ".", "pack", "(", "pack", ",", "v", ")", "else", ":", "values", "[", "i", "]", "=", "b' '", "value", "=", "b''", ".", "join", "(", "values", ")", ".", "decode", "(", "encoding", ",", "errors", "=", "'replace'", ")", "return", "value", "return", "self", ".", "norm_nl", "(", "RE_ESC", ".", "sub", "(", "replace", ",", "value", ")", ")", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")", ",", "encoding" ]
Evaluate normal string.
[ "Evaluate", "normal", "string", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L288-L336
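The pack-then-decode trick in `evaluate_normal` is worth seeing on its own: each `\xNN` escape becomes one fixed-width code unit, out-of-range values degrade to a space, and the byte string is decoded with the execution charset. A sketch for the narrow 1-byte case ('B' is the corresponding struct format):

import struct

def decode_hex_run(escaped, encoding='utf-8', size=1, pack='B'):
    max_value = 2 ** (size * 8) - 1
    values = [int(x, 16) for x in escaped[2:].split('\\x')]
    units = [struct.pack(pack, v) if v <= max_value else b' ' for v in values]
    return b''.join(units).decode(encoding, errors='replace')

print(decode_hex_run('\\x48\\x69'))  # 'Hi'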
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate_strings
def evaluate_strings(self, groups): """Evaluate strings.""" if self.strings: encoding = self.current_encoding if self.generic_mode: # Generic assumes no escapes rules. self.quoted_strings.append([groups['strings'][1:-1], self.line_num, encoding]) else: value = groups['strings'] stype = set() if value.endswith('"'): stype = self.get_string_type(value[:value.index('"')].lower().replace('8', '')) if not self.match_string(stype) or value.endswith("'"): return if 'r' in stype: # Handle raw strings. We can handle even if decoding is disabled. olen = len(groups.get('raw')) + len(groups.get('delim')) + 2 clen = len(groups.get('delim')) + 2 value = self.norm_nl(value[olen:-clen].replace('\x00', '\n')) elif ( self.decode_escapes and not value.startswith(('\'', '"')) and 'l' not in stype ): # Decode Unicode string. May have added unsupported chars, so use `UTF-8`. value, encoding = self.evaluate_unicode(value) elif self.decode_escapes: # Decode normal strings. value, encoding = self.evaluate_normal(value) else: # Don't decode and just return string content. value = self.norm_nl(value[value.index('"') + 1:-1]).replace('\x00', '\n') if value: self.quoted_strings.append([value, self.line_num, encoding])
python
def evaluate_strings(self, groups): """Evaluate strings.""" if self.strings: encoding = self.current_encoding if self.generic_mode: # Generic assumes no escapes rules. self.quoted_strings.append([groups['strings'][1:-1], self.line_num, encoding]) else: value = groups['strings'] stype = set() if value.endswith('"'): stype = self.get_string_type(value[:value.index('"')].lower().replace('8', '')) if not self.match_string(stype) or value.endswith("'"): return if 'r' in stype: # Handle raw strings. We can handle even if decoding is disabled. olen = len(groups.get('raw')) + len(groups.get('delim')) + 2 clen = len(groups.get('delim')) + 2 value = self.norm_nl(value[olen:-clen].replace('\x00', '\n')) elif ( self.decode_escapes and not value.startswith(('\'', '"')) and 'l' not in stype ): # Decode Unicode string. May have added unsupported chars, so use `UTF-8`. value, encoding = self.evaluate_unicode(value) elif self.decode_escapes: # Decode normal strings. value, encoding = self.evaluate_normal(value) else: # Don't decode and just return string content. value = self.norm_nl(value[value.index('"') + 1:-1]).replace('\x00', '\n') if value: self.quoted_strings.append([value, self.line_num, encoding])
[ "def", "evaluate_strings", "(", "self", ",", "groups", ")", ":", "if", "self", ".", "strings", ":", "encoding", "=", "self", ".", "current_encoding", "if", "self", ".", "generic_mode", ":", "# Generic assumes no escapes rules.", "self", ".", "quoted_strings", ".", "append", "(", "[", "groups", "[", "'strings'", "]", "[", "1", ":", "-", "1", "]", ",", "self", ".", "line_num", ",", "encoding", "]", ")", "else", ":", "value", "=", "groups", "[", "'strings'", "]", "stype", "=", "set", "(", ")", "if", "value", ".", "endswith", "(", "'\"'", ")", ":", "stype", "=", "self", ".", "get_string_type", "(", "value", "[", ":", "value", ".", "index", "(", "'\"'", ")", "]", ".", "lower", "(", ")", ".", "replace", "(", "'8'", ",", "''", ")", ")", "if", "not", "self", ".", "match_string", "(", "stype", ")", "or", "value", ".", "endswith", "(", "\"'\"", ")", ":", "return", "if", "'r'", "in", "stype", ":", "# Handle raw strings. We can handle even if decoding is disabled.", "olen", "=", "len", "(", "groups", ".", "get", "(", "'raw'", ")", ")", "+", "len", "(", "groups", ".", "get", "(", "'delim'", ")", ")", "+", "2", "clen", "=", "len", "(", "groups", ".", "get", "(", "'delim'", ")", ")", "+", "2", "value", "=", "self", ".", "norm_nl", "(", "value", "[", "olen", ":", "-", "clen", "]", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")", ")", "elif", "(", "self", ".", "decode_escapes", "and", "not", "value", ".", "startswith", "(", "(", "'\\''", ",", "'\"'", ")", ")", "and", "'l'", "not", "in", "stype", ")", ":", "# Decode Unicode string. May have added unsupported chars, so use `UTF-8`.", "value", ",", "encoding", "=", "self", ".", "evaluate_unicode", "(", "value", ")", "elif", "self", ".", "decode_escapes", ":", "# Decode normal strings.", "value", ",", "encoding", "=", "self", ".", "evaluate_normal", "(", "value", ")", "else", ":", "# Don't decode and just return string content.", "value", "=", "self", ".", "norm_nl", "(", "value", "[", "value", ".", "index", "(", "'\"'", ")", "+", "1", ":", "-", "1", "]", ")", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")", "if", "value", ":", "self", ".", "quoted_strings", ".", "append", "(", "[", "value", ",", "self", ".", "line_num", ",", "encoding", "]", ")" ]
Evaluate strings.
[ "Evaluate", "strings", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L338-L372
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.evaluate
def evaluate(self, m): """Search for comments.""" g = m.groupdict() if g["strings"]: self.evaluate_strings(g) self.line_num += g['strings'].count('\n') elif g["code"]: self.line_num += g["code"].count('\n') else: if g['block']: self.evaluate_block(g) elif g['start'] is None: self.evaluate_inline_tail(g) else: self.evaluate_inline(g) self.line_num += g['comments'].count('\n')
python
def evaluate(self, m): """Search for comments.""" g = m.groupdict() if g["strings"]: self.evaluate_strings(g) self.line_num += g['strings'].count('\n') elif g["code"]: self.line_num += g["code"].count('\n') else: if g['block']: self.evaluate_block(g) elif g['start'] is None: self.evaluate_inline_tail(g) else: self.evaluate_inline(g) self.line_num += g['comments'].count('\n')
[ "def", "evaluate", "(", "self", ",", "m", ")", ":", "g", "=", "m", ".", "groupdict", "(", ")", "if", "g", "[", "\"strings\"", "]", ":", "self", ".", "evaluate_strings", "(", "g", ")", "self", ".", "line_num", "+=", "g", "[", "'strings'", "]", ".", "count", "(", "'\\n'", ")", "elif", "g", "[", "\"code\"", "]", ":", "self", ".", "line_num", "+=", "g", "[", "\"code\"", "]", ".", "count", "(", "'\\n'", ")", "else", ":", "if", "g", "[", "'block'", "]", ":", "self", ".", "evaluate_block", "(", "g", ")", "elif", "g", "[", "'start'", "]", "is", "None", ":", "self", ".", "evaluate_inline_tail", "(", "g", ")", "else", ":", "self", ".", "evaluate_inline", "(", "g", ")", "self", ".", "line_num", "+=", "g", "[", "'comments'", "]", ".", "count", "(", "'\\n'", ")" ]
Search for comments.
[ "Search", "for", "comments", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L374-L390
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.extend_src_text
def extend_src_text(self, content, context, text_list, category): """Extend the source text list with the gathered text data.""" prefix = self.prefix + '-' if self.prefix else '' for comment, line, encoding in text_list: content.append( filters.SourceText( textwrap.dedent(comment), "%s (%d)" % (context, line), encoding, prefix + category ) )
python
def extend_src_text(self, content, context, text_list, category): """Extend the source text list with the gathered text data.""" prefix = self.prefix + '-' if self.prefix else '' for comment, line, encoding in text_list: content.append( filters.SourceText( textwrap.dedent(comment), "%s (%d)" % (context, line), encoding, prefix + category ) )
[ "def", "extend_src_text", "(", "self", ",", "content", ",", "context", ",", "text_list", ",", "category", ")", ":", "prefix", "=", "self", ".", "prefix", "+", "'-'", "if", "self", ".", "prefix", "else", "''", "for", "comment", ",", "line", ",", "encoding", "in", "text_list", ":", "content", ".", "append", "(", "filters", ".", "SourceText", "(", "textwrap", ".", "dedent", "(", "comment", ")", ",", "\"%s (%d)\"", "%", "(", "context", ",", "line", ")", ",", "encoding", ",", "prefix", "+", "category", ")", ")" ]
Extend the source text list with the gathered text data.
[ "Extend", "the", "source", "text", "list", "with", "the", "gathered", "text", "data", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L392-L405
facelessuser/pyspelling
pyspelling/filters/cpp.py
CppFilter.find_content
def find_content(self, text): """Find content.""" if self.trigraphs: text = RE_TRIGRAPHS.sub(self.process_trigraphs, text) for m in self.pattern.finditer(self.norm_nl(text)): self.evaluate(m)
python
def find_content(self, text): """Find content.""" if self.trigraphs: text = RE_TRIGRAPHS.sub(self.process_trigraphs, text) for m in self.pattern.finditer(self.norm_nl(text)): self.evaluate(m)
[ "def", "find_content", "(", "self", ",", "text", ")", ":", "if", "self", ".", "trigraphs", ":", "text", "=", "RE_TRIGRAPHS", ".", "sub", "(", "self", ".", "process_trigraphs", ",", "text", ")", "for", "m", "in", "self", ".", "pattern", ".", "finditer", "(", "self", ".", "norm_nl", "(", "text", ")", ")", ":", "self", ".", "evaluate", "(", "m", ")" ]
Find content.
[ "Find", "content", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L419-L426
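`find_content` optionally rewrites trigraphs before scanning. The mapping itself is the standard C one; a miniature version of the substitution pass (the filter's actual RE_TRIGRAPHS/process_trigraphs pair lives elsewhere in the module):

import re

TRIGRAPHS = {'??=': '#', '??/': '\\', "??'": '^', '??(': '[', '??)': ']',
             '??!': '|', '??<': '{', '??>': '}', '??-': '~'}
pattern = re.compile(r"\?\?[=/'()!<>-]")

print(pattern.sub(lambda m: TRIGRAPHS[m.group(0)], 'a??=b ??<c??>'))
# a#b {c}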
deshima-dev/decode
decode/core/cube/functions.py
cube
def cube(data, xcoords=None, ycoords=None, chcoords=None, scalarcoords=None, datacoords=None, attrs=None, name=None): """Create a cube as an instance of xarray.DataArray with Decode accessor. Args: data (numpy.ndarray): 3D (x x y x channel) array. xcoords (dict, optional): Dictionary of arrays that label x axis. ycoords (dict, optional): Dictionary of arrays that label y axis. chcoords (dict, optional): Dictionary of arrays that label channel axis. scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like). datacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes. attrs (dict, optional): Dictionary of attributes to add to the instance. name (str, optional): String that names the instance. Returns: decode cube (decode.cube): Decode cube. """ # initialize coords with default values cube = xr.DataArray(data, dims=('x', 'y', 'ch'), attrs=attrs, name=name) cube.dcc._initcoords() # update coords with input values (if any) if xcoords is not None: cube.coords.update({key: ('x', xcoords[key]) for key in xcoords}) if ycoords is not None: cube.coords.update({key: ('y', ycoords[key]) for key in ycoords}) if chcoords is not None: cube.coords.update({key: ('ch', chcoords[key]) for key in chcoords}) if datacoords is not None: cube.coords.update({key: (('x', 'y', 'ch'), datacoords[key]) for key in datacoords}) if scalarcoords is not None: cube.coords.update(scalarcoords) return cube
python
def cube(data, xcoords=None, ycoords=None, chcoords=None, scalarcoords=None, datacoords=None, attrs=None, name=None): """Create a cube as an instance of xarray.DataArray with Decode accessor. Args: data (numpy.ndarray): 3D (x x y x channel) array. xcoords (dict, optional): Dictionary of arrays that label x axis. ycoords (dict, optional): Dictionary of arrays that label y axis. chcoords (dict, optional): Dictionary of arrays that label channel axis. scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like). datacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes. attrs (dict, optional): Dictionary of attributes to add to the instance. name (str, optional): String that names the instance. Returns: decode cube (decode.cube): Decode cube. """ # initialize coords with default values cube = xr.DataArray(data, dims=('x', 'y', 'ch'), attrs=attrs, name=name) cube.dcc._initcoords() # update coords with input values (if any) if xcoords is not None: cube.coords.update({key: ('x', xcoords[key]) for key in xcoords}) if ycoords is not None: cube.coords.update({key: ('y', ycoords[key]) for key in ycoords}) if chcoords is not None: cube.coords.update({key: ('ch', chcoords[key]) for key in chcoords}) if datacoords is not None: cube.coords.update({key: (('x', 'y', 'ch'), datacoords[key]) for key in datacoords}) if scalarcoords is not None: cube.coords.update(scalarcoords) return cube
[ "def", "cube", "(", "data", ",", "xcoords", "=", "None", ",", "ycoords", "=", "None", ",", "chcoords", "=", "None", ",", "scalarcoords", "=", "None", ",", "datacoords", "=", "None", ",", "attrs", "=", "None", ",", "name", "=", "None", ")", ":", "# initialize coords with default values", "cube", "=", "xr", ".", "DataArray", "(", "data", ",", "dims", "=", "(", "'x'", ",", "'y'", ",", "'ch'", ")", ",", "attrs", "=", "attrs", ",", "name", "=", "name", ")", "cube", ".", "dcc", ".", "_initcoords", "(", ")", "# update coords with input values (if any)", "if", "xcoords", "is", "not", "None", ":", "cube", ".", "coords", ".", "update", "(", "{", "key", ":", "(", "'x'", ",", "xcoords", "[", "key", "]", ")", "for", "key", "in", "xcoords", "}", ")", "if", "ycoords", "is", "not", "None", ":", "cube", ".", "coords", ".", "update", "(", "{", "key", ":", "(", "'y'", ",", "ycoords", "[", "key", "]", ")", "for", "key", "in", "ycoords", "}", ")", "if", "chcoords", "is", "not", "None", ":", "cube", ".", "coords", ".", "update", "(", "{", "key", ":", "(", "'ch'", ",", "chcoords", "[", "key", "]", ")", "for", "key", "in", "chcoords", "}", ")", "if", "datacoords", "is", "not", "None", ":", "cube", ".", "coords", ".", "update", "(", "{", "key", ":", "(", "(", "'x'", ",", "'y'", ",", "'ch'", ")", ",", "datacoords", "[", "key", "]", ")", "for", "key", "in", "datacoords", "}", ")", "if", "scalarcoords", "is", "not", "None", ":", "cube", ".", "coords", ".", "update", "(", "scalarcoords", ")", "return", "cube" ]
Create a cube as an instance of xarray.DataArray with Decode accessor. Args: data (numpy.ndarray): 3D (x x y x channel) array. xcoords (dict, optional): Dictionary of arrays that label x axis. ycoords (dict, optional): Dictionary of arrays that label y axis. chcoords (dict, optional): Dictionary of arrays that label channel axis. scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like). datacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes. attrs (dict, optional): Dictionary of attributes to add to the instance. name (str, optional): String that names the instance. Returns: decode cube (decode.cube): Decode cube.
[ "Create", "a", "cube", "as", "an", "instance", "of", "xarray", ".", "DataArray", "with", "Decode", "accessor", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/cube/functions.py#L25-L61
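Stripped of the Decode accessor, a cube is just a labeled 3-D `xarray.DataArray`. A toy construction along the same lines (shapes and coordinate names are illustrative, and the `dcc._initcoords()` defaults are omitted):

import numpy as np
import xarray as xr

data = np.zeros((4, 3, 2))                       # (x, y, ch)
cube = xr.DataArray(data, dims=('x', 'y', 'ch'))
# Channel-axis labels attach exactly as in the function above.
cube.coords.update({'kidid': ('ch', np.array([0, 1]))})
print(cube.dims, cube.coords['kidid'].values)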
deshima-dev/decode
decode/core/cube/functions.py
fromcube
def fromcube(cube, template): """Convert a decode cube to a decode array. Args: cube (decode.cube): Decode cube to be cast. template (decode.array): Decode array whose shape the cube is cast on. Returns: decode array (decode.array): Decode array. Notes: This function is under development. """ array = dc.zeros_like(template) y, x = array.y.values, array.x.values gy, gx = cube.y.values, cube.x.values iy = interp1d(gy, np.arange(len(gy)))(y) ix = interp1d(gx, np.arange(len(gx)))(x) for ch in range(len(cube.ch)): array[:,ch] = map_coordinates(cube.values[:,:,ch], (ix, iy)) return array
python
def fromcube(cube, template): """Convert a decode cube to a decode array. Args: cube (decode.cube): Decode cube to be cast. template (decode.array): Decode array whose shape the cube is cast on. Returns: decode array (decode.array): Decode array. Notes: This function is under development. """ array = dc.zeros_like(template) y, x = array.y.values, array.x.values gy, gx = cube.y.values, cube.x.values iy = interp1d(gy, np.arange(len(gy)))(y) ix = interp1d(gx, np.arange(len(gx)))(x) for ch in range(len(cube.ch)): array[:,ch] = map_coordinates(cube.values[:,:,ch], (ix, iy)) return array
[ "def", "fromcube", "(", "cube", ",", "template", ")", ":", "array", "=", "dc", ".", "zeros_like", "(", "template", ")", "y", ",", "x", "=", "array", ".", "y", ".", "values", ",", "array", ".", "x", ".", "values", "gy", ",", "gx", "=", "cube", ".", "y", ".", "values", ",", "cube", ".", "x", ".", "values", "iy", "=", "interp1d", "(", "gy", ",", "np", ".", "arange", "(", "len", "(", "gy", ")", ")", ")", "(", "y", ")", "ix", "=", "interp1d", "(", "gx", ",", "np", ".", "arange", "(", "len", "(", "gx", ")", ")", ")", "(", "x", ")", "for", "ch", "in", "range", "(", "len", "(", "cube", ".", "ch", ")", ")", ":", "array", "[", ":", ",", "ch", "]", "=", "map_coordinates", "(", "cube", ".", "values", "[", ":", ",", ":", ",", "ch", "]", ",", "(", "ix", ",", "iy", ")", ")", "return", "array" ]
Convert a decode cube to a decode array. Args: cube (decode.cube): Decode cube to be cast. template (decode.array): Decode array whose shape the cube is cast on. Returns: decode array (decode.array): Decode array. Notes: This function is under development.
[ "Covert", "a", "decode", "cube", "to", "a", "decode", "array", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/cube/functions.py#L64-L87
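The core of `fromcube` is a two-step resampling: `interp1d` maps world coordinates onto fractional pixel indices, and `map_coordinates` interpolates the gridded values there. A 1-D toy with the same two steps (array sizes are arbitrary):

import numpy as np
from scipy.interpolate import interp1d
from scipy.ndimage import map_coordinates

grid = np.linspace(0.0, 1.0, 11)    # world coordinate of each pixel
image = np.arange(11.0)             # gridded values along that axis
to_index = interp1d(grid, np.arange(len(grid)))
samples = np.array([0.05, 0.5, 0.95])
# Fractional indices in, interpolated values out.
print(map_coordinates(image, [to_index(samples)]))
# approximately [0.5, 5.0, 9.5]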
deshima-dev/decode
decode/core/cube/functions.py
tocube
def tocube(array, **kwargs): """Convert a decode array to decode cube. Args: array (decode.array): Decode array which will be converted. kwargs (optional): Other arguments. xarr (list or numpy.ndarray): Grid array of x direction. yarr (list or numpy.ndarray): Grid array of y direction. gx (float): The size of grid of x. gy (float): The size of grid of y. nx (int): The number of grid of x direction. ny (int): The number of grid of y direction. xmin (float): Minimum value of x. xmax (float): Maximum value of x. ymin (float): Minimum value of y. ymax (float): Maximum value of y. xc (float): Center of x. yc (float): Center of y. unit (str): Unit of x/y. 'deg' or 'degree': Degree (Default). 'arcmin': Arcminute. 'arcsec': Arcsecond. Returns: decode cube (decode.cube): Decode cube. Notes: Available combinations of kwargs are (1) xarr/yarr and xc/yc (2) gx/gy and xmin/xmax/ymin/ymax and xc/yc (3) nx/ny and xmin/xmax/ymin/ymax """ # pick up kwargs unit = kwargs.pop('unit', 'deg') unit2deg = getattr(u, unit).to('deg') xc = kwargs.pop('xc', float(array.xref)) * unit2deg yc = kwargs.pop('yc', float(array.yref)) * unit2deg xarr = kwargs.pop('xarr', None) yarr = kwargs.pop('yarr', None) xmin = kwargs.pop('xmin', None) xmax = kwargs.pop('xmax', None) ymin = kwargs.pop('ymin', None) ymax = kwargs.pop('ymax', None) gx = kwargs.pop('gx', None) gy = kwargs.pop('gy', None) nx = kwargs.pop('nx', None) ny = kwargs.pop('ny', None) if None not in [xarr, yarr]: x_grid = xr.DataArray(xarr * unit2deg, dims='grid') y_grid = xr.DataArray(yarr * unit2deg, dims='grid') else: if None not in [xmin, xmax, ymin, ymax]: xmin = xmin * unit2deg xmax = xmax * unit2deg ymin = ymin * unit2deg ymax = ymax * unit2deg else: xmin = array.x.min() xmax = array.x.max() ymin = array.y.min() ymax = array.y.max() logger.info('xmin xmax ymin ymax') logger.info('{} {} {} {}'.format(xmin, xmax, ymin, ymax)) if None not in [gx, gy]: gx = gx * unit2deg gy = gy * unit2deg logger.info('xc yc gx gy') logger.info('{} {} {} {}'.format(xc, yc, gx, gy)) gxmin = np.floor((xmin - xc) / gx) gxmax = np.ceil((xmax - xc) / gx) gymin = np.floor((ymin - yc) / gy) gymax = np.ceil((ymax - yc) / gy) xmin = gxmin * gx + xc xmax = gxmax * gx + xc ymin = gymin * gy + yc ymax = gymax * gy + yc x_grid = xr.DataArray(np.arange(xmin, xmax+gx, gx), dims='grid') y_grid = xr.DataArray(np.arange(ymin, ymax+gy, gy), dims='grid') elif None not in [nx, ny]: logger.info('nx ny') logger.info('{} {}'.format(nx, ny)) ### nx/ny does not support xc/yc xc = 0 yc = 0 x_grid = xr.DataArray(np.linspace(xmin, xmax, nx), dims='grid') y_grid = xr.DataArray(np.linspace(ymin, ymax, ny), dims='grid') else: raise KeyError('Arguments are wrong.') # reverse the direction of x when coordsys == 'RADEC' if array.coordsys == 'RADEC': x_grid = x_grid[::-1] # compute gridding nx, ny, nch = len(x_grid), len(y_grid), len(array.ch) i = np.abs(array.x - x_grid).argmin('grid').compute() j = np.abs(array.y - y_grid).argmin('grid').compute() index = i + nx*j array.coords.update({'index': index}) groupedarray = array.groupby('index') groupedones = dc.ones_like(array).groupby('index') gridarray = groupedarray.mean('t') stdarray = groupedarray.std('t') numarray = groupedones.sum('t') logger.info('Gridding started.') gridarray = gridarray.compute() noisearray = (stdarray / numarray**0.5).compute() logger.info('Gridding finished.') # create cube mask = gridarray.index.values temp = np.full([ny*nx, nch], np.nan) temp[mask] = gridarray.values data = temp.reshape((ny, nx, nch)).swapaxes(0, 1) temp = np.full([ny*nx, nch], np.nan) temp[mask] = noisearray.values noise = temp.reshape((ny, nx, nch)).swapaxes(0, 1) xcoords = {'x': x_grid.values} ycoords = {'y': y_grid.values} chcoords = {'masterid': array.masterid.values, 'kidid': array.kidid.values, 'kidfq': array.kidfq.values, 'kidtp': array.kidtp.values} scalarcoords = {'coordsys': array.coordsys.values, 'datatype': array.datatype.values, 'xref': array.xref.values, 'yref': array.yref.values} datacoords = {'noise': noise} return dc.cube(data, xcoords=xcoords, ycoords=ycoords, chcoords=chcoords, scalarcoords=scalarcoords, datacoords=datacoords)
python
def tocube(array, **kwargs):
    """Convert a decode array to decode cube.

    Args:
        array (decode.array): Decode array which will be converted.
        kwargs (optional): Other arguments.
            xarr (list or numpy.ndarray): Grid array of x direction.
            yarr (list or numpy.ndarray): Grid array of y direction.
            gx (float): The size of grid of x.
            gy (float): The size of grid of y.
            nx (int): The number of grid of x direction.
            ny (int): The number of grid of y direction.
            xmin (float): Minimum value of x.
            xmax (float): Maximum value of x.
            ymin (float): Minimum value of y.
            ymax (float): Maximum value of y.
            xc (float): Center of x.
            yc (float): Center of y.
            unit (str): Unit of x/y.
                'deg' or 'degree': Degree (Default).
                'arcmin': Arcminute.
                'arcsec': Arcsecond.

    Returns:
        decode cube (decode.cube): Decode cube.

    Notes:
        Available combinations of kwargs are
            (1) xarr/yarr and xc/yc
            (2) gx/gy and xmin/xmax/ymin/ymax and xc/yc
            (3) nx/ny and xmin/xmax/ymin/ymax
    """
    # pick up kwargs
    unit = kwargs.pop('unit', 'deg')
    unit2deg = getattr(u, unit).to('deg')

    xc = kwargs.pop('xc', float(array.xref)) * unit2deg
    yc = kwargs.pop('yc', float(array.yref)) * unit2deg
    xarr = kwargs.pop('xarr', None)
    yarr = kwargs.pop('yarr', None)
    xmin = kwargs.pop('xmin', None)
    xmax = kwargs.pop('xmax', None)
    ymin = kwargs.pop('ymin', None)
    ymax = kwargs.pop('ymax', None)
    gx = kwargs.pop('gx', None)
    gy = kwargs.pop('gy', None)
    nx = kwargs.pop('nx', None)
    ny = kwargs.pop('ny', None)

    if None not in [xarr, yarr]:
        x_grid = xr.DataArray(xarr * unit2deg, dims='grid')
        y_grid = xr.DataArray(yarr * unit2deg, dims='grid')
    else:
        if None not in [xmin, xmax, ymin, ymax]:
            xmin = xmin * unit2deg
            xmax = xmax * unit2deg
            ymin = ymin * unit2deg
            ymax = ymax * unit2deg
        else:
            xmin = array.x.min()
            xmax = array.x.max()
            ymin = array.y.min()
            ymax = array.y.max()
        logger.info('xmin xmax ymin ymax')
        logger.info('{} {} {} {}'.format(xmin, xmax, ymin, ymax))

        if None not in [gx, gy]:
            gx = gx * unit2deg
            gy = gy * unit2deg
            logger.info('xc yc gx gy')
            logger.info('{} {} {} {}'.format(xc, yc, gx, gy))

            gxmin = np.floor((xmin - xc) / gx)
            gxmax = np.ceil((xmax - xc) / gx)
            gymin = np.floor((ymin - yc) / gy)
            gymax = np.ceil((ymax - yc) / gy)
            xmin = gxmin * gx + xc
            xmax = gxmax * gx + xc
            ymin = gymin * gy + yc
            ymax = gymax * gy + yc

            x_grid = xr.DataArray(np.arange(xmin, xmax+gx, gx), dims='grid')
            y_grid = xr.DataArray(np.arange(ymin, ymax+gy, gy), dims='grid')
        elif None not in [nx, ny]:
            logger.info('nx ny')
            logger.info('{} {}'.format(nx, ny))
            ### nx/ny does not support xc/yc
            xc = 0
            yc = 0

            x_grid = xr.DataArray(np.linspace(xmin, xmax, nx), dims='grid')
            y_grid = xr.DataArray(np.linspace(ymin, ymax, ny), dims='grid')
        else:
            raise KeyError('Arguments are wrong.')

    # reverse the direction of x when coordsys == 'RADEC'
    if array.coordsys == 'RADEC':
        x_grid = x_grid[::-1]

    # compute gridding
    nx, ny, nch = len(x_grid), len(y_grid), len(array.ch)
    i = np.abs(array.x - x_grid).argmin('grid').compute()
    j = np.abs(array.y - y_grid).argmin('grid').compute()
    index = i + nx*j

    array.coords.update({'index': index})
    groupedarray = array.groupby('index')
    groupedones = dc.ones_like(array).groupby('index')
    gridarray = groupedarray.mean('t')
    stdarray = groupedarray.std('t')
    numarray = groupedones.sum('t')

    logger.info('Gridding started.')
    gridarray = gridarray.compute()
    noisearray = (stdarray / numarray**0.5).compute()
    logger.info('Gridding finished.')

    # create cube
    mask = gridarray.index.values
    temp = np.full([ny*nx, nch], np.nan)
    temp[mask] = gridarray.values
    data = temp.reshape((ny, nx, nch)).swapaxes(0, 1)
    temp = np.full([ny*nx, nch], np.nan)
    temp[mask] = noisearray.values
    noise = temp.reshape((ny, nx, nch)).swapaxes(0, 1)

    xcoords = {'x': x_grid.values}
    ycoords = {'y': y_grid.values}
    chcoords = {'masterid': array.masterid.values,
                'kidid': array.kidid.values,
                'kidfq': array.kidfq.values,
                'kidtp': array.kidtp.values}
    scalarcoords = {'coordsys': array.coordsys.values,
                    'datatype': array.datatype.values,
                    'xref': array.xref.values,
                    'yref': array.yref.values}
    datacoords = {'noise': noise}

    return dc.cube(data, xcoords=xcoords, ycoords=ycoords, chcoords=chcoords,
                   scalarcoords=scalarcoords, datacoords=datacoords)
[ "def", "tocube", "(", "array", ",", "*", "*", "kwargs", ")", ":", "# pick up kwargs", "unit", "=", "kwargs", ".", "pop", "(", "'unit'", ",", "'deg'", ")", "unit2deg", "=", "getattr", "(", "u", ",", "unit", ")", ".", "to", "(", "'deg'", ")", "xc", "=", "kwargs", ".", "pop", "(", "'xc'", ",", "float", "(", "array", ".", "xref", ")", ")", "*", "unit2deg", "yc", "=", "kwargs", ".", "pop", "(", "'yc'", ",", "float", "(", "array", ".", "yref", ")", ")", "*", "unit2deg", "xarr", "=", "kwargs", ".", "pop", "(", "'xarr'", ",", "None", ")", "yarr", "=", "kwargs", ".", "pop", "(", "'yarr'", ",", "None", ")", "xmin", "=", "kwargs", ".", "pop", "(", "'xmin'", ",", "None", ")", "xmax", "=", "kwargs", ".", "pop", "(", "'xmax'", ",", "None", ")", "ymin", "=", "kwargs", ".", "pop", "(", "'ymin'", ",", "None", ")", "ymax", "=", "kwargs", ".", "pop", "(", "'ymax'", ",", "None", ")", "gx", "=", "kwargs", ".", "pop", "(", "'gx'", ",", "None", ")", "gy", "=", "kwargs", ".", "pop", "(", "'gy'", ",", "None", ")", "nx", "=", "kwargs", ".", "pop", "(", "'nx'", ",", "None", ")", "ny", "=", "kwargs", ".", "pop", "(", "'ny'", ",", "None", ")", "if", "None", "not", "in", "[", "xarr", ",", "yarr", "]", ":", "x_grid", "=", "xr", ".", "DataArray", "(", "xarr", "*", "unit2deg", ",", "dims", "=", "'grid'", ")", "y_grid", "=", "xr", ".", "DataArray", "(", "yarr", "*", "unit2deg", ",", "dims", "=", "'grid'", ")", "else", ":", "if", "None", "not", "in", "[", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", "]", ":", "xmin", "=", "xmin", "*", "unit2deg", "xmax", "=", "xmax", "*", "unit2deg", "ymin", "=", "ymin", "*", "unit2deg", "ymax", "=", "ymax", "*", "unit2deg", "else", ":", "xmin", "=", "array", ".", "x", ".", "min", "(", ")", "xmax", "=", "array", ".", "x", ".", "max", "(", ")", "ymin", "=", "array", ".", "y", ".", "min", "(", ")", "ymax", "=", "array", ".", "y", ".", "max", "(", ")", "logger", ".", "info", "(", "'xmin xmax ymin ymax'", ")", "logger", ".", "info", "(", "'{} {} {} {}'", ".", "format", "(", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", ")", ")", "if", "None", "not", "in", "[", "gx", ",", "gy", "]", ":", "gx", "=", "gx", "*", "unit2deg", "gy", "=", "gy", "*", "unit2deg", "logger", ".", "info", "(", "'xc yc gx gy'", ")", "logger", ".", "info", "(", "'{} {} {} {}'", ".", "format", "(", "xc", ",", "yc", ",", "gx", ",", "gy", ")", ")", "gxmin", "=", "np", ".", "floor", "(", "(", "xmin", "-", "xc", ")", "/", "gx", ")", "gxmax", "=", "np", ".", "ceil", "(", "(", "xmax", "-", "xc", ")", "/", "gx", ")", "gymin", "=", "np", ".", "floor", "(", "(", "ymin", "-", "yc", ")", "/", "gy", ")", "gymax", "=", "np", ".", "ceil", "(", "(", "ymax", "-", "yc", ")", "/", "gy", ")", "xmin", "=", "gxmin", "*", "gx", "+", "xc", "xmax", "=", "gxmax", "*", "gx", "+", "xc", "ymin", "=", "gymin", "*", "gy", "+", "yc", "ymax", "=", "gymax", "*", "gy", "+", "yc", "x_grid", "=", "xr", ".", "DataArray", "(", "np", ".", "arange", "(", "xmin", ",", "xmax", "+", "gx", ",", "gx", ")", ",", "dims", "=", "'grid'", ")", "y_grid", "=", "xr", ".", "DataArray", "(", "np", ".", "arange", "(", "ymin", ",", "ymax", "+", "gy", ",", "gy", ")", ",", "dims", "=", "'grid'", ")", "elif", "None", "not", "in", "[", "nx", ",", "ny", "]", ":", "logger", ".", "info", "(", "'nx ny'", ")", "logger", ".", "info", "(", "'{} {}'", ".", "format", "(", "nx", ",", "ny", ")", ")", "### nx/ny does not support xc/yc", "xc", "=", "0", "yc", "=", "0", "x_grid", "=", "xr", ".", "DataArray", "(", "np", ".", "linspace", "(", "xmin", ",", "xmax", ",", "nx", ")", ",", "dims", "=", 
"'grid'", ")", "y_grid", "=", "xr", ".", "DataArray", "(", "np", ".", "linspace", "(", "ymin", ",", "ymax", ",", "ny", ")", ",", "dims", "=", "'grid'", ")", "else", ":", "raise", "KeyError", "(", "'Arguments are wrong.'", ")", "# reverse the direction of x when coordsys == 'RADEC'", "if", "array", ".", "coordsys", "==", "'RADEC'", ":", "x_grid", "=", "x_grid", "[", ":", ":", "-", "1", "]", "# compute gridding", "nx", ",", "ny", ",", "nch", "=", "len", "(", "x_grid", ")", ",", "len", "(", "y_grid", ")", ",", "len", "(", "array", ".", "ch", ")", "i", "=", "np", ".", "abs", "(", "array", ".", "x", "-", "x_grid", ")", ".", "argmin", "(", "'grid'", ")", ".", "compute", "(", ")", "j", "=", "np", ".", "abs", "(", "array", ".", "y", "-", "y_grid", ")", ".", "argmin", "(", "'grid'", ")", ".", "compute", "(", ")", "index", "=", "i", "+", "nx", "*", "j", "array", ".", "coords", ".", "update", "(", "{", "'index'", ":", "index", "}", ")", "groupedarray", "=", "array", ".", "groupby", "(", "'index'", ")", "groupedones", "=", "dc", ".", "ones_like", "(", "array", ")", ".", "groupby", "(", "'index'", ")", "gridarray", "=", "groupedarray", ".", "mean", "(", "'t'", ")", "stdarray", "=", "groupedarray", ".", "std", "(", "'t'", ")", "numarray", "=", "groupedones", ".", "sum", "(", "'t'", ")", "logger", ".", "info", "(", "'Gridding started.'", ")", "gridarray", "=", "gridarray", ".", "compute", "(", ")", "noisearray", "=", "(", "stdarray", "/", "numarray", "**", "0.5", ")", ".", "compute", "(", ")", "logger", ".", "info", "(", "'Gridding finished.'", ")", "# create cube", "mask", "=", "gridarray", ".", "index", ".", "values", "temp", "=", "np", ".", "full", "(", "[", "ny", "*", "nx", ",", "nch", "]", ",", "np", ".", "nan", ")", "temp", "[", "mask", "]", "=", "gridarray", ".", "values", "data", "=", "temp", ".", "reshape", "(", "(", "ny", ",", "nx", ",", "nch", ")", ")", ".", "swapaxes", "(", "0", ",", "1", ")", "temp", "=", "np", ".", "full", "(", "[", "ny", "*", "nx", ",", "nch", "]", ",", "np", ".", "nan", ")", "temp", "[", "mask", "]", "=", "noisearray", ".", "values", "noise", "=", "temp", ".", "reshape", "(", "(", "ny", ",", "nx", ",", "nch", ")", ")", ".", "swapaxes", "(", "0", ",", "1", ")", "xcoords", "=", "{", "'x'", ":", "x_grid", ".", "values", "}", "ycoords", "=", "{", "'y'", ":", "y_grid", ".", "values", "}", "chcoords", "=", "{", "'masterid'", ":", "array", ".", "masterid", ".", "values", ",", "'kidid'", ":", "array", ".", "kidid", ".", "values", ",", "'kidfq'", ":", "array", ".", "kidfq", ".", "values", ",", "'kidtp'", ":", "array", ".", "kidtp", ".", "values", "}", "scalarcoords", "=", "{", "'coordsys'", ":", "array", ".", "coordsys", ".", "values", ",", "'datatype'", ":", "array", ".", "datatype", ".", "values", ",", "'xref'", ":", "array", ".", "xref", ".", "values", ",", "'yref'", ":", "array", ".", "yref", ".", "values", "}", "datacoords", "=", "{", "'noise'", ":", "noise", "}", "return", "dc", ".", "cube", "(", "data", ",", "xcoords", "=", "xcoords", ",", "ycoords", "=", "ycoords", ",", "chcoords", "=", "chcoords", ",", "scalarcoords", "=", "scalarcoords", ",", "datacoords", "=", "datacoords", ")" ]
Convert a decode array to decode cube.

Args:
    array (decode.array): Decode array which will be converted.
    kwargs (optional): Other arguments.
        xarr (list or numpy.ndarray): Grid array of x direction.
        yarr (list or numpy.ndarray): Grid array of y direction.
        gx (float): The size of grid of x.
        gy (float): The size of grid of y.
        nx (int): The number of grid of x direction.
        ny (int): The number of grid of y direction.
        xmin (float): Minimum value of x.
        xmax (float): Maximum value of x.
        ymin (float): Minimum value of y.
        ymax (float): Maximum value of y.
        xc (float): Center of x.
        yc (float): Center of y.
        unit (str): Unit of x/y.
            'deg' or 'degree': Degree (Default).
            'arcmin': Arcminute.
            'arcsec': Arcsecond.

Returns:
    decode cube (decode.cube): Decode cube.

Notes:
    Available combinations of kwargs are
        (1) xarr/yarr and xc/yc
        (2) gx/gy and xmin/xmax/ymin/ymax and xc/yc
        (3) nx/ny and xmin/xmax/ymin/ymax
[ "Convert", "a", "decode", "array", "to", "decode", "cube", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/cube/functions.py#L90-L231
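The three kwarg combinations in the Notes above are easiest to grasp from a usage sketch. This is illustrative only: it assumes `tocube` is re-exported at the `decode` package top level, and `array` stands for an already-loaded decode array.

import decode as dc  # assumption: tocube is exposed as dc.tocube

# combination (2): 6-arcsec pixels on a grid aligned to the center (xc, yc)
cube = dc.tocube(array, gx=6.0, gy=6.0, xc=0.0, yc=0.0, unit='arcsec')

# combination (3): fixed pixel counts spanning the data extent (xc/yc forced to 0)
cube = dc.tocube(array, nx=64, ny=64)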
deshima-dev/decode
decode/core/cube/functions.py
makecontinuum
def makecontinuum(cube, **kwargs):
    """Make a continuum array.

    Args:
        cube (decode.cube): Decode cube which will be averaged over channels.
        kwargs (optional): Other arguments.
            inchs (list): Included channel kidids.
            exchs (list): Excluded channel kidids.

    Returns:
        decode cube (decode.cube): Decode cube (2d).
    """
    ### pick up kwargs
    inchs = kwargs.pop('inchs', None)
    exchs = kwargs.pop('exchs', None)
    # assumed fix: 'weight' is used below but was never assigned in this
    # snapshot, so pop it from kwargs like the other options
    weight = kwargs.pop('weight', None)
    if (inchs is not None) or (exchs is not None):
        raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')

    # if inchs is not None:
    #     logger.info('inchs')
    #     logger.info('{}'.format(inchs))
    #     subcube = cube[:, :, inchs]
    # else:
    #     mask = np.full(len(cube.ch), True)
    #     if exchs is not None:
    #         logger.info('exchs')
    #         logger.info('{}'.format(exchs))
    #         mask[exchs] = False
    #     subcube = cube[:, :, mask]
    if weight is None:
        weight = 1.
    # else:
    #     cont = (subcube * (1 / subcube.noise**2)).sum(dim='ch') / (1 / subcube.noise**2).sum(dim='ch')
    #     cont = cont.expand_dims(dim='ch', axis=2)
    cont = (cube * (1 / weight**2)).sum(dim='ch') / (1 / weight**2).sum(dim='ch')

    ### define coordinates
    xcoords = {'x': cube.x.values}
    ycoords = {'y': cube.y.values}
    chcoords = {'masterid': np.array([0]),  # np.array([int(subcube.masterid.mean(dim='ch'))]),
                'kidid': np.array([0]),  # np.array([int(subcube.kidid.mean(dim='ch'))]),
                'kidfq': np.array([0]),  # np.array([float(subcube.kidfq.mean(dim='ch'))]),
                'kidtp': np.array([1])}  # np.array([1])}
    scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values,
                    'xref': cube.xref.values, 'yref': cube.yref.values}

    return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords,
                   chcoords=chcoords, scalarcoords=scalarcoords)
python
def makecontinuum(cube, **kwargs):
    """Make a continuum array.

    Args:
        cube (decode.cube): Decode cube which will be averaged over channels.
        kwargs (optional): Other arguments.
            inchs (list): Included channel kidids.
            exchs (list): Excluded channel kidids.

    Returns:
        decode cube (decode.cube): Decode cube (2d).
    """
    ### pick up kwargs
    inchs = kwargs.pop('inchs', None)
    exchs = kwargs.pop('exchs', None)
    # assumed fix: 'weight' is used below but was never assigned in this
    # snapshot, so pop it from kwargs like the other options
    weight = kwargs.pop('weight', None)
    if (inchs is not None) or (exchs is not None):
        raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')

    # if inchs is not None:
    #     logger.info('inchs')
    #     logger.info('{}'.format(inchs))
    #     subcube = cube[:, :, inchs]
    # else:
    #     mask = np.full(len(cube.ch), True)
    #     if exchs is not None:
    #         logger.info('exchs')
    #         logger.info('{}'.format(exchs))
    #         mask[exchs] = False
    #     subcube = cube[:, :, mask]
    if weight is None:
        weight = 1.
    # else:
    #     cont = (subcube * (1 / subcube.noise**2)).sum(dim='ch') / (1 / subcube.noise**2).sum(dim='ch')
    #     cont = cont.expand_dims(dim='ch', axis=2)
    cont = (cube * (1 / weight**2)).sum(dim='ch') / (1 / weight**2).sum(dim='ch')

    ### define coordinates
    xcoords = {'x': cube.x.values}
    ycoords = {'y': cube.y.values}
    chcoords = {'masterid': np.array([0]),  # np.array([int(subcube.masterid.mean(dim='ch'))]),
                'kidid': np.array([0]),  # np.array([int(subcube.kidid.mean(dim='ch'))]),
                'kidfq': np.array([0]),  # np.array([float(subcube.kidfq.mean(dim='ch'))]),
                'kidtp': np.array([1])}  # np.array([1])}
    scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values,
                    'xref': cube.xref.values, 'yref': cube.yref.values}

    return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords,
                   chcoords=chcoords, scalarcoords=scalarcoords)
[ "def", "makecontinuum", "(", "cube", ",", "*", "*", "kwargs", ")", ":", "### pick up kwargs", "inchs", "=", "kwargs", ".", "pop", "(", "'inchs'", ",", "None", ")", "exchs", "=", "kwargs", ".", "pop", "(", "'exchs'", ",", "None", ")", "if", "(", "inchs", "is", "not", "None", ")", "or", "(", "exchs", "is", "not", "None", ")", ":", "raise", "KeyError", "(", "'Inchs and exchs are no longer supported. Use weight instead.'", ")", "# if inchs is not None:", "# logger.info('inchs')", "# logger.info('{}'.format(inchs))", "# subcube = cube[:, :, inchs]", "# else:", "# mask = np.full(len(cube.ch), True)", "# if exchs is not None:", "# logger.info('exchs')", "# logger.info('{}'.format(exchs))", "# mask[exchs] = False", "# subcube = cube[:, :, mask]", "if", "weight", "is", "None", ":", "weight", "=", "1.", "# else:", "# cont = (subcube * (1 / subcube.noise**2)).sum(dim='ch') / (1 / subcube.noise**2).sum(dim='ch')", "# cont = cont.expand_dims(dim='ch', axis=2)", "cont", "=", "(", "cube", "*", "(", "1", "/", "weight", "**", "2", ")", ")", ".", "sum", "(", "dim", "=", "'ch'", ")", "/", "(", "1", "/", "weight", "**", "2", ")", ".", "sum", "(", "dim", "=", "'ch'", ")", "### define coordinates", "xcoords", "=", "{", "'x'", ":", "cube", ".", "x", ".", "values", "}", "ycoords", "=", "{", "'y'", ":", "cube", ".", "y", ".", "values", "}", "chcoords", "=", "{", "'masterid'", ":", "np", ".", "array", "(", "[", "0", "]", ")", ",", "# np.array([int(subcube.masterid.mean(dim='ch'))]),", "'kidid'", ":", "np", ".", "array", "(", "[", "0", "]", ")", ",", "# np.array([int(subcube.kidid.mean(dim='ch'))]),", "'kidfq'", ":", "np", ".", "array", "(", "[", "0", "]", ")", ",", "# np.array([float(subcube.kidfq.mean(dim='ch'))]),", "'kidtp'", ":", "np", ".", "array", "(", "[", "1", "]", ")", "}", "# np.array([1])}", "scalarcoords", "=", "{", "'coordsys'", ":", "cube", ".", "coordsys", ".", "values", ",", "'datatype'", ":", "cube", ".", "datatype", ".", "values", ",", "'xref'", ":", "cube", ".", "xref", ".", "values", ",", "'yref'", ":", "cube", ".", "yref", ".", "values", "}", "return", "dc", ".", "cube", "(", "cont", ".", "values", ",", "xcoords", "=", "xcoords", ",", "ycoords", "=", "ycoords", ",", "chcoords", "=", "chcoords", ",", "scalarcoords", "=", "scalarcoords", ")" ]
Make a continuum array.

Args:
    cube (decode.cube): Decode cube which will be averaged over channels.
    kwargs (optional): Other arguments.
        inchs (list): Included channel kidids.
        exchs (list): Excluded channel kidids.

Returns:
    decode cube (decode.cube): Decode cube (2d).
[ "Make", "a", "continuum", "array", "." ]
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/cube/functions.py#L234-L283
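Since `makecontinuum` computes an inverse-square weighted mean over the channel axis, cont = sum(cube / w**2) / sum(1 / w**2), a quick sketch with hypothetical per-channel weights (the `weight` kwarg is the assumption flagged in the code above; `cube` could come from `tocube`):

import numpy as np

weights = np.ones(len(cube.ch))  # hypothetical: equal weights give a plain channel mean
cont2d = dc.makecontinuum(cube, weight=weights)  # assumes package-level export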
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._verify_encoding
def _verify_encoding(self, enc):
    """Verify encoding is okay."""
    enc = PYTHON_ENCODING_NAMES.get(enc, enc)
    try:
        codecs.getencoder(enc)
        encoding = enc
    except LookupError:
        encoding = None
    return encoding
python
def _verify_encoding(self, enc):
    """Verify encoding is okay."""
    enc = PYTHON_ENCODING_NAMES.get(enc, enc)
    try:
        codecs.getencoder(enc)
        encoding = enc
    except LookupError:
        encoding = None
    return encoding
[ "def", "_verify_encoding", "(", "self", ",", "enc", ")", ":", "enc", "=", "PYTHON_ENCODING_NAMES", ".", "get", "(", "enc", ",", "enc", ")", "try", ":", "codecs", ".", "getencoder", "(", "enc", ")", "encoding", "=", "enc", "except", "LookupError", ":", "encoding", "=", "None", "return", "encoding" ]
Verify encoding is okay.
[ "Verify", "encoding", "is", "okay", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L95-L104
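The verification is just a `codecs` registry lookup behind an alias map; the same idea stands alone as follows (the alias dict is a hypothetical stand-in for the module's `PYTHON_ENCODING_NAMES`):

import codecs

ALIASES = {'utf8': 'utf-8'}  # hypothetical stand-in for PYTHON_ENCODING_NAMES

def verify_encoding(enc):
    enc = ALIASES.get(enc, enc)
    try:
        codecs.getencoder(enc)  # raises LookupError for unknown encodings
        return enc
    except LookupError:
        return None

print(verify_encoding('utf8'))      # 'utf-8'
print(verify_encoding('not-real'))  # None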
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter.has_bom
def has_bom(self, f):
    """Check for UTF8, UTF16, and UTF32 BOMs."""
    content = f.read(4)
    encoding = None
    m = RE_UTF_BOM.match(content)
    if m is not None:
        if m.group(1):
            encoding = 'utf-8-sig'
        elif m.group(2):
            encoding = 'utf-32'
        elif m.group(3):
            encoding = 'utf-32'
        elif m.group(4):
            encoding = 'utf-16'
        elif m.group(5):
            encoding = 'utf-16'
    return encoding
python
def has_bom(self, f):
    """Check for UTF8, UTF16, and UTF32 BOMs."""
    content = f.read(4)
    encoding = None
    m = RE_UTF_BOM.match(content)
    if m is not None:
        if m.group(1):
            encoding = 'utf-8-sig'
        elif m.group(2):
            encoding = 'utf-32'
        elif m.group(3):
            encoding = 'utf-32'
        elif m.group(4):
            encoding = 'utf-16'
        elif m.group(5):
            encoding = 'utf-16'
    return encoding
[ "def", "has_bom", "(", "self", ",", "f", ")", ":", "content", "=", "f", ".", "read", "(", "4", ")", "encoding", "=", "None", "m", "=", "RE_UTF_BOM", ".", "match", "(", "content", ")", "if", "m", "is", "not", "None", ":", "if", "m", ".", "group", "(", "1", ")", ":", "encoding", "=", "'utf-8-sig'", "elif", "m", ".", "group", "(", "2", ")", ":", "encoding", "=", "'utf-32'", "elif", "m", ".", "group", "(", "3", ")", ":", "encoding", "=", "'utf-32'", "elif", "m", ".", "group", "(", "4", ")", ":", "encoding", "=", "'utf-16'", "elif", "m", ".", "group", "(", "5", ")", ":", "encoding", "=", "'utf-16'", "return", "encoding" ]
Check for UTF8, UTF16, and UTF32 BOMs.
[ "Check", "for", "UTF8", "UTF16", "and", "UTF32", "BOMs", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L106-L123
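`RE_UTF_BOM` is defined elsewhere in the module, so as a self-contained illustration of the same check, plain prefix tests against the `codecs` BOM constants work, provided UTF-32 is tested before UTF-16 (the UTF-32 LE signature begins with the UTF-16 LE one):

import codecs

def sniff_bom(head: bytes):
    # check longer signatures first so UTF-32 LE is not mistaken for UTF-16 LE
    if head.startswith(codecs.BOM_UTF8):
        return 'utf-8-sig'
    if head.startswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
        return 'utf-32'
    if head.startswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
        return 'utf-16'
    return None

print(sniff_bom(b'\xff\xfe\x00\x00data'))  # 'utf-32', not 'utf-16'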
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._utf_strip_bom
def _utf_strip_bom(self, encoding):
    """Return an encoding that will ignore the BOM."""
    if encoding is None:
        pass
    elif encoding.lower() == 'utf-8':
        encoding = 'utf-8-sig'
    elif encoding.lower().startswith('utf-16'):
        encoding = 'utf-16'
    elif encoding.lower().startswith('utf-32'):
        encoding = 'utf-32'
    return encoding
python
def _utf_strip_bom(self, encoding):
    """Return an encoding that will ignore the BOM."""
    if encoding is None:
        pass
    elif encoding.lower() == 'utf-8':
        encoding = 'utf-8-sig'
    elif encoding.lower().startswith('utf-16'):
        encoding = 'utf-16'
    elif encoding.lower().startswith('utf-32'):
        encoding = 'utf-32'
    return encoding
[ "def", "_utf_strip_bom", "(", "self", ",", "encoding", ")", ":", "if", "encoding", "is", "None", ":", "pass", "elif", "encoding", ".", "lower", "(", ")", "==", "'utf-8'", ":", "encoding", "=", "'utf-8-sig'", "elif", "encoding", ".", "lower", "(", ")", ".", "startswith", "(", "'utf-16'", ")", ":", "encoding", "=", "'utf-16'", "elif", "encoding", ".", "lower", "(", ")", ".", "startswith", "(", "'utf-32'", ")", ":", "encoding", "=", "'utf-32'", "return", "encoding" ]
Return an encoding that will ignore the BOM.
[ "Return", "an", "encoding", "that", "will", "ignore", "the", "BOM", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L125-L136
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._detect_buffer_encoding
def _detect_buffer_encoding(self, f):
    """Guess by checking BOM, and checking `_special_encode_check`, and using memory map."""
    encoding = None
    with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as m:
        encoding = self._analyze_file(m)
    return encoding
python
def _detect_buffer_encoding(self, f):
    """Guess by checking BOM, and checking `_special_encode_check`, and using memory map."""
    encoding = None
    with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as m:
        encoding = self._analyze_file(m)
    return encoding
[ "def", "_detect_buffer_encoding", "(", "self", ",", "f", ")", ":", "encoding", "=", "None", "with", "contextlib", ".", "closing", "(", "mmap", ".", "mmap", "(", "f", ".", "fileno", "(", ")", ",", "0", ",", "access", "=", "mmap", ".", "ACCESS_READ", ")", ")", "as", "m", ":", "encoding", "=", "self", ".", "_analyze_file", "(", "m", ")", "return", "encoding" ]
Guess by checking BOM, and checking `_special_encode_check`, and using memory map.
[ "Guess", "by", "checking", "BOM", "and", "checking", "_special_encode_check", "and", "using", "memory", "map", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L138-L144
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._analyze_file
def _analyze_file(self, f):
    """Analyze the file."""
    encoding = None  # assumed initializer: the snapshot leaves this unset when 'CHECK_BOM' is False

    f.seek(0)
    # Check for BOMs
    if self.CHECK_BOM:
        encoding = self.has_bom(f)
        f.seek(0)
    else:
        util.warn_deprecated(
            "'CHECK_BOM' attribute is deprecated. "
            "Please override 'has_bom` function to control or avoid BOM detection."
        )
    # Check file extensions
    if encoding is None:
        encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
        f.seek(0)
    if encoding is None:
        encoding = self._utf_strip_bom(self.content_check(f))
        f.seek(0)

    return encoding
python
def _analyze_file(self, f):
    """Analyze the file."""
    encoding = None  # assumed initializer: the snapshot leaves this unset when 'CHECK_BOM' is False

    f.seek(0)
    # Check for BOMs
    if self.CHECK_BOM:
        encoding = self.has_bom(f)
        f.seek(0)
    else:
        util.warn_deprecated(
            "'CHECK_BOM' attribute is deprecated. "
            "Please override 'has_bom` function to control or avoid BOM detection."
        )
    # Check file extensions
    if encoding is None:
        encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
        f.seek(0)
    if encoding is None:
        encoding = self._utf_strip_bom(self.content_check(f))
        f.seek(0)

    return encoding
[ "def", "_analyze_file", "(", "self", ",", "f", ")", ":", "f", ".", "seek", "(", "0", ")", "# Check for BOMs", "if", "self", ".", "CHECK_BOM", ":", "encoding", "=", "self", ".", "has_bom", "(", "f", ")", "f", ".", "seek", "(", "0", ")", "else", ":", "util", ".", "warn_deprecated", "(", "\"'CHECK_BOM' attribute is deprecated. \"", "\"Please override 'has_bom` function to control or avoid BOM detection.\"", ")", "# Check file extensions", "if", "encoding", "is", "None", ":", "encoding", "=", "self", ".", "_utf_strip_bom", "(", "self", ".", "header_check", "(", "f", ".", "read", "(", "1024", ")", ")", ")", "f", ".", "seek", "(", "0", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "self", ".", "_utf_strip_bom", "(", "self", ".", "content_check", "(", "f", ")", ")", "f", ".", "seek", "(", "0", ")", "return", "encoding" ]
Analyze the file.
[ "Analyze", "the", "file", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L146-L167
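Structurally this is a detection cascade: each stage runs only if the previous one returned nothing. The pattern in isolation (stage names are hypothetical):

def detect(stages):
    # try each detector in order; first non-None answer wins
    for stage in stages:
        encoding = stage()
        if encoding is not None:
            return encoding
    return None

# mirrors _analyze_file's order: BOM, then 1 KB header, then full content
# detect([check_bom, check_header, check_content])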
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._detect_encoding
def _detect_encoding(self, source_file):
    """Detect encoding."""
    encoding = self._guess(source_file)
    # If we didn't explicitly detect an encoding, assume default.
    if encoding is None:
        encoding = self.default_encoding
    return encoding
python
def _detect_encoding(self, source_file):
    """Detect encoding."""
    encoding = self._guess(source_file)
    # If we didn't explicitly detect an encoding, assume default.
    if encoding is None:
        encoding = self.default_encoding
    return encoding
[ "def", "_detect_encoding", "(", "self", ",", "source_file", ")", ":", "encoding", "=", "self", ".", "_guess", "(", "source_file", ")", "# If we didn't explicitly detect an encoding, assume default.", "if", "encoding", "is", "None", ":", "encoding", "=", "self", ".", "default_encoding", "return", "encoding" ]
Detect encoding.
[ "Detect", "encoding", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L169-L177
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._run_first
def _run_first(self, source_file):
    """Run on as first in chain."""
    self.reset()
    self.current_encoding = self.default_encoding
    encoding = None
    try:
        encoding = self._detect_encoding(source_file)
        content = self.filter(source_file, encoding)
    except UnicodeDecodeError:
        if not encoding or encoding != self.default_encoding:
            content = self.filter(source_file, self.default_encoding)
        else:
            raise
    return content
python
def _run_first(self, source_file):
    """Run on as first in chain."""
    self.reset()
    self.current_encoding = self.default_encoding
    encoding = None
    try:
        encoding = self._detect_encoding(source_file)
        content = self.filter(source_file, encoding)
    except UnicodeDecodeError:
        if not encoding or encoding != self.default_encoding:
            content = self.filter(source_file, self.default_encoding)
        else:
            raise
    return content
[ "def", "_run_first", "(", "self", ",", "source_file", ")", ":", "self", ".", "reset", "(", ")", "self", ".", "current_encoding", "=", "self", ".", "default_encoding", "encoding", "=", "None", "try", ":", "encoding", "=", "self", ".", "_detect_encoding", "(", "source_file", ")", "content", "=", "self", ".", "filter", "(", "source_file", ",", "encoding", ")", "except", "UnicodeDecodeError", ":", "if", "not", "encoding", "or", "encoding", "!=", "self", ".", "default_encoding", ":", "content", "=", "self", ".", "filter", "(", "source_file", ",", "self", ".", "default_encoding", ")", "else", ":", "raise", "return", "content" ]
Run on as first in chain.
[ "Run", "on", "as", "first", "in", "chain", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L179-L193
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter._guess
def _guess(self, filename):
    """Guess the encoding and decode the content of the file."""
    encoding = None

    file_size = os.path.getsize(filename)
    # If the file is really big, lets just call it binary.
    # We don't have time to let Python chug through a massive file.
    if not self._is_very_large(file_size):
        with open(filename, "rb") as f:
            if file_size == 0:
                encoding = 'ascii'
            else:
                encoding = self._detect_buffer_encoding(f)
                if encoding is None:
                    raise UnicodeDecodeError('None', b'', 0, 0, 'Unicode cannot be detected.')
                if encoding != BINARY_ENCODE:
                    encoding = self._verify_encoding(encoding)
    else:  # pragma: no cover
        raise UnicodeDecodeError('None', b'', 0, 0, 'Unicode detection is not applied to very large files!')

    return encoding
python
def _guess(self, filename):
    """Guess the encoding and decode the content of the file."""
    encoding = None

    file_size = os.path.getsize(filename)
    # If the file is really big, lets just call it binary.
    # We don't have time to let Python chug through a massive file.
    if not self._is_very_large(file_size):
        with open(filename, "rb") as f:
            if file_size == 0:
                encoding = 'ascii'
            else:
                encoding = self._detect_buffer_encoding(f)
                if encoding is None:
                    raise UnicodeDecodeError('None', b'', 0, 0, 'Unicode cannot be detected.')
                if encoding != BINARY_ENCODE:
                    encoding = self._verify_encoding(encoding)
    else:  # pragma: no cover
        raise UnicodeDecodeError('None', b'', 0, 0, 'Unicode detection is not applied to very large files!')

    return encoding
[ "def", "_guess", "(", "self", ",", "filename", ")", ":", "encoding", "=", "None", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "filename", ")", "# If the file is really big, lets just call it binary.", "# We don't have time to let Python chug through a massive file.", "if", "not", "self", ".", "_is_very_large", "(", "file_size", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "if", "file_size", "==", "0", ":", "encoding", "=", "'ascii'", "else", ":", "encoding", "=", "self", ".", "_detect_buffer_encoding", "(", "f", ")", "if", "encoding", "is", "None", ":", "raise", "UnicodeDecodeError", "(", "'None'", ",", "b''", ",", "0", ",", "0", ",", "'Unicode cannot be detected.'", ")", "if", "encoding", "!=", "BINARY_ENCODE", ":", "encoding", "=", "self", ".", "_verify_encoding", "(", "encoding", ")", "else", ":", "# pragma: no cover", "raise", "UnicodeDecodeError", "(", "'None'", ",", "b''", ",", "0", ",", "0", ",", "'Unicode detection is not applied to very large files!'", ")", "return", "encoding" ]
Guess the encoding and decode the content of the file.
[ "Guess", "the", "encoding", "and", "decode", "the", "content", "of", "the", "file", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L201-L222
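Worth noting is the contract this sets up with `_run_first` above: empty files short-circuit to 'ascii', and a failed detection is surfaced as `UnicodeDecodeError` so the caller can retry with the default encoding. A compact sketch of that fallback contract with a hypothetical detector:

import os

def guess_or_default(path, detector, default='utf-8'):
    # detection failure (None or UnicodeDecodeError) falls back to the default
    if os.path.getsize(path) == 0:
        return 'ascii'
    try:
        return detector(path) or default
    except UnicodeDecodeError:
        return default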
facelessuser/pyspelling
pyspelling/filters/__init__.py
Filter.sfilter
def sfilter(self, source):
    """Execute filter."""
    return [SourceText(source.text, source.context, source.encoding, 'text')]
python
def sfilter(self, source):
    """Execute filter."""
    return [SourceText(source.text, source.context, source.encoding, 'text')]
[ "def", "sfilter", "(", "self", ",", "source", ")", ":", "return", "[", "SourceText", "(", "source", ".", "text", ",", "source", ".", "context", ",", "source", ".", "encoding", ",", "'text'", ")", "]" ]
Execute filter.
[ "Execute", "filter", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/__init__.py#L246-L249
JMSwag/dsdev-utils
dsdev_utils/terminal.py
ask_yes_no
def ask_yes_no(question, default='no', answer=None):
    u"""Will ask a question and keeps prompting until answered.

    Args:
        question (str): Question to ask end user
        default (str): Default answer if user just press enter at prompt
        answer (str): Used for testing

    Returns:
        (bool) Meaning:
            True - Answer is yes
            False - Answer is no
    """
    default = default.lower()
    yes = [u'yes', u'ye', u'y']
    no = [u'no', u'n']
    if default in no:
        help_ = u'[N/y]?'
        default = False
    else:
        default = True
        help_ = u'[Y/n]?'
    while 1:
        display = question + '\n' + help_
        if answer is None:
            log.debug(u'Under None')
            answer = six.moves.input(display)
            answer = answer.lower()
        if answer == u'':
            log.debug(u'Under blank')
            return default
        if answer in yes:
            log.debug(u'Must be true')
            return True
        elif answer in no:
            log.debug(u'Must be false')
            return False
        else:
            sys.stdout.write(u'Please answer yes or no only!\n\n')
            sys.stdout.flush()
            answer = None
            six.moves.input(u'Press enter to continue')
            sys.stdout.write('\n\n\n\n\n')
            sys.stdout.flush()
python
def ask_yes_no(question, default='no', answer=None):
    u"""Will ask a question and keeps prompting until answered.

    Args:
        question (str): Question to ask end user
        default (str): Default answer if user just press enter at prompt
        answer (str): Used for testing

    Returns:
        (bool) Meaning:
            True - Answer is yes
            False - Answer is no
    """
    default = default.lower()
    yes = [u'yes', u'ye', u'y']
    no = [u'no', u'n']
    if default in no:
        help_ = u'[N/y]?'
        default = False
    else:
        default = True
        help_ = u'[Y/n]?'
    while 1:
        display = question + '\n' + help_
        if answer is None:
            log.debug(u'Under None')
            answer = six.moves.input(display)
            answer = answer.lower()
        if answer == u'':
            log.debug(u'Under blank')
            return default
        if answer in yes:
            log.debug(u'Must be true')
            return True
        elif answer in no:
            log.debug(u'Must be false')
            return False
        else:
            sys.stdout.write(u'Please answer yes or no only!\n\n')
            sys.stdout.flush()
            answer = None
            six.moves.input(u'Press enter to continue')
            sys.stdout.write('\n\n\n\n\n')
            sys.stdout.flush()
[ "def", "ask_yes_no", "(", "question", ",", "default", "=", "'no'", ",", "answer", "=", "None", ")", ":", "default", "=", "default", ".", "lower", "(", ")", "yes", "=", "[", "u'yes'", ",", "u'ye'", ",", "u'y'", "]", "no", "=", "[", "u'no'", ",", "u'n'", "]", "if", "default", "in", "no", ":", "help_", "=", "u'[N/y]?'", "default", "=", "False", "else", ":", "default", "=", "True", "help_", "=", "u'[Y/n]?'", "while", "1", ":", "display", "=", "question", "+", "'\\n'", "+", "help_", "if", "answer", "is", "None", ":", "log", ".", "debug", "(", "u'Under None'", ")", "answer", "=", "six", ".", "moves", ".", "input", "(", "display", ")", "answer", "=", "answer", ".", "lower", "(", ")", "if", "answer", "==", "u''", ":", "log", ".", "debug", "(", "u'Under blank'", ")", "return", "default", "if", "answer", "in", "yes", ":", "log", ".", "debug", "(", "u'Must be true'", ")", "return", "True", "elif", "answer", "in", "no", ":", "log", ".", "debug", "(", "u'Must be false'", ")", "return", "False", "else", ":", "sys", ".", "stdout", ".", "write", "(", "u'Please answer yes or no only!\\n\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "answer", "=", "None", "six", ".", "moves", ".", "input", "(", "u'Press enter to continue'", ")", "sys", ".", "stdout", ".", "write", "(", "'\\n\\n\\n\\n\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
u"""Will ask a question and keeps prompting until answered. Args: question (str): Question to ask end user default (str): Default answer if user just press enter at prompt answer (str): Used for testing Returns: (bool) Meaning: True - Answer is yes False - Answer is no
[ "u", "Will", "ask", "a", "question", "and", "keeps", "prompting", "until", "answered", "." ]
train
https://github.com/JMSwag/dsdev-utils/blob/5adbf9b3fd9fff92d1dd714423b08e26a5038e14/dsdev_utils/terminal.py#L192-L242
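The `answer` parameter makes the prompt loop testable without a TTY; for example:

# interactive: keeps prompting until yes/no (blank accepts the default)
# proceed = ask_yes_no('Overwrite the file?', default='no')

# non-interactive, e.g. in tests:
assert ask_yes_no('Overwrite?', default='no', answer='y') is True
assert ask_yes_no('Overwrite?', default='no', answer='') is False  # default wins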
JMSwag/dsdev-utils
dsdev_utils/terminal.py
get_correct_answer
def get_correct_answer(question, default=None, required=False,
                       answer=None, is_answer_correct=None):
    u"""Ask user a question and confirm answer

    Args:
        question (str): Question to ask user
        default (str): Default answer if no input from user
        required (str): Require user to input answer
        answer (str): Used for testing
        is_answer_correct (str): Used for testing
    """
    while 1:
        if default is None:
            msg = u' - No Default Available'
        else:
            msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
                   u'Use Default'.format(default))
        prompt = question + msg + u'\n--> '
        if answer is None:
            answer = six.moves.input(prompt)

        if answer == '' and required and default is not None:
            print(u'You have to enter a value\n\n')
            six.moves.input(u'Press enter to continue')
            print(u'\n\n')
            answer = None
            continue

        if answer == u'' and default is not None:
            answer = default

        _ans = ask_yes_no(u'You entered {}, is this '
                          u'correct?'.format(answer),
                          answer=is_answer_correct)
        if _ans:
            return answer
        else:
            answer = None
python
def get_correct_answer(question, default=None, required=False,
                       answer=None, is_answer_correct=None):
    u"""Ask user a question and confirm answer

    Args:
        question (str): Question to ask user
        default (str): Default answer if no input from user
        required (str): Require user to input answer
        answer (str): Used for testing
        is_answer_correct (str): Used for testing
    """
    while 1:
        if default is None:
            msg = u' - No Default Available'
        else:
            msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
                   u'Use Default'.format(default))
        prompt = question + msg + u'\n--> '
        if answer is None:
            answer = six.moves.input(prompt)

        if answer == '' and required and default is not None:
            print(u'You have to enter a value\n\n')
            six.moves.input(u'Press enter to continue')
            print(u'\n\n')
            answer = None
            continue

        if answer == u'' and default is not None:
            answer = default

        _ans = ask_yes_no(u'You entered {}, is this '
                          u'correct?'.format(answer),
                          answer=is_answer_correct)
        if _ans:
            return answer
        else:
            answer = None
[ "def", "get_correct_answer", "(", "question", ",", "default", "=", "None", ",", "required", "=", "False", ",", "answer", "=", "None", ",", "is_answer_correct", "=", "None", ")", ":", "while", "1", ":", "if", "default", "is", "None", ":", "msg", "=", "u' - No Default Available'", "else", ":", "msg", "=", "(", "u'\\n[DEFAULT] -> {}\\nPress Enter To '", "u'Use Default'", ".", "format", "(", "default", ")", ")", "prompt", "=", "question", "+", "msg", "+", "u'\\n--> '", "if", "answer", "is", "None", ":", "answer", "=", "six", ".", "moves", ".", "input", "(", "prompt", ")", "if", "answer", "==", "''", "and", "required", "and", "default", "is", "not", "None", ":", "print", "(", "u'You have to enter a value\\n\\n'", ")", "six", ".", "moves", ".", "input", "(", "u'Press enter to continue'", ")", "print", "(", "u'\\n\\n'", ")", "answer", "=", "None", "continue", "if", "answer", "==", "u''", "and", "default", "is", "not", "None", ":", "answer", "=", "default", "_ans", "=", "ask_yes_no", "(", "u'You entered {}, is this '", "u'correct?'", ".", "format", "(", "answer", ")", ",", "answer", "=", "is_answer_correct", ")", "if", "_ans", ":", "return", "answer", "else", ":", "answer", "=", "None" ]
u"""Ask user a question and confirm answer Args: question (str): Question to ask user default (str): Default answer if no input from user required (str): Require user to input answer answer (str): Used for testing is_answer_correct (str): Used for testing
[ "u", "Ask", "user", "a", "question", "and", "confirm", "answer" ]
train
https://github.com/JMSwag/dsdev-utils/blob/5adbf9b3fd9fff92d1dd714423b08e26a5038e14/dsdev_utils/terminal.py#L245-L284
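As with `ask_yes_no`, the extra keywords exist so the confirm loop can run headless. A non-interactive example (values are placeholders):

# blank answer falls back to the default, then 'y' confirms it
value = get_correct_answer('Project name?', default='demo',
                           answer='', is_answer_correct='y')
assert value == 'demo'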
facelessuser/pyspelling
pyspelling/__main__.py
main
def main():
    """Main."""
    parser = argparse.ArgumentParser(prog='spellcheck', description='Spell checking tool.')
    # Flag arguments
    parser.add_argument('--version', action='version', version=('%(prog)s ' + __version__))
    parser.add_argument('--debug', action='store_true', default=False, help=argparse.SUPPRESS)
    parser.add_argument('--verbose', '-v', action='count', default=0, help="Verbosity level.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--name', '-n', action='append', help="Specific spelling task by name to run.")
    group.add_argument('--group', '-g', action='append', help="Specific spelling task group to run.")
    parser.add_argument('--binary', '-b', action='store', default='', help="Provide path to spell checker's binary.")
    parser.add_argument('--config', '-c', action='store', default='', help="Spelling config.")
    parser.add_argument('--source', '-S', action='append', help="Specify override file pattern.")
    parser.add_argument(
        '--spellchecker', '-s', action='store', default='', help="Choose between aspell and hunspell"
    )
    args = parser.parse_args()

    return run(
        args.config,
        names=args.name,
        groups=args.group,
        binary=args.binary,
        spellchecker=args.spellchecker,
        sources=args.source,
        verbose=args.verbose,
        debug=args.debug
    )
python
def main():
    """Main."""
    parser = argparse.ArgumentParser(prog='spellcheck', description='Spell checking tool.')
    # Flag arguments
    parser.add_argument('--version', action='version', version=('%(prog)s ' + __version__))
    parser.add_argument('--debug', action='store_true', default=False, help=argparse.SUPPRESS)
    parser.add_argument('--verbose', '-v', action='count', default=0, help="Verbosity level.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--name', '-n', action='append', help="Specific spelling task by name to run.")
    group.add_argument('--group', '-g', action='append', help="Specific spelling task group to run.")
    parser.add_argument('--binary', '-b', action='store', default='', help="Provide path to spell checker's binary.")
    parser.add_argument('--config', '-c', action='store', default='', help="Spelling config.")
    parser.add_argument('--source', '-S', action='append', help="Specify override file pattern.")
    parser.add_argument(
        '--spellchecker', '-s', action='store', default='', help="Choose between aspell and hunspell"
    )
    args = parser.parse_args()

    return run(
        args.config,
        names=args.name,
        groups=args.group,
        binary=args.binary,
        spellchecker=args.spellchecker,
        sources=args.source,
        verbose=args.verbose,
        debug=args.debug
    )
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'spellcheck'", ",", "description", "=", "'Spell checking tool.'", ")", "# Flag arguments", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "(", "'%(prog)s '", "+", "__version__", ")", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "argparse", ".", "SUPPRESS", ")", "parser", ".", "add_argument", "(", "'--verbose'", ",", "'-v'", ",", "action", "=", "'count'", ",", "default", "=", "0", ",", "help", "=", "\"Verbosity level.\"", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "group", ".", "add_argument", "(", "'--name'", ",", "'-n'", ",", "action", "=", "'append'", ",", "help", "=", "\"Specific spelling task by name to run.\"", ")", "group", ".", "add_argument", "(", "'--group'", ",", "'-g'", ",", "action", "=", "'append'", ",", "help", "=", "\"Specific spelling task groun to run.\"", ")", "parser", ".", "add_argument", "(", "'--binary'", ",", "'-b'", ",", "action", "=", "'store'", ",", "default", "=", "''", ",", "help", "=", "\"Provide path to spell checker's binary.\"", ")", "parser", ".", "add_argument", "(", "'--config'", ",", "'-c'", ",", "action", "=", "'store'", ",", "default", "=", "''", ",", "help", "=", "\"Spelling config.\"", ")", "parser", ".", "add_argument", "(", "'--source'", ",", "'-S'", ",", "action", "=", "'append'", ",", "help", "=", "\"Specify override file pattern.\"", ")", "parser", ".", "add_argument", "(", "'--spellchecker'", ",", "'-s'", ",", "action", "=", "'store'", ",", "default", "=", "''", ",", "help", "=", "\"Choose between aspell and hunspell\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "run", "(", "args", ".", "config", ",", "names", "=", "args", ".", "name", ",", "groups", "=", "args", ".", "group", ",", "binary", "=", "args", ".", "binary", ",", "spellchecker", "=", "args", ".", "spellchecker", ",", "sources", "=", "args", ".", "source", ",", "verbose", "=", "args", ".", "verbose", ",", "debug", "=", "args", ".", "debug", ")" ]
Main.
[ "Main", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/__main__.py#L8-L36
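The flags map one-to-one onto `run()`'s keywords, so a typical invocation looks like the following (the installed command name may differ from the `prog='spellcheck'` declared above):

# $ spellcheck -c .pyspelling.yml -n markdown -s hunspell -b /usr/bin/hunspell -vv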
facelessuser/pyspelling
pyspelling/__main__.py
run
def run(config, **kwargs):
    """Run."""
    names = kwargs.get('names', [])
    groups = kwargs.get('groups', [])
    binary = kwargs.get('binary', '')
    spellchecker = kwargs.get('spellchecker', '')
    verbose = kwargs.get('verbose', 0)
    sources = kwargs.get('sources', [])
    debug = kwargs.get('debug', False)

    fail = False
    count = 0
    for results in spellcheck(
        config,
        names=names,
        groups=groups,
        binary=binary,
        checker=spellchecker,
        sources=sources,
        verbose=verbose,
        debug=debug
    ):
        count += 1
        if results.error:
            fail = True
            print('ERROR: %s -- %s' % (results.context, results.error))
        elif results.words:
            fail = True
            print('Misspelled words:\n<%s> %s' % (results.category, results.context))
            print('-' * 80)
            for word in results.words:
                print(word)
            print('-' * 80)
            print('')

    if fail:
        print('!!!Spelling check failed!!!')
    else:
        print('Spelling check passed :)')

    return fail
python
def run(config, **kwargs):
    """Run."""
    names = kwargs.get('names', [])
    groups = kwargs.get('groups', [])
    binary = kwargs.get('binary', '')
    spellchecker = kwargs.get('spellchecker', '')
    verbose = kwargs.get('verbose', 0)
    sources = kwargs.get('sources', [])
    debug = kwargs.get('debug', False)

    fail = False
    count = 0
    for results in spellcheck(
        config,
        names=names,
        groups=groups,
        binary=binary,
        checker=spellchecker,
        sources=sources,
        verbose=verbose,
        debug=debug
    ):
        count += 1
        if results.error:
            fail = True
            print('ERROR: %s -- %s' % (results.context, results.error))
        elif results.words:
            fail = True
            print('Misspelled words:\n<%s> %s' % (results.category, results.context))
            print('-' * 80)
            for word in results.words:
                print(word)
            print('-' * 80)
            print('')

    if fail:
        print('!!!Spelling check failed!!!')
    else:
        print('Spelling check passed :)')

    return fail
[ "def", "run", "(", "config", ",", "*", "*", "kwargs", ")", ":", "names", "=", "kwargs", ".", "get", "(", "'names'", ",", "[", "]", ")", "groups", "=", "kwargs", ".", "get", "(", "'groups'", ",", "[", "]", ")", "binary", "=", "kwargs", ".", "get", "(", "'binary'", ",", "''", ")", "spellchecker", "=", "kwargs", ".", "get", "(", "'spellchecker'", ",", "''", ")", "verbose", "=", "kwargs", ".", "get", "(", "'verbose'", ",", "0", ")", "sources", "=", "kwargs", ".", "get", "(", "'sources'", ",", "[", "]", ")", "debug", "=", "kwargs", ".", "get", "(", "'debug'", ",", "False", ")", "fail", "=", "False", "count", "=", "0", "for", "results", "in", "spellcheck", "(", "config", ",", "names", "=", "names", ",", "groups", "=", "groups", ",", "binary", "=", "binary", ",", "checker", "=", "spellchecker", ",", "sources", "=", "sources", ",", "verbose", "=", "verbose", ",", "debug", "=", "debug", ")", ":", "count", "+=", "1", "if", "results", ".", "error", ":", "fail", "=", "True", "print", "(", "'ERROR: %s -- %s'", "%", "(", "results", ".", "context", ",", "results", ".", "error", ")", ")", "elif", "results", ".", "words", ":", "fail", "=", "True", "print", "(", "'Misspelled words:\\n<%s> %s'", "%", "(", "results", ".", "category", ",", "results", ".", "context", ")", ")", "print", "(", "'-'", "*", "80", ")", "for", "word", "in", "results", ".", "words", ":", "print", "(", "word", ")", "print", "(", "'-'", "*", "80", ")", "print", "(", "''", ")", "if", "fail", ":", "print", "(", "'!!!Spelling check failed!!!'", ")", "else", ":", "print", "(", "'Spelling check passed :)'", ")", "return", "fail" ]
Run.
[ "Run", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/__main__.py#L39-L80
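Because `main` just forwards to it, `run` also works as a programmatic entry point; a minimal sketch with placeholder config and task names:

fail = run('.pyspelling.yml', names=['mkdocs'], verbose=1)
if fail:
    raise SystemExit(1)  # mirror the CLI's failure signal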
facelessuser/pyspelling
pyspelling/util/__init__.py
deprecated
def deprecated(message):  # pragma: no cover
    """
    Raise a `DeprecationWarning` when wrapped function/method is called.

    Borrowed from https://stackoverflow.com/a/48632082/866026
    """

    def deprecated_decorator(func):
        """Deprecation decorator."""

        @wraps(func)
        def deprecated_func(*args, **kwargs):
            """Display deprecation warning."""
            warnings.warn(
                "'{}' is deprecated. {}".format(func.__name__, message),
                category=DeprecationWarning,
                stacklevel=2
            )
            return func(*args, **kwargs)
        return deprecated_func
    return deprecated_decorator
python
def deprecated(message):  # pragma: no cover
    """
    Raise a `DeprecationWarning` when wrapped function/method is called.

    Borrowed from https://stackoverflow.com/a/48632082/866026
    """

    def deprecated_decorator(func):
        """Deprecation decorator."""

        @wraps(func)
        def deprecated_func(*args, **kwargs):
            """Display deprecation warning."""
            warnings.warn(
                "'{}' is deprecated. {}".format(func.__name__, message),
                category=DeprecationWarning,
                stacklevel=2
            )
            return func(*args, **kwargs)
        return deprecated_func
    return deprecated_decorator
[ "def", "deprecated", "(", "message", ")", ":", "# pragma: no cover", "def", "deprecated_decorator", "(", "func", ")", ":", "\"\"\"Deprecation decorator.\"\"\"", "@", "wraps", "(", "func", ")", "def", "deprecated_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Display deprecation warning.\"\"\"", "warnings", ".", "warn", "(", "\"'{}' is deprecated. {}\"", ".", "format", "(", "func", ".", "__name__", ",", "message", ")", ",", "category", "=", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "deprecated_func", "return", "deprecated_decorator" ]
Raise a `DeprecationWarning` when wrapped function/method is called.

Borrowed from https://stackoverflow.com/a/48632082/866026
[ "Raise", "a", "DeprecationWarning", "when", "wrapped", "function", "/", "method", "is", "called", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L18-L39
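Usage is the standard decorator-factory pattern:

@deprecated("Use 'new_func' instead.")
def old_func(x):
    return x

old_func(1)
# -> DeprecationWarning: 'old_func' is deprecated. Use 'new_func' instead.
#    (visible when DeprecationWarning filters allow it, e.g. python -W default)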
facelessuser/pyspelling
pyspelling/util/__init__.py
get_process
def get_process(cmd):
    """Get a command process."""
    if sys.platform.startswith('win'):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        process = subprocess.Popen(
            cmd,
            startupinfo=startupinfo,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            shell=False
        )
    else:
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            shell=False
        )
    return process
python
def get_process(cmd):
    """Get a command process."""
    if sys.platform.startswith('win'):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        process = subprocess.Popen(
            cmd,
            startupinfo=startupinfo,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            shell=False
        )
    else:
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            shell=False
        )
    return process
[ "def", "get_process", "(", "cmd", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "startupinfo", "=", "subprocess", ".", "STARTUPINFO", "(", ")", "startupinfo", ".", "dwFlags", "|=", "subprocess", ".", "STARTF_USESHOWWINDOW", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "startupinfo", "=", "startupinfo", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ")", "else", ":", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ")", "return", "process" ]
Get a command process.
[ "Get", "a", "command", "process", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L52-L74
facelessuser/pyspelling
pyspelling/util/__init__.py
get_process_output
def get_process_output(process, encoding=None):
    """Get the output from the process."""
    output = process.communicate()
    returncode = process.returncode

    if not encoding:
        try:
            encoding = sys.stdout.encoding
        except Exception:
            encoding = locale.getpreferredencoding()

    if returncode != 0:
        raise RuntimeError("Runtime Error: %s" % (output[0].rstrip().decode(encoding, errors='replace')))

    return output[0].decode(encoding, errors='replace')
python
def get_process_output(process, encoding=None):
    """Get the output from the process."""
    output = process.communicate()
    returncode = process.returncode

    if not encoding:
        try:
            encoding = sys.stdout.encoding
        except Exception:
            encoding = locale.getpreferredencoding()

    if returncode != 0:
        raise RuntimeError("Runtime Error: %s" % (output[0].rstrip().decode(encoding, errors='replace')))

    return output[0].decode(encoding, errors='replace')
[ "def", "get_process_output", "(", "process", ",", "encoding", "=", "None", ")", ":", "output", "=", "process", ".", "communicate", "(", ")", "returncode", "=", "process", ".", "returncode", "if", "not", "encoding", ":", "try", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "except", "Exception", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "if", "returncode", "!=", "0", ":", "raise", "RuntimeError", "(", "\"Runtime Error: %s\"", "%", "(", "output", "[", "0", "]", ".", "rstrip", "(", ")", ".", "decode", "(", "encoding", ",", "errors", "=", "'replace'", ")", ")", ")", "return", "output", "[", "0", "]", ".", "decode", "(", "encoding", ",", "errors", "=", "'replace'", ")" ]
Get the output from the process.
[ "Get", "the", "output", "from", "the", "process", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L77-L92
facelessuser/pyspelling
pyspelling/util/__init__.py
call
def call(cmd, input_file=None, input_text=None, encoding=None):
    """Call with arguments."""
    process = get_process(cmd)

    if input_file is not None:
        with open(input_file, 'rb') as f:
            process.stdin.write(f.read())
    if input_text is not None:
        process.stdin.write(input_text)

    return get_process_output(process, encoding)
python
def call(cmd, input_file=None, input_text=None, encoding=None):
    """Call with arguments."""
    process = get_process(cmd)

    if input_file is not None:
        with open(input_file, 'rb') as f:
            process.stdin.write(f.read())
    if input_text is not None:
        process.stdin.write(input_text)

    return get_process_output(process, encoding)
[ "def", "call", "(", "cmd", ",", "input_file", "=", "None", ",", "input_text", "=", "None", ",", "encoding", "=", "None", ")", ":", "process", "=", "get_process", "(", "cmd", ")", "if", "input_file", "is", "not", "None", ":", "with", "open", "(", "input_file", ",", "'rb'", ")", "as", "f", ":", "process", ".", "stdin", ".", "write", "(", "f", ".", "read", "(", ")", ")", "if", "input_text", "is", "not", "None", ":", "process", ".", "stdin", ".", "write", "(", "input_text", ")", "return", "get_process_output", "(", "process", ",", "encoding", ")" ]
Call with arguments.
[ "Call", "with", "arguments", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L95-L106
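`call` composes the two helpers above: spawn with `get_process`, feed stdin raw bytes (note the caller must encode `input_text` itself), then decode the result with `get_process_output`. An illustrative round trip through a POSIX utility:

out = call(['sort'], input_text=b'b\na\n')
print(out)  # 'a\nb\n'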
facelessuser/pyspelling
pyspelling/util/__init__.py
call_spellchecker
def call_spellchecker(cmd, input_text=None, encoding=None):
    """Call spell checker with arguments."""
    process = get_process(cmd)

    # A buffer has been provided
    if input_text is not None:
        for line in input_text.splitlines():
            # Hunspell truncates lines at `0x1fff` (at least on Windows this has been observed)
            # Avoid truncation by chunking the line on white space and inserting a new line to break it.
            offset = 0
            end = len(line)
            while True:
                chunk_end = offset + 0x1fff
                m = None if chunk_end >= end else RE_LAST_SPACE_IN_CHUNK.search(line, offset, chunk_end)
                if m:
                    chunk_end = m.start(1)
                    chunk = line[offset:m.start(1)]
                    offset = m.end(1)
                else:
                    chunk = line[offset:chunk_end]
                    offset = chunk_end
                # Avoid wasted calls to empty strings
                if chunk and not chunk.isspace():
                    process.stdin.write(chunk + b'\n')
                if offset >= end:
                    break

    return get_process_output(process, encoding)
python
def call_spellchecker(cmd, input_text=None, encoding=None):
    """Call spell checker with arguments."""
    process = get_process(cmd)

    # A buffer has been provided
    if input_text is not None:
        for line in input_text.splitlines():
            # Hunspell truncates lines at `0x1fff` (at least on Windows this has been observed)
            # Avoid truncation by chunking the line on white space and inserting a new line to break it.
            offset = 0
            end = len(line)
            while True:
                chunk_end = offset + 0x1fff
                m = None if chunk_end >= end else RE_LAST_SPACE_IN_CHUNK.search(line, offset, chunk_end)
                if m:
                    chunk_end = m.start(1)
                    chunk = line[offset:m.start(1)]
                    offset = m.end(1)
                else:
                    chunk = line[offset:chunk_end]
                    offset = chunk_end
                # Avoid wasted calls to empty strings
                if chunk and not chunk.isspace():
                    process.stdin.write(chunk + b'\n')
                if offset >= end:
                    break

    return get_process_output(process, encoding)
[ "def", "call_spellchecker", "(", "cmd", ",", "input_text", "=", "None", ",", "encoding", "=", "None", ")", ":", "process", "=", "get_process", "(", "cmd", ")", "# A buffer has been provided", "if", "input_text", "is", "not", "None", ":", "for", "line", "in", "input_text", ".", "splitlines", "(", ")", ":", "# Hunspell truncates lines at `0x1fff` (at least on Windows this has been observed)", "# Avoid truncation by chunking the line on white space and inserting a new line to break it.", "offset", "=", "0", "end", "=", "len", "(", "line", ")", "while", "True", ":", "chunk_end", "=", "offset", "+", "0x1fff", "m", "=", "None", "if", "chunk_end", ">=", "end", "else", "RE_LAST_SPACE_IN_CHUNK", ".", "search", "(", "line", ",", "offset", ",", "chunk_end", ")", "if", "m", ":", "chunk_end", "=", "m", ".", "start", "(", "1", ")", "chunk", "=", "line", "[", "offset", ":", "m", ".", "start", "(", "1", ")", "]", "offset", "=", "m", ".", "end", "(", "1", ")", "else", ":", "chunk", "=", "line", "[", "offset", ":", "chunk_end", "]", "offset", "=", "chunk_end", "# Avoid wasted calls to empty strings", "if", "chunk", "and", "not", "chunk", ".", "isspace", "(", ")", ":", "process", ".", "stdin", ".", "write", "(", "chunk", "+", "b'\\n'", ")", "if", "offset", ">=", "end", ":", "break", "return", "get_process_output", "(", "process", ",", "encoding", ")" ]
Call spell checker with arguments.
[ "Call", "spell", "checker", "with", "arguments", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L109-L137
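A standalone sketch of the chunking idea above: break an over-long bytes line at the last space before the 0x1fff limit so no word is truncated. RE_LAST_SPACE_IN_CHUNK is not shown in this record, so this sketch uses bytes.rfind instead of the module's regex; the effect should be the same:

    def chunk_line(line, limit=0x1fff):
        chunks = []
        offset, end = 0, len(line)
        while offset < end:
            chunk_end = min(offset + limit, end)
            cut = line.rfind(b' ', offset, chunk_end) if chunk_end < end else -1
            if cut > offset:
                chunk_end = cut  # back up to the last space inside the window
            chunk = line[offset:chunk_end]
            if chunk and not chunk.isspace():  # skip wasted calls on empty chunks
                chunks.append(chunk)
            offset = chunk_end + 1 if cut > offset else chunk_end
        return chunks

    assert all(len(c) <= 0x1fff for c in chunk_line(b'word ' * 5000))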
facelessuser/pyspelling
pyspelling/util/__init__.py
random_name_gen
def random_name_gen(size=6): """Generate a random python attribute name.""" return ''.join( [random.choice(string.ascii_uppercase)] + [random.choice(string.ascii_uppercase + string.digits) for i in range(size - 1)] ) if size > 0 else ''
python
def random_name_gen(size=6): """Generate a random python attribute name.""" return ''.join( [random.choice(string.ascii_uppercase)] + [random.choice(string.ascii_uppercase + string.digits) for i in range(size - 1)] ) if size > 0 else ''
[ "def", "random_name_gen", "(", "size", "=", "6", ")", ":", "return", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", ")", "]", "+", "[", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", "+", "string", ".", "digits", ")", "for", "i", "in", "range", "(", "size", "-", "1", ")", "]", ")", "if", "size", ">", "0", "else", "''" ]
Generate a random python attribute name.
[ "Generate", "a", "random", "python", "attribute", "name", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L140-L146
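A quick usage sketch of the function above; the first character is always an ASCII uppercase letter, so the result is a valid Python attribute name (digits may only appear after the first character):

    name = random_name_gen()  # e.g. 'QX7R2B' -- output varies per run
    assert len(name) == 6 and name[0].isalpha() and name.isidentifier()
    assert random_name_gen(0) == ''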
facelessuser/pyspelling
pyspelling/util/__init__.py
yaml_load
def yaml_load(source, loader=yaml.Loader): """ Wrap PyYaml's loader so we can extend it to suit our needs. Load all strings as Unicode: http://stackoverflow.com/a/2967461/3609487. """ def construct_yaml_str(self, node): """Override the default string handling function to always return Unicode objects.""" return self.construct_scalar(node) class Loader(loader): """Define a custom loader to leave the global loader unaltered.""" # Attach our Unicode constructor to our custom loader ensuring all strings # will be Unicode on translation. Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str) return yaml.load(source, Loader)
python
def yaml_load(source, loader=yaml.Loader): """ Wrap PyYaml's loader so we can extend it to suit our needs. Load all strings as Unicode: http://stackoverflow.com/a/2967461/3609487. """ def construct_yaml_str(self, node): """Override the default string handling function to always return Unicode objects.""" return self.construct_scalar(node) class Loader(loader): """Define a custom loader to leave the global loader unaltered.""" # Attach our Unicode constructor to our custom loader ensuring all strings # will be Unicode on translation. Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str) return yaml.load(source, Loader)
[ "def", "yaml_load", "(", "source", ",", "loader", "=", "yaml", ".", "Loader", ")", ":", "def", "construct_yaml_str", "(", "self", ",", "node", ")", ":", "\"\"\"Override the default string handling function to always return Unicode objects.\"\"\"", "return", "self", ".", "construct_scalar", "(", "node", ")", "class", "Loader", "(", "loader", ")", ":", "\"\"\"Define a custom loader to leave the global loader unaltered.\"\"\"", "# Attach our Unicode constructor to our custom loader ensuring all strings", "# will be Unicode on translation.", "Loader", ".", "add_constructor", "(", "'tag:yaml.org,2002:str'", ",", "construct_yaml_str", ")", "return", "yaml", ".", "load", "(", "source", ",", "Loader", ")" ]
Wrap PyYaml's loader so we can extend it to suit our needs. Load all strings as Unicode: http://stackoverflow.com/a/2967461/3609487.
[ "Wrap", "PyYaml", "s", "loader", "so", "we", "can", "extend", "it", "to", "suit", "our", "needs", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L149-L167
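A usage sketch of yaml_load (assumes PyYAML is installed): with the custom constructor attached, every scalar string is constructed as str, which mattered mainly on Python 2 where the stock loader could return bytes for ASCII-only scalars:

    data = yaml_load('greeting: hello\ncount: 3')
    assert data == {'greeting': 'hello', 'count': 3}
    assert isinstance(data['greeting'], str)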
facelessuser/pyspelling
pyspelling/util/__init__.py
read_config
def read_config(file_name): """Read configuration.""" config = {} for name in (['.pyspelling.yml', '.spelling.yml'] if not file_name else [file_name]): if os.path.exists(name): if not file_name and name == '.spelling.yml': warn_deprecated( "Using '.spelling.yml' as the default is deprecated. Default config is now '.pyspelling.yml'" ) with codecs.open(name, 'r', encoding='utf-8') as f: config = yaml_load(f.read()) break return config
python
def read_config(file_name): """Read configuration.""" config = {} for name in (['.pyspelling.yml', '.spelling.yml'] if not file_name else [file_name]): if os.path.exists(name): if not file_name and name == '.spelling.yml': warn_deprecated( "Using '.spelling.yml' as the default is deprecated. Default config is now '.pyspelling.yml'" ) with codecs.open(name, 'r', encoding='utf-8') as f: config = yaml_load(f.read()) break return config
[ "def", "read_config", "(", "file_name", ")", ":", "config", "=", "{", "}", "for", "name", "in", "(", "[", "'.pyspelling.yml'", ",", "'.spelling.yml'", "]", "if", "not", "file_name", "else", "[", "file_name", "]", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "if", "not", "file_name", "and", "name", "==", "'.spelling.yml'", ":", "warn_deprecated", "(", "\"Using '.spelling.yml' as the default is deprecated. Default config is now '.pyspelling.yml'\"", ")", "with", "codecs", ".", "open", "(", "name", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "config", "=", "yaml_load", "(", "f", ".", "read", "(", ")", ")", "break", "return", "config" ]
Read configuration.
[ "Read", "configuration", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L170-L183
runfalk/spans
spans/_compat.py
fix_timedelta_repr
def fix_timedelta_repr(func): """ Account for the repr change of timedelta in Python 3.7 and above in docstrings. This is needed to make some doctests pass on Python 3.7 and above. This change was introduced by `bpo-30302 <https://bugs.python.org/issue30302>`_ """ # We don't need to do anything if we're not on 3.7 or above if version < (3, 7): return func def fix_timedelta(match): values = match.group(1).split(", ") param_repr = ", ".join( "{}={}".format(param, value) for param, value in zip(("days", "seconds", "microseconds"), values) if value != "0" ) # If we have a zero length timedelta it should be represented as # timedelta(0), i.e. without named parameters if not param_repr: param_repr = "0" return "timedelta({})".format(param_repr) func.__doc__ = re.sub(r"timedelta\(([^)]+)\)", fix_timedelta, func.__doc__) return func
python
def fix_timedelta_repr(func): """ Account for the repr change of timedelta in Python 3.7 and above in docstrings. This is needed to make some doctests pass on Python 3.7 and above. This change was introduced by `bpo-30302 <https://bugs.python.org/issue30302>`_ """ # We don't need to do anything if we're not on 3.7 or above if version < (3, 7): return func def fix_timedelta(match): values = match.group(1).split(", ") param_repr = ", ".join( "{}={}".format(param, value) for param, value in zip(("days", "seconds", "microseconds"), values) if value != "0" ) # If we have a zero length timedelta it should be represented as # timedelta(0), i.e. without named parameters if not param_repr: param_repr = "0" return "timedelta({})".format(param_repr) func.__doc__ = re.sub(r"timedelta\(([^)]+)\)", fix_timedelta, func.__doc__) return func
[ "def", "fix_timedelta_repr", "(", "func", ")", ":", "# We don't need to do anything if we're not on 3.7 or above", "if", "version", "<", "(", "3", ",", "7", ")", ":", "return", "func", "def", "fix_timedelta", "(", "match", ")", ":", "values", "=", "match", ".", "group", "(", "1", ")", ".", "split", "(", "\", \"", ")", "param_repr", "=", "\", \"", ".", "join", "(", "\"{}={}\"", ".", "format", "(", "param", ",", "value", ")", "for", "param", ",", "value", "in", "zip", "(", "(", "\"days\"", ",", "\"seconds\"", ",", "\"microseconds\"", ")", ",", "values", ")", "if", "value", "!=", "\"0\"", ")", "# If we have a zero length timedelta it should be represented as", "# timedelta(0), i.e. without named parameters", "if", "not", "param_repr", ":", "param_repr", "=", "\"0\"", "return", "\"timedelta({})\"", ".", "format", "(", "param_repr", ")", "func", ".", "__doc__", "=", "re", ".", "sub", "(", "r\"timedelta\\(([^)]+)\\)\"", ",", "fix_timedelta", ",", "func", ".", "__doc__", ")", "return", "func" ]
Account for the repr change of timedelta in Python 3.7 and above in docstrings. This is needed to make some doctests pass on Python 3.7 and above. This change was introduced by `bpo-30302 <https://bugs.python.org/issue30302>`_
[ "Account", "repr", "change", "for", "timedelta", "in", "Python", "3", ".", "7", "and", "above", "in", "docstrings", "." ]
train
https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/spans/_compat.py#L69-L96
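A worked sketch of the substitution: on Python 3.7+ a positional timedelta(0, 3600) in a doctest is rewritten to the keyword form the new repr produces. The module-level `version` the decorator checks is assumed to mirror sys.version_info (it is not shown in this record):

    import sys

    def demo():
        """Returns timedelta(0, 3600)."""

    demo = fix_timedelta_repr(demo)
    if sys.version_info >= (3, 7):
        assert demo.__doc__ == 'Returns timedelta(seconds=3600).'
    else:
        assert demo.__doc__ == 'Returns timedelta(0, 3600).'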
j0ack/flask-codemirror
flask_codemirror/__init__.py
CodeMirrorHeaders._get_tag
def _get_tag(self, url, tag, print_warn=True): """Check if url is available and returns given tag type :param url: url to content relative to base url :param tag: tag type to return :param print_warn: if True print warn when url is unavailable """ # construct complete url complete_url = urljoin(self.base_url, url) # check if exists if requests.get('http:' + complete_url).ok: # construct tag if tag == 'script': return '<script src="{0}"></script>'.format(complete_url) elif tag == 'stylesheet': return '<link rel="stylesheet" href="{0}">'.format(complete_url) else: warnings.warn('Given tag is not valid') elif print_warn: warnings.warn('Url {0} not valid'.format(complete_url)) return None
python
def _get_tag(self, url, tag, print_warn=True): """Check if url is available and returns given tag type :param url: url to content relative to base url :param tag: tag type to return :param print_warn: if True print warn when url is unavailable """ # construct complete url complete_url = urljoin(self.base_url, url) # check if exists if requests.get('http:' + complete_url).ok: # construct tag if tag == 'script': return '<script src="{0}"></script>'.format(complete_url) elif tag == 'stylesheet': return '<link rel="stylesheet" href="{0}">'.format(complete_url) else: warnings.warn('Given tag is not valid') elif print_warn: warnings.warn('Url {0} not valid'.format(complete_url)) return None
[ "def", "_get_tag", "(", "self", ",", "url", ",", "tag", ",", "print_warn", "=", "True", ")", ":", "# construct complete url", "complete_url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url", ")", "# check if exists", "if", "requests", ".", "get", "(", "'http:'", "+", "complete_url", ")", ".", "ok", ":", "# construct tag", "if", "tag", "==", "'script'", ":", "return", "'<script src=\"{0}\"></script>'", ".", "format", "(", "complete_url", ")", "elif", "tag", "==", "'stylesheet'", ":", "return", "'<link rel=\"stylesheet\" href=\"{0}\">'", ".", "format", "(", "complete_url", ")", "else", ":", "warnings", ".", "warn", "(", "'Given tag is not valid'", ")", "elif", "print_warn", ":", "warnings", ".", "warn", "(", "'Url {0} not valid'", ".", "format", "(", "complete_url", ")", ")", "return", "None" ]
Check if url is available and returns given tag type :param url: url to content relative to base url :param tag: tag type to return :param print_warn: if True print warn when url is unavailable
[ "Check", "if", "url", "is", "available", "and", "returns", "given", "tag", "type", ":", "param", "url", ":", "url", "to", "content", "relative", "to", "base", "url", ":", "param", "anchor", ":", "anchor", "type", "to", "return", ":", "param", "print_warn", ":", "if", "True", "print", "warn", "when", "url", "is", "unavailable" ]
train
https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/__init__.py#L71-L90
j0ack/flask-codemirror
flask_codemirror/__init__.py
CodeMirrorHeaders.include_codemirror
def include_codemirror(self): """Include resources in pages""" contents = [] # base js = self._get_tag('codemirror.js', 'script') css = self._get_tag('codemirror.css', 'stylesheet') if js and css: contents.append(js) contents.append(css) # languages for language in self.languages: url = self.__class__.LANGUAGE_REL_URL.format(language) js = self._get_tag(url, 'script') if js: contents.append(js) # theme if self.theme: url = self.__class__.THEME_REL_URL.format(self.theme) css = self._get_tag(url, 'stylesheet') if css: contents.append(css) # addons if self.addons: # add to list for addon_type, name in self.addons: url = self.__class__.ADDON_REL_URL.format(addon_type, name) js = self._get_tag(url, 'script') if js: contents.append(js) # if there is a css file relative to this addon url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name) css = self._get_tag(url, 'stylesheet', False) if css: contents.append(css) # return html return Markup('\n'.join(contents))
python
def include_codemirror(self): """Include resources in pages""" contents = [] # base js = self._get_tag('codemirror.js', 'script') css = self._get_tag('codemirror.css', 'stylesheet') if js and css: contents.append(js) contents.append(css) # languages for language in self.languages: url = self.__class__.LANGUAGE_REL_URL.format(language) js = self._get_tag(url, 'script') if js: contents.append(js) # theme if self.theme: url = self.__class__.THEME_REL_URL.format(self.theme) css = self._get_tag(url, 'stylesheet') if css: contents.append(css) # addons if self.addons: # add to list for addon_type, name in self.addons: url = self.__class__.ADDON_REL_URL.format(addon_type, name) js = self._get_tag(url, 'script') if js: contents.append(js) # if there is a css file relative to this addon url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name) css = self._get_tag(url, 'stylesheet', False) if css: contents.append(css) # return html return Markup('\n'.join(contents))
[ "def", "include_codemirror", "(", "self", ")", ":", "contents", "=", "[", "]", "# base", "js", "=", "self", ".", "_get_tag", "(", "'codemirror.js'", ",", "'script'", ")", "css", "=", "self", ".", "_get_tag", "(", "'codemirror.css'", ",", "'stylesheet'", ")", "if", "js", "and", "css", ":", "contents", ".", "append", "(", "js", ")", "contents", ".", "append", "(", "css", ")", "# languages", "for", "language", "in", "self", ".", "languages", ":", "url", "=", "self", ".", "__class__", ".", "LANGUAGE_REL_URL", ".", "format", "(", "language", ")", "js", "=", "self", ".", "_get_tag", "(", "url", ",", "'script'", ")", "if", "js", ":", "contents", ".", "append", "(", "js", ")", "# theme", "if", "self", ".", "theme", ":", "url", "=", "self", ".", "__class__", ".", "THEME_REL_URL", ".", "format", "(", "self", ".", "theme", ")", "css", "=", "self", ".", "_get_tag", "(", "url", ",", "'stylesheet'", ")", "if", "css", ":", "contents", ".", "append", "(", "css", ")", "# addons", "if", "self", ".", "addons", ":", "# add to list", "for", "addon_type", ",", "name", "in", "self", ".", "addons", ":", "url", "=", "self", ".", "__class__", ".", "ADDON_REL_URL", ".", "format", "(", "addon_type", ",", "name", ")", "js", "=", "self", ".", "_get_tag", "(", "url", ",", "'script'", ")", "if", "js", ":", "contents", ".", "append", "(", "js", ")", "# if there is a css file relative to this addon", "url", "=", "self", ".", "__class__", ".", "ADDON_CSS_REL_URL", ".", "format", "(", "addon_type", ",", "name", ")", "css", "=", "self", ".", "_get_tag", "(", "url", ",", "'stylesheet'", ",", "False", ")", "if", "css", ":", "contents", ".", "append", "(", "css", ")", "# return html", "return", "Markup", "(", "'\\n'", ".", "join", "(", "contents", ")", ")" ]
Include resources in pages
[ "Include", "resources", "in", "pages" ]
train
https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/__init__.py#L92-L127
j0ack/flask-codemirror
flask_codemirror/__init__.py
CodeMirror.init_app
def init_app(self, app): """Register CodeMirror as a Flask extension :param app: Flask instance """ if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['codemirror'] = CodeMirrorHeaders(app.config) app.context_processor(self.context_processor)
python
def init_app(self, app): """Register CodeMirror as a Flask extension :param app: Flask instance """ if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['codemirror'] = CodeMirrorHeaders(app.config) app.context_processor(self.context_processor)
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "app", ".", "extensions", "=", "{", "}", "app", ".", "extensions", "[", "'codemirror'", "]", "=", "CodeMirrorHeaders", "(", "app", ".", "config", ")", "app", ".", "context_processor", "(", "self", ".", "context_processor", ")" ]
Register CodeMirror as a Flask extension :param app: Flask instance
[ "Register", "CodeMirror", "as", "a", "Flask", "extension", ":", "param", "app", ":", "Flask", "instance" ]
train
https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/__init__.py#L138-L145
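A usage sketch of the standard Flask init-app pattern this follows, assuming the usual optional-app constructor and the extension's CODEMIRROR_LANGUAGES config key (both assumptions; only init_app itself is shown in this record):

    from flask import Flask

    app = Flask(__name__)
    app.config['CODEMIRROR_LANGUAGES'] = ['python']  # assumed config key
    codemirror = CodeMirror()
    codemirror.init_app(app)
    assert 'codemirror' in app.extensions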
sloria/tinynetrc
tinynetrc.py
Netrc.format
def format(self): """Dump the class data in the format of a .netrc file.""" self._netrc.hosts = dedictify_machines(self.machines) rep = "" for host in self._netrc.hosts.keys(): attrs = self._netrc.hosts[host] rep += "machine {host}\n\tlogin {attrs[0]}\n".format(host=host, attrs=attrs) if attrs[1]: rep += "\taccount {attrs[1]}\n".format(attrs=attrs) rep += "\tpassword {attrs[2]}\n".format(attrs=attrs) for macro in self._netrc.macros.keys(): rep += "macdef {macro}\n".format(macro=macro) for line in self._netrc.macros[macro]: rep += line rep += "\n" return rep
python
def format(self): """Dump the class data in the format of a .netrc file.""" self._netrc.hosts = dedictify_machines(self.machines) rep = "" for host in self._netrc.hosts.keys(): attrs = self._netrc.hosts[host] rep += "machine {host}\n\tlogin {attrs[0]}\n".format(host=host, attrs=attrs) if attrs[1]: rep += "\taccount {attrs[1]}\n".format(attrs=attrs) rep += "\tpassword {attrs[2]}\n".format(attrs=attrs) for macro in self._netrc.macros.keys(): rep += "macdef {macro}\n".format(macro=macro) for line in self._netrc.macros[macro]: rep += line rep += "\n" return rep
[ "def", "format", "(", "self", ")", ":", "self", ".", "_netrc", ".", "hosts", "=", "dedictify_machines", "(", "self", ".", "machines", ")", "rep", "=", "\"\"", "for", "host", "in", "self", ".", "_netrc", ".", "hosts", ".", "keys", "(", ")", ":", "attrs", "=", "self", ".", "_netrc", ".", "hosts", "[", "host", "]", "rep", "+=", "\"machine {host}\\n\\tlogin {attrs[0]}\\n\"", ".", "format", "(", "host", "=", "host", ",", "attrs", "=", "attrs", ")", "if", "attrs", "[", "1", "]", ":", "rep", "+=", "\"\\taccount {attrs[1]}\\n\"", ".", "format", "(", "attrs", "=", "attrs", ")", "rep", "+=", "\"\\tpassword {attrs[2]}\\n\"", ".", "format", "(", "attrs", "=", "attrs", ")", "for", "macro", "in", "self", ".", "_netrc", ".", "macros", ".", "keys", "(", ")", ":", "rep", "+=", "\"macdef {macro}\\n\"", ".", "format", "(", "macro", "=", "macro", ")", "for", "line", "in", "self", ".", "_netrc", ".", "macros", "[", "macro", "]", ":", "rep", "+=", "line", "rep", "+=", "\"\\n\"", "return", "rep" ]
Dump the class data in the format of a .netrc file.
[ "Dump", "the", "class", "data", "in", "the", "format", "of", "a", ".", "netrc", "file", "." ]
train
https://github.com/sloria/tinynetrc/blob/1d65069e520dce969b6740fa49bf87a04a1d7b59/tinynetrc.py#L84-L100
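A standalone sketch of the serialization loop above, with hosts already in the (login, account, password) tuple form that dedictify_machines (not shown in this record) produces; note that empty accounts are skipped:

    hosts = {'api.example.com': ('user', '', 'hunter2')}
    rep = ''
    for host, attrs in hosts.items():
        rep += 'machine {host}\n\tlogin {attrs[0]}\n'.format(host=host, attrs=attrs)
        if attrs[1]:
            rep += '\taccount {attrs[1]}\n'.format(attrs=attrs)
        rep += '\tpassword {attrs[2]}\n'.format(attrs=attrs)
    assert rep == 'machine api.example.com\n\tlogin user\n\tpassword hunter2\n'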
KeithSSmith/switcheo-python
switcheo/neo/utils.py
neo_get_scripthash_from_address
def neo_get_scripthash_from_address(address): """ Convert a Public Address String to a ScriptHash (Address) String. :param address: The Public address to convert. :type address: str :return: String containing the converted ScriptHash. """ hash_bytes = binascii.hexlify(base58.b58decode_check(address)) return reverse_hex(hash_bytes[2:].decode('utf-8'))
python
def neo_get_scripthash_from_address(address): """ Convert a Public Address String to a ScriptHash (Address) String. :param address: The Public address to convert. :type address: str :return: String containing the converted ScriptHash. """ hash_bytes = binascii.hexlify(base58.b58decode_check(address)) return reverse_hex(hash_bytes[2:].decode('utf-8'))
[ "def", "neo_get_scripthash_from_address", "(", "address", ")", ":", "hash_bytes", "=", "binascii", ".", "hexlify", "(", "base58", ".", "b58decode_check", "(", "address", ")", ")", "return", "reverse_hex", "(", "hash_bytes", "[", "2", ":", "]", ".", "decode", "(", "'utf-8'", ")", ")" ]
Convert a Public Address String to a ScriptHash (Address) String. :param address: The Public address to convert. :type address: str :return: String containing the converted ScriptHash.
[ "Convert", "a", "Public", "Address", "String", "to", "a", "ScriptHash", "(", "Address", ")", "String", "." ]
train
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/neo/utils.py#L51-L60
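A standalone sketch of the two steps: b58decode_check strips the 4-byte checksum, the slice drops the one-byte version prefix, and the byte order is flipped because NEO stores script hashes little-endian. reverse_hex is not shown in this record; it is assumed to reverse a hex string two digits (one byte) at a time:

    def reverse_hex(h):
        # Reverse a hex string byte-pair by byte-pair: '0a0b0c' -> '0c0b0a'.
        return ''.join(h[i:i + 2] for i in range(len(h) - 2, -2, -2))

    assert reverse_hex('0a0b0c') == '0c0b0a'
    # neo_get_scripthash_from_address(addr) is then just:
    #   reverse_hex(hexlify(b58decode_check(addr))[2:].decode('utf-8'))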
runfalk/spans
setup.py
rst_preprocess
def rst_preprocess(file): """ Preprocess reST file to support Sphinx like include directive. Includes are relative to the current working directory. """ with open(file) as fp: return re.sub( r"^\.\.\s+include:: (.*?)$", lambda x: (rst_preprocess(x.group(1)) or "").rstrip(), fp.read(), flags=re.MULTILINE)
python
def rst_preprocess(file): """ Preprocess reST file to support Sphinx like include directive. Includes are relative to the current working directory. """ with open(file) as fp: return re.sub( r"^\.\.\s+include:: (.*?)$", lambda x: (rst_preprocess(x.group(1)) or "").rstrip(), fp.read(), flags=re.MULTILINE)
[ "def", "rst_preprocess", "(", "file", ")", ":", "with", "open", "(", "file", ")", "as", "fp", ":", "return", "re", ".", "sub", "(", "\"^\\.\\.\\s+include:: (.*?)$\"", ",", "lambda", "x", ":", "(", "rst_preprocess", "(", "x", ".", "group", "(", "1", ")", ")", "or", "\"\"", ")", ".", "rstrip", "(", ")", ",", "fp", ".", "read", "(", ")", ",", "flags", "=", "re", ".", "MULTILINE", ")" ]
Preprocess reST file to support Sphinx like include directive. Includes are relative to the current working directory.
[ "Preprocess", "reST", "file", "to", "support", "Sphinx", "like", "include", "directive", ".", "Includes", "are", "relative", "to", "the", "current", "working", "directory", "." ]
train
https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/setup.py#L113-L124
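A usage sketch of rst_preprocess: an `.. include::` line is replaced, recursively, by the contents of the named file, resolved against the current working directory (the file names here are illustrative):

    with open('other.rst', 'w') as fp:
        fp.write('included text\n')
    with open('main.rst', 'w') as fp:
        fp.write('before\n.. include:: other.rst\nafter\n')
    assert rst_preprocess('main.rst') == 'before\nincluded text\nafter\n'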
KeithSSmith/switcheo-python
switcheo/utils.py
num2hexstring
def num2hexstring(number, size=1, little_endian=False): """ Converts a number to a big endian hexstring of a suitable size, optionally little endian :param {number} number :param {number} size - The required size in bytes, eg 1 for Uint8, 2 for Uint16. Defaults to 1. :param {boolean} little_endian - Encode the hex in little endian form :return {string} """ # if (type(number) != = 'number') throw new Error('num must be numeric') # if (num < 0) throw new RangeError('num is unsigned (>= 0)') # if (size % 1 !== 0) throw new Error('size must be a whole integer') # if (!Number.isSafeInteger(num)) throw new RangeError(`num (${num}) must be a safe integer`) size = size * 2 hexstring = hex(number)[2:] if len(hexstring) % size != 0: hexstring = ('0' * size + hexstring)[len(hexstring):] if little_endian: hexstring = reverse_hex(hexstring) return hexstring
python
def num2hexstring(number, size=1, little_endian=False): """ Converts a number to a big endian hexstring of a suitable size, optionally little endian :param {number} number :param {number} size - The required size in bytes, eg 1 for Uint8, 2 for Uint16. Defaults to 1. :param {boolean} little_endian - Encode the hex in little endian form :return {string} """ # if (type(number) != = 'number') throw new Error('num must be numeric') # if (num < 0) throw new RangeError('num is unsigned (>= 0)') # if (size % 1 !== 0) throw new Error('size must be a whole integer') # if (!Number.isSafeInteger(num)) throw new RangeError(`num (${num}) must be a safe integer`) size = size * 2 hexstring = hex(number)[2:] if len(hexstring) % size != 0: hexstring = ('0' * size + hexstring)[len(hexstring):] if little_endian: hexstring = reverse_hex(hexstring) return hexstring
[ "def", "num2hexstring", "(", "number", ",", "size", "=", "1", ",", "little_endian", "=", "False", ")", ":", "# if (type(number) != = 'number') throw new Error('num must be numeric')", "# if (num < 0) throw new RangeError('num is unsigned (>= 0)')", "# if (size % 1 !== 0) throw new Error('size must be a whole integer')", "# if (!Number.isSafeInteger(num)) throw new RangeError(`num (${num}) must be a safe integer`)", "size", "=", "size", "*", "2", "hexstring", "=", "hex", "(", "number", ")", "[", "2", ":", "]", "if", "len", "(", "hexstring", ")", "%", "size", "!=", "0", ":", "hexstring", "=", "(", "'0'", "*", "size", "+", "hexstring", ")", "[", "len", "(", "hexstring", ")", ":", "]", "if", "little_endian", ":", "hexstring", "=", "reverse_hex", "(", "hexstring", ")", "return", "hexstring" ]
Converts a number to a big endian hexstring of a suitable size, optionally little endian :param {number} number :param {number} size - The required size in bytes, eg 1 for Uint8, 2 for Uint16. Defaults to 1. :param {boolean} little_endian - Encode the hex in little endian form :return {string}
[ "Converts", "a", "number", "to", "a", "big", "endian", "hexstring", "of", "a", "suitable", "size", "optionally", "little", "endian", ":", "param", "{", "number", "}", "number", ":", "param", "{", "number", "}", "size", "-", "The", "required", "size", "in", "hex", "chars", "eg", "2", "for", "Uint8", "4", "for", "Uint16", ".", "Defaults", "to", "2", ".", ":", "param", "{", "boolean", "}", "little_endian", "-", "Encode", "the", "hex", "in", "little", "endian", "form", ":", "return", "{", "string", "}" ]
train
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/utils.py#L29-L47
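Worked examples of the padding and byte order above (note the size argument counts bytes, so each unit is two hex characters):

    assert num2hexstring(255) == 'ff'
    assert num2hexstring(255, size=2) == '00ff'                      # big endian, zero padded
    assert num2hexstring(255, size=2, little_endian=True) == 'ff00'  # byte pairs reversed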
KeithSSmith/switcheo-python
switcheo/utils.py
num2varint
def num2varint(num): """ Converts a number to a variable length Int. Used for array length header :param: {number} num - The number :return: {string} hexstring of the variable Int. """ # if (typeof num !== 'number') throw new Error('VarInt must be numeric') # if (num < 0) throw new RangeError('VarInts are unsigned (> 0)') # if (!Number.isSafeInteger(num)) throw new RangeError('VarInt must be a safe integer') if num < 0xfd: return num2hexstring(num) elif num <= 0xffff: # uint16 return 'fd' + num2hexstring(number=num, size=2, little_endian=True) elif num <= 0xffffffff: # uint32 return 'fe' + num2hexstring(number=num, size=4, little_endian=True) else: # uint64 return 'ff' + num2hexstring(number=num, size=8, little_endian=True)
python
def num2varint(num): """ Converts a number to a variable length Int. Used for array length header :param: {number} num - The number :return: {string} hexstring of the variable Int. """ # if (typeof num !== 'number') throw new Error('VarInt must be numeric') # if (num < 0) throw new RangeError('VarInts are unsigned (> 0)') # if (!Number.isSafeInteger(num)) throw new RangeError('VarInt must be a safe integer') if num < 0xfd: return num2hexstring(num) elif num <= 0xffff: # uint16 return 'fd' + num2hexstring(number=num, size=2, little_endian=True) elif num <= 0xffffffff: # uint32 return 'fe' + num2hexstring(number=num, size=4, little_endian=True) else: # uint64 return 'ff' + num2hexstring(number=num, size=8, little_endian=True)
[ "def", "num2varint", "(", "num", ")", ":", "# if (typeof num !== 'number') throw new Error('VarInt must be numeric')", "# if (num < 0) throw new RangeError('VarInts are unsigned (> 0)')", "# if (!Number.isSafeInteger(num)) throw new RangeError('VarInt must be a safe integer')", "if", "num", "<", "0xfd", ":", "return", "num2hexstring", "(", "num", ")", "elif", "num", "<=", "0xffff", ":", "# uint16", "return", "'fd'", "+", "num2hexstring", "(", "number", "=", "num", ",", "size", "=", "2", ",", "little_endian", "=", "True", ")", "elif", "num", "<=", "0xffffffff", ":", "# uint32", "return", "'fe'", "+", "num2hexstring", "(", "number", "=", "num", ",", "size", "=", "4", ",", "little_endian", "=", "True", ")", "else", ":", "# uint64", "return", "'ff'", "+", "num2hexstring", "(", "number", "=", "num", ",", "size", "=", "8", ",", "little_endian", "=", "True", ")" ]
Converts a number to a variable length Int. Used for array length header :param: {number} num - The number :return: {string} hexstring of the variable Int.
[ "Converts", "a", "number", "to", "a", "variable", "length", "Int", ".", "Used", "for", "array", "length", "header" ]
train
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/utils.py#L50-L70
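Worked examples of the variable-length encoding above: values below 0xfd fit in a single byte, anything larger gains an 'fd'/'fe'/'ff' marker followed by a little-endian body:

    assert num2varint(0x10) == '10'
    assert num2varint(0xfd) == 'fdfd00'         # 0xfd itself needs the uint16 form
    assert num2varint(0x10000) == 'fe00000100'  # uint32 body, little endian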
KeithSSmith/switcheo-python
switcheo/utils.py
Request.get
def get(self, path, params=None): """Perform GET request""" r = requests.get(url=self.url + path, params=params, timeout=self.timeout) r.raise_for_status() return r.json()
python
def get(self, path, params=None): """Perform GET request""" r = requests.get(url=self.url + path, params=params, timeout=self.timeout) r.raise_for_status() return r.json()
[ "def", "get", "(", "self", ",", "path", ",", "params", "=", "None", ")", ":", "r", "=", "requests", ".", "get", "(", "url", "=", "self", ".", "url", "+", "path", ",", "params", "=", "params", ",", "timeout", "=", "self", ".", "timeout", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")" ]
Perform GET request
[ "Perform", "GET", "request" ]
train
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/utils.py#L101-L105
KeithSSmith/switcheo-python
switcheo/utils.py
Request.post
def post(self, path, data=None, json_data=None, params=None): """Perform POST request""" r = requests.post(url=self.url + path, data=data, json=json_data, params=params, timeout=self.timeout) try: r.raise_for_status() except requests.exceptions.HTTPError: raise SwitcheoApiException(r.json()['error_code'], r.json()['error_message'], r.json()['error']) return r.json()
python
def post(self, path, data=None, json_data=None, params=None): """Perform POST request""" r = requests.post(url=self.url + path, data=data, json=json_data, params=params, timeout=self.timeout) try: r.raise_for_status() except requests.exceptions.HTTPError: raise SwitcheoApiException(r.json()['error_code'], r.json()['error_message'], r.json()['error']) return r.json()
[ "def", "post", "(", "self", ",", "path", ",", "data", "=", "None", ",", "json_data", "=", "None", ",", "params", "=", "None", ")", ":", "r", "=", "requests", ".", "post", "(", "url", "=", "self", ".", "url", "+", "path", ",", "data", "=", "data", ",", "json", "=", "json_data", ",", "params", "=", "params", ",", "timeout", "=", "self", ".", "timeout", ")", "try", ":", "r", ".", "raise_for_status", "(", ")", "except", "requests", ".", "exceptions", ".", "HTTPError", ":", "raise", "SwitcheoApiException", "(", "r", ".", "json", "(", ")", "[", "'error_code'", "]", ",", "r", ".", "json", "(", ")", "[", "'error_message'", "]", ",", "r", ".", "json", "(", ")", "[", "'error'", "]", ")", "return", "r", ".", "json", "(", ")" ]
Perform POST request
[ "Perform", "POST", "request" ]
train
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/utils.py#L107-L114
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.validate_options
def validate_options(self, k, v): """Validate options.""" super().validate_options(k, v) if k == 'mode' and v not in MODE: raise ValueError("{}: '{}' is not a valid value for '{}'".format(self.__class__.__name__, v, k))
python
def validate_options(self, k, v): """Validate options.""" super().validate_options(k, v) if k == 'mode' and v not in MODE: raise ValueError("{}: '{}' is not a valid value for '{}'".format(self.__class__.__name__, v, k))
[ "def", "validate_options", "(", "self", ",", "k", ",", "v", ")", ":", "super", "(", ")", ".", "validate_options", "(", "k", ",", "v", ")", "if", "k", "==", "'mode'", "and", "v", "not", "in", "MODE", ":", "raise", "ValueError", "(", "\"{}: '{}' is not a valid value for '{}'\"", ".", "format", "(", "self", ".", "__class__", ".", "__name", ",", "v", ",", "k", ")", ")" ]
Validate options.
[ "Validate", "options", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L57-L62
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.setup
def setup(self): """Setup.""" self.user_break_tags = set(self.config['break_tags']) self.comments = self.config.get('comments', True) is True self.attributes = set(self.config['attributes']) self.type = self.config['mode'] if self.type not in MODE: self.type = 'html' self.parser = MODE[self.type] ignores = ','.join(self.config['ignores']) self.ignores = sv.compile(ignores, self.config['namespaces']) if ignores.strip() else None captures = ','.join(self.config['captures']) self.captures = sv.compile(captures, self.config['namespaces']) if captures.strip() else None
python
def setup(self): """Setup.""" self.user_break_tags = set(self.config['break_tags']) self.comments = self.config.get('comments', True) is True self.attributes = set(self.config['attributes']) self.type = self.config['mode'] if self.type not in MODE: self.type = 'html' self.parser = MODE[self.type] ignores = ','.join(self.config['ignores']) self.ignores = sv.compile(ignores, self.config['namespaces']) if ignores.strip() else None captures = ','.join(self.config['captures']) self.captures = sv.compile(captures, self.config['namespaces']) if captures.strip() else None
[ "def", "setup", "(", "self", ")", ":", "self", ".", "user_break_tags", "=", "set", "(", "self", ".", "config", "[", "'break_tags'", "]", ")", "self", ".", "comments", "=", "self", ".", "config", ".", "get", "(", "'comments'", ",", "True", ")", "is", "True", "self", ".", "attributes", "=", "set", "(", "self", ".", "config", "[", "'attributes'", "]", ")", "self", ".", "type", "=", "self", ".", "config", "[", "'mode'", "]", "if", "self", ".", "type", "not", "in", "MODE", ":", "self", ".", "type", "=", "'html'", "self", ".", "parser", "=", "MODE", "[", "self", ".", "type", "]", "ignores", "=", "','", ".", "join", "(", "self", ".", "config", "[", "'ignores'", "]", ")", "self", ".", "ignores", "=", "sv", ".", "compile", "(", "ignores", ",", "self", ".", "config", "[", "'namespaces'", "]", ")", "if", "ignores", ".", "strip", "(", ")", "else", "None", "captures", "=", "','", ".", "join", "(", "self", ".", "config", "[", "'captures'", "]", ")", "self", ".", "captures", "=", "sv", ".", "compile", "(", "captures", ",", "self", ".", "config", "[", "'namespaces'", "]", ")", "if", "captures", ".", "strip", "(", ")", "else", "None" ]
Setup.
[ "Setup", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L64-L77
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.header_check
def header_check(self, content): """Special HTML encoding check.""" encode = None # Look for meta charset m = RE_HTML_ENCODE.search(content) if m: enc = m.group(1).decode('ascii') try: codecs.getencoder(enc) encode = enc except LookupError: pass else: encode = self._has_xml_encode(content) return encode
python
def header_check(self, content): """Special HTML encoding check.""" encode = None # Look for meta charset m = RE_HTML_ENCODE.search(content) if m: enc = m.group(1).decode('ascii') try: codecs.getencoder(enc) encode = enc except LookupError: pass else: encode = self._has_xml_encode(content) return encode
[ "def", "header_check", "(", "self", ",", "content", ")", ":", "encode", "=", "None", "# Look for meta charset", "m", "=", "RE_HTML_ENCODE", ".", "search", "(", "content", ")", "if", "m", ":", "enc", "=", "m", ".", "group", "(", "1", ")", ".", "decode", "(", "'ascii'", ")", "try", ":", "codecs", ".", "getencoder", "(", "enc", ")", "encode", "=", "enc", "except", "LookupError", ":", "pass", "else", ":", "encode", "=", "self", ".", "_has_xml_encode", "(", "content", ")", "return", "encode" ]
Special HTML encoding check.
[ "Special", "HTML", "encoding", "check", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L79-L96
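A standalone sketch of the probe: RE_HTML_ENCODE is not shown in this record, so the pattern below is an assumed stand-in that pulls a meta charset out of raw bytes; the codec lookup then rejects unknown encodings exactly as above:

    import codecs
    import re

    META_CHARSET = re.compile(rb'<meta[^>]+charset=["\']?([-a-zA-Z0-9_]+)', re.I)

    content = b'<html><head><meta charset="ISO-8859-1"></head></html>'
    m = META_CHARSET.search(content)
    encoding = None
    if m:
        enc = m.group(1).decode('ascii')
        try:
            codecs.getencoder(enc)  # LookupError here means an unknown encoding
            encoding = enc
        except LookupError:
            pass
    assert encoding == 'ISO-8859-1'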
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.is_break_tag
def is_break_tag(self, el): """Check if tag is an element we should break on.""" name = sv.util.lower(el.name) if self.type != 'xhtml' else el.name return name in self.break_tags or name in self.user_break_tags
python
def is_break_tag(self, el): """Check if tag is an element we should break on.""" name = sv.util.lower(el.name) if self.type != 'xhtml' else el.name return name in self.break_tags or name in self.user_break_tags
[ "def", "is_break_tag", "(", "self", ",", "el", ")", ":", "name", "=", "sv", ".", "util", ".", "lower", "(", "el", ".", "name", ")", "if", "self", ".", "type", "!=", "'xhtml'", "else", "el", ".", "name", "return", "name", "in", "self", ".", "break_tags", "or", "name", "in", "self", ".", "user_break_tags" ]
Check if tag is an element we should break on.
[ "Check", "if", "tag", "is", "an", "element", "we", "should", "break", "on", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L98-L102
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.get_classes
def get_classes(self, el): """Get classes.""" if self.type != 'xhtml': return el.attrs.get('class', []) else: return [c for c in el.attrs.get('class', '').strip().split(' ') if c]
python
def get_classes(self, el): """Get classes.""" if self.type != 'xhtml': return el.attrs.get('class', []) else: return [c for c in el.attrs.get('class', '').strip().split(' ') if c]
[ "def", "get_classes", "(", "self", ",", "el", ")", ":", "if", "self", ".", "type", "!=", "'xhtml'", ":", "return", "el", ".", "attrs", ".", "get", "(", "'class'", ",", "[", "]", ")", "else", ":", "return", "[", "c", "for", "c", "in", "el", ".", "attrs", ".", "get", "(", "'class'", ",", "''", ")", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "if", "c", "]" ]
Get classes.
[ "Get", "classes", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L104-L110
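A usage sketch of why the split is needed (assumes beautifulsoup4, plus lxml for the 'xml' mode): HTML parsers treat class as a multi-valued attribute and return a list, while the XML parser hands back the raw string:

    import bs4

    html_el = bs4.BeautifulSoup('<p class="a b"></p>', 'html.parser').p
    assert html_el.attrs.get('class', []) == ['a', 'b']

    xml_el = bs4.BeautifulSoup('<p class="a b"></p>', 'xml').p
    assert xml_el.attrs.get('class', '') == 'a b'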
facelessuser/pyspelling
pyspelling/filters/html.py
HtmlFilter.construct_selector
def construct_selector(self, el, attr=''): """Construct a selector for context.""" selector = deque() ancestor = el while ancestor and ancestor.parent: if ancestor is not el: selector.appendleft(ancestor.name) else: tag = ancestor.name prefix = ancestor.prefix classes = self.get_classes(ancestor) tag_id = ancestor.attrs.get('id', '').strip() sel = '' if prefix: sel += prefix + '|' sel += tag if tag_id: sel += '#' + tag_id if classes: sel += '.' + '.'.join(classes) if attr: sel += '[%s]' % attr selector.appendleft(sel) ancestor = ancestor.parent return '>'.join(selector)
python
def construct_selector(self, el, attr=''): """Construct a selector for context.""" selector = deque() ancestor = el while ancestor and ancestor.parent: if ancestor is not el: selector.appendleft(ancestor.name) else: tag = ancestor.name prefix = ancestor.prefix classes = self.get_classes(ancestor) tag_id = ancestor.attrs.get('id', '').strip() sel = '' if prefix: sel += prefix + '|' sel += tag if tag_id: sel += '#' + tag_id if classes: sel += '.' + '.'.join(classes) if attr: sel += '[%s]' % attr selector.appendleft(sel) ancestor = ancestor.parent return '>'.join(selector)
[ "def", "construct_selector", "(", "self", ",", "el", ",", "attr", "=", "''", ")", ":", "selector", "=", "deque", "(", ")", "ancestor", "=", "el", "while", "ancestor", "and", "ancestor", ".", "parent", ":", "if", "ancestor", "is", "not", "el", ":", "selector", ".", "appendleft", "(", "ancestor", ".", "name", ")", "else", ":", "tag", "=", "ancestor", ".", "name", "prefix", "=", "ancestor", ".", "prefix", "classes", "=", "self", ".", "get_classes", "(", "ancestor", ")", "tag_id", "=", "ancestor", ".", "attrs", ".", "get", "(", "'id'", ",", "''", ")", ".", "strip", "(", ")", "sel", "=", "''", "if", "prefix", ":", "sel", "+=", "prefix", "+", "'|'", "sel", "+=", "tag", "if", "tag_id", ":", "sel", "+=", "'#'", "+", "tag_id", "if", "classes", ":", "sel", "+=", "'.'", "+", "'.'", ".", "join", "(", "classes", ")", "if", "attr", ":", "sel", "+=", "'[%s]'", "%", "attr", "selector", ".", "appendleft", "(", "sel", ")", "ancestor", "=", "ancestor", ".", "parent", "return", "'>'", ".", "join", "(", "selector", ")" ]
Construct a selector for context.
[ "Construct", "an", "selector", "for", "context", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/html.py#L122-L147
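A standalone sketch of the selector shape using plain BeautifulSoup (the real method also emits a `prefix|` for XHTML namespaces and an optional `[attr]` suffix): the targeted element carries its #id and .classes, ancestors contribute bare tag names, and everything is joined with '>':

    import bs4

    soup = bs4.BeautifulSoup('<div><span id="x" class="a b">hi</span></div>', 'html.parser')
    el = soup.span
    parts = ['{}#{}.{}'.format(el.name, el['id'], '.'.join(el['class']))]
    ancestor = el.parent
    while ancestor and ancestor.parent:  # stop at the document root, as above
        parts.insert(0, ancestor.name)
        ancestor = ancestor.parent
    assert '>'.join(parts) == 'div>span#x.a.b'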
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.setup
def setup(self): """Setup.""" self.additional_context = '' self.comments = False self.attributes = [] self.parser = 'xml' self.type = None self.filepattern = '' self.ignores = None self.captures = None
python
def setup(self): """Setup.""" self.additional_context = '' self.comments = False self.attributes = [] self.parser = 'xml' self.type = None self.filepattern = '' self.ignores = None self.captures = None
[ "def", "setup", "(", "self", ")", ":", "self", ".", "additional_context", "=", "''", "self", ".", "comments", "=", "False", "self", ".", "attributes", "=", "[", "]", "self", ".", "parser", "=", "'xml'", "self", ".", "type", "=", "None", "self", ".", "filepattern", "=", "''", "self", ".", "ignores", "=", "None", "self", ".", "captures", "=", "None" ]
Setup.
[ "Setup", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L57-L67
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.has_bom
def has_bom(self, filestream): """Check if has BOM.""" content = filestream.read(4) if content == b'PK\x03\x04': # Zip file found. # Return `BINARY_ENCODE` as content is binary type, # but don't return None which means we don't know what we have. return filters.BINARY_ENCODE # We only handle zip files, so if we are checking this, we've already failed. return None
python
def has_bom(self, filestream): """Check if has BOM.""" content = filestream.read(4) if content == b'PK\x03\x04': # Zip file found. # Return `BINARY_ENCODE` as content is binary type, # but don't return None which means we don't know what we have. return filters.BINARY_ENCODE # We only handle zip files, so if we are checking this, we've already failed. return None
[ "def", "has_bom", "(", "self", ",", "filestream", ")", ":", "content", "=", "filestream", ".", "read", "(", "4", ")", "if", "content", "==", "b'PK\\x03\\x04'", ":", "# Zip file found.", "# Return `BINARY_ENCODE` as content is binary type,", "# but don't return None which means we don't know what we have.", "return", "filters", ".", "BINARY_ENCODE", "# We only handle zip files, so if we are checking this, we've already failed.", "return", "None" ]
Check if has BOM.
[ "Check", "if", "has", "BOM", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L69-L79
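A usage sketch: OOXML documents are zip archives, so the "BOM" check here is really a test for the zip local-file-header magic b'PK\x03\x04':

    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as z:
        z.writestr('[Content_Types].xml', '<Types/>')
    buf.seek(0)
    assert buf.read(4) == b'PK\x03\x04'  # what has_bom() compares against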
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.determine_file_type
def determine_file_type(self, z): """Determine file type.""" content = z.read('[Content_Types].xml') with io.BytesIO(content) as b: encoding = self._analyze_file(b) if encoding is None: encoding = 'utf-8' b.seek(0) text = b.read().decode(encoding) soup = bs4.BeautifulSoup(text, 'xml') for o in soup.find_all('Override'): name = o.attrs.get('PartName') for k, v in MIMEMAP.items(): if name.startswith('/{}/'.format(k)): self.type = v break if self.type: break self.filepattern = DOC_PARAMS[self.type]['filepattern'] self.namespaces = DOC_PARAMS[self.type]['namespaces'] self.captures = sv.compile(DOC_PARAMS[self.type]['captures'], DOC_PARAMS[self.type]['namespaces'])
python
def determine_file_type(self, z): """Determine file type.""" content = z.read('[Content_Types].xml') with io.BytesIO(content) as b: encoding = self._analyze_file(b) if encoding is None: encoding = 'utf-8' b.seek(0) text = b.read().decode(encoding) soup = bs4.BeautifulSoup(text, 'xml') for o in soup.find_all('Override'): name = o.attrs.get('PartName') for k, v in MIMEMAP.items(): if name.startswith('/{}/'.format(k)): self.type = v break if self.type: break self.filepattern = DOC_PARAMS[self.type]['filepattern'] self.namespaces = DOC_PARAMS[self.type]['namespaces'] self.captures = sv.compile(DOC_PARAMS[self.type]['captures'], DOC_PARAMS[self.type]['namespaces'])
[ "def", "determine_file_type", "(", "self", ",", "z", ")", ":", "content", "=", "z", ".", "read", "(", "'[Content_Types].xml'", ")", "with", "io", ".", "BytesIO", "(", "content", ")", "as", "b", ":", "encoding", "=", "self", ".", "_analyze_file", "(", "b", ")", "if", "encoding", "is", "None", ":", "encoding", "=", "'utf-8'", "b", ".", "seek", "(", "0", ")", "text", "=", "b", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")", "soup", "=", "bs4", ".", "BeautifulSoup", "(", "text", ",", "'xml'", ")", "for", "o", "in", "soup", ".", "find_all", "(", "'Override'", ")", ":", "name", "=", "o", ".", "attrs", ".", "get", "(", "'PartName'", ")", "for", "k", ",", "v", "in", "MIMEMAP", ".", "items", "(", ")", ":", "if", "name", ".", "startswith", "(", "'/{}/'", ".", "format", "(", "k", ")", ")", ":", "self", ".", "type", "=", "v", "break", "if", "self", ".", "type", ":", "break", "self", ".", "filepattern", "=", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'filepattern'", "]", "self", ".", "namespaces", "=", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'namespaces'", "]", "self", ".", "captures", "=", "sv", ".", "compile", "(", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'captures'", "]", ",", "DOC_PARAMS", "[", "self", ".", "type", "]", "[", "'namespaces'", "]", ")" ]
Determine file type.
[ "Determine", "file", "type", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L93-L114
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.soft_break
def soft_break(self, el, text): """Apply soft break.""" # Break word documents by paragraphs. if self.type == 'docx' and el.namespace == self.namespaces['w'] and el.name == 'p': text.append('\n') # Break slides by paragraphs. if self.type == 'pptx' and el.namespace == self.namespaces['a'] and el.name == 'p': text.append('\n')
python
def soft_break(self, el, text): """Apply soft break.""" # Break word documents by paragraphs. if self.type == 'docx' and el.namespace == self.namespaces['w'] and el.name == 'p': text.append('\n') # Break slides by paragraphs. if self.type == 'pptx' and el.namespace == self.namespaces['a'] and el.name == 'p': text.append('\n')
[ "def", "soft_break", "(", "self", ",", "el", ",", "text", ")", ":", "# Break word documents by paragraphs.", "if", "self", ".", "type", "==", "'docx'", "and", "el", ".", "namespace", "==", "self", ".", "namespaces", "[", "'w'", "]", "and", "el", ".", "name", "==", "'p'", ":", "text", ".", "append", "(", "'\\n'", ")", "# Break slides by paragraphs.", "if", "self", ".", "type", "==", "'pptx'", "and", "el", ".", "namespace", "==", "self", ".", "namespaces", "[", "'a'", "]", "and", "el", ".", "name", "==", "'p'", ":", "text", ".", "append", "(", "'\\n'", ")" ]
Apply soft break.
[ "Apply", "soft", "break", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L116-L124
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.get_context
def get_context(self, filename): """Get context.""" if self.type == 'pptx': context = '{}: '.format(RE_SLIDE.search(filename).group(1)) elif self.type == 'docx': context = '{}: '.format(RE_DOCS.match(filename).group(1)) else: context = '' return context
python
def get_context(self, filename): """Get context.""" if self.type == 'pptx': context = '{}: '.format(RE_SLIDE.search(filename).group(1)) elif self.type == 'docx': context = '{}: '.format(RE_DOCS.match(filename).group(1)) else: context = '' return context
[ "def", "get_context", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "type", "==", "'pptx'", ":", "context", "=", "'{}: '", ".", "format", "(", "RE_SLIDE", ".", "search", "(", "filename", ")", ".", "group", "(", "1", ")", ")", "elif", "self", ".", "type", "==", "'docx'", ":", "context", "=", "'{}: '", ".", "format", "(", "RE_DOCS", ".", "match", "(", "filename", ")", ".", "group", "(", "1", ")", ")", "else", ":", "context", "=", "''", "return", "context" ]
Get context.
[ "Get", "context", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L132-L141
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.filter
def filter(self, source_file, encoding): # noqa A001 """Parse XML file.""" sources = [] for content, filename, enc in self.get_content(source_file): self.additional_context = self.get_context(filename) sources.extend(self._filter(content, source_file, enc)) return sources
python
def filter(self, source_file, encoding): # noqa A001 """Parse XML file.""" sources = [] for content, filename, enc in self.get_content(source_file): self.additional_context = self.get_context(filename) sources.extend(self._filter(content, source_file, enc)) return sources
[ "def", "filter", "(", "self", ",", "source_file", ",", "encoding", ")", ":", "# noqa A001", "sources", "=", "[", "]", "for", "content", ",", "filename", ",", "enc", "in", "self", ".", "get_content", "(", "source_file", ")", ":", "self", ".", "additional_context", "=", "self", ".", "get_context", "(", "filename", ")", "sources", ".", "extend", "(", "self", ".", "_filter", "(", "content", ",", "source_file", ",", "enc", ")", ")", "return", "sources" ]
Parse XML file.
[ "Parse", "XML", "file", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L148-L155
facelessuser/pyspelling
pyspelling/filters/ooxml.py
OoxmlFilter.sfilter
def sfilter(self, source): """Filter.""" sources = [] for content, filename, enc in self.get_content(io.BytesIO(source.text.encode(source.encoding))): self.additional_context = self.get_context(filename) sources.extend(self._filter(content, source.context, enc)) return sources
python
def sfilter(self, source): """Filter.""" sources = [] for content, filename, enc in self.get_content(io.BytesIO(source.text.encode(source.encoding))): self.additional_context = self.get_context(filename) sources.extend(self._filter(content, source.context, enc)) return sources
[ "def", "sfilter", "(", "self", ",", "source", ")", ":", "sources", "=", "[", "]", "for", "content", ",", "filename", ",", "enc", "in", "self", ".", "get_content", "(", "io", ".", "BytesIO", "(", "source", ".", "text", ".", "encode", "(", "source", ".", "encoding", ")", ")", ")", ":", "self", ".", "additional_context", "=", "self", ".", "get_context", "(", "filename", ")", "sources", ".", "extend", "(", "self", ".", "_filter", "(", "content", ",", "source", ".", "context", ",", "enc", ")", ")", "return", "sources" ]
Filter.
[ "Filter", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/ooxml.py#L157-L164
mrcagney/make_gtfs
make_gtfs/protofeed.py
read_protofeed
def read_protofeed(path): """ Read the data files at the given directory path (string or Path object) and build a ProtoFeed from them. Validate the resulting ProtoFeed. If invalid, raise a ``ValueError`` specifying the errors. Otherwise, return the resulting ProtoFeed. The data files needed to build a ProtoFeed are - ``frequencies.csv``: (required) A CSV file containing route frequency information. The CSV file contains the columns - ``route_short_name``: (required) String. A unique short name for the route, e.g. '51X' - ``route_long_name``: (required) String. Full name of the route that is more descriptive than ``route_short_name`` - ``route_type``: (required) Integer. The `GTFS type of the route <https://developers.google.com/transit/gtfs/reference/#routestxt>`_ - ``service_window_id`` (required): String. A service window ID for the route taken from the file ``service_windows.csv`` - ``direction``: (required) Integer 0, 1, or 2. Indicates whether the route travels in GTFS direction 0, GTFS direction 1, or in both directions. In the latter case, trips will be created that travel in both directions along the route's path, each direction operating at the given frequency. Otherwise, trips will be created that travel in only the given direction. - ``frequency`` (required): Integer. The frequency of the route during the service window in vehicles per hour. - ``speed``: (optional) Float. The speed of the route in kilometers per hour - ``shape_id``: (required) String. A shape ID that is listed in ``shapes.geojson`` and corresponds to the linestring of the (route, direction, service window) tuple. - ``meta.csv``: (required) A CSV file containing network metadata. The CSV file contains the columns - ``agency_name``: (required) String. The name of the transport agency - ``agency_url``: (required) String. A fully qualified URL for the transport agency - ``agency_timezone``: (required) String. Timezone where the transit agency is located. Timezone names never contain the space character but may contain an underscore. Refer to `http://en.wikipedia.org/wiki/List_of_tz_zones <http://en.wikipedia.org/wiki/List_of_tz_zones>`_ for a list of valid values - ``start_date``, ``end_date`` (required): Strings. The start and end dates for which all this network information is valid formatted as YYYYMMDD strings - ``default_route_speed``: (required) Float. Default speed in kilometers per hour to assign to routes with no ``speed`` entry in the file ``frequencies.csv`` - ``service_windows.csv``: (required) A CSV file containing service window information. A *service window* is a time interval and a set of days of the week during which all routes have constant service frequency, e.g. Saturday and Sunday 07:00 to 09:00. The CSV file contains the columns - ``service_window_id``: (required) String. A unique identifier for a service window - ``start_time``, ``end_time``: (required) Strings. The start and end times of the service window in HH:MM:SS format where the hour is less than 24 - ``monday``, ``tuesday``, ``wednesday``, ``thursday``, ``friday``, ``saturday``, ``sunday`` (required): Integer 0 or 1. Indicates whether the service is active on the given day (1) or not (0) - ``shapes.geojson``: (required) A GeoJSON file containing route shapes. The file consists of one feature collection of LineString features, where each feature's properties contains at least the attribute ``shape_id``, which links the route's shape to the route's information in ``frequencies.csv``. - ``stops.csv``: (optional) A CSV file containing all the required and optional fields of ``stops.txt`` in `the GTFS <https://developers.google.com/transit/gtfs/reference/#stopstxt>`_ """ path = Path(path) service_windows = pd.read_csv( path/'service_windows.csv') meta = pd.read_csv(path/'meta.csv', dtype={'start_date': str, 'end_date': str}) shapes = gpd.read_file(str(path/'shapes.geojson'), driver='GeoJSON') if (path/'stops.csv').exists(): stops = ( pd.read_csv(path/'stops.csv', dtype={ 'stop_id': str, 'stop_code': str, 'zone_id': str, 'location_type': int, 'parent_station': str, 'stop_timezone': str, 'wheelchair_boarding': int, }) .drop_duplicates(subset=['stop_lon', 'stop_lat']) .dropna(subset=['stop_lon', 'stop_lat'], how='any') ) else: stops = None frequencies = pd.read_csv(path/'frequencies.csv', dtype={ 'route_short_name': str, 'service_window_id': str, 'shape_id': str, 'direction': int, 'frequency': int, }) pfeed = ProtoFeed(frequencies, meta, service_windows, shapes, stops) # Validate v = vd.validate(pfeed) if 'error' in v.type.values: raise ValueError( "Invalid ProtoFeed files:\n\n" + v.to_string(justify='left')) return pfeed
python
def read_protofeed(path):
    """
    Read the data files at the given directory path (string or Path
    object) and build a ProtoFeed from them.
    Validate the resulting ProtoFeed.
    If invalid, raise a ``ValueError`` specifying the errors.
    Otherwise, return the resulting ProtoFeed.

    The data files needed to build a ProtoFeed are

    - ``frequencies.csv``: (required) A CSV file containing route frequency
      information. The CSV file contains the columns

      - ``route_short_name``: (required) String. A unique short name for
        the route, e.g. '51X'
      - ``route_long_name``: (required) String. Full name of the route that
        is more descriptive than ``route_short_name``
      - ``route_type``: (required) Integer. The
        `GTFS type of the route <https://developers.google.com/transit/gtfs/reference/#routestxt>`_
      - ``service_window_id`` (required): String. A service window ID for
        the route taken from the file ``service_windows.csv``
      - ``direction``: (required) Integer 0, 1, or 2. Indicates whether the
        route travels in GTFS direction 0, GTFS direction 1, or in both
        directions. In the latter case, trips will be created that travel
        in both directions along the route's path, each direction operating
        at the given frequency. Otherwise, trips will be created that
        travel in only the given direction.
      - ``frequency`` (required): Integer. The frequency of the route
        during the service window in vehicles per hour.
      - ``speed``: (optional) Float. The speed of the route in kilometers
        per hour
      - ``shape_id``: (required) String. A shape ID that is listed in
        ``shapes.geojson`` and corresponds to the linestring of the
        (route, direction, service window) tuple.

    - ``meta.csv``: (required) A CSV file containing network metadata.
      The CSV file contains the columns

      - ``agency_name``: (required) String. The name of the transport
        agency
      - ``agency_url``: (required) String. A fully qualified URL for the
        transport agency
      - ``agency_timezone``: (required) String. Timezone where the transit
        agency is located. Timezone names never contain the space character
        but may contain an underscore. Refer to
        `http://en.wikipedia.org/wiki/List_of_tz_zones <http://en.wikipedia.org/wiki/List_of_tz_zones>`_
        for a list of valid values
      - ``start_date``, ``end_date`` (required): Strings. The start and end
        dates for which all this network information is valid, formatted as
        YYYYMMDD strings
      - ``default_route_speed``: (required) Float. Default speed in
        kilometers per hour to assign to routes with no ``speed`` entry in
        the file ``frequencies.csv``

    - ``service_windows.csv``: (required) A CSV file containing service
      window information. A *service window* is a time interval and a set
      of days of the week during which all routes have constant service
      frequency, e.g. Saturday and Sunday 07:00 to 09:00. The CSV file
      contains the columns

      - ``service_window_id``: (required) String. A unique identifier for a
        service window
      - ``start_time``, ``end_time``: (required) Strings. The start and end
        times of the service window in HH:MM:SS format where the hour is
        less than 24
      - ``monday``, ``tuesday``, ``wednesday``, ``thursday``, ``friday``,
        ``saturday``, ``sunday`` (required): Integer 0 or 1. Indicates
        whether the service is active on the given day (1) or not (0)

    - ``shapes.geojson``: (required) A GeoJSON file containing route shapes.
      The file consists of one feature collection of LineString features,
      where each feature's properties contains at least the attribute
      ``shape_id``, which links the route's shape to the route's
      information in ``frequencies.csv``.

    - ``stops.csv``: (optional) A CSV file containing all the required and
      optional fields of ``stops.txt`` in
      `the GTFS <https://developers.google.com/transit/gtfs/reference/#stopstxt>`_
    """
    path = Path(path)

    service_windows = pd.read_csv(path/'service_windows.csv')
    meta = pd.read_csv(path/'meta.csv',
                       dtype={'start_date': str, 'end_date': str})
    shapes = gpd.read_file(str(path/'shapes.geojson'), driver='GeoJSON')

    if (path/'stops.csv').exists():
        stops = (
            pd.read_csv(path/'stops.csv', dtype={
                'stop_id': str,
                'stop_code': str,
                'zone_id': str,
                'location_type': int,
                'parent_station': str,
                'stop_timezone': str,
                'wheelchair_boarding': int,
            })
            .drop_duplicates(subset=['stop_lon', 'stop_lat'])
            .dropna(subset=['stop_lon', 'stop_lat'], how='any')
        )
    else:
        stops = None

    frequencies = pd.read_csv(path/'frequencies.csv', dtype={
        'route_short_name': str,
        'service_window_id': str,
        'shape_id': str,
        'direction': int,
        'frequency': int,
    })

    pfeed = ProtoFeed(frequencies, meta, service_windows, shapes, stops)

    # Validate
    v = vd.validate(pfeed)
    if 'error' in v.type.values:
        raise ValueError(
            "Invalid ProtoFeed files:\n\n" + v.to_string(justify='left'))

    return pfeed
[ "def", "read_protofeed", "(", "path", ")", ":", "path", "=", "Path", "(", "path", ")", "service_windows", "=", "pd", ".", "read_csv", "(", "path", "/", "'service_windows.csv'", ")", "meta", "=", "pd", ".", "read_csv", "(", "path", "/", "'meta.csv'", ",", "dtype", "=", "{", "'start_date'", ":", "str", ",", "'end_date'", ":", "str", "}", ")", "shapes", "=", "gpd", ".", "read_file", "(", "str", "(", "path", "/", "'shapes.geojson'", ")", ",", "driver", "=", "'GeoJSON'", ")", "if", "(", "path", "/", "'stops.csv'", ")", ".", "exists", "(", ")", ":", "stops", "=", "(", "pd", ".", "read_csv", "(", "path", "/", "'stops.csv'", ",", "dtype", "=", "{", "'stop_id'", ":", "str", ",", "'stop_code'", ":", "str", ",", "'zone_id'", ":", "str", ",", "'location_type'", ":", "int", ",", "'parent_station'", ":", "str", ",", "'stop_timezone'", ":", "str", ",", "'wheelchair_boarding'", ":", "int", ",", "}", ")", ".", "drop_duplicates", "(", "subset", "=", "[", "'stop_lon'", ",", "'stop_lat'", "]", ")", ".", "dropna", "(", "subset", "=", "[", "'stop_lon'", ",", "'stop_lat'", "]", ",", "how", "=", "'any'", ")", ")", "else", ":", "stops", "=", "None", "frequencies", "=", "pd", ".", "read_csv", "(", "path", "/", "'frequencies.csv'", ",", "dtype", "=", "{", "'route_short_name'", ":", "str", ",", "'service_window_id'", ":", "str", ",", "'shape_id'", ":", "str", ",", "'direction'", ":", "int", ",", "'frequency'", ":", "int", ",", "}", ")", "pfeed", "=", "ProtoFeed", "(", "frequencies", ",", "meta", ",", "service_windows", ",", "shapes", ",", "stops", ")", "# Validate", "v", "=", "vd", ".", "validate", "(", "pfeed", ")", "if", "'error'", "in", "v", ".", "type", ".", "values", ":", "raise", "ValueError", "(", "\"Invalid ProtoFeed files:\\n\\n\"", "+", "v", ".", "to_string", "(", "justify", "=", "'left'", ")", ")", "return", "pfeed" ]
Read the data files at the given directory path (string or Path object)
and build a ProtoFeed from them.
Validate the resulting ProtoFeed.
If invalid, raise a ``ValueError`` specifying the errors.
Otherwise, return the resulting ProtoFeed.

The data files needed to build a ProtoFeed are

- ``frequencies.csv``: (required) A CSV file containing route frequency
  information. The CSV file contains the columns

  - ``route_short_name``: (required) String. A unique short name for the
    route, e.g. '51X'
  - ``route_long_name``: (required) String. Full name of the route that is
    more descriptive than ``route_short_name``
  - ``route_type``: (required) Integer. The
    `GTFS type of the route <https://developers.google.com/transit/gtfs/reference/#routestxt>`_
  - ``service_window_id`` (required): String. A service window ID for the
    route taken from the file ``service_windows.csv``
  - ``direction``: (required) Integer 0, 1, or 2. Indicates whether the
    route travels in GTFS direction 0, GTFS direction 1, or in both
    directions. In the latter case, trips will be created that travel in
    both directions along the route's path, each direction operating at
    the given frequency. Otherwise, trips will be created that travel in
    only the given direction.
  - ``frequency`` (required): Integer. The frequency of the route during
    the service window in vehicles per hour.
  - ``speed``: (optional) Float. The speed of the route in kilometers per
    hour
  - ``shape_id``: (required) String. A shape ID that is listed in
    ``shapes.geojson`` and corresponds to the linestring of the
    (route, direction, service window) tuple.

- ``meta.csv``: (required) A CSV file containing network metadata.
  The CSV file contains the columns

  - ``agency_name``: (required) String. The name of the transport agency
  - ``agency_url``: (required) String. A fully qualified URL for the
    transport agency
  - ``agency_timezone``: (required) String. Timezone where the transit
    agency is located. Timezone names never contain the space character
    but may contain an underscore. Refer to
    `http://en.wikipedia.org/wiki/List_of_tz_zones <http://en.wikipedia.org/wiki/List_of_tz_zones>`_
    for a list of valid values
  - ``start_date``, ``end_date`` (required): Strings. The start and end
    dates for which all this network information is valid, formatted as
    YYYYMMDD strings
  - ``default_route_speed``: (required) Float. Default speed in kilometers
    per hour to assign to routes with no ``speed`` entry in the file
    ``frequencies.csv``

- ``service_windows.csv``: (required) A CSV file containing service window
  information. A *service window* is a time interval and a set of days of
  the week during which all routes have constant service frequency, e.g.
  Saturday and Sunday 07:00 to 09:00. The CSV file contains the columns

  - ``service_window_id``: (required) String. A unique identifier for a
    service window
  - ``start_time``, ``end_time``: (required) Strings. The start and end
    times of the service window in HH:MM:SS format where the hour is less
    than 24
  - ``monday``, ``tuesday``, ``wednesday``, ``thursday``, ``friday``,
    ``saturday``, ``sunday`` (required): Integer 0 or 1. Indicates whether
    the service is active on the given day (1) or not (0)

- ``shapes.geojson``: (required) A GeoJSON file containing route shapes.
  The file consists of one feature collection of LineString features,
  where each feature's properties contains at least the attribute
  ``shape_id``, which links the route's shape to the route's information
  in ``frequencies.csv``.

- ``stops.csv``: (optional) A CSV file containing all the required and
  optional fields of ``stops.txt`` in
  `the GTFS <https://developers.google.com/transit/gtfs/reference/#stopstxt>`_
[ "Read", "the", "data", "files", "at", "the", "given", "directory", "path", "(", "string", "or", "Path", "object", ")", "and", "build", "a", "ProtoFeed", "from", "them", ".", "Validate", "the", "resulting", "ProtoFeed", ".", "If", "invalid", "raise", "a", "ValueError", "specifying", "the", "errors", ".", "Otherwise", "return", "the", "resulting", "ProtoFeed", "." ]
train
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L88-L213
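A minimal usage sketch for ``read_protofeed`` above. The directory name is hypothetical, and the ``frequencies`` attribute access is an assumption (the ``copy`` method in the next record suggests ProtoFeed exposes its constructor arguments as same-named attributes):

from pathlib import Path

feed_dir = Path('data/my_network')  # hypothetical folder holding the four required files

try:
    pfeed = read_protofeed(feed_dir)
except ValueError as err:
    # Raised when the validator reports at least one 'error' row
    print('Invalid ProtoFeed:', err)
else:
    print(pfeed.frequencies.head())  # assumed attribute mirroring the constructor argument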
mrcagney/make_gtfs
make_gtfs/protofeed.py
ProtoFeed.copy
def copy(self):
    """
    Return a copy of this ProtoFeed, that is, a feed with all the same
    attributes.
    """
    other = ProtoFeed()
    for key in cs.PROTOFEED_ATTRS:
        value = getattr(self, key)
        if isinstance(value, pd.DataFrame):
            # Pandas copy DataFrame
            value = value.copy()
        setattr(other, key, value)

    return other
python
def copy(self):
    """
    Return a copy of this ProtoFeed, that is, a feed with all the same
    attributes.
    """
    other = ProtoFeed()
    for key in cs.PROTOFEED_ATTRS:
        value = getattr(self, key)
        if isinstance(value, pd.DataFrame):
            # Pandas copy DataFrame
            value = value.copy()
        setattr(other, key, value)

    return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "ProtoFeed", "(", ")", "for", "key", "in", "cs", ".", "PROTOFEED_ATTRS", ":", "value", "=", "getattr", "(", "self", ",", "key", ")", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ")", ":", "# Pandas copy DataFrame", "value", "=", "value", ".", "copy", "(", ")", "setattr", "(", "other", ",", "key", ",", "value", ")", "return", "other" ]
Return a copy of this ProtoFeed, that is, a feed with all the same attributes.
[ "Return", "a", "copy", "of", "this", "ProtoFeed", "that", "is", "a", "feed", "with", "all", "the", "same", "attributes", "." ]
train
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L73-L86
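A quick sketch of the independence the DataFrame copy buys. It assumes a valid ProtoFeed ``pfeed`` with a ``frequencies`` DataFrame attribute (an assumption based on ``cs.PROTOFEED_ATTRS``) and nonzero input frequencies:

pf2 = pfeed.copy()
pf2.frequencies['frequency'] = 0  # mutate the copy only
# The original is untouched because copy() called DataFrame.copy()
assert not pfeed.frequencies['frequency'].eq(0).all()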
dougn/jsontree
jsontree.py
mapped_jsontree_class
def mapped_jsontree_class(mapping):
    """Return a class which is a jsontree, but with a supplied attribute
    name mapping. The mapping argument can be a mapping object
    (dict, jsontree, etc.) or it can be a callable which takes a single
    argument (the attribute name), and returns a new name.

    This is useful in situations where you have a jsontree with keys that
    are not valid python attribute names, to simplify communication with a
    client library, or allow for configurable names.

    For example:

    >>> numjt = mapped_jsontree_class(dict(one='1', two='2', three='3'))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})

    This is very useful for abstracting field names that may change
    between a development sandbox and production environment. Both
    FogBugz and Jira bug trackers have custom fields with dynamically
    generated values. These field names can be abstracted out into a
    configuration mapping, and the jsontree code can be standardized.

    This can also be useful for JavaScript API's (PHPCake) which insist
    on having spaces in some key names. A function can be supplied which
    maps all '_'s in the attribute name to spaces:

    >>> spacify = lambda name: name.replace('_', ' ')
    >>> spacemapped = mapped_jsontree_class(spacify)
    >>> sm = spacemapped()
    >>> sm.hello_there = 5
    >>> sm.hello_there
    5
    >>> sm.keys()
    ['hello there']

    This will also work with non-string keys for translating from
    libraries that use object keys in python over to string versions of
    the keys in JSON

    >>> numjt = mapped_jsontree_class(dict(one=1, two=2))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {1: 'something'})
    >>> numjt_as_text = mapped_jsontree_class(dict(one='1', two='2'))
    >>> dumped_number = dumps(number)
    >>> loaded_number = loads(dumped_number, jsontreecls=numjt_as_text)
    >>> loaded_number.one
    'something'
    >>> loaded_number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})
    """
    mapper = mapping
    if not callable(mapping):
        if not isinstance(mapping, collections.Mapping):
            raise TypeError("Argument mapping is not callable or an instance "
                            "of collections.Mapping")
        mapper = lambda name: mapping.get(name, name)

    class mapped_jsontree(collections.defaultdict):
        def __init__(self, *args, **kwdargs):
            super(mapped_jsontree, self).__init__(mapped_jsontree, *args, **kwdargs)
        def __getattribute__(self, name):
            mapped_name = mapper(name)
            if not isinstance(mapped_name, basestring):
                return self[mapped_name]
            try:
                return object.__getattribute__(self, mapped_name)
            except AttributeError:
                return self[mapped_name]
        def __setattr__(self, name, value):
            mapped_name = mapper(name)
            self[mapped_name] = value
            return value
    return mapped_jsontree
python
def mapped_jsontree_class(mapping):
    """Return a class which is a jsontree, but with a supplied attribute
    name mapping. The mapping argument can be a mapping object
    (dict, jsontree, etc.) or it can be a callable which takes a single
    argument (the attribute name), and returns a new name.

    This is useful in situations where you have a jsontree with keys that
    are not valid python attribute names, to simplify communication with a
    client library, or allow for configurable names.

    For example:

    >>> numjt = mapped_jsontree_class(dict(one='1', two='2', three='3'))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})

    This is very useful for abstracting field names that may change
    between a development sandbox and production environment. Both
    FogBugz and Jira bug trackers have custom fields with dynamically
    generated values. These field names can be abstracted out into a
    configuration mapping, and the jsontree code can be standardized.

    This can also be useful for JavaScript API's (PHPCake) which insist
    on having spaces in some key names. A function can be supplied which
    maps all '_'s in the attribute name to spaces:

    >>> spacify = lambda name: name.replace('_', ' ')
    >>> spacemapped = mapped_jsontree_class(spacify)
    >>> sm = spacemapped()
    >>> sm.hello_there = 5
    >>> sm.hello_there
    5
    >>> sm.keys()
    ['hello there']

    This will also work with non-string keys for translating from
    libraries that use object keys in python over to string versions of
    the keys in JSON

    >>> numjt = mapped_jsontree_class(dict(one=1, two=2))
    >>> number = numjt()
    >>> number.one = 'something'
    >>> number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {1: 'something'})
    >>> numjt_as_text = mapped_jsontree_class(dict(one='1', two='2'))
    >>> dumped_number = dumps(number)
    >>> loaded_number = loads(dumped_number, jsontreecls=numjt_as_text)
    >>> loaded_number.one
    'something'
    >>> loaded_number
    defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})
    """
    mapper = mapping
    if not callable(mapping):
        if not isinstance(mapping, collections.Mapping):
            raise TypeError("Argument mapping is not callable or an instance "
                            "of collections.Mapping")
        mapper = lambda name: mapping.get(name, name)

    class mapped_jsontree(collections.defaultdict):
        def __init__(self, *args, **kwdargs):
            super(mapped_jsontree, self).__init__(mapped_jsontree, *args, **kwdargs)
        def __getattribute__(self, name):
            mapped_name = mapper(name)
            if not isinstance(mapped_name, basestring):
                return self[mapped_name]
            try:
                return object.__getattribute__(self, mapped_name)
            except AttributeError:
                return self[mapped_name]
        def __setattr__(self, name, value):
            mapped_name = mapper(name)
            self[mapped_name] = value
            return value
    return mapped_jsontree
[ "def", "mapped_jsontree_class", "(", "mapping", ")", ":", "mapper", "=", "mapping", "if", "not", "callable", "(", "mapping", ")", ":", "if", "not", "isinstance", "(", "mapping", ",", "collections", ".", "Mapping", ")", ":", "raise", "TypeError", "(", "\"Argument mapping is not callable or an instance \"", "\"of collections.Mapping\"", ")", "mapper", "=", "lambda", "name", ":", "mapping", ".", "get", "(", "name", ",", "name", ")", "class", "mapped_jsontree", "(", "collections", ".", "defaultdict", ")", ":", "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwdargs", ")", ":", "super", "(", "mapped_jsontree", ",", "self", ")", ".", "__init__", "(", "mapped_jsontree", ",", "*", "args", ",", "*", "*", "kwdargs", ")", "def", "__getattribute__", "(", "self", ",", "name", ")", ":", "mapped_name", "=", "mapper", "(", "name", ")", "if", "not", "isinstance", "(", "mapped_name", ",", "basestring", ")", ":", "return", "self", "[", "mapped_name", "]", "try", ":", "return", "object", ".", "__getattribute__", "(", "self", ",", "mapped_name", ")", "except", "AttributeError", ":", "return", "self", "[", "mapped_name", "]", "def", "__setattr__", "(", "self", ",", "name", ",", "value", ")", ":", "mapped_name", "=", "mapper", "(", "name", ")", "self", "[", "mapped_name", "]", "=", "value", "return", "value", "return", "mapped_jsontree" ]
Return a class which is a jsontree, but with a supplied attribute name
mapping. The mapping argument can be a mapping object (dict, jsontree,
etc.) or it can be a callable which takes a single argument (the
attribute name), and returns a new name.

This is useful in situations where you have a jsontree with keys that
are not valid python attribute names, to simplify communication with a
client library, or allow for configurable names.

For example:

>>> numjt = mapped_jsontree_class(dict(one='1', two='2', three='3'))
>>> number = numjt()
>>> number.one = 'something'
>>> number
defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})

This is very useful for abstracting field names that may change between
a development sandbox and production environment. Both FogBugz and Jira
bug trackers have custom fields with dynamically generated values.
These field names can be abstracted out into a configuration mapping,
and the jsontree code can be standardized.

This can also be useful for JavaScript API's (PHPCake) which insist on
having spaces in some key names. A function can be supplied which maps
all '_'s in the attribute name to spaces:

>>> spacify = lambda name: name.replace('_', ' ')
>>> spacemapped = mapped_jsontree_class(spacify)
>>> sm = spacemapped()
>>> sm.hello_there = 5
>>> sm.hello_there
5
>>> sm.keys()
['hello there']

This will also work with non-string keys for translating from libraries
that use object keys in python over to string versions of the keys in
JSON

>>> numjt = mapped_jsontree_class(dict(one=1, two=2))
>>> number = numjt()
>>> number.one = 'something'
>>> number
defaultdict(<class 'jsontree.mapped_jsontree'>, {1: 'something'})
>>> numjt_as_text = mapped_jsontree_class(dict(one='1', two='2'))
>>> dumped_number = dumps(number)
>>> loaded_number = loads(dumped_number, jsontreecls=numjt_as_text)
>>> loaded_number.one
'something'
>>> loaded_number
defaultdict(<class 'jsontree.mapped_jsontree'>, {'1': 'something'})
[ "Return", "a", "class", "which", "is", "a", "jsontree", "but", "with", "a", "supplied", "attribute", "name", "mapping", ".", "The", "mapping", "argument", "can", "be", "a", "mapping", "object", "(", "dict", "jsontree", "etc", ".", ")", "or", "it", "can", "be", "a", "callable", "which", "takes", "a", "single", "argument", "(", "the", "attribute", "name", ")", "and", "returns", "a", "new", "name", ".", "This", "is", "useful", "in", "situations", "where", "you", "have", "a", "jsontree", "with", "keys", "that", "are", "not", "valid", "python", "attribute", "names", "to", "simplify", "communication", "with", "a", "client", "library", "or", "allow", "for", "configurable", "names", ".", "For", "example", ":", ">>>", "numjt", "=", "mapped_jsontree_class", "(", "dict", "(", "one", "=", "1", "two", "=", "2", "three", "=", "3", "))", ">>>", "number", "=", "numjt", "()", ">>>", "number", ".", "one", "=", "something", ">>>", "number", "defaultdict", "(", "<class", "jsontree", ".", "mapped_jsontree", ">", "{", "1", ":", "something", "}", ")", "This", "is", "very", "useful", "for", "abstracting", "field", "names", "that", "may", "change", "between", "a", "development", "sandbox", "and", "production", "environment", ".", "Both", "FogBugz", "and", "Jira", "bug", "trackers", "have", "custom", "fields", "with", "dynamically", "generated", "values", ".", "These", "field", "names", "can", "be", "abstracted", "out", "into", "a", "configuration", "mapping", "and", "the", "jsontree", "code", "can", "be", "standardized", ".", "This", "can", "also", "be", "useful", "for", "JavaScript", "API", "s", "(", "PHPCake", ")", "which", "insist", "on", "having", "spaces", "in", "some", "key", "names", ".", "A", "function", "can", "be", "supplied", "which", "maps", "all", "_", "s", "in", "the", "attribute", "name", "to", "spaces", ":", ">>>", "spacify", "=", "lambda", "name", ":", "name", ".", "replace", "(", "_", ")", ">>>", "spacemapped", "=", "mapped_jsontree_class", "(", "spacify", ")", ">>>", "sm", "=", "spacemapped", "()", ">>>", "sm", ".", "hello_there", "=", "5", ">>>", "sm", ".", "hello_there", "5", ">>>", "sm", ".", "keys", "()", "[", "hello", "there", "]", "This", "will", "also", "work", "with", "non", "-", "string", "keys", "for", "translating", "from", "libraries", "that", "use", "object", "keys", "in", "python", "over", "to", "string", "versions", "of", "the", "keys", "in", "JSON", ">>>", "numjt", "=", "mapped_jsontree_class", "(", "dict", "(", "one", "=", "1", "two", "=", "2", "))", ">>>", "number", "=", "numjt", "()", ">>>", "number", ".", "one", "=", "something", ">>>", "number", "defaultdict", "(", "<class", "jsontree", ".", "mapped_jsontree", ">", "{", "1", ":", "something", "}", ")", ">>>", "numjt_as_text", "=", "mapped_jsontree_class", "(", "dict", "(", "one", "=", "1", "two", "=", "2", "))", ">>>", "dumped_number", "=", "dumps", "(", "number", ")", ">>>", "loaded_number", "=", "loads", "(", "dumped_number", "jsontreecls", "=", "numjt_as_text", ")", ">>>", "loaded_number", ".", "one", "something", ">>>", "loaded_number", "defaultdict", "(", "<class", "jsontree", ".", "mapped_jsontree", ">", "{", "1", ":", "something", "}", ")" ]
train
https://github.com/dougn/jsontree/blob/e65ebc220528dfc15acb3813ab77c936c1ffc623/jsontree.py#L98-L174
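The bug-tracker scenario in the docstring can be sketched as follows. The Jira-style field IDs here are invented, and since the class tests against ``basestring``, run this on Python 2 or alias ``basestring = str`` first:

FIELD_MAP = {'severity': 'customfield_10023', 'build': 'customfield_10040'}  # hypothetical IDs

IssueTree = mapped_jsontree_class(FIELD_MAP)
issue = IssueTree()
issue.severity = 'high'  # stored under 'customfield_10023'
issue.build = '1.4.2'    # stored under 'customfield_10040'
# Attribute reads go through the same mapping, so both views agree:
assert issue['customfield_10023'] == issue.severity == 'high'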
dougn/jsontree
jsontree.py
clone
def clone(root, jsontreecls=jsontree, datetimeencoder=_datetimeencoder,
          datetimedecoder=_datetimedecoder):
    """Clone an object by first serializing out and then loading it back in.
    """
    return json.loads(json.dumps(root, cls=JSONTreeEncoder,
                                 datetimeencoder=datetimeencoder),
                      cls=JSONTreeDecoder, jsontreecls=jsontreecls,
                      datetimedecoder=datetimedecoder)
python
def clone(root, jsontreecls=jsontree, datetimeencoder=_datetimeencoder,
          datetimedecoder=_datetimedecoder):
    """Clone an object by first serializing out and then loading it back in.
    """
    return json.loads(json.dumps(root, cls=JSONTreeEncoder,
                                 datetimeencoder=datetimeencoder),
                      cls=JSONTreeDecoder, jsontreecls=jsontreecls,
                      datetimedecoder=datetimedecoder)
[ "def", "clone", "(", "root", ",", "jsontreecls", "=", "jsontree", ",", "datetimeencoder", "=", "_datetimeencoder", ",", "datetimedecoder", "=", "_datetimedecoder", ")", ":", "return", "json", ".", "loads", "(", "json", ".", "dumps", "(", "root", ",", "cls", "=", "JSONTreeEncoder", ",", "datetimeencoder", "=", "datetimeencoder", ")", ",", "cls", "=", "JSONTreeDecoder", ",", "jsontreecls", "=", "jsontreecls", ",", "datetimedecoder", "=", "datetimedecoder", ")" ]
Clone an object by first serializing out and then loading it back in.
[ "Clone", "an", "object", "by", "first", "serializing", "out", "and", "then", "loading", "it", "back", "in", "." ]
train
https://github.com/dougn/jsontree/blob/e65ebc220528dfc15acb3813ab77c936c1ffc623/jsontree.py#L233-L240
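A sketch of clone in action. It assumes the module's default jsontree class supports attribute-style assignment (as its mapped variant above does) and that the default datetime hooks round-trip datetime objects:

import datetime

original = jsontree()
original.created = datetime.datetime(2020, 1, 1, 12, 0)
original.tags = ['a', 'b']

duplicate = clone(original)
duplicate.tags.append('c')
assert original.tags == ['a', 'b']            # nested list was rebuilt, not shared
assert duplicate.created == original.created  # datetimes survive the round trip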
dougn/jsontree
jsontree.py
load
def load(fp, encoding=None, cls=JSONTreeDecoder, object_hook=None,
         parse_float=None, parse_int=None, parse_constant=None,
         object_pairs_hook=None, **kargs):
    """JSON load from file function that defaults the loading class to be
    JSONTreeDecoder
    """
    return json.load(fp, encoding, cls, object_hook, parse_float, parse_int,
                     parse_constant, object_pairs_hook, **kargs)
python
def load(fp, encoding=None, cls=JSONTreeDecoder, object_hook=None,
         parse_float=None, parse_int=None, parse_constant=None,
         object_pairs_hook=None, **kargs):
    """JSON load from file function that defaults the loading class to be
    JSONTreeDecoder
    """
    return json.load(fp, encoding, cls, object_hook, parse_float, parse_int,
                     parse_constant, object_pairs_hook, **kargs)
[ "def", "load", "(", "fp", ",", "encoding", "=", "None", ",", "cls", "=", "JSONTreeDecoder", ",", "object_hook", "=", "None", ",", "parse_float", "=", "None", ",", "parse_int", "=", "None", ",", "parse_constant", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "*", "*", "kargs", ")", ":", "return", "json", ".", "load", "(", "fp", ",", "encoding", ",", "cls", ",", "object_hook", ",", "parse_float", ",", "parse_int", ",", "parse_constant", ",", "object_pairs_hook", ",", "*", "*", "kargs", ")" ]
JSON load from file function that defaults the loading class to be JSONTreeDecoder
[ "JSON", "load", "from", "file", "function", "that", "defaults", "the", "loading", "class", "to", "be", "JSONTreeDecoder" ]
train
https://github.com/dougn/jsontree/blob/e65ebc220528dfc15acb3813ab77c936c1ffc623/jsontree.py#L261-L269
dougn/jsontree
jsontree.py
loads
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
          parse_float=None, parse_int=None, parse_constant=None,
          object_pairs_hook=None, **kargs):
    """JSON load from string function that defaults the loading class to be
    JSONTreeDecoder
    """
    return json.loads(s, encoding, cls, object_hook, parse_float, parse_int,
                      parse_constant, object_pairs_hook, **kargs)
python
def loads(s, encoding=None, cls=JSONTreeDecoder, object_hook=None,
          parse_float=None, parse_int=None, parse_constant=None,
          object_pairs_hook=None, **kargs):
    """JSON load from string function that defaults the loading class to be
    JSONTreeDecoder
    """
    return json.loads(s, encoding, cls, object_hook, parse_float, parse_int,
                      parse_constant, object_pairs_hook, **kargs)
[ "def", "loads", "(", "s", ",", "encoding", "=", "None", ",", "cls", "=", "JSONTreeDecoder", ",", "object_hook", "=", "None", ",", "parse_float", "=", "None", ",", "parse_int", "=", "None", ",", "parse_constant", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "*", "*", "kargs", ")", ":", "return", "json", ".", "loads", "(", "s", ",", "encoding", ",", "cls", ",", "object_hook", ",", "parse_float", ",", "parse_int", ",", "parse_constant", ",", "object_pairs_hook", ",", "*", "*", "kargs", ")" ]
JSON load from string function that defaults the loading class to be JSONTreeDecoder
[ "JSON", "load", "from", "string", "function", "that", "defaults", "the", "loading", "class", "to", "be", "JSONTreeDecoder" ]
train
https://github.com/dougn/jsontree/blob/e65ebc220528dfc15acb3813ab77c936c1ffc623/jsontree.py#L271-L279
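Both wrappers simply re-point the stdlib entry points at JSONTreeDecoder; note that the positional ``encoding`` pass-through matches Python 2's json signature (Python 3 later dropped that parameter). A usage sketch, assuming JSONTreeDecoder yields attribute-accessible trees:

data = loads('{"user": {"name": "ada", "id": 7}}')
print(data.user.name)  # -> 'ada', attribute access courtesy of JSONTreeDecoder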
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.setup
def setup(self):
    """Setup."""

    self.comments = self.config['comments']
    self.docstrings = self.config['docstrings']
    self.strings = self.config['strings']
    self.group_comments = self.config['group_comments']
    self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types'])
    self.decode_escapes = self.config['decode_escapes']
python
def setup(self):
    """Setup."""

    self.comments = self.config['comments']
    self.docstrings = self.config['docstrings']
    self.strings = self.config['strings']
    self.group_comments = self.config['group_comments']
    self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types'])
    self.decode_escapes = self.config['decode_escapes']
[ "def", "setup", "(", "self", ")", ":", "self", ".", "comments", "=", "self", ".", "config", "[", "'comments'", "]", "self", ".", "docstrings", "=", "self", ".", "config", "[", "'docstrings'", "]", "self", ".", "strings", "=", "self", ".", "config", "[", "'strings'", "]", "self", ".", "group_comments", "=", "self", ".", "config", "[", "'group_comments'", "]", "self", ".", "string_types", ",", "self", ".", "wild_string_types", "=", "self", ".", "eval_string_type", "(", "self", ".", "config", "[", "'string_types'", "]", ")", "self", ".", "decode_escapes", "=", "self", ".", "config", "[", "'decode_escapes'", "]" ]
Setup.
[ "Setup", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L115-L123
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.validate_options
def validate_options(self, k, v):
    """Validate options."""

    super().validate_options(k, v)
    if k == 'string_types':
        if RE_VALID_STRING_TYPES.match(v) is None:
            raise ValueError("{}: '{}' does not define valid string types".format(self.__class__.__name__, v))
python
def validate_options(self, k, v):
    """Validate options."""

    super().validate_options(k, v)
    if k == 'string_types':
        if RE_VALID_STRING_TYPES.match(v) is None:
            raise ValueError("{}: '{}' does not define valid string types".format(self.__class__.__name__, v))
[ "def", "validate_options", "(", "self", ",", "k", ",", "v", ")", ":", "super", "(", ")", ".", "validate_options", "(", "k", ",", "v", ")", "if", "k", "==", "'string_types'", ":", "if", "RE_VALID_STRING_TYPES", ".", "match", "(", "v", ")", "is", "None", ":", "raise", "ValueError", "(", "\"{}: '{}' does not define valid string types\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "v", ")", ")" ]
Validate options.
[ "Validate", "options", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L125-L131
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.header_check
def header_check(self, content):
    """Special Python encoding check."""

    encode = None

    m = RE_PY_ENCODE.match(content)
    if m:
        if m.group(1):
            encode = m.group(1).decode('ascii')
        elif m.group(2):
            encode = m.group(2).decode('ascii')
    if encode is None:
        encode = 'utf-8'
    return encode
python
def header_check(self, content):
    """Special Python encoding check."""

    encode = None

    m = RE_PY_ENCODE.match(content)
    if m:
        if m.group(1):
            encode = m.group(1).decode('ascii')
        elif m.group(2):
            encode = m.group(2).decode('ascii')
    if encode is None:
        encode = 'utf-8'
    return encode
[ "def", "header_check", "(", "self", ",", "content", ")", ":", "encode", "=", "None", "m", "=", "RE_PY_ENCODE", ".", "match", "(", "content", ")", "if", "m", ":", "if", "m", ".", "group", "(", "1", ")", ":", "encode", "=", "m", ".", "group", "(", "1", ")", ".", "decode", "(", "'ascii'", ")", "elif", "m", ".", "group", "(", "2", ")", ":", "encode", "=", "m", ".", "group", "(", "2", ")", ".", "decode", "(", "'ascii'", ")", "if", "encode", "is", "None", ":", "encode", "=", "'utf-8'", "return", "encode" ]
Special Python encoding check.
[ "Special", "Python", "encoding", "check", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L133-L146
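RE_PY_ENCODE is defined outside this excerpt; per PEP 263 it must find a coding declaration in the first one or two lines of the file. A standalone approximation of the same check (the regex below is an assumption, not the library's actual pattern):

import re

RE_CODING = re.compile(rb'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')

def sniff_encoding(header: bytes) -> str:
    for line in header.splitlines()[:2]:  # PEP 263: first two lines only
        m = RE_CODING.match(line)
        if m:
            return m.group(1).decode('ascii')
    return 'utf-8'  # same fallback as header_check above

print(sniff_encoding(b'#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n'))  # -> 'latin-1'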
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.eval_string_type
def eval_string_type(self, text, is_string=False):
    """Evaluate string type."""

    stype = set()
    wstype = set()

    for m in RE_ITER_STRING_TYPES.finditer(text):
        value = m.group(0)
        if value == '*':
            wstype.add('u')
            wstype.add('f')
            wstype.add('r')
            wstype.add('b')
        elif value.endswith('*'):
            wstype.add(value[0].lower())
        else:
            stype.add(value.lower())

    if is_string and 'b' not in stype and 'f' not in stype:
        stype.add('u')

    return stype, wstype
python
def eval_string_type(self, text, is_string=False):
    """Evaluate string type."""

    stype = set()
    wstype = set()

    for m in RE_ITER_STRING_TYPES.finditer(text):
        value = m.group(0)
        if value == '*':
            wstype.add('u')
            wstype.add('f')
            wstype.add('r')
            wstype.add('b')
        elif value.endswith('*'):
            wstype.add(value[0].lower())
        else:
            stype.add(value.lower())

    if is_string and 'b' not in stype and 'f' not in stype:
        stype.add('u')

    return stype, wstype
[ "def", "eval_string_type", "(", "self", ",", "text", ",", "is_string", "=", "False", ")", ":", "stype", "=", "set", "(", ")", "wstype", "=", "set", "(", ")", "for", "m", "in", "RE_ITER_STRING_TYPES", ".", "finditer", "(", "text", ")", ":", "value", "=", "m", ".", "group", "(", "0", ")", "if", "value", "==", "'*'", ":", "wstype", ".", "add", "(", "'u'", ")", "wstype", ".", "add", "(", "'f'", ")", "wstype", ".", "add", "(", "'r'", ")", "wstype", ".", "add", "(", "'b'", ")", "elif", "value", ".", "endswith", "(", "'*'", ")", ":", "wstype", ".", "add", "(", "value", "[", "0", "]", ".", "lower", "(", ")", ")", "else", ":", "stype", ".", "add", "(", "value", ".", "lower", "(", ")", ")", "if", "is_string", "and", "'b'", "not", "in", "stype", "and", "'f'", "not", "in", "stype", ":", "stype", ".", "add", "(", "'u'", ")", "return", "stype", ",", "wstype" ]
Evaluate string type.
[ "Evaluate", "string", "type", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L148-L169
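RE_ITER_STRING_TYPES is defined outside this excerpt; judging from the branches above, it yields tokens such as 'b', 'r*', or a bare '*' from a spec like 'bf*'. A self-contained approximation of the parse (the token regex is an assumption, mirroring what validate_options guards against):

import re

# Assumed token shape: a prefix letter optionally followed by '*', or a bare '*'
RE_TOKENS = re.compile(r'[bfru]\*?|\*', re.I)

def parse_string_types(text, is_string=False):
    stype, wstype = set(), set()
    for value in RE_TOKENS.findall(text):
        if value == '*':
            wstype |= {'u', 'f', 'r', 'b'}  # wildcard: any prefix, any kind
        elif value.endswith('*'):
            wstype.add(value[0].lower())    # e.g. 'r*' -> raw strings of any kind
        else:
            stype.add(value.lower())
    if is_string and 'b' not in stype and 'f' not in stype:
        stype.add('u')                      # plain strings count as unicode
    return stype, wstype

print(parse_string_types('bf*'))  # -> ({'b'}, {'f'})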
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.replace_unicode
def replace_unicode(self, m):
    """Replace escapes."""

    groups = m.groupdict()
    esc = m.group(0)
    if groups.get('fesc'):
        value = m.group(0)
    elif groups.get('format'):
        value = ' '
    elif groups.get('special'):
        value = BACK_SLASH_TRANSLATION[esc]
    elif groups.get('char'):
        try:
            value = chr(int(esc[2:], 16))
        except Exception:
            value = esc
    elif groups.get('oct'):
        value = chr(int(esc[1:], 8))
    elif groups.get('name'):
        try:
            value = unicodedata.lookup(esc[3:-1])
        except Exception:
            value = esc
    return value.replace('\x00', '\n')
python
def replace_unicode(self, m):
    """Replace escapes."""

    groups = m.groupdict()
    esc = m.group(0)
    if groups.get('fesc'):
        value = m.group(0)
    elif groups.get('format'):
        value = ' '
    elif groups.get('special'):
        value = BACK_SLASH_TRANSLATION[esc]
    elif groups.get('char'):
        try:
            value = chr(int(esc[2:], 16))
        except Exception:
            value = esc
    elif groups.get('oct'):
        value = chr(int(esc[1:], 8))
    elif groups.get('name'):
        try:
            value = unicodedata.lookup(esc[3:-1])
        except Exception:
            value = esc
    return value.replace('\x00', '\n')
[ "def", "replace_unicode", "(", "self", ",", "m", ")", ":", "groups", "=", "m", ".", "groupdict", "(", ")", "esc", "=", "m", ".", "group", "(", "0", ")", "if", "groups", ".", "get", "(", "'fesc'", ")", ":", "value", "=", "m", ".", "group", "(", "0", ")", "elif", "groups", ".", "get", "(", "'format'", ")", ":", "value", "=", "' '", "elif", "groups", ".", "get", "(", "'special'", ")", ":", "value", "=", "BACK_SLASH_TRANSLATION", "[", "esc", "]", "elif", "groups", ".", "get", "(", "'char'", ")", ":", "try", ":", "value", "=", "chr", "(", "int", "(", "esc", "[", "2", ":", "]", ",", "16", ")", ")", "except", "Exception", ":", "value", "=", "esc", "elif", "groups", ".", "get", "(", "'oct'", ")", ":", "value", "=", "chr", "(", "int", "(", "esc", "[", "1", ":", "]", ",", "8", ")", ")", "elif", "groups", ".", "get", "(", "'name'", ")", ":", "try", ":", "value", "=", "unicodedata", ".", "lookup", "(", "esc", "[", "3", ":", "-", "1", "]", ")", "except", "Exception", ":", "value", "=", "esc", "return", "value", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")" ]
Replace escapes.
[ "Replace", "escapes", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L181-L204
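The method above is written as an re.sub callback; the escape regex itself (with groups named fesc, format, special, char, oct, and name) lives outside this excerpt. A cut-down illustration of the same pattern, covering only the \xNN and \N{...} escapes (the regex here is an assumption):

import re
import unicodedata

# Only two of the six escape categories are modeled here
RE_ESC = re.compile(r'(?P<char>\\x[0-9a-fA-F]{2})|(?P<name>\\N\{[^}]+\})')

def _replace(m):
    esc = m.group(0)
    if m.group('char'):
        return chr(int(esc[2:], 16))          # \xNN -> code point
    try:
        return unicodedata.lookup(esc[3:-1])  # \N{NAME} -> character
    except KeyError:
        return esc                            # unknown name: leave as-is

print(RE_ESC.sub(_replace, r'caf\xe9 \N{BULLET}'))  # -> 'café •'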
facelessuser/pyspelling
pyspelling/filters/python.py
PythonFilter.replace_bytes
def replace_bytes(self, m):
    """Replace escapes."""

    esc = m.group(0)
    value = esc
    if m.group('special'):
        value = BACK_SLASH_TRANSLATION[esc]
    elif m.group('char'):
        try:
            value = chr(int(esc[2:], 16))
        except Exception:
            value = esc
    elif m.group('oct'):
        value = int(esc[1:], 8)
        if value > 255:
            value -= 256
        value = chr(value)
    return value.replace('\x00', '\n')
python
def replace_bytes(self, m):
    """Replace escapes."""

    esc = m.group(0)
    value = esc
    if m.group('special'):
        value = BACK_SLASH_TRANSLATION[esc]
    elif m.group('char'):
        try:
            value = chr(int(esc[2:], 16))
        except Exception:
            value = esc
    elif m.group('oct'):
        value = int(esc[1:], 8)
        if value > 255:
            value -= 256
        value = chr(value)
    return value.replace('\x00', '\n')
[ "def", "replace_bytes", "(", "self", ",", "m", ")", ":", "esc", "=", "m", ".", "group", "(", "0", ")", "value", "=", "esc", "if", "m", ".", "group", "(", "'special'", ")", ":", "value", "=", "BACK_SLASH_TRANSLATION", "[", "esc", "]", "elif", "m", ".", "group", "(", "'char'", ")", ":", "try", ":", "value", "=", "chr", "(", "int", "(", "esc", "[", "2", ":", "]", ",", "16", ")", ")", "except", "Exception", ":", "value", "=", "esc", "elif", "m", ".", "group", "(", "'oct'", ")", ":", "value", "=", "int", "(", "esc", "[", "1", ":", "]", ",", "8", ")", "if", "value", ">", "255", ":", "value", "-=", "256", "value", "=", "chr", "(", "value", ")", "return", "value", ".", "replace", "(", "'\\x00'", ",", "'\\n'", ")" ]
Replace escapes.
[ "Replace", "escapes", "." ]
train
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/python.py#L206-L223
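The oct branch above has one subtlety: three octal digits can encode values up to 511, so anything above 255 is folded back into byte range, which appears to mirror how CPython historically wrapped out-of-range octal escapes in byte strings. A tiny standalone check of just that branch:

def decode_oct_byte(esc):
    value = int(esc[1:], 8)  # up to three octal digits, max 0o777 == 511
    if value > 255:
        value -= 256         # fold back into byte range, as above
    return chr(value)

print(ord(decode_oct_byte('\\777')))  # -> 255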