text (string, lengths 75–104k) | code_tokens (sequence) | avg_line_len (float64, 7.91–980) | score (float64, 0–0.18) |
---|---|---|---|
def add_variable(self, name, fetch_as=None):
"""Add a new variable to the configuration.
name - Complete name of the variable in the form group.name
fetch_as - String representation of the type the variable should be
fetched as (e.g. uint8_t, float, FP16, etc.)
If no fetch_as type is supplied, then the stored type will be used
(i.e. the type of the fetched variable is the same as the type it is
stored as in the Crazyflie)."""
if fetch_as:
self.variables.append(LogVariable(name, fetch_as))
else:
# We cannot determine the default type until we have connected. So
# save the name and we will add these once we are connected.
self.default_fetch_as.append(name) | [
"def",
"add_variable",
"(",
"self",
",",
"name",
",",
"fetch_as",
"=",
"None",
")",
":",
"if",
"fetch_as",
":",
"self",
".",
"variables",
".",
"append",
"(",
"LogVariable",
"(",
"name",
",",
"fetch_as",
")",
")",
"else",
":",
"# We cannot determine the default type until we have connected. So",
"# save the name and we will add these once we are connected.",
"self",
".",
"default_fetch_as",
".",
"append",
"(",
"name",
")"
] | 47.9375 | 0.002558 |
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['wiktionarySenseId'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases | [
"def",
"read_paraphrase_file",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"input_file",
":",
"doc",
"=",
"etree",
".",
"parse",
"(",
"input_file",
")",
"assert",
"doc",
".",
"getroot",
"(",
")",
".",
"tag",
"==",
"'wiktionaryParaphrases'",
"paraphrases",
"=",
"[",
"]",
"for",
"child",
"in",
"doc",
".",
"getroot",
"(",
")",
":",
"if",
"child",
".",
"tag",
"==",
"'wiktionaryParaphrase'",
":",
"paraphrase",
"=",
"child",
"warn_attribs",
"(",
"''",
",",
"paraphrase",
",",
"PARAPHRASE_ATTRIBS",
")",
"if",
"0",
"<",
"len",
"(",
"paraphrase",
")",
":",
"print",
"(",
"'unrecognised child of <wiktionaryParaphrase>'",
",",
"list",
"(",
"paraphrase",
")",
")",
"paraphrase_dict",
"=",
"dict",
"(",
"paraphrase",
".",
"items",
"(",
")",
")",
"if",
"paraphrase_dict",
"[",
"'edited'",
"]",
"not",
"in",
"MAP_YESNO_TO_BOOL",
":",
"print",
"(",
"'<paraphrase> attribute \"edited\" has unexpected value'",
",",
"paraphrase_dict",
"[",
"'edited'",
"]",
")",
"else",
":",
"paraphrase_dict",
"[",
"'edited'",
"]",
"=",
"MAP_YESNO_TO_BOOL",
"[",
"paraphrase_dict",
"[",
"'edited'",
"]",
"]",
"if",
"not",
"paraphrase_dict",
"[",
"'wiktionarySenseId'",
"]",
".",
"isdigit",
"(",
")",
":",
"print",
"(",
"'<paraphrase> attribute \"wiktionarySenseId\" has '",
"'non-integer value'",
",",
"paraphrase_dict",
"[",
"'edited'",
"]",
")",
"else",
":",
"paraphrase_dict",
"[",
"'wiktionarySenseId'",
"]",
"=",
"int",
"(",
"paraphrase_dict",
"[",
"'wiktionarySenseId'",
"]",
",",
"10",
")",
"paraphrases",
".",
"append",
"(",
"paraphrase_dict",
")",
"else",
":",
"print",
"(",
"'unknown child of <wiktionaryParaphrases>'",
",",
"child",
")",
"return",
"paraphrases"
] | 40.131579 | 0.00064 |
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
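# Seed with the start edge; edge format (inferred): [start, end, lhs, found, expects].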
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart | [
"def",
"parse",
"(",
"self",
",",
"words",
",",
"S",
"=",
"'S'",
")",
":",
"self",
".",
"chart",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"words",
")",
"+",
"1",
")",
"]",
"self",
".",
"add_edge",
"(",
"[",
"0",
",",
"0",
",",
"'S_'",
",",
"[",
"]",
",",
"[",
"S",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"words",
")",
")",
":",
"self",
".",
"scanner",
"(",
"i",
",",
"words",
"[",
"i",
"]",
")",
"return",
"self",
".",
"chart"
] | 40.25 | 0.006079 |
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data)) | [
"def",
"on_message",
"(",
"self",
",",
"ws",
",",
"message",
")",
":",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"except",
"Exception",
":",
"self",
".",
"_set_error",
"(",
"message",
",",
"\"decode message failed\"",
")",
"else",
":",
"self",
".",
"_inbox",
".",
"put",
"(",
"RTMMessage",
"(",
"data",
")",
")"
] | 30.363636 | 0.005814 |
def concatenate(cls, datasets, datatype=None, new_type=None):
"""
Utility function to concatenate an NdMapping of Dataset objects.
"""
from . import Dataset, default_datatype
new_type = new_type or Dataset
if isinstance(datasets, NdMapping):
dimensions = datasets.kdims
keys, datasets = zip(*datasets.data.items())
elif isinstance(datasets, list) and all(not isinstance(v, tuple) for v in datasets):
# Allow concatenating list of datasets (by declaring no dimensions and keys)
dimensions, keys = [], [()]*len(datasets)
else:
raise DataError('Concatenation only supported for NdMappings '
'and lists of Datasets, found %s.' % type(datasets).__name__)
template = datasets[0]
datatype = datatype or template.interface.datatype
# Handle non-general datatypes by casting to general type
if datatype == 'array':
datatype = default_datatype
elif datatype == 'image':
datatype = 'grid'
if len(datasets) > 1 and not dimensions and cls.interfaces[datatype].gridded:
raise DataError('Datasets with %s datatype cannot be concatenated '
'without defining the dimensions to concatenate along. '
'Ensure you pass in a NdMapping (e.g. a HoloMap) '
'of Dataset types, not a list.' % datatype)
datasets = template.interface.cast(datasets, datatype)
template = datasets[0]
data = list(zip(keys, datasets)) if keys else datasets
concat_data = template.interface.concat(data, dimensions, vdims=template.vdims)
return template.clone(concat_data, kdims=dimensions+template.kdims, new_type=new_type) | [
"def",
"concatenate",
"(",
"cls",
",",
"datasets",
",",
"datatype",
"=",
"None",
",",
"new_type",
"=",
"None",
")",
":",
"from",
".",
"import",
"Dataset",
",",
"default_datatype",
"new_type",
"=",
"new_type",
"or",
"Dataset",
"if",
"isinstance",
"(",
"datasets",
",",
"NdMapping",
")",
":",
"dimensions",
"=",
"datasets",
".",
"kdims",
"keys",
",",
"datasets",
"=",
"zip",
"(",
"*",
"datasets",
".",
"data",
".",
"items",
"(",
")",
")",
"elif",
"isinstance",
"(",
"datasets",
",",
"list",
")",
"and",
"all",
"(",
"not",
"isinstance",
"(",
"v",
",",
"tuple",
")",
"for",
"v",
"in",
"datasets",
")",
":",
"# Allow concatenating list of datasets (by declaring no dimensions and keys)",
"dimensions",
",",
"keys",
"=",
"[",
"]",
",",
"[",
"(",
")",
"]",
"*",
"len",
"(",
"datasets",
")",
"else",
":",
"raise",
"DataError",
"(",
"'Concatenation only supported for NdMappings '",
"'and lists of Datasets, found %s.'",
"%",
"type",
"(",
"datasets",
")",
".",
"__name__",
")",
"template",
"=",
"datasets",
"[",
"0",
"]",
"datatype",
"=",
"datatype",
"or",
"template",
".",
"interface",
".",
"datatype",
"# Handle non-general datatypes by casting to general type",
"if",
"datatype",
"==",
"'array'",
":",
"datatype",
"=",
"default_datatype",
"elif",
"datatype",
"==",
"'image'",
":",
"datatype",
"=",
"'grid'",
"if",
"len",
"(",
"datasets",
")",
">",
"1",
"and",
"not",
"dimensions",
"and",
"cls",
".",
"interfaces",
"[",
"datatype",
"]",
".",
"gridded",
":",
"raise",
"DataError",
"(",
"'Datasets with %s datatype cannot be concatenated '",
"'without defining the dimensions to concatenate along. '",
"'Ensure you pass in a NdMapping (e.g. a HoloMap) '",
"'of Dataset types, not a list.'",
"%",
"datatype",
")",
"datasets",
"=",
"template",
".",
"interface",
".",
"cast",
"(",
"datasets",
",",
"datatype",
")",
"template",
"=",
"datasets",
"[",
"0",
"]",
"data",
"=",
"list",
"(",
"zip",
"(",
"keys",
",",
"datasets",
")",
")",
"if",
"keys",
"else",
"datasets",
"concat_data",
"=",
"template",
".",
"interface",
".",
"concat",
"(",
"data",
",",
"dimensions",
",",
"vdims",
"=",
"template",
".",
"vdims",
")",
"return",
"template",
".",
"clone",
"(",
"concat_data",
",",
"kdims",
"=",
"dimensions",
"+",
"template",
".",
"kdims",
",",
"new_type",
"=",
"new_type",
")"
] | 50.111111 | 0.004894 |
def extract(self, html_text: str, strategy: Strategy=Strategy.ALL_TEXT) \
-> List[Extraction]:
"""
Extracts text from an HTML page using a variety of strategies
Args:
html_text (str): HTML page as a string
strategy (enum[Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_RELAXED, Strategy.MAIN_CONTENT_STRICT]): one of
Strategy.ALL_TEXT, Strategy.MAIN_CONTENT_STRICT and Strategy.MAIN_CONTENT_RELAXED
Returns:
List[Extraction]: typically a singleton list with the extracted text
"""
if html_text:
if strategy == Strategy.ALL_TEXT:
soup = BeautifulSoup(html_text, 'html.parser')
texts = soup.findAll(text=True)
visible_texts = filter(self._tag_visible, texts)
all_text = u" ".join(t.strip() for t in visible_texts)
return [Extraction(all_text, self.name)]
else:
relax = strategy == Strategy.MAIN_CONTENT_RELAXED
readable = Document(html_text, recallPriority=relax).summary(html_partial=False)
clean_text = BeautifulSoup(readable.encode('utf-8'), 'lxml').strings
readability_text = ' '.join(clean_text)
return [Extraction(readability_text, self.name)]
else:
return [] | [
"def",
"extract",
"(",
"self",
",",
"html_text",
":",
"str",
",",
"strategy",
":",
"Strategy",
"=",
"Strategy",
".",
"ALL_TEXT",
")",
"->",
"List",
"[",
"Extraction",
"]",
":",
"if",
"html_text",
":",
"if",
"strategy",
"==",
"Strategy",
".",
"ALL_TEXT",
":",
"soup",
"=",
"BeautifulSoup",
"(",
"html_text",
",",
"'html.parser'",
")",
"texts",
"=",
"soup",
".",
"findAll",
"(",
"text",
"=",
"True",
")",
"visible_texts",
"=",
"filter",
"(",
"self",
".",
"_tag_visible",
",",
"texts",
")",
"all_text",
"=",
"u\" \"",
".",
"join",
"(",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"visible_texts",
")",
"return",
"[",
"Extraction",
"(",
"all_text",
",",
"self",
".",
"name",
")",
"]",
"else",
":",
"relax",
"=",
"strategy",
"==",
"Strategy",
".",
"MAIN_CONTENT_RELAXED",
"readable",
"=",
"Document",
"(",
"html_text",
",",
"recallPriority",
"=",
"relax",
")",
".",
"summary",
"(",
"html_partial",
"=",
"False",
")",
"clean_text",
"=",
"BeautifulSoup",
"(",
"readable",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"'lxml'",
")",
".",
"strings",
"readability_text",
"=",
"' '",
".",
"join",
"(",
"clean_text",
")",
"return",
"[",
"Extraction",
"(",
"readability_text",
",",
"self",
".",
"name",
")",
"]",
"else",
":",
"return",
"[",
"]"
] | 46.275862 | 0.006569 |
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm') | [
"def",
"populate_token_attributes",
"(",
"self",
",",
"response",
")",
":",
"if",
"'access_token'",
"in",
"response",
":",
"self",
".",
"access_token",
"=",
"response",
".",
"get",
"(",
"'access_token'",
")",
"if",
"'refresh_token'",
"in",
"response",
":",
"self",
".",
"refresh_token",
"=",
"response",
".",
"get",
"(",
"'refresh_token'",
")",
"if",
"'token_type'",
"in",
"response",
":",
"self",
".",
"token_type",
"=",
"response",
".",
"get",
"(",
"'token_type'",
")",
"if",
"'expires_in'",
"in",
"response",
":",
"self",
".",
"expires_in",
"=",
"response",
".",
"get",
"(",
"'expires_in'",
")",
"self",
".",
"_expires_at",
"=",
"time",
".",
"time",
"(",
")",
"+",
"int",
"(",
"self",
".",
"expires_in",
")",
"if",
"'expires_at'",
"in",
"response",
":",
"self",
".",
"_expires_at",
"=",
"int",
"(",
"response",
".",
"get",
"(",
"'expires_at'",
")",
")",
"if",
"'mac_key'",
"in",
"response",
":",
"self",
".",
"mac_key",
"=",
"response",
".",
"get",
"(",
"'mac_key'",
")",
"if",
"'mac_algorithm'",
"in",
"response",
":",
"self",
".",
"mac_algorithm",
"=",
"response",
".",
"get",
"(",
"'mac_algorithm'",
")"
] | 35.166667 | 0.002307 |
def do_startInstance(self,args):
"""Start specified instance"""
parser = CommandArgumentParser("startInstance")
parser.add_argument(dest='instance', help='instance index or name')
args = vars(parser.parse_args(args))
instanceId = args['instance']
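# The instance argument may be a numeric index into the scaling group's instance list or an instance id.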
try:
index = int(instanceId)
instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
instanceId = instances[index]['InstanceId']
except ValueError:
pass
client = AwsConnectionFactory.getEc2Client()
client.start_instances(InstanceIds=[instanceId])
"def",
"do_startInstance",
"(",
"self",
",",
"args",
")",
":",
"parser",
"=",
"CommandArgumentParser",
"(",
"\"startInstance\"",
")",
"parser",
".",
"add_argument",
"(",
"dest",
"=",
"'instance'",
",",
"help",
"=",
"'instance index or name'",
")",
"args",
"=",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"args",
")",
")",
"instanceId",
"=",
"args",
"[",
"'instance'",
"]",
"force",
"=",
"args",
"[",
"'force'",
"]",
"try",
":",
"index",
"=",
"int",
"(",
"instanceId",
")",
"instances",
"=",
"self",
".",
"scalingGroupDescription",
"[",
"'AutoScalingGroups'",
"]",
"[",
"0",
"]",
"[",
"'Instances'",
"]",
"instanceId",
"=",
"instances",
"[",
"index",
"]",
"except",
"ValueError",
":",
"pass",
"client",
"=",
"AwsConnectionFactory",
".",
"getEc2Client",
"(",
")",
"client",
".",
"start_instances",
"(",
"InstanceIds",
"=",
"[",
"instanceId",
"[",
"'InstanceId'",
"]",
"]",
")"
] | 38.294118 | 0.008996 |
def config():
"""
Load system configuration
@rtype: ConfigParser
"""
cfg = ConfigParser()
cfg.read(os.path.join(os.path.dirname(os.path.realpath(ips_vagrant.__file__)), 'config/ipsv.conf'))
return cfg | [
"def",
"config",
"(",
")",
":",
"cfg",
"=",
"ConfigParser",
"(",
")",
"cfg",
".",
"read",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"ips_vagrant",
".",
"__file__",
")",
")",
",",
"'config/ipsv.conf'",
")",
")",
"return",
"cfg"
] | 27.625 | 0.008772 |
def nvmlDeviceRegisterEvents(handle, eventTypes, eventSet):
r"""
/**
* Starts recording of events on a specified device and adds the events to the specified \ref nvmlEventSet_t
*
* For Fermi &tm; or newer fully supported devices.
* Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors)
* Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode)
*
* For Linux only.
*
* \b IMPORTANT: Operations on \a set are not thread safe
*
* This call starts recording of events on specific device.
* All events that occurred before this call are not recorded.
* Checking if some event occurred can be done with \ref nvmlEventSetWait
*
* If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed.
* If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes
* are registered in that case.
*
* @param device The identifier of the target device
* @param eventTypes Bitmask of \ref nvmlEventType to record
* @param set Set to which add new event types
*
* @return
* - \ref NVML_SUCCESS if the event has been set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL
*         - \ref NVML_ERROR_NOT_SUPPORTED    if the platform does not support this feature or some of the requested event types
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlEventType
* @see nvmlDeviceGetSupportedEventTypes
* @see nvmlEventSetWait
* @see nvmlEventSetFree
*/
nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceRegisterEvents")
ret = fn(handle, c_ulonglong(eventTypes), eventSet)
_nvmlCheckReturn(ret)
return None | [
"def",
"nvmlDeviceRegisterEvents",
"(",
"handle",
",",
"eventTypes",
",",
"eventSet",
")",
":",
"fn",
"=",
"_nvmlGetFunctionPointer",
"(",
"\"nvmlDeviceRegisterEvents\"",
")",
"ret",
"=",
"fn",
"(",
"handle",
",",
"c_ulonglong",
"(",
"eventTypes",
")",
",",
"eventSet",
")",
"_nvmlCheckReturn",
"(",
"ret",
")",
"return",
"None"
] | 51.454545 | 0.005635 |
def clip_by_extent(layer, extent):
"""Clip a raster using a bounding box using processing.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to clip.
:type layer: QgsRasterLayer
:param extent: The extent.
:type extent: QgsRectangle
:return: Clipped layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"""
parameters = dict()
# noinspection PyBroadException
try:
output_layer_name = quick_clip_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
output_raster = unique_filename(suffix='.tif', dir=temp_dir())
# We make a one pixel size buffer on the extent to cover every pixel.
# See https://github.com/inasafe/inasafe/issues/3655
pixel_size_x = layer.rasterUnitsPerPixelX()
pixel_size_y = layer.rasterUnitsPerPixelY()
buffer_size = max(pixel_size_x, pixel_size_y)
extent = extent.buffered(buffer_size)
if is_raster_y_inverted(layer):
# The raster is Y inverted. We need to switch Y min and Y max.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMaximum()),
str(extent.yMinimum())
]
else:
# The raster is normal.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMinimum()),
str(extent.yMaximum())
]
# These values are all from the processing algorithm.
# https://github.com/qgis/QGIS/blob/master/python/plugins/processing/
# algs/gdal/ClipByExtent.py
# Please read the file to know these parameters.
parameters['INPUT'] = layer.source()
parameters['NO_DATA'] = ''
parameters['PROJWIN'] = ','.join(bbox)
parameters['DATA_TYPE'] = 5
parameters['COMPRESS'] = 4
parameters['JPEGCOMPRESSION'] = 75
parameters['ZLEVEL'] = 6
parameters['PREDICTOR'] = 1
parameters['TILED'] = False
parameters['BIGTIFF'] = 0
parameters['TFW'] = False
parameters['EXTRA'] = ''
parameters['OUTPUT'] = output_raster
initialize_processing()
feedback = create_processing_feedback()
context = create_processing_context(feedback=feedback)
result = processing.run(
"gdal:cliprasterbyextent",
parameters,
context=context)
if result is None:
raise ProcessingInstallationError
clipped = QgsRasterLayer(result['OUTPUT'], output_layer_name)
# We transfer keywords to the output.
clipped.keywords = layer.keywords.copy()
clipped.keywords['title'] = output_layer_name
check_layer(clipped)
except Exception as e:
# This step clip_raster_by_extent was nice to speed up the analysis.
# As we got an exception because the layer is invalid, we are not going
# to stop the analysis. We will return the original raster layer.
# It will take more processing time until we clip the vector layer.
# Check https://github.com/inasafe/inasafe/issues/4026 why we got some
# exceptions with this step.
LOGGER.exception(parameters)
LOGGER.exception(
'Error from QGIS clip raster by extent. Please check the QGIS '
'logs too !')
LOGGER.info(
'Even if we got an exception, we are continuing the analysis. The '
'layer was not clipped.')
LOGGER.exception(str(e))
LOGGER.exception(get_error_message(e).to_text())
clipped = layer
return clipped | [
"def",
"clip_by_extent",
"(",
"layer",
",",
"extent",
")",
":",
"parameters",
"=",
"dict",
"(",
")",
"# noinspection PyBroadException",
"try",
":",
"output_layer_name",
"=",
"quick_clip_steps",
"[",
"'output_layer_name'",
"]",
"output_layer_name",
"=",
"output_layer_name",
"%",
"layer",
".",
"keywords",
"[",
"'layer_purpose'",
"]",
"output_raster",
"=",
"unique_filename",
"(",
"suffix",
"=",
"'.tif'",
",",
"dir",
"=",
"temp_dir",
"(",
")",
")",
"# We make one pixel size buffer on the extent to cover every pixels.",
"# See https://github.com/inasafe/inasafe/issues/3655",
"pixel_size_x",
"=",
"layer",
".",
"rasterUnitsPerPixelX",
"(",
")",
"pixel_size_y",
"=",
"layer",
".",
"rasterUnitsPerPixelY",
"(",
")",
"buffer_size",
"=",
"max",
"(",
"pixel_size_x",
",",
"pixel_size_y",
")",
"extent",
"=",
"extent",
".",
"buffered",
"(",
"buffer_size",
")",
"if",
"is_raster_y_inverted",
"(",
"layer",
")",
":",
"# The raster is Y inverted. We need to switch Y min and Y max.",
"bbox",
"=",
"[",
"str",
"(",
"extent",
".",
"xMinimum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"xMaximum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"yMaximum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"yMinimum",
"(",
")",
")",
"]",
"else",
":",
"# The raster is normal.",
"bbox",
"=",
"[",
"str",
"(",
"extent",
".",
"xMinimum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"xMaximum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"yMinimum",
"(",
")",
")",
",",
"str",
"(",
"extent",
".",
"yMaximum",
"(",
")",
")",
"]",
"# These values are all from the processing algorithm.",
"# https://github.com/qgis/QGIS/blob/master/python/plugins/processing/",
"# algs/gdal/ClipByExtent.py",
"# Please read the file to know these parameters.",
"parameters",
"[",
"'INPUT'",
"]",
"=",
"layer",
".",
"source",
"(",
")",
"parameters",
"[",
"'NO_DATA'",
"]",
"=",
"''",
"parameters",
"[",
"'PROJWIN'",
"]",
"=",
"','",
".",
"join",
"(",
"bbox",
")",
"parameters",
"[",
"'DATA_TYPE'",
"]",
"=",
"5",
"parameters",
"[",
"'COMPRESS'",
"]",
"=",
"4",
"parameters",
"[",
"'JPEGCOMPRESSION'",
"]",
"=",
"75",
"parameters",
"[",
"'ZLEVEL'",
"]",
"=",
"6",
"parameters",
"[",
"'PREDICTOR'",
"]",
"=",
"1",
"parameters",
"[",
"'TILED'",
"]",
"=",
"False",
"parameters",
"[",
"'BIGTIFF'",
"]",
"=",
"0",
"parameters",
"[",
"'TFW'",
"]",
"=",
"False",
"parameters",
"[",
"'EXTRA'",
"]",
"=",
"''",
"parameters",
"[",
"'OUTPUT'",
"]",
"=",
"output_raster",
"initialize_processing",
"(",
")",
"feedback",
"=",
"create_processing_feedback",
"(",
")",
"context",
"=",
"create_processing_context",
"(",
"feedback",
"=",
"feedback",
")",
"result",
"=",
"processing",
".",
"run",
"(",
"\"gdal:cliprasterbyextent\"",
",",
"parameters",
",",
"context",
"=",
"context",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"ProcessingInstallationError",
"clipped",
"=",
"QgsRasterLayer",
"(",
"result",
"[",
"'OUTPUT'",
"]",
",",
"output_layer_name",
")",
"# We transfer keywords to the output.",
"clipped",
".",
"keywords",
"=",
"layer",
".",
"keywords",
".",
"copy",
"(",
")",
"clipped",
".",
"keywords",
"[",
"'title'",
"]",
"=",
"output_layer_name",
"check_layer",
"(",
"clipped",
")",
"except",
"Exception",
"as",
"e",
":",
"# This step clip_raster_by_extent was nice to speedup the analysis.",
"# As we got an exception because the layer is invalid, we are not going",
"# to stop the analysis. We will return the original raster layer.",
"# It will take more processing time until we clip the vector layer.",
"# Check https://github.com/inasafe/inasafe/issues/4026 why we got some",
"# exceptions with this step.",
"LOGGER",
".",
"exception",
"(",
"parameters",
")",
"LOGGER",
".",
"exception",
"(",
"'Error from QGIS clip raster by extent. Please check the QGIS '",
"'logs too !'",
")",
"LOGGER",
".",
"info",
"(",
"'Even if we got an exception, we are continuing the analysis. The '",
"'layer was not clipped.'",
")",
"LOGGER",
".",
"exception",
"(",
"str",
"(",
"e",
")",
")",
"LOGGER",
".",
"exception",
"(",
"get_error_message",
"(",
"e",
")",
".",
"to_text",
"(",
")",
")",
"clipped",
"=",
"layer",
"return",
"clipped"
] | 34.904762 | 0.000265 |
def write_template(fn, lang="python"):
"""
Write language-specific script template to file.
Arguments:
- fn(``string``) path to save the template to
- lang('python', 'bash') which programming language
"""
with open(fn, "wb") as fh:
if lang == "python":
fh.write(PY_TEMPLATE)
elif lang == "bash":
fh.write(SH_TEMPLATE) | [
"def",
"write_template",
"(",
"fn",
",",
"lang",
"=",
"\"python\"",
")",
":",
"with",
"open",
"(",
"fn",
",",
"\"wb\"",
")",
"as",
"fh",
":",
"if",
"lang",
"==",
"\"python\"",
":",
"fh",
".",
"write",
"(",
"PY_TEMPLATE",
")",
"elif",
"lang",
"==",
"\"bash\"",
":",
"fh",
".",
"write",
"(",
"SH_TEMPLATE",
")"
] | 25.6 | 0.005025 |
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False, demux=False):
"""
Attach to a container.
The ``.logs()`` function is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
container (str): The container to attach to.
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
demux (bool): Keep stdout and stderr separate.
Returns:
By default, the container's output as a single string (two if
``demux=True``: one for stdout and one for stderr).
If ``stream=True``, an iterator of output strings. If
``demux=True``, two iterators are returned: one for stdout and one
for stderr.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0
}
headers = {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
u = self._url("/containers/{0}/attach", container)
response = self._post(u, headers=headers, params=params, stream=True)
output = self._read_from_socket(
response, stream, self._check_is_tty(container), demux=demux)
if stream:
return CancellableStream(output, response)
else:
return output | [
"def",
"attach",
"(",
"self",
",",
"container",
",",
"stdout",
"=",
"True",
",",
"stderr",
"=",
"True",
",",
"stream",
"=",
"False",
",",
"logs",
"=",
"False",
",",
"demux",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'logs'",
":",
"logs",
"and",
"1",
"or",
"0",
",",
"'stdout'",
":",
"stdout",
"and",
"1",
"or",
"0",
",",
"'stderr'",
":",
"stderr",
"and",
"1",
"or",
"0",
",",
"'stream'",
":",
"stream",
"and",
"1",
"or",
"0",
"}",
"headers",
"=",
"{",
"'Connection'",
":",
"'Upgrade'",
",",
"'Upgrade'",
":",
"'tcp'",
"}",
"u",
"=",
"self",
".",
"_url",
"(",
"\"/containers/{0}/attach\"",
",",
"container",
")",
"response",
"=",
"self",
".",
"_post",
"(",
"u",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
")",
"output",
"=",
"self",
".",
"_read_from_socket",
"(",
"response",
",",
"stream",
",",
"self",
".",
"_check_is_tty",
"(",
"container",
")",
",",
"demux",
"=",
"demux",
")",
"if",
"stream",
":",
"return",
"CancellableStream",
"(",
"output",
",",
"response",
")",
"else",
":",
"return",
"output"
] | 35.057692 | 0.001601 |
def can_attack_air(self) -> bool:
""" Does not include upgrades """
if self._weapons:
weapon = next(
(weapon for weapon in self._weapons if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
None,
)
return weapon is not None
return False | [
"def",
"can_attack_air",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"self",
".",
"_weapons",
":",
"weapon",
"=",
"next",
"(",
"(",
"weapon",
"for",
"weapon",
"in",
"self",
".",
"_weapons",
"if",
"weapon",
".",
"type",
"in",
"{",
"TargetType",
".",
"Air",
".",
"value",
",",
"TargetType",
".",
"Any",
".",
"value",
"}",
")",
",",
"None",
",",
")",
"return",
"weapon",
"is",
"not",
"None",
"return",
"False"
] | 36.888889 | 0.008824 |
def main():
"""
NAME
convert2unix.py
DESCRIPTION
converts a Mac- or DOS-formatted file to a Unix file in place
SYNTAX
convert2unix.py FILE
OPTIONS
-h prints help and quits
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
file=sys.argv[1]
f=open(file,'r')
Input=f.readlines()
f.close()
out=open(file,'w')
for line in Input:
out.write(line)
out.close() | [
"def",
"main",
"(",
")",
":",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"file",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"f",
"=",
"open",
"(",
"file",
",",
"'r'",
")",
"Input",
"=",
"f",
".",
"readlines",
"(",
")",
"f",
".",
"close",
"(",
")",
"out",
"=",
"open",
"(",
"file",
",",
"'w'",
")",
"for",
"line",
"in",
"Input",
":",
"out",
".",
"write",
"(",
"line",
")",
"out",
".",
"close",
"(",
")"
] | 17.615385 | 0.024845 |
def is_owner(self, user):
"""
Checks if user is the owner of object
Parameters
----------
user: get_user_model() instance
Returns
-------
bool
Author
------
Himanshu Shankar (https://himanshus.com)
"""
if user.is_authenticated:
return self.created_by.id == user.id
return False | [
"def",
"is_owner",
"(",
"self",
",",
"user",
")",
":",
"if",
"user",
".",
"is_authenticated",
":",
"return",
"self",
".",
"created_by",
".",
"id",
"==",
"user",
".",
"id",
"return",
"False"
] | 20.315789 | 0.00495 |
def account_overview(object):
"""Create layout for user profile"""
return Layout(
Container(
Row(
Column2(
Panel(
'Avatar',
Img(src="{}{}".format(settings.MEDIA_URL, object.avatar)),
collapse=True,
),
),
Column10(
Panel(
'Account information',
DescriptionList(
'email',
'first_name',
'last_name',
),
)
),
)
)
) | [
"def",
"account_overview",
"(",
"object",
")",
":",
"return",
"Layout",
"(",
"Container",
"(",
"Row",
"(",
"Column2",
"(",
"Panel",
"(",
"'Avatar'",
",",
"Img",
"(",
"src",
"=",
"\"{}{}\"",
".",
"format",
"(",
"settings",
".",
"MEDIA_URL",
",",
"object",
".",
"avatar",
")",
")",
",",
"collapse",
"=",
"True",
",",
")",
",",
")",
",",
"Column10",
"(",
"Panel",
"(",
"'Account information'",
",",
"DescriptionList",
"(",
"'email'",
",",
"'first_name'",
",",
"'last_name'",
",",
")",
",",
")",
")",
",",
")",
")",
")"
] | 28.4 | 0.002725 |
def low(self, fun, low, print_event=True, full_return=False):
'''
Execute a function from low data
Low data includes:
required:
- fun: the name of the function to run
optional:
- arg: a list of args to pass to fun
- kwarg: kwargs for fun
- __user__: user who is running the command
- __jid__: jid to run under
- __tag__: tag to run under
'''
# fire the mminion loading (if not already done) here
# this is not to clutter the output with the module loading
# if we have a high debug level.
self.mminion # pylint: disable=W0104
jid = low.get('__jid__', salt.utils.jid.gen_jid(self.opts))
tag = low.get('__tag__', salt.utils.event.tagify(jid, prefix=self.tag_prefix))
data = {'fun': '{0}.{1}'.format(self.client, fun),
'jid': jid,
'user': low.get('__user__', 'UNKNOWN'),
}
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
if print_event:
print_func = self.print_async_event \
if hasattr(self, 'print_async_event') \
else None
else:
# Suppress printing of return event (this keeps us from printing
# runner/wheel output during orchestration).
print_func = None
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=print_func
)
# TODO: test that they exist
# TODO: Other things to inject??
func_globals = {'__jid__': jid,
'__user__': data['user'],
'__tag__': tag,
# weak ref to avoid the Exception in interpreter
# teardown of event
'__jid_event__': weakref.proxy(namespaced_event),
}
try:
self_functions = pycopy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, fun)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
# There are some discrepancies of what a "low" structure is in the
# publisher world it is a dict including stuff such as jid, fun,
# arg (a list of args, with kwargs packed in). Historically this
# particular one has had no "arg" and just has had all the kwargs
# packed into the top level object. The plan is to move away from
# that since the caller knows what is an arg vs a kwarg, but while
# we make the transition we will load "kwargs" using format_call if
# there are no kwargs in the low object passed in.
if 'arg' in low and 'kwarg' in low:
args = low['arg']
kwargs = low['kwarg']
else:
f_call = salt.utils.args.format_call(
self.functions[fun],
low,
expected_extra_kws=CLIENT_INTERNAL_KEYWORDS
)
args = f_call.get('args', ())
kwargs = f_call.get('kwargs', {})
# Update the event data with loaded args and kwargs
data['fun_args'] = list(args) + ([kwargs] if kwargs else [])
func_globals['__jid_event__'].fire_event(data, 'new')
# Track the job locally so we know what is running on the master
serial = salt.payload.Serial(self.opts)
jid_proc_file = os.path.join(*[self.opts['cachedir'], 'proc', jid])
data['pid'] = os.getpid()
with salt.utils.files.fopen(jid_proc_file, 'w+b') as fp_:
fp_.write(serial.dumps(data))
del data['pid']
# Initialize a context for executing the method.
with tornado.stack_context.StackContext(self.functions.context_dict.clone):
func = self.functions[fun]
try:
data['return'] = func(*args, **kwargs)
except TypeError as exc:
data['return'] = salt.utils.text.cli_info('Error: {exc}\nUsage:\n{doc}'.format(
exc=exc, doc=func.__doc__), 'Passed invalid arguments')
except Exception as exc:
data['return'] = salt.utils.text.cli_info(six.text_type(exc), 'General error occurred')
try:
data['success'] = self.context.get('retcode', 0) == 0
except AttributeError:
# Assume a True result if no context attribute
data['success'] = True
if isinstance(data['return'], dict) and 'data' in data['return']:
# some functions can return boolean values
data['success'] = salt.utils.state.check_result(data['return']['data'])
except (Exception, SystemExit) as ex:
if isinstance(ex, salt.exceptions.NotImplemented):
data['return'] = six.text_type(ex)
else:
data['return'] = 'Exception occurred in {client} {fun}: {tb}'.format(
client=self.client, fun=fun, tb=traceback.format_exc())
data['success'] = False
finally:
# Job has finished or issue found, so let's clean up after ourselves
try:
os.remove(jid_proc_file)
except OSError as err:
log.error("Error attempting to remove master job tracker: %s", err)
if self.store_job:
try:
salt.utils.job.store_job(
self.opts,
{
'id': self.opts['id'],
'tgt': self.opts['id'],
'jid': data['jid'],
'return': data,
},
event=None,
mminion=self.mminion,
)
except salt.exceptions.SaltCacheError:
log.error('Could not store job cache info. '
'Job details for this run may be unavailable.')
# Outputters _can_ mutate data so write to the job cache first!
namespaced_event.fire_event(data, 'ret')
# if we fired an event, make sure to delete the event object.
# This will ensure that we call destroy, which will do the 0MQ linger
log.info('Runner completed: %s', data['jid'])
del event
del namespaced_event
return data if full_return else data['return'] | [
"def",
"low",
"(",
"self",
",",
"fun",
",",
"low",
",",
"print_event",
"=",
"True",
",",
"full_return",
"=",
"False",
")",
":",
"# fire the mminion loading (if not already done) here",
"# this is not to clutter the output with the module loading",
"# if we have a high debug level.",
"self",
".",
"mminion",
"# pylint: disable=W0104",
"jid",
"=",
"low",
".",
"get",
"(",
"'__jid__'",
",",
"salt",
".",
"utils",
".",
"jid",
".",
"gen_jid",
"(",
"self",
".",
"opts",
")",
")",
"tag",
"=",
"low",
".",
"get",
"(",
"'__tag__'",
",",
"salt",
".",
"utils",
".",
"event",
".",
"tagify",
"(",
"jid",
",",
"prefix",
"=",
"self",
".",
"tag_prefix",
")",
")",
"data",
"=",
"{",
"'fun'",
":",
"'{0}.{1}'",
".",
"format",
"(",
"self",
".",
"client",
",",
"fun",
")",
",",
"'jid'",
":",
"jid",
",",
"'user'",
":",
"low",
".",
"get",
"(",
"'__user__'",
",",
"'UNKNOWN'",
")",
",",
"}",
"event",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"get_event",
"(",
"'master'",
",",
"self",
".",
"opts",
"[",
"'sock_dir'",
"]",
",",
"self",
".",
"opts",
"[",
"'transport'",
"]",
",",
"opts",
"=",
"self",
".",
"opts",
",",
"listen",
"=",
"False",
")",
"if",
"print_event",
":",
"print_func",
"=",
"self",
".",
"print_async_event",
"if",
"hasattr",
"(",
"self",
",",
"'print_async_event'",
")",
"else",
"None",
"else",
":",
"# Suppress printing of return event (this keeps us from printing",
"# runner/wheel output during orchestration).",
"print_func",
"=",
"None",
"namespaced_event",
"=",
"salt",
".",
"utils",
".",
"event",
".",
"NamespacedEvent",
"(",
"event",
",",
"tag",
",",
"print_func",
"=",
"print_func",
")",
"# TODO: test that they exist",
"# TODO: Other things to inject??",
"func_globals",
"=",
"{",
"'__jid__'",
":",
"jid",
",",
"'__user__'",
":",
"data",
"[",
"'user'",
"]",
",",
"'__tag__'",
":",
"tag",
",",
"# weak ref to avoid the Exception in interpreter",
"# teardown of event",
"'__jid_event__'",
":",
"weakref",
".",
"proxy",
"(",
"namespaced_event",
")",
",",
"}",
"try",
":",
"self_functions",
"=",
"pycopy",
".",
"copy",
"(",
"self",
".",
"functions",
")",
"salt",
".",
"utils",
".",
"lazy",
".",
"verify_fun",
"(",
"self_functions",
",",
"fun",
")",
"# Inject some useful globals to *all* the function's global",
"# namespace only once per module-- not per func",
"completed_funcs",
"=",
"[",
"]",
"for",
"mod_name",
"in",
"six",
".",
"iterkeys",
"(",
"self_functions",
")",
":",
"if",
"'.'",
"not",
"in",
"mod_name",
":",
"continue",
"mod",
",",
"_",
"=",
"mod_name",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"if",
"mod",
"in",
"completed_funcs",
":",
"continue",
"completed_funcs",
".",
"append",
"(",
"mod",
")",
"for",
"global_key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"func_globals",
")",
":",
"self",
".",
"functions",
"[",
"mod_name",
"]",
".",
"__globals__",
"[",
"global_key",
"]",
"=",
"value",
"# There are some discrepancies of what a \"low\" structure is in the",
"# publisher world it is a dict including stuff such as jid, fun,",
"# arg (a list of args, with kwargs packed in). Historically this",
"# particular one has had no \"arg\" and just has had all the kwargs",
"# packed into the top level object. The plan is to move away from",
"# that since the caller knows what is an arg vs a kwarg, but while",
"# we make the transition we will load \"kwargs\" using format_call if",
"# there are no kwargs in the low object passed in.",
"if",
"'arg'",
"in",
"low",
"and",
"'kwarg'",
"in",
"low",
":",
"args",
"=",
"low",
"[",
"'arg'",
"]",
"kwargs",
"=",
"low",
"[",
"'kwarg'",
"]",
"else",
":",
"f_call",
"=",
"salt",
".",
"utils",
".",
"args",
".",
"format_call",
"(",
"self",
".",
"functions",
"[",
"fun",
"]",
",",
"low",
",",
"expected_extra_kws",
"=",
"CLIENT_INTERNAL_KEYWORDS",
")",
"args",
"=",
"f_call",
".",
"get",
"(",
"'args'",
",",
"(",
")",
")",
"kwargs",
"=",
"f_call",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
"# Update the event data with loaded args and kwargs",
"data",
"[",
"'fun_args'",
"]",
"=",
"list",
"(",
"args",
")",
"+",
"(",
"[",
"kwargs",
"]",
"if",
"kwargs",
"else",
"[",
"]",
")",
"func_globals",
"[",
"'__jid_event__'",
"]",
".",
"fire_event",
"(",
"data",
",",
"'new'",
")",
"# Track the job locally so we know what is running on the master",
"serial",
"=",
"salt",
".",
"payload",
".",
"Serial",
"(",
"self",
".",
"opts",
")",
"jid_proc_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"[",
"self",
".",
"opts",
"[",
"'cachedir'",
"]",
",",
"'proc'",
",",
"jid",
"]",
")",
"data",
"[",
"'pid'",
"]",
"=",
"os",
".",
"getpid",
"(",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"jid_proc_file",
",",
"'w+b'",
")",
"as",
"fp_",
":",
"fp_",
".",
"write",
"(",
"serial",
".",
"dumps",
"(",
"data",
")",
")",
"del",
"data",
"[",
"'pid'",
"]",
"# Initialize a context for executing the method.",
"with",
"tornado",
".",
"stack_context",
".",
"StackContext",
"(",
"self",
".",
"functions",
".",
"context_dict",
".",
"clone",
")",
":",
"func",
"=",
"self",
".",
"functions",
"[",
"fun",
"]",
"try",
":",
"data",
"[",
"'return'",
"]",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"TypeError",
"as",
"exc",
":",
"data",
"[",
"'return'",
"]",
"=",
"salt",
".",
"utils",
".",
"text",
".",
"cli_info",
"(",
"'Error: {exc}\\nUsage:\\n{doc}'",
".",
"format",
"(",
"exc",
"=",
"exc",
",",
"doc",
"=",
"func",
".",
"__doc__",
")",
",",
"'Passed invalid arguments'",
")",
"except",
"Exception",
"as",
"exc",
":",
"data",
"[",
"'return'",
"]",
"=",
"salt",
".",
"utils",
".",
"text",
".",
"cli_info",
"(",
"six",
".",
"text_type",
"(",
"exc",
")",
",",
"'General error occurred'",
")",
"try",
":",
"data",
"[",
"'success'",
"]",
"=",
"self",
".",
"context",
".",
"get",
"(",
"'retcode'",
",",
"0",
")",
"==",
"0",
"except",
"AttributeError",
":",
"# Assume a True result if no context attribute",
"data",
"[",
"'success'",
"]",
"=",
"True",
"if",
"isinstance",
"(",
"data",
"[",
"'return'",
"]",
",",
"dict",
")",
"and",
"'data'",
"in",
"data",
"[",
"'return'",
"]",
":",
"# some functions can return boolean values",
"data",
"[",
"'success'",
"]",
"=",
"salt",
".",
"utils",
".",
"state",
".",
"check_result",
"(",
"data",
"[",
"'return'",
"]",
"[",
"'data'",
"]",
")",
"except",
"(",
"Exception",
",",
"SystemExit",
")",
"as",
"ex",
":",
"if",
"isinstance",
"(",
"ex",
",",
"salt",
".",
"exceptions",
".",
"NotImplemented",
")",
":",
"data",
"[",
"'return'",
"]",
"=",
"six",
".",
"text_type",
"(",
"ex",
")",
"else",
":",
"data",
"[",
"'return'",
"]",
"=",
"'Exception occurred in {client} {fun}: {tb}'",
".",
"format",
"(",
"client",
"=",
"self",
".",
"client",
",",
"fun",
"=",
"fun",
",",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
")",
"data",
"[",
"'success'",
"]",
"=",
"False",
"finally",
":",
"# Job has finished or issue found, so let's clean up after ourselves",
"try",
":",
"os",
".",
"remove",
"(",
"jid_proc_file",
")",
"except",
"OSError",
"as",
"err",
":",
"log",
".",
"error",
"(",
"\"Error attempting to remove master job tracker: %s\"",
",",
"err",
")",
"if",
"self",
".",
"store_job",
":",
"try",
":",
"salt",
".",
"utils",
".",
"job",
".",
"store_job",
"(",
"self",
".",
"opts",
",",
"{",
"'id'",
":",
"self",
".",
"opts",
"[",
"'id'",
"]",
",",
"'tgt'",
":",
"self",
".",
"opts",
"[",
"'id'",
"]",
",",
"'jid'",
":",
"data",
"[",
"'jid'",
"]",
",",
"'return'",
":",
"data",
",",
"}",
",",
"event",
"=",
"None",
",",
"mminion",
"=",
"self",
".",
"mminion",
",",
")",
"except",
"salt",
".",
"exceptions",
".",
"SaltCacheError",
":",
"log",
".",
"error",
"(",
"'Could not store job cache info. '",
"'Job details for this run may be unavailable.'",
")",
"# Outputters _can_ mutate data so write to the job cache first!",
"namespaced_event",
".",
"fire_event",
"(",
"data",
",",
"'ret'",
")",
"# if we fired an event, make sure to delete the event object.",
"# This will ensure that we call destroy, which will do the 0MQ linger",
"log",
".",
"info",
"(",
"'Runner completed: %s'",
",",
"data",
"[",
"'jid'",
"]",
")",
"del",
"event",
"del",
"namespaced_event",
"return",
"data",
"if",
"full_return",
"else",
"data",
"[",
"'return'",
"]"
] | 43.271084 | 0.001633 |
def dataset_create_new_cli(self,
folder=None,
public=False,
quiet=False,
convert_to_csv=True,
dir_mode='skip'):
""" client wrapper for creating a new dataset
Parameters
==========
folder: the folder to initialize the metadata file in
public: should the dataset be public?
quiet: suppress verbose output (default is False)
convert_to_csv: if True, convert data to comma separated value
dir_mode: What to do with directories: "skip" - ignore; "zip" - compress and upload
"""
folder = folder or os.getcwd()
result = self.dataset_create_new(folder, public, quiet, convert_to_csv,
dir_mode)
if result.invalidTags:
print('The following are not valid tags and could not be added to '
'the dataset: ' + str(result.invalidTags))
if result.status.lower() == 'ok':
if public:
print('Your public Dataset is being created. Please check '
'progress at ' + result.url)
else:
print('Your private Dataset is being created. Please check '
'progress at ' + result.url)
else:
print('Dataset creation error: ' + result.error) | [
"def",
"dataset_create_new_cli",
"(",
"self",
",",
"folder",
"=",
"None",
",",
"public",
"=",
"False",
",",
"quiet",
"=",
"False",
",",
"convert_to_csv",
"=",
"True",
",",
"dir_mode",
"=",
"'skip'",
")",
":",
"folder",
"=",
"folder",
"or",
"os",
".",
"getcwd",
"(",
")",
"result",
"=",
"self",
".",
"dataset_create_new",
"(",
"folder",
",",
"public",
",",
"quiet",
",",
"convert_to_csv",
",",
"dir_mode",
")",
"if",
"result",
".",
"invalidTags",
":",
"print",
"(",
"'The following are not valid tags and could not be added to '",
"'the dataset: '",
"+",
"str",
"(",
"result",
".",
"invalidTags",
")",
")",
"if",
"result",
".",
"status",
".",
"lower",
"(",
")",
"==",
"'ok'",
":",
"if",
"public",
":",
"print",
"(",
"'Your public Dataset is being created. Please check '",
"'progress at '",
"+",
"result",
".",
"url",
")",
"else",
":",
"print",
"(",
"'Your private Dataset is being created. Please check '",
"'progress at '",
"+",
"result",
".",
"url",
")",
"else",
":",
"print",
"(",
"'Dataset creation error: '",
"+",
"result",
".",
"error",
")"
] | 48.466667 | 0.005394 |
def field(cls, field, query, boost=None, enable_position_increments=None):
'''
A query that executes a query string against a specific field. It is a simplified version of the query_string query (by setting the default_field to the field this query is executed against). In its simplest form:
{
"field" : {
"name.first" : "+something -else"
}
}
Most of the query_string parameters are allowed with the field query as well; in such a case, the query should be formatted as follows:
{
"field" : {
"name.first" : {
"query" : "+something -else",
"boost" : 2.0,
"enable_position_increments": false
}
}
}
'''
instance = cls(field={field: {'query': query}})
if boost is not None:
instance['field'][field]['boost'] = boost
if enable_position_increments is not None:
instance['field'][field]['enable_position_increments'] = enable_position_increments
return instance | [
"def",
"field",
"(",
"cls",
",",
"field",
",",
"query",
",",
"boost",
"=",
"None",
",",
"enable_position_increments",
"=",
"None",
")",
":",
"instance",
"=",
"cls",
"(",
"field",
"=",
"{",
"field",
":",
"{",
"'query'",
":",
"query",
"}",
"}",
")",
"if",
"boost",
"is",
"not",
"None",
":",
"instance",
"[",
"'field'",
"]",
"[",
"'boost'",
"]",
"=",
"boost",
"if",
"enable_position_increments",
"is",
"not",
"None",
":",
"instance",
"[",
"'field'",
"]",
"[",
"'enable_position_increments'",
"]",
"=",
"enable_position_increments",
"return",
"instance"
] | 40.296296 | 0.004488 |
def get(path, name): # type: (str, str) -> _EntryPointType
"""
Args:
path (string): Directory where the entry point is located
name (string): Name of the entry point file
Returns:
(_EntryPointType): The type of the entry point
"""
if 'setup.py' in os.listdir(path):
return _EntryPointType.PYTHON_PACKAGE
elif name.endswith('.py'):
return _EntryPointType.PYTHON_PROGRAM
else:
return _EntryPointType.COMMAND | [
"def",
"get",
"(",
"path",
",",
"name",
")",
":",
"# type: (str, str) -> _EntryPointType",
"if",
"'setup.py'",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"return",
"_EntryPointType",
".",
"PYTHON_PACKAGE",
"elif",
"name",
".",
"endswith",
"(",
"'.py'",
")",
":",
"return",
"_EntryPointType",
".",
"PYTHON_PROGRAM",
"else",
":",
"return",
"_EntryPointType",
".",
"COMMAND"
] | 31.266667 | 0.00207 |
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True | [
"def",
"_check_file_field",
"(",
"self",
",",
"field",
")",
":",
"is_field",
"=",
"field",
"in",
"self",
".",
"field_names",
"is_file",
"=",
"self",
".",
"__meta_metadata",
"(",
"field",
",",
"'field_type'",
")",
"==",
"'file'",
"if",
"not",
"(",
"is_field",
"and",
"is_file",
")",
":",
"msg",
"=",
"\"'%s' is not a field or not a 'file' field\"",
"%",
"field",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"return",
"True"
] | 42.333333 | 0.005141 |
def destroy_vm(self, vm, logger):
"""
destroy the given vm
:param vm: virtual machine pyvmomi object
:param logger:
"""
self.power_off_before_destroy(logger, vm)
logger.info(("Destroying VM {0}".format(vm.name)))
task = vm.Destroy_Task()
return self.task_waiter.wait_for_task(task=task, logger=logger, action_name="Destroy VM") | [
"def",
"destroy_vm",
"(",
"self",
",",
"vm",
",",
"logger",
")",
":",
"self",
".",
"power_off_before_destroy",
"(",
"logger",
",",
"vm",
")",
"logger",
".",
"info",
"(",
"(",
"\"Destroying VM {0}\"",
".",
"format",
"(",
"vm",
".",
"name",
")",
")",
")",
"task",
"=",
"vm",
".",
"Destroy_Task",
"(",
")",
"return",
"self",
".",
"task_waiter",
".",
"wait_for_task",
"(",
"task",
"=",
"task",
",",
"logger",
"=",
"logger",
",",
"action_name",
"=",
"\"Destroy VM\"",
")"
] | 30.230769 | 0.012346 |
def ll(self,*args,**kwargs):
"""
NAME:
ll
PURPOSE:
return Galactic longitude
INPUT:
t - (optional) time at which to get ll (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
l(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
out= self._orb.ll(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | [
"def",
"ll",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"self",
".",
"_orb",
".",
"ll",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"out",
")",
"==",
"1",
":",
"return",
"out",
"[",
"0",
"]",
"else",
":",
"return",
"out"
] | 23.40625 | 0.015385 |
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep)) | [
"def",
"capwords",
"(",
"s",
",",
"sep",
"=",
"None",
")",
":",
"return",
"(",
"sep",
"or",
"' '",
")",
".",
"join",
"(",
"x",
".",
"capitalize",
"(",
")",
"for",
"x",
"in",
"s",
".",
"split",
"(",
"sep",
")",
")"
] | 41.166667 | 0.00198 |
def pmllpmbb_to_pmrapmdec(pmll,pmbb,l,b,degree=False,epoch=2000.0):
"""
NAME:
pmllpmbb_to_pmrapmdec
PURPOSE:
rotate proper motions in (l,b) into proper motions in (ra,dec)
INPUT:
pmll - proper motion in l (multiplied by cos(b)) [mas/yr]
pmbb - proper motion in b [mas/yr]
l - Galactic longitude
b - Galactic latitude
degree - if True, l and b are given in degrees (default=False)
epoch - epoch of ra,dec (right now only 2000.0 and 1950.0 are supported when not using astropy's transformations internally; when internally using astropy's coordinate transformations, epoch can be None for ICRS, 'JXXXX' for FK5, and 'BXXXX' for FK4)
OUTPUT:
(pmra x cos(dec),pmdec), for vector inputs [:,2]
HISTORY:
2010-04-07 - Written - Bovy (NYU)
2014-06-14 - Re-written w/ numpy functions for speed and w/ decorators for beauty - Bovy (IAS)
"""
theta,dec_ngp,ra_ngp= get_epoch_angles(epoch)
#Whether to use degrees and scalar input is handled by decorators
radec = lb_to_radec(l,b,degree=False,epoch=epoch)
ra= radec[:,0]
dec= radec[:,1]
dec[dec == dec_ngp]+= 10.**-16 #deal w/ pole.
sindec_ngp= nu.sin(dec_ngp)
cosdec_ngp= nu.cos(dec_ngp)
sindec= nu.sin(dec)
cosdec= nu.cos(dec)
sinrarangp= nu.sin(ra-ra_ngp)
cosrarangp= nu.cos(ra-ra_ngp)
#These were replaced by Poleski (2013)'s equivalent form that is better at the poles
#cosphi= (sindec_ngp-sindec*sinb)/cosdec/cosb
#sinphi= sinrarangp*cosdec_ngp/cosb
cosphi= sindec_ngp*cosdec-cosdec_ngp*sindec*cosrarangp
sinphi= sinrarangp*cosdec_ngp
norm= nu.sqrt(cosphi**2.+sinphi**2.)
cosphi/= norm
sinphi/= norm
return (nu.array([[cosphi,sinphi],[-sinphi,cosphi]]).T\
*nu.array([[pmll,pmll],[pmbb,pmbb]]).T).sum(-1) | [
"def",
"pmllpmbb_to_pmrapmdec",
"(",
"pmll",
",",
"pmbb",
",",
"l",
",",
"b",
",",
"degree",
"=",
"False",
",",
"epoch",
"=",
"2000.0",
")",
":",
"theta",
",",
"dec_ngp",
",",
"ra_ngp",
"=",
"get_epoch_angles",
"(",
"epoch",
")",
"#Whether to use degrees and scalar input is handled by decorators",
"radec",
"=",
"lb_to_radec",
"(",
"l",
",",
"b",
",",
"degree",
"=",
"False",
",",
"epoch",
"=",
"epoch",
")",
"ra",
"=",
"radec",
"[",
":",
",",
"0",
"]",
"dec",
"=",
"radec",
"[",
":",
",",
"1",
"]",
"dec",
"[",
"dec",
"==",
"dec_ngp",
"]",
"+=",
"10.",
"**",
"-",
"16",
"#deal w/ pole.",
"sindec_ngp",
"=",
"nu",
".",
"sin",
"(",
"dec_ngp",
")",
"cosdec_ngp",
"=",
"nu",
".",
"cos",
"(",
"dec_ngp",
")",
"sindec",
"=",
"nu",
".",
"sin",
"(",
"dec",
")",
"cosdec",
"=",
"nu",
".",
"cos",
"(",
"dec",
")",
"sinrarangp",
"=",
"nu",
".",
"sin",
"(",
"ra",
"-",
"ra_ngp",
")",
"cosrarangp",
"=",
"nu",
".",
"cos",
"(",
"ra",
"-",
"ra_ngp",
")",
"#These were replaced by Poleski (2013)'s equivalent form that is better at the poles",
"#cosphi= (sindec_ngp-sindec*sinb)/cosdec/cosb",
"#sinphi= sinrarangp*cosdec_ngp/cosb",
"cosphi",
"=",
"sindec_ngp",
"*",
"cosdec",
"-",
"cosdec_ngp",
"*",
"sindec",
"*",
"cosrarangp",
"sinphi",
"=",
"sinrarangp",
"*",
"cosdec_ngp",
"norm",
"=",
"nu",
".",
"sqrt",
"(",
"cosphi",
"**",
"2.",
"+",
"sinphi",
"**",
"2.",
")",
"cosphi",
"/=",
"norm",
"sinphi",
"/=",
"norm",
"return",
"(",
"nu",
".",
"array",
"(",
"[",
"[",
"cosphi",
",",
"sinphi",
"]",
",",
"[",
"-",
"sinphi",
",",
"cosphi",
"]",
"]",
")",
".",
"T",
"*",
"nu",
".",
"array",
"(",
"[",
"[",
"pmll",
",",
"pmll",
"]",
",",
"[",
"pmbb",
",",
"pmbb",
"]",
"]",
")",
".",
"T",
")",
".",
"sum",
"(",
"-",
"1",
")"
] | 31.789474 | 0.025161 |
def generatePandaEnumCols(pandaFtrain, cname, nrows, domainL):
"""
For an H2O Enum column, we perform one-hot-encoding here and add one more column, "missing(NA)" to it.
:param pandaFtrain: panda frame derived from H2OFrame
:param cname: column name of enum col
:param nrows: number of rows of enum col
:return: panda frame with enum col encoded correctly for native XGBoost
"""
import numpy as np
import pandas as pd
cmissingNames=[cname+".missing(NA)"]
tempnp = np.zeros((nrows,1), dtype=np.int)
# check for nan and assign it correct value
colVals = pandaFtrain[cname]
for ind in range(nrows):
try:
if not(colVals[ind] in domainL):
tempnp[ind]=1
except ValueError:
pass
zeroFrame = pd.DataFrame(tempnp)
zeroFrame.columns=cmissingNames
temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)
tempNames = list(temp) # get column names
colLength = len(tempNames)
newNames = ['a']*colLength
for ind in range(0,colLength):
newNames[ind]=cname+"_"+domainL[ind]
ftemp = temp[newNames]
ctemp = pd.concat([ftemp, zeroFrame], axis=1)
return ctemp | [
"def",
"generatePandaEnumCols",
"(",
"pandaFtrain",
",",
"cname",
",",
"nrows",
",",
"domainL",
")",
":",
"import",
"numpy",
"as",
"np",
"import",
"pandas",
"as",
"pd",
"cmissingNames",
"=",
"[",
"cname",
"+",
"\".missing(NA)\"",
"]",
"tempnp",
"=",
"np",
".",
"zeros",
"(",
"(",
"nrows",
",",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"# check for nan and assign it correct value",
"colVals",
"=",
"pandaFtrain",
"[",
"cname",
"]",
"for",
"ind",
"in",
"range",
"(",
"nrows",
")",
":",
"try",
":",
"if",
"not",
"(",
"colVals",
"[",
"ind",
"]",
"in",
"domainL",
")",
":",
"tempnp",
"[",
"ind",
"]",
"=",
"1",
"except",
"ValueError",
":",
"pass",
"zeroFrame",
"=",
"pd",
".",
"DataFrame",
"(",
"tempnp",
")",
"zeroFrame",
".",
"columns",
"=",
"cmissingNames",
"temp",
"=",
"pd",
".",
"get_dummies",
"(",
"pandaFtrain",
"[",
"cname",
"]",
",",
"prefix",
"=",
"cname",
",",
"drop_first",
"=",
"False",
")",
"tempNames",
"=",
"list",
"(",
"temp",
")",
"# get column names",
"colLength",
"=",
"len",
"(",
"tempNames",
")",
"newNames",
"=",
"[",
"'a'",
"]",
"*",
"colLength",
"for",
"ind",
"in",
"range",
"(",
"0",
",",
"colLength",
")",
":",
"newNames",
"[",
"ind",
"]",
"=",
"cname",
"+",
"\"_\"",
"+",
"domainL",
"[",
"ind",
"]",
"ftemp",
"=",
"temp",
"[",
"newNames",
"]",
"ctemp",
"=",
"pd",
".",
"concat",
"(",
"[",
"ftemp",
",",
"zeroFrame",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"ctemp"
] | 35.029412 | 0.007353 |
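A minimal usage sketch for generatePandaEnumCols, assuming the function above is in scope; the toy frame and domain list below are invented for illustration. A value outside the supplied domain (here the None) is flagged in the extra "missing(NA)" column:

import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", None, "green"]})
domain = ["blue", "green", "red"]  # domain levels in the order H2O would report them

encoded = generatePandaEnumCols(df, "color", len(df), domain)
print(list(encoded.columns))
# ['color_blue', 'color_green', 'color_red', 'color.missing(NA)']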
def draw(self, clear=True):
"""Draw each visible mesh in the scene from the perspective of the scene's camera and lit by its light."""
if clear:
self.clear()
with self.gl_states, self.camera, self.light:
for mesh in self.meshes:
try:
mesh.draw()
except AttributeError:
pass | [
"def",
"draw",
"(",
"self",
",",
"clear",
"=",
"True",
")",
":",
"if",
"clear",
":",
"self",
".",
"clear",
"(",
")",
"with",
"self",
".",
"gl_states",
",",
"self",
".",
"camera",
",",
"self",
".",
"light",
":",
"for",
"mesh",
"in",
"self",
".",
"meshes",
":",
"try",
":",
"mesh",
".",
"draw",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] | 34.909091 | 0.007614 |
def extract_pagination(self, params):
'''Extract and build pagination from parameters'''
try:
params_page = int(params.pop('page', 1) or 1)
self.page = max(params_page, 1)
        except (TypeError, ValueError):
            # Failsafe: if page cannot be parsed, we fall back on the first page
self.page = 1
try:
params_page_size = params.pop('page_size', DEFAULT_PAGE_SIZE)
self.page_size = int(params_page_size or DEFAULT_PAGE_SIZE)
        except (TypeError, ValueError):
            # Failsafe: if page_size cannot be parsed, we fall back on the default
self.page_size = DEFAULT_PAGE_SIZE
self.page_start = (self.page - 1) * self.page_size
self.page_end = self.page_start + self.page_size | [
"def",
"extract_pagination",
"(",
"self",
",",
"params",
")",
":",
"try",
":",
"params_page",
"=",
"int",
"(",
"params",
".",
"pop",
"(",
"'page'",
",",
"1",
")",
"or",
"1",
")",
"self",
".",
"page",
"=",
"max",
"(",
"params_page",
",",
"1",
")",
"except",
":",
"# Failsafe, if page cannot be parsed, we falback on first page",
"self",
".",
"page",
"=",
"1",
"try",
":",
"params_page_size",
"=",
"params",
".",
"pop",
"(",
"'page_size'",
",",
"DEFAULT_PAGE_SIZE",
")",
"self",
".",
"page_size",
"=",
"int",
"(",
"params_page_size",
"or",
"DEFAULT_PAGE_SIZE",
")",
"except",
":",
"# Failsafe, if page_size cannot be parsed, we falback on default",
"self",
".",
"page_size",
"=",
"DEFAULT_PAGE_SIZE",
"self",
".",
"page_start",
"=",
"(",
"self",
".",
"page",
"-",
"1",
")",
"*",
"self",
".",
"page_size",
"self",
".",
"page_end",
"=",
"self",
".",
"page_start",
"+",
"self",
".",
"page_size"
] | 45.5 | 0.005384 |
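A self-contained sketch of the clamping and fallback behaviour; the stub class and DEFAULT_PAGE_SIZE value are assumptions made for illustration:

DEFAULT_PAGE_SIZE = 20  # assumed module-level default

class _Stub:
    extract_pagination = extract_pagination  # reuse the method defined above

s = _Stub()
s.extract_pagination({'page': '3', 'page_size': '10'})
print(s.page_start, s.page_end)   # 20 30
s.extract_pagination({'page': 'oops'})  # unparseable page falls back to 1
print(s.page, s.page_size)        # 1 20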
def merge_overlaps(self):
"""
Merges overlaps by merging overlapping Intervals.
The function takes no arguments and returns ``None``. It operates on
        the striplog 'in place'.
        TODO: This function will not work if any interval overlaps more than
        one other interval at either its base or top.
"""
overlaps = np.array(self.find_overlaps(index=True))
if not overlaps.any():
return
for overlap in overlaps:
before = self[overlap].copy()
after = self[overlap + 1].copy()
# Get rid of the before and after pieces.
del self[overlap]
del self[overlap]
# Make the new piece.
new_segment = before.merge(after)
# Insert it.
self.__insert(overlap, new_segment)
overlaps += 1
return | [
"def",
"merge_overlaps",
"(",
"self",
")",
":",
"overlaps",
"=",
"np",
".",
"array",
"(",
"self",
".",
"find_overlaps",
"(",
"index",
"=",
"True",
")",
")",
"if",
"not",
"overlaps",
".",
"any",
"(",
")",
":",
"return",
"for",
"overlap",
"in",
"overlaps",
":",
"before",
"=",
"self",
"[",
"overlap",
"]",
".",
"copy",
"(",
")",
"after",
"=",
"self",
"[",
"overlap",
"+",
"1",
"]",
".",
"copy",
"(",
")",
"# Get rid of the before and after pieces.",
"del",
"self",
"[",
"overlap",
"]",
"del",
"self",
"[",
"overlap",
"]",
"# Make the new piece.",
"new_segment",
"=",
"before",
".",
"merge",
"(",
"after",
")",
"# Insert it.",
"self",
".",
"__insert",
"(",
"overlap",
",",
"new_segment",
")",
"overlaps",
"+=",
"1",
"return"
] | 27.125 | 0.002225 |
def write_alf_params_(self):
""" DEPRECATED """
if not hasattr(self, 'alf_dirs'):
self.make_alf_dirs()
if not hasattr(self, 'class_trees'):
self.generate_class_trees()
alf_params = {}
for k in range(self.num_classes):
alfdir = self.alf_dirs[k + 1]
tree = self.class_trees[k + 1]
datatype = self.datatype
name = 'class{0}'.format(k + 1)
num_genes = self.class_list[k]
seqlength = self.gene_length_min
gene_length_kappa = self.gene_length_kappa
gene_length_theta = self.gene_length_theta
alf_obj = ALF(tree=tree,
datatype=datatype, num_genes=num_genes,
seqlength=seqlength, gene_length_kappa=gene_length_kappa,
gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir)
if datatype == 'protein':
alf_obj.params.one_word_model('WAG')
else:
alf_obj.params.jc_model()
alf_params[k + 1] = alf_obj
self.alf_params = alf_params | [
"def",
"write_alf_params_",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'alf_dirs'",
")",
":",
"self",
".",
"make_alf_dirs",
"(",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'class_trees'",
")",
":",
"self",
".",
"generate_class_trees",
"(",
")",
"alf_params",
"=",
"{",
"}",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"num_classes",
")",
":",
"alfdir",
"=",
"self",
".",
"alf_dirs",
"[",
"k",
"+",
"1",
"]",
"tree",
"=",
"self",
".",
"class_trees",
"[",
"k",
"+",
"1",
"]",
"datatype",
"=",
"self",
".",
"datatype",
"name",
"=",
"'class{0}'",
".",
"format",
"(",
"k",
"+",
"1",
")",
"num_genes",
"=",
"self",
".",
"class_list",
"[",
"k",
"]",
"seqlength",
"=",
"self",
".",
"gene_length_min",
"gene_length_kappa",
"=",
"self",
".",
"gene_length_kappa",
"gene_length_theta",
"=",
"self",
".",
"gene_length_theta",
"alf_obj",
"=",
"ALF",
"(",
"tree",
"=",
"tree",
",",
"datatype",
"=",
"datatype",
",",
"num_genes",
"=",
"num_genes",
",",
"seqlength",
"=",
"seqlength",
",",
"gene_length_kappa",
"=",
"gene_length_kappa",
",",
"gene_length_theta",
"=",
"gene_length_theta",
",",
"name",
"=",
"name",
",",
"tmpdir",
"=",
"alfdir",
")",
"if",
"datatype",
"==",
"'protein'",
":",
"alf_obj",
".",
"params",
".",
"one_word_model",
"(",
"'WAG'",
")",
"else",
":",
"alf_obj",
".",
"params",
".",
"jc_model",
"(",
")",
"alf_params",
"[",
"k",
"+",
"1",
"]",
"=",
"alf_obj",
"self",
".",
"alf_params",
"=",
"alf_params"
] | 38.758621 | 0.003472 |
def harris_feature(im, region_size=5, to_return='harris', scale=0.05):
"""
Harris-motivated feature detection on a d-dimensional image.
    Parameters
    ----------
    im : ndarray
        The d-dimensional input image.
    region_size : int
        Width of the Gaussian window used to smooth the structure tensor.
    to_return : {'harris', 'matrix', 'trace-determinant'}
        Which quantity to return.
    scale : float
        Sensitivity factor in the Harris response ``det - scale * trc**2``.
"""
ndim = im.ndim
#1. Gradient of image
grads = [nd.sobel(im, axis=i) for i in range(ndim)]
#2. Corner response matrix
matrix = np.zeros((ndim, ndim) + im.shape)
for a in range(ndim):
for b in range(ndim):
            matrix[a,b] = nd.gaussian_filter(grads[a]*grads[b], region_size)
if to_return == 'matrix':
return matrix
#3. Trace, determinant
trc = np.trace(matrix, axis1=0, axis2=1)
det = np.linalg.det(matrix.T).T
if to_return == 'trace-determinant':
return trc, det
else:
#4. Harris detector:
harris = det - scale*trc*trc
return harris | [
"def",
"harris_feature",
"(",
"im",
",",
"region_size",
"=",
"5",
",",
"to_return",
"=",
"'harris'",
",",
"scale",
"=",
"0.05",
")",
":",
"ndim",
"=",
"im",
".",
"ndim",
"#1. Gradient of image",
"grads",
"=",
"[",
"nd",
".",
"sobel",
"(",
"im",
",",
"axis",
"=",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"ndim",
")",
"]",
"#2. Corner response matrix",
"matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"ndim",
",",
"ndim",
")",
"+",
"im",
".",
"shape",
")",
"for",
"a",
"in",
"range",
"(",
"ndim",
")",
":",
"for",
"b",
"in",
"range",
"(",
"ndim",
")",
":",
"matrix",
"[",
"a",
",",
"b",
"]",
"=",
"nd",
".",
"filters",
".",
"gaussian_filter",
"(",
"grads",
"[",
"a",
"]",
"*",
"grads",
"[",
"b",
"]",
",",
"region_size",
")",
"if",
"to_return",
"==",
"'matrix'",
":",
"return",
"matrix",
"#3. Trace, determinant",
"trc",
"=",
"np",
".",
"trace",
"(",
"matrix",
",",
"axis1",
"=",
"0",
",",
"axis2",
"=",
"1",
")",
"det",
"=",
"np",
".",
"linalg",
".",
"det",
"(",
"matrix",
".",
"T",
")",
".",
"T",
"if",
"to_return",
"==",
"'trace-determinant'",
":",
"return",
"trc",
",",
"det",
"else",
":",
"#4. Harris detector:",
"harris",
"=",
"det",
"-",
"scale",
"*",
"trc",
"*",
"trc",
"return",
"harris"
] | 29.225806 | 0.007479 |
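A quick smoke test on a synthetic image. It assumes the function above is in scope together with the module-level aliases it relies on, reproduced here by the two imports (scipy.ndimage exposes gaussian_filter and sobel at the top level):

import numpy as np
import scipy.ndimage as nd

im = np.zeros((64, 64))
im[20:40, 20:40] = 1.0  # a bright square; its corners are the interest points

response = harris_feature(im, region_size=3)
peaks = np.argwhere(response > 0.5 * response.max())
print(peaks)  # coordinates expected to cluster near the square's four corners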
def get_PCA_parameters(self, specimen, fit, tmin, tmax, coordinate_system, calculation_type):
"""
        Uses pmag.domean to perform a line, line-with-origin, line-anchored,
        or plane least-squares regression or a Fisher mean on the
        measurement data of specimen in coordinate system between bounds
        tmin to tmax.
Parameters
----------
specimen : specimen with measurement data in self.Data
fit : fit for which the regression or mean is being applied
(used for calculating measurement index of tmin and tmax)
tmin : lower bound of measurement data
tmax : upper bound of measurement data
coordinate_system : which coordinate system the measurement data
should be in
        calculation_type : type of regression or mean to perform
(options - DE-BFL:line,DE-BFL-A:line-anchored,DE-BFL-O:line-with-
origin,DE-FM:fisher,DE-BFP:plane)
Returns
-------
mpars : a 2.5 data model dictionary type specimen record of the dec,
inc, etc of the regression or mean
"""
if tmin == '' or tmax == '':
return
beg_pca, end_pca = self.get_indices(fit, tmin, tmax, specimen)
if coordinate_system == 'geographic' or coordinate_system == 'DA-DIR-GEO':
block = self.Data[specimen]['zijdblock_geo']
elif coordinate_system == 'tilt-corrected' or coordinate_system == 'DA-DIR-TILT':
block = self.Data[specimen]['zijdblock_tilt']
else:
block = self.Data[specimen]['zijdblock']
if block == []:
print(("-E- no measurement data for specimen %s in coordinate system %s" %
(specimen, coordinate_system)))
mpars = {}
elif end_pca > beg_pca and end_pca - beg_pca > 1:
try:
                # performs regression
mpars = pmag.domean(block, beg_pca, end_pca, calculation_type)
            except Exception:
print((block, beg_pca, end_pca, calculation_type,
specimen, fit.name, tmin, tmax, coordinate_system))
return
if 'specimen_direction_type' in mpars and mpars['specimen_direction_type'] == 'Error':
print(("-E- no measurement data for specimen %s in coordinate system %s" %
(specimen, coordinate_system)))
return {}
else:
mpars = {}
for k in list(mpars.keys()):
try:
if math.isnan(float(mpars[k])):
mpars[k] = 0
except:
pass
if "DE-BFL" in calculation_type and 'specimen_dang' not in list(mpars.keys()):
mpars['specimen_dang'] = 0
if 'best fit vector' in self.plane_display_box.GetValue():
self.calculate_best_fit_vectors()
return(mpars) | [
"def",
"get_PCA_parameters",
"(",
"self",
",",
"specimen",
",",
"fit",
",",
"tmin",
",",
"tmax",
",",
"coordinate_system",
",",
"calculation_type",
")",
":",
"if",
"tmin",
"==",
"''",
"or",
"tmax",
"==",
"''",
":",
"return",
"beg_pca",
",",
"end_pca",
"=",
"self",
".",
"get_indices",
"(",
"fit",
",",
"tmin",
",",
"tmax",
",",
"specimen",
")",
"if",
"coordinate_system",
"==",
"'geographic'",
"or",
"coordinate_system",
"==",
"'DA-DIR-GEO'",
":",
"block",
"=",
"self",
".",
"Data",
"[",
"specimen",
"]",
"[",
"'zijdblock_geo'",
"]",
"elif",
"coordinate_system",
"==",
"'tilt-corrected'",
"or",
"coordinate_system",
"==",
"'DA-DIR-TILT'",
":",
"block",
"=",
"self",
".",
"Data",
"[",
"specimen",
"]",
"[",
"'zijdblock_tilt'",
"]",
"else",
":",
"block",
"=",
"self",
".",
"Data",
"[",
"specimen",
"]",
"[",
"'zijdblock'",
"]",
"if",
"block",
"==",
"[",
"]",
":",
"print",
"(",
"(",
"\"-E- no measurement data for specimen %s in coordinate system %s\"",
"%",
"(",
"specimen",
",",
"coordinate_system",
")",
")",
")",
"mpars",
"=",
"{",
"}",
"elif",
"end_pca",
">",
"beg_pca",
"and",
"end_pca",
"-",
"beg_pca",
">",
"1",
":",
"try",
":",
"# preformes regression",
"mpars",
"=",
"pmag",
".",
"domean",
"(",
"block",
",",
"beg_pca",
",",
"end_pca",
",",
"calculation_type",
")",
"except",
":",
"print",
"(",
"(",
"block",
",",
"beg_pca",
",",
"end_pca",
",",
"calculation_type",
",",
"specimen",
",",
"fit",
".",
"name",
",",
"tmin",
",",
"tmax",
",",
"coordinate_system",
")",
")",
"return",
"if",
"'specimen_direction_type'",
"in",
"mpars",
"and",
"mpars",
"[",
"'specimen_direction_type'",
"]",
"==",
"'Error'",
":",
"print",
"(",
"(",
"\"-E- no measurement data for specimen %s in coordinate system %s\"",
"%",
"(",
"specimen",
",",
"coordinate_system",
")",
")",
")",
"return",
"{",
"}",
"else",
":",
"mpars",
"=",
"{",
"}",
"for",
"k",
"in",
"list",
"(",
"mpars",
".",
"keys",
"(",
")",
")",
":",
"try",
":",
"if",
"math",
".",
"isnan",
"(",
"float",
"(",
"mpars",
"[",
"k",
"]",
")",
")",
":",
"mpars",
"[",
"k",
"]",
"=",
"0",
"except",
":",
"pass",
"if",
"\"DE-BFL\"",
"in",
"calculation_type",
"and",
"'specimen_dang'",
"not",
"in",
"list",
"(",
"mpars",
".",
"keys",
"(",
")",
")",
":",
"mpars",
"[",
"'specimen_dang'",
"]",
"=",
"0",
"if",
"'best fit vector'",
"in",
"self",
".",
"plane_display_box",
".",
"GetValue",
"(",
")",
":",
"self",
".",
"calculate_best_fit_vectors",
"(",
")",
"return",
"(",
"mpars",
")"
] | 42.044118 | 0.003759 |
def can_remove(self):
'''
        Return whether the current node can be removed, based on the app
        config's directory_remove.
:returns: True if current node can be removed, False otherwise.
:rtype: bool
'''
dirbase = self.app.config["directory_remove"]
return bool(dirbase and check_under_base(self.path, dirbase)) | [
"def",
"can_remove",
"(",
"self",
")",
":",
"dirbase",
"=",
"self",
".",
"app",
".",
"config",
"[",
"\"directory_remove\"",
"]",
"return",
"bool",
"(",
"dirbase",
"and",
"check_under_base",
"(",
"self",
".",
"path",
",",
"dirbase",
")",
")"
] | 34.5 | 0.00565 |
def _check_keyserver(location):
"""Check that a given keyserver is a known protocol and does not contain
shell escape characters.
:param str location: A string containing the default keyserver. This
should contain the desired keyserver protocol which
is supported by the keyserver, for example, the
                         default is ``'hkp://wwwkeys.pgp.net'``.
:rtype: :obj:`str` or :obj:`None`
:returns: A string specifying the protocol and keyserver hostname, if the
checks passed. If not, returns None.
"""
protocols = ['hkp://', 'hkps://', 'http://', 'https://', 'ldap://',
                 'mailto:'] ## xxx feels like I'm forgetting one...
for proto in protocols:
if location.startswith(proto):
url = location.replace(proto, str())
host, slash, extra = url.partition('/')
if extra: log.warn("URI text for %s: '%s'" % (host, extra))
log.debug("Got host string for keyserver setting: '%s'" % host)
host = _fix_unsafe(host)
if host:
log.debug("Cleaned host string: '%s'" % host)
keyserver = proto + host
return keyserver
return None | [
"def",
"_check_keyserver",
"(",
"location",
")",
":",
"protocols",
"=",
"[",
"'hkp://'",
",",
"'hkps://'",
",",
"'http://'",
",",
"'https://'",
",",
"'ldap://'",
",",
"'mailto:'",
"]",
"## xxx feels like i麓m forgetting one...",
"for",
"proto",
"in",
"protocols",
":",
"if",
"location",
".",
"startswith",
"(",
"proto",
")",
":",
"url",
"=",
"location",
".",
"replace",
"(",
"proto",
",",
"str",
"(",
")",
")",
"host",
",",
"slash",
",",
"extra",
"=",
"url",
".",
"partition",
"(",
"'/'",
")",
"if",
"extra",
":",
"log",
".",
"warn",
"(",
"\"URI text for %s: '%s'\"",
"%",
"(",
"host",
",",
"extra",
")",
")",
"log",
".",
"debug",
"(",
"\"Got host string for keyserver setting: '%s'\"",
"%",
"host",
")",
"host",
"=",
"_fix_unsafe",
"(",
"host",
")",
"if",
"host",
":",
"log",
".",
"debug",
"(",
"\"Cleaned host string: '%s'\"",
"%",
"host",
")",
"keyserver",
"=",
"proto",
"+",
"host",
"return",
"keyserver",
"return",
"None"
] | 46.333333 | 0.003132 |
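A sketch exercising the checker. log and _fix_unsafe are module-level helpers in the original; the stand-ins below guess at their behaviour:

import logging
log = logging.getLogger(__name__)

def _fix_unsafe(s):
    # guessed stand-in: reject hosts containing shell metacharacters
    return s if not set(s) & set('$`\\|;<>') else None

print(_check_keyserver('hkps://keys.example.org/'))   # hkps://keys.example.org
print(_check_keyserver('gopher://keys.example.org'))  # None (unknown protocol)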
def spiceFoundExceptionThrower(f):
"""
    Decorator for wrapping functions that return a trailing "found" status flag
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
if config.catch_false_founds:
found = res[-1]
if isinstance(found, bool) and not found:
raise stypes.SpiceyError("Spice returns not found for function: {}".format(f.__name__), found=found)
elif hasattr(found, '__iter__') and not all(found):
raise stypes.SpiceyError("Spice returns not found in a series of calls for function: {}".format(f.__name__), found=found)
else:
actualres = res[0:-1]
if len(actualres) == 1:
return actualres[0]
else:
return actualres
else:
return res
return wrapper | [
"def",
"spiceFoundExceptionThrower",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"res",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"config",
".",
"catch_false_founds",
":",
"found",
"=",
"res",
"[",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"found",
",",
"bool",
")",
"and",
"not",
"found",
":",
"raise",
"stypes",
".",
"SpiceyError",
"(",
"\"Spice returns not found for function: {}\"",
".",
"format",
"(",
"f",
".",
"__name__",
")",
",",
"found",
"=",
"found",
")",
"elif",
"hasattr",
"(",
"found",
",",
"'__iter__'",
")",
"and",
"not",
"all",
"(",
"found",
")",
":",
"raise",
"stypes",
".",
"SpiceyError",
"(",
"\"Spice returns not found in a series of calls for function: {}\"",
".",
"format",
"(",
"f",
".",
"__name__",
")",
",",
"found",
"=",
"found",
")",
"else",
":",
"actualres",
"=",
"res",
"[",
"0",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"actualres",
")",
"==",
"1",
":",
"return",
"actualres",
"[",
"0",
"]",
"else",
":",
"return",
"actualres",
"else",
":",
"return",
"res",
"return",
"wrapper"
] | 37.782609 | 0.003367 |
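A sketch of the decorator in use. The config and stypes names are module-level in the original (SpiceyPy); the stand-ins below are assumptions that mimic just enough of their shape:

import functools

class _Config:                # stand-in for the module's config object
    catch_false_founds = True
config = _Config()

class stypes:                 # stand-in namespace for the error type
    class SpiceyError(Exception):
        def __init__(self, msg, found=None):
            super().__init__(msg)
            self.found = found

@spiceFoundExceptionThrower
def lookup(key):
    table = {'MARS': 499}
    return table.get(key, -1), key in table   # (value, found flag)

print(lookup('MARS'))   # 499 -- the trailing found flag is stripped
lookup('PLUTO_B')       # raises stypes.SpiceyError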
def post(json_data,
url,
dry_run=False):
"""
    POST JSON data to the provided URL and verify the request was successful
"""
if dry_run:
info('POST: %s' % json.dumps(json_data, indent=4))
else:
response = SESSION.post(url,
data=json.dumps(json_data),
headers={'content-type': 'application/json'})
if response.status_code != 200:
raise Exception("Failed to import %s with %s: %s" %
(json_data, response.status_code, response.text)) | [
"def",
"post",
"(",
"json_data",
",",
"url",
",",
"dry_run",
"=",
"False",
")",
":",
"if",
"dry_run",
":",
"info",
"(",
"'POST: %s'",
"%",
"json",
".",
"dumps",
"(",
"json_data",
",",
"indent",
"=",
"4",
")",
")",
"else",
":",
"response",
"=",
"SESSION",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"json_data",
")",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"Exception",
"(",
"\"Failed to import %s with %s: %s\"",
"%",
"(",
"json_data",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")"
] | 32.222222 | 0.001675 |
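With dry_run=True no HTTP call is made, so the helper can be exercised without a live endpoint or the module's SESSION object. The info logger below is a stand-in assumed for illustration:

import json

def info(msg):   # stand-in for the module's logging helper
    print(msg)

post({'name': 'widget'}, 'http://localhost:8080/api/import', dry_run=True)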
def IsDevice(self):
"""Determines if the file entry is a device.
Returns:
bool: True if the file entry is a device.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_DEVICE | [
"def",
"IsDevice",
"(",
"self",
")",
":",
"if",
"self",
".",
"_stat_object",
"is",
"None",
":",
"self",
".",
"_stat_object",
"=",
"self",
".",
"_GetStat",
"(",
")",
"if",
"self",
".",
"_stat_object",
"is",
"not",
"None",
":",
"self",
".",
"entry_type",
"=",
"self",
".",
"_stat_object",
".",
"type",
"return",
"self",
".",
"entry_type",
"==",
"definitions",
".",
"FILE_ENTRY_TYPE_DEVICE"
] | 32.181818 | 0.008242 |
def fullname(self) -> str:
"""
Description of the process.
"""
fullname = "Process {}/{} ({})".format(self.procnum, self.nprocs,
self.details.name)
if self.running:
fullname += " (PID={})".format(self.process.pid)
return fullname | [
"def",
"fullname",
"(",
"self",
")",
"->",
"str",
":",
"fullname",
"=",
"\"Process {}/{} ({})\"",
".",
"format",
"(",
"self",
".",
"procnum",
",",
"self",
".",
"nprocs",
",",
"self",
".",
"details",
".",
"name",
")",
"if",
"self",
".",
"running",
":",
"fullname",
"+=",
"\" (PID={})\"",
".",
"format",
"(",
"self",
".",
"process",
".",
"pid",
")",
"return",
"fullname"
] | 36.444444 | 0.005952 |
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
"""Attempts to find the yaml configuration file, then read it.
Args:
relative_path: Optional relative path override.
Returns:
A Config object if the open and read were successful, None if the file
does not exist (which is not considered an error).
Raises:
Error (some subclass): As thrown by the called Read() function.
"""
# Note: This logic follows the convention established by source-context.json
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return Read(f)
except IOError:
return None | [
"def",
"OpenAndRead",
"(",
"relative_path",
"=",
"'debugger-blacklist.yaml'",
")",
":",
"# Note: This logic follows the convention established by source-context.json",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"path",
"[",
"0",
"]",
",",
"relative_path",
")",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"Read",
"(",
"f",
")",
"except",
"IOError",
":",
"return",
"None"
] | 30 | 0.009693 |
def _compute_remote_size(self, options):
# type: (Descriptor, blobxfer.models.options.Upload) -> None
"""Compute total remote file size
:param Descriptor self: this
:param blobxfer.models.options.Upload options: upload options
:rtype: int
:return: remote file size
"""
size = self.local_path.size
if (self._ase.mode == blobxfer.models.azure.StorageModes.Page and
self.local_path.use_stdin):
if options.stdin_as_page_blob_size == 0:
allocatesize = _MAX_PAGE_BLOB_SIZE
self._needs_resize = True
else:
allocatesize = options.stdin_as_page_blob_size
elif size > 0:
if self._ase.is_encrypted:
# cipher_len_without_iv = (clear_len / aes_bs + 1) * aes_bs
allocatesize = (size // self._AES_BLOCKSIZE + 1) * \
self._AES_BLOCKSIZE
else:
allocatesize = size
else:
allocatesize = 0
self._ase.size = allocatesize
if blobxfer.util.is_not_empty(self._ase.replica_targets):
for rt in self._ase.replica_targets:
rt.size = allocatesize
if self._verbose:
logger.debug('remote size for {} is {} bytes'.format(
self._ase.path, self._ase.size)) | [
"def",
"_compute_remote_size",
"(",
"self",
",",
"options",
")",
":",
"# type: (Descriptor, blobxfer.models.options.Upload) -> None",
"size",
"=",
"self",
".",
"local_path",
".",
"size",
"if",
"(",
"self",
".",
"_ase",
".",
"mode",
"==",
"blobxfer",
".",
"models",
".",
"azure",
".",
"StorageModes",
".",
"Page",
"and",
"self",
".",
"local_path",
".",
"use_stdin",
")",
":",
"if",
"options",
".",
"stdin_as_page_blob_size",
"==",
"0",
":",
"allocatesize",
"=",
"_MAX_PAGE_BLOB_SIZE",
"self",
".",
"_needs_resize",
"=",
"True",
"else",
":",
"allocatesize",
"=",
"options",
".",
"stdin_as_page_blob_size",
"elif",
"size",
">",
"0",
":",
"if",
"self",
".",
"_ase",
".",
"is_encrypted",
":",
"# cipher_len_without_iv = (clear_len / aes_bs + 1) * aes_bs",
"allocatesize",
"=",
"(",
"size",
"//",
"self",
".",
"_AES_BLOCKSIZE",
"+",
"1",
")",
"*",
"self",
".",
"_AES_BLOCKSIZE",
"else",
":",
"allocatesize",
"=",
"size",
"else",
":",
"allocatesize",
"=",
"0",
"self",
".",
"_ase",
".",
"size",
"=",
"allocatesize",
"if",
"blobxfer",
".",
"util",
".",
"is_not_empty",
"(",
"self",
".",
"_ase",
".",
"replica_targets",
")",
":",
"for",
"rt",
"in",
"self",
".",
"_ase",
".",
"replica_targets",
":",
"rt",
".",
"size",
"=",
"allocatesize",
"if",
"self",
".",
"_verbose",
":",
"logger",
".",
"debug",
"(",
"'remote size for {} is {} bytes'",
".",
"format",
"(",
"self",
".",
"_ase",
".",
"path",
",",
"self",
".",
"_ase",
".",
"size",
")",
")"
] | 42.21875 | 0.002171 |
def result(self, *args, **kwargs):
"""
        Builds the SQL query.
"""
prettify = kwargs.get('pretty', False)
sql = 'CREATE %s %s' % (self._type, self._class)
if prettify:
sql += '\n'
else:
sql += ' '
if self._type.lower() == 'edge':
sql += " FROM %s TO %s " % (self._from, self._to)
if self._cluster:
sql += 'CLUSTER %s' % self._cluster
if prettify:
sql += '\n'
else:
sql += ' '
if self.data:
sql += 'CONTENT ' + json.dumps(self.data)
return sql | [
"def",
"result",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"prettify",
"=",
"kwargs",
".",
"get",
"(",
"'pretty'",
",",
"False",
")",
"sql",
"=",
"'CREATE %s %s'",
"%",
"(",
"self",
".",
"_type",
",",
"self",
".",
"_class",
")",
"if",
"prettify",
":",
"sql",
"+=",
"'\\n'",
"else",
":",
"sql",
"+=",
"' '",
"if",
"self",
".",
"_type",
".",
"lower",
"(",
")",
"==",
"'edge'",
":",
"sql",
"+=",
"\" FROM %s TO %s \"",
"%",
"(",
"self",
".",
"_from",
",",
"self",
".",
"_to",
")",
"if",
"self",
".",
"_cluster",
":",
"sql",
"+=",
"'CLUSTER %s'",
"%",
"self",
".",
"_cluster",
"if",
"prettify",
":",
"sql",
"+=",
"'\\n'",
"else",
":",
"sql",
"+=",
"' '",
"if",
"self",
".",
"data",
":",
"sql",
"+=",
"'CONTENT '",
"+",
"json",
".",
"dumps",
"(",
"self",
".",
"data",
")",
"return",
"sql"
] | 25.076923 | 0.007386 |
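A sketch of the builder in action, using an invented holder class that supplies just the attributes the method reads:

import json

class _CreateStmt:
    result = result   # reuse the method defined above
    def __init__(self, type_, class_, data=None, cluster=None):
        self._type, self._class = type_, class_
        self.data, self._cluster = data or {}, cluster
        self._from, self._to = None, None

stmt = _CreateStmt('VERTEX', 'Person', data={'name': 'Ada'})
print(stmt.result())
# CREATE VERTEX Person CONTENT {"name": "Ada"}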
def filter_by_value(cls, value):
"""
        Get all constants which have the given value.
:param value: value of the constants to look for
:returns: list of all found constants with given value
"""
constants = []
for constant in cls.iterconstants():
if constant.value == value:
constants.append(constant)
return constants | [
"def",
"filter_by_value",
"(",
"cls",
",",
"value",
")",
":",
"constants",
"=",
"[",
"]",
"for",
"constant",
"in",
"cls",
".",
"iterconstants",
"(",
")",
":",
"if",
"constant",
".",
"value",
"==",
"value",
":",
"constants",
".",
"append",
"(",
"constant",
")",
"return",
"constants"
] | 32.666667 | 0.004963 |
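A sketch with an invented constants container exposing the iterconstants interface the method expects:

class _Const:
    def __init__(self, name, value):
        self.name, self.value = name, value

class Color:
    RED, CRIMSON, BLUE = _Const('RED', 1), _Const('CRIMSON', 1), _Const('BLUE', 2)
    filter_by_value = classmethod(filter_by_value)   # reuse the method above
    @classmethod
    def iterconstants(cls):
        return iter((cls.RED, cls.CRIMSON, cls.BLUE))

print([c.name for c in Color.filter_by_value(1)])   # ['RED', 'CRIMSON']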
def _analyse(self, table=''):
"""Analyses the database, or `table` if it is supplied.
:param table: optional name of table to analyse
:type table: `str`
"""
self._logger.info('Starting analysis of database')
self._conn.execute(constants.ANALYSE_SQL.format(table))
self._logger.info('Analysis of database complete') | [
"def",
"_analyse",
"(",
"self",
",",
"table",
"=",
"''",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Starting analysis of database'",
")",
"self",
".",
"_conn",
".",
"execute",
"(",
"constants",
".",
"ANALYSE_SQL",
".",
"format",
"(",
"table",
")",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'Analysis of database complete'",
")"
] | 36.3 | 0.005376 |
def parse_config(self, config_file):
"""
        Given a configuration file, read in and interpret the results.
        :param config_file: path to a JSON configuration file
        :return: None; the parsed settings are stored on ``self.params``
"""
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} | [
"def",
"parse_config",
"(",
"self",
",",
"config_file",
")",
":",
"with",
"open",
"(",
"config_file",
",",
"'r'",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"self",
".",
"params",
"=",
"config",
"if",
"self",
".",
"params",
"[",
"'proxy'",
"]",
"[",
"'proxy_type'",
"]",
":",
"self",
".",
"params",
"[",
"'proxy'",
"]",
"=",
"{",
"self",
".",
"params",
"[",
"'proxy'",
"]",
"[",
"'proxy_type'",
"]",
":",
"self",
".",
"params",
"[",
"'proxy'",
"]",
"[",
"'proxy_url'",
"]",
"}"
] | 32.785714 | 0.004237 |
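A round-trip sketch showing the proxy dict being rewritten; the config layout is inferred from the method body:

import json, os, tempfile

cfg = {'proxy': {'proxy_type': 'https', 'proxy_url': 'http://127.0.0.1:8080'}}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(cfg, f)

class _Stub:
    parse_config = parse_config   # reuse the method defined above

s = _Stub()
s.parse_config(f.name)
print(s.params['proxy'])          # {'https': 'http://127.0.0.1:8080'}
os.unlink(f.name)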
def copy(self):
''' Return a copy of this color value.
Returns:
:class:`~bokeh.colors.rgb.RGB`
'''
return RGB(self.r, self.g, self.b, self.a) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"RGB",
"(",
"self",
".",
"r",
",",
"self",
".",
"g",
",",
"self",
".",
"b",
",",
"self",
".",
"a",
")"
] | 22.5 | 0.010695 |
def preloop(self):
''' Keep persistent command history. '''
if not self.already_prelooped:
self.already_prelooped = True
open('.psiturk_history', 'a').close() # create file if it doesn't exist
readline.read_history_file('.psiturk_history')
for i in range(readline.get_current_history_length()):
if readline.get_history_item(i) is not None:
self.history.append(readline.get_history_item(i)) | [
"def",
"preloop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"already_prelooped",
":",
"self",
".",
"already_prelooped",
"=",
"True",
"open",
"(",
"'.psiturk_history'",
",",
"'a'",
")",
".",
"close",
"(",
")",
"# create file if it doesn't exist",
"readline",
".",
"read_history_file",
"(",
"'.psiturk_history'",
")",
"for",
"i",
"in",
"range",
"(",
"readline",
".",
"get_current_history_length",
"(",
")",
")",
":",
"if",
"readline",
".",
"get_history_item",
"(",
"i",
")",
"is",
"not",
"None",
":",
"self",
".",
"history",
".",
"append",
"(",
"readline",
".",
"get_history_item",
"(",
"i",
")",
")"
] | 53.555556 | 0.006122 |
def get_assignable_book_ids(self, book_id):
"""Gets a list of books including and under the given book node in which any comment can be assigned.
arg: book_id (osid.id.Id): the ``Id`` of the ``Book``
return: (osid.id.IdList) - list of assignable book ``Ids``
raise: NullArgument - ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
# This will likely be overridden by an authorization adapter
mgr = self._get_provider_manager('COMMENTING', local=True)
lookup_session = mgr.get_book_lookup_session(proxy=self._proxy)
books = lookup_session.get_books()
id_list = []
for book in books:
id_list.append(book.get_id())
return IdList(id_list) | [
"def",
"get_assignable_book_ids",
"(",
"self",
",",
"book_id",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids",
"# This will likely be overridden by an authorization adapter",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'COMMENTING'",
",",
"local",
"=",
"True",
")",
"lookup_session",
"=",
"mgr",
".",
"get_book_lookup_session",
"(",
"proxy",
"=",
"self",
".",
"_proxy",
")",
"books",
"=",
"lookup_session",
".",
"get_books",
"(",
")",
"id_list",
"=",
"[",
"]",
"for",
"book",
"in",
"books",
":",
"id_list",
".",
"append",
"(",
"book",
".",
"get_id",
"(",
")",
")",
"return",
"IdList",
"(",
"id_list",
")"
] | 47.6 | 0.00309 |
def countok(self):
"""
Boolean array showing which stars pass all count constraints.
A "count constraint" is a constraint that affects the number of stars.
"""
ok = np.ones(len(self.stars)).astype(bool)
for name in self.constraints:
c = self.constraints[name]
if c.name not in self.selectfrac_skip:
ok &= c.ok
return ok | [
"def",
"countok",
"(",
"self",
")",
":",
"ok",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"stars",
")",
")",
".",
"astype",
"(",
"bool",
")",
"for",
"name",
"in",
"self",
".",
"constraints",
":",
"c",
"=",
"self",
".",
"constraints",
"[",
"name",
"]",
"if",
"c",
".",
"name",
"not",
"in",
"self",
".",
"selectfrac_skip",
":",
"ok",
"&=",
"c",
".",
"ok",
"return",
"ok"
] | 33.75 | 0.004808 |
def del_object(self, obj):
"""Debug deletes obj of obj[_type] with id of obj['_id']"""
if obj['_index'] is None or obj['_index'] == "":
raise Exception("Invalid Object")
if obj['_id'] is None or obj['_id'] == "":
raise Exception("Invalid Object")
if obj['_type'] is None or obj['_type'] == "":
raise Exception("Invalid Object")
self.connect_es()
self.es.delete(index=obj['_index'],
id=obj['_id'],
doc_type=obj['_type']) | [
"def",
"del_object",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"[",
"'_index'",
"]",
"is",
"None",
"or",
"obj",
"[",
"'_index'",
"]",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"\"Invalid Object\"",
")",
"if",
"obj",
"[",
"'_id'",
"]",
"is",
"None",
"or",
"obj",
"[",
"'_id'",
"]",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"\"Invalid Object\"",
")",
"if",
"obj",
"[",
"'_type'",
"]",
"is",
"None",
"or",
"obj",
"[",
"'_type'",
"]",
"==",
"\"\"",
":",
"raise",
"Exception",
"(",
"\"Invalid Object\"",
")",
"self",
".",
"connect_es",
"(",
")",
"self",
".",
"es",
".",
"delete",
"(",
"index",
"=",
"obj",
"[",
"'_index'",
"]",
",",
"id",
"=",
"obj",
"[",
"'_id'",
"]",
",",
"doc_type",
"=",
"obj",
"[",
"'_type'",
"]",
")"
] | 44.833333 | 0.003643 |
def create_paired_device(self, dev_id, agent_path,
capability, cb_notify_device, cb_notify_error):
"""
Creates a new object path for a remote device. This
method will connect to the remote device and retrieve
all SDP records and then initiate the pairing.
        If a previous call to :py:meth:`create_device` was
        successful, this method will only initiate the pairing.
Compared to :py:meth:`create_device` this method will
fail if the pairing already exists, but not if the object
path already has been created. This allows applications
to use :py:meth:`create_device` first and then, if needed,
use :py:meth:`create_paired_device` to initiate pairing.
The agent object path is assumed to reside within the
process (D-Bus connection instance) that calls this
method. No separate registration procedure is needed
for it and it gets automatically released once the
pairing operation is complete.
:param str dev_id: New device MAC address create
e.g., '11:22:33:44:55:66'
:param str agent_path: Path used when creating the
bluetooth agent e.g., '/test/agent'
:param str capability: Pairing agent capability
e.g., 'DisplayYesNo', etc
:param func cb_notify_device: Callback on success. The
callback is called with the new device's object
path as an argument.
:param func cb_notify_error: Callback on error. The
callback is called with the error reason.
:return:
:raises dbus.Exception: org.bluez.Error.InvalidArguments
:raises dbus.Exception: org.bluez.Error.Failed
"""
return self._interface.CreatePairedDevice(dev_id,
agent_path,
capability,
reply_handler=cb_notify_device, # noqa
error_handler=cb_notify_error) | [
"def",
"create_paired_device",
"(",
"self",
",",
"dev_id",
",",
"agent_path",
",",
"capability",
",",
"cb_notify_device",
",",
"cb_notify_error",
")",
":",
"return",
"self",
".",
"_interface",
".",
"CreatePairedDevice",
"(",
"dev_id",
",",
"agent_path",
",",
"capability",
",",
"reply_handler",
"=",
"cb_notify_device",
",",
"# noqa",
"error_handler",
"=",
"cb_notify_error",
")"
] | 49.47619 | 0.001888 |
def to_pb(self):
"""Converts the column family to a protobuf.
:rtype: :class:`.table_v2_pb2.ColumnFamily`
:returns: The converted current object.
"""
if self.gc_rule is None:
return table_v2_pb2.ColumnFamily()
else:
return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) | [
"def",
"to_pb",
"(",
"self",
")",
":",
"if",
"self",
".",
"gc_rule",
"is",
"None",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
")",
"else",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
"gc_rule",
"=",
"self",
".",
"gc_rule",
".",
"to_pb",
"(",
")",
")"
] | 34.2 | 0.005698 |
def to_py(o, keyword_fn: Callable[[kw.Keyword], Any] = _kw_name):
"""Recursively convert Lisp collections into Python collections."""
if isinstance(o, ISeq):
return _to_py_list(o, keyword_fn=keyword_fn)
elif not isinstance(
o, (IPersistentList, IPersistentMap, IPersistentSet, IPersistentVector)
):
return o
else: # pragma: no cover
return _to_py_backup(o, keyword_fn=keyword_fn) | [
"def",
"to_py",
"(",
"o",
",",
"keyword_fn",
":",
"Callable",
"[",
"[",
"kw",
".",
"Keyword",
"]",
",",
"Any",
"]",
"=",
"_kw_name",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"ISeq",
")",
":",
"return",
"_to_py_list",
"(",
"o",
",",
"keyword_fn",
"=",
"keyword_fn",
")",
"elif",
"not",
"isinstance",
"(",
"o",
",",
"(",
"IPersistentList",
",",
"IPersistentMap",
",",
"IPersistentSet",
",",
"IPersistentVector",
")",
")",
":",
"return",
"o",
"else",
":",
"# pragma: no cover",
"return",
"_to_py_backup",
"(",
"o",
",",
"keyword_fn",
"=",
"keyword_fn",
")"
] | 42.3 | 0.002315 |
def evaluate_variable(self, name):
"""Evaluates the variable given by name."""
if isinstance(self.variables[name], six.string_types):
# TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const
value = eval(self.variables[name], expression_namespace, self.variables)
return value
else:
return self.variables[name] | [
"def",
"evaluate_variable",
"(",
"self",
",",
"name",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"variables",
"[",
"name",
"]",
",",
"six",
".",
"string_types",
")",
":",
"# TODO: this does not allow more than one level deep variable, like a depends on b, b on c, c is a const",
"value",
"=",
"eval",
"(",
"self",
".",
"variables",
"[",
"name",
"]",
",",
"expression_namespace",
",",
"self",
".",
"variables",
")",
"return",
"value",
"else",
":",
"return",
"self",
".",
"variables",
"[",
"name",
"]"
] | 52.875 | 0.009302 |
def get_service_types(self):
"""
Get all service types supported by this cluster.
@return: A list of service types (strings)
"""
resp = self._get_resource_root().get(self._path() + '/serviceTypes')
return resp[ApiList.LIST_KEY] | [
"def",
"get_service_types",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"_get_resource_root",
"(",
")",
".",
"get",
"(",
"self",
".",
"_path",
"(",
")",
"+",
"'/serviceTypes'",
")",
"return",
"resp",
"[",
"ApiList",
".",
"LIST_KEY",
"]"
] | 30.625 | 0.003968 |
def check_street_suffix(self, token):
"""
        Attempts to match a street suffix. If found, stores the abbreviation on
        self.street_suffix with the first letter capitalized and a period after
        it (e.g. "St." or "Ave.") and returns True.
"""
# Suffix must come before street
# print "Suffix check", token, "suffix", self.street_suffix, "street", self.street
if self.street_suffix is None and self.street is None:
# print "upper", token.upper()
if token.upper() in self.parser.suffixes.keys():
suffix = self.parser.suffixes[token.upper()]
self.street_suffix = self._clean(suffix.capitalize() + '.')
return True
elif token.upper() in self.parser.suffixes.values():
self.street_suffix = self._clean(token.capitalize() + '.')
return True
return False | [
"def",
"check_street_suffix",
"(",
"self",
",",
"token",
")",
":",
"# Suffix must come before street",
"# print \"Suffix check\", token, \"suffix\", self.street_suffix, \"street\", self.street",
"if",
"self",
".",
"street_suffix",
"is",
"None",
"and",
"self",
".",
"street",
"is",
"None",
":",
"# print \"upper\", token.upper()",
"if",
"token",
".",
"upper",
"(",
")",
"in",
"self",
".",
"parser",
".",
"suffixes",
".",
"keys",
"(",
")",
":",
"suffix",
"=",
"self",
".",
"parser",
".",
"suffixes",
"[",
"token",
".",
"upper",
"(",
")",
"]",
"self",
".",
"street_suffix",
"=",
"self",
".",
"_clean",
"(",
"suffix",
".",
"capitalize",
"(",
")",
"+",
"'.'",
")",
"return",
"True",
"elif",
"token",
".",
"upper",
"(",
")",
"in",
"self",
".",
"parser",
".",
"suffixes",
".",
"values",
"(",
")",
":",
"self",
".",
"street_suffix",
"=",
"self",
".",
"_clean",
"(",
"token",
".",
"capitalize",
"(",
")",
"+",
"'.'",
")",
"return",
"True",
"return",
"False"
] | 51.176471 | 0.004515 |
def getDefaultItems(self):
""" Returns a list with the default plugins in the repo tree item registry.
"""
return [
RtiRegItem('HDF-5 file',
'argos.repo.rtiplugins.hdf5.H5pyFileRti',
extensions=['hdf5', 'h5', 'h5e', 'he5', 'nc']), # hdf extension is for HDF-4
RtiRegItem('MATLAB file',
'argos.repo.rtiplugins.scipyio.MatlabFileRti',
extensions=['mat']),
RtiRegItem('NetCDF file',
'argos.repo.rtiplugins.ncdf.NcdfFileRti',
#extensions=['nc', 'nc3', 'nc4']),
extensions=['nc', 'nc4']),
#extensions=[]),
RtiRegItem('NumPy binary file',
'argos.repo.rtiplugins.numpyio.NumpyBinaryFileRti',
extensions=['npy']),
RtiRegItem('NumPy compressed file',
'argos.repo.rtiplugins.numpyio.NumpyCompressedFileRti',
extensions=['npz']),
RtiRegItem('NumPy text file',
'argos.repo.rtiplugins.numpyio.NumpyTextFileRti',
#extensions=['txt', 'text']),
extensions=['dat']),
RtiRegItem('IDL save file',
'argos.repo.rtiplugins.scipyio.IdlSaveFileRti',
extensions=['sav']),
RtiRegItem('Pandas CSV file',
'argos.repo.rtiplugins.pandasio.PandasCsvFileRti',
extensions=['csv']),
RtiRegItem('Pillow image',
'argos.repo.rtiplugins.pillowio.PillowFileRti',
extensions=['bmp', 'eps', 'im', 'gif', 'jpg', 'jpeg', 'msp', 'pcx',
'png', 'ppm', 'spi', 'tif', 'tiff', 'xbm', 'xv']),
RtiRegItem('Wav file',
'argos.repo.rtiplugins.scipyio.WavFileRti',
extensions=['wav'])] | [
"def",
"getDefaultItems",
"(",
"self",
")",
":",
"return",
"[",
"RtiRegItem",
"(",
"'HDF-5 file'",
",",
"'argos.repo.rtiplugins.hdf5.H5pyFileRti'",
",",
"extensions",
"=",
"[",
"'hdf5'",
",",
"'h5'",
",",
"'h5e'",
",",
"'he5'",
",",
"'nc'",
"]",
")",
",",
"# hdf extension is for HDF-4",
"RtiRegItem",
"(",
"'MATLAB file'",
",",
"'argos.repo.rtiplugins.scipyio.MatlabFileRti'",
",",
"extensions",
"=",
"[",
"'mat'",
"]",
")",
",",
"RtiRegItem",
"(",
"'NetCDF file'",
",",
"'argos.repo.rtiplugins.ncdf.NcdfFileRti'",
",",
"#extensions=['nc', 'nc3', 'nc4']),",
"extensions",
"=",
"[",
"'nc'",
",",
"'nc4'",
"]",
")",
",",
"#extensions=[]),",
"RtiRegItem",
"(",
"'NumPy binary file'",
",",
"'argos.repo.rtiplugins.numpyio.NumpyBinaryFileRti'",
",",
"extensions",
"=",
"[",
"'npy'",
"]",
")",
",",
"RtiRegItem",
"(",
"'NumPy compressed file'",
",",
"'argos.repo.rtiplugins.numpyio.NumpyCompressedFileRti'",
",",
"extensions",
"=",
"[",
"'npz'",
"]",
")",
",",
"RtiRegItem",
"(",
"'NumPy text file'",
",",
"'argos.repo.rtiplugins.numpyio.NumpyTextFileRti'",
",",
"#extensions=['txt', 'text']),",
"extensions",
"=",
"[",
"'dat'",
"]",
")",
",",
"RtiRegItem",
"(",
"'IDL save file'",
",",
"'argos.repo.rtiplugins.scipyio.IdlSaveFileRti'",
",",
"extensions",
"=",
"[",
"'sav'",
"]",
")",
",",
"RtiRegItem",
"(",
"'Pandas CSV file'",
",",
"'argos.repo.rtiplugins.pandasio.PandasCsvFileRti'",
",",
"extensions",
"=",
"[",
"'csv'",
"]",
")",
",",
"RtiRegItem",
"(",
"'Pillow image'",
",",
"'argos.repo.rtiplugins.pillowio.PillowFileRti'",
",",
"extensions",
"=",
"[",
"'bmp'",
",",
"'eps'",
",",
"'im'",
",",
"'gif'",
",",
"'jpg'",
",",
"'jpeg'",
",",
"'msp'",
",",
"'pcx'",
",",
"'png'",
",",
"'ppm'",
",",
"'spi'",
",",
"'tif'",
",",
"'tiff'",
",",
"'xbm'",
",",
"'xv'",
"]",
")",
",",
"RtiRegItem",
"(",
"'Wav file'",
",",
"'argos.repo.rtiplugins.scipyio.WavFileRti'",
",",
"extensions",
"=",
"[",
"'wav'",
"]",
")",
"]"
] | 42.638298 | 0.006341 |
def update_fname_label(self):
"""Upadte file name label."""
filename = to_text_string(self.get_current_filename())
if len(filename) > 100:
shorten_filename = u'...' + filename[-100:]
else:
shorten_filename = filename
self.fname_label.setText(shorten_filename) | [
"def",
"update_fname_label",
"(",
"self",
")",
":",
"filename",
"=",
"to_text_string",
"(",
"self",
".",
"get_current_filename",
"(",
")",
")",
"if",
"len",
"(",
"filename",
")",
">",
"100",
":",
"shorten_filename",
"=",
"u'...'",
"+",
"filename",
"[",
"-",
"100",
":",
"]",
"else",
":",
"shorten_filename",
"=",
"filename",
"self",
".",
"fname_label",
".",
"setText",
"(",
"shorten_filename",
")"
] | 40.375 | 0.006061 |
def _bump_version(version, component='patch'):
# type: (str, str) -> str
""" Bump the given version component.
Args:
version (str):
The current version. The format is: MAJOR.MINOR[.PATCH].
component (str):
What part of the version should be bumped. Can be one of:
- major
- minor
- patch
Returns:
str: Bumped version as a string.
"""
if component not in ('major', 'minor', 'patch'):
raise ValueError("Invalid version component: {}".format(component))
m = RE_VERSION.match(version)
if m is None:
raise ValueError("Version must be in MAJOR.MINOR[.PATCH] format")
major = m.group('major')
minor = m.group('minor') or '0'
patch = m.group('patch') or None
if patch == '0':
patch = None
if component == 'major':
major = str(int(major) + 1)
minor = '0'
patch = None
elif component == 'minor':
minor = str(int(minor) + 1)
patch = None
else:
patch = patch or 0
patch = str(int(patch) + 1)
new_ver = '{}.{}'.format(major, minor)
if patch is not None:
new_ver += '.' + patch
return new_ver | [
"def",
"_bump_version",
"(",
"version",
",",
"component",
"=",
"'patch'",
")",
":",
"# type: (str, str) -> str",
"if",
"component",
"not",
"in",
"(",
"'major'",
",",
"'minor'",
",",
"'patch'",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid version component: {}\"",
".",
"format",
"(",
"component",
")",
")",
"m",
"=",
"RE_VERSION",
".",
"match",
"(",
"version",
")",
"if",
"m",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Version must be in MAJOR.MINOR[.PATCH] format\"",
")",
"major",
"=",
"m",
".",
"group",
"(",
"'major'",
")",
"minor",
"=",
"m",
".",
"group",
"(",
"'minor'",
")",
"or",
"'0'",
"patch",
"=",
"m",
".",
"group",
"(",
"'patch'",
")",
"or",
"None",
"if",
"patch",
"==",
"'0'",
":",
"patch",
"=",
"None",
"if",
"component",
"==",
"'major'",
":",
"major",
"=",
"str",
"(",
"int",
"(",
"major",
")",
"+",
"1",
")",
"minor",
"=",
"'0'",
"patch",
"=",
"None",
"elif",
"component",
"==",
"'minor'",
":",
"minor",
"=",
"str",
"(",
"int",
"(",
"minor",
")",
"+",
"1",
")",
"patch",
"=",
"None",
"else",
":",
"patch",
"=",
"patch",
"or",
"0",
"patch",
"=",
"str",
"(",
"int",
"(",
"patch",
")",
"+",
"1",
")",
"new_ver",
"=",
"'{}.{}'",
".",
"format",
"(",
"major",
",",
"minor",
")",
"if",
"patch",
"is",
"not",
"None",
":",
"new_ver",
"+=",
"'.'",
"+",
"patch",
"return",
"new_ver"
] | 24.183673 | 0.000811 |
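Usage sketch; RE_VERSION is a module-level regex in the original, so a compatible guess is supplied here:

import re

RE_VERSION = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<patch>\d+))?$')

print(_bump_version('1.2.3'))            # 1.2.4
print(_bump_version('1.2.3', 'minor'))   # 1.3
print(_bump_version('1.9'))              # 1.9.1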
def check_password(self, raw_password):
"""Calls :py:func:`~xmpp_backends.base.XmppBackendBase.check_password` for the user."""
return xmpp_backend.check_password(self.node, self.domain, raw_password) | [
"def",
"check_password",
"(",
"self",
",",
"raw_password",
")",
":",
"return",
"xmpp_backend",
".",
"check_password",
"(",
"self",
".",
"node",
",",
"self",
".",
"domain",
",",
"raw_password",
")"
] | 71.333333 | 0.018519 |
def solve_kkt(U_Q, d, G, A, U_S, rx, rs, rz, ry, dbg=False):
""" Solve KKT equations for the affine step"""
nineq, nz, neq, _ = get_sizes(G, A)
invQ_rx = torch.potrs(rx.view(-1, 1), U_Q).view(-1)
if neq > 0:
h = torch.cat([torch.mv(A, invQ_rx) - ry,
torch.mv(G, invQ_rx) + rs / d - rz], 0)
else:
h = torch.mv(G, invQ_rx) + rs / d - rz
w = -torch.potrs(h.view(-1, 1), U_S).view(-1)
g1 = -rx - torch.mv(G.t(), w[neq:])
if neq > 0:
g1 -= torch.mv(A.t(), w[:neq])
g2 = -rs - w[neq:]
dx = torch.potrs(g1.view(-1, 1), U_Q).view(-1)
ds = g2 / d
dz = w[neq:]
dy = w[:neq] if neq > 0 else None
# if np.all(np.array([x.norm() for x in [rx, rs, rz, ry]]) != 0):
if dbg:
import IPython
import sys
IPython.embed()
sys.exit(-1)
# if rs.norm() > 0: import IPython, sys; IPython.embed(); sys.exit(-1)
return dx, ds, dz, dy | [
"def",
"solve_kkt",
"(",
"U_Q",
",",
"d",
",",
"G",
",",
"A",
",",
"U_S",
",",
"rx",
",",
"rs",
",",
"rz",
",",
"ry",
",",
"dbg",
"=",
"False",
")",
":",
"nineq",
",",
"nz",
",",
"neq",
",",
"_",
"=",
"get_sizes",
"(",
"G",
",",
"A",
")",
"invQ_rx",
"=",
"torch",
".",
"potrs",
"(",
"rx",
".",
"view",
"(",
"-",
"1",
",",
"1",
")",
",",
"U_Q",
")",
".",
"view",
"(",
"-",
"1",
")",
"if",
"neq",
">",
"0",
":",
"h",
"=",
"torch",
".",
"cat",
"(",
"[",
"torch",
".",
"mv",
"(",
"A",
",",
"invQ_rx",
")",
"-",
"ry",
",",
"torch",
".",
"mv",
"(",
"G",
",",
"invQ_rx",
")",
"+",
"rs",
"/",
"d",
"-",
"rz",
"]",
",",
"0",
")",
"else",
":",
"h",
"=",
"torch",
".",
"mv",
"(",
"G",
",",
"invQ_rx",
")",
"+",
"rs",
"/",
"d",
"-",
"rz",
"w",
"=",
"-",
"torch",
".",
"potrs",
"(",
"h",
".",
"view",
"(",
"-",
"1",
",",
"1",
")",
",",
"U_S",
")",
".",
"view",
"(",
"-",
"1",
")",
"g1",
"=",
"-",
"rx",
"-",
"torch",
".",
"mv",
"(",
"G",
".",
"t",
"(",
")",
",",
"w",
"[",
"neq",
":",
"]",
")",
"if",
"neq",
">",
"0",
":",
"g1",
"-=",
"torch",
".",
"mv",
"(",
"A",
".",
"t",
"(",
")",
",",
"w",
"[",
":",
"neq",
"]",
")",
"g2",
"=",
"-",
"rs",
"-",
"w",
"[",
"neq",
":",
"]",
"dx",
"=",
"torch",
".",
"potrs",
"(",
"g1",
".",
"view",
"(",
"-",
"1",
",",
"1",
")",
",",
"U_Q",
")",
".",
"view",
"(",
"-",
"1",
")",
"ds",
"=",
"g2",
"/",
"d",
"dz",
"=",
"w",
"[",
"neq",
":",
"]",
"dy",
"=",
"w",
"[",
":",
"neq",
"]",
"if",
"neq",
">",
"0",
"else",
"None",
"# if np.all(np.array([x.norm() for x in [rx, rs, rz, ry]]) != 0):",
"if",
"dbg",
":",
"import",
"IPython",
"import",
"sys",
"IPython",
".",
"embed",
"(",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"# if rs.norm() > 0: import IPython, sys; IPython.embed(); sys.exit(-1)",
"return",
"dx",
",",
"ds",
",",
"dz",
",",
"dy"
] | 29 | 0.001043 |
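A sketch of how the two Cholesky factors fed to solve_kkt can be formed. get_sizes is stubbed with an assumed signature, and torch.potrf/potrs are the legacy names this snippet targets (modern PyTorch renamed them to cholesky/cholesky_solve). Eliminating dx and ds from the KKT system gives the Schur complement S = G Q^{-1} G^T + diag(1/d):

import torch

def get_sizes(G, A):   # assumed stand-in: returns (nineq, nz, neq, nBatch)
    neq = A.size(0) if A.dim() > 1 else 0
    return G.size(0), G.size(1), neq, None

nz, nineq = 3, 2
Q = 2.0 * torch.eye(nz)                  # must be positive definite
G = torch.randn(nineq, nz)
A = torch.Tensor()                       # no equality constraints
d = torch.ones(nineq)

U_Q = torch.potrf(Q)                     # upper Cholesky factor of Q
S = G @ torch.potrs(G.t(), U_Q) + torch.diag(1.0 / d)
U_S = torch.potrf(S)

rx, rs, rz = torch.randn(nz), torch.randn(nineq), torch.randn(nineq)
dx, ds, dz, dy = solve_kkt(U_Q, d, G, A, U_S, rx, rs, rz, torch.Tensor())
print(dx, dz)                            # dy is None since neq == 0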
def count(self, key):
"""Return the number of pairs with key *key*."""
count = 0
pos = self.index(key, -1)
if pos == -1:
return count
count += 1
for i in range(pos+1, len(self)):
if self[i][0] != key:
break
count += 1
return count | [
"def",
"count",
"(",
"self",
",",
"key",
")",
":",
"count",
"=",
"0",
"pos",
"=",
"self",
".",
"index",
"(",
"key",
",",
"-",
"1",
")",
"if",
"pos",
"==",
"-",
"1",
":",
"return",
"count",
"count",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"pos",
"+",
"1",
",",
"len",
"(",
"self",
")",
")",
":",
"if",
"self",
"[",
"i",
"]",
"[",
"0",
"]",
"!=",
"key",
":",
"break",
"count",
"+=",
"1",
"return",
"count"
] | 27.25 | 0.005917 |
def register(nbtool):
"""
Register the provided NBTool object
"""
global _py_funcs
_lazy_init()
# Save references to the tool's load() and render() functions
load_key = nbtool.origin + '|' + nbtool.id + '|load'
render_key = nbtool.origin + '|' + nbtool.id + '|render'
_py_funcs[load_key] = nbtool.load
_py_funcs[render_key] = nbtool.render
# Clean optional metadata for inclusion in JavaScript
clean_description = "null" if nbtool.description is None else '"' + nbtool.description.replace('"','\\"') + '"'
clean_version = "null" if nbtool.version is None else '"' + nbtool.version.replace('"','\\"') + '"'
clean_tags = "null" if nbtool.tags is None else json.dumps(nbtool.tags)
clean_attributes = "null" if nbtool.attributes is None else json.dumps(nbtool.attributes)
# Pass the metadata to JavaScript
IPython.display.display_javascript("""
console.log('ok');
NBToolManager.instance().register(new NBToolManager.NBTool({
origin: "%s",
id: "%s",
name: "%s",
description: %s,
version: %s,
tags: %s,
attributes: %s,
load: function() {
var x = Jupyter.notebook.kernel.execute('nbtools._py_funcs["%s"]()',
{
iopub: {
output: function(response) {
// Print the return value of the Python code to the console
console.log(response.content.data["text/plain"]);
}
}
},
{
silent: false,
store_history: false,
stop_on_error: true
});
return true;
},
render: function() {
var x = Jupyter.notebook.kernel.execute('nbtools._py_funcs["%s"]()',
{
iopub: {
output: function(response) {
// Print the return value of the Python code to the console
console.log(response.content.data["text/plain"]);
}
}
},
{
silent: false,
store_history: false,
stop_on_error: true
});
return null;
},
}));
""" % (nbtool.origin, nbtool.id, nbtool.name,
clean_description, clean_version, clean_tags, clean_attributes,
load_key, render_key), raw=True)
return True | [
"def",
"register",
"(",
"nbtool",
")",
":",
"global",
"_py_funcs",
"_lazy_init",
"(",
")",
"# Save references to the tool's load() and render() functions",
"load_key",
"=",
"nbtool",
".",
"origin",
"+",
"'|'",
"+",
"nbtool",
".",
"id",
"+",
"'|load'",
"render_key",
"=",
"nbtool",
".",
"origin",
"+",
"'|'",
"+",
"nbtool",
".",
"id",
"+",
"'|render'",
"_py_funcs",
"[",
"load_key",
"]",
"=",
"nbtool",
".",
"load",
"_py_funcs",
"[",
"render_key",
"]",
"=",
"nbtool",
".",
"render",
"# Clean optional metadata for inclusion in JavaScript",
"clean_description",
"=",
"\"null\"",
"if",
"nbtool",
".",
"description",
"is",
"None",
"else",
"'\"'",
"+",
"nbtool",
".",
"description",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"+",
"'\"'",
"clean_version",
"=",
"\"null\"",
"if",
"nbtool",
".",
"version",
"is",
"None",
"else",
"'\"'",
"+",
"nbtool",
".",
"version",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"+",
"'\"'",
"clean_tags",
"=",
"\"null\"",
"if",
"nbtool",
".",
"tags",
"is",
"None",
"else",
"json",
".",
"dumps",
"(",
"nbtool",
".",
"tags",
")",
"clean_attributes",
"=",
"\"null\"",
"if",
"nbtool",
".",
"attributes",
"is",
"None",
"else",
"json",
".",
"dumps",
"(",
"nbtool",
".",
"attributes",
")",
"# Pass the metadata to JavaScript",
"IPython",
".",
"display",
".",
"display_javascript",
"(",
"\"\"\"\n console.log('ok');\n NBToolManager.instance().register(new NBToolManager.NBTool({\n origin: \"%s\",\n id: \"%s\",\n name: \"%s\",\n description: %s,\n version: %s,\n tags: %s,\n attributes: %s,\n load: function() {\n var x = Jupyter.notebook.kernel.execute('nbtools._py_funcs[\"%s\"]()',\n {\n iopub: {\n output: function(response) {\n // Print the return value of the Python code to the console\n console.log(response.content.data[\"text/plain\"]);\n }\n }\n },\n {\n silent: false,\n store_history: false,\n stop_on_error: true\n });\n return true;\n },\n render: function() {\n var x = Jupyter.notebook.kernel.execute('nbtools._py_funcs[\"%s\"]()',\n {\n iopub: {\n output: function(response) {\n // Print the return value of the Python code to the console\n console.log(response.content.data[\"text/plain\"]);\n }\n }\n },\n {\n silent: false,\n store_history: false,\n stop_on_error: true\n });\n return null;\n },\n }));\n \"\"\"",
"%",
"(",
"nbtool",
".",
"origin",
",",
"nbtool",
".",
"id",
",",
"nbtool",
".",
"name",
",",
"clean_description",
",",
"clean_version",
",",
"clean_tags",
",",
"clean_attributes",
",",
"load_key",
",",
"render_key",
")",
",",
"raw",
"=",
"True",
")",
"return",
"True"
] | 39.347826 | 0.003593 |
def _validate_message(self, message):
"""
Is C{message} a valid direct child of this action?
@param message: Either a C{WrittenAction} or a C{WrittenMessage}.
@raise WrongTask: If C{message} has a C{task_uuid} that differs from the
action's C{task_uuid}.
@raise WrongTaskLevel: If C{message} has a C{task_level} that means
it's not a direct child.
"""
if message.task_uuid != self.task_uuid:
raise WrongTask(self, message)
if not message.task_level.parent() == self.task_level:
raise WrongTaskLevel(self, message) | [
"def",
"_validate_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"task_uuid",
"!=",
"self",
".",
"task_uuid",
":",
"raise",
"WrongTask",
"(",
"self",
",",
"message",
")",
"if",
"not",
"message",
".",
"task_level",
".",
"parent",
"(",
")",
"==",
"self",
".",
"task_level",
":",
"raise",
"WrongTaskLevel",
"(",
"self",
",",
"message",
")"
] | 40.866667 | 0.004785 |
def xSectionChunk(lines):
"""
Parse XSEC Method
"""
# Constants
KEYWORDS = ('MANNINGS_N',
'BOTTOM_WIDTH',
'BANKFULL_DEPTH',
'SIDE_SLOPE',
'NPAIRS',
'NUM_INTERP',
'X1',
'ERODE',
'MAX_EROSION',
'SUBSURFACE',
'M_RIVER',
'K_RIVER')
result = {'mannings_n': None,
'bottom_width': None,
'bankfull_depth': None,
'side_slope': None,
'npairs': None,
'num_interp': None,
'erode': False,
'subsurface': False,
'max_erosion': None,
'm_river': None,
'k_river': None,
'breakpoints': []}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Strip and split the line (only one item in each list)
schunk = chunk[0].strip().split()
# Cases
if key == 'X1':
# Extract breakpoint XY pairs
x = schunk[1]
y = schunk[2]
result['breakpoints'].append({'x': x, 'y': y})
            elif key in ('SUBSURFACE', 'ERODE'):
# Set booleans
result[key.lower()] = True
else:
# Extract value
result[key.lower()] = schunk[1]
return result | [
"def",
"xSectionChunk",
"(",
"lines",
")",
":",
"# Constants",
"KEYWORDS",
"=",
"(",
"'MANNINGS_N'",
",",
"'BOTTOM_WIDTH'",
",",
"'BANKFULL_DEPTH'",
",",
"'SIDE_SLOPE'",
",",
"'NPAIRS'",
",",
"'NUM_INTERP'",
",",
"'X1'",
",",
"'ERODE'",
",",
"'MAX_EROSION'",
",",
"'SUBSURFACE'",
",",
"'M_RIVER'",
",",
"'K_RIVER'",
")",
"result",
"=",
"{",
"'mannings_n'",
":",
"None",
",",
"'bottom_width'",
":",
"None",
",",
"'bankfull_depth'",
":",
"None",
",",
"'side_slope'",
":",
"None",
",",
"'npairs'",
":",
"None",
",",
"'num_interp'",
":",
"None",
",",
"'erode'",
":",
"False",
",",
"'subsurface'",
":",
"False",
",",
"'max_erosion'",
":",
"None",
",",
"'m_river'",
":",
"None",
",",
"'k_river'",
":",
"None",
",",
"'breakpoints'",
":",
"[",
"]",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Strip and split the line (only one item in each list)",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"key",
"==",
"'X1'",
":",
"# Extract breakpoint XY pairs",
"x",
"=",
"schunk",
"[",
"1",
"]",
"y",
"=",
"schunk",
"[",
"2",
"]",
"result",
"[",
"'breakpoints'",
"]",
".",
"append",
"(",
"{",
"'x'",
":",
"x",
",",
"'y'",
":",
"y",
"}",
")",
"if",
"key",
"in",
"(",
"'SUBSURFACE'",
",",
"'ERODE'",
")",
":",
"# Set booleans",
"result",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"True",
"else",
":",
"# Extract value",
"result",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"return",
"result"
] | 28.163636 | 0.000624 |
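A sketch of parsing one cross-section card. pt.chunk and six.iteritems are external helpers; the stand-ins below guess at their behaviour (chunking groups each line under its leading keyword):

class pt:
    @staticmethod
    def chunk(keywords, lines):
        chunks = {}
        for line in lines:
            key = line.strip().split()[0]
            if key in keywords:
                chunks.setdefault(key, []).append([line])
        return chunks

def iteritems(d):   # stand-in for six.iteritems on Python 3
    return iter(d.items())

card = ['MANNINGS_N 0.035', 'NPAIRS 2', 'X1 0.0 10.0', 'X1 5.0 8.5', 'ERODE']
parsed = xSectionChunk(card)
print(parsed['mannings_n'], parsed['erode'], parsed['breakpoints'])
# 0.035 True [{'x': '0.0', 'y': '10.0'}, {'x': '5.0', 'y': '8.5'}]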
def rmmkdir(dir_path):
# type: (AnyStr) -> None
"""If directory existed, then remove and make; else make it."""
if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
os.makedirs(dir_path)
else:
rmtree(dir_path, True)
os.makedirs(dir_path) | [
"def",
"rmmkdir",
"(",
"dir_path",
")",
":",
"# type: (AnyStr) -> None",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_path",
")",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_path",
")",
"else",
":",
"rmtree",
"(",
"dir_path",
",",
"True",
")",
"os",
".",
"makedirs",
"(",
"dir_path",
")"
] | 38.625 | 0.009494 |
def recordsDF(token='', version=''):
'''https://iexcloud.io/docs/api/#stats-records
Args:
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(records(token, version))
_toDatetime(df)
return df | [
"def",
"recordsDF",
"(",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"records",
"(",
"token",
",",
"version",
")",
")",
"_toDatetime",
"(",
"df",
")",
"return",
"df"
] | 22.307692 | 0.003311 |
def _create_delete_request(self, resource, billomat_id):
"""
Creates a post request and return the response data
"""
assert (isinstance(resource, str))
if isinstance(billomat_id, int):
billomat_id = str(billomat_id)
response = self.session.delete(
url=self.api_url + resource + '/' + billomat_id,
)
return self._handle_response(response) | [
"def",
"_create_delete_request",
"(",
"self",
",",
"resource",
",",
"billomat_id",
")",
":",
"assert",
"(",
"isinstance",
"(",
"resource",
",",
"str",
")",
")",
"if",
"isinstance",
"(",
"billomat_id",
",",
"int",
")",
":",
"billomat_id",
"=",
"str",
"(",
"billomat_id",
")",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
"=",
"self",
".",
"api_url",
"+",
"resource",
"+",
"'/'",
"+",
"billomat_id",
",",
")",
"return",
"self",
".",
"_handle_response",
"(",
"response",
")"
] | 29.642857 | 0.004673 |
def _filter(self, filename: str, df: pd.DataFrame) -> pd.DataFrame:
"""Apply view filters"""
view = self._view.get(filename)
if view is None:
return df
for col, values in view.items():
# If applicable, filter this dataframe by the given set of values
if col in df.columns:
df = df[df[col].isin(setwrap(values))]
return df | [
"def",
"_filter",
"(",
"self",
",",
"filename",
":",
"str",
",",
"df",
":",
"pd",
".",
"DataFrame",
")",
"->",
"pd",
".",
"DataFrame",
":",
"view",
"=",
"self",
".",
"_view",
".",
"get",
"(",
"filename",
")",
"if",
"view",
"is",
"None",
":",
"return",
"df",
"for",
"col",
",",
"values",
"in",
"view",
".",
"items",
"(",
")",
":",
"# If applicable, filter this dataframe by the given set of values",
"if",
"col",
"in",
"df",
".",
"columns",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"col",
"]",
".",
"isin",
"(",
"setwrap",
"(",
"values",
")",
")",
"]",
"return",
"df"
] | 33.666667 | 0.004819 |
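
The view-filter semantics above in miniature; setwrap here is a stand-in
(assumed to coerce a scalar or an iterable into a set):

import pandas as pd

def setwrap(values):
    # Hypothetical stand-in: wrap a scalar in a set, pass iterables through.
    return {values} if isinstance(values, str) else set(values)

df = pd.DataFrame({'state': ['NY', 'CA', 'TX'], 'count': [1, 2, 3]})
view = {'state': 'NY', 'missing_col': 'ignored'}
for col, values in view.items():
    if col in df.columns:  # unknown columns are silently skipped
        df = df[df[col].isin(setwrap(values))]
print(df)  # only the NY row survives
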
def decrypt(payload, private_key):
"""Decrypt an encrypted JSON payload and return the Magic Envelope document inside."""
cipher = PKCS1_v1_5.new(private_key)
aes_key_str = cipher.decrypt(b64decode(payload.get("aes_key")), sentinel=None)
aes_key = json.loads(aes_key_str.decode("utf-8"))
key = b64decode(aes_key.get("key"))
iv = b64decode(aes_key.get("iv"))
encrypted_magic_envelope = b64decode(payload.get("encrypted_magic_envelope"))
encrypter = AES.new(key, AES.MODE_CBC, iv)
content = encrypter.decrypt(encrypted_magic_envelope)
return etree.fromstring(pkcs7_unpad(content)) | [
"def",
"decrypt",
"(",
"payload",
",",
"private_key",
")",
":",
"cipher",
"=",
"PKCS1_v1_5",
".",
"new",
"(",
"private_key",
")",
"aes_key_str",
"=",
"cipher",
".",
"decrypt",
"(",
"b64decode",
"(",
"payload",
".",
"get",
"(",
"\"aes_key\"",
")",
")",
",",
"sentinel",
"=",
"None",
")",
"aes_key",
"=",
"json",
".",
"loads",
"(",
"aes_key_str",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"key",
"=",
"b64decode",
"(",
"aes_key",
".",
"get",
"(",
"\"key\"",
")",
")",
"iv",
"=",
"b64decode",
"(",
"aes_key",
".",
"get",
"(",
"\"iv\"",
")",
")",
"encrypted_magic_envelope",
"=",
"b64decode",
"(",
"payload",
".",
"get",
"(",
"\"encrypted_magic_envelope\"",
")",
")",
"encrypter",
"=",
"AES",
".",
"new",
"(",
"key",
",",
"AES",
".",
"MODE_CBC",
",",
"iv",
")",
"content",
"=",
"encrypter",
".",
"decrypt",
"(",
"encrypted_magic_envelope",
")",
"return",
"etree",
".",
"fromstring",
"(",
"pkcs7_unpad",
"(",
"content",
")",
")"
] | 58.909091 | 0.007599 |
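
A minimal sketch of the pkcs7_unpad helper called above, assuming standard
PKCS#7 padding in which the final byte encodes the pad length:

def pkcs7_unpad(data, block_size=16):
    # The last byte gives the number of padding bytes to strip.
    pad_len = data[-1] if isinstance(data[-1], int) else ord(data[-1])
    if not 1 <= pad_len <= block_size:
        raise ValueError('invalid PKCS#7 padding')
    return data[:-pad_len]

print(pkcs7_unpad(b'hello world\x05\x05\x05\x05\x05'))  # b'hello world'
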
def logical_repr(self, value):
"""Set the string representation of logical values."""
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1] | [
"def",
"logical_repr",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"any",
"(",
"isinstance",
"(",
"value",
",",
"t",
")",
"for",
"t",
"in",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Logical representation must be a tuple with \"",
"\"a valid true and false value.\"",
")",
"if",
"not",
"len",
"(",
"value",
")",
"==",
"2",
":",
"raise",
"ValueError",
"(",
"\"List must contain two values.\"",
")",
"self",
".",
"false_repr",
"=",
"value",
"[",
"0",
"]",
"self",
".",
"true_repr",
"=",
"value",
"[",
"1",
"]"
] | 44.9 | 0.004367 |
def change_column_name(
conn,
table,
old_column_name,
new_column_name,
schema=None
):
"""
    Changes the given `activity` jsonb data column key. This function is useful
    when you want to reflect column name changes to the activity table.
::
from alembic import op
from postgresql_audit import change_column_name
def upgrade():
op.alter_column(
'my_table',
'my_column',
new_column_name='some_column'
)
change_column_name(op, 'my_table', 'my_column', 'some_column')
:param conn:
An object that is able to execute SQL (either SQLAlchemy Connection,
Engine or Alembic Operations object)
:param table:
The table to run the column name changes against
:param old_column_name:
Name of the column to change
:param new_column_name:
        New column name
:param schema:
Optional name of schema to use.
"""
activity_table = get_activity_table(schema=schema)
query = (
activity_table
.update()
.values(
old_data=jsonb_change_key_name(
activity_table.c.old_data,
old_column_name,
new_column_name
),
changed_data=jsonb_change_key_name(
activity_table.c.changed_data,
old_column_name,
new_column_name
)
)
.where(activity_table.c.table_name == table)
)
return conn.execute(query) | [
"def",
"change_column_name",
"(",
"conn",
",",
"table",
",",
"old_column_name",
",",
"new_column_name",
",",
"schema",
"=",
"None",
")",
":",
"activity_table",
"=",
"get_activity_table",
"(",
"schema",
"=",
"schema",
")",
"query",
"=",
"(",
"activity_table",
".",
"update",
"(",
")",
".",
"values",
"(",
"old_data",
"=",
"jsonb_change_key_name",
"(",
"activity_table",
".",
"c",
".",
"old_data",
",",
"old_column_name",
",",
"new_column_name",
")",
",",
"changed_data",
"=",
"jsonb_change_key_name",
"(",
"activity_table",
".",
"c",
".",
"changed_data",
",",
"old_column_name",
",",
"new_column_name",
")",
")",
".",
"where",
"(",
"activity_table",
".",
"c",
".",
"table_name",
"==",
"table",
")",
")",
"return",
"conn",
".",
"execute",
"(",
"query",
")"
] | 26.034483 | 0.000638 |
def find_all(cls, vid=None, pid=None):
"""
Returns all FTDI devices matching our vendor and product IDs.
:returns: list of devices
:raises: :py:class:`~alarmdecoder.util.CommError`
"""
if not have_pyftdi:
raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')
cls.__devices = []
query = cls.PRODUCT_IDS
if vid and pid:
query = [(vid, pid)]
try:
cls.__devices = Ftdi.find_all(query, nocache=True)
except (usb.core.USBError, FtdiError) as err:
raise CommError('Error enumerating AD2USB devices: {0}'.format(str(err)), err)
return cls.__devices | [
"def",
"find_all",
"(",
"cls",
",",
"vid",
"=",
"None",
",",
"pid",
"=",
"None",
")",
":",
"if",
"not",
"have_pyftdi",
":",
"raise",
"ImportError",
"(",
"'The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.'",
")",
"cls",
".",
"__devices",
"=",
"[",
"]",
"query",
"=",
"cls",
".",
"PRODUCT_IDS",
"if",
"vid",
"and",
"pid",
":",
"query",
"=",
"[",
"(",
"vid",
",",
"pid",
")",
"]",
"try",
":",
"cls",
".",
"__devices",
"=",
"Ftdi",
".",
"find_all",
"(",
"query",
",",
"nocache",
"=",
"True",
")",
"except",
"(",
"usb",
".",
"core",
".",
"USBError",
",",
"FtdiError",
")",
"as",
"err",
":",
"raise",
"CommError",
"(",
"'Error enumerating AD2USB devices: {0}'",
".",
"format",
"(",
"str",
"(",
"err",
")",
")",
",",
"err",
")",
"return",
"cls",
".",
"__devices"
] | 31.217391 | 0.005405 |
def _validate_other(
self,
other,
axis,
numeric_only=False,
numeric_or_time_only=False,
numeric_or_object_only=False,
comparison_dtypes_only=False,
):
"""Helper method to check validity of other in inter-df operations"""
axis = self._get_axis_number(axis) if axis is not None else 1
result = other
if isinstance(other, BasePandasDataset):
return other._query_compiler
elif is_list_like(other):
if axis == 0:
if len(other) != len(self._query_compiler.index):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(len(self._query_compiler.index), len(other))
)
else:
if len(other) != len(self._query_compiler.columns):
raise ValueError(
"Unable to coerce to Series, length must be {0}: "
"given {1}".format(
len(self._query_compiler.columns), len(other)
)
)
if hasattr(other, "dtype"):
other_dtypes = [other.dtype] * len(other)
else:
other_dtypes = [type(x) for x in other]
else:
other_dtypes = [
type(other)
for _ in range(
len(self._query_compiler.index)
if axis
else len(self._query_compiler.columns)
)
]
# Do dtype checking
if numeric_only:
if not all(
is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation on non-numeric dtypes")
elif numeric_or_object_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation non-numeric dtypes")
elif comparison_dtypes_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
or is_dtype_equal(self_dtype, other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
elif numeric_or_time_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
return result | [
"def",
"_validate_other",
"(",
"self",
",",
"other",
",",
"axis",
",",
"numeric_only",
"=",
"False",
",",
"numeric_or_time_only",
"=",
"False",
",",
"numeric_or_object_only",
"=",
"False",
",",
"comparison_dtypes_only",
"=",
"False",
",",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"is",
"not",
"None",
"else",
"1",
"result",
"=",
"other",
"if",
"isinstance",
"(",
"other",
",",
"BasePandasDataset",
")",
":",
"return",
"other",
".",
"_query_compiler",
"elif",
"is_list_like",
"(",
"other",
")",
":",
"if",
"axis",
"==",
"0",
":",
"if",
"len",
"(",
"other",
")",
"!=",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"index",
")",
":",
"raise",
"ValueError",
"(",
"\"Unable to coerce to Series, length must be {0}: \"",
"\"given {1}\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"index",
")",
",",
"len",
"(",
"other",
")",
")",
")",
"else",
":",
"if",
"len",
"(",
"other",
")",
"!=",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"\"Unable to coerce to Series, length must be {0}: \"",
"\"given {1}\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"columns",
")",
",",
"len",
"(",
"other",
")",
")",
")",
"if",
"hasattr",
"(",
"other",
",",
"\"dtype\"",
")",
":",
"other_dtypes",
"=",
"[",
"other",
".",
"dtype",
"]",
"*",
"len",
"(",
"other",
")",
"else",
":",
"other_dtypes",
"=",
"[",
"type",
"(",
"x",
")",
"for",
"x",
"in",
"other",
"]",
"else",
":",
"other_dtypes",
"=",
"[",
"type",
"(",
"other",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"index",
")",
"if",
"axis",
"else",
"len",
"(",
"self",
".",
"_query_compiler",
".",
"columns",
")",
")",
"]",
"# Do dtype checking\r",
"if",
"numeric_only",
":",
"if",
"not",
"all",
"(",
"is_numeric_dtype",
"(",
"self_dtype",
")",
"and",
"is_numeric_dtype",
"(",
"other_dtype",
")",
"for",
"self_dtype",
",",
"other_dtype",
"in",
"zip",
"(",
"self",
".",
"_get_dtypes",
"(",
")",
",",
"other_dtypes",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot do operation on non-numeric dtypes\"",
")",
"elif",
"numeric_or_object_only",
":",
"if",
"not",
"all",
"(",
"(",
"is_numeric_dtype",
"(",
"self_dtype",
")",
"and",
"is_numeric_dtype",
"(",
"other_dtype",
")",
")",
"or",
"(",
"is_object_dtype",
"(",
"self_dtype",
")",
"and",
"is_object_dtype",
"(",
"other_dtype",
")",
")",
"for",
"self_dtype",
",",
"other_dtype",
"in",
"zip",
"(",
"self",
".",
"_get_dtypes",
"(",
")",
",",
"other_dtypes",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot do operation non-numeric dtypes\"",
")",
"elif",
"comparison_dtypes_only",
":",
"if",
"not",
"all",
"(",
"(",
"is_numeric_dtype",
"(",
"self_dtype",
")",
"and",
"is_numeric_dtype",
"(",
"other_dtype",
")",
")",
"or",
"(",
"is_datetime_or_timedelta_dtype",
"(",
"self_dtype",
")",
"and",
"is_datetime_or_timedelta_dtype",
"(",
"other_dtype",
")",
")",
"or",
"is_dtype_equal",
"(",
"self_dtype",
",",
"other_dtype",
")",
"for",
"self_dtype",
",",
"other_dtype",
"in",
"zip",
"(",
"self",
".",
"_get_dtypes",
"(",
")",
",",
"other_dtypes",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot do operation non-numeric objects with numeric objects\"",
")",
"elif",
"numeric_or_time_only",
":",
"if",
"not",
"all",
"(",
"(",
"is_numeric_dtype",
"(",
"self_dtype",
")",
"and",
"is_numeric_dtype",
"(",
"other_dtype",
")",
")",
"or",
"(",
"is_datetime_or_timedelta_dtype",
"(",
"self_dtype",
")",
"and",
"is_datetime_or_timedelta_dtype",
"(",
"other_dtype",
")",
")",
"for",
"self_dtype",
",",
"other_dtype",
"in",
"zip",
"(",
"self",
".",
"_get_dtypes",
"(",
")",
",",
"other_dtypes",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot do operation non-numeric objects with numeric objects\"",
")",
"return",
"result"
] | 43.085366 | 0.003874 |
def ConsultarTiposLiquidacion(self, sep="||"):
"Retorna un listado de tipos de liquidaci贸n con c贸digo y descripci贸n"
ret = self.client.consultarTiposLiquidacion(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['respuesta']
self.__analizar_errores(ret)
array = ret.get('tipoLiquidacion', [])
if sep is None:
return dict([(it['codigo'], it['descripcion']) for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigo'], it['descripcion']) for it in array] | [
"def",
"ConsultarTiposLiquidacion",
"(",
"self",
",",
"sep",
"=",
"\"||\"",
")",
":",
"ret",
"=",
"self",
".",
"client",
".",
"consultarTiposLiquidacion",
"(",
"auth",
"=",
"{",
"'token'",
":",
"self",
".",
"Token",
",",
"'sign'",
":",
"self",
".",
"Sign",
",",
"'cuit'",
":",
"self",
".",
"Cuit",
",",
"}",
",",
")",
"[",
"'respuesta'",
"]",
"self",
".",
"__analizar_errores",
"(",
"ret",
")",
"array",
"=",
"ret",
".",
"get",
"(",
"'tipoLiquidacion'",
",",
"[",
"]",
")",
"if",
"sep",
"is",
"None",
":",
"return",
"dict",
"(",
"[",
"(",
"it",
"[",
"'codigo'",
"]",
",",
"it",
"[",
"'descripcion'",
"]",
")",
"for",
"it",
"in",
"array",
"]",
")",
"else",
":",
"return",
"[",
"(",
"\"%s %%s %s %%s %s\"",
"%",
"(",
"sep",
",",
"sep",
",",
"sep",
")",
")",
"%",
"(",
"it",
"[",
"'codigo'",
"]",
",",
"it",
"[",
"'descripcion'",
"]",
")",
"for",
"it",
"in",
"array",
"]"
] | 49.071429 | 0.002857 |
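
A worked example of the two-stage string formatting used in the sep branch
above (the %% survives the first pass as a literal %s placeholder):

sep = '||'
template = '%s %%s %s %%s %s' % (sep, sep, sep)   # first pass: '|| %s || %s ||'
print(template % ('1', 'Example description'))    # '|| 1 || Example description ||'
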
def poke(self, context):
"""
Checks for existence of the partition in the AWS Glue Catalog table
"""
if '.' in self.table_name:
self.database_name, self.table_name = self.table_name.split('.')
self.log.info(
'Poking for table %s. %s, expression %s', self.database_name, self.table_name, self.expression
)
return self.get_hook().check_for_partition(
self.database_name, self.table_name, self.expression) | [
"def",
"poke",
"(",
"self",
",",
"context",
")",
":",
"if",
"'.'",
"in",
"self",
".",
"table_name",
":",
"self",
".",
"database_name",
",",
"self",
".",
"table_name",
"=",
"self",
".",
"table_name",
".",
"split",
"(",
"'.'",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Poking for table %s. %s, expression %s'",
",",
"self",
".",
"database_name",
",",
"self",
".",
"table_name",
",",
"self",
".",
"expression",
")",
"return",
"self",
".",
"get_hook",
"(",
")",
".",
"check_for_partition",
"(",
"self",
".",
"database_name",
",",
"self",
".",
"table_name",
",",
"self",
".",
"expression",
")"
] | 40.333333 | 0.006061 |
def get_name(self, language):
""" Return the name of this course """
return self.gettext(language, self._name) if self._name else "" | [
"def",
"get_name",
"(",
"self",
",",
"language",
")",
":",
"return",
"self",
".",
"gettext",
"(",
"language",
",",
"self",
".",
"_name",
")",
"if",
"self",
".",
"_name",
"else",
"\"\""
] | 48.666667 | 0.013514 |
def instanceStarted(self, *args, **kwargs):
"""
Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is given and checked to ensure
that it matches a real token that exists to ensure that random
machines do not check in. We could generate a different token
but that seems like overkill
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs) | [
"def",
"instanceStarted",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"instanceStarted\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 38 | 0.005505 |
def install_build_requires(pkg_targets):
"""Iterate through build_requires list and pip install if package is not present
accounting for version"""
def pip_install(pkg_name, pkg_vers=None):
pkg_name_version = '%s==%s' % (pkg_name, pkg_vers) if pkg_vers else pkg_name
print '[WARNING] %s not found, attempting to install using a raw "pip install" call!' % pkg_name_version
subprocess.Popen('pip install %s' % pkg_name_version, shell=True).communicate()
def get_pkg_info(pkg):
"""Get package name and version given a build_requires element"""
pkg_name, pkg_vers = None, None
if '==' in pkg:
pkg_name, pkg_vers = pkg.split('==')
else:
pkg_name = pkg.replace('>', '').replace('<', '').split('=')[0]
return pkg_name, pkg_vers
for pkg in pkg_targets:
pkg_name, pkg_vers = get_pkg_info(pkg)
try:
pkg_name_version = '%s==%s' % (pkg_name, pkg_vers) if pkg_vers else pkg_name
if pkg_vers:
version = getattr(importlib.import_module(pkg_name), '__version__')
if version != pkg_vers:
pip_install(pkg_name, pkg_vers)
else:
importlib.import_module(pkg_name)
except ImportError:
pip_install(pkg_name, pkg_vers) | [
"def",
"install_build_requires",
"(",
"pkg_targets",
")",
":",
"def",
"pip_install",
"(",
"pkg_name",
",",
"pkg_vers",
"=",
"None",
")",
":",
"pkg_name_version",
"=",
"'%s==%s'",
"%",
"(",
"pkg_name",
",",
"pkg_vers",
")",
"if",
"pkg_vers",
"else",
"pkg_name",
"print",
"'[WARNING] %s not found, attempting to install using a raw \"pip install\" call!'",
"%",
"pkg_name_version",
"subprocess",
".",
"Popen",
"(",
"'pip install %s'",
"%",
"pkg_name_version",
",",
"shell",
"=",
"True",
")",
".",
"communicate",
"(",
")",
"def",
"get_pkg_info",
"(",
"pkg",
")",
":",
"\"\"\"Get package name and version given a build_requires element\"\"\"",
"pkg_name",
",",
"pkg_vers",
"=",
"None",
",",
"None",
"if",
"'=='",
"in",
"pkg",
":",
"pkg_name",
",",
"pkg_vers",
"=",
"pkg",
".",
"split",
"(",
"'=='",
")",
"else",
":",
"pkg_name",
"=",
"pkg",
".",
"replace",
"(",
"'>'",
",",
"''",
")",
".",
"replace",
"(",
"'<'",
",",
"''",
")",
".",
"split",
"(",
"'='",
")",
"[",
"0",
"]",
"return",
"pkg_name",
",",
"pkg_vers",
"for",
"pkg",
"in",
"pkg_targets",
":",
"pkg_name",
",",
"pkg_vers",
"=",
"get_pkg_info",
"(",
"pkg",
")",
"try",
":",
"pkg_name_version",
"=",
"'%s==%s'",
"%",
"(",
"pkg_name",
",",
"pkg_vers",
")",
"if",
"pkg_vers",
"else",
"pkg_name",
"if",
"pkg_vers",
":",
"version",
"=",
"getattr",
"(",
"importlib",
".",
"import_module",
"(",
"pkg_name",
")",
",",
"'__version__'",
")",
"if",
"version",
"!=",
"pkg_vers",
":",
"pip_install",
"(",
"pkg_name",
",",
"pkg_vers",
")",
"else",
":",
"importlib",
".",
"import_module",
"(",
"pkg_name",
")",
"except",
"ImportError",
":",
"pip_install",
"(",
"pkg_name",
",",
"pkg_vers",
")"
] | 43.866667 | 0.005204 |
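
The version-spec parsing rule from get_pkg_info, restated as a standalone
sketch (the name parse_spec is illustrative):

def parse_spec(pkg):
    # '==' pins an exact version; any other specifier keeps only the name.
    if '==' in pkg:
        name, vers = pkg.split('==')
        return name, vers
    return pkg.replace('>', '').replace('<', '').split('=')[0], None

print(parse_spec('requests==2.31.0'))  # ('requests', '2.31.0')
print(parse_spec('numpy>=1.20'))       # ('numpy', None)
print(parse_spec('six'))               # ('six', None)
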
def ifaces(cls, name):
""" Get vlan attached ifaces. """
ifaces = Iface.list({'vlan_id': cls.usable_id(name)})
ret = []
for iface in ifaces:
ret.append(Iface.info(iface['id']))
return ret | [
"def",
"ifaces",
"(",
"cls",
",",
"name",
")",
":",
"ifaces",
"=",
"Iface",
".",
"list",
"(",
"{",
"'vlan_id'",
":",
"cls",
".",
"usable_id",
"(",
"name",
")",
"}",
")",
"ret",
"=",
"[",
"]",
"for",
"iface",
"in",
"ifaces",
":",
"ret",
".",
"append",
"(",
"Iface",
".",
"info",
"(",
"iface",
"[",
"'id'",
"]",
")",
")",
"return",
"ret"
] | 33.285714 | 0.008368 |
def container_new_folder(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /container-xxxx/newFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder
"""
return DXHTTPRequest('/%s/newFolder' % object_id, input_params, always_retry=always_retry, **kwargs) | [
"def",
"container_new_folder",
"(",
"object_id",
",",
"input_params",
"=",
"{",
"}",
",",
"always_retry",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"DXHTTPRequest",
"(",
"'/%s/newFolder'",
"%",
"object_id",
",",
"input_params",
",",
"always_retry",
"=",
"always_retry",
",",
"*",
"*",
"kwargs",
")"
] | 55.428571 | 0.010152 |
def get_object_from_content(entity, key):
"""Get an object from the database given an entity and the content key.
:param entity: Class type of the object to retrieve.
:param key: Array that defines the path of the value inside the message.
"""
def object_from_content_function(service, message):
"""Actual implementation of get_object_from_content function.
:param service: SelenolService object.
:param message: SelenolMessage request.
"""
id_ = get_value_from_content(key)(service, message)
result = service.session.query(entity).get(id_)
if not result:
raise SelenolInvalidArgumentException(key, id_)
return result
return object_from_content_function | [
"def",
"get_object_from_content",
"(",
"entity",
",",
"key",
")",
":",
"def",
"object_from_content_function",
"(",
"service",
",",
"message",
")",
":",
"\"\"\"Actual implementation of get_object_from_content function.\n\n :param service: SelenolService object.\n :param message: SelenolMessage request.\n \"\"\"",
"id_",
"=",
"get_value_from_content",
"(",
"key",
")",
"(",
"service",
",",
"message",
")",
"result",
"=",
"service",
".",
"session",
".",
"query",
"(",
"entity",
")",
".",
"get",
"(",
"id_",
")",
"if",
"not",
"result",
":",
"raise",
"SelenolInvalidArgumentException",
"(",
"key",
",",
"id_",
")",
"return",
"result",
"return",
"object_from_content_function"
] | 41 | 0.001325 |
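
The accessor-factory pattern above in miniature, assuming
get_value_from_content walks the given key path into the message (consistent
with how it is called here):

def get_value_from_content(key):
    def value_from_content_function(service, message):
        value = message
        for part in key:  # walk the key path, e.g. ['content', 'user_id']
            value = value[part]
        return value
    return value_from_content_function

message = {'content': {'user_id': 42}}
accessor = get_value_from_content(['content', 'user_id'])
print(accessor(None, message))  # 42
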
def gunzip(input_gzip_file, block_size=1024):
"""
Gunzips the input file to the same directory
:param input_gzip_file: File to be gunzipped
:return: path to the gunzipped file
:rtype: str
"""
assert os.path.splitext(input_gzip_file)[1] == '.gz'
assert is_gzipfile(input_gzip_file)
with gzip.open(input_gzip_file) as infile:
        with open(os.path.splitext(input_gzip_file)[0], 'wb') as outfile:
while True:
block = infile.read(block_size)
                if not block:
break
else:
outfile.write(block)
return outfile.name | [
"def",
"gunzip",
"(",
"input_gzip_file",
",",
"block_size",
"=",
"1024",
")",
":",
"assert",
"os",
".",
"path",
".",
"splitext",
"(",
"input_gzip_file",
")",
"[",
"1",
"]",
"==",
"'.gz'",
"assert",
"is_gzipfile",
"(",
"input_gzip_file",
")",
"with",
"gzip",
".",
"open",
"(",
"input_gzip_file",
")",
"as",
"infile",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"input_gzip_file",
")",
"[",
"0",
"]",
",",
"'w'",
")",
"as",
"outfile",
":",
"while",
"True",
":",
"block",
"=",
"infile",
".",
"read",
"(",
"block_size",
")",
"if",
"block",
"==",
"''",
":",
"break",
"else",
":",
"outfile",
".",
"write",
"(",
"block",
")",
"return",
"outfile",
".",
"name"
] | 33.263158 | 0.001538 |
def xmatch_cpdir_external_catalogs(cpdir,
xmatchpkl,
cpfileglob='checkplot-*.pkl*',
xmatchradiusarcsec=2.0,
updateexisting=True,
resultstodir=None):
'''This xmatches external catalogs to all checkplots in a directory.
Parameters
-----------
cpdir : str
This is the directory to search in for checkplots.
xmatchpkl : str
The filename of a pickle prepared beforehand with the
`checkplot.pkl_xmatch.load_xmatch_external_catalogs` function,
containing collected external catalogs to cross-match the objects in the
input `cplist` against.
cpfileglob : str
This is the UNIX fileglob to use in searching for checkplots.
xmatchradiusarcsec : float
The match radius to use for the cross-match in arcseconds.
updateexisting : bool
If this is True, will only update the `xmatch` dict in each checkplot
pickle with any new cross-matches to the external catalogs. If False,
will overwrite the `xmatch` dict with results from the current run.
resultstodir : str or None
If this is provided, then it must be a directory to write the resulting
checkplots to after xmatch is done. This can be used to keep the
original checkplots in pristine condition for some reason.
Returns
-------
dict
Returns a dict with keys = input checkplot pickle filenames and vals =
xmatch status dict for each checkplot pickle.
'''
cplist = glob.glob(os.path.join(cpdir, cpfileglob))
return xmatch_cplist_external_catalogs(
cplist,
xmatchpkl,
xmatchradiusarcsec=xmatchradiusarcsec,
updateexisting=updateexisting,
resultstodir=resultstodir
) | [
"def",
"xmatch_cpdir_external_catalogs",
"(",
"cpdir",
",",
"xmatchpkl",
",",
"cpfileglob",
"=",
"'checkplot-*.pkl*'",
",",
"xmatchradiusarcsec",
"=",
"2.0",
",",
"updateexisting",
"=",
"True",
",",
"resultstodir",
"=",
"None",
")",
":",
"cplist",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"cpdir",
",",
"cpfileglob",
")",
")",
"return",
"xmatch_cplist_external_catalogs",
"(",
"cplist",
",",
"xmatchpkl",
",",
"xmatchradiusarcsec",
"=",
"xmatchradiusarcsec",
",",
"updateexisting",
"=",
"updateexisting",
",",
"resultstodir",
"=",
"resultstodir",
")"
] | 34.444444 | 0.001045 |
def parse_updates(rule):
'''
Parse the updates line
'''
rules = shlex.split(rule)
rules.pop(0)
return {'url': rules[0]} if rules else True | [
"def",
"parse_updates",
"(",
"rule",
")",
":",
"rules",
"=",
"shlex",
".",
"split",
"(",
"rule",
")",
"rules",
".",
"pop",
"(",
"0",
")",
"return",
"{",
"'url'",
":",
"rules",
"[",
"0",
"]",
"}",
"if",
"rules",
"else",
"True"
] | 22.285714 | 0.006173 |
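
Behavior sketch: shlex.split honors shell quoting, the leading keyword is
dropped, and any remaining token becomes the url:

import shlex

for rule in ('updates "http://example.com/feed"', 'updates'):
    rules = shlex.split(rule)  # quoting-aware split
    rules.pop(0)               # drop the leading keyword
    print({'url': rules[0]} if rules else True)
# -> {'url': 'http://example.com/feed'}
# -> True
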
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR | [
"async",
"def",
"genKeys",
"(",
"self",
",",
"schemaId",
":",
"ID",
",",
"p_prime",
"=",
"None",
",",
"q_prime",
"=",
"None",
")",
"->",
"(",
"PublicKey",
",",
"RevocationPublicKey",
")",
":",
"pk",
",",
"sk",
"=",
"await",
"self",
".",
"_primaryIssuer",
".",
"genKeys",
"(",
"schemaId",
",",
"p_prime",
",",
"q_prime",
")",
"pkR",
",",
"skR",
"=",
"await",
"self",
".",
"_nonRevocationIssuer",
".",
"genRevocationKeys",
"(",
")",
"pk",
"=",
"await",
"self",
".",
"wallet",
".",
"submitPublicKeys",
"(",
"schemaId",
"=",
"schemaId",
",",
"pk",
"=",
"pk",
",",
"pkR",
"=",
"pkR",
")",
"pkR",
"=",
"await",
"self",
".",
"wallet",
".",
"submitSecretKeys",
"(",
"schemaId",
"=",
"schemaId",
",",
"sk",
"=",
"sk",
",",
"skR",
"=",
"skR",
")",
"return",
"pk",
",",
"pkR"
] | 48.263158 | 0.002139 |
def remove(self, cls, originalMemberNameList, classNamingConvention):
"""
:type cls: type
:type originalMemberNameList: list(str)
:type classNamingConvention: INamingConvention
"""
self._memberDelegate.remove(cls = cls,
originalMemberNameList = originalMemberNameList,
memberName = self._memberName,
classNamingConvention = classNamingConvention) | [
"def",
"remove",
"(",
"self",
",",
"cls",
",",
"originalMemberNameList",
",",
"classNamingConvention",
")",
":",
"self",
".",
"_memberDelegate",
".",
"remove",
"(",
"cls",
"=",
"cls",
",",
"originalMemberNameList",
"=",
"originalMemberNameList",
",",
"memberName",
"=",
"self",
".",
"_memberName",
",",
"classNamingConvention",
"=",
"classNamingConvention",
")"
] | 47.3 | 0.024896 |
def get_threats_update(self, client_state):
"""Fetch hash prefixes update for given threat list.
client_state is a dict which looks like {(threatType, platformType, threatEntryType): clientState}
"""
request_body = {
"client": {
"clientId": self.client_id,
"clientVersion": self.client_version,
},
"listUpdateRequests": [],
}
for (threat_type, platform_type, threat_entry_type), current_state in client_state.items():
request_body['listUpdateRequests'].append(
{
"threatType": threat_type,
"platformType": platform_type,
"threatEntryType": threat_entry_type,
"state": current_state,
"constraints": {
"supportedCompressions": ["RAW"]
}
}
)
response = self.service.threatListUpdates().fetch(body=request_body).execute()
self.set_wait_duration(response.get('minimumWaitDuration'))
return response['listUpdateResponses'] | [
"def",
"get_threats_update",
"(",
"self",
",",
"client_state",
")",
":",
"request_body",
"=",
"{",
"\"client\"",
":",
"{",
"\"clientId\"",
":",
"self",
".",
"client_id",
",",
"\"clientVersion\"",
":",
"self",
".",
"client_version",
",",
"}",
",",
"\"listUpdateRequests\"",
":",
"[",
"]",
",",
"}",
"for",
"(",
"threat_type",
",",
"platform_type",
",",
"threat_entry_type",
")",
",",
"current_state",
"in",
"client_state",
".",
"items",
"(",
")",
":",
"request_body",
"[",
"'listUpdateRequests'",
"]",
".",
"append",
"(",
"{",
"\"threatType\"",
":",
"threat_type",
",",
"\"platformType\"",
":",
"platform_type",
",",
"\"threatEntryType\"",
":",
"threat_entry_type",
",",
"\"state\"",
":",
"current_state",
",",
"\"constraints\"",
":",
"{",
"\"supportedCompressions\"",
":",
"[",
"\"RAW\"",
"]",
"}",
"}",
")",
"response",
"=",
"self",
".",
"service",
".",
"threatListUpdates",
"(",
")",
".",
"fetch",
"(",
"body",
"=",
"request_body",
")",
".",
"execute",
"(",
")",
"self",
".",
"set_wait_duration",
"(",
"response",
".",
"get",
"(",
"'minimumWaitDuration'",
")",
")",
"return",
"response",
"[",
"'listUpdateResponses'",
"]"
] | 41.888889 | 0.004322 |
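
Shape of the client_state argument expected above; the state values are
opaque strings returned by a previous update (the values below are
placeholders, and an empty string requests a full update):

client_state = {
    ('MALWARE', 'ANY_PLATFORM', 'URL'): '',
    ('SOCIAL_ENGINEERING', 'ANY_PLATFORM', 'URL'): 'Ch4AGB0qBg==',  # placeholder
}
# responses = sbl.get_threats_update(client_state)  # sbl: a client instance
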
def extract_file_name(content_dispo):
"""Extract file name from the input request body"""
# print type(content_dispo)
# print repr(content_dispo)
    # conversion of escape string (str type) from server
# to unicode object
content_dispo = content_dispo.decode('unicode-escape').strip('"')
file_name = ""
for key_val in content_dispo.split(';'):
param = key_val.strip().split('=')
if param[0] == "filename":
file_name = param[1].strip('"')
break
return file_name | [
"def",
"extract_file_name",
"(",
"content_dispo",
")",
":",
"# print type(content_dispo)",
"# print repr(content_dispo)",
"# convertion of escape string (str type) from server",
"# to unicode object",
"content_dispo",
"=",
"content_dispo",
".",
"decode",
"(",
"'unicode-escape'",
")",
".",
"strip",
"(",
"'\"'",
")",
"file_name",
"=",
"\"\"",
"for",
"key_val",
"in",
"content_dispo",
".",
"split",
"(",
"';'",
")",
":",
"param",
"=",
"key_val",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'='",
")",
"if",
"param",
"[",
"0",
"]",
"==",
"\"filename\"",
":",
"file_name",
"=",
"param",
"[",
"1",
"]",
".",
"strip",
"(",
"'\"'",
")",
"break",
"return",
"file_name"
] | 37.142857 | 0.001876 |
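
A quick check of the parsing above, run alongside the definition; the
unicode-escape decode targets the escaped bytes the server sends, so under
Python 3 the input should be bytes:

header = b'attachment; filename="data.txt"; size=1024'
print(extract_file_name(header))  # data.txt
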
def get_open_trackers_from_local():
"""Returns open trackers announce URLs list from local backup."""
with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f:
open_trackers = map(str.strip, f.readlines())
return list(open_trackers) | [
"def",
"get_open_trackers_from_local",
"(",
")",
":",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'repo'",
",",
"OPEN_TRACKERS_FILENAME",
")",
")",
"as",
"f",
":",
"open_trackers",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"f",
".",
"readlines",
"(",
")",
")",
"return",
"list",
"(",
"open_trackers",
")"
] | 45.5 | 0.007194 |
def _transformBy(self, matrix, **kwargs):
"""
This is the environment implementation of
:meth:`BaseGuideline.transformBy`.
**matrix** will be a :ref:`type-transformation`.
that has been normalized with :func:`normalizers.normalizeTransformationMatrix`.
Subclasses may override this method.
"""
t = transform.Transform(*matrix)
# coordinates
x, y = t.transformPoint((self.x, self.y))
self.x = x
self.y = y
# angle
angle = math.radians(-self.angle)
dx = math.cos(angle)
dy = math.sin(angle)
tdx, tdy = t.transformPoint((dx, dy))
ta = math.atan2(tdy - t[5], tdx - t[4])
self.angle = -math.degrees(ta) | [
"def",
"_transformBy",
"(",
"self",
",",
"matrix",
",",
"*",
"*",
"kwargs",
")",
":",
"t",
"=",
"transform",
".",
"Transform",
"(",
"*",
"matrix",
")",
"# coordinates",
"x",
",",
"y",
"=",
"t",
".",
"transformPoint",
"(",
"(",
"self",
".",
"x",
",",
"self",
".",
"y",
")",
")",
"self",
".",
"x",
"=",
"x",
"self",
".",
"y",
"=",
"y",
"# angle",
"angle",
"=",
"math",
".",
"radians",
"(",
"-",
"self",
".",
"angle",
")",
"dx",
"=",
"math",
".",
"cos",
"(",
"angle",
")",
"dy",
"=",
"math",
".",
"sin",
"(",
"angle",
")",
"tdx",
",",
"tdy",
"=",
"t",
".",
"transformPoint",
"(",
"(",
"dx",
",",
"dy",
")",
")",
"ta",
"=",
"math",
".",
"atan2",
"(",
"tdy",
"-",
"t",
"[",
"5",
"]",
",",
"tdx",
"-",
"t",
"[",
"4",
"]",
")",
"self",
".",
"angle",
"=",
"-",
"math",
".",
"degrees",
"(",
"ta",
")"
] | 33.181818 | 0.003995 |
def get_check_digit(unchecked):
"""returns the check digit of the card number."""
digits = digits_of(unchecked)
checksum = sum(even_digits(unchecked)) + sum([
sum(digits_of(2 * d)) for d in odd_digits(unchecked)])
return 9 * checksum % 10 | [
"def",
"get_check_digit",
"(",
"unchecked",
")",
":",
"digits",
"=",
"digits_of",
"(",
"unchecked",
")",
"checksum",
"=",
"sum",
"(",
"even_digits",
"(",
"unchecked",
")",
")",
"+",
"sum",
"(",
"[",
"sum",
"(",
"digits_of",
"(",
"2",
"*",
"d",
")",
")",
"for",
"d",
"in",
"odd_digits",
"(",
"unchecked",
")",
"]",
")",
"return",
"9",
"*",
"checksum",
"%",
"10"
] | 42.833333 | 0.003817 |
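
A self-contained Luhn sketch matching the arithmetic above (digits_of,
even_digits and odd_digits are not shown in the source; here it is assumed
they select digits by position from the right, with odd positions doubled,
per the standard Luhn algorithm):

def luhn_check_digit(number):
    digits = [int(d) for d in str(number)][::-1]
    odd = digits[0::2]    # positions 1, 3, 5, ... from the right
    even = digits[1::2]   # positions 2, 4, 6, ...
    # sum(divmod(2*d, 10)) is the digit sum of the doubled digit (2*d <= 18)
    checksum = sum(even) + sum(sum(divmod(2 * d, 10)) for d in odd)
    return 9 * checksum % 10

print(luhn_check_digit(7992739871))  # 3 -> full card number 79927398713
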
def import_apps_submodule(submodule):
"""
    Look for a submodule in a series of packages, e.g. ".pagetype_plugins" in all INSTALLED_APPS.
"""
found_apps = []
for appconfig in apps.get_app_configs():
app = appconfig.name
if import_module_or_none('{0}.{1}'.format(app, submodule)) is not None:
found_apps.append(app)
return found_apps | [
"def",
"import_apps_submodule",
"(",
"submodule",
")",
":",
"found_apps",
"=",
"[",
"]",
"for",
"appconfig",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
":",
"app",
"=",
"appconfig",
".",
"name",
"if",
"import_module_or_none",
"(",
"'{0}.{1}'",
".",
"format",
"(",
"app",
",",
"submodule",
")",
")",
"is",
"not",
"None",
":",
"found_apps",
".",
"append",
"(",
"app",
")",
"return",
"found_apps"
] | 33.909091 | 0.005222 |
def redirect(self, redirect_url=None, message=None, level="info"):
"""Redirect with a message
"""
if redirect_url is None:
redirect_url = self.back_url
if message is not None:
self.add_status_message(message, level)
return self.request.response.redirect(redirect_url) | [
"def",
"redirect",
"(",
"self",
",",
"redirect_url",
"=",
"None",
",",
"message",
"=",
"None",
",",
"level",
"=",
"\"info\"",
")",
":",
"if",
"redirect_url",
"is",
"None",
":",
"redirect_url",
"=",
"self",
".",
"back_url",
"if",
"message",
"is",
"not",
"None",
":",
"self",
".",
"add_status_message",
"(",
"message",
",",
"level",
")",
"return",
"self",
".",
"request",
".",
"response",
".",
"redirect",
"(",
"redirect_url",
")"
] | 40.5 | 0.006042 |
def _handle_waited_log(self, event: dict):
""" A subroutine of handle_log
Increment self.event_count, forget about waiting, and call the callback if any.
"""
txn_hash = event['transactionHash']
event_name = event['event']
assert event_name in self.event_waiting
assert txn_hash in self.event_waiting[event_name]
self.event_count[event_name][txn_hash] += 1
event_entry = self.event_waiting[event_name][txn_hash]
if event_entry.count == self.event_count[event_name][txn_hash]:
self.event_waiting[event_name].pop(txn_hash)
# Call callback function with event
if event_entry.callback:
event_entry.callback(event) | [
"def",
"_handle_waited_log",
"(",
"self",
",",
"event",
":",
"dict",
")",
":",
"txn_hash",
"=",
"event",
"[",
"'transactionHash'",
"]",
"event_name",
"=",
"event",
"[",
"'event'",
"]",
"assert",
"event_name",
"in",
"self",
".",
"event_waiting",
"assert",
"txn_hash",
"in",
"self",
".",
"event_waiting",
"[",
"event_name",
"]",
"self",
".",
"event_count",
"[",
"event_name",
"]",
"[",
"txn_hash",
"]",
"+=",
"1",
"event_entry",
"=",
"self",
".",
"event_waiting",
"[",
"event_name",
"]",
"[",
"txn_hash",
"]",
"if",
"event_entry",
".",
"count",
"==",
"self",
".",
"event_count",
"[",
"event_name",
"]",
"[",
"txn_hash",
"]",
":",
"self",
".",
"event_waiting",
"[",
"event_name",
"]",
".",
"pop",
"(",
"txn_hash",
")",
"# Call callback function with event",
"if",
"event_entry",
".",
"callback",
":",
"event_entry",
".",
"callback",
"(",
"event",
")"
] | 39.666667 | 0.004104 |
def to_bb(YY, y="deprecated"):
"""Convert mask YY to a bounding box, assumes 0 as background nonzero object"""
cols,rows = np.nonzero(YY)
if len(cols)==0: return np.zeros(4, dtype=np.float32)
top_row = np.min(rows)
left_col = np.min(cols)
bottom_row = np.max(rows)
right_col = np.max(cols)
return np.array([left_col, top_row, right_col, bottom_row], dtype=np.float32) | [
"def",
"to_bb",
"(",
"YY",
",",
"y",
"=",
"\"deprecated\"",
")",
":",
"cols",
",",
"rows",
"=",
"np",
".",
"nonzero",
"(",
"YY",
")",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"return",
"np",
".",
"zeros",
"(",
"4",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"top_row",
"=",
"np",
".",
"min",
"(",
"rows",
")",
"left_col",
"=",
"np",
".",
"min",
"(",
"cols",
")",
"bottom_row",
"=",
"np",
".",
"max",
"(",
"rows",
")",
"right_col",
"=",
"np",
".",
"max",
"(",
"cols",
")",
"return",
"np",
".",
"array",
"(",
"[",
"left_col",
",",
"top_row",
",",
"right_col",
",",
"bottom_row",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")"
] | 43.444444 | 0.015038 |
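
A behavior check of to_bb, run alongside the definition above: np.nonzero
returns (row_indices, col_indices), so despite the variable names the result
comes back in (top_row, left_col, bottom_row, right_col) order:

import numpy as np

mask = np.zeros((5, 5))
mask[1:3, 2:5] = 1   # object covers rows 1-2, cols 2-4
print(to_bb(mask))   # [1. 2. 2. 4.] -> top=1, left=2, bottom=2, right=4
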
def output_barplot(df, figformat, path, title=None, palette=None):
"""Create barplots based on number of reads and total sum of nucleotides sequenced."""
logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
title="Comparing number of reads")
ax = sns.countplot(x="dataset",
data=df,
palette=palette)
ax.set(ylabel='Number of reads',
title=title or read_count.title)
plt.xticks(rotation=30, ha='center')
read_count.fig = ax.get_figure()
read_count.save(format=figformat)
plt.close("all")
throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
title="Comparing throughput in gigabases")
if "aligned_lengths" in df:
throughput = df.groupby('dataset')['aligned_lengths'].sum()
ylabel = 'Total gigabase aligned'
else:
throughput = df.groupby('dataset')['lengths'].sum()
ylabel = 'Total gigabase sequenced'
ax = sns.barplot(x=list(throughput.index),
y=throughput / 1e9,
palette=palette,
order=df["dataset"].unique())
ax.set(ylabel=ylabel,
title=title or throughput_bases.title)
plt.xticks(rotation=30, ha='center')
throughput_bases.fig = ax.get_figure()
throughput_bases.save(format=figformat)
plt.close("all")
return read_count, throughput_bases | [
"def",
"output_barplot",
"(",
"df",
",",
"figformat",
",",
"path",
",",
"title",
"=",
"None",
",",
"palette",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"\"Nanoplotter: Creating barplots for number of reads and total throughput.\"",
")",
"read_count",
"=",
"Plot",
"(",
"path",
"=",
"path",
"+",
"\"NanoComp_number_of_reads.\"",
"+",
"figformat",
",",
"title",
"=",
"\"Comparing number of reads\"",
")",
"ax",
"=",
"sns",
".",
"countplot",
"(",
"x",
"=",
"\"dataset\"",
",",
"data",
"=",
"df",
",",
"palette",
"=",
"palette",
")",
"ax",
".",
"set",
"(",
"ylabel",
"=",
"'Number of reads'",
",",
"title",
"=",
"title",
"or",
"read_count",
".",
"title",
")",
"plt",
".",
"xticks",
"(",
"rotation",
"=",
"30",
",",
"ha",
"=",
"'center'",
")",
"read_count",
".",
"fig",
"=",
"ax",
".",
"get_figure",
"(",
")",
"read_count",
".",
"save",
"(",
"format",
"=",
"figformat",
")",
"plt",
".",
"close",
"(",
"\"all\"",
")",
"throughput_bases",
"=",
"Plot",
"(",
"path",
"=",
"path",
"+",
"\"NanoComp_total_throughput.\"",
"+",
"figformat",
",",
"title",
"=",
"\"Comparing throughput in gigabases\"",
")",
"if",
"\"aligned_lengths\"",
"in",
"df",
":",
"throughput",
"=",
"df",
".",
"groupby",
"(",
"'dataset'",
")",
"[",
"'aligned_lengths'",
"]",
".",
"sum",
"(",
")",
"ylabel",
"=",
"'Total gigabase aligned'",
"else",
":",
"throughput",
"=",
"df",
".",
"groupby",
"(",
"'dataset'",
")",
"[",
"'lengths'",
"]",
".",
"sum",
"(",
")",
"ylabel",
"=",
"'Total gigabase sequenced'",
"ax",
"=",
"sns",
".",
"barplot",
"(",
"x",
"=",
"list",
"(",
"throughput",
".",
"index",
")",
",",
"y",
"=",
"throughput",
"/",
"1e9",
",",
"palette",
"=",
"palette",
",",
"order",
"=",
"df",
"[",
"\"dataset\"",
"]",
".",
"unique",
"(",
")",
")",
"ax",
".",
"set",
"(",
"ylabel",
"=",
"ylabel",
",",
"title",
"=",
"title",
"or",
"throughput_bases",
".",
"title",
")",
"plt",
".",
"xticks",
"(",
"rotation",
"=",
"30",
",",
"ha",
"=",
"'center'",
")",
"throughput_bases",
".",
"fig",
"=",
"ax",
".",
"get_figure",
"(",
")",
"throughput_bases",
".",
"save",
"(",
"format",
"=",
"figformat",
")",
"plt",
".",
"close",
"(",
"\"all\"",
")",
"return",
"read_count",
",",
"throughput_bases"
] | 44.911765 | 0.002564 |
def load_frequencyseries(path, group=None):
"""
Load a FrequencySeries from a .hdf, .txt or .npy file. The
default data types will be double precision floating point.
Parameters
----------
path: string
        source file path. Must end with .npy, .txt or .hdf.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
        If path does not end in .npy, .txt or .hdf.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
f = h5py.File(path, 'r')
data = f[key][:]
series = FrequencySeries(data, delta_f=f[key].attrs['delta_f'],
epoch=f[key].attrs['epoch'])
f.close()
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
if data.ndim == 2:
delta_f = (data[-1][0] - data[0][0]) / (len(data)-1)
epoch = _lal.LIGOTimeGPS(data[0][0])
return FrequencySeries(data[:,1], delta_f=delta_f, epoch=epoch)
elif data.ndim == 3:
delta_f = (data[-1][0] - data[0][0]) / (len(data)-1)
epoch = _lal.LIGOTimeGPS(data[0][0])
return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f,
epoch=epoch)
else:
raise ValueError('File has %s dimensions, cannot convert to Array, \
must be 2 (real) or 3 (complex)' % data.ndim) | [
"def",
"load_frequencyseries",
"(",
"path",
",",
"group",
"=",
"None",
")",
":",
"ext",
"=",
"_os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
"if",
"ext",
"==",
"'.npy'",
":",
"data",
"=",
"_numpy",
".",
"load",
"(",
"path",
")",
"elif",
"ext",
"==",
"'.txt'",
":",
"data",
"=",
"_numpy",
".",
"loadtxt",
"(",
"path",
")",
"elif",
"ext",
"==",
"'.hdf'",
":",
"key",
"=",
"'data'",
"if",
"group",
"is",
"None",
"else",
"group",
"f",
"=",
"h5py",
".",
"File",
"(",
"path",
",",
"'r'",
")",
"data",
"=",
"f",
"[",
"key",
"]",
"[",
":",
"]",
"series",
"=",
"FrequencySeries",
"(",
"data",
",",
"delta_f",
"=",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"'delta_f'",
"]",
",",
"epoch",
"=",
"f",
"[",
"key",
"]",
".",
"attrs",
"[",
"'epoch'",
"]",
")",
"f",
".",
"close",
"(",
")",
"return",
"series",
"else",
":",
"raise",
"ValueError",
"(",
"'Path must end with .npy, .hdf, or .txt'",
")",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"delta_f",
"=",
"(",
"data",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"-",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"/",
"(",
"len",
"(",
"data",
")",
"-",
"1",
")",
"epoch",
"=",
"_lal",
".",
"LIGOTimeGPS",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"return",
"FrequencySeries",
"(",
"data",
"[",
":",
",",
"1",
"]",
",",
"delta_f",
"=",
"delta_f",
",",
"epoch",
"=",
"epoch",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"delta_f",
"=",
"(",
"data",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"-",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"/",
"(",
"len",
"(",
"data",
")",
"-",
"1",
")",
"epoch",
"=",
"_lal",
".",
"LIGOTimeGPS",
"(",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"return",
"FrequencySeries",
"(",
"data",
"[",
":",
",",
"1",
"]",
"+",
"1j",
"*",
"data",
"[",
":",
",",
"2",
"]",
",",
"delta_f",
"=",
"delta_f",
",",
"epoch",
"=",
"epoch",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'File has %s dimensions, cannot convert to Array, \\\n must be 2 (real) or 3 (complex)'",
"%",
"data",
".",
"ndim",
")"
] | 34.510638 | 0.005995 |
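
A round-trip sketch for the .txt branch (assumes a PyCBC environment with LAL
available; the file name is illustrative):

import numpy as np

freqs = np.arange(5) * 0.25
vals = np.sin(freqs)
np.savetxt('example_fs.txt', np.column_stack([freqs, vals]))
fs = load_frequencyseries('example_fs.txt')
print(fs.delta_f)  # 0.25
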
def _setup_language_variables(self):
"""Check for availability of corpora for a language.
TODO: Make the selection of available languages dynamic from dirs
within ``corpora`` which contain a ``corpora.py`` file.
"""
if self.language not in AVAILABLE_LANGUAGES:
# If no official repos, check if user has custom
user_defined_corpora = self._check_distributed_corpora_file()
if user_defined_corpora:
return user_defined_corpora
else:
msg = 'Corpora not available (either core or user-defined) for the "{}" language.'.format(self.language)
logger.info(msg)
raise CorpusImportError(msg)
else:
user_defined_corpora = self._check_distributed_corpora_file()
return user_defined_corpora | [
"def",
"_setup_language_variables",
"(",
"self",
")",
":",
"if",
"self",
".",
"language",
"not",
"in",
"AVAILABLE_LANGUAGES",
":",
"# If no official repos, check if user has custom",
"user_defined_corpora",
"=",
"self",
".",
"_check_distributed_corpora_file",
"(",
")",
"if",
"user_defined_corpora",
":",
"return",
"user_defined_corpora",
"else",
":",
"msg",
"=",
"'Corpora not available (either core or user-defined) for the \"{}\" language.'",
".",
"format",
"(",
"self",
".",
"language",
")",
"logger",
".",
"info",
"(",
"msg",
")",
"raise",
"CorpusImportError",
"(",
"msg",
")",
"else",
":",
"user_defined_corpora",
"=",
"self",
".",
"_check_distributed_corpora_file",
"(",
")",
"return",
"user_defined_corpora"
] | 49.705882 | 0.003484 |
def calcu0(self,E,Lz):
"""
NAME:
calcu0
PURPOSE:
calculate the minimum of the u potential
INPUT:
E - energy
Lz - angular momentum
OUTPUT:
u0
HISTORY:
2012-11-29 - Written - Bovy (IAS)
"""
logu0= optimize.brent(_u0Eq,
args=(self._delta,self._pot,
E,Lz**2./2.))
return numpy.exp(logu0) | [
"def",
"calcu0",
"(",
"self",
",",
"E",
",",
"Lz",
")",
":",
"logu0",
"=",
"optimize",
".",
"brent",
"(",
"_u0Eq",
",",
"args",
"=",
"(",
"self",
".",
"_delta",
",",
"self",
".",
"_pot",
",",
"E",
",",
"Lz",
"**",
"2.",
"/",
"2.",
")",
")",
"return",
"numpy",
".",
"exp",
"(",
"logu0",
")"
] | 27.611111 | 0.015564 |