code stringlengths 70 11.9k | docstring stringlengths 4 7.08k | text stringlengths 128 15k |
|---|---|---|
def delete_roles(apps, schema_editor):
SystemWideEnterpriseRole = apps.get_model(, )
SystemWideEnterpriseRole.objects.filter(
name__in=[ENTERPRISE_OPERATOR_ROLE]
).delete() | Delete the enterprise roles. | ### Input:
Delete the enterprise roles.
### Response:
def delete_roles(apps, schema_editor):
SystemWideEnterpriseRole = apps.get_model(, )
SystemWideEnterpriseRole.objects.filter(
name__in=[ENTERPRISE_OPERATOR_ROLE]
).delete() |
def has_rotational(self):
for member in self.get_members():
if member.physical_drives.has_rotational:
return True
return False | Return true if any of the drive under ArrayControllers is ssd | ### Input:
Return true if any of the drive under ArrayControllers is ssd
### Response:
def has_rotational(self):
for member in self.get_members():
if member.physical_drives.has_rotational:
return True
return False |
def get_location(self, filename, position):
f = self.get_file(filename)
if isinstance(position, int):
return SourceLocation.from_offset(self, f, position)
return SourceLocation.from_position(self, f, position[0], position[1]) | Obtain a SourceLocation for a file in this translation unit.
The position can be specified by passing:
- Integer file offset. Initial file offset is 0.
- 2-tuple of (line number, column number). Initial file position is
(0, 0) | ### Input:
Obtain a SourceLocation for a file in this translation unit.
The position can be specified by passing:
- Integer file offset. Initial file offset is 0.
- 2-tuple of (line number, column number). Initial file position is
(0, 0)
### Response:
def get_location(self, filename, position):
f = self.get_file(filename)
if isinstance(position, int):
return SourceLocation.from_offset(self, f, position)
return SourceLocation.from_position(self, f, position[0], position[1]) |
def pushbullet(body, apikey, device, title="JCVI: Job Monitor", type="note"):
import base64
headers = {}
auth = base64.encodestring("{0}:".format(apikey)).strip()
headers[] = "Basic {0}".format(auth)
headers[] = "application/x-www-form-urlencoded"
conn = HTTPSConnection("api.pushbullet.com".format(apikey))
conn.request("POST", "/api/pushes",
urlencode({
"iden": device,
"type": "note",
"title": title,
"body": body,
}), headers)
conn.getresponse() | pushbullet.com API
<https://www.pushbullet.com/api> | ### Input:
pushbullet.com API
<https://www.pushbullet.com/api>
### Response:
def pushbullet(body, apikey, device, title="JCVI: Job Monitor", type="note"):
import base64
headers = {}
auth = base64.encodestring("{0}:".format(apikey)).strip()
headers[] = "Basic {0}".format(auth)
headers[] = "application/x-www-form-urlencoded"
conn = HTTPSConnection("api.pushbullet.com".format(apikey))
conn.request("POST", "/api/pushes",
urlencode({
"iden": device,
"type": "note",
"title": title,
"body": body,
}), headers)
conn.getresponse() |
def matches(self, address, name=None):
if self.controller:
return address == 8
return self.address == address | Check if this slot identifier matches the given tile.
Matching can happen either by address or by module name (not currently implemented).
Returns:
bool: True if there is a match, otherwise False. | ### Input:
Check if this slot identifier matches the given tile.
Matching can happen either by address or by module name (not currently implemented).
Returns:
bool: True if there is a match, otherwise False.
### Response:
def matches(self, address, name=None):
if self.controller:
return address == 8
return self.address == address |
def getFilesystemStats(self, fs):
if self._mapFSpathDev is None:
self._initFilesystemInfo()
return self._diskStats.get(self._mapFSpathDev.get(fs)) | Returns I/O stats for filesystem.
@param fs: Filesystem path.
@return: Dict of stats. | ### Input:
Returns I/O stats for filesystem.
@param fs: Filesystem path.
@return: Dict of stats.
### Response:
def getFilesystemStats(self, fs):
if self._mapFSpathDev is None:
self._initFilesystemInfo()
return self._diskStats.get(self._mapFSpathDev.get(fs)) |
def _object_table(self, object_id):
if not isinstance(object_id, ray.ObjectID):
object_id = ray.ObjectID(hex_to_binary(object_id))
message = self._execute_command(object_id, "RAY.TABLE_LOOKUP",
ray.gcs_utils.TablePrefix.OBJECT, "",
object_id.binary())
if message is None:
return {}
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
message, 0)
assert gcs_entry.EntriesLength() > 0
entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(
gcs_entry.Entries(0), 0)
object_info = {
"DataSize": entry.ObjectSize(),
"Manager": entry.Manager(),
}
return object_info | Fetch and parse the object table information for a single object ID.
Args:
object_id: An object ID to get information about.
Returns:
A dictionary with information about the object ID in question. | ### Input:
Fetch and parse the object table information for a single object ID.
Args:
object_id: An object ID to get information about.
Returns:
A dictionary with information about the object ID in question.
### Response:
def _object_table(self, object_id):
if not isinstance(object_id, ray.ObjectID):
object_id = ray.ObjectID(hex_to_binary(object_id))
message = self._execute_command(object_id, "RAY.TABLE_LOOKUP",
ray.gcs_utils.TablePrefix.OBJECT, "",
object_id.binary())
if message is None:
return {}
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
message, 0)
assert gcs_entry.EntriesLength() > 0
entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(
gcs_entry.Entries(0), 0)
object_info = {
"DataSize": entry.ObjectSize(),
"Manager": entry.Manager(),
}
return object_info |
def get_current_version(self, increment=None):
ver = (
self.get_tagged_version()
or str(self.get_next_version(increment)) +
)
return str(ver) | Return as a string the version of the current state of the
repository -- a tagged version, if present, or the next version
based on prior tagged releases. | ### Input:
Return as a string the version of the current state of the
repository -- a tagged version, if present, or the next version
based on prior tagged releases.
### Response:
def get_current_version(self, increment=None):
ver = (
self.get_tagged_version()
or str(self.get_next_version(increment)) +
)
return str(ver) |
def timestamp_localize(value):
if isinstance(value, datetime.datetime):
if not value.tzinfo:
value = pytz.UTC.localize(value)
else:
value = value.astimezone(pytz.UTC)
value = calendar.timegm(value.timetuple()) + \
value.microsecond / 1e6
return value | Save timestamp as utc
:param value: Timestamp (in UTC or with tz_info)
:type value: float | datetime.datetime
:return: Localized timestamp
:rtype: float | ### Input:
Save timestamp as utc
:param value: Timestamp (in UTC or with tz_info)
:type value: float | datetime.datetime
:return: Localized timestamp
:rtype: float
### Response:
def timestamp_localize(value):
if isinstance(value, datetime.datetime):
if not value.tzinfo:
value = pytz.UTC.localize(value)
else:
value = value.astimezone(pytz.UTC)
value = calendar.timegm(value.timetuple()) + \
value.microsecond / 1e6
return value |
def cipher (self, xl, xr, direction):
if direction == self.ENCRYPT:
for i in range (16):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[16]
xl = xl ^ self.p_boxes[17]
else:
for i in range (17, 1, -1):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[1]
xl = xl ^ self.p_boxes[0]
return xl, xr | Encryption primitive | ### Input:
Encryption primitive
### Response:
def cipher (self, xl, xr, direction):
if direction == self.ENCRYPT:
for i in range (16):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[16]
xl = xl ^ self.p_boxes[17]
else:
for i in range (17, 1, -1):
xl = xl ^ self.p_boxes[i]
xr = self.__round_func (xl) ^ xr
xl, xr = xr, xl
xl, xr = xr, xl
xr = xr ^ self.p_boxes[1]
xl = xl ^ self.p_boxes[0]
return xl, xr |
def encrypt(passwd):
m = sha1()
salt = hexlify(os.urandom(salt_len))
m.update(unicode2bytes(passwd) + salt)
crypted = bytes2unicode(salt) + m.hexdigest()
return crypted | Encrypts the incoming password after adding some salt to store
it in the database.
@param passwd: password portion of user credentials
@type passwd: string
@returns: encrypted/salted string | ### Input:
Encrypts the incoming password after adding some salt to store
it in the database.
@param passwd: password portion of user credentials
@type passwd: string
@returns: encrypted/salted string
### Response:
def encrypt(passwd):
m = sha1()
salt = hexlify(os.urandom(salt_len))
m.update(unicode2bytes(passwd) + salt)
crypted = bytes2unicode(salt) + m.hexdigest()
return crypted |
def migrate_connections(new_data_path: str):
dest_connections = os.path.join(
new_data_path, , , )
os.makedirs(dest_connections, exist_ok=True)
with mount_state_partition() as state_path:
src_connections = os.path.join(
state_path, , , ,
)
LOG.info(f"migrate_connections: moving nmcli connections from"
f" {src_connections} to {dest_connections}")
found = migrate_system_connections(src_connections, dest_connections)
if found:
return
LOG.info(
"migrate_connections: No connections found in state, checking boot")
with mount_boot_partition() as boot_path:
src_connections = os.path.join(
boot_path, )
LOG.info(f"migrate_connections: moving nmcli connections from"
f" {src_connections} to {dest_connections}")
found = migrate_system_connections(src_connections, dest_connections)
if not found:
LOG.info("migrate_connections: No connections found in boot") | Migrate wifi connection files to new locations and patch them
:param new_data_path: The path to where the new data partition is mounted | ### Input:
Migrate wifi connection files to new locations and patch them
:param new_data_path: The path to where the new data partition is mounted
### Response:
def migrate_connections(new_data_path: str):
dest_connections = os.path.join(
new_data_path, , , )
os.makedirs(dest_connections, exist_ok=True)
with mount_state_partition() as state_path:
src_connections = os.path.join(
state_path, , , ,
)
LOG.info(f"migrate_connections: moving nmcli connections from"
f" {src_connections} to {dest_connections}")
found = migrate_system_connections(src_connections, dest_connections)
if found:
return
LOG.info(
"migrate_connections: No connections found in state, checking boot")
with mount_boot_partition() as boot_path:
src_connections = os.path.join(
boot_path, )
LOG.info(f"migrate_connections: moving nmcli connections from"
f" {src_connections} to {dest_connections}")
found = migrate_system_connections(src_connections, dest_connections)
if not found:
LOG.info("migrate_connections: No connections found in boot") |
def merge_ticket(self, ticket_id, into_id):
msg = self.__request(.format(str(ticket_id),
str(into_id)))
state = msg.split()[2]
return self.RE_PATTERNS[].match(state) is not None | Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission. | ### Input:
Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission.
### Response:
def merge_ticket(self, ticket_id, into_id):
msg = self.__request(.format(str(ticket_id),
str(into_id)))
state = msg.split()[2]
return self.RE_PATTERNS[].match(state) is not None |
def f(self, y):
d = self.d
mpsi = self.psi
z = d * y.copy()
for i in range(len(mpsi)):
a, b, c = mpsi[i]
z += a * np.tanh(b * (y + c))
return z | Transform y with f using parameter vector psi
psi = [[a,b,c]]
:math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))` | ### Input:
Transform y with f using parameter vector psi
psi = [[a,b,c]]
:math:`f = (y * d) + \\sum_{terms} a * tanh(b *(y + c))`
### Response:
def f(self, y):
d = self.d
mpsi = self.psi
z = d * y.copy()
for i in range(len(mpsi)):
a, b, c = mpsi[i]
z += a * np.tanh(b * (y + c))
return z |
def setiddname(cls, iddname, testing=False):
if cls.iddname == None:
cls.iddname = iddname
cls.idd_info = None
cls.block = None
elif cls.iddname == iddname:
pass
else:
if testing == False:
errortxt = "IDD file is set to: %s" % (cls.iddname,)
raise IDDAlreadySetError(errortxt) | Set the path to the EnergyPlus IDD for the version of EnergyPlus which
is to be used by eppy.
Parameters
----------
iddname : str
Path to the IDD file.
testing : bool
Flag to use if running tests since we may want to ignore the
`IDDAlreadySetError`.
Raises
------
IDDAlreadySetError | ### Input:
Set the path to the EnergyPlus IDD for the version of EnergyPlus which
is to be used by eppy.
Parameters
----------
iddname : str
Path to the IDD file.
testing : bool
Flag to use if running tests since we may want to ignore the
`IDDAlreadySetError`.
Raises
------
IDDAlreadySetError
### Response:
def setiddname(cls, iddname, testing=False):
if cls.iddname == None:
cls.iddname = iddname
cls.idd_info = None
cls.block = None
elif cls.iddname == iddname:
pass
else:
if testing == False:
errortxt = "IDD file is set to: %s" % (cls.iddname,)
raise IDDAlreadySetError(errortxt) |
def update_agent_db_refs(self, agent, agent_text, do_rename=True):
map_db_refs = deepcopy(self.gm.get(agent_text))
self.standardize_agent_db_refs(agent, map_db_refs, do_rename) | Update db_refs of agent using the grounding map
If the grounding map is missing one of the HGNC symbol or Uniprot ID,
attempts to reconstruct one from the other.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The agent whose db_refs will be updated
agent_text : str
The agent_text to find a grounding for in the grounding map
dictionary. Typically this will be agent.db_refs['TEXT'] but
there may be situations where a different value should be used.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Raises
------
ValueError
If the the grounding map contains and HGNC symbol for
agent_text but no HGNC ID can be found for it.
ValueError
If the grounding map contains both an HGNC symbol and a
Uniprot ID, but the HGNC symbol and the gene name associated with
the gene in Uniprot do not match or if there is no associated gene
name in Uniprot. | ### Input:
Update db_refs of agent using the grounding map
If the grounding map is missing one of the HGNC symbol or Uniprot ID,
attempts to reconstruct one from the other.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The agent whose db_refs will be updated
agent_text : str
The agent_text to find a grounding for in the grounding map
dictionary. Typically this will be agent.db_refs['TEXT'] but
there may be situations where a different value should be used.
do_rename: Optional[bool]
If True, the Agent name is updated based on the mapped grounding.
If do_rename is True the priority for setting the name is
FamPlex ID, HGNC symbol, then the gene name
from Uniprot. Default: True
Raises
------
ValueError
If the the grounding map contains and HGNC symbol for
agent_text but no HGNC ID can be found for it.
ValueError
If the grounding map contains both an HGNC symbol and a
Uniprot ID, but the HGNC symbol and the gene name associated with
the gene in Uniprot do not match or if there is no associated gene
name in Uniprot.
### Response:
def update_agent_db_refs(self, agent, agent_text, do_rename=True):
map_db_refs = deepcopy(self.gm.get(agent_text))
self.standardize_agent_db_refs(agent, map_db_refs, do_rename) |
def get_value(self, field, default=None):
section, key = field.split()
return self.get_section(section).get(key, default) | Get an entry from within a section, using a '/' delimiter | ### Input:
Get an entry from within a section, using a '/' delimiter
### Response:
def get_value(self, field, default=None):
section, key = field.split()
return self.get_section(section).get(key, default) |
def _evaluate(self):
if self._elements:
for element in self._elements:
yield element
else:
for page in itertools.count():
raw_elements = self._retrieve_raw_elements(page)
for raw_element in raw_elements:
element = self._parse_raw_element(raw_element)
self._elements.append(element)
yield element
if self.__limit and len(self._elements) >= self.__limit:
break
if any([
len(raw_elements) < self.page_size,
(self.__limit and len(self._elements) >= self.__limit)
]):
break | Lazily retrieve and paginate report results and build Record instances from returned data | ### Input:
Lazily retrieve and paginate report results and build Record instances from returned data
### Response:
def _evaluate(self):
if self._elements:
for element in self._elements:
yield element
else:
for page in itertools.count():
raw_elements = self._retrieve_raw_elements(page)
for raw_element in raw_elements:
element = self._parse_raw_element(raw_element)
self._elements.append(element)
yield element
if self.__limit and len(self._elements) >= self.__limit:
break
if any([
len(raw_elements) < self.page_size,
(self.__limit and len(self._elements) >= self.__limit)
]):
break |
async def join_voice(guild_id: int, channel_id: int):
node = get_node(guild_id)
voice_ws = node.get_voice_ws(guild_id)
await voice_ws.voice_state(guild_id, channel_id) | Joins a voice channel by ID's.
Parameters
----------
guild_id : int
channel_id : int | ### Input:
Joins a voice channel by ID's.
Parameters
----------
guild_id : int
channel_id : int
### Response:
async def join_voice(guild_id: int, channel_id: int):
node = get_node(guild_id)
voice_ws = node.get_voice_ws(guild_id)
await voice_ws.voice_state(guild_id, channel_id) |
def dns():
if salt.utils.platform.is_windows() or in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in (, , ,
):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {: resolv} if resolv else {} | Parse the resolver configuration file
.. versionadded:: 2016.3.0 | ### Input:
Parse the resolver configuration file
.. versionadded:: 2016.3.0
### Response:
def dns():
if salt.utils.platform.is_windows() or in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in (, , ,
):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {: resolv} if resolv else {} |
def attach(self, path, encoding=):
if self.is_attached():
raise ItsdbError(.format(self.path))
try:
path = _table_filename(path)
except ItsdbError:
path = _normalize_table_path(path)
open(path, ).close()
else:
if os.stat(path).st_size > 0 and len(self._records) > 0:
raise ItsdbError(
)
self.path = path
self.encoding = encoding
if len(self._records) == 0:
self._sync_with_file() | Attach the Table to the file at *path*.
Attaching a table to a file means that only changed records
are stored in memory, which greatly reduces the memory
footprint of large profiles at some cost of
performance. Tables created from :meth:`Table.from_file()` or
from an attached :class:`TestSuite` are automatically
attached. Attaching a file does not immediately flush the
contents to disk; after attaching the table must be separately
written to commit the in-memory data.
A non-empty table will fail to attach to a non-empty file to
avoid data loss when merging the contents. In this case, you
may delete or clear the file, clear the table, or attach to
another file.
Args:
path: the path to the table file
encoding: the character encoding of the files in the testsuite | ### Input:
Attach the Table to the file at *path*.
Attaching a table to a file means that only changed records
are stored in memory, which greatly reduces the memory
footprint of large profiles at some cost of
performance. Tables created from :meth:`Table.from_file()` or
from an attached :class:`TestSuite` are automatically
attached. Attaching a file does not immediately flush the
contents to disk; after attaching the table must be separately
written to commit the in-memory data.
A non-empty table will fail to attach to a non-empty file to
avoid data loss when merging the contents. In this case, you
may delete or clear the file, clear the table, or attach to
another file.
Args:
path: the path to the table file
encoding: the character encoding of the files in the testsuite
### Response:
def attach(self, path, encoding=):
if self.is_attached():
raise ItsdbError(.format(self.path))
try:
path = _table_filename(path)
except ItsdbError:
path = _normalize_table_path(path)
open(path, ).close()
else:
if os.stat(path).st_size > 0 and len(self._records) > 0:
raise ItsdbError(
)
self.path = path
self.encoding = encoding
if len(self._records) == 0:
self._sync_with_file() |
def yaml_dump(dict_to_dump):
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False) | Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str | ### Input:
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
### Response:
def yaml_dump(dict_to_dump):
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False) |
def ns2naturaltimesince(ns):
timestamp = ns / 1000000000
dt = datetime.datetime.utcfromtimestamp(timestamp)
return dt2naturaltimesince(dt) | nanoseconds to a human readable representation with how old this entry is information
e.g.:
Jan. 27, 2016, 9:04 p.m. (31 minutes ago) | ### Input:
nanoseconds to a human readable representation with how old this entry is information
e.g.:
Jan. 27, 2016, 9:04 p.m. (31 minutes ago)
### Response:
def ns2naturaltimesince(ns):
timestamp = ns / 1000000000
dt = datetime.datetime.utcfromtimestamp(timestamp)
return dt2naturaltimesince(dt) |
def _f_A20(self, r_a, r_s):
return r_a/(1+np.sqrt(1 + r_a**2)) - r_s/(1+np.sqrt(1 + r_s**2)) | equation A20 in Eliasdottir (2013)
:param r_a: r/Ra
:param r_s: r/Rs
:return: | ### Input:
equation A20 in Eliasdottir (2013)
:param r_a: r/Ra
:param r_s: r/Rs
:return:
### Response:
def _f_A20(self, r_a, r_s):
return r_a/(1+np.sqrt(1 + r_a**2)) - r_s/(1+np.sqrt(1 + r_s**2)) |
def addAllele(
self, allele_id, allele_label, allele_type=None,
allele_description=None):
if allele_type is None:
allele_type = self.globaltt[]
self.model.addIndividualToGraph(
allele_id, allele_label, allele_type, allele_description)
return | Make an allele object.
If no allele_type is added, it will default to a geno:allele
:param allele_id: curie for allele (required)
:param allele_label: label for allele (required)
:param allele_type: id for an allele type (optional,
recommended SO or GENO class)
:param allele_description: a free-text description of the allele
:return: | ### Input:
Make an allele object.
If no allele_type is added, it will default to a geno:allele
:param allele_id: curie for allele (required)
:param allele_label: label for allele (required)
:param allele_type: id for an allele type (optional,
recommended SO or GENO class)
:param allele_description: a free-text description of the allele
:return:
### Response:
def addAllele(
self, allele_id, allele_label, allele_type=None,
allele_description=None):
if allele_type is None:
allele_type = self.globaltt[]
self.model.addIndividualToGraph(
allele_id, allele_label, allele_type, allele_description)
return |
def inject_func_as_property(self, func, method_name=None, class_=None):
if method_name is None:
method_name = get_funcname(func)
new_property = property(func)
setattr(self.__class__, method_name, new_property) | WARNING:
properties are more safely injected using metaclasses
References:
http://stackoverflow.com/questions/13850114/dynamically-adding-methods-with-or-without-metaclass-in-python | ### Input:
WARNING:
properties are more safely injected using metaclasses
References:
http://stackoverflow.com/questions/13850114/dynamically-adding-methods-with-or-without-metaclass-in-python
### Response:
def inject_func_as_property(self, func, method_name=None, class_=None):
if method_name is None:
method_name = get_funcname(func)
new_property = property(func)
setattr(self.__class__, method_name, new_property) |
def create_import_request(self, import_request, project, repository_id):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if repository_id is not None:
route_values[] = self._serialize.url(, repository_id, )
content = self._serialize.body(import_request, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, response) | CreateImportRequest.
[Preview API] Create an import request.
:param :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>` import_request: The import request to create.
:param str project: Project ID or project name
:param str repository_id: The name or ID of the repository.
:rtype: :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>` | ### Input:
CreateImportRequest.
[Preview API] Create an import request.
:param :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>` import_request: The import request to create.
:param str project: Project ID or project name
:param str repository_id: The name or ID of the repository.
:rtype: :class:`<GitImportRequest> <azure.devops.v5_0.git.models.GitImportRequest>`
### Response:
def create_import_request(self, import_request, project, repository_id):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if repository_id is not None:
route_values[] = self._serialize.url(, repository_id, )
content = self._serialize.body(import_request, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, response) |
def set_param(self, section, param, value):
if section not in self.conf or param not in self.conf[section]:
logger.error(, section, param)
else:
self.conf[section][param] = value | Change a param in the config | ### Input:
Change a param in the config
### Response:
def set_param(self, section, param, value):
if section not in self.conf or param not in self.conf[section]:
logger.error(, section, param)
else:
self.conf[section][param] = value |
def merge(cls, components):
action = cls.EXTEND
val = {}
for component in components:
if component.action is cls.REPLACE:
val = component.val
action = cls.REPLACE
elif component.action is cls.EXTEND:
val.update(component.val)
else:
raise ParseError(.format(component.action))
return cls(action, val) | Merges components into a single component, applying their actions appropriately.
This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
:param list components: an iterable of instances of DictValueComponent.
:return: An instance representing the result of merging the components.
:rtype: `DictValueComponent` | ### Input:
Merges components into a single component, applying their actions appropriately.
This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
:param list components: an iterable of instances of DictValueComponent.
:return: An instance representing the result of merging the components.
:rtype: `DictValueComponent`
### Response:
def merge(cls, components):
action = cls.EXTEND
val = {}
for component in components:
if component.action is cls.REPLACE:
val = component.val
action = cls.REPLACE
elif component.action is cls.EXTEND:
val.update(component.val)
else:
raise ParseError(.format(component.action))
return cls(action, val) |
def txt_read_in(self):
data = ascii.read(self.WORKING_DIRECTORY + + self.file_name)
num_x_pts = len(np.unique(data[self.x_column_label]))
num_y_pts = len(np.unique(data[self.y_column_label]))
self.xvals = np.reshape(np.asarray(data[self.x_column_label]), (num_y_pts, num_x_pts))
self.yvals = np.reshape(np.asarray(data[self.y_column_label]), (num_y_pts, num_x_pts))
self.zvals = np.reshape(np.asarray(data[self.z_column_label]), (num_y_pts, num_x_pts))
return | Read in txt files.
Method for reading in text or csv files. This uses ascii class from astropy.io
for flexible input. It is slower than numpy, but has greater flexibility with less input. | ### Input:
Read in txt files.
Method for reading in text or csv files. This uses ascii class from astropy.io
for flexible input. It is slower than numpy, but has greater flexibility with less input.
### Response:
def txt_read_in(self):
data = ascii.read(self.WORKING_DIRECTORY + + self.file_name)
num_x_pts = len(np.unique(data[self.x_column_label]))
num_y_pts = len(np.unique(data[self.y_column_label]))
self.xvals = np.reshape(np.asarray(data[self.x_column_label]), (num_y_pts, num_x_pts))
self.yvals = np.reshape(np.asarray(data[self.y_column_label]), (num_y_pts, num_x_pts))
self.zvals = np.reshape(np.asarray(data[self.z_column_label]), (num_y_pts, num_x_pts))
return |
def upload():
env=os.environ.copy()
print(env)
env[]= "./pynt"
print(env)
pipe=subprocess.Popen([, , ,], env=env)
pipe.wait() | Uploads to PyPI | ### Input:
Uploads to PyPI
### Response:
def upload():
env=os.environ.copy()
print(env)
env[]= "./pynt"
print(env)
pipe=subprocess.Popen([, , ,], env=env)
pipe.wait() |
def header(*msg, level=, separator=" ", print_out=print):
out_string = separator.join(str(x) for x in msg)
if level == :
box_len = 80
print_out( + * (box_len + 2))
print_out("| %s" % out_string)
print_out( + * (box_len + 2))
elif level == :
print_out("")
print_out(out_string)
print_out( * 60)
elif level == :
print_out( % out_string)
print_out( + ( * 40))
else:
print_out( % out_string)
print_out( + ( * 20)) | Print header block in text mode | ### Input:
Print header block in text mode
### Response:
def header(*msg, level=, separator=" ", print_out=print):
out_string = separator.join(str(x) for x in msg)
if level == :
box_len = 80
print_out( + * (box_len + 2))
print_out("| %s" % out_string)
print_out( + * (box_len + 2))
elif level == :
print_out("")
print_out(out_string)
print_out( * 60)
elif level == :
print_out( % out_string)
print_out( + ( * 40))
else:
print_out( % out_string)
print_out( + ( * 20)) |
def cleanup(self):
self.exit_config_mode()
self.write_channel("logout" + self.RETURN)
count = 0
while count <= 5:
time.sleep(0.5)
output = self.read_channel()
if "Do you want to log out" in output:
self._session_log_fin = True
self.write_channel("y" + self.RETURN)
elif "Do you want to save the current" in output:
self._session_log_fin = True
self.write_channel("n" + self.RETURN)
try:
self.write_channel(self.RETURN)
except socket.error:
break
count += 1 | Gracefully exit the SSH session. | ### Input:
Gracefully exit the SSH session.
### Response:
def cleanup(self):
self.exit_config_mode()
self.write_channel("logout" + self.RETURN)
count = 0
while count <= 5:
time.sleep(0.5)
output = self.read_channel()
if "Do you want to log out" in output:
self._session_log_fin = True
self.write_channel("y" + self.RETURN)
elif "Do you want to save the current" in output:
self._session_log_fin = True
self.write_channel("n" + self.RETURN)
try:
self.write_channel(self.RETURN)
except socket.error:
break
count += 1 |
def return_hdr(self):
subj_id = self._header[] + + self._header[]
chan_name = [ch[] for ch in self._header[]]
return subj_id, self._header[], self._header[], chan_name, self._n_smp, self._header | Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header | ### Input:
Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
### Response:
def return_hdr(self):
subj_id = self._header[] + + self._header[]
chan_name = [ch[] for ch in self._header[]]
return subj_id, self._header[], self._header[], chan_name, self._n_smp, self._header |
def generate(pseudo_ast, language):
pypythonrbrubyjavascriptjscscsharpgocpp
if isinstance(pseudo_ast, dict):
pseudo_ast = pseudo.loader.convert_to_syntax_tree(pseudo_ast)
translated_ast = API_TRANSLATORS[language](pseudo_ast).api_translate()
return GENERATORS[language]().generate(translated_ast) | generate output code in `language`
`pseudo_ast` can be a plain `dict` with ast data or
it can use the internal `pseudo` `Node(type, **fields)` format
if you want to play with it, you can use `generate_main` which
expects just a dict node / a list of dict nodes and a language
`language` can be 'py', 'python', 'rb', 'ruby',
'javascript', 'js', 'cs', 'csharp', 'go' or 'cpp' | ### Input:
generate output code in `language`
`pseudo_ast` can be a plain `dict` with ast data or
it can use the internal `pseudo` `Node(type, **fields)` format
if you want to play with it, you can use `generate_main` which
expects just a dict node / a list of dict nodes and a language
`language` can be 'py', 'python', 'rb', 'ruby',
'javascript', 'js', 'cs', 'csharp', 'go' or 'cpp'
### Response:
def generate(pseudo_ast, language):
pypythonrbrubyjavascriptjscscsharpgocpp
if isinstance(pseudo_ast, dict):
pseudo_ast = pseudo.loader.convert_to_syntax_tree(pseudo_ast)
translated_ast = API_TRANSLATORS[language](pseudo_ast).api_translate()
return GENERATORS[language]().generate(translated_ast) |
def merge_split_alignments(data):
data = utils.to_single_data(data)
data = _merge_align_bams(data)
data = _merge_hla_fastq_inputs(data)
return [[data]] | Merge split BAM inputs generated by common workflow language runs. | ### Input:
Merge split BAM inputs generated by common workflow language runs.
### Response:
def merge_split_alignments(data):
data = utils.to_single_data(data)
data = _merge_align_bams(data)
data = _merge_hla_fastq_inputs(data)
return [[data]] |
def get_single(group, name, path=None):
for config, distro in iter_files_distros(path=path):
if (group in config) and (name in config[group]):
epstr = config[group][name]
with BadEntryPoint.err_to_warnings():
return EntryPoint.from_string(epstr, name, distro)
raise NoSuchEntryPoint(group, name) | Find a single entry point.
Returns an :class:`EntryPoint` object, or raises :exc:`NoSuchEntryPoint`
if no match is found. | ### Input:
Find a single entry point.
Returns an :class:`EntryPoint` object, or raises :exc:`NoSuchEntryPoint`
if no match is found.
### Response:
def get_single(group, name, path=None):
for config, distro in iter_files_distros(path=path):
if (group in config) and (name in config[group]):
epstr = config[group][name]
with BadEntryPoint.err_to_warnings():
return EntryPoint.from_string(epstr, name, distro)
raise NoSuchEntryPoint(group, name) |
def get_service(station: str) -> Service:
for prefix in PREFERRED:
if station.startswith(prefix):
return PREFERRED[prefix]
return NOAA | Returns the preferred service for a given station | ### Input:
Returns the preferred service for a given station
### Response:
def get_service(station: str) -> Service:
for prefix in PREFERRED:
if station.startswith(prefix):
return PREFERRED[prefix]
return NOAA |
def edit(request):
model = apps.get_model(request.POST["app"], request.POST["model"])
obj = model.objects.get(id=request.POST["id"])
form = get_edit_form(obj, request.POST["fields"], data=request.POST,
files=request.FILES)
if not (is_editable(obj, request) and has_site_permission(request.user)):
response = _("Permission denied")
elif form.is_valid():
form.save()
model_admin = ModelAdmin(model, admin.site)
message = model_admin.construct_change_message(request, form, None)
model_admin.log_change(request, obj, message)
response = ""
else:
response = list(form.errors.values())[0][0]
return HttpResponse(response) | Process the inline editing form. | ### Input:
Process the inline editing form.
### Response:
def edit(request):
model = apps.get_model(request.POST["app"], request.POST["model"])
obj = model.objects.get(id=request.POST["id"])
form = get_edit_form(obj, request.POST["fields"], data=request.POST,
files=request.FILES)
if not (is_editable(obj, request) and has_site_permission(request.user)):
response = _("Permission denied")
elif form.is_valid():
form.save()
model_admin = ModelAdmin(model, admin.site)
message = model_admin.construct_change_message(request, form, None)
model_admin.log_change(request, obj, message)
response = ""
else:
response = list(form.errors.values())[0][0]
return HttpResponse(response) |
def fetch_liked_datasets(self, **kwargs):
kwargs[] = True
if kwargs.get():
return self.fetch_liked_datasets_with_http_info(**kwargs)
else:
(data) = self.fetch_liked_datasets_with_http_info(**kwargs)
return data | List liked datasets
Fetch datasets that the currently authenticated user likes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.fetch_liked_datasets(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str limit: Maximum number of items to include in a page of results.
:param str next: Token from previous result page to be used when requesting a subsequent page.
:return: PaginatedDatasetResults
If the method is called asynchronously,
returns the request thread. | ### Input:
List liked datasets
Fetch datasets that the currently authenticated user likes.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.fetch_liked_datasets(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str limit: Maximum number of items to include in a page of results.
:param str next: Token from previous result page to be used when requesting a subsequent page.
:return: PaginatedDatasetResults
If the method is called asynchronously,
returns the request thread.
### Response:
def fetch_liked_datasets(self, **kwargs):
kwargs[] = True
if kwargs.get():
return self.fetch_liked_datasets_with_http_info(**kwargs)
else:
(data) = self.fetch_liked_datasets_with_http_info(**kwargs)
return data |
def get(cls, label=, path=None):
config = super(ServerConfig, cls).get(label, path)
if hasattr(config, ) and isinstance(config.auth, list):
config.auth = tuple(config.auth)
return config | Read a server configuration from a configuration file.
This method extends :meth:`nailgun.config.BaseServerConfig.get`. Please
read up on that method before trying to understand this one.
The entity classes rely on the requests library to be a transport
mechanism. The methods provided by that library, such as ``get`` and
``post``, accept an ``auth`` argument. That argument must be a tuple:
Auth tuple to enable Basic/Digest/Custom HTTP Auth.
However, the JSON decoder does not recognize a tuple as a type, and
represents sequences of elements as a tuple. Compensate for that by
converting ``auth`` to a two element tuple if it is a two element list.
This override is done here, and not in the base class, because the base
class may be extracted out into a separate library and used in other
contexts. In those contexts, the presence of a list may not matter or
may be desirable. | ### Input:
Read a server configuration from a configuration file.
This method extends :meth:`nailgun.config.BaseServerConfig.get`. Please
read up on that method before trying to understand this one.
The entity classes rely on the requests library to be a transport
mechanism. The methods provided by that library, such as ``get`` and
``post``, accept an ``auth`` argument. That argument must be a tuple:
Auth tuple to enable Basic/Digest/Custom HTTP Auth.
However, the JSON decoder does not recognize a tuple as a type, and
represents sequences of elements as a tuple. Compensate for that by
converting ``auth`` to a two element tuple if it is a two element list.
This override is done here, and not in the base class, because the base
class may be extracted out into a separate library and used in other
contexts. In those contexts, the presence of a list may not matter or
may be desirable.
### Response:
def get(cls, label=, path=None):
config = super(ServerConfig, cls).get(label, path)
if hasattr(config, ) and isinstance(config.auth, list):
config.auth = tuple(config.auth)
return config |
def extract(self):
trimmed = self.trim()
if trimmed.filtered:
indices = trimmed._filtered_range_to_unfiltered_indices(0, len(trimmed))
return trimmed.take(indices)
else:
return trimmed | Return a DataFrame containing only the filtered rows.
{note_copy}
The resulting DataFrame may be more efficient to work with when the original DataFrame is
heavily filtered (contains just a small number of rows).
If no filtering is applied, it returns a trimmed view.
For the returned df, len(df) == df.length_original() == df.length_unfiltered()
:rtype: DataFrame | ### Input:
Return a DataFrame containing only the filtered rows.
{note_copy}
The resulting DataFrame may be more efficient to work with when the original DataFrame is
heavily filtered (contains just a small number of rows).
If no filtering is applied, it returns a trimmed view.
For the returned df, len(df) == df.length_original() == df.length_unfiltered()
:rtype: DataFrame
### Response:
def extract(self):
trimmed = self.trim()
if trimmed.filtered:
indices = trimmed._filtered_range_to_unfiltered_indices(0, len(trimmed))
return trimmed.take(indices)
else:
return trimmed |
def _uniform_sample(self):
ep_ind = random.choice(self.demo_list)
states = self.demo_file["data/{}/states".format(ep_ind)].value
state = random.choice(states)
if self.need_xml:
model_xml = self._xml_for_episode_index(ep_ind)
xml = postprocess_model_xml(model_xml)
return state, xml
return state | Sampling method.
First uniformly sample a demonstration from the set of demonstrations.
Then uniformly sample a state from the selected demonstration. | ### Input:
Sampling method.
First uniformly sample a demonstration from the set of demonstrations.
Then uniformly sample a state from the selected demonstration.
### Response:
def _uniform_sample(self):
ep_ind = random.choice(self.demo_list)
states = self.demo_file["data/{}/states".format(ep_ind)].value
state = random.choice(states)
if self.need_xml:
model_xml = self._xml_for_episode_index(ep_ind)
xml = postprocess_model_xml(model_xml)
return state, xml
return state |
def getContactCreationParameters(self):
for contactType in self.getContactTypes():
if contactType.allowMultipleContactItems:
descriptiveIdentifier = _descriptiveIdentifier(contactType)
yield liveform.ListChangeParameter(
contactType.uniqueIdentifier(),
contactType.getParameters(None),
defaults=[],
modelObjects=[],
modelObjectDescription=descriptiveIdentifier)
else:
yield liveform.FormParameter(
contactType.uniqueIdentifier(),
liveform.LiveForm(
lambda **k: k,
contactType.getParameters(None))) | Yield a L{Parameter} for each L{IContactType} known.
Each yielded object can be used with a L{LiveForm} to create a new
instance of a particular L{IContactType}. | ### Input:
Yield a L{Parameter} for each L{IContactType} known.
Each yielded object can be used with a L{LiveForm} to create a new
instance of a particular L{IContactType}.
### Response:
def getContactCreationParameters(self):
for contactType in self.getContactTypes():
if contactType.allowMultipleContactItems:
descriptiveIdentifier = _descriptiveIdentifier(contactType)
yield liveform.ListChangeParameter(
contactType.uniqueIdentifier(),
contactType.getParameters(None),
defaults=[],
modelObjects=[],
modelObjectDescription=descriptiveIdentifier)
else:
yield liveform.FormParameter(
contactType.uniqueIdentifier(),
liveform.LiveForm(
lambda **k: k,
contactType.getParameters(None))) |
def read_config(self, filename):
if not os.path.exists(filename):
raise Exception("Configuration file cannot be found: %s" % filename)
with io.open(filename, encoding=) as stream:
return yaml.safe_load(stream) | Returns data found in config file (as dict), or raises exception if file not found | ### Input:
Returns data found in config file (as dict), or raises exception if file not found
### Response:
def read_config(self, filename):
if not os.path.exists(filename):
raise Exception("Configuration file cannot be found: %s" % filename)
with io.open(filename, encoding=) as stream:
return yaml.safe_load(stream) |
def _notify_deleted_file(self):
self.file_deleted.emit(self.editor)
self.enabled = False | Notify user from external file deletion. | ### Input:
Notify user from external file deletion.
### Response:
def _notify_deleted_file(self):
self.file_deleted.emit(self.editor)
self.enabled = False |
def fit(self, rdd, epochs=10, batch_size=32,
verbose=0, validation_split=0.1):
print()
if self.num_workers:
rdd = rdd.repartition(self.num_workers)
if self.mode in [, , ]:
self._fit(rdd, epochs, batch_size, verbose, validation_split)
else:
raise ValueError(
"Choose from one of the modes: asynchronous, synchronous or hogwild") | Train an elephas model on an RDD. The Keras model configuration as specified
in the elephas model is sent to Spark workers, abd each worker will be trained
on their data partition.
:param rdd: RDD with features and labels
:param epochs: number of epochs used for training
:param batch_size: batch size used for training
:param verbose: logging verbosity level (0, 1 or 2)
:param validation_split: percentage of data set aside for validation | ### Input:
Train an elephas model on an RDD. The Keras model configuration as specified
in the elephas model is sent to Spark workers, abd each worker will be trained
on their data partition.
:param rdd: RDD with features and labels
:param epochs: number of epochs used for training
:param batch_size: batch size used for training
:param verbose: logging verbosity level (0, 1 or 2)
:param validation_split: percentage of data set aside for validation
### Response:
def fit(self, rdd, epochs=10, batch_size=32,
verbose=0, validation_split=0.1):
print()
if self.num_workers:
rdd = rdd.repartition(self.num_workers)
if self.mode in [, , ]:
self._fit(rdd, epochs, batch_size, verbose, validation_split)
else:
raise ValueError(
"Choose from one of the modes: asynchronous, synchronous or hogwild") |
def last_continuous_indexes_slice(ol,value):
length = ol.__len__()
end = None
slice = []
for i in range(length-1,-1,-1):
if(ol[i]==value):
end = i
break
else:
pass
if(end == None):
return(None)
else:
slice.append(end)
for i in range(end-1,-1,-1):
if(ol[i]==value):
slice.append(i)
else:
break
slice.reverse()
return(slice) | from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a") | ### Input:
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a")
### Response:
def last_continuous_indexes_slice(ol,value):
length = ol.__len__()
end = None
slice = []
for i in range(length-1,-1,-1):
if(ol[i]==value):
end = i
break
else:
pass
if(end == None):
return(None)
else:
slice.append(end)
for i in range(end-1,-1,-1):
if(ol[i]==value):
slice.append(i)
else:
break
slice.reverse()
return(slice) |
def read_flash(self, addr=0xFF, page=0x00):
buff = bytearray()
page_size = self.targets[addr].page_size
for i in range(0, int(math.ceil(page_size / 25.0))):
pk = None
retry_counter = 5
while ((not pk or pk.header != 0xFF or
struct.unpack(, pk.data[0:2]) != (addr, 0x1C)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack(, addr, 0x1C, page, (i * 25))
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if (retry_counter < 0):
return None
else:
buff += pk.data[6:]
return buff[0:page_size] | Read back a flash page from the Crazyflie and return it | ### Input:
Read back a flash page from the Crazyflie and return it
### Response:
def read_flash(self, addr=0xFF, page=0x00):
buff = bytearray()
page_size = self.targets[addr].page_size
for i in range(0, int(math.ceil(page_size / 25.0))):
pk = None
retry_counter = 5
while ((not pk or pk.header != 0xFF or
struct.unpack(, pk.data[0:2]) != (addr, 0x1C)) and
retry_counter >= 0):
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = struct.pack(, addr, 0x1C, page, (i * 25))
self.link.send_packet(pk)
pk = self.link.receive_packet(1)
retry_counter -= 1
if (retry_counter < 0):
return None
else:
buff += pk.data[6:]
return buff[0:page_size] |
def make_c_header(name, front, body):
return .strip().format(front, name.upper(), body, name) + | Build a C header from the front and body. | ### Input:
Build a C header from the front and body.
### Response:
def make_c_header(name, front, body):
return .strip().format(front, name.upper(), body, name) + |
def black_winner(self, profile):
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported election type")
exit()
wmg = profile.getWmg()
m = profile.numCands
for cand1 in wmg.keys():
outgoing = 0
for cand2 in wmg[cand1].keys():
if wmg[cand1][cand2] > 0:
outgoing += 1
if outgoing == m - 1:
return [cand1]
Borda_winner = MechanismBorda().getWinners(profile)
return Borda_winner | Returns a number or a list that associates the winner(s) of a profile under black rule.
:ivar Profile profile: A Profile object that represents an election profile. | ### Input:
Returns a number or a list that associates the winner(s) of a profile under black rule.
:ivar Profile profile: A Profile object that represents an election profile.
### Response:
def black_winner(self, profile):
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported election type")
exit()
wmg = profile.getWmg()
m = profile.numCands
for cand1 in wmg.keys():
outgoing = 0
for cand2 in wmg[cand1].keys():
if wmg[cand1][cand2] > 0:
outgoing += 1
if outgoing == m - 1:
return [cand1]
Borda_winner = MechanismBorda().getWinners(profile)
return Borda_winner |
def execute_script(self, sql_script=None, commands=None, split_algo=, prep_statements=False,
dump_fails=True, execute_fails=True, ignored_commands=(, , )):
ss = Execute(sql_script, split_algo, prep_statements, dump_fails, self)
ss.execute(commands, ignored_commands=ignored_commands, execute_fails=execute_fails) | Wrapper method for SQLScript class. | ### Input:
Wrapper method for SQLScript class.
### Response:
def execute_script(self, sql_script=None, commands=None, split_algo=, prep_statements=False,
dump_fails=True, execute_fails=True, ignored_commands=(, , )):
ss = Execute(sql_script, split_algo, prep_statements, dump_fails, self)
ss.execute(commands, ignored_commands=ignored_commands, execute_fails=execute_fails) |
def should_be_hidden_as_cause(exc):
from valid8.validation_lib.types import HasWrongType, IsWrongType
return isinstance(exc, (HasWrongType, IsWrongType)) | Used everywhere to decide if some exception type should be displayed or hidden as the casue of an error | ### Input:
Used everywhere to decide if some exception type should be displayed or hidden as the casue of an error
### Response:
def should_be_hidden_as_cause(exc):
from valid8.validation_lib.types import HasWrongType, IsWrongType
return isinstance(exc, (HasWrongType, IsWrongType)) |
def delete(self):
if self.glucose:
pysolvers.glucose3_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close() | Destructor. | ### Input:
Destructor.
### Response:
def delete(self):
if self.glucose:
pysolvers.glucose3_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close() |
def delete_registry(self, registry):
if re.match(".*\\/.*", registry):
return [False, "input registry name cannot contain characters - valid registry names are of the form <host>:<port> where :<port> is optional"]
url = self.url + "/api/scanning/v1/anchore/registries/" + registry
res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] | **Description**
Delete an existing image registry
**Arguments**
- registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 | ### Input:
**Description**
Delete an existing image registry
**Arguments**
- registry: Full hostname/port of registry. Eg. myrepo.example.com:5000
### Response:
def delete_registry(self, registry):
if re.match(".*\\/.*", registry):
return [False, "input registry name cannot contain characters - valid registry names are of the form <host>:<port> where :<port> is optional"]
url = self.url + "/api/scanning/v1/anchore/registries/" + registry
res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] |
def get_geostationary_angle_extent(geos_area):
req = geos_area.proj_dict['a'] / 1000
rp = geos_area.proj_dict['b'] / 1000
h = geos_area.proj_dict['h'] / 1000 + req
aeq = 1 - req**2 / (h ** 2)
ap_ = 1 - rp**2 / (h ** 2)
xmax = np.arccos(np.sqrt(aeq))
ymax = np.arccos(np.sqrt(ap_))
return xmax, ymax | Get the max earth (vs space) viewing angles in x and y. | ### Input:
Get the max earth (vs space) viewing angles in x and y.
### Response:
def get_geostationary_angle_extent(geos_area):
req = geos_area.proj_dict['a'] / 1000
rp = geos_area.proj_dict['b'] / 1000
h = geos_area.proj_dict['h'] / 1000 + req
aeq = 1 - req**2 / (h ** 2)
ap_ = 1 - rp**2 / (h ** 2)
xmax = np.arccos(np.sqrt(aeq))
ymax = np.arccos(np.sqrt(ap_))
return xmax, ymax |
def get_if_raw_addr(ifname):
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError as msg:
warning("Failed to execute ifconfig: (%s)", msg)
return b"\0\0\0\0"
addresses = [l for l in fd if l.find("inet ") >= 0]
if not addresses:
warning("No IPv4 address found on %s !", ifname)
return b"\0\0\0\0"
address = addresses[0].split()[1]
if '/' in address:
address = address.split("/")[0]
return socket.inet_pton(socket.AF_INET, address) | Returns the IPv4 address configured on 'ifname', packed with inet_pton. | ### Input:
Returns the IPv4 address configured on 'ifname', packed with inet_pton.
### Response:
def get_if_raw_addr(ifname):
try:
fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError as msg:
warning("Failed to execute ifconfig: (%s)", msg)
return b"\0\0\0\0"
addresses = [l for l in fd if l.find("inet ") >= 0]
if not addresses:
warning("No IPv4 address found on %s !", ifname)
return b"\0\0\0\0"
address = addresses[0].split()[1]
if '/' in address:
address = address.split("/")[0]
return socket.inet_pton(socket.AF_INET, address) |
def mimetype(self, value: str) -> None:
if (
value.startswith('text/') or value == 'application/xml' or
(value.startswith('application/') and value.endswith('+xml'))
):
mimetype = f"{value}; charset={self.charset}"
else:
mimetype = value
self.headers['Content-Type'] = mimetype | Set the mimetype to the value. | ### Input:
Set the mimetype to the value.
### Response:
def mimetype(self, value: str) -> None:
if (
value.startswith('text/') or value == 'application/xml' or
(value.startswith('application/') and value.endswith('+xml'))
):
mimetype = f"{value}; charset={self.charset}"
else:
mimetype = value
self.headers['Content-Type'] = mimetype |
def Multiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().MultiplicationVertex, label, cast_to_double_vertex(left), cast_to_double_vertex(right)) | Multiplies one vertex by another
:param left: vertex to be multiplied
:param right: vertex to be multiplied | ### Input:
Multiplies one vertex by another
:param left: vertex to be multiplied
:param right: vertex to be multiplied
### Response:
def Multiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().MultiplicationVertex, label, cast_to_double_vertex(left), cast_to_double_vertex(right)) |
def copy_memory(self, address, size):
start_time = time.time()
map_bytes = self._cpu._raw_read(address, size)
self._emu.mem_write(address, map_bytes)
if time.time() - start_time > 3:
logger.info(f"Copying {hr_size(size)} map at {hex(address)} took {time.time() - start_time} seconds") | Copy the bytes from address to address+size into Unicorn
Used primarily for copying memory maps
:param address: start of buffer to copy
:param size: How many bytes to copy | ### Input:
Copy the bytes from address to address+size into Unicorn
Used primarily for copying memory maps
:param address: start of buffer to copy
:param size: How many bytes to copy
### Response:
def copy_memory(self, address, size):
start_time = time.time()
map_bytes = self._cpu._raw_read(address, size)
self._emu.mem_write(address, map_bytes)
if time.time() - start_time > 3:
logger.info(f"Copying {hr_size(size)} map at {hex(address)} took {time.time() - start_time} seconds") |
def write(fn, a, **kwargs):
ext = fn[fn.rfind():].split()
if ext[0] == or ext[0] == :
return write_atoms(fn, a)
elif ext[0] == :
return write_lammps_data(fn, a, velocities=True, **kwargs)
elif ext[0] == :
return NetCDFTrajectory(fn, ).write(a)
else:
return ase.io.write(fn, a, **kwargs) | Convenience function: Detect file extension and write via Atomistica or ASE.
Has support for writing LAMMPS data files. | ### Input:
Convenience function: Detect file extension and write via Atomistica or ASE.
Has support for writing LAMMPS data files.
### Response:
def write(fn, a, **kwargs):
ext = fn[fn.rfind():].split()
if ext[0] == or ext[0] == :
return write_atoms(fn, a)
elif ext[0] == :
return write_lammps_data(fn, a, velocities=True, **kwargs)
elif ext[0] == :
return NetCDFTrajectory(fn, ).write(a)
else:
return ase.io.write(fn, a, **kwargs) |
def resume(self, obj):
if isinstance(obj, str):
savefile = open(obj, 'rb')
else:
savefile = obj
game = pickle.loads(zlib.decompress(savefile.read()))
if savefile is not obj:
savefile.close()
game.random_generator = random.Random()
game.random_generator.setstate(game.random_state)
del game.random_state
return game | Returns an Adventure game saved to the given file. | ### Input:
Returns an Adventure game saved to the given file.
### Response:
def resume(self, obj):
if isinstance(obj, str):
savefile = open(obj, 'rb')
else:
savefile = obj
game = pickle.loads(zlib.decompress(savefile.read()))
if savefile is not obj:
savefile.close()
game.random_generator = random.Random()
game.random_generator.setstate(game.random_state)
del game.random_state
return game |
def hexstr(x, onlyasc=0, onlyhex=0, color=False):
x = bytes_encode(x)
_sane_func = sane_color if color else sane
s = []
if not onlyasc:
s.append(" ".join("%02X" % orb(b) for b in x))
if not onlyhex:
s.append(_sane_func(x))
return " ".join(s) | Build a fancy tcpdump like hex from bytes. | ### Input:
Build a fancy tcpdump like hex from bytes.
### Response:
def hexstr(x, onlyasc=0, onlyhex=0, color=False):
x = bytes_encode(x)
_sane_func = sane_color if color else sane
s = []
if not onlyasc:
s.append(" ".join("%02X" % orb(b) for b in x))
if not onlyhex:
s.append(_sane_func(x))
return " ".join(s) |
def decrypt(self, encrypted_number):
relevant_private_key = self.__keyring[encrypted_number.public_key]
return relevant_private_key.decrypt(encrypted_number) | Return the decrypted & decoded plaintext of *encrypted_number*.
Args:
encrypted_number (EncryptedNumber): encrypted against a known public
key, i.e., one for which the private key is on this keyring.
Returns:
the int or float that *encrypted_number* was holding. N.B. if
the number returned is an integer, it will not be of type
float.
Raises:
KeyError: If the keyring does not hold the private key that
decrypts *encrypted_number*. | ### Input:
Return the decrypted & decoded plaintext of *encrypted_number*.
Args:
encrypted_number (EncryptedNumber): encrypted against a known public
key, i.e., one for which the private key is on this keyring.
Returns:
the int or float that *encrypted_number* was holding. N.B. if
the number returned is an integer, it will not be of type
float.
Raises:
KeyError: If the keyring does not hold the private key that
decrypts *encrypted_number*.
### Response:
def decrypt(self, encrypted_number):
relevant_private_key = self.__keyring[encrypted_number.public_key]
return relevant_private_key.decrypt(encrypted_number) |
def find_geometry(self, physics):
r
if in physics.settings.keys():
geom = self.geometries()[physics.settings[]]
return geom
for geo in self.geometries().values():
if physics in self.find_physics(geometry=geo):
return geo
raise Exception(+physics.name) | r"""
Find the Geometry associated with a given Physics
Parameters
----------
physics : OpenPNM Physics Object
Must be a Physics object
Returns
-------
An OpenPNM Geometry object
Raises
------
If no Geometry object can be found, then an Exception is raised. | ### Input:
r"""
Find the Geometry associated with a given Physics
Parameters
----------
physics : OpenPNM Physics Object
Must be a Physics object
Returns
-------
An OpenPNM Geometry object
Raises
------
If no Geometry object can be found, then an Exception is raised.
### Response:
def find_geometry(self, physics):
r
if in physics.settings.keys():
geom = self.geometries()[physics.settings[]]
return geom
for geo in self.geometries().values():
if physics in self.find_physics(geometry=geo):
return geo
raise Exception(+physics.name) |
def request_port_forward(self, address, port, handler=None):
if not self.active:
raise SSHException()
address = str(address)
port = int(port)
response = self.global_request(, (address, port), wait=True)
if response is None:
raise SSHException()
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port | Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request | ### Input:
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
### Response:
def request_port_forward(self, address, port, handler=None):
if not self.active:
raise SSHException()
address = str(address)
port = int(port)
response = self.global_request(, (address, port), wait=True)
if response is None:
raise SSHException()
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port |
def find_library(series_path):
for location in cfg.CONF.libraries:
if os.path.isdir(os.path.join(location, series_path)):
return location
segments = series_path.split(os.sep)[:-1]
while segments:
seg_path = os.path.join(*segments)
if os.path.isdir(os.path.join(location, seg_path)):
return location
segments = segments[:-1]
return cfg.CONF.default_library | Search for the location of a series within the library.
:param str series_path: name of the relative path of the series
:returns: library path
:rtype: str | ### Input:
Search for the location of a series within the library.
:param str series_path: name of the relative path of the series
:returns: library path
:rtype: str
### Response:
def find_library(series_path):
for location in cfg.CONF.libraries:
if os.path.isdir(os.path.join(location, series_path)):
return location
segments = series_path.split(os.sep)[:-1]
while segments:
seg_path = os.path.join(*segments)
if os.path.isdir(os.path.join(location, seg_path)):
return location
segments = segments[:-1]
return cfg.CONF.default_library |
def delete_snapshot(name, snap_name, runas=None, all=False):
*unneeded snapshot*Snapshot for linked clone
strict = not all
name = salt.utils.data.decode(name)
snap_ids = _validate_snap_name(name, snap_name, strict=strict, runas=runas)
if isinstance(snap_ids, six.string_types):
snap_ids = [snap_ids]
ret = {}
for snap_id in snap_ids:
snap_id = snap_id.strip()
args = [name, , snap_id]
ret[snap_id] = prlctl(, args, runas=runas)
ret_keys = list(ret.keys())
if len(ret_keys) == 1:
return ret[ret_keys[0]]
else:
return ret | Delete a snapshot
.. note::
Deleting a snapshot from which other snapshots are dervied will not
delete the derived snapshots
:param str name:
Name/ID of VM whose snapshot will be deleted
:param str snap_name:
Name/ID of snapshot to delete
:param str runas:
The user that the prlctl command will be run as
:param bool all:
Delete all snapshots having the name given
.. versionadded:: 2016.11.0
Example:
.. code-block:: bash
salt '*' parallels.delete_snapshot macvm 'unneeded snapshot' runas=macdev
salt '*' parallels.delete_snapshot macvm 'Snapshot for linked clone' all=True runas=macdev | ### Input:
Delete a snapshot
.. note::
Deleting a snapshot from which other snapshots are dervied will not
delete the derived snapshots
:param str name:
Name/ID of VM whose snapshot will be deleted
:param str snap_name:
Name/ID of snapshot to delete
:param str runas:
The user that the prlctl command will be run as
:param bool all:
Delete all snapshots having the name given
.. versionadded:: 2016.11.0
Example:
.. code-block:: bash
salt '*' parallels.delete_snapshot macvm 'unneeded snapshot' runas=macdev
salt '*' parallels.delete_snapshot macvm 'Snapshot for linked clone' all=True runas=macdev
### Response:
def delete_snapshot(name, snap_name, runas=None, all=False):
*unneeded snapshot*Snapshot for linked clone
strict = not all
name = salt.utils.data.decode(name)
snap_ids = _validate_snap_name(name, snap_name, strict=strict, runas=runas)
if isinstance(snap_ids, six.string_types):
snap_ids = [snap_ids]
ret = {}
for snap_id in snap_ids:
snap_id = snap_id.strip()
args = [name, , snap_id]
ret[snap_id] = prlctl(, args, runas=runas)
ret_keys = list(ret.keys())
if len(ret_keys) == 1:
return ret[ret_keys[0]]
else:
return ret |
def parse_250_row(row: list) -> BasicMeterData:
return BasicMeterData(row[1], row[2], row[3], row[4], row[5],
row[6], row[7], float(row[8]),
parse_datetime(row[9]), row[10], row[11], row[12],
float(row[13]), parse_datetime(
row[14]), row[15], row[16], row[17],
float(row[18]), row[19], row[20],
parse_datetime(row[21]), parse_datetime(row[22])) | Parse basic meter data record (250) | ### Input:
Parse basic meter data record (250)
### Response:
def parse_250_row(row: list) -> BasicMeterData:
return BasicMeterData(row[1], row[2], row[3], row[4], row[5],
row[6], row[7], float(row[8]),
parse_datetime(row[9]), row[10], row[11], row[12],
float(row[13]), parse_datetime(
row[14]), row[15], row[16], row[17],
float(row[18]), row[19], row[20],
parse_datetime(row[21]), parse_datetime(row[22])) |
def do_dictsort(value, case_sensitive=False, by=, reverse=False):
if by == :
pos = 0
elif by == :
pos = 1
else:
raise FilterArgumentError(
)
def sort_func(item):
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse) | Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive | ### Input:
Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
### Response:
def do_dictsort(value, case_sensitive=False, by=, reverse=False):
if by == :
pos = 0
elif by == :
pos = 1
else:
raise FilterArgumentError(
)
def sort_func(item):
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse) |
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:
return RecordTask(self.__hardware_source, frame_parameters, channels_enabled) | Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use. | ### Input:
Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use.
### Response:
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:
return RecordTask(self.__hardware_source, frame_parameters, channels_enabled) |
def estimate_row_means(
self,
X,
observed,
column_means,
column_scales):
n_rows, n_cols = X.shape
column_means = np.asarray(column_means)
if len(column_means) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_means.shape))
X = X - column_means.reshape((1, n_cols))
column_weights = 1.0 / column_scales
X *= column_weights.reshape((1, n_cols))
row_means = np.zeros(n_rows, dtype=X.dtype)
row_residual_sums = np.nansum(X, axis=1)
for i in range(n_rows):
row_mask = observed[i, :]
sum_weights = column_weights[row_mask].sum()
row_means[i] = row_residual_sums[i] / sum_weights
return row_means | row_center[i] =
sum{j in observed[i, :]}{
(1 / column_scale[j]) * (X[i, j] - column_center[j])
}
------------------------------------------------------------
sum{j in observed[i, :]}{1 / column_scale[j]} | ### Input:
row_center[i] =
sum{j in observed[i, :]}{
(1 / column_scale[j]) * (X[i, j] - column_center[j])
}
------------------------------------------------------------
sum{j in observed[i, :]}{1 / column_scale[j]}
### Response:
def estimate_row_means(
self,
X,
observed,
column_means,
column_scales):
n_rows, n_cols = X.shape
column_means = np.asarray(column_means)
if len(column_means) != n_cols:
raise ValueError("Expected length %d but got shape %s" % (
n_cols, column_means.shape))
X = X - column_means.reshape((1, n_cols))
column_weights = 1.0 / column_scales
X *= column_weights.reshape((1, n_cols))
row_means = np.zeros(n_rows, dtype=X.dtype)
row_residual_sums = np.nansum(X, axis=1)
for i in range(n_rows):
row_mask = observed[i, :]
sum_weights = column_weights[row_mask].sum()
row_means[i] = row_residual_sums[i] / sum_weights
return row_means |
def setup_from_yamlfile(self, yamlfile, output_shell=False):
self.logger.debug( + yamlfile)
with open(yamlfile, ) as yamlfd:
yamlconfig = yaml.load(yamlfd)
for instance in yamlconfig[]:
self.add_instance(instance[].upper(),
instance,
output_shell=output_shell)
if in yamlconfig.keys():
self.logger.debug( + str(yamlconfig[]))
self.config = yamlconfig[].copy() | Setup from yaml config
@param yamlfile: path to yaml config file
@type yamlfile: str
@param output_shell: write output from this connection to standard
output
@type output_shell: bool | ### Input:
Setup from yaml config
@param yamlfile: path to yaml config file
@type yamlfile: str
@param output_shell: write output from this connection to standard
output
@type output_shell: bool
### Response:
def setup_from_yamlfile(self, yamlfile, output_shell=False):
self.logger.debug( + yamlfile)
with open(yamlfile, ) as yamlfd:
yamlconfig = yaml.load(yamlfd)
for instance in yamlconfig[]:
self.add_instance(instance[].upper(),
instance,
output_shell=output_shell)
if in yamlconfig.keys():
self.logger.debug( + str(yamlconfig[]))
self.config = yamlconfig[].copy() |
def _read_pcm_information(self):
temp_dict = read_pattern(
self.text, {
"g_electrostatic": r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_cavitation": r"\s*G_cavitation\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_dispersion": r"\s*G_dispersion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_repulsion": r"\s*G_repulsion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"total_contribution_pcm": r"\s*Total\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
}
)
if temp_dict.get("g_electrostatic") is None:
self.data["g_electrostatic"] = None
else:
self.data["g_electrostatic"] = float(temp_dict.get("g_electrostatic")[0][0])
if temp_dict.get("g_cavitation") is None:
self.data["g_cavitation"] = None
else:
self.data["g_cavitation"] = float(temp_dict.get("g_cavitation")[0][0])
if temp_dict.get("g_dispersion") is None:
self.data["g_dispersion"] = None
else:
self.data["g_dispersion"] = float(temp_dict.get("g_dispersion")[0][0])
if temp_dict.get("g_repulsion") is None:
self.data["g_repulsion"] = None
else:
self.data["g_repulsion"] = float(temp_dict.get("g_repulsion")[0][0])
if temp_dict.get("total_contribution_pcm") is None:
self.data["total_contribution_pcm"] = []
else:
self.data["total_contribution_pcm"] = float(temp_dict.get("total_contribution_pcm")[0][0]) | Parses information from PCM solvent calculations. | ### Input:
Parses information from PCM solvent calculations.
### Response:
def _read_pcm_information(self):
temp_dict = read_pattern(
self.text, {
"g_electrostatic": r"\s*G_electrostatic\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_cavitation": r"\s*G_cavitation\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_dispersion": r"\s*G_dispersion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"g_repulsion": r"\s*G_repulsion\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
"total_contribution_pcm": r"\s*Total\s+=\s+([\d\-\.]+)\s+hartree\s+=\s+([\d\-\.]+)\s+kcal/mol\s*",
}
)
if temp_dict.get("g_electrostatic") is None:
self.data["g_electrostatic"] = None
else:
self.data["g_electrostatic"] = float(temp_dict.get("g_electrostatic")[0][0])
if temp_dict.get("g_cavitation") is None:
self.data["g_cavitation"] = None
else:
self.data["g_cavitation"] = float(temp_dict.get("g_cavitation")[0][0])
if temp_dict.get("g_dispersion") is None:
self.data["g_dispersion"] = None
else:
self.data["g_dispersion"] = float(temp_dict.get("g_dispersion")[0][0])
if temp_dict.get("g_repulsion") is None:
self.data["g_repulsion"] = None
else:
self.data["g_repulsion"] = float(temp_dict.get("g_repulsion")[0][0])
if temp_dict.get("total_contribution_pcm") is None:
self.data["total_contribution_pcm"] = []
else:
self.data["total_contribution_pcm"] = float(temp_dict.get("total_contribution_pcm")[0][0]) |
def isochone_ratio(e, rd, r_hyp):
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime | Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio | ### Input:
Get the isochone ratio as described in Spudich et al. (2013) PEER
report, page 88.
:param e:
a float defining the E-path length, which is the distance from
Pd(direction) point to hypocentre. In km.
:param rd:
float, distance from the site to the direct point.
:param r_hyp:
float, the hypocentre distance.
:returns:
c_prime, a float defining the isochone ratio
### Response:
def isochone_ratio(e, rd, r_hyp):
if e == 0.:
c_prime = 0.8
elif e > 0.:
c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e))
return c_prime |
def reorient_image(image, axis1, axis2=None, doreflection=False, doscale=0, txfn=None):
inpixeltype = image.pixeltype
if image.pixeltype != :
image = image.clone()
axis_was_none = False
if axis2 is None:
axis_was_none = True
axis2 = [0]*image.dimension
axis1 = np.array(axis1)
axis2 = np.array(axis2)
axis1 = axis1 / np.sqrt(np.sum(axis1*axis1)) * (-1)
axis1 = axis1.astype()
if not axis_was_none:
axis2 = axis2 / np.sqrt(np.sum(axis2*axis2)) * (-1)
axis2 = axis2.astype()
else:
axis2 = np.array([0]*image.dimension).astype()
if txfn is None:
txfn = mktemp(suffix=)
if isinstance(doreflection, tuple):
doreflection = list(doreflection)
if not isinstance(doreflection, list):
doreflection = [doreflection]
if isinstance(doscale, tuple):
doscale = list(doscale)
if not isinstance(doscale, list):
doscale = [doscale]
if len(doreflection) == 1:
doreflection = [doreflection[0]]*image.dimension
if len(doscale) == 1:
doscale = [doscale[0]]*image.dimension
libfn = utils.get_lib_fn( % image._libsuffix)
libfn(image.pointer, txfn, axis1.tolist(), axis2.tolist(), doreflection, doscale)
image2 = apply_transforms(image, image, transformlist=[txfn])
if image.pixeltype != inpixeltype:
image2 = image2.clone(inpixeltype)
return {:image2,
:txfn} | Align image along a specified axis
ANTsR function: `reorientImage`
Arguments
---------
image : ANTsImage
image to reorient
axis1 : list/tuple of integers
vector of size dim, might need to play w/axis sign
axis2 : list/tuple of integers
vector of size dim for 3D
doreflection : boolean
whether to reflect
doscale : scalar value
1 allows automated estimate of scaling
txfn : string
file name for transformation
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> ants.reorient_image(image, (1,0)) | ### Input:
Align image along a specified axis
ANTsR function: `reorientImage`
Arguments
---------
image : ANTsImage
image to reorient
axis1 : list/tuple of integers
vector of size dim, might need to play w/axis sign
axis2 : list/tuple of integers
vector of size dim for 3D
doreflection : boolean
whether to reflect
doscale : scalar value
1 allows automated estimate of scaling
txfn : string
file name for transformation
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> ants.reorient_image(image, (1,0))
### Response:
def reorient_image(image, axis1, axis2=None, doreflection=False, doscale=0, txfn=None):
inpixeltype = image.pixeltype
if image.pixeltype != :
image = image.clone()
axis_was_none = False
if axis2 is None:
axis_was_none = True
axis2 = [0]*image.dimension
axis1 = np.array(axis1)
axis2 = np.array(axis2)
axis1 = axis1 / np.sqrt(np.sum(axis1*axis1)) * (-1)
axis1 = axis1.astype()
if not axis_was_none:
axis2 = axis2 / np.sqrt(np.sum(axis2*axis2)) * (-1)
axis2 = axis2.astype()
else:
axis2 = np.array([0]*image.dimension).astype()
if txfn is None:
txfn = mktemp(suffix=)
if isinstance(doreflection, tuple):
doreflection = list(doreflection)
if not isinstance(doreflection, list):
doreflection = [doreflection]
if isinstance(doscale, tuple):
doscale = list(doscale)
if not isinstance(doscale, list):
doscale = [doscale]
if len(doreflection) == 1:
doreflection = [doreflection[0]]*image.dimension
if len(doscale) == 1:
doscale = [doscale[0]]*image.dimension
libfn = utils.get_lib_fn( % image._libsuffix)
libfn(image.pointer, txfn, axis1.tolist(), axis2.tolist(), doreflection, doscale)
image2 = apply_transforms(image, image, transformlist=[txfn])
if image.pixeltype != inpixeltype:
image2 = image2.clone(inpixeltype)
return {:image2,
:txfn} |
def get_ip_address(self, x, y):
chip_info = self.get_chip_info(x=x, y=y)
return chip_info.ip_address if chip_info.ethernet_up else None | Get the IP address of a particular SpiNNaker chip's Ethernet link.
Returns
-------
str or None
The IPv4 address (as a string) of the chip's Ethernet link or None
if the chip does not have an Ethernet connection or the link is
currently down. | ### Input:
Get the IP address of a particular SpiNNaker chip's Ethernet link.
Returns
-------
str or None
The IPv4 address (as a string) of the chip's Ethernet link or None
if the chip does not have an Ethernet connection or the link is
currently down.
### Response:
def get_ip_address(self, x, y):
chip_info = self.get_chip_info(x=x, y=y)
return chip_info.ip_address if chip_info.ethernet_up else None |
def MX(domain, resolve=False, nameserver=None):
resolves limited to one IP, because although in practice itt use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
dig.MXThis function requires dig, which is not currently available' | Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com | ### Input:
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
### Response:
def MX(domain, resolve=False, nameserver=None):
resolves limited to one IP, because although in practice itt use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.MX google.com
dig.MXThis function requires dig, which is not currently available' |
def send_emails_from_template(to_emails, from_email, subject,
markdown_template=None, text_template=None,
html_template=None, fail_silently=False,
context=None, attachments=None, **kwargs):
if not to_emails:
return
if context is None:
context = {}
if markdown_template:
try:
from markdown import markdown
except ImportError:
raise ImportError(
)
base_html_template = getattr(settings,
,
)
text_content = render_to_string(markdown_template, context)
context[] = markdown(text_content)
html_content = render_to_string(base_html_template, context)
else:
text_content = render_to_string(text_template, context)
html_content = render_to_string(html_template, context)
emails = []
for email_address in to_emails:
email = EmailMultiAlternatives(
subject=subject,
body=text_content,
from_email=from_email,
to=[email_address],
alternatives=[(html_content, )]
)
if attachments:
email.mixed_subtype =
for attachment in attachments:
email.attach(attachment)
emails.append(email)
connection = mail.get_connection()
connection.open()
connection.send_messages(emails)
connection.close() | Send many emails from single template. Each email address listed in the
``to_emails`` will receive an separate email.
:param to_emails: list of email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
:param attachments: list of additional attachments to add to the email
(example: email.mime.image.MIMEImage object). The attachments will be
added to each email sent. | ### Input:
Send many emails from single template. Each email address listed in the
``to_emails`` will receive an separate email.
:param to_emails: list of email address to send the email to
:param from_email: the email address the email will be from
:param subject: the subject of the email
:param markdown_template: the markdown syntax template to use for the
email. If provided, this will generate both the text and html versions
of the email. You must have the "markdown" library installed in order
to use this. pip install markdown.
:param text_template: the template for the text version of the email. This
can be omitted if the markdown_template is provided.
:param html_template: the template for the html version of the email. This
can be omitted if the markdown_template is provided.
:param context: the context for the email templates
:param attachments: list of additional attachments to add to the email
(example: email.mime.image.MIMEImage object). The attachments will be
added to each email sent.
### Response:
def send_emails_from_template(to_emails, from_email, subject,
markdown_template=None, text_template=None,
html_template=None, fail_silently=False,
context=None, attachments=None, **kwargs):
if not to_emails:
return
if context is None:
context = {}
if markdown_template:
try:
from markdown import markdown
except ImportError:
raise ImportError(
)
base_html_template = getattr(settings,
,
)
text_content = render_to_string(markdown_template, context)
context[] = markdown(text_content)
html_content = render_to_string(base_html_template, context)
else:
text_content = render_to_string(text_template, context)
html_content = render_to_string(html_template, context)
emails = []
for email_address in to_emails:
email = EmailMultiAlternatives(
subject=subject,
body=text_content,
from_email=from_email,
to=[email_address],
alternatives=[(html_content, )]
)
if attachments:
email.mixed_subtype =
for attachment in attachments:
email.attach(attachment)
emails.append(email)
connection = mail.get_connection()
connection.open()
connection.send_messages(emails)
connection.close() |
def get_rt_data(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x[] = code
col_list = [
, , , , ,
, , ,
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table | 获取指定股票的分时数据
:param code: 股票代码,例如,HK.00700,US.APPL
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== ==========================================================================
参数 类型 说明
===================== =========== ==========================================================================
code str 股票代码
time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)
is_blank bool 数据状态;正常数据为False,伪造数据为True
opened_mins int 零点到当前多少分钟
cur_price float 当前价格
last_close float 昨天收盘的价格
avg_price float 平均价格
volume float 成交量
turnover float 成交金额
===================== =========== ========================================================================== | ### Input:
获取指定股票的分时数据
:param code: 股票代码,例如,HK.00700,US.APPL
:return: (ret, data)
ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下
ret != RET_OK 返回错误字符串
===================== =========== ==========================================================================
参数 类型 说明
===================== =========== ==========================================================================
code str 股票代码
time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)
is_blank bool 数据状态;正常数据为False,伪造数据为True
opened_mins int 零点到当前多少分钟
cur_price float 当前价格
last_close float 昨天收盘的价格
avg_price float 平均价格
volume float 成交量
turnover float 成交金额
===================== =========== ==========================================================================
### Response:
def get_rt_data(self, code):
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of param in code is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
RtDataQuery.pack_req, RtDataQuery.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, rt_data_list = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
for x in rt_data_list:
x[] = code
col_list = [
, , , , ,
, , ,
]
rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)
return RET_OK, rt_data_table |
def ref(host, seq, takeoff, emergency=False):
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, , seq, [p]) | Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines | ### Input:
Basic behaviour of the drone: take-off/landing, emergency stop/reset)
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
### Response:
def ref(host, seq, takeoff, emergency=False):
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, , seq, [p]) |
def _traverse(summary, function, *args):
function(summary, *args)
for row in summary:
function(row, *args)
for item in row:
function(item, *args) | Traverse all objects of a summary and call function with each as a
parameter.
Using this function, the following objects will be traversed:
- the summary
- each row
- each item of a row | ### Input:
Traverse all objects of a summary and call function with each as a
parameter.
Using this function, the following objects will be traversed:
- the summary
- each row
- each item of a row
### Response:
def _traverse(summary, function, *args):
function(summary, *args)
for row in summary:
function(row, *args)
for item in row:
function(item, *args) |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling(C, rup.mag) +
self._get_distance_scaling(C, dists, rup.mag) +
self._get_site_term(C, sites.vs30))
mean -= np.log(g)
stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types)
return mean + self.adjustment_factor, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | ### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling(C, rup.mag) +
self._get_distance_scaling(C, dists, rup.mag) +
self._get_site_term(C, sites.vs30))
mean -= np.log(g)
stddevs = self.get_stddevs(C, sites.vs30.shape, stddev_types)
return mean + self.adjustment_factor, stddevs |
def log_results():
from furious.context import get_current_async
async = get_current_async()
for result in async.result:
logging.info(result) | This is the callback that is run once the Async task is finished. It
takes the output from grep and logs it. | ### Input:
This is the callback that is run once the Async task is finished. It
takes the output from grep and logs it.
### Response:
def log_results():
from furious.context import get_current_async
async = get_current_async()
for result in async.result:
logging.info(result) |
async def on_message(message):
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
await client.send_typing(channel)
posts = api_reddit.get_top10()
if posts:
for post in posts:
embed = ui_embed.success(channel, post)
await embed.send()
else:
embed = ui_embed.no_results(channel)
await embed.send() | The on_message event handler for this module
Args:
message (discord.Message): Input message | ### Input:
The on_message event handler for this module
Args:
message (discord.Message): Input message
### Response:
async def on_message(message):
server = message.server
author = message.author
channel = message.channel
content = message.content
data = datatools.get_data()
if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
return
await client.send_typing(channel)
posts = api_reddit.get_top10()
if posts:
for post in posts:
embed = ui_embed.success(channel, post)
await embed.send()
else:
embed = ui_embed.no_results(channel)
await embed.send() |
def process_extra_vars(extra_vars_list, force_json=True):
extra_vars = {}
extra_vars_yaml = ""
for extra_vars_opt in extra_vars_list:
if extra_vars_opt.startswith("@"):
with open(extra_vars_opt[1:], ) as f:
extra_vars_opt = f.read()
opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)
else:
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
if any(line.startswith("
extra_vars_yaml += extra_vars_opt + "\n"
elif extra_vars_opt != "":
extra_vars_yaml += yaml.dump(
opt_dict, default_flow_style=False) + "\n"
extra_vars.update(opt_dict)
if not force_json:
try:
try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)
assert type(try_dict) is dict
debug.log(, header=, nl=2)
return extra_vars_yaml.rstrip()
except Exception:
debug.log(,
header=, nl=2)
if extra_vars == {}:
return ""
return json.dumps(extra_vars, ensure_ascii=False) | Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
parse_kv (bool): whether to allow key=value syntax.
force_json (bool): if True, always output json. | ### Input:
Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
parse_kv (bool): whether to allow key=value syntax.
force_json (bool): if True, always output json.
### Response:
def process_extra_vars(extra_vars_list, force_json=True):
extra_vars = {}
extra_vars_yaml = ""
for extra_vars_opt in extra_vars_list:
if extra_vars_opt.startswith("@"):
with open(extra_vars_opt[1:], ) as f:
extra_vars_opt = f.read()
opt_dict = string_to_dict(extra_vars_opt, allow_kv=False)
else:
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
if any(line.startswith("
extra_vars_yaml += extra_vars_opt + "\n"
elif extra_vars_opt != "":
extra_vars_yaml += yaml.dump(
opt_dict, default_flow_style=False) + "\n"
extra_vars.update(opt_dict)
if not force_json:
try:
try_dict = yaml.load(extra_vars_yaml, Loader=yaml.SafeLoader)
assert type(try_dict) is dict
debug.log(, header=, nl=2)
return extra_vars_yaml.rstrip()
except Exception:
debug.log(,
header=, nl=2)
if extra_vars == {}:
return ""
return json.dumps(extra_vars, ensure_ascii=False) |
def get_real_related(self, id_equip):
url = + str(id_equip) +
code, xml = self.submit(None, , url)
data = self.response(code, xml)
return data | Find reals related with equipment
:param id_equip: Identifier of equipment
:return: Following dictionary:
::
{'vips': [{'port_real': < port_real >,
'server_pool_member_id': < server_pool_member_id >,
'ip': < ip >,
'port_vip': < port_vip >,
'host_name': < host_name >,
'id_vip': < id_vip >, ...],
'equip_name': < equip_name > }}
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | ### Input:
Find reals related with equipment
:param id_equip: Identifier of equipment
:return: Following dictionary:
::
{'vips': [{'port_real': < port_real >,
'server_pool_member_id': < server_pool_member_id >,
'ip': < ip >,
'port_vip': < port_vip >,
'host_name': < host_name >,
'id_vip': < id_vip >, ...],
'equip_name': < equip_name > }}
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def get_real_related(self, id_equip):
url = + str(id_equip) +
code, xml = self.submit(None, , url)
data = self.response(code, xml)
return data |
def _write_continue(self, value):
self._FITS.write_continue(self._ext+1, str(value)) | Write history text into the header | ### Input:
Write history text into the header
### Response:
def _write_continue(self, value):
self._FITS.write_continue(self._ext+1, str(value)) |
def _get_column_names(self):
column_names = set()
for column in self.df:
column_names.add(column)
for column in self.unmaterialized_cols:
column_names.add(column)
return list(column_names) | Summary
Returns:
TYPE: Description | ### Input:
Summary
Returns:
TYPE: Description
### Response:
def _get_column_names(self):
column_names = set()
for column in self.df:
column_names.add(column)
for column in self.unmaterialized_cols:
column_names.add(column)
return list(column_names) |
def transform_coords(self, coords):
new_coords = []
for x in coords:
x = np.array(x)
Q = np.linalg.inv(self.P)
x_ = np.matmul(Q, (x - self.p))
new_coords.append(x_.tolist())
return new_coords | Takes a list of co-ordinates and transforms them.
:param coords: List of coords
:return: | ### Input:
Takes a list of co-ordinates and transforms them.
:param coords: List of coords
:return:
### Response:
def transform_coords(self, coords):
new_coords = []
for x in coords:
x = np.array(x)
Q = np.linalg.inv(self.P)
x_ = np.matmul(Q, (x - self.p))
new_coords.append(x_.tolist())
return new_coords |
def merge(self, other):
other = self.coerce(other)
if list_diff(self.domain, other.domain) != []:
raise Exception("Incomparable orderings. Different domains")
if self.is_equal(other):
return self
elif other.is_entailed_by(self):
return self
elif self.is_entailed_by(other):
self.low, self.high = other.low, other.high
elif self.is_contradictory(other):
raise Contradiction("Cannot merge %s and %s" % (self, other))
else:
to_i = self.to_i
self.low = self.domain[max(map(to_i, [self.low, other.low]))]
self.high =self.domain[min(map(to_i, [self.high, other.high]))]
return self | Merges the two values | ### Input:
Merges the two values
### Response:
def merge(self, other):
other = self.coerce(other)
if list_diff(self.domain, other.domain) != []:
raise Exception("Incomparable orderings. Different domains")
if self.is_equal(other):
return self
elif other.is_entailed_by(self):
return self
elif self.is_entailed_by(other):
self.low, self.high = other.low, other.high
elif self.is_contradictory(other):
raise Contradiction("Cannot merge %s and %s" % (self, other))
else:
to_i = self.to_i
self.low = self.domain[max(map(to_i, [self.low, other.low]))]
self.high =self.domain[min(map(to_i, [self.high, other.high]))]
return self |
def list_group_events(self, group_url, upcoming=True):
title = % self.__class__.__name__
input_fields = {
: group_url
}
for key, value in input_fields.items():
if value:
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
url = % (self.endpoint, group_url)
params = {
: ,
:
}
if upcoming:
params[] =
response_details = self._get_request(url, params=params)
group_events = {
: []
}
for key, value in response_details.items():
if key != :
group_events[key] = value
for event in response_details[]:
group_events[].append(self._reconstruct_event(event))
return group_events | a method to retrieve a list of upcoming events hosted by group
:param group_url: string with meetup urlname field of group
:param upcoming: [optional] boolean to filter list to only future events
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({}) | ### Input:
a method to retrieve a list of upcoming events hosted by group
:param group_url: string with meetup urlname field of group
:param upcoming: [optional] boolean to filter list to only future events
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({})
### Response:
def list_group_events(self, group_url, upcoming=True):
title = % self.__class__.__name__
input_fields = {
: group_url
}
for key, value in input_fields.items():
if value:
object_title = % (title, key, str(value))
self.fields.validate(value, % key, object_title)
url = % (self.endpoint, group_url)
params = {
: ,
:
}
if upcoming:
params[] =
response_details = self._get_request(url, params=params)
group_events = {
: []
}
for key, value in response_details.items():
if key != :
group_events[key] = value
for event in response_details[]:
group_events[].append(self._reconstruct_event(event))
return group_events |
def to_text(path):
try:
from StringIO import StringIO
import sys
reload(sys)
sys.setdefaultencoding()
except ImportError:
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec =
laparams = LAParams()
laparams.all_texts = True
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
with open(path, ) as fp:
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
pages = PDFPage.get_pages(
fp,
pagenos,
maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True,
)
for page in pages:
interpreter.process_page(page)
device.close()
str = retstr.getvalue()
retstr.close()
return str.encode() | Wrapper around `pdfminer`.
Parameters
----------
path : str
path of electronic invoice in PDF
Returns
-------
str : str
returns extracted text from pdf | ### Input:
Wrapper around `pdfminer`.
Parameters
----------
path : str
path of electronic invoice in PDF
Returns
-------
str : str
returns extracted text from pdf
### Response:
def to_text(path):
try:
from StringIO import StringIO
import sys
reload(sys)
sys.setdefaultencoding()
except ImportError:
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec =
laparams = LAParams()
laparams.all_texts = True
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
with open(path, ) as fp:
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
pages = PDFPage.get_pages(
fp,
pagenos,
maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True,
)
for page in pages:
interpreter.process_page(page)
device.close()
str = retstr.getvalue()
retstr.close()
return str.encode() |
def resume_sm(self, xmlstream):
if self.running:
raise RuntimeError("Cannot resume Stream Management while"
" StanzaStream is running")
self._start_prepare(xmlstream, self.recv_stanza)
try:
response = yield from protocol.send_and_wait_for(
xmlstream,
[
nonza.SMResume(previd=self.sm_id,
counter=self._sm_inbound_ctr)
],
[
nonza.SMResumed,
nonza.SMFailed
]
)
if isinstance(response, nonza.SMFailed):
exc = errors.StreamNegotiationFailure(
"Server rejected SM resumption"
)
if response.counter is not None:
self.sm_ack(response.counter)
self._clear_unacked(StanzaState.DISCONNECTED)
xmlstream.stanza_parser.remove_class(
nonza.SMRequest)
xmlstream.stanza_parser.remove_class(
nonza.SMAcknowledgement)
self.stop_sm()
raise exc
self._resume_sm(response.counter)
except:
self._start_rollback(xmlstream)
raise
self._start_commit(xmlstream) | Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`. | ### Input:
Resume an SM-enabled stream using the given `xmlstream`.
If the server rejects the attempt to resume stream management, a
:class:`.errors.StreamNegotiationFailure` is raised. The stream is then
in stopped state and stream management has been stopped.
.. warning::
This method cannot and does not check whether the server advertised
support for stream management. Attempting to negotiate stream
management without server support might lead to termination of the
stream.
If the XML stream dies at any point during the negotiation, the SM
state is left unchanged. If no response has been received yet, the
exception which caused the stream to die is re-raised. The state of the
stream depends on whether the main task already noticed the dead
stream.
If negotiation succeeds, this coroutine resumes the stream management
session and initiates the retransmission of any unacked stanzas. The
stream is then in running state.
.. versionchanged:: 0.11
Support for using the counter value provided some servers on a
failed resumption was added. Stanzas which are covered by the
counter will be marked as :attr:`~StanzaState.ACKED`; other stanzas
will be marked as :attr:`~StanzaState.DISCONNECTED`.
This is in contrast to the behaviour when resumption fails
*without* a counter given. In that case, stanzas which have not
been acked are marked as :attr:`~StanzaState.SENT_WITHOUT_SM`.
### Response:
def resume_sm(self, xmlstream):
if self.running:
raise RuntimeError("Cannot resume Stream Management while"
" StanzaStream is running")
self._start_prepare(xmlstream, self.recv_stanza)
try:
response = yield from protocol.send_and_wait_for(
xmlstream,
[
nonza.SMResume(previd=self.sm_id,
counter=self._sm_inbound_ctr)
],
[
nonza.SMResumed,
nonza.SMFailed
]
)
if isinstance(response, nonza.SMFailed):
exc = errors.StreamNegotiationFailure(
"Server rejected SM resumption"
)
if response.counter is not None:
self.sm_ack(response.counter)
self._clear_unacked(StanzaState.DISCONNECTED)
xmlstream.stanza_parser.remove_class(
nonza.SMRequest)
xmlstream.stanza_parser.remove_class(
nonza.SMAcknowledgement)
self.stop_sm()
raise exc
self._resume_sm(response.counter)
except:
self._start_rollback(xmlstream)
raise
self._start_commit(xmlstream) |
def create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts):
with open_fs(resource_fs.root_path.split()[0]) as file_system:
for transcript in xml.findall():
try:
file_format = transcript.attrib[]
language_code = transcript.attrib[]
transcript_file_name = u.format(
edx_video_id=edx_video_id,
language_code=language_code,
file_format=file_format
)
import_transcript_from_fs(
edx_video_id=edx_video_id,
language_code=transcript.attrib[],
file_name=transcript_file_name,
provider=transcript.attrib[],
resource_fs=file_system,
static_dir=static_dir
)
except KeyError:
logger.warn("VAL: Required attributes are missing from xml, xml=[%s]", etree.tostring(transcript).strip())
for language_code, transcript_file_names in six.iteritems(external_transcripts):
for transcript_file_name in transcript_file_names:
import_transcript_from_fs(
edx_video_id=edx_video_id,
language_code=language_code,
file_name=transcript_file_name,
provider=TranscriptProviderType.CUSTOM,
resource_fs=file_system,
static_dir=static_dir
) | Create VideoTranscript objects.
Arguments:
xml (Element): lxml Element object.
edx_video_id (str): Video id of the video.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
external_transcripts (dict): A dict containing the list of names of the external transcripts.
Example:
{
'en': ['The_Flash.srt', 'Harry_Potter.srt'],
'es': ['Green_Arrow.srt']
} | ### Input:
Create VideoTranscript objects.
Arguments:
xml (Element): lxml Element object.
edx_video_id (str): Video id of the video.
resource_fs (OSFS): Import file system.
static_dir (str): The Directory to retrieve transcript file.
external_transcripts (dict): A dict containing the list of names of the external transcripts.
Example:
{
'en': ['The_Flash.srt', 'Harry_Potter.srt'],
'es': ['Green_Arrow.srt']
}
### Response:
def create_transcript_objects(xml, edx_video_id, resource_fs, static_dir, external_transcripts):
with open_fs(resource_fs.root_path.split()[0]) as file_system:
for transcript in xml.findall():
try:
file_format = transcript.attrib[]
language_code = transcript.attrib[]
transcript_file_name = u.format(
edx_video_id=edx_video_id,
language_code=language_code,
file_format=file_format
)
import_transcript_from_fs(
edx_video_id=edx_video_id,
language_code=transcript.attrib[],
file_name=transcript_file_name,
provider=transcript.attrib[],
resource_fs=file_system,
static_dir=static_dir
)
except KeyError:
logger.warn("VAL: Required attributes are missing from xml, xml=[%s]", etree.tostring(transcript).strip())
for language_code, transcript_file_names in six.iteritems(external_transcripts):
for transcript_file_name in transcript_file_names:
import_transcript_from_fs(
edx_video_id=edx_video_id,
language_code=language_code,
file_name=transcript_file_name,
provider=TranscriptProviderType.CUSTOM,
resource_fs=file_system,
static_dir=static_dir
) |
def shutdown(self):
self._shuttingDown = {key: Deferred()
for key in self.cachedConnections.keys()}
return DeferredList(
[maybeDeferred(p.transport.loseConnection)
for p in self.cachedConnections.values()]
+ self._shuttingDown.values()) | Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred} | ### Input:
Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred}
### Response:
def shutdown(self):
self._shuttingDown = {key: Deferred()
for key in self.cachedConnections.keys()}
return DeferredList(
[maybeDeferred(p.transport.loseConnection)
for p in self.cachedConnections.values()]
+ self._shuttingDown.values()) |
def add_file(self, *args):
for file_path in args:
self.files.append(FilePath(file_path, self)) | Add single file or list of files to bundle
:type: file_path: str|unicode | ### Input:
Add single file or list of files to bundle
:type: file_path: str|unicode
### Response:
def add_file(self, *args):
for file_path in args:
self.files.append(FilePath(file_path, self)) |
def group_data():
tr_obj = np.load("%s/ref_id.npz" %direc_ref)[]
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups) | Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs. | ### Input:
Load the reference data, and assign each object
a random integer from 0 to 7. Save the IDs.
### Response:
def group_data():
tr_obj = np.load("%s/ref_id.npz" %direc_ref)[]
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups) |
def parse_attributes(self, elt, ps):
if self.attribute_typecode_dict is None:
return
attributes = {}
for attr,what in self.attribute_typecode_dict.items():
namespaceURI,localName = None,attr
if type(attr) in _seqtypes:
namespaceURI,localName = attr
value = _find_attrNodeNS(elt, namespaceURI, localName)
self.logger.debug("Parsed Attribute (%s,%s) -- %s",
namespaceURI, localName, value)
if value is None: continue
attributes[attr] = what.text_to_data(value, elt, ps)
return attributes | find all attributes specified in the attribute_typecode_dict in
current element tag, if an attribute is found set it in the
self.attributes dictionary. Default to putting in String.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object. | ### Input:
find all attributes specified in the attribute_typecode_dict in
current element tag, if an attribute is found set it in the
self.attributes dictionary. Default to putting in String.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
### Response:
def parse_attributes(self, elt, ps):
if self.attribute_typecode_dict is None:
return
attributes = {}
for attr,what in self.attribute_typecode_dict.items():
namespaceURI,localName = None,attr
if type(attr) in _seqtypes:
namespaceURI,localName = attr
value = _find_attrNodeNS(elt, namespaceURI, localName)
self.logger.debug("Parsed Attribute (%s,%s) -- %s",
namespaceURI, localName, value)
if value is None: continue
attributes[attr] = what.text_to_data(value, elt, ps)
return attributes |
def find_suitable_period():
highest_acceptable_factor = int(math.sqrt(SIZE))
starting_point = len(VALID_CHARS) > 14 and len(VALID_CHARS) / 2 or 13
for p in range(starting_point, 7, -1) \
+ range(highest_acceptable_factor, starting_point + 1, -1) \
+ [6, 5, 4, 3, 2]:
if SIZE % p == 0:
return p
raise Exception("No valid period could be found for SIZE=%d.\n"
"Try avoiding prime numbers" % SIZE) | Automatically find a suitable period to use.
Factors are best, because they will have 1 left over when
dividing SIZE+1.
This only needs to be run once, on import. | ### Input:
Automatically find a suitable period to use.
Factors are best, because they will have 1 left over when
dividing SIZE+1.
This only needs to be run once, on import.
### Response:
def find_suitable_period():
highest_acceptable_factor = int(math.sqrt(SIZE))
starting_point = len(VALID_CHARS) > 14 and len(VALID_CHARS) / 2 or 13
for p in range(starting_point, 7, -1) \
+ range(highest_acceptable_factor, starting_point + 1, -1) \
+ [6, 5, 4, 3, 2]:
if SIZE % p == 0:
return p
raise Exception("No valid period could be found for SIZE=%d.\n"
"Try avoiding prime numbers" % SIZE) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.