input
stringlengths 2.65k
237k
| output
stringclasses 1
value |
---|---|
The current false positive rate"""
num = self.number_hashes * -1 * self.elements_added
dbl = num / self.number_bits
exp = math.exp(dbl)
return math.pow((1 - exp), self.number_hashes)
def intersection(self, second) -> Union["BloomFilter", None]:
    """Build a new Bloom Filter holding the intersection of this filter and `second`.

    Args:
        second (BloomFilter): The other Bloom Filter to intersect with
    Returns:
        BloomFilter: A new filter whose bits are set only where both inputs had them set
    Raises:
        TypeError: When `second` is neither a :class:`BloomFilter` nor a :class:`BloomFilterOnDisk`
    Note:
        `second` may be a BloomFilterOnDisk object
    Note:
        Returns `None` when `second` was built with different parameters
        (false_positive_rate and est_elements)"""
    if not _verify_not_type_mismatch(second):
        raise TypeError(MISMATCH_MSG)
    if self._verify_bloom_similarity(second) is False:
        return None
    combined = BloomFilter(
        self.estimated_elements,
        self.false_positive_rate,
        hash_function=self.hash_function,
    )
    # AND the raw array elements pairwise; only commonly-set bits survive.
    for idx in range(combined.bloom_length):
        combined._bloom[idx] = self._get_element(idx) & second._get_element(idx)
    combined.elements_added = combined.estimate_elements()
    return combined
def union(self, second: SimpleBloomT) -> Union["BloomFilter", None]:
    """Build a new Bloom Filter holding the union of this filter and `second`.

    Args:
        second (BloomFilter): The other Bloom Filter to merge with
    Returns:
        BloomFilter: A new filter whose bits are set wherever either input had them set
    Raises:
        TypeError: When `second` is neither a :class:`BloomFilter` nor a :class:`BloomFilterOnDisk`
    Note:
        `second` may be a BloomFilterOnDisk object
    Note:
        Returns `None` when `second` was built with different parameters
        (false_positive_rate and est_elements)"""
    if not _verify_not_type_mismatch(second):
        raise TypeError(MISMATCH_MSG)
    if self._verify_bloom_similarity(second) is False:
        return None
    combined = BloomFilter(
        self.estimated_elements,
        self.false_positive_rate,
        hash_function=self.hash_function,
    )
    # OR the raw array elements pairwise; any bit set in either input survives.
    for idx in range(self.bloom_length):
        combined._bloom[idx] = self._get_element(idx) | second._get_element(idx)
    combined.elements_added = combined.estimate_elements()
    return combined
def jaccard_index(self, second: SimpleBloomT) -> Union[float, None]:
    """Compute the Jaccard similarity between this Bloom Filter and `second`.

    Args:
        second (BloomFilter): The Bloom Filter to compare with
    Returns:
        float: A value between 0 and 1 where 1 means identical bit patterns and
        0 means no set bits in common
    Raises:
        TypeError: When `second` is neither a :class:`BloomFilter` nor a :class:`BloomFilterOnDisk`
    Note:
        `second` may be a BloomFilterOnDisk object
    Note:
        Returns `None` when `second` was built with different parameters
        (false_positive_rate and est_elements)"""
    if not _verify_not_type_mismatch(second):
        raise TypeError(MISMATCH_MSG)
    if self._verify_bloom_similarity(second) is False:
        return None
    union_bits = 0
    inter_bits = 0
    # Tally set bits of the pairwise OR (union) and AND (intersection).
    for idx in range(self.bloom_length):
        left = self._get_element(idx)
        right = second._get_element(idx)
        union_bits += bin(left | right).count("1")
        inter_bits += bin(left & right).count("1")
    # Two empty filters are defined as identical.
    if union_bits == 0:
        return 1.0
    return inter_bits / union_bits
# More private functions
@classmethod
def _get_optimized_params(cls, estimated_elements: int, false_positive_rate: float) -> Tuple[float, int, int]:
    """Compute the optimal Bloom Filter parameters for the requested capacity.

    Args:
        estimated_elements (int): Expected number of elements; must be > 0
        false_positive_rate (float): Desired false positive rate; must be in [0.0, 1.0)
    Returns:
        Tuple[float, int, int]: The false positive rate after round-tripping through
        the packed struct representation, the number of hash functions, and the
        number of bits in the filter
    Raises:
        InitializationError: When an argument is out of range or the derived
            number of hashes is zero"""
    valid_prms = isinstance(estimated_elements, Number) and estimated_elements > 0
    if not valid_prms:
        msg = "Bloom: estimated elements must be greater than 0"
        raise InitializationError(msg)
    valid_prms = isinstance(false_positive_rate, Number) and 0.0 <= false_positive_rate < 1.0
    if not valid_prms:
        msg = "Bloom: false positive rate must be between 0.0 and 1.0"
        raise InitializationError(msg)
    # Round-trip the rate through the class's packed struct format so the derived
    # parameters match what the C implementation computes from the stored value.
    fpr = cls._FPR_STRUCT.pack(float(false_positive_rate))
    t_fpr = float(cls._FPR_STRUCT.unpack(fpr)[0])  # to mimic the c version!
    # optimal calculations: m = -n*ln(p) / ln(2)^2 bits, k = ln(2) * m / n hashes
    m_bt = math.ceil((-estimated_elements * math.log(t_fpr)) / 0.4804530139182)  # ln(2)^2
    number_hashes = int(round(0.6931471805599453 * m_bt / estimated_elements))  # math.log(2.0)
    if number_hashes == 0:
        raise InitializationError("Bloom: Number hashes is zero; unusable parameters provided")
    return t_fpr, number_hashes, m_bt
def _set_values(
    self, est_els: int, fpr: float, n_hashes: int, n_bits: int, hash_func: Union[HashFuncT, None]
) -> None:
    """Store the core filter parameters on the instance.

    Args:
        est_els (int): Estimated number of elements
        fpr (float): False positive rate
        n_hashes (int): Number of hash functions
        n_bits (int): Number of bits in the filter
        hash_func: Hashing strategy `hf(key, number)`, or None for the default FNV-1a"""
    self._est_elements = est_els
    self._fpr = fpr
    self._num_bits = n_bits
    self._number_hashes = n_hashes
    self._els_added = 0
    # Number of array slots needed to hold n_bits at the configured slot width.
    self._bloom_length = math.ceil(n_bits / self._bits_per_elm)
    self._hash_func = default_fnv_1a if hash_func is None else hash_func
def _load_hex(self, hex_string: str, hash_function: Union[HashFuncT, None] = None) -> None:
    """Load the Bloom Filter from its hex string representation.

    Args:
        hex_string (str): Hex dump of the bloom array followed by the footer
        hash_function (function): Hashing strategy function to use `hf(key, number)`"""
    # The footer occupies the trailing bytes; each byte is two hex characters.
    offset = self._FOOTER_STRUCT_BE.size * 2
    est_els, els_added, fpr, n_hashes, n_bits = self._parse_footer(
        self._FOOTER_STRUCT_BE, unhexlify(hex_string[-offset:])
    )
    self._set_values(est_els, fpr, n_hashes, n_bits, hash_function)
    # Everything before the footer is the bit array itself.
    self._bloom = array(self._typecode, unhexlify(hex_string[:-offset]))
    self._els_added = els_added
def _load(
    self,
    file: Union[Path, str, IOBase, mmap, ByteString],
    hash_function: Union[HashFuncT, None] = None,
) -> None:
    """Load the Bloom Filter from a file path, open file object, mmap, or raw bytes.

    Args:
        file: Source of the serialized filter; a path is memory-mapped and then
            re-dispatched through this method as a buffer
        hash_function (function): Hashing strategy function to use `hf(key, number)`"""
    if not isinstance(file, (IOBase, mmap, ByteString)):
        # A path-like object was provided: mmap it and recurse with the buffer.
        file = Path(file)
        with MMap(file) as filepointer:
            self._load(filepointer, hash_function)
    else:
        # The footer sits at the very end of the buffer.
        offset = self._FOOTER_STRUCT.size
        est_els, els_added, fpr, n_hashes, n_bits = self._parse_footer(
            self._FOOTER_STRUCT, file[-offset:]  # type: ignore
        )
        self._set_values(est_els, fpr, n_hashes, n_bits, hash_function)
        # now read in the bit array!
        self._parse_bloom_array(file, self._IMPT_STRUCT.size * self.bloom_length)  # type: ignore
        self._els_added = els_added
@classmethod
def _parse_footer(cls, stct: Struct, d: ByteString) -> Tuple[int, int, float, int, int]:
    """Unpack a serialized footer and derive the filter parameters from it.

    Args:
        stct (Struct): Struct describing the footer layout
        d (ByteString): Raw footer bytes
    Returns:
        Tuple[int, int, float, int, int]: estimated elements, elements added,
        false positive rate, number of hashes, number of bits"""
    est_elements, els_added, raw_fpr = stct.unpack_from(bytearray(d))
    # Re-derive hash count and bit count from the stored capacity and rate.
    fpr, n_hashes, n_bits = cls._get_optimized_params(est_elements, float(raw_fpr))
    return int(est_elements), int(els_added), float(fpr), int(n_hashes), int(n_bits)
def _parse_bloom_array(self, b: ByteString, offset: int) -> None:
    """Fill the internal bit array from the first `offset` bytes of `b`."""
    raw = bytes(b[:offset])
    self._bloom = array(self._typecode, raw)
def _cnt_number_bits_set(self) -> int:
    """Count the total number of set bits across the whole bloom array."""
    return sum(bin(self._bloom[idx]).count("1") for idx in range(self.bloom_length))
def _get_element(self, idx: int) -> int:
    """Return the raw integer stored at position `idx` of the bloom array."""
    element = self._bloom[idx]
    return element
def _verify_bloom_similarity(self, second: SimpleBloomT) -> bool:
    """Check whether two filters are compatible for intersection, union, or Jaccard index.

    Args:
        second: The other Bloom Filter to compare configuration with
    Returns:
        bool: True when hash count, bit count, and hashing output all match"""
    same_hash_count = self.number_hashes == second.number_hashes
    same_bit_count = self.number_bits == second.number_bits
    same_hash_output = self.hashes("test") == second.hashes("test")
    return same_hash_count and same_bit_count and same_hash_output
class BloomFilterOnDisk(BloomFilter):
"""Simple Bloom Filter implementation directly on disk for use in python;
It can read and write the same format as the c version (https://github.com/barrust/bloom)
Args:
filepath (str): Path to file to load
est_elements (int): The number of estimated elements to be added
false_positive_rate (float): The desired false positive rate
hex_string (str): Hex based representation to be loaded
hash_function (function): Hashing strategy function to use \
`hf(key, number)`
Returns:
BloomFilterOnDisk: A Bloom Filter object
Raises:
NotSupportedError: Loading using a hex string is not supported
Note:
Initialization order of operations:
1) Esimated elements and false positive rate
2) From Hex String
3) Only filepath provided
"""
__slots__ = ["_filepath", "__file_pointer"]
def __init__(
    self,
    filepath: Union[str, Path],
    est_elements: Union[int, None] = None,
    false_positive_rate: Union[float, None] = None,
    hex_string: Union[str, None] = None,
    hash_function: Union[HashFuncT, None] = None,
) -> None:
    """Initialize the on-disk Bloom Filter.

    Args:
        filepath: Path of the file backing the filter
        est_elements (int): Number of estimated elements (with false_positive_rate,
            creates a fresh filter file)
        false_positive_rate (float): Desired false positive rate
        hex_string (str): Hex representation (not supported on disk)
        hash_function (function): Hashing strategy function to use `hf(key, number)`
    Raises:
        NotSupportedError: When a hex string is provided
        InitializationError: When neither creation parameters nor a valid
            existing file are supplied"""
    # set some things up
    self._filepath = Path(filepath)
    self.__file_pointer = None
    self._type = "regular-on-disk"
    self._typecode = "B"
    self._bits_per_elm = 8.0
    self._on_disk = True
    if is_hex_string(hex_string):
        msg = "Loading from hex_string is currently not supported by the on disk Bloom Filter"
        raise NotSupportedError(msg)
    if est_elements is not None and false_positive_rate is not None:
        # Fresh filter: write a zeroed bit array plus the footer, then mmap it back in.
        fpr, n_hashes, n_bits = self._get_optimized_params(est_elements, false_positive_rate)
        self._set_values(est_elements, fpr, n_hashes, n_bits, hash_function)
        with open(filepath, "wb") as filepointer:
            (array(self._typecode, [0]) * self.bloom_length).tofile(filepointer)
            filepointer.write(self._FOOTER_STRUCT.pack(est_elements, 0, false_positive_rate))
            filepointer.flush()
        self._load(filepath, hash_function)
    elif is_valid_file(self._filepath):
        # NOTE(review): `.name` is only the final path component, so a filepath with
        # a directory part would be resolved relative to the CWD — confirm intent.
        self._load(self._filepath.name, hash_function)  # need .name for python 3.5
    else:
        # Fixed typo in the error message ("Insufecient" -> "Insufficient").
        raise InitializationError("Insufficient parameters to set up the On Disk Bloom Filter")
def __del__(self) -> None:
    """Release on-disk resources even when the user never called close()."""
    self.close()
def __bytes__(self) -> bytes:
    """Return the raw content of the bloom array as bytes."""
    raw = bytes(self._bloom)
    return raw
def close(self) -> None:
    """Clean up the BloomFilterOnDisk object.

    Flushes pending state, closes the mmap-backed bloom array, and releases
    the file handle. Safe to call repeatedly: once the file pointer is cleared
    the method is a no-op."""
    if self.__file_pointer is not None and not self.__file_pointer.closed:
        # __update() presumably syncs footer metadata to disk (defined outside
        # this view) — must run before the mmap and file handle are closed.
        self.__update()
        self._bloom.close()  # on-disk variant: _bloom is an mmap, not an array
        self.__file_pointer.close()
        self.__file_pointer = None
def export(self, filename: Union[str, Path]) -> None:  # type: ignore
    """Export to disk if a different location

    Args:
        filename (str): The filename to which the Bloom Filter will be exported
    Note:
        Only exported if the filename is not the original filename"""
    # Flush current state first so the copy is up to date.
    self.__update()
    if filename and Path(filename) != self._filepath:
        # NOTE(review): `.name` is only the final path component; if _filepath has
        # a directory part this copies relative to the CWD — confirm intent.
        copyfile(self._filepath.name, str(filename))
    # otherwise, nothing to do!
def _load(self, filepath: Union[str, Path], hash_function: Union[HashFuncT, None] = None):  # type: ignore
    """Load the on-disk Bloom Filter by memory-mapping the backing file.

    Args:
        filepath: Path of the serialized filter file
        hash_function (function): Hashing strategy function to use `hf(key, number)`"""
    # read the file, set the optimal params
    # mmap everything
    with open(filepath, "r+b") as filepointer:
        # The footer (estimated elements, elements added, fpr) is at the end of the file.
        offset = self._FOOTER_STRUCT.size
        filepointer.seek(offset * -1, os.SEEK_END)
        est_els, _, fpr = self._FOOTER_STRUCT.unpack_from(filepointer.read(offset))
        fpr, n_hashes, n_bits = self._get_optimized_params(est_els, fpr)
        self._set_values(est_els, fpr, n_hashes, n_bits, hash_function)
    # setup a few additional items: keep a persistent handle open and mmap the
    # whole file so bit updates are written directly to disk.
    self.__file_pointer = open(filepath, "r+b")  # type: ignore
    self._bloom = mmap(self.__file_pointer.fileno(), 0)  # type: ignore
    self._on_disk = True
def add_alt(self, hashes: HashResultsT) -> None:
    """Insert an element by its pre-computed hash results and sync to disk.

    Args:
        hashes: Hash results for the element, as produced by the filter's
            hashing function"""
    super().add_alt(hashes)
    # __update() presumably flushes metadata to the backing file (defined
    # outside this view) so the on-disk copy stays consistent after each add.
    self.__update()
@classmethod
def frombytes(cls, b: ByteString, hash_function: Union[HashFuncT, None] = None) -> "BloomFilterOnDisk":
    """Unsupported constructor for the on-disk variant.

    Raises:
        NotSupportedError: Always; loading from bytes is not available on disk"""
    raise NotSupportedError("Loading from bytes is currently not supported by the on disk Bloom Filter")
_EXPECTED_ELM_STRUCT = | |
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_members_rel_fk_head_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for members (required)
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_members_rel_fk_head" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_members_rel_fk_head`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_members_rel_fk_head`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_members_rel_fk_head`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/members/rel/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'HEAD',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_members_rel_fk_put(self, id, nk, fk, **kwargs):
    """
    Add a related item by id for members.
    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the response in the callback.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_members_rel_fk_put(id, nk, fk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param str fk: Foreign key for members (required)
    :param DesignMember data:
    :return: DesignMember
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP payload for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread directly.
        return self.portals_id_designs_nk_members_rel_fk_put_with_http_info(id, nk, fk, **kwargs)
    data = self.portals_id_designs_nk_members_rel_fk_put_with_http_info(id, nk, fk, **kwargs)
    return data
def portals_id_designs_nk_members_rel_fk_put_with_http_info(self, id, nk, fk, **kwargs):
    """
    Add a related item by id for members.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_members_rel_fk_put_with_http_info(id, nk, fk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param str fk: Foreign key for members (required)
    :param DesignMember data:
    :return: DesignMember
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['id', 'nk', 'fk', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot named arguments via locals(), then fold **kwargs in, rejecting
    # anything not declared in all_params. (Do not rename locals above this
    # line: locals() captures them by name.)
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portals_id_designs_nk_members_rel_fk_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_members_rel_fk_put`")
    # verify the required parameter 'nk' is set
    if ('nk' not in params) or (params['nk'] is None):
        raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_members_rel_fk_put`")
    # verify the required parameter 'fk' is set
    if ('fk' not in params) or (params['fk'] is None):
        raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_members_rel_fk_put`")
    collection_formats = {}
    resource_path = '/Portals/{id}/designs/{nk}/members/rel/{fk}'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'nk' in params:
        path_params['nk'] = params['nk']
    if 'fk' in params:
        path_params['fk'] = params['fk']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'data' in params:
        body_params = params['data']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='DesignMember',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portals_id_designs_nk_permission_delete(self, id, nk, **kwargs):
    """
    Deletes permission of this model.
    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the response in the callback.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_delete(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP payload for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread directly.
        return self.portals_id_designs_nk_permission_delete_with_http_info(id, nk, **kwargs)
    data = self.portals_id_designs_nk_permission_delete_with_http_info(id, nk, **kwargs)
    return data
def portals_id_designs_nk_permission_delete_with_http_info(self, id, nk, **kwargs):
    """
    Deletes permission of this model.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_delete_with_http_info(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = ['id', 'nk']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot named arguments via locals(), then fold **kwargs in, rejecting
    # anything not declared in all_params. (Do not rename locals above this
    # line: locals() captures them by name.)
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portals_id_designs_nk_permission_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_permission_delete`")
    # verify the required parameter 'nk' is set
    if ('nk' not in params) or (params['nk'] is None):
        raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_permission_delete`")
    collection_formats = {}
    resource_path = '/Portals/{id}/designs/{nk}/permission'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'nk' in params:
        path_params['nk'] = params['nk']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
    # Authentication setting
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portals_id_designs_nk_permission_get(self, id, nk, **kwargs):
    """
    Fetches hasOne relation permission.
    Synchronous by default; supply a `callback` function to run the
    request asynchronously and receive the response in the callback.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portals_id_designs_nk_permission_get(id, nk, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: Portal id (required)
    :param str nk: Foreign key for designs. (required)
    :param bool refresh:
    :return: DesignPermissionSet
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP payload for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: hand back the request thread directly.
        return self.portals_id_designs_nk_permission_get_with_http_info(id, nk, **kwargs)
    data = self.portals_id_designs_nk_permission_get_with_http_info(id, nk, **kwargs)
    return data
def portals_id_designs_nk_permission_get_with_http_info(self, id, nk, **kwargs):
"""
Fetches hasOne relation permission.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_permission_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param bool refresh:
:return: DesignPermissionSet
If the | |
import cadquery as cq
import cadquery.selectors as cqs
import logging, importlib
from types import SimpleNamespace as Measures
# A plate for the Fansteck Galaxy Note9 case providing a Mollemount interface.
log = logging.getLogger(__name__)
class Caseplate:
def __init__(self, workplane, measures):
    """
    A parametric backplate for cases of smartphones and phablets with a Mollemount interface.
    Mollemount is an open design mount system for smartphones and other mobile devices,
    compatible with the U.S. military PALS / MOLLE system. In addition to providing that, a
    caseplate like this also adds an extra level of protection to cases that use only
    thin material on the backside and / or protect sensitive phones with a glass case.
    The design is parametric and can be adapted to most cases by defining plate shapes and sizes
    and cutout positions and sizes. However, the initial parameters are made for the RedPepper
    DOT+ Series waterproof case for Samsung Galaxy Note9, allowing to quickly create various
    shapes and types of backplates for this case. This and identical cases offered by traders
    under other names can be found at:
    * https://www.aliexpress.com/item/4000271146538.html (9.87 EUR incl. shipping)
    * https://www.aliexpress.com/item/32916521981.html (11.06 EUR incl. shipping)
    * https://www.amazon.com/dp/B07G33YSJR (16.59 USD + 9.61 international shipping from USA)
    * https://www.lightinthebox.com/en/p/_p8357432.html (out of stock)
    :param workplane: The CadQuery workplane to create this part on.
    :param measures: The measures to use for the parameters of this design. Expects a nested
        [SimpleNamespace](https://docs.python.org/3/library/types.html#types.SimpleNamespace)
        object.
    .. todo:: Add rounded corners for the cutout that corresponds to the horizontal bar at the
        back of the case.
    .. todo:: Fix that currently the workplane is passed to this constructor as "workplane",
        but selectors in this class use global axis directions. To fix this, create the
        part in the XZ plane, and only when finished, rotate and translate it to be positioned
        at the origin of the plane provided in the "workplane" parameter. Ideally, that action
        would be provided by the part() method.
    .. todo:: Add mockup parts for the Mollemount straps, for illustration.
    """
    self.model = workplane
    self.debug = False
    self.measures = measures
    # TODO: Initialize missing measures with defaults.
    # TODO: Should better be "self.model = self.build(self.model)", as that enables other
    #   methods using the same naming scheme, like "build_partname()". And actually, since
    #   the positioning should be "naturally" at first and only adapted to the workplane of
    #   the calling object in the end, there's no need to provide a parameter to build().
    # Build the part immediately; build() replaces self.model with the finished solid.
    self.build()
def build(self):
m = self.measures
baseplane = self.model.workplane()
plate_1 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.plate_1.width, m.plate_1.height, m.plate_1.depth, centered = [False, False, False])
# Corner roundings.
# TODO: Fix that the corner angle where the cutouts intersect has to be smaller than this.
.edges("|Y")
.fillet(m.plate_1.corner_radius)
# Tapering off towards the face mounted to the device.
.faces(">Y")
.edges()
# Due to a bug we cannot use asymmetric chamfering here, as the "length" and "length2"
# parameters would be internally switched for some edges. So we do a simple 45° chamfer.
.chamfer(m.plate_1.edge_chamfer)
# TODO: Report and fix the bug mentioned above, then do the chamfering like this:
#.chamfer(length = 0.5 * m.front_edge_chamfer, length2 = 0.95 * m.depth_1)
# TODO: Don't do the chamfer if the measure given is zero.
# TODO: Also utilize back_edge_chamfer if present. If both are present, the part depth
# has to be split half and half between them.
# Translate according to the specified offsets of its bottom left corner.
.translate([
m.plate_1.bottom_left[0],
0,
m.plate_1.bottom_left[1]
])
)
plate_2 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.plate_2.width, m.plate_2.height, m.plate_2.depth, centered = [False, False, False])
# Corner roundings.
# TODO: Fix that the corner angle where the cutouts intersect has to be smaller than this.
.edges("|Y")
.fillet(m.plate_2.corner_radius)
# Tapering off towards the face mounted to the device.
# (See analogous step for plate_1 for hints and TODO items.)
.faces("<Y")
.edges()
.chamfer(m.plate_2.edge_chamfer)
# Translate according to the specified offsets of its bottom left corner (x and z
# components) and to start at the back surface of plate_1 (y component).
.translate([
m.plate_2.bottom_left[0],
-0.99 * m.plate_1.depth,
m.plate_2.bottom_left[1]
])
)
# A "bump" at the top of the box base shape. Needed to have enough material to
# reach into the side and top cutouts of the X-Mount shape, to help keep it in place.
# TODO: Use a spline instead of a simple loft to create this bump.
if (m.plate_3 is not None):
plate_3 = (
cq.Workplane()
.copyWorkplane(baseplane)
# Move to the center of the lower rectangle, then draw it.
.moveTo(
x = m.plate_3.bottom_left_1[0] + 0.5 * m.plate_3.width_1,
y = m.plate_3.bottom_left_1[1] + 0.5 * m.plate_3.height_1
)
.rect(m.plate_3.width_1, m.plate_3.height_1)
# Move to the center of the lower rectangle, then draw it.
.workplane(offset = m.plate_3.depth)
.moveTo(
x = m.plate_3.bottom_left_2[0] + 0.5 * m.plate_3.width_2,
y = m.plate_3.bottom_left_2[1] + 0.5 * m.plate_3.height_2
)
.rect(m.plate_3.width_2, m.plate_3.height_2)
.loft()
# Translate to start at the back surface of the plate_1 + plate_2 combination.
.translate([0, -0.99 * (m.plate_1.depth + m.plate_2.depth), 0])
)
show_object(plate_3, name = "plate_3", options = {"color": "yellow", "alpha": 0.8})
# TODO: Create the cutouts iteratively.
cutout_1 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_1.width, m.cutout_1.height, m.cutout_1.depth, centered = [False, False, False])
# translate() does not require a workplane, as it works with global axis directions.
.translate([m.cutout_1.bottom_left[0], 0, m.cutout_1.bottom_left[1]])
)
show_object(cutout_1, name = "cutout_1", options = {"color": "yellow", "alpha": 0.8})
cutout_2 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_2.width, m.cutout_2.height, m.cutout_2.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_2.corner_radius)
.translate([m.cutout_2.bottom_left[0], 0, m.cutout_2.bottom_left[1]])
)
show_object(cutout_2, name = "cutout_2", options = {"color": "yellow", "alpha": 0.8})
cutout_3 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_3.width, m.cutout_3.height, m.cutout_3.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_3.corner_radius)
.translate([m.cutout_3.bottom_left[0], 0, m.cutout_3.bottom_left[1]])
)
show_object(cutout_3, name = "cutout_3", options = {"color": "yellow", "alpha": 0.8})
cutout_4 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_4.width, m.cutout_4.height, m.cutout_4.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_4.corner_radius)
.translate([m.cutout_4.bottom_left[0], 0, m.cutout_4.bottom_left[1]])
)
show_object(cutout_4, name = "cutout_4", options = {"color": "yellow", "alpha": 0.8})
cutout_5 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_5.width, m.cutout_5.height, m.cutout_5.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_5.corner_radius)
.translate([m.cutout_5.bottom_left[0], 0, m.cutout_5.bottom_left[1]])
)
show_object(cutout_5, name = "cutout_5", options = {"color": "yellow", "alpha": 0.8})
cutout_6 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_6.width, m.cutout_6.height, m.cutout_6.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_6.corner_radius)
.translate([m.cutout_6.bottom_left[0], 0, m.cutout_6.bottom_left[1]])
)
show_object(cutout_6, name = "cutout_6", options = {"color": "yellow", "alpha": 0.8})
cutout_7 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_7.width, m.cutout_7.height, m.cutout_7.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_7.corner_radius)
.translate([m.cutout_7.bottom_left[0], 0, m.cutout_7.bottom_left[1]])
)
show_object(cutout_7, name = "cutout_7", options = {"color": "yellow", "alpha": 0.8})
cutout_8 = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.cutout_8.width, m.cutout_8.height, m.cutout_8.depth, centered = [False, False, False])
.edges("|Y").fillet(m.cutout_8.corner_radius)
.translate([m.cutout_8.bottom_left[0], 0, m.cutout_8.bottom_left[1]])
)
show_object(cutout_8, name = "cutout_8", options = {"color": "yellow", "alpha": 0.8})
# Create the main shape.
self.model = plate_1.union(plate_2)
if (m.plate_3 is not None):
self.model = self.model.union(plate_3)
# Create the cutouts.
# TODO: Create these cutouts in a for loop, not in a sequence.
# TODO: Use cutThruAll() with 2D wires instead of cut(). The paradigm is not CSG!
# This still allowed tapered cutting, giving the same effect as chamfered edges.
# However, this also requires a way to fillet() the corners of the 2D wires before
# using them for cutThruAll(), as otherwise selecting the edges to fillet afterwards
# becomes complicated (at least needing tagging). And that way does not exist yet.
if (m.cutout_1.enabled): self.model = self.model.cut(cutout_1)
if (m.cutout_2.enabled): self.model = self.model.cut(cutout_2)
if (m.cutout_3.enabled): self.model = self.model.cut(cutout_3)
if (m.cutout_4.enabled): self.model = self.model.cut(cutout_4)
if (m.cutout_5.enabled): self.model = self.model.cut(cutout_5)
if (m.cutout_6.enabled): self.model = self.model.cut(cutout_6)
if (m.cutout_7.enabled): self.model = self.model.cut(cutout_7)
if (m.cutout_8.enabled): self.model = self.model.cut(cutout_8)
# Create the cutouts for stitching between the two MOLLE columns.
for row in range(m.molle_rows):
cutout = (
cq.Workplane()
.copyWorkplane(baseplane)
.box(m.molle_stitching_width, m.molle_stitching_height, 10.00, centered = [False, False, False])
# Create a slot-like shape with rounded ends, emulated by rounding the corners nearly
# as much as possible, that is each corner | |
<reponame>voytekresearch/omapping<filename>om/meg/group.py
"""MEG-DATA Analysis Module - Group"""
import os
import pickle
import datetime
import numpy as np
import scipy.io as sio
from scipy.stats.stats import pearsonr
from om.meg.single import MegSubj
from om.core.osc import check_bands
from om.core.errors import DataNotComputedError, InconsistentDataError, UnknownDataTypeError
###################################################################################################
###################################################################################################
class MegGroup(MegSubj):
"""A class to store OMEGA data from multiple subjects.
Holds all oscillations, regardless of spatial location.
Attributes
----------
n_subjs : int
The number of subjects included in the group data.
subjs : list of int
List of the subject numbers included in current group data.
bands : Osc() object
Stores labels and band definitions of oscillation bands.
n_oscs_tot : int
Total number of oscillations found across all subjects.
comment : str
A note about the data, label about data that is loaded.
gr_oscs : dict
All oscillations, in bands, for all subjects [n_verts, n_oscs, n_subjs].
osc_probs : dict
Oscillation probability for each oscillation band, for each vertex.
osc_pow_ratios : dict
Oscillation power ratios for each oscillation band, for each vertex.
osc_scores : dict
Oscillation scores for each oscillation band, for each vertex.
vert_exponents : 2d array
Aperiodic exponent values for each subject, at each vertex [n_verts, n_subjs].
exponent_gr_avg : 1d array
Average aperiodic exponent values across subjects for each vertex.
osc_prob_done : boolean
Whether oscillation probability has been calculated.
osc_power_done : boolean
Whether oscillation power ratio has been calculated.
osc_score_done : boolean
Whether oscillation score has been calculated.
"""
def __init__(self, db, osc):
    """Initialize the group object from a database and oscillation band definition.

    Parameters
    ----------
    db : OMDB() object
        Database object for omegamappin project.
    osc : Osc() object
        Object that stores oscillatory band definitions.
    """
    # Start from a single-subject object, loaded with 'both' data
    MegSubj.__init__(self, db, 'both')

    # Group bookkeeping: how many subjects, and which subject numbers
    self.n_subjs = 0
    self.subjs = []

    # Demographics are collected across subjects
    self.sex = []
    self.age = np.array([])

    # Oscillation band definitions shared by the whole group
    self.bands = osc.bands

    # Running total of oscillations found across all subjects
    self.n_oscs_tot = 0

    # Label attached to this data, used for example when plotting
    self.comment = 'Group'

    # Per-band group oscillation data and derived per-vertex measures
    self.gr_oscs = {}
    self.osc_probs = {}
    self.osc_pow_ratios = {}
    self.osc_scores = {}

    # Aperiodic exponent data: per subject & vertex, and the group average
    self.vert_exponents = np.array([])
    self.exponent_gr_avg = np.array([])

    # Flags tracking which derived measures have been computed
    self.osc_prob_done = False
    self.osc_power_done = False
    self.osc_score_done = False
def __len__(self):
    """Define the length of the group object as the number of subjects."""
    return self.n_subjs
def add_subject(self, new_subj, add_vertex_oscs=False, add_vertex_exponents=False,
                add_all_oscs=False, add_vertex_bands=False, add_peak_freqs=False,
                add_demo=False):
    """Adds a new subject to the MegGroup object.

    Parameters
    ----------
    new_subj : MegSubj() Object
        MEG subject (instance of MegSubj)
    add_vertex_oscs : boolean, optional (default: False)
        Whether to add all oscillations, across vertices.
    add_vertex_exponents : boolean, optional (default: False)
        Whether to add the aperiodic exponents.
    add_all_oscs : boolean, optional (default: False)
        Whether to add the vectors of all oscillations, collapsed across vertices.
    add_vertex_bands : boolean, optional (default: False)
        Whether to add the oscillation band data, across vertices.
    add_peak_freqs : boolean, optional (default: False)
        Whether to add peak frequencies.
    add_demo : boolean, optional (default: False)
        Whether to add demographic information.

    Raises
    ------
    DataNotComputedError
        If the new subject (or the existing group) is missing data that a
        requested addition needs.
    """
    # Check if subject has data
    if not new_subj.has_data:
        raise DataNotComputedError("Empty meg data object. Cannot add data.")

    # Add oscillations per vertex
    if add_vertex_oscs:
        # Check new subject has relevant data
        if not new_subj.has_vertex_oscs:
            raise DataNotComputedError('New subject does not have vertex osc data.')
        if not self.has_data:
            # First subject: adopt its arrays directly
            self.centers = new_subj.centers
            self.powers = new_subj.powers
            self.bws = new_subj.bws
            # Update that group contains this data
            self.has_vertex_oscs = True
        else:
            # Check that group has data defined
            if not self.has_vertex_oscs:
                raise DataNotComputedError('MEG Group does not include vertex osc data.')
            # Stack the new subject along a new (subject) dimension
            self.centers = np.dstack([self.centers, new_subj.centers])
            self.powers = np.dstack([self.powers, new_subj.powers])
            self.bws = np.dstack([self.bws, new_subj.bws])

    # Add exponents per vertex
    if add_vertex_exponents:
        # Check new subject has relevant data
        if not new_subj.has_vertex_exponents:
            raise DataNotComputedError('New subject does not have vertex exponent data.')
        if not self.has_data:
            # First subject: adopt its exponents directly
            self.vert_exponents = new_subj.exponents
            # Update that group contains this data
            self.has_vertex_exponents = True
        else:
            # Check that group has data defined
            if not self.has_vertex_exponents:
                raise DataNotComputedError('MEG Group does not include vertex exponent data.')
            # Stack the new subject's exponents as an extra column
            self.vert_exponents = np.hstack([self.vert_exponents, new_subj.exponents])

    # Add All-Osc Data
    if add_all_oscs:
        # Check that new subject has all_osc data available
        if not new_subj.has_all_osc:
            raise DataNotComputedError('New subject does not have all osc data.')
        # Check that group has data defined
        if self.has_data:
            if not self.has_all_osc:
                raise DataNotComputedError('MEG Group does not include all osc data.')
        # Add oscillation parameters to current data
        self.centers_all = np.append(self.centers_all, new_subj.centers_all)
        self.bws_all = np.append(self.bws_all, new_subj.bws_all)
        self.powers_all = np.append(self.powers_all, new_subj.powers_all)
        self.exponents = np.append(self.exponents, new_subj.exponents)
        # Add centers hist
        self.centers_hist.append(new_subj.centers_hist)
        # Update count of total number of oscillations
        self.n_oscs = np.append(self.n_oscs, new_subj.n_oscs)
        self.n_oscs_tot = len(self.centers_all)
        # If first subject, update what kind of data is loaded
        if not self.has_data:
            self.has_all_osc = True

    # Add band-specific data
    if add_vertex_bands:
        # Check that new subject has vertex bands data
        if not new_subj.has_vertex_bands:
            raise DataNotComputedError('New subject does not have vertex band data.')
        # Check that new subject has same bands defined
        _ = check_bands([self.bands, new_subj.bands])
        # Add new subject to group oscillations
        if not self.has_data:
            # First subject: adopt its band data directly
            self.gr_oscs = new_subj.oscs
            # Update that group contains this data
            self.has_vertex_bands = True
        else:
            # Check that group has data defined
            if not self.has_vertex_bands:
                raise DataNotComputedError('MEG Group does not include vertex band data.')
            # Stack each band along a new (subject) dimension
            for band in self.bands:
                self.gr_oscs[band] = np.dstack([self.gr_oscs[band], new_subj.oscs[band]])

    # Add oscillation peak data
    if add_peak_freqs:
        # Check that new subject has peak frequency data
        if not new_subj.has_peak_freqs:
            raise DataNotComputedError('New subject does not have peak freq data.')
        # Check that new subject has same bands defined
        _ = check_bands([self.bands, new_subj.bands])
        # Add new subject to peak frequencies
        if not self.has_data:
            # First subject: adopt its peak data directly
            self.peaks = new_subj.peaks
            # Update that group contains this data
            self.has_peak_freqs = True
        else:
            # Check that group has data defined
            if not self.has_peak_freqs:
                raise DataNotComputedError('MEG Group does not include peak freq data.')
            # Append the new subject's peaks per band
            for band in self.bands:
                self.peaks[band] = np.append(self.peaks[band], new_subj.peaks[band])

    # Add demographic data
    if add_demo:
        # Check that incoming subject has demo data
        if not new_subj.has_demo:
            raise DataNotComputedError('Demographic data not available')
        # Check that group has data defined
        if self.has_data:
            if not self.has_demo:
                raise DataNotComputedError('MEG Group does not include demo data.')
        # Add demographic data to group object
        self.sex.append(new_subj.sex)
        self.age = np.append(self.age, new_subj.age)
        # If first subject, update what kind of data is loaded
        if not self.has_data:
            self.has_demo = True

    # If first subject, update that object has data
    if self.n_subjs == 0:
        self.has_data = True

    # Update subj count and subject number list
    self.n_subjs += 1
    self.subjs.append(new_subj.subnum)

    # Check consistency of group data
    self.check_consistency()
def check_consistency(self):
    """Check for consistency of data loaded in group object.

    Raises
    ------
    InconsistentDataError
        If the subject bookkeeping or loaded data shapes do not line up.
    """
    # All vertex-level data is expected to have this many vertices.
    n_vertices = 7501

    if self.n_subjs != len(self.subjs):
        raise InconsistentDataError('Discrepancy in subject numbers.')

    if self.has_vertex_oscs:
        # Stacked vertex data only becomes 3d once a second subject is added.
        if self.n_subjs > 1:
            # Raise the module's error type (rather than `assert`, which is
            # stripped under `python -O`), consistent with the check above.
            expected_shape = (n_vertices, 8, self.n_subjs)
            if self.centers.shape != expected_shape:
                raise InconsistentDataError('Group centers data shape is inconsistent.')
            if self.powers.shape != expected_shape:
                raise InconsistentDataError('Group powers data shape is inconsistent.')
            if self.bws.shape != expected_shape:
                raise InconsistentDataError('Group bandwidths data shape is inconsistent.')

    if self.has_vertex_exponents:
        if self.vert_exponents.shape != (n_vertices, self.n_subjs):
            raise InconsistentDataError('Group exponent data shape is inconsistent.')

    # NOTE(review): no consistency checks are implemented yet for the
    # following data types - placeholders kept from the original code.
    if self.has_all_osc:
        pass
    if self.has_vertex_bands:
        pass
    if self.has_peak_freqs:
        pass
    if self.has_demo:
        pass
def group_exponent(self, avg='mean'):
    """Calculates the average exponent value for each vertex, across subjects.

    Parameters
    ----------
    avg : {'mean', 'median'}, optional
        How to average across the group.

    Raises
    ------
    ValueError
        If the requested averaging method is not understood.
    """
    # Compare with '==' rather than 'is': identity comparison of string
    # literals is implementation dependent and warned against by CPython.
    if avg == 'mean':
        self.exponent_gr_avg = np.mean(self.vert_exponents, 1)
    elif avg == 'median':
        self.exponent_gr_avg = np.median(self.vert_exponents, 1)
    else:
        # Previously an unknown method was silently ignored; fail loudly instead.
        raise ValueError("Averaging method must be 'mean' or 'median'.")
def osc_prob(self):
"""Calculates the probability of an osc in a specific band.
This is done per vertex, across subjects.
"""
# Check if vertex data is set
if not self.has_vertex_bands:
raise DataNotComputedError('Vertex oscillation bands data not available.')
# For each oscillation band, compute the probability of an oscillation in that band - NEW
for | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
from typing import Optional, Tuple
import rich_click as click
from airflow_breeze.commands.ci_image_commands import rebuild_ci_image_if_needed
from airflow_breeze.commands.main_command import main
from airflow_breeze.global_constants import (
DEFAULT_PYTHON_MAJOR_MINOR_VERSION,
MOUNT_SELECTED,
get_available_packages,
)
from airflow_breeze.params.build_ci_params import BuildCiParams
from airflow_breeze.params.doc_build_params import DocBuildParams
from airflow_breeze.params.shell_params import ShellParams
from airflow_breeze.pre_commit_ids import PRE_COMMIT_LIST
from airflow_breeze.utils.cache import read_from_cache_file
from airflow_breeze.utils.common_options import (
option_airflow_constraints_reference,
option_airflow_extras,
option_answer,
option_backend,
option_db_reset,
option_debian_version,
option_dry_run,
option_force_build,
option_forward_credentials,
option_github_repository,
option_installation_package_format,
option_integration,
option_load_default_connection,
option_load_example_dags,
option_mount_sources,
option_mssql_version,
option_mysql_version,
option_postgres_version,
option_python,
option_use_airflow_version,
option_use_packages_from_dist,
option_verbose,
)
from airflow_breeze.utils.console import get_console
from airflow_breeze.utils.custom_param_types import BetterChoice, NotVerifiedBetterChoice
from airflow_breeze.utils.docker_command_utils import (
check_docker_resources,
get_env_variables_for_docker_commands,
get_extra_docker_flags,
perform_environment_checks,
)
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT
from airflow_breeze.utils.run_utils import (
RunCommandResult,
assert_pre_commit_installed,
filter_out_none,
run_command,
)
from airflow_breeze.utils.visuals import ASCIIART, ASCIIART_STYLE, CHEATSHEET, CHEATSHEET_STYLE
# Grouping of the developer commands shown together in the Breeze help output.
DEVELOPER_COMMANDS = {
    "name": "Developer tools",
    "commands": [
        "shell",
        "start-airflow",
        "exec",
        "stop",
        "build-docs",
        "static-checks",
    ],
}

# Per-command option groups, used by rich-click to render named option panels
# in each command's --help output. Keys are the full command paths.
DEVELOPER_PARAMETERS = {
    "breeze": [
        {
            "name": "Basic flags for the default (shell) command",
            "options": [
                "--python",
                "--backend",
                "--postgres-version",
                "--mysql-version",
                "--mssql-version",
                "--integration",
                "--forward-credentials",
                "--db-reset",
            ],
        },
        {
            "name": "Advanced flags for the default (shell) command",
            "options": [
                "--use-airflow-version",
                "--constraints-reference",
                "--airflow-extras",
                "--use-packages-from-dist",
                "--package-format",
                "--force-build",
                "--mount-sources",
                "--debian-version",
            ],
        },
    ],
    "breeze shell": [
        {
            "name": "Basic flags",
            "options": [
                "--python",
                "--backend",
                "--postgres-version",
                "--mysql-version",
                "--mssql-version",
                "--integration",
                "--forward-credentials",
                "--db-reset",
            ],
        },
        {
            "name": "Advanced flag for running",
            "options": [
                "--use-airflow-version",
                "--constraints-reference",
                "--airflow-extras",
                "--use-packages-from-dist",
                "--package-format",
                "--force-build",
                "--mount-sources",
                "--debian-version",
            ],
        },
    ],
    "breeze start-airflow": [
        {
            "name": "Basic flags",
            "options": [
                "--python",
                "--load-example-dags",
                "--load-default-connections",
                "--backend",
                "--postgres-version",
                "--mysql-version",
                "--mssql-version",
                "--integration",
                "--forward-credentials",
                "--db-reset",
            ],
        },
        {
            "name": "Advanced flag for running",
            "options": [
                "--use-airflow-version",
                "--constraints-reference",
                "--airflow-extras",
                "--use-packages-from-dist",
                "--package-format",
                "--force-build",
                "--mount-sources",
            ],
        },
    ],
    "breeze exec": [
        {"name": "Drops in the interactive shell of active airflow container"},
    ],
    "breeze stop": [
        {
            "name": "Stop flags",
            "options": [
                "--preserve-volumes",
            ],
        },
    ],
    "breeze build-docs": [
        {
            "name": "Doc flags",
            "options": [
                "--docs-only",
                "--spellcheck-only",
                "--for-production",
                "--package-filter",
            ],
        },
    ],
    "breeze static-checks": [
        {
            "name": "Pre-commit flags",
            "options": [
                "--type",
                "--files",
                "--all-files",
                "--show-diff-on-failure",
                "--last-commit",
            ],
        },
    ],
}
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Make sure that whatever you add here as an option is also
# added in the "main" command in breeze.py. The main command
# acts as a shorthand for the shell command and, apart from the
# extra args, it should take the same parameters.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@main.command()
@option_verbose
@option_dry_run
@option_python
@option_backend
@option_debian_version
@option_github_repository
@option_postgres_version
@option_mysql_version
@option_mssql_version
@option_forward_credentials
@option_force_build
@option_use_airflow_version
@option_airflow_extras
@option_airflow_constraints_reference
@option_use_packages_from_dist
@option_installation_package_format
@option_mount_sources
@option_integration
@option_db_reset
@option_answer
@click.argument('extra-args', nargs=-1, type=click.UNPROCESSED)
def shell(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    backend: str,
    integration: Tuple[str],
    postgres_version: str,
    mysql_version: str,
    mssql_version: str,
    debian_version: str,
    forward_credentials: bool,
    mount_sources: str,
    use_packages_from_dist: bool,
    package_format: str,
    use_airflow_version: Optional[str],
    airflow_extras: str,
    airflow_constraints_reference: str,
    force_build: bool,
    db_reset: bool,
    answer: Optional[str],
    extra_args: Tuple,
):
    """Enter breeze.py environment. This is the default command used when no other is selected."""
    # Only print the welcome banner when the user asked for verbose or dry-run mode.
    if verbose or dry_run:
        get_console().print("\n[success]Welcome to breeze.py[/]\n")
        get_console().print(f"\n[success]Root of Airflow Sources = {AIRFLOW_SOURCES_ROOT}[/]\n")
    # All heavy lifting (env checks, image rebuild, docker-compose run) is delegated.
    enter_shell(
        verbose=verbose,
        dry_run=dry_run,
        python=python,
        github_repository=github_repository,
        backend=backend,
        integration=integration,
        postgres_version=postgres_version,
        mysql_version=mysql_version,
        mssql_version=mssql_version,
        forward_credentials=str(forward_credentials),
        mount_sources=mount_sources,
        use_airflow_version=use_airflow_version,
        airflow_extras=airflow_extras,
        airflow_constraints_reference=airflow_constraints_reference,
        use_packages_from_dist=use_packages_from_dist,
        package_format=package_format,
        force_build=force_build,
        db_reset=db_reset,
        extra_args=extra_args,
        answer=answer,
        debian_version=debian_version,
    )
# NOTE(review): @option_verbose is applied above @main.command here, unlike the
# other commands in this file. Click appears to accept both orders, but consider
# reordering for consistency - confirm before changing.
@option_verbose
@main.command(name='start-airflow')
@option_dry_run
@option_python
@option_github_repository
@option_backend
@option_postgres_version
@option_load_example_dags
@option_load_default_connection
@option_mysql_version
@option_mssql_version
@option_forward_credentials
@option_force_build
@option_use_airflow_version
@option_airflow_extras
@option_airflow_constraints_reference
@option_use_packages_from_dist
@option_installation_package_format
@option_mount_sources
@option_integration
@option_db_reset
@option_answer
@click.argument('extra-args', nargs=-1, type=click.UNPROCESSED)
def start_airflow(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    backend: str,
    integration: Tuple[str],
    postgres_version: str,
    load_example_dags: bool,
    load_default_connections: bool,
    mysql_version: str,
    mssql_version: str,
    forward_credentials: bool,
    mount_sources: str,
    use_airflow_version: Optional[str],
    airflow_extras: str,
    airflow_constraints_reference: str,
    use_packages_from_dist: bool,
    package_format: str,
    force_build: bool,
    db_reset: bool,
    answer: Optional[str],
    extra_args: Tuple,
):
    """Enter breeze.py environment and start all Airflow components in the tmux session."""
    # Same flow as `shell`, but with start_airflow=True so the entrypoint
    # launches the Airflow components in tmux.
    enter_shell(
        verbose=verbose,
        dry_run=dry_run,
        python=python,
        github_repository=github_repository,
        backend=backend,
        integration=integration,
        postgres_version=postgres_version,
        load_default_connections=load_default_connections,
        load_example_dags=load_example_dags,
        mysql_version=mysql_version,
        mssql_version=mssql_version,
        forward_credentials=str(forward_credentials),
        mount_sources=mount_sources,
        use_airflow_version=use_airflow_version,
        airflow_extras=airflow_extras,
        airflow_constraints_reference=airflow_constraints_reference,
        use_packages_from_dist=use_packages_from_dist,
        package_format=package_format,
        force_build=force_build,
        db_reset=db_reset,
        start_airflow=True,
        extra_args=extra_args,
        answer=answer,
    )
@main.command(name='build-docs')
@option_verbose
@option_dry_run
@option_github_repository
@click.option('-d', '--docs-only', help="Only build documentation.", is_flag=True)
@click.option('-s', '--spellcheck-only', help="Only run spell checking.", is_flag=True)
@click.option(
    '-p',
    '--for-production',
    help="Builds documentation for official release i.e. all links point to stable version.",
    is_flag=True,
)
# NOTE: '--package-filter' deliberately has no short flag. It previously also
# declared '-p', which collided with the '-p' of '--for-production' above and
# made the short option ambiguous.
@click.option(
    '--package-filter',
    help="List of packages to consider.",
    type=NotVerifiedBetterChoice(get_available_packages()),
    multiple=True,
)
def build_docs(
    verbose: bool,
    dry_run: bool,
    github_repository: str,
    docs_only: bool,
    spellcheck_only: bool,
    for_production: bool,
    package_filter: Tuple[str],
):
    """Build documentation in the container."""
    # Make sure docker is usable and the CI image is up to date before running.
    perform_environment_checks(verbose=verbose)
    params = BuildCiParams(github_repository=github_repository, python=DEFAULT_PYTHON_MAJOR_MINOR_VERSION)
    rebuild_ci_image_if_needed(build_params=params, dry_run=dry_run, verbose=verbose)
    ci_image_name = params.airflow_image_name
    doc_builder = DocBuildParams(
        package_filter=package_filter,
        docs_only=docs_only,
        spellcheck_only=spellcheck_only,
        for_production=for_production,
        skip_environment_initialization=True,
    )
    extra_docker_flags = get_extra_docker_flags(MOUNT_SELECTED)
    env = get_env_variables_for_docker_commands(params)
    # Run the in-container docs build script; '--pull never' forces use of the
    # locally (re)built image.
    cmd = [
        "docker",
        "run",
        "-t",
        *extra_docker_flags,
        "--pull",
        "never",
        ci_image_name,
        "/opt/airflow/scripts/in_container/run_docs_build.sh",
        *doc_builder.args_doc_builder,
    ]
    process = run_command(cmd, verbose=verbose, dry_run=dry_run, text=True, env=env, check=False)
    # Propagate the container's exit code as our own.
    sys.exit(process.returncode)
@main.command(
    name="static-checks",
    help="Run static checks.",
    context_settings=dict(
        ignore_unknown_options=True,
        allow_extra_args=True,
    ),
)
@click.option(
    '-t',
    '--type',
    help="Type(s) of the static checks to run (multiple can be added).",
    type=BetterChoice(PRE_COMMIT_LIST),
    multiple=True,
)
@click.option('-a', '--all-files', help="Run checks on all files.", is_flag=True)
@click.option('-f', '--files', help="List of files to run the checks on.", multiple=True)
@click.option(
    '-s', '--show-diff-on-failure', help="Show diff for files modified by the checks.", is_flag=True
)
@click.option(
    '-c',
    '--last-commit',
    help="Run checks for all files in last commit. Mutually exclusive with --commit-ref.",
    is_flag=True,
)
@click.option(
    '-r',
    '--commit-ref',
    help="Run checks for this commit reference only "
    "(can be any git commit-ish reference). "
    "Mutually exclusive with --last-commit.",
)
@option_verbose
@option_dry_run
@option_github_repository
@click.argument('precommit_args', nargs=-1, type=click.UNPROCESSED)
def static_checks(
    verbose: bool,
    dry_run: bool,
    github_repository: str,
    all_files: bool,
    show_diff_on_failure: bool,
    last_commit: bool,
    commit_ref: str,
    # NOTE: parameter name `type` shadows the builtin; it is dictated by the
    # '--type' click option above, so it cannot be renamed without changing the CLI.
    type: Tuple[str],
    files: bool,
    precommit_args: Tuple,
):
    """Translate the CLI flags into a `pre-commit run` invocation and execute it."""
    assert_pre_commit_installed(verbose=verbose)
    perform_environment_checks(verbose=verbose)
    command_to_execute = [sys.executable, "-m", "pre_commit", 'run']
    # --last-commit and --commit-ref both select a commit range; reject using both.
    if last_commit and commit_ref:
        get_console().print("\n[error]You cannot specify both --last-commit and --commit-ref[/]\n")
        sys.exit(1)
    for single_check in type:
        command_to_execute.append(single_check)
    if all_files:
        command_to_execute.append("--all-files")
    if show_diff_on_failure:
        command_to_execute.append("--show-diff-on-failure")
    if last_commit:
        # "Last commit" translates to the HEAD^..HEAD range for pre-commit.
        command_to_execute.extend(["--from-ref", "HEAD^", "--to-ref", "HEAD"])
    if commit_ref:
        command_to_execute.extend(["--from-ref", f"{commit_ref}^", "--to-ref", f"{commit_ref}"])
    if files:
        command_to_execute.append("--files")
    if verbose or dry_run:
        command_to_execute.append("--verbose")
    if precommit_args:
        command_to_execute.extend(precommit_args)
    # pre-commit needs the repository name to resolve some hooks.
    env = os.environ.copy()
    env['GITHUB_REPOSITORY'] = github_repository
    static_checks_result = run_command(
        command_to_execute,
        verbose=verbose,
        dry_run=dry_run,
        check=False,
        no_output_dump_on_exception=True,
        text=True,
        env=env,
    )
    if static_checks_result.returncode != 0:
        get_console().print("[error]There were errors during pre-commit check. They should be fixed[/]")
    sys.exit(static_checks_result.returncode)
@main.command(name="stop", help="Stop running breeze environment.")
@option_verbose
@option_dry_run
@click.option(
    "-p",
    "--preserve-volumes",
    help="Skip removing volumes when stopping Breeze.",
    is_flag=True,
)
def stop(verbose: bool, dry_run: bool, preserve_volumes: bool):
    """Tear down the breeze docker-compose environment."""
    down_command = ['docker-compose', 'down', "--remove-orphans"]
    # Volumes are removed by default; --preserve-volumes keeps them.
    if not preserve_volumes:
        down_command.append("--volumes")
    env_variables = get_env_variables_for_docker_commands(ShellParams(verbose=verbose))
    run_command(down_command, verbose=verbose, dry_run=dry_run, env=env_variables)
@main.command(name='exec', help='Joins the interactive shell of running airflow container')
@option_verbose
@option_dry_run
@click.argument('exec_args', nargs=-1, type=click.UNPROCESSED)
# NOTE: function name `exec` shadows the builtin; it is kept because click
# derives the command identity from it - renaming would touch the CLI surface.
def exec(verbose: bool, dry_run: bool, exec_args: Tuple):
    """Open an interactive shell inside the already-running airflow container."""
    perform_environment_checks(verbose=verbose)
    # find_airflow_container exits the process itself if lookup fails.
    container_running = find_airflow_container(verbose, dry_run)
    if container_running:
        cmd_to_run = [
            "docker",
            "exec",
            "-it",
            container_running,
            "/opt/airflow/scripts/docker/entrypoint_exec.sh",
        ]
        # Any extra CLI args are forwarded verbatim to the entrypoint.
        if exec_args:
            cmd_to_run.extend(exec_args)
        process = run_command(
            cmd_to_run,
            verbose=verbose,
            dry_run=dry_run,
            check=False,
            no_output_dump_on_exception=False,
            text=True,
        )
        if not process:
            sys.exit(1)
        sys.exit(process.returncode)
def enter_shell(**kwargs) -> RunCommandResult:
    """
    Execute entering the shell using the parameters passed as kwargs:

    * checks if docker version is good
    * checks if docker-compose version is good
    * updates kwargs with cached parameters
    * displays ASCIIART and CHEATSHEET unless disabled
    * builds ShellParams from the updated kwargs
    * executes the command to drop the user to the Breeze shell
    """
    verbose, dry_run = kwargs['verbose'], kwargs['dry_run']
    perform_environment_checks(verbose=verbose)
    # Each banner is shown unless its suppression flag exists in the Breeze cache.
    banners = (
        ('suppress_asciiart', ASCIIART, ASCIIART_STYLE),
        ('suppress_cheatsheet', CHEATSHEET, CHEATSHEET_STYLE),
    )
    for cache_key, content, content_style in banners:
        if read_from_cache_file(cache_key) is None:
            get_console().print(content, style=content_style)
    shell_params = ShellParams(**filter_out_none(**kwargs))
    rebuild_ci_image_if_needed(build_params=shell_params, dry_run=dry_run, verbose=verbose)
    return run_shell(verbose, dry_run, shell_params)
def run_shell(verbose: bool, dry_run: bool, shell_params: ShellParams) -> RunCommandResult:
    """
    Execute a shell command built from the params passed.

    * prints information about the build
    * constructs the docker compose command to enter the shell
    * executes it

    :param verbose: print commands when running
    :param dry_run: do not execute "write" commands - just print what would happen
    :param shell_params: parameters of the execution
    """
    shell_params.print_badge_info()
    compose_cmd = ['docker-compose', 'run', '--service-ports', "-e", "BREEZE", '--rm', 'airflow']
    passed_command = shell_params.command_passed
    if passed_command is not None:
        compose_cmd.extend(['-c', passed_command])
    env_variables = get_env_variables_for_docker_commands(shell_params)
    command_result = run_command(
        compose_cmd, verbose=verbose, dry_run=dry_run, env=env_variables, text=True, check=False
    )
    # Guard clause: only report details when the shell exited with an error.
    if command_result.returncode != 0:
        get_console().print(f"[red]Error {command_result.returncode} returned[/]")
        if verbose:
            get_console().print(command_result.stderr)
    return command_result
def stop_exec_on_error(returncode: int):
    """Print a standard error about the missing docker-compose process and exit."""
    get_console().print('\n[error]ERROR in finding the airflow docker-compose process id[/]\n')
    sys.exit(returncode)
def find_airflow_container(verbose, dry_run) -> Optional[str]:
exec_shell_params = ShellParams(verbose=verbose, dry_run=dry_run)
check_docker_resources(exec_shell_params.airflow_image_name, verbose=verbose, dry_run=dry_run)
exec_shell_params.print_badge_info()
env_variables = get_env_variables_for_docker_commands(exec_shell_params)
cmd = ['docker-compose', 'ps', '--all', '--filter', 'status=running', 'airflow']
docker_compose_ps_command = run_command(
cmd, verbose=verbose, dry_run=dry_run, text=True, capture_output=True, env=env_variables, check=False
)
if dry_run:
return "CONTAINER_ID"
if docker_compose_ps_command.returncode != 0:
if verbose:
get_console().print(docker_compose_ps_command.stdout)
get_console().print(docker_compose_ps_command.stderr)
stop_exec_on_error(docker_compose_ps_command.returncode)
return None
output = docker_compose_ps_command.stdout
container_info = output.strip().split('\n')
if container_info:
container_running = container_info[-1].split(' ')[0]
if | |
# Source archive metadata: 1-10 GitHub stars
# external import
from __future__ import print_function
import collections
import datetime
import errno
from io import StringIO
import logging
import os
import re
import select
import time
import paramiko
from . import util, exception
logger = logging.getLogger(__name__)
SSH_PORT = 22
class SSHSession(object):
r"""Establish SSH session with a remote host
:param host: name or ip of the remote host
:param username: user to be used for remote ssh session
:param proxy_transport:
:class:`paramiko.transport.Transport <paramiko.transport.Transport>` object for an SSH connection
used to establish ssh session between 2 remotes hosts
:param private_key_file: local path to a private key file to use if key needed for authentication
and not present in standard path (~/.ssh/)
:param port: port to connect to the remote host (default 22)
:param password: password to be used for authentication with remote host
:param missing_host_key_policy: set policy to use when connecting to servers without a known host key.
This parameter is a class **instance** of type
:class:`paramiko.client.MissingHostKeyPolicy <paramiko.client.MissingHostKeyPolicy>`, not a **class** itself
:param compress: set to True to turn on compression for this session
:param \**kwargs: any parameter taken by
:meth:`paramiko.client.SSHClient.connect <paramiko.client.SSHClient.connect>`
and not already explicitly covered by `SSHSession`
Usage::
>>> from jumpssh import SSHSession
>>> gateway_session = SSHSession('gateway.example.com', 'my_user', password='<PASSWORD>')
"""
def __init__(
        self,
        host,
        username,
        proxy_transport=None,
        private_key_file=None,
        port=SSH_PORT,
        password=None,
        missing_host_key_policy=None,
        compress=False,
        **kwargs
):
    """Store connection parameters and prepare the paramiko client.

    No network connection is made here; call :meth:`open` (or use the
    object as a context manager) to actually connect. Parameters are
    documented on the class docstring.
    """
    # Connection target and credentials
    self.host = host
    self.port = port
    self.username = username
    self.password = password
    # Count of connection attempts already made by `open()`
    self.retry_nb = 0
    # Optional transport of an already-open gateway session (for jump hosts)
    self.proxy_transport = proxy_transport
    self.private_key_file = private_key_file
    self.compress = compress
    # get input key/value parameters from user, they will be given to paramiko.client.SSHClient.connect
    self.extra_parameters = kwargs
    # Remote sessions opened *through* this session, keyed for reuse
    self.ssh_remote_sessions = {}
    self.ssh_client = paramiko.client.SSHClient()
    # Transport is populated by `open()` once connected
    self.ssh_transport = None
    # automatically accept unknown host keys by default
    if not missing_host_key_policy:
        missing_host_key_policy = paramiko.AutoAddPolicy()
    self.ssh_client.set_missing_host_key_policy(missing_host_key_policy)
def __enter__(self):
    """Open the session when entering a ``with`` block."""
    self.open()
    return self

def __exit__(self, *args):
    """Close the session when leaving a ``with`` block."""
    self.close()

def __del__(self):
    # Best-effort cleanup when the object is garbage collected.
    self.close()
def __repr__(self):
    """Return a debug representation showing the main connection parameters."""
    return '%s(host=%s, username=%s, port=%s, private_key_file=%s, proxy_transport=%s)' \
           % (self.__class__.__name__, self.host, self.username, self.port,
              self.private_key_file, repr(self.proxy_transport))
def is_active(self):
    """ Check if connection with remote host is still active

    An inactive SSHSession cannot run command on remote host

    :return: True if current session is still active, else False
    :rtype: bool

    Usage::

        >>> from jumpssh import SSHSession
        >>> with SSHSession('gateway.example.com', 'my_user', password='<PASSWORD>') as ssh_session:
        >>> ... ssh_session.is_active()
        True
        >>> ssh_session.is_active()
        False
    """
    # Coerce to a real bool: the previous chained `and` expression could
    # return None (or the client object) instead of the documented True/False.
    if not self.ssh_client:
        return False
    # Look up the transport once instead of twice.
    transport = self.ssh_client.get_transport()
    return bool(transport and transport.is_active())
def open(self, retry=0, retry_interval=10):
    """Open session with the remote host

    :param retry: number of retry to establish connection with remote host (-1 for infinite retry)
    :param retry_interval: number of seconds between each retry
    :return: same SSHSession opened

    Usage::

        >>> from jumpssh import SSHSession
        >>> ssh_session = SSHSession('gateway.example.com', 'my_user', password='<PASSWORD>').open()
        >>> ssh_session.is_active()
        True
    """
    # session is already active, nothing more to do
    if self.is_active():
        return
    while True:
        try:
            # if `proxy_transport` is given it will open a remote ssh session from current ssh session
            if self.proxy_transport:
                # open a `direct-tcpip` channel passing
                # the destination hostname:port and the local hostname:port
                dest_addr = (self.host, self.port)
                local_addr = ('localhost', SSH_PORT)
                ssh_channel = self.proxy_transport.open_channel("direct-tcpip", dest_addr, local_addr)
                hostname = 'localhost'
                port = SSH_PORT
            # else it will be a direct ssh session from local machine
            else:
                ssh_channel = None
                hostname = self.host
                port = self.port
            # update with existing default values from SSHSession
            self.extra_parameters.update({
                'hostname': hostname,
                'port': port,
                'username': self.username,
                'compress': self.compress,
                'key_filename': self.private_key_file,
                # Fix: pass the stored credential; this previously held a bare
                # `<PASSWORD>` placeholder, which is not valid Python.
                'password': self.password,
                'sock': ssh_channel,
            })
            # connect to the host
            self.ssh_client.connect(**self.extra_parameters)
            # no exception raised => connected to remote host
            break
        except Exception as ex:
            # negative retry value means infinite retry
            if retry < 0 or self.retry_nb < retry:
                logger.warning("ssh to '%s:%s' still not possible (attempt %d): %s.\nKeep retrying..."
                               % (self.host, self.port, self.retry_nb, repr(ex)))
                self.retry_nb += 1
                time.sleep(retry_interval)
            else:
                raise exception.ConnectionError("Unable to connect to '%s:%s' with user '%s'"
                                                % (self.host, self.port, self.username), original_exception=ex)
    # Get the client's transport
    self.ssh_transport = self.ssh_client.get_transport()
    logger.info("Successfully connected to '%s:%s'" % (self.host, self.port))
    return self
def close(self):
    """Close the connection with the remote host.

    Any remote sessions opened through this session are closed first, then the
    underlying SSH client is shut down and the locally cached host keys are
    cleared, since they may not be valid for the next connection.

    Usage::

        >>> from jumpssh import SSHSession
        >>> ssh_session = SSHSession('gateway.example.com', 'my_user', password='my_password').open()
        >>> ssh_session.is_active()
        True
        >>> ssh_session.close()
        >>> ssh_session.is_active()
        False
    """
    # Close child sessions first so nothing keeps using this transport.
    remote_sessions = getattr(self, 'ssh_remote_sessions', None)
    if remote_sessions:
        for remote_session in remote_sessions.values():
            remote_session.close()
    if hasattr(self, 'ssh_client') and self.is_active():
        logger.debug("Closing connection to '%s:%s'..." % (self.host, self.port))
        self.ssh_client.close()
        # Clear local host keys as they may not be valid for next connection.
        self.ssh_client.get_host_keys().clear()
def run_cmd(
self,
cmd,
username=None,
raise_if_error=True,
continuous_output=False,
silent=False,
timeout=None,
input_data=None,
success_exit_code=0,
retry=0,
retry_interval=5,
keep_retry_history=False
):
""" Run command on the remote host and return result locally
:param cmd: command to execute on remote host
cmd can be a str or a list of str
:param username: user used to execute the command (sudo privilege needed)
:param raise_if_error:
if True, raise SSHException when exit code of the command is different from 0
else just return exit code and command output
:param continuous_output: if True, print output all along the command is running
:param silent:
if True, does not log the command run (useful if sensitive information are used in command)
if parameter is a list, all strings of the command matching an item of the list will be concealed
in logs (regexp supported)
:param timeout: length in seconds after what a TimeoutError exception is raised
:param input_data:
key/value dictionary used when remote command expects input from user
when key is matching command output, value is sent
:param success_exit_code: integer or list of integer considered as a success exit code for command run
:param retry: number of retry until exit code is part of successful exit code list (-1 for infinite retry) or
RunCmdError exception is raised
:param retry_interval: number of seconds between each retry
:param keep_retry_history: if True, all retries results are kept and accessible in return result
default is False as we don't want to save by default all output for all retries especially for big output
:raises TimeoutError: if command run longer than the specified timeout
:raises TypeError: if `cmd` parameter is neither a string neither a list of string
:raises SSHException: if current SSHSession is already closed
:raises RunCmdError: if exit code of the command is different from 0 and raise_if_error is True
:return: a class inheriting from collections.namedtuple containing mainly `exit_code` and `output`
of the remotely executed command
:rtype: RunCmdResult
Usage::
>>> from jumpssh import SSHSession
>>> with SSHSession('gateway.example.com', 'my_user', password='my_password') as ssh_session:
>>> ... ssh_session.run_cmd('hostname')
RunSSHCmdResult(exit_code=0, output=u'gateway.example.com')
"""
user = self.username
# check type of command parameter is valid
try:
string_type = basestring
except NameError:
string_type = str
if isinstance(cmd, list):
cmd = " && ".join(cmd)
elif not isinstance(cmd, string_type):
raise TypeError("Invalid type for cmd argument '%s'" % type(cmd))
# success_exit_code must be int or list of int
if isinstance(success_exit_code, int):
success_exit_code = [success_exit_code]
elif not isinstance(success_exit_code, list):
raise TypeError("Invalid type for success_exit_code argument '%s'" % type(success_exit_code))
my_cmd = cmd
if username:
user = username
# need to run full command with shell to support shell builtins commands (source, ...)
my_cmd = 'sudo su - %s -c "%s"' % (user, cmd.replace('"', '\\"'))
# open session if not already the case
self.open()
# conceal text from command to be logged if requested with silent parameter
cmd_for_log = cmd
if isinstance(silent, list):
for pattern in silent:
cmd_for_log = re.sub(pattern=pattern, repl='XXXXXXX', string=cmd_for_log)
if silent is not True:
logger.debug("Running command '%s' on '%s' as %s..." % (cmd_for_log, self.host, user))
# keep track of all results for each run to make them available in response object
result_list = []
# retry command until exit_code in success code list or max retry nb reached
retry_nb = 0
while True:
channel = self.ssh_transport.open_session()
# raise error rather than blocking the call
channel.setblocking(0)
# Forward local agent
paramiko.agent.AgentRequestHandler(channel)
# Commands executed after this point will see the forwarded agent on the remote end.
channel.set_combine_stderr(True)
channel.get_pty()
channel.exec_command(my_cmd)
# prepare timer for timeout
start = datetime.datetime.now()
start_secs = time.mktime(start.timetuple())
output = StringIO()
try:
# wait until command finished running or timeout is reached
while True:
got_chunk = False
readq, _, _ = select.select([channel], [], [], timeout)
for c in readq:
if c.recv_ready():
data = channel.recv(len(c.in_buffer)).decode('utf-8')
output.write(data)
got_chunk = | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases for listSnapshot() relating to the parameters - id, listall, isrecursive, account and domainid
"""
# Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
# Import System modules
import time
_multiprocess_shared_ = True
class TestSnapshotList(cloudstackTestCase):
@classmethod
def setUpClass(cls):
    """
    Create the following domain tree and accounts that are required for executing listSnapshot test cases:
    Under ROOT - create 2 domains D1 and D2
    Under D1 - create 2 subdomains D11 and D12
    Under D11 - create subdomain D111
    Under each of the domains create 1 admin user and a couple of regular users.
    As each of these users, deploy virtual machines and take a snapshot of the ROOT volume.
    """
    cls.testclient = super(TestSnapshotList, cls).getClsTestClient()
    cls.apiclient = cls.testclient.getApiClient()
    # Use the client stored above consistently (the original mixed
    # cls.testClient and cls.testclient).
    cls.testdata = cls.testclient.getParsedTestDataConfig()
    cls.hypervisor = cls.testclient.getHypervisorInfo()
    if cls.hypervisor.lower() == 'lxc':
        raise unittest.SkipTest("snapshots are not supported on %s" % cls.hypervisor.lower())
    cls.acldata = cls.testdata["acl"]
    # Initialize before the try block so the exception handler can always
    # reference these attributes, even when an early setup step fails.
    cls.domain_1 = None
    cls.domain_2 = None
    cls.cleanup = []
    try:
        # backup default apikey and secretkey
        cls.default_apikey = cls.apiclient.connection.apiKey
        cls.default_secretkey = cls.apiclient.connection.securityKey
        # Create domains
        cls.domain_1 = Domain.create(
            cls.apiclient,
            cls.acldata["domain1"]
        )
        cls.domain_11 = Domain.create(
            cls.apiclient,
            cls.acldata["domain11"],
            parentdomainid=cls.domain_1.id
        )
        cls.domain_111 = Domain.create(
            cls.apiclient,
            cls.acldata["domain111"],
            parentdomainid=cls.domain_11.id,
        )
        cls.domain_12 = Domain.create(
            cls.apiclient,
            cls.acldata["domain12"],
            parentdomainid=cls.domain_1.id
        )
        cls.domain_2 = Domain.create(
            cls.apiclient,
            cls.acldata["domain2"]
        )
        # Create 1 admin account and 2 user accounts for domain_1
        cls.account_d1 = Account.create(
            cls.apiclient,
            cls.acldata["accountD1"],
            admin=True,
            domainid=cls.domain_1.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d1)
        cls.user_d1_apikey = user.apikey
        cls.user_d1_secretkey = user.secretkey
        cls.account_d1a = Account.create(
            cls.apiclient,
            cls.acldata["accountD1A"],
            admin=False,
            domainid=cls.domain_1.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d1a)
        cls.user_d1a_apikey = user.apikey
        cls.user_d1a_secretkey = user.secretkey
        cls.account_d1b = Account.create(
            cls.apiclient,
            cls.acldata["accountD1B"],
            admin=False,
            domainid=cls.domain_1.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d1b)
        cls.user_d1b_apikey = user.apikey
        cls.user_d1b_secretkey = user.secretkey
        # Create 1 admin and 2 user accounts for domain_11
        cls.account_d11 = Account.create(
            cls.apiclient,
            cls.acldata["accountD11"],
            admin=True,
            domainid=cls.domain_11.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d11)
        cls.user_d11_apikey = user.apikey
        cls.user_d11_secretkey = user.secretkey
        cls.account_d11a = Account.create(
            cls.apiclient,
            cls.acldata["accountD11A"],
            admin=False,
            domainid=cls.domain_11.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d11a)
        cls.user_d11a_apikey = user.apikey
        cls.user_d11a_secretkey = user.secretkey
        cls.account_d11b = Account.create(
            cls.apiclient,
            cls.acldata["accountD11B"],
            admin=False,
            domainid=cls.domain_11.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d11b)
        cls.user_d11b_apikey = user.apikey
        cls.user_d11b_secretkey = user.secretkey
        # Create 1 user account for domain_111
        cls.account_d111a = Account.create(
            cls.apiclient,
            cls.acldata["accountD111A"],
            admin=False,
            domainid=cls.domain_111.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d111a)
        cls.user_d111a_apikey = user.apikey
        cls.user_d111a_secretkey = user.secretkey
        # Create 2 user accounts for domain_12
        cls.account_d12a = Account.create(
            cls.apiclient,
            cls.acldata["accountD12A"],
            admin=False,
            domainid=cls.domain_12.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d12a)
        cls.user_d12a_apikey = user.apikey
        cls.user_d12a_secretkey = user.secretkey
        cls.account_d12b = Account.create(
            cls.apiclient,
            cls.acldata["accountD12B"],
            admin=False,
            domainid=cls.domain_12.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d12b)
        cls.user_d12b_apikey = user.apikey
        cls.user_d12b_secretkey = user.secretkey
        # Create 1 user account for domain_2
        cls.account_d2a = Account.create(
            cls.apiclient,
            cls.acldata["accountD2"],
            admin=False,
            domainid=cls.domain_2.id
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_d2a)
        cls.user_d2a_apikey = user.apikey
        cls.user_d2a_secretkey = user.secretkey
        # Create admin user account in the ROOT domain
        cls.account_a = Account.create(
            cls.apiclient,
            cls.acldata["accountROOTA"],
            admin=True,
        )
        user = cls.generateKeysForUser(cls.apiclient, cls.account_a)
        cls.user_a_apikey = user.apikey
        cls.user_a_secretkey = user.secretkey
        # create service offering
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.acldata["service_offering"]["small"]
        )
        cls.zone = get_zone(cls.apiclient, cls.testclient.getZoneForTests())
        cls.acldata['mode'] = cls.zone.networktype
        cls.template = get_template(cls.apiclient, cls.zone.id, cls.acldata["ostype"])
        # As each user in turn, deploy a VM and snapshot its ROOT volume.
        cls.apiclient.connection.apiKey = cls.user_d1_apikey
        cls.apiclient.connection.securityKey = cls.user_d1_secretkey
        cls.vm_d1 = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD1"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d1_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1.id)
        cls.vm_d1_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d1a_apikey
        cls.apiclient.connection.securityKey = cls.user_d1a_secretkey
        cls.vm_d1a = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD1A"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d1a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1a.id)
        cls.vm_d1a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1a_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d1b_apikey
        cls.apiclient.connection.securityKey = cls.user_d1b_secretkey
        cls.vm_d1b = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD1B"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d1b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1b.id)
        cls.vm_d1b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1b_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d11_apikey
        cls.apiclient.connection.securityKey = cls.user_d11_secretkey
        cls.vm_d11 = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD11"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d11_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11.id)
        cls.vm_d11_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d11a_apikey
        cls.apiclient.connection.securityKey = cls.user_d11a_secretkey
        cls.vm_d11a = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD11A"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d11a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11a.id)
        cls.vm_d11a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11a_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d11b_apikey
        cls.apiclient.connection.securityKey = cls.user_d11b_secretkey
        cls.vm_d11b = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD11B"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d11b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11b.id)
        cls.vm_d11b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11b_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d111a_apikey
        cls.apiclient.connection.securityKey = cls.user_d111a_secretkey
        cls.vm_d111a = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD111A"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d111a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d111a.id)
        cls.vm_d111a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d111a_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d12a_apikey
        cls.apiclient.connection.securityKey = cls.user_d12a_secretkey
        cls.vm_d12a = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD12A"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d12a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d12a.id)
        cls.vm_d12a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d12a_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d12b_apikey
        cls.apiclient.connection.securityKey = cls.user_d12b_secretkey
        cls.vm_d12b = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD12B"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d12b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d12b.id)
        cls.vm_d12b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d12b_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_d2a_apikey
        cls.apiclient.connection.securityKey = cls.user_d2a_secretkey
        cls.vm_d2 = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmD2A"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_d2_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d2.id)
        cls.vm_d2_snapshot = Snapshot.create(cls.apiclient, cls.vm_d2_volume[0].id)
        cls.apiclient.connection.apiKey = cls.user_a_apikey
        cls.apiclient.connection.securityKey = cls.user_a_secretkey
        cls.vm_a = VirtualMachine.create(
            cls.apiclient,
            cls.acldata["vmROOTA"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id
        )
        cls.vm_a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_a.id)
        cls.vm_a_snapshot = Snapshot.create(cls.apiclient, cls.vm_a_volume[0].id)
        cls.cleanup = [
            cls.account_a,
            cls.service_offering,
        ]
    except Exception as e:
        # Best-effort teardown of anything created before the failure; each
        # step is guarded so a cleanup error does not mask the original one.
        # Restore admin credentials in case a user key was active when we failed.
        cls.apiclient.connection.apiKey = getattr(cls, 'default_apikey', cls.apiclient.connection.apiKey)
        cls.apiclient.connection.securityKey = getattr(cls, 'default_secretkey', cls.apiclient.connection.securityKey)
        for domain in (cls.domain_2, cls.domain_1):
            if domain is not None:
                try:
                    domain.delete(cls.apiclient, cleanup="true")
                except Exception:
                    pass
        try:
            cleanup_resources(cls.apiclient, cls.cleanup)
        except Exception:
            pass
        raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
    """Delete the domain trees (recursively removing their accounts and VMs)
    and any remaining resources, using the default admin credentials."""
    cls.apiclient = super(TestSnapshotList, cls).getClsTestClient().getApiClient()
    # Restore the admin keys; individual tests switch them to user keys.
    cls.apiclient.connection.apiKey = cls.default_apikey
    cls.apiclient.connection.securityKey = cls.default_secretkey
    try:
        cls.domain_2.delete(cls.apiclient, cleanup="true")
        cls.domain_1.delete(cls.apiclient, cleanup="true")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; domain deletion remains best-effort.
        pass
    cleanup_resources(cls.apiclient, cls.cleanup)
def setUp(self):
    """Grab fresh API and database connections before each test."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
def tearDown(self):
    """Restore the default admin apikey/secretkey after each test."""
    self.apiclient.connection.apiKey = self.default_apikey
    self.apiclient.connection.securityKey = self.default_secretkey
## Domain Admin - Test cases with listall =true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true(self):
    """
    Test listing of Snapshots by passing listall="true" parameter as domain admin
    Validate that it returns all the Snapshots owned by accounts in this domain and all its subdomains
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    snapshotList = Snapshot.list(self.apiclient, listall="true")
    self.debug("List as Domain Admin - listall=true - %s" % snapshotList)
    self.assertEqual(len(snapshotList) == 9,
                     True,
                     "Number of items in list response check failed!!")
    # Bug fix: the original joined the first check with a comma, building a
    # tuple that is always truthy, so the access check could never fail.
    accountAccess = (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id))
    self.assertEqual(accountAccess,
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true_rec_true(self):
    """
    Test listing of Snapshots by passing listall="true" and isrecursive="true" parameters as domain admin
    Validate that it returns all the Snapshots owned by accounts in this domain and all its subdomains
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="true")
    self.debug("List as Domain Admin - listall=true,isrecursive=true %s" % snapshotList)
    self.assertEqual(len(snapshotList) == 9,
                     True,
                     "Number of items in list response check failed!!")
    # Bug fix: the original joined the first check with a comma, building a
    # tuple that is always truthy, so the access check could never fail.
    accountAccess = (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id))
    self.assertEqual(accountAccess,
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true_rec_false(self):
    """
    Test listing of Snapshots by passing listall="true" and isrecursive="false" parameters as domain admin
    Validate that it returns all the Snapshots owned by accounts in this domain and all its subdomains
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="false")
    self.debug("List as Domain Admin - listall=true,isrecursive=false %s" % snapshotList)
    self.assertEqual(len(snapshotList) == 9,
                     True,
                     "Number of items in list response check failed!!")
    # Bug fix: the original joined the first check with a comma, building a
    # tuple that is always truthy, so the access check could never fail.
    accountAccess = (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
                     self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id))
    self.assertEqual(accountAccess,
                     True,
                     "Account access check failed!!")
## Domain Admin - Test cases with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_false(self):
"""
Test listing of Snapshots by passing listall="false" parameter as domain | |
# <gh_stars>1-10  (scraper artifact, commented out: not valid Python)
# Filename: Estimator.py
# Written by: <NAME>
# Description: Contains estimators such as kalman filter, extended kalman filter, unscented kalman filter and simple moving horizon estimators
from pynlcontrol.BasicUtils import Integrate, nlp2GGN, casadi2List, directSum, qrSym
import casadi as ca
import os
from jinja2 import Environment, FileSystemLoader
def KF(A, B, C, D, Qw, Rv, Ts, Integrator='rk4'):
    """
    Build symbolic one-step Kalman filter (KF) update equations for a
    continuous-time linear system discretized with sample time `Ts`.

    Parameters
    ----------
    A: (numpy.2darray or casadi.SX array)
        Continuous-time state matrix of the system
    B: (numpy.2darray or casadi.SX array)
        Continuous-time input matrix of the system
    C: (numpy.2darray or casadi.SX array)
        Continuous-time measurement matrix of the system
    D: (numpy.2darray or casadi.SX array)
        Continuous time output matrix coefficient of input
    Qw: (numpy.2darray or casadi.SX array)
        Process noise covariance matrix
    Rv: (numpy.2darray or casadi.SX array)
        Measurement noise covariance matrix
    Ts: (float)
        Sample time of KF
    Integrator: (str, optional)
        Integrator to be used for discretization. Defaults to 'rk4'.

    Returns
    -------
    tuple
        Tuple of Input, Output, Input name and Output name. Inputs are u, y,
        xp, Pp and outputs are xhat and Phat, all casadi symbolics (`casadi.SX`):
            u: current input, y: current measurement,
            xp/Pp: previous state/covariance estimate,
            xhat/Phat: updated state/covariance estimate
            (covariances reshaped to column matrices).
        These can be mapped with `casadi.Function` and code-generated via
        `BasicUtils.Gen_Code`.
    """
    n_state = A.shape[0]
    n_in = B.shape[1]
    n_meas = C.shape[0]

    # Discretize the continuous dynamics xdot = A x + B u by integrating over
    # one sample period, then recover the discrete matrices as Jacobians of
    # the integrated map.
    x_sym = ca.SX.sym('x', n_state, 1)
    u_sym = ca.SX.sym('u', n_in, 1)

    def dynamics(x, u):
        return A @ x + B @ u

    x_next = Integrate(dynamics, Integrator, Ts, x_sym, u_sym)
    Ad = ca.jacobian(x_next, x_sym)
    Bd = ca.jacobian(x_next, u_sym)
    Cd = C
    Dd = D

    # Filter inputs: current input/measurement and previous estimates.
    # Symbol name strings are kept identical to preserve generated-code names.
    xp = ca.SX.sym('xp', n_state, 1)
    u = ca.SX.sym('u', n_in, 1)
    y = ca.SX.sym('y', n_meas, 1)
    Pp = ca.SX.sym('Pp', n_state, n_state)

    # Prediction step.
    x_prior = Ad @ xp + Bd @ u
    P_prior = Ad @ Pp @ ca.transpose(Ad) + Qw

    # Measurement update (correction) step.
    innovation = y - (Cd @ x_prior + Dd @ u)
    S = Cd @ P_prior @ ca.transpose(Cd) + Rv
    K = P_prior @ ca.transpose(Cd) @ ca.inv(S)
    xhat = x_prior + K @ innovation
    Phat = (ca.SX_eye(Ad.shape[0]) - K @ Cd) @ P_prior

    return [u, y, xp, Pp], [xhat, Phat], ['u', 'y', 'xhatp', 'Pkp'], ['xhat', 'Phat']
def EKF(nX, nU, nY, F, H, Qw, Rv, Ts, argF=(), argH=(), Integrator='rk4'):
    """
    Function to implement Extended Kalman filter (EKF).

    Parameters
    ----------
    nX: (int)
        Number of state variables
    nU: (int)
        Number of control inputs
    nY: (int)
        Number of measurement outputs
    F: (function)
        Function that returns right-hand side of state differential equation. Input arguments to F should be states and inputs respectively.
    H: (function)
        Function that returns measurement variable from state and input variables. Input arguments to H should be states.
    Qw: (numpy.2darray or casadi.SX array)
        Process noise covariance matrix
    Rv: (numpy.2darray or casadi.SX array)
        Measurement noise covariance matrix
    Ts: (int or float)
        Sample time of the Kalman filter.
    argF: (list or tuple)
        Extra parameters forwarded to function F
    argH: (list or tuple)
        Extra parameters forwarded to function H
    Integrator: (str, optional)
        Integration method. Defaults to 'rk4'. For list of supported integrators, please see documentation of function `Integrate()`.

    Returns
    -------
    tuple:
        Tuple of Input, Output, Input name and Output name. Inputs are u, y, xp, Pp and outputs are xhat and Phat, all casadi symbolics (`casadi.SX`):
            u: current input, y: current measurement,
            xp/Pp: previous state/covariance estimate,
            xhat/Phat: updated state/covariance estimate
            (covariances reshaped to column matrices).
        These can be mapped with `casadi.Function` and code-generated.
    """
    assert isinstance(nX, int), "nX must be integer."
    assert isinstance(nU, int), "nU must be integer."
    assert isinstance(nY, int), "nY must be integer."
    assert Qw.shape[0] == Qw.shape[1], "Qw is not square matrix."
    assert Rv.shape[0] == Rv.shape[1], "Rv is not square matrix."
    assert nX == Qw.shape[0], "Shape mismatch of Qw with nX."
    assert nY == Rv.shape[0], "Shape mismatch of Rv with nY."
    # Generalized: accept integer sample times too (e.g. Ts=1), which the
    # original float-only check rejected.
    assert isinstance(Ts, (int, float)) and not isinstance(Ts, bool), "Sample time (Ts) must be a number."

    # Filter inputs: current input/measurement and previous estimates.
    xp = ca.SX.sym('xp', nX, 1)
    u = ca.SX.sym('u', nU, 1)
    y = ca.SX.sym('y', nY, 1)
    Pp = ca.SX.sym('Pp', nX, nX)

    # Prediction: propagate through the discretized nonlinear dynamics and
    # linearize to propagate the covariance.
    xkm = Integrate(F, Integrator, Ts, xp, u, *argF)
    Fk = ca.jacobian(xkm, xp)
    Pkm = Fk @ Pp @ ca.transpose(Fk) + Qw

    # Correction: linearize the measurement model at the predicted state.
    yr = y - H(xkm, *argH)
    Hk = ca.substitute(ca.jacobian(H(xp, *argH), xp), xp, xkm)
    Sk = Hk @ Pkm @ ca.transpose(Hk) + Rv
    Kk = Pkm @ ca.transpose(Hk) @ ca.inv(Sk)
    xhat = xkm + Kk @ yr
    Phat = (ca.SX_eye(Fk.shape[0]) - Kk @ Hk) @ Pkm

    return [u, y, xp, Pp], [xhat, Phat], ['u', 'y', 'xhatp', 'Pkp'], ['xhat', 'Phat']
def UKF(nX, nU, nY, F, H, Qw, Rv, Ts, argF=[], argH=[], PCoeff=None, Wm=None, Wc=None, alpha=1.0e-3, beta=2.0, kappa=0.0, Integrator='rk4'):
"""
Function to implement Unscented Kalman filter (UKF).
If either of PCoeff or Wm or Wc is None, it calculates those values with alpha=1e-3, Beta=2 and kappa=0. To use manual weights, specify PCOeff, Wm and Wc. Otherwise, use alpha, beta and kappa parameters to set those values.
Parameters
----------
nX: (int)
Number of state variables
nU: (int)
Number of control inputs
nY: (int)
Number of measurement outputs
F: (function)
Function that returns right-hand side of state differential equation. Input arguments to F should be states and inputs respectively.
H: (function)
Function that retuns measurement variable from state and input variables. Input arguments to H should be states.
Qw: (numpy.2darray or casadi.SX array)
Process noise covariance matrix
Rv: (numpy.2darray or casadi.SX array)
Measurement noise covariance matrix
Ts: (float)
Sample time of the Kalman filter.
argF: (list)
List of external parameters to function F
argH: (list)
List of external parameters to function H
PCoeff: (float)
Coefficient of covariance matrix (inside square root term) when calculating sigma points. Defaults to None
Wm: (list, optional)
List of weights for mean calculation. Defaults to None.
Wc: (list, optional)
List of weights for covariance calculation. Defaults to None.
alpha: (float, optional)
Value of alpha parameter. Defaults to 1.0e-3.
beta: (float, optional)
Value of beta parameter. Defaults to 2.0.
kappa: (float, optional)
Value of kappa parameter. Defaults to 0.0.
Integrator: (str, optional)
Integration method. Defaults to 'rk4'. For list of supported integrator, please see documentation of function `Integrate`.
Returns
-------
tuple:
Tuple of Input, Output, Input name and Output name. Inputs are u, y, xp, Pp and output are xhat and Phat. Input and output are casadi symbolics (`casadi.SX`).
u: Current input to the system
y: Current measurement of the system
xp: State estimate from previous discrete time
Pp: Covariance estimate from previous discrete time (reshaped to column matrix)
xhat: State estimate at current discrete time
Phat: Covariance estimate at current discrete | |
'variable'
N = self._nrows
K = len(tempdict) - len(id_vars)
#create an empty dataset
mdata = type(self)({})
# reexpand any categoricals
for col in id_vars:
id_data = tempdict.pop(col)
if TypeRegister.is_binned_array(id_data):
# note: multikey categorical expands to a tuple of arrays
# previously raised an error on expand array
id_data = id_data.expand_array
mdata[col] = np.tile(id_data._np,K)
mdata[var_name] = FastArray(list(tempdict.keys())).repeat(N)
mdata[value_name] = hstack(tempdict.values())
if trim:
goodmask = ~mdata[value_name].isnanorzero()
mdata=mdata[goodmask,:]
return mdata
#--------------------------------------------------------------------------
@classmethod
def hstack(cls, ds_list, destroy: bool = False) -> 'Dataset':
    """
    Stacks columns from multiple datasets.

    Thin alias that delegates to `concat_rows`; see that method for details
    on missing-column filling, categorical merging, and type checks.

    Parameters
    ----------
    ds_list : iterable of Dataset
        The Datasets to be concatenated.
    destroy : bool
        Set to True to destroy any dataset in the list to save memory.
        Defaults to False.

    Returns
    -------
    Dataset
        A new Dataset created from the concatenated rows of the input Datasets.

    See Also
    --------
    Dataset.concat_rows
    """
    return cls.concat_rows(ds_list, destroy=destroy)
#--------------------------------------------------------------------------
@classmethod
def concat_rows(cls, ds_list: Iterable['Dataset'], destroy: bool = False) -> 'Dataset':
    """
    Stacks rows from multiple datasets (vertical concatenation); columns are matched up by name.
    If a dataset is missing a column that appears in others, it will fill the gap with the default invalid for that column's type.
    Categoricals will be merged and stacked.
    Column types will be checked to make sure they can be safely stacked - no general type mismatch allowed.
    Columns of the same name must have the same number of dimension in each dataset (1 or 2 dimensions allowed)

    Parameters
    ----------
    ds_list : iterable of Dataset
        The Datasets to be concatenated
    destroy : bool
        Set to True to destroy any dataset in the list to save memory. Defaults to False.

    Returns
    -------
    Dataset
        A new Dataset created from the concatenated rows of the input Datasets.

    Examples
    --------
    Basic:
    >>> ds1 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
    >>> ds2 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
    >>> ds1
    #   col_0   col_1   col_2
    -   -----   -----   -----
    0    0.39    0.80    0.64
    1    0.54    0.80    0.36
    2    0.14    0.75    0.86
    3    0.05    0.61    0.95
    4    0.37    0.39    0.03
    >>> ds2
    #   col_0   col_1   col_2
    -   -----   -----   -----
    0    0.09    0.75    0.37
    1    0.90    0.34    0.17
    2    0.52    0.32    0.78
    3    0.37    0.20    0.34
    4    0.73    0.69    0.41
    >>> rt.Dataset.concat_rows([ds1, ds2])
    #   col_0   col_1   col_2
    -   -----   -----   -----
    0    0.39    0.80    0.64
    1    0.54    0.80    0.36
    2    0.14    0.75    0.86
    3    0.05    0.61    0.95
    4    0.37    0.39    0.03
    5    0.09    0.75    0.37
    6    0.90    0.34    0.17
    7    0.52    0.32    0.78
    8    0.37    0.20    0.34
    9    0.73    0.69    0.41

    With columns missing in one from some datasets:
    >>> ds1 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
    >>> ds2 = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(2)})
    >>> rt.Dataset.concat_rows([ds1, ds2])
    #   col_0   col_1   col_2
    -   -----   -----   -----
    0    0.78    0.64    0.98
    1    0.61    0.87    0.85
    2    0.57    0.42    0.90
    3    0.82    0.50    0.60
    4    0.19    0.16    0.23
    5    0.69    0.83     nan
    6    0.07    0.82     nan
    7    0.58    0.34     nan
    8    0.69    0.38     nan
    9    0.89    0.07     nan

    With categorical column:
    >>> ds1 = rt.Dataset({'cat_col': rt.Categorical(['a','a','b','c','a']),
    ...                   'num_col': np.random.rand(5)})
    >>> ds2 = rt.Dataset({'cat_col': rt.Categorical(['b','b','a','c','d']),
    ...                   'num_col': np.random.rand(5)})
    >>> rt.Dataset.concat_rows([ds1, ds2])
    #   cat_col   num_col
    -   -------   -------
    0   a            0.38
    1   a            0.71
    2   b            0.84
    3   c            0.47
    4   a            0.18
    5   b            0.18
    6   b            0.47
    7   a            0.16
    8   c            0.96
    9   d            0.88

    Multiple dimensions (note: numpy v-stack will be used to concatenate 2-dimensional columns):
    >>> ds1 = rt.Dataset({'nums': rt.ones((4,4))})
    >>> ds1
    #   nums
    -   ------------------------
    0   [1.00, 1.00, 1.00, 1.00]
    1   [1.00, 1.00, 1.00, 1.00]
    2   [1.00, 1.00, 1.00, 1.00]
    3   [1.00, 1.00, 1.00, 1.00]
    >>> ds2 = rt.Dataset({'nums': rt.zeros((4,4))})
    >>> ds2
    #   nums
    -   ------------------------
    0   [0.00, 0.00, 0.00, 0.00]
    1   [0.00, 0.00, 0.00, 0.00]
    2   [0.00, 0.00, 0.00, 0.00]
    3   [0.00, 0.00, 0.00, 0.00]
    >>> rt.Dataset.concat_rows([ds1, ds2])
    #   nums
    -   ------------------------
    0   [1.00, 1.00, 1.00, 1.00]
    1   [1.00, 1.00, 1.00, 1.00]
    2   [1.00, 1.00, 1.00, 1.00]
    3   [1.00, 1.00, 1.00, 1.00]
    4   [0.00, 0.00, 0.00, 0.00]
    5   [0.00, 0.00, 0.00, 0.00]
    6   [0.00, 0.00, 0.00, 0.00]
    7   [0.00, 0.00, 0.00, 0.00]

    Multiple dimensions with missing columns (sentinels/invalids will be flipped to final vstack dtype)
    >>> ds1 = rt.Dataset({'nums': rt.ones((5,5)), 'nums2': rt.zeros((5,5), dtype=np.float64)})
    >>> ds2 = rt.Dataset({'nums': rt.ones((5,5))})
    >>> ds3 = rt.Dataset({'nums': rt.ones((5,5)), 'nums2': rt.zeros((5,5), dtype=np.int8)})
    >>> rt.Dataset.concat_rows([ds1, ds2, ds3])
     #   nums                             nums2
    --   ------------------------------   ------------------------------
     0   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
     1   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
     2   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
     3   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
     4   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
     5   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
     6   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
     7   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
     8   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
     9   [1.00, 1.00, 1.00, 1.00, 1.00]   [nan, nan, nan, nan, nan]
    10   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
    11   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
    12   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
    13   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
    14   [1.00, 1.00, 1.00, 1.00, 1.00]   [0.00, 0.00, 0.00, 0.00, 0.00]
    """
    # All of the stacking/alignment work happens in hstack_any; passing `cls`
    # lets subclasses of Dataset produce instances of themselves.
    return hstack_any(ds_list, cls, Dataset, destroy=destroy)
#--------------------------------------------------------------------------
@classmethod
def concat_columns(cls, dsets, do_copy: bool, on_duplicate: str = 'raise', on_mismatch: str = 'warn'):
    r"""
    Concatenates a list of Datasets or Structs horizontally.

    Parameters
    ----------
    cls : class
        The class (Dataset)
    dsets : iterable
        An iterable of Datasets
    do_copy : bool
        Makes deep copies of arrays if set to True
    on_duplicate : {'raise', 'first', 'last'}
        Governs behavior in case of duplicate columns.
    on_mismatch : {'warn', 'raise', 'ignore'}
        Optional, governs behavior for allowed duplicate column names, how to
        address mismatched column values; can be 'warn' (default), 'raise' or 'ignore'.

    Returns
    -------
    Dataset
        The resulting dataset after concatenation.

    Examples
    --------
    With the ``'last'`` `on_duplicate` option:
    >>> N = 5
    >>> dset1 = rt.Dataset(dict(A=rt.arange(N), B=rt.ones(N), C=N*['c']))
    >>> dset2 = rt.Dataset(dict(A=rt.arange(N, 2*N, 1), B=rt.zeros(N), D=N*['d']))
    >>> dsets = [dset1, dset2]
    >>> rt.Dataset.concat_columns(dsets, do_copy=True, on_duplicate='last')
    #   A      B   C   D
    -   -   ----   -   -
    0   5   0.00   c   d
    1   6   0.00   c   d
    2   7   0.00   c   d
    3   8   0.00   c   d
    4   9   0.00   c   d
    <BLANKLINE>
    [5 rows x 4 columns] total bytes: 70.0 B

    With the default (``'raise'``) for the `on_duplicate` option:
    >>> rt.Dataset.concat_columns(dsets, do_copy=True)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "C:\ProgramData\Anaconda3\envs\riptable-dev37\lib\site-packages\riptable-0.0.0-py3.7-win-amd64.egg\riptable\rt_dataset.py", line 4308, in concat_columns
        raise KeyError(f"Duplicate column '{column}'")
    KeyError: "Duplicate column 'A'"
    """
    # Validate the option strings up front.
    if on_duplicate not in ('raise', 'first', 'last'):
        raise ValueError(f"Invalid on_duplicate '{on_duplicate}'")
    if on_mismatch not in ('raise', 'warn', 'ignore'):
        raise ValueError(f"Invalid on_mismatch '{on_mismatch}'")
    if len(dsets) == 0:
        raise ValueError("No Datasets to concatenate")
    # Single input and no copy requested: nothing to merge.
    if len(dsets) == 1 and not do_copy:
        return dsets[0]
    # Coerce any non-Dataset inputs (e.g. Structs) to Datasets. Previously
    # this used two bare `except:` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit.
    converted = []
    for d in dsets:
        # A Dataset is recognized by its _nrows attribute.
        if not hasattr(d, '_nrows'):
            try:
                d = Dataset(d)
            except Exception:
                raise ValueError(f"Unable to convert {d!r} to a Dataset")
        converted.append(d)
    dsets = converted
    # All inputs must have the same number of rows.
    rownum_set = {d.shape[0] for d in dsets}
    if len(rownum_set) != 1:
        raise ValueError(f'Inconsistent Dataset lengths {rownum_set}')
    # Merge columns left-to-right, resolving duplicates per on_duplicate.
    dict_retval = {}
    columns = set()
    dups = set()
    for d in dsets:
        for column, a in d.items():
            if column in columns:
                dups.add(column)
                if on_duplicate == 'raise':
                    raise KeyError(f"Duplicate column '{column}'")
                elif on_duplicate == 'last':
                    dict_retval[column] = a.copy() if do_copy else a
                # on_duplicate == 'first': keep the column already stored.
            else:
                columns.add(column)
                dict_retval[column] = a.copy() if do_copy else a
    # Report duplicates according to on_mismatch (ignored when 'ignore').
    if dups:
        if on_mismatch == 'warn':
            warnings.warn(f'concat_columns() duplicate column mismatch: {dups!r}')
        elif on_mismatch == 'raise':
            raise RuntimeError(f'concat_columns() duplicate column mismatch: {dups!r}')
    return cls(dict_retval)
# | |
a library with PBC2 value of {:.2f}.'.format(
pbc2_detail,
alignment_file['output_type'],
audit_link(path_to_text(alignment_file['@id']), alignment_file['@id']),
PBC2_value
)
)
if PBC2_value < 1:
yield AuditFailure('severe bottlenecking', detail,
level='NOT_COMPLIANT')
elif PBC2_value >= 1 and PBC2_value < 10:
yield AuditFailure('mild to moderate bottlenecking', detail,
level='WARNING')
return
def check_wgbs_coverage(samtools_metrics,
                        pipeline_title,
                        read_length,
                        organism,
                        pipeline_objects):
    """Yield audit failures for WGBS replicates whose estimated genome
    coverage is below the ENCODE standard (minimum 25X, recommended > 30X).

    :param samtools_metrics: list of metric dicts; only entries containing a
        'mapped' read count are considered.
    :param pipeline_title: display name of the processing pipeline.
    :param read_length: read length (bp) used to estimate coverage.
    :param organism: 'mouse' or 'human'; any other organism is skipped.
    :param pipeline_objects: pipeline objects; the first one is linked in the
        failure detail.
    """
    for m in samtools_metrics:
        if 'mapped' not in m:
            continue
        mapped_reads = m['mapped']
        # Approximate genome sizes (bp) used as the coverage denominator.
        if organism == 'mouse':
            genome_size = 2800000000.0
        elif organism == 'human':
            genome_size = 3300000000.0
        else:
            # Bug fix: `coverage` was previously left unbound for any other
            # organism, raising NameError below.
            continue
        coverage = float(mapped_reads * read_length) / genome_size
        detail = ('Replicate of experiment processed by {} ( {} ) '
                  'has a coverage of {}X. '
                  'The minimum ENCODE standard coverage for each replicate in '
                  'a WGBS assay is 25X and the recommended value '
                  'is > 30X (See {} )'.format(
                      pipeline_title,
                      audit_link(path_to_text(pipeline_objects[0]['@id']), pipeline_objects[0]['@id']),
                      int(coverage),
                      audit_link('ENCODE WGBS data standards', '/data-standards/wgbs/')
                  )
                  )
        if coverage < 5:
            yield AuditFailure('extremely low coverage',
                               detail,
                               level='ERROR')
        elif coverage < 25:
            yield AuditFailure('insufficient coverage',
                               detail,
                               level='NOT_COMPLIANT')
        elif coverage < 30:
            yield AuditFailure('low coverage',
                               detail,
                               level='WARNING')
    return
def check_wgbs_pearson(cpg_metrics, threshold, pipeline_title):
    """Yield a NOT_COMPLIANT failure for every CpG quantification metric whose
    replicate Pearson correlation falls below `threshold`."""
    metric_key = 'Pearson Correlation Coefficient'
    for metric in cpg_metrics:
        if metric_key not in metric:
            continue
        correlation = metric[metric_key]
        if not correlation < threshold:
            continue
        detail = ('ENCODE experiment processed by {} '
                  'pipeline has CpG quantification Pearson Correlation Coefficient of '
                  '{}, while a value >={} is required.'.format(
                      pipeline_title,
                      correlation,
                      threshold
                  )
                  )
        yield AuditFailure('insufficient replicate concordance',
                           detail,
                           level='NOT_COMPLIANT')
    return
def check_wgbs_lambda(bismark_metrics, threshold, pipeline_title):
    """Yield a WARNING when lambda (spike-in) C methylation is too high in a
    bismark metric.

    A metric is flagged when all three context percentages exceed 1% or when
    their weighted mean (CpG 25%, CHG 25%, CHH 50%) exceeds 1%.
    Note: the `threshold` parameter is currently unused; the 1% cutoff is
    hard-coded, matching the message text.
    """
    context_keys = ('lambda C methylated in CpG context',
                    'lambda C methylated in CHG context',
                    'lambda C methylated in CHH context')
    for metric in bismark_metrics:
        raw_values = [metric.get(key) for key in context_keys]
        if not all(raw_values):
            continue
        # Values look like '0.5%'; drop the trailing percent sign.
        cpg_pct, chg_pct, chh_pct = (float(value[:-1]) for value in raw_values)
        weighted_mean = (cpg_pct * 0.25) + (chg_pct * 0.25) + (chh_pct * 0.5)
        if all(pct > 1 for pct in (cpg_pct, chg_pct, chh_pct)) or weighted_mean > 1:
            detail = ('ENCODE experiment processed by {} '
                      'pipeline has the following %C methylated in different contexts. '
                      'lambda C methylated in CpG context was {}%, '
                      'lambda C methylated in CHG context was {}%, '
                      'lambda C methylated in CHH context was {}%. '
                      'The %C methylated in all contexts should be < 1%.'.format(
                          pipeline_title,
                          cpg_pct,
                          chg_pct,
                          chh_pct
                      )
                      )
            yield AuditFailure('high lambda C methylation ratio', detail,
                               level='WARNING')
def check_file_chip_seq_read_depth(file_to_check,
control_type,
organism_name,
target,
read_depth,
standards_version):
# added individual file pipeline validation due to the fact that one experiment may
# have been mapped using 'Raw mapping' and also 'Histone ChIP-seq' - and there is no need to
# check read depth on Raw files, while it is required for Histone
pipeline_title = scanFilesForPipelineTitle_yes_chipseq(
[file_to_check],
['ChIP-seq read mapping',
'Transcription factor ChIP-seq pipeline (modERN)'])
if pipeline_title is False:
return
pipeline_objects = get_pipeline_objects([file_to_check])
marks = pipelines_with_read_depth['ChIP-seq read mapping']
modERN_cutoff = pipelines_with_read_depth[
'Transcription factor ChIP-seq pipeline (modERN)']
if read_depth is False:
detail = ('ENCODE processed {} file {} has no read depth information.'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id'])
)
)
yield AuditFailure('missing read depth', detail, level='INTERNAL_ACTION')
return
if target is not False and 'name' in target:
target_name = target['name']
elif control_type:
target_name = control_type
else:
return
if target is not False and 'investigated_as' in target:
target_investigated_as = target['investigated_as']
elif control_type:
target_investigated_as = [control_type]
else:
return
if control_type == 'input library' and organism_name in ['human', 'mouse']:
if pipeline_title == 'Transcription factor ChIP-seq pipeline (modERN)':
if read_depth < modERN_cutoff:
detail = ('modERN processed alignment file {} has {} '
'usable fragments. It cannot be used as a control '
'in experiments studying transcription factors, which '
'require {} usable fragments, according to '
'the standards defined by the modERN project.'.format(
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
read_depth,
modERN_cutoff
)
)
yield AuditFailure('insufficient read depth',
detail, level='NOT_COMPLIANT')
else:
if read_depth >= marks['narrow']['recommended'] and read_depth < marks['broad']['recommended']:
if 'assembly' in file_to_check:
detail = ('Control {} file {} mapped using {} assembly has {} '
'usable fragments. '
'The minimum ENCODE standard for a control of ChIP-seq assays targeting broad '
'histone marks '
'is 20 million usable fragments, the recommended number of usable '
'fragments is > 45 million. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
file_to_check['assembly'],
read_depth,
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
else:
detail = ('Control {} file {} has {} '
'usable fragments. '
'The minimum ENCODE standard for a control of ChIP-seq assays targeting broad '
'histone marks '
'is 20 million usable fragments, the recommended number of usable '
'fragments is > 45 million. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
read_depth,
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
yield AuditFailure('insufficient read depth for broad peaks control', detail, level='INTERNAL_ACTION')
if read_depth < marks['narrow']['recommended']:
if 'assembly' in file_to_check:
detail = ('Control {} file {} mapped using {} assembly has {} '
'usable fragments. '
'The minimum ENCODE standard for a control of ChIP-seq assays targeting broad '
'histone marks '
'is 20 million usable fragments, the recommended number of usable '
'fragments is > 45 million. '
'The minimum for a control of ChIP-seq assays targeting narrow '
'histone marks or transcription factors '
'is 10 million usable fragments, the recommended number of usable '
'fragments is > 20 million. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
file_to_check['assembly'],
read_depth,
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
else:
detail = ('Control {} file {} has {} '
'usable fragments. '
'The minimum ENCODE standard for a control of ChIP-seq assays targeting broad '
'histone marks '
'is 20 million usable fragments, the recommended number of usable '
'fragments is > 45 million. '
'The minimum for a control of ChIP-seq assays targeting narrow '
'histone marks or transcription factors '
'is 10 million usable fragments, the recommended number of usable '
'fragments is > 20 million. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
read_depth.
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
if read_depth >= marks['narrow']['minimal']:
yield AuditFailure('low read depth', detail, level='WARNING')
elif read_depth >= marks['narrow']['low'] and read_depth < marks['narrow']['minimal']:
yield AuditFailure('insufficient read depth',
detail, level='NOT_COMPLIANT')
else:
yield AuditFailure('extremely low read depth',
detail, level='ERROR')
elif 'broad histone mark' in target_investigated_as and \
standards_version != 'modERN': # target_name in broad_peaks_targets:
pipeline_object = get_pipeline_by_name(
pipeline_objects, 'ChIP-seq read mapping')
if pipeline_object:
if target_name in ['H3K9me3-human', 'H3K9me3-mouse']:
if read_depth < marks['broad']['recommended']:
if 'assembly' in file_to_check:
detail = ('Processed {} file {} produced by {} '
'pipeline ( {} ) using the {} assembly '
'has {} mapped reads. '
'The minimum ENCODE standard for each replicate in a ChIP-seq '
'experiment targeting {} and investigated as '
'a broad histone mark is 35 million mapped reads. '
'The recommended value is > 45 million, but > 35 million is '
'acceptable. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
pipeline_object['title'],
audit_link(path_to_text(pipeline_object['@id']), pipeline_object['@id']),
file_to_check['assembly'],
read_depth,
target_name,
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
else:
detail = ('Processed {} file {} produced by {} '
'pipeline ( {} ) has {} mapped reads. '
'The minimum ENCODE standard for each replicate in a ChIP-seq '
'experiment targeting {} and investigated as '
'a broad histone mark is 35 million mapped reads. '
'The recommended value is > 45 million, but > 35 million is '
'acceptable. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
pipeline_object['title'],
audit_link(path_to_text(pipeline_object['@id']), pipeline_object['@id']),
read_depth,
target_name,
audit_link('ENCODE ChIP-seq data standards', '/data-standards/chip-seq/')
)
)
if read_depth >= marks['broad']['minimal']:
yield AuditFailure('low read depth',
detail, level='WARNING')
elif read_depth >= 100 and read_depth < marks['broad']['minimal']:
yield AuditFailure('insufficient read depth',
detail, level='NOT_COMPLIANT')
elif read_depth < 100:
yield AuditFailure('extremely low read depth',
detail, level='ERROR')
else:
if 'assembly' in file_to_check:
detail = ('Processed {} file {} produced by {} '
'pipeline ( {} ) using the {} assembly '
'has {} usable fragments. '
'The minimum ENCODE standard for each replicate in a ChIP-seq '
'experiment targeting {} and investigated as '
'a broad histone mark is 20 million usable fragments. '
'The recommended value is > 45 million, but > 35 million is '
'acceptable. (See {} )'.format(
file_to_check['output_type'],
audit_link(path_to_text(file_to_check['@id']), file_to_check['@id']),
pipeline_object['title'],
audit_link(path_to_text(pipeline_object['@id']), pipeline_object['@id']),
file_to_check['assembly'],
read_depth,
| |
import json
import redis
import time
import logging
from glintwebui.glint_api import repo_connector
from ast import literal_eval
import config
import os
logger = logging.getLogger('glintv2')
'''
Receives a tuple of image tuples (tenant, img_name, img_id, disk_format, container_format, visibility, checksum, cloud_name) that uniquely identify an image_list
then sorts them based on their repo and returns them in a json dictionary string
Format:
Proj_dict{
Repo1Alias{
Img_ID1{
name
state
disk_format
container_format
visibility
checksum
hidden
}
Img_ID2{
name
state
disk_format
container_format
visibility
checksum
hidden
}
.
.
.
Img_IDX{
name
state
disk_format
container_format
visibility
checksum
hidden
}
}
Repo2Alias{
...
}
.
.
.
RepoXAlias{
...
}
}
'''
def jsonify_image_list(image_list, repo_list):
    """Group images by the repo (cloud) that owns them and return the result
    as a JSON string.

    Each image tuple is laid out as:
    (tenant, name, id, disk_format, container_format, visibility, checksum, cloud_name)
    An image belongs to a repo when both its tenant and cloud name match.
    Images are keyed by their id; non-private images are marked hidden.
    """
    grouped = {}
    for repo in repo_list:
        images = {}
        for image in image_list:
            if image[0] != repo.tenant or image[7] != repo.cloud_name:
                continue
            images[image[2]] = {
                'name': image[1],
                'state': 'Present',
                'disk_format': image[3],
                'container_format': image[4],
                'visibility': image[5],
                'checksum': image[6],
                'hidden': image[5] != "private",
            }
        grouped[repo.cloud_name] = images
    return json.dumps(grouped)
# This function will accept 2 json string dictionaries and find the pending transactions
# from the first and add them to the second then check the queue for any state changes
# apply those final updates and finally return a jsonified dictstring
#
# This function changed significantly when we started using the image name as the unique identifier
# which changed the way the state changes are handled. We can no longer intuitively change
# the states here based on the information of the two dictionaries. State changes are now handled
# in a separate function that reads from a queue.
#
def update_pending_transactions(old_img_dict, new_img_dict):
    """Merge transient state from the previous image snapshot into a new one.

    Carries forward any "Pending Transfer"/"Pending Delete" states (re-adding
    entries that have not yet materialised in, or disappeared from, the new
    snapshot) and preserves the user-controlled ``hidden`` flag, which freshly
    polled snapshots do not carry.

    :param old_img_dict: previous snapshot as a JSON string (None/invalid on
        the first call after startup).
    :param new_img_dict: freshly polled snapshot as a JSON string.
    :returns: the merged snapshot as a JSON string.
    """
    # On startup there is no previous snapshot: json.loads(None) -> TypeError.
    try:
        old_dict = json.loads(old_img_dict)
    except TypeError:
        logger.info("No old image dictionary, either bad redis entry or first call since startup")
        return new_img_dict
    new_dict = json.loads(new_img_dict)
    for repo_key, old_repo in old_dict.items():
        for img_key, old_img in old_repo.items():
            if old_img['state'] in {"Pending Transfer", "Pending Delete"}:
                try:
                    # Still present in the new snapshot: keep it flagged pending.
                    new_dict[repo_key][img_key]['state'] = old_img['state']
                except KeyError:
                    # Not in the new snapshot: a pending transfer has not
                    # materialised / a pending delete has already vanished, so
                    # keep showing the old entry.
                    new_dict[repo_key][img_key] = old_img
            # Preserve the hidden flag across snapshots. The bare `except:`
            # clauses were narrowed to KeyError - the only expected failure is
            # a missing key (image or attribute).
            try:
                new_dict[repo_key][img_key]['hidden'] = old_img['hidden']
            except KeyError:
                # Either the image was deleted faster than we polled (absent
                # from new_dict) or the old entry predates the hidden
                # attribute; new images are always private, so default to False.
                try:
                    new_dict[repo_key][img_key]['hidden'] = False
                except KeyError:
                    pass
    return json.dumps(new_dict)
# returns a jsonified python dictionary containing the image list for a given project
# If the image list doesn't exist in redis it returns False
# Redis connection info is read from the config module
def get_images_for_group(group_name):
    """Fetch the cached JSON image list for *group_name* from redis.

    Returns the raw redis value (or None when the key is absent); returns
    False when the lookup raises KeyError.
    """
    try:
        conn = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)
        return conn.get(group_name)
    except KeyError:
        logger.error("Couldnt find image list for group %s", group_name)
        return False
# accepts a project as key string and a jsonified dictionary of the images and stores them in redis
# Redis connection info is read from the config module
def set_images_for_group(group_name, json_img_dict):
    """Store the JSON-encoded image dictionary for *group_name* in redis."""
    try:
        conn = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)
        conn.set(group_name, json_img_dict)
    except Exception:
        logger.error("Unknown exception while trying to set images for: %s", group_name)
# returns dictionary containing any conflicts for a given account name
def get_conflicts_for_group(group_name):
    """Return the stored conflict dictionary for *group_name*, or None when
    no group is given or no conflicts are stored."""
    if group_name is None:
        logger.info("Couldnt find conflict list; no group provided.")
        return None
    try:
        conn = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)
        raw = conn.get(group_name + "_conflicts")
    except KeyError:
        logger.info("Couldnt find conflict list for group %s", group_name)
        return None
    if raw is None:
        return None
    return json.loads(raw)
def set_conflicts_for_group(group_name, conflict_dict):
    """JSON-encode *conflict_dict* and store it in redis under the group's
    conflicts key."""
    conflict_key = group_name + "_conflicts"
    try:
        conn = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)
        conn.set(conflict_key, json.dumps(conflict_dict))
    except Exception:
        logger.error ("Unknown exception while trying to set conflicts for: %s", group_name)
# Returns a sorted, case-insensitive list of unique image names that are not hidden in glint
# Note: images are identified by name here, so the same image (id) held under
# different names at two sites will appear as two entries
def get_unique_image_list(group_name):
    """Return a case-insensitively sorted list of the names of all non-hidden
    images cached for *group_name*."""
    image_dict = json.loads(get_images_for_group(group_name))
    # Collect names across every repo; the set keeps them unique.
    visible_names = {
        attrs['name']
        for repo_images in image_dict.values()
        for attrs in repo_images.values()
        if not attrs['hidden']
    }
    return sorted(visible_names, key=str.lower)
# similar to "get_unique_image_list", but returns a sorted list of ALL image
# names (hidden or not) so that their hidden status can be toggled
def get_hidden_image_list(group_name):
    """Return a case-insensitively sorted list of every image name (hidden or
    not) cached for *group_name*, e.g. for toggling hidden status."""
    image_dict = json.loads(get_images_for_group(group_name))
    # Collect names across every repo; the set keeps them unique.
    all_names = {
        attrs['name']
        for repo_images in image_dict.values()
        for attrs in repo_images.values()
    }
    return sorted(all_names, key=str.lower)
# accepts image dictionary and returns a dictionary that inverses the format to
# repo1{
# img_name: img_key
# ...
#}
# repo2{
# img_name: img_key
# ...
#}
def build_id_lookup_dict(image_dict):
    """Invert the image dictionary: for each repo, map image name -> image id.

    Input format is repo -> {image_id: {'name': ..., ...}}; the result is
    repo -> {image_name: image_id}.
    """
    return {
        repo: {attrs['name']: image_id for image_id, attrs in images.items()}
        for repo, images in image_dict.items()
    }
# Accepts the image dictionary and checks if there are any repos that contain conflicts
#
# Type 1 - Image1 and Image2 have the same name but are different images.
# Type 2 - Image1 and Image2 have the same name and are the same image.
# Type 3 - Image1 and Image2 have different names but are the same image.
def _build_conflict(conflict_type, id_one, attrs_one, id_two, attrs_two):
    """Build the conflict record shared by all three conflict types."""
    return {
        'type': conflict_type,
        'image_one': id_one,
        'image_one_name': attrs_one['name'],
        'image_one_visibility': attrs_one['visibility'],
        'image_two': id_two,
        'image_two_name': attrs_two['name'],
        'image_two_visibility': attrs_two['visibility']
    }


def _append_unless_mirrored(conflicts, conflict):
    """Append `conflict` unless the same image pair was already recorded in
    the opposite order, so each unordered pair is reported once."""
    for entry in conflicts:
        if entry['image_one'] == conflict['image_two'] and entry['image_two'] == conflict['image_one']:
            return
    conflicts.append(conflict)


def check_for_image_conflicts(json_img_dict):
    """Scan each repo in the JSON-encoded image dictionary for conflicts.

    Type 1 - same name, different checksum (same name on different images).
    Type 2 - same name, same checksum (duplicate of the same image).
    Type 3 - different names, same checksum (same image under two names).
    A type-2 pair also produces a type-3 record, matching the original
    behavior. Images whose checksum is the "No Checksum" placeholder are
    skipped.

    :returns: dict mapping repo -> list of conflict records, or None when no
        conflicts were found.
    """
    image_dict = json.loads(json_img_dict)
    conflicts_dict = {}
    for repo, images in image_dict.items():
        conflicts = []
        for img_id, img in images.items():
            if img['checksum'] == "No Checksum":
                continue
            for other_id, other in images.items():
                if other['checksum'] == "No Checksum" or other_id == img_id:
                    continue
                try:
                    same_name = img['name'] == other['name']
                    same_checksum = img['checksum'] == other['checksum']
                    if same_name:
                        # Same image -> type 2, different image -> type 1.
                        conflict_type = 2 if same_checksum else 1
                        # Consistency fix: use the module logger instead of the
                        # root logger (was logging.error).
                        logger.error("Type %s image conflict detected.", conflict_type)
                        _append_unless_mirrored(
                            conflicts,
                            _build_conflict(conflict_type, img_id, img, other_id, other))
                    if same_checksum:
                        logger.error("Type 3 image conflict detected.")
                        _append_unless_mirrored(
                            conflicts,
                            _build_conflict(3, img_id, img, other_id, other))
                except Exception as exc:
                    logger.error("Error when checking for conflicts on images: %s and %s" % (img_id, other_id))
                    logger.error(exc)
                    logger.error(image_dict)
        if conflicts:
            conflicts_dict[repo] = conflicts
    return conflicts_dict or None
# Accepts a list of images (names), a project and a repo
# Cross references the image repo in redis against the given image list
# Either returns a list of transactions or posts them to redis to be
# picked up by another thread.
def parse_pending_transactions(group_name, cloud_name, image_list, user):
    """Diff the desired image list against the cached repo state and queue
    redis transactions to reconcile them.

    For each name in `image_list` missing from the cloud's cached repo, an
    image 'transfer' transaction is queued; for each cached image whose name
    is absent from `image_list` (not hidden and not already pending), a
    'delete' transaction is queued. Transactions are pushed onto the
    "<group_name>_pending_transactions" redis list for another thread to
    consume.

    Returns False on failure (missing redis entry / bad key), otherwise None.
    """
    try:
        r = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db)
        proj_dict = json.loads(r.get(group_name))
        repo_dict = proj_dict[cloud_name]
        # __get_image_ids takes a repo dictionary and returns a dictionary in
        # the format image_name: image_id.
        # This is necessary since we are now using image name as the unique
        # identifier, not the img id.
        img_translation = __get_image_ids(repo_dict)
        for image in image_list:
            # If image is not in the cached repo we need to make a pending transfer
            if not img_translation.get(image, False):
                # MAKE TRANSFER
                # We need to get disk_format and container_format from another
                # repo that has this image
                img_details = __get_image_details(group_name=group_name, image=image)
                disk_format = img_details[0]
                container_format = img_details[1]
                transaction = {
                    'user': user,
                    'action': 'transfer',
                    'group_name': group_name,
                    'cloud_name': cloud_name,
                    'image_name': image,
                    'disk_format': disk_format,
                    'container_format': container_format
                }
                trans_key = group_name + "_pending_transactions"
                r.rpush(trans_key, json.dumps(transaction))
                increment_transactions()
            # else it is already there and do nothing
            else:
                pass
        # Now we need to check deletes
        for image_key in repo_dict:
            # If the key exists but it isn't in the image list make a pending
            # delete unless it is hidden
            if repo_dict[image_key]['name'] not in image_list and repo_dict[image_key]['hidden'] is False:
                # if its pending already we don't need to touch it
                if repo_dict[image_key].get('state') not in {'Pending Delete', 'Pending Transfer'}:
                    # MAKE DELETE
                    transaction = {
                        'user': user,
                        'action': 'delete',
                        'group_name': group_name,
                        'cloud_name': cloud_name,
                        'image_id': image_key,
                        'image_name': repo_dict[image_key].get('name')
                    }
                    trans_key = group_name + "_pending_transactions"
                    r.rpush(trans_key, json.dumps(transaction))
                    increment_transactions()
    except KeyError as e:
        logger.error(e)
        logger.error("Couldnt find image list for group %s", group_name)
        return False
# This function reads pending transactions from a redis queue and spawns celery
# tasks to perform the file transfers. Since our repo dictionaries are | |
<filename>bellman/harness/harness.py
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a harness for running experiments.
"""
import datetime
import os
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import gin
import tensorflow as tf
from absl import logging
from gin.tf import GinConfigSaverHook
from tf_agents.agents import TFAgent
from tf_agents.drivers.driver import Driver
from tf_agents.drivers.tf_driver import TFDriver
from tf_agents.environments.tf_environment import TFEnvironment
from tf_agents.eval import metric_utils
from tf_agents.metrics.tf_metric import TFStepMetric
from tf_agents.metrics.tf_metrics import (
AverageEpisodeLengthMetric,
AverageReturnMetric,
EnvironmentSteps,
NumberOfEpisodes,
)
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.replay_buffer import ReplayBuffer
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.utils import common
from bellman.harness.utils import (
EVALUATION_METRICS_DIR,
GIN_CONFIG,
TIME_METRIC,
TRAIN_METRICS_DIR,
)
from bellman.training.agent_trainer import AgentTrainer
from bellman.training.model_free_agent_trainer import OnPolicyModelFreeAgentTrainer
class ExperimentHarness:
"""
A harness for running experiments. The `run` method will run the experiment.
"""
    def __init__(
        self,
        root_dir: str,
        environment: TFEnvironment,
        evaluation_environment: TFEnvironment,
        agent: TFAgent,
        agent_trainer: AgentTrainer,
        real_replay_buffer_capacity: int,
        total_number_of_environment_steps: int,
        summary_interval: int,
        evaluation_interval: int,
        number_of_evaluation_episodes: int,
        number_of_initial_random_policy_steps: int = 0,
        use_tf_function: bool = False,
    ):
        """
        :param root_dir: Root directory where all experiments are stored.
        :param environment: The training environment the agent is stepping through.
        :param evaluation_environment: The environment for evaluating the performance of the agent.
        :param agent: The TF-Agent agent to train.
        :param agent_trainer: The trainer which will produce a training schedule for the components
            of the agent.
        :param real_replay_buffer_capacity: Capacity of the buffer collecting real samples.
        :param total_number_of_environment_steps: The number of environment steps to run the
            experiment for.
        :param summary_interval: Interval for summaries.
        :param evaluation_interval: Interval for evaluation points.
        :param number_of_evaluation_episodes: Number of episodes at each evaluation point.
        :param number_of_initial_random_policy_steps: If > 0, some initial training data is
            gathered by running a random policy on the real environment.
        :param use_tf_function: If `True`, use a `tf.function` for data collection.
        """
        # Fail fast on nonsensical configuration values before any setup work.
        assert real_replay_buffer_capacity > 0
        assert total_number_of_environment_steps > 0
        assert summary_interval > 0
        assert evaluation_interval > 0
        assert number_of_evaluation_episodes > 0
        assert 0 <= number_of_initial_random_policy_steps <= total_number_of_environment_steps
        assert number_of_initial_random_policy_steps == 0 or not isinstance(
            agent_trainer, OnPolicyModelFreeAgentTrainer
        )  # model-free on-policy agents must always execute their own policy!
        self._root_dir = root_dir
        # Timestamped directory name keeps separate runs from overwriting each other.
        self._base_dirname = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self._environment = environment
        self._evaluation_environment = evaluation_environment
        self._agent = agent
        self._agent_trainer = agent_trainer
        self._real_replay_buffer_capacity = real_replay_buffer_capacity
        self._total_number_of_environment_steps = total_number_of_environment_steps
        self._summary_interval = summary_interval
        self._evaluation_interval = evaluation_interval
        self._number_of_evaluation_episodes = number_of_evaluation_episodes
        self._number_of_initial_random_policy_steps = number_of_initial_random_policy_steps
        self._use_tf_function = use_tf_function
        # Populated in `run` from the training scheduler; the drivers then
        # collect this many environment steps per call.
        self._max_steps: Optional[int] = None
    @property
    def agent(self) -> TFAgent:
        """Read-only access to the TF-Agents agent under training.

        :return: The agent which is trained by this harness.
        """
        return self._agent
    @property
    def base_dirname(self) -> str:
        """Read-only name of this run's results directory (a timestamp string).

        :return: The directory name in the root directory where results will be stored.
        """
        return self._base_dirname
def define_base_experiment_directory(self) -> str:
"""
Define the path for the base directory for the experiment.
"""
base_dir = os.path.join(
os.path.expanduser(self._root_dir),
self._base_dirname,
)
return base_dir
@staticmethod
def serialise_config(base_dir: str):
"""
This method creates a single gin config file in the `base_dir`. This file can be used to
reproduce the experiment. This "operative" config file will contain all of the parameters
which have been used to parameterise a function decorated with `@gin.configurable'.
The config parameter values are also written to a TensorBoard events file in the
`base_dir`. This ensures that the parameter values can be viewed in TensorBoard.
Note: The `GinConfigSaverHook` can create a TensorBoard events file. Unfortunately it does
not seem to work with TensorFlow 2.0, so this is done manually.
"""
config_saver_hooks = GinConfigSaverHook(base_dir, summarize_config=False)
config_saver_hooks.after_create_session()
base_dir_summary_writer = tf.summary.create_file_writer(base_dir)
with base_dir_summary_writer.as_default():
tf.summary.text(GIN_CONFIG, gin.operative_config_str(), 0)
@staticmethod
def define_tensorboard_directories(base_dir: str) -> Tuple[str, str]:
"""
Define the paths of the tensorboard directories.
"""
train_dir = os.path.join(base_dir, TRAIN_METRICS_DIR)
eval_dir = os.path.join(base_dir, EVALUATION_METRICS_DIR)
return train_dir, eval_dir
@staticmethod
def create_summary_writers(
train_dir: str, eval_dir: str
) -> Tuple[tf.summary.SummaryWriter, tf.summary.SummaryWriter]:
"""
Create and return the training time summary writer and the evaluation time summary writer.
"""
# Summary writers
train_summary_writer = tf.summary.create_file_writer(train_dir, flush_millis=1000)
eval_summary_writer = tf.summary.create_file_writer(eval_dir, flush_millis=1000)
return train_summary_writer, eval_summary_writer
@staticmethod
def create_train_metrics() -> List[TFStepMetric]:
"""
Create a list of metrics to capture during training.
"""
return [
NumberOfEpisodes(),
EnvironmentSteps(),
AverageReturnMetric(buffer_size=1),
AverageEpisodeLengthMetric(buffer_size=1),
]
@staticmethod
def create_evaluation_metrics() -> List[TFStepMetric]:
"""
Create a list of metrics to capture during policy evaluation.
"""
return [
AverageReturnMetric(buffer_size=1),
AverageEpisodeLengthMetric(buffer_size=1),
]
def create_real_replay_buffer(self) -> ReplayBuffer:
"""
Create the replay buffer for storing data from the real environment.
"""
return TFUniformReplayBuffer(
self._agent.collect_policy.trajectory_spec,
batch_size=1,
max_length=self._real_replay_buffer_capacity,
)
def create_real_drivers(
self,
real_replay_buffer: ReplayBuffer,
train_metrics: List[TFStepMetric],
) -> Tuple[Driver, Driver]:
"""
Create the drivers for interacting with the real environment.
This method creates two drivers: one uses the agent's "collect" policy, the other uses a
uniform random policy.
Note that the random policy is defined with the same `info_spec` as the agent's "collect"
policy. The `info_spec` of the trajectories generated by the random policy must conform to
the expectations of the agent when the data is used for training.
"""
agent_collect_driver = TFDriver(
self._environment,
self._agent.collect_policy,
observers=[real_replay_buffer.add_batch] + train_metrics,
max_steps=self._max_steps,
disable_tf_function=not self._use_tf_function,
)
random_policy = RandomTFPolicy(
self._environment.time_step_spec(),
self._environment.action_spec(),
info_spec=self._agent.collect_policy.info_spec,
)
random_policy_collect_driver = TFDriver(
self._environment,
random_policy,
observers=[real_replay_buffer.add_batch] + train_metrics,
max_steps=self._max_steps,
disable_tf_function=not self._use_tf_function,
)
return agent_collect_driver, random_policy_collect_driver
@staticmethod
def write_summary_scalar(
metric_name: str,
metric_value: Union[List[float], float],
step: int,
summary_writer: tf.summary.SummaryWriter,
):
"""Write a scalar summary statistic to a tensorboard directory."""
with summary_writer.as_default():
if isinstance(metric_value, list):
value = metric_value[-1]
tf.compat.v2.summary.scalar(name=metric_name, data=value, step=step)
else:
tf.compat.v2.summary.scalar(name=metric_name, data=metric_value, step=step)
def run(self):
"""
This method runs an experiment. It creates a loop that steps through the environment,
using the agent to collect actions in each step. Note that loop only seemingly goes
step-by-step, drivers actually collect multiple steps in each call, governed by the number
of initial random policy steps and maximum number of steps - the greatest common divisor of
all the trainable components' training intervals, obtained through the
`TFTrainingScheduler` method `environment_steps_between_maybe_train`.
While running, this will collect metrics (during training and also separate evaluation
metrics). These metrics are periodically logged to `stdout`, and also recorded by summary
writers. Note that `max_steps` takes into account various reporting intervals computing a
greatest common denominator among all intervals to determine the step size. Hence, it
would be good to use intervals that are multiples of each other, if possible.
Note that this method also creates an environment step counter metric, which is used
throughout to monitor the progress of the experiment. This counter ensures that periodic
tasks, such as logging and training, happen at the correct intervals.
"""
logging.info("Initialising the experiment.")
base_dir = self.define_base_experiment_directory()
self.serialise_config(base_dir)
train_dir, eval_dir = self.define_tensorboard_directories(base_dir)
train_summary_writer, eval_summary_writer = self.create_summary_writers(
train_dir, eval_dir
)
train_summary_writer.set_as_default()
train_metrics = self.create_train_metrics()
environment_steps_metric = EnvironmentSteps()
train_metrics.extend([environment_steps_metric])
evaluation_metrics = self.create_evaluation_metrics()
real_replay_buffer = self.create_real_replay_buffer()
training_scheduler = self._agent_trainer.create_training_scheduler(
self._agent,
real_replay_buffer,
)
self._max_steps = training_scheduler.environment_steps_between_maybe_train(
additional_intervals=[
self._summary_interval,
self._evaluation_interval,
self._number_of_initial_random_policy_steps,
]
)
agent_collect_driver, random_policy_collect_driver = self.create_real_drivers(
real_replay_buffer,
train_metrics,
)
# Reset the real environment
time_step = self._environment.reset()
# executing the experiment
logging.info("Experiment started running.")
self.write_summary_scalar(
TIME_METRIC, 0.0, environment_steps_metric.result(), train_summary_writer
)
# step-by-step
while tf.math.less(
environment_steps_metric.result(), self._total_number_of_environment_steps
):
# Initial transitions with random policy to bootstrap training
if environment_steps_metric.result() < self._number_of_initial_random_policy_steps:
logging.info(
"Step = %d, collecting initial transitions with random policy, "
+ "%d steps in total.",
environment_steps_metric.result(),
self._number_of_initial_random_policy_steps,
)
time_step, _ = random_policy_collect_driver.run(time_step)
# Collecting data with the agent's "collect" policy
else:
logging.info(
"Step = %d, collecting regular transitions with agent policy, "
+ "%d steps in total.",
environment_steps_metric.result(),
self._max_steps,
)
time_step, _ = agent_collect_driver.run(time_step)
# potentially train certain component in current `self._max_steps` iteration
training_info = training_scheduler.maybe_train(environment_steps_metric.result())
for component, loss_info in training_info.items():
self.write_summary_scalar(
"TrainingLoss/" + component.name,
loss_info.loss,
environment_steps_metric.result(),
train_summary_writer,
)
if isinstance(loss_info.loss, list):
loss = loss_info.loss[-1]
else:
loss = loss_info.loss
logging.info(
"Step = %d, training of the %s component, loss (at final epoch) = %s",
environment_steps_metric.result(),
component.name,
str(loss),
)
# training summary and logs
if environment_steps_metric.result() % self._summary_interval == 0:
logging.info(
"Step | |
#!/usr/bin/python
#-*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from json.decoder import JSONDecodeError
import base64
import hmac
import hashlib
import requests
from .. import utils
PROTOCOL = "https"
HOST = "api.bitfinex.com"
VERSION = "v1"
PATH_SYMBOLS = "symbols"
PATH_TICKER = "pubticker/%s"
PATH_TODAY = "today/%s"
PATH_STATS = "stats/%s"
PATH_LENDBOOK = "lendbook/%s"
PATH_ORDERBOOK = "book/%s"
# HTTP request timeout in seconds
TIMEOUT = 5.0
class BitfinexException(Exception):
    """Raised when the Bitfinex API returns an unexpected HTTP status.

    Instances carry ``(status_code, reason, content)`` as exception args.
    """
    pass
class Client:
"""
Client for the bitfinex.com API.
Link for official bitfinex documentation :
`Bitfinex rest1 docs <https://docs.bitfinex.com/v1/docs>`_
`Bitfinex rest1 reference <https://docs.bitfinex.com/v1/reference>`_
Parameters
----------
key : str
Bitfinex api key
secret : str
Bitfinex api secret
nonce_multiplier : Optional float
Multiply nonce by this number
Examples
--------
::
bfx_client = Client(key,secret)
bfx_client = Client(key,secret,2.0)
"""
def __init__(self, key=None, secret=None, nonce_multiplier=1.0):
assert isinstance(nonce_multiplier, float), "nonce_multiplier must be decimal"
self.url = "%s://%s/%s" % (PROTOCOL, HOST, VERSION)
self.base_url = "%s://%s/" % (PROTOCOL, HOST)
self.key = key
self.secret = secret
self.nonce_multiplier = nonce_multiplier
def server(self):
return u"{0:s}://{1:s}/{2:s}".format(PROTOCOL, HOST, VERSION)
def url_for(self, path, path_arg=None, parameters=None):
# build the basic url
url = "%s/%s" % (self.server(), path)
# If there is a path_arh, interpolate it into the URL.
# In this case the path that was provided will need to have string
# interpolation characters in it, such as PATH_TICKER
if path_arg:
url = url % (path_arg)
# Append any parameters to the URL.
if parameters:
url = "%s?%s" % (url, self._build_parameters(parameters))
return url
    def _nonce(self):
        """Returns a nonce used in authentication.
        Nonce must be an increasing number, if the API key has been used
        earlier or other frameworks that have used higher numbers you might
        need to increase the nonce_multiplier.

        :return: the nonce as a string, ready to embed in a request payload
        """
        return str(utils.get_nonce(self.nonce_multiplier))
def _sign_payload(self, payload):
j = json.dumps(payload)
data = base64.standard_b64encode(j.encode('utf8'))
hmc = hmac.new(self.secret.encode('utf8'), data, hashlib.sha384)
signature = hmc.hexdigest()
return {
"X-BFX-APIKEY": self.key,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data
}
def _get(self, url):
response = requests.get(url, timeout=TIMEOUT)
if response.status_code == 200:
return response.json()
else:
try:
content = response.json()
except JSONDecodeError:
content = response.text()
raise BitfinexException(response.status_code, response.reason, content)
def _post(self, endoint, payload, verify=True):
url = self.url_for(path=endoint)
signed_payload = self._sign_payload(payload)
response = requests.post(url, headers=signed_payload, verify=verify)
if response.status_code == 200:
return response.json()
elif response.status_code >= 400:
return response.json()
else:
try:
content = response.json()
except JSONDecodeError:
content = response.text()
raise BitfinexException(response.status_code, response.reason, content)
def _build_parameters(self, parameters):
# sort the keys so we can test easily in Python 3.3 (dicts are not
# ordered)
keys = list(parameters.keys())
keys.sort()
return '&'.join(["%s=%s" % (k, parameters[k]) for k in keys])
def account_infos(self):
"""`Return information about your account (trading fees)
<https://docs.bitfinex.com/reference#rest-auth-account-info>`_
Return information about your account (trading fees)
Returns
-------
list
::
[{
"maker_fees":"0.1",
"taker_fees":"0.2",
"fees":[{
"pairs":"BTC",
"maker_fees":"0.1",
"taker_fees":"0.2"
},{
"pairs":"LTC",
"maker_fees":"0.1",
"taker_fees":"0.2"
},
{
"pairs":"ETH",
"maker_fees":"0.1",
"taker_fees":"0.2"
}]
}]
Example
-------
::
bfx_client.account_infos()
"""
payload = {
"request": "/v1/account_infos",
"nonce": self._nonce()
}
response = self._post("account_infos", payload=payload, verify=True)
return response
def account_fees(self):
"""`See the fees applied to your withdrawals
<https://docs.bitfinex.com/reference#rest-auth-account-fees>`_
See the fees applied to your withdrawals
Returns
-------
dict
::
{
"withdraw":{
"BTC": "0.0005",
"LTC": 0,
"ETH": 0,
...
}
}
Example
-------
::
bfx_client.account_fees()
"""
payload = {
"request": "/v1/account_fees",
"nonce": self._nonce()
}
response = self._post("account_fees", payload=payload, verify=True)
return response
def summary(self):
"""`Returns a 30-day summary of your trading volume and return on margin funding.
<https://docs.bitfinex.com/reference#rest-auth-account-fees>`_
Returns a 30-day summary of your trading volume and return on margin funding.
Returns
-------
dict
::
{
"trade_vol_30d":[
{"curr":"BTC","vol":11.88696022},
{"curr":"LTC","vol":0.0},
{"curr":"ETH","vol":0.1},
{"curr":"Total (USD)","vol":5027.63}
],
"funding_profit_30d":[
{"curr":"USD","amount":0.0},
{"curr":"BTC","amount":0.0},
{"curr":"LTC","amount":0.0},
{"curr":"ETH","amount":0.0}
],
"maker_fee":0.001,
"taker_fee":0.002
}
Example
-------
::
bfx_client.summary()
"""
payload = {
"request": "/v1/summary",
"nonce": self._nonce()
}
response = self._post("summary", payload=payload, verify=True)
return response
def place_order(self, amount, price, side, ord_type, symbol='btcusd', exchange='bitfinex'):
"""
.. _new_order:
`Bitfinex new order <https://docs.bitfinex.com/v1/reference#rest-auth-new-order>`_
Submit a new Order
Parameters
----------
amount : float
Order size: how much you want to buy or sell
price : float
Price to buy or sell at. Must be positive. Use random number for market orders.
side : string
Either “buy” or “sell”.
ord_type : string
Either “market” / “limit” / “stop” / “trailing-stop” / “fill-or-kill” /
“exchange market” / “exchange limit” / “exchange stop” / “exchange trailing-stop” /
“exchange fill-or-kill”. (type starting by “exchange ” are exchange orders, others are
margin trading orders)
symbol : str
The `symbol <restv1.html#symbols>`_ you want information about.
exchange : str
'bitfinex'
Returns
-------
dict
::
# response
{
"id":448364249,
"symbol":"btcusd",
"exchange":"bitfinex",
"price":"0.01",
"avg_execution_price":"0.0",
"side":"buy",
"type":"exchange limit",
"timestamp":"1444272165.252370982",
"is_live":true,
"is_cancelled":false,
"is_hidden":false,
"was_forced":false,
"original_amount":"0.01",
"remaining_amount":"0.01",
"executed_amount":"0.0",
"order_id":448364249
}
Examples
--------
::
bfx_client.place_order(0.01, 0.01, "buy", "exchange limit", "btcusd")
"""
payload = {
"request": "/v1/order/new",
"nonce": self._nonce(),
"symbol": symbol,
"amount": amount,
"price": price,
"exchange": exchange,
"side": side,
"type": ord_type
}
response = self._post("/order/new", payload=payload, verify=True)
return response
def place_multiple_orders(self, orders):
"""
Parameters
----------
orders : list
Each item in the list is a dict that must have the following items : symbol, amount,
price, side, type, exchange
Returns
-------
dict
::
// response
{
"order_ids":[
{
"id":448383727,
"symbol":"btcusd",
"exchange":"bitfinex",
"price":"0.01",
"avg_execution_price":"0.0",
"side":"buy",
"type":"exchange limit",
"timestamp":"1444274013.621701916",
"is_live":true,
"is_cancelled":false,
"is_hidden":false,
"was_forced":false,
"original_amount":"0.01",
"remaining_amount":"0.01",
"executed_amount":"0.0"
},{
"id":448383729,
"symbol":"btcusd",
"exchange":"bitfinex",
"price":"0.03",
"avg_execution_price":"0.0",
"side":"buy",
"type":"exchange limit",
"timestamp":"1444274013.661297306",
"is_live":true,
"is_cancelled":false,
"is_hidden":false,
"was_forced":false,
"original_amount":"0.02",
"remaining_amount":"0.02",
"executed_amount":"0.0"
}],
"status":"success"
}
Examples
--------
::
# Make a list with 3 orders to buy 100 iota at 3 dollars,100 iota at 4 dollars and
# 100 iota at 5 dollars
# The list is sent to the method place_multiple_orders
orders = []
for price in range(3, 6):
print(price)
payload = {
"symbol": 'IOTUSD',
"amount": '100',
"price": str(price),
"exchange": 'bitfinex',
"side": 'buy',
"type": 'limit'
}
orders.append(payload)
response = bfx_client.place_multiple_orders(orders)
print(response)
"""
payload = {
"request": "/v1/order/new/multi",
"nonce": self._nonce(),
"orders": orders
}
response = self._post("/order/new/multi", payload=payload, verify=True)
return response
def delete_order(self, order_id):
"""`Bitfinex cancel order reference
<https://docs.bitfinex.com/v1/reference#rest-auth-cancel-order>`_
Cancel an order.
Parameters
----------
order_id : int
The order ID given by `new_order`_ function
Returns
-------
dict
::
{
"id":446915287,
"symbol":"btcusd",
"exchange":null,
"price":"239.0",
"avg_execution_price":"0.0",
"side":"sell",
"type":"trailing stop",
"timestamp":"1444141982.0",
"is_live":true,
"is_cancelled":false,
"is_hidden":false,
"was_forced":false,
"original_amount":"1.0",
"remaining_amount":"1.0",
"executed_amount":"0.0"
}
Example
-------
::
bfx_client.delete_order(448411153)
"""
payload = {
"request": "/v1/order/cancel",
"nonce": self._nonce(),
"order_id": order_id
}
response = self._post("/order/cancel", payload=payload, verify=True)
return response
def delete_all_orders(self):
"""`Bitfinex cancel all orders reference
<https://docs.bitfinex.com/v1/reference#rest-auth-cancel-multiple-orders>`_
Cancel all orders at once.
Returns
-------
dict
::
{"result":"Orders cancelled"}
Example
-------
::
bfx_client.delete_all_orders()
"""
payload = {
"request": "/v1/order/cancel/all",
"nonce": self._nonce(),
}
response = self._post("/order/cancel/all", payload=payload, verify=True)
return response
def status_order(self, order_id):
"""`Bitfinex status order reference
<https://docs.bitfinex.com/v1/reference#rest-auth-order-status>`_
Get the status of an order. Is it active? Was it cancelled?
To what extent has it been executed? etc.
Parameters
----------
order_id : int
The order ID given by `new_order`_ function
Returns
-------
dict
::
{
"id":448411153,
"symbol":"btcusd",
"exchange":null,
"price":"0.01",
"avg_execution_price":"0.0",
"side":"buy",
"type":"exchange limit",
"timestamp":"1444276570.0",
"is_live":false,
"is_cancelled":true,
"is_hidden":false,
"oco_order":null,
"was_forced":false,
"original_amount":"0.01",
"remaining_amount":"0.01",
"executed_amount":"0.0"
}
Example
-------
::
bfx_client.status_order(448411153)
"""
payload = {
"request": "/v1/order/status",
"nonce": self._nonce(),
"order_id": order_id
}
response = self._post("/order/status", payload=payload, verify=True)
return response
def active_orders(self):
"""`Bitfinex active orders reference
<https://docs.bitfinex.com/v1/reference#rest-auth-active-orders>`_
View your active orders.
Returns
-------
list
::
[{
"id":448411365,
"symbol":"btcusd",
"exchange":"bitfinex",
"price":"0.02",
"avg_execution_price":"0.0",
"side":"buy",
"type":"exchange limit",
"timestamp":"1444276597.0",
"is_live":true,
"is_cancelled":false,
"is_hidden":false,
"was_forced":false,
"original_amount":"0.02",
"remaining_amount":"0.02",
"executed_amount":"0.0"
}]
Example
-------
::
bfx_client.active_orders(448411153)
"""
payload = {
"request": "/v1/orders",
"nonce": self._nonce()
}
response = self._post("orders", payload=payload, verify=True)
return response
def active_positions(self):
"""`Bitfinex active positions reference
<https://docs.bitfinex.com/v1/reference#rest-auth-active-positions>`_
View your active positions.
Returns
-------
list
::
[{
"id":943715,
"symbol":"btcusd",
"status":"ACTIVE",
"base":"246.94",
"amount":"1.0",
"timestamp":"1444141857.0",
"swap":"0.0",
"pl":"-2.22042"
}]
Example
-------
::
bfx_client.active_positions(448411153)
"""
payload = {
"request": "/v1/positions",
"nonce": self._nonce()
}
response = self._post("positions", payload=payload, verify=True)
return response
def claim_position(self, position_id):
"""`Bitfinex claim position reference
<https://docs.bitfinex.com/v1/reference#rest-auth-claim-position>`_
A position can be claimed if: It is a long position: The amount in the last unit of the
position pair that you have in your trading wallet AND/OR the realized profit of the
position is greater or equal to the purchase amount of the position
(base price * position amount) and the funds which need to be returned. For example, for a
long BTCUSD position, you can claim the position if the amount of USD you have in the
trading wallet is greater than the base | |
import argparse
import sys
import os
import shutil
import time
import math
import h5py
import random
import torch
import torch.nn as nn
import torch.optim
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.nn.parallel
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import numpy as np
from scipy.io import savemat
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
sys.path.append('../ResNet')
import ResNet1d as rn
sys.path.append('../')
import Model_Util
import Utilities
from Dataset_Management import Labeled_Real_DataLoader
sys.path.append('../Translocations_Detector/models')
from backbone import build_backbone
from transformer import build_transformer
import detr as DT
sys.path.append('./Evaluator')
from Evaluator import mean_average_precision_and_errors
def parse() -> argparse.Namespace:
    """Build and parse the command-line arguments for the detector evaluator.

    Returns the parsed ``argparse.Namespace``. Arguments cover: dataset and
    model checkpoint paths, backbone architectures, run/evaluation modes,
    DETR transformer hyper-parameters, Hungarian-matching and loss weights,
    and mAP threshold settings.
    """
    # Valid backbone architecture names (used by both backbone sections).
    model_names = ['ResNet10', 'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152']
    parser = argparse.ArgumentParser(description='Nanopore Translocation Detector Training')
    # Positional paths: dataset plus the three model checkpoints.
    parser.add_argument('data', metavar='DIR', type=str,
                        help='path to experimental validation dataset')
    parser.add_argument('counter', metavar='COUNTER', type=str,
                        help='path to translocation counter')
    parser.add_argument('predictor', metavar='PREDICTOR', type=str,
                        help='path to translocation feature predictor')
    parser.add_argument('detector', metavar='DETECTOR', type=str,
                        help='path to translocation detector')
    parser.add_argument('--feature_predictor_arch', '-fpa', metavar='FEATURE_PREDICTOR_ARCH', default='ResNet18',
                        choices=model_names,
                        help='This is the architecture of the feature_predictor section in the backbone: ' +
                        ' | '.join(model_names) +
                        ' (default: ResNet18_Custom)')
    parser.add_argument('--pulse_counter_arch', '-pca', metavar='PULSE_COUNTER_ARCH', default='ResNet18',
                        choices=model_names,
                        help='This is the architecture of the pulse_counter section in the backbone: ' +
                        ' | '.join(model_names) +
                        ' (default: ResNet18_Counter)')
    parser.add_argument('-b', '--batch-size', default=6, type=int,
                        metavar='N', help='mini-batch size per process (default: 6)')
    # Run/evaluation modes and outputs.
    parser.add_argument('-save-stats', default='', type=str, metavar='STATS_PATH',
                        help='path to save the stats produced during evaluation (default: none)')
    parser.add_argument('-stats', '--statistics', dest='statistics', action='store_true',
                        help='Compute statistics about contrast between a trained and a traditional model on validation set')
    parser.add_argument('-stats-from-file', default='', type=str, metavar='STATS_FROM_FILE',
                        help='path to load the stats produced during validation from a file (default: none)')
    parser.add_argument('-c', '--compute-predictions', default='', type=str, metavar='COMPUTE_PREDICTIONS',
                        help='Run a trained model and compute and save all its predictions in noisy traces')
    parser.add_argument('-r', '--run', dest='run', action='store_true',
                        help='Run a trained model and plots a window of predictions in a noisy trace')
    parser.add_argument('--run-plot-window', default=1.0, type=float, metavar='RPW',
                        help='the percentage of the window width the you want to actually plot (default: 1; which means 100%%)')
    parser.add_argument("--local_rank", default=0, type=int)
    parser.add_argument('--cpu', action='store_true',
                        help='Runs CPU based version of the workflow.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='provides additional details as to what the program is doing')
    parser.add_argument('-t', '--test', action='store_true',
                        help='Launch test mode with preset arguments')
    # DETR transformer hyper-parameters.
    parser.add_argument('--transformer-hidden-dim', default=512, type=int, metavar='TRANSFORMER-HIDDEN-DIM',
                        help='Hidden dimension of transformer on DETR model (default: 512)')
    parser.add_argument('--transformer-dropout', default=0.1, type=float, metavar='TRANSFORMER_DROPOUT',
                        help='Dropout of transformer on DETR model (default: 0.1)')
    parser.add_argument('--transformer-num-heads', default=8, type=int, metavar='TRANSFORMER_NUM_HEADS',
                        help='Number of heads of transformer on DETR model (default: 8)')
    parser.add_argument('--transformer-dim-feedforward', default=2048, type=int, metavar='TRANSFORMER_DIM_FEEDFORWARD',
                        help='Feedforward dimension inside transformer on DETR model (default: 2048)')
    parser.add_argument('--transformer-num-enc-layers', default=6, type=int, metavar='TRANSFORMER_NUM_ENC_LAYERS',
                        help='Number of encoder layers inside transformer on DETR model (default: 6)')
    parser.add_argument('--transformer-num-dec-layers', default=6, type=int, metavar='TRANSFORMER_NUM_DEC_LAYERS',
                        help='Number of decoder layers inside transformer on DETR model (default: 6)')
    parser.add_argument('--transformer-pre-norm', dest='transformer-pre-norm', action='store_true',
                        help='Configurization of transformer on DETR model (default: False)')
    parser.add_argument('--num-classes', default=1, type=int, metavar='NUM_CLASSES',
                        help='The number of different translocation classes that DETR has to classify (default: 1)')
    parser.add_argument('--num-queries', default=75, type=int, metavar='NUM_QUERIES',
                        help='The maximum number of translocations that DETR considers could exist in a window (default: 75)')
    # Hungarian-matching costs and loss weights.
    parser.add_argument('--cost-class', default=1.0, type=float, metavar='COST_CLASS',
                        help='This is the relative weight of the classification error in the Hungarian matching cost (default: 1.0)')
    parser.add_argument('--cost-bsegment', default=1.0, type=float, metavar='COST_BSEGMENT',
                        help='This is the relative weight of the L1 error of the bounding segment coordinates in the Hungarian matching cost (default: 1.0)')
    parser.add_argument('--cost-giou', default=0.0, type=float, metavar='COST_GIOU',
                        help='This is the relative weight of the giou loss of the bounding segment in the Hungarian matching cost (default: 0.0)')
    parser.add_argument('--loss_ce', default=1.0, type=float, metavar='LOSS_CE',
                        help='This is the relative weight of the classification error in loss (default: 1.0)')
    parser.add_argument('--loss_bsegment', default=1.0, type=float, metavar='LOSS_BSEGMENT',
                        help='This is the relative weight of the L1 error of the bounding segment coordinates in loss (default: 1.0)')
    parser.add_argument('--loss_giou', default=0.0, type=float, metavar='LOSS_GIOU',
                        help='This is the relative weight of the giou loss of the bounding segment in the loss (default: 0.0)')
    parser.add_argument('--eos-coef', default=0.1, type=float, metavar='EOS_COEF',
                        help='This is relative classification weight applied to the no-translocation category in the loss (default: 0.1)')
    # mAP threshold sweep settings.
    parser.add_argument('--start-threshold', default=0.5, type=float, metavar='START_THRESHOLD',
                        help='This is the start threshold for the mAP computation (default: 0.5)')
    parser.add_argument('--end-threshold', default=0.95, type=float, metavar='END_THRESHOLD',
                        help='This is the end threshold for the mAP computation (default: 0.95)')
    parser.add_argument('--step-threshold', default=0.05, type=float, metavar='STEP_THRESHOLD',
                        help='This is the step threshold for the mAP computation (default: 0.05)')
    # Plotting selection for --run mode.
    parser.add_argument('--trace_number', default=0, type=int,
                        metavar='TN', help='trace number to plot (default: 0)')
    parser.add_argument('--window_number', default=0, type=int,
                        metavar='WN', help='window number to plot (default: 0)')
    args = parser.parse_args()
    return args
def main():
global best_precision, args
best_precision = 0
args = parse()
if not len(args.data):
raise Exception("error: No data set provided")
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
if not args.cpu:
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='gloo',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.total_batch_size = args.world_size * args.batch_size
# Set the device
device = torch.device('cpu' if args.cpu else 'cuda:' + str(args.gpu))
#######################################################################
# Start DETR contruction
#######################################################################
# create DETR backbone
# create backbone pulse counter
if args.test:
args.pulse_counter_arch = 'ResNet10'
if args.local_rank==0 and args.verbose:
print("=> creating backbone pulse counter '{}'".format(args.pulse_counter_arch))
if args.pulse_counter_arch == 'ResNet18':
backbone_pulse_counter = rn.ResNet18_Counter()
elif args.pulse_counter_arch == 'ResNet34':
backbone_pulse_counter = rn.ResNet34_Counter()
elif args.pulse_counter_arch == 'ResNet50':
backbone_pulse_counter = rn.ResNet50_Counter()
elif args.pulse_counter_arch == 'ResNet101':
backbone_pulse_counter = rn.ResNet101_Counter()
elif args.pulse_counter_arch == 'ResNet152':
backbone_pulse_counter = rn.ResNet152_Counter()
elif args.pulse_counter_arch == 'ResNet10':
backbone_pulse_counter = rn.ResNet10_Counter()
else:
print("Unrecognized {} architecture for the backbone pulse counter" .format(args.pulse_counter_arch))
backbone_pulse_counter = backbone_pulse_counter.to(device)
# create backbone feature predictor
if args.test:
args.feature_predictor_arch = 'ResNet10'
if args.local_rank==0 and args.verbose:
print("=> creating backbone feature predictor '{}'".format(args.feature_predictor_arch))
if args.feature_predictor_arch == 'ResNet18':
backbone_feature_predictor = rn.ResNet18_Custom()
elif args.feature_predictor_arch == 'ResNet34':
backbone_feature_predictor = rn.ResNet34_Custom()
elif args.feature_predictor_arch == 'ResNet50':
backbone_feature_predictor = rn.ResNet50_Custom()
elif args.feature_predictor_arch == 'ResNet101':
backbone_feature_predictor = rn.ResNet101_Custom()
elif args.feature_predictor_arch == 'ResNet152':
backbone_feature_predictor = rn.ResNet152_Custom()
elif args.feature_predictor_arch == 'ResNet10':
backbone_feature_predictor = rn.ResNet10_Custom()
else:
print("Unrecognized {} architecture for the backbone feature predictor" .format(args.feature_predictor_arch))
backbone_feature_predictor = backbone_feature_predictor.to(device)
# For distributed training, wrap the model with torch.nn.parallel.DistributedDataParallel.
if args.distributed:
if args.cpu:
backbone_pulse_counter = DDP(backbone_pulse_counter)
backbone_feature_predictor = DDP(backbone_feature_predictor)
else:
backbone_pulse_counter = DDP(backbone_pulse_counter, device_ids=[args.gpu], output_device=args.gpu)
backbone_feature_predictor = DDP(backbone_feature_predictor, device_ids=[args.gpu], output_device=args.gpu)
if args.verbose:
print('Since we are in a distributed setting the backbone componets are replicated here in local rank {}'
.format(args.local_rank))
# bring counter from a checkpoint
if args.counter:
# Use a local scope to avoid dangling references
def bring_counter():
if os.path.isfile(args.counter):
print("=> loading backbone pulse counter '{}'" .format(args.counter))
if args.cpu:
checkpoint = torch.load(args.counter, map_location='cpu')
else:
checkpoint = torch.load(args.counter, map_location = lambda storage, loc: storage.cuda(args.gpu))
loss_history_1 = checkpoint['loss_history']
counter_error_history = checkpoint['Counter_error_history']
best_error_1 = checkpoint['best_error']
backbone_pulse_counter.load_state_dict(checkpoint['state_dict'])
total_time_1 = checkpoint['total_time']
print("=> loaded counter '{}' (epoch {})"
.format(args.counter, checkpoint['epoch']))
print("Counter best precision saved was {}" .format(best_error_1))
return best_error_1, backbone_pulse_counter, loss_history_1, counter_error_history, total_time_1
else:
print("=> no counter found at '{}'" .format(args.counter))
best_error_1, backbone_pulse_counter, loss_history_1, counter_error_history, total_time_1 = bring_counter()
else:
raise Exception("error: No counter path provided")
# bring predictor from a checkpoint
if args.predictor:
# Use a local scope to avoid dangling references
def bring_predictor():
if os.path.isfile(args.predictor):
print("=> loading backbone feature predictor '{}'" .format(args.predictor))
if args.cpu:
checkpoint = torch.load(args.predictor, map_location='cpu')
else:
checkpoint = torch.load(args.predictor, map_location = lambda storage, loc: storage.cuda(args.gpu))
loss_history_2 = checkpoint['loss_history']
duration_error_history = checkpoint['duration_error_history']
amplitude_error_history = checkpoint['amplitude_error_history']
best_error_2 = checkpoint['best_error']
backbone_feature_predictor.load_state_dict(checkpoint['state_dict'])
total_time_2 = checkpoint['total_time']
print("=> loaded predictor '{}' (epoch {})"
.format(args.predictor, checkpoint['epoch']))
print("Predictor best precision saved was {}" .format(best_error_2))
return best_error_2, backbone_feature_predictor, loss_history_2, duration_error_history, amplitude_error_history, total_time_2
else:
print("=> no predictor found at '{}'" .format(args.predictor))
best_error_2, backbone_feature_predictor, loss_history_2, duration_error_history, amplitude_error_history, total_time_2 = bring_predictor()
else:
raise Exception("error: No predictor path provided")
# create backbone
if args.local_rank==0 and args.verbose:
print("=> creating backbone")
if args.feature_predictor_arch == 'ResNet18':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=512)
elif args.feature_predictor_arch == 'ResNet34':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=512)
elif args.feature_predictor_arch == 'ResNet50':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=2048)
elif args.feature_predictor_arch == 'ResNet101':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=2048)
elif args.feature_predictor_arch == 'ResNet152':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=2048)
elif args.feature_predictor_arch == 'ResNet10':
backbone=build_backbone(pulse_counter=backbone_pulse_counter,
feature_predictor=backbone_feature_predictor,
num_channels=512)
else:
print("Unrecognized {} architecture for the backbone feature predictor" .format(args.feature_predictor_arch))
backbone = backbone.to(device)
# create DETR transformer
if args.local_rank==0 and args.verbose:
print("=> creating transformer")
if args.test:
args.transformer_hidden_dim = 64
args.transformer_num_heads = 2
args.transformer_dim_feedforward = 256
args.transformer_num_enc_layers = 2
args.transformer_num_dec_layers = 2
args.transformer_pre_norm = True
transformer = build_transformer(hidden_dim=args.transformer_hidden_dim,
dropout=args.transformer_dropout,
nheads=args.transformer_num_heads,
dim_feedforward=args.transformer_dim_feedforward,
enc_layers=args.transformer_num_enc_layers,
dec_layers=args.transformer_num_dec_layers,
pre_norm=args.transformer_pre_norm)
# create DETR in itself
if args.local_rank==0 and args.verbose:
print("=> creating DETR")
detr = DT.DETR(backbone=backbone,
transformer=transformer,
num_classes=args.num_classes,
num_queries=args.num_queries)
detr = detr.to(device)
# For distributed training, wrap the model | |
# the source codes of transE are from https://github.com/mklimasz/TransE-PyTorch
from absl import app
from absl import flags
import os
import numpy as np
import torch.optim as optim
from torch.utils import data as torch_data
from torch.utils import tensorboard
from collections import Counter
from torch.utils import data
from typing import Dict, Tuple
import torch
from torch import nn
from torch.optim import optimizer
import pickle
# Dataset identifiers; the module-level `dataset` switch below selects which
# one this run trains on (it also decides the dataset_path flag default).
FB15K = 'FB15K'
WORDNET = 'WordNet'
dataset = WORDNET # choose WORDNET or FB15K
# Command-line flags (absl): hyper-parameters and paths for TransE training.
FLAGS = flags.FLAGS
flags.DEFINE_float("lr", default=0.01, help="Learning rate value.")
flags.DEFINE_integer("seed", default=715, help="Seed value.")
flags.DEFINE_integer("batch_size", default=128, help="Maximum batch size.")
flags.DEFINE_integer("validation_batch_size", default=64, help="Maximum batch size during model validation.")
flags.DEFINE_integer("vector_length", default=100, help="Length of entity/relation vector.")
flags.DEFINE_float("margin", default=1.0, help="Margin value in margin-based ranking loss.")
flags.DEFINE_integer("norm", default=1, help="Norm used for calculating dissimilarity metric (usually 1 or 2).")
flags.DEFINE_integer("epochs", default=4000, help="Number of training epochs.")
flags.DEFINE_bool("use_gpu", default=True, help="Flag enabling gpu usage.")
flags.DEFINE_integer("validation_freq", default=10, help="Validate model every X epochs.")
flags.DEFINE_string("checkpoint_path", default="", help="Path to model checkpoint (by default train from scratch).")
flags.DEFINE_string("tensorboard_log_dir", default=os.path.abspath('../resource/runs/'),
                    help="Path for tensorboard log directory.")
flags.DEFINE_string("checkpoint_folder", default=os.path.abspath('../resource/transe_checkpoint/'),
                    help='checkpoint folder')
flags.DEFINE_string("load_checkpoint_file", default=None, help='checkpoint file')
# dataset_path default is bound once at import time, depending on `dataset`.
if dataset == FB15K:
    flags.DEFINE_string("dataset_path", default=os.path.abspath('../resource/FB15k/'), help="Path to dataset.")
elif dataset == WORDNET:
    flags.DEFINE_string("dataset_path", default=os.path.abspath('../resource/wordnet/'), help="Path to dataset.")
else:
    raise ValueError('Illegal Dataset')
# Type aliases used in the evaluation-function signatures below.
HITS_AT_1_SCORE = float
HITS_AT_3_SCORE = float
HITS_AT_10_SCORE = float
MRR_SCORE = float
METRICS = Tuple[HITS_AT_1_SCORE, HITS_AT_3_SCORE, HITS_AT_10_SCORE, MRR_SCORE]
Mapping = Dict[str, int]
# Keys of the dict serialized into training checkpoints (see save_checkpoint).
_MODEL_STATE_DICT = "model_state_dict"
_OPTIMIZER_STATE_DICT = "optimizer_state_dict"
_EPOCH = "epoch"
_STEP = "step"
_BEST_SCORE = "best_score"
class TransE(nn.Module):
def __init__(self, entity_count, relation_count, device, norm=1, dim=100, margin=1.0):
super(TransE, self).__init__()
self.entity_count = entity_count
self.relation_count = relation_count
self.device = device
self.norm = norm
self.dim = dim
self.entities_emb = self._init_entity_emb()
self.relations_emb = self._init_relation_emb()
self.criterion = nn.MarginRankingLoss(margin=margin, reduction='none')
def _init_entity_emb(self):
entities_emb = nn.Embedding(num_embeddings=self.entity_count + 1,
embedding_dim=self.dim,
padding_idx=self.entity_count)
uniform_range = 6 / np.sqrt(self.dim)
entities_emb.weight.data.uniform_(-uniform_range, uniform_range)
return entities_emb
def _init_relation_emb(self):
relations_emb = nn.Embedding(num_embeddings=self.relation_count + 1,
embedding_dim=self.dim,
padding_idx=self.relation_count)
uniform_range = 6 / np.sqrt(self.dim)
relations_emb.weight.data.uniform_(-uniform_range, uniform_range)
# -1 to avoid nan for OOV vector
relations_emb.weight.data[:-1, :].div_(relations_emb.weight.data[:-1, :].norm(p=1, dim=1, keepdim=True))
return relations_emb
def forward(self, positive_triplets: torch.LongTensor, negative_triplets: torch.LongTensor):
"""Return model losses based on the input.
:param positive_triplets: triplets of positives in Bx3 shape (B - batch, 3 - head, relation and tail)
:param negative_triplets: triplets of negatives in Bx3 shape (B - batch, 3 - head, relation and tail)
:return: tuple of the model loss, positive triplets loss component, negative triples loss component
"""
# -1 to avoid nan for OOV vector
self.entities_emb.weight.data[:-1, :].div_(self.entities_emb.weight.data[:-1, :].norm(p=2, dim=1, keepdim=True))
assert positive_triplets.size()[1] == 3
positive_distances = self._distance(positive_triplets)
assert negative_triplets.size()[1] == 3
negative_distances = self._distance(negative_triplets)
return self.loss(positive_distances, negative_distances), positive_distances, negative_distances
def predict(self, triplets: torch.LongTensor):
"""Calculated dissimilarity score for given triplets.
:param triplets: triplets in Bx3 shape (B - batch, 3 - head, relation and tail)
:return: dissimilarity score for given triplets
"""
return self._distance(triplets)
def loss(self, positive_distances, negative_distances):
target = torch.tensor([-1], dtype=torch.long, device=self.device)
return self.criterion(positive_distances, negative_distances, target)
def _distance(self, triplets):
"""Triplets should have shape Bx3 where dim 3 are head id, relation id, tail id."""
assert triplets.size()[1] == 3
heads = triplets[:, 0]
relations = triplets[:, 1]
tails = triplets[:, 2]
return (self.entities_emb(heads) + self.relations_emb(relations) - self.entities_emb(tails)).norm(p=self.norm,
dim=1)
def load_checkpoint(checkpoint_path: str, model: nn.Module, optim_: optimizer.Optimizer) -> Tuple[int, int, float]:
    """Restore model and optimizer state from a training checkpoint.

    :param checkpoint_path: path of the checkpoint file to read
    :param model: model whose parameters are overwritten in place
    :param optim_: optimizer whose state is overwritten in place
    :return: tuple of (starting epoch id, starting step id, best score so far)
    """
    state = torch.load(checkpoint_path)
    model.load_state_dict(state[_MODEL_STATE_DICT])
    optim_.load_state_dict(state[_OPTIMIZER_STATE_DICT])
    # Resume *after* the saved position, hence the +1 on both counters.
    return state[_EPOCH] + 1, state[_STEP] + 1, state[_BEST_SCORE]
def save_checkpoint(model: nn.Module, optim_: optimizer.Optimizer, epoch_id: int, step: int, best_score: float,
                    save_path: str, kg_name: str):
    """Serialize a resumable checkpoint to <save_path>/<kg_name>_<epoch>_checkpoint.tar."""
    snapshot = {
        _MODEL_STATE_DICT: model.state_dict(),
        _OPTIMIZER_STATE_DICT: optim_.state_dict(),
        _EPOCH: epoch_id,
        _STEP: step,
        _BEST_SCORE: best_score,
    }
    target = os.path.join(save_path, '{}_{}_checkpoint.tar'.format(kg_name, epoch_id))
    torch.save(snapshot, target)
def hit_at_k(predictions: torch.Tensor, ground_truth_idx: torch.Tensor, device: torch.device, k: int = 10) -> int:
    """Calculates number of hits@k.

    :param predictions: BxN tensor of prediction values (dissimilarity scores) where B is batch
        size and N number of classes. Predictions must be sorted in class ids order
    :param ground_truth_idx: Bx1 tensor with index of ground truth class
    :param device: kept for interface compatibility; no longer needed by the computation
    :param k: number of top K results to be considered as hits
    :return: Hits@K score
    """
    assert predictions.size(0) == ground_truth_idx.size(0)
    # Lower score = better, so take the k *smallest* predictions per row.
    _, indices = predictions.topk(k=k, largest=False)
    # Broadcasting Bx1 against Bxk flags rows whose ground truth made top-k;
    # summing the boolean mask replaces the old one/zero torch.where dance.
    return (indices == ground_truth_idx).sum().item()
def cal_mrr(predictions: torch.Tensor, ground_truth_idx: torch.Tensor) -> float:
    """Sum of reciprocal ranks over a batch of predictions.

    :param predictions: BxN tensor of dissimilarity scores where B is batch size and
        N number of classes; predictions must be sorted in class ids order
    :param ground_truth_idx: Bx1 tensor with index of ground truth class
    :return: sum of 1/rank over the batch (the caller divides by example count)
    """
    assert predictions.size(0) == ground_truth_idx.size(0)
    # argsort ascending: position j holds the class id with the j-th smallest score.
    order = predictions.argsort()
    # The column where each row's ground truth appears is its zero-based rank.
    ranks = (order == ground_truth_idx).nonzero()[:, 1].float() + 1.0
    return (1.0 / ranks).sum().item()
def create_mappings(dataset_path: str) -> Tuple[Mapping, Mapping]:
    """Creates separate mappings to indices for entities and relations.

    Ids are assigned from most frequent to least frequent (ties keep first-seen
    order, per Counter.most_common).

    :param dataset_path: path to a tab-separated file of (head, relation, tail) lines
    :return: tuple of (entity -> id, relation -> id) dicts
    """
    entity_counter = Counter()
    relation_counter = Counter()
    with open(dataset_path, "r") as f:
        for line in f:
            # rstrip("\n") instead of line[:-1]: the old slice silently dropped
            # the last character when the final line had no trailing newline.
            head, relation, tail = line.rstrip("\n").split("\t")
            entity_counter.update([head, tail])
            relation_counter.update([relation])
    entity2id = {mid: idx for idx, (mid, _) in enumerate(entity_counter.most_common())}
    relation2id = {rel: idx for idx, (rel, _) in enumerate(relation_counter.most_common())}
    return entity2id, relation2id
class FB15KDataset(data.Dataset):
    """Dataset implementation for handling FB15K and FB15K-237."""

    def __init__(self, data_path: str, entity2id: Mapping, relation2id: Mapping):
        """Load the tab-separated triples at data_path into memory.

        :param data_path: path to a file of (head, relation, tail) lines
        :param entity2id: entity label -> integer id
        :param relation2id: relation label -> integer id
        """
        self.entity2id = entity2id
        self.relation2id = relation2id
        with open(data_path, "r") as f:
            # rstrip("\n") instead of line[:-1]: the slice dropped the last
            # character of the file when it did not end with a newline.
            self.data = [line.rstrip("\n").split("\t") for line in f]

    def __len__(self):
        """Denotes the total number of samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Returns (head id, relation id, tail id)."""
        head, relation, tail = self.data[index]
        head_id = self._to_idx(head, self.entity2id)
        relation_id = self._to_idx(relation, self.relation2id)
        tail_id = self._to_idx(tail, self.entity2id)
        return head_id, relation_id, tail_id

    @staticmethod
    def _to_idx(key: str, mapping: Mapping) -> int:
        """Map a label to its id; unknown labels map to the OOV id len(mapping)."""
        try:
            return mapping[key]
        except KeyError:
            return len(mapping)
class WordNetDataset(data.Dataset):
    """Dataset implementation for handling WordNet."""

    def __init__(self, kg_data: Mapping):
        """Wrap an already-materialized sequence of (head, relation, tail) triples."""
        self.data = kg_data

    def __len__(self):
        """Number of stored triples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the triple stored at *index* unchanged."""
        return self.data[index]

    @staticmethod
    def _to_idx(key: str, mapping: Mapping) -> int:
        """Map a label to its id, falling back to the OOV id len(mapping)."""
        # dict.get with a default is equivalent to the try/except KeyError form.
        return mapping.get(key, len(mapping))
def test(model: torch.nn.Module, data_generator: torch_data.DataLoader, entities_count: int,
         summary_writer: tensorboard.SummaryWriter, device: torch.device, epoch_id: int, metric_suffix: str,
         ) -> METRICS:
    """Evaluate link prediction by ranking every entity as candidate head/tail.

    For each test triplet, every entity is scored once as replacement tail and
    once as replacement head; Hits@{1,3,10} and MRR are accumulated over both
    directions, logged to tensorboard, and returned as percentages.
    """
    hits_at_1 = hits_at_3 = hits_at_10 = mrr = 0.0
    examples_count = 0.0
    entity_ids = torch.arange(end=entities_count, device=device).unsqueeze(0)
    for head, relation, tail in data_generator:
        batch = head.size()[0]
        head = head.to(device)
        relation = relation.to(device)
        tail = tail.to(device)
        all_entities = entity_ids.repeat(batch, 1)
        width = all_entities.size()[1]
        heads = head.reshape(-1, 1).repeat(1, width)
        relations = relation.reshape(-1, 1).repeat(1, width)
        tails = tail.reshape(-1, 1).repeat(1, width)
        # Score (h, r, ?) over every candidate tail ...
        tails_predictions = model.predict(
            torch.stack((heads, relations, all_entities), dim=2).reshape(-1, 3)
        ).reshape(batch, -1)
        # ... and (?, r, t) over every candidate head.
        heads_predictions = model.predict(
            torch.stack((all_entities, relations, tails), dim=2).reshape(-1, 3)
        ).reshape(batch, -1)
        # Stack both directions so each counts as a separate ranking example.
        predictions = torch.cat((tails_predictions, heads_predictions), dim=0)
        ground_truth_entity_id = torch.cat((tail.reshape(-1, 1), head.reshape(-1, 1)))
        hits_at_1 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=1)
        hits_at_3 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=3)
        hits_at_10 += hit_at_k(predictions, ground_truth_entity_id, device=device, k=10)
        mrr += cal_mrr(predictions, ground_truth_entity_id)
        examples_count += predictions.size()[0]
    hits_at_1_score = hits_at_1 / examples_count * 100
    hits_at_3_score = hits_at_3 / examples_count * 100
    hits_at_10_score = hits_at_10 / examples_count * 100
    mrr_score = mrr / examples_count * 100
    for tag, value in (('Metrics/Hits_1/', hits_at_1_score),
                       ('Metrics/Hits_3/', hits_at_3_score),
                       ('Metrics/Hits_10/', hits_at_10_score),
                       ('Metrics/MRR/', mrr_score)):
        summary_writer.add_scalar(tag + metric_suffix, value, global_step=epoch_id)
    return hits_at_1_score, hits_at_3_score, hits_at_10_score, mrr_score
def main(_):
torch.random.manual_seed(FLAGS.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
batch_size = FLAGS.batch_size
vector_length = FLAGS.vector_length
margin = FLAGS.margin
norm = FLAGS.norm
learning_rate = FLAGS.lr
epochs = FLAGS.epochs
device = torch.device('cuda') if FLAGS.use_gpu else torch.device('cpu')
path = FLAGS.dataset_path
train_generator, validation_generator, test_generator = None, None, None
if dataset == 'FB15K':
train_path = os.path.join(path, "freebase_mtr100_mte100-train.txt")
validation_path = os.path.join(path, "freebase_mtr100_mte100-valid.txt")
test_path = os.path.join(path, "freebase_mtr100_mte100-test.txt")
entity2id, relation2id = create_mappings(train_path)
train_set = FB15KDataset(train_path, entity2id, relation2id)
train_generator = torch_data.DataLoader(train_set, batch_size=batch_size)
validation_set = FB15KDataset(validation_path, entity2id, relation2id)
validation_generator = torch_data.DataLoader(validation_set, batch_size=FLAGS.validation_batch_size)
test_set = FB15KDataset(test_path, entity2id, relation2id)
test_generator = torch_data.DataLoader(test_set, batch_size=FLAGS.validation_batch_size)
print('FB15K data loaded')
elif dataset == WORDNET:
data_obj = pickle.load(open(os.path.join(path, 'wordnet_KG.pkl'), 'rb'))
relation2id = data_obj['relation_idx_dict']
entity2id = data_obj['word_idx_dict']
train_set = WordNetDataset(data_obj['fact_list'])
train_generator = torch_data.DataLoader(train_set, batch_size=batch_size)
print('WordNet data loaded')
else:
raise ValueError('')
model = TransE(entity_count=len(entity2id), relation_count=len(relation2id), dim=vector_length, margin=margin,
device=device, norm=norm) # type: torch.nn.Module
model = model.to(device)
optim_ = optim.SGD(model.parameters(), lr=learning_rate)
summary_writer = tensorboard.SummaryWriter(log_dir=FLAGS.tensorboard_log_dir)
start_epoch_id = 1
step = 0
best_score = 0.0
if FLAGS.load_checkpoint_file is not None:
checkpoint_path | |
17869, 'tracts': 4},
'Polk': {'population': 41475, 'tracts': 7},
'Pulaski': {'population': 12010, 'tracts': 3},
'Putnam': {'population': 21218, 'tracts': 5},
'Quitman': {'population': 2513, 'tracts': 1},
'Rabun': {'population': 16276, 'tracts': 5},
'Randolph': {'population': 7719, 'tracts': 2},
'Richmond': {'population': 200549, 'tracts': 47},
'Rockdale': {'population': 85215, 'tracts': 15},
'Schley': {'population': 5010, 'tracts': 2},
'Screven': {'population': 14593, 'tracts': 5},
'Seminole': {'population': 8729, 'tracts': 3},
'Spalding': {'population': 64073, 'tracts': 12},
'Stephens': {'population': 26175, 'tracts': 5},
'Stewart': {'population': 6058, 'tracts': 2},
'Sumter': {'population': 32819, 'tracts': 8},
'Talbot': {'population': 6865, 'tracts': 3},
'Taliaferro': {'population': 1717, 'tracts': 1},
'Tattnall': {'population': 25520, 'tracts': 5},
'Taylor': {'population': 8906, 'tracts': 3},
'Telfair': {'population': 16500, 'tracts': 3},
'Terrell': {'population': 9315, 'tracts': 4},
'Thomas': {'population': 44720, 'tracts': 11},
'Tift': {'population': 40118, 'tracts': 9},
'Toombs': {'population': 27223, 'tracts': 6},
'Towns': {'population': 10471, 'tracts': 3},
'Treutlen': {'population': 6885, 'tracts': 2},
'Troup': {'population': 67044, 'tracts': 14},
'Turner': {'population': 8930, 'tracts': 2},
'Twiggs': {'population': 9023, 'tracts': 2},
'Union': {'population': 21356, 'tracts': 6},
'Upson': {'population': 27153, 'tracts': 7},
'Walker': {'population': 68756, 'tracts': 13},
'Walton': {'population': 83768, 'tracts': 15},
'Ware': {'population': 36312, 'tracts': 9},
'Warren': {'population': 5834, 'tracts': 2},
'Washington': {'population': 21187, 'tracts': 5},
'Wayne': {'population': 30099, 'tracts': 6},
'Webster': {'population': 2799, 'tracts': 2},
'Wheeler': {'population': 7421, 'tracts': 2},
'White': {'population': 27144, 'tracts': 5},
'Whitfield': {'population': 102599, 'tracts': 18},
'Wilcox': {'population': 9255, 'tracts': 4},
'Wilkes': {'population': 10593, 'tracts': 4},
'Wilkinson': {'population': 9563, 'tracts': 3},
'Worth': {'population': 21679, 'tracts': 5}},
'HI': {'Hawaii': {'population': 185079, 'tracts': 34},
'Honolulu': {'population': 953207, 'tracts': 244},
'Kalawao': {'population': 90, 'tracts': 1},
'Kauai': {'population': 67091, 'tracts': 16},
'Maui': {'population': 154834, 'tracts': 37}},
'IA': {'Adair': {'population': 7682, 'tracts': 3},
'Adams': {'population': 4029, 'tracts': 2},
'Allamakee': {'population': 14330, 'tracts': 5},
'Appanoose': {'population': 12887, 'tracts': 5},
'Audubon': {'population': 6119, 'tracts': 3},
'Benton': {'population': 26076, 'tracts': 7},
'<NAME>': {'population': 131090, 'tracts': 38},
'Boone': {'population': 26306, 'tracts': 7},
'Bremer': {'population': 24276, 'tracts': 8},
'Buchanan': {'population': 20958, 'tracts': 6},
'<NAME>': {'population': 20260, 'tracts': 6},
'Butler': {'population': 14867, 'tracts': 5},
'Calhoun': {'population': 9670, 'tracts': 4},
'Carroll': {'population': 20816, 'tracts': 6},
'Cass': {'population': 13956, 'tracts': 5},
'Cedar': {'population': 18499, 'tracts': 5},
'<NAME>': {'population': 44151, 'tracts': 11},
'Cherokee': {'population': 12072, 'tracts': 4},
'Chickasaw': {'population': 12439, 'tracts': 4},
'Clarke': {'population': 9286, 'tracts': 3},
'Clay': {'population': 16667, 'tracts': 4},
'Clayton': {'population': 18129, 'tracts': 6},
'Clinton': {'population': 49116, 'tracts': 12},
'Crawford': {'population': 17096, 'tracts': 5},
'Dallas': {'population': 66135, 'tracts': 15},
'Davis': {'population': 8753, 'tracts': 2},
'Decatur': {'population': 8457, 'tracts': 3},
'Delaware': {'population': 17764, 'tracts': 4},
'<NAME>': {'population': 40325, 'tracts': 11},
'Dickinson': {'population': 16667, 'tracts': 5},
'Dubuque': {'population': 93653, 'tracts': 26},
'Emmet': {'population': 10302, 'tracts': 4},
'Fayette': {'population': 20880, 'tracts': 7},
'Floyd': {'population': 16303, 'tracts': 5},
'Franklin': {'population': 10680, 'tracts': 3},
'Fremont': {'population': 7441, 'tracts': 3},
'Greene': {'population': 9336, 'tracts': 4},
'Grundy': {'population': 12453, 'tracts': 4},
'Guthrie': {'population': 10954, 'tracts': 3},
'Hamilton': {'population': 15673, 'tracts': 5},
'Hancock': {'population': 11341, 'tracts': 4},
'Hardin': {'population': 17534, 'tracts': 6},
'Harrison': {'population': 14928, 'tracts': 5},
'Henry': {'population': 20145, 'tracts': 5},
'Howard': {'population': 9566, 'tracts': 3},
'Humboldt': {'population': 9815, 'tracts': 4},
'Ida': {'population': 7089, 'tracts': 3},
'Iowa': {'population': 16355, 'tracts': 4},
'Jackson': {'population': 19848, 'tracts': 6},
'Jasper': {'population': 36842, 'tracts': 9},
'Jefferson': {'population': 16843, 'tracts': 4},
'Johnson': {'population': 130882, 'tracts': 24},
'Jones': {'population': 20638, 'tracts': 5},
'Keokuk': {'population': 10511, 'tracts': 4},
'Kossuth': {'population': 15543, 'tracts': 6},
'Lee': {'population': 35862, 'tracts': 11},
'Linn': {'population': 211226, 'tracts': 45},
'Louisa': {'population': 11387, 'tracts': 3},
'Lucas': {'population': 8898, 'tracts': 4},
'Lyon': {'population': 11581, 'tracts': 3},
'Madison': {'population': 15679, 'tracts': 3},
'Mahaska': {'population': 22381, 'tracts': 7},
'Marion': {'population': 33309, 'tracts': 8},
'Marshall': {'population': 40648, 'tracts': 10},
'Mills': {'population': 15059, 'tracts': 5},
'Mitchell': {'population': 10776, 'tracts': 3},
'Monona': {'population': 9243, 'tracts': 4},
'Monroe': {'population': 7970, 'tracts': 3},
'Montgomery': {'population': 10740, 'tracts': 4},
'Muscatine': {'population': 42745, 'tracts': 10},
"O'Brien": {'population': 14398, 'tracts': 4},
'Osceola': {'population': 6462, 'tracts': 2},
'Page': {'population': 15932, 'tracts': 6},
'Palo Alto': {'population': 9421, 'tracts': 4},
'Plymouth': {'population': 24986, 'tracts': 6},
'Pocahontas': {'population': 7310, 'tracts': 3},
'Polk': {'population': 430640, 'tracts': 98},
'Pottawattamie': {'population': 93158, 'tracts': 30},
'Poweshiek': {'population': 18914, 'tracts': 5},
'Ringgold': {'population': 5131, 'tracts': 2},
'Sac': {'population': 10350, 'tracts': 4},
'Scott': {'population': 165224, 'tracts': 47},
'Shelby': {'population': 12167, 'tracts': 4},
'Sioux': {'population': 33704, 'tracts': 7},
'Story': {'population': 89542, 'tracts': 20},
'Tama': {'population': 17767, 'tracts': 6},
'Taylor': {'population': 6317, 'tracts': 3},
'Union': {'population': 12534, 'tracts': 4},
'<NAME>': {'population': 7570, 'tracts': 2},
'Wapello': {'population': 35625, 'tracts': 11},
'Warren': {'population': 46225, 'tracts': 12},
'Washington': {'population': 21704, 'tracts': 5},
'Wayne': {'population': 6403, 'tracts': 3},
'Webster': {'population': 38013, 'tracts': 12},
'Winnebago': {'population': 10866, 'tracts': 3},
'Winneshiek': {'population': 21056, 'tracts': 5},
'Woodbury': {'population': 102172, 'tracts': 26},
'Worth': {'population': 7598, 'tracts': 3},
'Wright': {'population': 13229, 'tracts': 5}},
'ID': {'Ada': {'population': 392365, 'tracts': 59},
'Adams': {'population': 3976, 'tracts': 2},
'Bannock': {'population': 82839, 'tracts': 22},
'Bear Lake': {'population': 5986, 'tracts': 2},
'Benewah': {'population': 9285, 'tracts': 2},
'Bingham': {'population': 45607, 'tracts': 8},
'Blaine': {'population': 21376, 'tracts': 4},
'Boise': {'population': 7028, 'tracts': 1},
'Bonner': {'population': 40877, 'tracts': 9},
'Bonneville': {'population': 104234, 'tracts': 21},
'Boundary': {'population': 10972, 'tracts': 2},
'Butte': {'population': 2891, 'tracts': 1},
'Camas': {'population': 1117, 'tracts': 1},
'Canyon': {'population': 188923, 'tracts': 29},
'Caribou': {'population': 6963, 'tracts': 2},
'Cassia': {'population': 22952, 'tracts': 6},
'Clark': {'population': 982, 'tracts': 1},
'Clearwater': {'population': 8761, 'tracts': 2},
'Custer': {'population': 4368, 'tracts': 1},
'Elmore': {'population': 27038, 'tracts': 5},
'Franklin': {'population': 12786, 'tracts': 2},
'Fremont': {'population': 13242, 'tracts': 3},
'Gem': {'population': 16719, 'tracts': 3},
'Gooding': {'population': 15464, 'tracts': 2},
'Idaho': {'population': 16267, 'tracts': 5},
'Jefferson': {'population': 26140, 'tracts': 4},
'Jerome': {'population': 22374, 'tracts': 5},
'Kootenai': {'population': 138494, 'tracts': 25},
'Latah': {'population': 37244, 'tracts': 7},
'Lemhi': {'population': 7936, 'tracts': 3},
'Lewis': {'population': 3821, 'tracts': 3},
'Lincoln': {'population': 5208, 'tracts': 1},
'Madison': {'population': 37536, 'tracts': 6},
'Minidoka': {'population': 20069, 'tracts': 5},
'<NAME>': {'population': 39265, 'tracts': 10},
'Oneida': {'population': 4286, 'tracts': 1},
'Owyhee': {'population': 11526, 'tracts': 3},
'Payette': {'population': 22623, 'tracts': 4},
'Power': {'population': 7817, 'tracts': 2},
'Shoshone': {'population': 12765, 'tracts': 3},
'Teton': {'population': 10170, 'tracts': 1},
'<NAME>': {'population': 77230, 'tracts': 14},
'Valley': {'population': 9862, 'tracts': 3},
'Washington': {'population': 10198, 'tracts': 3}},
'IL': {'Adams': {'population': 67103, 'tracts': 18},
'Alexander': {'population': 8238, 'tracts': 4},
'Bond': {'population': 17768, 'tracts': 4},
'Boone': {'population': 54165, 'tracts': 7},
'Brown': {'population': 6937, 'tracts': 2},
'Bureau': {'population': 34978, 'tracts': 10},
'Calhoun': {'population': 5089, 'tracts': 2},
'Carroll': {'population': 15387, 'tracts': 6},
'Cass': {'population': 13642, 'tracts': 5},
'Champaign': {'population': 201081, 'tracts': 43},
'Christian': {'population': 34800, 'tracts': 10},
'Clark': {'population': 16335, 'tracts': 4},
'Clay': {'population': 13815, 'tracts': 4},
'Clinton': {'population': 37762, 'tracts': 8},
'Coles': {'population': 53873, 'tracts': 12},
'Cook': {'population': 5194675, 'tracts': 1318},
'Crawford': {'population': 19817, 'tracts': 6},
'Cumberland': {'population': 11048, 'tracts': 3},
'<NAME>': {'population': 16561, 'tracts': 5},
'DeKalb': {'population': 105160, 'tracts': 21},
'Douglas': {'population': 19980, 'tracts': 5},
'DuPage': {'population': 916924, 'tracts': 216},
'Edgar': {'population': 18576, 'tracts': 5},
'Edwards': {'population': 6721, 'tracts': 3},
'Effingham': {'population': 34242, 'tracts': 8},
'Fayette': {'population': 22140, 'tracts': 7},
'Ford': {'population': 14081, 'tracts': 5},
'Franklin': {'population': 39561, 'tracts': 12},
'Fulton': {'population': 37069, 'tracts': 12},
'Gallatin': {'population': 5589, 'tracts': 2},
'Greene': {'population': 13886, 'tracts': 5},
'Grundy': {'population': 50063, 'tracts': 10},
'Hamilton': {'population': 8457, 'tracts': 3},
'Hancock': {'population': 19104, 'tracts': 7},
'Hardin': {'population': 4320, 'tracts': 2},
'Henderson': {'population': 7331, 'tracts': 3},
'Henry': {'population': 50486, 'tracts': 13},
'Iroquois': {'population': 29718, 'tracts': 9},
'Jackson': {'population': 60218, 'tracts': 14},
'Jasper': {'population': 9698, 'tracts': 3},
'Jefferson': {'population': 38827, 'tracts': 11},
'Jersey': {'population': 22985, 'tracts': 6},
'<NAME>': {'population': 22678, 'tracts': 6},
'Johnson': {'population': 12582, 'tracts': 4},
'Kane': {'population': 515269, 'tracts': 82},
'Kankakee': {'population': 113449, 'tracts': 29},
'Kendall': {'population': 114736, 'tracts': 10},
'Knox': {'population': 52919, 'tracts': 16},
'La Salle': {'population': 113924, 'tracts': 28},
'Lake': {'population': 703462, 'tracts': 153},
'Lawrence': {'population': 16833, 'tracts': 5},
'Lee': {'population': 36031, 'tracts': 9},
'Livingston': {'population': 38950, 'tracts': 10},
'Logan': {'population': 30305, 'tracts': 8},
'Macon': {'population': 110768, 'tracts': 34},
'Macoupin': {'population': 47765, 'tracts': 13},
'Madison': {'population': 269282, 'tracts': 61},
'Marion': {'population': 39437, 'tracts': 12},
'Marshall': {'population': 12640, 'tracts': 5},
'Mason': {'population': 14666, 'tracts': 6},
'Massac': {'population': 15429, 'tracts': 4},
| |
#!/usr/bin/env python
import sys, os, time
import logging
import struct, socket
from optparse import OptionParser
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ppparent_dir = os.path.dirname(os.path.dirname(parent_dir))
py_third_dir = os.path.join(ppparent_dir, 'py_third')
sys.path = [parent_dir, py_third_dir] + sys.path
from lcp import link_master as lm
from lcp import commit_ovs as cm
from pyDatalog import pyDatalog
# Module-wide mutable state, filled in by option parsing / sync_etcd_data().
logger = logging.getLogger('')  # root logger; reconfigured by init_logger()
TUPLENET_DIR = ""               # etcd key prefix for tuplenet data
ETCD_ENDPOINT = ""              # etcd endpoint(s), injected into tpctl commands
etcd = None                     # reserved for an etcd client handle (unused in this chunk)
wmaster = None                  # lm.WatchMaster created in sync_etcd_data()
system_id = ""                  # chassis id of the local host
HOST_BR_PHY = ""                # physical OVS bridge name (set externally)
HOST_BR_INT = 'br-int'          # OVS integration bridge name
entity_list = []                # in-memory entities read from etcd (see update_entity)
class TPToolErr(Exception):
    """Error raised by this tool for invalid topology state or input."""
    pass
class LSwitch(pyDatalog.Mixin):
    """Logical switch read from etcd.

    The pyDatalog Mixin base registers every instance as a fact, so the
    queries below (e.g. ``LSwitch.uuid[X] == ...``) can find it.
    """
    def __init__(self, uuid):
        super(LSwitch, self).__init__()
        self.uuid = uuid  # entity id: last segment of the etcd path
        self.name = uuid  # display name mirrors the uuid
    def __repr__(self):
        return self.uuid
class LRouter(pyDatalog.Mixin):
    """Logical router.

    chassis is a chassis id for edge routers pinned to a host, and None
    for the (central) router -- see dl_edge_LR_peer_LS below.
    """
    def __init__(self, uuid, chassis = None):
        super(LRouter, self).__init__()
        self.uuid = uuid
        self.chassis = chassis
        self.name = uuid
    def __repr__(self):
        return "%s:(chassis:%s)" %(self.uuid, self.chassis)
class LSPort(pyDatalog.Mixin):
    """Logical switch port.

    parent is the owning LSwitch uuid; peer, when set, names the LRPort
    this port patches to; chassis is the hosting chassis for bound ports.
    """
    def __init__(self, uuid, ip, mac, parent, chassis = None, peer = None):
        super(LSPort, self).__init__()
        self.uuid = uuid
        self.ip = ip
        self.mac = mac
        self.parent = parent
        self.chassis = chassis
        self.peer = peer
        self.name = uuid
    def __repr__(self):
        return "%s:(ip:%s, parent:%s)" % (self.uuid, self.ip, self.parent)
class LRPort(pyDatalog.Mixin):
    """Logical router port.

    parent is the owning LRouter uuid; peer, when set, names the LSPort
    on the other side of the link.
    """
    def __init__(self, uuid, ip, prefix, mac, parent,
                 chassis = None, peer = None):
        super(LRPort, self).__init__()
        self.uuid = uuid
        self.ip = ip
        # etcd delivers the prefix as a string; store it as an int
        self.prefix = int(prefix)
        self.mac = mac
        self.parent = parent
        self.chassis = chassis
        self.peer = peer
        self.name = uuid
    def __repr__(self):
        return "%s:(ip:%s/%d, parent:%s)" % (self.uuid, self.ip,
                                             self.prefix, self.parent)
class LStaticRoute(pyDatalog.Mixin):
    """Static route (ip/prefix via next_hop out of outport) owned by the
    LRouter named by parent."""
    def __init__(self, uuid, ip, prefix, next_hop, outport, parent):
        super(LStaticRoute, self).__init__()
        self.uuid = uuid
        self.ip = ip
        # normalize to int; etcd values arrive as strings
        self.prefix = int(prefix)
        self.next_hop = next_hop
        self.outport = outport
        self.parent = parent
        self.name = uuid
    def __repr__(self):
        return self.uuid
class Chassis(pyDatalog.Mixin):
    """Hypervisor/host record.

    tick is taken verbatim from etcd -- presumably a liveness/heartbeat
    counter (TODO confirm against the writer side).
    """
    def __init__(self, uuid, ip, tick):
        super(Chassis, self).__init__()
        self.uuid = uuid
        self.ip = ip
        self.tick = tick
        self.name = uuid
    def __repr__(self):
        return self.uuid
def init_logger():
    """Configure the module-wide root logger.

    Installs a NullHandler (records are formatted but discarded -- this
    tool is interactive and prints its own output) and raises the root
    logger level to DEBUG.
    """
    global logger
    logger = logging.getLogger('')
    handler = logging.NullHandler()
    fmt = ("%(asctime)s.%(msecs)03d %(levelname)s %(filename)s "
           "[line:%(lineno)d]: %(message)s")
    handler.setFormatter(logging.Formatter(fmt, '%Y-%m-%d %H:%M:%S'))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.info("")
def ecmp_execute_cmds(cmd_tpctl_list, cmd_first = None, cmd_final = None):
    """Run tpctl command batches, letting the topology settle in between.

    cmd_first (if given) runs 5s before the main batch; cmd_final (if
    given) runs 5s after it.
    """
    settle_seconds = 5
    if cmd_first is not None:
        tpctl_execute([cmd_first])
        time.sleep(settle_seconds)
    tpctl_execute(cmd_tpctl_list)
    if cmd_final is None:
        return
    time.sleep(settle_seconds)
    tpctl_execute([cmd_final])
def tpctl_execute(cmd_list):
    """Run each tpctl command string.

    The configured etcd endpoint and key prefix are spliced in right
    after the program name, and tpctl's confirmation prompt is
    auto-answered with 'yes'.
    """
    extra_args = ["--endpoints={}".format(ETCD_ENDPOINT),
                  "--prefix={}".format(TUPLENET_DIR)]
    for cmd_str in cmd_list:
        argv = cmd_str.split()
        argv[1:1] = extra_args  # argv[0] is 'tpctl'
        cm.call_popen(argv, commu='yes\n', shell=False)
def update_entity(entity_list, add_pool):
    """Instantiate pyDatalog entity objects from an etcd key/value pool.

    add_pool maps an entity type key ('LS', 'LR', 'lsp', 'lrp', 'lsr',
    'chassis') to {etcd_path: property_dict}.  The entity id is the last
    path segment; for ports and static routes the owning entity id is the
    third-from-last segment.  New instances are appended to entity_list
    (and, via pyDatalog.Mixin, become queryable facts).

    Fixes: dict.has_key() (Python-2-only) replaced with the `in`
    operator, which behaves identically and also works on Python 3; the
    unused `parent` local in the chassis branch was dropped.
    """
    if 'LS' in add_pool:
        for path, value_set in add_pool['LS'].items():
            entity_id = path.split('/')[-1]
            entity_list.append(LSwitch(entity_id))
    if 'LR' in add_pool:
        for path, value_set in add_pool['LR'].items():
            entity_id = path.split('/')[-1]
            entity_list.append(LRouter(entity_id, value_set.get('chassis')))
    if 'lsp' in add_pool:
        for path, value_set in add_pool['lsp'].items():
            path = path.split('/')
            entity_id = path[-1]
            parent = path[-3]  # owning logical switch
            entity_list.append(LSPort(entity_id, value_set['ip'],
                                      value_set['mac'], parent,
                                      value_set.get('chassis'),
                                      value_set.get('peer')))
    if 'lrp' in add_pool:
        for path, value_set in add_pool['lrp'].items():
            path = path.split('/')
            entity_id = path[-1]
            parent = path[-3]  # owning logical router
            entity_list.append(LRPort(entity_id, value_set['ip'],
                                      value_set['prefix'],
                                      value_set['mac'], parent,
                                      value_set.get('chassis'),
                                      value_set.get('peer')))
    if 'lsr' in add_pool:
        for path, value_set in add_pool['lsr'].items():
            path = path.split('/')
            entity_id = path[-1]
            parent = path[-3]  # owning logical router
            entity_list.append(LStaticRoute(entity_id, value_set['ip'],
                                            value_set['prefix'],
                                            value_set['next_hop'],
                                            value_set['outport'],
                                            parent))
    if 'chassis' in add_pool:
        for path, value_set in add_pool['chassis'].items():
            entity_id = path.split('/')[-1]
            entity_list.append(Chassis(entity_id, value_set['ip'],
                                       value_set['tick']))
def sync_etcd_data(etcd_endpoints):
    # One-shot sync: read the whole tuplenet keyspace from etcd and turn
    # it into in-memory pyDatalog entities (fills module-level entity_list).
    # data_type and del_pool are intentionally ignored here -- this tool
    # only needs the initial snapshot, not the watch stream.
    global wmaster
    wmaster = lm.WatchMaster(etcd_endpoints, TUPLENET_DIR)
    data_type, add_pool, del_pool = wmaster.read_remote_kvdata()
    update_entity(entity_list, add_pool)
# Declare the pyDatalog logic variables used by the queries/rules below.
pyDatalog.create_terms('X,Y,Z')
# Generic entity variables (numbered copies allow self-joins).
pyDatalog.create_terms('LR, LS, LSP, LRP, LSR')
pyDatalog.create_terms('LR1, LS1, LSP1, LRP1, LSR1')
pyDatalog.create_terms('LR2, LS2, LSP2, LRP2, LSR2')
pyDatalog.create_terms('LR3, LS3, LSP3, LRP3, LSR3')
# Descriptive variables naming each hop of an ECMP road (see dl_ecmp_road).
pyDatalog.create_terms('LS_OUT, LSP_OUT_TO_EDGE, LRP_EDGE_TO_OUT, LR_EDGE')
pyDatalog.create_terms('LRP_EDGE_TO_INNER, LSP_INNER_TO_EDGE, LS_INNER')
pyDatalog.create_terms('LSP_INNER_TO_CEN, LRP_CEN_TO_INNER, LR_CEN')
pyDatalog.create_terms('LSR_VIRT, LSR_EDGE, LSR_OUT')
def datalog_lr_central():
    # In-place pyDatalog query: binds every LRouter/uuid pair into X/Y.
    LRouter.uuid[X] == Y
    # Assumes the topology currently holds exactly one LR -- the central
    # router (TODO confirm this is only called before edges are created).
    if len(X.data) != 1:
        raise TPToolErr("failed to know central LR")
    return X.v()
def datalog_check_port_occupied_ip(ips):
    # Raise TPToolErr if any of the given ips is already consumed by an
    # existing logical switch port or logical router port.
    for ip in ips:
        LSPort.ip[X] == ip  # in-place pyDatalog query; matches land in X.data
        if len(X.data) != 0:
            raise TPToolErr("ip %s was occupied by other lsp" % ip)
        LRPort.ip[X] == ip
        if len(X.data) != 0:
            raise TPToolErr("ip %s was occupied by other lrp" % ip)
def datalog_check_chassis_exist(system_id):
    """Raise TPToolErr unless exactly one chassis with this id is known.

    Fix: corrected the error-message typo "registed" -> "registered".
    """
    Chassis.uuid[X] == system_id  # in-place pyDatalog query
    if len(X.data) != 1:
        raise TPToolErr("chassis %s is not registered in etcd" % system_id)
def datalog_check_chassis_is_edge():
    """Raise TPToolErr if the local chassis already hosts an edge LRouter.

    BUGFIX: the original raised when len(X.data) == 0, i.e. when NO
    LRouter is pinned to this chassis -- the opposite of its own error
    message ("chassis ... is an edge already").  A chassis is an edge
    when at least one LRouter's chassis equals system_id, so the guard
    must be != 0.
    """
    LRouter.chassis[X] == system_id  # in-place pyDatalog query
    if len(X.data) != 0:
        raise TPToolErr("chassis %s is an edge already" % system_id)
def datalog_is_entity_exist(uuid):
    # True when any known entity (switch, router, port or static route)
    # already uses this uuid/name; used by new_entity_name() to pick a
    # fresh, unique name.  Each `cls.uuid[X] == uuid` is an in-place
    # pyDatalog query whose matches land in X.data.
    LSwitch.uuid[X] == uuid
    if len(X.data) != 0:
        return True
    LRouter.uuid[X] == uuid
    if len(X.data) != 0:
        return True
    LSPort.uuid[X] == uuid
    if len(X.data) != 0:
        return True
    LRPort.uuid[X] == uuid
    if len(X.data) != 0:
        return True
    LStaticRoute.uuid[X] == uuid
    if len(X.data) != 0:
        return True
    return False
# A switch "has a patchport" when one of its ports carries the sentinel
# ip 255.255.255.255 (apparently how tuplenet marks patch ports to a
# physical bridge -- TODO confirm against the patchport writer).
pyDatalog.create_terms('dl_LS_has_patchport')
dl_LS_has_patchport(LS) <= (
    (LSPort.ip[LSP] == '255.255.255.255') &
    (LSwitch.uuid[LS] == LSPort.parent[LSP])
)
# Edge routers (LRouter pinned to a chassis) together with each router
# port and the switch/switch-port it peers with.
pyDatalog.create_terms('dl_edge_LR_peer_LS')
dl_edge_LR_peer_LS(LR, LRP, LS, LSP) <= (
    (LRouter.chassis[LR] != None) &
    (LRouter.uuid[LR] == LRPort.parent[LRP]) &
    (LRPort.uuid[LRP] == LSPort.peer[LSP]) &
    (LSPort.parent[LSP] == LSwitch.uuid[LS])
)
# Distinct router pairs joined through a common switch (edge <-> central).
pyDatalog.create_terms('dl_edge_LR_peer_LR')
dl_edge_LR_peer_LR(LR, LRP, LR1, LRP1) <= (
    dl_edge_LR_peer_LS(LR, LRP, LS, LSP) &
    (LSPort.parent[LSP1] == LSwitch.uuid[LS]) &
    (LSPort.peer[LSP1] == LRPort.uuid[LRP1]) &
    (LRPort.peer[LRP1] == LSPort.uuid[LSP1]) &
    (LRouter.uuid[LR1] == LRPort.parent[LRP1]) &
    (LR != LR1)
)
# A complete ECMP "road": outside LS (with patchport) -> edge LR ->
# inner LS -> central LR, plus the three static routes that stitch the
# path together (virtual net + default-out on the edge, edge net on the
# central router).
pyDatalog.create_terms('dl_ecmp_road')
dl_ecmp_road(LS_OUT, LSP_OUT_TO_EDGE, LRP_EDGE_TO_OUT, LR_EDGE,
             LRP_EDGE_TO_INNER, LSP_INNER_TO_EDGE, LS_INNER,
             LRP_CEN_TO_INNER, LR_CEN, LSR_VIRT, LSR_OUT, LSR_EDGE
             ) <= (
    dl_edge_LR_peer_LS(LR_EDGE, LRP_EDGE_TO_OUT,
                       LS_OUT, LSP_OUT_TO_EDGE) &
    dl_LS_has_patchport(LS_OUT) &
    dl_edge_LR_peer_LS(LR_EDGE, LRP_EDGE_TO_INNER,
                       LS_INNER, LSP_INNER_TO_EDGE) &
    (LS_OUT != LS_INNER) &
    dl_edge_LR_peer_LR(LR_EDGE, LRP_EDGE_TO_INNER, LR_CEN, LRP_CEN_TO_INNER) &
    (LStaticRoute.parent[LSR_VIRT] == LRouter.uuid[LR_EDGE]) &
    (LStaticRoute.outport[LSR_VIRT] == LRPort.uuid[LRP_EDGE_TO_INNER]) &
    (LStaticRoute.parent[LSR_OUT] == LRouter.uuid[LR_EDGE]) &
    (LStaticRoute.outport[LSR_OUT] == LRPort.uuid[LRP_EDGE_TO_OUT]) &
    (LStaticRoute.parent[LSR_EDGE] == LRouter.uuid[LR_CEN]) &
    (LStaticRoute.outport[LSR_EDGE] == LRPort.uuid[LRP_CEN_TO_INNER])
)
def new_entity_name(etype, prefix_name):
    """Return the first unused entity name of the form
    tp_<etype>_<prefix_name><i>, counting i up from 1."""
    base = 'tp_{}_{}'.format(etype, prefix_name)
    suffix = 0
    while True:
        suffix += 1
        candidate = '{}{}'.format(base, suffix)
        if not datalog_is_entity_exist(candidate):
            return candidate
def _cmd_new_link(lr_name, ls_name, ip, prefix):
cmd = "tpctl lr link {} {} {}/{}".format(lr_name, ls_name, ip, prefix)
return cmd
def _cmd_new_lsr(lr_name, ip, prefix, next_hop, outport):
lsr_name = "{}_{}-{}_to_{}_{}".format(lr_name, ip, prefix,
next_hop, outport)
cmd = "tpctl lsr add {} {} {}/{} {} {}".format(
lr_name, lsr_name, ip, prefix, next_hop, outport)
return cmd
def _cmd_new_patchport(ls_name, portname, chassis, peer_br):
cmd = "tpctl patchport add {} {} {} {}".format(
ls_name, portname, chassis, peer_br)
return cmd
def _cmd_del_patchport(ls_name, portname):
cmd = "tpctl lsp add {} {}".format(ls_name, portname)
return cmd
def _cmd_new_ls(ls_name):
cmd = "tpctl ls add {}".format(ls_name)
return cmd
def _cmd_del_ls(ls_name):
cmd = "tpctl ls del {} -r".format(ls_name)
return cmd
def _cmd_new_lr(lr_name, chassis = None):
if chassis is None:
cmd = "tpctl lr add {}".format(lr_name)
else:
cmd = "tpctl lr add {} {}".format(lr_name, chassis)
return cmd
def _cmd_del_lr(lr_name):
cmd = "tpctl lr del {} -r".format(lr_name)
return cmd
def _cmd_del_lrp(lr_name, lrp_name):
cmd = "tpctl lrp del {} {}".format(lr_name, lrp_name)
return cmd
def _cmd_del_lsr(lr_name, lsr_name):
cmd = "tpctl lsr del {} {}".format(lr_name, lsr_name)
return cmd
def _gen_lrp_property(ip_int, prefix):
    """Pick a free host IP inside the subnet containing ip_int/prefix.

    Scans from the highest host address (broadcast - 1) down to, but not
    including, the network address, and returns (ip, prefix) for the
    first address no lsp/lrp is using yet.

    Raises:
        TPToolErr: when every candidate host address is occupied.

    Fixes: the bare ``except:`` was narrowed to TPToolErr (the only
    exception the occupancy probe raises); inet_ntoa was hoisted out of
    the try so unrelated errors are no longer swallowed; error-message
    typos fixed.
    """
    host_bits = 32 - prefix
    network = (ip_int >> host_bits) << host_bits
    broadcast = network + (0xffffffff >> prefix)
    # xrange keeps this Python-2 file memory-safe for large subnets
    for candidate in xrange(broadcast - 1, network, -1):
        ip = socket.inet_ntoa(struct.pack("!I", candidate))
        try:
            # raises TPToolErr when some lsp/lrp already uses this ip
            datalog_check_port_occupied_ip([ip])
        except TPToolErr:
            continue
        return ip, prefix
    raise TPToolErr("cannot find a free lrp ip due to ip conflict")
def _init_ecmp_road(should_wait, central_lr, vip, vip_prefix, virt_ip,
                    virt_prefix, out_net, out_prefix, inner_ip, inner_prefix,
                    edge_net, edge_net_prefix, ext_gw):
    """Build and (after interactive confirmation) run the tpctl commands
    creating one ECMP path:

        outside LS (+patchport to HOST_BR_PHY) -> new edge LR on this
        chassis -> inner LS -> existing central LR

    plus the three static routes that stitch the path together.  When
    should_wait is True the final route is installed 5s after the rest so
    earlier topology changes can settle (see ecmp_execute_cmds).
    """
    tp_cmd_list = []
    # create LS and LR command
    out_ls_name = new_entity_name('LS', 'outside')
    tp_cmd_list.append(_cmd_new_ls(out_ls_name))
    edge_lr_name = new_entity_name('LR', 'edge')
    tp_cmd_list.append(_cmd_new_lr(edge_lr_name, system_id))
    inner_ls_name = new_entity_name('LS', 'm')
    tp_cmd_list.append(_cmd_new_ls(inner_ls_name))
    # create patch port tying the outside LS to the physical bridge
    patchport = new_entity_name('lsp', out_ls_name + "-patchport")
    tp_cmd_list.append(_cmd_new_patchport(out_ls_name, patchport,
                                          system_id, HOST_BR_PHY))
    # create link command
    tp_cmd_list.append(_cmd_new_link(edge_lr_name, out_ls_name,
                                     vip, vip_prefix))
    tp_cmd_list.append(_cmd_new_link(edge_lr_name, inner_ls_name,
                                     inner_ip, inner_prefix))
    # it take an assumption that there is no lport consume inner_ip/prefix
    ip_int = struct.unpack("!L", socket.inet_aton(inner_ip))[0]
    central_lr_ip, _ = _gen_lrp_property(ip_int, int(inner_prefix))
    if central_lr_ip == inner_ip:
        raise Exception(("failed to allocate ip for "
                         "central_lr port, please revise inner ip"))
    tp_cmd_list.append(_cmd_new_link(central_lr, inner_ls_name,
                                     central_lr_ip, inner_prefix))
    # create lsr command: default-out route, virtual-net route on the
    # edge, and the edge-net route on the central router (last -- it is
    # the command optionally delayed below)
    outport = "{}_to_{}".format(edge_lr_name, out_ls_name)
    tp_cmd_list.append(_cmd_new_lsr(edge_lr_name, out_net, out_prefix,
                                    ext_gw, outport))
    outport = "{}_to_{}".format(edge_lr_name, inner_ls_name)
    tp_cmd_list.append(_cmd_new_lsr(edge_lr_name, virt_ip, virt_prefix,
                                    central_lr_ip, outport))
    outport = "{}_to_{}".format(central_lr, inner_ls_name)
    tp_cmd_list.append(_cmd_new_lsr(central_lr, edge_net, edge_net_prefix,
                                    inner_ip, outport))
    print("tpctl will executes following commands")
    print('\n'.join(tp_cmd_list))
    is_execute = raw_input(("Please verify tpctl commands and press "
                            "yes to init an ecmp path:"))
    if is_execute == 'yes':
        if should_wait:
            ecmp_execute_cmds(tp_cmd_list[:-1], cmd_final=tp_cmd_list[-1])
        else:
            ecmp_execute_cmds(tp_cmd_list)
        print("Done")
    else:
        sys.exit(0)
def _remove_ecmp_road(should_wait, out, edge, inner, central_lr,
                      central_lsr, central_lrp):
    """Build and (after interactive confirmation) run the tpctl commands
    tearing down one ECMP path.

    The central router's static route and port are deleted FIRST (they
    are inserted at index 0); when should_wait is True that first command
    runs 5s before the rest so traffic drains off this path before the
    edge topology is removed.
    """
    outport_name = "{}_to_{}".format(central_lr.name, inner.name)
    tp_cmd_list = []
    tp_cmd_list.append(_cmd_del_lr(edge.name))
    tp_cmd_list.append(_cmd_del_ls(inner.name))
    tp_cmd_list.append(_cmd_del_ls(out.name))
    tp_cmd_list.insert(0, _cmd_del_lsr(central_lr.name, central_lsr.name))
    tp_cmd_list.insert(0, _cmd_del_lrp(central_lr.name, central_lrp.name))
    print("tpctl will executes following commands")
    print('\n'.join(tp_cmd_list))
    is_execute = raw_input(("Please verify tpctl commands and press "
                            "yes to remove a ecmp path:"))
    if is_execute == 'yes':
        if should_wait:
            ecmp_execute_cmds(tp_cmd_list[1:], cmd_first = tp_cmd_list[0])
        else:
            ecmp_execute_cmds(tp_cmd_list)
        print("Done")
    else:
        sys.exit(0)
def remove_ecmp_road(vip):
    """Find the ECMP path whose edge-to-outside port carries `vip` and
    tear it down; raise TPToolErr when no such path exists."""
    # In-place pyDatalog query: evaluating the rule binds every matching
    # road into the *.data lists of the logic variables.
    dl_ecmp_road(LS_OUT, LSP_OUT_TO_EDGE, LRP_EDGE_TO_OUT, LR_EDGE,
                 LRP_EDGE_TO_INNER, LSP_INNER_TO_EDGE, LS_INNER,
                 LRP_CEN_TO_INNER, LR_CEN, LSR_VIRT, LSR_OUT, LSR_EDGE)
    # Python 2: zip returns a list, so len(ecmp_road) below is valid.
    ecmp_road = zip(LS_OUT.data, LRP_EDGE_TO_OUT.data,
                    LR_EDGE.data, LS_INNER.data,
                    LR_CEN.data, LSR_EDGE.data, LRP_CEN_TO_INNER.data)
    found = False
    for out, lrp_edge_to_out, edge, inner, \
            lr_central, lsr_central, lrp_central in ecmp_road:
        if lrp_edge_to_out.ip == vip:
            found = True
            # Removing the last remaining road needs no drain delay.
            should_wait = False if len(ecmp_road) == 1 else True
            _remove_ecmp_road(should_wait, out, edge, inner, lr_central,
                              lsr_central, lrp_central)
            break
    if found is False:
        raise TPToolErr("failed to search ecmp path by using vip:%s" % vip)
def add_ecmp_road(vip, vip_prefix):
datalog_check_port_occupied_ip([vip])
dl_ecmp_road(LS_OUT, LSP_OUT_TO_EDGE, | |
width, bottom=R5)
p2 = plt.bar(ind, R2, width, bottom=R5+R4, color='orange')
p3 = plt.bar(ind, R3, width, bottom=R5+R4+R2, color='green')
plt.title('RU Count Frequency for GP1BA VNTR', y=1.05)
plt.xticks((0, 1, 2), ('African', 'East Asian', 'European'), fontsize=13)
plt.legend((p5[0], p4[0], p2[0], p3[0]), ('3 Repeats', '4 Repeats', '1 Repeats', '2 Repeats'), fontsize=legend_size)
plt.savefig('Population_RU_Count_GP1BA.pdf')
else:
R5_array = (49, 53, 58)
R4_array = (42, 42, 36)
R2_array = (7, 3, 4)
R3_array = (2, 2, 2)
R5 = np.array(R5_array)
R4 = np.array(R4_array)
R2 = np.array(R2_array)
R3 = np.array(R3_array)
ind = (0, 1, 2)
width = 0.35
p5 = plt.bar(ind, R5, width)
p4 = plt.bar(ind, R4, width, bottom=R5)
p2 = plt.bar(ind, R2, width, bottom=R5 + R4, color='orange')
p3 = plt.bar(ind, R3, width, bottom=R5 + R4 + R2, color='green')
plt.title('RU Count Frequency for MAOA VNTR', y=1.05)
plt.xticks((0, 1, 2), ('African', 'East Asian', 'European'), fontsize=13)
plt.legend((p5[0], p4[0], p2[0], p3[0]), ('5 Repeats', '4 Repeats', '2 Repeats', '3 Repeats'), fontsize=legend_size)
plt.savefig('Population_RU_Count_MAOA.pdf')
def get_diabetes_pattern_interavls():
    """Return 1-based inclusive (start, end) intervals covering the runs
    of identical characters in the diabetes VNTR pattern.

    e.g. the leading 'GG' becomes (1, 2) and the following 'CCCCCCCC'
    becomes (3, 10).  (Name kept as-is -- callers use this spelling.)
    """
    pattern = 'GGCCCCCCCCGTGCCGCCCACGGGTGACTCCGG'
    intervals = []
    run_start = 0  # 0-based index where the current run began
    for idx in range(1, len(pattern)):
        if pattern[idx] != pattern[run_start]:
            intervals.append((run_start + 1, idx))
            run_start = idx
    intervals.append((run_start + 1, len(pattern)))
    return intervals
def plot_indel_frequencies_for_diabetes():
    """Plot a stacked bar chart of frameshift (indel) frequencies in
    diabetes case vs. control individuals and save it as
    diabetes_indels.png.

    Indel keys are encoded as I<pos><base> (insertion) / D<pos>
    (deletion); comma-joined keys are multi-indel reads counted once per
    component.  NOTE: the computed case/control arrays are deliberately
    overwritten below with hand-curated, interval-grouped values before
    plotting -- the computed pass only prints diagnostics.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import rc, rcParams
    plt.style.use('ggplot')
    plt.rcParams['axes.facecolor'] = '#FFFFFF'
    rc('text', usetex=True)
    rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
    plt.title('Frameshift Frequency in Diabetes Patients')
    plt.xlabel(r'\emph{Frameshift Position}')
    plt.ylabel(r'\emph{\# of Individuals}')
    plt.gca().spines['bottom'].set_color('black')
    plt.gca().spines['left'].set_color('black')
    plt.tight_layout(pad=4.0, w_pad=0.5, h_pad=2.0)
    # (indel-key(s), individual count) pairs -- hard-coded study results
    raw_case = [('I0C', 1), ('D16', 1), ('D10', 1), ('I16T', 1), ('I28G', 1), ('I13C,D10', 1), ('I31A,D30,D31,I33T', 1), ('I9A', 1), ('I20C', 1), ('I3A', 1), ('I5C', 1), ('I19C', 1), ('I26G', 1), ('I31A', 1), ('I23C', 1), ('I33G', 1), ('I33C', 1), ('I2T', 1), ('I8T', 1), ('D13,I13C', 2), ('I24C', 2), ('I21A', 2), ('I10C', 3), ('I33T', 3), ('I12C', 4), ('D5', 5)]
    raw_control = [('D16', 1), ('D10', 1), ('I33C', 1), ('I7A', 1), ('I8C', 1), ('I24C', 1), ('I33C,I25T', 1), ('I28C', 1), ('I19G', 1), ('I13G', 1), ('I9C', 1), ('I11C', 1), ('I6C', 2), ('D5', 2), ('I10C', 3), ('I12C', 7)]
    # Split multi-indel keys and accumulate per-indel counts.
    case = {}
    control = {}
    for keys, value in raw_case:
        for key in keys.split(','):
            if key in case.keys():
                case[key] += value
            else:
                case[key] = value
    for keys, value in raw_control:
        for key in keys.split(','):
            if key in control.keys():
                control[key] += value
            else:
                control[key] = value
    total_indes = [pos for pos, num in case.items()]
    total_indes += [pos for pos, num in control.items()]
    total_indes = list(set(total_indes))
    total_indel_sorted = []
    intervals = get_diabetes_pattern_interavls()
    print(intervals)
    width = 0.35
    case_array = []
    control_array = []
    filtered_indels = []
    # Keep only indels seen at least twice across both cohorts.
    for pos in total_indes:
        case_count = case[pos] if pos in case.keys() else 0
        control_count = control[pos] if pos in control.keys() else 0
        if case_count + control_count < 2:
            continue
        case_array += [case_count]
        control_array += [control_count]
        filtered_indels += [pos]
        print(case_count, control_count)
    case_array = np.array(case_array)
    control_array = np.array(control_array)
    print(case_array)
    print(control_array)
    print(filtered_indels)
    # Hand-curated, interval-grouped labels/values used for the actual plot
    # (LaTeX-escaped underscores); they replace the computed arrays above.
    filtered_indels = ['[3-10]D', '[3-10]I\_C', '[12]I\_C', '[13]D', '[13]I\_C', '[16]D', '[21]I\_A', '[22-24]I\_C', '[30-31]I\_A', '[32-33]I\_C','[32-33]I\_T']
    case_array = np.array([5+2, 3, 4, 2, 3, 1, 2, 1, 2, 1, 4])
    control_array = np.array([2+1, 2+3, 7, 0, 0, 1, 0, 1, 0, 2, 0])
    x_axis = [i for i, indel in enumerate(filtered_indels)]
    p0 = plt.bar(x_axis, case_array, width)
    p1 = plt.bar(x_axis, control_array, width, bottom=case_array)
    plt.xticks(x_axis, filtered_indels, fontsize=6, rotation=45)
    plt.legend((p0[0], p1[0]), ('Case', 'Control'))
    plt.savefig('diabetes_indels.png', dpi=300)
def add_recruitment_results_for_illumina(illumina_recruitment_plots, results_dir):
    """Fill three matplotlib axes with Illumina read-recruitment recall
    curves (adVNTR vs BWA-MEM vs Bowtie 2), one axis per gene.

    Each <results_dir>/<gene>/result.txt row is:
        copy original our_filtering our_selection bwa bowtie
    and each tool's recall is its count divided by `original`.
    IL1RN and DRD4 are skipped; exactly three remaining gene dirs are
    assumed (titles 'A'-'C', hard-coded RU lengths and arrow positions).
    """
    import glob
    titles = 'ABC'
    ru = [12, 30, 39]  # RU length (bp) per plotted gene, in glob order
    # per-plot annotation geometry for the "hg19 RU Count" arrow
    arrow_heads = [(3, 1), (4, 1), (4, 1)]
    arrow_tails = [(50, -110), (+10, -65), (+10, -65)]
    gene_dirs = glob.glob(results_dir + '*')
    gene_index = 0
    for gene_dir in gene_dirs:
        gene_name = gene_dir.split('/')[-1]
        result_file = gene_dir + '/result.txt'
        if gene_name == 'IL1RN' or gene_name == 'DRD4':
            continue
        copies= []
        bwa_result = []
        bowtie_result = []
        our_selection_result = []
        with open(result_file) as input:
            lines = input.readlines()
            for line in lines:
                copy, original, our_filtering, our_selection, bwa, bowtie = line.split()
                original = int(original)
                copies.append(copy)
                our_selection_result.append(float(our_selection) / original)
                bwa_result.append(float(bwa) / original)
                bowtie_result.append(float(bowtie) / original)
    #'o-',markersize=4.2,
        title_text = '('+titles[gene_index] + ') \emph{%s}' % gene_name + ' \t(RU=%sbp)' % ru[gene_index]
        illumina_recruitment_plots[gene_index].set_title(title_text, fontsize=13)
        illumina_recruitment_plots[gene_index].plot(copies, our_selection_result, '.-', markersize=4, label='adVNTR')
        illumina_recruitment_plots[gene_index].plot(copies, bwa_result, '.-', markersize=4, label='BWA-MEM')
        illumina_recruitment_plots[gene_index].plot(copies, bowtie_result, '.-', markersize=4, label='Bowtie 2', color='orange')
        illumina_recruitment_plots[gene_index].spines['bottom'].set_color('black')
        illumina_recruitment_plots[gene_index].spines['left'].set_color('black')
        illumina_recruitment_plots[gene_index].annotate('hg19 RU Count', xy=arrow_heads[gene_index], xycoords='data',
                                                        xytext=arrow_tails[gene_index], textcoords='offset points',
                                                        arrowprops={'arrowstyle': '->', 'lw': 1, 'color': 'black'},
                                                        horizontalalignment='right', verticalalignment='bottom')
        gene_index += 1
def add_recruitment_results_for_pacbio(pacbio_recruitment_plots, results_dir):
    """Fill matplotlib axes with PacBio read-recruitment recall curves
    (adVNTR vs Blasr), one axis per gene directory.

    Same result.txt row format as the Illumina variant:
        copy original our_filtering our_selection bwa bowtie
    Here the 'bowtie' column is plotted as Blasr; bwa_result is computed
    but not plotted.
    """
    from matplotlib.ticker import FormatStrFormatter
    import numpy
    import glob
    titles = 'ABC'
    gene_dirs = glob.glob(results_dir + '*')
    gene_index = 0
    for gene_dir in gene_dirs:
        gene_name = gene_dir.split('/')[-1]
        result_file = gene_dir + '/result.txt'
        copies= []
        bwa_result = []
        bowtie_result = []
        our_selection_result = []
        with open(result_file) as input:
            lines = input.readlines()
            for line in lines:
                copy, original, our_filtering, our_selection, bwa, bowtie = line.split()
                copies.append(copy)
                our_selection_result.append(float(our_selection) / float(original))
                bwa_result.append(float(bwa) / float(original))
                bowtie_result.append(float(bowtie) / float(original))
        pacbio_recruitment_plots[gene_index].title.set_text(titles[gene_index] + ') %s' % gene_name)
        pacbio_recruitment_plots[gene_index].plot(copies, our_selection_result, '.-', markersize=4, label='adVNTR')
        pacbio_recruitment_plots[gene_index].plot(copies, bowtie_result, '.-', markersize=4, label='Blasr', color=(0.0, 0.6196078431372549, 0.45098039215686275))
        pacbio_recruitment_plots[gene_index].spines['bottom'].set_color('black')
        pacbio_recruitment_plots[gene_index].spines['left'].set_color('black')
    #    pacbio_recruitment_plots[gene_index].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        pacbio_recruitment_plots[gene_index].yaxis.set_ticks([0.8, 0.9, 1.0])
        gene_index += 1
def plot_read_recruitment_results():
    """Assemble the read-recruitment comparison figure (three Illumina
    subplots side by side, shared outer axis labels, shared legend) and
    save it as read_recruitment_result.pdf.

    The commented-out sections kept below are the PacBio row of an
    earlier 2x3 layout.
    """
    from matplotlib import rc, rcParams
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')
    plt.rcParams['axes.facecolor'] = '#FFFFFF'
    rc('text', usetex=True)
    rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
    plt.title('Read Recruitment Comparison')
    plt.gca().spines['bottom'].set_color('black')
    plt.gca().spines['left'].set_color('black')
    fig = plt.figure(figsize=(9, 3))
    ax = list([])
    x_label_font = 13
    y_label_font = 13
    # One invisible full-figure subplot used only to carry the shared
    # x/y axis labels for the three real subplots.
    ax.append(fig.add_subplot(111))
    ax[0].set_ylabel(r'\emph{Read Selection Recall}', fontsize=y_label_font, labelpad=10)
    ax[0].set_xlabel(r'\emph{Simulated RU Count}', fontsize=x_label_font, labelpad=10)
    # Turn off axis lines and ticks of the big subplot
    for i in range(1):
        ax[i].spines['top'].set_color('none')
        ax[i].spines['bottom'].set_color('none')
        ax[i].spines['left'].set_color('none')
        ax[i].spines['right'].set_color('none')
        ax[i].tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
    # pacbio_recruitment_plots = list([])
    # pacbio_recruitment_plots.append(fig.add_subplot(231))
    # pacbio_recruitment_plots.append(fig.add_subplot(232, sharey=pacbio_recruitment_plots[0]))
    # pacbio_recruitment_plots.append(fig.add_subplot(233, sharey=pacbio_recruitment_plots[0]))
    # add_recruitment_results_for_pacbio(pacbio_recruitment_plots, results_dir='../pacbio_coverage_experiment/')
    illumina_recruitment_plots = list([])
    illumina_recruitment_plots.append(fig.add_subplot(131))
    illumina_recruitment_plots.append(fig.add_subplot(132))
    illumina_recruitment_plots.append(fig.add_subplot(133))
    add_recruitment_results_for_illumina(illumina_recruitment_plots, results_dir='../Illumina_copy_number_short_vntrs_mapping/')
    plt.tight_layout(pad=0.6, w_pad=0.5, h_pad=1.0)
    # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.3, hspace=0.4)
    plt.subplots_adjust(top=0.80, left=0.1, bottom=0.1)
    illumina_handles, illumina_labels = illumina_recruitment_plots[2].get_legend_handles_labels()
    # handles, labels = pacbio_recruitment_plots[2].get_legend_handles_labels()
    # plt.figlegend(handles + illumina_handles[1:], labels + illumina_labels[1:], loc='upper center', ncol=5, labelspacing=0.)
    plt.figlegend(illumina_handles, illumina_labels, loc='upper center', ncol=5, labelspacing=0.)
    # fig.legend(lines, labels, loc=(0.5, 0), ncol=5)
    plt.savefig('read_recruitment_result.pdf', bbox_inches='tight')
def get_correct_estimates_for_ru(files, ru_length=None, adVNTR=False):
    """Compute per-VNTR average accuracy of RU-count estimates.

    Each file name encodes <vntr_id> and <simulated count> as the 3rd-
    and 2nd-from-last underscore-separated fields; the file's last line
    holds the estimated count.  Returns (mean of per-VNTR accuracies in
    percent, standard error of those accuracies).  ru_length and adVNTR
    are accepted for interface compatibility but unused, as before.
    """
    if not files:  ###################TEMP
        return 0, 0
    per_vntr = {}
    for file_name in files:
        fields = file_name.split('_')
        vntr_id = int(fields[-3])
        simulated = int(fields[-2])
        with open(file_name) as result_file:
            lines = result_file.readlines()
        hit = 0
        if len(lines) > 1:
            tail = lines[-1]
            # reject 'None' markers and over-long (garbage) last lines
            if tail.strip() != 'None' and len(tail) < 10:
                if int(float(tail.strip())) == simulated:
                    hit = 1
        per_vntr.setdefault(vntr_id, []).append(hit)
    averages = [sum(flags) / float(len(flags)) * 100
                for flags in per_vntr.values()]
    correct_ratio = sum(averages) / float(len(averages))
    from scipy import stats
    return correct_ratio, stats.sem(averages)
def plot_pacbio_ru_length_result(results_dir='../pacbio_ru_data_for_all_vntrs/', diploid=True):
from matplotlib import rc, rcParams
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
plt.style.use('ggplot')
plt.rcParams['axes.facecolor'] = '#FFFFFF'
rc('text', usetex=True)
rcParams['text.latex.unicode'] = True
rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
plt.title(r'Effect of RU Length on RU Count Estimation', fontname='Sans')
plt.ylabel(r'\emph{Correct Estimates Percentage}')
plt.xlabel(r'\emph{RU Length}')
ax = fig.add_subplot(1, 1, 1)
plt.gca().spines['bottom'].set_color('black')
plt.gca().spines['left'].set_color('black')
# ax.text(-0.1, 1.1, r'\textbf{B}', transform=ax.transAxes, fontsize=16, fontweight='bold', va='top', ha='right')
import glob
import os
ru_dirs = glob.glob(results_dir + '*')
points = []
naive_points = []
def get_lengths_and_discrepancies(file_name):
import ast
with open(file_name) as infile:
lines = infile.readlines()
if len(lines) < 2:
return [], []
length = ast.literal_eval(lines[0])
disc = ast.literal_eval(lines[-1])
return length, disc
# from advntr.advntr_commands import get_tested_vntrs
# pacbio_ids = get_tested_vntrs(True)
lengths = []
naive_lengths = []
discrepancies = []
naive_discrepancies = []
if diploid:
prefix = 'diploid_'
else:
prefix = ''
for ru_dir in ru_dirs:
vntr_id = int(os.path.basename(ru_dir))
# if vntr_id not in pacbio_ids:
# continue
advntr_results = ru_dir + '/%sadvntr_result.txt' % prefix
r3_naive_results = ru_dir + '/%sR3naive_result.txt' % prefix
if not os.path.exists(advntr_results) or not os.path.exists(r3_naive_results):
continue
length, disc = get_lengths_and_discrepancies(advntr_results)
naive_length, naive_disc = get_lengths_and_discrepancies(r3_naive_results)
lengths += length
naive_lengths += naive_length
discrepancies += disc
naive_discrepancies += naive_disc
print(len(discrepancies))
def plot_data(matplot_ax, discrepancies_list, lengths_list, naive=0):
data = {}
width = 200
offset = float(width) / 2
if naive:
offset *= -1
total = 0
total_wrongs = 0
for i in range(len(discrepancies_list)):
key = 500 * (lengths_list[i] / 500)
if key > 3000:
continue
if key not in data.keys():
data[key] = []
data[key].append(discrepancies_list[i])
for key in data.keys():
wrongs = sum([1 for e in data[key] if e > 0])
total_wrongs += wrongs
total | |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 10:12:03 2020
@brief: Library of ADI-styled Tkinter frames
@description:
Module containing modifications of the base Tkinter building blocks:
- Window
- Frame
As well as new templates which inherit from these blocks:
- adiConnectFrame : For connection through serial port
- adiSpeedReferenceFrame:
- adiCanvasFrame
- adiOptionsFrame
@author: <NAME>
@last-modified: 2020-11-09
"""
import inspect
import logging
import math
import platform
import random
import sys
import threading
import time
import tkinter as tk
import tkinter.ttk as ttk
import traceback
import matplotlib.figure as fig
import numpy as np
import serial
import serial.tools.list_ports
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from .adiStyle import AdiStyle
from .adiWidgets import Button, Text, OptionMenu, Label, Scale, Entry, Radiobutton, Checkbutton, ToolTip, createToolTip
# Create or get the module logger: DEBUG and above is appended to
# ./logfile.log with a timestamped, function-qualified format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler('logfile.log')
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s :%(funcName)20s(): %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
VERSION = "0.1.0"  # shown in the AdiWindow footer label
class AdiWindow(tk.Tk):
    """Tkinter Window with prebuilt adi icon
    and title description. Invoking program also gives
    a function (close_cb) to be called on window close."""
    # Icon file; resolved relative to the working directory (Windows .ico)
    ADI_ICON = 'app.ico'
    def __init__(self, name, close_cb):
        # close_cb: zero-arg callable invoked when the user closes the window
        self.close_cb = close_cb
        tk.Tk.__init__(self)
        self.iconbitmap(self.ADI_ICON)
        # chr(169) is the copyright sign
        self.title(name + ' ' + chr(169) + ' Analog Devices 2021')
        # route the window-manager close event through our callback
        self.protocol('WM_DELETE_WINDOW', self._releaseAndQuit)
        self.configure(bg=AdiStyle.COLOR_BG)
        version_string = "ADI Motor Control GUI, v." + VERSION
        self.version_tag = tk.Label(self, text=version_string, bg='white')
        self.version_tag.grid(row=100, column=0, sticky='w', columnspan=4)  # always at bottom of window
    def _releaseAndQuit(self):
        """Called when the user closes the main application window.
        It closes both the GUI and the command line windows."""
        # delegate shutdown entirely to the caller-supplied callback
        self.close_cb()
class AdiFrame(tk.Frame):
    """ADI basic frame, with attached 'name' label.

    Layout: row 0 holds the header label; row 1 holds a padding frame
    (column 0) and self.container (column 1), into which subclasses place
    their widgets.  Call show()/hide() to grid/ungrid the whole frame.
    """
    def __init__(self, parent, name, row=0, column=0, rowspan=1, columnspan=1, padx=0, pady=0,
                 bg=AdiStyle.COLOR_BG, relief="sunken"):
        # Remember grid placement so show() can re-grid after hide().
        self.row = row
        self.column = column
        self.rowspan = rowspan
        self.columnspan = columnspan
        self.bg = bg
        self.relief = relief
        self.padx = padx
        self.pady = pady
        tk.Frame.__init__(
            self,
            parent,
            relief=self.relief,
            borderwidth=1.2,  # fractional value; Tk rounds screen distances
            bg=bg,
        )
        # Header label spanning both columns of row 0.
        temp = tk.Label(
            self,
            font=AdiStyle.FONT_HEADER,
            text=name,
            bg=AdiStyle.COLOR_BG,
        )
        temp.grid(
            row=0,
            column=0,
            columnspan=2,
            sticky='W'
        )
        # Empty spacer frame indenting the content column.
        temp = tk.Frame(
            self,
            bg=AdiStyle.COLOR_BG,
            relief=tk.FLAT,
        )
        temp.grid(
            row=1,
            column=0,
            padx=10,
            pady=10,
        )
        # Content area for subclasses.
        self.container = tk.Frame(
            self,
            bg=AdiStyle.COLOR_BG,
            relief=tk.FLAT,
        )
        self.container.grid(
            row=1,
            column=1,
            padx=5,
            pady=5,
        )
    def show(self):
        # Grid the frame at the position recorded in __init__.
        self.grid(
            row=self.row,
            column=self.column,
            rowspan=self.rowspan,
            columnspan=self.columnspan,
            sticky="nsew",
            padx=self.padx,
            pady=self.pady,
        )
    def hide(self):
        # Remove the frame from the grid (placement is remembered).
        self.grid_forget()
    @staticmethod
    def _add(elem, row, column, columnspan=1, sticky="we", padx=0, pady=0):
        """Inserts widget into frame using grid. Configures
        widget with default font and background colour."""
        # Some widgets reject font/bg options; ignore those quietly.
        try:
            elem.configure(font=AdiStyle.FONT_BODY)
        except tk.TclError:
            pass
        try:
            elem.configure(bg=AdiStyle.COLOR_BG)
        except tk.TclError:
            pass
        elem.grid(
            row=row,
            column=column,
            columnspan=columnspan,
            sticky=sticky,
            padx=padx,
            pady=pady,
        )
class AdiConnectFrame(AdiFrame):
    """Provides widgets to connect to serial port,
    from list of connected serial ports.

    connect() toggles between connected/disconnected states, invoking
    connect_cb/disconnect_cb around the serial open/close; the button
    text and colour track the state.
    """
    def __init__(self, parent, connect_cb, disconnect_cb, serial, name="connection", row=0, column=0):
        # record variables
        self.parent = parent
        self.connect_cb = connect_cb        # called after a successful connect
        self.disconnect_cb = disconnect_cb  # called before closing the port
        self.serial = serial                # project serial wrapper (discoverPorts/connect/...)
        self.available_ports = self.serial.discoverPorts()
        if len(self.available_ports) == 0:
            self.available_ports = ["No Devices Found"]
        print(f"{self.available_ports}")
        self.comSelected = tk.StringVar()
        # init parent
        AdiFrame.__init__(
            self,
            parent,
            name,
            row,
            column,
        )
        # row 0: serial port
        self.serialFrame = tk.Frame(
            self.container,
            borderwidth=0,
            bg=AdiStyle.COLOR_BG,
        )
        temp = Label(
            self.serialFrame,
            font=AdiStyle.FONT_BODY,
            bg=AdiStyle.COLOR_BG,
            text="Port:"
        )
        self._add(temp, 1, 0)
        self.comSelect = OptionMenu(
            self.serialFrame,
            self.comSelected,
            *self.available_ports
        )
        # refresh the port list every time the dropdown is clicked
        self.comSelect.bind("<Button-1>", self.update_ports)
        self._add(self.comSelect, 1, 1)
        self.serial_button = Button(
            self.serialFrame,
            text='connect',
            command=self.connect,
        )
        self._add(self.serial_button, 1, 2)
        self.serialFrame.grid(
            row=2,
            column=1,
            sticky="ew",
        )
    def connect(self):
        """Toggle the serial connection and update the button state."""
        try:
            if self.serial.connected:
                self.serial_button.configure(
                    text="Connect",
                    bg=AdiStyle.COLOR_BG,
                )
                self.disconnect_cb()  # ensure proper shutdown of motor
                self.serial.disconnect()  # must be called last while so cb has serial connection
            else:
                assert (len(self.comSelected.get()) != 0)
                self.serial.connect(self.comSelected.get())
                self.serial_button.configure(
                    text="Disconnect",
                    bg=AdiStyle.COLOR_NOERROR,
                )
                self.connect_cb()  # connects motor and begins serial and plotting threads
        except AssertionError:
            print("Please enter a COM port to connect to")
        except:
            # NOTE(review): bare except keeps the GUI alive on any serial
            # failure; the traceback is printed for diagnosis.
            traceback.print_exc()
    def update_ports(self, *args):
        """Rescan serial ports and rebuild the dropdown menu in place."""
        self.available_ports = self.serial.discoverPorts()
        if len(self.available_ports) == 0:
            self.available_ports = ["No Devices Found"]
        print(f"{self.available_ports}")
        self.comSelected.set('')
        self.comSelect['menu'].delete(0, 'end')
        for port in self.available_ports:
            self.comSelect['menu'].add_command(label=port, command=tk._setit(self.comSelected, port))
class AdiSpeedReferenceFrame(AdiFrame):
    """Template with scale, entry box and buttons
    for controlling motor start/stop and speed.

    The slider and the entry box are kept in sync: moving the slider
    rewrites the entry, and editing the entry moves the slider and
    pushes the new speed to the motor.
    """

    def __init__(self, parent, motor, start_cb, stop_cb, name="speed reference", row=0, column=0, rowspan=1):
        self.start_cb = start_cb
        self.stop_cb = stop_cb
        self.rpm_val = tk.IntVar()
        self.rpm_val.set(0)
        self.rpm_val.trace_add("write", self._on_update_entry)  # triggers function on entry box write
        self.started = False  # True while the motor is running
        self.motor = motor
        self.parent = parent
        AdiFrame.__init__(
            self,
            parent,
            name,
            row,
            column,
            rowspan
        )
        self.temp = tk.Frame(
            self.container,
            background="white",
            width=50,
        )
        self.scaleFrame = tk.Frame(
            self.container,
            borderwidth=0,
            background="white",
        )
        # speed slider: 0..3000 RPM, vertical, 0 at the bottom
        self.slider = Scale(
            self.scaleFrame,
            from_=3000,
            to=0,
            orient="vertical",
            command=self._on_move_slider,
        )
        self.s = ttk.Style()
        self.s.configure("Vertical.TScale", background="white")
        self._add(self.slider, 0, 1, columnspan=3, sticky='n')
        self.slider_entry = Entry(
            self.scaleFrame,
            textvariable=self.rpm_val,
            width=5,
        )
        self._add(self.slider_entry, 1, 1, sticky='w')
        temp = Label(
            self.scaleFrame,
            font=AdiStyle.FONT_BODY,
            bg="white",
            text="[RPM]",
            padx=5,
            pady=5,
        )
        self._add(temp, 1, 2, sticky='e')
        # start button stays disabled until a serial connection exists
        self.start_button = Button(
            self.scaleFrame,
            text='Start',
            command=self.start_stop_motor,
            state="disabled",
        )
        self._add(self.start_button, 2, 1, columnspan=2)
        self.temp.grid(
            row=0,
            column=0,
            sticky='w',
        )
        self.scaleFrame.grid(
            row=1,
            column=2,
            sticky='w',
        )

    def start_stop_motor(self, *args):
        """Toggle the motor run state; requires an open serial connection."""
        try:
            assert (self.parent.serial_queue.serial_conn is not None)
            if not self.started:
                self.start_button['text'] = "Stop"
                self.motor.start()
                self.started = True
                self.start_cb()
            else:
                self.start_button['text'] = "Start"
                self.motor.stop()
                self.started = False
                self.stop_cb()
        except AssertionError:
            print("Cannot start motor without a serial connection")

    def set_motor_speed(self, *args):
        """Push the entry box value (RPM) to the motor."""
        speed = int(self.slider_entry.get())
        self.motor.setSpeed(speed)

    def _on_move_slider(self, *args):
        """Auto updates entry box when slider is moved"""
        try:
            self.slider_entry.delete(0, "end")
            self.slider_entry.insert(0, int(self.slider.get()))
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; only trap genuine errors
            traceback.print_exc()

    def _on_update_entry(self, *args):
        """Auto updates slider when entry is changed"""
        try:
            val = self.slider_entry.get()
            if val != '':
                self.slider.set(int(val))
                self.set_motor_speed()
        except Exception:
            # was a bare `except:`; most likely cause is a non-integer entry
            print("Please enter a valid integer for motor speed [RPM]")
class AdiCanvasFrame(AdiFrame):
"""Template holding the updating plot for Serial data. Contains update func
which allows the plot to be updated when new data is available"""
    def __init__(self, parent, name="plotting frame", row=0, column=0, rowspan=2, columnspan=2, padx=0, pady=0):
        # Builds the embedded matplotlib figure and the two initial
        # "Phase U"/"Phase V" lines. `parent` must expose time_ylim /
        # time_y_lo / time_y_hi, used for the optional fixed y-axis.
        AdiFrame.__init__(
            self,
            parent,
            name,
            row,
            column,
            rowspan,
            columnspan,
            padx,
            pady,
        )
        self.parent = parent
        self.f = fig.Figure(dpi=100, tight_layout=True)  # matplotlib figure to embed in tkinter
        self.a = self.f.add_subplot(111)  # axes object to plot data
        self.canvas = FigureCanvasTkAgg(  # canvas to draw lines on given figure
            self.f,
            self,
        )
        # Create initial lists for Phase U and V data
        # Updating lists is cheaper than recreating them
        self.x_data = np.array([0])
        self.y_data = np.array([0])
        line, = self.a.plot(self.x_data, self.y_data, label="Phase U")
        line2, = self.a.plot(self.x_data, self.y_data, label="Phase V")
        # create dictionary to update plotted lines by name
        self.lines = {
            "Phase U": line,
            "Phase V": line2,
        }
        self.a.legend(loc='upper right', fontsize=8)
        self.a.set_xlabel("Time (s)")
        self.a.set_ylabel("Motor Current (A)")
        self.a.set_title("Motor Phase Currents", size="large")
        self.a.set_yscale('linear')
        # optional fixed y-limits configured on the parent application
        if self.parent.time_ylim:
            self.a.set_ylim((self.parent.time_y_lo, self.parent.time_y_hi))
        self.canvas.draw()
        self.canvas.get_tk_widget().grid(
            row=0,
            column=0,
            padx=0,
        )
        self.fft = False  # initialise plot to show time data
        self.saved_frames = 0  # used to track "frozen" data frames, for comparison of modes
def __str__(self):
return f'Plotting {len(self.lines)} lines of data on Figure {self.f}'
@staticmethod
def full_fft(yt, timestep): # timestep will be motor.step
signal = np.array(yt, dtype=float)
n = signal.size
scaling_factor = 2 / n # scaling fft data as we are only looking at positive frequency
fourier = np.fft.fft(signal)
fftmagnitude = abs(fourier)
fftmagnitude *= scaling_factor
frequency = np.fft.fftfreq(n, d=timestep)
return frequency[:n // 2], fftmagnitude[:n // 2] # return only positive freq, mag of fft
    def update_data(self, name, x_data, y_data):
        """Update a line within plot given a name and new data.

        In FFT mode the y-data is transformed with full_fft first; the
        timestep is taken from x_data[0] (assumed equal to the sample
        step size — TODO confirm against the data producer).
        """
        try:
            assert (len(x_data) == len(y_data))
            if name not in self.lines:  # if new line, create dict entry
                line, = self.a.plot([0], [0])  # create new line on plot
                line.set_label(name)
                self.lines[name] = line  # add new entry to dict
            data = self.lines[name]  # grab reference of line to update
            if not self.fft:
                data.set_data(x_data, y_data)
            else:
                delta = x_data[0]  # timestep always lowest value (same as step size)
                xf, yf = self.full_fft(y_data, delta)
                data.set_data(xf, yf)
            # data must be relimited and scaled after setting data
            self.a.relim()
            self.a.autoscale_view()
            self.f.canvas.draw_idle()
        except AssertionError:
            # mismatched lengths: report on both the log and the console
            logger.error(f"Cannot plot X {len(x_data)} and Y {len(y_data)}")
            print(f"Cannot plot X {len(x_data)} and Y {len(y_data)}")
    def save_frame(self):
        """Freeze the current Phase U/V curves as new named lines.

        The frozen copies stay on the plot while "Phase U"/"Phase V" keep
        updating live, allowing comparison between control modes. Does
        nothing when no mode is selected in the options frame.
        """
        x1, y1 = self.lines["Phase U"].get_data(orig=True)
        x2, y2 = self.lines["Phase V"].get_data(orig=True)
        if self.parent.options_frame.cur_mode:
            # frame label encodes the frame number and the mode name's last char
            mode = int(self.parent.options_frame.cur_mode[-1])
            name1 = "Frame" + str(self.saved_frames + 1) + " Mode" + str(mode) + ": U"
            name2 = "Frame" + str(self.saved_frames + 1) + " Mode" + str(mode) + ": V"
            line1, = self.a.plot(x1, y1)  # create new line on plot
            line2, = self.a.plot(x2, y2)
            line1.set_label(name1)
            line2.set_label(name2)
            self.a.legend(loc='upper right', fontsize=8)
            self.lines[name1] = line1  # add new entry to dict
            self.lines[name2] = line2
            self.saved_frames += 1
def clear_frames(self):
old_frames = []
for key in self.lines:
if key in ["Phase U", "Phase V"]:
self.lines[key].set_data([0], [0])
else:
old_frames.append(key)
# must use separate loop as dict key | |
"rmap_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map in r2")
input_dict_4 = {
"r2": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"All the prefixes advertised from RED_1 and BLUE_1 should carry"
" attributes set by outbound route-maps within specific vrfs. "
"Router R1 should be able to match and permit/deny those "
"prefixes based on received attributes. Please use below "
"commands to verify."
)
input_dict = {
"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2",
}
for addr_type in ADDR_TYPES:
vrf = "RED_A"
routes = [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r2", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
vrf = "RED_B"
routes = [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r2", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_match_traffic_based_on_vrf_p0(request):
    """
    FUNC_13:
    Configure a route-map on DUT to match traffic based
    on a VRF interfaces.

    Steps: advertise static routes from red1/blue1 in their VRFs,
    redistribute them into BGP, then on R1 import RED_A/RED_B into
    BLUE_A with a route-map that AS-prepends RED_A routes, and verify
    the imported prefixes appear in R1's BLUE_A RIB.
    """
    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)
    reset_config_on_routers(tgen)
    if tgen.routers_have_failure():
        check_router_status(tgen)
    step(
        "Advertise unique BGP prefixes(IPv4+IPv6) from RED_1 "
        "in vrf instances(RED_A and RED_B)."
    )
    for addr_type in ADDR_TYPES:
        input_dict_1 = {
            "red1": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "RED_A",
                    },
                    {
                        "network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "RED_B",
                    },
                ]
            }
        }
        result = create_static_routes(tgen, input_dict_1)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    step(
        "Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
        " vrf instances(BLUE_A and BLUE_B)."
    )
    for addr_type in ADDR_TYPES:
        input_dict_2 = {
            "blue1": {
                "static_routes": [
                    {
                        "network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "BLUE_A",
                    },
                    {
                        "network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "BLUE_B",
                    },
                ]
            }
        }
        result = create_static_routes(tgen, input_dict_2)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    step("Redistribute static..")
    # Redistribute static routes into BGP in all four VRFs on both
    # edge routers (red1: AS 500, blue1: AS 800).
    input_dict_3 = {}
    for dut in ["red1", "blue1"]:
        temp = {dut: {"bgp": []}}
        input_dict_3.update(temp)
        if "red" in dut:
            VRFS = ["RED_A", "RED_B"]
            AS_NUM = [500, 500]
        elif "blue" in dut:
            VRFS = ["BLUE_A", "BLUE_B"]
            AS_NUM = [800, 800]
        for vrf, as_num in zip(VRFS, AS_NUM):
            temp[dut]["bgp"].append(
                {
                    "local_as": as_num,
                    "vrf": vrf,
                    "address_family": {
                        "ipv4": {
                            "unicast": {"redistribute": [{"redist_type": "static"}]}
                        },
                        "ipv6": {
                            "unicast": {"redistribute": [{"redist_type": "static"}]}
                        },
                    },
                }
            )
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    step(
        "Configure a route-map on R1 to match the prefixes "
        "coming from vrf RED_A and set as-prepend to these routes."
    )
    input_dict_4 = {
        "r1": {
            "route_maps": {
                "ABC": [
                    {
                        "action": "permit",
                        "match": {"source-vrf": "RED_A"},
                        "set": {"path": {"as_num": 1, "as_action": "prepend"}},
                    }
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    step(
        "On R1, import the routes form vrf RED_A and RED_B to BLUE_A and"
        " apply the route-map under vrf BLUE_A while importing"
    )
    # Raw vtysh config: the `import vrf route-map` knob is applied for
    # both address families under vrf BLUE_A.
    raw_config = {
        "r1": {
            "raw_config": [
                "router bgp 100 vrf BLUE_A",
                "address-family ipv4 unicast",
                "import vrf RED_A",
                "import vrf RED_B",
                "import vrf route-map ABC",
                "address-family ipv6 unicast",
                "import vrf RED_A",
                "import vrf RED_B",
                "import vrf route-map ABC",
            ]
        }
    }
    result = apply_raw_config(tgen, raw_config)
    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
    step(
        "All the prefixes advertised from RED_1 and BLUE_1 in vrfs "
        "RED_B and BLUE_B must prepend the AS number in as-path on R2."
    )
    # Verify the imported prefixes show up in R1's BLUE_A RIB.
    for addr_type in ADDR_TYPES:
        input_dict_7 = {
            "red1": {
                "static_routes": [
                    {
                        "network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "BLUE_A",
                    },
                    {
                        "network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
                        "next_hop": NEXT_HOP_IP[addr_type],
                        "vrf": "BLUE_A",
                    },
                ]
            }
        }
        result = verify_bgp_rib(tgen, addr_type, "r1", input_dict_7)
        assert result is True, "Testcase {} : Failed \n Error {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
def test_vrf_lite_with_static_bgp_originated_routes_p0(request):
"""
FUNC_14:
Test VRF-lite with Static+BGP originated routes.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
" vrf instances(BLUE_A and BLUE_B)."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_3 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK5_1["ipv4"]]
+ [NETWORK5_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK5_1["ipv6"]]
+ [NETWORK5_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK6_1["ipv4"]]
+ [NETWORK6_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK6_1["ipv6"]]
+ [NETWORK6_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
]
},
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK7_1["ipv4"]]
+ [NETWORK7_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK7_1["ipv6"]]
+ [NETWORK7_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK8_1["ipv4"]]
+ [NETWORK8_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK8_1["ipv6"]]
+ [NETWORK8_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : | |
import datetime
import pathlib
from math import sqrt
import matplotlib.dates as mdates
import numpy as np
import steampi.json_utils
# Reference: https://stackoverflow.com/a/3054314
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from aggregate_steam_spy import get_steam_database_filename, get_steam_categories_filename, get_steam_genres_filename
def load_aggregated_database():
    """Load the aggregated Steam database plus category/genre tables from JSON."""
    load = steampi.json_utils.load_json_data
    steam_database = load(get_steam_database_filename())
    all_categories = load(get_steam_categories_filename())
    all_genres = load(get_steam_genres_filename())
    return steam_database, all_categories, all_genres
def get_description_keywords(steam_database, verbose=False):
    """Return the sorted list of all keys appearing in any app's entry."""
    keyword_set = set()
    for app_data in steam_database.values():
        keyword_set.update(app_data.keys())
    sorted_keywords = sorted(keyword_set)
    if verbose:
        print('\nDescription keywords:')
        print('\n'.join(sorted_keywords))
    return sorted_keywords
def build_steam_calendar(steam_database, verbose=False):
    """Build a calendar of game releases, as a dict: datetime -> list of appIDs.

    Args:
        steam_database: dict mapping appID -> app data; each entry must contain
            release_date = {'is_released': bool, 'date': str}.
        verbose: when True, also print non-free games whose release date could
            not be parsed, and the full list of unparseable date strings.

    Returns:
        (release_calendar, weird_release_dates): the calendar, plus the sorted
        list of date strings that matched none of the known formats.
    """
    # Known Steam date formats: "Nov 11 2017", "11 Nov 2017", "November 11 2017",
    # "11 November 2017", and month-precision "Nov 2017". (The original code
    # tried these in a five-deep nested try/except pyramid.)
    known_formats = ('%b %d %Y', '%d %b %Y', '%B %d %Y', '%d %B %Y', '%b %Y')
    release_calendar = dict()
    weird_release_dates = set()
    weird_counter = 0
    for appID in steam_database:
        release_info = steam_database[appID]['release_date']
        is_released = release_info['is_released']
        release_date_as_str = release_info['date']
        if not is_released:
            continue
        release_date_as_str = release_date_as_str.replace(',', '')  # "Nov 11, 2017" == "Nov 11 2017"
        release_date_as_str = release_date_as_str.replace('сен.', 'September')  # Specifically for appID=689740
        release_date_as_datetime = None
        for date_format in known_formats:
            try:
                release_date_as_datetime = datetime.datetime.strptime(release_date_as_str, date_format)
                break
            except ValueError:
                continue
        if release_date_as_datetime is None:
            # no known format matched: record the oddity and move on
            weird_release_dates.add(release_date_as_str)
            weird_counter += 1
            if verbose:
                if weird_counter == 1:
                    print('\nGames being sold with weird release dates:')
                if steam_database[appID]['price_overview'] is not None:
                    if not (steam_database[appID]['is_free']):
                        sentence = 'appID={0:6}\t' + steam_database[appID]['name']
                        print(sentence.format(appID))
            continue
        release_calendar.setdefault(release_date_as_datetime, []).append(appID)
    weird_release_dates = sorted(weird_release_dates)
    if verbose:
        print('\nWeird release dates:')
        print('\n'.join(weird_release_dates))
    return release_calendar, weird_release_dates
def get_full_plot_filename(base_plot_filename):
    """Return 'plots/<base>.png', creating the plots/ folder if needed."""
    output_folder = 'plots/'
    # make sure the output folder exists before the caller writes to it
    pathlib.Path(output_folder).mkdir(parents=True, exist_ok=True)
    return output_folder + base_plot_filename + '.png'
def get_x_y_time_series(release_calendar,
                        steam_database=None,
                        description_keyword=None,
                        starting_year=None):
    """Extract a (dates, appID-lists) time-series from a release calendar.

    When description_keyword is given, only appIDs whose entry for that
    keyword is not None are kept; dates with no remaining appIDs are dropped.
    """
    x_list = []
    y_raw_list = []
    for release_date in sorted(release_calendar.keys()):
        if starting_year is not None and release_date.year < starting_year:
            # Skip release dates prior to the input starting year
            continue
        app_ids = release_calendar[release_date]
        if description_keyword is not None:
            app_ids = [
                app_id
                for app_id in app_ids
                if steam_database[app_id][description_keyword] is not None
            ]
        if app_ids:
            x_list.append(release_date)
            y_raw_list.append(app_ids)
    return x_list, y_raw_list
def plot_x_y_time_series(x_list, y_list,
                         chosen_title=None,
                         chosen_ylabel=None,
                         base_plot_filename=None,
                         month_formatting=False,
                         is_variable_of_interest_numeric=True,
                         max_ordinate=None,
                         confidence_interval_data=None):
    """Render a dated time-series (optionally with a confidence band) and
    save it under plots/ when base_plot_filename is given."""
    has_confidence_interval = bool(confidence_interval_data)
    fig = Figure(dpi=300)
    FigureCanvas(fig)  # attach an Agg canvas so the figure can be saved headless
    ax = fig.add_subplot(111)
    if has_confidence_interval:
        plot_mean_and_confidence_interval(ax,
                                          confidence_interval_data['mean'],
                                          confidence_interval_data['lb'],
                                          confidence_interval_data['ub'],
                                          x_list)
    else:
        ax.plot(x_list, y_list)
    if chosen_title is not None:
        ax.set_title(chosen_title)
    ax.set_xlabel('Date')
    if chosen_ylabel is not None:
        ax.set_ylabel(chosen_ylabel)
    if month_formatting:
        ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y'))
    ax.grid()
    if not is_variable_of_interest_numeric:
        # proportions get a fixed [0, max_ordinate] axis, capped at 1.0
        if max_ordinate is None:
            if has_confidence_interval:
                vec_reference = confidence_interval_data['ub']
            else:
                vec_reference = y_list
            max_ordinate = np.min([1.0, np.max(vec_reference) * 1.1])
        ax.set_ylim(0, max_ordinate)
    if base_plot_filename is not None:
        fig.savefig(get_full_plot_filename(base_plot_filename), bbox_inches='tight')
    return
def simplify_calendar(release_calendar):
    """Merge daily release dates into monthly dates.

    Returns a new dict mapping datetime.date(year, month, 1) to the combined
    appID lists of every day in that month.

    Bug fix: the original stored a *reference* to the caller's list on the
    first insertion for a month, so subsequent extend() calls mutated the
    input calendar's lists; the merged lists are now always fresh copies.
    """
    merged_calendar = dict()
    for release_date, app_ids in release_calendar.items():
        merged_release_date = datetime.date(release_date.year, release_date.month, 1)
        # setdefault([]) guarantees we extend a list owned by merged_calendar,
        # never one belonging to the input
        merged_calendar.setdefault(merged_release_date, []).extend(app_ids)
    return merged_calendar
def remove_current_date(release_calendar):
    """Return a copy of the calendar without today's (partial) entry.

    Tries to drop today's exact date first, then the current month; prints a
    notice when neither key is present.
    """
    now = datetime.datetime.now()
    this_day = datetime.date(now.year, now.month, now.day)
    this_month = datetime.date(this_day.year, this_day.month, 1)
    # shallow copy so the caller's calendar is left untouched
    filtered_calendar = dict(release_calendar)
    missing = object()  # sentinel: stored values could themselves be falsy
    if filtered_calendar.pop(this_day, missing) is missing:
        if filtered_calendar.pop(this_month, missing) is missing:
            print('No recent date could be removed from the calendar.')
    return filtered_calendar
def plot_mean_and_confidence_interval(ax, mean, lb, ub, x_tick_as_dates=None, color_mean=None, color_shading=None):
    """Draw a mean curve with a shaded confidence band on *ax*.

    Reference: plot_mean_and_CI() in https://github.com/woctezuma/humble-monthly/blob/master/plot_time_series.py
    Reference: https://studywolf.wordpress.com/2017/11/21/matplotlib-legends-for-mean-and-confidence-interval-plots/
    """
    if color_shading is None:
        color_shading = 'b'
    if color_mean is None:
        color_mean = color_shading + '--'  # dotted line in the shading color
    if x_tick_as_dates is None:
        x_tick_as_dates = range(mean.shape[0])
    # shaded confidence band first, then the mean curve drawn on top of it
    ax.fill_between(x_tick_as_dates, ub, lb,
                    color=color_shading, alpha=.5)
    ax.plot(x_tick_as_dates, mean, color_mean)
    return
def get_mean_and_confidence_interval(x_list, is_variable_of_interest_numeric=True):
    """Compute the mean and a 95% confidence interval for each sample group.

    Args:
        x_list: iterable of per-date sample groups (lists of numbers, or of
            0/1 values when is_variable_of_interest_numeric is False).
        is_variable_of_interest_numeric: True -> normal approximation around
            the sample mean; False -> Wilson score interval for proportions.

    Returns:
        (mean, lb, ub) as NumPy arrays; lb and ub are None when the interval
        could not be computed (TypeError from non-numeric data).

    Bug fix: the groups have different sizes (one group per release date), so
    stacking them with np.array() created a ragged array — a ValueError on
    NumPy >= 1.24. The Python lists are now iterated directly.
    """
    mean = np.array([np.mean(xi) for xi in x_list])
    # 0.975-quantile of the standard normal distribution (95% two-sided CI)
    # Reference: https://en.wikipedia.org/wiki/Normal_distribution
    z_quantile = 1.95996398454
    try:
        if is_variable_of_interest_numeric:
            # standard error of the mean for each group
            sig = np.array([np.std(xi) / np.sqrt(len(xi)) for xi in x_list])
            ub = mean + z_quantile * sig
            lb = mean - z_quantile * sig
        else:
            # Wilson score interval for a binomial proportion. Reference:
            # computeWilsonScore() in https://github.com/woctezuma/hidden-gems/blob/master/compute_wilson_score.py
            num_pos = np.array([np.sum(xi) for xi in x_list])
            num_neg = np.array([len(xi) - np.sum(xi) for xi in x_list])
            z2 = pow(z_quantile, 2)
            den = num_pos + num_neg + z2
            mean = (num_pos + z2 / 2) / den
            inside_sqrt = num_pos * num_neg / (num_pos + num_neg) + z2 / 4
            temp = np.array([sqrt(i) for i in inside_sqrt])
            delta = (z_quantile * temp) / den
            ub = mean + delta
            lb = mean - delta
    except TypeError:
        ub = None
        lb = None
    return mean, lb, ub
def plot_time_series_for_numeric_variable_of_interest(release_calendar,
                                                      steam_database=None,
                                                      statistic_str=None,
                                                      description_keyword=None,
                                                      legend_keyword=None,
                                                      starting_year=None,
                                                      is_variable_of_interest_numeric=True,
                                                      max_ordinate=None,
                                                      plot_confidence_interval_if_possible=True):
    """Aggregate a per-release variable month by month and plot it.

    statistic_str selects the aggregate ('Median', 'Average', 'Sum', or
    anything else for a plain count); description_keyword selects which app
    field to aggregate (None plots the number of releases). A confidence
    band is added only for the 'Average' statistic.
    """
    # Get x: dates and y: a set of appIDs of games released for each date in x
    (x, y_raw) = get_x_y_time_series(release_calendar, steam_database, description_keyword, starting_year)
    # Compute the value of interest y from y_raw
    feature_list = []
    for app_ids in y_raw:
        if description_keyword is not None:
            if is_variable_of_interest_numeric:
                # noinspection PyPep8
                g = int
            else:
                # noinspection PyPep8
                g = generic_converter
            features = [g(steam_database[app_id][description_keyword]) for app_id in app_ids]
        else:
            features = app_ids
        feature_list.append(features)
    confidence_interval_data = {}
    if plot_confidence_interval_if_possible and statistic_str is not None and statistic_str == 'Average':
        (mean, lb, ub) = get_mean_and_confidence_interval(feature_list, is_variable_of_interest_numeric)
        # Thresholding of lower-bound of confidence interval so that it is non-negative
        lb = np.array([max(i, 0) for i in lb])
        confidence_interval_data['mean'] = mean
        confidence_interval_data['lb'] = lb
        confidence_interval_data['ub'] = ub
    # pick the aggregation function matching statistic_str
    if statistic_str == 'Median':
        # noinspection PyPep8
        f = np.median
    elif statistic_str == 'Average':
        # noinspection PyPep8
        f = np.mean
    elif statistic_str == 'Sum':
        # noinspection PyPep8
        f = np.sum
    else:
        # noinspection PyPep8
        f = len
    y = []
    for features in feature_list:
        value = f(features)
        if description_keyword == 'price_overview':
            # Convert from cents to euros
            value = value / 100
        y.append(value)
    if description_keyword == 'price_overview':
        # Convert from cents to euros
        for entry in confidence_interval_data:
            confidence_interval_data[entry] = np.array([i / 100 for i in confidence_interval_data[entry]])
    # Plot legend
    if description_keyword is None:
        my_title = 'Number of games released on Steam each month'
        my_ylabel = 'Number of game releases'
        my_plot_filename = 'num_releases'
    elif description_keyword == 'price_overview':
        my_title = statistic_str + ' price of games released on Steam each month'
        my_ylabel = statistic_str + ' price (in €)'
        my_plot_filename = statistic_str.lower() + '_price'
    else:
        if is_variable_of_interest_numeric and (statistic_str == 'Median' or statistic_str == 'Average'):
            statistic_legend = statistic_str + ' '
        else:
            statistic_legend = ''
        if legend_keyword is None:
            if is_variable_of_interest_numeric:
                legend_keyword = 'number of ' + description_keyword
            else:
                sentence_prefixe_for_proportion = 'Proportion of games with '
                legend_keyword = sentence_prefixe_for_proportion + description_keyword
        my_title = statistic_legend + legend_keyword + ' among monthly Steam releases'
        my_ylabel = statistic_legend + legend_keyword
        if is_variable_of_interest_numeric:
            my_plot_filename = 'num_' + description_keyword
            if len(statistic_str) > 0:
                my_plot_filename = statistic_str.lower() + '_' + my_plot_filename
        else:
            my_plot_filename = 'proportion_' + description_keyword
    if starting_year is not None:
        my_plot_filename = my_plot_filename + '_from_' + str(starting_year)
    # month-granular x tick labels only make sense for truncated ranges
    month_formatting = bool(starting_year is not None)
    # Plot
    plot_x_y_time_series(x, y, my_title, my_ylabel, my_plot_filename, month_formatting, is_variable_of_interest_numeric,
                         max_ordinate, confidence_interval_data)
    return
def generic_converter(my_boolean):
    """Binarize a value that is probably a bool but may be a str or an int.

    Returns 1 when int(my_boolean) is strictly positive, 0 otherwise.
    """
    return 1 if int(my_boolean) > 0 else 0
def plot_time_series_for_boolean_variable_of_interest(release_calendar,
                                                      steam_database,
                                                      description_keyword='controller_support',
                                                      legend_keyword=None,
                                                      starting_year=None,
                                                      max_ordinate=1.0):
    """Plot the monthly proportion of releases with *description_keyword* set.

    Thin wrapper around plot_time_series_for_numeric_variable_of_interest with
    the statistic fixed to 'Average' and the variable treated as boolean.
    """
    plot_time_series_for_numeric_variable_of_interest(
        release_calendar,
        steam_database,
        statistic_str='Average',
        description_keyword=description_keyword,
        legend_keyword=legend_keyword,
        starting_year=starting_year,
        is_variable_of_interest_numeric=False,
        max_ordinate=max_ordinate,
    )
    return
def fill_in_platform_support(steam_database):
    """Flatten each app's nested 'platforms' dict into top-level *_support flags."""
    for app_id, app_data in steam_database.items():
        platforms = app_data['platforms']
        for platform_name in ('windows', 'mac', 'linux'):
            app_data[platform_name + '_support'] = platforms[platform_name]
    return steam_database
def fill_in_drm_support(steam_database):
    """Mark each app with a boolean 'drm_support' flag.

    An app counts as DRM-protected when its 'drm_notice' field is set.
    """
    for app_id in steam_database:
        # `is not None` already yields a bool; the original wrapped it in a
        # redundant bool() call
        steam_database[app_id]['drm_support'] = steam_database[app_id]['drm_notice'] is not None
    return steam_database
def plot_every_time_series_based_on_steam_calendar(release_calendar, steam_database):
plot_time_series_for_numeric_variable_of_interest(release_calendar) # Plot number of releases
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Median', 'price_overview')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Average', 'price_overview')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Median', 'achievements')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Average', 'achievements')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Average', 'dlc')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Median', 'metacritic',
'Metacritic score')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Average', 'metacritic',
'Metacritic score')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Median', 'recommendations')
plot_time_series_for_numeric_variable_of_interest(release_calendar, steam_database, 'Average', 'recommendations')
sentence_prefixe = 'Proportion of games with '
plot_time_series_for_boolean_variable_of_interest(release_calendar, steam_database, 'controller_support',
sentence_prefixe + 'controller support')
plot_time_series_for_boolean_variable_of_interest(release_calendar, steam_database, 'demos',
sentence_prefixe + 'a demo')
plot_time_series_for_boolean_variable_of_interest(release_calendar, steam_database, 'ext_user_account_notice',
sentence_prefixe + | |
# repo: masnes/publicprize — file: publicprize/evc/form.py
# -*- coding: utf-8 -*-
""" contest forms: HTTP form processing for contest pages
:copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved.
:license: Apache, see LICENSE for more details.
"""
import decimal
import re
import sys
import flask
import flask_mail
import flask_wtf
import paypalrestsdk
import paypalrestsdk.exceptions
import wtforms
import wtforms.validators as wtfv
from . import model as pcm
from .. import common
from .. import controller as ppc
from ..auth import model as pam
class Contestant(flask_wtf.Form):
    """Project submission form.
    Fields:
        display_name: project name
        contestant_desc: project summary
        youtube_url: full YouTube video url
        slideshow_url: full SlideShare url
        founder_desc: current user's founder info for this project
        website: project website (optional)
    """
    # Required business identity fields (lengths enforced by wtforms validators)
    display_name = wtforms.StringField(
        'Legal Name of Business', validators=[
            wtfv.DataRequired(), wtfv.Length(max=100)])
    contestant_desc = wtforms.TextAreaField(
        'Summary of Business, Product and/or Service',
        validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
    # Pitch material links; further checked in validate() via
    # _validate_youtube() / _validate_slideshare()
    youtube_url = wtforms.StringField(
        'YouTube Video URL', validators=[
            wtfv.DataRequired(), wtfv.Length(max=500)])
    slideshow_url = wtforms.StringField(
        'SlideShare Pitch Deck URL', validators=[
            wtfv.DataRequired(), wtfv.Length(max=500)])
    founder_desc = wtforms.TextAreaField(
        'Your Bio', validators=[wtfv.DataRequired(), wtfv.Length(max=10000)])
    # Optional website; checked in validate() via _validate_website()
    website = wtforms.StringField(
        'Business Website', validators=[wtfv.Length(max=100)])
    tax_id = wtforms.StringField(
        'Business US Tax Id', validators=[
            wtfv.DataRequired(), wtfv.Length(max=30)])
    business_phone = wtforms.StringField(
        'Business Phone', validators=[
            wtfv.DataRequired(), wtfv.Length(max=100)])
    business_address = wtforms.TextAreaField(
        'Business Mailing Address', validators=[
            wtfv.DataRequired(), wtfv.Length(max=500)])
    agree_to_terms = wtforms.BooleanField(
        'Agree to Terms of Service', validators=[wtfv.DataRequired()])
    # Up to two optional co-founders (name + bio pairs), consumed by _add_founders()
    founder2_name = wtforms.StringField(
        'Other Founder Name', validators=[wtfv.Length(max=100)])
    founder2_desc = wtforms.TextAreaField(
        'Other Founder Bio', validators=[wtfv.Length(max=10000)])
    founder3_name = wtforms.StringField(
        'Other Founder Name', validators=[wtfv.Length(max=100)])
    founder3_desc = wtforms.TextAreaField(
        'Other Founder Bio', validators=[wtfv.Length(max=10000)])
def execute(self, contest):
"""Validates and creates the contestant model"""
if self.is_submitted() and self.validate():
contestant = self._update_models(contest)
if contestant:
self._send_mail_to_support(contestant)
flask.flash(
'Thank you for submitting your entry. You will be '
'contacted by email when your entry has been reviewed.')
return flask.redirect(contest.format_uri('contestants'))
return contest.task_class.get_template().render_template(
contest,
'submit',
form=self,
selected_menu_action='submit-contestant'
)
def validate(self):
"""Performs superclass wtforms validation followed by url
field validation"""
super(Contestant, self).validate()
self._validate_youtube()
self._validate_slideshare()
self._validate_website()
common.log_form_errors(self)
return not self.errors
def _add_founder(self, contestant, founder):
"""Creates the founder and links it to the contestant."""
ppc.db.session.add(founder)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contestant.biv_id,
target_biv_id=founder.biv_id
)
)
def _add_founders(self, contestant):
"""Add the current user as a founder and any optional founders."""
founder = pcm.Founder()
self.populate_obj(founder)
founder.display_name = flask.session['user.display_name']
self._add_founder(contestant, founder)
ppc.db.session.add(
pam.BivAccess(
source_biv_id=flask.session['user.biv_id'],
target_biv_id=founder.biv_id
)
)
if self.founder2_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder2_name.data),
founder_desc=str(self.founder2_desc.data),
))
if self.founder3_name.data:
self._add_founder(contestant, pcm.Founder(
display_name=str(self.founder3_name.data),
founder_desc=str(self.founder3_desc.data),
))
def _send_mail_to_support(self, contestant):
"""Send a notification to support for a new entry"""
ppc.mail().send(flask_mail.Message(
'New Entry Submitted: {}'.format(contestant.biv_id),
recipients=[ppc.app().config['PUBLICPRIZE']['SUPPORT_EMAIL']],
# TODO(pjm): requires new Flask-Mail for unicode on python 3
# body='Submitted by: {} {}\nTitle: {}\nReview URL: {}'.format(
# flask.session['user.display_name'],
# pam.User.query.filter_by(
# biv_id=flask.session['user.biv_id']
# ).one().user_email,
# contestant.display_name,
# contestant.format_absolute_uri()
# )
body='Submitted by: {}\nReview URL: {}'.format(
pam.User.query.filter_by(
biv_id=flask.session['user.biv_id']
).one().user_email,
contestant.format_absolute_uri()
)
))
def _slideshare_code(self):
"""Download slideshare url and extract embed code.
The original url may not have the code.
ex. www.slideshare.net/Micahseff/game-xplain-pitch-deck-81610
Adds field errors if the code can not be determined.
"""
html = common.get_url_content(self.slideshow_url.data)
if not html:
self.slideshow_url.errors = [
'SlideShare URL invalid or unavailable.']
return None
match = re.search(r'slideshow/embed_code/(\d+)', html)
if match:
return match.group(1)
self.slideshow_url.errors = [
'Embed code not found on SlideShare page.']
return None
def _update_models(self, contest):
"""Creates the Contestant and Founder models
and adds BivAccess models to join the contest and Founder models"""
contestant = pcm.Contestant()
self.populate_obj(contestant)
contestant.youtube_code = self._youtube_code()
contestant.slideshow_code = self._slideshare_code()
contestant.is_public = \
ppc.app().config['PUBLICPRIZE']['ALL_PUBLIC_CONTESTANTS']
contestant.is_under_review = False
ppc.db.session.add(contestant)
ppc.db.session.flush()
ppc.db.session.add(
pam.BivAccess(
source_biv_id=contest.biv_id,
target_biv_id=contestant.biv_id
)
)
self._add_founders(contestant)
return contestant
def _youtube_code(self):
"""Ensure the youtube url contains a VIDEO_ID"""
value = self.youtube_url.data
# http://youtu.be/a1Y73sPHKxw
# or https://www.youtube.com/watch?v=a1Y73sPHKxw
if re.search(r'\?', value) and re.search(r'v\=', value):
match = re.search(r'(?:\?|\&)v\=(.*?)(&|$)', value)
if match:
return match.group(1)
else:
match = re.search(r'\/([^\&\?\/]+)$', value)
if match:
return match.group(1)
return None
def _validate_slideshare(self):
"""Ensures the SlideShare slide deck exists"""
if self.slideshow_url.errors:
return
code = self._slideshare_code()
if code:
if not common.get_url_content(
'http://www.slideshare.net/slideshow/embed_code/' + code):
self.slideshow_url.errors = [
'Unknown SlideShare ID: ' + code + '.']
def _validate_website(self):
"""Ensures the website exists"""
if self.website.errors:
return
if self.website.data:
if not common.get_url_content(self.website.data):
self.website.errors = ['Website invalid or unavailable.']
def _validate_youtube(self):
"""Ensures the YouTube video exists"""
if self.youtube_url.errors:
return
code = self._youtube_code()
if code:
html = common.get_url_content('http://youtu.be/' + code)
# TODO(pjm): need better detection for not-found page
if not html or re.search(r'<title>YouTube</title>', html):
self.youtube_url.errors = [
'Unknown YouTube VIDEO_ID: ' + code + '.']
else:
self.youtube_url.errors = ['Invalid YouTube URL.']
class Donate(flask_wtf.Form):
    """Donation form.
    Fields:
        amount: donation amount (free text; see TODO below)
        donate5/donate25/donate100: fixed-amount submit buttons
        other_amount: submit button for a custom amount
    """
    # TODO(pjm): DecimalField doesn't accept '' value...
    amount = wtforms.StringField('Contribution Amount')
    donate5 = wtforms.SubmitField('$5')
    donate25 = wtforms.SubmitField('$25')
    donate100 = wtforms.SubmitField('$100')
    other_amount = wtforms.SubmitField('Other Amount')
    def execute(self, contestant):
        """Validates and redirects to PayPal
        For test credit card payments, use card number: 4736656842918643

        Falls through to re-rendering the detail page when validation or
        the PayPal payment creation fails.
        """
        if self.is_submitted() and self.validate():
            url = self._paypal_payment(contestant)
            if url:
                return flask.redirect(url)
        contest = contestant.get_contest()
        return contest.task_class.get_template().render_template(
            contest,
            'detail',
            contestant=contestant,
            contestant_url=contestant.format_absolute_uri(),
            contestant_tweet="Help us win! " + contestant.display_name,
            form=self,
        )
    def execute_payment(self, contestant):
        """Handles return task from paypal. Calls paypal with payment and
        payer IDs to complete the transaction."""
        donor = pcm.Donor.unsafe_load_from_session()
        if not donor:
            ppc.app().logger.warn('missing session donor')
            flask.flash('The referenced contribution was already processed.')
            return flask.redirect(contestant.format_uri())
        self._save_payment_info_to_donor(donor)
        payment = paypalrestsdk.Payment({
            'id': donor.paypal_payment_id
        })
        donor.remove_from_session()
        try:
            if payment.execute({'payer_id': donor.paypal_payer_id}):
                donor.donor_state = 'executed'
                ppc.db.session.add(donor)
                return flask.redirect(contestant.format_uri('thank-you'))
            else:
                ppc.app().logger.warn('payment execute failed')
        except paypalrestsdk.exceptions.ClientError as err:
            ppc.app().logger.warn(err)
        except Exception:
            # was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception (still best-effort)
            ppc.app().logger.warn(sys.exc_info()[0])
        return flask.redirect(contestant.format_uri())
    def validate(self):
        """Ensure the amount is present and at least $10"""
        super(Donate, self).validate()
        amount = None
        if self.donate5.data:
            amount = 5
        elif self.donate25.data:
            amount = 25
        elif self.donate100.data:
            amount = 100
        elif self.amount.data:
            try:
                if float(self.amount.data) < 10:
                    self.amount.errors = ['Amount must be at least $10.']
                elif float(self.amount.data) > 1000000:
                    self.amount.errors = ['Amount too large.']
            except ValueError:
                self.amount.errors = ['Please enter an amount.']
        else:
            self.amount.errors = ['Please enter an amount.']
        # clear raw form data so a fixed-button amount re-renders correctly
        self.amount.raw_data = None
        if amount:
            self.amount.data = decimal.Decimal(amount)
        common.log_form_errors(self)
        return not self.errors
    def _create_donor(self, contestant):
        """Create a new donor model and link to the parent contestant."""
        donor = pcm.Donor()
        self.populate_obj(donor)
        donor.donor_state = 'submitted'
        ppc.db.session.add(donor)
        # flush() assigns donor.biv_id for the BivAccess link
        ppc.db.session.flush()
        ppc.db.session.add(
            pam.BivAccess(
                source_biv_id=contestant.biv_id,
                target_biv_id=donor.biv_id
            )
        )
        return donor
    def _link_donor_to_user(self, donor):
        """Link the donor model to a user model. Match the donor email with
        the user. If no match, use the current user, if present."""
        if pam.BivAccess.query.select_from(pam.User).filter(
                pam.BivAccess.source_biv_id == pam.User.biv_id,
                pam.BivAccess.target_biv_id == donor.biv_id
        ).count() > 0:
            # already linked
            return
        user = pam.User.query.filter_by(user_email=donor.donor_email).first()
        if not user and flask.session.get('user.is_logged_in'):
            user = pam.User.query.filter_by(
                biv_id=flask.session['user.biv_id']
            ).one()
        if not user:
            return
        ppc.db.session.add(
            pam.BivAccess(
                source_biv_id=user.biv_id,
                target_biv_id=donor.biv_id
            )
        )
    def _paypal_payment(self, contestant):
        """Call paypal server to create payment record.
        Returns a redirect link to paypal site or None on error."""
        donor = self._create_donor(contestant)
        amount = '%.2f' % float(self.amount.data)
        payment = paypalrestsdk.Payment({
            'intent': 'sale',
            'payer': {
                'payment_method': 'paypal'
            },
            'redirect_urls': {
                'return_url': contestant.format_absolute_uri('donate-done'),
                'cancel_url': contestant.format_absolute_uri('donate-cancel'),
            },
            'transactions': [
                {
                    'amount': {
                        'total': amount,
                        'currency': 'USD',
                    },
                    'item_list': {
                        'items': [
                            {
                                'quantity': 1,
                                'price': amount,
                                'currency': 'USD',
                                'name': '{} contribution, {}'.format(
                                    contestant.display_name,
                                    contestant.get_contest().display_name),
                                'tax': 0
                            }
                        ]
                    }
                }
            ]
        })
        try:
            if payment.create():
                ppc.app().logger.info(payment)
                donor.paypal_payment_id = str(payment.id)
                donor.add_to_session()
                for link in payment.links:
                    if link.method == 'REDIRECT':
                        return str(link.href)
            else:
                ppc.app().logger.warn(payment.error)
        except paypalrestsdk.exceptions.ClientError as err:
            ppc.app().logger.warn(err)
        except Exception:
            # was a bare ``except:``; narrowed to Exception so process
            # signals are not swallowed
            ppc.app().logger.warn(sys.exc_info()[0])
        self.amount.errors = [
            'There was an error processing your contribution.']
        return None
    def _save_payment_info_to_donor(self, donor):
        """Get payer info from paypal server, save info to Donor model."""
        try:
            payment = paypalrestsdk.Payment.find(donor.paypal_payment_id)
            info = payment.payer.payer_info
            donor.donor_email = info.email
            donor.display_name = info.first_name + ' ' + info.last_name
        except paypalrestsdk.exceptions.ConnectionError as err:
            ppc.app().logger.warn(err)
        donor.paypal_payer_id = flask.request.args['PayerID']
        donor.donor_state = 'pending_confirmation'
        ppc.db.session.add(donor)
        self._link_donor_to_user(donor)
class Judgement(flask_wtf.Form):
"""Judgement form.
Fields:
question(1 .. 6): question score
question(1 ..6)_comment: comments for survey question
general_comment: End of survey comments
"""
def _comment_field(label='Comments'):
return wtforms.TextAreaField(
label, validators=[wtfv.Length(max=10000)])
def _question_field(number):
return wtforms.RadioField(
'Question {}'.format(number),
choices=[
('1', 'Unsatisfactory'),
('2', 'Improvement Needed'),
('3', 'Meets Expectations'),
('4', 'Exceeds Expectations')
]
)
question1 = _question_field('1')
question1_comment = _comment_field()
question2 = _question_field('2')
question2_comment = _comment_field()
question3 = _question_field('3')
question3_comment = _comment_field()
question4 = _question_field('4')
question4_comment = _comment_field()
question5 = _question_field('5')
question5_comment = _comment_field()
question6 = _question_field('6')
question6_comment = _comment_field()
general_comment = _comment_field('General Comments')
def execute(self, contestant):
"""Saves scores for questions."""
contest = contestant.get_contest()
if self.is_submitted():
if self.validate():
self._save_scores(contestant)
flask.flash('Thank you for scoring contestant {}.'.format(
contestant.display_name))
return flask.redirect(
contest.format_uri('judging'))
else:
self._load_scores(contestant)
return contest.task_class.get_template().render_template(
contest,
'judge-contestant',
sub_base_template=contest.task_class.get_template().base_template('detail'),
contestant=contestant,
form=self
)
@classmethod
def get_points_for_question(cls, number):
return pcm.JudgeScore.get_points_for_question(number)
@classmethod
def get_text_for_question(cls, number):
| |
+ -------|
| | /1 \|
| | sin|--||
| \ \u2//
log(f(x)) = log(C1) + | ---------------- d(u2)
| 2
| u2
|
/
>>> pprint(odesimp(eq, f(x), 1, set([C1]),
... hint='1st_homogeneous_coeff_subs_indep_div_dep'
... )) #doctest: +SKIP
x
--------- = C1
/f(x)\
tan|----|
\2*x /
"""
x = func.args[0]
f = func.func
C1 = get_numbered_constants(eq, num=1)
# First, integrate if the hint allows it.
eq = _handle_Integral(eq, func, order, hint)
if hint.startswith("nth_linear_euler_eq_nonhomogeneous"):
eq = simplify(eq)
if not isinstance(eq, Equality):
raise TypeError("eq should be an instance of Equality")
# Second, clean up the arbitrary constants.
# Right now, nth linear hints can put as many as 2*order constants in an
# expression. If that number grows with another hint, the third argument
# here should be raised accordingly, or constantsimp() rewritten to handle
# an arbitrary number of constants.
eq = constantsimp(eq, constants)
# Lastly, now that we have cleaned up the expression, try solving for func.
# When RootOf is implemented in solve(), we will want to return a RootOf
# everytime instead of an Equality.
# Get the f(x) on the left if possible.
if eq.rhs == func and not eq.lhs.has(func):
eq = [Eq(eq.rhs, eq.lhs)]
# make sure we are working with lists of solutions in simplified form.
if eq.lhs == func and not eq.rhs.has(func):
# The solution is already solved
eq = [eq]
# special simplification of the rhs
if hint.startswith("nth_linear_constant_coeff"):
# Collect terms to make the solution look nice.
# This is also necessary for constantsimp to remove unnecessary
# terms from the particular solution from variation of parameters
#
# Collect is not behaving reliably here. The results for
# some linear constant-coefficient equations with repeated
# roots do not properly simplify all constants sometimes.
# 'collectterms' gives different orders sometimes, and results
# differ in collect based on that order. The
# sort-reverse trick fixes things, but may fail in the
# future. In addition, collect is splitting exponentials with
# rational powers for no reason. We have to do a match
# to fix this using Wilds.
global collectterms
try:
collectterms.sort(key=default_sort_key)
collectterms.reverse()
except:
pass
assert len(eq) == 1 and eq[0].lhs == f(x)
sol = eq[0].rhs
sol = expand_mul(sol)
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x))
del collectterms
# Collect is splitting exponentials with rational powers for
# no reason. We call powsimp to fix.
sol = powsimp(sol)
eq[0] = Eq(f(x), sol)
else:
# The solution is not solved, so try to solve it
try:
eqsol = solve(eq, func, force=True)
if not eqsol:
raise NotImplementedError
except (NotImplementedError, PolynomialError):
eq = [eq]
else:
def _expand(expr):
numer, denom = expr.as_numer_denom()
if denom.is_Add:
return expr
else:
return powsimp(expr.expand(), combine='exp', deep=True)
# XXX: the rest of odesimp() expects each ``t`` to be in a
# specific normal form: rational expression with numerator
# expanded, but with combined exponential functions (at
# least in this setup all tests pass).
eq = [Eq(f(x), _expand(t)) for t in eqsol]
# special simplification of the lhs.
if hint.startswith("1st_homogeneous_coeff"):
for j, eqi in enumerate(eq):
newi = logcombine(eqi, force=True)
if newi.lhs.func is log and newi.rhs == 0:
newi = Eq(newi.lhs.args[0]/C1, C1)
eq[j] = newi
# We cleaned up the constants before solving to help the solve engine with
# a simpler expression, but the solved expression could have introduced
# things like -C1, so rerun constantsimp() one last time before returning.
for i, eqi in enumerate(eq):
eq[i] = constantsimp(eqi, constants)
eq[i] = constant_renumber(eq[i], 'C', 1, 2*order)
# If there is only 1 solution, return it;
# otherwise return the list of solutions.
if len(eq) == 1:
eq = eq[0]
return eq
def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True):
r"""
Substitutes ``sol`` into ``ode`` and checks that the result is ``0``.
This only works when ``func`` is one function, like `f(x)`. ``sol`` can
be a single solution or a list of solutions. Each solution may be an
:py:class:`~sympy.core.relational.Equality` that the solution satisfies,
e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an
:py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it
will not be necessary to explicitly identify the function, but if the
function cannot be inferred from the original equation it can be supplied
through the ``func`` argument.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
It tries the following methods, in order, until it finds zero equivalence:
1. Substitute the solution for `f` in the original equation. This only
works if ``ode`` is solved for `f`. It will attempt to solve it first
unless ``solve_for_func == False``.
2. Take `n` derivatives of the solution, where `n` is the order of
``ode``, and check to see if that is equal to the solution. This only
works on exact ODEs.
3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time
solving for the derivative of `f` of that order (this will always be
possible because `f` is a linear operator). Then back substitute each
derivative into ``ode`` in reverse order.
This function returns a tuple. The first item in the tuple is ``True`` if
the substitution results in ``0``, and ``False`` otherwise. The second
item in the tuple is what the substitution results in. It should always
be ``0`` if the first item is ``True``. Note that sometimes this function
will ``False``, but with an expression that is identically equal to ``0``,
instead of returning ``True``. This is because
:py:meth:`~sympy.simplify.simplify.simplify` cannot reduce the expression
to ``0``. If an expression returned by this function vanishes
identically, then ``sol`` really is a solution to ``ode``.
If this function seems to hang, it is probably because of a hard
simplification.
To use this function to test, test the first item of the tuple.
Examples
========
>>> from sympy import Eq, Function, checkodesol, symbols
>>> x, C1 = symbols('x,C1')
>>> f = Function('f')
>>> checkodesol(f(x).diff(x), Eq(f(x), C1))
(True, 0)
>>> assert checkodesol(f(x).diff(x), C1)[0]
>>> assert not checkodesol(f(x).diff(x), x)[0]
>>> checkodesol(f(x).diff(x, 2), x**2)
(False, 2)
"""
if not isinstance(ode, Equality):
ode = Eq(ode, 0)
if func is None:
try:
_, func = _preprocess(ode.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
funcs = reduce(set.union, funcs, set())
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkodesol for this case.')
func = funcs.pop()
if not isinstance(func, AppliedUndef) or len(func.args) != 1:
raise ValueError(
"func must be a function of one variable, not %s" % func)
if is_sequence(sol, set):
return type(sol)([checkodesol(ode, i, order=order, solve_for_func=solve_for_func) for i in sol])
if not isinstance(sol, Equality):
sol = Eq(func, sol)
x = func.args[0]
s = True
testnum = 0
if order == 'auto':
order = ode_order(ode, func)
if solve_for_func and not (
sol.lhs == func and not sol.rhs.has(func)) and not (
sol.rhs == func and not sol.lhs.has(func)):
try:
solved = solve(sol, func)
if not solved:
raise NotImplementedError
except NotImplementedError:
pass
else:
if len(solved) == 1:
result = checkodesol(ode, Eq(func, solved[0]),
order=order, solve_for_func=False)
else:
result = checkodesol(ode, [Eq(func, t) for t in solved],
order=order, solve_for_func=False)
return result
while s:
if testnum == 0:
# First pass, try substituting a solved solution directly into the
# ODE. This has the highest chance of succeeding.
ode_diff = ode.lhs - ode.rhs
if sol.lhs == func:
s = sub_func_doit(ode_diff, func, sol.rhs)
elif sol.rhs == func:
s = sub_func_doit(ode_diff, func, sol.lhs)
else:
testnum += 1
continue
ss = simplify(s)
if ss:
# with the new numer_denom in power.py, if we do a simple
# expansion then testnum == 0 verifies all solutions.
s = ss.expand(force=True)
else:
s = 0
testnum += 1
elif testnum == 1:
# Second pass. If we cannot substitute | |
self.https:
port = 443
else:
port = 80
if ':' in host:
_host, _port = host.rsplit(':', 1)
try:
port = int(_port)
host = _host
fport = True
except:
pass
return (
fport,
host,
port,
path)
    def get_query(self):
        """Rewrite the client's request path and headers according to the
        module-level tunneling configuration (FQUERY/MQUERY/BQUERY front,
        middle and back query strings, CQUERY replacement rules, RQUERY
        replacement host, RPORT/ADMODE flags, CUSHDRn custom headers).

        Returns:
            tuple: (path, headers, user_agent, host, port, advhost)

        NOTE(review): exact semantics of the F/M/B/C/R query globals are
        config-dependent and not visible in this file -- verify against
        the config loader before changing any of the rewrites below.
        """
        # Normalize a CONNECT target ("host:port") into a URL so that
        # urlparse can pick out the scheme.
        if self.https:
            url = 'https://%s/' % self.path
        else:
            url = self.path
        url_scm, _, _, _, _, _ = urlparse.urlparse(url)
        # FQUERY containing at least two '/' is treated as a CGI-style
        # prefix; strip it (or everything before an embedded 'http/').
        if len(FQUERY.split('/')) > 2:
            cgi_http = 'http/'
            if cgi_http in FQUERY.lower():
                url_cgi = url.split(cgi_http)
                if len(url_cgi) > 1:
                    url = '%s://%s' % (url_scm, url_cgi.pop())
            else:
                url = url.replace(FQUERY, '')
        if len(MQUERY.split('/')) > 2:
            url = url.replace(MQUERY, '')
        if len(BQUERY.split('/')) > 2:
            url = url.replace(BQUERY, '')
        # length of "scheme://" prefix
        url_len = len(url_scm) + 3
        url_path = url[url_len:]
        # CQUERY is a '|'-separated list of "old>new" path replacements
        if CQUERY:
            cquery_list = CQUERY.split('|')
            for cquery in cquery_list:
                try:
                    old, new = cquery.split('>')
                    url_path = url_path.replace(old, new)
                except:
                    pass
        # First pass: parse host/port out of the front-query-decorated path
        fport, host, port, path = self.get_path('%s%s' % (FQUERY, url_path))
        advhost = host
        # Re-assemble the path with the middle/back query strings and
        # re-parse, so injected decorations survive a second get_path().
        if fport and not RPORT:
            path = '%s:%s%s%s%s' % (host,
                                    port,
                                    MQUERY,
                                    path,
                                    BQUERY)
        else:
            path = '%s%s%s%s' % (host,
                                 MQUERY,
                                 path,
                                 BQUERY)
        fport, host, port, path = self.get_path(path)
        if self.https:
            # CONNECT requests always carry an explicit host:port target
            fport = True
            path = '%s:%s' % (host, port)
        else:
            if self.phost and self.pport or ADMODE:
                # upstream proxy (or ADMODE) needs an absolute URI
                if RQUERY:
                    if MQUERY.startswith('/'):
                        path = '%s%s%s' % (url[:url_len], RQUERY, path)
                    else:
                        path = '%s%s%s%s' % (url[:url_len],
                                             RQUERY,
                                             MQUERY,
                                             path)
                elif fport and not RPORT:
                    path = '%s%s:%s%s' % (url[:url_len],
                                          host,
                                          port,
                                          path)
                else:
                    path = '%s%s%s' % (url[:url_len], host, path)
            else:
                # direct connection: request line uses the origin-form path
                _, path = path.split('/', 1)
                path = '/%s' % path
        # proxy-connection is only meaningful when forwarding to a proxy
        cur_header = 'proxy-connection'
        if cur_header in self.headers and not self.phost and not self.pport:
            del self.headers[cur_header]
        # plain HTTP without a SOCKS proxy type: force connection close
        cur_header = 'connection'
        if not self.https and not PTYPE:
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = 'close'
        # rebuild the Host header from the parsed target
        cur_header = 'host'
        if cur_header in self.headers:
            del self.headers[cur_header]
        if fport and not RPORT and not self.https:
            self.headers[cur_header] = '%s:%s' % (host, port)
        else:
            self.headers[cur_header] = host
        # RQUERY overrides Host and advertises the real target via
        # X-Online-Host (a common front-query tunneling trick)
        if RQUERY:
            cur_header = 'host'
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = RQUERY
            cur_header = 'x-online-host'
            if cur_header in self.headers:
                del self.headers[cur_header]
            if fport and not self.https:
                self.headers[cur_header] = '%s:%s' % (host, port)
            else:
                self.headers[cur_header] = '%s' % host
        if ADMODE:
            cur_header = 'host'
            if cur_header in self.headers:
                if RQUERY:
                    del self.headers[cur_header]
                    self.headers[cur_header] = '%s' % RQUERY
            cur_header = 'x-online-host'
            if cur_header in self.headers:
                del self.headers[cur_header]
            if fport and not self.https:
                self.headers[cur_header] = '%s:%s' % (advhost, port)
            else:
                self.headers[cur_header] = '%s' % advhost
        elif self.phost and self.pport:
            # strip query decorations from the advertised host before
            # rewriting the current header for the upstream proxy
            del self.headers[cur_header]
            advhost = advhost.replace(FQUERY, '').replace(MQUERY, '').replace(BQUERY, '')
            if fport and not self.https:
                self.headers[cur_header] = '%s:%s' % (advhost, port)
            else:
                self.headers[cur_header] = '%s' % advhost
        # CUSHDRn/VALHDRn pairs: a name without a value deletes the
        # header, a name with a value replaces it
        if CUSHDR0 and not VALHDR0:
            cur_header = CUSHDR0.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
        if CUSHDR0 and VALHDR0:
            cur_header = CUSHDR0.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = VALHDR0
        if CUSHDR1 and not VALHDR1:
            cur_header = CUSHDR1.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
        if CUSHDR1 and VALHDR1:
            cur_header = CUSHDR1.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = VALHDR1
        if CUSHDR2 and not VALHDR2:
            cur_header = CUSHDR2.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
        if CUSHDR2 and VALHDR2:
            cur_header = CUSHDR2.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = VALHDR2
        if CUSHDR3 and not VALHDR3:
            cur_header = CUSHDR3.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
        if CUSHDR3 and VALHDR3:
            cur_header = CUSHDR3.lower()
            if cur_header in self.headers:
                del self.headers[cur_header]
            self.headers[cur_header] = VALHDR3
        # RPORT: remove explicit port numbers everywhere
        if RPORT:
            cur_port = ':%s' % port
            path = path.replace(cur_port, '')
            cur_list = ('host', 'x-online-host')
            for cur_header in cur_list:
                if cur_header in self.headers and ':' in self.headers[cur_header]:
                    rhost, _ = self.headers[cur_header].split(':')
                    del self.headers[cur_header]
                    self.headers[cur_header] = rhost
        header = self.headers
        # fall back to a generic UA if the client did not send one
        uahdr = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)'
        cur_header = 'user-agent'
        if cur_header in self.headers:
            uahdr = self.headers[cur_header]
        self.del_garbage()
        return (
            path,
            header,
            uahdr,
            host,
            port,
            advhost)
def del_garbage(self):
del self.command
del self.path
del self.headers
del self.https
del self.phost
del self.pport
class ProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
def __getattr__(self, item):
if item.startswith('do_'):
return self.do_COMMAND
def do_COMMAND(self):
self.get_urlcheck()
self.get_headercheck()
self.get_recv_headers()
self.get_proxy()
query = QueryHandler(self.command, self.path, self.headers, self.https, self.phost, self.pport)
self.path, self.headers, self.uahdr, self.host, self.port, self.advhost = query.get_query()
self.get_newline()
self.get_requestline()
self.get_injectline()
self.get_send_inject()
self.get_send_headers()
soc = self.proxy_sock()
try:
if self.connect_to(soc, self.host, self.port, self.advhost):
data = ra('%s%s' % (self.get_injectline(), self.newline)).encode('hex')
for header, value in self.headers.items():
data += ra('%s: %s%s' % (str(header).title(), value, self.newline)).encode('hex')
post_header = 'content-length'
if post_header in self.headers:
data += ra(self.newline).encode('hex')
data += self.rfile.read(int(self.headers[post_header])).encode('hex')
data += ra(self.newline).encode('hex')
data += ra('%s%s' % (self.newline, self.get_send_end())).encode('hex')
data = data.decode('hex')
while data:
byte = soc.send(data)
data = data[byte:]
self.get_response_data(soc)
self.send_connection_close(soc)
self.del_garbage()
except socket.error as msg:
self.send_connection_error(msg)
self.send_connection_close(soc)
return
except:
return
    def do_CONNECT(self):
        """Handle HTTPS tunneling requests.

        Only serviced when the RHTTPS config flag is set; otherwise the
        client gets a 501.  On success, replies with a synthetic
        "200 Connection Established" and then relays bytes in both
        directions via get_response_data().
        """
        if RHTTPS:
            self.get_urlcheck()
            self.get_headercheck()
            self.get_recv_headers()
            self.get_proxy()
            query = QueryHandler(self.command, self.path, self.headers, self.https, self.phost, self.pport)
            self.path, self.headers, self.uahdr, self.host, self.port, self.advhost = query.get_query()
            self.get_newline()
            self.get_requestline()
            self.get_injectline()
            self.get_send_inject()
            self.get_send_headers()
            soc = self.proxy_sock()
            try:
                if self.connect_to(soc, self.host, self.port, self.advhost):
                    # synthetic reply; Proxy-Agent is built from the app
                    # name (spaces stripped) and major version
                    data = '%s 200 Connection Established\r\nProxy-Agent: %s/%s' % (self.request_version, Info('name').get_info().replace(' ', ''), Info('ver').get_info()[:3])
                    self.send_response_data('%s\r\n' % data)
                    self.send_response_data('\r\n')
                    self.get_response_header(data)
                    self.get_response_data(soc)
                    self.send_connection_close(soc)
                    self.del_garbage()
            except socket.error as msg:
                self.send_connection_error(msg)
                self.send_connection_close(soc)
                return
            except:
                return
        else:
            self.send_connection_error((501, 'method not allowed'))
            self.connection.close()
            return
def get_urlcheck(self):
self.https = False
if self.command == 'CONNECT':
self.https = True
def get_headercheck(self):
header_check = {}
for header, value in self.headers.items():
if header.find('\t') == -1 and header.find('\t') == -1:
header_check[str(header).lower()] = value
self.headers = header_check
    def get_proxy(self):
        """Select an upstream proxy from the PHOST/PPORT configuration.

        Two config styles are supported:
        - PHOST as a '>'-separated list of "host:port" (optionally
          "host:port@user:pass") entries with PPORT unset: one entry is
          picked at random;
        - plain PHOST + PPORT: used directly.

        Sets self.phost/self.pport (empty/0 when no proxy) and
        self.puser/self.ppass (None when no credentials).
        """
        self.phost = ''
        self.pport = 0
        self.puser = None
        self.ppass = None
        if ':' in PHOST and not PPORT:
            plist = PHOST.split('>')
            count = len(plist)
            # NOTE(review): this loop always exits after the first pass
            # (both branches break) -- the count bookkeeping appears to be
            # a leftover; decompiled-looking control flow, keep as-is.
            while 1:
                count -= 1
                if count >= 0:
                    # pick one proxy entry at random
                    plist = plist[random.randint(0, len(plist) - 1)]
                    if '@' in plist and plist:
                        try:
                            self.puser, self.ppass = plist.split('@')[1].split(':')
                            plist = plist.split('@')[0]
                        except:
                            pass
                    if ':' in plist and plist:
                        try:
                            self.phost, self.pport = plist.split(':')
                            self.pport = int(self.pport)
                        except:
                            pass
                    break
                else:
                    break
        else:
            if PHOST and PPORT:
                self.phost, self.pport = PHOST, PPORT
        return
def proxy_sock(self):
if IQUERY and self.https or self.https:
data = ra('%s%s' % (self.get_injectline(), self.newline))
for header, value in self.headers.items():
data += ra('%s: %s%s' % (str(header).title(), value, self.newline))
soc = socksocket(headers=data, newline=self.newline)
else:
soc = socksocket(newline=self.newline)
if self.phost and self.pport:
soc.setproxy(PTYPE, self.phost, self.pport, rdns=True, username=self.puser, password=<PASSWORD>, useragent=self.uahdr)
return soc
def connect_to(self, soc, host, port, advhost):
try:
if ADMODE:
host, port = advhost, port
soc.setblocking(1)
soc.connect((host, port))
return 1
except socket.error as msg:
self.send_connection_error(msg)
self.send_connection_close(soc)
return 0
except:
return 0
def get_newline(self):
self.newline = ['\r\n', '\n'][ILINE]
def get_requestline(self):
if RHTTP == 1:
self.request_version = 'HTTP/1.0'
else:
if RHTTP == 2:
self.request_version = 'HTTP/1.1'
self.requestline = '%s %s %s' % (self.command, self.path, self.request_version)
    def get_injectline(self):
        """Prefix the real request line with a decoy request when header
        injection is configured (IQUERY is the decoy "host/path", IMETHOD
        indexes the verb, ISPLIT counts the separator newlines).

        Returns the (possibly prefixed) request line; also caches
        self.injectline and self.splitline for the logging helpers.
        """
        if IQUERY:
            meth = [
                'HEAD',
                'GET',
                'POST',
                'DELETE',
                'CONNECT',
                'OPTIONS',
                'TRACE',
                'PUT'][IMETHOD]
            if '/' in IQUERY:
                host, path = IQUERY.split('/', 1)
                path = '/%s' % path
            else:
                host = IQUERY
                path = '/'
            # absolute URI form when forwarding through a proxy / ADMODE
            if self.phost and self.pport or ADMODE:
                path = 'http://%s%s' % (host, path)
            self.splitline = self.newline * 3
            if ISPLIT:
                self.splitline = self.newline * ISPLIT
            self.injectline = '%s %s HTTP/1.1%sHost: %s%s' % (meth,
                                                              path,
                                                              self.newline,
                                                              host, self.splitline)
            return '%s%s' % (self.injectline, self.requestline)
        return self.requestline
def get_send_end(self):
if IQUERY:
return self.newline
return ''
def get_recv_headers(self):
self.send_connection_logger('+++++[ Receive Request ]+++++\r\nFrom Address - %s:%s\r\n%s\r\n' % (self.client_address[0], self.client_address[1], self.requestline))
for header, value in self.headers.items():
self.send_connection_logger('%s: %s\r\n' % (str(header).title(), value))
self.send_connection_logger('\r\n')
    def get_send_inject(self):
        """Log a (redacted -- "Loocked") summary of the injection payload
        when header injection is enabled.

        NOTE(review): ``self.splitline[0]`` is a single character, so the
        split produces one element and the loop logs exactly one line;
        this looks like it was meant to iterate ``self.injectline`` --
        confirm before changing.
        """
        if IQUERY:
            self.send_connection_logger('+++++[ Send Injection ]+++++\r\n')
            if self.phost and self.pport:
                self.send_connection_logger('Using Proxy - Loocked\r\n')
            else:
                if ADMODE:
                    self.send_connection_logger('Using Host - Loocked\r\n')
                else:
                    self.send_connection_logger('Using Server - Loocked\r\n')
            for inject in self.splitline[0].split(self.newline):
                self.send_connection_logger('No: Loocked\r\n')
            self.send_connection_logger('\r\n')
def get_send_headers(self):
self.send_connection_logger('+++++[ Send Request ]+++++\r\n')
if self.phost and self.pport:
self.send_connection_logger('Using Proxy - Loocked\r\n')
else:
if ADMODE:
self.send_connection_logger('Using Host - Loocked\r\n')
else:
self.send_connection_logger('Using Server - Loocked\r\n')
self.send_connection_logger('Config: <NAME>\r\n')
for header, value in self.headers.items():
self.send_connection_logger('%s: %s\r\n' % (str(header).title(), value))
self.send_connection_logger('\r\n')
def find_double_newline(self, data):
pos1 = data.find('\n\r\n')
if pos1 >= 0:
pos1 += 3
pos2 = data.find('\n\n')
if pos2 >= 0:
pos2 += 2
if pos1 >= 0:
if pos2 >= 0:
return min(pos1, pos2)
return pos1
else:
return pos2
def get_data_splitter(self, data):
if data.split('\r\n\r\n')[0].split(' ')[0] in ('HTTP/0.9', 'HTTP/1.0', 'HTTP/1.1'):
return 1
return 0
def get_response_header(self, data):
if not self.https:
| |
# <gh_stars>0  (stray repository-metadata marker left by the dataset upload; commented out so the module parses)
from cards import *
from tkinter import ARC, _flatten
from PIL import Image, ImageTk
from aidan_graphics import *
from npc_controls import *
class Hand(object):
    """One hand of poker: deck, community cards, pot, betting rounds and the
    action/closing-action pointers that drive turn order.

    NOTE(review): comments describe only what this code visibly does;
    poker-rule intent is inferred where marked.
    """
    count = 0 # Keep track of hand number for logs
    # Dealer-button image shared by all hands.
    button_sprite = Image.open('resources/button.png').convert("RGB")
    SEATS = 8
    # Betting-round identifiers, in play order.
    PRE_FLOP = 0
    FLOP = 1
    TURN = 2
    RIVER = 3
    SHOWDOWN = 4

    def __init__(self, players, *blinds, ante=0):
        """Start a hand: seat the players who can afford the biggest blind,
        deal two cards each, collect antes and post the blinds in order."""
        self.players = [None for _ in range(Hand.SEATS)]
        self.player_count = 0
        self.deck = Card.new_deck()
        self.community_cards = list()
        self.pot = 0
        self.betting_round = 0
        # Rotate the button with the hand counter.
        self.button = players[Hand.count % len(players)].seat
        # Minimum raise for the whole hand is the largest blind (or 0).
        self.absolute_min_raise = max(*_flatten(blinds), 0)
        for player in players:
            # Only seat players who can cover more than the big blind.
            if player.stack > self.absolute_min_raise:
                self.players[player.seat] = player
                self.player_count += 1
        self.reset_round()
        # Deal initial cards and put in blinds/ante
        for i in range(2):
            for player in players:
                if i == 0:
                    # First pass: clear old cards and collect the ante once.
                    player.reset_cards()
                    player.stack -= ante
                    self.pot += ante
                player.deal_card(self.deck.pop(0))
        # Post each blind from successive seats after the button.
        for blind in _flatten(blinds):
            player = self.players[self.action]
            player.stack -= blind
            self.pot += blind
            self.current_bet[self.action] += blind
            self.closing_action = self.action
            self.action = self.next_player()
        # Initialize hand log -- HANDLE WITH LOG CLASS??
        Hand.count += 1
        self.log = f'Hand #{Hand.count} is being delt to {len(self.players)} players.'

    # Move the "action" onto the next player
    def next_player(self, button=None):
        """Return the next occupied seat (clockwise) with chips remaining.

        When *button* is given the search starts from that seat instead of
        the current action seat; empty and all-in seats are skipped
        recursively.
        """
        if button != None:
            seat = (button + 1) % len(self.players)
        else:
            seat = (self.action + 1) % len(self.players)
        if self.players[seat] == None or self.players[seat].stack == 0:
            return self.next_player(seat)
        return seat

    def previous_player(self, button=None):
        """Counter-clockwise counterpart of next_player()."""
        if button != None:
            seat = (button - 1) % len(self.players)
        else:
            seat = (self.action - 1) % len(self.players)
        if self.players[seat] == None or self.players[seat].stack == 0:
            return self.previous_player(seat)
        return seat

    # Reset variables for a new round of betting
    def reset_round(self):
        """Reset per-round state: action starts left of the button, closing
        action is the last live seat before it, bets are cleared."""
        self.action = self.next_player(self.button)
        self.closing_action = self.previous_player()
        self.current_bet = [0 for _ in self.players]
        self.relative_min_raise = self.absolute_min_raise
        # Share the board with the evaluation helpers.
        Player.community_cards = self.community_cards
        Range.community_cards = self.community_cards

    def progress_game(self):
        """Advance to the next street (flop/turn/river) or run the showdown."""
        self.betting_round += 1
        if self.betting_round == Hand.SHOWDOWN:
            self.showdown()
        else:
            for player in self.players:
                # Credit all-in players with their share of this round's bets.
                if player != None and player.stack == 0:
                    player.sidepot += self.player_count * self.current_bet[player.seat]
            # Burn a card before dealing the street.
            self.deck.pop(0)
            if self.betting_round == Hand.FLOP:
                for _ in range(3):
                    self.community_cards.append(self.deck.pop(0))
            else:
                self.community_cards.append(self.deck.pop(0))
            self.reset_round()

    # NEED LOGGING FOR HAND AND PLAYER
    def bet(self, amount=0):
        """Bet/raise *amount* for the acting player.

        Falls back to an all-in (with side-pot bookkeeping) when the stack
        cannot cover a minimum raise, and to call() when it cannot even
        cover the call price.
        """
        player = self.players[self.action]
        call_price = max(self.current_bet) - self.current_bet[self.action]
        min_raise = call_price + self.relative_min_raise
        if player.stack > call_price:
            if player.stack < min_raise:
                # All-in short of a full raise: freeze the player's side pot.
                amount = player.stack
                player.sidepot = self.pot - sum(self.current_bet)
                player.stack -= amount
                self.pot += amount
                self.current_bet[self.action] += amount
                # NOTE(review): player_count is decremented here for an
                # all-in, not a fold -- confirm this is intended.
                self.player_count -= 1
                if self.closing_action == self.action:
                    self.progress_game()
                else:
                    self.action = self.next_player()
            else:
                # A full raise re-opens the action up to the previous seat.
                amount = max(amount, min_raise)
                player.stack -= amount
                self.pot += amount
                self.current_bet[self.action] += amount
                self.relative_min_raise = amount - call_price
                self.closing_action = self.previous_player()
                self.action = self.next_player()
        else:
            self.call()

    def call(self):
        """Match the current bet (or check); all-in when short, fold when broke."""
        player = self.players[self.action]
        call_price = max(self.current_bet) - self.current_bet[self.action]
        if player.stack > 0:
            if player.stack < call_price:
                # Short call: all-in, credit the side pot.
                amount = player.stack
                player.sidepot += self.pot - sum(self.current_bet)
            else:
                amount = call_price
            player.stack -= amount
            self.pot += amount
            self.current_bet[self.action] += amount
            if self.closing_action == self.action:
                self.progress_game()
            else:
                self.action = self.next_player()
        else:
            self.fold()

    def fold(self):
        """Fold the acting player; a fold with nothing owed acts as a check."""
        player = self.players[self.action]
        call_price = max(self.current_bet) - self.current_bet[self.action]
        if call_price > 0 and player.stack > 0:
            self.players[self.action] = None
            self.player_count -= 1
        # Last player standing wins immediately.
        if self.player_count <= 1:
            self.showdown()
            return None
        if self.closing_action == self.action:
            self.progress_game()
        else:
            self.action = self.next_player()

    def showdown(self):
        """End the hand and distribute the pot among remaining players."""
        self.betting_round = Hand.SHOWDOWN
        showdown_players = [player for player in self.players if player != None]
        self._distribute_pot(showdown_players)

    def _distribute_pot(self, showdown_players):
        """Recursively award the pot: best hand takes its side pot (or the
        whole pot), then the remainder goes to the next best hand."""
        self.winner = max(showdown_players)
        if self.winner.sidepot == 0:
            self.winner.stack += self.pot
        else:
            self.winner.stack += self.winner.sidepot
            self.pot -= self.winner.sidepot
            showdown_players.remove(self.winner)
            self._distribute_pot(showdown_players)

    @staticmethod
    def _chip_map(app, seat):
        """Return (x, y, anchor_in, anchor_out) canvas placement for *seat*."""
        map = [((1/2)*app.width, (27/48)*app.height, 's', 'n'),
               ((1/4)*app.width, (27/48)*app.height, 'sw', 'ne'),
               ((3/16)*app.width, (5/12)*app.height, 'w', 'e'),
               ((1/4)*app.width, (13/48)*app.height, 'nw', 'se'),
               ((1/2)*app.width, (13/48)*app.height, 'n', 's'),
               ((3/4)*app.width, (13/48)*app.height, 'ne', 'sw'),
               ((13/16)*app.width, (5/12)*app.height, 'e', 'w'),
               ((3/4)*app.width, (27/48)*app.height, 'se', 'nw'),]
        return map[seat]

    def _draw_community_cards(self, app, canvas):
        """Draw the board cards centred on the table plus the pot total."""
        padding = 10
        if len(self.community_cards) > 0:
            # Scale all cards from the first card's sprite dimensions.
            sprite = self.community_cards[0].sprite()
            scaling_factor = app.height / 8 / sprite.size[1]
            sprite_x = int(sprite.size[0] * scaling_factor)
            sprite_y = int(sprite.size[1] * scaling_factor)
            for i in range(len(self.community_cards)):
                card = format_image(sprite_x, sprite_y, self.community_cards[i].sprite())
                x_pos = app.width/2 + (i - 2) * (sprite_x + padding)
                canvas.create_image(x_pos, (5/12)*app.height, image=card)
        canvas.create_text(app.width/2, (23/48)*app.height + 2*padding, text=f'Pot: {self.pot}', font='Helvetica 16 bold')

    def draw_cards(self, app, canvas):
        """Draw board and hole cards; reveal everything at showdown."""
        self._draw_community_cards(app, canvas)
        for player in self.players:
            if player != None:
                if self.betting_round == Hand.SHOWDOWN:
                    player._draw_player_info(app, canvas, False)
                else:
                    # Hide non-user cards; highlight the acting player.
                    player._draw_player_info(app, canvas, not player.user, player.seat == self.action)
        if self.betting_round == Hand.SHOWDOWN:
            canvas.create_text(app.width/2, (5/12)*app.height, text=f'Player {self.winner.seat} wins!', font='Helvetica 32 bold', anchor='s')
            canvas.create_text(app.width/2, (5/12)*app.height, text='Press any key to deal next hand.', font='Helvetica 32 bold', anchor='n')

    def draw_chips(self, app, canvas):
        """Draw the dealer button (with per-seat nudges) and each seat's bet."""
        x, y, anchor1, anchor2 = Hand._chip_map(app, self.button)
        # Hand-tuned offsets so the button clears the cards at each seat.
        if self.button in [0,1,7]:
            x -= (1/16)*app.width
            y += (1/48)*app.height
        elif self.button in [3,4,5]:
            x += (1/15)*app.width
            y -= (1/64)*app.height
        elif self.button == 6:
            x += (1/26)*app.width
            y -= (1/12)*app.height
        else:
            x -= (1/26)*app.width
            y -= (1/12)*app.height
        scaling_factor = app.height / 32 / Hand.button_sprite.size[1]
        button_size = int(Hand.button_sprite.size[0] * scaling_factor)
        button_sprite = format_image(button_size, button_size, Hand.button_sprite)
        canvas.create_image(x, y, image=button_sprite)
        for i in range(len(self.players)):
            if self.players[i] != None or self.current_bet[i] != 0:
                x, y, anchor1, anchor2 = Hand._chip_map(app, i)
                canvas.create_text(x, y, text=self.current_bet[i], font='Helvetica 16 bold')

    def init_user_controls(self, app):
        """Create the five action buttons (check/bet and fold/call/raise rows)."""
        padding = 50
        call_price = max(self.current_bet) - self.current_bet[self.action]
        min_raise = call_price + self.relative_min_raise
        self.amount = min_raise
        # Row 0: check/bet (no bet to face); row 1: fold/call/raise.
        button_coords = [[None]*2, [None]*3]
        for i in range(len(button_coords)):
            button_width = app.width//3//len(button_coords[i]) + padding
            for j in range(len(button_coords[i])):
                button_coords[i][j] = (padding + j*button_width, (5/6)*app.height + padding, 0 + (j+1)*button_width, app.height - padding)
        self.buttons = [AidanButton(button_coords[0][0], text='Check', function=self.call),
                        AidanButton(button_coords[0][1], function=self.bet, parameters=min_raise),
                        AidanButton(button_coords[1][0], text='Fold', function=self.fold),
                        AidanButton(button_coords[1][1], function=self.call),
                        AidanButton(button_coords[1][2], function=self.bet, parameters=min_raise)]
        self.update_user_controls()

    def update_user_controls(self):
        """Refresh button labels/amounts and toggle the row matching the
        current situation (facing a bet or not); disable all buttons when it
        is not the human player's turn."""
        player = self.players[self.action]
        if self.betting_round == Hand.SHOWDOWN or not player.user:
            for i in range(len(self.buttons)):
                self.buttons[i].turn_off()
        else:
            call_price = max(self.current_bet) - self.current_bet[self.action]
            min_raise = call_price + self.relative_min_raise
            self.amount = max(self.amount, min_raise)
            # Cap bet/raise amounts at the player's remaining stack.
            self.buttons[1].parameters = min(self.amount, player.stack)
            self.buttons[4].parameters = min(self.amount, player.stack)
            self.buttons[1].text = f'Bet {self.buttons[1].parameters}'
            self.buttons[3].text = f'Call {min(call_price, player.stack)}'
            self.buttons[4].text = f'Raise {self.buttons[4].parameters}'
            if call_price == 0:
                for i in range(2):
                    self.buttons[i].turn_on()
                for j in range(2,len(self.buttons)):
                    self.buttons[j].turn_off()
            else:
                for i in range(2):
                    self.buttons[i].turn_off()
                for j in range(2,len(self.buttons)):
                    self.buttons[j].turn_on()

    def __repr__(self):
        output = f'Pot: {self.pot} Board: {self.community_cards}'
        for i in range(len(self.players)):
            output += f'\n{self.players[i]}'
            if self.players[i] != None:
                if i == self.button: output += ' BUTTON'
                if i == self.action: output += ' <- ACTION'
        return output
class Player(object):
    """A seated poker player: chip stack, seat index, hole cards, side pot."""

    # Shared view of the board; reassigned by the Hand each betting round.
    community_cards = list()

    def __init__(self, stack, seat, user=False):
        """Create a player with *stack* chips at *seat*; *user* marks the human."""
        self.stack = stack
        self.seat = seat
        self.hole_cards = list()
        self.user = user
        self.sidepot = 0  # chips guaranteed to this player when all-in

    def poker_hand(self):
        """Evaluate the best hand from the community cards plus hole cards."""
        return PokerHand(Player.community_cards, self.hole_cards)

    def deal_card(self, card):
        """Add *card* to the hole cards, kept sorted high-to-low."""
        self.hole_cards.append(card)
        self.hole_cards.sort(reverse=True)

    # Rich comparisons rank players by their evaluated poker hand.
    # Fixed: return NotImplemented (not an implicit None) for non-Player
    # operands so Python falls back to the reflected operation / default
    # comparison instead of silently using None as the result.
    def __eq__(self, other):
        if isinstance(other, Player):
            return self.poker_hand() == other.poker_hand()
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, Player):
            return self.poker_hand() < other.poker_hand()
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, Player):
            return self.poker_hand() <= other.poker_hand()
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, Player):
            return self.poker_hand() > other.poker_hand()
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, Player):
            return self.poker_hand() >= other.poker_hand()
        return NotImplemented

    def reset_cards(self):
        """Discard the hole cards (start of a new hand)."""
        self.hole_cards = list()

    def _seat_map(self, app):
        """Return (x, y, anchor) canvas placement for this player's seat."""
        map = [((1/2)*app.width, (2/3)*app.height, 'n'),
               ((1/4)*app.width, (2/3)*app.height, 'ne'),
               ((1/10)*app.width, (5/12)*app.height, 'e'),
               ((1/4)*app.width, (1/7)*app.height, 'se'),
               ((1/2)*app.width, (1/7)*app.height, 's'),
               ((3/4)*app.width, (1/7)*app.height, 'sw'),
               ((9/10)*app.width, (5/12)*app.height, 'w'),
               ((3/4)*app.width, (2/3)*app.height, 'nw'),]
        return map[self.seat]

    def _draw_player_info(self, app, canvas, face_down=True, indicator=False):
        """Draw this player's two cards and stack box on *canvas*.

        *face_down* hides the card faces; *indicator* highlights the stack
        box (yellow outline) when it is this player's turn to act.
        """
        x, y, anchor = self._seat_map(app)
        padding = 10
        # Scale card sprites relative to the window height.
        sprite = self.hole_cards[0].sprite()
        scaling_factor = app.height / 8 / sprite.size[1]
        sprite_x = int(sprite.size[0] * scaling_factor)
        sprite_y = int(sprite.size[1] * scaling_factor)
        card1 = format_image(sprite_x, sprite_y, self.hole_cards[0].sprite(face_down))
        card2 = format_image(sprite_x, sprite_y, self.hole_cards[1].sprite(face_down))
        canvas.create_image(x - (sprite_x/2 + padding), y, image=card1)#, anchor=anchor)
        canvas.create_image(x + (sprite_x/2 + padding), y, image=card2)#, anchor=anchor)
        if indicator:
            outline = 'yellow'
        else:
            outline = 'black'
        stack_box = (x - (sprite_x + padding), y + sprite_y/2 + padding,
                     x + (sprite_x + padding), y + sprite_y/2 + 3*padding)
        canvas.create_rectangle(stack_box, fill='white', width=3, outline=outline)
        canvas.create_text(x, y + 2*padding + sprite_y/2, text=f'Player {self.seat}: {self.stack}', font='Helvetica 16 bold')

    def __repr__(self):
        return f'Player(Seat: {self.seat}, Stack: {self.stack}, Cards: {self.hole_cards})'
def draw_table(app, canvas):
thickness = 10
top_rail = ((1/4)*app.width-1, (1/6)*app.height,
(3/4)*app.width+1, (1/6)*app.height)
bottom_rail = ((1/4)*app.width-1, (2/3)*app.height,
(3/4)*app.width+1, (2/3)*app.height)
left_rail = ((1/8)*app.width, (1/6)*app.height,
(3/8)*app.width, (2/3)*app.height)
right_rail = ((5/8)*app.width, (1/6)*app.height,
(7/8)*app.width, (2/3)*app.height)
# Color in the table
canvas.create_arc(left_rail, fill='dark green', width=0, start=90, extent=180)
| |
<gh_stars>10-100
from amitools.vamos.error import *
from amitools.vamos.log import log_mem_alloc
from amitools.vamos.label import LabelRange, LabelStruct
from amitools.vamos.astructs import AccessStruct
class Memory:
    """A single allocated region: address, size, optional label and accessor."""

    def __init__(self, addr, size, label, access):
        self.addr = addr
        self.size = size
        self.label = label
        self.access = access

    def __str__(self):
        # Prefer the label's own description when one was attached.
        if self.label is None:
            return "[@%06x +%06x %06x]" % (self.addr, self.size, self.addr + self.size)
        return str(self.label)
class MemoryChunk:
    """Node of the doubly linked free list: one contiguous span of free memory."""

    def __init__(self, addr, size):
        self.addr = addr
        self.size = size
        # Free-list neighbours; patched in by MemoryAlloc's list operations.
        self.next = None
        self.prev = None

    def __str__(self):
        return "[@%06x +%06x %06x]" % (self.addr, self.size, self.addr + self.size)

    def does_fit(self, size):
        """Check whether an allocation of *size* bytes fits in this chunk.

        Returns < 0 if it does not fit, 0 for an exact fit, and > 0 for the
        number of bytes that would be left over.
        """
        return self.size - size
class MemoryAlloc:
def __init__(self, mem, addr=0, size=0, label_mgr=None):
    """Set up an allocator over the *mem* interface from *addr*, *size* bytes.

    When *size* is 0 the remaining RAM after *addr* is managed.  When
    *label_mgr* is given, labels are created for allocations.
    """
    if size == 0:
        # No size given: manage everything from addr to the end of RAM.
        size = mem.get_ram_size_kib() * 1024 - addr
    if addr == 0:
        # Never hand out address 0, so an allocation can't look like NULL.
        addr = 4
        size -= 4
    self.mem = mem
    self.addr = addr
    self.size = size
    self.label_mgr = label_mgr
    # compat link for older code that expects .access
    self.access = mem
    self.addrs = {}
    self.mem_objs = {}
    # the free list starts out as one chunk covering the whole range
    self.free_bytes = size
    self.free_first = MemoryChunk(addr, self.free_bytes)
    self.free_entries = 1
@classmethod
def for_machine(cls, machine):
    """Build an allocator covering the machine's RAM above its reserved area."""
    mem = machine.get_mem()
    ram_begin = machine.get_ram_begin()
    label_mgr = machine.get_label_mgr()
    return cls(mem, addr=ram_begin, label_mgr=label_mgr)
def get_mem(self):
    # Underlying memory interface passed to the constructor.
    return self.mem

def get_addr(self):
    # First managed address (never 0; see __init__).
    return self.addr

def get_size(self):
    # Total number of managed bytes.
    return self.size

def get_label_mgr(self):
    # Label manager, or None when labelling is disabled.
    return self.label_mgr

def get_free_bytes(self):
    # Bytes currently available for allocation.
    return self.free_bytes

def is_all_free(self):
    # True when no allocation is outstanding.
    return self.size == self.free_bytes
def _find_best_chunk(self, size):
"""find best chunk that could take the given alloc
return: index of chunk in free list or -1 if none found + bytes left in chunk
"""
chunk = self.free_first
while chunk != None:
left = chunk.does_fit(size)
# exact match
if left == 0:
return (chunk, 0)
# potential candidate: has some bytes left
elif left > 0:
# Don't make such a hassle. Return the first one that fits.
# This function takes too much time.
return (chunk, left)
chunk = chunk.next
# nothing found?
return (None, -1)
def _remove_chunk(self, chunk):
next = chunk.next
prev = chunk.prev
if chunk == self.free_first:
self.free_first = next
if next != None:
next.prev = prev
if prev != None:
prev.next = next
self.free_entries -= 1
def _replace_chunk(self, old_chunk, new_chunk):
next = old_chunk.next
prev = old_chunk.prev
if old_chunk == self.free_first:
self.free_first = new_chunk
if next != None:
next.prev = new_chunk
if prev != None:
prev.next = new_chunk
new_chunk.next = next
new_chunk.prev = prev
def _insert_chunk(self, chunk):
    """Insert *chunk* into the free list, keeping it sorted by address."""
    cur = self.free_first
    last = None
    addr = chunk.addr
    while cur != None:
        # fits right before the current chunk
        if addr < cur.addr:
            break
        last = cur
        cur = cur.next
    # insert after last but before cur
    if last == None:
        # becomes the new head of the free list
        self.free_first = chunk
    else:
        last.next = chunk
        chunk.prev = last
    if cur != None:
        chunk.next = cur
        cur.prev = chunk
    self.free_entries += 1
def _merge_chunk(self, a, b):
    """Merge adjacent free chunks *a* and *b*; return the merged chunk.

    Only merges when *a* ends exactly where *b* begins; returns None
    otherwise (the chunks are left untouched).
    """
    if a.addr + a.size != b.addr:
        return None
    merged = MemoryChunk(a.addr, a.size + b.size)
    # splice the merged chunk into the list in place of the a/b pair
    prev = a.prev
    if prev is not None:
        prev.next = merged
    merged.prev = prev
    nxt = b.next
    if nxt is not None:
        nxt.prev = merged
    merged.next = nxt
    if self.free_first is a:
        self.free_first = merged
    # two chunks became one
    self.free_entries -= 1
    return merged
def _stat_info(self):
num_allocs = len(self.addrs)
return "(free %06x #%d) (allocs #%d)" % (
self.free_bytes,
self.free_entries,
num_allocs,
)
def alloc_mem(self, size, except_on_fail=True):
    """allocate memory and return addr or 0 if no more memory"""
    # align size to 4 bytes
    size = (size + 3) & ~3
    # find best free chunk
    chunk, left = self._find_best_chunk(size)
    # out of memory?
    if chunk == None:
        if except_on_fail:
            self.dump_orphans()
            log_mem_alloc.error("[alloc: NO MEMORY for %06x bytes]" % size)
            raise VamosInternalError("[alloc: NO MEMORY for %06x bytes]" % size)
        return 0
    # remove chunk from free list
    # is something left?
    addr = chunk.addr
    if left == 0:
        self._remove_chunk(chunk)
    else:
        # partial use: the tail of the chunk stays on the free list
        left_chunk = MemoryChunk(addr + size, left)
        self._replace_chunk(chunk, left_chunk)
    # add to valid allocs map
    self.addrs[addr] = size
    self.free_bytes -= size
    # erase memory
    self.mem.clear_block(addr, size, 0)
    log_mem_alloc.info(
        "[alloc @%06x-%06x: %06x bytes] %s",
        addr,
        addr + size,
        size,
        self._stat_info(),
    )
    # free chunks always start 4-aligned, so a misaligned result means the
    # free list itself is corrupted
    if addr % 4:
        raise VamosInternalError(
            "Memory pool is invalid, return address not aligned by a long word"
        )
    return addr
def free_mem(self, addr, size):
    """Return the allocation at *addr* to the free list.

    *size* must match the size recorded by alloc_mem() (after 4-byte
    alignment).

    Raises:
        VamosInternalError: if *addr* was never handed out by alloc_mem().
    """
    # first check if its a right alloc
    if addr not in self.addrs:
        raise VamosInternalError("Invalid Free'd Memory at %06x" % addr)
    # align size to 4 bytes, mirroring alloc_mem()
    size = (size + 3) & ~3
    real_size = self.addrs[addr]
    assert size == real_size
    # remove from valid allocs
    del self.addrs[addr]
    # create a new free chunk and keep the free list address-sorted
    chunk = MemoryChunk(addr, real_size)
    self._insert_chunk(chunk)
    # try to merge with prev/next neighbours to limit fragmentation
    prev = chunk.prev
    if prev != None:
        new_chunk = self._merge_chunk(prev, chunk)
        if new_chunk != None:
            log_mem_alloc.debug(
                "merged: %s + this=%s -> %s", prev, chunk, new_chunk
            )
            chunk = new_chunk
    next = chunk.next
    if next != None:
        new_chunk = self._merge_chunk(chunk, next)
        if new_chunk != None:
            log_mem_alloc.debug(
                "merged: this=%s + %s -> %s", chunk, next, new_chunk
            )
    # correct free bytes
    self.free_bytes += size
    # (removed an unused num_allocs local; _stat_info already reports it)
    log_mem_alloc.info(
        "[free @%06x-%06x: %06x bytes] %s",
        addr,
        addr + size,
        size,
        self._stat_info(),
    )
def get_range_by_addr(self, addr):
if addr in self.addrs:
return self.addrs[addr]
else:
return None
def dump_mem_state(self):
    """Write one debug log line per free-list chunk (debug aid)."""
    index = 0
    node = self.free_first
    while node is not None:
        log_mem_alloc.debug("dump #%02d: %s" % (index, node))
        node = node.next
        index += 1
def _dump_orphan(self, addr, size):
    """Warn about a region that is neither free nor a tracked allocation."""
    log_mem_alloc.warning("orphan: [@%06x +%06x %06x]" % (addr, size, addr + size))
    if self.label_mgr is None:
        return
    # also report any labels overlapping the orphaned region
    for label in self.label_mgr.get_intersecting_labels(addr, size):
        log_mem_alloc.warning("-> %s", label)
def dump_orphans(self):
    """Log every gap between free chunks -- i.e. all still-allocated regions."""
    last = self.free_first
    # orphan at begin?
    if last.addr != self.addr:
        addr = self.addr
        size = last.addr - addr
        self._dump_orphan(addr, size)
    # walk along free list: any gap between two free chunks is in use
    cur = last.next
    while cur != None:
        addr = last.addr + last.size
        size = cur.addr - addr
        self._dump_orphan(addr, size)
        last = cur
        cur = cur.next
    # orphan at end?
    addr = last.addr + last.size
    end = self.addr + self.size
    if addr != end:
        self._dump_orphan(addr, end - addr)
# ----- convenience functions with label creation -----
def get_memory(self, addr):
if addr in self.mem_objs:
return self.mem_objs[addr]
else:
return None
# memory
def alloc_memory(self, name, size, add_label=True, except_on_failure=True):
    """Allocate *size* bytes, optionally labelled *name*; return a Memory.

    Returns None when allocation fails and *except_on_failure* is False.
    """
    addr = self.alloc_mem(size, except_on_failure)
    if addr == 0:
        return None
    if add_label and self.label_mgr is not None:
        label = LabelRange(name, addr, size)
        self.label_mgr.add_label(label)
    else:
        label = None
    mem = Memory(addr, size, label, self.mem)
    log_mem_alloc.info("alloc memory: %s", mem)
    # track it so get_memory()/free_memory() can find it later
    self.mem_objs[addr] = mem
    return mem
def free_memory(self, mem):
    """Release a Memory previously returned by alloc_memory()."""
    log_mem_alloc.info("free memory: %s", mem)
    # label is None when allocated with add_label=False or no label manager
    if mem.label != None:
        self.label_mgr.remove_label(mem.label)
    self.free_mem(mem.addr, mem.size)
    del self.mem_objs[mem.addr]
# struct
def alloc_struct(self, name, struct, size=None, add_label=True):
    """Allocate memory shaped like *struct*; return a Memory with struct access.

    *size* defaults to the struct's own size but may be given explicitly.
    """
    if size is None:
        size = struct.get_size()
    addr = self.alloc_mem(size)
    if self.label_mgr is not None and add_label:
        label = LabelStruct(name, addr, struct)
        self.label_mgr.add_label(label)
    else:
        label = None
    # field-level access wrapper instead of the raw memory interface
    access = AccessStruct(self.mem, struct, addr)
    mem = Memory(addr, size, label, access)
    log_mem_alloc.info("alloc struct: %s", mem)
    self.mem_objs[addr] = mem
    return mem
def map_struct(self, name, addr, struct):
    """Wrap existing memory at *addr* with struct access (no allocation).

    The result is not added to mem_objs, so it must not be passed to
    free_struct().
    """
    size = struct.get_size()
    access = AccessStruct(self.mem, struct, addr)
    if self.label_mgr is not None:
        # reuse whatever label already covers this address, if any
        label = self.label_mgr.get_label(addr)
    else:
        label = None
    mem = Memory(addr, size, label, access)
    log_mem_alloc.info("map struct: %s", mem)
    return mem
def free_struct(self, mem):
    """Release a struct allocated with alloc_struct().

    Removes the label (when one was created), frees the memory and drops
    the bookkeeping entry.
    """
    log_mem_alloc.info("free struct: %s", mem)
    # Guard on the label itself, not the label manager: alloc_struct()
    # with add_label=False leaves mem.label as None, and the old check
    # would then call remove_label(None).  Mirrors free_memory().
    if mem.label is not None:
        self.label_mgr.remove_label(mem.label)
    self.free_mem(mem.addr, mem.size)
    del self.mem_objs[mem.addr]
# cstr
def alloc_cstr(self, name, cstr):
    """Allocate and write a NUL-terminated C string; return its Memory."""
    # +1 for the terminating NUL byte
    size = len(cstr) + 1
    addr = self.alloc_mem(size)
    if self.label_mgr is not None:
        label = LabelRange(name, addr, size)
        self.label_mgr.add_label(label)
    else:
        label = None
    self.mem.w_cstr(addr, cstr)
    mem = Memory(addr, size, label, self.mem)
    log_mem_alloc.info("alloc c_str: %s", mem)
    self.mem_objs[addr] = mem
    return mem
def free_cstr(self, mem):
    """Release a C string allocated with alloc_cstr().

    Guards on the label itself (not just the label manager) so a missing
    label is never passed to remove_label(); consistent with free_memory().
    """
    log_mem_alloc.info("free c_str: %s", mem)
    if mem.label is not None:
        self.label_mgr.remove_label(mem.label)
    self.free_mem(mem.addr, mem.size)
    del self.mem_objs[mem.addr]
# bstr
def alloc_bstr(self, name, bstr):
size = len(bstr) + 2 # front: count, end: extra zero for | |
#!/usr/bin/env python
from unicorn import *
from unicorn.x86_const import *
import re, struct, sys, base64, pefile, binascii, hashlib
__author__ = "<NAME> [karttoon] @noottrak"
__email__ = "<EMAIL>"
__version__ = "1.2.1"
__date__ = "21MAY2018"
# v1.2.1 - e864f9735349e14c8c4583fe4c29b1b8eab5fca74855476f91e93349b796d818
# Adjusted regex for Build Number on Variant 4
# v1.2.0 - 006f7fd56fa89fa576fa95221bdf16422d66787ca366e57816ff6d8a957d7de5
# Adjusted the RC4 decryption function to account for blob size.
# Included second regex for version number in decrypted content
# v1.1.9 - 6dcbf652b96a7aea16d0c2e72186173d9345f722c9592e62820bcfe477b2b297
# Added functionality to strip URL from new RTF variant of Hancitor
# v1.1.8 - 85d2ba3f12877bf7e531ec1970909f2ea20f55ba17d27f4a5b65e8e8dc493909
# Added new variant stub and ability to adjust offset for B64 decoding
# v1.1.7 - efe7cfe0c08265e1a4eed68a1e544ba0e98fff98942e0e55941e1899aba71579
# Latest versions Base64 buffer is longer than what is decoded so caused padding issue. Adjusted to account
# v1.1.6
# Newer versions of Unicorn Engine (1.0.0+) changed memory management so I needed to adjust some areas to re-init the memory sections each loop
# v1.1.5 - 62e6e5dc0c3927a8c5d708688ca2b56df93848b15a4c38aab173c5a8384395f9
# Added variant 4 to phase 1 decoder - now doing alternating 4-byte XOR keys
# They fixed variant 3 so it now alternates correctly. Key-pairs will need to be added manually
# v1.1.4 - <KEY>
# Added variant 3 to phase 1 decoder - now doing 4-byte XOR key
# They have a bug in their code so likely V4 will be along shortly
# v1.1.3 - 5527d778becf75d8ce7f45876a88add06f79577ad7c4cbd1a8f42aa0e9991320
# Changed phase 1 variant 2 decoder to now brute force values outside of Unicorn
# Restricted it to 27K possibilities (30^3) for add, xor1, xor2
# v1.1.2 - 5a3c843bfcf31c2f2f2a2e4d5f5967800a2474e07323e8baa46ff3ac64d60d4a
# New variant of decoder in phase 1
# Different add value and alternates XOR each character with 0xF and 0x10
# v1.1.1 - 7eaa732d95252bf05440aca56f1b2e789dab39af72031a11fc597be88b1ede7f
# New variant has encrypted URLs
# First 5 bytes of a SHA1 hash of a key are used as decrypt key to RC4 encrypted data holding C2 URLs
# v1.1.0 - e1cb2bc858327f9967a3631056f7e513af17990d87780e4ee1c01bc141d3dc7f
# New stub bytes pre-header added
# v1.0.9 - fc1f1845e47d4494a02407c524eb0e94b6484045adb783e90406367ae20a83ac
# Adjusted HTTP export to account for change in URL structure, gate.php to forum.php
# Will not extract regardless of PHP file name
# v1.0.8 - b506faff00ae557056d387442e9d4d2a53e87c5f9cd59f75db9ba5525ffa0ba3
# New shellcode decoding binary with string "STARFALL"
# Will now extract regardless of magic header
# v1.0.7 - 14211739584aa0f04ba8845a9b66434529e5e4636f460d34fa84821ebfb142fd
# Hancitor directly embedded - fileless inject of PE but URLs scrapable
# v1.0.6 - 98f4e4436a7b2a0844d94526f5be5b489604d2b1f586be16ef578cc40d6a61b7
# Brute force of second stage keys (false key plants/re-positioned)
# Cleaned up handling for multiple sections
# e5b54afc85e7d282d7b2c0045e6e74967ff41ac571880929728f4d49693003a8
# Also added new first stage decoder for above hash variants
# 2ac7d8a063127641e71911941c549b8ce889c8587c1d948c72b1aca900069e5e
# New mechanisms for H1N1 decrypting added
# v1.0.5 - 6dbb31e435e2ff2b7f2b185dc19e6fb63da9ab3553d20b868a298b4c100aeb2a
# New Hancitor second stage XOR key
# Change phase 2 to automatically extract XOR key and extract encrypted binary
# v1.0.4 - 8f26a30a1fc71b7e9eb12e3b94317b7dd5827e2cbcfb3cd3feb684af6a73b4e6
# Hancitor no longer embedded, instead encoded URls that will download it
# v1.0.3 - b586c11f5485e3a38a156cba10379a4135a8fe34aa2798af8d543c059f0ac9a4
# Utilized code from Mak and Sysopfb to unpack Upack H1N1 DLL and extract C2
# v1.0.2 - b586c11f5485e3a38a156cba10379a4135a8fe34aa2798af8d543c059f0ac9a4
# Added XOR brute for phase 1
# Added including stripped MZ header on phase 1 EXE
# Added check for H1N1 phase 2 payload
# v1.0.1 - f648b0d91956f79a2645cbdf0c1612801107d783a6c6bb0ea41582b9b2161199
# Malware now XORs in macro to obfuscate B64 shellcode
# Added ability to extract phase 1 based off regex, assumes stored values in shellcode
# v1.0.0 - 03aef51be133425a0e5978ab2529890854ecf1b98a7cf8289c142a62de7acd1a
# Initial release, dumps phase 1 and phase 2 packed payloads
# Prints Hancitor C2 URLs
# Set up the module-level Unicorn x86-32 emulation environment.
# NOTE(review): the unpack helpers below create their own local Uc
# instances; this global mu appears unused by them -- confirm before removing.
ADDRESS = 0x1000000
mu = Uc(UC_ARCH_X86, UC_MODE_32)
# Converted unpacking to a function to make brute forcing XOR easier
def phase1_unpack(ADD_VALUE, XOR_VALUE, SIZE_VALUE, ENC_PAYLOAD):
    """Emulate the original decoder shellcode under Unicorn to decrypt the
    phase-1 payload.

    ADD_VALUE, XOR_VALUE and SIZE_VALUE are raw byte strings patched
    directly into the shellcode; ENC_PAYLOAD is appended after it.  The Uc
    instance is returned so the caller can read the decrypted bytes back
    out of emulated memory.
    """
    ADDRESS = 0x1000000
    mu = Uc(UC_ARCH_X86, UC_MODE_32)
    # Initialize stack
    mu.mem_map(ADDRESS, 4 * 1024 * 1024)
    # Build shellcode with variables
    # sub_8A6
    SC = b'\x8A\x04\x0F\x04' + ADD_VALUE + b'\x34' + XOR_VALUE + b'\x88\x04\x0F\x41\x81\xF9' + SIZE_VALUE + b'\x72\xED\x57\xE8\x61\x00\x00\x00\x83\x7D\xFC\x01'
    # sub_7CA
    SC += b'\x6B\xC0\x06\x99\x83\xE2\x07\x03\xC2\xC1\xF8\x03\xC3'
    # sub_7D7
    SC += b'\x6B\xC0\x06\x25\x07\x00\x00\x80\x79\x05\x48\x83\xC8\xF8\x40\xC3'
    # sub_7E7
    SC += b'\x8D\x48\xBF\x80\xF9\x19\x77\x07\x0F\xBE\xC0\x83\xE8\x41\xC3\x8D\x48\x9F\x80\xF9\x19\x77\x07\x0F\xBE\xC0\x83\xE8\x47\xC3\x8D\x48\xD0\x80\xF9\x09\x77\x07\x0F\xBE\xC0\x83\xC0\x04\xC3\x3C\x2B\x75\x04\x6A\x3E\x58\xC3\x3C\x2F\x75\x04\x6A\x3F\x58\xC3\x33\xC0\xC3'
    # sub_827
    SC += b'\x55\x8B\xEC\x51\x51\x8B\x45\x08\x83\x65\xFC\x00\x89\x45\xF8\x8A\x00\x84\xC0\x74\x68\x53\x56\x57\xE8\xA3\xFF\xFF\xFF\x8B\xD8\x8B\x45\xFC\xE8\x7C\xFF\xFF\xFF\x8B\x4D\xF8\x8D\x14\x08\x8B\x45\xFC\xE8\x7B\xFF\xFF\xFF\x8B\xF8\x8B\xF0\xF7\xDE\x8D\x4E\x08\xB0\x01\xD2\xE0\xFE\xC8\xF6\xD0\x20\x02\x83\xFF\x03\x7D\x09\x8D\x4E\x02\xD2\xE3\x08\x1A\xEB\x15\x8D\x4F\xFE\x8B\xC3\xD3\xF8\x8D\x4E\x0A\xD2\xE3\x08\x02\xC6\x42\x01\x00\x08\x5A\x01\xFF\x45\x08\x8B\x45\x08\x8A\x00\xFF\x45\xFC\x84\xC0\x75\x9E\x5F\x5E\x5B\xC9\xC3'
    # Build final code to emulate
    X86_CODE32 = SC + ENC_PAYLOAD
    # Write code to memory
    mu.mem_write(ADDRESS, X86_CODE32)
    # Start of encoded data + offset to binary
    mu.reg_write(UC_X86_REG_EDI, 0x10000F9 + 0x0C)
    # Initialize ECX counter to 0
    mu.reg_write(UC_X86_REG_ECX, 0x0)
    # Initialize Stack for functions
    mu.reg_write(UC_X86_REG_ESP, 0x1300000)
    # Print 150 characters of encrypted value
    #print "Encrypt: %s" % mu.mem_read(0x10000F9,150)
    # Run the code
    try:
        mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32))
    except UcError as e:
        # Emulation typically faults once the decode loop runs off the end
        # of the mapped code; the decrypted bytes are already in memory.
        pass
    # Print 150 characters of decrypted value
    #print "Decrypt: %s" % mu.mem_read(0x10000F9,150)
    return mu
def phase1_unpack_variant2(ENC_PAYLOAD):
    """Brute force the variant-2 decoder (add + alternating single-byte XOR).

    Tries all 27,000 combinations (30^3) of add/xor1/xor2, probing only the
    first 322 payload bytes; on a hit the full payload is decoded and the
    embedded EXE is written next to the input file.  Returns 1 on success,
    0 otherwise.
    """
    # Try version 2
    # 5a3c843bfcf31c2f2f2a2e4d5f5967800a2474e07323e8baa46ff3ac64d60d4a New decoding variant
    print "\t[!] Attempting variant 2..."
    # Brute force
    ADD_VALUE = 0
    SUCCESS_FLAG = 0
    while ADD_VALUE < 30:
        XOR_VALUE_1 = 0
        while XOR_VALUE_1 < 30:
            XOR_VALUE_2 = 0
            while XOR_VALUE_2 < 30:
                try:
                    # cheap probe: decode a prefix and look for a long Base64 run
                    B64_DATA = phase1_unpack_v2decode(ADD_VALUE, XOR_VALUE_1, XOR_VALUE_2, 322, ENC_PAYLOAD)
                    B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
                    DEC_PAYLOAD = base64.b64decode(B64_DATA.group())
                except:
                    DEC_PAYLOAD = ''
                if "This program cannot be run in DOS mode" in DEC_PAYLOAD:
                    print "\t[*] Successfully brute forced Hancitor encoder variant v2"
                    print "\t[-] ADD: %s\n\t[-] XOR1: %s\n\t[-] XOR2: %s" % (
                        hex(ADD_VALUE), hex(XOR_VALUE_1), hex(XOR_VALUE_2))
                    # keys confirmed: decode the whole payload this time
                    B64_DATA = phase1_unpack_v2decode(ADD_VALUE, XOR_VALUE_1, XOR_VALUE_2, len(ENC_PAYLOAD), ENC_PAYLOAD)
                    B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
                    DEC_PAYLOAD = base64.b64decode(B64_DATA.group())
                    FILE_NAME = sys.argv[1].split(".")[0] + "_S1.exe"
                    FILE_HANDLE = open(FILE_NAME, "w")
                    FILE_HANDLE.write(DEC_PAYLOAD)
                    FILE_HANDLE.close()
                    print "\t[!] Success! Written to disk as %s" % FILE_NAME
                    SUCCESS_FLAG = 1
                XOR_VALUE_2 += 1
            XOR_VALUE_1 += 1
        ADD_VALUE += 1
    return SUCCESS_FLAG
def phase1_unpack_v2decode(ADD_VALUE, XOR_VALUE_1, XOR_VALUE_2, LENGTH_VALUE, ENC_PAYLOAD):
    """Decode variant-2 payload bytes: add a constant, then XOR with one of
    two keys depending on the (successfully decoded) character position.

    Pure-Python port of the original Unicorn shellcode emulation -- brute
    forcing the 27K add/xor1/xor2 combinations under emulation took 15+
    minutes versus under 5 seconds here.  The payload's 10-byte header is
    skipped.
    """
    decoded = ''
    position = 0
    for ch in ENC_PAYLOAD[10:LENGTH_VALUE]:
        value = ord(ch) + ADD_VALUE
        # alternate the XOR key on even/odd output positions
        if position % 2 == 0:
            value ^= XOR_VALUE_1
        else:
            value ^= XOR_VALUE_2
        try:
            decoded += chr(value)
        except:
            # out-of-range byte: drop it without advancing the key parity
            continue
        position += 1
    return decoded
def phase1_unpack_variant3(ENC_PAYLOAD):
# Try version 3
# 800bf028a23440134fc834efc5c1e02cc70f05b2e800bbc285d7c92a4b126b1c New decoding variant
print "\t[!] Attempting variant 3..."
SUCCESS_FLAG = 0
# Don't have the shellcode or a way to brute force the key, so will need to manually add them here for the time being
# Insert new xor keys below -- NOTE: this variant will probably never be used again as it was a broken version of variant 4
XOR_KEYS = [
<KEY>",
<KEY>"
]
for XOR_VALUE in XOR_KEYS:
if SUCCESS_FLAG == 0:
try:
B64_DATA = phase1_unpack_v3decode(XOR_VALUE, 322, ENC_PAYLOAD)
B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
DEC_PAYLOAD = base64.b64decode(B64_DATA.group())
if "This program cannot be run in DOS mode" in DEC_PAYLOAD:
print "\t[*] Successfully decoded Hancitor with variant v3"
print "\t[-] XOR: 0x%s" % ("".join([hex(ord(i))[2:] for i in XOR_VALUE]))
B64_DATA = phase1_unpack_v3decode(XOR_VALUE, len(ENC_PAYLOAD), ENC_PAYLOAD)
B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
DEC_PAYLOAD = base64.b64decode(B64_DATA.group())
FILE_NAME = sys.argv[1].split(".")[0] + "_S1.exe"
FILE_HANDLE = open(FILE_NAME, "w")
FILE_HANDLE.write(DEC_PAYLOAD)
FILE_HANDLE.close()
print "\t[!] Success! Written to disk as %s" % FILE_NAME
SUCCESS_FLAG = 1
except:
pass
return SUCCESS_FLAG
def phase1_unpack_v3decode(XOR_VALUE_1, LENGTH_VALUE, ENC_PAYLOAD):
    """XOR-decode the payload (after its 10-byte header) with a repeating key."""
    data = ENC_PAYLOAD[10:LENGTH_VALUE]
    key_len = len(XOR_VALUE_1)
    decoded = ''
    for index, ch in enumerate(data):
        decoded += chr(ord(ch) ^ ord(XOR_VALUE_1[index % key_len]))
    return decoded
def phase1_unpack_variant4(ENC_PAYLOAD):
# Try version 4
# 62e6e5dc0c3927a8c5d708688ca2b56df93848b15a4c38aab173c5a8384395f9 New decoding variant
print "\t[!] Attempting variant 4..."
SUCCESS_FLAG = 0
# Don't have the shellcode or a way to brute force the key, so will need to manually add them here for the time being
# Insert new xor-pairs below
XOR_PAIRS = {
"\x78\x53\x38\x35":"\xC9\xA1\x43\x24"
}
for XOR_PAIR in XOR_PAIRS:
if SUCCESS_FLAG == 0:
try:
XOR_VALUE_1 = XOR_PAIR
XOR_VALUE_2 = XOR_PAIRS[XOR_PAIR]
B64_DATA = phase1_unpack_v4decode(XOR_VALUE_1, XOR_VALUE_2, 322, ENC_PAYLOAD)
B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
DEC_PAYLOAD = base64.b64decode(B64_DATA.group())
if "This program cannot be run in DOS mode" in DEC_PAYLOAD:
print "\t[*] Successfully decoded Hancitor with variant v4"
print "\t[-] XOR1: 0x%s" % ("".join([hex(ord(i))[2:] for i in XOR_VALUE_1]))
print "\t[-] XOR2: 0x%s" % ("".join([hex(ord(i))[2:] for i in XOR_VALUE_2]))
B64_DATA = phase1_unpack_v4decode(XOR_VALUE_1, XOR_VALUE_2, len(ENC_PAYLOAD), ENC_PAYLOAD)
B64_DATA = re.search("[A-Za-z0-9+/=]{300,}", B64_DATA)
# efe7cfe0c08265e1a4eed68a1e544ba0e98fff98942e0e55941e1899aba71579
# Their B64 | |
# coding: utf-8
# ## This notebook contains code to easily create the inputs required to run the process in one of the 'overlay' notebooks.
# #### In addition to the outputs produced by this output, the user will need to also obtain from other sources elevation (NED DEM) and optionally ortho imagery data (NAIP).
# #### The final steps in the process are done manually in ArcScene outside of this notebook. See instructions at the bottom of this notebook.
# The bulk of the libraries required for this processing can be found in the ../environment.yml file
# A conda env can be created with:
# conda env create -f environment.yml
#
# But there were two libraries used that are either not yet up on conda or pip, or need a version later than what's available:
# These are ulmo 0.8.3dev, available from:
# https://github.com/ulmo-dev/ulmo/tree/master/ulmo
# and:
# landsat-util 0.13.0 which is available on pypi but I had to install manually:
# https://pypi.python.org/pypi/landsat-util/0.13.0
#
# The only data not automatically downloaded are the MODIS layers which are accessed from a local repository.
#
# Given the non-standard libraries and local data requirements, this notebook should be considered comprehensive documentation of the process used to produce the inputs for this analysis, instead of a tool/script to be run by a general, non-expert users.
# In[2]:
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
import rasterio
# In[3]:
import sys
sys.path.append(r"..\..")
import pyphenocam
sys.path.append(r"J:\Projects\NCCSC\phenocam\Tools\DaymetPy\daymetpy")
import daymetpy
# # Create a point shapefile of our camera location
# In[4]:
# Output locations; everything below is written beneath this directory.
output_dir = r"J:\Projects\NCCSC\phenocam\DerivedData\quickbird"
site_name = "quickbird"
# In[5]:
# Look up the phenocam site record; provides .x/.y coordinates and .sitename.
# NOTE(review): coordinates are assumed to be WGS84 lon/lat (they are written
# with EPSG:4326 below) -- confirm against the pyphenocam data access docs.
site = pyphenocam.dataaccess.get_site(site_name)
# In[6]:
site.x, site.y
# In[7]:
import fiona
from fiona.crs import from_epsg
from shapely.geometry import Point, mapping
# In[8]:
# Single-point shapefile schema: a point geometry plus a 'name' attribute.
simpleschema = {'geometry': 'Point',
                'properties': {'name':'str'}}
wgs84_dir = os.path.join(output_dir, "wgs84")
if not os.path.exists(wgs84_dir):
    os.makedirs(wgs84_dir)
camera_fname = os.path.join(wgs84_dir, "cameraloc.shp")
# Write the camera location as a WGS84 (EPSG:4326) point shapefile.
with fiona.open(camera_fname, 'w', crs=from_epsg(4326),driver='ESRI Shapefile', schema=simpleschema) as output:
    point = Point(site.x, site.y)
    output.write({'properties': {'name': site.sitename},'geometry': mapping(point)})
# # Download the landsat8 scene over this area
# In[9]:
from landsat.search import Search
from landsat.downloader import Downloader
# In[10]:
# Query landsat-util for scenes covering the camera location.
# NOTE(review): taking results[1] (the second hit) looks hand-picked for this
# site; confirm the scene choice when rerunning for a different site/date.
s = Search()
results = s.search(lat=site.y, lon=site.x, limit=100)
scene_id = results['results'][1]['sceneID']
# In[11]:
landsat_dname = os.path.join(output_dir, 'Landsat')
if not os.path.exists(landsat_dname):
    os.makedirs(landsat_dname)
# In[11]:
from landsat.downloader import Downloader
d = Downloader(download_dir=landsat_dname)
result = d.download([str(scene_id)])
# In[12]:
import tarfile
# Unpack the downloaded scene archive, then delete the tarball.
scene_dname = os.path.join(landsat_dname, scene_id)
tar_fname = os.path.join(landsat_dname, scene_id + ".tar.bz")
tar = tarfile.open(tar_fname)
tar.extractall(path=scene_dname)
tar.close()
os.unlink(tar_fname)
# In[12]:
from pygaarst import raster
scene_dname = os.path.join(landsat_dname, scene_id)
sc = raster.Landsatscene(scene_dname)
# In[13]:
scene_dname = os.path.join(landsat_dname, scene_id)
scene_dname
# Locate the band 4 (red) and band 5 (NIR) GeoTIFFs inside the scene folder.
b4_fname = os.path.join(scene_dname, [f for f in os.listdir(scene_dname) if f.endswith('_B4.TIF')][0])
b5_fname = os.path.join(scene_dname, [f for f in os.listdir(scene_dname) if f.endswith('_B5.TIF')][0])
# In[14]:
# Open band 5 as the reference grid for everything that follows.
landsat = rasterio.open(b5_fname)
landsat_data = landsat.read(masked=True)
landsat_extents = [landsat.bounds.left, landsat.bounds.right, landsat.bounds.bottom, landsat.bounds.top]
# #### If the location isn't good either get another landsat scene or mosaic a couple
# In[15]:
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# Derive the UTM zone number from the raster's CRS WKT string so the cartopy
# projection matches the landsat grid.
utm_zone = landsat.crs_wkt.split('UTM zone ')[1].split('N"')[0]
landsat_proj = ccrs.UTM(zone=utm_zone, globe=ccrs.Globe(datum='WGS84',
                                               ellipse='WGS84'))
fig = plt.figure(figsize=(15, 15))
# ax_extent = [quickbird.x - 0.015, quickbird.x + 0.015, quickbird.y - 0.002, quickbird.y + 0.015]
ax = plt.axes(projection=landsat_proj)
# ax.set_extent(ax_extent, ccrs.Geodetic())
# Plot NDVI downsampled 10x in both axes with the camera location overlaid.
im = ax.imshow(sc.NDVI[::10, ::10], origin='upper', extent=landsat_extents,
               transform=landsat_proj, interpolation='none', cmap=mpl.cm.BrBG)
geodetic = ccrs.Geodetic()
ax.scatter(site.x, site.y, color='r', s=20, alpha=0.5, transform=geodetic)
plt.colorbar(im)
# ### We need to extract out a subset 254x254 centered horizontally on our camera loc, and extending north
# In[16]:
# Camera location expressed in the landsat UTM projection.
landsat_camx, landsat_camy = list(landsat_proj.transform_point(site.x, site.y, geodetic))
landsat_camx, landsat_camy
# In[17]:
landsat_a = landsat.affine
# In[18]:
import math
# Invert the affine transform to get the camera's (col, row) pixel indices.
landsat_cam_col, landsat_cam_row = [int(math.floor(coord)) for coord in ~landsat_a * (landsat_camx, landsat_camy)]
# 254 columns centered on the camera; 254 rows mostly north of it
# (244 above, 10 below).
landsat_left_col, landsat_right_col = landsat_cam_col-254/2, landsat_cam_col+254/2
landsat_upper_row, landsat_lower_row = landsat_cam_row-244, landsat_cam_row+10
print landsat_cam_col, landsat_cam_row
print landsat_left_col, landsat_right_col
print landsat_upper_row, landsat_lower_row
# In[19]:
# Map coordinates of the four subset corners (upper-left, lower-right,
# upper-right, lower-left).
landsat_ulx, landsat_uly = landsat_a * (landsat_left_col, landsat_upper_row)
landsat_lrx, landsat_lry = landsat_a * (landsat_right_col, landsat_lower_row)
landsat_urx, landsat_ury = landsat_a * (landsat_right_col, landsat_upper_row)
landsat_llx, landsat_lly = landsat_a * (landsat_left_col, landsat_lower_row)
print landsat_ulx, landsat_uly
print landsat_lrx, landsat_lry
print landsat_urx, landsat_ury
print landsat_llx, landsat_lly
print landsat_camx, landsat_camy
# In[20]:
get_ipython().magic(u'matplotlib inline')
# Re-plot the NDVI with the subset footprint drawn as a black box.
fig = plt.figure(figsize=(15, 15))
ax = plt.axes(projection=landsat_proj)
ax.imshow(sc.NDVI[::10, ::10], origin='upper', extent=landsat_extents,
          transform=landsat_proj, interpolation='none', cmap=mpl.cm.BrBG)
ax.scatter(site.x, site.y, color='r', s=20, alpha=0.5, transform=geodetic)
ax.plot([landsat_ulx, landsat_lrx], [landsat_uly, landsat_uly], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_ulx, landsat_lrx], [landsat_lry, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_ulx, landsat_ulx], [landsat_uly, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_lrx, landsat_lrx], [landsat_uly, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
# In[21]:
from shapely.geometry import mapping, Polygon
import fiona
# Here's an example Shapely geometry
# Subset boundary polygon: a closed ring through the four corners.
poly = Polygon([(landsat_ulx, landsat_uly),
                (landsat_lrx, landsat_uly),
                (landsat_lrx, landsat_lry),
                (landsat_ulx, landsat_lry),
                (landsat_ulx, landsat_uly)])
# Define a polygon feature geometry with one attribute
schema = {
    'geometry': 'Polygon',
    'properties': {'id': 'int'},
}
# Write a new Shapefile
# The boundary shapefile is written in the landsat CRS (UTM).
landsat_subset_dname = os.path.join(landsat_dname, 'Subset')
if not os.path.exists(landsat_subset_dname):
    os.makedirs(landsat_subset_dname)
boundary_fname = os.path.join(landsat_subset_dname, 'landsat_boundary.shp')
with fiona.open(boundary_fname, 'w', 'ESRI Shapefile', schema, crs=landsat.crs) as c:
    ## If there are multiple geometries, put the "for" loop here
    c.write({
        'geometry': mapping(poly),
        'properties': {'id': 123},
    })
# In[22]:
from shapely.geometry import mapping, Polygon
import fiona
# Define a polygon feature geometry with one attribute
schema = {
    'geometry': 'Polygon',
    'properties': {'row': 'int', 'col':'int'},
}
# Write a new Shapefile
# Fishnet: one square polygon per landsat pixel in the 254x254 subset, each
# tagged with its (row, col) index.  landsat_a.e is the row step of the
# affine transform (typically negative for north-up rasters), so rows step
# southward from the upper-left corner.
landsat_subset_dname = os.path.join(landsat_dname, 'Subset')
if not os.path.exists(landsat_subset_dname):
    os.makedirs(landsat_subset_dname)
fishnet_fname = os.path.join(landsat_subset_dname, 'landsat_fishnet.shp')
with fiona.open(fishnet_fname, 'w', 'ESRI Shapefile', schema, crs=landsat.crs) as c:
    ## If there are multiple geometries, put the "for" loop here
    for row in range(254):
        cell_uly = landsat_uly + (row*landsat_a.e)
        for col in range(254):
            cell_ulx = landsat_ulx + (col*landsat.affine.a)
            poly = Polygon([(cell_ulx, cell_uly),
                            (cell_ulx + landsat_a.a, cell_uly),
                            (cell_ulx + landsat_a.a, cell_uly + landsat_a.e),
                            (cell_ulx, cell_uly + landsat_a.e),
                            (cell_ulx, cell_uly)])
            c.write({
                'geometry': mapping(poly),
                'properties': {'row': row,
                               'col':col},
            })
# # Create a subset of this for display purposes
# In[55]:
# Slice the NDVI array down to the 254x254 subset window computed above.
landsat_subset = sc.NDVI[
    landsat_upper_row:landsat_lower_row,
    landsat_left_col:landsat_right_col]
plt.imshow(landsat_subset)
plt.scatter(landsat_cam_col-landsat_left_col, landsat_cam_row-landsat_upper_row, c='r')
pyphenocam.plotting.format_photo_axes(plt.gca())
# In[65]:
# Write the subset out as a GeoTIFF anchored at the subset's upper-left corner.
landsat_subset_fname = os.path.join(landsat_subset_dname, "landsat_subset.tif")
import copy
landsat_subset_meta = copy.copy(landsat.meta)
# FIX: rasterio metadata expects width = columns (axis 1) and height = rows
# (axis 0).  The original assignment had these swapped; it went unnoticed
# only because the subset is square (254 x 254).
landsat_subset_meta['width'] = landsat_subset.shape[1]
landsat_subset_meta['height'] = landsat_subset.shape[0]
new_transform = list(landsat.meta['transform'])
new_transform[0] = landsat_ulx
new_transform[3] = landsat_uly
landsat_subset_meta['transform'] = tuple(new_transform)
landsat_subset_meta['dtype'] = 'float32'
import affine
landsat_subset_meta['affine'] = affine.Affine.from_gdal(*landsat_subset_meta['transform'])
with rasterio.open(landsat_subset_fname, 'w', **landsat_subset_meta) as dst:
    dst.write_band(1, landsat_subset.astype(rasterio.float32))
# In[60]:
landsat_subset_meta
# In[25]:
get_ipython().magic(u'matplotlib inline')
# Build a 3-band "index" raster at 10x the subset resolution where band 1
# stores each pixel's subset column and band 2 its subset row, so a rendered
# pixel can later be mapped back to its landsat cell.
n=10
r = np.zeros(landsat_subset.shape).astype(np.dtype('i1'))
# Column indices, each repeated n times in both directions.
landsat_col_x3 = np.tile(np.repeat(np.arange(r.shape[1]), n), (r.shape[0])*n)
landsat_col_x3.shape = r.shape[0]*n, r.shape[1]*n
plt.imshow(landsat_col_x3, interpolation='none')
plt.colorbar()
# In[26]:
landsat_col_x3.shape
# In[27]:
# Row indices, built the same way then transposed.
landsat_row_x3 = np.tile(np.repeat(np.arange(r.shape[0]), n), (r.shape[1]*n))
landsat_row_x3.shape = r.shape[1]*n, r.shape[0]*n
landsat_row_x3 = landsat_row_x3.T
plt.imshow(landsat_row_x3, interpolation='none')
plt.colorbar()
# In[28]:
# Empty third band (keeps the output a 3-band RGB-style raster).
landsat_b = np.zeros(landsat_row_x3.shape)
# In[29]:
get_ipython().magic(u'matplotlib inline')
plt.imshow(np.dstack([landsat_col_x3, landsat_row_x3, landsat_b])[:,:,:], interpolation='none')
# ##### One small detail to note, because of the way ArcScene renders a raster, we're increasing the pixel resolution 10x
# In[30]:
import scipy
subset_index_fname = os.path.join(landsat_subset_dname, "landsat_subset_index.tif")
import copy
landsat_subset_meta = copy.copy(landsat.meta)
# FIX: rasterio expects width = columns (shape[1]) and height = rows
# (shape[0]); these were swapped in the original.  Harmless here only
# because the subset is square.
landsat_subset_meta['width'] = landsat_subset.shape[1] * 10
landsat_subset_meta['height'] = landsat_subset.shape[0] * 10
new_transform = list(landsat.meta['transform'])
new_transform[0] = landsat_ulx
new_transform[3] = landsat_uly
# Pixel size shrinks 10x in both dimensions to match the upsampled grid.
new_transform[1] = new_transform[1] / 10.0
new_transform[-1] = new_transform[-1] / 10.0
landsat_subset_meta['transform'] = new_transform
import affine
landsat_subset_meta['affine'] = affine.Affine.from_gdal(*landsat_subset_meta['transform'])
landsat_subset_meta.update(
    dtype=rasterio.uint8,
    count=3,
    nodata=255)
with rasterio.open(subset_index_fname, 'w', **landsat_subset_meta) as dst:
    dst.write_band(1, landsat_col_x3.astype(rasterio.uint8))
    dst.write_band(2, landsat_row_x3.astype(rasterio.uint8))
    dst.write_band(3, landsat_b.astype(rasterio.uint8))
# ## download and process some NED 30m DEM data for our area
# In[31]:
from ulmo.usgs.ned import core
core.get_available_layers()
# In[33]:
# Fetch 1/3 arc-second NED elevation covering the landsat subset bounding box
# (expressed in lon/lat for the ulmo API), mosaicking tiles if necessary.
layer = '1/3 arc-second'
landsat_bbox = list(geodetic.transform_point(landsat_ulx, landsat_lry, landsat_proj))
landsat_bbox += list(geodetic.transform_point(landsat_lrx, landsat_uly, landsat_proj))
dname = os.path.join(output_dir, "ArcScene", "InputData", "DEM")
if not os.path.exists(dname):
    os.makedirs(dname)
dem_result = core.get_raster(layer=layer, bbox=landsat_bbox, path=dname, mosaic=True)
# ## Reproject this to match our landsat subset grid
# In[34]:
# get_raster may return a list of files; take the first.
if type(dem_result) == list:
    dem_result = dem_result[0]
# In[35]:
dem = rasterio.open(dem_result)
landsat_subset = rasterio.open(landsat_subset_fname)
# In[36]:
from rasterio.warp import calculate_default_transform, reproject, RESAMPLING
utm_dname = os.path.join(output_dir, "ArcScene", "InputData", "UTM")
if not os.path.exists(utm_dname):
    os.makedirs(utm_dname)
out_fname = os.path.join(utm_dname, "NED_30m.tif")
# affine, width, height = calculate_default_transform(
#     src.crs, dst_crs, src.width, src.height, *src.bounds)
# Warp the DEM onto the landsat subset's grid/CRS with cubic-spline
# resampling, reusing the subset's metadata for the output file.
kwargs = landsat_subset.meta.copy()
kwargs['dtype'] = dem.meta['dtype']
kwargs['nodata'] = -9999
# kwargs.update({
#     'crs': subset.crs,
#     'transform': subset.affine,
#     'affine': subset.affine,
#     'width': subset.width,
#     'height': subset.height
# })
with rasterio.open(out_fname, 'w', **kwargs) as dst:
    reproject(
        source=rasterio.band(dem, 1),
        destination=rasterio.band(dst, 1),
        src_transform=dem.affine,
        src_crs=dem.crs,
        src_nodata=kwargs['nodata'],
        dst_transform=landsat_subset.affine,
        dst_crs=landsat_subset.crs,
        dst_nodata=kwargs['nodata'],
        resampling=RESAMPLING.cubic_spline)
    dem_data = dst.read_band(1, masked=True)
# In[37]:
# Visual check: elevation with the landsat subset box and camera overlaid.
elev_subset_fname = out_fname
elev = rasterio.open(elev_subset_fname)
elev_data = elev.read()
nad83 = ccrs.Geodetic(globe=ccrs.Globe(datum='NAD83', ellipse='GRS80'))
elev_extents = [elev.bounds.left, elev.bounds.right, elev.bounds.bottom, elev.bounds.top]
fig = plt.figure(figsize=(15, 15))
ax = plt.axes(projection=landsat_proj)
ax.imshow(dem_data, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none')
ax.scatter(site.x, site.y, color='r', s=20, alpha=0.5, transform=geodetic)
ax.plot([landsat_ulx, landsat_lrx], [landsat_uly, landsat_uly], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_ulx, landsat_lrx], [landsat_lry, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_ulx, landsat_ulx], [landsat_uly, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
ax.plot([landsat_lrx, landsat_lrx], [landsat_uly, landsat_lry], 'k-', lw=2, c='black', transform=landsat_proj)
# # And do the same thing we did with landsat for modis (250m)
# We have some nationwide mosaics of the 250m modis products maintained at the fort collins science center. TODO: add link to notebook used to generate these...
# In[38]:
modis_mosaic_fname = r"J:\GIS_Layers\Climate\ClimateCache\MODIS\DerivedData\Mosaics\NDVI\MOD13Q1.A2001081.NDVI.tif"
modis_mosaic = rasterio.open(modis_mosaic_fname)
modis_mosaic_data = modis_mosaic.read(masked=True)
# In[39]:
fig = plt.figure(figsize=(15, 15))
modis_proj = cartopy.crs.Sinusoidal.MODIS
ax = plt.axes(projection=modis_proj)
modis_mosaic_extents = [modis_mosaic.bounds.left, modis_mosaic.bounds.right, modis_mosaic.bounds.bottom, modis_mosaic.bounds.top]
# FIX: the row/column slices here had been mangled into an IPv6-like token in
# this copy of the source; restored to the same 10x downsampling used for the
# landsat NDVI plots above.
ax.imshow(modis_mosaic_data[0, ::10, ::10], origin='upper', extent=modis_mosaic_extents, interpolation='none',
          cmap=mpl.cm.jet, transform=modis_proj)
# ax.imshow(modis_mosaic_data[0, ::10, ::10], transform=modis_proj)
ax.scatter(site.x, site.y, color='white', s=450, alpha=0.8, marker="*", transform=geodetic)
# ax.plot([ulx,lrx], [uly, uly], 'k-', lw=2, c='black', transform=landsat_proj)
# ax.plot([ulx,lrx], [lry, lry], 'k-', lw=2, c='black', transform=landsat_proj)
# ax.plot([ulx,ulx], [uly, lry], 'k-', lw=2, c='black', transform=landsat_proj)
# ax.plot([lrx,lrx], [uly, lry], 'k-', lw=2, c='black', transform=landsat_proj)
# love the modis sinusoidal grid!
# # Extract out the corresponding subset of modis pixels
# In[40]:
# Project the landsat subset corner coordinates into the MODIS sinusoidal CRS.
modis_urx, modis_ury = list(modis_proj.transform_point(landsat_urx, landsat_ury, landsat_proj))
modis_llx, modis_lly = list(modis_proj.transform_point(landsat_llx, landsat_lly, landsat_proj))
modis_ulx, modis_uly = list(modis_proj.transform_point(landsat_ulx, landsat_uly, landsat_proj))
modis_lrx, modis_lry = list(modis_proj.transform_point(landsat_lrx, landsat_lry, landsat_proj))
print modis_urx, modis_ury
print modis_llx, modis_lly
print modis_ulx, modis_uly
print modis_lrx, modis_lry
# In[41]:
# Affine transform mapping (col, row) -> map coordinates for the MODIS mosaic.
modis_a = modis_mosaic.affine
# In[42]:
import math
modissubset_ll_col, modissubset_ll_row = [int(math.floor(coord)) for coord in ~modis_a * (modis_llx, modis_lly)]
modissubset_ur_col, modissubset_ur_row = [int(math.floor(coord)) for coord | |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gfsa.automaton."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from gfsa import automaton_builder
from gfsa import graph_types
class AutomatonTest(parameterized.TestCase):
def build_simple_schema(self):
return {
graph_types.NodeType("a"):
graph_types.NodeSchema(
in_edges=[
graph_types.InEdgeType("ai_0"),
graph_types.InEdgeType("ai_1")
],
out_edges=[graph_types.OutEdgeType("ao_0")]),
graph_types.NodeType("b"):
graph_types.NodeSchema(
in_edges=[graph_types.InEdgeType("bi_0")],
out_edges=[
graph_types.OutEdgeType("bo_0"),
graph_types.OutEdgeType("bo_1")
]),
}
def build_loop_graph(self):
"""Helper method to build this complex graph, to test graph encodings.
┌───────<──┐ ┌───────<─────────<─────┐
│ │ │ │
│ [ao_0]│ ↓[ai_0] │
│ (a0) │
│ [ai_1]↑ │[ao_1] │
│ │ │ │
│ [ao_0]│ ↓[ai_0] │
↓ (a1) ↑
│ [ai_1]↑ │[ao_1] │
│ │ │ │
│ [bo_0]│ ↓[bi_0] │
│ ╭──╮───────>[bo_2]────┐ │
│ │b0│───────>[bo_2]──┐ │ │
│ │ │<──[bi_0]─────<─┘ │ │
│ ╰──╯<──[bi_0]─────<─┐ │ │
│ [bi_0]↑ │[bo_1] │ │ ↑
│ │ │ │ │ │
│ [bo_0]│ ↓[bi_0] │ │ │
│ ╭──╮───────>[bo_2]──┘ │ │
│ │b1│───────>[bo_2]──┐ │ │
↓ │ │<──[bi_0]─────<─┘ │ │
│ ╰──╯<──[bi_0]─────<───┘ │
│ [bi_0]↑ │[bo_1] │
│ │ │ │
└───────>──┘ └───────>─────────>─────┘
Returns:
Tuple (schema, graph) for the above structure.
"""
a = graph_types.NodeType("a")
b = graph_types.NodeType("b")
ai_0 = graph_types.InEdgeType("ai_0")
ai_1 = graph_types.InEdgeType("ai_1")
bi_0 = graph_types.InEdgeType("bi_0")
bi_0 = graph_types.InEdgeType("bi_0")
ao_0 = graph_types.OutEdgeType("ao_0")
ao_1 = graph_types.OutEdgeType("ao_1")
bo_0 = graph_types.OutEdgeType("bo_0")
bo_1 = graph_types.OutEdgeType("bo_1")
bo_2 = graph_types.OutEdgeType("bo_2")
a0 = graph_types.NodeId("a0")
a1 = graph_types.NodeId("a1")
b0 = graph_types.NodeId("b0")
b1 = graph_types.NodeId("b1")
schema = {
a:
graph_types.NodeSchema(
in_edges=[ai_0, ai_1], out_edges=[ao_0, ao_1]),
b:
graph_types.NodeSchema(
in_edges=[bi_0], out_edges=[bo_0, bo_1, bo_2]),
}
test_graph = {
a0:
graph_types.GraphNode(
a, {
ao_0: [graph_types.InputTaggedNode(b1, bi_0)],
ao_1: [graph_types.InputTaggedNode(a1, ai_0)]
}),
a1:
graph_types.GraphNode(
a, {
ao_0: [graph_types.InputTaggedNode(a0, ai_1)],
ao_1: [graph_types.InputTaggedNode(b0, bi_0)]
}),
b0:
graph_types.GraphNode(
b, {
bo_0: [graph_types.InputTaggedNode(a1, ai_1)],
bo_1: [graph_types.InputTaggedNode(b1, bi_0)],
bo_2: [
graph_types.InputTaggedNode(b0, bi_0),
graph_types.InputTaggedNode(b1, bi_0)
]
}),
b1:
graph_types.GraphNode(
b, {
bo_0: [graph_types.InputTaggedNode(b0, bi_0)],
bo_1: [graph_types.InputTaggedNode(a0, ai_0)],
bo_2: [
graph_types.InputTaggedNode(b0, bi_0),
graph_types.InputTaggedNode(b1, bi_0)
]
}),
}
return schema, test_graph
def test_constructor_actions_nodes_routes(self):
builder = automaton_builder.AutomatonBuilder(
self.build_simple_schema(), with_backtrack=False, with_fail=True)
self.assertEqual(
set(builder.special_actions), {
automaton_builder.SpecialActions.FINISH,
automaton_builder.SpecialActions.FAIL
})
self.assertEqual(
set(builder.node_types),
{graph_types.NodeType("a"),
graph_types.NodeType("b")})
self.assertEqual(
set(builder.in_route_types), {
automaton_builder.InRouteType(
graph_types.NodeType("a"), automaton_builder.SOURCE_INITIAL),
automaton_builder.InRouteType(
graph_types.NodeType("a"), graph_types.InEdgeType("ai_0")),
automaton_builder.InRouteType(
graph_types.NodeType("a"), graph_types.InEdgeType("ai_1")),
automaton_builder.InRouteType(
graph_types.NodeType("b"), automaton_builder.SOURCE_INITIAL),
automaton_builder.InRouteType(
graph_types.NodeType("b"), graph_types.InEdgeType("bi_0")),
})
self.assertEqual(
set(builder.in_out_route_types), {
automaton_builder.InOutRouteType(
graph_types.NodeType("a"), automaton_builder.SOURCE_INITIAL,
graph_types.OutEdgeType("ao_0")),
automaton_builder.InOutRouteType(
graph_types.NodeType("a"), graph_types.InEdgeType("ai_0"),
graph_types.OutEdgeType("ao_0")),
automaton_builder.InOutRouteType(
graph_types.NodeType("a"), graph_types.InEdgeType("ai_1"),
graph_types.OutEdgeType("ao_0")),
automaton_builder.InOutRouteType(
graph_types.NodeType("b"), automaton_builder.SOURCE_INITIAL,
graph_types.OutEdgeType("bo_0")),
automaton_builder.InOutRouteType(
graph_types.NodeType("b"), automaton_builder.SOURCE_INITIAL,
graph_types.OutEdgeType("bo_1")),
automaton_builder.InOutRouteType(
graph_types.NodeType("b"), graph_types.InEdgeType("bi_0"),
graph_types.OutEdgeType("bo_0")),
automaton_builder.InOutRouteType(
graph_types.NodeType("b"), graph_types.InEdgeType("bi_0"),
graph_types.OutEdgeType("bo_1")),
})
def test_constructor_inverse_mappings(self):
builder = automaton_builder.AutomatonBuilder(self.build_simple_schema())
# Mappings should be inverses of the corresponding lists
for i, node_type in enumerate(builder.node_types):
self.assertEqual(builder.node_type_to_index[node_type], i)
for i, in_route_type in enumerate(builder.in_route_types):
self.assertEqual(builder.in_route_type_to_index[in_route_type], i)
for i, in_out_route_type in enumerate(builder.in_out_route_types):
self.assertEqual(builder.in_out_route_type_to_index[in_out_route_type], i)
def test_constructor_information_removing_mappings(self):
builder = automaton_builder.AutomatonBuilder(self.build_simple_schema())
# Check consistency of information-removing mappings with the
# corresponding pairs of lists.
for in_out_route_type in builder.in_out_route_types:
in_route_type = automaton_builder.InRouteType(in_out_route_type.node_type,
in_out_route_type.in_edge)
self.assertEqual(
builder.in_out_route_to_in_route[
builder.in_out_route_type_to_index[in_out_route_type]],
builder.in_route_type_to_index[in_route_type])
for in_route_type in builder.in_route_types:
node_type = graph_types.NodeType(in_route_type.node_type)
self.assertEqual(
builder.in_route_to_node_type[
builder.in_route_type_to_index[in_route_type]],
builder.node_type_to_index[node_type])
for in_out_route_type in builder.in_out_route_types:
in_route_type = automaton_builder.InRouteType(in_out_route_type.node_type,
in_out_route_type.in_edge)
self.assertEqual(
builder.in_out_route_to_in_route[
builder.in_out_route_type_to_index[in_out_route_type]],
builder.in_route_type_to_index[in_route_type])
  @parameterized.named_parameters([
      {
          "testcase_name": "sum",
          "reduction": "sum"
      },
      {
          "testcase_name": "max",
          "reduction": "max"
      },
      {
          "testcase_name": "softmax",
          "reduction": "softmax"
      },
  ])
  def test_routing_reduce_correct(self, reduction):
    """Compare JAX implementations to a (slow but correct) iterative one."""
    n_variants = 2
    n_states = 4
    def make_range_shaped(shape):
      # Deterministic distinct values so aggregation errors are visible.
      return np.arange(np.prod(shape)).reshape(shape).astype("float32")
    schema = self.build_simple_schema()
    builder = automaton_builder.AutomatonBuilder(schema)
    # `move` is indexed by (variant, in-out route, current state, next state);
    # `special` by (variant, in route, state, special action).
    routing_params = automaton_builder.RoutingParams(
        move=make_range_shaped([
            n_variants,
            len(builder.in_out_route_types),
            n_states,
            n_states,
        ]),
        special=make_range_shaped([
            n_variants,
            len(builder.in_route_types),
            n_states,
            len(builder.special_actions),
        ]),
    )
    # Compute aggregates with JAX
    if reduction == "softmax":
      routing_aggregates = builder.routing_softmax(routing_params)
    else:
      routing_aggregates = builder.routing_reduce(
          routing_params, reduction=reduction)
    # Broadcast each aggregate back to the full parameter shape so it can be
    # compared elementwise below.
    routing_aggregates = jax.tree_multimap(
        lambda s, p: np.array(jnp.broadcast_to(s, p.shape)),
        routing_aggregates, routing_params)
    # Manual looping aggregates
    for variant in range(n_variants):
      for current_state in range(n_states):
        for in_route_type in builder.in_route_types:
          # Compute aggregates
          # Collect every parameter in this (variant, state, in-route)
          # distribution: all moves first, then all special actions.
          distn_vals = []
          iroute_idx = builder.in_route_type_to_index[in_route_type]
          for out_edge_type in schema[in_route_type.node_type].out_edges:
            ioroute_idx = builder.in_out_route_type_to_index[
                automaton_builder.InOutRouteType(in_route_type.node_type,
                                                 in_route_type.in_edge,
                                                 out_edge_type)]
            for next_state in range(n_states):
              distn_vals.append(routing_params.move[variant, ioroute_idx,
                                                    current_state, next_state])
          for action_idx in range(len(builder.special_actions)):
            distn_vals.append(routing_params.special[variant, iroute_idx,
                                                     current_state, action_idx])
          if reduction == "sum":
            distn_aggregate = [sum(distn_vals)] * len(distn_vals)
          elif reduction == "max":
            distn_aggregate = [max(distn_vals)] * len(distn_vals)
          elif reduction == "softmax":
            distn_aggregate = list(jax.nn.softmax(jnp.array(distn_vals)))
          else:
            raise ValueError(f"Invalid reduction {reduction}")
          i = 0
          # Check them with the JAX version
          # (iteration below mirrors the collection order above, so `i` walks
          # `distn_aggregate` in lockstep).
          for out_edge_type in schema[in_route_type.node_type].out_edges:
            ioroute_idx = builder.in_out_route_type_to_index[
                automaton_builder.InOutRouteType(in_route_type.node_type,
                                                 in_route_type.in_edge,
                                                 out_edge_type)]
            for next_state in range(n_states):
              np.testing.assert_allclose(
                  routing_aggregates.move[variant, ioroute_idx, current_state,
                                          next_state],
                  distn_aggregate[i],
                  rtol=1e-6)
              i += 1
          for action_idx in range(len(builder.special_actions)):
            np.testing.assert_allclose(
                routing_aggregates.special[variant, iroute_idx, current_state,
                                           action_idx],
                distn_aggregate[i],
                rtol=1e-6)
            i += 1
def test_initial_routing_params_noiseless(self):
schema = self.build_simple_schema()
builder = automaton_builder.AutomatonBuilder(schema)
routing_params = builder.initialize_routing_params(
key=None,
num_fsm_states=3,
num_variants=2,
state_change_prob=0.2,
move_prob=0.9,
noise_factor=0)
outgoing_count = np.array([
len(schema[in_out_route.node_type].out_edges)
for in_out_route in builder.in_out_route_types
])[None, :, None]
all_same_state_moves = routing_params.move[:, :, np.arange(3), np.arange(3)]
expected = np.broadcast_to(0.9 * 0.8 / outgoing_count,
all_same_state_moves.shape)
np.testing.assert_allclose(all_same_state_moves, expected)
state_1 = []
state_2 = []
for i in range(3):
for j in range(3):
if i != j:
state_1.append(i)
state_2.append(j)
all_different_state_moves = routing_params.move[:, :, state_1, state_2]
expected = np.broadcast_to(0.9 * 0.2 / (2 * outgoing_count),
all_different_state_moves.shape)
np.testing.assert_allclose(all_different_state_moves, expected)
np.testing.assert_allclose(routing_params.special, 0.1 / 3)
def test_initial_routing_params_with_noise(self):
builder = automaton_builder.AutomatonBuilder(self.build_simple_schema())
# Small amounts of noise shouldn't change parameters much
initializer_kwargs = dict(
num_fsm_states=3, num_variants=2, state_change_prob=0.2, move_prob=0.9)
noiseless_params = builder.initialize_routing_params(
key=None, noise_factor=0, **initializer_kwargs)
eps_noise_params = builder.initialize_routing_params(
key=jax.random.PRNGKey(1234), noise_factor=1e-6, **initializer_kwargs)
np.testing.assert_allclose(
noiseless_params.move, eps_noise_params.move, rtol=0.02)
np.testing.assert_allclose(
noiseless_params.special, eps_noise_params.special, rtol=0.02)
# Even with more noise, should still be normalized
noisy_params = builder.initialize_routing_params(
key=jax.random.PRNGKey(1234), noise_factor=0.8, **initializer_kwargs)
noisy_sums = builder.routing_reduce(noisy_params, "sum")
np.testing.assert_allclose(noisy_sums.move, 1.0, rtol=1e-6)
np.testing.assert_allclose(noisy_sums.special, 1.0, rtol=1e-6)
  def test_routing_gates_to_probs(self):
    """Gate parameters should convert into normalized routing probabilities."""
    builder = automaton_builder.AutomatonBuilder(self.build_simple_schema())
    # [variants, in_out_routes, fsm_states, fsm_states]
    # [variants, in_routes, fsm_states]
    # Start from uniform 0.5 gates everywhere, then perturb two distributions.
    move_gates = np.full([3, len(builder.in_out_route_types), 2, 2], 0.5)
    accept_gates = np.full([3, len(builder.in_route_types), 2], 0.5)
    backtrack_gates = np.full([3, len(builder.in_route_types), 2], 0.5)
    # Set one distribution to sum to more than 1.
    idx_d1_move1 = builder.in_out_route_type_to_index[
        automaton_builder.InOutRouteType(
            graph_types.NodeType("b"), graph_types.InEdgeType("bi_0"),
            graph_types.OutEdgeType("bo_0"))]
    move_gates[0, idx_d1_move1, 0, :] = [.2, .3]
    idx_d1_move2 = builder.in_out_route_type_to_index[
        automaton_builder.InOutRouteType(
            graph_types.NodeType("b"), graph_types.InEdgeType("bi_0"),
            graph_types.OutEdgeType("bo_1"))]
    move_gates[0, idx_d1_move2, 0, :] = [.4, .5]
    idx_d1_special = builder.in_route_type_to_index[
        automaton_builder.InRouteType(
            graph_types.NodeType("b"), graph_types.InEdgeType("bi_0"))]
    accept_gates[0, idx_d1_special, 0] = .6
    backtrack_gates[0, idx_d1_special, 0] = .3
    # Set another to sum to less than 1.
    idx_d2_move = builder.in_out_route_type_to_index[
        automaton_builder.InOutRouteType(
            graph_types.NodeType("a"), graph_types.InEdgeType("ai_0"),
            graph_types.OutEdgeType("ao_0"))]
    move_gates[2, idx_d2_move, 1, :] = [.1, .2]
    idx_d2_special = builder.in_route_type_to_index[
        automaton_builder.InRouteType(
            graph_types.NodeType("a"), graph_types.InEdgeType("ai_0"))]
    accept_gates[2, idx_d2_special, 1] = .3
    backtrack_gates[2, idx_d2_special, 1] = .75
    # logit() presumably inverts a sigmoid applied inside
    # routing_gates_to_probs, so the gate values above are recovered exactly
    # before normalization -- TODO confirm against automaton_builder.
    routing_gates = automaton_builder.RoutingGateParams(
        move_gates=jax.scipy.special.logit(move_gates),
        accept_gates=jax.scipy.special.logit(accept_gates),
        backtrack_gates=jax.scipy.special.logit(backtrack_gates))
    routing_probs = builder.routing_gates_to_probs(routing_gates)
    # Check probabilities for first distribution: should divide evenly.
    # Total mass is (.2+.3) + (.4+.5) + .6 = 2.0, hence the division by 2.
    np.testing.assert_allclose(routing_probs.move[0, idx_d1_move1, 0, :],
                               np.array([.2, .3]) / 2.0)
    np.testing.assert_allclose(routing_probs.move[0, idx_d1_move2, 0, :],
                               np.array([.4, .5]) / 2.0)
    np.testing.assert_allclose(routing_probs.special[0, idx_d1_special, 0, :],
                               np.array([.6, 0, 0]) / 2.0)
    # Check probabilities for second distribution: should assign remainder to
    # backtrack and fail.
    np.testing.assert_allclose(routing_probs.move[2, idx_d2_move, 1, :],
                               np.array([.1, .2]))
    np.testing.assert_allclose(routing_probs.special[2, idx_d2_special, 1, :],
                               np.array([.3, .3, .1]))
def initialize_routing_gates(self):
"""Just make sure that we can initialize routing gates."""
builder = automaton_builder.AutomatonBuilder(self.build_simple_schema())
# Noiseless
noiseless_gates = builder.initialize_routing_gates(
key=None, logistic_noise=0, num_fsm_states=3, num_variants=2)
self.assertEqual(noiseless_gates.move_gates.shape,
(2, len(builder.in_out_route_types), 3, 3))
self.assertEqual(noiseless_gates.accept_gates.shape,
(2, len(builder.in_route_types), 3))
self.assertEqual(noiseless_gates.backtracKkgates.shape,
(2, len(builder.in_route_types), 3))
# Perturbed
noisy_gates = builder.initialize_routing_gates(
key=jax.random.PRNGKey(0),
logistic_noise=0.2,
num_fsm_states=3,
num_variants=2)
self.assertEqual(noisy_gates.move_gates.shape,
(2, len(builder.in_out_route_types), 3, 3))
self.assertEqual(noisy_gates.accept_gates.shape,
(2, len(builder.in_route_types), 3))
self.assertEqual(noisy_gates.backtracKkgates.shape,
(2, len(builder.in_route_types), 3))
def test_graph_encoding_size(self):
"""Test the size of the encoded graph."""
schema, test_graph = self.build_loop_graph()
builder = automaton_builder.AutomatonBuilder(schema)
encoded_graph, graph_meta = builder.encode_graph(test_graph, as_jax=False)
# Graph metadata should match our graph's actual size
self.assertEqual(graph_meta.num_nodes, 4)
self.assertEqual(graph_meta.num_input_tagged_nodes, 6)
# Nonzero entries should match the number of possible transitions
# Initial transition counts each NODE once, so each A node has 2 and each
# B node has 4 outgoing transitions
self.assertEqual(encoded_graph.initial_to_in_tagged.values.shape[0], 12)
# Normal transitions count each input-tagged node once, so each A node has
# 2*2=4 and each B node has 1*4=4 outgoing transitions
self.assertEqual(encoded_graph.in_tagged_to_in_tagged.values.shape[0], 16)
def test_transition_all_ones(self):
"""Test the transition matrix of an all-ones routing parameter vector."""
schema, test_graph = self.build_loop_graph()
builder = automaton_builder.AutomatonBuilder(schema)
encoded_graph, graph_meta = builder.encode_graph(test_graph, as_jax=False)
# The | |
import re
import unicodedata
import simplejson as json
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
import clients.models as Clients
import directory.models as directory
import slog.models as slog
import users.models as umodels
import cases.models as cases
from api.models import Application
from laboratory.utils import strdate
from users.models import DoctorProfile
import contracts.models as contracts
from appconf.manager import SettingManager
class FrequencyOfUseResearches(models.Model):
    """Counter of how often a given user (doctor) assigns a given research."""
    research = models.ForeignKey(directory.Researches, on_delete=models.CASCADE)
    user = models.ForeignKey(DoctorProfile, db_index=True, on_delete=models.CASCADE)
    cnt = models.IntegerField(default=0)

    def __str__(self):
        return str(self.user) + " - " + str(self.research) + ", " + str(self.cnt)

    @staticmethod
    def inc(research, user):
        """Increment the usage counter for (research, user), creating the row on first use."""
        # get_or_create replaces the exists()/save()/get() sequence: one query
        # fewer and no race window between the existence check and creation.
        # The field default (cnt=0) applies on creation.
        f, _ = FrequencyOfUseResearches.objects.get_or_create(research=research, user=user)
        f.cnt += 1
        f.save()

    @staticmethod
    def reset(user):
        """Reset all usage counters of the given user to zero."""
        # Per-instance save() is kept (instead of queryset.update()) so any
        # model save() behavior keeps applying to each row.
        for f in FrequencyOfUseResearches.objects.filter(user=user):
            f.cnt = 0
            f.save()

    class Meta:
        verbose_name = 'Частота назначения исследований пользователем'
        verbose_name_plural = 'Частоты назначения исследований пользователем'
class CustomResearchOrdering(models.Model):
    """Per-user ordering weight of a research in the assignment UI."""
    research = models.ForeignKey(directory.Researches, on_delete=models.CASCADE)
    user = models.ForeignKey(DoctorProfile, db_index=True, on_delete=models.CASCADE)
    weight = models.IntegerField(default=0)

    def __str__(self):
        return "{} - {}, {}".format(self.user, self.research, self.weight)

    class Meta:
        verbose_name = 'Пользовательская сортировка исследований'
        verbose_name_plural = 'Пользовательские сортировки исследований'
class TubesRegistration(models.Model):
    """
    Tubes (containers) with material taken for researches.
    """
    id = models.AutoField(primary_key=True, db_index=True)
    type = models.ForeignKey(directory.ReleationsFT, help_text='Тип ёмкости', on_delete=models.CASCADE)
    time_get = models.DateTimeField(null=True, blank=True, help_text='Время взятия материала', db_index=True)
    doc_get = models.ForeignKey(DoctorProfile, null=True, blank=True, db_index=True, related_name='docget', help_text='Кто взял материал', on_delete=models.SET_NULL)
    time_recive = models.DateTimeField(null=True, blank=True, help_text='Время получения материала', db_index=True)
    doc_recive = models.ForeignKey(DoctorProfile, null=True, blank=True, db_index=True, related_name='docrecive', help_text='Кто получил материал', on_delete=models.SET_NULL)
    barcode = models.CharField(max_length=255, null=True, blank=True, help_text='Штрих-код или номер ёмкости', db_index=True)
    notice = models.CharField(max_length=512, default="", blank=True, help_text='Замечания', db_index=True)
    daynum = models.IntegerField(default=0, blank=True, null=True,
                                 help_text='Номер принятия ёмкости среди дня в лаборатории')

    def __str__(self):
        return "%d %s (%s, %s) %s" % (self.pk, self.type.tube.title, self.doc_get, self.doc_recive, self.notice)

    def day_num(self, doc, num):
        """
        Assign the per-day laboratory number to this tube, setting the
        "taken" and "received" statuses on the way when they are missing.

        :param doc: profile receiving the tube in the laboratory
        :param num: number to assign when the tube has none yet
        :return: dict with the day number ("n") and whether the tube was just
                 received ("new")
        """
        if not self.getstatus():
            iss = Issledovaniya.objects.filter(tubes=self)
            if iss.count():
                self.set_get(iss[0].napravleniye.doc)
        new_t = False
        if not self.rstatus():
            new_t = True
            self.set_r(doc)
        if not self.daynum:
            # The previous auto-numbering implementation was kept here as a
            # commented-out block; it has been removed as dead code.
            self.daynum = num
            self.save()

        return {"n": self.daynum, "new": new_t}

    def set_get(self, doc_get):
        """
        Mark the material as taken.

        :param doc_get: doctor/nurse who took the material
        :return: None
        """
        # `timezone` is already imported at module level; the redundant local
        # import was removed.
        self.time_get = timezone.now()
        self.doc_get = doc_get
        # NOTE(review): the integer primary key is stored into the CharField
        # barcode; it is coerced to a string on save — confirm intended.
        self.barcode = self.pk
        self.save()
        slog.Log(key=str(self.pk), type=9, body="", user=doc_get).save()

    def getstatus(self, one_by_one=False):
        """
        Return whether the material has been taken.
        """
        return (self.time_get is not None and self.doc_get is not None) or (self.type.receive_in_lab and one_by_one)

    def set_r(self, doc_r):
        """
        Mark the material as received by the laboratory.

        :param doc_r: doctor/lab technician who received the material
        :return: None
        """
        if not self.getstatus():
            self.set_get(doc_r)

        self.time_recive = timezone.now()
        self.doc_recive = doc_r
        self.save()
        slog.Log(key=str(self.pk), user=doc_r, type=11,
                 body=json.dumps({"Замечание не приёма": self.notice}) if self.notice != "" else "").save()

    def set_notice(self, doc_r, notice):
        """
        Attach a notice (refusal remark) to the tube; a non-empty notice also
        clears the receive status.

        :param doc_r: doctor/lab technician who left the notice
        :param notice: notice text
        :return: None
        """
        if notice != "":
            self.doc_recive = None
            self.time_recive = None
        self.notice = notice
        self.save()
        slog.Log(key=str(self.pk), user=doc_r, type=12,
                 body=json.dumps({"Замечание не приёма": self.notice})).save()

    def clear_notice(self, doc_r):
        """Remove the notice from the tube, logging the removed text."""
        old_notice = self.notice
        if old_notice == "":
            return
        self.notice = ""
        self.save()
        slog.Log(key=str(self.pk), user=doc_r, type=4000,
                 body=json.dumps({"Удалённое замечание": old_notice})).save()

    def rstatus(self, check_not=False):
        """
        Return whether the material has been received by the laboratory.

        :param check_not: when True, a tube with a non-empty notice does not count
        :return: receive status as bool
        """
        # Direct boolean expression replaces the `if ...: return True` chain.
        return bool(self.doc_recive and (not check_not or self.notice == ""))

    def getbc(self):
        """
        Return the barcode when it is numeric, the primary key otherwise.
        """
        if self.barcode and self.barcode.isnumeric():
            return self.barcode
        return self.id

    class Meta:
        verbose_name = 'Ёмкость для направления'
        verbose_name_plural = 'Ёмкости для направлений'
class IstochnikiFinansirovaniya(models.Model):
    """
    Funding sources.
    """
    title = models.CharField(max_length=511, help_text='Название')
    active_status = models.BooleanField(default=True, help_text='Статус активности')
    base = models.ForeignKey(Clients.CardBase, help_text='База пациентов, к которой относится источник финансирования', db_index=True, on_delete=models.CASCADE)
    hide = models.BooleanField(default=False, blank=True, help_text="Скрытие")
    rmis_auto_send = models.BooleanField(default=True, blank=True, help_text="Автоматическая отправка в РМИС")
    default_diagnos = models.CharField(max_length=36, help_text="Диагноз по умолчанию", default="", blank=True)
    contracts = models.ForeignKey(contracts.Contract, null=True, blank=True, default='', on_delete=models.CASCADE)
    order_weight = models.SmallIntegerField(default=0)

    def __str__(self):
        return "{} {} (скрыт: {})".format(self.base, self.title, self.hide)

    def get_price_modifier(self):
        """
        Return the (price, modifier) pair for this funding source, or None.

        Sources listed in the "price_contract" setting resolve through the
        funding source's own contract; sources listed in "price_company"
        resolve through the company's contract.
        """
        price_modifier = None
        price_contract = set(SettingManager.get("price_contract").split(','))
        price_company = set(SettingManager.get("price_company").split(','))
        if self.title.upper() in price_contract:
            contract_l = IstochnikiFinansirovaniya.objects.values_list('contracts_id').filter(pk=self.pk).first()
            # .first() returns None when no row matched — guard before
            # indexing to avoid a TypeError on `None[0]`.
            if contract_l and contract_l[0]:
                price_modifier = contracts.Contract.objects.values_list('price', 'modifier').get(id=contract_l[0])
        elif self.title.upper() in price_company:
            contract_l = contracts.Company.objects.values_list('contracts_id').filter(pk=self.pk).first()
            if contract_l and contract_l[0]:
                price_modifier = contracts.Contract.objects.values_list('price', 'modifier').get(id=contract_l[0])
        return price_modifier

    class Meta:
        verbose_name = 'Источник финансирования'
        verbose_name_plural = 'Источники финансирования'
class Diagnoses(models.Model):
    """Diagnosis dictionary entry."""
    # Kind of the dictionary entry: range, group, or value.
    M = (
        (0, "Диапазон"),
        (1, "Группа"),
        (2, "Значение"),
    )
    code = models.CharField(max_length=255, db_index=True)
    title = models.TextField(db_index=True)
    # presumably the code of the parent entry (nullable) — verify against the
    # code that populates this table.
    parent = models.CharField(max_length=255, null=True, db_index=True)
    d_type = models.CharField(max_length=255, db_index=True)
    m_type = models.IntegerField(choices=M, db_index=True)
    def __str__(self):
        return "{} {}".format(self.code, self.title)
class RMISServiceInactive(models.Model):
    """Per-service flag marking an RMIS service as enabled/disabled."""
    rmis_id = models.CharField(max_length=30, primary_key=True)
    enabled = models.BooleanField(default=True, blank=True)

    @staticmethod
    def checkInactive(serviceId, enabled):
        """Create or update the record for the given RMIS service id."""
        # Fetch one instance instead of indexing the queryset twice: in the
        # original code `r[0].enabled = enabled` and `r[0].save()` each
        # re-evaluated the queryset and produced two DIFFERENT instances, so
        # the assignment was lost and the unchanged row was saved back.
        r = RMISServiceInactive.objects.filter(rmis_id=serviceId).first()
        if r is None and enabled:
            RMISServiceInactive(rmis_id=serviceId, enabled=enabled).save()
        elif r is not None and r.enabled != enabled:
            r.enabled = enabled
            r.save()

    @staticmethod
    def isInactive(serviceId):
        """Return whether the given RMIS service is flagged enabled (inactive)."""
        r = RMISServiceInactive.objects.filter(rmis_id=serviceId).first()
        return r is not None and r.enabled

    def __str__(self):
        return "{} {}".format(self.rmis_id, self.enabled)
class RMISOrgs(models.Model):
    """Organisation referenced by its RMIS identifier."""
    # The primary key mirrors the RMIS id and is not editable locally.
    rmis_id = models.IntegerField(primary_key=True, editable=False)
    title = models.CharField(max_length=255)
    def __str__(self):
        return self.title
class Napravleniya(models.Model):
"""
Таблица направлений
"""
data_sozdaniya = models.DateTimeField(auto_now_add=True, help_text='Дата создания направления', db_index=True)
visit_date = models.DateTimeField(help_text='Дата посещения по направлению', db_index=True, default=None, blank=True, null=True)
visit_who_mark = models.ForeignKey(DoctorProfile, related_name="visit_who_mark", default=None, blank=True, null=True, help_text='Профиль, который отметил посещение', on_delete=models.SET_NULL)
diagnos = models.CharField(max_length=511, help_text='Диагноз', default='', blank=True)
vich_code = models.CharField(max_length=12, help_text='Код для направления на СПИД', default='', blank=True)
client = models.ForeignKey(Clients.Card, db_index=True, help_text='Пациент', on_delete=models.CASCADE)
doc = models.ForeignKey(DoctorProfile, db_index=True, null=True, help_text='Лечащий врач', on_delete=models.CASCADE)
istochnik_f = models.ForeignKey(IstochnikiFinansirovaniya, blank=True, null=True, help_text='Источник финансирования', on_delete=models.CASCADE)
history_num = models.CharField(max_length=255, default=None, blank=True, null=True, help_text='Номер истории')
rmis_case_id = models.CharField(max_length=255, default=None, blank=True, null=True, help_text='РМИС: Номер случая')
rmis_hosp_id = models.CharField(max_length=255, default=None, blank=True, null=True, help_text='РМИС: ЗОГ')
rmis_resend_services = models.BooleanField(default=False, blank=True, help_text='Переотправить услуги?', db_index=True)
doc_who_create = models.ForeignKey(DoctorProfile, default=None, blank=True, null=True, related_name="doc_who_create", help_text='Создатель направления', on_delete=models.SET_NULL)
cancel = models.BooleanField(default=False, blank=True, help_text='Отмена направления')
rmis_number = models.CharField(max_length=15, default=None, blank=True, null=True, db_index=True, help_text='ID направления в РМИС')
result_rmis_send = models.BooleanField(default=False, blank=True, help_text='Результат отправлен в РМИС?')
imported_from_rmis = models.BooleanField(default=False, blank=True, db_index=True, help_text='Направление создано на основе направления из РМИС?')
imported_org = models.ForeignKey(RMISOrgs, default=None, blank=True, null=True, on_delete=models.SET_NULL)
imported_directions_rmis_send = models.BooleanField(default=False, blank=True, help_text='Для направления из РМИС отправлен бланк')
force_rmis_send = models.BooleanField(default=False, blank=True, help_text='Подтверждение ручной отправки в РМИС')
forcer_rmis_send = models.ForeignKey(DoctorProfile, default=None, blank=True, null=True, related_name="doc_forcer_rmis_send", help_text='Исполнитель подтверждения отправки в РМИС', on_delete=models.SET_NULL)
case = models.ForeignKey(cases.Case, default=None, blank=True, null=True, help_text='Случай обслуживания', on_delete=models.SET_NULL)
num_contract = models.CharField(max_length=25, default=None, blank=True, null=True, db_index=True, help_text='ID направления в РМИС')
    def __str__(self):
        # Summary with patient, doctors and RMIS identifiers.
        return "%d для пациента %s (врач %s, выписал %s, %s, %s, %s)" % (
            self.pk, self.client.individual.fio(), self.doc.get_fio(), self.doc_who_create, self.rmis_number, self.rmis_case_id, self.rmis_hosp_id)
def get_instructions(self):
r = []
for i in Issledovaniya.objects.filter(napravleniye=self).exclude(research__instructions=""):
r.append({"pk": i.research.pk, "title": i.research.title, "text": i.research.instructions})
return r
@staticmethod
def gen_napravleniye(client_id: object, doc: object, istochnik_f: object, diagnos: object, historynum: object, doc_current: object, ofname_id: object, ofname: object,
issledovaniya: object = None,
save: object = True,
for_rmis: object = None,
rmis_data: object = None) -> object:
"""
Генерация направления
:param client_id: id пациента
:param doc: л/врач
:param istochnik_f: источник финансирования
:param diagnos: диагноз
:param patient_type: тип пациента (напр; поликлиника/стационар)
:param issledovaniya: исследования (reserved)
:return: созданое направление
"""
if rmis_data is None:
rmis_data = {}
if issledovaniya is None:
pass
dir = Napravleniya(client=Clients.Card.objects.get(pk=client_id),
doc=doc if not for_rmis else None,
istochnik_f=istochnik_f,
data_sozdaniya=timezone.now(),
diagnos=diagnos, cancel=False)
if for_rmis:
dir.rmis_number = rmis_data.get("rmis_number")
dir.imported_from_rmis = True
dir.imported_org = RMISOrgs.objects.filter(rmis_id=rmis_data.get("imported_org", -1)).first()
dir.doc = None
dir.doc_who_create = doc_current
else:
if historynum != "":
dir.history_num = historynum
if ofname_id > -1 and ofname:
dir.doc = ofname
dir.doc_who_create = doc_current
if save:
dir.save()
return dir
@staticmethod
def set_of_name(dir: object, doc_current: object, ofname_id: object, ofname: object) -> object:
"""
Проверка на выписывание направления от имени другого врача и установка этого имени в направление, если необходимо
:rtype: Null
:param dir: направление
:param doc_current: текущий врач, выписавший направление
:param ofname_id: id врача, от которого выписывается направление
:param ofname: объект с профилем врача, от которого выписывается направление
:return: Null
"""
if ofname_id > -1 and ofname:
dir.doc = ofname
dir.doc_who_create = doc_current
dir.save()
@staticmethod
def gen_napravleniya_by_issledovaniya(client_id, diagnos, finsource, history_num, | |
# Spices, anise seed
2003: ["Basil", "dried"], # Spices, basil, dried
2004: ["Bay leaf"], # Spices, bay leaf
2005: ["Caraway seed"], # Spices, caraway seed
2006: ["Cardamom"], # Spices, cardamom
2007: ["Celery seed"], # Spices, celery seed
2008: ["Chervil", "dried"], # Spices, chervil, dried
2009: ["Chili powder"], # Spices, chili powder
2010: ["Cinnamon", "ground"], # Spices, cinnamon, ground
2011: ["Cloves", "ground"], # Spices, cloves, ground
2012: ["Coriander leaf", "dried"], # Spices, coriander leaf, dried
2013: ["Coriander seed"], # Spices, coriander seed
2014: ["Cumin seed"], # Spices, cumin seed
2015: ["Curry powder"], # Spices, curry powder
2016: ["Dill seed"], # Spices, dill seed
2017: ["Dill weed", "dried"], # Spices, dill weed, dried
2018: ["Fennel seed"], # Spices, fennel seed
2019: ["Fenugreek seed"], # Spices, fenugreek seed
2020: ["Garlic powder"], # Spices, garlic powder
2021: ["Ginger", "ground"], # Spices, ginger, ground
2022: ["Mace", "ground"], # Spices, mace, ground
2023: ["Marjoram", "dried"], # Spices, marjoram, dried
2024: ["Mustard seed", "ground"], # Spices, mustard seed, ground
2025: ["Nutmeg", "ground"], # Spices, nutmeg, ground
2026: ["Onion powder"], # Spices, onion powder
2027: ["Oregano", "dried"], # Spices, oregano, dried
2028: ["Paprika", "dried"], # Spices, paprika
2029: ["Parsley", "dried"], # Spices, parsley, dried
2030: ["Black pepper", "", "Pepper"], # Spices, pepper, black
2031: ["Cayenne pepper"], # Spices, pepper, red or cayenne
2032: ["White pepper"], # Spices, pepper, white
2033: ["Poppy seed"], # Spices, poppy seed
2034: ["Poultry seasoning"], # Spices, poultry seasoning
2035: ["Pumpkin pie spice"], # Spices, pumpkin pie spice
2036: ["Rosemary", "dried"], # Spices, rosemary, dried
2037: ["Saffron"], # Spices, saffron
2038: ["Sage", "ground"], # Spices, sage, ground
2039: ["Savory", "ground"], # Spices, savory, ground
2041: ["Tarragon", "dried"], # Spices, tarragon, dried
2042: ["Thyme", "dried"], # Spices, thyme, dried
2043: ["Turmeric", "ground"], # Spices, turmeric, ground
2044: ["Basil", "fresh"], # Basil, fresh
2045: ["Dill weed", "fresh"], # Dill weed, fresh
2046: ["Mustard"], # Mustard, prepared, yellow
2047: ["Salt"], # Salt, table
2048: ["Cider vinegar"], # Vinegar, cider
2049: ["Thyme", "fresh"], # Thyme, fresh
2050: ["Vanilla extract"], # Vanilla extract
2051: [], # Vanilla extract, imitation, alcohol
2052: [], # Vanilla extract, imitation, no alcohol
2053: [], # Vinegar, distilled
2054: ["Capers"], # Capers, canned
2055: ["Horseradish"], # Horseradish, prepared
2063: ["Rosemary", "fresh"], # Rosemary, fresh
2064: ["Peppermint", "fresh"], # Peppermint, fresh
2065: ["Spearmint", "fresh", "Mint"], # Spearmint, fresh
2066: ["Spearmint", "dried"], # Spearmint, dried
2068: ["Red wine vinegar"], # Vinegar, red wine
2069: ["Balsamic vinegar"], # Vinegar, balsamic
2074: [], # Seasoning mix, dry, sazon, coriander & annatto
2075: [], # Seasoning mix, dry, taco, original
2076: [], # Seasoning mix, dry, chili, original
3000: [], # Clif Z bar
3001: [], # Babyfood, juice treats, fruit medley, toddler
3002: [], # Babyfood, meat, beef, strained
3003: [], # Babyfood, meat, beef, junior
3005: [], # Babyfood, meat, veal, strained
3007: [], # Babyfood, meat, pork, strained
3008: [], # Babyfood, meat, ham, strained
3009: [], # Babyfood, meat, ham, junior
3010: [], # Babyfood, meat, lamb, strained
3011: [], # Babyfood, meat, lamb, junior
3012: [], # Babyfood, meat, chicken, strained
3013: [], # Babyfood, meat, chicken, junior
3014: [], # Babyfood, meat, chicken sticks, junior
3015: [], # Babyfood, meat, turkey, strained
3016: [], # Babyfood, meat, turkey, junior
3017: [], # Babyfood, meat, turkey sticks, junior
3019: [], # Babyfood, snack, GERBER GRADUATE FRUIT STRIPS, Real Fruit Bars
3021: [], # Babyfood, meat, meat sticks, junior
3022: [], # Babyfood, GERBER, 2nd Foods, apple, carrot and squash, organic
3023: [], # Babyfood, finger snacks, GERBER, GRADUATES, PUFFS, apple and cinnamon
3024: [], # Babyfood, water, bottled, GERBER, without added fluoride
3025: [], # Babyfood, GERBER, 3rd Foods, apple, mango and kiwi
3026: [], # Babyfood, tropical fruit medley
3041: [], # Babyfood, dinner, vegetables and dumplings and beef, strained
3042: [], # Babyfood, dinner, vegetables and dumplings and beef, junior
3043: [], # Babyfood, dinner, beef lasagna, toddler
3044: [], # Babyfood, dinner, macaroni and tomato and beef, strained
3045: [], # Babyfood, dinner, macaroni and tomato and beef, junior
3046: [], # Babyfood, ravioli, cheese filled, with tomato sauce
3047: [], # Babyfood, dinner, beef noodle, strained
3048: [], # Babyfood, macaroni and cheese, toddler
3049: [], # Babyfood, dinner, beef and rice, toddler
3050: [], # Babyfood, dinner, spaghetti and tomato and meat, junior
3051: [], # Babyfood, dinner, spaghetti and tomato and meat, toddler
3053: [], # Babyfood, dinner, vegetables and beef, strained
3054: [], # Babyfood, dinner, vegetables and beef, junior
3055: [], # Babyfood, dinner, beef with vegetables
3067: [], # Babyfood, dinner, vegetables and lamb, junior
3068: [], # Babyfood, dinner, chicken noodle, strained
3069: [], # Babyfood, dinner, chicken noodle, junior
3070: [], # Babyfood, dinner, chicken soup, strained
3072: [], # Babyfood, dinner, chicken stew, toddler
3073: [], # Babyfood, dinner, vegetables chicken, strained
3075: [], # Babyfood, dinner, vegetables, noodles and chicken, strained
3076: [], # Babyfood, dinner, vegetables, noodles and chicken, junior
3077: [], # Babyfood, dinner, pasta with vegetables
3079: [], # Babyfood, dinner, vegetables and noodles and turkey, strained
3081: [], # Babyfood, dinner, vegetables and noodles and turkey, junior
3082: [], # Babyfood, dinner, turkey and rice, strained
3083: [], # Babyfood, dinner, turkey and rice, junior
3084: [], # Babyfood, dinner, vegetables and turkey, strained
3085: [], # Babyfood, dinner, vegetables and turkey, junior
3089: [], # Babyfood, dinner, macaroni and cheese, strained
3090: [], # Babyfood, dinner, macaroni and cheese, junior
3091: [], # Babyfood, vegetables, green beans, strained
3092: [], # Babyfood, vegetables, green beans, junior
3093: [], # Babyfood, green beans, dices, toddler
3096: [], # Babyfood, vegetable, green beans and potatoes
3098: [], # Babyfood, vegetables, beets, strained
3099: [], # Babyfood, vegetables, carrots, strained
3100: [], # Babyfood, vegetables, carrots, junior
3104: [], # Babyfood, vegetables, squash, strained
3105: [], # Babyfood, vegetables, squash, junior
3108: [], # Babyfood, vegetables, sweet potatoes strained
3109: [], # Babyfood, vegetables, sweet potatoes, junior
3112: [], # Babyfood, potatoes, toddler
3113: [], # Babyfood, cereal, Oatmeal, dry, GERBER, SINGLE GRAIN, fortified
3114: [], # Babyfood, vegetable, butternut squash and corn
3115: [], # Babyfood, apples, dices, toddler
3116: [], # Babyfood, fruit, applesauce, strained
3117: [], # Babyfood, fruit, applesauce, junior
3118: [], # Babyfood, fruit, apricot with tapioca, strained
3119: [], # Babyfood, vegetables, corn, creamed, strained
3120: [], # Babyfood, vegetables, corn, creamed, junior
3121: [], # Babyfood, vegetables, peas, strained
3122: [], # Babyfood, peas, dices, toddler
3127: [], # Babyfood, vegetables, spinach, creamed, strained
3128: [], # Babyfood, fruit, apricot with tapioca, junior
3129: [], # Babyfood, fruit, bananas with tapioca, strained
3130: [], # Babyfood, fruit, peaches, strained
3131: [], # Babyfood, fruit, peaches, junior
3132: [], # Babyfood, fruit, pears, strained
3133: [], # Babyfood, fruit, pears, junior
3134: [], # Babyfood, fruit, plums with tapioca, without ascorbic acid, strained
3135: [], # Babyfood, fruit, plums with tapioca, without ascorbic acid, junior
3136: [], # Babyfood, fruit, prunes with tapioca, without ascorbic acid, strained
3137: [], # Babyfood, fruit, prunes with tapioca, without ascorbic acid, junior
3139: [], # Babyfood, prunes, without vitamin c, strained
3140: [], # Babyfood, fruit dessert, mango with tapioca
3141: [], # Babyfood, pears, dices, toddler
3142: [], # Babyfood, fruit, applesauce and apricots, strained
3143: [], # Babyfood, fruit, applesauce and apricots, junior
3144: [], # Babyfood, fruit, applesauce and cherries, strained
3145: [], # Babyfood, fruit, applesauce and cherries, junior
3147: [], # Babyfood, fruit, applesauce with banana, junior
3150: [], # Babyfood, fruit, applesauce and pineapple, strained
3151: [], # Babyfood, fruit, applesauce and pineapple, junior
3152: [], # Babyfood, fruit, apple and raspberry, strained
3153: [], # Babyfood, fruit, apple and | |
# Repository: aurelienpierre/colour
"""
Image Input / Output Utilities
==============================
Defines the image related input / output utilities objects.
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass, field
from colour.hints import (
Any,
ArrayLike,
Boolean,
DTypeNumber,
List,
Literal,
NDArray,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float_array,
as_int_array,
attest,
is_openimageio_installed,
filter_kwargs,
optional,
required,
usage_warning,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"BitDepth_Specification",
"ImageAttribute_Specification",
"convert_bit_depth",
"read_image_OpenImageIO",
"read_image_Imageio",
"READ_IMAGE_METHODS",
"read_image",
"write_image_OpenImageIO",
"write_image_Imageio",
"WRITE_IMAGE_METHODS",
"write_image",
]
@dataclass(frozen=True)
class BitDepth_Specification:
    """
    Define a bit depth specification.

    Parameters
    ----------
    name
        Attribute name.
    numpy
        Object representing the *Numpy* bit depth.
    openimageio
        Object representing the *OpenImageIO* bit depth; ``None`` when
        *OpenImageIO* is not installed.
    """
    name: str
    numpy: Type[DTypeNumber]
    openimageio: Any
@dataclass
class ImageAttribute_Specification:
    """
    Define an image specification attribute.

    Parameters
    ----------
    name
        Attribute name.
    value
        Attribute value.
    type_
        Attribute type as an *OpenImageIO* :class:`TypeDesc` class instance.
    """
    name: str
    value: Any
    # "OpenImageIO" is only imported when available (see the conditional
    # block below); referencing it in the annotation is safe because
    # "from __future__ import annotations" makes annotations lazy strings.
    type_: Optional[OpenImageIO.TypeDesc] = field(  # type: ignore[name-defined] # noqa
        default_factory=lambda: None
    )
# Bit depth specifications supported for reading and writing images. When
# *OpenImageIO* is available, each specification also carries the matching
# *OpenImageIO* type constant; otherwise that slot is ``None``.
if is_openimageio_installed():  # pragma: no cover
    from OpenImageIO import UINT8, UINT16, HALF, FLOAT, DOUBLE

    MAPPING_BIT_DEPTH: CaseInsensitiveMapping = CaseInsensitiveMapping(
        {
            "uint8": BitDepth_Specification("uint8", np.uint8, UINT8),
            "uint16": BitDepth_Specification("uint16", np.uint16, UINT16),
            "float16": BitDepth_Specification("float16", np.float16, HALF),
            "float32": BitDepth_Specification("float32", np.float32, FLOAT),
            "float64": BitDepth_Specification("float64", np.float64, DOUBLE),
        }
    )
    # "float128" only exists on platforms whose Numpy exposes extended
    # precision; *OpenImageIO* has no wider type than DOUBLE.
    if hasattr(np, "float128"):  # pragma: no cover
        MAPPING_BIT_DEPTH["float128"] = BitDepth_Specification(
            "float128", np.float128, DOUBLE  # type: ignore[arg-type]
        )
else:  # pragma: no cover
    MAPPING_BIT_DEPTH: CaseInsensitiveMapping = (  # type: ignore[no-redef]
        CaseInsensitiveMapping(
            {
                "uint8": BitDepth_Specification("uint8", np.uint8, None),
                "uint16": BitDepth_Specification("uint16", np.uint16, None),
                "float16": BitDepth_Specification("float16", np.float16, None),
                "float32": BitDepth_Specification("float32", np.float32, None),
                "float64": BitDepth_Specification("float64", np.float64, None),
            }
        )
    )
    if hasattr(np, "float128"):  # pragma: no cover
        MAPPING_BIT_DEPTH["float128"] = BitDepth_Specification(
            "float128", np.float128, None  # type: ignore[arg-type]
        )
def convert_bit_depth(
    a: ArrayLike,
    bit_depth: Literal[
        "uint8", "uint16", "float16", "float32", "float64", "float128"
    ] = "float32",
) -> NDArray:
    """
    Convert given array to given bit depth, the current bit depth of the array
    is used to determine the appropriate conversion path.

    Parameters
    ----------
    a
        Array to convert to given bit depth.
    bit_depth
        Bit depth.

    Returns
    -------
    :class`numpy.ndarray`
        Converted array.

    Examples
    --------
    >>> a = np.array([0.0, 0.5, 1.0])
    >>> convert_bit_depth(a, 'uint8')
    array([  0, 128, 255], dtype=uint8)
    >>> convert_bit_depth(a, 'uint16')
    array([    0, 32768, 65535], dtype=uint16)
    >>> convert_bit_depth(a, 'float16')
    array([ 0. ,  0.5,  1. ], dtype=float16)
    >>> a = np.array([0, 128, 255], dtype=np.uint8)
    >>> convert_bit_depth(a, 'uint16')
    array([    0, 32896, 65535], dtype=uint16)
    >>> convert_bit_depth(a, 'float32')  # doctest: +ELLIPSIS
    array([ 0.        ,  0.501960...,  1.        ], dtype=float32)
    """
    a = np.asarray(a)

    bit_depths = ", ".join(sorted(MAPPING_BIT_DEPTH.keys()))

    # Membership must be tested against the mapping KEYS: the previous code
    # tested membership in the joined "bit_depths" string, which accepted any
    # substring of it (e.g. "int8" passed because it is a substring of
    # "uint8") and then failed later with a KeyError.
    attest(
        bit_depth in MAPPING_BIT_DEPTH,
        f'Incorrect bit depth was specified, it must be one of: "{bit_depths}"!',
    )

    attest(
        str(a.dtype) in MAPPING_BIT_DEPTH,
        f'Image bit depth must be one of: "{bit_depths}"!',
    )

    source_dtype = str(a.dtype)
    target_dtype = MAPPING_BIT_DEPTH[bit_depth].numpy

    if source_dtype == "uint8":
        if bit_depth == "uint16":
            # 255 * 257 == 65535, so the full range maps exactly.
            a = (a * 257).astype(target_dtype)
        elif bit_depth in ("float16", "float32", "float64", "float128"):
            a = (a / 255).astype(target_dtype)
    elif source_dtype == "uint16":
        if bit_depth == "uint8":
            a = (a / 257).astype(target_dtype)
        elif bit_depth in ("float16", "float32", "float64", "float128"):
            a = (a / 65535).astype(target_dtype)
    elif source_dtype in ("float16", "float32", "float64", "float128"):
        if bit_depth == "uint8":
            a = np.around(a * 255).astype(target_dtype)
        elif bit_depth == "uint16":
            a = np.around(a * 65535).astype(target_dtype)
        elif bit_depth in ("float16", "float32", "float64", "float128"):
            a = a.astype(target_dtype)

    return a  # type: ignore[return-value]
@required("OpenImageIO")
def read_image_OpenImageIO(
    path: str,
    bit_depth: Literal[
        "uint8", "uint16", "float16", "float32", "float64", "float128"
    ] = "float32",
    attributes: Boolean = False,
) -> Union[NDArray, Tuple[NDArray, List]]:  # noqa: D405,D410,D407,D411
    """
    Read the image at given path using *OpenImageIO*.
    Parameters
    ----------
    path
        Image path.
    bit_depth
        Returned image bit depth, the bit depth conversion behaviour is driven
        directly by *OpenImageIO*, this definition only converts to the
        relevant data type after reading.
    attributes
        Whether to return the image attributes.
    Returns
    -------
    :class:`numpy.ndarray` or :class:`tuple`
        Image data or tuple of image data and list of
        :class:`colour.io.ImageAttribute_Specification` class instances.
    Notes
    -----
    - For convenience, single channel images are squeezed to 2D arrays.
    Examples
    --------
    >>> import os
    >>> import colour
    >>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
    ...                     'CMS_Test_Pattern.exr')
    >>> image = read_image_OpenImageIO(path)  # doctest: +SKIP
    """
    from OpenImageIO import ImageInput
    path = str(path)
    bit_depth_specification = MAPPING_BIT_DEPTH[bit_depth]
    image = ImageInput.open(path)
    # Fix: guarantee the file handle is released even when reading the
    # specification or pixel data raises.
    try:
        specification = image.spec()
        shape = (
            specification.height,
            specification.width,
            specification.nchannels,
        )
        # OpenImageIO performs the bit depth conversion on read; only the
        # numpy dtype conversion happens below.
        image_data = image.read_image(bit_depth_specification.openimageio)
    finally:
        image.close()
    # Squeeze so single channel images collapse to 2D arrays (see Notes).
    data = np.squeeze(
        np.array(image_data, dtype=bit_depth_specification.numpy).reshape(
            shape
        )
    )
    if attributes:
        extra_attributes = [
            ImageAttribute_Specification(
                attribute.name, attribute.value, attribute.type
            )
            for attribute in specification.extra_attribs
        ]
        return data, extra_attributes
    return data
def read_image_Imageio(
    path: str,
    bit_depth: Literal[
        "uint8", "uint16", "float16", "float32", "float64", "float128"
    ] = "float32",
    **kwargs: Any,
) -> NDArray:
    """
    Read the image at given path using *Imageio*.
    Parameters
    ----------
    path
        Image path.
    bit_depth
        Returned image bit depth, the image data is converted with
        :func:`colour.io.convert_bit_depth` definition after reading the
        image.
    Other Parameters
    ----------------
    kwargs
        Keywords arguments.
    Returns
    -------
    :class:`numpy.ndarray`
        Image data.
    Notes
    -----
    - For convenience, single channel images are squeezed to 2D arrays.
    Examples
    --------
    >>> import os
    >>> import colour
    >>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
    ...                     'CMS_Test_Pattern.exr')
    >>> image = read_image_Imageio(path)
    >>> image.shape  # doctest: +SKIP
    (1267, 1274, 3)
    >>> image.dtype
    dtype('float32')
    """
    from imageio import imread
    # Squeeze single-channel images to 2D, then convert the bit depth.
    raw = imread(path, **kwargs)
    return convert_bit_depth(np.squeeze(raw), bit_depth)
# Registry mapping a (case-insensitive) method name to its reader definition;
# consumed by :func:`read_image` below.
READ_IMAGE_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
    {
        "Imageio": read_image_Imageio,
        "OpenImageIO": read_image_OpenImageIO,
    }
)
READ_IMAGE_METHODS.__doc__ = """
Supported image read methods.
"""
def read_image(
    path: str,
    bit_depth: Literal[
        "uint8", "uint16", "float16", "float32", "float64", "float128"
    ] = "float32",
    method: Union[Literal["Imageio", "OpenImageIO"], str] = "OpenImageIO",
    **kwargs: Any,
) -> NDArray:  # noqa: D405,D407,D410,D414
    """
    Read the image at given path using given method.
    Parameters
    ----------
    path
        Image path.
    bit_depth
        Returned image bit depth, for the *Imageio* method, the image data is
        converted with :func:`colour.io.convert_bit_depth` definition after
        reading the image, for the *OpenImageIO* method, the bit depth
        conversion behaviour is driven directly by the library, this definition
        only converts to the relevant data type after reading.
    method
        Read method, i.e. the image library used for reading images.
    Other Parameters
    ----------------
    attributes
        {:func:`colour.io.read_image_OpenImageIO`},
        Whether to return the image attributes.
    Returns
    -------
    :class:`numpy.ndarray`
        Image data.
    Notes
    -----
    - If the given method is *OpenImageIO* but the library is not available
      reading will be performed by *Imageio*.
    - If the given method is *Imageio*, ``kwargs`` is passed directly to the
      wrapped definition.
    - For convenience, single channel images are squeezed to 2D arrays.
    Examples
    --------
    >>> import os
    >>> import colour
    >>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
    ...     'CMS_Test_Pattern.exr')
    >>> image = read_image(path)
    >>> image.shape # doctest: +SKIP
    (1267, 1274, 3)
    >>> image.dtype
    dtype('float32')
    """
    method = validate_method(method, READ_IMAGE_METHODS)
    if method == "openimageio":  # pragma: no cover
        # Graceful fallback when the OpenImageIO bindings are missing.
        if not is_openimageio_installed():
            usage_warning(
                '"OpenImageIO" related API features are not available, '
                'switching to "Imageio"!'
            )
            method = "Imageio"
    function = READ_IMAGE_METHODS[method]
    if method == "openimageio":  # pragma: no cover
        # NOTE(review): presumably trims kwargs to the OpenImageIO reader's
        # signature (only ``attributes``) — confirm against filter_kwargs.
        kwargs = filter_kwargs(function, **kwargs)
    return function(path, bit_depth, **kwargs)
@required("OpenImageIO")
def write_image_OpenImageIO(
image: ArrayLike,
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
attributes: Optional[Sequence] = None,
) -> Boolean: # noqa: D405,D407,D410,D411
"""
Write given image at given path using *OpenImageIO*.
Parameters
----------
image
Image data.
path
Image path.
bit_depth
Bit depth to write the image at, the bit depth conversion behaviour is
ruled directly by *OpenImageIO*.
attributes
An array of :class:`colour.io.ImageAttribute_Specification` class
instances used to set attributes of the image.
Returns
-------
:class:`bool`
Definition success.
Examples
--------
Basic image writing:
>>> import os
>>> import colour
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMS_Test_Pattern.exr')
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(colour.__path__[0], 'io', 'tests', 'resources',
... 'CMSTestPattern.tif')
>>> write_image_OpenImageIO(image, path) # doctest: +SKIP
True
Advanced image writing while setting attributes:
>>> compression = ImageAttribute_Specification('Compression', 'none')
>>> write_image_OpenImageIO(image, path, 'uint8', [compression])
... # doctest: +SKIP
True
Writing an "ACES" compliant "EXR" file:
>>> if is_openimageio_installed(): # doctest: +SKIP
... from OpenImageIO import TypeDesc
... chromaticities = (
... 0.7347, 0.2653, 0.0, 1.0, 0.0001, -0.077, 0.32168, 0.33767)
... attributes = [
... ImageAttribute_Specification('acesImageContainerFlag', True),
... ImageAttribute_Specification(
... 'chromaticities', chromaticities, TypeDesc('float[8]')),
... ImageAttribute_Specification('compression', 'none')]
... write_image_OpenImageIO(image, path, attributes=attributes)
| |
import csv
import math
import os
import sys

import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.tools.visualization import plot_histogram

import lib.simulation as sim
from lib.gateSet import *
def run_test(args):
    """Dispatch the sub-tests selected on the parsed command line *args*."""
    if args.adder:
        p = args.adder
        test_adder(p[0], p[1], p[2], args)
    if args.phimod:
        p = args.phimod
        test_ccphiMOD(p[0], p[1], p[2], p[3], args)
    if args.cmult:
        p = args.cmult
        testCMULT(p[0], p[1], p[2], p[3], p[4], args)
    if args.cu:
        p = args.cu
        test_cu(p[0], p[1], p[2], p[3], args)
    if args.nor:
        N, base = args.nor[0], args.nor[1]
        if base == 0:
            # Base 0 means: sweep every candidate base coprime with N.
            for candidate in range(2, N):
                if math.gcd(candidate, N) == 1:
                    shorNormal(N, candidate, args)
        else:
            shorNormal(N, base, args)
    if args.seq:
        p = args.seq
        shorSequential(p[0], p[1], args)
def test_adder_appro(a, b, n, appro, args):
    """Build and run the approximate n-bit adder for a + b and check the sum.

    Args:
        a, b: Operands to add.
        n: Adder register width in qubits.
        appro: Approximation degree forwarded to adder_appro.
        args: Parsed CLI namespace (log/draw/simulation/output flags).
    """
    if args.log:
        # Fix: log under adder_appro/ like every other artifact of this test;
        # the previous 'adder/log' path was a copy-paste from test_adder.
        if not os.path.exists('adder_appro/log'):
            os.makedirs('adder_appro/log')
        path = f'adder_appro/log/a{a}_b{b}_n{n}.log'
        sys.stdout = open(path, 'w')
    qc = adder_appro(a, b, n, appro)
    print("=" * 40)
    print(
        f"Executing adder_appro with a={a}, b={b}, n={n}, appro_deg={appro}...")
    if args.draw:
        figdir = f'./adder_appro/qcfig'
        if not os.path.exists(figdir):
            os.makedirs(figdir)
        figpath = f'./adder_appro/qcfig/a{a}_b{b}_n{n}.png'
        if not os.path.isfile(figpath):
            circuit_drawer(qc, filename=figpath, output='mpl')
    res = sim.mySim(qc, args)
    res_lis = list(res)
    expect_res = a + b
    meas_lis = [int(bits, 2) for bits in res_lis]
    equal_flag = False
    if args.simulation:
        dir_name = args.simulation
        if len(res) != 1:
            raise Exception("The measurement result should be determinisitic!")
        print(f"Expect ans = {expect_res}, Measure res = {meas_lis[0]}")
        if expect_res == meas_lis[0]:
            equal_flag = True
    else:
        # Real hardware is noisy: accept if any observed outcome matches.
        dir_name = 'real'
        for meas in meas_lis:
            print(f"Expect ans = {expect_res}, Measure res = {meas}")
            if meas == expect_res:
                equal_flag = True
    if equal_flag:
        print("Result correct! Adder success!")
    else:
        # Fix: report failures (and likely overflow) instead of printing
        # nothing, mirroring test_adder.
        print("Result wrong! Adder failed!")
        if expect_res >= (2 ** n):
            print("Overflow occurs!")
    if args.output:
        out_dir = f'./adder_appro/result/{dir_name}'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        path = f'./adder_appro/result/{dir_name}/adder{a}_{b}_appro{appro}.png'
        plot_histogram(res, figsize=(10, 10),
                       title=f'adder{a}_{b}_appro{appro}').savefig(path)
    print("=" * 40)
def test_adder(a, b, n, args):
    """Build and run the n-bit adder for a + b and check the measured sum.

    Args:
        a, b: Operands to add.
        n: Adder register width in qubits.
        args: Parsed CLI namespace (log/draw/simulation/output flags).
    """
    if args.log:
        # Redirect stdout so the whole run is captured in a per-case log file.
        if not os.path.exists('adder/log'):
            os.makedirs('adder/log')
        path = f'adder/log/a{a}_b{b}_n{n}.log'
        sys.stdout = open(path, 'w')
    qc = adder(a, b, n)
    print('=========================')
    print(f"Executing adder with a={a}, b={b}, n={n}...")
    if args.draw:
        figdir = f'./adder/qcfig'
        if not os.path.exists(figdir):
            os.makedirs(figdir)
        figpath = f'./adder/qcfig/a{a}_b{b}_n{n}.png'
        if not os.path.isfile(figpath):
            circuit_drawer(qc, filename=figpath, output='mpl')
    res = sim.mySim(qc, args)
    res_lis = list(res)
    expect_res = a + b
    meas_lis = [int(bits, 2) for bits in res_lis]
    equal_flag = False
    if args.simulation:
        dir_name = args.simulation
        if len(res) != 1:
            raise Exception("The measurement result should be determinisitic!")
        print(f"Expect ans = {expect_res}, Measure res = {meas_lis[0]}")
        if expect_res == meas_lis[0]:
            equal_flag = True
    else:
        # Real hardware is noisy: accept if any observed outcome matches.
        dir_name = 'real'
        for meas in meas_lis:
            print(f"Expect ans = {expect_res}, Measure res = {meas}")
            if meas == expect_res:
                equal_flag = True
    if equal_flag:
        print("Result correct! Adder success!")
    else:
        print("Result wrong! Adder failed!")
        if expect_res >= (2 ** n):
            print("Overflow occurs!")
    if args.output:
        # Renamed from `dir`, which shadowed the builtin.
        out_dir = f'./adder/result/{dir_name}'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        path = f'./adder/result/{dir_name}/adder{a}_{b}.png'
        plot_histogram(res, figsize=(10, 10),
                       title=f'adder{a}_{b}').savefig(path)
    print('=========================')
def test_ccphiMOD(n, b, a, N, args, print_qc=False, save_fig=False):
    """Exercise the doubly-controlled phi-adder-mod-N gate.

    Prepares |b> in the Fourier basis, applies ccphiADDmodN with both
    controls set, then checks the measured register equals (a + b) mod N
    and that the ancilla returned to |0>.

    Args:
        n: Base register width; the phi register uses n + 1 qubits.
        b: Initial register value (reduced mod N before loading).
        a: Addend baked into the gate.
        N: Modulus.
        args: CLI namespace forwarded to the simulator backend.
        print_qc: Print the circuit diagram to stdout.
        save_fig: Save the circuit drawing under ./report/.

    Raises:
        Exception: When the result is not deterministic ("Ans trivial"),
            the ancilla is non-zero, or the measured value is wrong.
    """
    print("-" * 40)
    bitlen = n + 1
    expect = (a + b) % N
    print(f'a={a}, b={b}, N={N}, (a+b)mod N={expect} (expect ans)')
    qr_ctrl = QuantumRegister(2, name='ctrl')
    qr_phi = QuantumRegister(bitlen, name='phi')
    qr_ancilla = QuantumRegister(1, name='ancilla')
    cr_phi = ClassicalRegister(bitlen - 1, name='cr_phi')
    cr_ancilla = ClassicalRegister(1, name='cr_ancilla')
    qc = QuantumCircuit(qr_ctrl, qr_phi, qr_ancilla, cr_phi, cr_ancilla)
    gate = ccphiADDmodN(n=n, a=a, b=b, N=N,
                        print_qc=print_qc, save_fig=save_fig)
    qft = myQFT(bitlen, inverse=False)
    iqft = myQFT(bitlen, inverse=True)
    # Set both control qubits so the modular addition is actually applied.
    qc.x(qr_ctrl)
    b = b % N
    b_bin = '{n:0{bit}b}'.format(n=b, bit=bitlen)
    for i in range(bitlen):
        if b_bin[i] == '1':
            qc.x(qr_phi[i])
    # The gate operates in the Fourier basis: QFT before, inverse QFT after.
    qc.append(qft, qargs=qr_phi[:])
    qc.append(gate, qargs=qr_ctrl[:] + qr_phi[:] + qr_ancilla[:])
    qc.append(iqft, qargs=qr_phi[:])
    # Measure MSB-first into the classical register; qr_phi[0] is skipped
    # (presumably the extra overflow qubit — TODO confirm against ccphiADDmodN).
    for i in range(bitlen - 1):
        qc.measure(qr_phi[i + 1], cr_phi[bitlen - i - 2])
    qc.measure(qr_ancilla, cr_ancilla)
    if print_qc:
        print(qc)
    if save_fig:
        circuit_drawer(qc, scale=1.3, output='mpl',
                       filename='./report/ccphiaddmod.png', plot_barriers=False)
    res = sim.mySim(qc, args)
    if len(list(res)) != 1:
        raise Exception("Ans trivial")
    res_num = list(res)[0]
    # Result string is "<ancilla> <phi>" (classical registers are space-separated).
    meas_anc = res_num.split(" ")[0]
    meas_phi = res_num.split(" ")[1]
    if meas_anc == '0':
        print("Ancilla bit correct!")
    else:
        raise Exception("ancilla bit broken!")
    print(f"The expect result is {a}+{b} mod {N} = {expect}")
    print(f"The measurement result is {res_num}={int(meas_phi, 2)}")
    if expect == int(meas_phi, 2):
        print("Measure = Expect, Correct!")
    else:
        raise Exception('wrong ans')
    print("-" * 40)
def testCMULT(n, x, b, a, N, args, print_qc=False, save_fig=False):
    """Exercise the controlled modular multiplier |x>|b> -> |x>|b + a*x mod N>.

    Loads x and b, applies cmult_a_mod_N with the control on, then checks
    that x is unchanged, the ancilla returned to |0>, and the b register
    holds (b + a*x) mod N.

    Args:
        n: Width of the x register; the b register uses n + 1 qubits.
        x: Multiplicand loaded into the x register.
        b: Initial accumulator value (reduced mod N before loading).
        a: Multiplier baked into the gate.
        N: Modulus.
        args: CLI namespace forwarded to the simulator backend.
        print_qc: Print the circuit diagram to stdout.
        save_fig: Save the circuit drawing under ./report/.

    Raises:
        Exception: When x changes, the ancilla is non-zero, or the result
            is wrong without an overflow explanation.
    """
    bitlen = n + 1
    qr_c = QuantumRegister(1, name='c')
    qr_x = QuantumRegister(n, name='x')
    qr_b = QuantumRegister(bitlen, name='b')
    qr_ancilla = QuantumRegister(1, name='ancilla')
    cr_x = ClassicalRegister(n, name='cr_x')
    cr_b = ClassicalRegister(bitlen, name='cr_b')
    cr_ancilla = ClassicalRegister(1, name='cr_ancilla')
    qc = QuantumCircuit(qr_c, qr_x, qr_b, qr_ancilla, cr_x, cr_b, cr_ancilla)
    # Control on so the multiplication is actually applied.
    qc.x(qr_c)
    b = b % N
    x_bin = '{n:0{bit}b}'.format(n=x, bit=n)
    b_bin = '{n:0{bit}b}'.format(n=b, bit=bitlen)
    for i in range(n):
        if x_bin[i] == '1':
            qc.x(qr_x[i])
    for i in range(bitlen):
        if b_bin[i] == '1':
            qc.x(qr_b[i])
    gate = cmult_a_mod_N(n, a, b, N, False, True)
    qc.append(gate, qargs=qr_c[:] + qr_x[:] + qr_b[:] + qr_ancilla[:])
    # Measure MSB-first (classical bits are filled in reverse order).
    for i in range(bitlen):
        qc.measure(qr_b[i], cr_b[bitlen - i - 1])
    for i in range(n):
        qc.measure(qr_x[i], cr_x[n - i - 1])
    if print_qc:
        print(qc)
    if save_fig:
        # Fix: removed a stray dead `pass` statement that preceded this call.
        circuit_drawer(
            qc, scale=0.8, filename='./report/cmult2.png', output='mpl')
    res = sim.mySim(qc, args)
    res_num = list(res)[0]
    # Result string is "<ancilla> <b> <x>" (classical registers space-separated).
    meas_x = res_num.split(" ")[2]
    meas_b = res_num.split(" ")[1]
    meas_ancilla = res_num.split(" ")[0]
    if meas_x == x_bin:
        print("The x remain the same! Correct!")
    else:
        raise Exception("x_change")
    if meas_ancilla == '0':
        print("Ancilla bit correct!")
    else:
        raise Exception("ancilla bit broken!")
    expect = (b + a * x) % N
    print(f"x={x}, b={b}, a={a}, N={N}, b+ax mod N={(b + a * x) % N} ")
    if expect == int(meas_b, 2):
        print("Expect = Measure = {0}".format(expect))
        print("Multiplier correct!")
    elif a * x + b >= 2 ** bitlen:
        print("Expect = Measure = {0}".format(expect))
        print("Overflow occurs! Multiplier error!")
    else:
        raise Exception("Multiplier wrong")
def CMULTexp_latex(n, x, b, a, N, args, print_qc=False, save_fig=False):
    """Build the controlled-multiplier circuit and save its drawing.

    Mirrors testCMULT's circuit construction but only renders the figure
    to ./report/cmult3.png; nothing is simulated or checked. The args,
    print_qc and save_fig parameters are accepted for signature parity
    with testCMULT but are currently unused.
    """
    bitlen = n + 1
    qr_c = QuantumRegister(1, name='c')
    qr_x = QuantumRegister(n, name='x')
    qr_b = QuantumRegister(bitlen, name='b')
    qr_ancilla = QuantumRegister(1, name='ancilla')
    cr_b = ClassicalRegister(bitlen, name='cr_b')
    qc = QuantumCircuit(qr_c, qr_x, qr_b, qr_ancilla, cr_b)
    # Control on so the multiplication is actually applied.
    qc.x(qr_c)
    b = b % N
    x_bin = '{n:0{bit}b}'.format(n=x, bit=n)
    b_bin = '{n:0{bit}b}'.format(n=b, bit=bitlen)
    for i in range(n):
        if x_bin[i] == '1':
            qc.x(qr_x[i])
    for i in range(bitlen):
        if b_bin[i] == '1':
            qc.x(qr_b[i])
    gate = cmult_a_mod_N(n, a, b, N, False, True)
    qc.append(gate, qargs=qr_c[:] + qr_x[:] + qr_b[:] + qr_ancilla[:])
    # Measure MSB-first (classical bits are filled in reverse order).
    for i in range(bitlen):
        qc.measure(qr_b[i], cr_b[bitlen - i - 1])
    circuit_drawer(qc, scale=0.8, filename='./report/cmult3.png', output='mpl')
def test_cu(n, x, a, N, args):
    """Exercise the controlled-U_a gate |x> -> |a*x mod N> and check the result.

    Args:
        n: Width of the x register; the scratch register uses n + 1 qubits.
        x: Input value loaded into the x register (reduced mod N).
        a: Multiplier baked into the gate (reduced mod N).
        N: Modulus.
        args: CLI namespace forwarded to the simulator backend.

    Raises:
        Exception: When the measured value differs from (a * x) mod N.
    """
    print(f'x={x},a={a},N={N},ax mod N = {(a * x) % N}')
    a = a % N
    bitlen = n + 1
    qr_c = QuantumRegister(1, name='c')
    qr_x = QuantumRegister(n, name='x')
    qr_b_0 = QuantumRegister(bitlen, name='b0')
    qr_ancilla = QuantumRegister(1, name='ancilla')
    cr = ClassicalRegister(n, name='cr')
    qc = QuantumCircuit(qr_c, qr_x, qr_b_0, qr_ancilla, cr)
    # Control on so the multiplication is actually applied.
    qc.x(qr_c[0])
    x = x % N
    x_bin = '{n:0{bit}b}'.format(n=x, bit=n)
    for i in range(n):
        if x_bin[i] == '1':
            qc.x(qr_x[i])
    gate = cu_a(n, a, N, False, True)
    qc.append(gate, qargs=qr_c[:] + qr_x[:] + qr_b_0[:] + qr_ancilla[:])
    # Measure x MSB-first (classical bits are filled in reverse order).
    for i in range(n):
        qc.measure(qr_x[i], cr[n - 1 - i])
    circuit_drawer(qc, output='mpl', scale=0.8, filename='./report/cu.png')
    # print(qc)
    res = sim.mySim(qc, args)
    res_num = list(res)[0]
    expect = (a * x) % N
    if expect == int(res_num, 2):
        print("Expect = Measure = {0}".format(expect))
        print("CU correct!")
    else:
        raise Exception("CU wrong")
def check_cphiADD(num, qr, qc, n_bits=4):
    """Flip the qubits of *qr* encoding *num* in binary (MSB first).

    Generalized from a hard-coded 4-bit width; qr[0] is skipped, so qubit
    i + 1 receives bit i of the encoding.

    Args:
        num: Value to encode (converted with int()).
        qr: Quantum register to prepare.
        qc: Circuit the X gates are appended to.
        n_bits: Width of the binary encoding (default 4, the old fixed width).
    """
    binary = bin(int(num))[2:].zfill(n_bits)
    for i in range(n_bits):
        if binary[i] == '1':
            qc.x(qr[i + 1])
def rangeTest_cMult(N, args=None):
    """Sweep the controlled multiplier over b, a in [1, 16) and log failures.

    Args:
        N: Modulus under test.
        args: CLI namespace forwarded to testCMULT's simulator backend.
            Fix: previously the call omitted this required positional
            argument of testCMULT, so every case failed with a TypeError
            before any circuit ran.

    Side effects:
        Writes error_mul.csv with one row per failing (n, x, b, a, N, error).
    """
    error_lis = []
    for b in range(1, 16):
        for a in range(1, 16):
            try:
                testCMULT(n=4, x=2, b=b, a=a, N=N, args=args, print_qc=True)
            except Exception as e:
                error_lis.append((4, 2, b, a, N, e))
    # newline='' is the documented way to open files for the csv module.
    with open('error_mul.csv', 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['n', 'x', 'b', 'a', 'N', 'errorType'])
        for row in error_lis:
            csv_out.writerow(row)
def _write_failure_csv(path, rows):
    """Write one failure-category CSV: a header row then one row per case."""
    # newline='' is the documented way to open files for the csv module.
    with open(path, 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['bitlen', 'b', 'a', 'N'])
        for row in rows:
            csv_out.writerow(row)
def rangeTest_ccphiMOD(N, args=None):
    """Sweep the phi-adder-mod-N over b, a in [1, 16), categorizing failures.

    Args:
        N: Modulus under test.
        args: CLI namespace forwarded to test_ccphiMOD's simulator backend.
            Fix: previously the call omitted this required positional
            argument of test_ccphiMOD, so every case failed with a
            TypeError before any circuit ran.

    Side effects:
        Writes wrong.csv / ancilla.csv / trivial.csv / error.csv, keyed on
        the exception message raised by test_ccphiMOD.
    """
    error_lis = []
    wrong_lis = []
    ancilla_lis = []
    trivial_lis = []
    for i in range(1, 16):
        for j in range(1, 16):
            try:
                test_ccphiMOD(n=4, b=i, a=j, N=N, args=args,
                              print_qc=False, save_fig=False)
            except Exception as e:
                print(e)
                case = (5, i, j, N)
                # Dispatch on the exact messages raised by test_ccphiMOD.
                if str(e) == 'wrong ans':
                    wrong_lis.append(case)
                elif str(e) == 'ancilla bit broken!':
                    ancilla_lis.append(case)
                elif str(e) == 'Ans trivial':
                    trivial_lis.append(case)
                else:
                    error_lis.append(case)
    _write_failure_csv('error.csv', error_lis)
    _write_failure_csv('wrong.csv', wrong_lis)
    _write_failure_csv('ancilla.csv', ancilla_lis)
    _write_failure_csv('trivial.csv', trivial_lis)
def shorNormal_circuit(N, | |
import matplotlib.path as mplPath
from abc import ABCMeta
import abc
from protodata.utils import read_json
from protodata.columns import create_image_column
import numpy as np
import os
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
""" General functions for data manipulation """
class TrainMode(object):
    """String constants naming the supported model training configurations."""
    WIDE = 'wide'
    DEEP = 'deep'
    CNN = 'cnn'
    WIDE_AND_DEEP = 'wide_and_deep'
    ALL = 'wide_deep_cnn'
class DataMode(object):
    """String constants naming the dataset splits."""
    TRAINING = 'training'
    VALIDATION = 'validation'
    TEST = 'testing'
""" Data filename pattern """
def get_filename(name_tag, shard, num_shards):
    """Format a record file name as '<tag>-<shard:5>-of-<total:5>'."""
    template = '%s-%.5d-of-%.5d'
    return template % (name_tag, shard, num_shards)
def get_filename_pattern(folder, tag):
    """Glob pattern matching every shard file of *tag* inside *folder*."""
    wildcard = '%s-*' % str(tag)
    return os.path.join(folder, wildcard)
""" Neighborhood processing functions """
def read_city_data(path):
    """Load neighborhood polygons from a GeoJSON-like file.

    Each entry of the result maps a neighborhood name to the polygon
    delimiting it.

    Args:
        path: Path of the JSON file with a top-level 'features' list.

    Returns:
        Dict mapping neighborhood name to a matplotlib Path polygon.

    Raises:
        ValueError: If coordinates are not Nx2 after dropping a trailing
            third component when present.
    """
    raw_cities = read_json(path)
    cities = {}
    for n in raw_cities['features']:
        # The first ring of the first polygon delimits the neighborhood.
        # (The original copied it element by element; np.asarray suffices.)
        coords = np.asarray(n['geometry']['coordinates'][0][0])
        # Make sure coordinates have form Nx2
        if coords.shape[1] == 3:
            coords = coords[:, :-1]
        if coords.shape[1] != 2:
            raise ValueError('Coordinates have depth %d and should have 2'
                             % coords.shape[1])
        # Map into polygon and add to dictionary
        cities[n['properties']['neighbourhood']] = mplPath.Path(coords)
    return cities
def get_neighborhood(neighs, longitude, latitude):
    """Return the name of the neighborhood containing the point, or None.

    A point is assumed to lie in at most one neighborhood polygon
    (otherwise the data is broken).

    Args:
        neighs: Dict mapping neighborhood name to its polygon.
        longitude: Longitude of the input point.
        latitude: Latitude of the input point.
    """
    point = (longitude, latitude)
    matches = (name for name, area in neighs.items()
               if area.contains_point(point))
    return next(matches, None)
""" Data normalization functions """
def quantile_norm(val, edges, nq):
    """Map *val* into [0, 1] as i / (nq - 1).

    Here i is the quantile bucket the value falls into relative to the
    histogram *edges* of the feature distribution; values past either
    end are clamped to the first/last bucket.

    Args:
        val: Input value.
        edges: Histogram edges.
        nq: Number of quantiles.
    """
    if val >= edges[-1]:
        bucket = len(edges)
    elif val <= edges[0]:
        bucket = 0
    else:
        # Index of the first edge at or above the value.
        bucket = np.argmax(edges >= val)
    return bucket / (nq - 1.0)
def feature_normalize(dataset):
    """Return per-column (mean, std, min, max) statistics of *dataset*."""
    col_mean = np.mean(dataset, axis=0)
    col_std = np.std(dataset, axis=0)
    col_min = np.min(dataset, axis=0)
    col_max = np.max(dataset, axis=0)
    return col_mean, col_std, col_min, col_max
""" Tensorflow helpers """
def get_interval_mask(low, high, values):
    """Boolean mask marking the entries x of *values* with low <= x < high."""
    # Both comparison ops broadcast, so low/high may be scalars or tensors.
    at_least_low = tf.greater_equal(values, low)
    below_high = tf.less(values, high)
    return tf.logical_and(at_least_low, below_high)
def copy_columns(x, num):
    """Replicate tensor *x* by concatenating its columns *num* times."""
    # Flatten the replicas into one vector, fold into `num` rows, then
    # transpose so the copies land along the column axis.
    flat_copies = tf.tile(x, tf.stack([num]))
    rows = tf.reshape(flat_copies, tf.stack([num, -1]))
    return tf.transpose(rows)
""" Pandas helpers """
def get_column_info(data, excluded=()):
    """Return (name, dtype) pairs for the columns of interest, in order.

    Args:
        data: Pandas dataframe.
        excluded: Column names to leave out. (Immutable default replaces
            the previous mutable ``[]`` default.)

    Returns:
        List of (column name, numpy dtype) tuples.
    """
    col_names = [c for c in data.columns.values if c not in excluded]
    # Names are already filtered; no need to re-test membership per dtype.
    return [(c, data[c].dtype) for c in col_names]
def quantile_normalization(data, train_ind, nq, excluded=()):
    """Normalize numeric columns into [0, 1] using quantile normalization.

    As described in: Wide & Deep Learning for Recommender Systems,
    Cheng et al. (2016) [https://arxiv.org/abs/1606.07792].

    Args:
        data: Pandas dataframe.
        train_ind: Positional indices of the training instances.
        nq: Number of quantiles to use.
        excluded: Columns to ignore in the normalization process.

    Returns:
        data: Dataset normalized with histogram edges computed on the
            training subset only.
        d: Dictionary mapping each numeric column name to its edges.
    """
    d = {}
    for name, dtype in get_column_info(data, excluded=excluded):
        if not is_numeric(dtype):
            continue
        logger.debug('Normalizing column %s' % name)
        # Fix: select the column by label, then the training rows by
        # position. The previous `data.iloc[train_ind, name]` passed a
        # label to the purely positional `iloc` indexer and raised.
        train_content = data[name].iloc[train_ind]
        # Only the edges matter; the histogram counts are discarded.
        _, edges = np.histogram(train_content, bins=nq - 2)
        # Store entry in dictionary
        d[name] = edges
        # Update column
        data[name] = data[name].apply(lambda x: quantile_norm(x, edges, nq))
    return data, d
def z_scores(data, train_ind, excluded=()):
    """Standardize numeric columns using training-set statistics.

    Args:
        data: Pandas dataframe.
        train_ind: Positional indices of the training instances.
        excluded: Columns to ignore in the normalization process.

    Returns:
        data: Dataset with each numeric column replaced by (x - mean) / std,
            statistics computed on the training subset only.
        d: Dictionary mapping each numeric column name to its
            mean/std/min/max.
    """
    d = {}
    for name, dtype in get_column_info(data, excluded=excluded):
        if not is_numeric(dtype):
            continue
        # Fix: label + positional selection — `iloc` cannot take a column
        # label; np.asarray replaces `.as_matrix()`, removed in pandas 1.0.
        train_content = np.asarray(data[name].iloc[train_ind])
        mean, std, min_c, max_c = feature_normalize(train_content)
        # Store entry in dictionary
        d[name] = {'mean': mean, 'std': std, 'min': min_c, 'max': max_c}
        # Update column
        data[name] = (data[name] - mean) / std
    return data, d
def normalize_data(data, train_ind, zscores=True, excluded=[], nq=5):
    """Normalize the numeric columns of *data* using training-set statistics.

    Args:
        data: Pandas dataframe.
        train_ind: Instance indices belonging to the train set.
        zscores: Use z-score standardization when True, quantile
            normalization when False.
        excluded: Columns to ignore in the normalization process.
        nq: Number of quantiles (quantile normalization only).

    Returns:
        The normalized dataset together with a metadata dictionary:
        mean/std/min/max per numeric column for z-scores, histogram edges
        per numeric column for quantile normalization.
    """
    if zscores:
        return z_scores(data, train_ind=train_ind, excluded=excluded)
    return quantile_normalization(data,
                                  train_ind=train_ind,
                                  nq=nq,
                                  excluded=excluded)
def to_dummy(data, name):
    """Replace categorical column *name* with one boolean column per value.

    Each dummy column is named '<name>_<value>' (cleaned of special
    characters) and holds the equality mask; the original column is
    dropped.
    """
    logger.debug('Converting %s into dummy column ...' % name)
    for value in data[name].unique():
        # Build a clean, valid string name for the dummy column.
        raw_name = '_'.join([name, value])
        dummy_name = erase_special(unicode_to_str(raw_name))
        # True wherever the original cell equals this value.
        data[dummy_name] = data[name].apply(lambda cell: cell == value)
    return data.drop(name, axis=1)
def convert_to_dummy(data, excluded_columns=[]):
    """Expand every categorical (object-dtype) column into dummy booleans.

    Args:
        data: pandas dataframe.
        excluded_columns: columns to ignore.
    """
    for col_name, col_type in get_column_info(data,
                                              excluded=excluded_columns):
        if col_type == np.dtype('object'):
            data = to_dummy(data, col_name)
    return data
def convert_boolean(data, excluded_columns=[], func=float):
    """Cast every boolean column of the dataset with *func*.

    Args:
        data: pandas Dataframe.
        excluded_columns: Columns to exclude in the conversion process.
        func: Type (and function) used to convert booleans
            (e.g. float, int). Default is float.
    """
    for col_name, col_type in get_column_info(data, excluded=excluded_columns):
        if col_type == np.dtype('bool'):
            logger.debug('Converting boolean column %s to numeric' % col_name)
            data[col_name] = data[col_name].apply(func)
    return data
def is_categorical(type_def):
    """True when *type_def* is the generic object dtype (categorical column)."""
    object_dtype = np.dtype('object')
    return type_def == object_dtype
def is_numeric(type_def):
    """True when *type_def* represents a numerical value."""
    numeric_base = np.number
    return np.issubdtype(type_def, numeric_base)
def is_bool(type_def):
    """True when *type_def* represents a boolean (and not a numeric) value."""
    looks_boolean = np.issubdtype(type_def, bool)
    return looks_boolean and not np.issubdtype(type_def, np.number)
""" TF Serialization wrapper operations """
def int64_feature(value):
    """Wrap *value* (scalar or list) as an int64 proto Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def float64_feature(value):
    """ Wrapper for float64 proto features """
    value = [value] if not isinstance(value, list) else value
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(value):
    """Wrap *value* (scalar or list) as a bytes proto Feature.

    Any str items are encoded to bytes in place before wrapping.
    """
    # Ensure we have a list of elements.
    values = [value] if not isinstance(value, list) else value
    # Encode string items to bytes (in place, matching historic behavior).
    for i, item in enumerate(values):
        if isinstance(item, str):
            values[i] = str.encode(item)
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def map_feature(value, f_type):
    """Build the proto Feature wrapper matching numpy dtype *f_type*."""
    if f_type == np.dtype('object'):
        return bytes_feature(value)
    if f_type == np.dtype('int'):
        return int64_feature(value)
    if f_type == np.dtype('float'):
        return float64_feature(value)
    if f_type == np.dtype('bool'):
        # Booleans are stored as 0/1 integers.
        return int64_feature(value.astype('int'))
    raise ValueError('Do not know how to store value {} with type {}'
                     .format(value, f_type))
def map_feature_type(np_type):
""" Maps numpy types into accepted Tensorflow feature
types | |
/ 2], [-1 / 2, -1 / 2]),
("CRot", [math.pi / 2, 0, 0], [-1 / 2, -1 / 2]),
("CRot", [0, math.pi / 2, 0], [-1 / 2, 1 / 4]),
("CRot", [0, 0, math.pi / 2], [-1 / 2, -1 / 2]),
("CRot", [math.pi / 2, 0, -math.pi], [-1 / 2, -1 / 2]),
("CRot", [0, math.pi / 2, -math.pi], [-1 / 2, 1 / 4]),
("CRot", [-math.pi, 0, math.pi / 2], [-1 / 2, -1 / 2]),
(
"QubitUnitary",
[
np.array(
[
[1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, 1],
]
)
],
[-1 / 2, -1 / 2],
),
(
"QubitUnitary",
[
np.array(
[
[-1, 0, 0, 0],
[0, 1 / math.sqrt(2), 1 / math.sqrt(2), 0],
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
[0, 0, 0, -1],
]
)
],
[-1 / 2, -1 / 2],
),
],
)
    def test_supported_gate_two_wires_with_parameters(self, rep, tol, name, par, expected_output):
        """Tests supported parameterized gates that act on two wires."""
        op = getattr(qml.ops, name)
        dev = qml.device("default.tensor", wires=2, representation=rep)
        assert dev.supports_operation(name)
        # diff_method=None: only forward execution is checked, no gradients.
        @qml.qnode(dev, diff_method=None)
        def circuit():
            # Start from the fixed state (1/2)|00> + (sqrt(3)/2)|11>.
            qml.QubitStateVector(np.array([1 / 2, 0, 0, math.sqrt(3) / 2]), wires=Wires([0, 1]))
            op(*par, wires=Wires([0, 1]))
            return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
        assert np.allclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,state,expected_output",
[
("PauliX", [1 / math.sqrt(2), 1 / math.sqrt(2)], 1),
("PauliX", [1 / math.sqrt(2), -1 / math.sqrt(2)], -1),
("PauliX", [1, 0], 0),
("PauliY", [1 / math.sqrt(2), 1j / math.sqrt(2)], 1),
("PauliY", [1 / math.sqrt(2), -1j / math.sqrt(2)], -1),
("PauliY", [1, 0], 0),
("PauliZ", [1, 0], 1),
("PauliZ", [0, 1], -1),
("PauliZ", [1 / math.sqrt(2), 1 / math.sqrt(2)], 0),
("Hadamard", [1, 0], 1 / math.sqrt(2)),
("Hadamard", [0, 1], -1 / math.sqrt(2)),
("Hadamard", [1 / math.sqrt(2), 1 / math.sqrt(2)], 1 / math.sqrt(2)),
],
)
    def test_supported_observable_single_wire_no_parameters(
        self, rep, tol, name, state, expected_output
    ):
        """Tests supported observables on single wires without parameters."""
        obs = getattr(qml.ops, name)
        dev = qml.device("default.tensor", wires=1, representation=rep)
        assert dev.supports_observable(name)
        @qml.qnode(dev)
        def circuit():
            # Load the parametrized input state, then measure the observable.
            qml.QubitStateVector(np.array(state), wires=[0])
            return qml.expval(obs(wires=[0]))
        assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,state,expected_output,par",
[
("Identity", [1, 0], 1, []),
("Identity", [0, 1], 1, []),
("Identity", [1 / math.sqrt(2), -1 / math.sqrt(2)], 1, []),
("Hermitian", [1, 0], 1, [np.array([[1, 1j], [-1j, 1]])]),
("Hermitian", [0, 1], 1, [np.array([[1, 1j], [-1j, 1]])]),
(
"Hermitian",
[1 / math.sqrt(2), -1 / math.sqrt(2)],
1,
[np.array([[1, 1j], [-1j, 1]])],
),
],
)
    def test_supported_observable_single_wire_with_parameters(
        self, rep, tol, name, state, expected_output, par
    ):
        """Tests supported observables on single wires with parameters."""
        obs = getattr(qml.ops, name)
        dev = qml.device("default.tensor", wires=1, representation=rep)
        assert dev.supports_observable(name)
        @qml.qnode(dev)
        def circuit():
            # Load the parametrized input state, then measure the
            # parameterized observable (e.g. a Hermitian matrix).
            qml.QubitStateVector(np.array(state), wires=[0])
            return qml.expval(obs(*par, wires=[0]))
        assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
@pytest.mark.parametrize(
"name,state,expected_output,par",
[
(
"Hermitian",
[1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)],
5 / 3,
[np.array([[1, 1j, 0, 1], [-1j, 1, 0, 0], [0, 0, 1, -1j], [1, 0, 1j, 1]])],
),
(
"Hermitian",
[0, 0, 0, 1],
0,
[np.array([[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]])],
),
(
"Hermitian",
[1 / math.sqrt(2), 0, -1 / math.sqrt(2), 0],
1,
[np.array([[1, 1j, 0, 0], [-1j, 1, 0, 0], [0, 0, 1, -1j], [0, 0, 1j, 1]])],
),
(
"Hermitian",
[
1 / math.sqrt(3),
-1 / math.sqrt(3),
1 / math.sqrt(6),
1 / math.sqrt(6),
],
1,
[
np.array(
[
[1, 1j, 0, 0.5j],
[-1j, 1, 0, 0],
[0, 0, 1, -1j],
[-0.5j, 0, 1j, 1],
]
)
],
),
(
"Hermitian",
[1 / math.sqrt(2), 0, 0, 1 / math.sqrt(2)],
1,
[np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])],
),
(
"Hermitian",
[0, 1 / math.sqrt(2), -1 / math.sqrt(2), 0],
-1,
[np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])],
),
],
)
    def test_supported_observable_two_wires_with_parameters(
        self, rep, tol, name, state, expected_output, par
    ):
        """Tests supported observables on two wires with parameters."""
        obs = getattr(qml.ops, name)
        dev = qml.device("default.tensor", wires=2, representation=rep)
        assert dev.supports_observable(name)
        @qml.qnode(dev)
        def circuit():
            # Load the parametrized two-qubit state, then measure the
            # parameterized observable (e.g. a 4x4 Hermitian matrix).
            qml.QubitStateVector(np.array(state), wires=Wires([0, 1]))
            return qml.expval(obs(*par, wires=Wires([0, 1])))
        assert np.isclose(circuit(), expected_output, atol=tol, rtol=0)
    def test_expval_warnings(self, rep):
        """Tests that expval raises a warning if the given observable is complex."""
        dev = qml.device("default.tensor", wires=1, representation=rep)
        # Deliberately non-Hermitian matrix, so its "expectation value" has a
        # nonvanishing imaginary part.
        A = np.array([[2j, 1j], [-3j, 1j]])
        obs_node = dev._create_nodes_from_tensors(
            [A], [Wires([0])], "ComplexObservable", key="observables"
        )
        # test: warning raised if matrix is complex
        with pytest.warns(RuntimeWarning, match="Nonvanishing imaginary part"):
            dev.ev(obs_node, obs_wires=[Wires([0])])
@pytest.mark.parametrize("method", ["auto", "greedy", "branch", "optimal"])
def test_correct_state_no_params(self, rep, method):
"""Tests that if different QNodes are used with the same device,
then the contracted state is correct for each one."""
dev = qml.device("default.tensor", wires=2, representation=rep)
state = dev._state()
expected = np.array([[1, 0], [0, 0]])
assert np.allclose(state, expected)
@qml.qnode(dev)
def circuit():
qml.Hadamard(wires=0)
return qml.expval(qml.PauliZ(0))
circuit()
state = dev._state()
expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)
assert np.allclose(state, expected)
@pytest.mark.parametrize("method", ["auto", "greedy", "branch", "optimal"])
def test_correct_state_diff_params(self, rep, method, tol):
    """Tests that if different inputs are fed to the same QNode,
    then the contracted state is updated correctly."""
    dev = qml.device("default.tensor", wires=2, representation=rep, contraction_method=method)

    @qml.qnode(dev)
    def circuit(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    def analytic_state(angle):
        # |psi> = (cos(a/2)|0> - i sin(a/2)|1>) (x) |0>, as a 2x2 tensor.
        amplitudes = np.array([np.cos(angle / 2), -1j * np.sin(angle / 2)])
        return np.outer(amplitudes, [1.0, 0.0]).reshape([2, 2])

    # Run the same QNode twice with different parameters; the contracted
    # state must track the most recent input each time.
    for angle in (np.pi / 4, -0.1234):
        output = circuit(angle)
        ket = dev._state()
        assert dev._contracted_state_node is not None
        assert np.allclose(ket, analytic_state(angle), atol=tol, rtol=0)
        assert output == np.cos(angle / 2) ** 2 - np.sin(angle / 2) ** 2
@pytest.mark.parametrize("rep", ("exact", "mps"))
@pytest.mark.parametrize("theta,phi,varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, theta, phi, varphi, rep, tol):
    """Test that a tensor product involving PauliX and PauliY works correctly"""
    dev = qml.device("default.tensor", wires=3, representation=rep)
    dev.reset()

    # Prepare the state: one RX rotation per wire, then an entangling chain.
    for wire, angle in enumerate([theta, phi, varphi]):
        dev.apply("RX", wires=Wires([wire]), par=[angle])
    dev.apply("CNOT", wires=Wires([0, 1]), par=[])
    dev.apply("CNOT", wires=Wires([1, 2]), par=[])

    value = dev.expval(["PauliX", "PauliY"], [Wires([0]), Wires([2])], [[], []])
    analytic = np.sin(theta) * np.sin(phi) * np.sin(varphi)
    assert np.allclose(value, analytic, atol=tol, rtol=0)
def test_pauliz_identity(self, theta, phi, varphi, rep, tol):
    """Test that a tensor product involving PauliZ and Identity works correctly"""
    dev = qml.device("default.tensor", wires=3, representation=rep)
    dev.reset()

    # Prepare the state: one RX rotation per wire, then an entangling chain.
    for wire, angle in enumerate([theta, phi, varphi]):
        dev.apply("RX", wires=Wires([wire]), par=[angle])
    dev.apply("CNOT", wires=Wires([0, 1]), par=[])
    dev.apply("CNOT", wires=Wires([1, 2]), par=[])

    value = dev.expval(
        ["PauliZ", "Identity", "PauliZ"], [Wires([0]), Wires([1]), Wires([2])], [[], [], []]
    )
    analytic = np.cos(varphi) * np.cos(phi)
    assert np.allclose(value, analytic, atol=tol, rtol=0)
def test_pauliz_hadamard(self, theta, phi, varphi, rep, tol):
    """Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
    dev = qml.device("default.tensor", wires=3, representation=rep)
    dev.reset()

    # Prepare the state: one RX rotation per wire, then an entangling chain.
    for wire, angle in enumerate([theta, phi, varphi]):
        dev.apply("RX", wires=Wires([wire]), par=[angle])
    dev.apply("CNOT", wires=Wires([0, 1]), par=[])
    dev.apply("CNOT", wires=Wires([1, 2]), par=[])

    value = dev.expval(
        ["PauliZ", "Hadamard", "PauliY"], [Wires([0]), Wires([1]), Wires([2])], [[], [], []]
    )
    analytic = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
    assert np.allclose(value, analytic, atol=tol, rtol=0)
def test_hermitian(self, theta, phi, varphi, rep, tol):
    """Test that a tensor product involving qml.Hermitian works correctly"""
    dev = qml.device("default.tensor", wires=3, representation=rep)
    dev.reset()

    # Prepare the state: one RX rotation per wire, then an entangling chain.
    for wire, angle in enumerate([theta, phi, varphi]):
        dev.apply("RX", wires=Wires([wire]), par=[angle])
    dev.apply("CNOT", wires=Wires([0, 1]), par=[])
    dev.apply("CNOT", wires=Wires([1, 2]), par=[])

    # Two-qubit Hermitian observable acting on wires 1 and 2.
    herm_mat = np.array(
        [
            [-6, 2 + 1j, -3, -5 + 2j],
            [2 - 1j, 0, 2 - 1j, -5 + 4j],
            [-3, 2 + 1j, 0, -4 + 3j],
            [-5 - 2j, -5 - 4j, -4 - 3j, -6],
        ]
    )

    value = dev.expval(["PauliZ", "Hermitian"], [Wires([0]), Wires([1, 2])], [[], [herm_mat]])
    analytic = 0.5 * (
        -6 * np.cos(theta) * (np.cos(varphi) + 1)
        - 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
        + 3 * np.cos(varphi) * np.sin(phi)
        + np.sin(phi)
    )
    assert np.allclose(value, analytic, atol=tol, rtol=0)
def test_hermitian_hermitian(self, theta, phi, varphi, rep, tol):
"""Test that a tensor product involving two Hermitian matrices works correctly"""
dev = qml.device("default.tensor", wires=3, representation=rep)
dev.reset()
dev.apply("RX", wires=Wires([0]), par=[theta])
dev.apply("RX", wires=Wires([1]), par=[phi])
dev.apply("RX", wires=Wires([2]), par=[varphi])
dev.apply("CNOT", wires=Wires([0, 1]), par=[])
dev.apply("CNOT", wires=Wires([1, 2]), | |
"credit_percent" in stip and not isinstance(
stip["credit_percent"], (int, float)):
raise ValidationError(_("credit_percent must be a float"))
if ("allowed_session_count" in stip
and (
not isinstance(stip["allowed_session_count"], int)
or stip["allowed_session_count"] < 0)):
raise ValidationError(
_("'allowed_session_count' must be a non-negative integer"))
# {{{ deprecated exception stuff
class FlowAccessException(models.Model):
    """Deprecated per-participation exception to a flow's access rules.

    Superseded by :class:`FlowRuleException`; retained only so that
    historical database rows remain readable.
    """

    # deprecated
    participation = models.ForeignKey(Participation, db_index=True,
            verbose_name=_('Participation'))
    flow_id = models.CharField(max_length=200, blank=False, null=False,
            verbose_name=_('Flow ID'))
    expiration = models.DateTimeField(blank=True, null=True,
            verbose_name=_('Expiration'))

    stipulations = JSONField(blank=True, null=True,
            # Translators: help text for stipulations in FlowAccessException
            # (deprecated)
            help_text=_("A dictionary of the same things that can be added "
                "to a flow access rule, such as allowed_session_count or "
                "credit_percent. If not specified here, values will default "
                "to the stipulations in the course content."),
            validators=[validate_stipulations],
            dump_kwargs={'ensure_ascii': False},
            verbose_name=_('Stipulations'))

    creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
            verbose_name=_('Creator'))
    creation_time = models.DateTimeField(default=now, db_index=True,
            verbose_name=_('Creation time'))

    is_sticky = models.BooleanField(
            default=False,
            # Translators: deprecated
            help_text=_("Check if a flow started under this "
                "exception rule set should stay "
                "under this rule set until it is expired."),
            # Translators: deprecated
            verbose_name=_('Is sticky'))

    comment = models.TextField(blank=True, null=True,
            verbose_name=_('Comment'))

    def __unicode__(self):
        # Human-readable description shown in the Django admin.
        return (
                # Translators: flow access exception in admin (deprecated)
                _("Access exception for '%(user)s' to '%(flow_id)s' "
                    "in '%(course)s'") %
                {
                    "user": self.participation.user,
                    "flow_id": self.flow_id,
                    "course": self.participation.course
                    })

    # On Python 3, str() must produce text directly.
    if six.PY3:
        __str__ = __unicode__
class FlowAccessExceptionEntry(models.Model):
    """Deprecated: one flow permission granted by a
    :class:`FlowAccessException`."""

    # deprecated
    exception = models.ForeignKey(FlowAccessException,
            related_name="entries",
            verbose_name=_('Exception'))
    permission = models.CharField(max_length=50,
            choices=FLOW_PERMISSION_CHOICES,
            verbose_name=_('Permission'))

    class Meta:
        # Translators: FlowAccessExceptionEntry (deprecated)
        verbose_name_plural = _("Flow access exception entries")

    def __unicode__(self):
        return self.permission

    # On Python 3, str() must produce text directly.
    if six.PY3:
        __str__ = __unicode__
# }}}
class FlowRuleException(models.Model):
    """A per-participation exception to one kind of flow rule.

    The ``rule`` field holds a YAML dictionary in the same format as the
    corresponding rule in the course content; ``kind`` selects which rule
    family (start / access / grading) it overrides.
    """

    flow_id = models.CharField(max_length=200, blank=False, null=False,
            verbose_name=_('Flow ID'))
    participation = models.ForeignKey(Participation, db_index=True,
            verbose_name=_('Participation'))
    expiration = models.DateTimeField(blank=True, null=True,
            verbose_name=_('Expiration'))
    creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
            verbose_name=_('Creator'))
    creation_time = models.DateTimeField(default=now, db_index=True,
            verbose_name=_('Creation time'))
    comment = models.TextField(blank=True, null=True,
            verbose_name=_('Comment'))
    kind = models.CharField(max_length=50, blank=False, null=False,
            choices=FLOW_RULE_KIND_CHOICES,
            verbose_name=_('Kind'))
    rule = YAMLField(blank=False, null=False,
            verbose_name=_('Rule'))
    active = models.BooleanField(default=True,
            verbose_name=pgettext_lazy(
                "Is the flow rule exception activated?", "Active"))

    def __unicode__(self):
        # NOTE(review): there is no space between "'%(flow_id)s'" and "in",
        # so the rendered text reads "...'flow'in 'course'" -- looks like a
        # typo in the format string; confirm before changing (translations
        # key on the exact msgid).
        return (
                # Translators: For FlowRuleException
                _("%(kind)s exception for '%(user)s' to '%(flow_id)s'"
                "in '%(course)s'")
                % {
                    "kind": self.kind,
                    "user": self.participation.user,
                    "flow_id": self.flow_id,
                    "course": self.participation.course})

    # On Python 3, str() must produce text directly.
    if six.PY3:
        __str__ = __unicode__

    def clean(self):
        """Validate the stored rule against the course content.

        Checks that grading rules carry no expiration, then parses the
        YAML rule and runs it through the course-content validators for
        the selected ``kind``.

        Raises:
            ValidationError: if the kind is invalid, a grading rule has an
                expiration, or the rule fails content validation.
        """
        super(FlowRuleException, self).clean()

        if (self.kind == flow_rule_kind.grading
                and self.expiration is not None):
            raise ValidationError(_("grading rules may not expire"))

        # Imported locally -- presumably to avoid circular imports between
        # the models and the course content/validation modules.
        from course.validation import (
                ValidationError as ContentValidationError,
                validate_session_start_rule,
                validate_session_access_rule,
                validate_session_grading_rule,
                ValidationContext)
        from course.content import (get_course_repo,
                get_course_commit_sha,
                get_flow_desc)

        from relate.utils import dict_to_struct
        rule = dict_to_struct(self.rule)

        repo = get_course_repo(self.participation.course)
        commit_sha = get_course_commit_sha(
                self.participation.course, self.participation)
        ctx = ValidationContext(
                repo=repo,
                commit_sha=commit_sha)

        flow_desc = get_flow_desc(repo,
                self.participation.course,
                self.flow_id, commit_sha)

        # Session tags and the grade identifier (if declared in the flow's
        # rules) constrain what the exception rule may reference.
        tags = None
        grade_identifier = None
        if hasattr(flow_desc, "rules"):
            tags = getattr(flow_desc.rules, "tags", None)
            grade_identifier = flow_desc.rules.grade_identifier

        try:
            if self.kind == flow_rule_kind.start:
                validate_session_start_rule(ctx, six.text_type(self), rule, tags)
            elif self.kind == flow_rule_kind.access:
                validate_session_access_rule(ctx, six.text_type(self), rule, tags)
            elif self.kind == flow_rule_kind.grading:
                validate_session_grading_rule(
                        ctx, six.text_type(self), rule, tags,
                        grade_identifier)
            else:
                # the rule refers to FlowRuleException rule
                raise ValidationError(_("invalid rule kind: ")+self.kind)

        except ContentValidationError as e:
            # the rule refers to FlowRuleException rule
            raise ValidationError(_("invalid existing_session_rules: ")+str(e))

    class Meta:
        verbose_name = _("Flow rule exception")
        verbose_name_plural = _("Flow rule exceptions")
# }}}
# {{{ grading
class GradingOpportunity(models.Model):
    """One gradeable item in a course's grade book.

    May optionally be linked (via ``flow_id``) to a flow whose sessions
    produce the grades; multiple session grades are combined according to
    ``aggregation_strategy``.
    """

    course = models.ForeignKey(Course,
            verbose_name=_('Course'))

    identifier = models.CharField(max_length=200, blank=False, null=False,
            # Translators: format of identifier for GradingOpportunity
            help_text=_("A symbolic name for this grade. "
            "lower_case_with_underscores, no spaces."),
            verbose_name=_('Grading opportunity ID'))
    name = models.CharField(max_length=200, blank=False, null=False,
            # Translators: name for GradingOpportunity
            help_text=_("A human-readable identifier for the grade."),
            verbose_name=_('Grading opportunity name'))
    flow_id = models.CharField(max_length=200, blank=True, null=True,
            help_text=_("Flow identifier that this grading opportunity "
            "is linked to, if any"),
            verbose_name=_('Flow ID'))

    aggregation_strategy = models.CharField(max_length=20,
            choices=GRADE_AGGREGATION_STRATEGY_CHOICES,
            # Translators: strategy on how the grading of mutiple sessioins
            # are aggregated.
            verbose_name=_('Aggregation strategy'))

    due_time = models.DateTimeField(default=None, blank=True, null=True,
            verbose_name=_('Due time'))
    creation_time = models.DateTimeField(default=now,
            verbose_name=_('Creation time'))

    shown_in_grade_book = models.BooleanField(default=True,
            verbose_name=_('Shown in grade book'))
    shown_in_student_grade_book = models.BooleanField(default=True,
            verbose_name=_('Shown in student grade book'))

    class Meta:
        verbose_name = _("Grading opportunity")
        verbose_name_plural = _("Grading opportunities")
        ordering = ("course", "due_time", "identifier")
        unique_together = (("course", "identifier"),)

    def __unicode__(self):
        # Human-readable description shown in the Django admin.
        return (
                # Translators: For GradingOpportunity
                _("%(opportunity_name)s (%(opportunity_id)s) in %(course)s")
                % {
                    "opportunity_name": self.name,
                    "opportunity_id": self.identifier,
                    "course": self.course})

    # On Python 3, str() must produce text directly.
    if six.PY3:
        __str__ = __unicode__

    def get_aggregation_strategy_descr(self):
        """Return the human-readable label for the aggregation strategy."""
        return dict(GRADE_AGGREGATION_STRATEGY_CHOICES).get(
                self.aggregation_strategy)
class GradeChange(models.Model):
    """Per 'grading opportunity', each participant may accumulate multiple grades
    that are aggregated according to :attr:`GradingOpportunity.aggregation_strategy`.

    In addition, for each opportunity, grade changes are grouped by their 'attempt'
    identifier, where later grades with the same :attr:`attempt_id` supersede earlier
    ones.
    """
    opportunity = models.ForeignKey(GradingOpportunity,
            verbose_name=_('Grading opportunity'))

    participation = models.ForeignKey(Participation,
            verbose_name=_('Participation'))

    state = models.CharField(max_length=50,
            choices=GRADE_STATE_CHANGE_CHOICES,
            # Translators: something like 'status'.
            verbose_name=_('State'))

    attempt_id = models.CharField(max_length=50, null=True, blank=True,
            default="main",
            # Translators: help text of "attempt_id" in GradeChange class
            help_text=_("Grade changes are grouped by their 'attempt ID' "
            "where later grades with the same attempt ID supersede earlier "
            "ones."),
            verbose_name=_('Attempt ID'))

    points = models.DecimalField(max_digits=10, decimal_places=2,
            blank=True, null=True,
            verbose_name=_('Points'))
    max_points = models.DecimalField(max_digits=10, decimal_places=2,
            verbose_name=_('Max points'))

    comment = models.TextField(blank=True, null=True,
            verbose_name=_('Comment'))

    due_time = models.DateTimeField(default=None, blank=True, null=True,
            verbose_name=_('Due time'))

    creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
            verbose_name=_('Creator'))
    grade_time = models.DateTimeField(default=now, db_index=True,
            verbose_name=_('Grade time'))

    flow_session = models.ForeignKey(FlowSession, null=True, blank=True,
            related_name="grade_changes",
            verbose_name=_('Flow session'))

    class Meta:
        verbose_name = _("Grade change")
        verbose_name_plural = _("Grade changes")
        ordering = ("opportunity", "participation", "grade_time")

    def __unicode__(self):
        # Translators: information for GradeChange
        return _("%(participation)s %(state)s on %(opportunityname)s") % {
                'participation': self.participation,
                'state': self.state,
                'opportunityname': self.opportunity.name}

    # On Python 3, str() must produce text directly.
    if six.PY3:
        __str__ = __unicode__

    def clean(self):
        """Reject grade changes whose opportunity and participation belong
        to different courses."""
        super(GradeChange, self).clean()

        if self.opportunity.course != self.participation.course:
            raise ValidationError(_("Participation and opportunity must live "
                    "in the same course"))

    def percentage(self):
        """Return points as a percentage of max_points, or None if either
        value is missing."""
        if self.max_points is not None and self.points is not None:
            return 100*self.points/self.max_points
        else:
            return None

    def get_state_desc(self):
        """Return the human-readable label for this change's state."""
        return dict(GRADE_STATE_CHANGE_CHOICES).get(
                self.state)
# }}}
# {{{ grade state machine
class GradeStateMachine(object):
    """Folds a time-ordered stream of :class:`GradeChange` rows for a single
    grading opportunity into one overall state and percentage.

    Feed changes through :meth:`consume`; afterwards :meth:`percentage`
    reports the aggregate according to the opportunity's aggregation
    strategy, and the ``stringify_*`` methods render the state.
    """

    def __init__(self):
        self.opportunity = None

        self.state = None
        self._clear_grades()
        self.due_time = None
        self.last_graded_time = None
        self.last_report_time = None

        # applies to *all* grade changes
        self._last_grade_change_time = None

    def _clear_grades(self):
        """Discard accumulated grading data (used for 'do_over', 'exempt'
        and 'unavailable' state changes)."""
        self.state = None
        # NOTE(review): this assigns ``last_grade_time`` while __init__ and
        # _consume_grade_change use ``last_graded_time`` -- this looks like a
        # dead assignment (the graded timestamp is NOT reset here); confirm
        # which attribute is intended before relying on either.
        self.last_grade_time = None
        self.valid_percentages = []
        self.attempt_id_to_gchange = {}

    def _consume_grade_change(self, gchange, set_is_superseded):
        """Apply a single grade change to the machine's state.

        Args:
            gchange: the :class:`GradeChange` to apply; must be later than
                every previously consumed change.
            set_is_superseded: if True, mark an earlier change with the same
                attempt_id as superseded when a newer one arrives.

        Raises:
            ValueError: on a 'graded' change after 'unavailable' or 'exempt'.
            RuntimeError: on an unrecognized state value.
        """
        if self.opportunity is None:
            self.opportunity = gchange.opportunity
            self.due_time = self.opportunity.due_time
        else:
            # All consumed changes must belong to the same opportunity.
            assert self.opportunity.pk == gchange.opportunity.pk

        # check that times are increasing
        if self._last_grade_change_time is not None:
            assert gchange.grade_time > self._last_grade_change_time
        self._last_grade_change_time = gchange.grade_time

        if gchange.state == grade_state_change_types.graded:
            if self.state == grade_state_change_types.unavailable:
                raise ValueError(
                        _("cannot accept grade once opportunity has been "
                            "marked 'unavailable'"))
            if self.state == grade_state_change_types.exempt:
                raise ValueError(
                        _("cannot accept grade once opportunity has been "
                            "marked 'exempt'"))

            #if self.due_time is not None and gchange.grade_time > self.due_time:
                #raise ValueError("cannot accept grade after due date")

            self.state = gchange.state
            if gchange.attempt_id is not None:
                # Changes with an attempt ID supersede earlier changes that
                # carry the same ID; only the latest one counts.
                if (set_is_superseded and
                        gchange.attempt_id in self.attempt_id_to_gchange):
                    self.attempt_id_to_gchange[gchange.attempt_id] \
                            .is_superseded = True
                self.attempt_id_to_gchange[gchange.attempt_id] \
                        = gchange
            else:
                # No attempt ID: the grade counts unconditionally.
                self.valid_percentages.append(gchange.percentage())

            self.last_graded_time = gchange.grade_time

        elif gchange.state == grade_state_change_types.unavailable:
            self._clear_grades()
            self.state = gchange.state

        elif gchange.state == grade_state_change_types.do_over:
            # Wipes prior grades but leaves the overall state unset.
            self._clear_grades()

        elif gchange.state == grade_state_change_types.exempt:
            self._clear_grades()
            self.state = gchange.state

        elif gchange.state == grade_state_change_types.report_sent:
            self.last_report_time = gchange.grade_time

        elif gchange.state == grade_state_change_types.extension:
            self.due_time = gchange.due_time

        elif gchange.state in [
                grade_state_change_types.grading_started,
                grade_state_change_types.retrieved,
                ]:
            # Informational states: no effect on the aggregate.
            pass
        else:
            raise RuntimeError(
                    _("invalid grade change state '%s'") % gchange.state)

    def consume(self, iterable, set_is_superseded=False):
        """Consume all grade changes in *iterable* (must be time-ordered)
        and fold the surviving per-attempt grades into the percentage list.

        Returns:
            self, to allow chaining.
        """
        for gchange in iterable:
            gchange.is_superseded = False
            self._consume_grade_change(gchange, set_is_superseded)

        # The latest change per attempt ID survives; append their
        # percentages in grade-time order.
        valid_grade_changes = sorted(
                (gchange
                for gchange in self.attempt_id_to_gchange.values()
                if gchange.percentage() is not None),
                key=lambda gchange: gchange.grade_time)

        self.valid_percentages.extend(
                gchange.percentage()
                for gchange in valid_grade_changes)

        # The machine is single-use; drop the working dict so a second
        # consume() cannot silently mix state.
        del self.attempt_id_to_gchange

        return self

    def percentage(self):
        """
        :return: a percentage of achieved points, or *None*
        """
        if self.opportunity is None or not self.valid_percentages:
            return None

        strategy = self.opportunity.aggregation_strategy

        if strategy == grade_aggregation_strategy.max_grade:
            return max(self.valid_percentages)
        elif strategy == grade_aggregation_strategy.min_grade:
            return min(self.valid_percentages)
        elif strategy == grade_aggregation_strategy.avg_grade:
            return sum(self.valid_percentages)/len(self.valid_percentages)
        elif strategy == grade_aggregation_strategy.use_earliest:
            return self.valid_percentages[0]
        elif strategy == grade_aggregation_strategy.use_latest:
            return self.valid_percentages[-1]
        else:
            raise ValueError(
                    _("invalid grade aggregation strategy '%s'") % strategy)

    def stringify_state(self):
        """Render the state for human display (e.g. '85.0% (/2)')."""
        if self.state is None:
            return u"- ∅ -"
        elif self.state == grade_state_change_types.exempt:
            return "_((exempt))"
        elif self.state == grade_state_change_types.graded:
            if self.valid_percentages:
                result = "%.1f%%" % self.percentage()
                if len(self.valid_percentages) > 1:
                    result += " (/%d)" % len(self.valid_percentages)
                return result
            else:
                return u"- ∅ -"
        else:
            return "_((other state))"

    def stringify_machine_readable_state(self):
        """Render the state for machine consumption (e.g. CSV export)."""
        if self.state is None:
            return u"NONE"
        elif self.state == grade_state_change_types.exempt:
            return "EXEMPT"
        elif self.state == grade_state_change_types.graded:
            if self.valid_percentages:
                return "%.3f" % self.percentage()
            else:
                return u"NONE"
        else:
            return u"OTHER_STATE"

    def stringify_percentage(self):
        """Render just the percentage (one decimal place), or '' when no
        graded percentage exists."""
        if self.state == grade_state_change_types.graded:
            if self.valid_percentages:
                return "%.1f" % self.percentage()
            else:
                return u""
        else:
            return ""
# }}}
# {{{ flow <-> grading integration
def get_flow_grading_opportunity(course, | |
vol )
else:
qtyNeeded = purchase.qty
if not cost_result['consumables'].has_key(bottle.name):
cost_result['consumables'][bottle.name] =0
cost_result['consumables'][ bottle.name ] = cost_result['consumables'][ bottle.name ] + (purchase.purchaseCost * qtyNeeded)
cost_result['consumables']['__total__'] = cost_result['consumables']['__total__'] + (purchase.purchaseCost * qtyNeeded)
totalBottleVol=qtyNeeded*vol
bottle_volume_required = bottle_volume_required - (qtyNeeded * vol )
qtyRequired = qtyRequired + qtyNeeded
total_bottles = total_bottles + qtyNeeded
# if we don't have enough stock of bottles we will ask based on the last tbottle in the list
# if this is a tiny bottle this will be odd,... but variabile volume bottles isn't perfect
# if we have multiple types of bottles we don't ask for the full volume, we only ask for the
# missing bit
if bottle_volume_required > 0:
# sys.stderr.write("doing out of stock stuff\n")
stock_result['__pcnt_left__'][ bottle.name ] = 0
stock_result['__stockrequirements__'].append( [bottle.name,qtyAvailable, math.ceil(bottle_volume_required / vol ) + qtyRequired])
stock_result['__out_of_stock__'].append( bottle.name )
stock_result['__qty_available__'][ bottle.name ] = qtyAvailable
# this next calculation is the excess, but we have qtyRequired adding up
# as we go along
stock_result['__qty_required__'][ bottle.name ] = math.ceil(bottle_volume_required / vol ) + qtyRequired
### out of stock
sys.stderr.write("\tdbg:checkStockAndPrice() Polypins: %s/%s Kegs: %s/%s Bottles: %s/%s\n" %(total_polypins,totalPolypinVol,total_kegs,totalKegVol,total_bottles,totalBottleVol ))
purchase=None
# OCT2015 moved from takeStock
self.TAKESTOCK_kegs=total_kegs
self.TAKESTOCK_polypins=total_polypins
self.TAKESTOCK_bottles=total_bottles
# Now do crown caps
total_caps = total_bottles + 4
qtyRequired = 0
qtyAvailable = 0
ourBottleCaps = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND itemcategory = :2", username,"bottlecaps")
for purchase in ourBottleCaps.fetch(50000):
qtyAvailable = qtyAvailable + purchase.qty
if purchase.qty > 0 and total_caps > 0:
if purchase.qty > total_caps:
qtyNeeded= total_caps
else:
qtyNeeded = purchase.qty
if not cost_result['consumables'].has_key( purchase.storeitem ):
cost_result['consumables'][ purchase.storeitem ] = 0
cost_result['consumables'][ purchase.storeitem ] = cost_result['consumables'][ purchase.storeitem ] + (purchase.purchaseCost * qtyNeeded)
cost_result['consumables']['__total__'] = cost_result['consumables']['__total__'] + (purchase.purchaseCost * qtyNeeded)
total_caps = total_caps - qtyNeeded
qtyRequired = qtyRequired + qtyNeeded
if total_caps > 0:
sys.stderr.write("totalcaps - no enough\n")
stock_result['__pcnt_left__'][ purchase.storeitem ] = 0
stock_result['__stockrequirements__'].append( [purchase.storeitem ,qtyAvailable,qtyRequired] )
stock_result['__out_of_stock__'].append( purchase.storeitem )
stock_result['__qty_available__'][ purchase.storeitem ] = qtyAvailable
stock_result['__qty_required__'][ purchase.storeitem ] = qtyRequired
purchase = None
# And priming sugar
if recipe.priming_sugar_qty > 0:
priming_sugar_reqd = (total_bottles + 5) * recipe.priming_sugar_qty
qtyRequired=0
qtyAvailable=0
sys.stderr.write("\tdbg:checkStockAndPrice(): priming_sugar_reqd %s (BOTTLES)\n" %(priming_sugar_reqd))
ourPrimingSugar = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND itemcategory = :2", username,"primingsugar")
for purchase in ourPrimingSugar.fetch(50000):
qtyAvailable = qtyAvailable + purchase.qty
if purchase.qty > 0 and priming_sugar_reqd > 0:
qtyNeeded = priming_sugar_reqd
else:
qtyneeded = purchase.qty
if not cost_result['consumables'].has_key( purchase.storeitem ):
cost_result['consumables'][ purchase.storeitem ] = 0
cost_result['consumables'][ purchase.storeitem ] = cost_result['consumables'][ purchase.storeitem ] + (purchase.purchaseCost * qtyNeeded)
cost_result['consumables']['__total__'] = cost_result['consumables']['__total__'] + (purchase.purchaseCost * qtyNeeded)
priming_sugar_reqd = priming_sugar_reqd - qtyNeeded
qtyRequired = qtyRequired + qtyNeeded
if priming_sugar_reqd > 0:
sys.stderr.write("takeStock(): priming_sugar_reqd %s (BOTTLES - not enough)\n" %(priming_sugar_reqd))
try:
stock_result['__pcnt_left__'][ purchase.storeitem ] = 0
stock_result['__out_of_stock__'].append( purchase.storeitem )
stock_result['__stockrequirements__'].append( [purchase.storeitem ,qtyAvailable,qtyRequired] )
stock_result['__qty_available__'][ purchase.storeitem ] = qtyAvailable
stock_result['__qty_required__'][ purchase.storeitem ] = qtyRequired
except ImportError:
# we probably don' thave any type of priming sugar
# so we make this up instead
stock_result['__pcnt_left__'][ "__PRIMING_SUGAR__" ] = 0
stock_result['__stockrequirements__'].append( ['__PRIMING_SUGAR__' ,qtyAvailable,qtyRequired] )
stock_result['__out_of_stock__'].append( "__PRIMING_SUGAR__" )
stock_result['__qty_available__'][ "__PRIMING_SUGAR__" ] = qtyAvailable
stock_result['__qty_required__'][ "__PRIMING_SUGAR__" ] = qtyRequired
#
#
# water treatment (not sure how to trigger this)
#
#
# we will always do it
ourRecipeStats =self.dbWrapper.GqlQuery("SELECT * FROM gRecipeStats WHERE owner = :1 AND recipe = :2", username,recipeName).fetch()[0]
crsAdjust=self.crsAdjustment(315, float(ourRecipeStats.mash_liquid_6)+float(ourRecipeStats.sparge_water),50)
purchase=None
ourCrs = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND itemsubcategory = :2 AND storeitem = :3", username,"watertreatment","AMS")
total_crs=0 # the amount we have allocated throughout
qtyRequired=crsAdjust # total qty we require
qtyAvailable=0
qtyNeeded=0 # qty of a particular purchase we need
for purchase in ourCrs.fetch(5555):
qtyAvailable = qtyAvailable + purchase.qty
if purchase.qty > 0 and qtyRequired > 0:
if purchase.qty > qtyRequired:
qtyNeeded= qtyRequired
else:
qtyNeeded = purchase.qty
if not cost_result['consumables'].has_key( purchase.storeitem ):
cost_result['consumables'][ purchase.storeitem ] = 0
cost_result['consumables'][ purchase.storeitem ] = cost_result['consumables'][ purchase.storeitem ] + (purchase.purchaseCost * qtyNeeded)
cost_result['consumables']['__total__'] = cost_result['consumables']['__total__'] + (purchase.purchaseCost * qtyNeeded)
qtyRequired = qtyRequired - qtyNeeded
total_crs = total_crs + qtyNeeded
if qtyRequired > 0:
try:
stock_result['__pcnt_left__'][ purchase.storeitem ] = 0
stock_result['__stockrequirements__'].append( [purchase.storeitem ,qtyAvailable,qtyRequired] )
stock_result['__out_of_stock__'].append( purchase.storeitem )
stock_result['__qty_available__'][ purchase.storeitem ] = qtyAvailable
stock_result['__qty_required__'][ purchase.storeitem ] = qtyRequired
except ImportError:
stock_result['__pcnt_left__'][ "__AMS__" ] = 0
stock_result['__stockrequirements__'].append( ['__AMS__' ,qtyAvailable,qtyRequired] )
stock_result['__out_of_stock__'].append( "__AMS__" )
stock_result['__qty_available__'][ "__AMS__" ] = qtyAvailable
stock_result['__qty_required__'][ "__AMS__" ] = qtyRequired
completeVolume=keg_volume_required=polypin_volume_required-totalPolypinVol + totalBottleVol
#
# reaplcement for process costing, this is simplified to not split across partial purchases
# in practice for things like sterilising fluid etc we won't be splitting
#
# these calculations need to be replicated in takeStock
# sterilising fluid
#30gm for fermenter, + 6gm teaspoon for each 5 bottles
sterilisingPowder= 30 + (total_bottles / 5)*6
yeastVit=5
salifert=3
protofloc=1
campden=2
# consumableProcessIngredients - checkStock
for (consumableQtyRequired,item) in [(sterilisingPowder,'Sterilising Powder'),(yeastVit,'Yeast Vit'),(salifert,'Salifert Alkaline Test') ,(protofloc,'Protofloc'),(campden,'Campden Tablets')]:
qtyRequired=consumableQtyRequired
ourConsumablePurchases = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND storeitem = :2", username,item)
for purchase in ourConsumablePurchases.fetch(5000):
qtyAvailable = qtyAvailable + purchase.qty
if purchase.qty > 0 and consumableQtyRequired > 0:
if (purchase.qty) > consumableQtyRequired:
qtyNeeded = consumableQtyRequired
if not cost_result['consumables'].has_key( purchase.storeitem ):
cost_result['consumables'][purchase.storeitem] =0
cost_result['consumables'][ purchase.storeitem ] = cost_result['consumables'][ purchase.storeitem ] + (purchase.purchaseCost * qtyNeeded)
cost_result['consumables']['__total__'] = cost_result['consumables']['__total__'] + (purchase.purchaseCost * qtyNeeded)
qtyRequired = qtyRequired - qtyNeeded
if qtyRequired > 0:
stock_result['__pcnt_left__'][ item ] = 0
stock_result['__stockrequirements__'].append( [item ,qtyAvailable,qtyRequired] )
stock_result['__out_of_stock__'].append( item )
stock_result['__qty_available__'][ item ] = qtyAvailable
stock_result['__qty_required__'][ item ] = qtyRequired
result = {}
result['cost_result'] = cost_result
result['stock_result'] = stock_result
if raw:
sys.stderr.write("END: checkStockAndPrice()\n")
return (cost_result,stock_result)
sys.stderr.write("END: checkStockAndPrice()\n")
return {'operation' : 'checkStockAndPrice', 'status' : 1, 'json' : json.dumps( {'result': result } ) }
def deleteBrewlog(self,owner,brewlog):
    """Delete every stored record belonging to one brewlog.

    Removes, for the given (owner, brewlog) pair: stock allocations
    (gBrewlogStock), the brewlog index entry (gBrewlogs), step field data
    (gField) and step notes (gBrewlogStep).

    Returns a result dict: {'operation': 'deleteBrewlog', 'status': 1}.
    """
    sys.stderr.write("\nSTART: deleteBrewlog() %s\n" %(brewlog))
    # Stock allocated against this brewlog.
    ourOldRecords = self.dbWrapper.GqlQuery("SELECT * FROM gBrewlogStock WHERE owner = :1 AND brewlog = :2",owner,brewlog)
    for oldRecord in ourOldRecords.fetch(234898): oldRecord.delete()
    # Remove our old brewlog indexes
    ourOldRecords = self.dbWrapper.GqlQuery("SELECT * FROM gBrewlogs WHERE owner = :1 AND brewlog = :2", owner,brewlog)
    for oldRecord in ourOldRecords.fetch(234898): oldRecord.delete()
    # Remove our old step records
    ourOldRecords = self.dbWrapper.GqlQuery("SELECT * FROM gField WHERE owner = :1 AND brewlog = :2", owner,brewlog)
    for oldRecord in ourOldRecords.fetch(234898): oldRecord.delete()
    # Remove our old notes
    ourOldRecords = self.dbWrapper.GqlQuery("SELECT * FROM gBrewlogStep WHERE owner = :1 AND brewlog = :2",owner,brewlog)
    for oldRecord in ourOldRecords.fetch(234898): oldRecord.delete()
    sys.stderr.write("END: deleteBrewlog()\n")
    # Bug fix: the key was misspelled 'satus'; every other API method in
    # this class returns 'status'.
    return {'operation':'deleteBrewlog','status':1}
def changeProcess(self,username,recipeName,newProcess,activeCategory=""):
    """Switch a recipe to a new brewing process and rebuild its derived data.

    Every matching gRecipes row has its ``process`` field rewritten; the
    recipe is then recalculated and recompiled, and the refreshed recipe
    view's JSON is returned in the result payload.
    """
    sys.stderr.write("\nSTART: changeProcess() %s/%s\n" %(recipeName,newProcess))
    matches = self.dbWrapper.GqlQuery("SELECT * FROM gRecipes WHERE owner = :1 AND recipename = :2", username,recipeName)
    for row in matches.fetch(500):
        row.process = newProcess
        row.put()
    # Recompute the recipe numbers, then rebuild the compiled step plan.
    self.calculateRecipe(username, recipeName)
    self.compile(username, recipeName, None)
    refreshed = self.viewRecipe(username, recipeName, activeCategory, 1)
    sys.stderr.write("END: changeProcess()\n")
    return {'operation' : 'changeProcess', 'status' : 1 ,'json' : refreshed['json'] }
def listClearanceStock(self,username):
    """
    Builds a list of stock items which are out of date, and soon out of date.

    Scans every purchase category and flags purchases with stock remaining
    (qty > 0) whose best-before date has already passed (threshold 1) or
    falls within the next six days (threshold 0).  Items flagged
    ``willNotExpire`` are skipped.

    Returns a dict keyed by store category, plus summary keys:
    ``__overthreshold__`` (count expired), ``__earlythreshold__`` (count
    expiring soon), ``__oldstock__`` (per-item detail rows) and
    ``__oldstockindex__`` (list of flagged item names).
    """
    sys.stderr.write("\nSTART: listClearanceStock()\n")
    bestBeforeThreshold = time.time()
    # Bug fix: the early-warning cutoff must lie in the *future*.  The
    # original used time.time() - 6 days, which made the elif branch below
    # unreachable (any date older than now-6d is also older than now), so
    # the "soon out of date" count was always zero.
    bestBeforeEarlyThreshold = time.time()+(86400*6)
    toclear={}
    oldstock={}
    earlythreshold=0
    overthreshold=0
    for storetype in ['fermentables','hops','yeast','misc','consumable']:
        toclear[ storetype ] = {}
        ourPurchases = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND storecategory = :2", username,storetype)
        for purchasedItem in ourPurchases.fetch(50000):
            threshold=-1
            if purchasedItem.qty > 0: # only >0
                if not purchasedItem.willNotExpire:
                    if purchasedItem.bestBeforeEnd < bestBeforeThreshold:
                        # already expired
                        threshold=1
                        overthreshold=overthreshold + 1
                    elif purchasedItem.bestBeforeEnd < bestBeforeEarlyThreshold:
                        # not yet expired, but inside the early-warning window
                        threshold=0
                        earlythreshold=earlythreshold + 1
                if threshold >= 0: # if threshold or limit exceeded
                    # Modernized: dict.has_key() is Python-2-only; the "in"
                    # operator behaves identically on Python 2 and 3.
                    if purchasedItem.storeitem not in toclear[ storetype ]:
                        toclear[ storetype ][ purchasedItem.storeitem ] = []
                    if purchasedItem.storeitem not in oldstock:
                        oldstock[ purchasedItem.storeitem ] = []
                    oldstock[ purchasedItem.storeitem ].append([threshold, int((bestBeforeThreshold-purchasedItem.bestBeforeEnd)/86400)+1, purchasedItem.storeitem, purchasedItem.stocktag] )
                    toclear[ storetype ][ purchasedItem.storeitem ].append( (threshold, int((bestBeforeThreshold-purchasedItem.bestBeforeEnd)), purchasedItem ) )
    OLDSTOCKINDEX = list(oldstock)
    toclear['__overthreshold__'] = overthreshold
    toclear['__earlythreshold__'] = earlythreshold
    toclear['__oldstock__'] = oldstock
    toclear['__oldstockindex__'] = OLDSTOCKINDEX
    sys.stderr.write("END: listClearanceStock()\n")
    return toclear
def _stockBestBefore(self, username, stock_result, stockType, recipeName,dummyAllocate=0):
"""
Internal method which takes the stock with the oldest best before date
This method also takes into account a fixed wasted factor/percentage
dummyAllocate does 2 things, 1st it doesn't actually allocate and
2nd it will x10'd the qty required. The use case for dummyAllocate
is hops of different alphas.
stockBestBefore doesn't seem to actually save anything in the database
"""
sys.stderr.write("\nSTART: _stockBestBefore() %s\n" %(stockType))
# just a bit of protection
if not stock_result.has_key( stockType ):
stock_result[ stockType ] = {}
# i knew this was going to burn us when we were playing with
# adding ingredients
if stockType == "hops":
ourRecipeIngredients = self.dbWrapper.GqlQuery("SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3 AND hopAddAt <= :4",username,recipeName,stockType,0.0)
else:
ourRecipeIngredients = self.dbWrapper.GqlQuery("SELECT * FROM gIngredients WHERE owner = :1 AND recipename = :2 AND ingredientType = :3",username,recipeName,stockType)
# gIngredients will NOT catch both real recipe ingredients and consumables
# need something more but lets get ingredients done first
# will need to build this in
# if ITEM.category != "bottle" and ITEM.category != "bottlecaps":
for ITEM in ourRecipeIngredients.fetch(40000):
qty = ITEM.qty
ourStockCheck = self.dbWrapper.GqlQuery("SELECT * FROM gPurchases WHERE owner = :1 AND storeitem = :2",username,ITEM.ingredient)
ourStock = ourStockCheck.fetch(20000)
if len(ourStock) > 0 :
#US.has_key( ITEM ):
qtyNeeded = qty
# A future improvement might attempt to use whole bags rather than
# cause leaving opened packets.
best_before_dates_obj = {}
best_before_dates = []
for purchasedItem in ourStock:
if not best_before_dates_obj.has_key( purchasedItem.bestBeforeEnd ):
best_before_dates_obj[ purchasedItem.bestBeforeEnd ] = []
best_before_dates.append( purchasedItem.bestBeforeEnd )
best_before_dates_obj[ purchasedItem.bestBeforeEnd].append( purchasedItem )
# soonest best before end date first
best_before_dates.sort()
#uMake the qty required tenfold as we would really like to know
# how muct we can adjust up to.
if dummyAllocate: qtyNeeded = qtyNeeded * 100
for best_before_date in best_before_dates:
for item in best_before_dates_obj[ best_before_date ]:
if item.qty > 0 and qtyNeeded >0:
if not stock_result[ stockType ].has_key( item.storeitem ):
stock_result[ stockType ][ item.storeitem ] = []
if item.qty > qtyNeeded:
stock_result[ stockType ][ item.storeitem ].append( (qtyNeeded/item.qty,qtyNeeded, item.stocktag, item.storeitem, item) )
# If we need multiple quantities then we won't do wastage
# assumption is that the multiple qty is set appropriately.
# item | |
# gh_stars: 1-10
unicode_data_to_decomposition_start = {
160: "<noBreak> 0020",
168: "<compat> 0020 0308",
170: "<super> 0061",
175: "<compat> 0020 0304",
178: "<super> 0032",
179: "<super> 0033",
180: "<compat> 0020 0301",
181: "<compat> 03BC",
184: "<compat> 0020 0327",
185: "<super> 0031",
186: "<super> 006F",
188: "<fraction> 0031 2044 0034",
189: "<fraction> 0031 2044 0032",
190: "<fraction> 0033 2044 0034",
192: "0041 0300",
193: "0041 0301",
194: "0041 0302",
195: "0041 0303",
196: "0041 0308",
197: "0041 030A",
199: "0043 0327",
200: "0045 0300",
201: "0045 0301",
202: "0045 0302",
203: "0045 0308",
204: "0049 0300",
205: "0049 0301",
206: "0049 0302",
207: "0049 0308",
209: "004E 0303",
210: "004F 0300",
211: "004F 0301",
212: "004F 0302",
213: "004F 0303",
214: "004F 0308",
217: "0055 0300",
218: "0055 0301",
219: "0055 0302",
220: "0055 0308",
221: "0059 0301",
224: "0061 0300",
225: "0061 0301",
226: "0061 0302",
227: "0061 0303",
228: "0061 0308",
229: "0061 030A",
231: "0063 0327",
232: "0065 0300",
233: "0065 0301",
234: "0065 0302",
235: "0065 0308",
236: "0069 0300",
237: "0069 0301",
238: "0069 0302",
239: "0069 0308",
241: "006E 0303",
242: "006F 0300",
243: "006F 0301",
244: "006F 0302",
245: "006F 0303",
246: "006F 0308",
249: "0075 0300",
250: "0075 0301",
251: "0075 0302",
252: "0075 0308",
253: "0079 0301",
255: "0079 0308",
256: "0041 0304",
257: "0061 0304",
258: "0041 0306",
259: "0061 0306",
260: "0041 0328",
261: "0061 0328",
262: "0043 0301",
263: "0063 0301",
264: "0043 0302",
265: "0063 0302",
266: "0043 0307",
267: "0063 0307",
268: "0043 030C",
269: "0063 030C",
270: "0044 030C",
271: "0064 030C",
274: "0045 0304",
275: "0065 0304",
276: "0045 0306",
277: "0065 0306",
278: "0045 0307",
279: "0065 0307",
280: "0045 0328",
281: "0065 0328",
282: "0045 030C",
283: "0065 030C",
284: "0047 0302",
285: "0067 0302",
286: "0047 0306",
287: "0067 0306",
288: "0047 0307",
289: "0067 0307",
290: "0047 0327",
291: "0067 0327",
292: "0048 0302",
293: "0068 0302",
296: "0049 0303",
297: "0069 0303",
298: "0049 0304",
299: "0069 0304",
300: "0049 0306",
301: "0069 0306",
302: "0049 0328",
303: "0069 0328",
304: "0049 0307",
306: "<compat> 0049 004A",
307: "<compat> 0069 006A",
308: "004A 0302",
309: "006A 0302",
310: "004B 0327",
311: "006B 0327",
313: "004C 0301",
314: "006C 0301",
315: "004C 0327",
316: "006C 0327",
317: "004C 030C",
318: "006C 030C",
319: "<compat> 004C 00B7",
320: "<compat> 006C 00B7",
323: "004E 0301",
324: "006E 0301",
325: "004E 0327",
326: "006E 0327",
327: "004E 030C",
328: "006E 030C",
329: "<compat> 02BC 006E",
332: "004F 0304",
333: "006F 0304",
334: "004F 0306",
335: "006F 0306",
336: "004F 030B",
337: "006F 030B",
340: "0052 0301",
341: "0072 0301",
342: "0052 0327",
343: "0072 0327",
344: "0052 030C",
345: "0072 030C",
346: "0053 0301",
347: "0073 0301",
348: "0053 0302",
349: "0073 0302",
350: "0053 0327",
351: "0073 0327",
352: "0053 030C",
353: "0073 030C",
354: "0054 0327",
355: "0074 0327",
356: "0054 030C",
357: "0074 030C",
360: "0055 0303",
361: "0075 0303",
362: "0055 0304",
363: "0075 0304",
364: "0055 0306",
365: "0075 0306",
366: "0055 030A",
367: "0075 030A",
368: "0055 030B",
369: "0075 030B",
370: "0055 0328",
371: "0075 0328",
372: "0057 0302",
373: "0077 0302",
374: "0059 0302",
375: "0079 0302",
376: "0059 0308",
377: "005A 0301",
378: "007A 0301",
379: "005A 0307",
380: "007A 0307",
381: "005A 030C",
382: "007A 030C",
383: "<compat> 0073",
416: "004F 031B",
417: "006F 031B",
431: "0055 031B",
432: "0075 031B",
452: "<compat> 0044 017D",
453: "<compat> 0044 017E",
454: "<compat> 0064 017E",
455: "<compat> 004C 004A",
456: "<compat> 004C 006A",
457: "<compat> 006C 006A",
458: "<compat> 004E 004A",
459: "<compat> 004E 006A",
460: "<compat> 006E 006A",
461: "0041 030C",
462: "0061 030C",
463: "0049 030C",
464: "0069 030C",
465: "004F 030C",
466: "006F 030C",
467: "0055 030C",
468: "0075 030C",
469: "00DC 0304",
470: "00FC 0304",
471: "00DC 0301",
472: "00FC 0301",
473: "00DC 030C",
474: "00FC 030C",
475: "00DC 0300",
476: "00FC 0300",
478: "00C4 0304",
479: "00E4 0304",
480: "0226 0304",
481: "0227 0304",
482: "00C6 0304",
483: "00E6 0304",
486: "0047 030C",
487: "0067 030C",
488: "004B 030C",
489: "006B 030C",
490: "004F 0328",
491: "006F 0328",
492: "01EA 0304",
493: "01EB 0304",
494: "01B7 030C",
495: "0292 030C",
496: "006A 030C",
497: "<compat> 0044 005A",
498: "<compat> 0044 007A",
499: "<compat> 0064 007A",
500: "0047 0301",
501: "0067 0301",
504: "004E 0300",
505: "006E 0300",
506: "00C5 0301",
507: "00E5 0301",
508: "00C6 0301",
509: "00E6 0301",
510: "00D8 0301",
511: "00F8 0301",
512: "0041 030F",
513: "0061 030F",
514: "0041 0311",
515: "0061 0311",
516: "0045 030F",
517: "0065 030F",
518: "0045 0311",
519: "0065 0311",
520: "0049 030F",
521: "0069 030F",
522: "0049 0311",
523: "0069 0311",
524: "004F 030F",
525: "006F 030F",
526: "004F 0311",
527: "006F 0311",
528: "0052 030F",
529: "0072 030F",
530: "0052 0311",
531: "0072 0311",
532: "0055 030F",
533: "0075 030F",
534: "0055 0311",
535: "0075 0311",
536: "0053 0326",
537: "0073 0326",
538: "0054 0326",
539: "0074 0326",
542: "0048 030C",
543: "0068 030C",
550: "0041 0307",
551: "0061 0307",
552: "0045 0327",
553: "0065 0327",
554: "00D6 0304",
555: "00F6 0304",
556: "00D5 0304",
557: "00F5 0304",
558: "004F 0307",
559: "006F 0307",
560: "022E 0304",
561: "022F 0304",
562: "0059 0304",
563: "0079 0304",
688: "<super> 0068",
689: "<super> 0266",
690: "<super> 006A",
691: "<super> 0072",
692: "<super> 0279",
693: "<super> 027B",
694: "<super> 0281",
695: "<super> 0077",
696: "<super> 0079",
728: "<compat> 0020 0306",
729: "<compat> 0020 0307",
730: "<compat> 0020 030A",
731: "<compat> 0020 0328",
732: "<compat> 0020 0303",
733: "<compat> 0020 030B",
736: "<super> 0263",
737: "<super> 006C",
738: "<super> 0073",
739: "<super> 0078",
740: "<super> 0295",
832: "0300",
833: "0301",
835: "0313",
836: "0308 0301",
884: "02B9",
890: "<compat> 0020 0345",
894: "003B",
900: "<compat> 0020 0301",
901: "00A8 0301",
902: "0391 0301",
903: "00B7",
904: "0395 0301",
905: "0397 0301",
906: "0399 0301",
908: "039F 0301",
910: "03A5 0301",
911: "03A9 0301",
912: "03CA 0301",
938: "0399 0308",
939: "03A5 0308",
940: "03B1 0301",
941: "03B5 0301",
942: "03B7 0301",
943: "03B9 0301",
944: "03CB 0301",
970: "03B9 0308",
971: "03C5 0308",
972: "03BF 0301",
973: "03C5 0301",
974: "03C9 0301",
976: "<compat> 03B2",
977: "<compat> 03B8",
978: "<compat> 03A5",
979: "03D2 0301",
980: "03D2 0308",
981: "<compat> 03C6",
982: "<compat> 03C0",
1008: "<compat> 03BA",
1009: "<compat> 03C1",
1010: "<compat> 03C2",
1012: "<compat> 0398",
1013: "<compat> 03B5",
1017: "<compat> 03A3",
1024: "0415 0300",
1025: "0415 0308",
1027: "0413 0301",
1031: "0406 0308",
1036: "041A 0301",
1037: "0418 0300",
1038: "0423 0306",
1049: "0418 0306",
1081: "0438 0306",
1104: "0435 0300",
1105: "0435 0308",
1107: "0433 0301",
1111: "0456 0308",
1116: "043A 0301",
1117: "0438 0300",
1118: "0443 0306",
1142: "0474 030F",
1143: "0475 030F",
1217: "0416 0306",
1218: "0436 0306",
1232: "0410 0306",
1233: "0430 0306",
1234: "0410 0308",
1235: "0430 0308",
1238: "0415 0306",
1239: "0435 0306",
1242: "04D8 0308",
1243: "04D9 0308",
1244: "0416 0308",
1245: "0436 0308",
1246: "0417 0308",
1247: "0437 0308",
1250: "0418 0304",
1251: "0438 0304",
1252: "0418 0308",
1253: "0438 0308",
1254: "041E 0308",
1255: "043E 0308",
1258: "04E8 0308",
1259: "04E9 0308",
1260: "042D 0308",
1261: "044D 0308",
1262: "0423 0304",
1263: "0443 0304",
1264: "0423 0308",
1265: "0443 0308",
1266: "0423 030B",
1267: "0443 030B",
1268: "0427 0308",
1269: "0447 0308",
1272: "042B 0308",
1273: "044B 0308",
1415: "<compat> 0565 0582",
1570: "0627 0653",
1571: "0627 0654",
1572: "0648 0654",
1573: "0627 0655",
1574: "064A 0654",
1653: "<compat> 0627 0674",
1654: "<compat> 0648 0674",
1655: "<compat> 06C7 0674",
1656: "<compat> 064A 0674",
1728: "06D5 0654",
1730: "06C1 0654",
1747: "06D2 0654",
2345: "0928 093C",
2353: "0930 093C",
2356: | |
# -*- coding: utf-8 -*-
# web2py model file for the "sisventi" application: database connection,
# auth customisation and table definitions.  web2py executes model files on
# every request and injects DAL, Field, T, request, response and the IS_*
# validators into the global namespace.
db = DAL('mysql://root@localhost/sisventi')
# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'maestrcontroller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
import datetime
from gluon.tools import Mail, Auth, Crud, Service, PluginManager, prettydate
mail = Mail() # mailer
auth = Auth(db) # authentication/authorization
crud = Crud(db) # for CRUD helpers using auth
service = Service() # for json, xml, jsonrpc, xmlrpc, amfrpc
plugins = PluginManager()
# Captured once per request (web2py re-runs models on each request), so
# 'today'/'now' Field defaults below reflect request time, not import time.
today = datetime.date.today()
now = datetime.datetime.now()
# Custom auth_user table
db.define_table(
    auth.settings.table_user_name,
    Field('username', length=128, default='', label=T('Usuario'), unique=True),
    Field('first_name', length=128, default='', label=T('Nombres')),
    Field('last_name', length=128, default='', label=T('Apellidos')),
    Field('email', length=128, default='', unique=True, label=T('Correo electrónico')),
    Field('password', 'password', length=512, readable=False, label=T('Contraseña')),
    Field('registration_date', 'date', default=today, writable=False, readable=False, label=T('Tiempo Registro')),
    Field('registration_key', length=512, writable=False, readable=False, default=''),
    Field('reset_password_key', length=512, writable=False, readable=False, default=''),
    Field('registration_id', length=512, writable=False, readable=False, default='')
)
custom_auth_table = db[auth.settings.table_user_name] # get the custom_auth_table
# Validators for the custom auth table (Spanish error messages).
custom_auth_table.username.requires = [
    IS_NOT_EMPTY(error_message='Campo obligatorio'),
    IS_NOT_IN_DB(db, custom_auth_table.username, error_message=T('El nombre de usuario ya está registrado'))]
custom_auth_table.first_name.requires = [
    IS_NOT_EMPTY(error_message='Campo obligatorio')]
custom_auth_table.last_name.requires = [
    IS_NOT_EMPTY(error_message='Campo obligatorio')]
custom_auth_table.password.requires = [
    IS_NOT_EMPTY(error_message='Campo obligatorio'),
    CRYPT()]
custom_auth_table.email.requires = [
    IS_EMAIL(error_message=auth.messages.invalid_email),
    IS_NOT_IN_DB(db, custom_auth_table.email, error_message=T('El correo ya está registrado'))]
auth.settings.table_user = custom_auth_table
# NOTE(review): 'logging' is truthy, so the "or 'smtp.gmail.com:587'" arm is
# dead code -- mail is written to the web2py log, never sent via SMTP.
# Replace the whole expression with the SMTP host:port to actually send.
mail.settings.server = 'logging' or 'smtp.gmail.com:587' # your SMTP server
mail.settings.sender = '<EMAIL>' # your email
mail.settings.login = 'username:password' # your credentials or None
auth.settings.hmac_key = 'sha512:a58dabf0-5503-4058-b583-f13a0b4add4f' # before define_tables()
auth.define_tables(username=True) # creates all needed tables
auth.settings.mailer = mail # for user email verification
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['verify_email'])+'/%(key)s to verify your email'
auth.settings.reset_password_requires_verification = True
auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['reset_password'])+'/%(key)s to reset your password'
crud.settings.auth = None # =auth to enforce authorization on crud
# Actions disabled
auth.settings.actions_disabled.append('register')
# Language
T.force('es-es')
# Tables
# NOTE: web2py creates an auto-increment 'id' field automatically; the
# explicit Field('id', ...) declarations below re-declare that same key.
# Currencies.
db.define_table('monedas',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('modo', 'integer', default=0, notnull=False),
    Field('codigo', 'string', default='', label='Código', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('simbolo', 'string', default='', label='Símbolo', notnull=False),
    Field('orden', 'integer', default=0, notnull=False)
)
# Warehouse catalogue.
db.define_table('almacenes_lista',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('almacen', 'string', default='', notnull=False),
    Field('descripcion', 'string', default='', notnull=False),
    Field('modo', 'integer', default=0, notnull=False),
    Field('area', 'string', default='', notnull=False),
    Field('ubigeo', 'string', default='', notnull=False),
    Field('direccion', 'string', default='', notnull=False),
    Field('tipo_doc', 'integer', default=0, notnull=False),
    Field('doc_id', 'string', default='', notnull=False)
)
# Points of sale; each references a warehouse.
db.define_table('puntos_venta',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('codigo', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('distrito', 'string', default='', notnull=False),
    Field('direccion', 'string', default='', label='Dirección', notnull=False),
    Field('pos_1', 'integer', default=0, label='Posición 1', notnull=False),
    Field('pos_2', 'integer', default=0, label='Posición 2', notnull=False),
    Field('almacen', db.almacenes_lista, label='Almacén',
        requires=IS_IN_DB(db, db.almacenes_lista, '%(almacen)s', zero='[Seleccionar]',
            error_message='Seleccione un almacén')),
    Field('alias', 'string', default='', notnull=False),
    Field('area', 'integer', default=0, notnull=False),
    Field('factor_merma', 'double', default=0.0, label='Factor Merma', notnull=False)
)
# Commercial terms (payment conditions).
db.define_table('condiciones_comerciales',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('condicion', 'string', default='', label='Condición', notnull=False),
    Field('modo', 'integer', default=0, notnull=False),
    Field('descripcion', 'string', default='', label='Descripción', notnull=False),
    Field('codigo', 'integer', default=0, label='Código', notnull=False),
    Field('dias', 'integer', default=0, label='Días', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Identity document types (note: 'id' declared as string here).
db.define_table('documentos_identidad',
    Field('id', 'string'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('longitud', 'integer', default=9, notnull=False)
)
# Contact directory: clients (modo=1) and suppliers (modo=2).
db.define_table('directorio',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('modo', 'string',
        requires=IS_IN_SET({'1':'Cliente', '2':'Proveedor'}, zero='[Seleccionar]',
            error_message='Seleccione el modo')),
    Field('razon_social', 'string', default='', notnull=False, label='Razón Social'),
    Field('nombre_corto', 'string', default=''),
    Field('rubro', 'integer', default=0, notnull=False),
    Field('nombres', 'string', default='', notnull=False),
    Field('apellidos', 'string', default='', notnull=False),
    Field('tipo_doc', db.documentos_identidad, label='Tipo de Documento',
        requires=IS_IN_DB(db, db.documentos_identidad, '%(nombre)s',
            zero='[Seleccionar]',
            error_message='Seleccione una tipo de documento')),
    Field('doc_id', 'string', default='', notnull=False, label='ID del Documento'),
    Field('doc_id_aux', 'string', default='', label='ID Auxiliar del Documento'),
    Field('pais', 'string', default='Perú', notnull=False, label='País'),
    Field('ubigeo', 'string', default='', notnull=False, label='Departamento'),
    Field('direccion', 'string', default='', notnull=False, label='Dirección'),
    Field('codigo_postal', 'string', default='', label='Código Postal'),
    Field('referencia', 'string', default=''),
    Field('condicion', db.condiciones_comerciales, label='Condición',
        requires=IS_IN_DB(db, db.condiciones_comerciales, '%(condicion)s',
            zero='[Seleccionar]',
            error_message='Seleccione una condición')),
    Field('tiempo_cred', 'integer', default=0, notnull=False, label='Días a Pagar'),
    Field('intervalo', 'integer', default=0, notnull=False, label='Intervalo'),
    Field('interes', 'double', default=0.0, notnull=False, label='Interés'),
    Field('linea_credito', 'double', default=0.0, notnull=False, label='Línea de Crédito'),
    Field('representante_legal', 'string', default=''),
    Field('cargo', 'string', default=''),
    Field('fecha', 'date', notnull=False, default=datetime.date.today()),
    Field('sexo', 'string',
        requires=IS_IN_SET({'1':'ND', '2':'Masculino', '3':'Femenino'}, zero='[Seleccionar]',
            error_message='Seleccione el sexo')),
    Field('preferente', 'boolean', default=False)
)
# Carriers / delivery drivers.
db.define_table('transportistas',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('codigo', 'string', default='', label='Código', notnull=False),
    Field('emp_doc_id', 'string', default='', notnull=False),
    Field('doc_id', 'string', default='', notnull=False),
    Field('nombres', 'string', default='', notnull=False),
    Field('apellidos', 'string', default='', notnull=False),
    Field('ubigeo', 'string', default='', notnull=False),
    Field('direccion', 'string', default='', label='Dirección', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Work shifts.
db.define_table('turnos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('turno', 'string', default='', notnull=False),
    Field('descripcion', 'string', default='', label='Descripción', notnull=False),
    Field('hora_inicio', 'time', default=datetime.time(0,0,0), label='Hora de Inicio', notnull=False),
    Field('hora_fin', 'time', default=datetime.time(0,0,0), label='Hora de Fin', notnull=False)
)
# Simple lookup catalogues referenced by the product master table.  They all
# share the same shape: code, display name and sort position.
# Articles.
db.define_table('articulos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('articulo', 'string', default='', label='Artículo', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Category mode.
db.define_table('catmod',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('catmod', 'string', default='', label='Categoría Modo', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Packaging types.
db.define_table('empaques',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('empaque', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Sub-houses (sub-brand of 'casas').
db.define_table('sub_casas',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('sub_casa', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Labels/seals.
db.define_table('sellos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('sello', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Houses/brands.
db.define_table('casas',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('casa', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Sub-labels (sub-brand of 'sellos').
db.define_table('sub_sellos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('sub_sello', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Item status values.
db.define_table('status',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('status', 'string', default='', label='Estado', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Item types.
db.define_table('tipos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('tipo', 'string', default='', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Units of measure, with a conversion factor between two abbreviations.
db.define_table('unidades_medida',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('codigo', 'string', default='', label='Código', notnull=False),
    Field('descripcion', 'string', default='', label='Descripción', notnull=False),
    Field('modo', 'integer', default=0, notnull=False),
    Field('abreviatura_origen', 'string', default='', notnull=False),
    Field('abreviatura_destino', 'string', default='', notnull=False),
    Field('factor', 'double', default=0.0, notnull=False)
)
# Genres.
db.define_table('generos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('genero', 'string', default='', label='Género', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Sub-genres; each references a parent genre.
db.define_table('sub_generos',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('genero', db.generos, label='Género',
        requires=IS_IN_DB(db, db.generos, '%(nombre)s', zero='[Seleccionar]',
            error_message='Seleccione un género')),
    Field('sub_genero', 'string', default='', label='Sub-Género', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
# Categories.
db.define_table('categorias',
    Field('id', 'integer'),
    Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
    Field('categoria', 'string', default='', label='Categoría', notnull=False),
    Field('nombre', 'string', default='', notnull=False),
    Field('posicion', 'integer', default=0, label='Posición', notnull=False)
)
db.define_table('maestro',
Field('id', 'integer'),
Field('registro', 'datetime', label='Tiempo Registro', default=now, notnull=False, writable=False),
Field('codbarras', 'string', default='', notnull=False, label='Código'),
Field('pv', db.puntos_venta, label='Punto de Venta',
requires=IS_IN_DB(db, db.puntos_venta, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un punto de venta')),
Field('grupo_venta', 'string', default='', notnull=False),
Field('articulo', db.articulos,
requires=IS_IN_DB(db, db.articulos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un artículo')),
Field('casa', db.casas,
requires=IS_IN_DB(db, db.casas, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione una casa')),
Field('sub_casa', db.sub_casas,
requires=IS_IN_DB(db, db.sub_casas, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione una sub-casa')),
Field('genero', db.generos,
requires=IS_IN_DB(db, db.generos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un género')),
Field('sub_genero', db.sub_generos,
requires=IS_IN_DB(db, db.sub_generos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un sub-género')),
Field('empaque', db.empaques,
requires=IS_IN_DB(db, db.empaques, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un empaque')),
Field('sello', db.sellos,
requires=IS_IN_DB(db, db.sellos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un sello')),
Field('sub_sello', db.sub_sellos,
requires=IS_IN_DB(db, db.sub_sellos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un sub-sello')),
Field('tipo', db.tipos,
requires=IS_IN_DB(db, db.tipos, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un tipo')),
Field('catmod', db.catmod,
requires=IS_IN_DB(db, db.catmod, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un catmod')),
Field('categoria', db.categorias,
requires=IS_IN_DB(db, db.categorias, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione una categoría')),
Field('status', db.status,
requires=IS_IN_DB(db, db.status, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione un status')),
Field('moneda', db.monedas,
requires=IS_IN_DB(db, db.monedas, '%(nombre)s', zero='[Seleccionar]',
error_message='Seleccione una moneda')),
Field('precio', 'double', default=0.0, notnull=False),
Field('modo_impuesto', 'integer', default=0, notnull=False, label='Modo de Impuesto'),
Field('impuesto', 'string', default='', notnull=False),
Field('nombre', 'string', default='', notnull=False),
Field('descripcion', 'string', default='', notnull=False, label='Descripción'),
Field('alias', 'string', default='', notnull=False),
Field('descuento', 'integer', default=0, notnull=False),
Field('dependencia', 'integer', default=0, notnull=False),
Field('stock_min', 'double', default=0.0, | |
print(out.numpy())
Outputs:
.. testoutput::
0
"""
if axis is None:
assert not keepdims, "can not set axis=None and keepdims=True"
inp = inp.flatten()
axis = 0
axis = _normalize_axis(inp.ndim, axis, reverse=True)
if isinstance(axis, collections.abc.Iterable):
for ai in axis:
op = builtin.Argmin(axis=ai)
(inp,) = apply(op, inp)
if not keepdims:
inp = squeeze(inp, ai)
return inp
op = builtin.Argmin(axis=axis)
(result,) = apply(op, inp)
if not keepdims:
result = squeeze(result, axis)
return result
def argmax(
    inp: Tensor,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
) -> Tensor:
    r"""
    Returns the indices of the maximum values along
    given axis. If axis is a list of dimensions,
    reduce over all of them.

    :param inp: input tensor.
    :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
    :param keepdims: whether the output tensor has axis retained or not. Default: False
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
        out = F.argmax(x)
        print(out.numpy())

    Outputs:

    .. testoutput::

        5

    """
    if axis is None:
        # Reducing over everything: flatten first, then reduce axis 0.
        assert not keepdims, "can not set axis=None and keepdims=True"
        inp = inp.flatten()
        axis = 0
    axis = _normalize_axis(inp.ndim, axis, reverse=True)
    # Treat a single axis as a one-element sequence so both cases share the
    # same reduction loop; axes arrive in reverse order from _normalize_axis,
    # so squeezing after each step keeps the remaining axis indices valid.
    if isinstance(axis, collections.abc.Iterable):
        axes = axis
    else:
        axes = (axis,)
    result = inp
    for ax in axes:
        (result,) = apply(builtin.Argmax(axis=ax), result)
        if not keepdims:
            result = squeeze(result, ax)
    return result
def normalize(
    inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
) -> Tensor:
    r"""
    Performs :math:`L_p` normalization of input tensor along
    given axis.

    For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
    :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.

    :param inp: input tensor.
    :param ord: power of value applied to input tensor. Default: 2
    :param axis: dimension to reduce.If None, input must be a vector. Default: None
    :param eps: a small value to avoid division by zero. Default: 1e-12
    :return: normalized output tensor.
    """
    # When reducing along an axis, keep that axis so the denominator
    # broadcasts against the input; for the whole-vector case the norm is a
    # scalar and no axis needs to be retained.
    if axis is None:
        denom = clip(norm(inp, ord, axis), lower=eps)
    else:
        denom = clip(norm(inp, ord, axis, keepdims=True), lower=eps)
    return inp / denom
def argsort(inp: Tensor, descending: bool = False) -> Tensor:
    r"""
    Returns the indices that would sort the input tensor.

    :param inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
    :param descending: sort in descending order, where the largest comes first. Default: False
    :return: indices of int32 indicates how to sort the input.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.array([1,2], dtype=np.float32))
        indices = F.argsort(x)
        print(indices.numpy())

    Outputs:

    .. testoutput::

        [0 1]

    """
    assert len(inp.shape) <= 2, "Input should be 1d or 2d"
    op = builtin.Argsort(order="descending" if descending else "ascending")
    if len(inp.shape) == 1:
        # Lift the vector to a one-row matrix, sort, and drop the row axis.
        _, indices = apply(op, inp.reshape(1, -1))
        return indices[0]
    _, indices = apply(op, inp)
    return indices
def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
    r"""
    Returns sorted tensor and the indices would sort the input tensor.

    :param inp: input tensor. If it's 2d, the result would be sorted by row.
    :param descending: sort in descending order, where the largest comes first. Default: False
    :return: tuple of two tensors `(sorted_tensor, indices_of_int32)`.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.array([1,2], dtype=np.float32))
        out, indices = F.sort(x)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [1. 2.]

    """
    assert len(inp.shape) <= 2, "Input should be 1d or 2d"
    op = builtin.Argsort(order="descending" if descending else "ascending")
    if len(inp.shape) == 1:
        # Lift the vector to a one-row matrix, sort, and drop the row axis
        # from both outputs.
        values, indices = apply(op, inp.reshape(1, -1))
        return values[0], indices[0]
    values, indices = apply(op, inp)
    return values, indices
def topk(
    inp: Tensor,
    k: int,
    descending: bool = False,
    kth_only: bool = False,
    no_sort: bool = False,
) -> Tuple[Tensor, Tensor]:
    r"""
    Selects the ``Top-K`` (by default) smallest elements of 2d matrix by row.

    :param inp: input tensor. If input tensor is 2d, each row will be sorted.
    :param k: number of elements needed.
    :param descending: if True, return the largest elements instead. Default: False
    :param kth_only: if True, only the k-th element will be returned. Default: False
    :param no_sort: if True, the returned elements can be unordered. Default: False
    :return: tuple of two tensors ``(topk_tensor, indices_of_int32)``

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
        top, indices = F.topk(x, 5)
        print(top.numpy(), indices.numpy())

    Outputs:

    .. testoutput::

        [1. 2. 3. 4. 5.] [7 0 6 1 5]
    """
    # The kernel's convention: a negative k selects the largest elements.
    if descending:
        k = -k
    # Choose the kernel mode from the two optional flags; kth_only wins.
    if kth_only:
        mode = "kth_only"
    elif no_sort:
        mode = "value_idx_nosort"
    else:
        mode = "value_idx_sorted"
    op = builtin.TopK(mode=mode)

    # The op expects k as an int32 tensor on the same device as the input.
    if not isinstance(k, Tensor):
        (k,) = Const(k, dtype="int32", device=inp.device)()

    if len(inp.shape) == 1:
        # 1d input: lift to a single row, then squeeze the row axis back out.
        if kth_only:
            (tns,) = apply(op, expand_dims(inp, 0), k)
            # FIXME:
            # could use a dedicated kernel
            # gradient may be routed to other indices if k-th value is not unique
            ind = argmax((tns == inp).astype("int8"))
            tns = squeeze(tns, 0)
        else:
            tns, ind = apply(op, expand_dims(inp, 0), k)
            tns = squeeze(tns, 0)
            ind = squeeze(ind, 0)
    else:
        if kth_only:
            (tns,) = apply(op, inp, k)
            # FIXME: same as above — index recovered by equality match per row,
            # which can pick a different position when the k-th value repeats.
            ind = argmax((expand_dims(tns, 1) == inp).astype("int8"), 1)
        else:
            tns, ind = apply(op, inp, k)
    return tns, ind
def matinv(inp: Tensor) -> Tensor:
    """
    Computes the inverse of a batch of matrices; input must have shape [..., n, n].

    :param inp: input tensor.
    :return: output tensor holding the inverse of each matrix in the batch.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        data = tensor([[1.0, 0.0], [1.0, 1.0]])
        out = F.matinv(data)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[ 1.  0.]
         [-1.  1.]]
    """
    (inverse,) = apply(builtin.MatrixInverse(), inp)
    return inverse
def matmul(
inp1: Tensor,
inp2: Tensor,
transpose_a=False,
transpose_b=False,
compute_mode="default",
format="default",
) -> Tensor:
"""
Performs a matrix multiplication of the matrices ``inp1`` and ``inp2``.
With different inputs dim, this function behaves differently:
- Both 1-D tensor, simply forward to ``dot``.
- Both 2-D tensor, normal matrix multiplication.
- If one input tensor is 1-D, matrix vector multiplication.
- If at least one tensor are 3-dimensional or >3-dimensional, the other tensor should have dim >= 2,
the batched matrix-matrix is returned, and the tensor with smaller dimension will be broadcasted.
For example:
- inp1: `(n, k, m)`, inp2: `(n, m, p)`, return: `(n, k, p)`
- inp1: `(n, k, m)`, inp2: `(m, p)`, return: `(n, k, p)`
- inp1: `(n, j, k, m)`, inp2: `(n, j, m, p)`, return: `(n, j, k, p)`
:param inp1: first matrix to be multiplied.
:param inp2: second matrix to be multiplied.
:return: output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
out = F.matmul(data1, data2)
print(out.numpy())
Outputs:
.. testoutput::
[[10. 13.]
[28. 40.]]
"""
if amp._enabled:
compute_mode = "float32"
inp1, inp2 = cast_tensors(inp1, inp2)
else:
dtype = dtype_promotion(inp1, inp2)
if inp1.dtype != dtype:
inp1 = inp1.astype(dtype)
if inp2.dtype != dtype:
inp2 = inp2.astype(dtype)
remove_row, remove_col = False, False
dim1, dim2 = inp1.ndim, inp2.ndim
# handle dim=1 cases, dot and matrix-vector multiplication
if dim1 == 1 and dim2 == 1:
return dot(inp1, inp2)
# the underlying matmul op requires input dims to be at least 2
if dim1 == 1:
inp1 = expand_dims(inp1, 0)
dim1 = 2
remove_row = True
if dim2 == 1:
inp2 = expand_dims(inp2, 1)
dim2 = 2
remove_col = True
batch_shape = None
shape1 = inp1.shape
shape2 = inp2.shape
maxdim = dim1 if dim1 > dim2 else dim2
if dim1 >= 3 or dim2 >= 3:
if use_symbolic_shape():
if dim1 > dim2:
shape2 = concat([shape1[:-2], shape2[-2:]])
inp2 = broadcast_to(inp2, shape2)
if dim1 | |
postfix of the Sv file used to remove noise from, default to '_Sv'
source_path : str
path of Sv file used to remove noise from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float, optional
Meters per tile for noise estimation [m]
noise_est_ping_size : int, optional
Number of pings per tile for noise estimation
SNR : int, optional
Minimum signal-to-noise ratio (remove values below this after general noise removal).
Sv_threshold : int, optional
Minimum Sv threshold [dB] (remove values below this after general noise removal)
save : bool, optional
Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
Default to ``False``.
save_postfix : str
Filename postfix, default to '_Sv_clean'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Get calibrated Sv
if self.Sv is not None:
print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
print('%s Remove noise from Sv stored in: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Get TVG and ABS for compensating for transmission loss
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Function for use with apply
def remove_n(x, rr):
p_c_lin = 10 ** ((x.Sv - x.ABS - x.TVG) / 10)
nn = 10 * np.log10(p_c_lin.mean(dim='ping_time').groupby_bins('range_bin', rr).mean().min(
dim='range_bin_bins')) + x.ABS + x.TVG
# Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB
if not Sv_threshold:
return x.Sv.where(x.Sv > (nn + SNR), other=np.nan)
else:
return x.Sv.where((x.Sv > (nn + SNR)) & (x > Sv_threshold), other=np.nan)
# Groupby noise removal operation
proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
ABS.name = 'ABS'
TVG.name = 'TVG'
pp = xr.merge([proc_data, ABS])
pp = xr.merge([pp, TVG])
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
map(remove_n, rr=range_bin_tile_bin_edge[0])
Sv_clean = Sv_clean.drop_vars(['ping_idx'])
else:
tmp_clean = []
cnt = 0
for key, val in pp.groupby('frequency'): # iterate over different frequency channel
tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
map(remove_n, rr=range_bin_tile_bin_edge[cnt])
cnt += 1
tmp_clean.append(tmp)
clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
Sv_clean = xr.DataArray(clean_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_clean[0]['ping_time'].values,
'range_bin': tmp_clean[0]['range_bin'].values},
dims=['frequency', 'ping_time', 'range_bin'])
# Set up DataSet
Sv_clean.name = 'Sv'
Sv_clean = Sv_clean.to_dataset()
Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Attach calculated range into data set
Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
# Save as object attributes as a netCDF file
self.Sv_clean = Sv_clean
# TODO: now adding the below so that MVBS can be calculated directly
# from the cleaned Sv without saving and loading Sv_clean from disk.
# However this is not explicit to the user. A better way to do this
# is to change get_MVBS() to first check existence of self.Sv_clean
# when `_Sv_clean` is specified as the source_postfix.
if not print_src: # remove noise from Sv stored in memory
self.Sv = Sv_clean.copy()
if save:
self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
Sv_clean.to_netcdf(self.Sv_clean_path)
# Close opened resources
proc_data.close()
def noise_estimates(self, source_postfix='_Sv', source_path=None,
                    noise_est_range_bin_size=None, noise_est_ping_size=None):
    """Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.

    The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
    This method contains redundant pieces of code that also appear in method remove_noise(),
    but this method can be used separately to determine the exact tile size for noise removal before
    noise removal is actually performed.

    Parameters
    ----------
    source_postfix : str
        postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
    source_path : str
        path of Sv file used to calculate noise estimates from, can be one of the following:

        - None (default):
          use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
          or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
        - path to a directory: RAWFILENAME_Sv.nc in the specified directory
        - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
    noise_est_range_bin_size : float
        meters per tile for noise estimation [m]
    noise_est_ping_size : int
        number of pings per tile for noise estimation

    Returns
    -------
    noise_est : xarray DataSet
        noise estimates as a DataArray with dimension [ping_time x range_bin]
        ping_time and range_bin are taken from the first element of each tile along each of the dimensions
    """
    # Check params: only overwrite the stored tile sizes when the caller
    # passes a value that actually differs from the current attribute.
    if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
        self.noise_est_range_bin_size = noise_est_range_bin_size
    if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
        self.noise_est_ping_size = noise_est_ping_size
    # Use calibrated data to calculate noise removal.
    # NOTE(review): source_path/source_postfix are accepted but not forwarded
    # to _get_proc_Sv() here — confirm whether that is intentional.
    proc_data = self._get_proc_Sv()
    # Get tile indexing parameters (bin edges for both dimensions); the call
    # may also adjust noise_est_range_bin_size to fit the data evenly.
    self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
        self.get_tile_params(r_data_sz=proc_data.range_bin.size,
                             p_data_sz=proc_data.ping_time.size,
                             r_tile_sz=self.noise_est_range_bin_size,
                             p_tile_sz=self.noise_est_ping_size,
                             sample_thickness=self.sample_thickness)
    # Values for noise estimates: time-varied gain (clamped below 1 m range to
    # avoid log of values < 1) and two-way absorption loss.
    range_meter = self.range
    TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
    ABS = 2 * self.seawater_absorption * range_meter
    # Noise estimates: back out the uncompensated power in linear domain.
    proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
    # check if number of range_bin per tile is the same for all freq channels
    if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
        # Same tile geometry everywhere: coarsen all channels at once, then
        # take the minimum mean per tile column as the noise estimate.
        noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
            ping_time=self.noise_est_ping_size,
            range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
            boundary='pad').mean().min(dim='range_bin'))
    else:
        # Tile geometry differs per frequency: coarsen each channel with its
        # own range_bin window, then align and stack the results.
        range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
        tmp_noise = []
        for r_bin in range_bin_coarsen_idx:
            freq = r_bin.frequency.values
            tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
                ping_time=self.noise_est_ping_size,
                range_bin=r_bin.values,
                boundary='pad').mean().min(dim='range_bin'))
            tmp_da.name = 'noise_est'
            tmp_noise.append(tmp_da)
        # Construct a dataArray TODO: this can probably be done smarter using xarray native functions
        noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
        noise_est = xr.DataArray(noise_val,
                                 coords={'frequency': proc_data['frequency'].values,
                                         'ping_time': tmp_noise[0]['ping_time'].values},
                                 dims=['frequency', 'ping_time'])
    # Package as a Dataset and record the tile parameters used.
    noise_est = noise_est.to_dataset(name='noise_est')
    noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
    noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
    # Close opened resources
    proc_data.close()
    return noise_est
def get_MVBS(self, source_postfix='_Sv', source_path=None,
MVBS_range_bin_size=None, MVBS_ping_size=None,
save=False, save_postfix='_MVBS', save_path=None):
"""Calculate Mean Volume Backscattering Strength (MVBS).
The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
calculate and save MVBS as a new attribute to the calling EchoData instance.
MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
that are from the first elements of each tile along the corresponding dimensions
in the original Sv or Sv_clean DataArray.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate MVBS, default to '_Sv'
source_path : str
path of Sv file used to calculate MVBS, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
MVBS_range_bin_size : float, optional
meters per tile for calculating MVBS [m]
MVBS_ping_size : int, optional
number of pings per tile for calculating MVBS
save : bool, optional
whether to save the calculated MVBS into a new .nc file, default to ``False``
save_postfix : str
Filename postfix, default to '_MVBS'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
"""
# Check params
if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
self.MVBS_range_bin_size = MVBS_range_bin_size
if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
self.MVBS_ping_size = MVBS_ping_size
# Get Sv by validating path and calibrate if not already done
if self.Sv is not None:
print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if | |
# sdk/fedn/combiner/server.py
from concurrent import futures
import grpc
import time
import uuid
import queue
import threading
import fedn.proto.alliance_pb2 as alliance
import fedn.proto.alliance_pb2_grpc as rpc
from datetime import datetime, timedelta
from scaleout.repository.helpers import get_repository
from fedn.utils.mongo import connect_to_mongodb
from fedn.combiner.role import Role
####################################################################################################################
# class PredictionServer:
# #TODO add a flask api and run in separate thread.
# pass
def whoami(client, instance):
    """Stamp *instance*'s identity (id and protobuf role) onto the protobuf
    *client* message and return it, so senders/receivers self-identify."""
    proto_role = role_to_proto_role(instance.role)
    client.name = instance.id
    client.role = proto_role
    return client
def role_to_proto_role(role):
    """Translate a combiner ``Role`` enum member into the corresponding
    protobuf role constant (returns None for an unrecognized role, matching
    the original fall-through behavior)."""
    mapping = {
        Role.COMBINER: alliance.COMBINER,
        Role.WORKER: alliance.WORKER,
        Role.REDUCER: alliance.REDUCER,
        Role.OTHER: alliance.OTHER,
    }
    return mapping.get(role)
class CombinerClient:
    """gRPC client used by the combiner to talk to the controller.

    Opens an insecure channel to the controller, then spawns two daemon
    threads that consume the model-update and model-validation server
    streams for the lifetime of the process.
    """

    def __init__(self, address, port, id, role):
        # Identity reported to the controller via whoami().
        self.id = id
        self.role = role
        # NOTE(review): channel is insecure — fine on a trusted network only.
        channel = grpc.insecure_channel(address + ":" + str(port))
        self.connection = rpc.ConnectorStub(channel)
        self.orchestrator = rpc.CombinerStub(channel)
        print("ORCHESTRATOR Client: {} connected to {}:{}".format(self.id, address, port))
        # Daemon threads: they die with the main process, no join required.
        threading.Thread(target=self.__listen_to_model_update_stream, daemon=True).start()
        threading.Thread(target=self.__listen_to_model_validation_stream, daemon=True).start()

    def __listen_to_model_update_stream(self):
        """ Subscribe to the model update request stream. """
        r = alliance.ClientAvailableMessage()
        whoami(r.sender, self)
        # Blocking server-stream iteration; runs until the channel closes.
        for request in self.orchestrator.ModelUpdateStream(r):
            # A client sent a model update to be handled by the combiner;
            # updates originating from the reducer are ignored.
            if request.client.name != "reducer":
                print("ORCHESTRATOR: received model from client! {}".format(request.client), flush=True)
                # receive_model_candidate is expected to be provided by the
                # concrete orchestrator implementation — not defined here.
                self.receive_model_candidate(request.model_update_id)
                print("Recieved model update.", flush=True)

    def __listen_to_model_validation_stream(self):
        """ Subscribe to the model validation stream. """
        r = alliance.ClientAvailableMessage()
        whoami(r.sender, self)
        for validation in self.orchestrator.ModelValidationStream(r):
            # A client sent a model validation to be handled by the combiner.
            # receive_validation is expected from the concrete subclass.
            self.receive_validation(validation)
            print("Recieved model validation.", flush=True)

    def request_model_update(self, model_id, clients=[]):
        """ Ask members in the clients list to update the current global model.

        An empty clients list (default) broadcasts the request to all
        active member clients.
        NOTE(review): mutable default argument — shared across calls; it is
        only read here, so behavior is unaffected.
        """
        print("ORCHESTRATOR: Sending to clients {}".format(clients), flush=True)
        request = alliance.ModelUpdateRequest()
        whoami(request.sender, self)
        request.model_id = model_id
        request.correlation_id = str(uuid.uuid4())
        request.timestamp = str(datetime.now())
        if len(clients) == 0:
            # Broadcast request to all active member clients
            request.receiver.name = ""
            request.receiver.role = alliance.WORKER
            response = self.orchestrator.SendModelUpdateRequest(request)
        else:
            # Send to all specified clients (same request object re-targeted)
            for client in clients:
                request.receiver.name = client.name
                request.receiver.role = alliance.WORKER
                self.orchestrator.SendModelUpdateRequest(request)
        print("Requesting model update from clients {}".format(clients), flush=True)

    def request_model_validation(self, model_id, from_clients=[]):
        """ Send a request for members in from_clients to validate the model <model_id>.

        The default is to broadcast the request to all active members.
        """
        request = alliance.ModelValidationRequest()
        whoami(request.sender, self)
        request.model_id = model_id
        request.correlation_id = str(uuid.uuid4())
        request.timestamp = str(datetime.now())
        if len(from_clients) == 0:
            request.receiver.name = ""  # Broadcast request to all active member clients
            request.receiver.role = alliance.WORKER
            self.orchestrator.SendModelValidationRequest(request)
        else:
            # Send to specified clients
            for client in from_clients:
                request.receiver.name = client.name
                request.receiver.role = alliance.WORKER
                self.orchestrator.SendModelValidationRequest(request)
        print("ORCHESTRATOR: Sent validation request for model {}".format(model_id), flush=True)

    def _list_clients(self, channel):
        """Ask the controller for all clients active on *channel*."""
        request = alliance.ListClientsRequest()
        whoami(request.sender, self)
        request.channel = channel
        clients = self.connection.ListActiveClients(request)
        return clients.client

    def get_active_trainers(self):
        """Clients currently subscribed to model-update requests."""
        trainers = self._list_clients(alliance.Channel.MODEL_UPDATE_REQUESTS)
        return trainers

    def get_active_validators(self):
        """Clients currently subscribed to model-validation requests."""
        validators = self._list_clients(alliance.Channel.MODEL_VALIDATION_REQUESTS)
        return validators

    def nr_active_trainers(self):
        """Number of active trainer clients."""
        return len(self.get_active_trainers())

    def nr_active_validators(self):
        """Number of active validator clients."""
        return len(self.get_active_validators())
####################################################################################################################
####################################################################################################################
class FednServer(rpc.CombinerServicer, rpc.ReducerServicer, rpc.ConnectorServicer):
""" Communication relayer. """
def __init__(self, project, get_orchestrator):
    """Build and start the combiner gRPC server.

    Parameters
    ----------
    project :
        Project object whose ``config['Alliance']`` section supplies the
        controller host/port and repository settings.
    get_orchestrator :
        Factory: called with *project*, returns an orchestrator class which
        is then instantiated with (address, port, id, role, repository).
    """
    self.clients = {}
    self.project = project
    self.role = Role.COMBINER
    self.id = "combiner"
    # Fallback endpoint if the config lacks the Alliance section.
    address = "localhost"
    port = 12808
    # NOTE(review): this first try-block duplicates the host/port parsing of
    # the second one below and could be removed — kept byte-identical here.
    try:
        unpack = project.config['Alliance']
        address = unpack['controller_host']
        port = unpack['controller_port']
        # self.client = unpack['Member']['name']
    except KeyError as e:
        print("ORCHESTRATOR: could not get all values from config file {}".format(e))
    try:
        unpack = self.project.config['Alliance']
        address = unpack['controller_host']
        port = unpack['controller_port']
        # Model repository (e.g. minio) used to move model artifacts around.
        # NOTE(review): if this raises, self.repository is never set and the
        # orchestrator construction below will fail with AttributeError.
        self.repository = get_repository(config=unpack['Repository'])
        self.bucket_name = unpack["Repository"]["minio_bucket"]
    except KeyError as e:
        # NOTE(review): "ORCHESETRATOR" typo is in the runtime string; left
        # unchanged on purpose (doc-only edit).
        print("ORCHESETRATOR: could not get all values from config file {}".format(e), flush=True)
    # get the appropriate combiner class and instantiate with a pointer to the alliance server instance and repository
    # self.net = OrchestratorClient(address, port, self.id)
    # threading.Thread(target=self.__listen_to_model_update_stream, daemon=True).start()
    # threading.Thread(target=self.__listen_to_model_validation_stream, daemon=True).start()
    self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=100))
    # TODO refactor services into separate services
    # This object implements all three servicer interfaces at once.
    rpc.add_CombinerServicer_to_server(self, self.server)
    rpc.add_ConnectorServicer_to_server(self, self.server)
    rpc.add_ReducerServicer_to_server(self, self.server)
    self.server.add_insecure_port('[::]:' + str(port))
    self.orchestrator = get_orchestrator(project)(address, port, self.id, self.role, self.repository)
    self.server.start()
# def __get_clients(self):
# return self.clients
def __join_client(self, client):
    """Register *client* in the client table the first time it is seen."""
    already_known = client.name in self.clients
    if not already_known:
        self.clients[client.name] = {"lastseen": datetime.now()}
        print("New client connected:{}".format(client), flush=True)
def _subscribe_client_to_queue(self, client, queue_name):
    """Ensure *client* is registered and owns a dedicated queue for *queue_name*."""
    self.__join_client(client)
    entry = self.clients[client.name]
    if queue_name not in entry:
        entry[queue_name] = queue.Queue()
def __get_queue(self, client, queue_name):
    """Return the queue bound to (*client*, *queue_name*).

    Raises
    ------
    KeyError
        If the client is unknown or not subscribed to the channel; callers
        rely on the KeyError propagating.
    """
    # The original wrapped this lookup in `try/except KeyError: raise`,
    # which re-raised the same exception unchanged — dead code removed;
    # behavior is identical.
    return self.clients[client.name][queue_name]
def __get_status_queue(self, client):
    """Convenience accessor for *client*'s status-channel queue."""
    status_channel = alliance.Channel.STATUS
    return self.__get_queue(client, status_channel)
def _send_request(self, request, queue_name):
    """Deliver *request* to its designated receiver's queue for *queue_name*."""
    receiver = request.receiver
    self.__route_request_to_client(request, receiver, queue_name)
def _broadcast_request(self, request, queue_name):
    """ Publish a request to all subscribed, currently active members.

    Bug fixes versus the original:
    - `_list_active_clients` requires a channel argument; it was called with
      none, which raised TypeError at runtime. The queue name doubles as the
      channel here.
    - `_list_active_clients` returns client *names* (strings), so indexing
      with `client.name` raised AttributeError; index with the name directly.
    """
    active_clients = self._list_active_clients(queue_name)
    for client_name in active_clients:
        self.clients[client_name][queue_name].put(request)
def __route_request_to_client(self, request, client, queue_name):
    """Put *request* on *client*'s queue for *queue_name*.

    Logs and re-raises on lookup failure (unknown client / unsubscribed
    channel), so callers still see the exception.
    """
    try:
        q = self.__get_queue(client, queue_name)
        q.put(request)
    except KeyError:
        # Bug fixes versus the original: the bare `except:` is narrowed to
        # KeyError (the only error __get_queue raises), and the message now
        # actually formats its arguments — the original passed them to
        # print() alongside a literal "{} {}" template.
        print("Failed to route request to client: {} {}".format(request.receiver, queue_name))
        raise
def _send_status(self, status):
    """Timestamp *status* and fan it out to every status-channel subscriber.

    Clients without a status queue are silently skipped, matching the
    original KeyError-swallowing behavior.
    """
    for name, entry in self.clients.items():
        status_queue = entry.get(alliance.Channel.STATUS)
        if status_queue is not None:
            status.timestamp = str(datetime.now())
            status_queue.put(status)
def __register_heartbeat(self, client):
    """ Record that *client* is alive.

    Adds a client entry on first contact and refreshes its heartbeat
    timestamp on every call.
    """
    self.__join_client(client)
    entry = self.clients[client.name]
    entry["lastseen"] = datetime.now()
def AllianceStatusStream(self, response, context):
    """ A server stream RPC endpoint that emits status messages.

    Subscribes the caller to the STATUS channel, announces the connection
    to all status subscribers, then blocks forever yielding queued status
    messages to the caller.
    """
    status = alliance.Status(status="Client {} connecting to AllianceStatusStream.".format(response.sender))
    status.log_level = alliance.Status.INFO
    status.sender.name = self.id
    status.sender.role = role_to_proto_role(self.role)
    self._subscribe_client_to_queue(response.sender, alliance.Channel.STATUS)
    q = self.__get_queue(response.sender, alliance.Channel.STATUS)
    self._send_status(status)
    # Infinite generator: q.get() blocks until a status message is queued.
    while True:
        yield q.get()
def SendStatus(self, status: alliance.Status, context):
    """RPC endpoint: broadcast a client-submitted status message to every
    subscriber of the status channel and acknowledge receipt."""
    # Register a heartbeat (if the clients sends a message it is online)
    # self.__register_heartbeat(status.client)
    self._send_status(status)
    ack = alliance.Response()
    ack.response = "Status received."
    return ack
def _list_subscribed_clients(self, queue_name):
    """Names of all clients that hold a queue for *queue_name*."""
    return [
        name
        for name, entry in self.clients.items()
        if queue_name in entry
    ]
def _list_active_clients(self, channel):
    """Names of *channel* subscribers whose last heartbeat is under 30 s old.

    NOTE: comparing datetime.now() with stored timestamps can break across
    different timezones (inherited caveat).
    """
    active = []
    for name in self._list_subscribed_clients(channel):
        last_seen = self.clients[name]["lastseen"]
        # TODO: move the heartbeat timeout to config.
        if datetime.now() - last_seen < timedelta(seconds=30):
            active.append(name)
    return active
def ListActiveClients(self, request: alliance.ListClientsRequest, context):
    """ RPC endpoint that returns a ClientList containing the names of all active clients.

    An active client has sent a status message / responded to a heartbeat
    request recently (see _list_active_clients for the timeout).
    """
    client_list = alliance.ClientList()
    for name in self._list_active_clients(request.channel):
        client_list.client.append(alliance.Client(name=name, role=alliance.WORKER))
    return client_list
def SendHeartbeat(self, heartbeat: alliance.Heartbeat, context):
    """ RPC that lets clients send a heartbeat, notifying the server that
    the client is available. Echoes the sender identity back in the reply. """
    self.__register_heartbeat(heartbeat.sender)
    reply = alliance.Response()
    reply.sender.name = heartbeat.sender.name
    reply.sender.role = heartbeat.sender.role
    reply.response = "Heartbeat received"
    return reply
## Combiner Service
def ModelUpdateStream(self, update, context):
    """Server-stream RPC: subscribe the caller to completed model updates.

    Announces the connection on the status channel, then blocks forever
    yielding messages queued on the caller's MODEL_UPDATES queue.
    """
    client = update.sender
    status = alliance.Status(status="Client {} connecting to ModelUpdateStream.".format(client.name))
    status.log_level = alliance.Status.INFO
    status.sender.name = self.id
    status.sender.role = role_to_proto_role(self.role)
    self._subscribe_client_to_queue(client, alliance.Channel.MODEL_UPDATES)
    q = self.__get_queue(client, alliance.Channel.MODEL_UPDATES)
    self._send_status(status)
    # Infinite generator: q.get() blocks until a message is queued.
    while True:
        yield q.get()
def ModelUpdateRequestStream(self, response, context):
    """ A server stream RPC endpoint. Messages from client stream.

    Subscribes the caller to model-update *requests* (work assignments),
    announces the connection, then yields queued requests forever.
    """
    client = response.sender
    # Debug aid: dump any gRPC invocation metadata the client attached.
    metadata = context.invocation_metadata()
    if metadata:
        print("\n\n\nGOT METADATA: {}\n\n\n".format(metadata), flush=True)
    status = alliance.Status(status="Client {} connecting to ModelUpdateRequestStream.".format(client.name))
    status.log_level = alliance.Status.INFO
    whoami(status.sender, self)
    # print("Client {} connecting to ModelUpdateRequestStream.".format(client))
    self._subscribe_client_to_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)
    q = self.__get_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)
    self._send_status(status)
    # Infinite generator: q.get() blocks until a request is queued.
    while True:
        yield q.get()
def ModelValidationStream(self, update, context):
    """Server-stream RPC: subscribe the caller to completed model validations.

    Announces the connection on the status channel, then yields messages
    queued on the caller's MODEL_VALIDATIONS queue forever.
    """
    client = update.sender
    status = alliance.Status(status="Client {} connecting to ModelValidationStream.".format(client.name))
    status.log_level = alliance.Status.INFO
    status.sender.name = self.id
    status.sender.role = role_to_proto_role(self.role)
    # print("Client {} connecting to ModelUpdateStream.".format(client))
    self._subscribe_client_to_queue(client, alliance.Channel.MODEL_VALIDATIONS)
    q = self.__get_queue(client, alliance.Channel.MODEL_VALIDATIONS)
    self._send_status(status)
    # Infinite generator: q.get() blocks until a validation is queued.
    while True:
        yield q.get()
def ModelValidationRequestStream(self, response, context):
    """ A server stream RPC endpoint. Messages from client stream.

    Subscribes the caller to model-validation *requests* (work
    assignments), announces the connection, then yields queued requests
    forever.
    """
    client = response.sender
    status = alliance.Status(status="Client {} connecting to ModelValidationRequestStream.".format(client.name))
    status.log_level = alliance.Status.INFO
    status.sender.name = self.id
    status.sender.role = role_to_proto_role(self.role)
    # whoami(status.sender, self)
    self._subscribe_client_to_queue(client, alliance.Channel.MODEL_VALIDATION_REQUESTS)
    q = self.__get_queue(client, alliance.Channel.MODEL_VALIDATION_REQUESTS)
    self._send_status(status)
    # Infinite generator: q.get() blocks until a request is queued.
    while True:
        yield q.get()
def SendModelUpdateRequest(self, request, context):
    """ Relay a model update request onto the MODEL_UPDATE_REQUESTS channel. """
    self._send_request(request, alliance.Channel.MODEL_UPDATE_REQUESTS)
    reply = alliance.Response()
    reply.response = "CONTROLLER RECEIVED ModelUpdateRequest from client {}".format(request.sender.name)
    return reply  # TODO Fill later
def SendModelUpdate(self, request, context):
    """ Accept a completed model update from a client and hand it to the
    orchestrator for aggregation; acknowledge receipt. """
    # self._send_request(request,alliance.Channel.MODEL_UPDATES)
    self.orchestrator.receive_model_candidate(request.model_update_id)
    print("ORCHESTRATOR: Received model update", flush=True)
    response = alliance.Response()
    # Bug fix: the original formatted the freshly-created `response` object
    # (and its empty sender) into the message instead of the incoming
    # `request`, so the acknowledgement never identified the real sender.
    response.response = "RECEIVED ModelUpdate {} from client {}".format(
        request.model_update_id, request.sender.name)
    return response  # TODO Fill later
def SendModelValidationRequest(self, request, context):
    """ Relay a model validation request onto the MODEL_VALIDATION_REQUESTS channel. """
    self._send_request(request, alliance.Channel.MODEL_VALIDATION_REQUESTS)
    reply = alliance.Response()
    reply.response = "CONTROLLER RECEIVED ModelValidationRequest from client {}".format(request.sender.name)
    return reply  # TODO Fill later
def SendModelValidation(self, request, context):
    """ Accept a model validation result from a client and hand it to the
    orchestrator; acknowledge receipt. """
    # self._send_request(request,alliance.Channel.MODEL_VALIDATIONS)
    self.orchestrator.receive_validation(request)
    print("ORCHESTRATOR received validation ", flush=True)
    response = alliance.Response()
    # Bug fix: the original formatted the fresh `response` object (and its
    # empty sender) into the message instead of the incoming `request`.
    response.response = "RECEIVED ModelValidation {} from client {}".format(
        request.model_id, request.sender.name)
    return response  # TODO Fill later
## Reducer Service
def GetGlobalModel(self, request, context):
print("got globalmodel request, sending response! ", flush=True)
response = alliance.GetGlobalModelResponse()
whoami(response.sender, self)
response.receiver.name = "reducer"
response.receiver.role = role_to_proto_role(Role.REDUCER)
response.model_id = self.orchestrator.get_model_id()
| |
from copy import copy, deepcopy
import sqlite3
from hashlib import md5
import time
import os
import os.path as osp
from base64 import b64encode, b64decode
from zlib import compress, decompress
import itertools as it
import logging
# instead of pickle we use dill, so we can save dynamically defined
# classes
import dill
from wepy.sim_manager import Manager
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.snapshot import SimApparatus, SimSnapshot
from wepy.util.kv import KV, SQLITE3_INMEMORY_URI, gen_uri
class OrchestratorError(Exception):
    """Base exception raised for orchestration failures."""
class Orchestrator():
""" """
# we freeze the pickle protocol for making hashes, because we care
# more about stability than efficiency of newer versions
HASH_PICKLE_PROTOCOL = 3

# Defaults mirrored from Configuration so Orchestrator callers need not
# import it directly.
DEFAULT_WORKDIR = Configuration.DEFAULT_WORKDIR
DEFAULT_CONFIG_NAME = Configuration.DEFAULT_CONFIG_NAME
DEFAULT_NARRATION = Configuration.DEFAULT_NARRATION
DEFAULT_MODE = Configuration.DEFAULT_MODE

# Filenames for the on-disk SQLite orchestration databases.
DEFAULT_CHECKPOINT_FILENAME = "checkpoint.orch.sqlite"
ORCH_FILENAME_TEMPLATE = "{config}{narration}.orch.sqlite"

# the default way to open up the whole parent database
# ('x' = create, fail if it already exists)
DEFAULT_ORCHESTRATION_MODE = 'x'

# mode to open the individual kv stores on the parent database
KV_MODE = 'r+'

# default timeout (seconds) for connecting to a database
SQLITE3_DEFAULT_TIMEOUT = 5

# the fields to return (and their order) as a record for a run query
RUN_SELECT_FIELDS = ('last_cycle_idx', 'config_hash')
def __init__(self, orch_path=None,
             mode='x',
             append_only=False,
             ):
    """Open (or create) the orchestration database and its KV stores.

    Parameters
    ----------
    orch_path : str or None
        Path to the database file; None selects an in-memory database
        (see `gen_uri`).
    mode : str
        File-style open mode used to build the database URI
        ('x' = create new, must not exist, by default).
    append_only : bool
        If True, treat the store as append-only, which makes enabling
        the read_uncommited pragma safe (no dirty reads possible).
    """
    self._mode = mode
    self._append_only = append_only
    # handle the path and convert to a proper URI for the database
    # given the path and the mode
    self._db_uri = gen_uri(orch_path, mode)
    # run table: start_hash, end_hash, num_cycles, configuration_id
    # get a raw connection to the database
    self._db = sqlite3.connect(self.db_uri, uri=True,
                               timeout=self.SQLITE3_DEFAULT_TIMEOUT)
    self._closed = False
    # set isolation level to autocommit
    self._db.isolation_level = None
    # we can use read_uncommited only in append_only mode (no
    # updates) because you never have to worry about dirty reads
    # since you can't update
    if self.append_only:
        self._db.execute("PRAGMA read_uncommited=1")
    # we make a table for the run data, if it doesn't already
    # exist
    c = self._db.cursor().execute(self.create_run_table_query)
    # initialize or open each of the separate KV-stores (tables in
    # the same SQLite3 database)
    # change the mode for the KV stores since we already created the database
    # metadata: default init walkers, default apparatus, default
    # configuration
    self.metadata_kv = KV(db_url=self.db_uri,
                          table='meta',
                          mode='a',
                          value_types=None,
                          append_only=self.append_only)
    # snapshots
    self.snapshot_kv = KV(db_url=self.db_uri,
                          table='snapshots',
                          primary_key='snaphash',
                          value_name='snapshot',
                          mode='a',
                          append_only=self.append_only)
    # configurations
    self.configuration_kv = KV(db_url=self.db_uri,
                               table='configurations',
                               primary_key='config_hash',
                               value_name='config',
                               mode='a',
                               append_only=self.append_only)
@property
def mode(self):
    """The file-style open mode the database was opened with."""
    return self._mode
@property
def append_only(self):
    """Whether the store was opened in append-only mode."""
    return self._append_only
def close(self):
    """Close all KV stores and the underlying database connection.

    Raises
    ------
    IOError
        If the connection has already been closed.
    """
    # truthiness test instead of the non-idiomatic `== True` comparison
    if self._closed:
        raise IOError("The database connection is already closed")
    # close the KV-store connections before the raw connection
    self.metadata_kv.close()
    self.configuration_kv.close()
    self.snapshot_kv.close()
    self._db.close()
    self._closed = True
@property
def db_uri(self):
    """The SQLite3 URI string used to connect to this database."""
    return self._db_uri
@property
def orch_path(self):
    """The filesystem path of the database, or None if in-memory.

    The URI has the form ``protocol:url?query``; this strips the
    protocol prefix and any query string.
    """
    # in-memory databases have no filesystem path
    if self.db_uri == SQLITE3_INMEMORY_URI:
        return None
    # split only on the first ':' so URLs that themselves contain a
    # ':' (e.g. Windows drive letters) don't raise ValueError as the
    # old unbounded split(':') unpacking did
    _, _, tail = self.db_uri.partition(':')
    # drop the query string, if any
    url = tail.split('?', 1)[0]
    return url
@classmethod
def serialize(cls, snapshot):
    """Serialize a snapshot to a compressed, base64-encoded pickle string.

    Pickling is done with the dill module (the base pickle module is
    inadequate for dynamically defined classes); the result is mostly
    pickle-compatible but reading it with plain pickle is unsupported —
    use `deserialize` instead. The pickle bytes are zlib-compressed and
    base64-encoded.

    A deepcopy is always performed on the object first so that
    extraneous references to it are avoided, since there is no (AFAIK)
    way to make sure all references to an object are deleted.

    NOTE: Perhaps there is a way and that should be done (and tested)
    to see if it provides stable pickles (i.e. pickles that always hash
    to the same value), to avoid the overhead of copying large objects.

    Parameters
    ----------
    snapshot : SimSnapshot object
        The snapshot of the simulation you want to serialize.

    Returns
    -------
    serial_str : str
        Serialized string of the snapshot object
    """
    # isolate the object from outside references before pickling
    snapshot_copy = deepcopy(snapshot)
    # pickle with the frozen protocol so hashes stay stable
    pickled = dill.dumps(snapshot_copy,
                         protocol=cls.HASH_PICKLE_PROTOCOL,
                         recurse=True)
    # compress, then encode for safe text storage
    serial_str = b64encode(compress(pickled))
    return serial_str
# core methods for serializing python objects, used for snapshots,
# apparatuses, configurations, and the initial walker list
@classmethod
def deserialize(cls, serial_str):
    """Deserialize an encoded string snapshot back to an object.

    Inverse of `serialize`: base64-decode, zlib-decompress, then
    unpickle with dill.

    Parameters
    ----------
    serial_str : str
        Serialized string of the snapshot object

    Returns
    -------
    snapshot : SimSnapshot object
        Simulation snapshot object
    """
    compressed = b64decode(serial_str)
    pickled = decompress(compressed)
    return dill.loads(pickled)
# defaults getters and setters
def set_default_sim_apparatus(self, sim_apparatus):
    """Serialize and store the default simulation apparatus in metadata."""
    # serialize the apparatus and then set it
    serial_app = self.serialize(sim_apparatus)
    self.metadata_kv['default_sim_apparatus'] = serial_app
def set_default_init_walkers(self, init_walkers):
    """Serialize and store the default initial walkers in metadata."""
    # serialize the walkers and then set them
    serial_walkers = self.serialize(init_walkers)
    self.metadata_kv['default_init_walkers'] = serial_walkers
def set_default_configuration(self, configuration):
    """Serialize, hash, and store the default configuration.

    The configuration is stored under its hash in the configuration KV
    and the hash is recorded in the metadata KV.
    """
    # serialize the configuration and hash it
    serial_config = self.serialize(configuration)
    config_hash = self.hash_snapshot(serial_config)
    self.metadata_kv['default_configuration_hash'] = config_hash
    self.configuration_kv[config_hash] = serial_config
def set_default_snapshot(self, snapshot):
    """Register `snapshot` and record its hash as the default snapshot.

    Returns the snapshot's hash.
    """
    snaphash = self.add_snapshot(snapshot)
    # then save the hash in the metadata
    self.metadata_kv['default_snapshot_hash'] = snaphash
    return snaphash
def gen_default_snapshot(self):
    """Generate a start snapshot from the default init walkers and record
    its hash as the default snapshot. Returns the hash."""
    # generate the snapshot
    sim_start_hash = self.gen_start_snapshot(self.get_default_init_walkers())
    # then save the hash in the metadata
    self.metadata_kv['default_snapshot_hash'] = sim_start_hash
    return sim_start_hash
def get_default_sim_apparatus(self):
    """Deserialize and return the default simulation apparatus."""
    return self.deserialize(self.metadata_kv['default_sim_apparatus'])
def get_default_init_walkers(self):
    """Deserialize and return the default initial walkers."""
    return self.deserialize(self.metadata_kv['default_init_walkers'])
def get_default_configuration(self):
    """Look up the default configuration by its recorded hash."""
    config_hash = self.metadata_kv['default_configuration_hash']
    return self.get_configuration(config_hash)
def get_default_configuration_hash(self):
    """Return the hash of the default configuration."""
    return self.metadata_kv['default_configuration_hash']
def get_default_snapshot(self):
    """Look up the default snapshot by its recorded hash."""
    start_hash = self.metadata_kv['default_snapshot_hash']
    return self.get_snapshot(start_hash)
def get_default_snapshot_hash(self):
    """Return the hash of the default snapshot."""
    return self.metadata_kv['default_snapshot_hash']
@classmethod
def hash_snapshot(cls, serial_str):
    """Compute the MD5 hex digest of a serialized object string.

    Parameters
    ----------
    serial_str : bytes
        Serialized (compressed, base64-encoded) representation, as
        produced by `serialize`.

    Returns
    -------
    str
        Hexadecimal MD5 digest, used as the object's KV key.
    """
    return md5(serial_str).hexdigest()
def get_snapshot(self, snapshot_hash):
    """Return a copy of the snapshot stored under the given hash.

    Parameters
    ----------
    snapshot_hash : str
        MD5 hex digest key of the snapshot.

    Returns
    -------
    SimSnapshot object
        The deserialized snapshot (a fresh copy each call).
    """
    return self.deserialize(self.snapshot_kv[snapshot_hash])
def get_configuration(self, config_hash):
    """Return a copy of the configuration stored under the given hash.

    Parameters
    ----------
    config_hash : str
        MD5 hex digest key of the configuration.

    Returns
    -------
    Configuration object
        The deserialized configuration (a fresh copy each call).
    """
    return self.deserialize(self.configuration_kv[config_hash])
@property
def snapshot_hashes(self):
    """List of the hashes of all stored snapshots."""
    # iterate over the snapshot kv
    return list(self.snapshot_kv.keys())
@property
def configuration_hashes(self):
    """List of the hashes of all stored configurations."""
    # iterate over the configuration kv
    return list(self.configuration_kv.keys())
def add_snapshot(self, snapshot):
    """Serialize a snapshot and store it under its hash.

    If a snapshot with the same hash is already stored, nothing is
    written and the existing hash is returned.

    Parameters
    ----------
    snapshot : SimSnapshot object
        The snapshot to register.

    Returns
    -------
    str
        MD5 hex digest key of the snapshot.
    """
    # serialize the snapshot using the protocol for doing so
    serialized_snapshot = self.serialize(snapshot)
    # get the hash of the snapshot
    snaphash = self.hash_snapshot(serialized_snapshot)
    # if the hash is already registered skip the write; plain membership
    # replaces the old any([...]) scan, whose loop variable also
    # shadowed the imported `md5` name
    if snaphash in self.snapshot_hashes:
        return snaphash
    # save the snapshot in the KV store
    self.snapshot_kv[snaphash] = serialized_snapshot
    return snaphash
def add_serial_snapshot(self, serial_snapshot):
    """Store an already-serialized snapshot under its hash.

    If the hash is already registered nothing is written.

    Parameters
    ----------
    serial_snapshot : bytes
        Serialized snapshot, as produced by `serialize`.

    Returns
    -------
    str
        MD5 hex digest key of the snapshot.
    """
    # get the hash of the snapshot
    snaphash = self.hash_snapshot(serial_snapshot)
    # membership test replaces the old any([...]) scan, whose loop
    # variable also shadowed the imported `md5` name
    if snaphash in self.snapshot_hashes:
        # already present; just return the hash
        return snaphash
    # save the snapshot in the KV store
    self.snapshot_kv[snaphash] = serial_snapshot
    return snaphash
def gen_start_snapshot(self, init_walkers):
    """Create and register a start snapshot from initial walkers.

    Parameters
    ----------
    init_walkers : list
        The initial walkers for the simulation.

    Returns
    -------
    str
        MD5 hex digest key of the new start snapshot.
    """
    # make a SimSnapshot object using the initial walkers and
    # the default apparatus
    start_snapshot = SimSnapshot(init_walkers, self.get_default_sim_apparatus())
    # save the snapshot, and generate its hash
    sim_start_md5 = self.add_snapshot(start_snapshot)
    return sim_start_md5
@property
def default_snapshot_hash(self):
    """Hash of the snapshot currently registered as the default."""
    return self.metadata_kv['default_snapshot_hash']
@property
def default_snapshot(self):
    """The snapshot currently registered as the default (deserialized)."""
    return self.get_snapshot(self.default_snapshot_hash)
def snapshot_registered(self, snapshot):
    """Check whether a snapshot is already in the database, based on the
    hash of it.

    This serializes the snapshot so may be slow.

    Parameters
    ----------
    snapshot : SimSnapshot object
        The snapshot object you want to query for.

    Returns
    -------
    bool
        True if a snapshot with the same hash is stored.
    """
    # serialize and hash the snapshot
    snaphash = self.hash_snapshot(self.serialize(snapshot))
    # then check it
    return self.snapshot_hash_registered(snaphash)
def snapshot_hash_registered(self, snapshot_hash):
    """Check whether a snapshot hash is already in the database.

    Parameters
    ----------
    snapshot_hash : str
        The string hash of the snapshot.

    Returns
    -------
    bool
        True if the hash is present, False otherwise.
    """
    # direct membership test replaces the verbose
    # any([True if ... else False ...]) scan and if/else returning
    # True/False literals
    return snapshot_hash in self.snapshot_hashes
def configuration_hash_registered(self, config_hash):
    """Check whether a configuration hash is already in the database.

    Parameters
    ----------
    config_hash : str
        The string hash of the configuration.

    Returns
    -------
    bool
        True if the hash is present, False otherwise.
    """
    # direct membership test replaces the verbose any([...]) scan;
    # docstring also fixed (it was copy-pasted from the snapshot method)
    return config_hash in self.configuration_hashes
### run methods
def add_configuration(self, configuration):
serialized_config = self.serialize(configuration)
config_hash = self.hash_snapshot(serialized_config)
# check that the hash is not already in the snapshots
if any([True if config_hash == md5 else False for md5 in self.configuration_hashes]):
# just skip the rest of the function and return the hash
return config_hash
# | |
`decode` of some encoding
stages only commute with sum if the number of summands is known.
Consider the example of uniform quantization on a specified interval.
Encoding applies a pre-defined linear transformation to the input, and maps
the resulting values to a discrete set of values. Because of the linear
transformation, the decoding functionality does not immediately commute with
sum. However, if we knew how many summands are in the sum, we can determine
what is the appropriate inverse linear transformation, enabling the
commutativity.
A simple way to make this functionality available is to add a
`tf.constant(1, tf.int32)` to the encoded tensors returned by the `encode`
method.
The problem is that this approach will be often inefficient. Typically, we
are intereseted in encoding a collection of values, such as all weights of a
model, and multiple encoding stages might require this information. The
result will be a lot of redundant information being communicated. Moreover,
a user interested in this will always have the relevant information already
available.
Such information can thus be provided to the `decode` method of an encoding
stage via the `num_summands` argument, which will be handled by higher-level
interfaces.
Args:
encoded_tensors: A dictionary containing `Tensor` objects, representing
the encoded value.
decode_params: A dictionary, containing the parameters needed for the
decoding. The structure needs to be the return structure of the
`get_params` method.
num_summands: An integer representing the number of summands, if
`encoded_tensors` is a sum of the encoded representations. The default
value `None` refers to the case when no summation occurred, and can thus
be interpreted as `1`.
shape: Required if the `decode_needs_input_shape` property is `True`. A
shape of the original input to `encode`, if needed for decoding. Can be
either a `Tensor`, or a python object.
Returns:
A single decoded `Tensor`.
"""
@six.add_metaclass(abc.ABCMeta)
class AdaptiveEncodingStageInterface(object):
"""Adaptive version of the `EncodingStageInterface`.
This class has the same functionality as the `EncodingStageInterface`, but in
addition maintains a state, which is adaptive based on the values being
compressed and can parameterize the way encoding functionality works. Note
that this is useful only in case where the encoding is executed in multiple
iterations.
A typical implementation of this interface would be a wrapper of an
implementation of `EncodingStageInterface, which uses the existing stateless
transformations and adds state that controls some of the parameters returned
by the `get_params` method.
The important distinction is that in addition to `encoded_tensors`, the
`encode` method of this class returns an additional dictionary of
`state_update_tensors`. The `commutes_with_sum` property talks about summation
of only the `encoded_tensors`. The `state_update_tensors` can be aggregated
in more flexible ways, specified by the `state_update_aggregation_modes`
property, before being passed to the `update_state` method.
Each implementation should also be wrapped by `tf_style_encoding_stage` to
ensure adherence to the TensorFlow style guide. The adherence is enforced by
the `BaseEncodingStageTest` test class. See `test_utils.py` for more details.
"""
@abc.abstractproperty
def name(self):
    """Name of the encoding stage.

    This is a general name for the implementation of this interface, which is
    used mainly by the `Encoder` class to create appropriate TensorFlow name
    scopes when composing individual encoding stages.

    Returns:
      A `string`.
    """
@abc.abstractproperty
def compressible_tensors_keys(self):
    """Keys of encoded tensors allowed to be further encoded.

    These keys correspond to tensors in the object returned by the `encode`
    method, that are allowed to be further lossily compressed.

    This property does not directly impact the functionality, but is used by
    the `Encoder` class to validate composition.

    Returns:
      A list of `string` values.
    """
@abc.abstractproperty
def commutes_with_sum(self):
    """`True/False` based on whether the encoding commutes with sum.

    Iff `True`, it means that given multiple inputs `x` with the same `shape`
    and `dtype`, and the same `params` argument of the `encode` method, the
    implementation is such that every value in the returned `encoded_tensors`
    can be first summed, before being passed to the decoding functionality, and
    the output should be identical (up to numerical precision) to summing the
    fully decoded `Tensor` objects.

    Note that this also assumes that each of the `decode` methods would be used
    with the same values of `decode_params`.

    Returns:
      A boolean, `True` iff the encoding commutes with sum.
    """
@abc.abstractproperty
def decode_needs_input_shape(self):
    """Whether original shape of the encoded object is needed for decoding.

    Iff `True`, it means that the `shape` of the `x` argument to the `encode`
    method needs to be provided to the `decode` method. For instance, this is
    needed for bitpacking, where inputs of multiple shapes can result in
    identical bitpacked representations.

    This property will be used by `Encoder` to efficiently realize the
    composition of implementations of this interface.

    Returns:
      A boolean, `True` iff decoding needs the original input shape.
    """
@abc.abstractproperty
def state_update_aggregation_modes(self):
    """Aggregation mode of state update tensors.

    Returns:
      A dictionary mapping keys appearing in `state_update_tensors` returned
      by the `encode` method to a `StateAggregationMode` object, which
      declares how the `Tensor` objects should be aggregated.
    """
@abc.abstractmethod
def initial_state(self):
    """Creates an initial state.

    Returns:
      A dictionary of `Tensor` objects, representing the initial state.
    """
@abc.abstractmethod
def update_state(self, state, state_update_tensors):
    """Updates the state.

    This method updates the `state` based on the current value of `state`, and
    (potentially aggregated) `state_update_tensors`, returned by the `encode`
    method. This will typically happen at the end of a notion of iteration.

    Args:
      state: A dictionary of `Tensor` objects, representing the current state.
        The dictionary has the same structure as the return dictionary of the
        `initial_state` method.
      state_update_tensors: A dictionary of `Tensor` objects, representing the
        `state_update_tensors` returned by the `encode` method and
        appropriately aggregated.

    Returns:
      A dictionary of `Tensor` objects, representing the updated `state`.
    """
@abc.abstractmethod
def get_params(self, state):
    """Returns the parameters needed for encoding.

    This method returns parameters controlling the behavior of the `encode`
    and `decode` methods.

    Note that this method is not purely functional in terms of `TensorFlow`.
    The params can be derived from an internal state of the compressor. For
    instance, if a constructor optionally takes a `Variable` as an input
    argument, which is allowed to change during iterative execution, that
    `Variable`, or a function of it, would be exposed via this method.
    However, only values that can be TensorFlow values should be exposed via
    params. If a parameter always needs to be a Python constant, for instance
    used for Python control flow, it should not be exposed via params, and
    accessed via `self` instead.

    Args:
      state: A dictionary of `Tensor` objects. This should be the object
        controlled by the `initial_state` and `update_state` methods.

    Returns:
      A tuple `(encode_params, decode_params)`, where
      `encode_params`: A dictionary to be passed as argument to the `encode`
        method.
      `decode_params`: A dictionary to be passed as argument to the `decode`
        method.
      Each value of the dictionaries can be either a `Tensor` or any python
      constant.
    """
@abc.abstractmethod
def encode(self, x, encode_params):
    """Encodes a given `Tensor`.

    This method can create TensorFlow variables, which can be updated every
    time the encoding is executed. An example is an encoder that internally
    remembers the error incurred by previous encoding, and adds it to `x` in
    the next iteration, before executing the encoding.

    However, this method may be called in an entirely separate graph from all
    other methods. That is, the implementer of this class can *only* assume
    such variables can be accessed from this method but not from others.

    Args:
      x: A `Tensor`, input to be encoded.
      encode_params: A dictionary, containing the parameters needed for the
        encoding. The structure needs to be the return structure of the
        `get_params` method.

    Returns:
      A tuple `(encoded_tensors, state_update_tensors)`, where these are:
      `encoded_tensors`: A dictionary of `Tensor` objects representing the
        encoded input `x`.
      `state_update_tensors`: A dictionary of `Tensor` objects representing
        information necessary for updating the state.
    """
@abc.abstractmethod
def decode(self,
encoded_tensors,
decode_params,
num_summands=None,
shape=None):
"""Decodes the encoded representation.
This method is the inverse transformation of the `encode` method. The
`encoded_tensors` argument is expected to be the output structure of
`encode` method.
The `num_summands` argument is needed because the `decode` some encoding
stages only commute with sum if the number of | |
in the pod's namespace
"""
def __init__(__self__, *,
             key: str,
             name: Optional[str] = None,
             optional: Optional[bool] = None):
    """
    Selects a key of a secret in the pod's namespace
    :param str key: The key of the secret to select from. Must be a valid secret key.
    :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
    :param bool optional: Specify whether the Secret or its key must be defined
    """
    # NOTE(review): appears to be auto-generated Pulumi output-type
    # boilerplate; prefer regenerating over hand-editing.
    pulumi.set(__self__, "key", key)
    # optional fields are only set when provided, so the wire
    # representation omits them entirely
    if name is not None:
        pulumi.set(__self__, "name", name)
    if optional is not None:
        pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> str:
    """
    The key of the secret to select from. Must be a valid secret key.
    """
    return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> Optional[str]:
    """
    Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter
def optional(self) -> Optional[bool]:
    """
    Specify whether the Secret or its key must be defined
    """
    return pulumi.get(self, "optional")
def _translate_property(self, prop):
    # map camelCase wire property names to snake_case Python attribute names
    return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigExternalMetrics(dict):
    """
    ExternalMetricsConfig contains the configuration of the external metrics provider in Cluster Agent
    """
    # NOTE(review): appears to be auto-generated from the DatadogAgent CRD
    # schema; prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 enabled: Optional[bool] = None,
                 endpoint: Optional[str] = None,
                 port: Optional[int] = None,
                 use_datadog_metrics: Optional[bool] = None,
                 wpa_controller: Optional[bool] = None):
        """
        ExternalMetricsConfig contains the configuration of the external metrics provider in Cluster Agent
        :param bool enabled: Enable the metricsProvider to be able to scale based on metrics in Datadog
        :param str endpoint: Override the API endpoint for the external metrics server. Defaults to .spec.agent.config.ddUrl or "https://app.datadoghq.com" if that's empty.
        :param int port: If specified configures the metricsProvider external metrics service port
        :param bool use_datadog_metrics: Enable usage of DatadogMetrics CRD (allow to scale on arbitrary queries)
        :param bool wpa_controller: Enable informer and controller of the watermark pod autoscaler NOTE: The WatermarkPodAutoscaler controller needs to be installed see https://github.com/DataDog/watermarkpodautoscaler for more details.
        """
        # only set fields that were explicitly provided
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if endpoint is not None:
            pulumi.set(__self__, "endpoint", endpoint)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if use_datadog_metrics is not None:
            pulumi.set(__self__, "use_datadog_metrics", use_datadog_metrics)
        if wpa_controller is not None:
            pulumi.set(__self__, "wpa_controller", wpa_controller)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        Enable the metricsProvider to be able to scale based on metrics in Datadog
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter
    def endpoint(self) -> Optional[str]:
        """
        Override the API endpoint for the external metrics server. Defaults to .spec.agent.config.ddUrl or "https://app.datadoghq.com" if that's empty.
        """
        return pulumi.get(self, "endpoint")
    @property
    @pulumi.getter
    def port(self) -> Optional[int]:
        """
        If specified configures the metricsProvider external metrics service port
        """
        return pulumi.get(self, "port")
    @property
    @pulumi.getter(name="useDatadogMetrics")
    def use_datadog_metrics(self) -> Optional[bool]:
        """
        Enable usage of DatadogMetrics CRD (allow to scale on arbitrary queries)
        """
        return pulumi.get(self, "use_datadog_metrics")
    @property
    @pulumi.getter(name="wpaController")
    def wpa_controller(self) -> Optional[bool]:
        """
        Enable informer and controller of the watermark pod autoscaler NOTE: The WatermarkPodAutoscaler controller needs to be installed see https://github.com/DataDog/watermarkpodautoscaler for more details.
        """
        return pulumi.get(self, "wpa_controller")
    def _translate_property(self, prop):
        # map camelCase wire property names to snake_case Python attribute names
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigResources(dict):
    """
    Datadog cluster-agent resource requests and limits
    """
    # NOTE(review): appears to be auto-generated from the DatadogAgent CRD
    # schema; prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 limits: Optional[Mapping[str, 'outputs.DatadogAgentSpecClusterAgentConfigResourcesLimits']] = None,
                 requests: Optional[Mapping[str, 'outputs.DatadogAgentSpecClusterAgentConfigResourcesRequests']] = None):
        """
        Datadog cluster-agent resource requests and limits
        :param Mapping[str, 'DatadogAgentSpecClusterAgentConfigResourcesLimitsArgs'] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param Mapping[str, 'DatadogAgentSpecClusterAgentConfigResourcesRequestsArgs'] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        # only set fields that were explicitly provided
        if limits is not None:
            pulumi.set(__self__, "limits", limits)
        if requests is not None:
            pulumi.set(__self__, "requests", requests)
    @property
    @pulumi.getter
    def limits(self) -> Optional[Mapping[str, 'outputs.DatadogAgentSpecClusterAgentConfigResourcesLimits']]:
        """
        Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "limits")
    @property
    @pulumi.getter
    def requests(self) -> Optional[Mapping[str, 'outputs.DatadogAgentSpecClusterAgentConfigResourcesRequests']]:
        """
        Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "requests")
    def _translate_property(self, prop):
        # map camelCase wire property names to snake_case Python attribute names
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigResourcesLimits(dict):
    # Generated output type with no declared properties (the CRD schema
    # declares this object without fixed fields).
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # map camelCase wire property names to snake_case Python attribute names
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigResourcesRequests(dict):
    # Generated output type with no declared properties (the CRD schema
    # declares this object without fixed fields).
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # map camelCase wire property names to snake_case Python attribute names
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumeMounts(dict):
    """
    VolumeMount describes a mounting of a Volume within a container.
    """
    # NOTE(review): appears to be auto-generated from the DatadogAgent CRD
    # schema; prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 mount_path: str,
                 name: str,
                 mount_propagation: Optional[str] = None,
                 read_only: Optional[bool] = None,
                 sub_path: Optional[str] = None,
                 sub_path_expr: Optional[str] = None):
        """
        VolumeMount describes a mounting of a Volume within a container.
        :param str mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
        :param str name: This must match the Name of a Volume.
        :param str mount_propagation: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
        :param bool read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
        :param str sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
        :param str sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
        """
        # required fields
        pulumi.set(__self__, "mount_path", mount_path)
        pulumi.set(__self__, "name", name)
        # optional fields are only set when explicitly provided
        if mount_propagation is not None:
            pulumi.set(__self__, "mount_propagation", mount_propagation)
        if read_only is not None:
            pulumi.set(__self__, "read_only", read_only)
        if sub_path is not None:
            pulumi.set(__self__, "sub_path", sub_path)
        if sub_path_expr is not None:
            pulumi.set(__self__, "sub_path_expr", sub_path_expr)
    @property
    @pulumi.getter(name="mountPath")
    def mount_path(self) -> str:
        """
        Path within the container at which the volume should be mounted. Must not contain ':'.
        """
        return pulumi.get(self, "mount_path")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        This must match the Name of a Volume.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="mountPropagation")
    def mount_propagation(self) -> Optional[str]:
        """
        mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
        """
        return pulumi.get(self, "mount_propagation")
    @property
    @pulumi.getter(name="readOnly")
    def read_only(self) -> Optional[bool]:
        """
        Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
        """
        return pulumi.get(self, "read_only")
    @property
    @pulumi.getter(name="subPath")
    def sub_path(self) -> Optional[str]:
        """
        Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
        """
        return pulumi.get(self, "sub_path")
    @property
    @pulumi.getter(name="subPathExpr")
    def sub_path_expr(self) -> Optional[str]:
        """
        Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
        """
        return pulumi.get(self, "sub_path_expr")
    def _translate_property(self, prop):
        # map camelCase wire property names to snake_case Python attribute names
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigVolumes(dict):
"""
Volume represents a named volume in a pod that may be accessed by any container in the pod.
"""
def __init__(__self__, *,
name: str,
aws_elastic_block_store: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesAwsElasticBlockStore'] = None,
azure_disk: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesAzureDisk'] = None,
azure_file: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesAzureFile'] = None,
cephfs: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesCephfs'] = None,
cinder: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesCinder'] = None,
config_map: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesConfigMap'] = None,
csi: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesCsi'] = None,
downward_api: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesDownwardAPI'] = None,
empty_dir: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesEmptyDir'] = None,
fc: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesFc'] = None,
flex_volume: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesFlexVolume'] = None,
flocker: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesFlocker'] = None,
gce_persistent_disk: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesGcePersistentDisk'] = None,
git_repo: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesGitRepo'] = None,
glusterfs: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesGlusterfs'] = None,
host_path: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesHostPath'] = None,
iscsi: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesIscsi'] = None,
nfs: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesNfs'] = None,
persistent_volume_claim: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesPersistentVolumeClaim'] = None,
photon_persistent_disk: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesPhotonPersistentDisk'] = None,
portworx_volume: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesPortworxVolume'] = None,
projected: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesProjected'] = None,
quobyte: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesQuobyte'] = None,
rbd: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesRbd'] = None,
scale_io: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesScaleIO'] = None,
secret: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesSecret'] = None,
storageos: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesStorageos'] = None,
vsphere_volume: Optional['outputs.DatadogAgentSpecClusterAgentConfigVolumesVsphereVolume'] = None):
"""
Volume represents a named volume in a pod that may be accessed by any container in the pod.
:param str name: Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param 'DatadogAgentSpecClusterAgentConfigVolumesAwsElasticBlockStoreArgs' aws_elastic_block_store: AWSElasticBlockStore represents | |
if files is None:
return None
mols = MoleculeSet([])
for file in files:
mol = None
self.lastDir = os.path.split(file)[0]
self.fileExt = os.path.splitext(file)[1]
if not self.fileExt in [".pdb",".pdbq", ".pdbqs", ".pdbqt",
".mol2", ".pqr", ".f2d", ".cif",".gro"]:
# popup a pannel to allow the user to choose the parser
val = self.showForm('parser')
if not val == {}:
self.fileExt = self.parserToExt[val['parser']]
self.vf.GUI.configMenuEntry(self.GUI.menuButton,
cmdmenuEntry,
state='disabled')
mol = self.vf.tryto(self.doitWrapper, file, kw={})
self.vf.GUI.configMenuEntry(self.GUI.menuButton,
cmdmenuEntry,state='normal')
if mol is not None:
mols.data.extend(mol.data)
if len(mols): return mols
else: return None
def __call__(self, filename, parser=None, **kw):
"""mols <- readMolecule(filename,parser=None, **kw)
\nfilename --- path to a file describing a molecule
\nparser --- you can specify the parser to use to parse the file
has to be one of 'PDB', 'PDBQ', 'PDBQS','PDBQT', 'PQR', 'MOL2'.
This is useful when your file doesn't have the correct
extension.
"""
self.fileExt = os.path.splitext(filename)[1]
kw['parser'] = parser
kw['ask']=0
return apply ( self.doitWrapper, (filename,), kw)
def doit(self, filename, parser=None, ask=True, addToRecent=True, **kw):
import os
if not os.path.exists(filename):
self.warningMsg("ERROR: %s doesn't exists"%filename)
return None
if not parser is None and self.parserToExt.has_key(parser):
self.fileExt = self.parserToExt[parser]
# Call the right parser
if self.fileExt == ".pdb" or self.fileExt == ".ent":
# Call readPDB
mols = self.vf.readPDB(filename, ask=ask, topCommand=0)
elif self.fileExt == ".pdbq":
# Call readPDBQ
mols = self.vf.readPDBQ(filename, ask=ask,topCommand=0)
elif self.fileExt == ".pdbqs":
# Call readPDBQS
mols = self.vf.readPDBQS(filename,ask=ask, topCommand=0)
elif self.fileExt == ".pdbqt":
foundModelsAs = kw.has_key("modelsAs")
#print "readMolecule: foundModelsAs=", foundModelsAs
setupUpdates = kw.get("setupUpdates", 0)
#print "readMolecule: setupUpdates=", setupUpdates
#set default in any case
modelsAs = kw.get('modelsAs', 'molecules')
#check for multimodel file
fptr = open(filename)
lines = fptr.readlines()
fptr.close()
found = 0
for l in lines:
if l.find("MODEL")==0:
found = found + 1
if found>1:
break
if found > 0:
if not foundModelsAs:
ifd = InputFormDescr(title="Load MODELS as: ")
ifd.append({'name': 'ModelsAsMols',
'text': 'separate molecules',
'widgetType':Tkinter.Radiobutton,
'tooltip':'Check this button to add a separate molecule for each model.',
'variable': self.modelsAsMols,
'value': '1',
'text': 'Molecules ',
'gridcfg': {'sticky':'w','columnspan':2}})
ifd.append({'name': 'ModelsAsConfs',
'widgetType':Tkinter.Radiobutton,
'tooltip':'Check this button to add a single molecule\n with a separate conformation for each model',
'variable': self.modelsAsMols,
'value': '0',
'text': 'Conformations ',
'gridcfg': {'sticky':'w'}})
ifd.append({'name': 'updates label',
'widgetType':Tkinter.Label,
'tooltip':'On sets changing models with arrow keys',
'text': 'If conformations, change models using arrow keys',
'gridcfg': {'sticky':'w', 'column':0, 'columnspan':2}})
ifd.append({'name': 'updates',
'widgetType':Tkinter.Radiobutton,
'tooltip':'Yes sets changing models with arrow keys',
'variable': self.doModelUpdates,
'value': '1',
'text': 'Yes',
'gridcfg': {'sticky':'w', 'column':0}})
ifd.append({'name': 'no_updates',
'widgetType':Tkinter.Radiobutton,
'tooltip':'No do not change models with arrow keys',
'variable': self.doModelUpdates,
'value': '0',
'text': 'No',
'gridcfg': {'sticky':'w', 'row':-1, 'column':1}})
d = self.vf.getUserInput(ifd)
# if cancel, stop
if not len(d): return
ans = d['ModelsAsMols']
if not ans:
modelsAs = 'conformations'
if modelsAs=='conformations' and (self.doModelUpdates.get() or setupUpdates):
e = self.vf.GUI.VIEWER.currentCamera.eventManager
if "<Right>" in e.eventHandlers.keys():
l = e.eventHandlers["<Right>"]
if self.processArrowEvent not in l:
self.vf.GUI.addCameraCallback("<Right>", self.processArrowEvent)
else:
self.vf.GUI.addCameraCallback("<Right>", self.processArrowEvent)
if "<Left>" in e.eventHandlers.keys():
l = e.eventHandlers["<Left>"]
if self.processArrowEvent not in l:
self.vf.GUI.addCameraCallback("<Left>", self.processArrowEvent)
else:
self.vf.GUI.addCameraCallback("<Left>", self.processArrowEvent)
#self.warningMsg("added arrow keys to camera callbacks!")
# Call readPDBQT
mols = self.vf.readPDBQT(filename, ask=ask, modelsAs=modelsAs,
setupUpdates=setupUpdates, topCommand=0)
elif self.fileExt == ".pqr":
# Call readPQR
mols = self.vf.readPQR(filename,ask=ask, topCommand=0)
elif self.fileExt == ".mol2":
# Call readMOL2
mols = self.vf.readMOL2(filename,ask=ask, topCommand=0)
elif self.fileExt == ".cif":
# Call readMMCIF
mols = self.vf.readMMCIF(filename, ask=ask, topCommand=0)
elif self.fileExt == ".gro":
# Call readGRO
mols = self.vf.readGRO(filename, ask=ask, topCommand=0)
elif self.fileExt == ".f2d":
# Call readGRO
mols = self.vf.readF2D(filename, ask=ask, topCommand=0)
else:
self.warningMsg("ERROR: Extension %s not recognized"%self.fileExt)
return None
if mols is None:
self.warningMsg("ERROR: Could not read %s"%filename)
if addToRecent and hasattr(self.vf,'recentFiles'):
self.vf.recentFiles.add(filename, self.name)
return mols
from Pmv.fileCommandsGUI import MoleculeReaderGUI
class PDBReader(MoleculeLoader):
    """Command to load PDB files using a PDB spec compliant parser
    \nPackage : Pmv
    \nModule : fileCommands
    \nClass : PDBReader
    \nCommand : readPDB
    \nSynopsis:\n
        mols <--- readPDB(filename, **kw)
    \nRequired Arguments:\n
        filename --- path to the PDB file
    """
    lastDir = None
    def onAddCmdToViewer(self):
        # readMolecule provides the shared reading machinery; load it first
        if not hasattr(self.vf,'readMolecule'):
            self.vf.loadCommand('fileCommands', ['readMolecule'], 'Pmv',
                                topCommand=0)
    def onRemoveObjectFromViewer(self, obj):
        """ Function to remove the sets able to reference a TreeNode created
        in this command : Here remove the alternate location list created
        when a pdb File is read."""
        if self.vf.undoableDelete__: return
        if not hasattr(obj, 'parser') or \
           not isinstance(obj.parser, PdbParser): return
        MoleculeLoader.onRemoveObjectFromViewer(self, obj)
        # Free the parser too !
        # this dictionary contains references to itself through functions,
        # so break the cycles explicitly before dropping the parser.
        if hasattr(obj.parser, 'pdbRecordParser'):
            del obj.parser.pdbRecordParser
        if hasattr(obj.parser, 'mol'):
            del obj.parser.mol
        del obj.parser
    def doit(self, filename, ask=True):
        """Parse filename with PdbParser and add the molecules to the viewer."""
        modelsAs = self.vf.userpref['Read molecules as']['value']
        newparser = PdbParser(filename,modelsAs=modelsAs)
        # overwrite progress bar methods
        if self.vf.hasGui:
            newparser.updateProgressBar = self.vf.GUI.updateProgressBar
            newparser.configureProgressBar = self.vf.GUI.configureProgressBar
        mols = newparser.parse()
        if mols is None: return
        newmol = []
        for m in mols:
            mol = self.vf.addMolecule(m, ask)
            if mol is None:
                # adding was refused/cancelled: drop the parser and return
                # an empty container of the same class as the parse result
                del newparser
                return mols.__class__([])
            newmol.append(mol)
        return mols.__class__(newmol)
    def __call__(self, filename, **kw):
        """mols <- readPDB(filename, **kw)
        \nfilename --- path to the PDB file"""
        kw['ask'] = 0
        # direct call replaces the deprecated apply() builtin
        return self.doitWrapper(filename, **kw)
# GUI description used to hook the readPDB command into the File menu.
# NOTE(review): 'menyBarName' / 'menyEntryLabel' look like typos of
# 'menuBarName' / 'menuEntryLabel'; the keys are kept as-is because any
# consumer of this dict must be matching the existing spelling.
pdbReaderGuiDescr = {'widgetType':'Menu', 'menyBarName':'menuRoot',
                     'menuButtonName':'File',
                     'menyEntryLabel':'Read PDB ...',
                     'index':0}
#PDBReaderGUI = CommandGUI()
#PDBReaderGUI.addMenuCommand('menuRoot', 'File', 'Read PDB ...',index=0)
class MMCIFReader(MoleculeLoader):
    """This command reads macromolecular Crystallographic Information File (mmCIF)
    \nPackage : Pmv
    \nModule : fileCommands
    \nClass : MMCIFReader
    \nCommand :readMMCIF
    \nSynopsis:\n
        mols <- readMMCIF(filename, **kw)
    \nRequired Arguments:\n
        filename --- path to the MMCIF file
    """
    lastDir = None
    def onAddCmdToViewer(self):
        # readMolecule provides the shared reading machinery; load it first
        if not hasattr(self.vf,'readMolecule'):
            self.vf.loadCommand('fileCommands', ['readMolecule'], 'Pmv',
                                topCommand=0)
    def onRemoveObjectFromViewer(self, obj):
        """ Function to remove the sets able to reference a TreeNode created
        in this command : Here remove the alternate location list created
        when a pdb File is read."""
        if self.vf.undoableDelete__: return
        # NOTE(review): this guard tests PdbParser although doit() builds an
        # MMCIFParser; kept as-is to preserve the existing behavior.
        if not hasattr(obj, 'parser') or \
           not isinstance(obj.parser, PdbParser): return
        MoleculeLoader.onRemoveObjectFromViewer(self, obj)
        # Free the parser too !
        # this dictionary contains references to itself through functions,
        # so break the cycles explicitly before dropping the parser.
        if hasattr(obj.parser, 'pdbRecordParser'):
            del obj.parser.pdbRecordParser
        if hasattr(obj.parser, 'mol'):
            del obj.parser.mol
        del obj.parser
    def doit(self, filename, ask=True):
        """Parse filename with MMCIFParser and add the molecules to the viewer."""
        newparser = MMCIFParser(filename)
        # overwrite progress bar methods
        if self.vf.hasGui:
            newparser.updateProgressBar = self.vf.GUI.updateProgressBar
            newparser.configureProgressBar = self.vf.GUI.configureProgressBar
        mols = newparser.parse()
        if mols is None: return
        newmol = []
        for m in mols:
            mol = self.vf.addMolecule(m, ask)
            if mol is None:
                # adding was refused/cancelled: drop the parser and return
                # an empty container of the same class as the parse result
                del newparser
                return mols.__class__([])
            newmol.append(mol)
        return mols.__class__(newmol)
    def __call__(self, filename, **kw):
        """mols <- readMMCIF(filename, **kw)
        \nfilename --- path to the mmCIF file"""
        kw['ask'] = 0
        # direct call replaces the deprecated apply() builtin
        return self.doitWrapper(filename, **kw)
class GROReader(MoleculeLoader):
    """This command reads molecule files in .gro format using groParser
    \nPackage : Pmv
    \nModule : fileCommands
    \nClass : GROReader
    \nCommand :readGRO
    \nSynopsis:\n
        mols <- readGRO(filename, **kw)
    \nRequired Arguments:\n
        filename --- path to the GRO file
    """
    lastDir = None
    def onAddCmdToViewer(self):
        # readMolecule provides the shared reading machinery; load it first
        if not hasattr(self.vf,'readMolecule'):
            self.vf.loadCommand('fileCommands', ['readMolecule'], 'Pmv',
                                topCommand=0)
    def onRemoveObjectFromViewer(self, obj):
        """ Function to remove the sets able to reference a TreeNode created
        in this command : Here remove the alternate location list created
        when a pdb File is read."""
        if self.vf.undoableDelete__: return
        # NOTE(review): this guard tests PdbParser although doit() builds a
        # groParser; kept as-is to preserve the existing behavior.
        if not hasattr(obj, 'parser') or \
           not isinstance(obj.parser, PdbParser): return
        MoleculeLoader.onRemoveObjectFromViewer(self, obj)
        # Free the parser too !
        # this dictionary contains references to itself through functions,
        # so break the cycles explicitly before dropping the parser.
        if hasattr(obj.parser, 'pdbRecordParser'):
            del obj.parser.pdbRecordParser
        if hasattr(obj.parser, 'mol'):
            del obj.parser.mol
        del obj.parser
    def doit(self, filename, ask=True):
        """Parse filename with groParser and add the molecules to the viewer."""
        newparser = groParser(filename)
        # overwrite progress bar methods
        if self.vf.hasGui:
            newparser.updateProgressBar = self.vf.GUI.updateProgressBar
            newparser.configureProgressBar = self.vf.GUI.configureProgressBar
        mols = newparser.parse()
        if mols is None: return
        newmol = []
        for m in mols:
            mol = self.vf.addMolecule(m, ask)
            if mol is None:
                # adding was refused/cancelled: drop the parser and return
                # an empty container of the same class as the parse result
                del newparser
                return mols.__class__([])
            newmol.append(mol)
        return mols.__class__(newmol)
    def __call__(self, filename, **kw):
        """mols <- readGRO(filename, **kw)
        \nfilename --- path to the GRO file"""
        kw['ask'] = 0
        # direct call replaces the deprecated apply() builtin
        return self.doitWrapper(filename, **kw)
class PDBQReader(MoleculeLoader):
"""Command to load AutoDock PDBQ files.
\nPackage : Pmv
\nModule : fileCommands
\nClass : PDBQReader
\nCommand : readPDBQ
\nSynopsis:\n
mols <--- readPDBQ(filename, **kw)
\nRequired Arguments:\n
filename --- path to the PDBQ file
"""
def onAddCmdToViewer(self):
if not hasattr(self.vf,'readMolecule'):
self.vf.loadCommand('fileCommands', ['readMolecule'], 'Pmv',
topCommand=0)
def onRemoveObjectFromViewer(self, obj):
""" Function to remove the sets able to reference a TreeNode created
in this command : Here remove the alternate location list created
when a pdb File is read."""
if self.vf.undoableDelete__: return
if not hasattr(obj, 'parser') or \
not isinstance(obj.parser, | |
# Copyright (c) 2015.
# <NAME> <bytefish[at]gmx[dot]de> and
# <NAME> <flier[at]techfak.uni-bielefeld.de> and
# <NAME> <nkoester[at]techfak.uni-bielefeld.de>
#
#
# Released to public domain under terms of the BSD Simplified license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# See <http://www.opensource.org/licenses/bsd-license>
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# coding: utf-8
import numpy as np
from scipy.signal import convolve2d
class LocalDescriptor(object):
    """Abstract base for local texture descriptors (LBP-family operators).

    Subclasses implement __call__(X) to map a 2D image to a descriptor map.
    """

    def __init__(self, neighbors):
        # number of sampling points the operator compares against the center
        self._neighbors = neighbors

    def __call__(self, X):
        raise NotImplementedError("Every LBPOperator must implement the __call__ method.")

    @property
    def neighbors(self):
        return self._neighbors

    def __repr__(self):
        return "LBPOperator (neighbors=%s)" % (self._neighbors)


class OriginalLBP(LocalDescriptor):
    """Classic 3x3 LBP: each pixel's 8 immediate neighbors are compared
    (>=) against the center and the results packed into one byte."""

    def __init__(self):
        LocalDescriptor.__init__(self, neighbors=8)

    def __call__(self, X):
        X = np.asarray(X)
        center = X[1:-1, 1:-1]
        rows, cols = center.shape
        # neighbor window offsets, listed in the bit order 7..0 used by the
        # descriptor (top-left, top, top-right, right, ... counterclockwise)
        offsets = ((0, 0), (0, 1), (0, 2), (1, 2),
                   (2, 2), (2, 1), (2, 0), (1, 0))
        code = 0
        for bit, (dy, dx) in zip(range(7, -1, -1), offsets):
            neighbor = X[dy:dy + rows, dx:dx + cols]
            code = code + (1 << bit) * (neighbor >= center)
        return code

    def __repr__(self):
        return "OriginalLBP (neighbors=%s)" % (self._neighbors)
class ExtendedLBP(LocalDescriptor):
    """Circular (extended) LBP: samples `neighbors` points on a circle of
    `radius` around each pixel, bilinearly interpolating off-grid sample
    positions, and packs the >=-center comparisons into an integer code.
    Assumes neighbors <= 32 so the code fits the uint32 result."""
    def __init__(self, radius=1, neighbors=8):
        LocalDescriptor.__init__(self, neighbors=neighbors)
        self._radius = radius
    def __call__(self, X):
        X = np.asanyarray(X)
        ysize, xsize = X.shape
        # define circle
        angles = 2 * np.pi / self._neighbors
        theta = np.arange(0, 2 * np.pi, angles)
        # calculate sample points on circle with radius
        sample_points = np.array([-np.sin(theta), np.cos(theta)]).T
        sample_points *= self._radius
        # find boundaries of the sample points
        miny = min(sample_points[:, 0])
        maxy = max(sample_points[:, 0])
        minx = min(sample_points[:, 1])
        maxx = max(sample_points[:, 1])
        # Block size: each LBP code is computed within a bsizey*bsizex block.
        # Cast to int: NumPy >= 1.12 rejects the float values produced by
        # np.ceil/np.floor when used as array shapes or slice indices.
        blocksizey = int(np.ceil(max(maxy, 0)) - np.floor(min(miny, 0)) + 1)
        blocksizex = int(np.ceil(max(maxx, 0)) - np.floor(min(minx, 0)) + 1)
        # coordinates of origin (0,0) in the block
        origy = int(0 - np.floor(min(miny, 0)))
        origx = int(0 - np.floor(min(minx, 0)))
        # calculate output image size
        dx = xsize - blocksizex + 1
        dy = ysize - blocksizey + 1
        # get center points
        C = np.asarray(X[origy:origy + dy, origx:origx + dx], dtype=np.uint8)
        result = np.zeros((dy, dx), dtype=np.uint32)
        for i, p in enumerate(sample_points):
            # get coordinate in the block
            y, x = p + (origy, origx)
            # integer floors/ceils so the slices below are valid
            fx = int(np.floor(x))
            fy = int(np.floor(y))
            cx = int(np.ceil(x))
            cy = int(np.ceil(y))
            # calculate fractional part
            ty = y - fy
            tx = x - fx
            # calculate bilinear interpolation weights
            w1 = (1 - tx) * (1 - ty)
            w2 = tx * (1 - ty)
            w3 = (1 - tx) * ty
            w4 = tx * ty
            # calculate interpolated image
            N = w1 * X[fy:fy + dy, fx:fx + dx]
            N += w2 * X[fy:fy + dy, cx:cx + dx]
            N += w3 * X[cy:cy + dy, fx:fx + dx]
            N += w4 * X[cy:cy + dy, cx:cx + dx]
            # update LBP codes; the explicit uint32 cast keeps the in-place
            # add within one dtype (uint32 += int64 violates NumPy's
            # 'same_kind' in-place casting rule)
            D = N >= C
            result += (1 << i) * D.astype(np.uint32)
        return result
    @property
    def radius(self):
        return self._radius
    def __repr__(self):
        return "ExtendedLBP (neighbors=%s, radius=%s)" % (self._neighbors, self._radius)
class VarLBP(LocalDescriptor):
    """Local variance descriptor (VAR): computes, per pixel, the variance of
    the circularly sampled neighborhood (radius/neighbors as in ExtendedLBP)
    using Welford's online algorithm."""
    def __init__(self, radius=1, neighbors=8):
        LocalDescriptor.__init__(self, neighbors=neighbors)
        self._radius = radius
    def __call__(self, X):
        X = np.asanyarray(X)
        ysize, xsize = X.shape
        # define circle
        angles = 2 * np.pi / self._neighbors
        theta = np.arange(0, 2 * np.pi, angles)
        # calculate sample points on circle with radius
        sample_points = np.array([-np.sin(theta), np.cos(theta)]).T
        sample_points *= self._radius
        # find boundaries of the sample points
        miny = min(sample_points[:, 0])
        maxy = max(sample_points[:, 0])
        minx = min(sample_points[:, 1])
        maxx = max(sample_points[:, 1])
        # Block size: cast to int because NumPy >= 1.12 rejects the float
        # values np.ceil/np.floor return when used as shapes or indices.
        blocksizey = int(np.ceil(max(maxy, 0)) - np.floor(min(miny, 0)) + 1)
        blocksizex = int(np.ceil(max(maxx, 0)) - np.floor(min(minx, 0)) + 1)
        # coordinates of origin (0,0) in the block
        origy = int(0 - np.floor(min(miny, 0)))
        origx = int(0 - np.floor(min(minx, 0)))
        # Calculate output image size:
        dx = xsize - blocksizex + 1
        dy = ysize - blocksizey + 1
        # Allocate memory for online variance calculation:
        mean = np.zeros((dy, dx), dtype=np.float32)
        delta = np.zeros((dy, dx), dtype=np.float32)
        m2 = np.zeros((dy, dx), dtype=np.float32)
        # Holds the resulting variance matrix:
        result = np.zeros((dy, dx), dtype=np.float32)
        for i, p in enumerate(sample_points):
            # Get coordinate in the block:
            y, x = p + (origy, origx)
            # Integer floors/ceils so the slices below are valid:
            fx = int(np.floor(x))
            fy = int(np.floor(y))
            cx = int(np.ceil(x))
            cy = int(np.ceil(y))
            # Calculate fractional part:
            ty = y - fy
            tx = x - fx
            # Calculate bilinear interpolation weights:
            w1 = (1 - tx) * (1 - ty)
            w2 = tx * (1 - ty)
            w3 = (1 - tx) * ty
            w4 = tx * ty
            # Calculate interpolated image:
            N = w1 * X[fy:fy + dy, fx:fx + dx]
            N += w2 * X[fy:fy + dy, cx:cx + dx]
            N += w3 * X[cy:cy + dy, fx:fx + dx]
            N += w4 * X[cy:cy + dy, cx:cx + dx]
            # Welford online update (see
            # http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm):
            delta = N - mean
            mean = mean + delta / float(i + 1)
            m2 = m2 + delta * (N - mean)
        # Sample variance; m2/self._neighbors would be the population estimate:
        result = m2 / (self._neighbors - 1)
        return result
    @property
    def radius(self):
        return self._radius
    def __repr__(self):
        return "VarLBP (neighbors=%s, radius=%s)" % (self._neighbors, self._radius)
class LPQ(LocalDescriptor):
""" This implementation of Local Phase Quantization (LPQ) is a 1:1 adaption of the
original implementation by <NAME> & <NAME>, which is available at:
* http://www.cse.oulu.fi/CMV/Downloads/LPQMatlab
So all credit goes to them.
Reference:
<NAME> & <NAME> (2008) Blur insensitive texture classification
using local phase quantization. Proc. Image and Signal Processing
(ICISP 2008), Cherbourg-Octeville, France, 5099:236-243.
Copyright 2008 by Heikkilae & Ojansivu
"""
    def __init__(self, radius=3):
        # radius of the local window; the neighbor count is fixed at 8
        # for this descriptor
        LocalDescriptor.__init__(self, neighbors=8)
        self._radius = radius
def euc_dist(self, X):
Y = X = X.astype(np.float)
XX = np.sum(X * X, axis=1)[:, np.newaxis]
YY = XX.T
distances = np.dot(X, Y.T)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
distances.flat[::distances.shape[0] + 1] = 0.0
return np.sqrt(distances)
def __call__(self, X):
f = 1.0
x = np.arange(-self._radius, self._radius + 1)
n = len(x)
rho = 0.95
[xp, yp] = np.meshgrid(np.arange(1, (n + 1)), np.arange(1, (n + 1)))
pp = np.concatenate((xp, yp)).reshape(2, -1)
dd = self.euc_dist(pp.T) # squareform(pdist(...)) would do the job, too...
C = np.power(rho, dd)
w0 = (x * 0.0 + 1.0)
w1 = np.exp(-2 * np.pi * 1j * x * f / n)
w2 = np.conj(w1)
q1 = | |
if hasattr(listener, "enterDistinct_clause"):
listener.enterDistinct_clause(self)
        # ANTLR-generated listener exit hook for the distinct_clause rule.
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitDistinct_clause"):
                listener.exitDistinct_clause(self)
    # ANTLR-generated rule method: distinct_clause : DISTINCT alias ;
    def distinct_clause(self):
        localctx = PigParser.Distinct_clauseContext(
            self, self._ctx, self.state)
        self.enterRule(localctx, 76, self.RULE_distinct_clause)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 413
            self.match(PigParser.DISTINCT)
            self.state = 414
            self.alias()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the col_ref rule.
    class Col_refContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def alias_col_ref(self):
            return self.getTypedRuleContext(PigParser.Alias_col_refContext, 0)
        def dollar_col_ref(self):
            return self.getTypedRuleContext(PigParser.Dollar_col_refContext, 0)
        def getRuleIndex(self):
            return PigParser.RULE_col_ref
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterCol_ref"):
                listener.enterCol_ref(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitCol_ref"):
                listener.exitCol_ref(self)
    # ANTLR-generated rule method: col_ref : alias_col_ref | dollar_col_ref ;
    def col_ref(self):
        localctx = PigParser.Col_refContext(self, self._ctx, self.state)
        self.enterRule(localctx, 78, self.RULE_col_ref)
        try:
            self.state = 418
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [PigParser.GROUP, PigParser.IDENTIFIER]:
                self.enterOuterAlt(localctx, 1)
                self.state = 416
                self.alias_col_ref()
                pass
            elif token in [PigParser.DOLLAR]:
                self.enterOuterAlt(localctx, 2)
                self.state = 417
                self.dollar_col_ref()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the alias_col_ref rule.
    class Alias_col_refContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def GROUP(self):
            return self.getToken(PigParser.GROUP, 0)
        def IDENTIFIER(self):
            return self.getToken(PigParser.IDENTIFIER, 0)
        def getRuleIndex(self):
            return PigParser.RULE_alias_col_ref
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterAlias_col_ref"):
                listener.enterAlias_col_ref(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitAlias_col_ref"):
                listener.exitAlias_col_ref(self)
    # ANTLR-generated rule method: alias_col_ref : GROUP | IDENTIFIER ;
    def alias_col_ref(self):
        localctx = PigParser.Alias_col_refContext(self, self._ctx, self.state)
        self.enterRule(localctx, 80, self.RULE_alias_col_ref)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 420
            _la = self._input.LA(1)
            if not(_la == PigParser.GROUP or _la == PigParser.IDENTIFIER):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the dollar_col_ref rule.
    class Dollar_col_refContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def DOLLAR(self):
            return self.getToken(PigParser.DOLLAR, 0)
        def INTEGER(self):
            return self.getToken(PigParser.INTEGER, 0)
        def getRuleIndex(self):
            return PigParser.RULE_dollar_col_ref
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterDollar_col_ref"):
                listener.enterDollar_col_ref(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitDollar_col_ref"):
                listener.exitDollar_col_ref(self)
    # ANTLR-generated rule method: dollar_col_ref : DOLLAR INTEGER ;
    def dollar_col_ref(self):
        localctx = PigParser.Dollar_col_refContext(self, self._ctx, self.state)
        self.enterRule(localctx, 82, self.RULE_dollar_col_ref)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 422
            self.match(PigParser.DOLLAR)
            self.state = 423
            self.match(PigParser.INTEGER)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the infix_expr rule.
    class Infix_exprContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def add_expr(self):
            return self.getTypedRuleContext(PigParser.Add_exprContext, 0)
        def getRuleIndex(self):
            return PigParser.RULE_infix_expr
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterInfix_expr"):
                listener.enterInfix_expr(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitInfix_expr"):
                listener.exitInfix_expr(self)
    # ANTLR-generated rule method: infix_expr : add_expr ;
    def infix_expr(self):
        localctx = PigParser.Infix_exprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 84, self.RULE_infix_expr)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 425
            self.add_expr()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the const_expr rule.
    class Const_exprContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def scalar(self):
            return self.getTypedRuleContext(PigParser.ScalarContext, 0)
        def map_(self):
            return self.getTypedRuleContext(PigParser.Map_Context, 0)
        def bag(self):
            return self.getTypedRuleContext(PigParser.BagContext, 0)
        def tuple_(self):
            return self.getTypedRuleContext(PigParser.Tuple_Context, 0)
        def getRuleIndex(self):
            return PigParser.RULE_const_expr
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterConst_expr"):
                listener.enterConst_expr(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitConst_expr"):
                listener.exitConst_expr(self)
    # ANTLR-generated rule method: const_expr : scalar | map_ | bag | tuple_ ;
    # (alternative chosen by adaptive LL(*) prediction)
    def const_expr(self):
        localctx = PigParser.Const_exprContext(self, self._ctx, self.state)
        self.enterRule(localctx, 86, self.RULE_const_expr)
        try:
            self.state = 431
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input, 44, self._ctx)
            if la_ == 1:
                self.enterOuterAlt(localctx, 1)
                self.state = 427
                self.scalar()
                pass
            elif la_ == 2:
                self.enterOuterAlt(localctx, 2)
                self.state = 428
                self.map_()
                pass
            elif la_ == 3:
                self.enterOuterAlt(localctx, 3)
                self.state = 429
                self.bag()
                pass
            elif la_ == 4:
                self.enterOuterAlt(localctx, 4)
                self.state = 430
                self.tuple_()
                pass
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the scalar rule.
    class ScalarContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def INTEGER(self):
            return self.getToken(PigParser.INTEGER, 0)
        def LONGINEGER(self):
            return self.getToken(PigParser.LONGINEGER, 0)
        def FLOATNUMBER(self):
            return self.getToken(PigParser.FLOATNUMBER, 0)
        def DOUBLENUMBER(self):
            return self.getToken(PigParser.DOUBLENUMBER, 0)
        def QUOTEDSTRING(self):
            return self.getToken(PigParser.QUOTEDSTRING, 0)
        def NULL(self):
            return self.getToken(PigParser.NULL, 0)
        def getRuleIndex(self):
            return PigParser.RULE_scalar
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterScalar"):
                listener.enterScalar(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitScalar"):
                listener.exitScalar(self)
    # ANTLR-generated rule method: scalar matches any one of
    # INTEGER | LONGINEGER | FLOATNUMBER | DOUBLENUMBER | QUOTEDSTRING | NULL
    # (set membership encoded as a bitmask over token types, offset by 45).
    def scalar(self):
        localctx = PigParser.ScalarContext(self, self._ctx, self.state)
        self.enterRule(localctx, 88, self.RULE_scalar)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 433
            _la = self._input.LA(1)
            if not(((((_la - 45)) & ~0x3f) == 0 and ((1 << (_la - 45)) & ((1 << (PigParser.NULL - 45)) | (1 << (PigParser.INTEGER - 45)) | (1 << (PigParser.DOUBLENUMBER - 45)) | (1 << (PigParser.FLOATNUMBER - 45)) | (1 << (PigParser.QUOTEDSTRING - 45)) | (1 << (PigParser.LONGINEGER - 45)))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the map_ rule.
    class Map_Context(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def LEFT_BRACKET(self):
            return self.getToken(PigParser.LEFT_BRACKET, 0)
        def RIGHT_BRACKET(self):
            return self.getToken(PigParser.RIGHT_BRACKET, 0)
        def keyvalue(self, i: int = None):
            if i is None:
                return self.getTypedRuleContexts(PigParser.KeyvalueContext)
            else:
                return self.getTypedRuleContext(PigParser.KeyvalueContext, i)
        def COMMA(self, i: int = None):
            if i is None:
                return self.getTokens(PigParser.COMMA)
            else:
                return self.getToken(PigParser.COMMA, i)
        def NOT(self):
            return self.getToken(PigParser.NOT, 0)
        def MAP_VAL(self):
            return self.getToken(PigParser.MAP_VAL, 0)
        def getRuleIndex(self):
            return PigParser.RULE_map_
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterMap_"):
                listener.enterMap_(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitMap_"):
                listener.exitMap_(self)
    # ANTLR-generated rule method:
    #   map_ : LEFT_BRACKET (keyvalue (COMMA keyvalue)*)? RIGHT_BRACKET
    #        | NOT MAP_VAL keyvalue+ ;
    def map_(self):
        localctx = PigParser.Map_Context(self, self._ctx, self.state)
        self.enterRule(localctx, 90, self.RULE_map_)
        self._la = 0 # Token type
        try:
            self.state = 454
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [PigParser.LEFT_BRACKET]:
                self.enterOuterAlt(localctx, 1)
                self.state = 435
                self.match(PigParser.LEFT_BRACKET)
                self.state = 444
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                # optional keyvalue list (first-set bitmask, offset by 29)
                if ((((_la - 29)) & ~0x3f) == 0 and ((1 << (_la - 29)) & ((1 << (PigParser.NOT - 29)) | (1 << (PigParser.NULL - 29)) | (1 << (PigParser.QUOTEDSTRING - 29)))) != 0):
                    self.state = 436
                    self.keyvalue()
                    self.state = 441
                    self._errHandler.sync(self)
                    _la = self._input.LA(1)
                    while _la == PigParser.COMMA:
                        self.state = 437
                        self.match(PigParser.COMMA)
                        self.state = 438
                        self.keyvalue()
                        self.state = 443
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                self.state = 446
                self.match(PigParser.RIGHT_BRACKET)
                pass
            elif token in [PigParser.NOT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 447
                self.match(PigParser.NOT)
                self.state = 448
                self.match(PigParser.MAP_VAL)
                self.state = 450
                self._errHandler.sync(self)
                # one-or-more keyvalue loop driven by adaptive prediction
                _alt = 1
                while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
                    if _alt == 1:
                        self.state = 449
                        self.keyvalue()
                    else:
                        raise NoViableAltException(self)
                    self.state = 452
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(
                        self._input, 47, self._ctx)
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    # ANTLR-generated parse-tree context for the keyvalue rule.
    class KeyvalueContext(ParserRuleContext):
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def string_val(self):
            return self.getTypedRuleContext(PigParser.String_valContext, 0)
        def POUND(self):
            return self.getToken(PigParser.POUND, 0)
        def const_expr(self):
            return self.getTypedRuleContext(PigParser.Const_exprContext, 0)
        def NOT(self):
            return self.getToken(PigParser.NOT, 0)
        def KEY_VAL_PAIR(self):
            return self.getToken(PigParser.KEY_VAL_PAIR, 0)
        def getRuleIndex(self):
            return PigParser.RULE_keyvalue
        def enterRule(self, listener: ParseTreeListener):
            if hasattr(listener, "enterKeyvalue"):
                listener.enterKeyvalue(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitKeyvalue"):
                listener.exitKeyvalue(self)
    # ANTLR-generated rule method:
    #   keyvalue : string_val POUND const_expr
    #            | NOT KEY_VAL_PAIR string_val const_expr ;
    def keyvalue(self):
        localctx = PigParser.KeyvalueContext(self, self._ctx, self.state)
        self.enterRule(localctx, 92, self.RULE_keyvalue)
        try:
            self.state = 465
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [PigParser.NULL, PigParser.QUOTEDSTRING]:
                self.enterOuterAlt(localctx, 1)
                self.state = 456
                self.string_val()
                self.state = 457
                self.match(PigParser.POUND)
                self.state = 458
                self.const_expr()
                pass
            elif token in [PigParser.NOT]:
                self.enterOuterAlt(localctx, 2)
                self.state = 460
                self.match(PigParser.NOT)
                self.state = 461
                self.match(PigParser.KEY_VAL_PAIR)
                self.state = 462
                self.string_val()
                self.state = 463
                self.const_expr()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class String_valContext(ParserRuleContext):
        """Parse-tree context for the 'string_val' rule: a QUOTEDSTRING or NULL token."""
        def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
            super().__init__(parent, invokingState)
            self.parser = parser
        def QUOTEDSTRING(self):
            return self.getToken(PigParser.QUOTEDSTRING, 0)
        def NULL(self):
            return self.getToken(PigParser.NULL, 0)
        def getRuleIndex(self):
            return PigParser.RULE_string_val
        def enterRule(self, listener: ParseTreeListener):
            # Dispatch only if the listener implements this rule's hook.
            if hasattr(listener, "enterString_val"):
                listener.enterString_val(self)
        def exitRule(self, listener: ParseTreeListener):
            if hasattr(listener, "exitString_val"):
                listener.exitString_val(self)
    def string_val(self):
        """Parse the 'string_val' rule: consume a single NULL or QUOTEDSTRING token.

        ANTLR-generated; state numbers index the serialized ATN -- do not edit by hand.
        """
        localctx = PigParser.String_valContext(self, self._ctx, self.state)
        self.enterRule(localctx, 94, self.RULE_string_val)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 467
            _la = self._input.LA(1)
            if not(_la == PigParser.NULL or _la == PigParser.QUOTEDSTRING):
                # Unexpected token: let the error handler attempt single-token recovery.
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class BagContext(ParserRuleContext):
def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
super().__init__(parent, invokingState)
self.parser = parser
def LEFT_CURLY(self):
return self.getToken(PigParser.LEFT_CURLY, 0)
def RIGHT_CURLY(self):
return self.getToken(PigParser.RIGHT_CURLY, 0)
def tuple_(self, i: int = None):
if i is None:
return self.getTypedRuleContexts(PigParser.Tuple_Context)
else:
return self.getTypedRuleContext(PigParser.Tuple_Context, i)
def COMMA(self, i: int = None):
if i is | |
#!/usr/bin/python
#----------------------------------------------------------------------
# Jacked cube color picker to control LightBrick v1.0 AKA Optimus Shine
# <NAME> 23 Sept 2010
#----------------------------------------------------------------------
#
# Based on CUBECOLOURDIALOG Widget, Python Code By:
# <NAME>, @ 16 Aug 2007
#
#
#todo:
# loop hue control
# light selection
# actual control
# buffer to record random playing, display as gradient, and loop selection
# remember last color or query network for it on startup, remember custom colors
# Supported light-control backends; CURRENT_MODE indexes into MODES.
MODES = ['CK','Simu','Saiko']
CURRENT_MODE = 1 # 2= saiko liblo
MODE_NAME = MODES[CURRENT_MODE]
# Target light fixtures on the local network.
IP_ADDRESSES = ["192.168.1.200","192.168.1.201","192.168.1.122"]
class LightController:
    """Sends RGB levels (0-255 each) to the lights.

    The concrete backend is bound onto `SendLights` at import time by the
    module-level wiring below, based on CURRENT_MODE.
    """
    def SendLightsSimu(self,r,g,b):
        # Simulation backend: just echo the channel values to stdout.
        print r,g,b
    def SendLightsCK(self,r,g,b):
        # struct.pack(fmt, magic, ver, type, seq, port, flags, timerVal V, uni, 0, 0, 0, 0, data)
        # Color Kinetics UDP backend: repeat the RGB triple for 10 fixtures.
        levels = [r,g,b]*10
        arr = array.array('B', levels)
        # NOTE(review): the packed args don't line up 1:1 with the comment above,
        # and -1 is passed for an unsigned-looking field -- verify against the
        # KiNET packet spec before relying on this backend.
        out = struct.pack("LHHLBxHLB255s", 0x4adc0104, 0x0001, 0x0101, 0, 0, 0, -1, 0, arr.tostring())
        # NOTE(review): IP_ADDRESS (singular) is not defined at module scope here --
        # presumably expected to exist when this backend is selected; confirm.
        socket(AF_INET, SOCK_DGRAM).sendto(out, (IP_ADDRESS, port))
#        print r,g,b
    def SendLightsSaiko(self,r,g,b):
        # Saiko/OSC backend: scale 0-255 channels to floats in [0, 1].
        fRed = r/255.0
        fGreen = g/255.0
        fBlue = b/255.0
        for address in addresses:
            liblo.send(address,'/light/color/set',('f',fRed),('f',fGreen),('f',fBlue))
    def UpdateControlSet(self,listOfLights):
        # Placeholder: light selection is not implemented yet (see module TODOs).
        pass
# Bind the backend selected by CURRENT_MODE onto LightController.SendLights and
# perform that backend's imports/setup.  Runs once at import time.
if CURRENT_MODE == 2:
    import liblo
    addresses = [liblo.Address(IP_ADDRESS,"2222") for IP_ADDRESS in IP_ADDRESSES]
    LightController.SendLights = LightController.SendLightsSaiko
elif CURRENT_MODE == 0:
    import struct
    import array
    from socket import socket, AF_INET, SOCK_DGRAM
    # Color Kinetics KiNET UDP port.
    port = 6038
    LightController.SendLights = LightController.SendLightsCK
else:
    LightController.SendLights = LightController.SendLightsSimu
import wx
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import cubecolourdialog as ccdSource
# from agw.cubecolourdialog import *
except ImportError: # if it's not there locally, try the wxPython lib.
# from wx.lib.agw.cubecolourdialog import *
import wx.lib.agw.cubecolourdialog as ccdSource
#import cubecolourdialog as ccdSource
from wx.lib.agw.cubecolourdialog import Colour
#Colour = ccdSource.Colour
from colourWidgets import RGBCube,HSVWheel,BrightCtrl
from myWidget import PowerCtrl,XYPanel#RGBCube = ccdSource.RGBCube
#HSVWheel = ccdSource.HSVWheel
#BrightCtrl = ccdSource.BrightCtrl
# Re-export helpers/constants from cubecolourdialog so the rest of this file
# can use them unqualified.
CustomPanel = ccdSource.CustomPanel
ColourPanel = ccdSource.ColourPanel
#colourAttributes = ccdSource.colourAttributes
#colourMaxValues = ccdSource.colourMaxValues
# Extended attribute list: r,g,b,h,s,v plus tint (t), chroma (c), power (p),
# with the matching per-attribute maxima below.
colourAttributes = ["r", "g", "b", "h", "s", "v","t","c","p"]
colourMaxValues = [255, 255, 255, 359, 255, 255, 359, 255, 255]
Distance = ccdSource.Distance
Vertex = ccdSource.Vertex
Top = ccdSource.Top
Left = ccdSource.Left
Right = ccdSource.Right
RED=ccdSource.RED
GREEN=ccdSource.GREEN
BLUE=ccdSource.BLUE
LineDescription = ccdSource.LineDescription
Slope = ccdSource.Slope
FindC = ccdSource.FindC
PointOnLine = ccdSource.PointOnLine
Intersection = ccdSource.Intersection
PtFromAngle = ccdSource.PtFromAngle
RECT_WIDTH = ccdSource.RECT_WIDTH
class NewColour(Colour):
    """Colour subclass adding a tint/chroma/power (t, c, p) coordinate set
    alongside the inherited RGB and HSV components."""
    # When True, conversions would hold power constant -- not implemented yet.
    constrainPower = False
    def __init__(self,colour):
#        super.__init__(colour)
#        Colour.__init__(self,colour)
        super(NewColour,self).__init__(colour)
        # Derive the t/c/p components from the freshly-set RGB/HSV values.
        self.ToXYZ()
    def ToHSL(self):
        """Derive HSL components (self.H, self.S, self.L) from current HSV."""
        self.H = self.h
        self.L = (510.0-(self.s)) * (self.v/255.0)
        self.S = self.s * self.v
        if self.L <= 255.0:
            lfactor = self.L
        else:
            lfactor = 510.0 - self.L
        # NOTE(review): lfactor is 0 for pure black -- the division below would
        # raise ZeroDivisionError; confirm callers never hit that case.
        self.S /= lfactor
        self.L /= 2.0
    def ToXYZ(self):
        """Derive tint/chroma/power: t mirrors hue, c mirrors saturation,
        p is the RGB channel sum clamped to 255."""
        self.c = self.s #2*(max(self.b,max(self.r,self.g)) - min(self.b,min(self.r,self.g)))
        self.p = min(255,self.r+self.g+self.b)
        self.t = self.h
        if self.constrainPower:
            # do stuff for ToHSV and ToRGB
            pass
        else:
            pass
    def ToHSV(self):
        # Keep t/c/p in sync whenever HSV is recomputed.
        Colour.ToHSV(self)
        self.ToXYZ()
    def ToRGB(self):
        # Keep t/c/p in sync whenever RGB is recomputed.
        Colour.ToRGB(self)
        self.ToXYZ()
    def HSL_ToRGB_HSV(self):
        """Convert stored HSL (H, S, L) back into HSV, then refresh RGB."""
        self.h = self.H
        ell = self.L/255.0 * 2
        ess = self.S/255.0
        if ell <= 1:
            ess *= ell
        else:
            ess *= (2 - ell)
        self.v = int(255.0*((ell + ess) / 2))
        # NOTE(review): divides by (ell + ess), which is 0 for black -- confirm unreachable.
        self.s = int(255.0*(2*ess /(ell+ess)))
        Colour.ToRGB(self)
    def XYZ_ToRGB_HSV(self):
        """Convert tint/chroma/power (t, c, p) into RGB, rescale the channels so
        their sum matches the requested power, then refresh HSV."""
        maxVal = self.p
        delta = maxVal * self.c / 255.0
        minVal = maxVal - delta
        hue = float(self.t)
        if self.t > 300 or self.t <=60:
            #red max
            r=int(maxVal)
            if self.t > 300:
                g = int(minVal)
                hue = (hue - 360.0)/60.0
                b = int(-(hue*delta - minVal))
            else:
                b=int(minVal)
                hue = hue/60.0
                g = int(hue*delta+minVal)
        elif self.t > 60 and self.t < 180:
            #green max
            g = int(maxVal)
            hue = (hue/60.0 - 2.0)*delta
            if self.t < 120:
                b = int(minVal)
                r = int(minVal - hue)
            else:
                r = int(minVal)
                b = int(minVal + hue)
        else:
            # blue max
            b = int(maxVal)
            hue = (hue/60.0 - 4.0)*delta
            if self.t < 240:
                r = int(minVal)
                g = int(minVal - hue)
            else:
                g = int(minVal)
                r = int(minVal + hue)
        power = self.p
        # NOTE(review): precedence makes this r + g + (b/1.0); numerically it
        # still equals float(r+g+b), which appears to be the intent (force
        # float division below) -- confirm.
        sumpower = r+g+b / 1.0
        if sumpower:
            self.r=int(r*power/sumpower)
            self.g=int(g*power/sumpower)
            self.b=int(b*power/sumpower)
#
#        self.h = self.t
#        self.s = self.c
#        power = self.p
#        self.v = self.p
#        Colour.ToRGB(self)
#        colorpower = (self.r + self.g + self.b) / 1
#        if colorpower:
#            self.r=int(self.r*power/colorpower)
#            self.g=int(self.g*power/colorpower)
#            self.b=int(self.b*power/colorpower)
#
        Colour.ToHSV(self)
class NewCustomPanel(CustomPanel):
    """CustomPanel that pushes the clicked custom colour into the owning frame
    and (depending on the auto-send setting) straight out to the lights."""
    def __init__(self,parent,cd):
#        super(NewCustomPanel,self).__init__(parent,cd)
        CustomPanel.__init__(self,parent,cd)
        # Override the base class's click handling with our own.
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
    def OnLeftDown(self, event):
        """
        Handles the ``wx.EVT_LEFT_DOWN`` for L{CustomPanel}.
        :param `event`: a `wx.MouseEvent` event to be processed.
        """
        x, y = event.GetX(), event.GetY()
        # Map the click position onto the custom-colour grid (8 swatches per row).
        selX = (x - self._customColourRect.x)/(self._smallRectangleSize.x + self._gridSpacing)
        selY = (y - self._customColourRect.y)/(self._smallRectangleSize.y + self._gridSpacing)
        ptr = selX + selY*8
        # NOTE(review): no bounds check on ptr -- a click outside the grid could
        # index past self._customColours; confirm the event region prevents it.
#        dc = wx.ClientDC(self)
#        self.PaintHighlight(dc, False)
        self._colourSelection = ptr
        self._mainDialog._colour = NewColour(self._customColours[self._colourSelection])
#        self.PaintCustomColour(dc, selX, selY)
#        self.PaintHighlight(dc, True)
        self._mainDialog.DrawAll()
        self._mainDialog.SendLightsIfManual()
class CubeColourFrame(wx.Frame):
"""
This is the CubeColourFrame main class implementation.
"""
manualSend = False
    def __init__(self, parent, title, lc = None, colourData=None, agwStyle=ccdSource.CCD_SHOW_ALPHA):
        """
        Default class constructor.
        :param `lc`: a L{LightController} to send colours to; a fresh one is
         created when ``None``.
        :param `colourData`: a standard `wx.ColourData` (as used in `wx.ColourFrame`;
        :param `agwStyle`: can be either ``None`` or ``ccdSource.CCD_SHOW_ALPHA``, depending if you want
         to hide the alpha channel control or not.
        """
        if lc == None:
            self.lc = LightController()
        else:
            self.lc = lc
#        wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title=_("Optimus Shine"),
#                           pos=wx.DefaultPosition, size=(900, 900), style=wx.DEFAULT_DIALOG_STYLE)
        wx.Frame.__init__(self, parent, -1, title, pos=wx.DefaultPosition, size=(900, 900))
        if colourData:
            self._colourData = colourData
        else:
            # Default starting colour: mid grey.
            self._colourData = wx.ColourData()
            self._colourData.SetColour(wx.Colour(128, 128, 128))
#        self._oldColour = Colour(self._colourData.GetColour())
        # Working colour, tracked simultaneously in RGB / HSV / tint-chroma-power.
        self._colour = NewColour(self._colourData.GetColour())
        self._inMouse = False
        self._initOver = False
        self._inDrawAll = False
        self._agwStyle = agwStyle
        self.mainPanel = wx.Panel(self, -1)
        # Static boxes framing each control group.
        self.xyzSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "XYZ")
        self.hsvSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "HSB")
        self.rgbValueSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "RGB Values")
        self.hsvValueSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "HSB Values")
        self.xyzValueSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "XYZ Values")
        self.rgbSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "RGB")
        self.curcolSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "Current Color")
#        self.alphaSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "Alpha")
#        self.alphaValueSizer_staticbox = wx.StaticBox(self.mainPanel, -1, "Alpha")
        # Graphical pickers: RGB cube, HSV wheel + brightness, power + XY panel.
        self.rgbBitmap = RGBCube(self.mainPanel)
        self.hsvBitmap = HSVWheel(self.mainPanel)
        self.brightCtrl = BrightCtrl(self.mainPanel)
#        self.alphaCtrl = AlphaCtrl(self.mainPanel)
        self.powerCtrl = PowerCtrl(self.mainPanel)
        self.xyPanel = XYPanel(self.mainPanel)
#        self.showAlpha = wx.CheckBox(self.mainPanel, -1, "Show Alpha Control")
        self.autoSend = wx.CheckBox(self.mainPanel, -1, "AutoSend on\nColorChange")
        self.customColours = NewCustomPanel(self.mainPanel, self._colourData)
        self.addCustom = wx.Button(self.mainPanel, -1, "Add to custom colours")
#        self.okButton = wx.Button(self.mainPanel, -1, "Ok")
        self.cancelButton = wx.Button(self.mainPanel, -1, "Cancel")
        self.sendButton = wx.Button(self.mainPanel, -1, "Send")
#        self.oldColourPanel = ColourPanel(self.mainPanel, style=wx.SIMPLE_BORDER)
        self.newColourPanel = ColourPanel(self.mainPanel, style=wx.SIMPLE_BORDER)
        # Numeric spinners for each colour attribute (see colourAttributes).
        self.redSpin = wx.SpinCtrl(self.mainPanel, -1, "180", min=0, max=255,
                                   style=wx.SP_ARROW_KEYS)
        self.greenSpin = wx.SpinCtrl(self.mainPanel, -1, "180", min=0, max=255,
                                     style=wx.SP_ARROW_KEYS)
        self.blueSpin = wx.SpinCtrl(self.mainPanel, -1, "180", min=0, max=255,
                                    style=wx.SP_ARROW_KEYS)
        self.hueSpin = wx.SpinCtrl(self.mainPanel, -1, "0", min=-1, max=360,
                                   style=wx.SP_ARROW_KEYS)
        self.saturationSpin = wx.SpinCtrl(self.mainPanel, -1, "", min=0, max=255,
                                          style=wx.SP_ARROW_KEYS)
        self.brightnessSpin = wx.SpinCtrl(self.mainPanel, -1, "", min=0, max=255,
                                          style=wx.SP_ARROW_KEYS)
        self.tintSpin = wx.SpinCtrl(self.mainPanel, -1, "0", min=-1, max=360,
                                    style=wx.SP_ARROW_KEYS)
        self.chromaSpin = wx.SpinCtrl(self.mainPanel, -1, "", min=0, max=255,
                                      style=wx.SP_ARROW_KEYS)
        self.powerSpin = wx.SpinCtrl(self.mainPanel, -1, "", min=0, max=255,
                                     style=wx.SP_ARROW_KEYS)
#        self.alphaSpin = wx.SpinCtrl(self.mainPanel, -1, "", min=0, max=255,
#                                     style=wx.SP_ARROW_KEYS)
#        self.accessCode = wx.TextCtrl(self.mainPanel, -1, "", style=wx.TE_READONLY)
#        self.htmlCode = wx.TextCtrl(self.mainPanel, -1, "", style=wx.TE_READONLY)
#        self.webSafe = wx.TextCtrl(self.mainPanel, -1, "", style=wx.TE_READONLY)
#        self.htmlName = wx.TextCtrl(self.mainPanel, -1, "", style=wx.TE_READONLY)
        self.SetProperties()
        self.DoLayout()
        # All nine channel spinners share one handler.
        self.spinCtrls = [self.redSpin, self.greenSpin, self.blueSpin,
                          self.hueSpin, self.saturationSpin, self.brightnessSpin,
                          self.tintSpin, self.chromaSpin, self.powerSpin]
        for spin in self.spinCtrls:
            spin.Bind(wx.EVT_SPINCTRL, self.OnSpinCtrl)
#        self.Bind(wx.EVT_SPINCTRL, self.OnAlphaSpin, self.alphaSpin)
#        self.Bind(wx.EVT_BUTTON, self.OnOk, self.okButton)
#        self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)
        self.Bind(wx.EVT_BUTTON, self.OnSend, self.sendButton)
        self.Bind(wx.EVT_BUTTON, self.OnAddCustom, self.addCustom)
        self.Bind(wx.EVT_CHECKBOX, self.OnAutoSend)
#        self.Bind(wx.EVT_CHECKBOX, self.OnShowAlpha)
#        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyUp)
        self.Centre(wx.BOTH)
        # Defer geometry-dependent setup until the widgets have real sizes.
        wx.CallAfter(self.InitDialog)
def SetProperties(self):
""" Sets some initial properties for L{CubeColourDialog} (sizes, values). """
# self.okButton.SetDefault()
# self.oldColourPanel.SetMinSize((-1, 50))
self.newColourPanel.SetMinSize((-1, 50))
self.redSpin.SetMinSize((60, -1))
self.greenSpin.SetMinSize((60, -1))
self.blueSpin.SetMinSize((60, -1))
self.hueSpin.SetMinSize((60, -1))
self.saturationSpin.SetMinSize((60, -1))
self.brightnessSpin.SetMinSize((60, -1))
self.tintSpin.SetMinSize((60, -1))
self.chromaSpin.SetMinSize((60, -1))
self.powerSpin.SetMinSize((60, -1))
# self.alphaSpin.SetMinSize((60, -1))
# self.showAlpha.SetValue(1)
self.autoSend.SetValue(1)
# self.accessCode.SetInitialSize((80, -1))
# self.webSafe.SetInitialSize((80, -1))
# self.htmlCode.SetInitialSize((80, -1))
    def DoLayout(self):
        """ Layouts all the controls in the L{CubeColourDialog}. """
        # Top level: a vertical sizer holding one GridBagSizer of control groups.
        windowSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer = wx.GridBagSizer(10, 5)
        hsvValueSizer = wx.StaticBoxSizer(self.hsvValueSizer_staticbox, wx.VERTICAL)
        hsvGridSizer = wx.GridSizer(2, 3, 2, 10)
        rgbValueSizer = wx.StaticBoxSizer(self.rgbValueSizer_staticbox, wx.HORIZONTAL)
        rgbGridSizer = wx.GridSizer(2, 3, 2, 10)
        xyzValueSizer = wx.StaticBoxSizer(self.xyzValueSizer_staticbox, wx.HORIZONTAL)
        xyzGridSizer = wx.GridSizer(2, 3, 2, 10)
#        alphaValueSizer = wx.StaticBoxSizer(self.alphaValueSizer_staticbox, wx.VERTICAL)
#        alphaGridSizer = wx.BoxSizer(wx.VERTICAL)
        customSizer = wx.BoxSizer(wx.VERTICAL)
        buttonSizer = wx.BoxSizer(wx.VERTICAL)
        sendbuttonSizer = wx.BoxSizer(wx.VERTICAL)
        sendSizer = wx.BoxSizer(wx.HORIZONTAL)
        curcolSizer = wx.StaticBoxSizer(self.curcolSizer_staticbox, wx.VERTICAL)
        panelSizer = wx.BoxSizer(wx.VERTICAL)
#        htmlSizer1 = wx.BoxSizer(wx.HORIZONTAL)
#        htmlSizer2 = wx.BoxSizer(wx.VERTICAL)
#        htmlSizer_a = wx.BoxSizer(wx.VERTICAL)
#        htmlSizer_b = wx.BoxSizer(wx.VERTICAL)
        xyzSizer = wx.StaticBoxSizer(self.xyzSizer_staticbox, wx.HORIZONTAL)
        hsvSizer = wx.StaticBoxSizer(self.hsvSizer_staticbox, wx.HORIZONTAL)
        rgbSizer = wx.StaticBoxSizer(self.rgbSizer_staticbox, wx.VERTICAL)
#        autosendSizer = wx.StaticBoxSizer(self.autosendSizer_staticbox, wx.VERTICAL)
#        mainSizer.Add(self.showAlpha, (0, 0), (1, 1), wx.LEFT|wx.TOP, 10)
#        htmlLabel1 = wx.StaticText(self.mainPanel, -1, "HTML Code")
#        htmlLabel2 = wx.StaticText(self.mainPanel, -1, "Web Safe")
#        htmlSizer_a.Add(htmlLabel1, 0, wx.TOP, 3)
#        htmlSizer_b.Add(htmlLabel2, 0, wx.TOP, 3)
#        htmlSizer_a.Add(self.htmlCode, 0, wx.TOP, 3)
#        htmlSizer_b.Add(self.webSafe, 0, wx.TOP, 3)
#
#        htmlSizer1.Add(htmlSizer_a, 0)
#        htmlSizer1.Add(htmlSizer_b, 0, wx.LEFT, 10)
#        mainSizer.Add(htmlSizer1, (1, 0), (1, 1), wx.LEFT|wx.RIGHT, 10)
#        htmlLabel3 = wx.StaticText(self.mainPanel, -1, "HTML Name")
#        htmlSizer2.Add(htmlLabel3, 0, wx.TOP|wx.BOTTOM, 3)
#        htmlSizer2.Add(self.htmlName, 0)
#        mainSizer.Add(htmlSizer2, (1, 1), (1, 1), wx.LEFT|wx.RIGHT, 10)
        # Row 1, col 1: custom colour swatches plus "add" button.
        customLabel = wx.StaticText(self.mainPanel, -1, "Custom Colours")
        customSizer.Add(customLabel, 0, wx.BOTTOM, 3)
        customSizer.Add(self.customColours, 0)
        customSizer.Add(self.addCustom, 0, wx.TOP|wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL, 5)
        mainSizer.Add(customSizer, (1, 1), (1, 1),wx.LEFT|wx.RIGHT, 5)
#        panelSizer.Add(accessSizer, 0, wx.TOP, 5)
        # Row 2: the three colour-space pickers (RGB cube, HSV wheel, XYZ panel).
        xyzSizer.Add(self.xyPanel, 0, wx.ALL, 15)
        xyzSizer.Add(self.powerCtrl, 0, wx.RIGHT|wx.TOP|wx.BOTTOM, 15)
        mainSizer.Add(xyzSizer, (2, 2), (1, 1), wx.ALL|wx.EXPAND, 10)
        rgbSizer.Add(self.rgbBitmap, 0, wx.ALL, 15)
        mainSizer.Add(rgbSizer, (2, 0), (1, 1), wx.ALL|wx.EXPAND, 10)
        hsvSizer.Add(self.hsvBitmap, 0, wx.ALL, 15)
        hsvSizer.Add(self.brightCtrl, 0, wx.RIGHT|wx.TOP|wx.BOTTOM, 15)
        mainSizer.Add(hsvSizer, (2, 1), (1, 1), wx.ALL|wx.EXPAND, 10)
#        alphaSizer.Add(self.alphaCtrl, 0, wx.TOP|wx.ALIGN_CENTER, 15)
#        mainSizer.Add(alphaSizer, (2, 2), (1, 1), wx.ALL|wx.EXPAND, 10)
#        oldLabel = wx.StaticText(self.mainPanel, -1, "Old Colour")
#        panelSizer.Add(oldLabel, 0, wx.BOTTOM, 3)
#        panelSizer.Add(self.oldColourPanel, 0, wx.BOTTOM|wx.EXPAND, 20)
#        newLabel = wx.StaticText(self.mainPanel, -1, "New Colour")
#        accessLabel = wx.StaticText(self.mainPanel, -1, "MS Access Code")
#        accessSizer.Add(accessLabel, 0, wx.BOTTOM, 3)
#        accessSizer.Add(self.accessCode, 0)
        # Row 1, col 0: current-colour swatch with auto-send checkbox + Send button.
        sendbuttonSizer.Add(self.sendButton, 0,wx.TOP,10)
        curcolSizer.Add(self.newColourPanel, 0, wx.EXPAND)
        sendSizer.Add(self.autoSend)
        sendSizer.Add(sendbuttonSizer,0,wx.LEFT,20)
        curcolSizer.Add(sendSizer)
#        panelSizer.Add(newLabel, 0, wx.TOP, 3)
#        panelSizer.Add(autosendSizer, 0, wx.TOP)
#        panelSizer.Add((0, 0), 1, wx.EXPAND)
#        panelSizer.Add((1,0), 1, wx.BOTTOM)
#        panelSizer.Add(sendbuttonSizer, 0, wx.TOP, 5)
#        panelSizer.Add(autosendSizer, 0, wx.BOTTOM, 10)
        mainSizer.Add(curcolSizer, (1, 0), (1, 1), wx.ALL|wx.EXPAND, 10)
        # Row 3: numeric entry grids for RGB / HSB / XYZ values.
        redLabel = wx.StaticText(self.mainPanel, -1, "Red")
        rgbGridSizer.Add(redLabel, 0)
        greenLabel = wx.StaticText(self.mainPanel, -1, "Green")
        rgbGridSizer.Add(greenLabel, 0)
        blueLabel = wx.StaticText(self.mainPanel, -1, "Blue")
        rgbGridSizer.Add(blueLabel, 0)
        rgbGridSizer.Add(self.redSpin, 0, wx.EXPAND)
        rgbGridSizer.Add(self.greenSpin, 0, wx.EXPAND)
        rgbGridSizer.Add(self.blueSpin, 0, wx.EXPAND)
        rgbValueSizer.Add(rgbGridSizer, 1, 0, 0)
        mainSizer.Add(rgbValueSizer, (3, 0), (1, 1), wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
        hueLabel = wx.StaticText(self.mainPanel, -1, "Hue")
        hsvGridSizer.Add(hueLabel, 0)
        saturationLabel = wx.StaticText(self.mainPanel, -1, "Saturation")
        hsvGridSizer.Add(saturationLabel, 0)
        brightnessLabel = wx.StaticText(self.mainPanel, -1, "Brightness")
        hsvGridSizer.Add(brightnessLabel, 0)
        hsvGridSizer.Add(self.hueSpin, 0, wx.EXPAND)
        hsvGridSizer.Add(self.saturationSpin, 0, wx.EXPAND)
        hsvGridSizer.Add(self.brightnessSpin, 0, wx.EXPAND)
        hsvValueSizer.Add(hsvGridSizer, 1, wx.EXPAND)
        mainSizer.Add(hsvValueSizer, (3, 1), (1, 1), wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
        xyzValueSizer.Add(xyzGridSizer, 1, 0, 0)
        mainSizer.Add(xyzValueSizer, (3, 2), (1, 1), wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
        tintLabel = wx.StaticText(self.mainPanel, -1, "Tint")
        xyzGridSizer.Add(tintLabel, 0)
        chromaLabel = wx.StaticText(self.mainPanel, -1, "Chroma")
        xyzGridSizer.Add(chromaLabel, 0)
        powerLabel = wx.StaticText(self.mainPanel, -1, "Power")
        xyzGridSizer.Add(powerLabel, 0)
        xyzGridSizer.Add(self.tintSpin, 0, wx.EXPAND)
        xyzGridSizer.Add(self.chromaSpin, 0, wx.EXPAND)
        xyzGridSizer.Add(self.powerSpin, 0, wx.EXPAND)
#        alphaLabel = wx.StaticText(self.mainPanel, -1, "Alpha")
#        alphaGridSizer.Add(alphaLabel, 0)
#        alphaGridSizer.Add(self.alphaSpin, 0, wx.EXPAND|wx.TOP, 10)
#        alphaValueSizer.Add(alphaGridSizer, 1, wx.EXPAND)
#        mainSizer.Add(alphaValueSizer, (3, 2), (1, 1), wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, 10)
#        buttonSizer.Add(self.okButton, 0, wx.BOTTOM, 3)
        buttonSizer.Add(self.cancelButton, 0)
        mainSizer.Add(buttonSizer, (3, 3), (1, 1), wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT, 5)
        # Cancel button is created but hidden (frame has no modal semantics).
        mainSizer.Hide(buttonSizer)
        self.mainPanel.SetAutoLayout(True)
        self.mainPanel.SetSizer(mainSizer)
        mainSizer.Fit(self.mainPanel)
        mainSizer.SetSizeHints(self.mainPanel)
#        if self.GetAGWWindowStyleFlag() & ccdSource.CCD_SHOW_ALPHA == 0:
#            mainSizer.Hide(self.showAlpha)
#            mainSizer.Hide(alphaSizer)
#            mainSizer.Hide(alphaValueSizer)
        windowSizer.Add(self.mainPanel, 1, wx.EXPAND)
        self.SetAutoLayout(True)
        self.SetSizer(windowSizer)
        windowSizer.Fit(self)
        windowSizer.SetSizeHints(self)
        self.Layout()
        # Keep references for later show/hide adjustments.
        self.mainSizer = mainSizer
        self.windowSizer = windowSizer
#        self.alphaSizers = [alphaSizer, alphaValueSizer]
    def InitDialog(self):
        """ Initialize the L{CubeColourDialog}. """
        # Centres of the HSV wheel and XY panel, used for angle/position math.
        hsvRect = self.hsvBitmap.GetClientRect()
        self._centre = wx.Point(hsvRect.x + hsvRect.width/2, hsvRect.y + hsvRect.height/2)
        xyRect = self.xyPanel.GetClientRect()
        self._centre2 = wx.Point(xyRect.x + xyRect.width/2, xyRect.y + xyRect.height/2)
        # Edge lengths of the projected RGB cube axes.
        self._redLen = Distance(Vertex, Top)
        self._greenLen = Distance(Vertex, Left)
        self._blueLen = Distance(Vertex, Right)
        self.CalcSlopes()
        self.CalcCuboid()
        self.CalcRects()
        self.CalcRects2()
        self.SetSpinVals()
        self._initOver = True
        wx.CallAfter(self.Refresh)
def CalcSlopes(self):
""" Calculates the line slopes in the RGB colour cube. """
self._lines = {RED: LineDescription(), GREEN: LineDescription(), BLUE: LineDescription}
self._lines[RED].slope = Slope(Top, Vertex)
self._lines[GREEN].slope = Slope(Left, Vertex)
self._lines[BLUE].slope = Slope(Right, Vertex)
for i in xrange(3):
self._lines[i].x = Vertex.x
self._lines[i].y = Vertex.y
self._lines[i].c = FindC(self._lines[i])
def CalcCuboid(self):
""" Calculates the RGB | |
)
histogramBuilder.addVals( findSnpsMatchingConds( showHeadings = 'val', showVals = snpStat, **args ).val )
histogramBuilder.save( outFile )
def AddUpHistograms( histFiles, outFile, getio = None ):
    """Add up histograms from separate files, write results to new file"""
    outFileStats = AddFileSfx( outFile, 'stats' )
    # NOTE(review): outFileStats is declared in `creates` but is never written
    # below -- pipeline runs will see a missing output; confirm intent.
    if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileStats ),
                           attrs = dict( piperun_short = True ) )
    # Histogrammer.__add__ merges bin counts, so a simple reduce sums them all.
    sumHist = reduce( operator.add, map( Histogrammer.load, histFiles ) )
    sumHist.save( outFile )
def GraphHistograms( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = False,
                     cumulative = False,
                     cumulativeUpTo = None,
                     figSize = (24, 12 ),
                     subplots_adjust = {},
                     getio = None ):
    """Plot one or more histograms sharing the same bins.
    Params:
       normalizeHistograms - if true, for each histogram on the y-axis we plot not the number of
          items in a given bin, but their fraction out of the total number of items in that histogram.
          This lets us compare different histograms.
    NOTE(review): the `cumulative` parameter is immediately shadowed by the
    subplot loop below (which draws both a cumulative and a plain subplot),
    so the argument is effectively ignored -- confirm whether it should be honored.
    """
    #dbg( '"at_first" labels' )
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    # relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    outFile = AddFileSfx( outFile, sfx )
    # Default labels: histogram file basenames without extension.
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    if getio: return dict( depends_on = histFiles, creates = outFile,
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = figSize )
    #pp.clf()
    pp.subplots_adjust( **MergeDicts( dict( hspace = 0.3, bottom = 0.15 ), subplots_adjust ) )
    # Two stacked subplots: cumulative counts on top, plain counts below.
    for which, cumulative in enumerate( ( True, False ) ):
        pp.subplot( 2, 1, which + 1 )
        pp.xlabel( xlabel )
        pp.ylabel( ylabel )
        pp.hold( True )
        binSize = None
        binShift = None
        theLabels = []
        theHandles = []
        hists = map( Histogrammer.load, histFiles )
        if coarsenBy: hists = [ hist.coarsenBy( coarsenBy ) for hist in hists ]
        # Common bin-id range across all histograms (so bars align).
        allBinIds = reduce( operator.concat, [ hist.bin2count.keys() for hist in hists ] )
        if not allBinIds: allBinIds = ( 0, )
        minBinId = min( allBinIds )
        maxBinId = max( allBinIds ) + 1
        if cumulativeUpTo is not None:
            maxBinId = min( maxBinId, max( [ hist.getCumulativeBinFor( cumulativeUpTo ) for hist in hists ] ) ) + 1
        for color, label, ( histFileNum, hist ) in zip( colors, labels, enumerate( hists ) ):
            # check that all histograms we're loading have the same bins
            if binSize is None: binSize = hist.binSize
            else: assert abs( hist.binSize - binSize ) < 1e-12
            if binShift is None: binShift = hist.binShift
            else: assert abs( hist.binShift - binShift ) < 1e-12
            # Side-by-side bars: offset each file's bars within the bin width.
            width = binSize * relWidth / len( histFiles )
            left = np.array( hist.getAllBinLefts( minBinId = minBinId, maxBinId = maxBinId ) ) + histFileNum * width
            if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( left ) if i % ticksCoarsen == 0 ] )
            height = hist.getAllBinCounts( normed = normed, cumulative = cumulative,
                                           minBinId = minBinId, maxBinId = maxBinId )
            rects = pp.bar( height = height,
                            width = width * 0.95, **Dict( 'left color log' ) )
            if rects:
                # Annotate the legend with value / NaN / Inf counts.
                labelHere = label + ' (%d values)' % hist.getNumVals()
                if hist.getNumNaNs(): labelHere += ' (%d nans)' % hist.getNumNaNs()
                if hist.getNumInfs(): labelHere += ' (%d infs)' % hist.getNumInfs()
                rects[ 0 ].set_label( labelHere )
                theLabels.append( labelHere )
                theHandles.append( rects[0] )
        pp.title( title )
        if theLabels and theHandles:
            pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
        if xbound: pp.gca().set_xbound( *xbound )
        if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def GraphCumulPlots( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = True,
                     getio = None ):
    """Plot one or more cumulative plots.
    NOTE(review): outFileTable is rewritten by saveToSV on every loop iteration,
    so only the *last* histogram's points survive -- confirm whether all series
    should be saved.
    """
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    # relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    outFileTable = outFile + '.points.tsv'
    if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileTable ),
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = (18,6) )
    #pp.clf()
    pp.subplots_adjust( bottom = 0.37 )
    pp.xlabel( xlabel + '\n\n\n\n' )
    pp.ylabel( ylabel )
    pp.hold( True )
    binSize = None
    theLabels = []
    theHandles = []
    for color, label, ( histFileNum, histFile ) in zip( colors, labels, enumerate( histFiles ) ):
        hist = Histogrammer.load( histFile )
        if coarsenBy: hist = hist.coarsenBy( coarsenBy )
        # All histograms must share the same bin size to overlay sensibly.
        if not binSize: binSize = hist.binSize
        else:
            if not abs( hist.binSize - binSize ) < 1e-12:
                dbg( 'hist.binSize binSize hist.binSize-binSize' )
            assert abs( hist.binSize - binSize ) < 1e-12
        binLefts = hist.getBinLefts()
        if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( binLefts ) if i % ticksCoarsen == 0 ] )
        binCounts = hist.getBinCounts( normed = normed, cumulative = True )
        rects = pp.plot( binLefts, binCounts, label = label, color = color )
        DotData( names = ( 'binLefts', 'binCounts' ), Columns = ( binLefts, binCounts ) ).saveToSV( outFileTable )
        if rects:
            theLabels.append( label )
            theHandles.append( rects )
    pp.title( title )
    if theLabels and theHandles:
        pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
    if xbound: pp.gca().set_xbound( *xbound )
    if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def DefineRulesTo_histogramSnpStatistic( pr, Ddata,
outFile, snpTables, snpStat, binSize,
binShift = 0.0,
scen2sfxs = lambda scen: '',
scenCond = 'True',
allScens = GetScenarios(),
nreplicas = 100, thinSfx = '', replicaTables = (),
replicaConds = 'True', replicaCondsSfxs = '',
snpConds = 'True', snpCondsSfxs = '', title = '', titlePrefix = '',
xlabel = '', ylabel = '',
xbound = None, ybound = None, log = False, coarsenBy = None, sfx = '',
ticksCoarsen = 1, cumulative = False, normed = False,
colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
subplots_adjust = {},
name = None ):
"""A generic way to plot the distribution of some per-snp statistics for some subset of SNPs.
Params:
statTable - the name of the per-snp statistics table. we assume there is a file called
Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
statCol - column name to histogram.
Notes:
- for histogramming should not need to load it all into memory. can do a pre-pass to just get
the range of values, define the bins, then do a second pass to count what goes in what bin.
could also add bins as we go. so, really just need to know bin size, and then can do all this
with one pass. can also, later, make this automatically parallelized.
"""
if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
scenCondExpr = compile_expr( scenCond )
replicaConds = MakeSeq( replicaConds )
replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
snpConds = MakeSeq( snpConds )
snpCondsSfxs = MakeSeq( snpCondsSfxs )
totaledHistFiles = []
totaledLabels = []
outFile = AddFileSfx( outFile, sfx )
baseOutFile = outFile
for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
for snpCond, snpCondSfx in zip( snpConds, snpCondsSfxs ):
histFiles = []
for scen in allScens:
if not eval( scenCondExpr, globals(), ScenAttrs( scen ) ): continue
scenDir = scen.scenDir()
for scenSfx in MakeSeq( scen2sfxs( scen ) if callable( scen2sfxs ) else scen2sfxs[ scen ] | |
patch
    def save_image_batch(self, cv, z_range, float_patch, bbox, mip, to_uint8=True):
        """Write a batch of float images into CV[MIP] over Z_RANGE within BBOX,
        optionally scaling float values by 255 and casting to uint8.

        Assumes float_patch axes are (N, C, X, Y) so the transpose below yields
        CloudVolume's (X, Y, Z, channel) layout -- TODO confirm with callers.
        """
        x_range = bbox.x_range(mip=mip)
        y_range = bbox.y_range(mip=mip)
        print("type of float_patch", type(float_patch), "shape", float_patch.shape)
        # Reorder to CloudVolume's (X, Y, Z, channel) layout.
        patch = np.transpose(float_patch, (2,3,0,1))
#        patch = np.transpose(float_patch, (2,1,0))[..., np.newaxis]
        if to_uint8:
            patch = (np.multiply(patch, 255)).astype(np.uint8)
        print("patch shape", patch.shape)
        cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1],
                z_range[0]:z_range[1]] = patch
    def append_image(self, float_patch, cv, z, bbox, mip, to_uint8=True):
        """Accumulate (add) a single image into CV[MIP] at section Z within BBOX.

        Read-modify-write: the existing CloudVolume contents are read back and
        the (optionally uint8-scaled) patch is added to them.
        """
        x_range = bbox.x_range(mip=mip)
        y_range = bbox.y_range(mip=mip)
        patch = np.transpose(float_patch, (2,3,0,1))
        #print("----------------z is", z, "save image patch at mip", mip, "range", x_range, y_range, "range at mip0", bbox.x_range(mip=0), bbox.y_range(mip=0))
        if to_uint8:
            patch = (np.multiply(patch, 255)).astype(np.uint8)
        cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z] = cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z] + patch
    def append_image_batch(self, cv, z_range, float_patch, bbox, mip, to_uint8=True):
        """Accumulate (add) a batch of images into CV[MIP] over Z_RANGE within BBOX.

        Batch version of append_image: reads back the existing CloudVolume
        contents and adds the (optionally uint8-scaled) patch.
        """
        x_range = bbox.x_range(mip=mip)
        y_range = bbox.y_range(mip=mip)
        print("type of float_patch", type(float_patch), "shape", float_patch.shape)
        # Reorder to CloudVolume's (X, Y, Z, channel) layout.
        patch = np.transpose(float_patch, (2,3,0,1))
#        patch = np.transpose(float_patch, (2,1,0))[..., np.newaxis]
        if to_uint8:
            patch = (np.multiply(patch, 255)).astype(np.uint8)
        print("patch shape", patch.shape)
        cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z_range[0]:z_range[1]] = cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z_range[0]:z_range[1]] + patch
#######################
# Field IO + handlers #
#######################
    def get_field(self, cv, z, bbox, mip, relative=False, to_tensor=True, as_int16=True):
        """Retrieve vector field from CloudVolume.
        Args
           CV: MiplessCloudVolume storing vector field as MIP0 residuals in X,Y,Z,2 order
           Z: int for section index
           BBOX: BoundingBox for X & Y extent of the field to retrieve
           MIP: int for resolution at which to pull the vector field
           RELATIVE: bool indicating whether to convert MIP0 residuals to relative residuals
            from [-1,1] based on residual location within shape of the BBOX
           TO_TENSOR: bool indicating whether to return FIELD as a torch tensor
           AS_INT16: bool indicating the field is stored as 4x fixed-point int16
            and must be divided by 4 after load (inverse of save_field's *4 encoding)
        Returns
           FIELD: vector field with dimensions of BBOX at MIP, with RELATIVE residuals &
            as TO_TENSOR, using convention (Z,Y,X,2)
        Note that the grid convention for torch.grid_sample is (N,H,W,2), where the
        components in the final dimension are (x,y). We are NOT altering it here.
        """
        x_range = bbox.x_range(mip=mip)
        y_range = bbox.y_range(mip=mip)
        print('get_field from {bbox}, z={z}, MIP{mip} to {path}'.format(bbox=bbox,
                                                                        z=z, mip=mip, path=cv.path))
        field = cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z]
        # Move Z to the front: CloudVolume's (X,Y,Z,2) -> (Z,X,Y,2).
        field = np.transpose(field, (2,0,1,3))
        if as_int16:
            # Undo the 4x fixed-point int16 encoding (see save_field).
            field = np.float32(field) / 4
        if relative:
            field = self.abs_to_rel_residual(field, bbox, mip)
        if to_tensor:
            field = torch.from_numpy(field)
            return field.to(device=self.device)
        else:
            return field
    def save_field(self, field, cv, z, bbox, mip, relative, as_int16=True):
        """Save vector field to CloudVolume.
        Args
           field: ndarray vector field with dimensions of bbox at mip with absolute MIP0
            residuals, using grid_sample convention of (Z,Y,X,2), where the components in
            the final dimension are (x,y).
           cv: MiplessCloudVolume to store vector field as MIP0 residuals in X,Y,Z,2 order
           z: int for section index
           bbox: BoundingBox for X & Y extent of the field to be stored
           mip: int for resolution at which to store the vector field
           relative: bool indicating whether to convert MIP0 residuals to relative residuals
            from [-1,1] based on residual location within shape of the bbox
           as_int16: bool indicating whether vectors should be saved as int16
        """
        if relative:
            # Scale relative [-1,1] residuals back to absolute units at this MIP.
            field = field * (field.shape[-2] / 2) * (2**mip)
#        field = field.data.cpu().numpy()
        x_range = bbox.x_range(mip=mip)
        y_range = bbox.y_range(mip=mip)
        # Move Z to the third axis for CloudVolume storage -- NOTE(review): with
        # (Z,Y,X,2) input this yields (Y,X,Z,2); confirm against the stated
        # X,Y,Z,2 storage convention.
        field = np.transpose(field, (1,2,0,3))
        print('save_field for {0} at MIP{1} to {2}'.format(bbox.stringify(z),
                                                           mip, cv.path))
        if as_int16:
            if(np.max(field) > 8192 or np.min(field) < -8191):
                print('Value in field is out of range of int16 max: {}, min: {}'.format(
                      np.max(field),np.min(field)), flush=True)
            # Store as 4x fixed-point int16 (quarter-unit precision).
            field = np.int16(field * 4)
        #print("**********field shape is ", field.shape, type(field[0,0,0,0]))
        cv[mip][x_range[0]:x_range[1], y_range[0]:y_range[1], z] = field
def rel_to_abs_residual(self, field, mip):
    """Convert a vector field from relative [-1,1] space to absolute MIP0 space."""
    half_extent = field.shape[-2] / 2
    mip_scale = 2**mip
    return field * half_extent * mip_scale
def abs_to_rel_residual(self, field, bbox, mip):
    """Convert a vector field from absolute MIP0 space to relative [-1,1] space.

    The input is copied, so the caller's array is left untouched.
    """
    half_x = 0.5 * bbox.x_size(mip=0)
    half_y = 0.5 * bbox.y_size(mip=0)
    rel_residual = deepcopy(field)
    rel_residual[:, :, :, 0] /= half_x
    rel_residual[:, :, :, 1] /= half_y
    return rel_residual
def avg_field(self, field):
    """Mean of `field` over its nonzero entries (self.eps guards division by zero)."""
    nonzero_count = torch.nonzero(field).size(0)
    return field.sum() / (nonzero_count + self.eps)
def profile_field(self, field):
    """Summarize a field by the mean nonzero displacement in x and y (first batch)."""
    mean_x = self.avg_field(field[0, ..., 0])
    mean_y = self.avg_field(field[0, ..., 1])
    return torch.Tensor([mean_x, mean_y])
#############################
# CloudVolume chunk methods #
#############################
def compute_field_chunk(self, model_path, src_cv, tgt_cv, src_z, tgt_z, bbox, mip, pad,
                        src_mask_cv=None, src_mask_mip=0, src_mask_val=0,
                        tgt_mask_cv=None, tgt_mask_mip=0, tgt_mask_val=0,
                        tgt_alt_z=None, prev_field_cv=None, prev_field_z=None,
                        prev_field_inverse=False):
    """Run inference with a SEAMLeSS model on two images stored as CloudVolume regions.

    Args:
        model_path: str, relative path to the model directory
        src_cv: MiplessCloudVolume with the source image
        tgt_cv: MiplessCloudVolume with the target image
        src_z: int, section to be warped
        tgt_z: int, section to be warped to
        bbox: BoundingBox for the region of both sections to process
        mip: int, MIP level to use for bbox
        pad: int, padding added to bbox before processing and cropped back
            off the model output
        src_mask_cv, src_mask_mip, src_mask_val: optional mask for the src image
        tgt_mask_cv, tgt_mask_mip, tgt_mask_val: optional mask for the tgt image
        tgt_alt_z: optional int or iterable of ints with alternative target slices
        prev_field_cv: if specified, a MiplessCloudVolume containing the
            previously predicted field, used to profile and displace the src chunk
        prev_field_z: section index of the previous field
        prev_field_inverse: bool; if True, negate the previous field before profiling

    Returns:
        field with MIP0 residuals with the shape of bbox at MIP mip (np.ndarray)
    """
    archive = self.get_model_archive(model_path)
    model = archive.model
    normalizer = archive.preprocessor
    print('compute_field for {0} to {1}'.format(bbox.stringify(src_z),
                                                bbox.stringify(tgt_z)))
    print('pad: {}'.format(pad))
    padded_bbox = deepcopy(bbox)
    padded_bbox.max_mip = mip
    padded_bbox.uncrop(pad, mip=mip)
    if prev_field_cv is not None:
        # shift the source chunk by the mean displacement of the previous field
        field = self.get_field(prev_field_cv, prev_field_z, padded_bbox, mip,
                               relative=False, to_tensor=True)
        if prev_field_inverse:
            field = -field
        distance = self.profile_field(field)
        print('Displacement adjustment: {} px'.format(distance))
        # snap the displacement to a multiple of the pixel size at this MIP
        distance = (distance // (2 ** mip)) * 2 ** mip
        new_bbox = self.adjust_bbox(padded_bbox, distance.flip(0))
    else:
        distance = torch.Tensor([0, 0])
        new_bbox = padded_bbox
    # normalize tgt_z into a list, optionally extended with alternative slices
    tgt_z = [tgt_z]
    if tgt_alt_z is not None:
        try:
            tgt_z.extend(tgt_alt_z)
        except TypeError:
            # tgt_alt_z is a single int rather than an iterable
            tgt_z.append(tgt_alt_z)
        print('alternative target slices:', tgt_alt_z)
    src_patch = self.get_masked_image(src_cv, src_z, new_bbox, mip,
                                      mask_cv=src_mask_cv, mask_mip=src_mask_mip,
                                      mask_val=src_mask_val,
                                      to_tensor=True, normalizer=normalizer)
    tgt_patch = self.get_composite_image(tgt_cv, tgt_z, padded_bbox, mip,
                                         mask_cv=tgt_mask_cv, mask_mip=tgt_mask_mip,
                                         mask_val=tgt_mask_val,
                                         to_tensor=True, normalizer=normalizer)
    print('src_patch.shape {}'.format(src_patch.shape))
    print('tgt_patch.shape {}'.format(tgt_patch.shape))
    # Running the model is the only part that will increase memory consumption
    # significantly - only incrementing the GPU lock here should be sufficient.
    if self.gpu_lock is not None:
        self.gpu_lock.acquire()
        print("Process {} acquired GPU lock".format(os.getpid()))
    try:
        print("GPU memory allocated: {}, cached: {}".format(torch.cuda.memory_allocated(), torch.cuda.memory_cached()))
        # model produces field in relative coordinates
        field = model(src_patch, tgt_patch)
        print("GPU memory allocated: {}, cached: {}".format(torch.cuda.memory_allocated(), torch.cuda.memory_cached()))
        field = self.rel_to_abs_residual(field, mip)
        # crop the padding off the output; NOTE(review): with pad == 0 this
        # slice ([0:-0]) would be empty — assumes callers pass pad > 0, confirm
        field = field[:, pad:-pad, pad:-pad, :]
        # re-apply the displacement profiled from the previous field
        field += distance.to(device=self.device)
        field = field.data.cpu().numpy()
        # clear unused, cached memory so that other processes can allocate it
        torch.cuda.empty_cache()
        print("GPU memory allocated: {}, cached: {}".format(torch.cuda.memory_allocated(), torch.cuda.memory_cached()))
    finally:
        if self.gpu_lock is not None:
            print("Process {} releasing GPU lock".format(os.getpid()))
            self.gpu_lock.release()
    return field
def predict_image(self, cm, model_path, src_cv, dst_cv, z, mip, bbox,
                  chunk_size):
    """Break bbox into chunks and create one PredictImgTask per chunk.

    Returns the list of tasks (one per chunk) for the caller to schedule.
    """
    start = time()
    chunks = self.break_into_chunks(bbox, chunk_size,
                                    cm.dst_voxel_offsets[mip], mip=mip,
                                    max_mip=cm.num_scales)
    print("\nfold detect\n"
          "model {}\n"
          "src {}\n"
          "dst {}\n"
          "z={} \n"
          "MIP{}\n"
          "{} chunks\n".format(model_path, src_cv, dst_cv, z,
                               mip, len(chunks)), flush=True)
    return [tasks.PredictImgTask(model_path, src_cv, dst_cv, z, mip, patch_bbox)
            for patch_bbox in chunks]
def predict_image_chunk(self, model_path, src_cv, z, mip, bbox):
    """Load one image chunk, run the single-image model on it, and return the output."""
    archive = self.get_model_archive(model_path, readonly=2)
    image = self.get_image(src_cv, z, bbox, mip, to_tensor=True)
    return archive.model(image)
def vector_vote_chunk(self, pairwise_cvs, vvote_cv, z, bbox, mip,
inverse=False, serial=True, softmin_temp=None,
blur_sigma=None):
"""Compute consensus vector field using pairwise vector fields with earlier sections.
Vector voting requires that vector fields be composed to a common section
before comparison: inverse=False means that the comparison will be based on
composed vector fields F_{z,compose_start}, while inverse=True will be
F_{compose_start,z}.
TODO:
Reimplement field_cache
Args:
pairwise_cvs: dict of MiplessCloudVolumes, indexed by their z_offset
vvote_cv: MiplessCloudVolume where vector-voted field will be stored
z: int for section index to be vector voted
bbox: BoundingBox for region where all fields will be loaded/written
mip: int for MIP level of fields
softmin_temp: softmin temperature (default will be 2**mip)
inverse: bool indicating if pairwise fields are to be treated as inverse fields
serial: bool indicating to if a previously composed field is
not necessary
softmin_temp: temperature to use for the softmin in vector voting; default None
will use formula based on MIP level
blur_sigma: std dev of Gaussian kernel by which to blur the vector vote inputs;
default None means no blurring
"""
fields = []
for z_offset, f_cv in pairwise_cvs.items():
if serial:
F = self.get_field(f_cv, z, bbox, mip, relative=False, to_tensor=True)
else:
G_cv = vvote_cv
if | |
# -*- coding: utf-8 -*-
import time
from time import strftime, localtime
from datetime import datetime
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
import logging
logger = logging.getLogger(__name__)
# todo: hooks should also have prefixes so that one can use the same hook with different parameters
class Hook(object):
    """Serves as Hook interface.

    A hook is a callable invoked by the training loop; concrete hooks must
    implement both the constructor and __call__.
    """
    def __init__(self):
        # abstract: concrete hooks define their own state
        raise NotImplementedError
    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        # invoked by the training loop with the current session/epoch/loss
        raise NotImplementedError
class TraceHook(object):
    """Abstract hook that can write scalar summaries to a summary writer."""

    def __init__(self, summary_writer=None):
        self.summary_writer = summary_writer

    def __tag__(self):
        raise NotImplementedError

    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        raise NotImplementedError

    def at_epoch_end(self, *args, **kwargs):
        """Do nothing at the end of an epoch by default."""
        pass

    def at_iteration_end(self, *args, **kwargs):
        """Fire the hook after every iteration by default."""
        self.__call__(*args, **kwargs)

    def update_summary(self, sess, current_step, title, value):
        """Add a (title, value) scalar summary to the writer, if one is set.

        Args:
            sess (TensorFlow session): The TensorFlow session object.
            current_step (int): Current step in the training procedure.
            title (string): The title of the summary.
            value (float): Scalar value for the message.
        """
        if self.summary_writer is None:
            return
        summary = tf.Summary(value=[
            tf.Summary.Value(tag=title, simple_value=value),
        ])
        self.summary_writer.add_summary(summary, current_step)
class LossHook(TraceHook):
    """Accumulates the training loss and periodically logs/summarizes the mean."""

    def __init__(self, iter_interval, batch_size, summary_writer=None):
        # TODO(dirk): Why batch_size as parameter? loss should be batch
        # normalized anyway during training and when it comes in here.
        super(LossHook, self).__init__(summary_writer)
        self.iter_interval = iter_interval
        self.batch_size = batch_size
        self.acc_loss = 0
        self.iter = 0

    def __tag__(self):
        return "Loss"

    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        """Accumulate loss; every iter_interval calls, log the mean and add it to the summary."""
        self.iter += 1
        self.acc_loss += loss / self.batch_size
        if self.iter != 0 and self.iter % self.iter_interval == 0:
            mean_loss = self.acc_loss / self.iter_interval
            logger.info("Epoch {}\tIter {}\tLoss {}".format(str(epoch), str(self.iter), str(mean_loss)))
            self.update_summary(sess, self.iter, self.__tag__(), mean_loss)
            self.acc_loss = 0
class TensorHook(TraceHook):
    def __init__(self, iter_interval, tensorlist, feed_dicts=None,
                 summary_writer=None, modes=['mean_abs'], prefix="",
                 global_statistics=False):
        """
        Evaluate the tf.Tensor objects in `tensorlist` during training (every `iter_interval` iterations),
        and calculate statistics on them (in `modes`): 'mean_abs', 'std', 'min', and/or 'max'.
        Additionally, the `print` mode prints the entire tensor to stdout.
        If feed_dicts is a generator or iterator over feed_dicts (e.g. to iterate over the entire dev-set),
        each tensor in `tensorlist` is evaluated and concatenated for each feed_dict,
        before calculating the scores for the different `modes`.
        If it's a single feed_dict or `None`, only one evaluation is done.
        """
        # NOTE(review): mutable default argument modes=['mean_abs'] is shared
        # across instances — harmless only while no caller mutates it; verify.
        super(TensorHook, self).__init__(summary_writer)
        self.iter_interval = iter_interval
        self.tensorlist = tensorlist
        self.feed_dicts = {} if feed_dicts is None else feed_dicts
        self.modes = modes
        self.iter = 0
        self.prefix = prefix
        # NOTE(review): 'global_statastics' is a typo for 'global_statistics',
        # preserved because external code may read the attribute.
        self.global_statastics = global_statistics
        if self.global_statastics:
            # flatten every tensor and stack them for combined statistics;
            # presumably all tensors flatten to the same length — TODO confirm,
            # tf.stack requires equal shapes
            self.tensor = tf.stack([tf.reshape(t, [-1]) for t in self.tensorlist])
    def __tag__(self):
        return self.prefix + "Tensor"
    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        self.iter += 1
        if not self.iter == 0 and self.iter % self.iter_interval == 0:
            if self.global_statastics:
                # NOTE(review): these ops are re-created on every interval,
                # growing the TF graph over time — consider building them once.
                mean = tf.reduce_mean(tf.abs(self.tensor))
                max = tf.reduce_max(self.tensor)
                min = tf.reduce_min(self.tensor)
                # sum = tf.reduce_sum(self.tensor)
                norm = tf.norm(self.tensor)
                mean_val, max_val, min_val, norm_val = \
                    sess.run([mean, max, min, norm],
                             feed_dict=current_feed_dict)
                self.update_summary(sess, self.iter, self.__tag__() + '_mean_abs', mean_val)
                self.update_summary(sess, self.iter, self.__tag__() + '_max', max_val)
                self.update_summary(sess, self.iter, self.__tag__() + '_min', min_val)
                # self.update_summary(sess, self.iter, self.__tag__() + '_sum', sum_val)
                self.update_summary(sess, self.iter, self.__tag__() + '_norm', norm_val)
            else:
                for tensor in self.tensorlist:
                    tag = tensor.name
                    if isinstance(self.feed_dicts, dict):
                        # single feed_dict: evaluate once
                        t = sess.run(tensor, feed_dict=self.feed_dicts)
                    else:
                        # iterator of feed_dicts: evaluate for each and concatenate
                        for i, feed_dict in enumerate(self.feed_dicts):
                            t_i = sess.run(tensor, feed_dict=feed_dict)
                            if not hasattr(t_i, '__len__'):
                                t_i = [t_i]
                            t = t_i if i == 0 else np.concatenate([t, t_i], axis=0)
                    if 'mean_abs' in self.modes:
                        # NOTE(review): computes np.mean(t), not np.mean(np.abs(t)),
                        # despite the '_mean_abs' name — verify intent.
                        value_mean = float(np.mean(t))
                        self.update_summary(sess, self.iter, tag + '_mean_abs', value_mean)
                    if 'std' in self.modes:
                        value_std = float(np.std(t))
                        self.update_summary(sess, self.iter, tag + '_std', value_std)
                    if 'min' in self.modes:
                        value_min = float(np.min(t))
                        self.update_summary(sess, self.iter, tag + '_min', value_min)
                    if 'max' in self.modes:
                        value_max = float(np.max(t))
                        self.update_summary(sess, self.iter, tag + '_max', value_max)
                    if 'print' in self.modes:  # for debug purposes
                        logger.info('\n{}\n{}\n'.format(tag, str(t)))
class ExamplesPerSecHook(TraceHook):
    """Measures training throughput and reports examples per second."""

    def __init__(self, iter_interval, batch_size, summary_writer=None):
        super(ExamplesPerSecHook, self).__init__(summary_writer)
        self.iter_interval = iter_interval
        self.batch_size = batch_size
        # examples processed per reporting window
        self.num_examples = iter_interval * batch_size
        self.t0 = time.time()
        self.iter = 0
        self.reset = True

    def __tag__(self):
        return "Speed"

    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        """Every iter_interval iterations, log examples/sec and add it to the summary."""
        self.iter += 1
        if self.reset:
            # first call after an epoch boundary: restart the clock
            self.t0 = time.time()
            self.reset = False
        elif self.iter % self.iter_interval == 0:
            elapsed = time.time() - self.t0
            speed = "%.2f" % (self.num_examples / elapsed)
            logger.info("Epoch {}\tIter {}\tExamples/s {}".format(str(epoch), str(self.iter), str(speed)))
            self.update_summary(sess, self.iter, self.__tag__(), float(speed))
            self.t0 = time.time()

    def at_epoch_end(self, *args, **kwargs):
        # To eliminate a drop in measured speed due to post-epoch hooks,
        # do not measure here; just arm the timer for the next epoch.
        self.reset = True
        return

    def at_iteration_end(self, sess, epoch, model, loss, current_feed_dict=None):
        return self.__call__(sess, epoch, model, loss, current_feed_dict)
class ETAHook(TraceHook):
    """Estimates ETA from max_iter vs current_iter."""

    def __init__(self, iter_interval, max_epochs, iter_per_epoch,
                 summary_writer=None):
        super(ETAHook, self).__init__(summary_writer)
        self.iter_interval = iter_interval
        self.max_epochs = max_epochs
        self.max_iters = max_epochs * iter_per_epoch
        self.iter = 0
        self.epoch = 1
        self.start = time.time()
        # re-estimate max_iters after the first epoch from the observed count
        self.reestimate = True

    def __tag__(self):
        return "ETA"

    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        """Every iter_interval iterations, log progress, ETA and expected finish date."""
        self.iter += 1
        if self.iter == 0 or self.iter % self.iter_interval != 0:
            return
        progress = float(self.iter) / self.max_iters
        now = time.time()
        elapsed = now - self.start
        # NOTE(review): (1 - progress) * elapsed underestimates the remaining
        # time (the usual estimate is elapsed * (1 - progress) / progress) —
        # preserved as-is, verify intent.
        eta = (1 - progress) * elapsed
        eta_date = strftime("%y-%m-%d %H:%M:%S", localtime(now + eta))

        def format_eta(seconds):
            # render seconds as zero-padded HH:MM:SS; infinity means "never"
            if seconds == float("inf"):
                return "never"
            seconds, _ = divmod(seconds, 1)
            minutes, seconds = divmod(seconds, 60)
            hours, minutes = divmod(minutes, 60)
            return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))

        logger.info("Epoch %d\tIter %d\tETA in %s [%2.2f" %
                    (epoch, self.iter, format_eta(eta), progress * 100) +
                    "%] " + eta_date)
        self.update_summary(sess, self.iter, self.__tag__(), float(eta))
        self.update_summary(sess, self.iter, self.__tag__() + "_progress", progress)

    def at_epoch_end(self, *args, **kwargs):
        if self.reestimate:
            # after the first epoch the true iterations/epoch is known
            self.max_iters = self.max_epochs * self.iter
            self.reestimate = False
class AccuracyHook(TraceHook):
    # todo: will be deprecated; less general (e.g. for binary vectors in multi-label problems etc).
    # todo: accuracy already covered by EvalHook
    def __init__(self, batches, predict, target, at_every_epoch=1,
                 placeholders=None, prefix="", summary_writer=None):
        """Computes accuracy over `batches` once per `at_every_epoch` epochs.

        Args:
            batches: iterable of batches; feed_dicts if placeholders is None,
                otherwise value tuples zipped with `placeholders`.
            predict: tf op returning predictions.
            target: feed_dict key (placeholder) holding the gold labels.
            at_every_epoch: evaluate only when epoch is a multiple of this.
            placeholders: optional placeholders paired with batch values.
            prefix: prepended to the summary tag.
        """
        super(AccuracyHook, self).__init__(summary_writer)
        self.batches = batches
        self.predict = predict
        self.target = target
        self.at_every_epoch = at_every_epoch
        self.placeholders = placeholders
        # guards against evaluating more than once per epoch
        self.done_for_epoch = False
        self.iter = 0
        self.prefix = prefix
    def __tag__(self):
        return self.prefix + "Acc"
    def __call__(self, sess, epoch, model, loss, current_feed_dict=None):
        self.iter += 1
        if epoch % self.at_every_epoch == 0 and loss==0: #hacky: force to be post-epoch
            if not self.done_for_epoch:
                total = 0
                correct = 0
                for i, batch in enumerate(self.batches):
                    if self.placeholders is not None:
                        feed_dict = dict(zip(self.placeholders, batch))
                    else:
                        feed_dict = batch
                    predicted = sess.run(self.predict, feed_dict=feed_dict)
                    target = feed_dict[self.target]
                    # NOTE(review): np.argmax(target) without axis= reduces over
                    # the flattened array, yielding a scalar for a whole batch —
                    # verify this matches the intended per-example argmax.
                    gold = target if np.shape(target) == np.shape(predicted) else np.argmax(target)
                    overlap = gold == predicted
                    # todo: extend further, because does not cover all likely cases yet
                    #overlap = np.argmax(feed_dict[self.target]) == predicted
                    # correct += np.sum(overlap, axis=0)
                    correct += np.sum(overlap)
                    total += predicted.size
                acc = float(correct) / total * 100
                self.update_summary(sess, self.iter, self.__tag__(), acc)
                logger.info("Epoch {}\tAcc {:.2f}\tCorrect {}\tTotal {}".format(str(epoch), acc, str(correct), str(total)))
                self.done_for_epoch = True
        else:
            # not a post-epoch call: re-arm for the next epoch boundary
            self.done_for_epoch = False
    def at_epoch_end(self, sess, epoch, model, loss):
        if epoch % self.at_every_epoch == 0:
            self.__call__(sess, epoch, model, loss)
        else:
            return
class EvalHook(TraceHook):
"""Hook which applies various metrics, such as recall, precision, F1.
To be used during training on dev-data, and after training on test-data.
"""
def __init__(self, batches, logits, predict, target, at_every_epoch=1, placeholders=None,
metrics=[], summary_writer=None, print_details=False,
write_metrics_to="", print_to="", info="", iter_interval=1,
side_effect=None, epoch_interval=1):
"""
Initialize EvalHook object.
Calling the hook prints calculated metrics to stdout, and returns targets, predictions, and a metrics dict.
Meant as post-epoch hook; hence the argument `post_epoch=True` required when calling the hook.
Args:
batches:
iterator / generator of batches; assumed each batch is a proper feed_dict in case placeholders=None
otherwise paired with the placeholders to form feed_dicts.
logits:
tf op with logits
predict:
tf op that returns binary predictions, either for each instance as the index of the predicted answer (if unique)
otherwise as a tensor of | |
<gh_stars>0
import pickle
import os.path
import sys, getopt
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import html
from collections import namedtuple
from dataclasses import dataclass
from typing import List
from rdf_serializer import serialize_rdf
# NOTE: there is a main() function, start reading from there
# There are times when it seems like a very bad imitation of PHP
# Ideally, I would have used templates (Jinja2) to write the data
# But given time constraints, I saved the lines to a list
# and then write them at the end to a file (persist)
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
DOCID = '13d1eRXZZBCw84vYGoCJeMU08rzkkzadDzxY3n2iOi8k'
SHEET = 'BaseOntology'
NAMESPACE = "dpv:"
# These are the columns in the sheet
FIELDS = (
"type", "term", "description", "domain", "range", "super", "sub",
"related_terms", "related_how", "comments",
"source", "created", "status", "rdfs_comments", "contributor", "approved", "resolution")
Template = namedtuple('Template', FIELDS)
filecontent = []
def download_data(SHEET_NAME):
    """Download data from Google Sheets.

    Args:
        SHEET_NAME: name of the tab in the spreadsheet (DOCID) to read;
            columns A:Q are fetched to match FIELDS.

    Returns:
        A list of rows (each a list of cell strings); empty if no data found.

    Side effects:
        Reads/writes token.pickle for OAuth credentials and may open a
        browser window for the authorization flow.
    """
    creds = None
    SHEET_RANGE = SHEET_NAME + '!A:Q'
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server()
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    # Call the Sheets API
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=DOCID,
                                range=SHEET_RANGE).execute()
    values = result.get('values', [])
    if not values:
        print('No data found.')
    return values
def extract_classes_properties(data):
    """Split raw sheet rows into Template records for classes and properties.

    Rows whose first cell is 'Class' or 'Property' are cleaned and wrapped
    in Template; everything else (including empty rows) is skipped.
    """
    def _clean(row):
        # strip surrounding whitespace and embedded newlines, then pad the
        # row so it has one entry per column in FIELDS
        cells = [cell.strip().replace('\n', ' ') for cell in row]
        missing = len(FIELDS) - len(cells)
        if missing > 0:
            cells = cells + [''] * missing
        return cells

    classes = []
    properties = []
    for row in data:
        if not len(row):
            continue
        kind = row[0]
        if kind == 'Class':
            classes.append(Template(*_clean(row)))
        elif kind == 'Property':
            properties.append(Template(*_clean(row)))
    return classes, properties
def document_toc(classes, properties):
    """Placeholder for generating a table of contents; currently does nothing."""
    # print(f'<dt><a href="rdf/{SHEET}.ttl">rdf/turtle serialization</a></dt>')
    pass
def document_classes(classes, properties):
    """Append the HTML documentation section for all classes to `filecontent`.

    Args:
        classes: list of Template records with type 'Class'.
        properties: list of Template records, scanned to list each class's
            appearances in property domains and ranges.

    Side effects:
        Mutates the module-level `filecontent` list and sorts `classes` in place.
    """
    filecontent.append(f'<section id="{SHEET.lower()}-classes">')
    filecontent.append('<h3>Classes</h3>')
    if not classes:
        return
    classes.sort(key=lambda c: c.term)
    # table of contents: one link per class term
    # NOTE(review): href="{item.term}" looks like it should be the in-page
    # anchor href="#{item.term}" (the headings below use id={term}) — verify.
    string = "<p>\n"
    string += ' | \n'.join((
        f'<code><a href="{item.term}">:{item.term.split(":")[1]}</a></code>'
        for item in classes))
    string += "\n</p>\n"
    filecontent.append(string)
    for cl in classes:
        # split the CamelCase local name into a space-separated heading label
        term = cl.term.split(':')[1]
        termbank = []
        label = term[0]
        for i in range(1, len(term)):
            if term[i].isupper() and term[i-1].islower():
                termbank.append(label)
                label = ''
            label += term[i]
        termbank.append(label)
        label = ' '.join(termbank)
        filecontent.append('<section>')
        filecontent.append(f'<h4 id={cl.term}>{label}</h4>')
        filecontent.append('<table class="definition">')
        filecontent.append('<tbody>')
        filecontent.append('<tr>')
        filecontent.append('<th>Class:</th>')
        filecontent.append(f'<th><code><a href="#{cl.term}">{cl.term}</a></code></th>')
        filecontent.append('</tr>')
        filecontent.append('<tr>')
        filecontent.append('<td>Description:</td>')
        filecontent.append(f'<td>{cl.description}</td>')
        filecontent.append('</tr>')
        if cl.rdfs_comments:
            filecontent.append('<tr>')
            filecontent.append('<td>Comments:</td>')
            filecontent.append(f'<td>{cl.rdfs_comments}</td>')
            filecontent.append('</tr>')
        # NOTE(review): stray '<tr>' with no matching '</tr>' — verify the
        # generated HTML; probably a leftover.
        filecontent.append('<tr>')
        if cl.super:
            filecontent.append('<tr>')
            filecontent.append('<td>is SubClass of:</td>')
            scs = []
            for sc in cl.super.split(','):
                sc = sc.strip()
                if sc.startswith(NAMESPACE):
                    scs.append(f'<a href="#{sc}">{sc}</a>')
                else:
                    # NOTE(review): NAMESPACE == "dpv:", so this inner dpv:
                    # check can never be reached — verify intent.
                    link = sc
                    if sc.startswith("dpv:"):
                        link = sc.replace("dpv:", "https://w3.org/ns/dpv#")
                    scs.append(f'<a href="{link}">{sc}</a>')
            # multiple superclasses rendered as an intersection
            scs = ' ∩ '.join(scs)
            filecontent.append(f'<td>{scs}</td>')
            filecontent.append('</tr>')
        if cl.sub:
            filecontent.append('<tr>')
            filecontent.append('<td>is Parent Class of:</td>')
            scs = []
            for sc in cl.sub.split(','):
                sc = sc.strip()
                if sc.startswith(NAMESPACE):
                    scs.append(f'<a href="#{sc}">{sc}</a>')
                else:
                    link = sc
                    if sc.startswith("dpv:"):
                        link = sc.replace("dpv:", "https://w3.org/ns/dpv#")
                    scs.append(f'<a href="{link}">{sc}</a>')
            scs = ', '.join(scs)
            filecontent.append(f'<td>{scs}</td>')
            filecontent.append('</tr>')
        # collect properties that mention this class in their domain/range
        domains = []
        for prop in properties:
            if (cl.term in prop.domain):
                domains.append(f'<a href="#{prop.term}">{prop.term}</a>')
        ranges = []
        for prop in properties:
            if (cl.term in prop.range):
                ranges.append(f'<a href="#{prop.term}">{prop.term}</a>')
        if domains or ranges:
            if domains:
                domains.sort()
                filecontent.append('<tr>')
                filecontent.append('<td>in Domain of:</td>')
                filecontent.append(f'<td>{", ".join(domains)}</td>')
                filecontent.append('</tr>')
            if ranges:
                ranges.sort()
                filecontent.append('<tr>')
                filecontent.append('<td>in Range of:</td>')
                filecontent.append(f'<td>{", ".join(ranges)}</td>')
                filecontent.append('</tr>')
        if cl.source:
            filecontent.append('<tr>')
            filecontent.append('<td>Source:</td>')
            s = ', '.join((
                f'<a href="{s}">{s}</a>'
                for s in cl.source.split(',')))
            filecontent.append(f'<td>{s}</td>')
            filecontent.append('</tr>')
        if cl.status:
            filecontent.append('<tr>')
            filecontent.append('<td>Status:</td>')
            st = "\n".join(cl.status.split(";"))
            filecontent.append(f'<td>{st}</td>')
            filecontent.append('</tr>')
        if cl.created:
            filecontent.append('<tr>')
            filecontent.append('<td>Date Created:</td>')
            filecontent.append(f'<td>{cl.created}</td>')
            filecontent.append('</tr>')
        if cl.approved:
            filecontent.append('<tr>')
            filecontent.append('<td>Date Approved:</td>')
            filecontent.append(f'<td>{cl.approved}</td>')
            filecontent.append('</tr>')
        if cl.resolution:
            filecontent.append('<tr>')
            filecontent.append('<td>Approval Resolution:</td>')
            filecontent.append(f'<td><a href="{cl.resolution}">{cl.resolution}</a></td>')
            filecontent.append('</tr>')
        if cl.contributor:
            filecontent.append('<tr>')
            filecontent.append('<td>Contributor:</td>')
            filecontent.append(f'<td>{cl.contributor}</td>')
            filecontent.append('</tr>')
        if cl.comments:
            filecontent.append('<tr>')
            filecontent.append('<td>Notes:</td>')
            filecontent.append(f'<td>{cl.comments}</td>')
            filecontent.append('</tr>')
        if cl.related_terms:
            filecontent.append('<tr>')
            filecontent.append('<td>Related Terms:</td>')
            filecontent.append('<td>')
            for t in cl.related_terms.split(','):
                filecontent.append(f'{cl.related_how} {t}\n')
            filecontent.append('</td></tr>')
        filecontent.append('</tbody>')
        # NOTE(review): '</thead>' is closed here but no '<thead>' was ever
        # opened — verify the generated HTML.
        filecontent.append('</thead>')
        filecontent.append('</table>')
        filecontent.append('</section>')
    filecontent.append('</section>')
def document_properties(classes, properties):
    """Append the HTML documentation section for all properties to `filecontent`.

    Args:
        classes: unused here, kept for signature symmetry with document_classes.
        properties: list of Template records with type 'Property'.

    Side effects:
        Mutates the module-level `filecontent` list and sorts `properties` in place.
    """
    if not properties:
        return
    filecontent.append(f'<section id="{SHEET.lower()}-properties">')
    filecontent.append('<h3>Properties</h3>')
    properties.sort(key=lambda c: c.term)
    # table of contents; NOTE(review): href likely should be "#{item.term}" — verify.
    string = "<p>\n"
    string += ' | \n'.join((
        f'<code><a href="{item.term}">:{item.term.split(":")[1]}</a></code>'
        for item in properties))
    string += "\n</p>\n"
    filecontent.append(string)
    for cl in properties:
        # split the camelCase local name into a space-separated heading label
        term = cl.term.split(':')[1]
        termbank = []
        label = term[0]
        for i in range(1, len(term)):
            if term[i].isupper() and term[i-1].islower():
                termbank.append(label)
                label = ''
            label += term[i]
        termbank.append(label)
        label = ' '.join(termbank)
        filecontent.append('<section>')
        filecontent.append(f'<h4 id={cl.term}>{label}</h4>')
        filecontent.append('<table class="definition">')
        filecontent.append('<tbody>')
        filecontent.append('<tr>')
        filecontent.append('<th>Property:</th>')
        filecontent.append(f'<th><code><a href="#{cl.term}">{cl.term}</a></code></th>')
        filecontent.append('</tr>')
        filecontent.append('<tr>')
        filecontent.append('<td>Description:</td>')
        filecontent.append(f'<td>{cl.description}</td>')
        filecontent.append('</tr>')
        if cl.rdfs_comments:
            filecontent.append('<tr>')
            filecontent.append('<td>Comments:</td>')
            filecontent.append(f'<td>{cl.rdfs_comments}</td>')
            filecontent.append('</tr>')
        # NOTE(review): stray '<tr>' with no matching '</tr>' — verify.
        filecontent.append('<tr>')
        if cl.super:
            filecontent.append('<tr>')
            filecontent.append('<td>is Sub-Property of:</td>')
            # NOTE(review): closing tag is '<a>' instead of '</a>' — malformed
            # HTML, verify (same below for sub-properties).
            scs = [f'<a href="#{sc}">{sc}<a>' for sc in cl.super.split(',')]
            scs = ', '.join(scs)
            filecontent.append(f'<td>{scs}</td>')
            filecontent.append('</tr>')
        if cl.sub:
            filecontent.append('<tr>')
            filecontent.append('<td>is Parent Property of:</td>')
            scs = [f'<a href="#{sc}">{sc}<a>' for sc in cl.sub.split(',')]
            scs = ', '.join(scs)
            filecontent.append(f'<td>{scs}</td>')
            filecontent.append('</tr>')
        if cl.domain:
            # NOTE(review): splits on 'union' (no spaces) while generate_rdf
            # splits on ' union '; also hrefs lack the leading '#' — verify.
            if 'union' in cl.domain:
                domains = [
                    f'<a href="{c.strip()}">{c.strip()}</a>'
                    for c in cl.domain.split('union')]
                domains = ' ∪ '.join(domains)
            else:
                domains = f'<a href="{cl.domain}">{cl.domain}</a>'
            filecontent.append('<tr>')
            filecontent.append('<td>Domain:</td>')
            filecontent.append(f'<td>{domains}</td>')
            filecontent.append('</tr>')
        if cl.range:
            filecontent.append('<tr>')
            filecontent.append('<td>Range:</td>')
            filecontent.append(f'<td><a href="{cl.range}">{cl.range}</a></td>')
            filecontent.append('</tr>')
        if cl.source:
            filecontent.append('<tr>')
            filecontent.append('<td>Source:</td>')
            s = ', '.join((
                f'<a href="{s}">{s}</a>'
                for s in cl.source.split(',')))
            filecontent.append(f'<td>{s}</td>')
            filecontent.append('</tr>')
        if cl.status:
            filecontent.append('<tr>')
            filecontent.append('<td>Status:</td>')
            st = "\n".join(cl.status.split(";"))
            filecontent.append(f'<td>{st}</td>')
            filecontent.append('</tr>')
        if cl.created:
            filecontent.append('<tr>')
            filecontent.append('<td>Date Created:</td>')
            filecontent.append(f'<td>{cl.created}</td>')
            filecontent.append('</tr>')
        if cl.approved:
            filecontent.append('<tr>')
            filecontent.append('<td>Date Approved:</td>')
            filecontent.append(f'<td>{cl.approved}</td>')
            filecontent.append('</tr>')
        if cl.resolution:
            filecontent.append('<tr>')
            filecontent.append('<td>Approval Resolution:</td>')
            filecontent.append(f'<td><a href="{cl.resolution}">{cl.resolution}</a></td>')
            filecontent.append('</tr>')
        if cl.contributor:
            filecontent.append('<tr>')
            filecontent.append('<td>Contributor:</td>')
            filecontent.append(f'<td>{cl.contributor}</td>')
            filecontent.append('</tr>')
        if cl.comments:
            filecontent.append('<tr>')
            filecontent.append('<td>Notes:</td>')
            filecontent.append(f'<td>{cl.comments}</td>')
            filecontent.append('</tr>')
        if cl.related_terms:
            filecontent.append('<tr>')
            filecontent.append('<td>Related Terms:</td>')
            filecontent.append('<td>')
            for t in cl.related_terms.split(','):
                filecontent.append(f'{cl.related_how} {t}\n')
            filecontent.append('</td></tr>')
        filecontent.append('</tbody>')
        # NOTE(review): '</thead>' closed but never opened — verify.
        filecontent.append('</thead>')
        filecontent.append('</table>')
        filecontent.append('</section>')
    filecontent.append('</section>')
def generate_rdf(classes, properties):
    """Generate the RDF/OWL (Turtle-style) serialization for this ontology.

    Args:
        classes: iterable of Template-like records describing classes.
        properties: iterable of Template-like records describing properties.

    Returns:
        A list with one list of serialized lines per term; each inner list
        starts with the term declaration followed by indented predicate lines.
    """
    code = []
    for cl in classes:
        serialization = []
        serialization.append(f'{cl.term} a rdfs:Class')
        serialization.append(f'    dct:description "{cl.description}"@en')
        if cl.super:
            serialization.append(f'    rdfs:subClassOf {cl.super}')
        if cl.created:
            serialization.append(f'    dct:created "{cl.created}"^^xsd:date')
        if cl.approved:
            serialization.append(f'    dct:date-accepted "{cl.approved}"^^xsd:date')
        if cl.contributor:
            serialization.append(f'    dct:creator "{cl.contributor}"')
        if cl.rdfs_comments:
            # escape double quotes so the Turtle literal stays well-formed
            rdfs_comments = cl.rdfs_comments.replace('"', '\\"')
            serialization.append(f'    rdfs:comment "{rdfs_comments}"')
        if cl.source:
            for s in cl.source.split(','):
                if s.startswith('http'):
                    # URLs become IRIs
                    serialization.append(f'    rdfs:isDefinedBy <{s}>')
                else:
                    # BUG FIX: previously emitted the literal string "s"
                    # instead of the interpolated source value
                    serialization.append(f'    rdfs:isDefinedBy "{s}"')
        if cl.related_terms:
            # default the relation to rdfs:seeAlso when none is specified
            if not cl.related_how:
                related_how = 'rdfs:seeAlso'
            else:
                related_how = cl.related_how
            for t in cl.related_terms.split(','):
                serialization.append(f'    {related_how} {t}')
        if cl.status:
            serialization.append(f'    sw:term_status "{cl.status}"')
        code.append(serialization)
    for prop in properties:
        serialization = []
        serialization.append(f'{prop.term} a rdfs:Property')
        serialization.append(f'    dct:description "{prop.description}"@en')
        if prop.domain:
            if 'union' in prop.domain:
                # multiple domains become an owl:unionOf list
                domains = prop.domain.split(' union ')
                s = f'    rdfs:domain [ owl:unionOf (\n'
                for item in domains:
                    s += f'        {item}\n'
                s += f'        ) ]'
                serialization.append(s)
            else:
                serialization.append(f'    rdfs:domain {prop.domain}')
        if prop.range:
            if 'union' in prop.range:
                ranges = prop.range.split(' union ')
                s = f'    rdfs:range [ owl:unionOf (\n'
                for item in ranges:
                    s += f'        {item}\n'
                s += f'        ) ]'
                serialization.append(s)
            else:
                serialization.append(f'    rdfs:range {prop.range}')
        # TODO: add superproperty
        if prop.created:
            serialization.append(f'    dct:created "{prop.created}"^^xsd:date')
        if prop.approved:
            serialization.append(f'    dct:date-accepted "{prop.approved}"^^xsd:date')
        if prop.contributor:
            serialization.append(f'    dct:creator "{prop.contributor}"')
        if prop.rdfs_comments:
            rdfs_comments = prop.rdfs_comments.replace('"', '\\"')
            serialization.append(f'    rdfs:comment "{rdfs_comments}"')
        if prop.source:
            for s in prop.source.split(','):
                if s.startswith('http'):
                    serialization.append(f'    rdfs:isDefinedBy <{s}>')
                else:
                    # BUG FIX: same literal-"s" bug as in the class branch
                    serialization.append(f'    rdfs:isDefinedBy "{s}"')
        if prop.related_terms:
            if not prop.related_how:
                related_how = 'rdfs:seeAlso'
            else:
                related_how = prop.related_how
            for t in prop.related_terms.split(','):
                serialization.append(f'    {related_how} {t}')
        if prop.status:
            serialization.append(f'    sw:term_status "{prop.status}"')
        code.append(serialization)
    return code
def main(pickled=False):
    """Generate vocabulary documentation for the spreadsheet tab ``SHEET``.

    First argument should be the name of the Tab in the spreadsheet
    you want to parse (in quotes), e.g. 'Base Ontology'

    Args:
        pickled (bool): when True, load previously pickled classes and
            properties from ``pickled/{SHEET}.pickle`` instead of calling
            the Google Sheets API.
    """
    if pickled:
        print(f'loading data from file pickled/{SHEET}.')
        classes, properties = pickle.load(open(f'pickled/{SHEET}.pickle', 'rb'))
    else:
        # download data from Google Sheets using the API
        data = download_data(SHEET)
        # extract classes and properties from downloaded data
        classes, properties = extract_classes_properties(data)
        # pickles for offline working (in case SHEETS API is not working)
        # or when there's no internet connectivity - e.g. flights
        pickle.dump(
            (classes, properties),
            open(f'pickled/{SHEET}.pickle', 'wb'))
    # the contents are generated, stored in a list, and saved to a file
    # NOTE(review): document_classes/document_properties appear to populate
    # the module-level `filecontent` list written out below — confirm.
    document_classes(classes, properties)
    document_properties(classes, properties)
    with open(f'docs/{SHEET}.html', 'w') as fd:
        for line in filecontent:
            print(line, file=fd)
    # serialize the classes and properties
<gh_stars>1-10
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import covid19pandas as cod
import covid19pandas.exceptions as codex
from test_getters import _check_gotten
import pandas as pd
import numpy as np
import datetime
import pytest
import math
# Parameter grids iterated by every test below.
formats = ["wide", "long"]  # table layouts supported by the getters
jhu_data_types = ["all", "cases", "deaths", "recovered"]  # JHU data columns
jhu_regions = ["global", "us"]  # JHU table scopes
nyt_data_types = ["all", "cases", "deaths"]  # NYT data columns
nyt_county_options = [True, False]  # NYT: counties table vs. states-only table
@pytest.mark.filterwarnings("ignore::covid19pandas.exceptions.FileNotUpdatedWarning")
class TestSelectors:
    @classmethod
    def setup_class(cls):
        """Ensures that all data tables have been recently downloaded, so we can skip the update in all our tests to improve speed."""
        # Network I/O happens once here (update=True); the individual tests
        # then pass update=False and reuse the freshly cached tables.
        cod.get_data_jhu(data_type="all", region="global", update=True)
        cod.get_data_jhu(data_type="all", region="us", update=True)
        cod.get_data_nyt(data_type="all", counties=False, update=True)
        cod.get_data_nyt(data_type="all", counties=True, update=True)
# -------------------------------------------------------------------------------------------------------------
# Tests for select_top_x_regions
# -------------------------------------------------------------------------------------------------------------
def test_select_top_x_jhu(self):
for format in formats:
for data_type in jhu_data_types:
for region in jhu_regions:
if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
if data_type == "all":
compare_by_types = set(jhu_data_types)
compare_by_types.remove("all")
if region == "us":
compare_by_types.remove("recovered")
for compare_by_type in compare_by_types:
self._check_select_top_x(df, format, compare_by_type, num_regions=1) # Don't keep others
self._check_select_top_x(df, format, compare_by_type, num_regions=1, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
self._check_select_top_x(df, format, compare_by_type, num_regions=2) # Don't keep others
self._check_select_top_x(df, format, compare_by_type, num_regions=2, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
else:
self._check_select_top_x(df, format, data_type, num_regions=1)
self._check_select_top_x(df, format, data_type, num_regions=2)
def test_select_top_x_nyt(self):
for format in formats:
for data_type in nyt_data_types:
for county_option in nyt_county_options:
if (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
if data_type == "all":
compare_by_types = set(nyt_data_types)
compare_by_types.remove("all")
for compare_by_type in compare_by_types:
self._check_select_top_x(df, format, compare_by_type, num_regions=1) # Don't keep others
self._check_select_top_x(df, format, compare_by_type, num_regions=1, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
# It only will work to do more than 1 grouping col if we're using the states and counties table, because the just states table only has one grouping col
if county_option:
self._check_select_top_x(df, format, compare_by_type, num_regions=2) # Don't keep others
self._check_select_top_x(df, format, compare_by_type, num_regions=2, other_to_keep=[col for col in compare_by_types if col != compare_by_type]) # Keep others
else:
self._check_select_top_x(df, format, data_type, num_regions=1)
# It only will work to do more than 1 grouping col if we're using the states and counties table, because the just states table only has one grouping col
if county_option:
self._check_select_top_x(df, format, data_type, num_regions=2)
# -------------------------------------------------------------------------------------------------------------
# Tests for select_regions
# -------------------------------------------------------------------------------------------------------------
def test_select_regions_jhu(self):
for format in formats:
for data_type in jhu_data_types:
for region in jhu_regions:
if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
if data_type == "all":
cols_to_keep = {"cases", "deaths", "recovered"}
if region == "us":
cols_to_keep.remove("recovered")
cols_to_keep = sorted(cols_to_keep) # Convert it back to a list
else:
cols_to_keep = [data_type]
self._check_select_regions(df, format, cols_kept=cols_to_keep)
def test_select_regions_nyt(self):
for format in formats:
for data_type in nyt_data_types:
for county_option in nyt_county_options:
if (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
if data_type == "all":
cols_to_keep = ["cases", "deaths"]
else:
cols_to_keep = [data_type]
self._check_select_regions(df, format, cols_kept=cols_to_keep)
# -------------------------------------------------------------------------------------------------------------
# Tests for calc_x_day_rolling_mean
# -------------------------------------------------------------------------------------------------------------
def test_calc_x_day_rolling_mean_jhu(self):
for format in formats:
for data_type in jhu_data_types:
for region in jhu_regions:
if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
if data_type == "all":
input_data_types = set(jhu_data_types)
input_data_types.remove("all")
if region == "us":
input_data_types.remove("recovered")
for input_data_type in input_data_types:
self._check_calc_x_day_rolling_mean(df, format, data_type=input_data_type, other_input_data_types=[col for col in input_data_types if col != input_data_type])
# Note that we still also perform this test if data_type == "all" because we can also calculate the x day mean for all columns.
self._check_calc_x_day_rolling_mean(df, format, data_type)
def test_calc_x_day_rolling_mean_nyt(self):
for format in formats:
for data_type in nyt_data_types:
for county_option in nyt_county_options:
if (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
if data_type == "all":
input_data_types = set(nyt_data_types)
input_data_types.remove("all")
for input_data_type in input_data_types:
self._check_calc_x_day_rolling_mean(df, format, data_type=input_data_type, other_input_data_types=[col for col in input_data_types if col != input_data_type])
# Note that we still also perform this test if data_type == "all" because we can also calculate the x day mean for all columns.
self._check_calc_x_day_rolling_mean(df, format, data_type)
# -------------------------------------------------------------------------------------------------------------
# Tests for calc_daily_change
# -------------------------------------------------------------------------------------------------------------
def test_calc_daily_change_jhu(self):
for format in formats:
for data_type in jhu_data_types:
for region in jhu_regions:
if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
if data_type == "all":
input_data_types = set(jhu_data_types)
input_data_types.remove("all")
if region == "us":
input_data_types.remove("recovered")
for input_data_type in input_data_types:
self._check_daily_change(df, format=format, data_type=input_data_type, other_data_types=[col for col in input_data_types if col != input_data_type])
# Note that we still also perform this test if data_type == "all" because we can also calculate daily change for all columns.
self._check_daily_change(df, format=format, data_type=data_type)
def test_calc_daily_change_long_nyt(self):
for format in formats:
for data_type in nyt_data_types:
for county_option in nyt_county_options:
if format == "wide" and data_type == "all":
pass # Invalid table parameter combination
else:
df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
if data_type == "all":
input_data_types = set(nyt_data_types)
input_data_types.remove("all")
for input_data_type in input_data_types:
self._check_daily_change(df, format=format, data_type=input_data_type, other_data_types=[col for col in input_data_types if col != input_data_type])
# Note that we still also perform this test if data_type == "all" because we can also calculate daily change for all columns.
self._check_daily_change(df, format=format, data_type=data_type)
# -------------------------------------------------------------------------------------------------------------
# Tests for calc_days_since_min_count
# -------------------------------------------------------------------------------------------------------------
def test_calc_days_since_min_count_jhu(self):
for format in formats:
for data_type in jhu_data_types:
for region in jhu_regions:
if (region == "us" and data_type == "recovered") or (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_jhu(format=format, data_type=data_type, region=region, update=False)
if data_type == "all":
count_by_types = set(jhu_data_types)
count_by_types.remove("all")
if region == "us":
count_by_types.remove("recovered")
for count_by_type in count_by_types:
self._check_days_since(df, format, count_by_type)
else:
self._check_days_since(df, format, data_type)
def test_calc_days_since_min_count_nyt(self):
for format in formats:
for data_type in nyt_data_types:
for county_option in nyt_county_options:
if (format == "wide" and data_type == "all"):
pass # Invalid table parameter combination
else:
df = cod.get_data_nyt(format=format, data_type=data_type, counties=county_option, update=False)
if data_type == "all":
for count_by_type in [type for type in nyt_data_types if type != "all"]:
self._check_days_since(df, format, count_by_type)
else:
self._check_days_since(df, format, data_type)
# -------------------------------------------------------------------------------------------------------------
# Helper methods
# -------------------------------------------------------------------------------------------------------------
@staticmethod
def _check_select_top_x(df, format, data_type, num_regions, other_to_keep=[]):
if num_regions == 1:
# Search for defined region cols (based on data source)
if {"Province/State", "Country/Region"}.issubset(df.columns): # JHU global table
region_col = "Country/Region"
exclude = ["US", "China"]
elif {"Combined_Key"}.issubset(df.columns): # JHU USA table
region_col = "Province_State"
exclude = ["New York", "Illinois"]
elif {"state"}.issubset(df.columns): # NYT USA state only or states and counties table.
region_col = "state"
exclude = ["Washington", "Illinois"]
else:
raise ParameterError("The dataframe you passed does not contain any of the standard location grouping columns. Must contain one of these sets of columns: \n\n{'Province/State', 'Country/Region'}\n{'Combined_Key'}\n{'county', 'state'}\n{'state'}\n\n" + f"Your dataframe's columns are:\n{df.columns}")
if format == "wide":
group_cols = [region_col]
else: # format == "long"
group_cols = ["date", region_col]
num_top = 10
# Call the function
outs = {
"top_others_kept": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep),
"top_uncombined": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=False, other_data_cols=other_to_keep),
"top_with_exclusions": cod.select_top_x_regions(df, region_cols=region_col, data_col=data_type, x=num_top, combine_subregions=True, other_data_cols=other_to_keep, exclude=exclude),
}
# Run basic table checks
for name, out in outs.items():
if name == "top_uncombined" and {"Admin2"}.issubset(df.columns):
_check_gotten(out, format, group_cols=group_cols + ["Admin2"]) # | |
<gh_stars>0
#!/usr/bin/env python
"""Test suite for docformatter."""
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import contextlib
import io
import os
import random
import shutil
import string
import subprocess
import sys
import tempfile
import unittest
# Use the stdlib mock on Python 3.3+; fall back to the external ``mock``
# package on older interpreters.
if sys.version_info >= (3, 3):
    from unittest.mock import patch
else:
    from mock import patch
import docformatter
# Directory containing this test file (and docformatter.py itself).
ROOT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
if (
    'DOCFORMATTER_COVERAGE' in os.environ and
    int(os.environ['DOCFORMATTER_COVERAGE'])
):
    # Opt-in via the environment: run docformatter under coverage.
    DOCFORMATTER_COMMAND = ['coverage', 'run', '--branch', '--parallel',
                            '--omit=*/site-packages/*',
                            os.path.join(ROOT_DIRECTORY, 'docformatter.py')]
else:
    # We need to specify the executable to make sure the correct Python
    # interpreter gets used.
    DOCFORMATTER_COMMAND = [sys.executable,
                            os.path.join(
                                ROOT_DIRECTORY,
                                'docformatter.py')]  # pragma: no cover
class TestUnits(unittest.TestCase):
    def test_is_in_range(self):
        """is_in_range: None means unrestricted; otherwise the given
        [start, end] window must overlap the line-range argument."""
        self.assertTrue(docformatter.is_in_range(None, 1, 9))
        self.assertTrue(docformatter.is_in_range([1, 4], 3, 5))
        self.assertTrue(docformatter.is_in_range([1, 4], 4, 10))
        self.assertTrue(docformatter.is_in_range([2, 10], 1, 2))
        self.assertFalse(docformatter.is_in_range([1, 1], 2, 9))
        self.assertFalse(docformatter.is_in_range([10, 20], 1, 9))
    def test_has_correct_length(self):
        """has_correct_length: None means unrestricted; otherwise the span
        between the two line numbers must fall inside the given bounds."""
        self.assertTrue(docformatter.has_correct_length(None, 1, 9))
        self.assertTrue(docformatter.has_correct_length([1, 3], 3, 5))
        self.assertTrue(docformatter.has_correct_length([1, 1], 1, 1))
        self.assertTrue(docformatter.has_correct_length([1, 10], 5, 10))
        self.assertFalse(docformatter.has_correct_length([1, 1], 2, 9))
        self.assertFalse(docformatter.has_correct_length([10, 20], 2, 9))
def test_strip_docstring(self):
self.assertEqual(
'Hello.',
docformatter.strip_docstring('''
"""Hello.
"""
'''))
def test_strip_docstring_with_single_quotes(self):
self.assertEqual(
'Hello.',
docformatter.strip_docstring("""
'''Hello.
'''
"""))
    def test_strip_docstring_with_empty_string(self):
        """An empty triple-quoted docstring strips to the empty string."""
        self.assertEqual('', docformatter.strip_docstring('""""""'))
    def test_strip_docstring_with_escaped_quotes(self):
        """Escaped single quotes inside the docstring body are preserved."""
        self.assertEqual("hello\\'",
                         docformatter.strip_docstring("'hello\\''"))
    def test_strip_docstring_with_escaped_double_quotes(self):
        """Escaped double quotes inside the docstring body are preserved."""
        self.assertEqual('hello\\"',
                         docformatter.strip_docstring('"hello\\""'))
    def test_strip_docstring_with_unhandled(self):
        """Prefixed (r/u/b) string literals are rejected with ValueError."""
        with self.assertRaises(ValueError):
            docformatter.strip_docstring('r"""foo"""')
    def test_strip_docstring_with_unknown(self):
        """Input that is not a string literal at all raises ValueError."""
        with self.assertRaises(ValueError):
            docformatter.strip_docstring('foo')
def test_format_docstring(self):
self.assertEqual('"""Hello."""',
docformatter.format_docstring(' ', '''
"""
Hello.
"""
'''.strip()))
def test_format_docstring_with_summary_that_ends_in_quote(self):
self.assertEqual('''""""Hello"."""''',
docformatter.format_docstring(' ', '''
"""
"Hello"
"""
'''.strip()))
def test_format_docstring_with_bad_indentation(self):
self.assertEqual('''"""Hello.
This should be indented but it is not. The
next line should be indented too. And
this too.
"""''',
docformatter.format_docstring(' ', '''
"""Hello.
This should be indented but it is not. The
next line should be indented too. And
this too.
"""
'''.strip()))
def test_format_docstring_with_too_much_indentation(self):
self.assertEqual('''"""Hello.
This should be dedented.
1. This too.
2. And this.
3. And this.
"""''',
docformatter.format_docstring(' ', '''
"""Hello.
This should be dedented.
1. This too.
2. And this.
3. And this.
"""
'''.strip()))
def test_format_docstring_with_description_wrapping(self):
self.assertEqual('''"""Hello.
This should be indented but it is not. The next line should be
indented too. But this is okay.
"""''',
docformatter.format_docstring(' ', '''
"""Hello.
This should be indented but it is not. The
next line should be indented too. But
this is okay.
"""
'''.strip(), description_wrap_length=72))
def test_format_docstring_should_ignore_doctests(self):
docstring = '''"""Hello.
>>> 4
4
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_should_ignore_doctests_in_summary(self):
docstring = '''"""
>>> 4
4
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_should_maintain_indentation_of_doctest(self):
self.assertEqual(
'''"""Foo bar bing bang.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
"""''',
docformatter.format_docstring(
' ',
docstring='''"""Foo bar bing bang.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
"""''',
description_wrap_length=72))
def test_format_docstring_should_ignore_numbered_lists(self):
docstring = '''"""Hello.
1. This should be indented but it is not. The
next line should be indented too. But
this is okay.
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_should_ignore_parameter_lists(self):
docstring = '''"""Hello.
foo - This is a foo. This is a foo. This is a foo. This is a foo. This is.
bar - This is a bar. This is a bar. This is a bar. This is a bar. This is.
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_should_ignore__colon_parameter_lists(self):
docstring = '''"""Hello.
foo: This is a foo. This is a foo. This is a foo. This is a foo. This is.
bar: This is a bar. This is a bar. This is a bar. This is a bar. This is.
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_should_ignore_multi_paragraph(self):
docstring = '''"""Hello.
This should be indented but it is not. The
next line should be indented too. But
this is okay.
This should be indented but it is not. The
next line should be indented too. But
this is okay.
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ',
docstring,
description_wrap_length=72))
def test_format_docstring_with_trailing_whitespace(self):
self.assertEqual('''"""Hello.
This should be not have trailing whitespace. The
next line should not have trailing whitespace either.
"""''',
docformatter.format_docstring(' ', '''
"""Hello.\t
\t
This should be not have trailing whitespace. The\t\t\t
next line should not have trailing whitespace either.\t
\t
"""
'''.strip()))
def test_format_docstring_with_no_post_description_blank(self):
self.assertEqual('''"""Hello.
Description.
"""''',
docformatter.format_docstring(' ', '''
"""
Hello.
Description.
"""
'''.strip(), post_description_blank=False))
def test_format_docstring_with_pre_summary_newline(self):
self.assertEqual('''"""
Hello.
Description.
"""''',
docformatter.format_docstring(' ', '''
"""
Hello.
Description.
"""
'''.strip(), pre_summary_newline=True))
def test_format_docstring_with_empty_docstring(self):
self.assertEqual('""""""',
docformatter.format_docstring(' ', '""""""'))
def test_format_docstring_with_no_period(self):
self.assertEqual('"""Hello."""',
docformatter.format_docstring(' ', '''
"""
Hello
"""
'''.strip()))
def test_format_docstring_with_single_quotes(self):
self.assertEqual('"""Hello."""',
docformatter.format_docstring(' ', """
'''
Hello.
'''
""".strip()))
def test_format_docstring_with_single_quotes_multi_line(self):
self.assertEqual('''
"""Return x factorial.
This uses math.factorial.
"""
'''.strip(),
docformatter.format_docstring(' ', """
'''
Return x factorial.
This uses math.factorial.
'''
""".strip()))
    def test_format_docstring_with_wrap(self):
        """Property test: wrapped summaries never exceed the requested
        line length, across many wrap lengths and indentation depths."""
        # This function uses `random` so make sure each run of this test is
        # repeatable.
        random.seed(0)
        min_line_length = 50
        for max_length in range(min_line_length, 100):
            for num_indents in range(0, 20):
                indentation = ' ' * num_indents
                # generate_random_docstring is a module-level helper elsewhere
                # in this file; max_word_length keeps single words wrappable.
                formatted_text = indentation + docformatter.format_docstring(
                    indentation=indentation,
                    docstring=generate_random_docstring(
                        max_word_length=min_line_length // 2),
                    summary_wrap_length=max_length)
                for line in formatted_text.split('\n'):
                    # It is not the formatter's fault if a word is too long to
                    # wrap.
                    if len(line.split()) > 1:
                        self.assertLessEqual(len(line), max_length)
def test_format_docstring_with_weird_indentation_and_punctuation(self):
self.assertEqual('''
"""Creates and returns four was awakens to was created tracked ammonites
was the fifty, arithmetical four was pyrotechnic to pyrotechnic physicists.
`four' falsified x falsified ammonites
to awakens to. `created' to ancestor was four to x dynamo to was
four ancestor to physicists().
"""
'''.strip(),
docformatter.format_docstring(' ', '''
"""Creates and returns four was awakens to was created tracked
ammonites was the fifty, arithmetical four was pyrotechnic to
pyrotechnic physicists. `four' falsified x falsified ammonites
to awakens to. `created' to ancestor was four to x dynamo to was
four ancestor to physicists().
"""
'''.strip(), summary_wrap_length=79))
def test_format_docstring_should_leave_list_alone(self):
docstring = '''"""
one
two
three
four
five
six
seven
eight
nine
ten
eleven
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ', docstring))
def test_format_docstring_should_underlined_summaries_alone(self):
docstring = '''"""
Foo bar
-------
This is more.
"""'''
self.assertEqual(
docstring,
docformatter.format_docstring(' ', docstring))
def test_format_code(self):
self.assertEqual(
'''\
def foo():
"""Hello foo."""
''',
docformatter.format_code(
'''\
def foo():
"""
Hello foo.
"""
'''))
def test_format_code_range_miss(self):
self.assertEqual('''\
def f(x):
""" This is a docstring. That should be on more lines"""
pass
def g(x):
""" Badly indented docstring"""
pass''',
docformatter.format_code('''\
def f(x):
""" This is a docstring. That should be on more lines"""
pass
def g(x):
""" Badly indented docstring"""
pass''', line_range=[1, 1]))
def test_format_code_range_hit(self):
self.assertEqual('''\
def f(x):
"""This is a docstring.
That should be on more lines
"""
pass
def g(x):
""" Badly indented docstring"""
pass''',
docformatter.format_code('''\
def f(x):
""" This is a docstring. That should be on more lines"""
pass
def g(x):
""" Badly indented docstring"""
pass''', line_range=[1, 2]))
def test_format_code_docstring_length(self):
self.assertEqual('''\
def f(x):
"""This is a docstring.
That should be on less lines
"""
pass
def g(x):
"""Badly indented docstring."""
pass''',
docformatter.format_code('''\
def f(x):
"""This is a docstring.
That should be on less lines
"""
pass
def g(x):
""" Badly indented docstring"""
pass''', length_range=[1, 1]))
def test_format_code_with_module_docstring(self):
self.assertEqual(
'''\
#!/usr/env/bin python
"""This is a module docstring.
1. One
2. Two
"""
"""But
this
is
not."""
''',
docformatter.format_code(
'''\
#!/usr/env/bin python
"""This is
a module
docstring.
1. One
2. Two
"""
"""But
this
is
not."""
'''))
def test_format_code_should_ignore_non_docstring(self):
source = '''\
x = """This
is
not."""
'''
self.assertEqual(
source,
docformatter.format_code(source))
def test_format_code_with_empty_string(self):
self.assertEqual(
'',
docformatter.format_code(''))
def test_format_code_with_tabs(self):
self.assertEqual(
'''\
def foo():
\t"""Hello foo."""
\tif True:
\t\tx = 1
''',
docformatter.format_code(
'''\
def foo():
\t"""
\tHello foo.
\t"""
\tif True:
\t\tx = 1
'''))
def test_format_code_with_mixed_tabs(self):
self.assertEqual(
'''\
def foo():
\t"""Hello foo."""
\tif True:
\t x = 1
''',
docformatter.format_code(
'''\
def foo():
\t"""
\tHello foo.
\t"""
\tif True:
\t x = 1
'''))
def test_format_code_with_escaped_newlines(self):
self.assertEqual(
r'''def foo():
"""Hello foo."""
x = \
1
''',
docformatter.format_code(
r'''def foo():
"""
Hello foo.
"""
x = \
1
'''))
def test_format_code_with_comments(self):
self.assertEqual(
r'''
def foo():
"""Hello foo."""
# My comment
# My comment with escape \
123
'''.lstrip(),
docformatter.format_code(
r'''
def foo():
"""
Hello foo.
"""
# My comment
# My comment with escape \
123
'''.lstrip()))
def test_format_code_with_escaped_newline_in_inline_comment(self):
self.assertEqual(
r'''
def foo():
"""Hello foo."""
def test_method_no_chr_92(): the501(92) # \
'''.lstrip(),
docformatter.format_code(
r'''
def foo():
"""
Hello foo.
"""
def test_method_no_chr_92(): the501(92) # \
'''.lstrip()))
def test_format_code_skip_complex(self):
"""We do not handle r/u/b prefixed strings."""
self.assertEqual(
'''\
def foo():
r"""
Hello foo.
"""
''',
docformatter.format_code(
'''\
def foo():
r"""
Hello foo.
"""
'''))
def test_format_code_skip_complex_single(self):
"""We do not handle r/u/b prefixed strings."""
self.assertEqual(
"""\
def foo():
r'''
Hello foo.
'''
""",
docformatter.format_code(
"""\
def foo():
r'''
Hello foo.
'''
"""))
def test_format_code_skip_nested(self):
code = """\
def foo():
'''Hello foo. \"\"\"abc\"\"\"
'''
"""
self.assertEqual(code, docformatter.format_code(code))
def test_format_code_with_multiple_sentences(self):
self.assertEqual(
'''\
def foo():
"""Hello foo.
This is a docstring.
"""
''',
docformatter.format_code(
'''\
def foo():
"""
Hello foo.
This is a docstring.
"""
'''))
def test_format_code_with_multiple_sentences_same_line(self):
self.assertEqual(
'''\
def foo():
"""Hello foo.
This is a docstring.
"""
''',
docformatter.format_code(
'''\
def foo():
"""
Hello foo. This is a docstring.
"""
'''))
def test_format_code_with_multiple_sentences_multi_line_summary(self):
self.assertEqual(
'''\
def foo():
"""Hello foo.
This is a | |
== "_n_components":
_val = [1 for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr in ["_longest_component", "_largest_component"]:
_val = [cid for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "vertex_list":
# reassigns vertex list + network, graph component vertices
supp = [objt + "_component_vertices" for objt in obj_type]
_val = [getattr(cnet, supp[0])[cid]]
_val += [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = [attr] + supp
elif attr == "vertex_coords":
# reassigns both vertex_coords and vertices
supp = getattr(cnet, "vertex_list")
_val = [{k: v for k, v in getattr(cnet, attr).items() if k in supp}]
_val += [{v: k for k, v in _val[0].items()}]
attr = [attr, "vertices"]
elif attr == "_component_vertex_count":
# reassigns both network and graph _component_vertex_count
supp = len(getattr(cnet, "vertex_list"))
_val = [{cid: supp} for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "adjacencylist":
supp_adj = copy.deepcopy(list(getattr(cnet, attr).keys()))
supp_vtx = getattr(cnet, "vertex_list")
supp_rmv = [v for v in supp_adj if v not in supp_vtx]
[getattr(cnet, attr).pop(s) for s in supp_rmv]
return
elif attr == "_component_is_ring":
# reassigns both network and graph _component_is_ring
supp = [getattr(cnet, objt + attr) for objt in obj_type]
_val = [{cid: s[cid]} for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "non_articulation_points":
supp_vtx = getattr(cnet, "vertex_list")
_val = [[s for s in getattr(cnet, attr) if s in supp_vtx]]
attr = [attr]
elif attr == "_component2":
# reassigns both network and graph _component2 attributes
supp = [_n + "_component2" + _a]
if hasgraph:
supp += [_g + "_component2" + _e]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "arcs":
# reassigns both arcs and edges
c2 = "_component2"
supp = [_n + c2 + _a]
if hasgraph:
supp += [_g + c2 + _e]
_val = [getattr(cnet, s)[cid] for s in supp]
attr = [attr]
if hasgraph:
attr += ["edges"]
elif attr == "_component_labels":
# reassigns both network and graph _component_labels
supp = [len(getattr(cnet, o + "s")) for o in obj]
_val = [numpy.array([cid] * s) for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "_component_lengths":
# reassigns both network and graph _component_lengths
supp = [objt + attr for objt in obj_type]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "_lengths":
# reassigns both arc and edge _lengths
supp_name = [o + attr for o in obj]
supp_lens = [getattr(cnet, s) for s in supp_name]
supp_link = [getattr(cnet, o + "s") for o in obj]
supp_ll = list(zip(supp_lens, supp_link))
_val = [{k: v for k, v in l1.items() if k in l2} for l1, l2 in supp_ll]
attr = supp_name
# reassign attributes
for a, av in zip(attr, _val):
setattr(cnet, a, av)
# provide warning (for now) if the network contains a point pattern
if getattr(net, "pointpatterns"):
msg = "There is a least one point pattern associated with the network."
msg += " Component extraction should be performed prior to snapping"
msg += " point patterns to the network object; failing to do so may"
msg += " lead to unexpected results."
warnings.warn(msg)
# provide warning (for now) if the network contains a point pattern
dm, nt = "distance_matrix", "network_trees"
if hasattr(net, dm) or hasattr(net, nt):
msg = "Either one or both (%s, %s) attributes" % (dm, nt)
msg += " are present and will be deleted. These must be"
msg += " recalculated following component extraction."
warnings.warn(msg)
for attr in [dm, nt]:
if hasattr(net, attr):
_attr = getattr(net, attr)
del _attr
# make initial copy of the network
cnet = copy.deepcopy(net)
# set labels
_n, _a, _g, _e = "network", "arc", "graph", "edge"
obj_type = [_n]
obj = [_a]
hasgraph = False
if hasattr(cnet, "w_graph"):
obj_type += [_g]
obj += [_e]
hasgraph = True
# attributes to reassign
update_attributes = [
"_fully_connected",
"_n_components",
"_longest_component",
"_largest_component",
"vertex_list",
"vertex_coords",
"_component_vertex_count",
"adjacencylist",
"_component_is_ring",
"_component2",
"arcs",
"_component_lengths",
"_lengths",
"_component_labels",
]
if hasgraph:
update_attributes.append("non_articulation_points")
# reassign attributes
for attribute in update_attributes:
_reassign(attribute, component_id)
# recreate spatial weights
cnet.w_network = cnet.contiguityweights(graph=False, weightings=weightings)
if hasgraph:
cnet.w_graph = cnet.contiguityweights(graph=True, weightings=weightings)
return cnet
def spanning_tree(net, method="sort", maximum=False, silence_warnings=True):
    """Extract a minimum or maximum spanning tree from a network.

    Parameters
    ----------
    net : spaghetti.Network
        Instance of a network object.
    method : str
        Method for determining spanning tree. Currently, the only
        supported method is 'sort', which sorts the network arcs
        by length prior to building intermediary networks and checking
        for cycles within the tree/subtrees. Future methods may
        include linear programming approachs, etc.
    maximum : bool
        When ``True`` a maximum spanning tree is created. When ``False``
        a minimum spanning tree is created. Default is ``False``.
    silence_warnings : bool
        Warn if there is more than one connected component. Default is
        ``True`` due to the nature of constructing a minimum
        spanning tree.

    Returns
    -------
    net : spaghetti.Network
        Pruned instance of the network object.

    Notes
    -----
    For in-depth background and details see
    :cite:`GrahamHell_1985`,
    :cite:`AhujaRavindraK`, and
    :cite:`Okabe2012`.

    See also
    --------
    networkx.algorithms.tree.mst
    scipy.sparse.csgraph.minimum_spanning_tree

    Examples
    --------
    Create a network instance.

    >>> from libpysal import cg
    >>> import spaghetti
    >>> p00 = cg.Point((0,0))
    >>> lines = [cg.Chain([p00, cg.Point((0,3)), cg.Point((4,0)), p00])]
    >>> ntw = spaghetti.Network(in_data=lines)

    Extract the minimum spanning tree.

    >>> minst_net = spaghetti.spanning_tree(ntw)
    >>> min_len = sum(minst_net.arc_lengths.values())
    >>> min_len
    7.0

    Extract the maximum spanning tree.

    >>> maxst_net = spaghetti.spanning_tree(ntw, maximum=True)
    >>> max_len = sum(maxst_net.arc_lengths.values())
    >>> max_len
    9.0

    >>> max_len > min_len
    True

    """
    # (un)silence warnings emitted while building intermediary networks
    weights_kws = {"silence_warnings": silence_warnings}
    # do not extract the graph representation while testing for cycles
    net_kws = {"extractgraph": False, "weights_kws": weights_kws}
    # if the network has no cycles, it is already a spanning tree;
    # only rebuild the network when a cycle must be broken
    if util.network_has_cycle(net.adjacencylist):
        if method.lower() == "sort":
            spanning_tree = mst_weighted_sort(net, maximum, net_kws)
        else:
            msg = "'%s' not a valid method for minimum spanning tree creation"
            raise ValueError(msg % method)
        # instantiate the spanning tree as a network object; guarded by
        # the cycle check so acyclic input is returned unchanged
        net = Network(in_data=spanning_tree, weights_kws=weights_kws)
    return net
def mst_weighted_sort(net, maximum, net_kws):
    """Extract a minimum or maximum spanning tree from a network using
    the length-weighted sort method.

    Parameters
    ----------
    net : spaghetti.Network
        See ``spanning_tree()``.
    maximum : bool
        See ``spanning_tree()``.
    net_kws : dict
        Keyword arguments for instantiating a ``spaghetti.Network``.

    Returns
    -------
    spanning_tree : list
        All network arcs that are members of the spanning tree.

    Notes
    -----
    This function is based on the method found in Chapter 3
    Section 4.3 of :cite:`Okabe2012`.

    """
    # arcs ordered by length (descending when a maximum tree is wanted)
    ordered_arcs = sorted(net.arc_lengths, key=net.arc_lengths.get, reverse=maximum)
    # the spanning tree is grown one arc at a time
    spanning_tree = []
    for arc in ordered_arcs:
        # spatial (libpysal.cg.Chain) representation of this arc
        arc_chain = util.chain_constr(net.vertex_coords, [arc])
        # candidate network: the tree so far plus this arc
        candidate = Network(in_data=spanning_tree + arc_chain, **net_kws)
        # keep the arc only if it does not close a cycle
        if not util.network_has_cycle(candidate.adjacencylist):
            spanning_tree.extend(arc_chain)
    return spanning_tree
@requires("geopandas", "shapely")
def element_as_gdf(
net,
vertices=False,
arcs=False,
pp_name=None,
snapped=False,
routes=None,
id_col="id",
geom_col="geometry",
):
"""Return a ``geopandas.GeoDataFrame`` of network elements. This can be
(a) the vertices of a network; (b) the arcs of a network; (c) both the
vertices and arcs of the network; (d) the raw point pattern associated
with the network; (e) the snapped point pattern of (d); or (f) the
shortest path routes between point observations.
Parameters
----------
net : spaghetti.Network
A `spaghetti` network object.
vertices : bool
Extract the network vertices (``True``). Default is ``False``.
arcs : bool
Extract the network arcs (``True``). Default is ``False``.
pp_name : str
Name of the ``network.PointPattern`` to extract.
Default is ``None``.
snapped : bool
If extracting a ``network.PointPattern``, set to ``True`` for
snapped point | |
pass
# Exit a parse tree produced by SQLParser#tableConstraintDef.
def exitTableConstraintDef(self, ctx:SQLParser.TableConstraintDefContext):
pass
# Enter a parse tree produced by SQLParser#constraintName.
def enterConstraintName(self, ctx:SQLParser.ConstraintNameContext):
pass
# Exit a parse tree produced by SQLParser#constraintName.
def exitConstraintName(self, ctx:SQLParser.ConstraintNameContext):
pass
# Enter a parse tree produced by SQLParser#fieldDefinition.
def enterFieldDefinition(self, ctx:SQLParser.FieldDefinitionContext):
pass
# Exit a parse tree produced by SQLParser#fieldDefinition.
def exitFieldDefinition(self, ctx:SQLParser.FieldDefinitionContext):
pass
# Enter a parse tree produced by SQLParser#columnAttribute.
def enterColumnAttribute(self, ctx:SQLParser.ColumnAttributeContext):
pass
# Exit a parse tree produced by SQLParser#columnAttribute.
def exitColumnAttribute(self, ctx:SQLParser.ColumnAttributeContext):
pass
# Enter a parse tree produced by SQLParser#columnFormat.
def enterColumnFormat(self, ctx:SQLParser.ColumnFormatContext):
pass
# Exit a parse tree produced by SQLParser#columnFormat.
def exitColumnFormat(self, ctx:SQLParser.ColumnFormatContext):
pass
# Enter a parse tree produced by SQLParser#storageMedia.
def enterStorageMedia(self, ctx:SQLParser.StorageMediaContext):
pass
# Exit a parse tree produced by SQLParser#storageMedia.
def exitStorageMedia(self, ctx:SQLParser.StorageMediaContext):
pass
# Enter a parse tree produced by SQLParser#gcolAttribute.
def enterGcolAttribute(self, ctx:SQLParser.GcolAttributeContext):
pass
# Exit a parse tree produced by SQLParser#gcolAttribute.
def exitGcolAttribute(self, ctx:SQLParser.GcolAttributeContext):
pass
# Enter a parse tree produced by SQLParser#references.
def enterReferences(self, ctx:SQLParser.ReferencesContext):
pass
# Exit a parse tree produced by SQLParser#references.
def exitReferences(self, ctx:SQLParser.ReferencesContext):
pass
# Enter a parse tree produced by SQLParser#deleteOption.
def enterDeleteOption(self, ctx:SQLParser.DeleteOptionContext):
pass
# Exit a parse tree produced by SQLParser#deleteOption.
def exitDeleteOption(self, ctx:SQLParser.DeleteOptionContext):
pass
# Enter a parse tree produced by SQLParser#keyList.
def enterKeyList(self, ctx:SQLParser.KeyListContext):
pass
# Exit a parse tree produced by SQLParser#keyList.
def exitKeyList(self, ctx:SQLParser.KeyListContext):
pass
# Enter a parse tree produced by SQLParser#keyPart.
def enterKeyPart(self, ctx:SQLParser.KeyPartContext):
pass
# Exit a parse tree produced by SQLParser#keyPart.
def exitKeyPart(self, ctx:SQLParser.KeyPartContext):
pass
# Enter a parse tree produced by SQLParser#keyListWithExpression.
def enterKeyListWithExpression(self, ctx:SQLParser.KeyListWithExpressionContext):
pass
# Exit a parse tree produced by SQLParser#keyListWithExpression.
def exitKeyListWithExpression(self, ctx:SQLParser.KeyListWithExpressionContext):
pass
# Enter a parse tree produced by SQLParser#keyPartOrExpression.
def enterKeyPartOrExpression(self, ctx:SQLParser.KeyPartOrExpressionContext):
pass
# Exit a parse tree produced by SQLParser#keyPartOrExpression.
def exitKeyPartOrExpression(self, ctx:SQLParser.KeyPartOrExpressionContext):
pass
# Enter a parse tree produced by SQLParser#keyListVariants.
def enterKeyListVariants(self, ctx:SQLParser.KeyListVariantsContext):
pass
# Exit a parse tree produced by SQLParser#keyListVariants.
def exitKeyListVariants(self, ctx:SQLParser.KeyListVariantsContext):
pass
# Enter a parse tree produced by SQLParser#indexType.
def enterIndexType(self, ctx:SQLParser.IndexTypeContext):
pass
# Exit a parse tree produced by SQLParser#indexType.
def exitIndexType(self, ctx:SQLParser.IndexTypeContext):
pass
# Enter a parse tree produced by SQLParser#indexOption.
def enterIndexOption(self, ctx:SQLParser.IndexOptionContext):
pass
# Exit a parse tree produced by SQLParser#indexOption.
def exitIndexOption(self, ctx:SQLParser.IndexOptionContext):
pass
# Enter a parse tree produced by SQLParser#commonIndexOption.
def enterCommonIndexOption(self, ctx:SQLParser.CommonIndexOptionContext):
pass
# Exit a parse tree produced by SQLParser#commonIndexOption.
def exitCommonIndexOption(self, ctx:SQLParser.CommonIndexOptionContext):
pass
# Enter a parse tree produced by SQLParser#visibility.
def enterVisibility(self, ctx:SQLParser.VisibilityContext):
pass
# Exit a parse tree produced by SQLParser#visibility.
def exitVisibility(self, ctx:SQLParser.VisibilityContext):
pass
# Enter a parse tree produced by SQLParser#indexTypeClause.
def enterIndexTypeClause(self, ctx:SQLParser.IndexTypeClauseContext):
pass
# Exit a parse tree produced by SQLParser#indexTypeClause.
def exitIndexTypeClause(self, ctx:SQLParser.IndexTypeClauseContext):
pass
# Enter a parse tree produced by SQLParser#fulltextIndexOption.
def enterFulltextIndexOption(self, ctx:SQLParser.FulltextIndexOptionContext):
pass
# Exit a parse tree produced by SQLParser#fulltextIndexOption.
def exitFulltextIndexOption(self, ctx:SQLParser.FulltextIndexOptionContext):
pass
# Enter a parse tree produced by SQLParser#spatialIndexOption.
def enterSpatialIndexOption(self, ctx:SQLParser.SpatialIndexOptionContext):
pass
# Exit a parse tree produced by SQLParser#spatialIndexOption.
def exitSpatialIndexOption(self, ctx:SQLParser.SpatialIndexOptionContext):
pass
# Enter a parse tree produced by SQLParser#dataTypeDefinition.
def enterDataTypeDefinition(self, ctx:SQLParser.DataTypeDefinitionContext):
pass
# Exit a parse tree produced by SQLParser#dataTypeDefinition.
def exitDataTypeDefinition(self, ctx:SQLParser.DataTypeDefinitionContext):
pass
# Enter a parse tree produced by SQLParser#dataType.
def enterDataType(self, ctx:SQLParser.DataTypeContext):
pass
# Exit a parse tree produced by SQLParser#dataType.
def exitDataType(self, ctx:SQLParser.DataTypeContext):
pass
# Enter a parse tree produced by SQLParser#nchar.
def enterNchar(self, ctx:SQLParser.NcharContext):
pass
# Exit a parse tree produced by SQLParser#nchar.
def exitNchar(self, ctx:SQLParser.NcharContext):
pass
# Enter a parse tree produced by SQLParser#realType.
def enterRealType(self, ctx:SQLParser.RealTypeContext):
pass
# Exit a parse tree produced by SQLParser#realType.
def exitRealType(self, ctx:SQLParser.RealTypeContext):
pass
# Enter a parse tree produced by SQLParser#fieldLength.
def enterFieldLength(self, ctx:SQLParser.FieldLengthContext):
pass
# Exit a parse tree produced by SQLParser#fieldLength.
def exitFieldLength(self, ctx:SQLParser.FieldLengthContext):
pass
# Enter a parse tree produced by SQLParser#fieldOptions.
def enterFieldOptions(self, ctx:SQLParser.FieldOptionsContext):
pass
# Exit a parse tree produced by SQLParser#fieldOptions.
def exitFieldOptions(self, ctx:SQLParser.FieldOptionsContext):
pass
# Enter a parse tree produced by SQLParser#charsetWithOptBinary.
def enterCharsetWithOptBinary(self, ctx:SQLParser.CharsetWithOptBinaryContext):
pass
# Exit a parse tree produced by SQLParser#charsetWithOptBinary.
def exitCharsetWithOptBinary(self, ctx:SQLParser.CharsetWithOptBinaryContext):
pass
# Enter a parse tree produced by SQLParser#ascii.
def enterAscii(self, ctx:SQLParser.AsciiContext):
pass
# Exit a parse tree produced by SQLParser#ascii.
def exitAscii(self, ctx:SQLParser.AsciiContext):
pass
# Enter a parse tree produced by SQLParser#unicode.
def enterUnicode(self, ctx:SQLParser.UnicodeContext):
pass
# Exit a parse tree produced by SQLParser#unicode.
def exitUnicode(self, ctx:SQLParser.UnicodeContext):
pass
# Enter a parse tree produced by SQLParser#wsNumCodepoints.
def enterWsNumCodepoints(self, ctx:SQLParser.WsNumCodepointsContext):
pass
# Exit a parse tree produced by SQLParser#wsNumCodepoints.
def exitWsNumCodepoints(self, ctx:SQLParser.WsNumCodepointsContext):
pass
# Enter a parse tree produced by SQLParser#typeDatetimePrecision.
def enterTypeDatetimePrecision(self, ctx:SQLParser.TypeDatetimePrecisionContext):
pass
# Exit a parse tree produced by SQLParser#typeDatetimePrecision.
def exitTypeDatetimePrecision(self, ctx:SQLParser.TypeDatetimePrecisionContext):
pass
# Enter a parse tree produced by SQLParser#charsetName.
def enterCharsetName(self, ctx:SQLParser.CharsetNameContext):
pass
# Exit a parse tree produced by SQLParser#charsetName.
def exitCharsetName(self, ctx:SQLParser.CharsetNameContext):
pass
# Enter a parse tree produced by SQLParser#collationName.
def enterCollationName(self, ctx:SQLParser.CollationNameContext):
pass
# Exit a parse tree produced by SQLParser#collationName.
def exitCollationName(self, ctx:SQLParser.CollationNameContext):
pass
# Enter a parse tree produced by SQLParser#createTableOptions.
def enterCreateTableOptions(self, ctx:SQLParser.CreateTableOptionsContext):
pass
# Exit a parse tree produced by SQLParser#createTableOptions.
def exitCreateTableOptions(self, ctx:SQLParser.CreateTableOptionsContext):
pass
# Enter a parse tree produced by SQLParser#createTableOptionsSpaceSeparated.
def enterCreateTableOptionsSpaceSeparated(self, ctx:SQLParser.CreateTableOptionsSpaceSeparatedContext):
pass
# Exit a parse tree produced by SQLParser#createTableOptionsSpaceSeparated.
def exitCreateTableOptionsSpaceSeparated(self, ctx:SQLParser.CreateTableOptionsSpaceSeparatedContext):
pass
# Enter a parse tree produced by SQLParser#createTableOption.
def enterCreateTableOption(self, ctx:SQLParser.CreateTableOptionContext):
pass
# Exit a parse tree produced by SQLParser#createTableOption.
def exitCreateTableOption(self, ctx:SQLParser.CreateTableOptionContext):
pass
# Enter a parse tree produced by SQLParser#ternaryOption.
def enterTernaryOption(self, ctx:SQLParser.TernaryOptionContext):
pass
# Exit a parse tree produced by SQLParser#ternaryOption.
def exitTernaryOption(self, ctx:SQLParser.TernaryOptionContext):
pass
# Enter a parse tree produced by SQLParser#defaultCollation.
def enterDefaultCollation(self, ctx:SQLParser.DefaultCollationContext):
pass
# Exit a parse tree produced by SQLParser#defaultCollation.
def exitDefaultCollation(self, ctx:SQLParser.DefaultCollationContext):
pass
# Enter a parse tree produced by SQLParser#defaultEncryption.
def enterDefaultEncryption(self, ctx:SQLParser.DefaultEncryptionContext):
pass
# Exit a parse tree produced by SQLParser#defaultEncryption.
def exitDefaultEncryption(self, ctx:SQLParser.DefaultEncryptionContext):
pass
# Enter a parse tree produced by SQLParser#defaultCharset.
def enterDefaultCharset(self, ctx:SQLParser.DefaultCharsetContext):
pass
# Exit a parse tree produced by SQLParser#defaultCharset.
def exitDefaultCharset(self, ctx:SQLParser.DefaultCharsetContext):
pass
# Enter a parse tree produced by SQLParser#partitionClause.
def enterPartitionClause(self, ctx:SQLParser.PartitionClauseContext):
pass
# Exit a parse tree produced by SQLParser#partitionClause.
def exitPartitionClause(self, ctx:SQLParser.PartitionClauseContext):
pass
# Enter a parse tree produced by SQLParser#partitionDefKey.
def enterPartitionDefKey(self, ctx:SQLParser.PartitionDefKeyContext):
pass
# Exit a parse tree produced by SQLParser#partitionDefKey.
def exitPartitionDefKey(self, ctx:SQLParser.PartitionDefKeyContext):
pass
# Enter a parse tree produced by SQLParser#partitionDefHash.
def enterPartitionDefHash(self, ctx:SQLParser.PartitionDefHashContext):
pass
# Exit a parse tree produced by SQLParser#partitionDefHash.
def exitPartitionDefHash(self, ctx:SQLParser.PartitionDefHashContext):
pass
# Enter a parse tree produced by SQLParser#partitionDefRangeList.
def enterPartitionDefRangeList(self, ctx:SQLParser.PartitionDefRangeListContext):
pass
# Exit a parse tree produced by SQLParser#partitionDefRangeList.
def exitPartitionDefRangeList(self, ctx:SQLParser.PartitionDefRangeListContext):
pass
# Enter a parse tree produced by SQLParser#subPartitions.
def enterSubPartitions(self, ctx:SQLParser.SubPartitionsContext):
pass
# Exit a parse tree produced by SQLParser#subPartitions.
def exitSubPartitions(self, ctx:SQLParser.SubPartitionsContext):
pass
# Enter a parse tree produced by SQLParser#partitionKeyAlgorithm.
def enterPartitionKeyAlgorithm(self, ctx:SQLParser.PartitionKeyAlgorithmContext):
pass
# Exit a parse tree produced by SQLParser#partitionKeyAlgorithm.
def exitPartitionKeyAlgorithm(self, ctx:SQLParser.PartitionKeyAlgorithmContext):
pass
# Enter a parse tree produced by SQLParser#partitionDefinitions.
def enterPartitionDefinitions(self, ctx:SQLParser.PartitionDefinitionsContext):
pass
# Exit a parse tree produced by SQLParser#partitionDefinitions.
def exitPartitionDefinitions(self, ctx:SQLParser.PartitionDefinitionsContext):
pass
# Enter a parse tree produced by SQLParser#partitionDefinition.
def enterPartitionDefinition(self, ctx:SQLParser.PartitionDefinitionContext):
pass
# Exit a parse tree produced by SQLParser#partitionDefinition.
def exitPartitionDefinition(self, ctx:SQLParser.PartitionDefinitionContext):
pass
# Enter a parse tree produced by SQLParser#partitionValuesIn.
def enterPartitionValuesIn(self, ctx:SQLParser.PartitionValuesInContext):
pass
# Exit a parse tree produced by SQLParser#partitionValuesIn.
def exitPartitionValuesIn(self, ctx:SQLParser.PartitionValuesInContext):
pass
# Enter a parse tree produced by SQLParser#partitionOption.
def enterPartitionOption(self, ctx:SQLParser.PartitionOptionContext):
pass
# Exit a parse tree produced by SQLParser#partitionOption.
def exitPartitionOption(self, ctx:SQLParser.PartitionOptionContext):
pass
# Enter a parse tree produced by SQLParser#subpartitionDefinition.
def enterSubpartitionDefinition(self, ctx:SQLParser.SubpartitionDefinitionContext):
pass
# Exit a parse tree produced by SQLParser#subpartitionDefinition.
def exitSubpartitionDefinition(self, ctx:SQLParser.SubpartitionDefinitionContext):
pass
# Enter a parse tree produced by SQLParser#partitionValueItemListParen.
def enterPartitionValueItemListParen(self, ctx:SQLParser.PartitionValueItemListParenContext):
pass
# Exit a parse tree produced by SQLParser#partitionValueItemListParen.
def exitPartitionValueItemListParen(self, ctx:SQLParser.PartitionValueItemListParenContext):
pass
# Enter a parse tree produced by SQLParser#partitionValueItem.
def enterPartitionValueItem(self, ctx:SQLParser.PartitionValueItemContext):
pass
# Exit a parse tree produced by | |
import os, sys
from random import randint, choice
from math import sin, cos, radians
import pygame
from pygame import Rect, Color
from pygame.sprite import Sprite
from gridmap import GridMap
from pathfinder import PathFinder
from simpleanimation import SimpleAnimation
from utils import Timer
from vec2d import vec2d
from widgets import Box, MessageBoard
class Creep(Sprite):
    """ A creep sprite that bounces off walls and changes its
        direction from time to time.
    """
    def __init__(
            self, screen, game, creep_images, explosion_images,
            field, init_position, init_direction, speed):
        """ Create a new Creep.

            screen:
                The screen on which the creep lives (must be a
                pygame Surface object, such as pygame.display)
            game:
                The game object that holds information about the
                game world.
            creep_images:
                A pair of images (as Pygame surface objects) for
                the creep. The first one should point at 3
                o'clock, and the second diagonally between 12
                and 3 o'clock (at 45 degrees above the horizontal
                plane)
            explosion_images:
                A list of image objects for the explosion
                animation.
            field:
                A Rect specifying the 'playing field' boundaries.
                The Creep will bounce off the 'walls' of this
                field.
            init_position:
                A vec2d or a pair specifying the initial position
                of the creep on the screen.
            init_direction:
                A vec2d or a pair specifying the initial direction
                of the creep. Must have an angle that is a
                multiple of 45 degres.
            speed:
                Creep speed, in pixels/millisecond (px/ms)
        """
        Sprite.__init__(self)
        self.screen = screen
        self.game = game
        self.speed = speed
        self.field = field
        # base_image_0/45 hold the original images, un-rotated
        #
        self.base_image_0 = creep_images[0]
        self.base_image_45 = creep_images[1]
        # self.image is the current image representing the creep
        # in the game. It's rotated to the creep's direction.
        #
        self.image = self.base_image_0
        self.explosion_images = explosion_images
        # A vector specifying the creep's position on the screen
        #
        self.pos = vec2d(init_position)
        # prev_pos is kept so _compute_direction can tell when the
        # creep crossed the center of a grid square this frame.
        self.prev_pos = vec2d(self.pos)
        # The direction is a normalized vector
        #
        self.direction = vec2d(init_direction).normalized()
        self.state = Creep.ALIVE
        # Hit points; 15 is also the full width (px) of the health
        # bar drawn in draw().
        self.health = 15
    def is_alive(self):
        # EXPLODING creeps still count as alive (animation running).
        return self.state in (Creep.ALIVE, Creep.EXPLODING)
    def update(self, time_passed):
        """ Update the creep.

            time_passed:
                The time passed (in ms) since the previous update.
        """
        if self.state == Creep.ALIVE:
            # Maybe it's time to change the direction ?
            #
            self._compute_direction(time_passed)
            # Make the creep image point in the correct direction.
            # Note that two images are used, one for diagonals
            # and one for horizontals/verticals.
            #
            # round() on the angle is necessary, to make it
            # exact, despite small deviations that may result from
            # floating-point calculations
            #
            if int(round(self.direction.angle)) % 90 == 45:
                self.image = pygame.transform.rotate(
                    self.base_image_45, -(self.direction.angle + 45))
            elif int(round(self.direction.angle)) % 90 == 0:
                self.image = pygame.transform.rotate(
                    self.base_image_0, -self.direction.angle)
            else:
                # Directions are constrained to multiples of 45 degrees.
                assert False
            # Compute and apply the displacement to the position
            # vector. The displacement is a vector, having the angle
            # of self.direction (which is normalized to not affect
            # the magnitude of the displacement)
            #
            displacement = vec2d(
                self.direction.x * self.speed * time_passed,
                self.direction.y * self.speed * time_passed)
            self.prev_pos = vec2d(self.pos)
            self.pos += displacement
            # When the image is rotated, its size is changed.
            self.image_w, self.image_h = self.image.get_size()
        elif self.state == Creep.EXPLODING:
            if self.explode_animation.active:
                self.explode_animation.update(time_passed)
            else:
                # Explosion animation finished; remove the creep.
                self._die()
        elif self.state == Creep.DEAD:
            pass
    def draw(self):
        """ Blit the creep onto the screen that was provided in
            the constructor.
        """
        if self.state == Creep.ALIVE:
            # The creep image is placed at self.pos. To allow for
            # smooth movement even when the creep rotates and the
            # image size changes, its placement is always
            # centered.
            #
            self.draw_rect = self.image.get_rect().move(
                self.pos.x - self.image_w / 2,
                self.pos.y - self.image_h / 2)
            self.screen.blit(self.image, self.draw_rect)
            # The health bar is 15x4 px.
            # Red background (full bar) with a green overlay whose
            # width equals the remaining health.
            #
            health_bar_x = self.pos.x - 7
            health_bar_y = self.pos.y - self.image_h / 2 - 6
            self.screen.fill( Color('red'),
                (health_bar_x, health_bar_y, 15, 4))
            self.screen.fill( Color('green'),
                ( health_bar_x, health_bar_y,
                    self.health, 4))
        elif self.state == Creep.EXPLODING:
            self.explode_animation.draw()
        elif self.state == Creep.DEAD:
            pass
    def mouse_click_event(self, pos):
        """ The mouse was clicked in pos.
        """
        # A click on the creep's (non-transparent) body damages it.
        if self._point_is_inside(vec2d(pos)):
            self._decrease_health(3)
    #------------------ PRIVATE PARTS ------------------#
    # States the creep can be in.
    #
    # ALIVE: The creep is roaming around the screen
    # EXPLODING:
    #   The creep is now exploding, just a moment before dying.
    # DEAD: The creep is dead and inactive
    #
    (ALIVE, EXPLODING, DEAD) = range(3)
    def _die(self):
        # kill() removes the sprite from all pygame sprite groups.
        self.state = Creep.DEAD
        self.kill()
    def _compute_direction(self, time_passed):
        """ Finds out where to go
        """
        coord = self.game.xy2coord(self.pos)
        if self.game.is_goal_coord(coord):
            self._die()
        else:
            x_mid, y_mid = self.game.coord2xy_mid(coord)
            # The sign-change products below are negative exactly when
            # the creep crossed the center of its grid square between
            # the previous frame and this one -- only then is the
            # direction re-evaluated.
            if ( (x_mid - self.pos.x) * (x_mid - self.prev_pos.x) < 0 or
                 (y_mid - self.pos.y) * (y_mid - self.prev_pos.y) < 0):
                next_coord = self.game.next_on_path(coord)
                # Note coord order: grid coords are (row, col), while
                # vec2d takes (x, y) -- hence the [1]/[0] swap.
                self.direction = vec2d(
                    next_coord[1] - coord[1],
                    next_coord[0] - coord[0]).normalized()
    def _point_is_inside(self, point):
        """ Is the point (given as a vec2d) inside our creep's
            body?
        """
        # Translate the screen point into the image's local coords.
        img_point = point - vec2d(
            int(self.pos.x - self.image_w / 2),
            int(self.pos.y - self.image_h / 2))
        try:
            # Hit only when the pixel's alpha channel is non-zero,
            # i.e. a non-transparent part of the creep image.
            pix = self.image.get_at(img_point)
            return pix[3] > 0
        except IndexError:
            # get_at raises IndexError for points outside the image.
            return False
    def _decrease_health(self, n):
        """ Decrease my health by n (or to 0, if it's currently
            less than n)
        """
        self.health = max(0, self.health - n)
        if self.health == 0:
            self._explode()
    def _explode(self):
        """ Starts the explosion animation that ends the Creep's
            life.
        """
        self.state = Creep.EXPLODING
        # Center the explosion on the creep's current position.
        pos = ( self.pos.x - self.explosion_images[0].get_width() / 2,
                self.pos.y - self.explosion_images[0].get_height() / 2)
        self.explode_animation = SimpleAnimation(
            self.screen, pos, self.explosion_images,
            100, 300)
class GridPath(object):
    """ Represents the game grid and answers shortest-path queries
        on it.

        After initialization, call set_blocked to update the blocked
        state of grid squares, and get_next for the next coordinate
        on the path from a given coordinate to the goal.
    """
    def __init__(self, nrows, ncols, goal):
        self.map = GridMap(nrows, ncols)
        self.goal = goal
        # Maps a coord to the next coord on its path to the goal.
        # Cleared whenever the grid changes (set_blocked).
        self._path_cache = {}
    def get_next(self, coord):
        """ Get the next coordinate to move to from 'coord'
            towards the goal, or None if the goal is unreachable.
        """
        # Compute (and cache) the path on a cache miss.
        if coord not in self._path_cache:
            self._compute_path(coord)
        # _compute_path fills the cache for every coord on the path;
        # a still-missing key means no path to the goal exists.
        return self._path_cache.get(coord)
    def set_blocked(self, coord, blocked=True):
        """ Set the 'blocked' state of a coord
        """
        self.map.set_blocked(coord, blocked)
        # The map changed, so every cached path is now stale.
        self._path_cache = {}
    def _compute_path(self, coord):
        finder = PathFinder(self.map.successors, self.map.move_cost,
                            self.map.move_cost)
        # For each coord along the computed path, cache the coord that
        # follows it; the final coord (the goal) maps to itself.
        path_list = list(finder.compute_path(coord, self.goal))
        for i, step in enumerate(path_list):
            follower = i + 1 if i < len(path_list) - 1 else i
            self._path_cache[step] = path_list[follower]
class Game(object):
# Game parameters
BG_TILE_IMG = 'images/brick_tile.png'
SCREEN_WIDTH, SCREEN_HEIGHT = 580, 500
GRID_SIZE = 20
FIELD_SIZE = 400, 400
CREEP_FILENAMES = [
('images/bluecreep_0.png', 'images/bluecreep_45.png'),
('images/greencreep_0.png', 'images/greencreep_45.png'),
('images/yellowcreep_0.png', 'images/yellowcreep_45.png'),
('images/pinkcreep_0.png', 'images/pinkcreep_45.png'),
]
MAX_N_CREEPS = 50
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode(
(self.SCREEN_WIDTH, self.SCREEN_HEIGHT), 0, 32)
self.tile_img = pygame.image.load(self.BG_TILE_IMG).convert_alpha()
self.tile_img_rect = self.tile_img.get_rect()
self.field_border_width = 4
field_outer_width = self.FIELD_SIZE[0] + 2 * self.field_border_width
field_outer_height = self.FIELD_SIZE[1] + 2 * self.field_border_width
self.field_rect_outer = Rect(20, 60, field_outer_width, field_outer_height)
self.field_bgcolor = Color(109, 41, 1, 100)
self.field_border_color = Color(0, 0, 0)
self.field_box = Box(self.screen,
rect=self.field_rect_outer,
bgcolor=self.field_bgcolor,
border_width=self.field_border_width,
border_color=self.field_border_color)
self.tboard_text = ['The | |
== other.sequence.lower() and self.overhangsEqual(other):
return True
return False
def overhangsEqual(self, other):
if self.bottomLeftOverhang.sequence.lower() == other.bottomLeftOverhang.sequence.lower() and \
self.topLeftOverhang.sequence.lower() == other.topLeftOverhang.sequence.lower() and \
self.bottomRightOverhang.sequence.lower() == other.bottomRightOverhang.sequence.lower() and \
self.topRightOverhang.sequence.lower() == other.topRightOverhang.sequence.lower():
return True
return False
def clone(self):
clone = DNA(self.DNAclass, self.name, self.sequence)
clone.topLeftOverhang = Overhang(self.topLeftOverhang.sequence)
clone.topRightOverhang = Overhang(self.topRightOverhang.sequence)
clone.bottomLeftOverhang = Overhang(self.bottomLeftOverhang.sequence)
clone.bottomRightOverhang = Overhang(self.bottomRightOverhang.sequence)
return clone
    def prettyPrint(self):
        """Print an ASCII rendering of both strands of the molecule,
        truncating the middle of long sequences. Digests additionally
        show their single-stranded overhangs. Always returns 0.
        """
        #prints out top and bottom strands, truncates middle so length is ~100bp
        #example:
        # TTATCG...[1034bp]...GGAA
        # |||| ||||
        # TAGC..............CCTTAA
        if self.DNAclass == 'digest':
            # Digest fragments may have sticky ends: flags mark which of
            # the four overhang corners are occupied.
            (TL,TR,BL,BR) = SetFlags(self)
            if len(self.sequence) > 8:
                # Long sequence: show 4 bases per end, elide the middle.
                trExtra = ''
                brExtra = ''
                if TR:
                    trExtra = self.topRightOverhang.sequence
                if BR:
                    brExtra = self.bottomRightOverhang.sequence
                print "\t"+self.topLeftOverhang.sequence+' '*len(self.bottomLeftOverhang.sequence)+self.sequence[:4]+'.'*3+'['+str(len(self.sequence)-8)+'bp]'+'.'*3+self.sequence[len(self.sequence)-4:]+trExtra
                print "\t"+' '*len(self.topLeftOverhang.sequence)+'|'*4+' '*(10+len(str(len(self.sequence)-8)))+'|'*4
                print "\t"+' '*len(self.topLeftOverhang.sequence)+self.bottomLeftOverhang.sequence+Complement(self.sequence[:4])+'.'*(10+len(str(len(self.sequence)-8)))+Complement(self.sequence[len(self.sequence)-4:])+brExtra
            else:
                # Short sequence: print it in full.
                trExtra = ''
                brExtra = ''
                if TR:
                    trExtra = self.topRightOverhang.sequence
                if BR:
                    brExtra = self.bottomRightOverhang.sequence
                print "\t"+self.topLeftOverhang.sequence+' '*len(self.bottomLeftOverhang.sequence)+self.sequence+trExtra
                print "\t"+' '*len(self.topLeftOverhang.sequence)+'|'*len(self.sequence)
                print "\t"+' '*len(self.topLeftOverhang.sequence)+self.bottomLeftOverhang.sequence+Complement(self.sequence)+brExtra
        else:
            # Non-digest (blunt, fully double-stranded) molecules.
            if len(self.sequence) > 8:
                print "\t"+self.sequence[:4]+'.'*3+'['+str(len(self.sequence)-8)+'bp]'+'.'*3+self.sequence[len(self.sequence)-4:]
                print "\t"+'|'*4+' '*(10+len(str(len(self.sequence)-8)))+'|'*4
                print "\t"+Complement(self.sequence[:4])+'.'*(10+len(str(len(self.sequence)-8)))+Complement(self.sequence[len(self.sequence)-4:])
            else:
                print "\t"+self.sequence
                print "\t"+'|'*len(self.sequence)
                print "\t"+Complement(self.sequence)
        return 0
# Description: BaseExpand() maps degenerate bases for regex generation, taken from BioPython
def BaseExpand(base):
    """BaseExpand(base) -> string.

    Given a degenerate IUPAC base, return the set of concrete bases it
    stands for, e.g.:
    'A' -> 'A'
    'N' -> 'ACGT'
    """
    # dna_alphabet is keyed on uppercase IUPAC codes.
    return dna_alphabet[base.upper()]
# Description: regex() converts a recognition site into a regex, from Biopython
def regex(site):
    """regex(site) -> string.

    Build a regular expression from a (possibly degenerate) DNA
    sequence, e.g. site = 'ABCGN' -> 'A[CGT]CG.'.
    """
    pattern = site
    # Substitute every degenerate code appearing in the original site;
    # concrete bases (ACGT, either case) pass through untouched.
    for symbol in site:
        if symbol in ('N', 'n'):
            # any base: both cases map to the regex wildcard
            pattern = '.'.join(pattern.split('N'))
            pattern = '.'.join(pattern.split('n'))
        elif symbol in ('R', 'Y', 'W', 'M', 'S', 'K', 'H', 'D', 'B', 'V'):
            # ambiguity code: expand to a character class
            expansion = '[' + str(BaseExpand(symbol)) + ']'
            pattern = expansion.join(pattern.split(symbol))
    return pattern
# Description: ToRegex() converts a recognition site into a combined regex, from Biopython
def ToRegex(site, name):
    """Build a sense|antisense alternation regex for a recognition
    site, with named groups '<name>' (sense) and '<name>_as'
    (antisense).
    """
    sense = '(?P<%s>%s)' % (name, regex(site.upper()))
    antisense = '(?P<%s_as>%s)' % (name, regex(reverseComplement(site.upper())))
    return sense + '|' + antisense
# Description: restrictionEnzyme class encapsulates information about buffers, overhangs, incubation / inactivation, end distance, etc.
class restrictionEnzyme(object):
    """Description of a restriction enzyme.

    Captures per-buffer activities, heat-inactivation and incubation
    temperatures, the recognition site (plus a regex-compiled form of
    it), and the cut offsets on the top and bottom strands.
    """
    def __init__(self,name="", buffer1="", buffer2="", buffer3="", buffer4="", bufferecori="", heatinact="", incubatetemp="", recognitionsite="",distance=""):
        self.name = name
        # Activity in NEB buffers 1-4 plus the EcoRI buffer, in that order.
        self.buffer_activity =[buffer1, buffer2, buffer3, buffer4, bufferecori]
        self.inactivate_temp = heatinact
        self.incubate_temp = incubatetemp
        #human-readable recognition site
        self.recognition_site = recognitionsite
        # Required distance from a DNA end for efficient cutting.
        self.endDistance = distance
        #function to convert recog site into regex
        alpha_only_site = re.sub('[^a-zA-Z]+', '', recognitionsite)
        self.alpha_only_site = alpha_only_site
        # print ToRegex(alpha_only_site, name)
        self.compsite = ToRegex(alpha_only_site, name)
        # reach is set True when offsets are given in "top/bottom" notation
        # (the enzyme cuts outside its recognition site).
        self.reach = False
        #convert information about where the restriction happens to an offset on the top and bottom strand
        #for example, BamHI -> 1/5 with respect to the start of the site match
        hasNum = re.compile('(-?\d+/-?\d+)')
        not_completed = 1
        for m in hasNum.finditer(recognitionsite):
            (top, bottom) = m.group().split('/')
            self.top_strand_offset = int(top)
            self.bottom_strand_offset = int(bottom)
            self.reach = True
            not_completed = 0
        # Fallback: a bare '/' inside the written site marks the top-strand
        # cut position; the bottom cut mirrors it from the other end.
        p = re.compile("/")
        for m in p.finditer(recognitionsite):
            if not_completed:
                self.top_strand_offset = int(m.start())
                self.bottom_strand_offset = len(recognitionsite) - 1 - self.top_strand_offset
    def prettyPrint(self):
        # Python 2 print statement — this module targets Python 2.
        print "Name: ", self.name, "Recognition Site: ", self.recognition_site
    def find_sites(self, DNA):
        """Return (start, end, 'sense'|'antisense') tuples for every match
        of this enzyme's site in *DNA*, wrapping the origin if circular."""
        seq = DNA.sequence
        # compsite is "sense|antisense"; split back into the two patterns.
        (fwd, rev) = self.compsite.split('|')
        fwd_rease_re = re.compile(fwd)
        rev_rease_re = re.compile(rev)
        indices = []
        seen = {}
        if DNA.topology == "circular":
            # Extend past the origin so sites spanning it are still found.
            searchSequence = seq.upper() + seq[0:len(self.recognition_site)-2]
        else:
            searchSequence = seq.upper()
        for m in fwd_rease_re.finditer(searchSequence):
            span = m.span()
            # Map coordinates back onto the circular sequence.
            span = (span[0] % len(seq), span[1] % len(seq))
            seen[span[0]] = 1
            span = span + ('sense',)
            indices.append(span)
        for m in rev_rease_re.finditer(searchSequence):
            span = m.span()
            try:
                # Skip spans whose start was already recorded on the sense
                # strand. NOTE(review): this span is *not* reduced modulo
                # len(seq) like the sense spans above — confirm intended.
                seen[span[0]]
            except:
                span = span + ('antisense',)
                indices.append(span)
        return indices
# Description: phosphorylates 5' end of DNA molecule, allowing blunt end ligation
# see http://openwetware.org/wiki/PNK_Treatment_of_DNA_Ends
def TreatPNK(inputDNAs):
    """Phosphorylate the 5' end of every DNA molecule in *inputDNAs*.

    Mimics T4 PNK treatment, enabling blunt-end ligation; see
    http://openwetware.org/wiki/PNK_Treatment_of_DNA_Ends
    The molecules are modified in place; the same list is returned
    for convenience.
    """
    for dna in inputDNAs:
        dna.phosphorylate()
    return inputDNAs
# Description: DigestBuffer() function finds the optimal digestBuffer
# todo: If Buffer 2 > 150, return Buffer 2 and list of activity values, else, return buffer 1, 3, or 4 (ignore EcoRI)
# return format will be list, [rec_buff, [buff1_act, buff2_act...buff4_Act]]
def DigestBuffer(*str_or_list):
    """Pick the best restriction buffer for a set of enzymes.

    Accepts enzyme names as positional arguments, sums their per-buffer
    activities element-wise ([buff1, buff2, buff3, buff4, EcoRI]) and
    returns [buffer_choice, activity_totals].

    Buffer 2 is preferred when the enzymes average >75% activity in it;
    otherwise the buffer with the highest total wins.

    Fixes: removed the unused `best_buff` local and consolidated the
    duplicated `ret.append(best_buff_score)` calls.
    """
    best_buff_score = [0, 0, 0, 0, 0]
    enzdic = EnzymeDictionary()
    num_enz = 0
    for e in str_or_list:
        enz = enzdic[e]
        # Element-wise accumulation of this enzyme's buffer activities.
        best_buff_score = [x + int(y) for x, y in zip(best_buff_score, enz.buffer_activity)]
        num_enz = num_enz + 1
    ret = []
    if best_buff_score[1] > (75 * num_enz):
        ret.append(2)
    else:
        # NOTE(review): this appends the 0-based *index* of the best
        # buffer, while the branch above appends the literal buffer
        # number 2 — preserved from the original; confirm the convention.
        ret.append(best_buff_score.index(max(best_buff_score)))
    ret.append(best_buff_score)
    return ret
#accepts two primers and list of input template DNAs
#todo:implement this with PCR!
def SOERoundTwo(primer1, primer2, templates):
    """Second round of splicing-by-overlap-extension PCR.

    TODO: implement with PCR(); currently a stub that returns 0.
    """
    return 0
def SOE(list_of_primers, templates):
    """Splicing by overlap extension.

    Assumes primers arrive ordered outer, inner_rev, inner_fwd, outer;
    intended to run two PCRs with list[0]/[1] and list[2]/[3].
    TODO: implement; currently a stub that returns 0.
    """
    return 0
def Primers(product, template):
    """Design primers that convert *template* into *product*.

    Thin entry point over the recursive worker rPrimers(), starting at
    recursion depth 0.
    """
    return rPrimers(product, template, 0)
def rPrimers(product, template, baseCase):
    """Recursive worker for Primers().

    Locates the mutated region by longest-common-substring alignment of
    product against template on both ends, then designs wobble primers
    (61-100 bp changes) or EIPCR primers (1-60 bp changes). On failure
    it re-aligns both plasmids around the best shared region and
    recurses; *baseCase* counts recursion depth, capped at 2.

    Returns (primers, enzyme) on success, () when alignment fails, or
    ([], '') when EIPCR design is impossible.
    """
    # Annealing region design criteria:
    # TODO: incorporate these somehow
    # In general, the 3' base of your oligos should be a G or C
    # The overall G/C content of your annealing region should be between 50 and 65%
    # The overall base composition of the sequences should be balanced (no missing bases, no excesses of one particular base)
    # The length of your sequence can be modified to be around 18 and 25 bp
    # The sequence should appear random. There shouldn't be long stretches of a single base, or large regions of G/C rich sequence and all A/T in other regions
    # There should be little secondary structure. Ideally the Tm for the oligo should be under 40 degrees.
    try:
        # Die after 2 rounds of recursion
        if baseCase == 2:
            return ()
        # Compute "forward" and "backwards" LCS (i.e. on both sides of a mutation)
        fwdMatch = LCS(template.sequence.upper()+'$', product.sequence.upper())
        (fwdMatchCount, forwardMatchIndicesTuple, forwardPrimerStub) = fwdMatch.LCSasRegex(template.sequence.upper()+'$', product.sequence.upper(), 1)
        revMatch = LCS(reverse(template.sequence.upper())+'$', reverse(product.sequence.upper()))
        (revMatchCount, reverseMatchIndicesTuple, revPrimerStub) = revMatch.LCSasRegex(reverse(template.sequence.upper())+'$', reverse(product.sequence.upper()), 1)
        fFlag = False
        if not len(forwardMatchIndicesTuple):
            # No forward homology: pretend the match sits at the very end.
            fMI = (len(product.sequence), len(product.sequence))
            fFlag = True
        else:
            fMI = forwardMatchIndicesTuple
        if not len(reverseMatchIndicesTuple):
            if fFlag:
                # neither side matches
                raise Exception('For primer design, no detectable homology on terminal ends of product and template sequences.')
            rMI = (0, 0)
        else:
            # Convert reverse-strand coordinates back to forward-strand ones.
            rMI = (0 , len(product.sequence) - reverseMatchIndicesTuple[0])
        # wrap around mutation case
        if not fMI[0] > rMI[1]:
            diffLen = fMI[0] + len(product.sequence) - rMI[1]
            insert = product.sequence[rMI[1]:] + product.sequence[:fMI[0]]
        else:
            diffLen = fMI[0] - rMI[1]
            insert = product.sequence[rMI[1]:fMI[0]]
        if 60 < diffLen <= 100:
            primers, enz = DesignWobble(product, insert, (rMI[1], fMI[0]))
        elif 1 <= diffLen <= 60:
            primers, enz = DesignEIPCR(product, insert, (rMI[1], fMI[0]), template)
        # NOTE(review): diffLen == 0 or > 100 leaves `primers` unbound, so
        # the line below raises NameError, which the bare except treats as
        # a realignment failure — confirm this is intended.
        if primers[0] == 0:
            print '*Primer Warning*: EIPCR primers could not be designed for given template and product. Try removing BsaI, BseRI, and/or BsmBI sites from template plasmid. Returning null data.'
            return [], ''
        # test the PCR --> will return an exception if they don't anneal
        # TODO: FIX THIS / ERR HANDLING
        amplifies = PCR(primers[0], primers[1], template)
        # if it amplifies up ok, then return the primers
        return primers, enz
    # may be misaligned ==> realign and recurse
    except:
        # NOTE(review): bare except — if the failure occurred before
        # fwdMatchCount was assigned, the reference below raises NameError.
        baseCase += 1
        # If you had an LCS on the fwd direction, re-align using that one
        if fwdMatchCount:
            myLCS = product.sequence[forwardMatchIndicesTuple[0]:forwardMatchIndicesTuple[1]]
            newProduct = DNA('plasmid', product.name, product.sequence[forwardMatchIndicesTuple[0]:] + product.sequence[:forwardMatchIndicesTuple[0]])
            match = re.search(myLCS.upper(), template.sequence.upper())
            if match:
                startSite = match.start()
                newTemplate = DNA('plasmid', template.name, template.sequence[startSite:]+template.sequence[:startSite])
            else:
                return ()
        # If you had an LCS in the rev direction, re-align using that one
        elif revMatchCount:
            myLCS = reverse(reverse(product.sequence)[reverseMatchIndicesTuple[0]:reverseMatchIndicesTuple[1]])
            myMatch = re.search(myLCS.upper(), product.sequence.upper())
            startIndex = myMatch.start()
            newProduct = DNA('plasmid', product.name, product.sequence[startIndex:] + product.sequence[:startIndex])
            match = re.search(myLCS.upper(), template.sequence.upper())
            if match:
                startSite = match.start()
                newTemplate = DNA('plasmid', template.name, template.sequence[startSite:]+template.sequence[:startSite])
            else:
                return ()
        else:
            return ()
        return rPrimers(newProduct, newTemplate, baseCase)
def getAnnealingRegion(template, fwd):
    """Return the shortest prefix of *template* whose Tm reaches 60 C.

    fwd: truthy to scan the forward strand; otherwise the reverse
    complement is scanned. Templates of 10 bp or fewer are considered
    too short and yield ''.
    """
    if len(template) <= 10:
        return ''
    if not fwd:
        template = reverseComplement(template)
    currentRegion = ''
    # Grow the candidate region one base at a time until it is hot enough.
    for end in range(len(template)):
        currentRegion = template[:end]
        if primerTm(currentRegion) >= 60:
            break
    return currentRegion
def chooseReachover(plasmid):
EnzDict = EnzymeDictionary()
bsaI = EnzDict['BsaI']; bsaMatch = bsaI.find_sites(plasmid); bsaFlag = len(bsaMatch) > 0
bsmBI = EnzDict['BsmBI']; bsmMatch = bsmBI.find_sites(plasmid); bsmFlag = len(bsmMatch) > 0
bseRI = EnzDict['BseRI']; bseMatch = bseRI.find_sites(plasmid); bseFlag = len(bseMatch) > 0
if not bsaFlag:
# use BsaI
tail = "taaattGGTCTCA"
return bsaI, tail, 2
if not bsmFlag:
# | |
'e': e,
'False': False,
'inf': inf,
'None': None,
'True': True,
'pi': pi,
}
def add_binop(name, function, precedence):
    '''Register a new binary operator for the expression evaluator.

    name: a string not already in ufunctions or binops. The name also
        shouldn't contain any existing operator/function name as a
        substring, or tokenization might not work.
    function: a function that takes exactly two required arguments.
    precedence: where the function should sit on the precedence_map.

    Returns: None. Maps name to function, and function to precedence.

    NOTES:
    - Choose precedence carefully! If you're not sure, you should probably
      make it as high as possible, so that it resolves before any other
      operations do.
    '''
    # (Removed: dead commented-out regex-rebuilding scaffolding.)
    binops[name] = function
    precedence_map[function] = precedence
def add_ufunction(name, function):
    '''Register unary *function* under *name* for the expression evaluator.

    name: a string not already in ufunctions or binops, and not containing
        any such name as a substring (tokenization constraint).
    function: a function that takes exactly one required argument.

    Returns None; equivalent to ufunctions[name] = function.
    '''
    ufunctions[name] = function
def funcname(func):
    '''Return a dotted name for *func*, usable with eval(string).

    Builtins, classes and __main__ members come back bare; everything
    else is prefixed with its module.
    '''
    module = func.__module__
    # Extract the bare name from repr(), which covers both functions
    # ("<function f at 0x...>") and classes ("<class 'C'>").
    name = re.findall("<.*(?:function|class) \'?([a-zA-Z_\d\.]+)\'?.*>", repr(func))[0]
    if isinstance(func, type) or module in ('builtins', '__main__'):
        return name
    return '.'.join((module, name))
def parse_token(tok):
    '''Convert a single token string into its runtime value.

    Resolution order: float literal, int literal, IntRange slice
    notation, binary operator, unary function, named constant, bare
    word (variable name), backtick-quoted string literal.
    Raises ComputeError for anything else.
    '''
    if re.match('\d*\.\d+$', tok):
        return float(tok)
    if re.match('\d+$', tok):
        return int(tok)
    if re.match('-?\d*(?::-?\d*)?:-?\d*$', tok):
        return IntRange.fromstring(tok)
    if tok in binops:
        return binops[tok]
    if tok in ufunctions:
        return ufunctions[tok]
    if tok in constants:
        return constants[tok]
    if re.match('[a-zA-Z]+$', tok):
        return tok
    if tok[0] == '`' and tok[-1] == '`':
        # Backtick-quoted literal; unescape embedded backticks.
        return tok[1:-1].replace('\\`', '`')
    raise ComputeError
def parse_safe_token(tok):
    '''Restricted variant of parse_token for untrusted input.

    Resolves float/int literals, IntRange notation, the *safe* operator
    and function tables, named constants and bare variable names; no
    backtick string literals. Raises ComputeError otherwise.
    '''
    if re.match('\d*\.\d+$', tok):
        return float(tok)
    if re.match('\d+$', tok):
        return int(tok)
    if re.match('-?\d*(?::-?\d*)?:-?\d*$', tok):
        return IntRange.fromstring(tok)
    if tok in safe_binops:
        return safe_binops[tok]
    if tok in safe_ufunctions:
        return safe_ufunctions[tok]
    if tok in constants:
        return constants[tok]
    if re.match('[a-zA-Z]+$', tok):
        return tok
    raise ComputeError
def eqn_eval(eqn):
    '''Use builtin eval() to evaluate a mathematical expression with at
    most one variable name.

    Returns a number if there is no variable name, otherwise a lambda of
    that single variable. It's not *guaranteed* to be safe, because you
    can use letters in the expression, but you probably can't do anything
    unsafe with only numbers and one word without underscores.

    BUG FIX: the original asserted on len(','.join(names)) — the length
    of the joined *string* — so any multi-letter variable name (e.g.
    "foo") was wrongly rejected. Count distinct names instead.
    '''
    names = set(re.findall('[a-zA-Z]+', eqn))
    assert len(names) <= 1, \
        'Cannot evaluate an expression with more than one variable.'
    if not names:
        return eval(eqn)
    return eval("lambda {var}: {eqn}".format(var=names.pop(), eqn=eqn))
def apply_uminus(expr, varnames):
    '''Apply unary minus to *expr*.

    expr may be a variable name from *varnames*, a compiled function of
    the args vector, an IntRange, or a plain number.
    '''
    if expr in varnames:
        idx = varnames.index(expr)
        return lambda args: -args[idx]
    if isinstance(expr, function):
        return lambda args: -expr(args)
    if isinstance(expr, IntRange):
        # The tokenizer emits a leading '-' as its own token, so a negative
        # start bound arrives here rather than inside the IntRange literal;
        # negate only the start.
        return IntRange(-expr.start, expr.stop, expr.step)
    return -expr
def apply_ufunction(ufunction, expr, varnames):
    '''Apply single-argument *ufunction* to *expr*.

    ufunction: a function that accepts one argument.
    expr: a variable name from *varnames*, a compiled function of the
        args vector, None (call with no argument), or a plain value.
    varnames: the equation's variable names, sorted in ASCII order.

    Returns the (possibly deferred) result of applying ufunction.
    '''
    if expr in varnames:
        idx = varnames.index(expr)
        return lambda args: ufunction(args[idx])
    if isinstance(expr, function):
        return lambda args: ufunction(expr(args))
    if expr is None:
        return ufunction()
    return ufunction(expr)
def get_precedence(evald_tok):
    '''Look up the precedence of an evaluated token.

    Returns None both for tokens with no mapped precedence and for
    unhashable tokens (which cannot be operators at all).
    '''
    try:
        return precedence_map.get(evald_tok)
    except TypeError as ex:
        if 'unhashable' in repr(ex):
            return None
        # BUG FIX: re-raise with the original traceback intact; the
        # original `raise TypeError(ex)` wrapped and lost the chain.
        raise
# class ResoBin:
# def __init__(self, new_elt, binop, old_elt, varnames):
# self.new_elt = new_elt
# self.binop = binop
# self.old_elt = old_elt
# self.varnames = varnames
# def __call__(self, args):
# return self.func(args)
# def __str__(self):
# return f"ResoBin({repr(self.new_elt)}, {funcname(self.binop)}, {repr(self.old_elt)})"
# __repr__ = __str__
def resolve_binop(new_elt, binop, old_elt, varnames):
    '''Combine two operands with the binary operator *binop*.

    new_elt, old_elt: each is either a number, a string naming a
    variable in *varnames*, or a function taking the args vector (one
    positional value per variable). A variable name behaves like
    lambda args: args[index-of-name].

    Returns:
        A function of the args vector when either operand is (or
        depends on) a variable; if both are plain values, a number.

    Examples (varnames=['x']; returned functions take the args vector):
        >>> fun1 = resolve_binop(2, operator.add, 'x', ['x'])
        >>> fun1([2])
        4
        >>> resolve_binop(2.0, operator.pow, 3, ['x'])
        8.0
    '''
    if binop == map:
        # map is special-cased: its first operand names a *function*
        # (looked up in ufunctions) rather than a value.
        # TODO: make it so that this same logic applies automatically to any binop
        # that takes a function as an argument but returns a scalar.
        if new_elt in ufunctions:
            if old_elt in varnames:
                ind_old = varnames.index(old_elt)
                return lambda args: map(ufunctions[new_elt], args[ind_old])
            elif isinstance(old_elt, function):
                return lambda args: map(ufunctions[new_elt], old_elt(args))
            else:
                return map(ufunctions[new_elt], old_elt)
        elif new_elt in varnames:
            ind_new = varnames.index(new_elt)
            if old_elt in varnames:
                ind_old = varnames.index(old_elt)
                return lambda args: binop(args[ind_new], args[ind_old])
            elif isinstance(old_elt, function):
                return lambda args: binop(args[ind_new], old_elt(args))
            else:
                return lambda args: binop(args[ind_new], old_elt)
        elif isinstance(new_elt, function):
            if old_elt in varnames:
                ind_old = varnames.index(old_elt)
                return lambda args: binop(new_elt(args), args[ind_old])
            elif isinstance(old_elt, function):
                return lambda args: binop(new_elt(args), old_elt(args))
            else:
                return lambda args: binop(new_elt(args), old_elt)
        else: # new_elt is a string representing a function to be computed
            # NOTE(review): `fun` (the compute() result) is passed to binop
            # un-called in the deferred branches below — confirm intended.
            fun = compute(new_elt)
            if old_elt in varnames:
                ind_old = varnames.index(old_elt)
                return lambda args: binop(fun, args[ind_old])
            elif isinstance(old_elt, function):
                return lambda args: binop(fun, old_elt(args))
            else:
                return binop(fun, old_elt)
    # General (non-map) path: dispatch on what kind of operand each side is.
    if new_elt in varnames:
        ind_new = varnames.index(new_elt)
        if old_elt in varnames:
            ind_old = varnames.index(old_elt)
            return lambda args: binop(args[ind_new], args[ind_old])
        elif isinstance(old_elt, function):
            return lambda args: binop(args[ind_new], old_elt(args))
        else:
            return lambda args: binop(args[ind_new], old_elt)
    elif isinstance(new_elt, function):
        if old_elt in varnames:
            ind_old = varnames.index(old_elt)
            return lambda args: binop(new_elt(args), args[ind_old])
        elif isinstance(old_elt, function):
            return lambda args: binop(new_elt(args), old_elt(args))
        else:
            return lambda args: binop(new_elt(args), old_elt)
    else:
        if old_elt in varnames:
            ind_old = varnames.index(old_elt)
            return lambda args: binop(new_elt, args[ind_old])
        elif isinstance(old_elt, function):
            return lambda args: binop(new_elt, old_elt(args))
        else:
            # Both operands are plain values: compute eagerly.
            return binop(new_elt, old_elt)
def tokenize(eqn):
    """Split expression string *eqn* into tokens and validate brackets.

    Splits on the module-level op_regex, drops whitespace-only tokens,
    and raises ComputeError for unmatched '(', ')', '[' or ']',
    reporting the index of the offending token. A single operator token
    with no operands is rejected via assert.
    """
    tokens = op_regex.split(eqn.lstrip())
    tokens = [tok for tok in tokens if tok.strip()!='']
    # Track where the first still-unmatched bracket of each kind opened,
    # so the error can point at it if the equation ends unbalanced.
    first_open_sqbk, first_open_paren = None, None
    open_paren_count, open_sqbk_count = 0, 0
    for ii, tok in enumerate(tokens):
        if tok.strip() == '(':
            open_paren_count += 1
            if first_open_paren is None:
                first_open_paren = ii
        if tok.strip() == ')':
            open_paren_count -= 1
            if open_paren_count < 0:
                # More ')' than '(' so far.
                raise ComputeError("Unmatched ')'", tokens, ii)
            if open_paren_count == 0:
                first_open_paren = None
        if tok.strip() == '[':
            open_sqbk_count += 1
            if first_open_sqbk is None:
                first_open_sqbk = ii
        if tok.strip() == ']':
            open_sqbk_count -= 1
            if open_sqbk_count < 0:
                raise ComputeError("Unmatched ']'", tokens, ii)
            if open_sqbk_count == 0:
                first_open_sqbk = None
    if open_paren_count > 0:
        raise ComputeError("Unmatched '('", tokens, first_open_paren)
    if open_sqbk_count > 0:
        raise ComputeError("Unmatched '['", tokens, first_open_sqbk)
    if len(tokens) == 1:
        assert tokens[0] not in binops, \
            "Cannot resolve binary operator {} without inputs".format(tokens[0])
    ComputeLogger.debug("tokens = {}".format(tokens))
    return tokens
def evaluate_tokens(tokens, varnames, safe = False):
if safe:
parse_token = parse_safe_token
binops = safe_binops
ufunctions = safe_ufunctions
else:
parse_token = globals()['parse_token']
binops = globals()['binops']
ufunctions = globals()['ufunctions']
evald_tokens = []
last_tok = None
uminus = False
parens_opened = 0
paren_expressions = []
last_num_ind = None
last_func_ind = None
ufunction, ufuncname = None, None
ii = 0
while ii < len(tokens):
tok = tokens[ii].strip()
if tok == '[' and not safe: # square brackets for slicing and indexing
new_expr = []
parens_opened += 1
for jj in range(ii+1, len(tokens)):
if tokens[jj].strip() == ']':
parens_opened -= 1
if parens_opened == 0:
last_num_ind = len(evald_tokens)
if jj-ii > 2:
paren_evald_toks = evaluate_tokens(new_expr, varnames)
paren_expr = resolve_big_stack(paren_evald_toks, varnames)
elif jj-ii == 2:
# square brackets containing only one token, e.g.
# "x[1]"
paren_expr = parse_token(tokens[ii+1])
else: # parentheses enclosing nothing
paren_expr = None
if uminus: # eqn is something like "-x[0]"
evald_tokens.append(neg_getitem)
uminus = False
else:
evald_tokens.append(getitem)
evald_tokens.append(paren_expr)
ii = jj+1
break
else:
new_expr.append(tokens[jj])
elif tokens[jj].strip() == '[':
parens_opened += 1
new_expr.append(tokens[jj])
else:
new_expr.append(tokens[jj])
last_tok = evald_tokens[-1]
continue
if tok == '(':
tried_calling_uncallable = False
try:
if (last_tok is not | |
# Repository: Golder06/Goldbot
import asyncio
import calendar
import io
import os
import random
from datetime import datetime, timedelta
import discord
import googletrans
import wikipedia
from discord.ext import commands, tasks
from googletrans import Translator
from iso639 import languages
import botutilities
import googlesearch
from morsecode import MorseCode
from oxforddict import get_definition
# from PIL import Image
status_list = ('My default prefix is g!.', "If I break, contact Golder06#7041.", 'To see my commands, type g!help.')
change_loop_interval = random.randint(1, 90)
def get_dict_key(dictionary, value):
    """Reverse lookup: return the first key in *dictionary* mapped to *value*.

    Falls back to returning *value* itself when no key maps to it, which
    lets callers pass through strings that are already keys (e.g.
    language codes in the googletrans tables).

    Fixes: the original built two parallel lists and re-scanned them
    with .index() inside a loop (redundant O(n^2) work); a single pass
    over items() is equivalent.
    """
    for key, mapped_value in dictionary.items():
        if mapped_value == value:
            return key
    return value
def get_emoji_list(emojis):
    """Return *emojis* as a new list sorted ascending by emoji name.

    Fixes: the original hand-rolled an O(n^2) exchange sort; sorted()
    with a key is the idiomatic O(n log n) equivalent.
    """
    return sorted(emojis, key=lambda emoji: emoji.name)
# TODO: Move stuff from here to new separate cogs.
class Commands(commands.Cog):
    def __init__(self, bot):
        """Store the bot reference and pre-build helpers used by the commands."""
        self.activity = None  # current "Playing ..." status string
        self.bot = bot
        self.log = None  # log channel, resolved in on_ready
        self.loop_interval = None
        self.morse = MorseCode()  # encoder/decoder for the morse command
        self.my_guild = None  # home guild, resolved in on_ready
        self.translator = Translator()
        self.lang_dict = googletrans.LANGUAGES  # code -> language name
        self.emoji_list = None  # sorted guild emojis, filled in on_ready
    async def reaction_decision(self, ctx, check_str):
        """Ask a yes/no question via reactions and await the answer.

        Sends *check_str*, seeds it with a check-mark and a cross
        reaction, then waits for a qualifying user to react. Returns
        True for the check mark, False for the cross.
        """
        check_message = await ctx.send(check_str)
        await check_message.add_reaction("\U00002705")
        await check_message.add_reaction("\U0000274c")
        def check(reaction_checked, reaction_user):
            # NOTE(review): due to and/or precedence this reads as
            # author-match OR (admin AND author-is-bot) — confirm the
            # intended permission rule.
            user_check = reaction_user.id == ctx.author.id or reaction_user.guild_permissions.administrator and ctx.author.bot
            return user_check and reaction_checked.message == check_message and str(reaction_checked.emoji) in (
                "\U00002705", "\U0000274c")
        reaction, user = await self.bot.wait_for('reaction_add', check=check)
        if str(reaction.emoji) == "\U00002705":
            return True
        elif str(reaction.emoji) == "\U0000274c":
            return False
    @tasks.loop(minutes=change_loop_interval)
    async def change_status_task(self):
        """Periodically rotate the bot's presence to a random status line.

        NOTE(review): the loop interval is frozen at the value
        change_loop_interval had when the class body was evaluated;
        re-randomizing the global below does not reschedule the running
        task — confirm whether that is intended.
        """
        global change_loop_interval
        self.activity = random.choice(status_list)
        await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(self.activity))
        time_now = datetime.now()
        print(f'Status changed to "{self.activity}" ({time_now.strftime("%H:%M")}).')
        change_loop_interval = random.randint(1, 90)
        print(
            f"Next status change in {change_loop_interval} minutes ({(time_now + timedelta(minutes=change_loop_interval)).strftime('%H:%M')}).")
    @commands.Cog.listener()
    async def on_ready(self):
        """Resolve config-dependent handles once the gateway is ready."""
        self.log = self.bot.get_channel(botutilities.config["log_channel"])
        self.my_guild = self.bot.get_guild(botutilities.config["guild_id"])
        # Cache the home guild's emojis sorted by name.
        self.emoji_list = get_emoji_list(self.my_guild.emojis)
        print(f'"{self.bot.user.display_name}" is ready.')
        print(f"Created by Golder06#7041.")
        await self.log.send("Bot Started.")
        self.change_status_task.start()
@commands.command(name='8ball')
async def _8ball(self, ctx, *, question):
ball_predicts = ("It is certain.", "It is decidedly so.", "Without a doubt.", "Yes - definitely.",
"You may rely on it.", "As I see it, yes.", "Most likely.", "Outlook good.", "Yes.",
"Signs point to yes.", "Reply hazy, try again.", "Ask again later.",
"Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.",
"Don't count on it.", "My reply is no.", "My sources say no.", "Outlook not so good.",
"Very doubtful.")
if question.endswith("?"):
if question.strip() == "?":
prediction = "That's not a question, that's a question sign..."
elif "love" in question.lower(): # :tr:
prediction = random.choice(ball_predicts[-5:])
else:
prediction = random.choice(ball_predicts)
else:
prediction = "That's not a question..."
await ctx.send(f'Question: {question}\nThe ***:8ball:BALL*** says: {prediction}')
"""
@commands.command()
async def color(self, ctx, hex_code):
if not hex_code.startswith("#"):
hex_code = f"#{hex_code}"
try:
int(hex_code[1:], 16)
except ValueError:
await ctx.send(f"Error: `{hex_code}` is not a valid Hex Color")
return
if len(hex_code) > 7:
await ctx.send(f"Error: `{hex_code}` is not a valid Hex Color")
return
embed = botutilities.embed_template(ctx, footer=hex_code)
await ctx.send()
"""
    @commands.check(botutilities.is_not_report_banned)
    @commands.command(aliases=('bugreport', 'reportbug', 'bug-report', 'report-bug'))
    async def report(self, ctx, *, message):
        """Forward a user bug report (plus any attachments) to the dev channel."""
        report_channel = self.bot.get_channel(920770517424816179)
        if len(ctx.message.attachments) > 0:
            attachments = []
            for attachment in ctx.message.attachments:
                # Re-upload each attachment, preserving spoiler flags.
                spoiler = attachment.is_spoiler()
                attachments.append(await attachment.to_file(spoiler=spoiler))
        else:
            attachments = None
        embed = botutilities.embed_template(ctx, title=f"{ctx.author.name}#{ctx.author.discriminator}",
                                            description=f">>> {message}", footer=f"User ID: {ctx.author.id}",
                                            add_def_footer=False, icon=ctx.author.display_avatar.url)
        await report_channel.send(
            f'{botutilities.ping_all_bot_owners()}\nReported from "{ctx.guild.name}" ({ctx.guild.id}):', embed=embed)
        # NOTE(review): this send also runs when there are no attachments
        # (files=None, no content) — confirm discord.py accepts that.
        await report_channel.send(files=attachments)
        await ctx.send(f"Bug Report sent successfully")
@commands.command()
async def choose(self, ctx, *, options):
divided_options: list = options.split(",")
if len(divided_options) >= 2:
for option in divided_options:
if not option:
divided_options.remove(option)
await ctx.send(f"Gøldbot chooses: `{random.choice(divided_options).strip()}`.")
else:
await ctx.send(
f"I can't just choose between {len(divided_options)} choice. *(to divide the choices you should put a comma between them)*.")
@commands.command(aliases=("coinflip", "flipcoin"))
async def flip(self, ctx):
await ctx.send(f"-Flip!-\nIt landed on {random.choice(('heads', 'tails'))}!")
@commands.command(aliases=("rolldice", "diceroll", "dice"))
async def roll(self, ctx, faces=6.0):
if type(faces) is float and faces != int(faces):
await ctx.send(
f"Error: You can't roll a die with a non-whole amout of faces, you {faces}-dimensional being!")
return
if faces > 2:
try:
faces = int(faces)
except ValueError:
await ctx.send("Error: You can't roll a die with a non-numeric amount of faces...")
result = random.randint(1, faces)
print(result)
if faces <= 6:
result = discord.utils.get(self.my_guild.emojis, name=f"Dice{result}")
await ctx.send(f"Rolled a d{faces}.\nIt landed on **{result}**!")
elif faces == 2:
await ctx.send(f"... A 2 sided die is a coin... Use the `{ctx.prefix}flip` command.")
elif faces <= 1:
await ctx.send("... You serious?")
@roll.error
async def roll_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send("Error: You can't roll a die with a non-numeric amount of faces...")
    @commands.command()
    async def say(self, ctx, message, channel=None):
        """Repeat *message* in *channel* (default: current channel),
        deleting the invoking command message.

        *channel* may be a #mention or a bare channel name; permission
        and same-guild checks are applied against the invoking author.
        """
        if len(message.strip()) == 0:
            await ctx.send("Error: You can't send an empty message.")
            return
        if channel is None:
            channel = ctx.channel
        else:
            if channel.startswith("<#"):
                # Mention form "<#123...>": strip the angle brackets, then
                # drop the leading '#' before parsing the numeric ID.
                channel = self.bot.get_channel(int(channel.strip("<>")[1:]))
            else:
                channel = discord.utils.get(ctx.guild.text_channels, name=channel)
            if channel is None:
                await ctx.send("Error: Channel doesn't exist.")
                return
            if not channel.permissions_for(ctx.author).send_messages:
                await ctx.send(f"Error: You don't have permissions to talk in {channel.mention}")
                return
            if channel.guild.id != ctx.guild.id:
                await ctx.send(f"Error: {channel.mention} is not in {ctx.guild.name}")
                return
        # Anti-self-deprecation easter egg: rewrite "I am stupid/dumb".
        if message.lower().startswith("i am") or message.lower().startswith("i'm"):
            if "stupid" in message.lower():
                message = f"{ctx.author.mention} is stupid."
            elif "dumb" in message.lower():
                message = f"{ctx.author.mention} is dumb."
        await discord.Message.delete(ctx.message, delay=0)
        await channel.send(message)
        try:
            # googlesearch drops a cookie file in the CWD; tidy it up.
            os.remove(".google-cookie")
        except FileNotFoundError:
            pass
@commands.command(aliases=('definition',))
async def dictionary(self, ctx, *, query):
message = await ctx.send("Getting definition...")
results = get_definition(query)
if results != "404 Error":
lex_entries = results["lexicalEntries"]
entries = [lex_entry["entries"] for lex_entry in lex_entries]
entries = [x for y in entries for x in y]
senses = [entry["senses"] for entry in entries]
senses = [x for y in senses for x in y]
definitions = [definition['definitions'][0] for definition in senses]
emb = botutilities.embed_template(ctx, title=f'Definition of "{query.title()}":',
description=f"{definitions[0].capitalize()}")
else:
emb = botutilities.embed_template(ctx, title="Error:",
description=f'Definition for "{query.title()}" not found.')
await message.edit(content="", embed=emb)
@commands.command(aliases=("googleit", "googlesearch", "search"))
async def google(self, ctx, *, search_request):
message = await ctx.send(f"Searching for `{search_request}`...")
async with ctx.typing():
i = 1
output_str = ""
for url in googlesearch.search(search_request, stop=10):
if i < 10:
output_str += f"`{i}.` **[{discord.utils.escape_markdown(url.title)}](<{url.link}>)**\n"
else:
output_str += f"`{i}.` **[{discord.utils.escape_markdown(url.title)}](<{url.link}>)**\n"
i += 1
if i == 1:
output_str = "**No results found.**"
embed = botutilities.embed_template(ctx, "Google", output_str[0:-1],
icon="https://upload.wikimedia.org/wikipedia/commons/thumb/5/53/Google_%22G%22_Logo.svg/1200px-Google_%22G%22_Logo.svg.png")
return await message.edit(content=None, embed=embed)
@commands.command(aliases=("detect", "language"))
async def lang_detect(self, ctx, *, user_message):
detected_lang = self.translator.detect(user_message).lang
if isinstance(detected_lang, list):
detected_lang = detected_lang[self.translator.detect(user_message).confidence.index(
max(self.translator.detect(user_message).confidence))]
await ctx.send(
f'"{user_message}" is in {languages.get(alpha2=detected_lang).name} (Certainty: `{int(max(self.translator.detect(user_message).confidence) * 100)}%`).')
    @commands.command(name="morse", aliases=("morsecode",))
    async def morse_code(self, ctx, encrypt_decrypt, *, sentence):
        """Encrypt plain text to Morse code or decrypt Morse back to text.

        encrypt_decrypt: the literal discriminator "encrypt" or "decrypt".
        """
        disc = encrypt_decrypt
        var = self.morse.check_letter(sentence.upper())
        if not var:
            await ctx.send("Error: Invalid character detected.")
            return
        code = f"{sentence} "
        error_message = f"Error: You tried to {disc} an already {disc}ed message or you entered an invalid character."
        if disc == "encrypt":
            try:
                # Drop the sentinel space added above before encoding.
                code = code[0:-1]
                output = self.morse.encrypt(code.upper())
            except KeyError:
                output = error_message
            except Exception as e:
                # NOTE(review): non-KeyError failures are swallowed after a
                # console print and the command exits silently — confirm.
                print(e)
                return
        elif disc == "decrypt":
            # Accept '_' as an alias for the Morse dash.
            code = code.replace('_', '-')
            try:
                output = self.morse.decrypt(code).lower()
            except ValueError:
                output = error_message
        else:
            output = "Error: Invalid discriminator."
        await ctx.send(output.capitalize())
@commands.command()
async def ping(self, ctx):
await ctx.send(f':ping_pong: Pong! {self.bot.latency * 1000:.0f}ms.')
    @commands.command()
    async def translate(self, ctx, translate_message, destination_language='en', source_language=None):
        """Translate *translate_message* into *destination_language*,
        auto-detecting the source language unless one is given.

        Languages may be given by name or ISO code; get_dict_key maps
        full names back to googletrans codes.
        """
        destination_language = destination_language.lower()
        destination_language = get_dict_key(self.lang_dict, destination_language)
        if source_language is not None:
            source_language = source_language.lower()
            source_language = get_dict_key(self.lang_dict, source_language)
        else:
            source_language = self.translator.detect(translate_message).lang
            if isinstance(source_language, list):
                # Ambiguous detection: just take the first candidate.
                source_language = source_language[0]
        try:
            # Escape backticks so the inline code block in the reply survives.
            translated_text = self.translator.translate(translate_message, src=source_language,
                                                        dest=destination_language).text.replace("`", "\`")
            await ctx.send(
                f'Translated from {self.lang_dict[source_language].capitalize()} to {self.lang_dict[destination_language].capitalize()}\n`{translated_text.capitalize()}`.')
        except ValueError:
            await ctx.send(f"Error: Invalid language.")
@commands.command()
async def wikipedia(self, ctx, *, search_request):
message = await ctx.send(f"Searching for {search_request}")
async with ctx.typing():
title = "Wikipedia"
description = ""
image = "https://i.imgur.com/7kT1Ydo.png"
try:
result = wikipedia.page(search_request)
# update: didn't go that bad, but it wasn't "well lol"
description = f"[{result.title}]({result.url})\n{result.summary[:300].strip()}..."
try:
image = result.images[0]
except IndexError:
pass
except wikipedia.exceptions.DisambiguationError as e:
i = 1
for option in e.options[:9]:
i += 1
disamb_result = wikipedia.page(option, auto_suggest=False)
if disamb_result.url != "":
result_2 = f"[{disamb_result.title}]({disamb_result.url})"
else:
result_2 = f"{disamb_result} **URL Not Found**"
description += f"`{i}`: {result_2}\n"
except wikipedia.exceptions.PageError:
description = "Page not found."
embed = botutilities.embed_template(ctx, title, description, image=image,
icon="https://i.imgur.com/FD1pauH.png")
await message.edit(content=None, embed=embed)
@commands.has_permissions(manage_messages=True)
@commands.command()
async def clear(self, ctx, amount: int):
await ctx.message.delete()
deleted_messages = await ctx.channel.purge(limit=amount)
clear_message = await ctx.send(f'Cleared {len(deleted_messages)} messages.')
await asyncio.sleep(2)
await clear_message.delete()
@commands.has_permissions(ban_members=True)
@commands.command()
async def ban(self, ctx, member: discord.Member, *, reason=None):
if not member.guild_permissions.administrator:
await member.ban(reason=reason)
await ctx.send(f'{member} banned via `{ctx.prefix}ban` command. Reason: {reason}.')
else:
await ctx.send(f"Error: {member} is an admin and can't be banned by Goldbot.")
@commands.has_permissions(kick_members=True)
@commands.command()
async def kick(self, ctx, member: discord.Member, *, reason=None):
await member.kick(reason=reason)
await ctx.send(f'{member} kicked via `{ctx.prefix}kick` command. Reason: {reason}.')
    @commands.has_permissions(manage_roles=True)
    @commands.command()
    async def mute(self, ctx, member: discord.Member, time="1m", *, reason=None):
        # TODO: not implemented yet — currently a silent no-op placeholder.
        return
@commands.has_permissions(manage_messages=True)
@commands.command()
async def pin(self, ctx):
if ctx.message.reference:
await ctx.message.reference.resolved.pin()
else:
messages = await ctx.history(limit=2).flatten()
messages.remove(ctx.message)
await messages[0].pin()
@commands.command()
async def invite(self, ctx):
await ctx.send(
"Here's the invite link for Goldbot:\nhttps://discord.com/api/oauth2/authorize?client_id=573680244213678081&permissions=8&scope=bot")
@commands.command()
async def binary(self, ctx, encode_decode: str, *, sentence):
    """Encode text to space-separated 8-bit binary, or decode such binary back.

    encode_decode: "encode" or "decode" (case-insensitive discriminator).
    sentence: text to encode, or a string of 0s/1s — either space-separated
        8-bit groups or one unbroken run that is split into 8-bit chunks.
    """
    mode = encode_decode.lower()
    if mode == "encode":
        # One 8-bit group per character, single-space separated.
        # (Replaces the old enumerate() loop with an unused index and the
        # quadratic string += accumulation.)
        output = ' '.join(format(ord(char), '08b') for char in sentence)
        await ctx.send(f"Here\'s your encoded string: \n`{output}`")
    elif mode == 'decode':
        # Reject anything that is not a binary digit or a separator.
        for char in sentence:
            if char not in ['0', '1', ' ']:
                await ctx.send("Please only use 1s and 0s.")
                return
        try:
            int(sentence)
        except ValueError:
            # Contains whitespace: groups are already separated.
            bin_list = sentence.split()
        else:
            # Unbroken digit run: split into fixed 8-bit chunks.
            bin_list = [sentence[i:i + 8] for i in range(0, len(sentence), 8)]
        output = ''.join(chr(int(group, 2)) for group in bin_list)
        await ctx.send(f"Here\'s your decoded binary code: \n`{output}`")
    else:
        await ctx.send('ERROR: Invalid discriminator.')
@commands.has_permissions(ban_members=True)
@commands.command()
async def unban(self, ctx, *, member):
    """Unban a user given as "Name#1234".

    Scans the guild ban list for a matching name/discriminator pair;
    reports when no match is found.
    """
    banned_users = await ctx.guild.bans()
    member_name, member_discriminator = member.split('#')
    for ban_entry in banned_users:
        user = ban_entry.user
        if (user.name, user.discriminator) == (member_name, member_discriminator):
            await ctx.guild.unban(user)
            await ctx.send(f'Unbanned {user.mention}.')
            return
    # BUG FIX: previously this referenced the loop variable `user`, which
    # named the wrong user (the last ban entry) or raised UnboundLocalError
    # when the ban list was empty. Report the requested member instead.
    await ctx.send(f'{member} is not banned.')
@commands.has_permissions(manage_nicknames=True)
@commands.command(aliases=("rename",))
async def nickname(self, ctx, *, nickname):
    """Change the bot's nickname (Discord caps nicknames at 32 characters)."""
    if len(nickname) <= 32:
        await ctx.guild.me.edit(nick=nickname)
        await ctx.send(f'Successfully changed my nickname to "{nickname}".')
    else:
        await ctx.send(f'Error: "{nickname}" has more than 32 characters and therefore can\'t fit as my nickname.')
@commands.command()
async def prefix(self, ctx, new_prefix=None):
    """Show the server's command prefix, or (admins only) change/reset it."""
    # No argument: read-only, available to everyone.
    if new_prefix is None:
        await ctx.send(f"Server's prefix currently set to `{ctx.prefix}`.")
        return
    # Changing the prefix requires administrator.
    if not ctx.author.guild_permissions.administrator:
        raise commands.MissingPermissions(missing_permissions=['administrator'])
    if new_prefix.lower() == "reset":
        botutilities.parser.remove(str(ctx.guild.id))
        await ctx.send(f"Prefix reset back to `{botutilities.parser.default}`!")
    else:
        botutilities.parser.update(str(ctx.guild.id), new_prefix)
        await ctx.send(f"Prefix changed to `{new_prefix}`!")
@commands.command(name="help")
async def _help(self, ctx, command=None):
    """Show the command list, or detailed help for one command.

    Mod-only command help is shown only to administrators. Help bodies live
    under help_texts/ and may contain a {prefix} placeholder.
    """
    def read_help(path):
        # All help files are UTF-8 text read in one go.
        with open(path, "r", encoding="utf-8") as file:
            return file.read()

    footer = ""
    mod_commands = ("ban", "clear", "kick", "pin", "unban")
    if command is None:
        title = "Commands"
        help_text = read_help("help_texts/general_help.txt")
        if ctx.author.guild_permissions.administrator:
            help_text += read_help("help_texts/mod_help.txt")
            footer = "\n<>=Necessary, []=optional."
    else:
        command = command.lower()
        if command in mod_commands and not ctx.author.guild_permissions.administrator:
            title = "Error!"
            help_text = f"You don't have permissions to use `{command}`"
        else:
            try:
                title = command.capitalize()
                help_text = read_help(f"help_texts/specific_help/{command}.txt")
                footer = "\n<>=Necessary, []=optional."
            except FileNotFoundError:
                # Dedup fix: the same open/read appeared three times before,
                # and a missing mod help file previously raised uncaught.
                title = "Error!"
                help_text = "Command not found."
    embed = botutilities.embed_template(ctx, title, help_text.format(prefix=ctx.prefix), footer,
                                        add_def_footer=True)
    await ctx.send(embed=embed)
@commands.check(botutilities.is_bot_owner)
@commands.command()
async def test(self, ctx):
print("TEST")
embed = discord.Embed(title="Title", description=f"[Test Link](https://www.youtube.com)",
color=random.randint(0, | |
# Repo: v0rts/crossfeed
import traceback

# All project/third-party imports are wrapped so a missing dependency is
# logged instead of crashing the runner outright.
try:
    from source import (
        alerts,
        list_organizations,
        alias_organization,
        mentions,
        root_domains,
        creds,
        top_cves,
    )
    from redact import redact_pii
    import psycopg2
    import psycopg2.extras as extras
    import os
    import pandas as pd
    import datetime
    from datetime import date, timedelta
    import requests
except:
    # NOTE(review): bare except — execution continues with missing names,
    # so later code will fail with NameError; consider re-raising.
    print(traceback.format_exc())
# Database connection settings and the target organization, all supplied
# via environment variables by the caller.
DB_HOST = os.environ.get("DB_HOST")
PE_DB_NAME = os.environ.get("PE_DB_NAME")
PE_DB_USERNAME = os.environ.get("PE_DB_USERNAME")
PE_DB_PASSWORD = os.environ.get("PE_DB_PASSWORD")
org_id = os.environ.get("org_id")
org_name = os.environ.get("org_name")
# Get today's date formatted YYYY-MM-DD and the start date 40 days prior;
# date_span is the Cybersixgill query window used for mentions.
# (The original comment said 16 days; the code uses 40.)
today = date.today()
days_back = timedelta(days=40)
start_date = str(today - days_back)
end_date = str(today)
date_span = f"[{start_date} TO {end_date}]"
# Separate, narrower 16-day timestamp window used for the credentials query.
to_date = datetime.datetime.now()
back = timedelta(days=16)
from_date = (to_date - back).strftime("%Y-%m-%d %H:%M:%S")
to_date = to_date.strftime("%Y-%m-%d %H:%M:%S")
def cve(cveid):
    """Fetch CVE details for *cveid* from the public circl.lu API.

    Args:
        cveid: CVE identifier string, e.g. "CVE-2021-44228".

    Returns:
        dict parsed from the JSON response (callers read resp["summary"]).

    Raises:
        requests.RequestException on network failure or timeout; callers
        wrap this in try/except and fall back to an empty summary.
    """
    url = f"https://cve.circl.lu/api/cve/{cveid}"
    # Timeout added so one hung request cannot stall the whole sync job.
    resp = requests.get(url, timeout=30).json()
    return resp
def getDataSource(conn, source):
    """Return the data_source row whose name equals *source* (or None)."""
    cursor = conn.cursor()
    cursor.execute("""SELECT * FROM data_source WHERE name=%s""", (source,))
    row = cursor.fetchone()
    cursor.close()
    return row
"""Connect to PE Database"""
try:
PE_conn = psycopg2.connect(
host=DB_HOST,
database=PE_DB_NAME,
user=PE_DB_USERNAME,
password=<PASSWORD>,
)
print("Connected to PE database.")
except:
print("Failed connecting to PE database.")
"""Get the Cybersixgill data source uid"""
try:
source_uid = getDataSource(PE_conn, "Cybersixgill")
print("Success fetching the data source")
except:
print("Failed fetching the data source.")
"""Select organization from PE Database"""
try:
print(f"Running on organization: {org_name}")
cur = PE_conn.cursor()
sql = """SELECT * FROM organizations WHERE name=%s"""
cur.execute(sql, (org_name,))
pe_org_uid = cur.fetchone()
cur.close()
print(f"PE_org_uid: {pe_org_uid}")
except:
print("Failed with Select Statement")
print(traceback.format_exc())
"""Fetch associated CyberSixGill org id"""
try:
sixgill_orgs_df = list_organizations()
for index, row in sixgill_orgs_df.iterrows():
if pe_org_uid[2] == row["name"]:
sixgill_org_id = row["organization_id"]
if not sixgill_org_id:
raise Exception("Sixgill cannot match an org_id")
print(f"Sixgill_org_uid: {sixgill_org_id}")
except:
print("Failed fetching the CyberSixGill Org Id.")
print(traceback.format_exc())
"""Fetch Aliases from Cybersix"""
try:
aliases = alias_organization(sixgill_org_id)
except:
print("Failed fetching Cybersixgill aliases.")
print(traceback.format_exc())
"""Insert/Update Aliases into PE database instance"""
try:
# aliases_list = json.loads(aliases.replace("'", '"'))
alias_df = pd.DataFrame(aliases, columns=["alias"])
alias_df["organizations_uid"] = pe_org_uid[0]
table = "alias"
# Create a list of tupples from the dataframe values
tuples = [tuple(x) for x in alias_df.to_numpy()]
# Comma-separated dataframe columns
cols = ",".join(list(alias_df.columns))
assert table in ["alias"]
assert [c in ["alias", "organizations_uid"] for c in cols]
# SQL quert to execute
query = """INSERT INTO {}({}) VALUES %s
ON CONFLICT (alias) DO NOTHING;"""
cursor = PE_conn.cursor()
try:
extras.execute_values(
cursor,
query.format(
table,
cols,
),
tuples,
)
PE_conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
print(traceback.format_exc())
PE_conn.rollback()
cursor.close()
print("Successfully inserted/updated alias data into PE database.")
cursor.close()
except:
print("Failed inserting/updating alias data.")
print(traceback.format_exc())
"""Fetch Alert Data"""
try:
alerts_df = alerts(sixgill_org_id)
# add associated pe org_id
alerts_df["organizations_uid"] = pe_org_uid[0]
# rename columns
alerts_df = alerts_df.rename(columns={"id": "sixgill_id"})
except:
print("Failed fetching Alert data.")
print(traceback.format_exc())
""" Run redact script on Alert content and title to remove PII"""
try:
alerts_df = redact_pii(alerts_df, ["content", "title"])
print("Success redacting PII")
except:
print("Something failed with the redact.")
print(traceback.format_exc())
"""Insert Alert data into PE database instance."""
try:
alerts_df = alerts_df.drop(
columns=["alert_type_id", "sub_alerts", "langcode", "matched_assets"],
errors="ignore",
)
alerts_df["data_source_uid"] = source_uid[0]
table = "alerts"
# Create a list of tupples from the dataframe values
tuples = [tuple(x) for x in alerts_df.to_numpy()]
# Comma-separated dataframe columns
cols = ",".join(list(alerts_df.columns))
assert table in ["alerts"]
assert [
c
in [
"alert_name",
"content",
"date",
"sixgill_id",
"read",
"severity",
"site",
"threat_level",
"threats",
"title",
"user_id",
"category",
"lang",
"organizations_uid",
"data_source_uid",
]
for c in cols
]
# SQL quert to execute
query = """INSERT INTO {}({}) VALUES %s
ON CONFLICT (sixgill_id) DO NOTHING;"""
cursor = PE_conn.cursor()
try:
extras.execute_values(
cursor,
query.format(
table,
cols,
),
tuples,
)
PE_conn.commit()
print("Successfully inserted/updated alert data into PE database.")
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
print(traceback.format_exc())
PE_conn.rollback()
cursor.close()
cursor.close()
except:
print("Failed inserting alert data into PE database.")
print(traceback.format_exc())
"""Fetch Mention Data"""
try:
# call mentions function
mentions_df = mentions(date_span, aliases)
# rename columns
mentions_df = mentions_df.rename(columns={"id": "sixgill_mention_id"})
# drop unneeded columns (errors = "ignore" adds drop "if exists" functionality)
try:
mentions_df = mentions_df[
[
"category",
"collection_date",
"content",
"creator",
"date",
"sixgill_mention_id",
"lang",
"post_id",
"rep_grade",
"site",
"site_grade",
"sub_category",
"title",
"type",
"url",
"comments_count",
"tags",
]
]
except:
try:
mentions_df = mentions_df[
[
"category",
"collection_date",
"content",
"creator",
"date",
"sixgill_mention_id",
"lang",
"post_id",
"rep_grade",
"site",
"site_grade",
"sub_category",
"title",
"type",
"url",
"comments_count",
]
]
except:
mentions_df = mentions_df[
[
"category",
"collection_date",
"content",
"creator",
"date",
"sixgill_mention_id",
"lang",
"post_id",
"rep_grade",
"site",
"site_grade",
"title",
"type",
"url",
]
]
# add associated pe org_id
mentions_df["organizations_uid"] = pe_org_uid[0]
except:
print("Failed fetching mention data.")
print(traceback.format_exc())
""" Run redact script on Mention content and title to remove PII"""
try:
# Make sure both columns are strings
mentions_df.loc[:, "title"] = str(mentions_df["title"])
mentions_df.loc[:, "content"] = str(mentions_df["content"])
# Run redact script
mentions_df = redact_pii(mentions_df, ["content", "title"])
print("Success redacting PII")
except:
print("Something failed with the redact.")
print(traceback.format_exc())
"""Insert mention data into PE database instance."""
try:
mentions_df = mentions_df.apply(
lambda col: col.str.replace(r"[\x00|NULL]", "") if col.dtype == object else col
)
mentions_df["data_source_uid"] = source_uid[0]
table = "mentions"
# Create a list of tupples from the dataframe values
tuples = [tuple(x) for x in mentions_df.to_numpy()]
# Comma-separated dataframe columns
cols = ",".join(list(mentions_df.columns))
assert table in ["mentions"]
assert [
c
in [
"category",
"collection_date",
"content",
"creator",
"date",
"sixgill_mention_id",
"lang",
"post_id",
"rep_grade",
"site",
"site_grade",
"sub_category",
"title",
"type",
"url",
"comments_count",
"tags",
"organizations_uid",
"data_source_uid",
]
for c in cols
]
# SQL quert to execute
query = """INSERT INTO {}({}) VALUES %s
ON CONFLICT (sixgill_mention_id) DO NOTHING;"""
cursor = PE_conn.cursor()
try:
extras.execute_values(
cursor,
query.format(
table,
cols,
),
tuples,
)
PE_conn.commit()
print("Successfully inserted/updated mention data into PE database.")
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
print(traceback.format_exc())
PE_conn.rollback()
cursor.close()
cursor.close()
except:
print("Failed inserting mention data into PE database.")
print(traceback.format_exc())
"""Fetch Top CVE data"""
try:
top_cve_df = top_cves(10)
top_cve_df["date"] = end_date
top_cve_df["nvd_base_score"] = top_cve_df["nvd_base_score"].astype("str")
# Get CVE description from circl.lu
top_cve_df["summary"] = ""
for index, row in top_cve_df.iterrows():
try:
resp = cve(row["cve_id"])
summary = resp["summary"]
except:
summary = ""
top_cve_df.at[index, "summary"] = summary
print("Successfully fetched top cve data.")
except:
print("Failed fetching top cve data.")
print(traceback.format_exc())
"""Insert Top CVE Data into PE database"""
try:
top_cve_df["data_source_uid"] = source_uid[0]
table = "top_cves"
# Create a list of tupples from the dataframe values
tuples = [tuple(x) for x in top_cve_df.to_numpy()]
# Comma-separated dataframe columns
cols = ",".join(list(top_cve_df.columns))
assert table in ["top_cves"]
assert [
c
in [
"cve_id",
"dynamic_rating",
"nvd_base_score",
"date",
"summary",
"data_source_uid",
]
for c in cols
]
# SQL query to execute
query = """INSERT INTO {}({}) VALUES %s
ON CONFLICT (cve_id, date) DO NOTHING;"""
cursor = PE_conn.cursor()
try:
extras.execute_values(
cursor,
query.format(
table,
cols,
),
tuples,
)
PE_conn.commit()
print("Successfully inserted/updated top cve data into PE database.")
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
print(traceback.format_exc())
PE_conn.rollback()
cursor.close()
cursor.close()
except:
print("Failed inserting top cve data into PE database.")
print(traceback.format_exc())
"""Fetch root domains for Credential function"""
try:
root_domains = root_domains(sixgill_org_id)
# root_domains = json.loads(root_domains.replace("'", '"'))
except:
print("Failed fetching root domain data.")
print(traceback.format_exc())
"""Fetch Credential Data"""
try:
creds_df = creds(root_domains, from_date, to_date)
creds_df["organizations_uid"] = pe_org_uid[0]
creds_df["data_source_uid"] = source_uid[0]
print("Successfully fetched credential data.")
except:
print("Failed fetching credential data.")
print(traceback.format_exc())
if not creds_df.empty:
"""Split credential data into breach and credential tables"""
try:
# Change empty and ambiguous breach names
creds_df.loc[
creds_df["breach_name"] == "", "breach_name"
] = "Cybersixgill_" + creds_df["breach_id"].astype(str)
creds_df.loc[
creds_df["breach_name"] == "Automatic leaked credentials detection",
"breach_name",
] = "Cybersixgill_" + creds_df["breach_id"].astype(str)
creds_breach_df = creds_df[
["breach_name", "description", "breach_date", "password", "data_source_uid"]
].reset_index()
# Create password_included column
creds_breach_df["password_included"] = creds_breach_df["password"] != ""
# Group breaches and count the number of credentials
count_creds = creds_breach_df.groupby(
[
"breach_name",
"description",
"breach_date",
"password_included",
"data_source_uid",
]
).size()
creds_breach_df = count_creds.to_frame(name="exposed_cred_count").reset_index()
creds_breach_df["modified_date"] = creds_breach_df["breach_date"]
except:
print("Failed splitting credential data.")
print(traceback.format_exc())
# Insert breach data into the PE database
try:
table = "credential_breaches"
# Create a list of tuples from the dataframe values
tuples = [tuple(x) for x in creds_breach_df.to_numpy()]
# Comma-separated dataframe columns
cols = ",".join(list(creds_breach_df.columns))
assert table in ["credential_breaches"]
assert [
c
in [
"breach_name",
"description",
"breach_date",
"password_included",
"data_source_uid",
"exposed_cred_count",
"modified_date",
]
for c in cols
]
# SQL query to execute
query = """INSERT INTO {}({}) VALUES %s
ON CONFLICT (breach_name) DO UPDATE SET
exposed_cred_count = EXCLUDED.exposed_cred_count,
password_included = EXCLUDED.password_included;"""
cursor = PE_conn.cursor()
try:
extras.execute_values(
cursor,
query.format(
table,
cols,
),
tuples,
)
PE_conn.commit()
print("Successfully inserted/updated breaches into PE database.")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
PE_conn.rollback()
cursor.close()
except Exception as e:
print(f"Failed inserting breaches for {org_id}")
print(e)
# Get breach uids and match to credentials
try:
cur = PE_conn.cursor()
sql = """SELECT breach_name, credential_breaches_uid FROM credential_breaches"""
cur.execute(sql)
pe_orgs = cur.fetchall()
cur.close()
except (Exception, | |
SQLParser#simpleExprWindowingFunction.
def exitSimpleExprWindowingFunction(self, ctx:SQLParser.SimpleExprWindowingFunctionContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprBinary.
def enterSimpleExprBinary(self, ctx:SQLParser.SimpleExprBinaryContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprBinary.
def exitSimpleExprBinary(self, ctx:SQLParser.SimpleExprBinaryContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprColumnRef.
def enterSimpleExprColumnRef(self, ctx:SQLParser.SimpleExprColumnRefContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprColumnRef.
def exitSimpleExprColumnRef(self, ctx:SQLParser.SimpleExprColumnRefContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprParamMarker.
def enterSimpleExprParamMarker(self, ctx:SQLParser.SimpleExprParamMarkerContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprParamMarker.
def exitSimpleExprParamMarker(self, ctx:SQLParser.SimpleExprParamMarkerContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprSum.
def enterSimpleExprSum(self, ctx:SQLParser.SimpleExprSumContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprSum.
def exitSimpleExprSum(self, ctx:SQLParser.SimpleExprSumContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprConvertUsing.
def enterSimpleExprConvertUsing(self, ctx:SQLParser.SimpleExprConvertUsingContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprConvertUsing.
def exitSimpleExprConvertUsing(self, ctx:SQLParser.SimpleExprConvertUsingContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprSubQuery.
def enterSimpleExprSubQuery(self, ctx:SQLParser.SimpleExprSubQueryContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprSubQuery.
def exitSimpleExprSubQuery(self, ctx:SQLParser.SimpleExprSubQueryContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprGroupingOperation.
def enterSimpleExprGroupingOperation(self, ctx:SQLParser.SimpleExprGroupingOperationContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprGroupingOperation.
def exitSimpleExprGroupingOperation(self, ctx:SQLParser.SimpleExprGroupingOperationContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprNot.
def enterSimpleExprNot(self, ctx:SQLParser.SimpleExprNotContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprNot.
def exitSimpleExprNot(self, ctx:SQLParser.SimpleExprNotContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprValues.
def enterSimpleExprValues(self, ctx:SQLParser.SimpleExprValuesContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprValues.
def exitSimpleExprValues(self, ctx:SQLParser.SimpleExprValuesContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprDefault.
def enterSimpleExprDefault(self, ctx:SQLParser.SimpleExprDefaultContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprDefault.
def exitSimpleExprDefault(self, ctx:SQLParser.SimpleExprDefaultContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprList.
def enterSimpleExprList(self, ctx:SQLParser.SimpleExprListContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprList.
def exitSimpleExprList(self, ctx:SQLParser.SimpleExprListContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprInterval.
def enterSimpleExprInterval(self, ctx:SQLParser.SimpleExprIntervalContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprInterval.
def exitSimpleExprInterval(self, ctx:SQLParser.SimpleExprIntervalContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprCase.
def enterSimpleExprCase(self, ctx:SQLParser.SimpleExprCaseContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprCase.
def exitSimpleExprCase(self, ctx:SQLParser.SimpleExprCaseContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprConcat.
def enterSimpleExprConcat(self, ctx:SQLParser.SimpleExprConcatContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprConcat.
def exitSimpleExprConcat(self, ctx:SQLParser.SimpleExprConcatContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprLiteral.
def enterSimpleExprLiteral(self, ctx:SQLParser.SimpleExprLiteralContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprLiteral.
def exitSimpleExprLiteral(self, ctx:SQLParser.SimpleExprLiteralContext):
pass
# Enter a parse tree produced by SQLParser#arrayCast.
def enterArrayCast(self, ctx:SQLParser.ArrayCastContext):
pass
# Exit a parse tree produced by SQLParser#arrayCast.
def exitArrayCast(self, ctx:SQLParser.ArrayCastContext):
pass
# Enter a parse tree produced by SQLParser#jsonOperator.
def enterJsonOperator(self, ctx:SQLParser.JsonOperatorContext):
pass
# Exit a parse tree produced by SQLParser#jsonOperator.
def exitJsonOperator(self, ctx:SQLParser.JsonOperatorContext):
pass
# Enter a parse tree produced by SQLParser#sumExpr.
def enterSumExpr(self, ctx:SQLParser.SumExprContext):
pass
# Exit a parse tree produced by SQLParser#sumExpr.
def exitSumExpr(self, ctx:SQLParser.SumExprContext):
pass
# Enter a parse tree produced by SQLParser#groupingOperation.
def enterGroupingOperation(self, ctx:SQLParser.GroupingOperationContext):
pass
# Exit a parse tree produced by SQLParser#groupingOperation.
def exitGroupingOperation(self, ctx:SQLParser.GroupingOperationContext):
pass
# Enter a parse tree produced by SQLParser#windowFunctionCall.
def enterWindowFunctionCall(self, ctx:SQLParser.WindowFunctionCallContext):
pass
# Exit a parse tree produced by SQLParser#windowFunctionCall.
def exitWindowFunctionCall(self, ctx:SQLParser.WindowFunctionCallContext):
pass
# Enter a parse tree produced by SQLParser#windowingClause.
def enterWindowingClause(self, ctx:SQLParser.WindowingClauseContext):
pass
# Exit a parse tree produced by SQLParser#windowingClause.
def exitWindowingClause(self, ctx:SQLParser.WindowingClauseContext):
pass
# Enter a parse tree produced by SQLParser#leadLagInfo.
def enterLeadLagInfo(self, ctx:SQLParser.LeadLagInfoContext):
pass
# Exit a parse tree produced by SQLParser#leadLagInfo.
def exitLeadLagInfo(self, ctx:SQLParser.LeadLagInfoContext):
pass
# Enter a parse tree produced by SQLParser#nullTreatment.
def enterNullTreatment(self, ctx:SQLParser.NullTreatmentContext):
pass
# Exit a parse tree produced by SQLParser#nullTreatment.
def exitNullTreatment(self, ctx:SQLParser.NullTreatmentContext):
pass
# Enter a parse tree produced by SQLParser#jsonFunction.
def enterJsonFunction(self, ctx:SQLParser.JsonFunctionContext):
pass
# Exit a parse tree produced by SQLParser#jsonFunction.
def exitJsonFunction(self, ctx:SQLParser.JsonFunctionContext):
pass
# Enter a parse tree produced by SQLParser#inSumExpr.
def enterInSumExpr(self, ctx:SQLParser.InSumExprContext):
pass
# Exit a parse tree produced by SQLParser#inSumExpr.
def exitInSumExpr(self, ctx:SQLParser.InSumExprContext):
pass
# Enter a parse tree produced by SQLParser#identListArg.
def enterIdentListArg(self, ctx:SQLParser.IdentListArgContext):
pass
# Exit a parse tree produced by SQLParser#identListArg.
def exitIdentListArg(self, ctx:SQLParser.IdentListArgContext):
pass
# Enter a parse tree produced by SQLParser#identList.
def enterIdentList(self, ctx:SQLParser.IdentListContext):
pass
# Exit a parse tree produced by SQLParser#identList.
def exitIdentList(self, ctx:SQLParser.IdentListContext):
pass
# Enter a parse tree produced by SQLParser#fulltextOptions.
def enterFulltextOptions(self, ctx:SQLParser.FulltextOptionsContext):
pass
# Exit a parse tree produced by SQLParser#fulltextOptions.
def exitFulltextOptions(self, ctx:SQLParser.FulltextOptionsContext):
pass
# Enter a parse tree produced by SQLParser#runtimeFunctionCall.
def enterRuntimeFunctionCall(self, ctx:SQLParser.RuntimeFunctionCallContext):
pass
# Exit a parse tree produced by SQLParser#runtimeFunctionCall.
def exitRuntimeFunctionCall(self, ctx:SQLParser.RuntimeFunctionCallContext):
pass
# Enter a parse tree produced by SQLParser#geometryFunction.
def enterGeometryFunction(self, ctx:SQLParser.GeometryFunctionContext):
pass
# Exit a parse tree produced by SQLParser#geometryFunction.
def exitGeometryFunction(self, ctx:SQLParser.GeometryFunctionContext):
pass
# Enter a parse tree produced by SQLParser#timeFunctionParameters.
def enterTimeFunctionParameters(self, ctx:SQLParser.TimeFunctionParametersContext):
pass
# Exit a parse tree produced by SQLParser#timeFunctionParameters.
def exitTimeFunctionParameters(self, ctx:SQLParser.TimeFunctionParametersContext):
pass
# Enter a parse tree produced by SQLParser#fractionalPrecision.
def enterFractionalPrecision(self, ctx:SQLParser.FractionalPrecisionContext):
pass
# Exit a parse tree produced by SQLParser#fractionalPrecision.
def exitFractionalPrecision(self, ctx:SQLParser.FractionalPrecisionContext):
pass
# Enter a parse tree produced by SQLParser#weightStringLevels.
def enterWeightStringLevels(self, ctx:SQLParser.WeightStringLevelsContext):
pass
# Exit a parse tree produced by SQLParser#weightStringLevels.
def exitWeightStringLevels(self, ctx:SQLParser.WeightStringLevelsContext):
pass
# Enter a parse tree produced by SQLParser#weightStringLevelListItem.
def enterWeightStringLevelListItem(self, ctx:SQLParser.WeightStringLevelListItemContext):
pass
# Exit a parse tree produced by SQLParser#weightStringLevelListItem.
def exitWeightStringLevelListItem(self, ctx:SQLParser.WeightStringLevelListItemContext):
pass
# Enter a parse tree produced by SQLParser#dateTimeTtype.
def enterDateTimeTtype(self, ctx:SQLParser.DateTimeTtypeContext):
pass
# Exit a parse tree produced by SQLParser#dateTimeTtype.
def exitDateTimeTtype(self, ctx:SQLParser.DateTimeTtypeContext):
pass
# Enter a parse tree produced by SQLParser#trimFunction.
def enterTrimFunction(self, ctx:SQLParser.TrimFunctionContext):
pass
# Exit a parse tree produced by SQLParser#trimFunction.
def exitTrimFunction(self, ctx:SQLParser.TrimFunctionContext):
pass
# Enter a parse tree produced by SQLParser#substringFunction.
def enterSubstringFunction(self, ctx:SQLParser.SubstringFunctionContext):
pass
# Exit a parse tree produced by SQLParser#substringFunction.
def exitSubstringFunction(self, ctx:SQLParser.SubstringFunctionContext):
pass
# Enter a parse tree produced by SQLParser#functionCall.
def enterFunctionCall(self, ctx:SQLParser.FunctionCallContext):
pass
# Exit a parse tree produced by SQLParser#functionCall.
def exitFunctionCall(self, ctx:SQLParser.FunctionCallContext):
pass
# Enter a parse tree produced by SQLParser#udfExprList.
def enterUdfExprList(self, ctx:SQLParser.UdfExprListContext):
pass
# Exit a parse tree produced by SQLParser#udfExprList.
def exitUdfExprList(self, ctx:SQLParser.UdfExprListContext):
pass
# Enter a parse tree produced by SQLParser#udfExpr.
def enterUdfExpr(self, ctx:SQLParser.UdfExprContext):
pass
# Exit a parse tree produced by SQLParser#udfExpr.
def exitUdfExpr(self, ctx:SQLParser.UdfExprContext):
pass
# Enter a parse tree produced by SQLParser#variable.
def enterVariable(self, ctx:SQLParser.VariableContext):
pass
# Exit a parse tree produced by SQLParser#variable.
def exitVariable(self, ctx:SQLParser.VariableContext):
pass
# Enter a parse tree produced by SQLParser#userVariable.
def enterUserVariable(self, ctx:SQLParser.UserVariableContext):
pass
# Exit a parse tree produced by SQLParser#userVariable.
def exitUserVariable(self, ctx:SQLParser.UserVariableContext):
pass
# Enter a parse tree produced by SQLParser#systemVariable.
def enterSystemVariable(self, ctx:SQLParser.SystemVariableContext):
pass
# Exit a parse tree produced by SQLParser#systemVariable.
def exitSystemVariable(self, ctx:SQLParser.SystemVariableContext):
pass
# Enter a parse tree produced by SQLParser#internalVariableName.
def enterInternalVariableName(self, ctx:SQLParser.InternalVariableNameContext):
pass
# Exit a parse tree produced by SQLParser#internalVariableName.
def exitInternalVariableName(self, ctx:SQLParser.InternalVariableNameContext):
pass
# Enter a parse tree produced by SQLParser#whenExpression.
def enterWhenExpression(self, ctx:SQLParser.WhenExpressionContext):
pass
# Exit a parse tree produced by SQLParser#whenExpression.
def exitWhenExpression(self, ctx:SQLParser.WhenExpressionContext):
pass
# Enter a parse tree produced by SQLParser#thenExpression.
def enterThenExpression(self, ctx:SQLParser.ThenExpressionContext):
pass
# Exit a parse tree produced by SQLParser#thenExpression.
def exitThenExpression(self, ctx:SQLParser.ThenExpressionContext):
pass
# Enter a parse tree produced by SQLParser#elseExpression.
def enterElseExpression(self, ctx:SQLParser.ElseExpressionContext):
pass
# Exit a parse tree produced by SQLParser#elseExpression.
def exitElseExpression(self, ctx:SQLParser.ElseExpressionContext):
pass
# Enter a parse tree produced by SQLParser#castType.
def enterCastType(self, ctx:SQLParser.CastTypeContext):
pass
# Exit a parse tree produced by SQLParser#castType.
def exitCastType(self, ctx:SQLParser.CastTypeContext):
pass
# Enter a parse tree produced by SQLParser#exprList.
def enterExprList(self, ctx:SQLParser.ExprListContext):
pass
# Exit a parse tree produced by SQLParser#exprList.
def exitExprList(self, ctx:SQLParser.ExprListContext):
pass
# Enter a parse tree produced by SQLParser#charset.
def enterCharset(self, ctx:SQLParser.CharsetContext):
pass
# Exit a parse tree produced by SQLParser#charset.
def exitCharset(self, ctx:SQLParser.CharsetContext):
pass
# Enter a parse tree produced by SQLParser#notRule.
def enterNotRule(self, ctx:SQLParser.NotRuleContext):
pass
# Exit a parse tree produced by SQLParser#notRule.
def exitNotRule(self, ctx:SQLParser.NotRuleContext):
pass
# Enter a parse tree produced by SQLParser#not2Rule.
def enterNot2Rule(self, ctx:SQLParser.Not2RuleContext):
pass
# Exit a parse tree produced by SQLParser#not2Rule.
def exitNot2Rule(self, ctx:SQLParser.Not2RuleContext):
pass
# Enter a parse tree produced by SQLParser#interval.
def enterInterval(self, ctx:SQLParser.IntervalContext):
pass
# Exit a parse tree produced by SQLParser#interval.
def exitInterval(self, ctx:SQLParser.IntervalContext):
pass
# Enter a | |
def eventFilter(*args, **kwargs):
pass
def horizontalScrollBar(*args, **kwargs):
pass
def horizontalScrollBarPolicy(*args, **kwargs):
pass
def keyPressEvent(*args, **kwargs):
pass
def maximumViewportSize(*args, **kwargs):
pass
def minimumSizeHint(*args, **kwargs):
pass
def mouseDoubleClickEvent(*args, **kwargs):
pass
def mouseMoveEvent(*args, **kwargs):
pass
def mousePressEvent(*args, **kwargs):
pass
def mouseReleaseEvent(*args, **kwargs):
pass
def paintEvent(*args, **kwargs):
pass
def resizeEvent(*args, **kwargs):
pass
def scrollBarWidgets(*args, **kwargs):
pass
def scrollContentsBy(*args, **kwargs):
pass
def setCornerWidget(*args, **kwargs):
pass
def setHorizontalScrollBar(*args, **kwargs):
pass
def setHorizontalScrollBarPolicy(*args, **kwargs):
pass
def setSizeAdjustPolicy(*args, **kwargs):
pass
def setVerticalScrollBar(*args, **kwargs):
pass
def setVerticalScrollBarPolicy(*args, **kwargs):
pass
def setViewport(*args, **kwargs):
pass
def setViewportMargins(*args, **kwargs):
pass
def setupViewport(*args, **kwargs):
pass
def sizeAdjustPolicy(*args, **kwargs):
pass
def sizeHint(*args, **kwargs):
pass
def verticalScrollBar(*args, **kwargs):
pass
def verticalScrollBarPolicy(*args, **kwargs):
pass
def viewport(*args, **kwargs):
pass
def viewportEvent(*args, **kwargs):
pass
def viewportMargins(*args, **kwargs):
pass
def viewportSizeHint(*args, **kwargs):
pass
def wheelEvent(*args, **kwargs):
pass
AdjustIgnored = None
AdjustToContents = None
AdjustToContentsOnFirstShow = None
SizeAdjustPolicy = None
__new__ = None
staticMetaObject = None
class QSplitter(QFrame):
    """Auto-generated binding stub for Qt's QSplitter widget.

    Every method body is an empty placeholder; the real implementation is
    provided by the compiled Qt binding at runtime. The trailing ``None``
    attributes are likewise placeholders for signals and meta-object data.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def __lshift__(*args, **kwargs):
        """
        x.__lshift__(y) <==> x<<y
        """
        pass
    def __rlshift__(*args, **kwargs):
        """
        x.__rlshift__(y) <==> y<<x
        """
        pass
    def __rrshift__(*args, **kwargs):
        """
        x.__rrshift__(y) <==> y>>x
        """
        pass
    def __rshift__(*args, **kwargs):
        """
        x.__rshift__(y) <==> x>>y
        """
        pass
    def addWidget(*args, **kwargs):
        pass
    def changeEvent(*args, **kwargs):
        pass
    def childEvent(*args, **kwargs):
        pass
    def childrenCollapsible(*args, **kwargs):
        pass
    def closestLegalPosition(*args, **kwargs):
        pass
    def count(*args, **kwargs):
        pass
    def createHandle(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def getRange(*args, **kwargs):
        pass
    def handle(*args, **kwargs):
        pass
    def handleWidth(*args, **kwargs):
        pass
    def indexOf(*args, **kwargs):
        pass
    def insertWidget(*args, **kwargs):
        pass
    def isCollapsible(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def moveSplitter(*args, **kwargs):
        pass
    def opaqueResize(*args, **kwargs):
        pass
    def orientation(*args, **kwargs):
        pass
    def refresh(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def restoreState(*args, **kwargs):
        pass
    def saveState(*args, **kwargs):
        pass
    def setChildrenCollapsible(*args, **kwargs):
        pass
    def setCollapsible(*args, **kwargs):
        pass
    def setHandleWidth(*args, **kwargs):
        pass
    def setOpaqueResize(*args, **kwargs):
        pass
    def setOrientation(*args, **kwargs):
        pass
    def setRubberBand(*args, **kwargs):
        pass
    def setSizes(*args, **kwargs):
        pass
    def setStretchFactor(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def sizes(*args, **kwargs):
        pass
    def widget(*args, **kwargs):
        pass
    # Placeholders filled in by the binding: splitterMoved is a Qt signal.
    __new__ = None
    splitterMoved = None
    staticMetaObject = None
class QFontComboBox(QComboBox):
    """Auto-generated binding stub for Qt's QFontComboBox widget.

    Method bodies are empty placeholders; the FontFilter enum values and
    the currentFontChanged signal below are populated by the compiled Qt
    binding at runtime.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def currentFont(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def fontFilters(*args, **kwargs):
        pass
    def setCurrentFont(*args, **kwargs):
        pass
    def setFontFilters(*args, **kwargs):
        pass
    def setWritingSystem(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def writingSystem(*args, **kwargs):
        pass
    AllFonts = None
    FontFilter = None
    FontFilters = None
    MonospacedFonts = None
    NonScalableFonts = None
    ProportionalFonts = None
    ScalableFonts = None
    __new__ = None
    currentFontChanged = None
    staticMetaObject = None
class QRadioButton(QAbstractButton):
    """Auto-generated binding stub for Qt's QRadioButton widget.

    All method bodies are empty placeholders; behaviour comes from the
    compiled Qt binding at runtime.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def event(*args, **kwargs):
        pass
    def hitButton(*args, **kwargs):
        pass
    def initStyleOption(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def mouseMoveEvent(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QStyleOptionComboBox(QStyleOptionComplex):
    """Auto-generated binding stub for Qt's QStyleOptionComboBox.

    The None attributes are placeholders for the style-option data members
    and enum values supplied by the compiled Qt binding at runtime.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    currentIcon = None
    currentText = None
    editable = None
    frame = None
    iconSize = None
    popupRect = None
    StyleOptionType = None
    StyleOptionVersion = None
    Type = None
    Version = None
    __new__ = None
class QStyleOptionToolButton(QStyleOptionComplex):
    """Auto-generated binding stub for Qt's QStyleOptionToolButton.

    The None attributes are placeholders for data members and
    ToolButtonFeature enum values supplied by the compiled Qt binding.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    arrowType = None
    features = None
    font = None
    icon = None
    iconSize = None
    pos = None
    text = None
    toolButtonStyle = None
    Arrow = None
    HasMenu = None
    Menu = None
    MenuButtonPopup = None
    # Generated workaround: this enum member is literally named "None",
    # which cannot appear on the left of a normal assignment in Python.
    locals()['None'] = None
    PopupDelay = None
    StyleOptionType = None
    StyleOptionVersion = None
    ToolButtonFeature = None
    ToolButtonFeatures = None
    Type = None
    Version = None
    __new__ = None
class QDateTimeEdit(QAbstractSpinBox):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def calendarPopup(*args, **kwargs):
pass
def calendarWidget(*args, **kwargs):
pass
def clear(*args, **kwargs):
pass
def clearMaximumDate(*args, **kwargs):
pass
def clearMaximumDateTime(*args, **kwargs):
pass
def clearMaximumTime(*args, **kwargs):
pass
def clearMinimumDate(*args, **kwargs):
pass
def clearMinimumDateTime(*args, **kwargs):
pass
def clearMinimumTime(*args, **kwargs):
pass
def currentSection(*args, **kwargs):
pass
def currentSectionIndex(*args, **kwargs):
pass
def date(*args, **kwargs):
pass
def dateTime(*args, **kwargs):
pass
def dateTimeFromText(*args, **kwargs):
pass
def displayFormat(*args, **kwargs):
pass
def displayedSections(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def fixup(*args, **kwargs):
pass
def focusInEvent(*args, **kwargs):
pass
def focusNextPrevChild(*args, **kwargs):
pass
def initStyleOption(*args, **kwargs):
pass
def keyPressEvent(*args, **kwargs):
pass
def maximumDate(*args, **kwargs):
pass
def maximumDateTime(*args, **kwargs):
pass
def maximumTime(*args, **kwargs):
pass
def minimumDate(*args, **kwargs):
pass
def minimumDateTime(*args, **kwargs):
pass
def minimumTime(*args, **kwargs):
pass
def mousePressEvent(*args, **kwargs):
pass
def paintEvent(*args, **kwargs):
pass
def sectionAt(*args, **kwargs):
pass
def sectionCount(*args, **kwargs):
pass
def sectionText(*args, **kwargs):
pass
def setCalendarPopup(*args, **kwargs):
pass
def setCalendarWidget(*args, **kwargs):
pass
def setCurrentSection(*args, **kwargs):
pass
def setCurrentSectionIndex(*args, **kwargs):
pass
def setDate(*args, **kwargs):
pass
def setDateRange(*args, **kwargs):
pass
def setDateTime(*args, **kwargs):
pass
def setDateTimeRange(*args, **kwargs):
pass
def setDisplayFormat(*args, **kwargs):
pass
def setMaximumDate(*args, **kwargs):
pass
def setMaximumDateTime(*args, **kwargs):
pass
def setMaximumTime(*args, **kwargs):
pass
def setMinimumDate(*args, **kwargs):
pass
def setMinimumDateTime(*args, **kwargs):
pass
def setMinimumTime(*args, **kwargs):
pass
def setSelectedSection(*args, **kwargs):
pass
def setTime(*args, **kwargs):
pass
def setTimeRange(*args, **kwargs):
pass
def setTimeSpec(*args, **kwargs):
pass
def sizeHint(*args, **kwargs):
pass
def stepBy(*args, **kwargs):
pass
def stepEnabled(*args, **kwargs):
pass
def textFromDateTime(*args, **kwargs):
pass
def time(*args, **kwargs):
pass
def timeSpec(*args, **kwargs):
pass
def validate(*args, **kwargs):
pass
def wheelEvent(*args, **kwargs):
pass
AmPmSection = None
DateSections_Mask = None
DaySection = None
HourSection = None
MSecSection = None
MinuteSection = None
MonthSection = None
NoSection = None
SecondSection = None
Section = None
| |
the local basis indices.
This is prerequisite for calculating the tangent vector parameters B,
which optimally approximate the exact time evolution.
These are to be used on one side of the super-operator when applying the
nearest-neighbour Hamiltonian, similarly to C in eqn. (44) of
arXiv:1103.0936v2 [cond-mat.str-el], for the non-norm-preserving case.
Makes use only of the nearest-neighbour Hamiltonian, and of the A's.
C[n] depends on A[n] through A[n + self.ham_sites - 1].
"""
if self.ham is None:
return 0
if n_low < 1:
n_low = 1
if n_high < 1:
n_high = self.N - self.ham_sites + 1
if calc_AA:
for n in range(1, self.N):
self.AA[n] = tm.calc_AA(self.A[n], self.A[n + 1])
if self.ham_sites == 3:
for n in range(1, self.N - 1):
self.AAA[n] = tm.calc_AAA_AA(self.AA[n], self.A[n + 2])
else:
self.AAA.fill(None)
for n in range(n_low, n_high + 1):
if callable(self.ham):
ham_n = lambda *args: self.ham(n, *args)
ham_n = sp.vectorize(ham_n, otypes=[sp.complex128])
ham_n = sp.fromfunction(ham_n, tuple(self.C[n].shape[:-2] * 2))
else:
ham_n = self.ham[n]
if ham_n is None:
self.C[n] = None
else:
if self.ham_sites == 2:
self.C[n] = tm.calc_C_mat_op_AA(ham_n, self.AA[n])
else:
self.C[n] = tm.calc_C_3s_mat_op_AAA(ham_n, self.AAA[n])
def calc_K(self, n_low=-1, n_high=-1):
    """Generates the K matrices used to calculate the B's.
    This is called automatically by self.update().
    K[n] contains the action of the Hamiltonian on sites n to N.
    K[n] is recursively defined. It depends on C[m] and A[m] for all m >= n.
    It directly depends on A[n], A[n + 1], r[n], r[n + 1], C[n] and K[n + 1].
    This is equivalent to K on p. 14 of arXiv:1103.0936v2 [cond-mat.str-el], except
    that it is for the non-norm-preserving case.
    K[1] is, assuming a normalized state, the expectation value H of Ĥ.
    """
    if n_low < 1:
        n_low = 1
    if n_high < 1:
        n_high = self.N
    # Sweep from the right so that K[n + 1] is available when K[n] is built.
    for n in reversed(range(n_low, n_high + 1)):
        if n <= self.N - self.ham_sites + 1:
            if self.C[n] is None:
                # No Hamiltonian term starting at this site: just transfer
                # K[n + 1] through A[n]; the local energy contribution is 0.
                self.K[n], ex = (tm.eps_r_noop(self.K[n + 1], self.A[n], self.A[n]), 0)
            else:
                if self.ham_sites == 2:
                    self.K[n], ex = tm.calc_K(self.K[n + 1], self.C[n], self.l[n - 1],
                                              self.r[n + 1], self.A[n], self.AA[n])
                else:
                    self.K[n], ex = tm.calc_K_3s(self.K[n + 1], self.C[n], self.l[n - 1],
                                                 self.r[n + 2], self.A[n], self.AAA[n])
            self.h_expect[n] = ex
        else:
            # The last ham_sites - 1 sites start no Hamiltonian term of their own.
            self.K[n].fill(0)
    if n_low == 1:
        # FIX: sp.asscalar() was removed from NumPy (deprecated 1.16, removed
        # 1.23); ndarray.item() is the documented equivalent for a 1x1 array.
        self.H_expect = self.K[1].item()
def calc_K_l(self, n_low=-1, n_high=-1):
    """Generates the K matrices used to calculate the B's.
    For the left gauge-fixing case: K is accumulated in a left-to-right
    sweep, K[n] depending on K[n - 1], the C's and the l/r environments.
    Sets self.h_expect per bond and, when the sweep reaches site N,
    self.H_expect to the total energy expectation value.
    """
    if n_low < 2:
        n_low = self.ham_sites
    if n_high < 1:
        n_high = self.N
    # The first ham_sites - 1 sites contain no completed Hamiltonian term,
    # so their K's are identically zero.
    self.K[1] = sp.zeros((self.D[1], self.D[1]), dtype=self.typ)
    self.K[2] = sp.zeros((self.D[2], self.D[2]), dtype=self.typ)
    for n in range(n_low, n_high + 1):
        if self.ham_sites == 2:
            self.K[n], ex = tm.calc_K_l(self.K[n - 1], self.C[n - 1], self.l[n - 2],
                                        self.r[n], self.A[n], self.AA[n - 1])
        else:
            self.K[n], ex = tm.calc_K_3s_l(
                self.K[n - 1], self.C[n - 2], self.l[n - 3], self.r[n],
                self.A[n], self.AAA[n - 2])
        self.h_expect[n - 1] = ex
    if n_high == self.N:
        # FIX: sp.asscalar() was removed from NumPy (>= 1.23); use .item().
        self.H_expect = self.K[self.N].item()
def update(self, restore_CF=True, normalize=True, auto_truncate=False, restore_CF_after_trunc=True):
    """Updates secondary quantities to reflect the state parameters self.A.
    Must be used after taking a step or otherwise changing the
    parameters self.A before calculating
    physical quantities or taking the next step.
    Also (optionally) restores the canonical form.
    Parameters
    ----------
    restore_CF : bool
        Whether to restore canonical form.
    normalize : bool
        Whether to normalize the state in case restore_CF is False.
    auto_truncate : bool
        Whether to automatically truncate the bond-dimension if
        rank-deficiency is detected. Requires restore_CF.
    restore_CF_after_trunc : bool
        Whether to restore_CF after truncation.
    Returns
    -------
    truncated : bool (only if auto_truncate == True)
        Whether truncation was performed.
    """
    # Base-class update handles canonical form, normalization and truncation.
    trunc = super(EvoMPS_TDVP_Generic, self).update(restore_CF=restore_CF,
                                                    normalize=normalize,
                                                    auto_truncate=auto_truncate,
                                                    restore_CF_after_trunc=restore_CF_after_trunc)
    self.calc_C()
    # K is accumulated from the right for right gauge-fixing, otherwise
    # from the left (calc_K_l).
    if self.gauge_fixing == 'right':
        self.calc_K()
    else:
        self.calc_K_l()
    return trunc
def calc_x(self, n, Vsh, sqrt_l, sqrt_r, sqrt_l_inv, sqrt_r_inv):
    """Calculates the matrix x* that results in the TDVP tangent vector B.
    This is equivalent to eqn. (49) of arXiv:1103.0936v2 [cond-mat.str-el] except
    that, here, norm-preservation is not enforced, such that the optimal
    parameter matrices x*_n (for the parametrization of B) are given by the
    derivative w.r.t. x_n of <Phi[B, A]|Ĥ|Psi[A]>, rather than
    <Phi[B, A]|Ĥ - H|Psi[A]> (with H = <Psi|Ĥ|Psi>).
    An additional sum was added for the single-site hamiltonian.
    Some multiplications have been pulled outside of the sums for efficiency.
    Direct dependencies:
    - A[n - 1], A[n], A[n + 1]
    - r[n], r[n + 1], l[n - 2], l[n - 1]
    - C[n], C[n - 1]
    - K[n + 1]
    - V[n]
    """
    # Boundary guards: neighbouring tensors that would fall outside the
    # chain (site index < 1 or > N) are passed to tm.calc_x* as None so
    # that the corresponding terms are skipped.
    if n > 1:
        lm2 = self.l[n - 2]
        Cm1 = self.C[n - 1]
        Am1 = self.A[n - 1]
    else:
        lm2 = None
        Cm1 = None
        Am1 = None
    if n > 2:
        lm3 = self.l[n - 3]
        Cm2 = self.C[n - 2]
        Am2Am1 = self.AA[n - 2]
    else:
        lm3 = None
        Cm2 = None
        Am2Am1 = None
    # C[n] / K[n + 1] only exist where a Hamiltonian term starts.
    if n <= self.N - self.ham_sites + 1:
        C = self.C[n]
    else:
        C = None
    if n + 1 <= self.N - self.ham_sites + 1:
        Kp1 = self.K[n + 1]
    else:
        Kp1 = None
    if n < self.N - 1:
        Ap1Ap2 = self.AA[n + 1]
        rp2 = self.r[n + 2]
    else:
        Ap1Ap2 = None
        rp2 = None
    if n < self.N:
        rp1 = self.r[n + 1]
        Ap1 = self.A[n + 1]
    else:
        rp1 = None
        Ap1 = None
    if self.ham_sites == 2:
        x = tm.calc_x(Kp1, C, Cm1, rp1,
                      lm2, Am1, self.A[n], Ap1,
                      sqrt_l, sqrt_l_inv, sqrt_r, sqrt_r_inv, Vsh)
    else:
        x = tm.calc_x_3s(Kp1, C, Cm1, Cm2, rp1, rp2, lm2,
                         lm3, Am2Am1, Am1, self.A[n], Ap1, Ap1Ap2,
                         sqrt_l, sqrt_l_inv, sqrt_r, sqrt_r_inv, Vsh)
    return x
def calc_x_l(self, n, Vsh, sqrt_l, sqrt_r, sqrt_l_inv, sqrt_r_inv):
    """Calculates the matrix x* for the TDVP tangent vector B in the
    left gauge-fixing case (mirror of calc_x, with K accumulated from
    the left, hence the dependency on K[n - 1] instead of K[n + 1]).
    """
    # Boundary guards: out-of-chain neighbours are passed as None so the
    # corresponding terms are skipped in tm.calc_x_l*.
    if n > 1:
        Km1 = self.K[n - 1]
        Cm1 = self.C[n - 1]
        Am1 = self.A[n - 1]
        lm2 = self.l[n - 2]
    else:
        Km1 = None
        Cm1 = None
        Am1 = None
        lm2 = None
    if n > 2:
        Cm2 = self.C[n - 2]
        lm3 = self.l[n - 3]
        AAm2 = self.AA[n - 2]
    else:
        Cm2 = None
        lm3 = None
        AAm2 = None
    if n < self.N:
        Ap1 = self.A[n + 1]
        rp1 = self.r[n + 1]
    else:
        Ap1 = None
        rp1 = None
    if n < self.N - 1:
        rp2 = self.r[n + 2]
        AAp1 = self.AA[n + 1]
    else:
        rp2 = None
        AAp1 = None
    # C[n] only exists where a Hamiltonian term starts.
    if n <= self.N - self.ham_sites + 1:
        C = self.C[n]
    else:
        C = None
    if self.ham_sites == 2:
        x = tm.calc_x_l(Km1, C, Cm1, rp1,
                        lm2, Am1, self.A[n], Ap1,
                        sqrt_l, sqrt_l_inv, sqrt_r, sqrt_r_inv, Vsh)
    else:
        x = tm.calc_x_l_3s(Km1, C, Cm1, Cm2, rp1, rp2, lm2, lm3,
                           AAm2, Am1, self.A[n], Ap1, AAp1,
                           sqrt_l, sqrt_l_inv, sqrt_r, sqrt_r_inv, Vsh)
    return x
def calc_BB_Y_2s(self, l_s, l_si, r_s, r_si, Vrh, Vlh):
    """Compute the per-bond Y matrices and squared norms etaBB_sq used to
    build two-site tangent vectors.

    l_s/r_s and l_si/r_si are the (inverse) square roots of the l and r
    environment matrices; Vrh/Vlh are the right/left null-space tensors.
    Bonds where either null space is absent (None entry) are skipped,
    leaving Y[n] unset and etaBB_sq[n] zero.
    """
    Y = sp.empty((self.N + 1), dtype=sp.ndarray)
    etaBB_sq = sp.zeros((self.N + 1), dtype=sp.complex128)
    for n in range(1, self.N):
        # Idiom fix (PEP 8): `x is not None` instead of `not x is None`.
        if Vrh[n + 1] is not None and Vlh[n] is not None:
            if self.ham_sites == 2:
                Y[n], etaBB_sq[n] = tm.calc_BB_Y_2s(self.C[n], Vlh[n],
                    Vrh[n + 1], l_s[n - 1], r_s[n + 1])
            else:
                # Neighbouring tensors are None at the chain boundaries.
                A_m1 = self.A[n - 1] if n - 1 > 0 else None
                A_p2 = self.A[n + 2] if n + 2 <= self.N else None
                l_m2 = self.l[n - 2] if n - 2 >= 0 else None
                r_p2 = self.r[n + 2] if n + 2 <= self.N else None
                Y[n], etaBB_sq[n] = tm.calc_BB_Y_2s_ham_3s(A_m1, A_p2,
                    self.C[n], self.C[n - 1], Vlh[n],
                    Vrh[n + 1], l_m2, r_p2, l_s[n - 1],
                    l_si[n - 1], r_s[n + 1], r_si[n + 1])
    return Y, etaBB_sq
def calc_BB_2s(self, Y, Vlh, | |
str
tools : Version
'''
tag_ = tag
tools_ = Version.from_json(tools) if tools else None
# Validate arguments against known Juju API types.
if tag_ is not None and not isinstance(tag_, (bytes, str)):
raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
if tools_ is not None and not isinstance(tools_, (dict, Version)):
raise Exception("Expected tools_ to be a Version, received: {}".format(type(tools_)))
self.tag = tag_
self.tools = tools_
self.unknown_fields = unknown_fields
class EntityWorkloadVersion(Type):
    """Auto-generated Juju API type: an entity tag with its workload version."""
    _toSchema = {'tag': 'tag', 'workload_version': 'workload-version'}
    _toPy = {'tag': 'tag', 'workload-version': 'workload_version'}
    def __init__(self, tag=None, workload_version=None, **unknown_fields):
        '''
        tag : str
        workload_version : str
        '''
        tag_ = tag
        workload_version_ = workload_version
        # Validate arguments against known Juju API types.
        if tag_ is not None and not isinstance(tag_, (bytes, str)):
            raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
        if workload_version_ is not None and not isinstance(workload_version_, (bytes, str)):
            raise Exception("Expected workload_version_ to be a str, received: {}".format(type(workload_version_)))
        self.tag = tag_
        self.workload_version = workload_version_
        self.unknown_fields = unknown_fields
class EntityWorkloadVersions(Type):
    """Auto-generated Juju API type: a sequence of EntityWorkloadVersion."""
    _toSchema = {'entities': 'entities'}
    _toPy = {'entities': 'entities'}
    def __init__(self, entities=None, **unknown_fields):
        '''
        entities : typing.Sequence[~EntityWorkloadVersion]
        '''
        entities_ = [EntityWorkloadVersion.from_json(o) for o in entities or []]
        # Validate arguments against known Juju API types.
        if entities_ is not None and not isinstance(entities_, (bytes, str, list)):
            raise Exception("Expected entities_ to be a Sequence, received: {}".format(type(entities_)))
        self.entities = entities_
        self.unknown_fields = unknown_fields
class EnvListArgs(Type):
    """Auto-generated Juju API type: filter patterns for an env list call."""
    _toSchema = {'patterns': 'patterns'}
    _toPy = {'patterns': 'patterns'}
    def __init__(self, patterns=None, **unknown_fields):
        '''
        patterns : typing.Sequence[str]
        '''
        patterns_ = patterns
        # Validate arguments against known Juju API types.
        if patterns_ is not None and not isinstance(patterns_, (bytes, str, list)):
            raise Exception("Expected patterns_ to be a Sequence, received: {}".format(type(patterns_)))
        self.patterns = patterns_
        self.unknown_fields = unknown_fields
class EnvListResults(Type):
    """Auto-generated Juju API type: results of an env list call (Payloads)."""
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~Payload]
        '''
        results_ = [Payload.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class Error(Type):
    """Auto-generated Juju API type: an API error (code, info mapping, message)."""
    _toSchema = {'code': 'code', 'info': 'info', 'message': 'message'}
    _toPy = {'code': 'code', 'info': 'info', 'message': 'message'}
    def __init__(self, code=None, info=None, message=None, **unknown_fields):
        '''
        code : str
        info : typing.Mapping[str, typing.Any]
        message : str
        '''
        code_ = code
        info_ = info
        message_ = message
        # Validate arguments against known Juju API types.
        if code_ is not None and not isinstance(code_, (bytes, str)):
            raise Exception("Expected code_ to be a str, received: {}".format(type(code_)))
        if info_ is not None and not isinstance(info_, dict):
            raise Exception("Expected info_ to be a Mapping, received: {}".format(type(info_)))
        if message_ is not None and not isinstance(message_, (bytes, str)):
            raise Exception("Expected message_ to be a str, received: {}".format(type(message_)))
        self.code = code_
        self.info = info_
        self.message = message_
        self.unknown_fields = unknown_fields
class ErrorInfo(Type):
    """Auto-generated Juju API type: supplementary error info (macaroon data)."""
    _toSchema = {'macaroon': 'macaroon', 'macaroon_path': 'macaroon-path'}
    _toPy = {'macaroon': 'macaroon', 'macaroon-path': 'macaroon_path'}
    def __init__(self, macaroon=None, macaroon_path=None, **unknown_fields):
        '''
        macaroon : Macaroon
        macaroon_path : str
        '''
        macaroon_ = Macaroon.from_json(macaroon) if macaroon else None
        macaroon_path_ = macaroon_path
        # Validate arguments against known Juju API types.
        if macaroon_ is not None and not isinstance(macaroon_, (dict, Macaroon)):
            raise Exception("Expected macaroon_ to be a Macaroon, received: {}".format(type(macaroon_)))
        if macaroon_path_ is not None and not isinstance(macaroon_path_, (bytes, str)):
            raise Exception("Expected macaroon_path_ to be a str, received: {}".format(type(macaroon_path_)))
        self.macaroon = macaroon_
        self.macaroon_path = macaroon_path_
        self.unknown_fields = unknown_fields
class ErrorResult(Type):
    """Auto-generated Juju API type: a single result carrying only an Error."""
    _toSchema = {'error': 'error'}
    _toPy = {'error': 'error'}
    def __init__(self, error=None, **unknown_fields):
        '''
        error : Error
        '''
        error_ = Error.from_json(error) if error else None
        # Validate arguments against known Juju API types.
        if error_ is not None and not isinstance(error_, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
        self.error = error_
        self.unknown_fields = unknown_fields
class ErrorResults(Type):
    """Auto-generated Juju API type: a sequence of ErrorResult."""
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ErrorResult]
        '''
        results_ = [ErrorResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class ExternalControllerInfo(Type):
    """Auto-generated Juju API type: connection info for an external controller."""
    _toSchema = {'addrs': 'addrs', 'ca_cert': 'ca-cert', 'controller_alias': 'controller-alias', 'controller_tag': 'controller-tag'}
    _toPy = {'addrs': 'addrs', 'ca-cert': 'ca_cert', 'controller-alias': 'controller_alias', 'controller-tag': 'controller_tag'}
    def __init__(self, addrs=None, ca_cert=None, controller_alias=None, controller_tag=None, **unknown_fields):
        '''
        addrs : typing.Sequence[str]
        ca_cert : str
        controller_alias : str
        controller_tag : str
        '''
        addrs_ = addrs
        ca_cert_ = ca_cert
        controller_alias_ = controller_alias
        controller_tag_ = controller_tag
        # Validate arguments against known Juju API types.
        if addrs_ is not None and not isinstance(addrs_, (bytes, str, list)):
            raise Exception("Expected addrs_ to be a Sequence, received: {}".format(type(addrs_)))
        if ca_cert_ is not None and not isinstance(ca_cert_, (bytes, str)):
            raise Exception("Expected ca_cert_ to be a str, received: {}".format(type(ca_cert_)))
        if controller_alias_ is not None and not isinstance(controller_alias_, (bytes, str)):
            raise Exception("Expected controller_alias_ to be a str, received: {}".format(type(controller_alias_)))
        if controller_tag_ is not None and not isinstance(controller_tag_, (bytes, str)):
            raise Exception("Expected controller_tag_ to be a str, received: {}".format(type(controller_tag_)))
        self.addrs = addrs_
        self.ca_cert = ca_cert_
        self.controller_alias = controller_alias_
        self.controller_tag = controller_tag_
        self.unknown_fields = unknown_fields
class ExternalControllerInfoResult(Type):
    """Auto-generated Juju API type: an ExternalControllerInfo or an Error."""
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}
    def __init__(self, error=None, result=None, **unknown_fields):
        '''
        error : Error
        result : ExternalControllerInfo
        '''
        error_ = Error.from_json(error) if error else None
        result_ = ExternalControllerInfo.from_json(result) if result else None
        # Validate arguments against known Juju API types.
        if error_ is not None and not isinstance(error_, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
        if result_ is not None and not isinstance(result_, (dict, ExternalControllerInfo)):
            raise Exception("Expected result_ to be a ExternalControllerInfo, received: {}".format(type(result_)))
        self.error = error_
        self.result = result_
        self.unknown_fields = unknown_fields
class ExternalControllerInfoResults(Type):
    """Auto-generated Juju API type: a sequence of ExternalControllerInfoResult."""
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ExternalControllerInfoResult]
        '''
        results_ = [ExternalControllerInfoResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class FanConfigEntry(Type):
    """Auto-generated Juju API type: a FAN networking overlay/underlay pair."""
    _toSchema = {'overlay': 'overlay', 'underlay': 'underlay'}
    _toPy = {'overlay': 'overlay', 'underlay': 'underlay'}
    def __init__(self, overlay=None, underlay=None, **unknown_fields):
        '''
        overlay : str
        underlay : str
        '''
        overlay_ = overlay
        underlay_ = underlay
        # Validate arguments against known Juju API types.
        if overlay_ is not None and not isinstance(overlay_, (bytes, str)):
            raise Exception("Expected overlay_ to be a str, received: {}".format(type(overlay_)))
        if underlay_ is not None and not isinstance(underlay_, (bytes, str)):
            raise Exception("Expected underlay_ to be a str, received: {}".format(type(underlay_)))
        self.overlay = overlay_
        self.underlay = underlay_
        self.unknown_fields = unknown_fields
class FanConfigResult(Type):
    """Auto-generated Juju API type: a sequence of FanConfigEntry."""
    _toSchema = {'fans': 'fans'}
    _toPy = {'fans': 'fans'}
    def __init__(self, fans=None, **unknown_fields):
        '''
        fans : typing.Sequence[~FanConfigEntry]
        '''
        fans_ = [FanConfigEntry.from_json(o) for o in fans or []]
        # Validate arguments against known Juju API types.
        if fans_ is not None and not isinstance(fans_, (bytes, str, list)):
            raise Exception("Expected fans_ to be a Sequence, received: {}".format(type(fans_)))
        self.fans = fans_
        self.unknown_fields = unknown_fields
class Filesystem(Type):
    """Auto-generated Juju API type: a filesystem, optionally backed by a volume."""
    _toSchema = {'filesystem_tag': 'filesystem-tag', 'info': 'info', 'volume_tag': 'volume-tag'}
    _toPy = {'filesystem-tag': 'filesystem_tag', 'info': 'info', 'volume-tag': 'volume_tag'}
    def __init__(self, filesystem_tag=None, info=None, volume_tag=None, **unknown_fields):
        '''
        filesystem_tag : str
        info : FilesystemInfo
        volume_tag : str
        '''
        filesystem_tag_ = filesystem_tag
        info_ = FilesystemInfo.from_json(info) if info else None
        volume_tag_ = volume_tag
        # Validate arguments against known Juju API types.
        if filesystem_tag_ is not None and not isinstance(filesystem_tag_, (bytes, str)):
            raise Exception("Expected filesystem_tag_ to be a str, received: {}".format(type(filesystem_tag_)))
        if info_ is not None and not isinstance(info_, (dict, FilesystemInfo)):
            raise Exception("Expected info_ to be a FilesystemInfo, received: {}".format(type(info_)))
        if volume_tag_ is not None and not isinstance(volume_tag_, (bytes, str)):
            raise Exception("Expected volume_tag_ to be a str, received: {}".format(type(volume_tag_)))
        self.filesystem_tag = filesystem_tag_
        self.info = info_
        self.volume_tag = volume_tag_
        self.unknown_fields = unknown_fields
class FilesystemAttachment(Type):
_toSchema = {'filesystem_tag': 'filesystem-tag', 'info': 'info', 'machine_tag': 'machine-tag'}
_toPy = {'filesystem-tag': 'filesystem_tag', 'info': 'info', 'machine-tag': 'machine_tag'}
def __init__(self, filesystem_tag=None, info=None, machine_tag=None, **unknown_fields):
'''
filesystem_tag : str
info : FilesystemAttachmentInfo
machine_tag : str
'''
| |
# Migration script from the fabmiz/osf.io repository.
from datetime import timedelta
import json
import logging
import re
import sys
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from scripts import utils as script_utils
from website.app import init_app
from website import models
from website import settings
logger = logging.getLogger(__name__)
# Multiple updates to any <node>.child_node_subscriptions causes only the last one to succeed.
# Cache the intended value instead, updating it here before writing.
cns_dict_to_update = {}
# Dictionary containing {<preprint._id>: <node._id>} mapping for pairs that swapped guids
preprint_node_swapped_ids_map = {}
def get_targets():
    """Return a Mongo cursor over all node documents with a preprint file set."""
    return database.node.find({'preprint_file': {'$ne': None}})
def validate_node_document(document):
    """Assert that a node document has the fields needed to build a preprint,
    defaulting preprint_providers to ['osf'] when it is missing.

    NOTE(review): uses `assert` for validation, so the checks disappear
    under `python -O`; acceptable for a one-off migration script, but do
    not run this optimized.
    """
    logger.info('* Validating and repairing node {}'.format(document['_id']))
    assert document.get('preprint_created'), '{} has no preprint_created'.format(document['_id'])
    assert document.get('preprint_file'), '{} has no preprint_file'.format(document['_id'])
    assert document.get('preprint_subjects'), '{} has no preprint_subjects'.format(document['_id'])
    if not document.get('preprint_providers'):
        logger.debug('{} has no preprint_providers, assuming OSF'.format(document['_id']))
        database['node'].find_and_modify(
            {'_id': document['_id']},
            {'$set': {
                'preprint_providers': ['osf']
            }}
        )
def validate_node_preprint_subjects(node):
    """Repair a node's preprint_subjects list in place.

    Drops references to subjects that no longer exist and appends a missing
    parent for any orphaned child subject. After each mutation the function
    saves the node and restarts validation from scratch (recursive call
    followed by `break`), because the list was modified while iterating.
    """
    flat_subjects = node.preprint_subjects
    for subject_id in flat_subjects:
        subject = models.Subject.load(subject_id)
        if not subject:
            logger.debug('Found nonexistant subject {} on node {}, removing'.format(subject_id, node._id))
            node.preprint_subjects.remove(subject_id)
            node.save()
            validate_node_preprint_subjects(node)
            break
        if subject.parents and not set([c._id for c in subject.parents]) & set(flat_subjects):
            logger.debug('Found subject {} on node {} without parents. Adding first parent - {}'.format(subject_id, node._id, subject.parents[0]._id))
            node.preprint_subjects.append(subject.parents[0]._id)
            node.save()
            validate_node_preprint_subjects(node)
            break
def create_preprint_service_from_node(document, swap_cutoff):
    """Create one PreprintService per provider listed on a node document.

    Returns a dict mapping each created preprint's _id to a tuple of
    (node _id, whether the provider was non-OSF). For OSF-provider
    preprints created within `swap_cutoff` of the node, the node/preprint
    guids are swapped so the older guid stays with the preprint.
    """
    created = {}
    for provider_id in document['preprint_providers']:
        non_osf_provider = False
        node = models.Node.load(document['_id'])
        provider = models.PreprintProvider.load(provider_id)
        # primary_file already set correctly* on node
        if not provider:
            logger.warn('Unable to find provider {} for node {}, skipping'.format(provider_id, document['_id']))
            continue
        try:
            logger.info('* Creating preprint for node {}'.format(node._id))
            preprint = models.PreprintService(node=node, provider=provider)
            preprint.save()
            # Backdate the preprint's timestamps to the original request time,
            # bypassing model-level auto-timestamps via a raw Mongo update.
            database['preprintservice'].find_and_modify(
                {'_id': preprint._id},
                {'$set': {
                    'date_created': document['preprint_created'],
                    'date_published': document['preprint_created'],
                    'is_published': True
                }}
            )
        except KeyExistsException:
            logger.warn('Duplicate PreprintService found for provider {} on node {}, skipping'.format(provider._id, node._id))
            continue
        else:
            # Move the DOI from the node onto the preprint-article field.
            # NOTE(review): the guard reads node.preprint_doi but the value
            # written comes from document['preprint_doi'] — presumably
            # identical since `document` is this node's raw record; verify.
            if node.preprint_doi:
                database['node'].find_and_modify(
                    {'_id': node._id},
                    {'$set': {
                        'preprint_article_doi': document['preprint_doi']
                    }}
                )
            database['node'].find_and_modify(
                {'_id': node._id},
                {'$unset': {
                    'preprint_doi': '',
                    'preprint_created': ''
                }}
            )
            node.reload()
            preprint.reload()
            if preprint.provider._id == 'osf':
                # Give Guid retention priotity to OSF-provider
                if should_swap_guids(node, preprint, swap_cutoff):
                    swap_guids(node, preprint)
                else:
                    logger.info('* Not swapping guids for preprint {} and preexisting node {}'.format(preprint._id, node._id))
            else:
                logger.info('* Not swapping guids for preprint {} for provider {}'.format(preprint._id, preprint.provider))
                non_osf_provider = True
            node.reload()
            preprint.reload()
            validate_node_preprint_subjects(preprint.node)
            preprint.node.reload()
            enumerate_and_set_subject_hierarchies(preprint)
            # Backdate date_modified last, after all of the saves above.
            database['preprintservice'].find_and_modify(
                {'_id': preprint._id},
                {'$set': {
                    'date_modified': document['preprint_created'],
                }}
            )
        # Only reached on success; failure paths `continue` above.
        created.update({preprint._id: (node._id, non_osf_provider)})
    return created
def should_swap_guids(node, preprint, swap_cutoff):
    """Return True if the preprint was created within `swap_cutoff` (a
    timedelta) of its node, i.e. the node presumably existed only to host
    the preprint. Reloads the preprint first to get fresh timestamps.
    """
    preprint.reload()
    logger.info('Preprint {} - Node {} timedelta = {}'.format(preprint._id, node._id, preprint.date_created - node.date_created))
    return preprint.date_created - node.date_created < swap_cutoff
def swap_guids(node, preprint):
    """Exchange the guids of a node and its preprint.

    The node takes the preprint's (newer) guid and the preprint takes the
    node's (older) one; both Guid referents are repointed, and foreign
    references to the node's old id are rewritten afterwards.
    """
    logger.info('* Swapping guids for preprint {} and node {}'.format(preprint._id, node._id))
    preprint_node_swapped_ids_map[node._id] = preprint._id  # node._id is about to become preprint._id, reverse here
    old_guid = models.Guid.load(node._id)
    new_guid = models.Guid.load(preprint._id)
    node._id = new_guid._id
    node.save()
    preprint._id = old_guid._id
    preprint.node = node
    preprint.save()
    old_guid.referent = preprint
    new_guid.referent = node
    old_guid.save()
    new_guid.save()
    # old_guid._id now belongs to the preprint; update everything that still
    # points at it as a node reference.
    update_foreign_fields(old_guid._id, node)
def update_foreign_fields(old_id, node):
dry_run = '--dry' in sys.argv
logger.info('* Updating ForeignFields for node {}->{}'.format(old_id, node))
bns_owner = list(database['boxnodesettings'].find({'owner': old_id}))
if bns_owner:
logger.info('** Updating {} BoxNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in bns_owner]))
for doc in bns_owner:
database['boxnodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
bus_og = list(database['boxusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if bus_og:
logger.info('** Updating {} BoxUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in bus_og]))
for doc in bus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['boxusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
advns_o = list(database['addondataversenodesettings'].find({'owner': old_id}))
if advns_o:
logger.info('** Updating {} AddonDataverseNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in advns_o]))
for doc in advns_o:
database['addondataversenodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
advus_og = list(database['addondataverseusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if advus_og:
logger.info('** Updating {} AddonDataverseUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in advus_og]))
for doc in advus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['addondataverseusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
dbns_o = list(database['dropboxnodesettings'].find({'owner': old_id}))
if dbns_o:
logger.info('** Updating {} DropboxNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in dbns_o]))
for doc in dbns_o:
database['dropboxnodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
dbus_og = list(database['dropboxusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if dbus_og:
logger.info('** Updating {} DropboxUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in dbus_og]))
for doc in dbus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['dropboxusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
afsns_o = list(database['addonfigsharenodesettings'].find({'owner': old_id}))
if afsns_o:
logger.info('** Updating {} AddonFigShareNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in afsns_o]))
for doc in afsns_o:
database['addonfigsharenodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
## Figshare has no oauth_grants
fwns_o = list(database['forwardnodesettings'].find({'owner': old_id}))
if fwns_o:
logger.info('** Updating {} ForwardNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in fwns_o]))
for doc in fwns_o:
database['forwardnodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
ghns_o = list(database['githubnodesettings'].find({'owner': old_id}))
if ghns_o:
logger.info('** Updating {} GithubNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in ghns_o]))
for doc in ghns_o:
database['githubnodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
ghus_og = list(database['githubusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if ghus_og:
logger.info('** Updating {} GithubUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in ghus_og]))
for doc in ghus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['githubusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
gdns_o = list(database['googledrivenodesettings'].find({'owner': old_id}))
if gdns_o:
logger.info('** Updating {} GoogleDriveNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in gdns_o]))
for doc in gdns_o:
database['googledrivenodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
gdus_og = list(database['googledriveusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if gdus_og:
logger.info('** Updating {} GoogleDriveUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in gdus_og]))
for doc in gdus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['googledriveusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
mns_o = list(database['mendeleynodesettings'].find({'owner': old_id}))
if mns_o:
logger.info('** Updating {} MendeleyNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in mns_o]))
for doc in mns_o:
database['mendeleynodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
mus_og = list(database['mendeleyusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if mus_og:
logger.info('** Updating {} MendeleyUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in mus_og]))
for doc in mus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['mendeleyusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
osfsns_o = list(database['osfstoragenodesettings'].find({'owner': old_id}))
if osfsns_o:
logger.info('** Updating {} OsfStorageNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in osfsns_o]))
for doc in osfsns_o:
database['osfstoragenodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
ocns_o = list(database['addonowncloudnodesettings'].find({'owner': old_id}))
if ocns_o:
logger.info('** Updating {} AddonOwnCloudNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in ocns_o]))
for doc in ocns_o:
database['addonowncloudnodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
ocus_og = list(database['addonowncloudusersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if ocus_og:
logger.info('** Updating {} AddonOwnCloudUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in ocus_og]))
for doc in ocus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['addonowncloudusersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
s3ns_o = list(database['s3nodesettings'].find({'owner': old_id}))
if s3ns_o:
logger.info('** Updating {} s3NodeSettings (owner) {}'.format(old_id, [d['_id'] for d in s3ns_o]))
for doc in s3ns_o:
database['s3nodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
s3us_og = list(database['s3usersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if s3us_og:
logger.info('** Updating {} S3UserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in s3us_og]))
for doc in s3us_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['s3usersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
awns_o = list(database['addonwikinodesettings'].find({'owner': old_id}))
if awns_o:
logger.info('** Updating {} AddonWikiNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in awns_o]))
for doc in awns_o:
database['addonwikinodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
nwp_n = list(database['nodewikipage'].find({'node': old_id}))
if nwp_n:
logger.info('** Updating {} NodeWikiPage (node) {}'.format(old_id, [d['_id'] for d in nwp_n]))
for doc in nwp_n:
database['nodewikipage'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'node': node._id
}}
)
zns_o = list(database['zoteronodesettings'].find({'owner': old_id}))
if zns_o:
logger.info('** Updating {} ZoteroNodeSettings (owner) {}'.format(old_id, [d['_id'] for d in zns_o]))
for doc in zns_o:
database['zoteronodesettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'owner': node._id
}}
)
zus_og = list(database['zoterousersettings'].find({'oauth_grants.{}'.format(old_id): {'$ne': None}}))
if zus_og:
logger.info('** Updating {} ZoteroUserSettings (oauth_grants) {}'.format(old_id, [d['_id'] for d in zus_og]))
for doc in zus_og:
og = doc['oauth_grants']
og[node._id] = og.pop(old_id)
database['zoterousersettings'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'oauth_grants': og
}}
)
aj_sn = list(database['archivejob'].find({'src_node': old_id}))
if aj_sn:
logger.info('** Updating {} ArchiveJobs (src_node) {}'.format(old_id, [d['_id'] for d in aj_sn]))
for doc in aj_sn:
database['archivejob'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'src_node': node._id
}}
)
tfn_n = list(database['trashedfilenode'].find({'node': old_id}))
if tfn_n:
logger.info('** Updating {} TrashedFileNodes (node) {}'.format(old_id, [d['_id'] for d in tfn_n]))
for doc in tfn_n:
del_on = doc.pop('deleted_on') # Remove non-JSON-serializable datetime fields
last_touch = doc.pop('last_touched')
hist_mods = [doc['history'][doc['history'].index(h)].pop('modified') for h in doc['history']]
replacement = json.loads(re.sub(r'\b{}\b'.format(old_id), node._id, json.dumps(doc)))
for i, mod in enumerate(hist_mods):
replacement['history'][i]['modified'] = mod
database['trashedfilenode'].find_and_modify(
{'_id': doc['_id']},
{'$set':{
'node': replacement['node'],
'history': replacement['history']
}}
)
sfn_n = list(database['storedfilenode'].find({'node': old_id}))
if sfn_n:
logger.info('** Updating {} StoredFileNodes (node) {}'.format(old_id, [d['_id'] for d in sfn_n]))
for doc | |
},
"GUELPH": {
"de_DE": "Guelph",
"es_ES": "Guelph",
"fr_FR": "Guelph",
"it_IT": "Guelph",
"ja_JP": "ゲルフ",
"ko_KR": "궬프",
"pl_PL": "Guelph",
"pt_BR": "Guelph",
"ru_RU": "Гуэлф"
},
"GUIMARAES": {
"de_DE": "Guimarães",
"es_ES": "Guimarães",
"fr_FR": "Guimarães",
"it_IT": "Guimarães",
"ja_JP": "ギマランイス",
"ko_KR": "기마라에스",
"pl_PL": "Guimarães",
"pt_BR": "Guimarães",
"ru_RU": "Гимарайнш"
},
"GUMI": {
"de_DE": "Gumi",
"es_ES": "Gumi",
"fr_FR": "Gumi",
"it_IT": "Gumi",
"ja_JP": "亀尾",
"ko_KR": "구미",
"pl_PL": "Gumi",
"pt_BR": "Gumi",
"ru_RU": "Куми"
},
"GUNPO": {
"de_DE": "Gunpo",
"es_ES": "Gunpo",
"fr_FR": "Gunpo",
"it_IT": "Gunpo",
"ja_JP": "軍浦",
"ko_KR": "군포",
"pl_PL": "Gunpo",
"pt_BR": "Gunpo",
"ru_RU": "Кунпхо"
},
"GWANGJU": {
"de_DE": "Gwangju",
"es_ES": "Gwangju",
"fr_FR": "Gwangju",
"it_IT": "Gwangju",
"ja_JP": "光州",
"ko_KR": "광주",
"pl_PL": "Gwangju",
"pt_BR": "Gwangju",
"ru_RU": "Кванджу"
},
"GYEONGGI_GWANGJU": {
"de_DE": "Gyeonggi Gwangju",
"es_ES": "Gyeonggi Gwangju",
"fr_FR": "Gyeonggi Gwangju",
"it_IT": "Gyeonggi Gwangju",
"ja_JP": "京畿広州",
"ko_KR": "경기도 광주",
"pl_PL": "Gyeonggi Gwangju",
"pt_BR": "Gyeonggi Gwangju",
"ru_RU": "Кёнги-Кванджу"
},
"GYEONGJU": {
"de_DE": "Gyeongju",
"es_ES": "Gyeongju",
"fr_FR": "Gyeongju",
"it_IT": "Gyeongju",
"ja_JP": "慶州",
"ko_KR": "경주",
"pl_PL": "Gyeongju",
"pt_BR": "Gyeongju",
"ru_RU": "Кёнджу"
},
"GYOR": {
"de_DE": "Győr",
"es_ES": "Győr",
"fr_FR": "Győr",
"it_IT": "Győr",
"ja_JP": "ジェール",
"ko_KR": "죄르",
"pl_PL": "Győr",
"pt_BR": "Győr",
"ru_RU": "Дьер"
},
"HAARLEM": {
"de_DE": "Haarlem",
"es_ES": "Haarlem",
"fr_FR": "Haarlem",
"it_IT": "Haarlem",
"ja_JP": "ハールレム",
"ko_KR": "하를럼",
"pl_PL": "Haarlem",
"pt_BR": "Haarlem",
"ru_RU": "Харлем"
},
"HADDINGTON": {
"de_DE": "Haddington",
"es_ES": "Haddington",
"fr_FR": "Haddington",
"it_IT": "Haddington",
"ja_JP": "ハーディントン",
"ko_KR": "해딩턴",
"pl_PL": "Haddington",
"pt_BR": "Haddington",
"ru_RU": "Хаддингтон"
},
"HAGMATANA": {
"de_DE": "Hagmatana",
"es_ES": "Hagmatana",
"fr_FR": "Hagmatana",
"it_IT": "Hagmatana",
"ja_JP": "アグマターナ",
"ko_KR": "하그마타나",
"pl_PL": "Ekbatana",
"pt_BR": "Hagmatana",
"ru_RU": "Экбатана"
},
"HAI_DUONG": {
"de_DE": "Hải Dương",
"es_ES": "Hải Dương",
"fr_FR": "Hải Dương",
"it_IT": "Hải Dương",
"ja_JP": "ハイズオン",
"ko_KR": "하이 즈엉",
"pl_PL": "Hải Dương",
"pt_BR": "Hai Duong",
"ru_RU": "Хайзыонг"
},
"HAI_PHONG": {
"de_DE": "Hải Phòng",
"es_ES": "Hải Phòng",
"fr_FR": "Hải Phòng",
"it_IT": "Hải Phòng",
"ja_JP": "ハイフォン",
"ko_KR": "하이퐁",
"pl_PL": "Hải Phòng",
"pt_BR": "Haifom",
"ru_RU": "Хайфон"
},
"HAKODATE": {
"de_DE": "Hakodate",
"es_ES": "Hakodate",
"fr_FR": "Hakodate",
"it_IT": "Hakodate",
"ja_JP": "函館",
"ko_KR": "하코다테",
"pl_PL": "Hakodate",
"pt_BR": "Hakodate",
"ru_RU": "Хакодатэ"
},
"HALAB": {
"de_DE": "Halab",
"es_ES": "Halab",
"fr_FR": "Halab",
"it_IT": "Halab",
"ja_JP": "ハラブ",
"ko_KR": "할랍",
"pl_PL": "Halab",
"pt_BR": "Halab",
"ru_RU": "Халеб"
},
"HALEP": {
"de_DE": "Halep",
"es_ES": "Halep",
"fr_FR": "Halep",
"it_IT": "Halep",
"ja_JP": "ハレプ",
"ko_KR": "알레포",
"pl_PL": "Halep",
"pt_BR": "Halep",
"ru_RU": "Халеп"
},
"HALICARNASSUS": {
"de_DE": "Halikarnassos",
"es_ES": "Halicarnaso",
"fr_FR": "Halicarnasse",
"it_IT": "Alicarnasso",
"ja_JP": "ハリカルナッソス",
"ko_KR": "할리카르나소스",
"pl_PL": "Halikarnas",
"pt_BR": "Halicarnasso",
"ru_RU": "Галикарнас"
},
"HALIFAX": {
"de_DE": "Halifax",
"es_ES": "Halifax",
"fr_FR": "Halifax",
"it_IT": "Halifax",
"ja_JP": "ハリファックス",
"ko_KR": "핼리팩스",
"pl_PL": "Halifax",
"pt_BR": "Halifax",
"ru_RU": "Галифакс"
},
"HALMSTAD": {
"de_DE": "Halmstad",
"es_ES": "Halmstad",
"fr_FR": "Halmstad",
"it_IT": "Halmstad",
"ja_JP": "ハルムスタッド",
"ko_KR": "할름스타드",
"pl_PL": "Halmstad",
"pt_BR": "Halmostádio",
"ru_RU": "Хальмстад"
},
"HAMA": {
"de_DE": "Hama",
"es_ES": "Hama",
"fr_FR": "Hama",
"it_IT": "Hama",
"ja_JP": "ハマ",
"ko_KR": "하마",
"pl_PL": "Hama",
"pt_BR": "Hama",
"ru_RU": "Хама"
},
"HAMAMATSU": {
"de_DE": "Hamamatsu",
"es_ES": "Hamamatsu",
"fr_FR": "Hamamatsu",
"it_IT": "Hamamatsu",
"ja_JP": "浜松",
"ko_KR": "하마마쓰",
"pl_PL": "Hamamatsu",
"pt_BR": "Hamamatsu",
"ru_RU": "Хамамацу"
},
"HAMAR": {
"de_DE": "Hamar",
"es_ES": "Hamar",
"fr_FR": "Hamar",
"it_IT": "Hamar",
"ja_JP": "ハーマル",
"ko_KR": "하마르",
"pl_PL": "Hama",
"pt_BR": "Hamar",
"ru_RU": "Хамар"
},
"HAMAZI": {
"de_DE": "Hamazi",
"es_ES": "Hamazi",
"fr_FR": "Hamazi",
"it_IT": "Hamazi",
"ja_JP": "ハマツィ",
"ko_KR": "하마지",
"pl_PL": "Hamazi",
"pt_BR": "Hamazi",
"ru_RU": "Хамази"
},
"HAMBURG": {
"de_DE": "Hamburg",
"es_ES": "Hamburgo",
"fr_FR": "Hambourg",
"it_IT": "Amburgo",
"ja_JP": "ハンブルグ",
"ko_KR": "함부르크",
"pl_PL": "Hamburg",
"pt_BR": "Hamburgo",
"ru_RU": "Гамбург"
},
"HAMHUNG": {
"de_DE": "Hamhŭng",
"es_ES": "Hamhung",
"fr_FR": "Hamhung",
"it_IT": "Hamhung",
"ja_JP": "咸興",
"ko_KR": "함흥",
"pl_PL": "Hamhung",
"pt_BR": "Hamhung",
"ru_RU": "Хамхын"
},
"HAMI": {
"de_DE": "Hami",
"es_ES": "Hami",
"fr_FR": "Hami",
"it_IT": "Hami",
"ja_JP": "哈密",
"ko_KR": "하미",
"pl_PL": "Hami",
"pt_BR": "Hami",
"ru_RU": "Хами"
},
"HAMILTON": {
"de_DE": "Hamilton",
"es_ES": "Hamilton",
"fr_FR": "Hamilton",
"it_IT": "Hamilton",
"ja_JP": "ハミルトン",
"ko_KR": "해밀턴",
"pl_PL": "Hamilton",
"pt_BR": "Hamilton",
"ru_RU": "Гамильтон"
},
"HANDAN": {
"de_DE": "Handan",
"es_ES": "Handan",
"fr_FR": "Handan",
"it_IT": "Handan",
"ja_JP": "邯鄲",
"ko_KR": "한단",
"pl_PL": "Handan",
"pt_BR": "Handan",
"ru_RU": "Ханьдань"
},
"HANOVER": {
"de_DE": "Hannover",
"es_ES": "Hannover",
"fr_FR": "Hanovre",
"it_IT": "Hanover",
"ja_JP": "ハノーファー",
"ko_KR": "하노버",
"pl_PL": "Hanover",
"pt_BR": "Hanover",
"ru_RU": "Ганновер"
},
"HARADUM": {
"de_DE": "Haradum",
"es_ES": "Haradum",
"fr_FR": "Haradum",
"it_IT": "Haradum",
"ja_JP": "ハラドゥム",
"ko_KR": "하라둠",
"pl_PL": "Haradum",
"pt_BR": "Haradum",
"ru_RU": "Харадум"
},
"HARAIVA": {
"de_DE": "Haraiva",
"es_ES": "Haraiva",
"fr_FR": "Hérat",
"it_IT": "Haraiva",
"ja_JP": "ハライヴァ",
"ko_KR": "하라이바",
"pl_PL": "Herat",
"pt_BR": "Haraiva",
"ru_RU": "Герат"
},
"HARAR": {
"de_DE": "Harar",
"es_ES": "Harar",
"fr_FR": "Harar",
"it_IT": "Harar",
"ja_JP": "ハラール",
"ko_KR": "하라르",
"pl_PL": "Harer",
"pt_BR": "Harar",
"ru_RU": "Харэр"
},
"HARBIDUM": {
"de_DE": "Harbidum",
"es_ES": "Harbidum",
"fr_FR": "Harbidum",
"it_IT": "Harbidum",
"ja_JP": "ハービダム",
"ko_KR": "하르비둠",
"pl_PL": "Harbidum",
"pt_BR": "Harbidum",
"ru_RU": "Харбидум"
},
"HARBU": {
"de_DE": "Harbu",
"es_ES": "Harbu",
"fr_FR": "Harbu",
"it_IT": "Harbu",
"ja_JP": "ハルブ",
"ko_KR": "하르부",
"pl_PL": "Harbu",
"pt_BR": "Harbu",
"ru_RU": "Харбу"
},
"HARIHARALAYA": {
"de_DE": "Hariharalaya",
"es_ES": "Hariharalaya",
"fr_FR": "Hariharalaya",
"it_IT": "Hariharalaya",
"ja_JP": "ハリハララヤ",
"ko_KR": "하리하랄라야",
"pl_PL": "Hariharalaja",
"pt_BR": "Hariharalaya",
"ru_RU": "Харихаралайя"
},
"HASTINGS": {
"de_DE": "Hastings",
"es_ES": "Hastings",
"fr_FR": "Hastings",
"it_IT": "Hastings",
"ja_JP": "ヘイスティングス",
"ko_KR": "헤이스팅스",
"pl_PL": "Hastings",
"pt_BR": "Hastings",
"ru_RU": "Гастингс"
},
"HATTIN": {
"de_DE": "Hattin",
"es_ES": "Hattin",
"fr_FR": "Hattin",
"it_IT": "Hattin",
"ja_JP": "ヒッティーン",
"ko_KR": "하틴",
"pl_PL": "Hattin",
"pt_BR": "Hattin",
"ru_RU": "Хаттин"
},
"HATTUSA": {
"de_DE": "Hattuscha",
"es_ES": "Hattusa",
"fr_FR": "Hattusa",
"it_IT": "Hattusa",
"ja_JP": "ハットゥシャ",
"ko_KR": "하투사",
"pl_PL": "Hattusa",
"pt_BR": "Hattusa",
"ru_RU": "Хаттуса"
},
"HATTUSA_1": {
"de_DE": "Hattuscha",
"es_ES": "Hattusa",
"fr_FR": "Hattusa",
"it_IT": "Hattusa",
"ja_JP": "ハットゥシャ",
"ko_KR": "하투사",
"pl_PL": "Hattusa",
"pt_BR": "Hattusa",
"ru_RU": "Хаттуса"
},
"HAVANA": {
"de_DE": "Havanna",
"es_ES": "La Habana",
"fr_FR": "La Havane",
"it_IT": "Havana",
"ja_JP": "ハバナ",
"ko_KR": "아바나",
"pl_PL": "Hawana",
"pt_BR": "Havana",
"ru_RU": "Гавана"
},
"HA_LONG": {
"de_DE": "Hạ Long",
"es_ES": "Hạ Long",
"fr_FR": "Hạ Long",
"it_IT": "Hạ Long",
"ja_JP": "ハロン",
"ko_KR": "하롱",
"pl_PL": "Hạ Long",
"pt_BR": "Ha Long",
"ru_RU": "Халонг"
},
"HA_NOI": {
"de_DE": "Thang Long",
"es_ES": "Thăng Long",
"fr_FR": "Thang Long",
"it_IT": "Thăng Long",
"ja_JP": "タンロン",
"ko_KR": "탕롱",
"pl_PL": "Thăng Long",
"pt_BR": "Thăng Long",
"ru_RU": "Тханглонг"
},
"HEERLEN": {
"de_DE": "Heerlen",
"es_ES": "Heerlen",
"fr_FR": "Heerlen",
"it_IT": "Heerlen",
"ja_JP": "ヘールレン",
"ko_KR": "헤이를런",
"pl_PL": "Heerlen",
"pt_BR": "Heerlen",
"ru_RU": "Херлен"
},
"HEFEI": {
"de_DE": "Hefei",
"es_ES": "Hefei",
"fr_FR": "Hefei",
"it_IT": "Hefei",
"ja_JP": "合肥",
"ko_KR": "허페이",
"pl_PL": "Hefei",
"pt_BR": "Hefei",
"ru_RU": "Хэфэй"
},
"HEH": {
"de_DE": "Heh",
"es_ES": "Heh",
"fr_FR": "Heh",
"it_IT": "Heh",
"ja_JP": "ヘフ",
"ko_KR": "헤흐",
"pl_PL": "Heh",
"pt_BR": "Heh",
"ru_RU": "Хех"
},
"HEIDELBERG": {
"de_DE": "Heidelberg",
"es_ES": "Heidelberg",
"fr_FR": "Heidelberg",
"it_IT": "Heidelberg",
"ja_JP": "ハイデルベルク",
"ko_KR": "하이델베르크",
"pl_PL": "Heidelberg",
"pt_BR": "Heidelberg",
"ru_RU": "Гейдельберг"
},
"HELSINGBORG": {
"de_DE": "Helsingborg",
"es_ES": "Helsingborg",
"fr_FR": "Helsingborg",
"it_IT": "Helsingborg",
"ja_JP": "ヘルシンボリ",
"ko_KR": "헬싱보리",
"pl_PL": "Helsingborg",
"pt_BR": "Helsimburgo",
"ru_RU": "Хельсингборг"
},
"HERACLEA_LYNCESTIS": {
"de_DE": "Heraclea Lyncestis",
"es_ES": "Heraclea Lyncestis",
"fr_FR": "Héraclée Lyncestide",
"it_IT": "Eraclea Pelagonia",
"ja_JP": "ヘラクレア・リンセスティス",
"ko_KR": "헤라클레아 링케스티스",
"pl_PL": "Heraklea Lynkestis",
"pt_BR": "Heraclea Lyncestis",
"ru_RU": "Гераклея Линкестис"
},
"HERVEY_BAY": {
"de_DE": "Hervey Bay",
"es_ES": "Hervey Bay",
"fr_FR": "Hervey Bay",
"it_IT": "Hervey Bay",
"ja_JP": "ハービーベイ",
"ko_KR": "허비 베이",
"pl_PL": "Hervey Bay",
"pt_BR": "Hervey Bay",
"ru_RU": "Харви-Бей"
},
"HIMEJI": {
"de_DE": "Himeji",
"es_ES": "Himeji",
"fr_FR": "Himeji",
"it_IT": "Himeji",
"ja_JP": "姫路",
"ko_KR": "히메지",
"pl_PL": "Himeji",
"pt_BR": "Himeji",
"ru_RU": "Химэдзи"
},
"HIPPONENSIS_SINUS": {
"de_DE": "Hipponensis Sinus",
"es_ES": "Hipponensis Sinus",
"fr_FR": "Hipponensis Sinus",
"it_IT": "Ippona",
"ja_JP": "ヒッポネンシス・シヌス",
"ko_KR": "히포넨시스 사이너스",
"pl_PL": "Hipponensis Sinus",
"pt_BR": "Hipponensis Sinus",
"ru_RU": "Гиппон"
},
"HIRITUM": {
"de_DE": "Hiritum",
"es_ES": "Hiritum",
"fr_FR": "Hiritum",
"it_IT": "Hiritum",
"ja_JP": "ヒリトゥム",
"ko_KR": "히리툼",
"pl_PL": "Hiritum",
"pt_BR": "Hiritum",
"ru_RU": "Хиритум"
},
"HIROSHIMA": {
"de_DE": "Hiroshima",
"es_ES": "Hiroshima",
"fr_FR": "Hiroshima",
"it_IT": "Hiroshima",
"ja_JP": "広島",
"ko_KR": "히로시마",
"pl_PL": "Hiroszima",
"pt_BR": "Hiroshima",
"ru_RU": "Хиросима"
},
"HISPALIS": {
"de_DE": "Hispalis",
"es_ES": "Hispalis",
"fr_FR": "Hispalis",
"it_IT": "Hispalis",
"ja_JP": "ヒスパリス",
"ko_KR": "히스팔리스",
"pl_PL": "Hispalis",
"pt_BR": "Hispalis",
"ru_RU": "Гиспалис"
},
"HIT": {
"de_DE": "Hit",
"es_ES": "Hit",
"fr_FR": "Hit",
"it_IT": "Hit",
"ja_JP": "ヒート",
"ko_KR": "히트",
"pl_PL": "Hit",
"pt_BR": "Hit",
"ru_RU": "Хит"
},
"HLATIKULU": {
"de_DE": "Hlatikulu",
"es_ES": "Hlatikulu",
"fr_FR": "Hlatikulu",
"it_IT": "Hlatikulu",
"ja_JP": "ハラティクル",
"ko_KR": "흘라티쿨루",
"pl_PL": "Hlatikulu",
"pt_BR": "Hlatikulu",
"ru_RU": "Хлатикулу"
},
"HLOBANE": {
"de_DE": "Hlobane",
"es_ES": "Hlobane",
"fr_FR": "Hlobane",
"it_IT": "Hlobane",
"ja_JP": "フロバネ",
"ko_KR": "흘로바네",
"pl_PL": "Hlobane",
"pt_BR": "Hlobane",
"ru_RU": "Хлобане"
},
"HLUHLUWE": {
"de_DE": "Hluhluwe",
"es_ES": "Hluhluwe",
"fr_FR": "Hluhluwe",
"it_IT": "Hluhluwe",
"ja_JP": "フルフルウェ",
"ko_KR": "룰루웨",
"pl_PL": "Hluhluwe",
"pt_BR": "Hluhluwe",
"ru_RU": "Хлухлуве"
},
"HOA_LU": {
"de_DE": "Hoa Lư",
"es_ES": "Hoa Lư",
"fr_FR": "Hoa Lư",
"it_IT": "Hoa Lư",
"ja_JP": "ホアルー",
"ko_KR": "호아루",
"pl_PL": "Hoa | |
frame_no_key = '%02d' % frame_no
ray_background = rayleigh.rvs(loc=0, scale=1, size=(img_h, img_w)) #sigma_n=E(n^2) = 2*scale^2
# Erc: average clutter energy.
erc = np.sum(ray_background ** 2) / ray_background.size
#add targets on the simulated position in each frame
simulated_frame = ray_background
# Each frame gets multiple targets.
gt_targets = gt_dict[frame_no_key]
for tid in gt_targets:
#Note that here x,y in gt is the top-lelf position.
x, y, w, h, theta = gt_targets[tid]
cx = x + w/2
cy = y + h/2
simulated_frame = add_gaussian_template_on_clutter_v2(cx, cy, w, h, theta, erc, snr,
simulated_frame,swerling_type)
# if tid == 'amelia':#uniform distributed target.
# simulated_frame = add_uniform_template_on_clutter(cx, cy, w, h, theta, erc, snr, simulated_frame, swerling_type)
# else:#Gaussian distributed target.
# simulated_frame = add_gaussian_template_on_clutter(cx, cy, w, h, theta, erc, snr, simulated_frame, swerling_type)
#simulated_frame = uti.frame_normalize(simulated_frame)
fids = list(gt_dict.keys())
fids.sort()
if(int(frame_no)==int(fids[-1])):
print('Averaged (extended region -- peak point) SNR is (%.2f - %.2f)' % (np.mean(local_snrs), np.mean(global_snrs)))
return simulated_frame
def manuver_in_clutter(snr=10):
    '''
    Simulate a single maneuvering extended target embedded in Rayleigh
    clutter and animate it frame by frame.

    The trajectory (position, extent, orientation per frame) comes from
    s_manuver(); a fresh clutter background is drawn for every frame and
    the Gaussian target template is added on top at the requested SNR.

    :param snr: signal-to-noise ratio (dB-scale unit used by the template
                generator) of the injected target.
    :return: None (displays frames via matplotlib).
    '''
    img_w = 256
    img_h = 256
    rayscale = 1  # Rayleigh scale parameter of the clutter background.
    # NOTE: the original pre-loop clutter frame and its average energy
    # (Erc) were dead code — the frame was regenerated each iteration and
    # Erc was never read — so both have been removed.
    xs, ys, ws, hs, thetas = s_manuver()
    for x, y, w, h, theta in zip(xs, ys, ws, hs, thetas):
        # New clutter realization per frame, then inject the target.
        rayclutter = rayleigh.rvs(loc=0, scale=rayscale, size=(img_h, img_w))
        et_clutter_frame = add_gaussian_template_on_clutter(x, y, w, h, theta, snr, rayclutter)
        plt.imshow(et_clutter_frame)
        plt.pause(0.1)
def multiple_extended_targets_in_clutter():
    '''
    Build per-frame ground truth for three extended targets:

      * 'victor' - constant-velocity track
      * 'amelia' - constant-acceleration track
      * 'urich'  - constant-turn (circular) track

    :return: dict keyed by zero-padded frame number ('00', '01', ...);
             each value maps target id -> [top_left_x, top_left_y,
             width, height, theta].
    '''
    npoints = 51

    def _shape_track(xs, ys, width, height):
        # Constant extent along the whole track; orientation follows the
        # direction of motion.  Returns (top-left xs, top-left ys, ws, hs, thetas).
        ws = np.ones(npoints) * width
        hs = np.ones(npoints) * height
        thetas = get_orientation(xs, ys)
        return xs - ws / 2, ys - hs / 2, ws, hs, thetas

    # Constant velocity, starting at (40, 50), 28x20 extent.
    cv_xs, cv_ys = constant_velocity(40, 50, (1.5, 1.2), npoints)
    victor = _shape_track(cv_xs, cv_ys, 28, 20)

    # Constant acceleration, starting at (180, 50), 28x20 extent.
    ca_xs, ca_ys = constant_accelerate(180, 50, (-1.5, -0.5), (0.1, 0.1), npoints)
    amelia = _shape_track(ca_xs, ca_ys, 28, 20)

    # Constant turn: circle of radius 70 centered near (56, 120), 36x20 extent.
    ct_xs, ct_ys = constant_turn(56, 120, 70, 0.0685 / 1.5, npoints)
    urich = _shape_track(ct_xs, ct_ys, 36, 20)

    gt_dict = {}
    for i in range(npoints):
        gt_dict['%02d' % i] = {
            tid: [trk[0][i], trk[1][i], trk[2][i], trk[3][i], trk[4][i]]
            for tid, trk in (('victor', victor), ('amelia', amelia), ('urich', urich))
        }
    return gt_dict
def mtt_sim():
    '''
    Simulate four point targets in a 200x200 region of interest for
    exercising the JPDA tracker: two constant-velocity tracks and two
    constant-turn tracks, plotted with per-point frame-number labels.

    :return: None (shows a matplotlib figure).
    '''
    npoints = 50
    trajectories = [
        (constant_velocity(10, 20, (1.5, 1.7), npoints), 'red'),
        (constant_velocity(10, 80, (1.5, -2), npoints), 'green'),
        (constant_turn(30, 50, 60, 0.0685, npoints), 'blue'),
        (constant_turn(60, 100, 50, -0.15, npoints), 'yellow'),
    ]

    plt.axis([0, 200, 0, 200])
    for (xs, ys), color in trajectories:
        plt.plot(xs, ys, '.', color=color)

    # Label every point with its 1-based frame number.
    labels = [str(i) for i in range(1, npoints + 1)]
    for (xs, ys), _ in trajectories:
        show_text(xs, ys, labels)
    plt.show()
def plot_ellipse(ax, xs, ys, ws, hs, facecolor):
    '''
    Plot an oriented ellipse (plus its index label) at every ground-truth
    point of a trajectory, with the ellipse angle taken from the local
    direction of motion (atan2 of the point-to-point differences).

    :param ax: matplotlib axis object to draw on
    :param xs: x-coordinate vector (ellipse centers)
    :param ys: y-coordinate vector (ellipse centers)
    :param ws: width vector
    :param hs: height vector
    :param facecolor: color used for ellipses, markers and labels
    :return: None
    '''
    dys = np.diff(ys)
    dxs = np.diff(xs)
    thetas_less = np.arctan2(dys, dxs)  # one orientation per segment: len(xs) - 1
    thetas = np.pad(thetas_less, (0, 1), 'edge')  # repeat last orientation for the final point
    # BUGFIX: the trajectory plot and the axis configuration used to sit
    # inside the loop, redrawing the full trajectory len(xs) times; they
    # are loop-invariant and now run once.
    plt.plot(xs, ys, '.', color=facecolor, markersize=2)
    for i in range(len(xs)):
        angle_deg = thetas[i] * 180 / np.pi
        e = Ellipse(xy=[xs[i], ys[i]], width=ws[i], height=hs[i], angle=angle_deg, alpha=0.5, color=facecolor)
        ax.add_patch(e)
        ax.text(xs[i], ys[i], str(i), fontsize=9, color=facecolor)
    ax.set_aspect('equal')
    ax.autoscale()
def multiple_extended_targets_sim():
'''
simulate 4 extended targets in a roi, pay attention to rotation.
theta = atan(dy/dx)
:return:
'''
x0 = 10
y0 = 20
#velo = (1.5, 1.7)
velo = (1.5, 2.7)
npoints = 50
x1m, y1m = constant_velocity(x0, y0, velo, npoints)
motion_noise = np.random.normal(3,0.4,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x1t = x1m + motion_noise[0:npoints]
y1t = y1m + motion_noise[npoints:2*npoints]
w1t = 4
h1t = 2
x1s = x1t + observation_noise[:npoints]
y1s = y1t + observation_noise[npoints:2*npoints]
w1s = np.random.normal(w1t, 0.5, npoints)
h1s = np.random.normal(h1t, 0.5, npoints)
x0 = 10
y0 = 80
velo = (1.5, -2)
npoints = 50
x2m, y2m = constant_velocity(x0, y0, velo, npoints)
motion_noise = np.random.normal(4,0.5,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x2t = x2m + motion_noise[0:npoints]
y2t = y2m + motion_noise[npoints:2*npoints]
w2t = 4
h2t = 3
x2s = x2t + observation_noise[:npoints]
y2s = y2t + observation_noise[npoints:2*npoints]
w2s = np.random.normal(w2t, 0.5, npoints)
h2s = np.random.normal(h2t, 0.5, npoints)
radius = 60
omega = 0.0685
npoints =50
x0 = 30
y0 = 50
x3m, y3m = constant_turn(x0, y0, radius, omega, npoints)
motion_noise = np.random.normal(3,0.5,2*npoints)
observation_noise = np.random.normal(2,0.5,2*npoints)
x3t = x3m + motion_noise[0:npoints]
y3t = y3m + motion_noise[npoints:2*npoints]
w3t = 6
h3t = 3
x3s = x3t + observation_noise[:npoints]
y3s = y3t + observation_noise[npoints:2*npoints]
w3s = np.random.normal(w3t, 0.5, npoints)
h3s = np.random.normal(h3t, 0.5, npoints)
radius = 50
omega = -0.15
npoints =50
x0 = 60
y0 = 100
x4m,y4m = | |
{}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/LinkedTransactions/{LinkedTransactionID}")
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_linked_transaction")
def delete_payment(
self,
xero_tenant_id,
payment_id,
payment_delete,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates a specific payment for invoices and credit notes # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str payment_id: Unique identifier for a Payment (required)
:param PaymentDelete payment_delete: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Payments
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_payment`"
)
# verify the required parameter 'payment_id' is set
if payment_id is None:
raise ValueError(
"Missing the required parameter `payment_id` "
"when calling `delete_payment`"
)
# verify the required parameter 'payment_delete' is set
if payment_delete is None:
raise ValueError(
"Missing the required parameter `payment_delete` "
"when calling `delete_payment`"
)
collection_formats = {}
path_params = {
"PaymentID": payment_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = payment_delete
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Payments/{PaymentID}")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Payments",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_payment")
def delete_tracking_category(
self,
xero_tenant_id,
tracking_category_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes a specific tracking category # noqa: E501
OAuth2 scope: accounting.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str tracking_category_id: Unique identifier for a TrackingCategory (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TrackingCategories
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_tracking_category`"
)
# verify the required parameter 'tracking_category_id' is set
if tracking_category_id is None:
raise ValueError(
"Missing the required parameter `tracking_category_id` "
"when calling `delete_tracking_category`"
)
collection_formats = {}
path_params = {
"TrackingCategoryID": tracking_category_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/TrackingCategories/{TrackingCategoryID}")
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TrackingCategories",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_tracking_category")
def delete_tracking_options(
self,
xero_tenant_id,
tracking_category_id,
tracking_option_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Deletes a specific option for a specific tracking category # noqa: E501
OAuth2 scope: accounting.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str tracking_category_id: Unique identifier for a TrackingCategory (required)
:param str tracking_option_id: Unique identifier for a Tracking Option (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: TrackingOptions
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `delete_tracking_options`"
)
# verify the required parameter 'tracking_category_id' is set
if tracking_category_id is None:
raise ValueError(
"Missing the required parameter `tracking_category_id` "
"when calling `delete_tracking_options`"
)
# verify the required parameter 'tracking_option_id' is set
if tracking_option_id is None:
raise ValueError(
"Missing the required parameter `tracking_option_id` "
"when calling `delete_tracking_options`"
)
collection_formats = {}
path_params = {
"TrackingCategoryID": tracking_category_id,
"TrackingOptionID": tracking_option_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url(
"/TrackingCategories/{TrackingCategoryID}/Options/{TrackingOptionID}"
)
try:
return self.api_client.call_api(
url,
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="TrackingOptions",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "delete_tracking_options")
def email_invoice(
self,
xero_tenant_id,
invoice_id,
request_empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Sends a copy of a specific invoice to related contact via email # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str invoice_id: Unique identifier for an Invoice (required)
:param RequestEmpty request_empty: (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: None
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `email_invoice`"
)
# verify the required parameter 'invoice_id' is set
if invoice_id is None:
raise ValueError(
"Missing the required parameter `invoice_id` "
"when calling `email_invoice`"
)
# verify the required parameter 'request_empty' is set
if request_empty is None:
raise ValueError(
"Missing the required parameter `request_empty` "
"when calling `email_invoice`"
)
collection_formats = {}
path_params = {
"InvoiceID": invoice_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = request_empty
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Invoices/{InvoiceID}/Email")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "email_invoice")
def get_account(
self,
xero_tenant_id,
account_id,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves a single chart of accounts by using a unique account Id # noqa: E501
OAuth2 scope: accounting.settings, accounting.settings.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str account_id: Unique identifier for Account object (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Accounts
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_account`"
)
# verify the required parameter 'account_id' is set
if account_id is None:
raise ValueError(
"Missing the required parameter `account_id` "
"when calling `get_account`"
)
collection_formats = {}
path_params = {
"AccountID": account_id,
}
query_params = []
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Accounts/{AccountID}")
try:
return self.api_client.call_api(
url,
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Accounts",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "get_account")
def get_account_attachment_by_file_name(
self,
xero_tenant_id,
account_id,
file_name,
content_type,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Retrieves an attachment for a specific account by filename # noqa: E501
OAuth2 scope: accounting.attachments, accounting.attachments.read
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param str account_id: Unique identifier for Account object (required)
:param str file_name: Name of the attachment (required)
:param str content_type: The mime type of the attachment file you are retrieving i.e image/jpg, application/pdf (required)
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: file
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `get_account_attachment_by_file_name`"
)
# verify the required parameter 'account_id' is set
if account_id is None:
raise ValueError(
"Missing the required parameter `account_id` "
"when calling `get_account_attachment_by_file_name`"
)
# verify the required parameter 'file_name' is set
if file_name is None:
raise ValueError(
"Missing the required parameter `file_name` "
"when calling `get_account_attachment_by_file_name`"
)
# verify the required parameter 'content_type' is set
| |
import os
import sys
import threading
from fanstatic import compat
import fanstatic.checksum
import fanstatic.registry
# Default signature/marker string used by fanstatic's machinery.
DEFAULT_SIGNATURE = 'fanstatic'
# URL prefixes: VERSION_PREFIX is prepended to Library.signature() output
# (see Library.signature below); BUNDLE_PREFIX marks bundle URLs.
VERSION_PREFIX = ':version:'
BUNDLE_PREFIX = ':bundle:'
# Key name for needed resources (presumably a WSGI environ key -- its
# consumer is outside this excerpt).
NEEDED = 'fanstatic.needed'
# Resource "mode" names.
DEBUG = 'debug'
MINIFIED = 'minified'
# Module-level switches; see set_resource_file_existence_checking() and
# set_auto_register_library() below.
_resource_file_existence_checking = True
_auto_register_library = False
def set_resource_file_existence_checking(v):
    """Turn resource file existence checking on or off globally.

    With checking enabled (the default), a resource that points at a
    non-existent file raises :py:class:`UnknownResourceError`, catching
    typos early. Test suites often disable it temporarily so they can
    declare fake resources that have no filesystem counterpart.

    :param v: ``True`` to enable checking, ``False`` to disable it.
    """
    global _resource_file_existence_checking
    _resource_file_existence_checking = v
def set_auto_register_library(v):
    """Enable or disable automatic Library self-registration.

    When enabled, every newly constructed :py:class:`Library` adds itself
    to the global library registry. Disabled by default; mainly useful in
    test suites.

    :param v: ``True`` to enable auto-registration, ``False`` to disable.
    """
    global _auto_register_library
    _auto_register_library = v
class UnknownResourceExtensionError(Exception):
    """A resource has an unrecognized extension.
    """

class ModeResourceDependencyError(Exception):
    """A Mode Resource does not have the same dependencies as the
    resource it replaces.
    """

# BBB backwards compatibility: old name kept as an alias so existing
# imports of UnknownResourceExtension keep working.
UnknownResourceExtension = UnknownResourceExtensionError

class UnknownResourceError(Exception):
    """Resource refers to non-existent resource file.
    """

class ConfigurationError(Exception):
    """Impossible or illegal configuration.
    """

class LibraryDependencyCycleError(Exception):
    """Dependency cycles between libraries aren't allowed.

    A dependency cycle between libraries occurs when the file in one
    library depends on a file in another library, while that library
    depends on a file in the first library.
    """

class SlotError(Exception):
    """A slot was filled in incorrectly.

    If a slot is required, it must be filled in by passing an extra
    dictionary parameter to the ``.need`` method, containing a mapping
    from the required :py:class:`Slot` to :py:class:`Resource`.

    When a slot is filled, the resource filled in should have
    the same dependencies as the slot, or a subset of the dependencies
    of the slot. It should also have the same extension as the slot.
    If this is not the case, it is an error.
    """
class Library(object):
    """The resource library.

    This object defines which directory is published and can be
    referred to by :py:class:`Resource` objects to describe
    these resources.

    :param name: A string that uniquely identifies this library.

    :param rootpath: An absolute or relative path to the directory
      that contains the static resources this library publishes. If
      relative, it will be relative to the directory of the module
      that initializes the library.

    :param ignores: A list of globs used to determine which files
      and directories not to publish.
    """

    path = None
    """
    The absolute path to the directory which contains the static resources
    this library publishes.
    """

    # Cached directory-hash; filled lazily by signature().
    _signature = None

    def __init__(self, name, rootpath, ignores=None, version=None,
                 compilers=None, minifiers=None):
        self.name = name
        self.rootpath = rootpath
        self.ignores = ignores or []
        # Resolve rootpath relative to the directory of the module that
        # instantiates this Library (see caller_dir below).
        self.path = os.path.join(caller_dir(), rootpath)
        self.version = version
        self._library_deps = set()  # libraries this library depends on
        self.known_resources = {}   # relpath -> Resource
        self.known_assets = []      # all registered assets, in order
        self.library_nr = None      # computed lazily by init_library_nr()
        # Record the defining module's name. NOTE: frame-depth sensitive --
        # Library() must be called directly from the defining module.
        self.module = sys._getframe(1).f_globals['__name__']
        self.compilers = compilers
        if self.compilers is None:
            self.compilers = {}
        self.minifiers = minifiers
        if self.minifiers is None:
            self.minifiers = {}
        # Optional global hook, mostly for test setups.
        if _auto_register_library:
            fanstatic.get_library_registry().add(self)

    def __repr__(self):
        return "<Library '%s' at '%s'>" % (self.name, self.path)

    def init_library_nr(self):
        """This can only be called once all resources are known.

        i.e. once sort_resources is called this can be called.
        once library numbers are calculated once this will be done
        very quickly.
        """
        # if there already is a known library nr, we're done
        if self.library_nr is not None:
            return
        # the maximum library number is the maximum number of the
        # depending libraries + 1
        max_library_nr = 0
        for resource in compat.itervalues(self.known_resources):
            for depend in resource.depends:
                for asset in depend.list_assets():
                    # we don't care about resources in the same library
                    if asset.library is self:
                        continue
                    # assign library number of library we are dependent on
                    # recursively if necessary
                    if asset.library.library_nr is None:
                        asset.library.init_library_nr()
                    max_library_nr = max(
                        max_library_nr, asset.library.library_nr + 1)
        self.library_nr = max_library_nr

    def check_dependency_cycle(self, resource):
        # Gather every library the resource (transitively) touches, then
        # verify none of them depends back on this library.
        for dependency in resource.resources:
            self._library_deps.add(dependency.library)
        for dep in self._library_deps:
            if dep is self:
                continue
            if self in dep._library_deps:
                raise LibraryDependencyCycleError(
                    'Library cycle detected in resource %s' % resource)

    def register(self, resource):
        """Register a Resource with this Library.

        A Resource knows about its Library. After a Resource has registered
        itself with its Library, the Library knows about the Resources
        associated to it.
        """
        if isinstance(resource, Resource):
            # Resources are additionally indexed by relative path; two
            # resources may not share one path.
            if resource.relpath in self.known_resources:
                raise ConfigurationError(
                    'Resource path %s is already defined.' % resource.relpath)
            self.known_resources[resource.relpath] = resource
        # Non-Resource assets (presumably slots/groups) are only tracked here.
        self.known_assets.append(resource)

    def signature(self, recompute_hashes=False, version_method=None):
        """Get a unique signature for this Library.

        If a version has been defined, we return the version.

        If no version is defined, a hash of the contents of the directory
        indicated by ``path`` is calculated.
        If ``recompute_hashes`` is set to ``True``, the signature will be
        recalculated each time, which is useful during development when
        changing Javascript/css code and images.
        """
        if self.version is not None:
            return VERSION_PREFIX + self.version
        if recompute_hashes:
            # Always re-compute.
            sig = version_method(self.path)
        elif self._signature is None:
            # Only compute if not computed before.
            sig = self._signature = version_method(self.path)
        else:
            # Use cached value.
            sig = self._signature
        return VERSION_PREFIX + sig
# Total hack to be able to get the dir the resources will be in.
def caller_dir():
    # sys._getframe(2) skips caller_dir itself and its direct caller
    # (Library.__init__), landing in the module that instantiated the
    # Library. Frame-depth sensitive: only call via Library.__init__.
    return os.path.dirname(sys._getframe(2).f_globals['__file__'])
class InclusionRenderers(dict):
    """Registry mapping a filename extension to an ``(order, renderer)`` pair."""

    # Order assigned to the next registration that doesn't specify one.
    _default_order = 0

    def register(self, extension, renderer, order=None):
        """Register a renderer function for a given filename extension.

        :param extension: the filename extension to register the
          renderer for.
        :param renderer: a callable that should accept a URL argument
          and return a rendered HTML snippet for this resource.
        :param order: optionally, to control the order in which the
          snippets are included in the HTML document. If no order is
          given, the resource will be included after all other resource
          inclusions. The lower the order number, the earlier in the
          rendering the inclusion will appear.
        """
        if order is not None:
            # Make sure future order-less registrations sort after this one.
            self._default_order = max(self._default_order, order + 1)
        else:
            order = self._default_order
        self[extension] = (order, renderer)
# Global renderer registry plus a convenience alias for its register method.
inclusion_renderers = InclusionRenderers()
register_inclusion_renderer = inclusion_renderers.register
def render_ico(url):
    """Render an HTML favicon link tag pointing at *url*."""
    return '<link rel="shortcut icon" type="image/x-icon" href="{0}"/>'.format(url)
def render_css(url):
    """Render an HTML stylesheet link tag pointing at *url*."""
    return '<link rel="stylesheet" type="text/css" href="{0}" />'.format(url)
def render_js(url):
    """Render an HTML script tag sourcing *url*."""
    return '<script type="text/javascript" src="{0}"></script>'.format(url)
def render_print_css(url):
    """Render a print-media stylesheet link tag pointing at *url*."""
    return ('<link rel="stylesheet" type="text/css" href="{0}" '
            'media="print" />').format(url)
def render_screen_css(url):
    """Render a screen-media stylesheet link tag pointing at *url*."""
    return ('<link rel="stylesheet" type="text/css" media="screen" '
            'href="{0}" />').format(url)
# Built-in renderers: CSS sorts first (10), then JS (20), then favicons (30).
register_inclusion_renderer('.css', render_css, 10)
register_inclusion_renderer('.js', render_js, 20)
register_inclusion_renderer('.ico', render_ico, 30)
class Renderable(object):
    """A renderable.

    A renderable must have a library attribute and a dependency_nr.
    """
    def render(self, library_url):
        """Render this renderable as something to insert in HTML.

        This returns a snippet. Abstract: subclasses must override.
        """
        raise NotImplementedError()
class Dependable(object):
    """Base for objects that participate in dependency tracking.

    Concrete dependables carry three attributes: ``depends`` (direct
    dependencies), ``resources`` (the transitive resource closure) and
    ``supports`` (dependables that depend on this one).
    """
    resources = None
    depends = None
    supports = None

    def add_dependency(self, dependency):
        """Add a single extra dependency, refusing to create a cycle."""
        if dependency in self.depends:
            return  # already a dependency; nothing to do
        if dependency in self.list_supporting():
            raise ValueError('Cannot create dependencies loops')
        self.set_dependencies(set(self.depends) | {dependency})

    def set_dependencies(self, dependencies):
        # Abstract: concrete classes define how dependencies are stored.
        raise NotImplementedError()

    def list_assets(self):
        # Abstract: concrete classes enumerate the assets they stand for.
        raise NotImplementedError()

    def list_supporting(self):
        """Return every dependable that directly or transitively supports this one."""
        found = set()
        for supporter in self.supports:
            found.add(supporter)
            found |= supporter.list_supporting()
        return found
class Asset(Dependable):
    """An asset can be either a resource or a slot.
    """
    def __init__(self, library, depends=None):
        self.library = library
        self.supports = set()
        self.set_dependencies(depends)
        # Make the owning library aware of this asset.
        self.library.register(self)

    def set_dependencies(self, depends):
        # A bare string here is almost certainly a mistake (a sequence of
        # assets is expected), so reject it outright.
        assert not isinstance(depends, compat.basestring)
        if depends is not None:
            self.depends = set(depends)
        else:
            self.depends = set()
        # resources = this asset plus everything it transitively needs.
        self.resources = set([self])
        for depend in self.depends:
            # Register ourselves as a supporter of each dependency.
            depend.supports.add(self)
            self.resources.update(depend.resources)
        # Update resources if needed: propagate the enlarged resource set
        # to everything that (transitively) depends on this asset.
        for dependable in self.list_supporting():
            dependable.resources.update(self.resources)
        # Check for library dependency cycles.
        self.library.check_dependency_cycle(self)

    def list_assets(self):
        # A plain asset stands only for itself.
        return set([self])

    def init_dependency_nr(self):
        # on dependency within the library
        # dependency_nr is a topological rank: one more than the deepest
        # dependency's rank. Assumes dependencies were ranked first.
        dependency_nr = 0
        for depend in self.depends:
            for asset in depend.list_assets():
                dependency_nr = max(asset.dependency_nr + 1, dependency_nr)
        self.dependency_nr = dependency_nr
# Sentinel distinct from None (presumably a default-argument marker for the
# code below; its use is outside this excerpt).
NOTHING = object()
class Resource(Renderable, Asset):
"""A resource.
A resource specifies a single resource in a library so that it can
be included in a web page. This is useful for Javascript and CSS
resources in particular. Some static resources such as images are
not included in this way and therefore do not have to be defined
this way.
:param library: the :py:class:`Library` this resource is in.
:param relpath: the relative path (from | |
<gh_stars>1-10
import requests
import json
import base64
import threading
import time
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtPrintSupport import *
from PyQt5.QtCore import *
from PyQt5 import sip
import sys
# import win32api
# import win32con
from PIL import Image
import numpy as np
import io
import os
# from imageProcessing import image_process
class Chat2Comm:
    """Low-level HTTP helper for the chat server's API.

    Holds the server endpoints, the protocol's action/field name constants,
    and thin wrappers around ``requests`` for synchronous and asynchronous
    (threaded) GET/POST calls.
    """

    def __init__(self, server_choose=1):
        # Index 0: hosted debug server; index 1: local development server.
        self.servers = ["http://lance-latina-debug.herokuapp.com/v3/api",
                        "http://127.0.0.1:5000/v3/api"]
        self.SERVER = self.servers[server_choose]
        # v3 API action names.
        self.MAIN = ""
        self.ABOUT = "about"
        self.BEAT = "beat"
        self.LOGIN = "login"
        self.SIGNUP = "signup"
        self.GET_MESSAGES = "get_messages"
        self.SEND_MESSAGE = "send_message"
        self.GET_HEAD = "get_head"
        self.CLEAR_ALL = "clear_all"
        self.SET_USER = "set_user"
        self.JOIN_IN = "join_in"
        self.CREATE_ROOM = "create_room"
        self.SET_ROOM = "set_room"
        self.GET_ROOM = "get_room"
        self.GET_ROOM_ALL = "get_room_all"
        self.GET_ROOM_INFO = "get_room_info"
        self.SET_ROOM_INFO = "set_room_info"
        self.UPLOAD = "upload"
        self.MAKE_FRIENDS = "make_friends"
        # Request/response field names.
        self.UID = 'uid'
        self.MID = 'mid'
        self.GID = 'gid'
        self.AUTH = 'auth'
        self.MESSAGE_TYPE = 'message_type'
        self.USERNAME = 'username'
        self.PASSWORD = 'password'
        self.EMAIL = 'email'
        self.NAME = 'name'

    # Asynchronous POST (v2 style): run the request in a daemonless thread
    # and hand the raw response object to `callback`.
    def post_(self, url: str, params: dict, callback):
        def worker(target_url, payload, on_done):
            on_done(requests.post(target_url, data=payload))
        threading.Thread(target=worker, args=(url, params, callback)).start()

    # Synchronous POST (v3 style): the action name travels in the payload.
    def post(self, action: str, params: dict):
        params['action'] = action
        response = requests.post(self.SERVER, data=params)
        if response.status_code != 200:
            return {'code': '-1', 'message': "Server Error."}
        return response.json()

    # Synchronous GET (v2 style).
    def get(self, url: str):
        response = requests.get(url)
        if response.status_code != 200:
            return {'code': '-1', 'message': "Server Error."}
        return response.json()

    # Asynchronous GET (v2 style).
    def get_(self, url: str, callback):
        def worker(target_url, on_done):
            on_done(requests.get(target_url))
        threading.Thread(target=worker, args=(url, callback)).start()
class Chat2Client:
    """Stateful chat client built on Chat2Comm.

    Keeps the login session (username + auth token), the current room id
    and the highest message id seen so far, persisting them to save.json
    so a session survives restarts.
    """
    def __init__(self, server_choose=1):
        self.comm = Chat2Comm(server_choose=server_choose)
        self.username = ""
        self.auth = ""          # session token handed out by the server
        self.gid = 0            # current room id; 0 means "no room entered"
        self.latest_mid = 0     # high-water mark of message ids already seen
        self.load()             # restore a previous session if save.json exists
    def init(self):
        # Reset to a logged-out state and persist it.
        self.username = ""
        self.auth = ""
        self.gid = 0
        self.latest_mid = 0
        self.save()
    def save(self):
        # Persist session state (note: gid is deliberately not saved).
        with open('save.json', 'w') as f:
            f.write(json.dumps({
                'username': self.username,
                'auth': self.auth,
                'latest_mid': self.latest_mid}))
    def load(self):
        # Best-effort restore; any failure (missing file, bad JSON, missing
        # keys) is printed and ignored, leaving the fresh defaults in place.
        try:
            with open('save.json', 'r') as f:
                settings = json.load(f)
                self.username = settings['username']
                self.auth = settings['auth']
                self.latest_mid = settings['latest_mid']
        except Exception as e:
            print(e)
    def parse_errors(self, result):
        # Minimal error reporting: echo the server-provided message.
        print(result['message'])
    def post_auth(self, action: str, params: dict):
        # POST an action with the session auth token injected into params.
        params['auth'] = self.auth
        return self.comm.post(action, params)
    def login_(self, username, password):
        # Asynchronous login variant (v2 API). Returns immediately; the
        # callback updates self.auth/self.username when the reply arrives.
        def login_callback(request):
            result = json.loads(request.text)
            if result['code'] != '0':
                self.parse_errors(result)
                return int(result['code'])
            self.auth = result['data']['auth']
            self.username = username
        self.comm.post_(self.comm.LOGIN, {'username': username, 'password': password}, login_callback)
        return
    def login(self, username, password):
        # Synchronous login; returns the server's numeric code (0 = success).
        result = self.post_auth(self.comm.LOGIN, {'username': username, 'password': password})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        self.username = username
        self.auth = result['data']['user_info']['auth']
        return int(result['code'])
    def signup(self, username, password, email='<EMAIL>', name='Lance', user_type='printer'):
        # Create an account; returns the server's numeric code (0 = success).
        result = self.post_auth(self.comm.SIGNUP,
                                {'username': username, 'password': password,
                                 'email': email, 'name': name, 'user_type': user_type})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def logout(self):
        # Forget the token locally; no request is sent to the server.
        self.auth = ''
    def beat(self):
        # Heartbeat/keep-alive; returns the server's numeric code.
        result = self.post_auth(self.comm.BEAT, {})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def create_room(self, room_name):
        result = self.post_auth(self.comm.CREATE_ROOM, {'name': room_name})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def get_rooms(self):
        # NOTE(review): returns an int error code on failure but a list of
        # room dicts on success -- callers must check the type.
        result = self.post_auth(self.comm.GET_ROOM_ALL, {})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return result['data']['room_data']
    def enter_room(self, gid: int):
        # Purely local: remembers which room subsequent calls target.
        self.gid = gid
    def quit_room(self):
        self.gid = 0
    def get_messages(self, gid: int=0):
        # must init gid to get single room messages
        # Fetch messages newer than latest_mid, advance the high-water mark
        # and persist it. Returns None on error, a list of dicts otherwise.
        if gid == 0:
            gid = self.gid
        result = self.post_auth(self.comm.GET_MESSAGES, {'since': self.latest_mid, 'gid': gid})
        if result['code'] != '0':
            self.parse_errors(result)
            return
        messages = result['data']['message']
        for m in messages:
            self.latest_mid = max(self.latest_mid, m['mid'])
        # print("\t\tlatest_mid:", self.latest_mid)
        self.save()
        return messages
    def send_message(self, text: str, message_type='text', gid=0):
        # gid == 0 means "use the room entered via enter_room()".
        if gid == 0:
            gid = self.gid
        result = self.post_auth(self.comm.SEND_MESSAGE,
                                {'message_type': message_type, 'text': text, 'gid': gid})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def upload(self, filename, data):
        # `data` is expected to be base64-encoded (see module_test below).
        # Returns the server's upload_result dict on success, int code on error.
        result = self.post_auth(self.comm.UPLOAD, {'data': data, 'filename': filename})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return result['data']['upload_result']
    def clear_all(self):
        # result = self.post_auth(self.comm.CLEAR_ALL, {})
        # Debug endpoint: plain GET, no auth token attached.
        result = self.comm.get(self.comm.SERVER + '/clear_all')
        print('Clear_ALL:', result)
        return int(result['code'])
    def make_friends(self, friend: str):
        result = self.post_auth(self.comm.MAKE_FRIENDS, {'friend': friend})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def join_in(self, gid: int):
        result = self.post_auth(self.comm.JOIN_IN, {'gid': str(gid)})
        if result['code'] != '0':
            self.parse_errors(result)
            return int(result['code'])
        return int(result['code'])
    def get_image(self, url):
        # Download an image and decode it into a PIL Image (no error handling;
        # a bad URL or non-image payload will raise).
        content = requests.get(url).content
        stream = io.BytesIO(content)
        image = Image.open(stream)
        return image
'''
def delay_enter():
time.sleep(0.5)
win32api.keybd_event(0x0D, 0, 0, 0) # Enter
win32api.keybd_event(0x0D, 0, win32con.KEYEVENTF_KEYUP, 0) # 释放按键
'''
class TextPrinterWindow(QMainWindow):
    """Window whose construction immediately prints *text* to the default printer.

    All the work happens in __init__; the window itself is never shown.
    """
    def __init__(self, parent=None, text=None, paper_type='58'):
        super(TextPrinterWindow, self).__init__(parent)
        printer = QPrinter()
        # printer.setOutputFormat(QPrinter.PdfFormat)
        # printer.setOutputFileName("pdf.pdf")
        # rect = QRectF(0, 0, 180, 3276)
        if paper_type == 'A4':
            # A4 page in device pixels: physical mm size times dots-per-inch.
            width = printer.logicalDpiX() * (210 / 25.4)
            height = printer.logicalDpiY() * (297 / 25.4)
            rect = QRectF(0, 0, width, height)
        else:
            # Narrow paper: fixed 180px print width, very tall page
            # (presumably a 58mm receipt printer -- TODO confirm).
            width = 180
            height = 3276
            rect = QRectF(0, 0, width, height)
        option = QTextOption(Qt.AlignLeft)
        option.setWrapMode(QTextOption.WordWrap)
        painter = QPainter()
        # Use the whole sheet; no margins.
        printer.setPageMargins(0, 0, 0, 0, QPrinter.Millimeter)
        painter.setPen(QPen(QColor(0, 0, 0), 3))
        # begin()/end() on a QPrinter spool an actual print job.
        painter.begin(printer)
        if text is not None:
            painter.drawText(rect, text, option)
        painter.end()
class ImagePrinterWindow(QMainWindow):
    """Window whose construction immediately prints a PIL *image*.

    All the work happens in __init__; the window itself is never shown.
    """
    def __init__(self, parent=None, image=None, paper_type='58'):
        super(ImagePrinterWindow, self).__init__(parent)
        printer = QPrinter()
        # printer.setOutputFormat(QPrinter.PdfFormat)
        # printer.setOutputFileName("pdf.pdf")
        option = QTextOption(Qt.AlignLeft)
        option.setWrapMode(QTextOption.WordWrap)
        painter = QPainter()
        printer.setPageMargins(0, 0, 0, 0, QPrinter.Millimeter)
        painter.setPen(QPen(QColor(0, 0, 0), 3))
        painter.begin(printer)
        if image is not None:
            # image = QImage(image)
            # image = Image.open(image)
            # Rotate landscape images into portrait orientation.
            if image.size[1] < image.size[0]:
                image = image.rotate(90, expand=True)
            if paper_type == 'A4':
                width = printer.logicalDpiX() * (210 / 25.4)
                height = printer.logicalDpiY() * (297 / 25.4)
                # Aspect-fit: scale to whichever page dimension binds first.
                if width / height < image.size[0] / image.size[1]:
                    rect = QRectF(0, 0, width, image.size[1] * (width / image.size[0]))
                else:
                    rect = QRectF(0, 0, image.size[0] * (height / image.size[1]), height)
            else:
                # Narrow paper: fixed 180px width, height scaled to keep aspect.
                width = 180
                rect = QRectF(0, 0, width, image.size[1] * (width / image.size[0]))
            img = np.array(image)
            # Stride shape[1]*3 assumes a 3-channel RGB image; RGBA or
            # grayscale input would break here -- TODO confirm callers.
            im = QImage(img[:], img.shape[1], img.shape[0], img.shape[1] * 3, QImage.Format_RGB888)
            painter.drawImage(rect, im)
        painter.end()
class Chat2Printer:
    """Bridges chat content to the system printer via the Qt printer windows.

    Relies on the module-level ``app`` QApplication existing before use.
    """

    def print_text(self, text, paper_type='58'):
        """Print plain text. paper_type: 'A4' for A4 geometry, anything else
        uses the narrow default paper."""
        global app
        # Bug fix: the original ignored paper_type and never forwarded it to
        # TextPrinterWindow; it is now passed through. Constructing the window
        # performs the printing as a side effect, so no reference is kept.
        TextPrinterWindow(text=text, paper_type=paper_type)
        app.closeAllWindows()

    def print_image(self, image, paper_type='58'):
        """Print a PIL image. paper_type: 'A4' for A4 geometry, anything else
        uses the narrow default paper."""
        global app
        # Unused local removed: the window prints during construction.
        ImagePrinterWindow(image=image, paper_type=paper_type)
        app.closeAllWindows()
def module_test():
    """End-to-end exercise: signup/login, room creation, messaging, upload.

    Talks to the live debug server; intended to be run manually.
    """
    client = Chat2Client(server_choose=0)
    client.init()
    # client.clear_all()
    client.signup('Lance', '')
    client.login('Lance', '')
    # time.sleep(1)
    print(client.username, client.auth)
    client.create_room('NameLose')
    rooms = client.get_rooms()
    print(rooms)
    client.enter_room(rooms[0]['gid'])
    # Bug fix: Chat2Client defines get_messages(), not get_new_message();
    # the original calls raised AttributeError at runtime.
    messages = client.get_messages()
    print(len(messages), messages)
    messages = client.get_messages()
    print(len(messages), messages)
    client.send_message('First commit~')
    messages = client.get_messages()
    print(len(messages), messages)
    with open('save.json', 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data)
    upload_result = client.upload('save.json', b64)
    print(upload_result)
    client.send_message(upload_result['url'], message_type='file')
    messages = client.get_messages()
    print(len(messages), messages)
def mini_test():
    """Manual smoke test: log in, watch the first room and reply to messages.

    Runs forever (polls every 10 seconds); talks to the live server.
    """
    client = Chat2Client()
    client.login('Lance', '')
    print(client.username, client.auth)
    rooms = client.get_rooms()
    print(rooms)
    client.enter_room(rooms[0]['gid'])
    while True:
        # Bug fix: the client API is get_messages(), not get_new_message()
        # (the original raised AttributeError at runtime).
        messages = client.get_messages()
        print(len(messages), messages)
        for m in messages:
            client.send_message('我反对 @%s的观点!' % m['username'])
            # Skip past our own reply so we don't answer ourselves.
            client.latest_mid = client.latest_mid + 1
            client.save()
        time.sleep(10)
def friend_test():
    """Manual test loop: watch room 4 and print every incoming message.

    Images are fetched and printed via Chat2Printer; text messages are
    printed with an @username header. Runs until an exception occurs.
    """
    client = Chat2Client(server_choose=0)
    client.login('Tony', '')
    rooms = client.get_rooms()
    print(rooms)
    # client.join_in(2)
    # client.make_friends('Lance')
    # rooms = client.get_rooms()
    # print(rooms)
    client.enter_room(4)
    try:
        while True:
            # Bug fix: the client API is get_messages(), not get_new_message()
            # (the original raised AttributeError at runtime).
            messages = client.get_messages()
            # print(messages)
            for m in messages:
                if m['username'] == client.username:
                    continue  # ignore our own messages
                print(m)
                if m['type'] == 'image':
                    image = client.get_image(m['text'])
                    printer = Chat2Printer()
                    printer.print_image(image=image)
                if m['type'] == 'text':
                    text = "@{username}\n{text}".format(username=m['username'], text=m['text'])
                    printer = Chat2Printer()
                    printer.print_text(text=text)
                # time.sleep(1)
            # time.sleep(10)
    except Exception as e:
        print(e)
        return
class LatinaPrinter:
    def __init__(self):
        # Requires the module-level QApplication `app` and `default_font`
        # to exist before construction.
        global app
        global default_font
        app.setFont(default_font)
        # Image-processing presets (flags consumed elsewhere -- not shown here).
        self.default_option = {}
        self.scenery_option = {
            'black_white': True,
            'intensity': True,
            'high_enhance': True,
        }
        self.comic_option = {
            'black_white': True,
        }
        self.black_white_option = {
            'black_white': True,
        }
        # Per-user selected print preset.
        self.options = {}
        self.print_options = {
            'comic': self.comic_option,
            'default': self.default_option,
            'black_white': self.black_white_option
            # NOTE(review): scenery_option is defined above but never
            # registered here -- confirm whether 'scenery' should be offered.
        }
        # Per-user font settings and the shared defaults.
        self.font_options = {}
        self.default_font_option = {
            'font-family': '微软雅黑',
            'font-size': 10,
        }
        self.sdk_running = False
        self.quit_confirm = False
        # Font families users may choose from in set_font_option().
        self.font_families = ['微软雅黑', '宋体', '仿宋', '黑体',
                              'Microsoft YaHei Mono', '幼圆', '楷体', '隶书']
        self.client = Chat2Client(server_choose=0)
        # Restore persisted options.json if present.
        self.load()
def save(self):
with open('options.json', 'w') as f:
f.write(json.dumps({
'options': self.options,
'font_options': self.font_options
}))
def load(self):
try:
with open('options.json', 'r') as f:
save_options = json.load(f)
self.options = save_options['options']
self.font_options = save_options['font_options']
except Exception as e:
print(e, 'try to save...')
self.save()
def set_option(self, username: str, option: str):
if option not in self.print_options:
return '修改设置失败!'
self.options[username] = self.print_options[option]
self.save()
return '修改设置成功!'
def set_font_option(self, username: str, size: int=None, family: str=None):
if family is not None and family not in self.font_families:
return '修改设置失败!'
if size is not None and size > 30:
return '修改设置失败!'
option = self.default_font_option
if family is not None:
option['font-family'] = family
if size is not None:
option['font-size'] = size
self.font_options[username] = option
self.save()
return '修改设置成功!'
def mainloop(self, username='Printer', password='<PASSWORD>'):
global app
global default_font
self.client.logout()
code = self.client.login(username, password)
if code != 0:
print("新用户,注册...")
code = self.client.signup(username=username, password=password, user_type='printer')
if code != 0:
| |
== easy_read_format_from_detection_corpus(
corpus_format), easyread_format + "VS\n" + \
easy_read_format_from_detection_corpus(corpus_format)
assert corpus_format == detection_corpus_format_from_easy_read(
easyread_format), corpus_format + "VS\n" + \
detection_corpus_format_from_easy_read(easyread_format)
# add to corpus, new line separated
corpus += corpus_format + "\n"
easyread_corpus += easyread_format + "\n"
# counting
totalRepairs += len(re.findall("<rps", wordstring, re.S))
wordCount += len(wordstring.split())
# write corpus string to file
if partial:
filepartial = "_partial" # suffix for file name
else:
filepartial = ""
if writeFile:
disffile = open(filename + filepartial + "_data.csv", "w")
disffile.write(corpus)
disffile.close()
print>>self.errorlog, "Problematic repairs removed = " + \
str(problem_repairs)
print "Problematic repairs removed = " + str(problem_repairs)
print>>self.errorlog, "Disfluency corpus constructed with\
{} Errors and {} Warnings".format(
errors, warnings)
print "Disfluency corpus constructed with \
{} Errors and {} Warnings".format(errors, warnings)
print "Disfluency detection corpus complete"
print "number trans = " + str(numberTrans)
print "number utts = " + str(len(uttList))
print "number words = " + str(wordCount)
print "number repairs = " + str(totalRepairs)
for k, v in sorted(dialogue_act.items(), key=lambda x: x[1],
reverse=True):
print k, v
return easyread_corpus # Always return the corpus as a string
    def write_clean_corpus(self, testcorpus, targetfilename, debug=False):
        """Write a file cleaned of reparanda and edit terms from a test corpus.
        Keyword Arguments:
        testcorpus -- a string separated by newline markers which
        has all the disfluency markup.
        targetfilename -- a string giving the location of the cleaned corpus.
        """
        print "Writing clean corpus..."
        clean_corpus = open(targetfilename + "_clean.text", "w")
        # The corpus interleaves three line kinds: "REF,..." (source indices,
        # skipped), "<uttref>,<text>" (cached; output deferred) and
        # "POS,<tags>" (triggers output for the cached utterance).
        # NOTE(review): assumes every utterance line is followed by its POS
        # line -- a leading POS line would hit an undefined 'text'. Confirm
        # against the corpus writer.
        for line in testcorpus.split("\n"):
            # print line
            if line == "":
                continue
            split = line.split(",")
            # no need to write the indices to source data
            if split[0] == "REF":
                continue
            elif split[0] == "POS":
                pos = split[1]
            else:
                uttref = split[0]
                text = split[1]
                continue
            # Only reached on POS lines: emit the cleaned cached utterance.
            words = strip_disf_tags_from_easy_read(text)
            pos_tags = strip_disf_tags_from_easy_read(pos)
            disfluencies = disf_tags_from_easy_read(text)
            clean_word_string = ""
            clean_pos_string = ""
            for i in range(0, len(disfluencies)):
                # Drop edit terms (<e), reparanda (<rm) and interregna (<i).
                if "<e" in disfluencies[i] or "<rm" in disfluencies[i] \
                    or "<i" in disfluencies[i]:
                    continue
                clean_word_string += words[i] + " "
                clean_pos_string += pos_tags[i] + " "
            clean_word_string = clean_word_string.strip()
            clean_pos_string = clean_pos_string.strip()
            # Utterances that were entirely disfluent produce no output.
            if clean_word_string == "":
                continue
            clean_corpus.write(
                uttref + "," + clean_word_string + "\nPOS," +
                clean_pos_string + "\n")
        clean_corpus.close()
        print "done"
        return
    def write_edit_term_corpus(self, testcorpus, targetfilename, debug=False):
        """Write a file cleaned of reparanda and edit terms from a test corpus.
        Keyword Arguments:
        testcorpus -- a string separated by newline markers which
        has all the disfluency markup.
        targetfilename -- a string giving the location of
        the edit term filled corpus.
        """
        print "Writing edit term corpus..."
        edit_term_corpus = open(targetfilename + "_edit.text", "w")
        # Same three-line protocol as write_clean_corpus: cache the
        # utterance line, act when its POS line arrives.
        for line in testcorpus.split("\n"):
            if line == "":
                continue
            split = line.split(",")
            # no need to write the indices to source data
            if split[0] == "REF":
                continue
            elif split[0] == "POS":
                pos = split[1]
            else:
                uttref = split[0]
                text = split[1]
                continue
            # Extract (words, POS) pairs of edit terms from the utterance
            # and write one entry per example found.
            editterm_examples = get_edit_terms_from_easy_read(text, pos)
            if editterm_examples != []:
                for my_editterm, my_poseditterm in editterm_examples:
                    edit_term_corpus.write(
                        uttref + "," + my_editterm + "\nPOS," +
                        my_poseditterm + "\n")
        edit_term_corpus.close()
        print "done"
        return
# def write_tree_corpus(self, testcorpus, targetfilename, debug=False):
# #TODO finish and test
# reffile = open(targetfilename+"TreeCorpus.text","w")
# overallCount = 0
# stringlist = []
# uttlist = []
# overallPOSList = []
# overallIndexList = []
# dodgy = ["NNP","NNPS","CD","LS","SYM","FW"]
# totalRepairs = 0
# thirdPos = 0
# firstPos = 0
# typedict = defaultdict(list)
# printdict = defaultdict(list)
#
# self.editingTermList = [[]]
# # preprocessing step to get all the editing terms seen
# #in whole corpus, list of lists
# #what do we do for held out data again..,
# # for realistic test should see how we do with the edit dict.
# numberTrans = 0
# for trans in self.corpus.iter_transcripts():
# repairStack = [] #can be both speakers/callers or just one
# count=0
# transnumber = int(trans.swda_filename[19:23])
# #conversationNumber = int(trans.conversation_no)
# if transnumber < 1: continue #from
# #if ranges and not trans.has_trees():
# # continue
#
#
# if transnumber > 1210: break
# #up to test files 1210, none beyond this
#
# numberTrans+=1
# while count < len(trans.utterances):
# utt = trans.utterances[count]
# mytreemap = None
# myPOSmap = None
# treepaths = None #will be a defaultdict
# if trans.has_trees():
# mytreemap = self.__treemaplist__.get_treemap(trans,utt)
# if mytreemap:
# mypos = mytreemap.get_POS(trans,utt)
# try:
# treepaths = mytreemap.get_path_lengths(
# trans,utt.transcript_index)
# except:
# print "treepath problem"
# print sys.exc_info()
# else:
# count+=1
# continue
# else: # for POS files
# myPOSmap = self.__POSmaplist__.get_POSmap(trans,utt)
# if myPOSmap:
# mypos = myPOSmap.get_POS(utt)
# else:
# count+=1
# continue
# #TODO get corresponding repair line from the test corpus
# return
    def disfluency_type_corpus_analysis(self, testcorpus, targetfilename):
        """Take a test corpus and compute distributions over
        the types of disfluencies.
        For repairs, the types we're looking at are the structures
        from the minimal edit distance alignment of repair and reparandum.
        For edit terms/interregna this is the probability of words
        being edit terms at all, and the probability of
        them being interregna.
        Keyword Arguments:
        testcorpus -- a string separated by newline markers which
        has all the disfluency markup.
        targetfilename -- a string giving the location of
        the edit term filled corpus.
        """
        # TODO finish and test
        # NOTE(review): this method is an unfinished draft -- 'repair' is a
        # None placeholder below, so the repair-classification branch will
        # raise AttributeError into the BaseException handler; and
        # editingTermList is never populated before it is analysed.
        typedict = defaultdict(list)
        printdict = defaultdict(list)
        editingTermList = [[]]
        for line in testcorpus.split("\n"):
            if line == "":
                continue
            split = line.split(",")
            ref = split[0]
            text = split[1]
            if ref == "REF":
                continue
            disfluencies = disf_tags_from_easy_read(text)
            for i in range(0, len(disfluencies)):
                # <rpn marks the end (repair onset) of a repair annotation.
                if "<rpn" in disfluencies[i]:
                    repair = None  # TODO place holder
                    # TODO get the general class rep/sub/delete from the words
                    simpleclass = None
                    try:
                        if not repair.reparandumWords == []:
                            # Align reparandum against repair to get the
                            # fine-grained structural class.
                            complexclass = classify_repair(
                                repair.reparandumWords,
                                repair.repairWords,
                                repair.continuationWords
                            )
                            # ONLY ADD ONE EXAMPLE, the top one..
                            if not complexclass in typedict[simpleclass]:
                                output = graph_viz_repair(
                                    complexclass, repair.reparandumWords,
                                    repair.repairWords,
                                    repair.continuationWords)
                                if not output in printdict[simpleclass]:
                                    # NOTE(review): list.extend takes a single
                                    # iterable; extend(output, 1) would raise
                                    # TypeError (currently unreachable).
                                    printdict[simpleclass].extend(output, 1)
                                # [fine class, rendering, occurrence count]
                                typedict[simpleclass].append(
                                    [complexclass, output, 1])
                            else:
                                for mytype in typedict[simpleclass]:
                                    if mytype[0] == complexclass:
                                        # allows us to get the count for each
                                        # complex type
                                        mytype[2] += 1
                    except BaseException:
                        print sys.exc_info()
                        print repair.reparandumWords
                        print repair.repairWords
                        print repair.continuationWords
                        raw_input("third one")
        # Do the interregnum and edit term analysis
        # Count each distinct edit-term phrase (space-joined word sequence).
        interregDict = defaultdict(int)
        for edit in editingTermList:
            if edit == []:
                continue
            editstring = ""
            for string in edit:
                editstring += string + " "
            editstring = editstring[:-1]
            interregDict[editstring] += 1
        # now turn it into a dict list for corpus analysis
        # this will be used in below method, dict of [incleanCorpus,FLD,Repair]
        editingTermList = defaultdict(list)
        # creates
        for w in sorted(interregDict, key=interregDict.get, reverse=True):
            print w, interregDict[w]
            editingTermList[w] = [0, 0, 0]
        # interregFile.write(w+","+str(interregDict[w])+"\n")
        # interregFile.close()
        typefile = open(targetfilename + "RepairTypeDistributions.text", "w")
        print "writing to typefile" + typefile.name
        for key in printdict.keys():
            print str(key) + str(len(typedict[key]))
            typefile.write("\n\n\n\n\n" + str(key) + "MAINTYPE:" +
                           str(len(printdict[key])) +
                           "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
            # writes the most popular mappings-- should do this is order of
            # occurence?
            for repairtype in printdict[key]:
                typefile.write(repairtype + "\n")
        # for key in typedict.keys():
        #    print key
        #    print
        typefile.close()
        return
def make_corpus(self, target_folder=None, corpus=None, range_files=None,
edit_terms_marked=True, mode=None, debug=False):
"""Write a disfluency related corpus according to the
mode required."""
self.dRepairs = defaultdict(list)
# all the edit terms ranges, may be able to get round this easier below
self.dEditTerms = defaultdict(list)
self.dRelaxedRepairs = None
# get the repair annotations
if self.annotationFiles:
self.read_in_self_repair_files()
if self.annotationFilesRelaxed:
self.read_in_relaxed_self_repair_files
self.ranges = []
# get the ranges of the source corpus to extract data from
if range_files:
for range_file_name in range_files:
rangeFile = open(range_file_name, "r")
for line in rangeFile:
a = line.strip("\n")
self.ranges.append(a) # conversation_no
# print a
rangeFile.close()
print "files in ranges = " + str(len(self.ranges))
else:
self.ranges = None
# write the corpus for the accepted mode
if mode in ["both", "clean", "disfluency"]:
if mode == "disfluency":
write_edit = False
else:
write_edit = True
if mode == "clean":
save_test = False
else:
save_test = True
test_corpus = self.write_test_corpus(target_folder + os.sep +
corpus,
ranges=self.ranges,
partial=args.partialWords,
writeFile=save_test,
edit_terms_marked=edit_terms_marked,
debug=debug)
if mode in ["clean", "both"]:
self.write_clean_corpus(
test_corpus, target_folder + os.sep + corpus, debug=debug)
if write_edit:
self.write_edit_term_corpus(
test_corpus, target_folder + os.sep + corpus, debug=debug)
if mode == "tree":
self.write_tree_corpus(target_folder + os.sep + corpus + os.sep +
"TREEPATHS" + corpus, ranges=self.ranges,
partial=self.partial_words)
if __name__ == '__main__':
# parse command line parameters
# Optional arguments:
# -i string, path of source data (in swda | |
<gh_stars>0
import os
import re
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.formats import date_format
from easy_thumbnails.alias import aliases
from easy_thumbnails.exceptions import InvalidImageFormatError
from easy_thumbnails.files import get_thumbnailer
from mapentity.registry import registry
from mapentity.models import MapEntityMixin
from mapentity.serializers import plain_text, smart_plain_text
from geotrek.authent.models import StructureRelated
from geotrek.core.models import Topology
from geotrek.common.mixins import (NoDeleteMixin, TimeStampedModelMixin,
PictogramMixin, OptionalPictogramMixin,
PublishableMixin, PicturesMixin,
AddPropertyMixin)
from geotrek.common.models import Theme
from geotrek.common.utils import intersecting
from extended_choices import Choices
if 'modeltranslation' in settings.INSTALLED_APPS:
from modeltranslation.manager import MultilingualManager
else:
from django.db.models import Manager as MultilingualManager
logger = logging.getLogger(__name__)
def _get_target_choices():
    """ Populate choices using installed apps names.
    """
    # 'public' is always offered as a publication target; then one entry per
    # MapEntity-registered model that is shown in the menu.
    apps = [('public', _("Public website"))]
    for model, entity in registry.registry.items():
        if entity.menu:
            appname = model._meta.app_label.lower()
            apps.append((appname, unicode(entity.label)))
    return tuple(apps)
class InformationDeskType(PictogramMixin):
    """Category of an information desk (with a pictogram via the mixin)."""
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='label')
    class Meta:
        db_table = 't_b_type_renseignement'
        verbose_name = _(u"Information desk type")
        verbose_name_plural = _(u"Information desk types")
        ordering = ['label']
    def __unicode__(self):
        return self.label
class InformationDesk(models.Model):
    """A physical information point (tourist office, etc.) with contact
    details, an optional photo and an optional geographic location."""
    name = models.CharField(verbose_name=_(u"Title"), max_length=256, db_column='nom')
    type = models.ForeignKey(InformationDeskType, verbose_name=_(u"Type"),
                             related_name='desks', db_column='type')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Brief description"))
    phone = models.CharField(verbose_name=_(u"Phone"), max_length=32,
                             blank=True, null=True, db_column='telephone')
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    photo = models.FileField(verbose_name=_(u"Photo"), upload_to=settings.UPLOAD_DIR,
                             db_column='photo', max_length=512, blank=True, null=True)
    street = models.CharField(verbose_name=_(u"Street"), max_length=256,
                              blank=True, null=True, db_column='rue')
    postal_code = models.CharField(verbose_name=_(u"Postal code"), max_length=8,
                                   blank=True, null=True, db_column='code')
    municipality = models.CharField(verbose_name=_(u"Municipality"),
                                    blank=True, null=True,
                                    max_length=256, db_column='commune')
    geom = models.PointField(verbose_name=_(u"Emplacement"), db_column='geom',
                             blank=True, null=True,
                             srid=settings.SRID, spatial_index=False)
    objects = models.GeoManager()
    class Meta:
        db_table = 't_b_renseignement'
        verbose_name = _(u"Information desk")
        verbose_name_plural = _(u"Information desks")
        ordering = ['name']
    def __unicode__(self):
        return self.name
    @property
    def description_strip(self):
        """Used in trek public template.
        """
        # Collapse runs of <br> tags (and surrounding whitespace) into
        # newlines, then strip any remaining markup.
        nobr = re.compile(r'(\s*<br.*?>)+\s*', re.I)
        newlines = nobr.sub("\n", self.description)
        return smart_plain_text(newlines)
    @property
    def serializable_type(self):
        # Minimal dict representation of the desk type for API output.
        return {
            'id': self.type.id,
            'label': self.type.label,
            'pictogram': self.type.pictogram.url,
        }
    @property
    def latitude(self):
        # Latitude in the public API SRID (None when no geometry is set).
        if self.geom:
            api_geom = self.geom.transform(settings.API_SRID, clone=True)
            return api_geom.y
        return None
    @property
    def longitude(self):
        # Longitude in the public API SRID (None when no geometry is set).
        if self.geom:
            api_geom = self.geom.transform(settings.API_SRID, clone=True)
            return api_geom.x
        return None
    @property
    def thumbnail(self):
        # Thumbnail of the photo using the 'thumbnail' easy-thumbnails
        # alias; None when there is no photo or the image file is broken.
        if not self.photo:
            return None
        thumbnailer = get_thumbnailer(self.photo)
        try:
            return thumbnailer.get_thumbnail(aliases.get('thumbnail'))
        except InvalidImageFormatError:
            logger.warning(_("Image %s invalid or missing from disk.") % self.photo)
            return None
    @property
    def photo_url(self):
        # Media URL of the thumbnail, or None when no thumbnail exists.
        thumbnail = self.thumbnail
        if not thumbnail:
            return None
        return os.path.join(settings.MEDIA_URL, thumbnail.name)
# Geometry kinds a touristic content category may constrain its contents to
# (stored as short strings via django-extended-choices).
GEOMETRY_TYPES = Choices(
    ('POINT', 'point', _('Point')),
    ('LINE', 'line', _('Line')),
    ('POLYGON', 'polygon', _('Polygon')),
    ('ANY', 'any', _('Any')),
)
class TouristicContentCategory(PictogramMixin):
    """Top-level category of touristic contents, carrying the labels of the
    two type-choice lists and the geometry kind its contents may use."""
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
    geometry_type = models.CharField(db_column="type_geometrie", max_length=16,
                                     choices=GEOMETRY_TYPES, default=GEOMETRY_TYPES.POINT)
    type1_label = models.CharField(verbose_name=_(u"First list label"), max_length=128,
                                   db_column='label_type1', blank=True)
    type2_label = models.CharField(verbose_name=_(u"Second list label"), max_length=128,
                                   db_column='label_type2', blank=True)
    order = models.IntegerField(verbose_name=_(u"Order"), null=True, blank=True, db_column='tri',
                                help_text=_(u"Alphabetical order if blank"))
    # Prefix used to build API ids distinct from event ids ('E').
    id_prefix = 'C'
    class Meta:
        db_table = 't_b_contenu_touristique_categorie'
        verbose_name = _(u"Touristic content category")
        verbose_name_plural = _(u"Touristic content categories")
        ordering = ['order', 'label']
    def __unicode__(self):
        return self.label
    @property
    def prefixed_id(self):
        # e.g. 'C12' -- unique across categories and event categories.
        return '{prefix}{id}'.format(prefix=self.id_prefix, id=self.id)
class TouristicContentType(OptionalPictogramMixin):
    """A sub-type of a category; appears in the category's first or second
    choice list depending on 'in_list'."""
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
    category = models.ForeignKey(TouristicContentCategory, related_name='types',
                                 verbose_name=_(u"Category"), db_column='categorie')
    # Choose in which list of choices this type will appear
    in_list = models.IntegerField(choices=((1, _(u"First")), (2, _(u"Second"))), db_column='liste_choix')
    class Meta:
        db_table = 't_b_contenu_touristique_type'
        verbose_name = _(u"Touristic content type")
        # NOTE(review): plural label is singular here -- likely a typo, but
        # changing it alters user-visible text/translations; confirm first.
        verbose_name_plural = _(u"Touristic content type")
        ordering = ['label']
    def __unicode__(self):
        return self.label
class TouristicContentType1Manager(MultilingualManager):
    """Manager restricting TouristicContentType to the first choice list."""
    def get_queryset(self):
        return super(TouristicContentType1Manager, self).get_queryset().filter(in_list=1)
class TouristicContentType2Manager(MultilingualManager):
    """Manager restricting TouristicContentType to the second choice list."""
    def get_queryset(self):
        return super(TouristicContentType2Manager, self).get_queryset().filter(in_list=2)
class TouristicContentType1(TouristicContentType):
    """Proxy model exposing only first-list types; new instances default
    to in_list=1."""
    objects = TouristicContentType1Manager()
    def __init__(self, *args, **kwargs):
        # Force the field default so admin-created objects land in list 1.
        self._meta.get_field('in_list').default = 1
        super(TouristicContentType1, self).__init__(*args, **kwargs)
    class Meta:
        proxy = True
        verbose_name = _(u"Type")
        verbose_name_plural = _(u"First list types")
class TouristicContentType2(TouristicContentType):
    """Proxy model exposing only second-list types; new instances default
    to in_list=2."""
    objects = TouristicContentType2Manager()
    def __init__(self, *args, **kwargs):
        # Force the field default so admin-created objects land in list 2.
        self._meta.get_field('in_list').default = 2
        super(TouristicContentType2, self).__init__(*args, **kwargs)
    class Meta:
        proxy = True
        verbose_name = _(u"Type")
        verbose_name_plural = _(u"Second list types")
class ReservationSystem(models.Model):
    """External booking system a touristic content may link to."""
    name = models.CharField(verbose_name=_(u"Name"), max_length=256,
                            blank=False, null=False, unique=True)
    def __unicode__(self):
        return self.name
    class Meta:
        db_table = 't_b_systeme_reservation'
        verbose_name = _(u"Reservation system")
        verbose_name_plural = _(u"Reservation systems")
class TouristicContent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
                       TimeStampedModelMixin, PicturesMixin, NoDeleteMixin):
    """ A generic touristic content (accomodation, museum, etc.) in the park
    """
    description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
                                          help_text=_(u"A brief summary"), db_column='chapeau')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Complete description"))
    themes = models.ManyToManyField(Theme, related_name="touristiccontents",
                                    db_table="t_r_contenu_touristique_theme", blank=True, verbose_name=_(u"Themes"),
                                    help_text=_(u"Main theme(s)"))
    # Generic geometry: the actual kind is constrained by the category's
    # geometry_type at the form level.
    geom = models.GeometryField(verbose_name=_(u"Location"), srid=settings.SRID)
    category = models.ForeignKey(TouristicContentCategory, related_name='contents',
                                 verbose_name=_(u"Category"), db_column='categorie')
    contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact',
                               help_text=_(u"Address, phone, etc."))
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
                                      help_text=_(u"Anything worth to know"))
    # Two independent type lists, labelled per-category (type1_label/type2_label).
    type1 = models.ManyToManyField(TouristicContentType, related_name='contents1',
                                   verbose_name=_(u"Type 1"), db_table="t_r_contenu_touristique_type1",
                                   blank=True)
    type2 = models.ManyToManyField(TouristicContentType, related_name='contents2',
                                   verbose_name=_(u"Type 2"), db_table="t_r_contenu_touristique_type2",
                                   blank=True)
    source = models.ManyToManyField('common.RecordSource',
                                    blank=True, related_name='touristiccontents',
                                    verbose_name=_("Source"), db_table='t_r_contenu_touristique_source')
    portal = models.ManyToManyField('common.TargetPortal',
                                    blank=True, related_name='touristiccontents',
                                    verbose_name=_("Portal"), db_table='t_r_contenu_touristique_portal')
    eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
    reservation_system = models.ForeignKey(ReservationSystem, verbose_name=_(u"Reservation system"),
                                           blank=True, null=True)
    reservation_id = models.CharField(verbose_name=_(u"Reservation ID"), max_length=1024,
                                      blank=True, db_column='id_reservation')
    approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
    objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
    class Meta:
        db_table = 't_t_contenu_touristique'
        verbose_name = _(u"Touristic content")
        verbose_name_plural = _(u"Touristic contents")
    def __unicode__(self):
        return self.name
    @property
    def districts_display(self):
        return ', '.join([unicode(d) for d in self.districts])
    @property
    def type1_label(self):
        return self.category.type1_label
    @property
    def type2_label(self):
        return self.category.type2_label
    @property
    def type1_display(self):
        return ', '.join([unicode(n) for n in self.type1.all()])
    @property
    def type2_display(self):
        return ', '.join([unicode(n) for n in self.type2.all()])
    @property
    def prefixed_category_id(self):
        return self.category.prefixed_id
    def distance(self, to_cls):
        # NOTE(review): 'to_cls' is ignored; one intersection margin is used
        # for every related model.
        return settings.TOURISM_INTERSECTION_MARGIN
    @property
    def type(self):
        """Fake type to simulate POI for mobile app v1"""
        return self.category
    @property
    def min_elevation(self):
        # Contents carry no altimetry; expose 0 for API compatibility.
        return 0
    @property
    def max_elevation(self):
        return 0
    @property
    def portal_display(self):
        return ', '.join([unicode(portal) for portal in self.portal.all()])
    @property
    def source_display(self):
        # NOTE(review): joins with ',' while portal_display uses ', ' --
        # possibly intentional for CSV-ish output; confirm before unifying.
        return ','.join([unicode(source) for source in self.source.all()])
    @property
    def themes_display(self):
        return ','.join([unicode(source) for source in self.themes.all()])
    @property
    def extent(self):
        # Bounding box of the geometry (buffered by 10 units) in API SRID.
        return self.geom.buffer(10).transform(settings.API_SRID, clone=True).extent
    @property
    def rando_url(self):
        # Public website path, e.g. 'touristic-content/<slug>/'.
        category_slug = _(u'touristic-content')
        return '{}/{}/'.format(category_slug, self.slug)
    @property
    def meta_description(self):
        # Plain-text summary capped at 500 chars for HTML meta tags.
        return plain_text(self.description_teaser or self.description)[:500]
# Reverse relations: expose (published) touristic contents intersecting a
# Topology or another TouristicContent as listable properties.
Topology.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
Topology.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
TouristicContent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicContent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
class TouristicEventType(OptionalPictogramMixin):
    """Kind of touristic event (conference, workshop, ...)."""
    type = models.CharField(verbose_name=_(u"Type"), max_length=128, db_column='type')
    class Meta:
        db_table = 't_b_evenement_touristique_type'
        verbose_name = _(u"Touristic event type")
        verbose_name_plural = _(u"Touristic event types")
        ordering = ['type']
    def __unicode__(self):
        return self.type
class TouristicEvent(AddPropertyMixin, PublishableMixin, MapEntityMixin, StructureRelated,
                     PicturesMixin, TimeStampedModelMixin, NoDeleteMixin):
    """ A touristic event (conference, workshop, etc.) in the park
    """
    description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
                                          help_text=_(u"A brief summary"), db_column='chapeau')
    description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
                                   help_text=_(u"Complete description"))
    themes = models.ManyToManyField(Theme, related_name="touristic_events",
                                    db_table="t_r_evenement_touristique_theme", blank=True, verbose_name=_(u"Themes"),
                                    help_text=_(u"Main theme(s)"))
    geom = models.PointField(verbose_name=_(u"Location"), srid=settings.SRID)
    begin_date = models.DateField(blank=True, null=True, verbose_name=_(u"Begin date"), db_column='date_debut')
    end_date = models.DateField(blank=True, null=True, verbose_name=_(u"End date"), db_column='date_fin')
    duration = models.CharField(verbose_name=_(u"Duration"), max_length=64, blank=True, db_column='duree',
                                help_text=_(u"3 days, season, ..."))
    meeting_point = models.CharField(verbose_name=_(u"Meeting point"), max_length=256, blank=True, db_column='point_rdv',
                                     help_text=_(u"Where exactly ?"))
    meeting_time = models.TimeField(verbose_name=_(u"Meeting time"), blank=True, null=True, db_column='heure_rdv',
                                    help_text=_(u"11:00, 23:30"))
    contact = models.TextField(verbose_name=_(u"Contact"), blank=True, db_column='contact')
    email = models.EmailField(verbose_name=_(u"Email"), max_length=256, db_column='email',
                              blank=True, null=True)
    website = models.URLField(verbose_name=_(u"Website"), max_length=256, db_column='website',
                              blank=True, null=True)
    organizer = models.CharField(verbose_name=_(u"Organizer"), max_length=256, blank=True, db_column='organisateur')
    speaker = models.CharField(verbose_name=_(u"Speaker"), max_length=256, blank=True, db_column='intervenant')
    type = models.ForeignKey(TouristicEventType, verbose_name=_(u"Type"), blank=True, null=True, db_column='type')
    accessibility = models.CharField(verbose_name=_(u"Accessibility"), max_length=256, blank=True, db_column='accessibilite')
    participant_number = models.CharField(verbose_name=_(u"Number of participants"), max_length=256, blank=True, db_column='nb_places')
    booking = models.TextField(verbose_name=_(u"Booking"), blank=True, db_column='reservation')
    target_audience = models.CharField(verbose_name=_(u"Target audience"), max_length=128, blank=True, null=True, db_column='public_vise')
    practical_info = models.TextField(verbose_name=_(u"Practical info"), blank=True, db_column='infos_pratiques',
                                      help_text=_(u"Recommandations / To plan / Advices"))
    source = models.ManyToManyField('common.RecordSource',
                                    blank=True, related_name='touristicevents',
                                    verbose_name=_("Source"), db_table='t_r_evenement_touristique_source')
    portal = models.ManyToManyField('common.TargetPortal',
                                    blank=True, related_name='touristicevents',
                                    verbose_name=_("Portal"), db_table='t_r_evenement_touristique_portal')
    eid = models.CharField(verbose_name=_(u"External id"), max_length=1024, blank=True, null=True, db_column='id_externe')
    approved = models.BooleanField(verbose_name=_(u"Approved"), default=False, db_column='labellise')
    objects = NoDeleteMixin.get_manager_cls(models.GeoManager)()
    # Prefix for API ids so events cannot clash with content categories ('C').
    category_id_prefix = 'E'
    class Meta:
        db_table = 't_t_evenement_touristique'
        verbose_name = _(u"Touristic event")
        verbose_name_plural = _(u"Touristic events")
        ordering = ['-begin_date']
    def __unicode__(self):
        return self.name
    @property
    def type1(self):
        # Mimic TouristicContent's two type lists so templates/serializers
        # can treat events and contents uniformly.
        return [self.type] if self.type else []
    @property
    def type2(self):
        return []
    @property
    def districts_display(self):
        return ', '.join([unicode(d) for d in self.districts])
    @property
    def dates_display(self):
        # Human-readable date range, degrading gracefully when one or both
        # bounds are missing.
        if not self.begin_date and not self.end_date:
            return u""
        elif not self.end_date:
            return _(u"starting from {begin}").format(
                begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'))
        elif not self.begin_date:
            return _(u"up to {end}").format(
                end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
        elif self.begin_date == self.end_date:
            return date_format(self.begin_date, 'SHORT_DATE_FORMAT')
        else:
            return _(u"from {begin} to {end}").format(
                begin=date_format(self.begin_date, 'SHORT_DATE_FORMAT'),
                end=date_format(self.end_date, 'SHORT_DATE_FORMAT'))
    @property
    def prefixed_category_id(self):
        return self.category_id_prefix
    def distance(self, to_cls):
        # NOTE(review): 'to_cls' is ignored; one intersection margin is used
        # for every related model.
        return settings.TOURISM_INTERSECTION_MARGIN
    @property
    def portal_display(self):
        return ', '.join([unicode(portal) for portal in self.portal.all()])
    @property
    def source_display(self):
        return ', '.join([unicode(source) for source in self.source.all()])
    @property
    def themes_display(self):
        return ','.join([unicode(source) for source in self.themes.all()])
    @property
    def rando_url(self):
        # Public website path, e.g. 'touristic-event/<slug>/'.
        category_slug = _(u'touristic-event')
        return '{}/{}/'.format(category_slug, self.slug)
    @property
    def meta_description(self):
        # Plain-text summary capped at 500 chars for HTML meta tags.
        return plain_text(self.description_teaser or self.description)[:500]
# Reverse relations between events, contents and topologies (intersection
# queries exposed as listable properties).
TouristicEvent.add_property('touristic_contents', lambda self: intersecting(TouristicContent, self), _(u"Touristic contents"))
TouristicEvent.add_property('published_touristic_contents', lambda self: intersecting(TouristicContent, self).filter(published=True), _(u"Published touristic contents"))
Topology.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
Topology.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), _(u"Published touristic events"))
TouristicContent.add_property('touristic_events', lambda self: intersecting(TouristicEvent, self), _(u"Touristic events"))
TouristicContent.add_property('published_touristic_events', lambda self: intersecting(TouristicEvent, self).filter(published=True), | |
<filename>engine/2.80/scripts/addons/archipack/archipack_reference_point.py
# -*- coding:utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110- 1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# ----------------------------------------------------------
# Author: <NAME> (s-leger)
#
# ----------------------------------------------------------
import bpy
from bpy.types import Operator, PropertyGroup, Object, Panel
from bpy.props import (
FloatVectorProperty,
CollectionProperty,
FloatProperty,
EnumProperty
)
from mathutils import Vector
from .bmesh_utils import BmeshEdit as bmed
from .archipack_object import ArchipackCollectionManager
def update(self, context):
    # Shared update callback for the properties declared below: delegate to
    # the PropertyGroup instance's own update() method.
    self.update(context)
class archipack_reference_point(PropertyGroup):
    """Datablock storing an archipack reference point: remembered 2d/3d
    positions and the viewport symbol's scale and shape."""
    # Saved position for 2d (plan) mode.
    location_2d : FloatVectorProperty(
            subtype='XYZ',
            name="position 2d",
            default=Vector((0, 0, 0))
            )
    # Saved position for 3d mode.
    location_3d : FloatVectorProperty(
            subtype='XYZ',
            name="position 3d",
            default=Vector((0, 0, 0))
            )
    # Viewport symbol scale; changing it rebuilds the symbol mesh (update()).
    symbol_scale : FloatProperty(
            name="Screen scale",
            default=1,
            min=0.01,
            update=update)
    # Which symbol mesh to draw; changing it rebuilds the mesh (update()).
    symbol_type : EnumProperty(
            name="Symbol type",
            default='WALL',
            items=(
                ('WALL', 'Wall', '', 0),
                ('ROOF', 'Roof', '', 1)),
            update=update)
@classmethod
def filter(cls, o):
"""
Filter object with this class in data
return
True when object contains this datablock
False otherwise
usage:
class_name.filter(object) from outside world
self.__class__.filter(object) from instance
"""
try:
return cls.__name__ in o
except:
pass
return False
@classmethod
def datablock(cls, o):
"""
Retrieve datablock from base object
return
datablock when found
None when not found
usage:
class_name.datablock(object) from outside world
self.__class__.datablock(object) from instance
"""
try:
return getattr(o, cls.__name__)[0]
except:
pass
return None
def update(self, context):
o = context.active_object
if self.datablock(o) != self:
return
s = self.symbol_scale
if self.symbol_type == 'WALL':
verts = [(s * x, s * y, s * z) for x, y, z in [
(-0.25, 0.25, 0.0), (0.25, 0.25, 0.0), (-0.25, -0.25, 0.0), (0.25, -0.25, 0.0),
(0.0, 0.0, 0.487), (-0.107, 0.107, 0.216), (0.108, 0.107, 0.216), (-0.107, -0.107, 0.216),
(0.108, -0.107, 0.216), (-0.05, 0.05, 0.5), (0.05, 0.05, 0.5), (0.05, -0.05, 0.5),
(-0.05, -0.05, 0.5), (-0.193, 0.193, 0.0), (0.193, 0.193, 0.0), (0.193, -0.193, 0.0),
(-0.193, -0.193, 0.0), (0.0, 0.0, 0.8), (0.0, 0.8, -0.0), (0.0, 0.0, -0.0),
(0.0, 0.0, 0.0), (0.05, 0.05, 0.674), (-0.05, 0.674, -0.05), (0.0, 0.8, -0.0),
(-0.05, -0.05, 0.674), (-0.05, 0.674, 0.05), (0.05, 0.674, -0.05), (-0.129, 0.129, 0.162),
(0.129, 0.129, 0.162), (-0.129, -0.129, 0.162), (0.129, -0.129, 0.162), (0.0, 0.0, 0.8),
(-0.05, 0.05, 0.674), (0.05, -0.05, 0.674), (0.05, 0.674, 0.05), (0.8, -0.0, -0.0),
(0.0, -0.0, -0.0), (0.674, 0.05, -0.05), (0.8, -0.0, -0.0), (0.674, 0.05, 0.05),
(0.674, -0.05, -0.05), (0.674, -0.05, 0.05)]]
edges = [(1, 0), (0, 9), (9, 10), (10, 1), (3, 1), (10, 11),
(11, 3), (2, 3), (11, 12), (12, 2), (0, 2), (12, 9),
(6, 5), (8, 6), (7, 8), (5, 7), (17, 24), (17, 20),
(18, 25), (18, 19), (13, 14), (14, 15), (15, 16), (16, 13),
(4, 6), (15, 30), (17, 21), (26, 22), (23, 22), (23, 34),
(18, 26), (28, 27), (30, 28), (29, 30), (27, 29), (14, 28),
(13, 27), (16, 29), (4, 7), (4, 8), (4, 5), (31, 33),
(31, 32), (21, 32), (24, 32), (24, 33), (21, 33), (25, 22),
(25, 34), (26, 34), (35, 39), (35, 36), (40, 37), (38, 37),
(38, 41), (35, 40), (39, 37), (39, 41), (40, 41)]
elif self.symbol_type == 'ROOF':
verts = [(s * x, s * y, s * z) for x, y, z in [
(-0.25, 0.25, 0.0), (0.25, 0.25, 0.0), (-0.25, -0.25, 0.0), (0.25, -0.25, 0.0),
(0.0, 0.0, 0.487), (-0.107, 0.107, 0.216), (0.108, 0.107, 0.216), (-0.107, -0.107, 0.216),
(0.108, -0.107, 0.216), (-0.05, 0.05, 0.5), (0.05, 0.05, 0.5), (0.05, -0.05, 0.5),
(-0.05, -0.05, 0.5), (-0.193, 0.193, 0.0), (0.193, 0.193, 0.0), (0.193, -0.193, 0.0),
(-0.193, -0.193, 0.0), (0.0, 0.0, 0.8), (0.0, 0.8, -0.0), (0.0, 0.0, 0.0),
(0.05, 0.05, 0.673), (-0.05, 0.674, -0.05), (-0.05, -0.05, 0.673), (-0.05, 0.674, 0.05),
(0.05, 0.674, -0.05), (-0.129, 0.129, 0.162), (0.129, 0.129, 0.162), (-0.129, -0.129, 0.162),
(0.129, -0.129, 0.162), (-0.05, 0.05, 0.673), (0.05, -0.05, 0.673), (0.05, 0.674, 0.05),
(0.8, -0.0, -0.0), (0.674, 0.05, -0.05), (0.674, 0.05, 0.05), (0.674, -0.05, -0.05),
(0.674, -0.05, 0.05), (0.108, 0.0, 0.216), (0.09, 0.0, 0.261), (0.001, 0.107, 0.216),
(0.001, -0.107, 0.216), (-0.107, 0.0, 0.216), (0.0, -0.089, 0.261), (0.0, 0.089, 0.261),
(-0.089, 0.0, 0.261), (0.0, 0.042, 0.694), (-0.042, 0.0, 0.694), (0.0, -0.042, 0.694),
(0.042, 0.0, 0.694)]]
edges = [
(1, 0), (0, 9), (10, 1), (3, 1), (11, 3), (2, 3), (12, 2), (0, 2),
(17, 22), (17, 19), (18, 23), (13, 14), (14, 15), (15, 16), (16, 13),
(15, 28), (17, 20), (24, 21), (18, 24), (14, 26), (13, 25), (16, 27),
(45, 29), (46, 29), (47, 30), (48, 30), (23, 21), (23, 31), (24, 31),
(32, 34), (35, 33), (32, 35), (34, 33), (34, 36), (35, 36), (28, 37),
(6, 38), (26, 37), (26, 39), (25, 39), (5, 43), (5, 44), (25, 41),
(27, 41), (7, 44), (8, 42), (28, 40), (27, 40), (20, 45), (22, 46),
(22, 47), (20, 48), (18, 19), (18, 21), (18, 31), (17, 30), (17, 29),
(32, 19), (32, 33), (32, 36), (4, 6), (4, 7), (4, 8), (4, 5), (8, 38),
(6, 43), (7, 42), (9, 10), (10, 11), (11, 12), (12, 9)]
bm = bmed._start(context, o)
bm.clear()
for v in verts:
bm.verts.new(v)
bm.verts.ensure_lookup_table()
for ed in edges:
bm.edges.new((bm.verts[ed[0]], bm.verts[ed[1]]))
bmed._end(bm, o)
class ARCHIPACK_PT_reference_point(Panel):
    """Sidebar panel exposing reference-point tools for the active object."""
    bl_idname = "ARCHIPACK_PT_reference_point"
    bl_label = "Reference point"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Archipack'

    @classmethod
    def poll(cls, context):
        # Visible only when the active object carries the datablock
        return archipack_reference_point.filter(context.active_object)

    def draw(self, context):
        obj = context.active_object
        datablock = archipack_reference_point.datablock(obj)
        if datablock is None:
            return
        layout = self.layout
        # Offer the move that makes sense for the point's current position
        at_2d_anchor = (obj.location - datablock.location_2d).length < 0.01
        if at_2d_anchor:
            layout.operator('archipack.move_to_3d')
            layout.operator('archipack.move_2d_reference_to_cursor')
        else:
            layout.operator('archipack.move_to_2d')
        layout.prop(datablock, 'symbol_scale')
        layout.separator()
        layout.operator('archipack.apply_holes')
class ARCHIPACK_OT_reference_point(ArchipackCollectionManager, Operator):
    """Add reference point"""
    bl_idname = "archipack.reference_point"
    bl_label = "Reference point"
    bl_description = "Add reference point"
    bl_category = 'Archipack'
    bl_options = {'REGISTER', 'UNDO'}
    location_3d : FloatVectorProperty(
        subtype='XYZ',
        name="position 3d",
        default=Vector((0, 0, 0))
    )
    symbol_type : EnumProperty(
        name="Symbol type",
        default='WALL',
        items=(
            ('WALL', 'Wall', '', 0),
            ('ROOF', 'Roof', '', 1))
    )

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def draw(self, context):
        layout = self.layout
        row = layout.row()
        row.label(text="Use Properties panel (N) to define parms", icon='INFO')

    def create(self, context):
        """Create the reference mesh object at the 3d cursor (z forced to 0)."""
        # Fix: Blender 2.80 moved the cursor from scene.cursor_location to
        # scene.cursor.location; this file already uses the 2.80 API
        # (select_set / view_layer), so the old attribute would raise.
        x, y, z = context.scene.cursor.location
        m = bpy.data.meshes.new(name="Reference")
        o = bpy.data.objects.new("Reference", m)
        o.location = Vector((x, y, 0))
        self.link_object_to_scene(context, o)
        d = o.archipack_reference_point.add()
        d.location_2d = Vector((x, y, 0))
        d.location_3d = self.location_3d
        d.symbol_type = self.symbol_type
        o.select_set(state=True)
        context.view_layer.objects.active = o
        # Build the symbol mesh for the freshly created datablock
        d.update(context)
        return o

    def execute(self, context):
        """Operator entry point; only valid in Object mode."""
        if context.mode == "OBJECT":
            o = self.create(context)
            o.select_set(state=True)
            context.view_layer.objects.active = o
            return {'FINISHED'}
        else:
            self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
            return {'CANCELLED'}
class ARCHIPACK_OT_move_to_3d(Operator):
    """Snap the active reference object to its stored 3d location."""
    bl_idname = "archipack.move_to_3d"
    bl_label = "Move to 3d"
    bl_description = "Move point to 3d position"
    bl_category = 'Archipack'
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        return archipack_reference_point.filter(context.active_object)

    def execute(self, context):
        if context.mode != "OBJECT":
            self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
            return {'CANCELLED'}
        obj = context.active_object
        datablock = archipack_reference_point.datablock(obj)
        if datablock is None:
            return {'CANCELLED'}
        obj.location = datablock.location_3d
        return {'FINISHED'}
class ARCHIPACK_OT_apply_holes(Operator):
bl_idname = "archipack.apply_holes"
bl_label = "Apply holes"
bl_description = "Apply and remove holes from scene"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only available when the active object is a reference point
        return archipack_reference_point.filter(context.active_object)
    def apply_boolean(self, context, o):
        """Apply every modifier on o through an operator context override.

        Failures are ignored on purpose: some modifiers cannot be applied
        (e.g. on shared mesh data) and the cleanup should continue.
        """
        # mods = [m for m in o.modifiers if m.type == 'BOOLEAN']
        ctx = bpy.context.copy()
        ctx['object'] = o
        # Iterate a copy: applying a modifier mutates o.modifiers
        for mod in o.modifiers[:]:
            ctx['modifier'] = mod
            try:
                bpy.ops.object.modifier_apply(ctx, apply_as='DATA',
                                              modifier=mod.name)
            except:
                pass
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
to_remove = []
for c in o.children:
if 'archipack_hybridhole' in c:
self.apply_boolean(context, c)
to_remove.append(c)
for c in o.children:
if c.data is not None and "archipack_wall2" in c.data:
self.apply_boolean(context, c)
for c in o.children:
if c.data is not None | |
9, 2, 9, 0)))
    # str(rrule) -> rrulestr() round-trip tests, HOURLY frequency.
    def testToStrHourlyByHour(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byhour=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyByMinute(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyBySecond(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyByHourAndMinute(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyByHourAndSecond(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byhour=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyByMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyByHourAndMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrHourlyBySetPos(self):
        self._rrulestr_reverse_test(rrule(HOURLY,
                                          count=3,
                                          byminute=(15, 45),
                                          bysecond=(15, 45),
                                          bysetpos=(3, -3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    # str(rrule) -> rrulestr() round-trip tests, MINUTELY frequency.
    def testToStrMinutely(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyInterval(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          interval=2,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyIntervalLarge(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          interval=1501,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonth(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonth=(1, 3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonthday=(1, 3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndMonthDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonth=(1, 3),
                                          bymonthday=(5, 7),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByNWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekday=(TU(1), TH(-1)),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonth=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndNWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonth=(1, 3),
                                          byweekday=(TU(1), TH(-1)),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthDayAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonthday=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndMonthDayAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bymonth=(1, 3),
                                          bymonthday=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByYearDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=4,
                                          byyearday=(1, 100, 200, 365),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByYearDayNeg(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=4,
                                          byyearday=(-365, -266, -166, -1),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndYearDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=4,
                                          bymonth=(4, 7),
                                          byyearday=(1, 100, 200, 365),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMonthAndYearDayNeg(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=4,
                                          bymonth=(4, 7),
                                          byyearday=(-365, -266, -166, -1),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekNo(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekno=20,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekNoAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekno=1,
                                          byweekday=MO,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekNoAndWeekDayLarge(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekno=52,
                                          byweekday=SU,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekNoAndWeekDayLast(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekno=-1,
                                          byweekday=SU,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByWeekNoAndWeekDay53(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byweekno=53,
                                          byweekday=MO,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByEaster(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byeaster=0,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByEasterPos(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byeaster=1,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByEasterNeg(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byeaster=-1,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByHour(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byhour=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMinute(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyBySecond(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByHourAndMinute(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByHourAndSecond(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byhour=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyByHourAndMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrMinutelyBySetPos(self):
        self._rrulestr_reverse_test(rrule(MINUTELY,
                                          count=3,
                                          bysecond=(15, 30, 45),
                                          bysetpos=(3, -3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    # str(rrule) -> rrulestr() round-trip tests, SECONDLY frequency.
    def testToStrSecondly(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyInterval(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          interval=2,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyIntervalLarge(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          interval=90061,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonth(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonth=(1, 3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonthday=(1, 3),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndMonthDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonth=(1, 3),
                                          bymonthday=(5, 7),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByNWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekday=(TU(1), TH(-1)),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonth=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndNWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonth=(1, 3),
                                          byweekday=(TU(1), TH(-1)),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthDayAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonthday=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndMonthDayAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bymonth=(1, 3),
                                          bymonthday=(1, 3),
                                          byweekday=(TU, TH),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByYearDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=4,
                                          byyearday=(1, 100, 200, 365),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByYearDayNeg(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=4,
                                          byyearday=(-365, -266, -166, -1),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndYearDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=4,
                                          bymonth=(4, 7),
                                          byyearday=(1, 100, 200, 365),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMonthAndYearDayNeg(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=4,
                                          bymonth=(4, 7),
                                          byyearday=(-365, -266, -166, -1),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekNo(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekno=20,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekNoAndWeekDay(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekno=1,
                                          byweekday=MO,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekNoAndWeekDayLarge(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekno=52,
                                          byweekday=SU,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekNoAndWeekDayLast(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekno=-1,
                                          byweekday=SU,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByWeekNoAndWeekDay53(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byweekno=53,
                                          byweekday=MO,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByEaster(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byeaster=0,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByEasterPos(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byeaster=1,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByEasterNeg(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byeaster=-1,
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByHour(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byhour=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMinute(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyBySecond(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByHourAndMinute(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByHourAndSecond(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byhour=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByHourAndMinuteAndSecond(self):
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          byhour=(6, 18),
                                          byminute=(6, 18),
                                          bysecond=(6, 18),
                                          dtstart=datetime(1997, 9, 2, 9, 0)))
    def testToStrSecondlyByHourAndMinuteAndSecondBug(self):
        # Regression test for a rrulestr round-trip bug with
        # bysecond=0 / byminute=1 combinations.
        self._rrulestr_reverse_test(rrule(SECONDLY,
                                          count=3,
                                          bysecond=(0,),
                                          byminute=(1,),
                                          dtstart=datetime(2010, 3, 22, 12, 1)))
    def testToStrLongIntegers(self):
        if not PY3:  # There is no longs in python3
            self._rrulestr_reverse_test(rrule(MINUTELY,
                                              count=long(2),
                                              interval=long(2),
                                              bymonth=long(2),
                                              byweekday=long(3),
                                              byhour=long(6),
                                              byminute=long(6),
                                              bysecond=long(6),
                                              dtstart=datetime(1997, 9, 2, 9, 0)))
            self._rrulestr_reverse_test(rrule(YEARLY,
                                              count=long(2),
                                              bymonthday=long(5),
                                              byweekno=long(2),
                                              dtstart=datetime(1997, 9, 2, 9, 0)))
class ParserTest(unittest.TestCase):
    def setUp(self):
        # Fixed fixtures: a named offset map, its tzinfo equivalent, and a
        # default date used to fill in fields missing from partial inputs.
        self.tzinfos = {"BRST": -10800}
        self.brsttz = tzoffset("BRST", -10800)
        self.default = datetime(2003, 9, 25)
        # Parser should be able to handle bytestring and unicode
        base_str = '2014-05-01 08:00:00'
        try:
            # Python 2.x
            self.uni_str = unicode(base_str)
            self.str_str = str(base_str)
        except NameError:
            # Python 3.x: str is already unicode
            self.uni_str = str(base_str)
            self.str_str = bytes(base_str.encode())
    # "date" command output format: "Thu Sep 25 10:36:28 BRST 2003"
    def testDateCommandFormat(self):
        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
                               tzinfos=self.tzinfos),
                         datetime(2003, 9, 25, 10, 36, 28,
                                  tzinfo=self.brsttz))
    def testDateCommandFormatUnicode(self):
        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
                               tzinfos=self.tzinfos),
                         datetime(2003, 9, 25, 10, 36, 28,
                                  tzinfo=self.brsttz))
    def testDateCommandFormatReversed(self):
        self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
                               tzinfos=self.tzinfos),
                         datetime(2003, 9, 25, 10, 36, 28,
                                  tzinfo=self.brsttz))
    def testDateCommandFormatWithLong(self):
        # Python 2 only: tz offsets given as long instead of int
        if not PY3:
            self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
                                   tzinfos={"BRST": long(-10800)}),
                             datetime(2003, 9, 25, 10, 36, 28,
                                      tzinfo=self.brsttz))
    def testDateCommandFormatIgnoreTz(self):
        self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
                               ignoretz=True),
                         datetime(2003, 9, 25, 10, 36, 28))
    # Progressively strip fields from the input; missing pieces are filled
    # from `default` where one is supplied.
    def testDateCommandFormatStrip1(self):
        self.assertEqual(parse("Thu Sep 25 10:36:28 2003"),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip2(self):
        self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip3(self):
        self.assertEqual(parse("Thu Sep 10:36:28", default=self.default),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip4(self):
        self.assertEqual(parse("Thu 10:36:28", default=self.default),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip5(self):
        self.assertEqual(parse("Sep 10:36:28", default=self.default),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip6(self):
        self.assertEqual(parse("10:36:28", default=self.default),
                         datetime(2003, 9, 25, 10, 36, 28))
    def testDateCommandFormatStrip7(self):
        self.assertEqual(parse("10:36", default=self.default),
                         datetime(2003, 9, 25, 10, 36))
    def testDateCommandFormatStrip8(self):
        self.assertEqual(parse("Thu Sep 25 2003"),
                         datetime(2003, 9, 25))
    def testDateCommandFormatStrip9(self):
        self.assertEqual(parse("Sep 25 2003"),
                         datetime(2003, 9, 25))
    def testDateCommandFormatStrip10(self):
        self.assertEqual(parse("Sep 2003", default=self.default),
                         datetime(2003, 9, 25))
    def testDateCommandFormatStrip11(self):
        self.assertEqual(parse("Sep", default=self.default),
                         datetime(2003, 9, 25))
    def testDateCommandFormatStrip12(self):
        self.assertEqual(parse("2003", default=self.default),
                         datetime(2003, 9, 25))
    # RFC 2822 style: "Thu, 25 Sep 2003 10:49:41 -0300"
    def testDateRCommandFormat(self):
        self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"),
                         datetime(2003, 9, 25, 10, 49, 41,
                                  tzinfo=self.brsttz))
    # ISO 8601 with separators
    def testISOFormat(self):
        self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"),
                         datetime(2003, 9, 25, 10, 49, 41, 500000,
                                  tzinfo=self.brsttz))
    def testISOFormatStrip1(self):
        self.assertEqual(parse("2003-09-25T10:49:41-03:00"),
                         datetime(2003, 9, 25, 10, 49, 41,
                                  tzinfo=self.brsttz))
    def testISOFormatStrip2(self):
        self.assertEqual(parse("2003-09-25T10:49:41"),
                         datetime(2003, 9, 25, 10, 49, 41))
    def testISOFormatStrip3(self):
        self.assertEqual(parse("2003-09-25T10:49"),
                         datetime(2003, 9, 25, 10, 49))
    def testISOFormatStrip4(self):
        self.assertEqual(parse("2003-09-25T10"),
                         datetime(2003, 9, 25, 10))
    def testISOFormatStrip5(self):
        self.assertEqual(parse("2003-09-25"),
                         datetime(2003, 9, 25))
    # ISO 8601 "basic" (separator-free) format
    def testISOStrippedFormat(self):
        self.assertEqual(parse("20030925T104941.5-0300"),
                         datetime(2003, 9, 25, 10, 49, 41, 500000,
                                  tzinfo=self.brsttz))
    def testISOStrippedFormatStrip1(self):
        self.assertEqual(parse("20030925T104941-0300"),
                         datetime(2003, 9, 25, 10, 49, 41,
                                  tzinfo=self.brsttz))
    def testISOStrippedFormatStrip2(self):
        self.assertEqual(parse("20030925T104941"),
                         datetime(2003, 9, 25, 10, 49, 41))
    def testISOStrippedFormatStrip3(self):
        self.assertEqual(parse("20030925T1049"),
                         datetime(2003, 9, 25, 10, 49, 0))
    def testISOStrippedFormatStrip4(self):
        self.assertEqual(parse("20030925T10"),
                         datetime(2003, 9, 25, 10))
    def testISOStrippedFormatStrip5(self):
        self.assertEqual(parse("20030925"),
                         datetime(2003, 9, 25))
def testNoSeparator1(self):
self.assertEqual(parse("199709020908"),
datetime(1997, 9, | |
True
@receiver(post_save, sender=User)
def create_associated_email(sender, **kwargs):
    """
    Create and attach a primary AssociatedEmail whenever a User is created.

    Active users get their email marked verified immediately.
    """
    if not kwargs['created']:
        return
    user = kwargs['instance']
    primary = AssociatedEmail(user=user, email=user.email,
                              is_primary_email=True)
    if user.is_active:
        primary.verification_date = timezone.now()
        primary.is_verified = True
    primary.save()
@receiver(post_save, sender=User)
def update_associated_emails(sender, **kwargs):
    """
    Keep AssociatedEmail primary flags in sync when User.email is updated
    (only fires when 'email' is listed in update_fields).
    """
    user = kwargs['instance']
    updated_fields = kwargs['update_fields']
    if kwargs['created'] or not updated_fields or 'email' not in updated_fields:
        return
    previous = AssociatedEmail.objects.get(user=user, is_primary_email=True)
    current = AssociatedEmail.objects.get(user=user, email=user.email)
    previous.is_primary_email = False
    current.is_primary_email = True
    previous.save()
    current.save()
def photo_path(instance, filename):
    """
    Storage path of profile photo relative to media root.
    Keep the original file extension only.
    """
    extension = filename.split('.')[-1]
    return 'users/{0}/{1}'.format(instance.user.username,
                                  'profile-photo.' + extension)
def training_report_path(instance, filename):
    """
    Storage path of CITI training report
    """
    # The uploaded filename is deliberately discarded: every report is
    # normalised to a fixed name under the application's slug directory.
    return 'credential-applications/{slug}/training-report.pdf'.format(
        slug=instance.slug)
class LegacyCredential(models.Model):
    """
    Stores instances of profiles that were credentialed on the old
    pn website.
    """
    first_names = models.CharField(max_length=100, blank=True, default='')
    last_name = models.CharField(max_length=100, blank=True, default='')
    email = models.EmailField(max_length=255, unique=True)
    country = models.CharField(max_length=100, blank=True, default='')
    # These dates are stored as strings in the legacy system.
    # All are credentialed for mimic
    mimic_approval_date = models.CharField(max_length=100)
    eicu_approval_date = models.CharField(max_length=100, blank=True,
        default='')
    # Their stated reason for using the data
    info = models.CharField(max_length=300, blank=True, default='')
    # Whether the credentialing has been migrated to an account on the
    # new site
    migrated = models.BooleanField(default=False)
    migration_date = models.DateTimeField(null=True)
    migrated_user = models.ForeignKey('user.User', null=True, on_delete=models.CASCADE)
    reference_email = models.CharField(max_length=255, blank=True, default='')
    # Set when the legacy credentialing is revoked; null while active
    revoked_datetime = models.DateTimeField(null=True)
    def __str__(self):
        return self.email
    def is_legacy(self):
        # Distinguishes this model from CredentialApplication in shared views
        return True
    def revoke(self):
        """
        Revokes a legacy application.
        """
        # Removes credentialing from the user
        # Atomic: the timestamp and the user flags must change together
        with transaction.atomic():
            self.revoked_datetime = timezone.now()
            self.migrated_user.is_credentialed = False
            self.migrated_user.credential_datetime = None
            self.migrated_user.save()
            self.save()
        logger.info('Credentialing for user {0} has been removed.'.format(
            self.migrated_user.email))
class Profile(models.Model):
    """
    Class storing profile information which is
    not directly related to account activity
    or authentication.
    This model should contain some fields which help map
    projects to datacite:
    https://schema.datacite.org/
    https://schema.datacite.org/meta/kernel-4.0/doc/DataCite-MetadataKernel_v4.0.pdf
    """
    user = models.OneToOneField('user.User', related_name='profile',
        on_delete=models.CASCADE)
    first_names = models.CharField(max_length=100, validators=[validators.validate_name])
    last_name = models.CharField(max_length=50, validators=[validators.validate_name])
    affiliation = models.CharField(max_length=250, blank=True, default='',
        validators=[validators.validate_affiliation])
    location = models.CharField(max_length=100, blank=True, default='',
        validators=[validators.validate_location])
    website = models.URLField(default='', blank=True, null=True)
    photo = models.ImageField(upload_to=photo_path, blank=True, null=True,
        validators=[FileExtensionValidator(['png', 'jpg', 'jpeg'],
            'Allowed filetypes are png and jpg only.')])

    # Maximum accepted profile photo size in bytes (2 MiB)
    MAX_PHOTO_SIZE = 2 * 1024 ** 2
    # Where all the users' files are kept
    FILE_ROOT = os.path.join(settings.MEDIA_ROOT, 'users')

    def __str__(self):
        return self.get_full_name()

    def get_full_name(self):
        """Return 'first_names last_name'."""
        return ' '.join([self.first_names, self.last_name])

    def get_names(self):
        """Return (first_names, last_name)."""
        return self.first_names, self.last_name

    def delete_photo(self):
        """
        Delete the photo file (if any) and clear the field.
        """
        if self.photo:
            try:
                os.remove(self.photo.path)
            except FileNotFoundError:
                # File already gone on disk; still clear the stale reference
                pass
            self.photo = None
            self.save()

    def file_root(self):
        """Where the profile's files are stored."""
        # Fix: Profile has no `username` attribute; the username lives on
        # the related User (photo_path above uses user.username the same way).
        return os.path.join(Profile.FILE_ROOT, self.user.username)
class Orcid(models.Model):
    """
    Class for storing ORCID account information.
    Here are examples of expected formats from a sandbox account:
        orcid_id: 0000-0002-8983-9907
        access_token: <PASSWORD>
        refresh_token: <PASSWORD>
        token_expiration: 2242899965.166591
    where the token_expiration is in unix timestamp format (seconds since Jan 1st 1970)
    """
    user = models.OneToOneField('user.User', related_name='orcid',
        on_delete=models.CASCADE)
    orcid_id = models.CharField(max_length=50, default='', blank=True,
        validators=[validators.validate_orcid_id])
    name = models.CharField(max_length=50, default='', blank=True)
    access_token = models.CharField(max_length=50, default='', blank=True,
        validators=[validators.validate_orcid_token])
    refresh_token = models.CharField(max_length=50, default='', blank=True,
        validators=[validators.validate_orcid_token])
    token_type = models.CharField(max_length=50, default='', blank=True)
    token_scope = models.CharField(max_length=50, default='', blank=True)
    # Unix timestamp; Decimal keeps the fractional seconds exactly
    token_expiration = models.DecimalField(max_digits=50, decimal_places=40, default=0)
    @staticmethod
    def get_orcid_url():
        # Base URL of the ORCID service (production or sandbox, per settings)
        return settings.ORCID_DOMAIN
class DualAuthModelBackend():
    """
    This is a ModelBackend that allows authentication with either a username
    or an email address.
    """
    def authenticate(self, request, username=None, password=None):
        """Return the matching user on success, None otherwise."""
        # Fix: Django may call backends without credentials; '@' in None
        # raised TypeError before.
        if username is None or password is None:
            return None
        if '@' in username:
            kwargs = {'email': username.lower()}
        else:
            kwargs = {'username': username.lower()}
        UserModel = get_user_model()
        try:
            user = UserModel.objects.get(**kwargs)
        # Fix: catch the exception of the model actually queried, instead of
        # assuming get_user_model() is the imported User class.
        except UserModel.DoesNotExist:
            logger.error('Unsuccessful authentication {0}'.format(username.lower()))
            return None
        if user.check_password(password):
            return user
        return None

    def get_user(self, user_id):
        """Return the user for a primary key, or None if it does not exist."""
        try:
            return get_user_model().objects.get(pk=user_id)
        except get_user_model().DoesNotExist:
            return None
class CredentialApplication(models.Model):
    """
    An application to become PhysioNet credentialed.
    """
    RESEARCHER_CATEGORIES = (
        (0, 'Student'),
        (7, 'Graduate Student'),
        (1, 'Postdoc'),
        (2, 'Academic Researcher'),
        (3, 'Hospital Researcher'),
        (4, 'Industry Researcher'),
        (5, 'Government Researcher'),
        (6, 'Independent Researcher'),
    )

    REFERENCE_CATEGORIES = (
        (0, 'Supervisor (required for students and Postdocs)'),
        (1, 'Colleague'),
        (2, 'Coauthor'),
        (3, 'Other'),
    )

    COURSE_CATEGORIES = (
        (0, 'Not for a course'),
        (1, 'I am taking a course using the data'),
    )

    REFERENCE_RESPONSES = (
        ('', '-----------'),
        (1, 'No'),
        (2, 'Yes')
    )

    REJECT_ACCEPT_WITHDRAW = (
        ('', '-----------'),
        (1, 'Reject'),
        (2, 'Accept'),
        (3, 'Withdrawn'),
        (4, 'Revoked')
    )

    # Maximum size (bytes) for training_completion_report
    MAX_REPORT_SIZE = 2 * 1024 * 1024

    # Location for storing files associated with the application
    FILE_ROOT = os.path.join(settings.MEDIA_ROOT, 'credential-applications')

    slug = models.SlugField(max_length=20, unique=True, db_index=True)
    application_datetime = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey('user.User',
        related_name='credential_applications', on_delete=models.CASCADE)

    # Personal fields
    first_names = models.CharField(max_length=100,
        validators=[validators.validate_name])
    last_name = models.CharField(max_length=50,
        validators=[validators.validate_name])
    researcher_category = models.PositiveSmallIntegerField(
        choices=RESEARCHER_CATEGORIES)

    # Organization fields
    organization_name = models.CharField(max_length=200,
        validators=[validators.validate_organization])
    job_title = models.CharField(max_length=60,
        validators=[validators.validate_job_title])
    city = models.CharField(max_length=100,
        validators=[validators.validate_city])
    state_province = models.CharField(max_length=100,
        validators=[validators.validate_state], default='', blank=True)
    country = models.CharField(max_length=2, choices=COUNTRIES)
    webpage = models.URLField(default='', blank=True)
    zip_code = models.CharField(max_length=60,
        validators=[validators.validate_zipcode])
    suffix = models.CharField(max_length=60,
        validators=[validators.validate_suffix], default='', blank=True)

    # Human resources training
    training_course_name = models.CharField(max_length=100, default='',
        blank=True, validators=[validators.validate_training_course])
    training_completion_date = models.DateField(null=True, blank=True)
    training_completion_report = models.FileField(
        upload_to=training_report_path, validators=[FileExtensionValidator(
            ['pdf'], 'File must be a pdf.')])
    training_completion_report_url = models.URLField(blank=True, null=True)

    # Course info
    course_category = models.PositiveSmallIntegerField(
        choices=COURSE_CATEGORIES, null=True, blank=True)
    course_info = models.CharField(max_length=100, default='', blank=True,
        validators=[validators.validate_course])

    # Reference
    reference_category = models.PositiveSmallIntegerField(null=True,
        blank=True, choices=REFERENCE_CATEGORIES)
    reference_name = models.CharField(max_length=202, default='', blank=True,
        validators=[validators.validate_reference_name])
    reference_email = models.EmailField(default='', blank=True)
    reference_organization = models.CharField(max_length=200,
        validators=[validators.validate_organization], blank=True)
    reference_title = models.CharField(max_length=60, default='', blank=True,
        validators=[validators.validate_reference_title])

    # Application status: 0 = pending; decided states follow
    # REJECT_ACCEPT_WITHDRAW (1 = rejected, 2 = accepted, 3 = withdrawn,
    # 4 = revoked).
    status = models.PositiveSmallIntegerField(default=0,
        choices=REJECT_ACCEPT_WITHDRAW)
    reference_contact_datetime = models.DateTimeField(null=True)
    reference_response_datetime = models.DateTimeField(null=True)
    # Whether reference verifies the applicant. 0 1 2 = null, no, yes
    reference_response = models.PositiveSmallIntegerField(default=0,
        choices=REFERENCE_RESPONSES)
    reference_response_text = models.CharField(max_length=2000,
        validators=[validators.validate_reference_response])
    research_summary = models.CharField(max_length=1000,
        validators=[validators.validate_research_summary])
    project_of_interest = models.ForeignKey('project.PublishedProject',
        null=True, on_delete=models.SET_NULL,
        limit_choices_to={'access_policy': 2},)
    decision_datetime = models.DateTimeField(null=True)
    responder = models.ForeignKey('user.User', null=True,
        related_name='responded_applications', on_delete=models.SET_NULL)
    responder_comments = models.CharField(max_length=500, default='',
        blank=True)
    revoked_datetime = models.DateTimeField(null=True)

    def file_root(self):
        """Location for storing files associated with the application."""
        return os.path.join(CredentialApplication.FILE_ROOT, self.slug)

    def get_full_name(self):
        """Return the applicant's full name."""
        return ' '.join([self.first_names, self.last_name])

    def get_latest_by_user(self):
        """Return the applicant's most recent credentialing application."""
        return CredentialApplication.objects.filter(user=self.user).last()

    def is_latest_by_user(self):
        """Return True if this is the applicant's most recent application."""
        return self == self.get_latest_by_user()

    def is_legacy(self):
        # Always False here -- presumably the LegacyCredential counterpart
        # returns True; confirm against that model.
        return False

    def time_elapsed(self):
        """Return the number of days since the application was submitted."""
        return (timezone.now() - self.application_datetime).days

    def _apply_decision(self, decision, responder):
        """
        Record a decision on a credentialing application.

        Args:
            decision (int): 1 = reject, 2 = accept, 3 = withdraw.
            responder (User): the user making the decision.
        """
        self.responder = responder
        self.status = decision
        self.decision_datetime = timezone.now()
        self.save()

    def reject(self, responder):
        """
        Reject a credentialing application.
        """
        self._apply_decision(1, responder)

    def accept(self, responder):
        """
        Accept a credentialing application and credential the user.
        """
        try:
            with transaction.atomic():
                self._apply_decision(2, responder)
                # Update the user's credentialed status atomically with
                # the application decision.
                user = self.user
                user.is_credentialed = True
                user.credential_datetime = timezone.now()
                user.save()
        except DatabaseError:
            # The previous handler referenced an undefined `request`
            # object (a NameError at runtime). Log and re-raise so the
            # caller can report the failure.
            logger.exception(
                'Database error while accepting application %s', self.slug)
            raise

    def withdraw(self, responder):
        """
        Withdraw a credentialing application.
        """
        self._apply_decision(3, responder)

    def ref_known_flag(self):
        """
        Returns True if the reference is known, else False. By "Known" we mean
        that the reference has been previously contacted.
        """
        contacted = CredentialApplication.objects.filter(
            reference_email__iexact=self.reference_email,
            reference_contact_datetime__isnull=False).exclude(
            reference_email='')
        legacy = LegacyCredential.objects.filter(
            reference_email__iexact=self.reference_email).exclude(
            reference_email='')
        # exists() avoids fetching rows just to test for membership.
        return contacted.exists() or legacy.exists()

    def get_reference_user(self):
        """
        Returns reference User if the reference is a registered user with a
        verified email, else None.
        """
        try:
            return User.objects.get(
                associated_emails__email__iexact=self.reference_email,
                associated_emails__is_verified=True)
        except ObjectDoesNotExist:
            return None

    def ref_user_flag(self):
        """
        Returns True if the reference is a registered user, else False.
        """
        return self.get_reference_user() is not None

    def ref_credentialed_flag(self):
        """
        Returns True if the reference is a credentialed registered user,
        else False.
        """
        ref = self.get_reference_user()
        return ref.is_credentialed if ref is not None else False

    def revoke(self):
        """
        Revokes an approved application and the user's credentialing.
        """
        # Set the application as unsuccessful with the current datetime
        self.status = 4
        self.revoked_datetime = timezone.now()
        # Removes credentialing from the user
        self.user.is_credentialed = False
        self.user.credential_datetime = None
        # Save both records together.
        with transaction.atomic():
            self.user.save()
            self.save()
        logger.info('Credentialing for user {0} has been removed.'.format(
            self.user.email))

    def remove_contact_reference(self):
        """
        Remove the date that indicates when the reference was contacted.
        Note that this may also affect the "known" status of the reference.
        """
        self.reference_contact_datetime = None
        self.save()

    def update_review_status(self, review_status):
        """
        Update the review status of a credentialing application.

        Args:
            review_status (int): new status for the related
                CredentialReview record.
        """
        self.credential_review.status = review_status
        self.credential_review.save()
class CredentialReview(models.Model):
"""
Reviews for the CredentialApplications.
NOTES
-----
This relational model will be deleted in the case that a credential
reviewer decides to "reset" the application, meaning reset it back to the
"initial review" stage.
"""
REVIEW_STATUS_LABELS = (
('', | |
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
R56 = params["R56"]
R57 = params["R57"]
R58 = params["R58"]
R59 = params["R59"]
R60 = params["R60"]
R61 = params["R61"]
R62 = params["R62"]
R63 = params["R63"]
R64 = params["R64"]
R65 = params["R65"]
R66 = params["R66"]
R67 = params["R67"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
+ (R55 / (1 + w * 1j * t_values[54]))
+ (R56 / (1 + w * 1j * t_values[55]))
+ (R57 / (1 + w * 1j * t_values[56]))
+ (R58 / (1 + w * 1j * t_values[57]))
+ (R59 / (1 + w * 1j * t_values[58]))
+ (R60 / (1 + w * 1j * t_values[59]))
+ (R61 / (1 + w * 1j * t_values[60]))
+ (R62 / (1 + w * 1j * t_values[61]))
+ (R63 / (1 + w * 1j * t_values[62]))
+ (R64 / (1 + w * 1j * t_values[63]))
+ (R65 / (1 + w * 1j * t_values[64]))
+ (R66 / (1 + w * 1j * t_values[65]))
+ (R67 / (1 + w * 1j * t_values[66]))
)
def KK_RC68_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
R56 = params["R56"]
R57 = params["R57"]
R58 = params["R58"]
R59 = params["R59"]
R60 = params["R60"]
R61 = params["R61"]
R62 = params["R62"]
R63 = params["R63"]
R64 = params["R64"]
R65 = params["R65"]
R66 = params["R66"]
R67 = params["R67"]
R68 = params["R68"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j | |
YESIEUNG-SIOS
11F2 HANGUL JONGSEONG YESIEUNG-PANSIOS
11F3 HANGUL JONGSEONG PHIEUPH-PIEUP
11F4 HANGUL JONGSEONG KAPYEOUNPHIEUPH
11F5 HANGUL JONGSEONG HIEUH-NIEUN
11F6 HANGUL JONGSEONG HIEUH-RIEUL
11F7 HANGUL JONGSEONG HIEUH-MIEUM
11F8 HANGUL JONGSEONG HIEUH-PIEUP
11F9 HANGUL JONGSEONG YEORINHIEUH
11FA HANGUL JONGSEONG KIYEOK-NIEUN
11FB HANGUL JONGSEONG KIYEOK-PIEUP
11FC HANGUL JONGSEONG KIYEOK-CHIEUCH
11FD HANGUL JONGSEONG KIYEOK-KHIEUKH
11FE HANGUL JONGSEONG KIYEOK-HIEUH
11FF HANGUL JONGSEONG SSANGNIEUN
1200 ETHIOPIC SYLLABLE HA
1201 ETHIOPIC SYLLABLE HU
1202 ETHIOPIC SYLLABLE HI
1203 ETHIOPIC SYLLABLE HAA
1204 ETHIOPIC SYLLABLE HEE
1205 ETHIOPIC SYLLABLE HE
1206 ETHIOPIC SYLLABLE HO
1207 ETHIOPIC SYLLABLE HOA
1208 ETHIOPIC SYLLABLE LA
1209 ETHIOPIC SYLLABLE LU
120A ETHIOPIC SYLLABLE LI
120B ETHIOPIC SYLLABLE LAA
120C ETHIOPIC SYLLABLE LEE
120D ETHIOPIC SYLLABLE LE
120E ETHIOPIC SYLLABLE LO
120F ETHIOPIC SYLLABLE LWA
1210 ETHIOPIC SYLLABLE HHA
1211 ETHIOPIC SYLLABLE HHU
1212 ETHIOPIC SYLLABLE HHI
1213 ETHIOPIC SYLLABLE HHAA
1214 ETHIOPIC SYLLABLE HHEE
1215 ETHIOPIC SYLLABLE HHE
1216 ETHIOPIC SYLLABLE HHO
1217 ETHIOPIC SYLLABLE HHWA
1218 ETHIOPIC SYLLABLE MA
1219 ETHIOPIC SYLLABLE MU
121A ETHIOPIC SYLLABLE MI
121B ETHIOPIC SYLLABLE MAA
121C ETHIOPIC SYLLABLE MEE
121D ETHIOPIC SYLLABLE ME
121E ETHIOPIC SYLLABLE MO
121F ETHIOPIC SYLLABLE MWA
1220 ETHIOPIC SYLLABLE SZA
1221 ETHIOPIC SYLLABLE SZU
1222 ETHIOPIC SYLLABLE SZI
1223 ETHIOPIC SYLLABLE SZAA
1224 ETHIOPIC SYLLABLE SZEE
1225 ETHIOPIC SYLLABLE SZE
1226 ETHIOPIC SYLLABLE SZO
1227 ETHIOPIC SYLLABLE SZWA
1228 ETHIOPIC SYLLABLE RA
1229 ETHIOPIC SYLLABLE RU
122A ETHIOPIC SYLLABLE RI
122B ETHIOPIC SYLLABLE RAA
122C ETHIOPIC SYLLABLE REE
122D ETHIOPIC SYLLABLE RE
122E ETHIOPIC SYLLABLE RO
122F ETHIOPIC SYLLABLE RWA
1230 ETHIOPIC SYLLABLE SA
1231 ETHIOPIC SYLLABLE SU
1232 ETHIOPIC SYLLABLE SI
1233 ETHIOPIC SYLLABLE SAA
1234 ETHIOPIC SYLLABLE SEE
1235 ETHIOPIC SYLLABLE SE
1236 ETHIOPIC SYLLABLE SO
1237 ETHIOPIC SYLLABLE SWA
1238 ETHIOPIC SYLLABLE SHA
1239 ETHIOPIC SYLLABLE SHU
123A ETHIOPIC SYLLABLE SHI
123B ETHIOPIC SYLLABLE SHAA
123C ETHIOPIC SYLLABLE SHEE
123D ETHIOPIC SYLLABLE SHE
123E ETHIOPIC SYLLABLE SHO
123F ETHIOPIC SYLLABLE SHWA
1240 ETHIOPIC SYLLABLE QA
1241 ETHIOPIC SYLLABLE QU
1242 ETHIOPIC SYLLABLE QI
1243 ETHIOPIC SYLLABLE QAA
1244 ETHIOPIC SYLLABLE QEE
1245 ETHIOPIC SYLLABLE QE
1246 ETHIOPIC SYLLABLE QO
1247 ETHIOPIC SYLLABLE QOA
1248 ETHIOPIC SYLLABLE QWA
124A ETHIOPIC SYLLABLE QWI
124B ETHIOPIC SYLLABLE QWAA
124C ETHIOPIC SYLLABLE QWEE
124D ETHIOPIC SYLLABLE QWE
1250 ETHIOPIC SYLLABLE QHA
1251 ETHIOPIC SYLLABLE QHU
1252 ETHIOPIC SYLLABLE QHI
1253 ETHIOPIC SYLLABLE QHAA
1254 ETHIOPIC SYLLABLE QHEE
1255 ETHIOPIC SYLLABLE QHE
1256 ETHIOPIC SYLLABLE QHO
1258 ETHIOPIC SYLLABLE QHWA
125A ETHIOPIC SYLLABLE QHWI
125B ETHIOPIC SYLLABLE QHWAA
125C ETHIOPIC SYLLABLE QHWEE
125D ETHIOPIC SYLLABLE QHWE
1260 ETHIOPIC SYLLABLE BA
1261 ETHIOPIC SYLLABLE BU
1262 ETHIOPIC SYLLABLE BI
1263 ETHIOPIC SYLLABLE BAA
1264 ETHIOPIC SYLLABLE BEE
1265 ETHIOPIC SYLLABLE BE
1266 ETHIOPIC SYLLABLE BO
1267 ETHIOPIC SYLLABLE BWA
1268 ETHIOPIC SYLLABLE VA
1269 ETHIOPIC SYLLABLE VU
126A ETHIOPIC SYLLABLE VI
126B ETHIOPIC SYLLABLE VAA
126C ETHIOPIC SYLLABLE VEE
126D ETHIOPIC SYLLABLE VE
126E ETHIOPIC SYLLABLE VO
126F ETHIOPIC SYLLABLE VWA
1270 ETHIOPIC SYLLABLE TA
1271 ETHIOPIC SYLLABLE TU
1272 ETHIOPIC SYLLABLE TI
1273 ETHIOPIC SYLLABLE TAA
1274 ETHIOPIC SYLLABLE TEE
1275 ETHIOPIC SYLLABLE TE
1276 ETHIOPIC SYLLABLE TO
1277 ETHIOPIC SYLLABLE TWA
1278 ETHIOPIC SYLLABLE CA
1279 ETHIOPIC SYLLABLE CU
127A ETHIOPIC SYLLABLE CI
127B ETHIOPIC SYLLABLE CAA
127C ETHIOPIC SYLLABLE CEE
127D ETHIOPIC SYLLABLE CE
127E ETHIOPIC SYLLABLE CO
127F ETHIOPIC SYLLABLE CWA
1280 ETHIOPIC SYLLABLE XA
1281 ETHIOPIC SYLLABLE XU
1282 ETHIOPIC SYLLABLE XI
1283 ETHIOPIC SYLLABLE XAA
1284 ETHIOPIC SYLLABLE XEE
1285 ETHIOPIC SYLLABLE XE
1286 ETHIOPIC SYLLABLE XO
1287 ETHIOPIC SYLLABLE XOA
1288 ETHIOPIC SYLLABLE XWA
128A ETHIOPIC SYLLABLE XWI
128B ETHIOPIC SYLLABLE XWAA
128C ETHIOPIC SYLLABLE XWEE
128D ETHIOPIC SYLLABLE XWE
1290 ETHIOPIC SYLLABLE NA
1291 ETHIOPIC SYLLABLE NU
1292 ETHIOPIC SYLLABLE NI
1293 ETHIOPIC SYLLABLE NAA
1294 ETHIOPIC SYLLABLE NEE
1295 ETHIOPIC SYLLABLE NE
1296 ETHIOPIC SYLLABLE NO
1297 ETHIOPIC SYLLABLE NWA
1298 ETHIOPIC SYLLABLE NYA
1299 ETHIOPIC SYLLABLE NYU
129A ETHIOPIC SYLLABLE NYI
129B ETHIOPIC SYLLABLE NYAA
129C ETHIOPIC SYLLABLE NYEE
129D ETHIOPIC SYLLABLE NYE
129E ETHIOPIC SYLLABLE NYO
129F ETHIOPIC SYLLABLE NYWA
12A0 ETHIOPIC SYLLABLE GLOTTAL A
12A1 ETHIOPIC SYLLABLE GLOTTAL U
12A2 ETHIOPIC SYLLABLE GLOTTAL I
12A3 ETHIOPIC SYLLABLE GLOTTAL AA
12A4 ETHIOPIC SYLLABLE GLOTTAL EE
12A5 ETHIOPIC SYLLABLE GLOTTAL E
12A6 ETHIOPIC SYLLABLE GLOTTAL O
12A7 ETHIOPIC SYLLABLE GLOTTAL WA
12A8 ETHIOPIC SYLLABLE KA
12A9 ETHIOPIC SYLLABLE KU
12AA ETHIOPIC SYLLABLE KI
12AB ETHIOPIC SYLLABLE KAA
12AC ETHIOPIC SYLLABLE KEE
12AD ETHIOPIC SYLLABLE KE
12AE ETHIOPIC SYLLABLE KO
12AF ETHIOPIC SYLLABLE KOA
12B0 ETHIOPIC SYLLABLE KWA
12B2 ETHIOPIC SYLLABLE KWI
12B3 ETHIOPIC SYLLABLE KWAA
12B4 ETHIOPIC SYLLABLE KWEE
12B5 ETHIOPIC SYLLABLE KWE
12B8 ETHIOPIC SYLLABLE KXA
12B9 ETHIOPIC SYLLABLE KXU
12BA ETHIOPIC SYLLABLE KXI
12BB ETHIOPIC SYLLABLE KXAA
12BC ETHIOPIC SYLLABLE KXEE
12BD ETHIOPIC SYLLABLE KXE
12BE ETHIOPIC SYLLABLE KXO
12C0 ETHIOPIC SYLLABLE KXWA
12C2 ETHIOPIC SYLLABLE KXWI
12C3 ETHIOPIC SYLLABLE KXWAA
12C4 ETHIOPIC SYLLABLE KXWEE
12C5 ETHIOPIC SYLLABLE KXWE
12C8 ETHIOPIC SYLLABLE WA
12C9 ETHIOPIC SYLLABLE WU
12CA ETHIOPIC SYLLABLE WI
12CB ETHIOPIC SYLLABLE WAA
12CC ETHIOPIC SYLLABLE WEE
12CD ETHIOPIC SYLLABLE WE
12CE ETHIOPIC SYLLABLE WO
12CF ETHIOPIC SYLLABLE WOA
12D0 ETHIOPIC SYLLABLE PHARYNGEAL A
12D1 ETHIOPIC SYLLABLE PHARYNGEAL U
12D2 ETHIOPIC SYLLABLE PHARYNGEAL I
12D3 ETHIOPIC SYLLABLE PHARYNGEAL AA
12D4 ETHIOPIC SYLLABLE PHARYNGEAL EE
12D5 ETHIOPIC SYLLABLE PHARYNGEAL E
12D6 ETHIOPIC SYLLABLE PHARYNGEAL O
12D8 ETHIOPIC SYLLABLE ZA
12D9 ETHIOPIC SYLLABLE ZU
12DA ETHIOPIC SYLLABLE ZI
12DB ETHIOPIC SYLLABLE ZAA
12DC ETHIOPIC SYLLABLE ZEE
12DD ETHIOPIC SYLLABLE ZE
12DE ETHIOPIC SYLLABLE ZO
12DF ETHIOPIC SYLLABLE ZWA
12E0 ETHIOPIC SYLLABLE ZHA
12E1 ETHIOPIC SYLLABLE ZHU
12E2 ETHIOPIC SYLLABLE ZHI
12E3 ETHIOPIC SYLLABLE ZHAA
12E4 ETHIOPIC SYLLABLE ZHEE
12E5 ETHIOPIC SYLLABLE ZHE
12E6 ETHIOPIC SYLLABLE ZHO
12E7 ETHIOPIC SYLLABLE ZHWA
12E8 ETHIOPIC SYLLABLE YA
12E9 ETHIOPIC SYLLABLE YU
12EA ETHIOPIC SYLLABLE YI
12EB ETHIOPIC SYLLABLE YAA
12EC ETHIOPIC SYLLABLE YEE
12ED ETHIOPIC SYLLABLE YE
12EE ETHIOPIC SYLLABLE YO
12EF ETHIOPIC SYLLABLE YOA
12F0 ETHIOPIC SYLLABLE DA
12F1 ETHIOPIC SYLLABLE DU
12F2 ETHIOPIC SYLLABLE DI
12F3 ETHIOPIC SYLLABLE DAA
12F4 ETHIOPIC SYLLABLE DEE
12F5 ETHIOPIC SYLLABLE DE
12F6 ETHIOPIC SYLLABLE DO
12F7 ETHIOPIC SYLLABLE DWA
12F8 ETHIOPIC SYLLABLE DDA
12F9 ETHIOPIC SYLLABLE DDU
12FA ETHIOPIC SYLLABLE DDI
12FB ETHIOPIC SYLLABLE DDAA
12FC ETHIOPIC SYLLABLE DDEE
12FD ETHIOPIC SYLLABLE DDE
12FE ETHIOPIC SYLLABLE DDO
12FF ETHIOPIC SYLLABLE DDWA
1300 ETHIOPIC SYLLABLE JA
1301 ETHIOPIC SYLLABLE JU
1302 ETHIOPIC SYLLABLE JI
1303 ETHIOPIC SYLLABLE JAA
1304 ETHIOPIC SYLLABLE JEE
1305 ETHIOPIC SYLLABLE JE
1306 ETHIOPIC SYLLABLE JO
1307 ETHIOPIC SYLLABLE JWA
1308 ETHIOPIC SYLLABLE GA
1309 ETHIOPIC SYLLABLE GU
130A ETHIOPIC SYLLABLE GI
130B ETHIOPIC SYLLABLE GAA
130C ETHIOPIC SYLLABLE GEE
130D ETHIOPIC SYLLABLE GE
130E ETHIOPIC SYLLABLE GO
130F ETHIOPIC SYLLABLE GOA
1310 ETHIOPIC SYLLABLE GWA
1312 ETHIOPIC SYLLABLE GWI
1313 ETHIOPIC SYLLABLE GWAA
1314 ETHIOPIC SYLLABLE GWEE
1315 ETHIOPIC SYLLABLE GWE
1318 ETHIOPIC SYLLABLE GGA
1319 ETHIOPIC SYLLABLE GGU
131A ETHIOPIC SYLLABLE GGI
131B ETHIOPIC SYLLABLE GGAA
131C ETHIOPIC SYLLABLE GGEE
131D ETHIOPIC SYLLABLE GGE
131E ETHIOPIC SYLLABLE GGO
131F ETHIOPIC SYLLABLE GGWAA
1320 ETHIOPIC SYLLABLE THA
1321 ETHIOPIC SYLLABLE THU
1322 ETHIOPIC SYLLABLE THI
1323 ETHIOPIC SYLLABLE THAA
1324 ETHIOPIC SYLLABLE THEE
1325 ETHIOPIC SYLLABLE THE
1326 ETHIOPIC SYLLABLE THO
1327 ETHIOPIC SYLLABLE THWA
1328 ETHIOPIC SYLLABLE CHA
1329 ETHIOPIC SYLLABLE CHU
132A ETHIOPIC SYLLABLE CHI
132B ETHIOPIC SYLLABLE CHAA
132C ETHIOPIC SYLLABLE CHEE
132D ETHIOPIC SYLLABLE CHE
132E ETHIOPIC SYLLABLE CHO
132F ETHIOPIC SYLLABLE CHWA
1330 ETHIOPIC SYLLABLE PHA
1331 ETHIOPIC SYLLABLE PHU
1332 ETHIOPIC SYLLABLE PHI
1333 ETHIOPIC SYLLABLE PHAA
1334 ETHIOPIC SYLLABLE PHEE
1335 ETHIOPIC SYLLABLE PHE
1336 ETHIOPIC SYLLABLE PHO
1337 ETHIOPIC SYLLABLE PHWA
1338 ETHIOPIC SYLLABLE TSA
1339 ETHIOPIC SYLLABLE TSU
133A ETHIOPIC SYLLABLE TSI
133B ETHIOPIC SYLLABLE TSAA
133C ETHIOPIC SYLLABLE TSEE
133D ETHIOPIC SYLLABLE TSE
133E ETHIOPIC SYLLABLE TSO
133F ETHIOPIC SYLLABLE TSWA
1340 ETHIOPIC SYLLABLE TZA
1341 ETHIOPIC SYLLABLE TZU
1342 ETHIOPIC SYLLABLE TZI
1343 ETHIOPIC SYLLABLE TZAA
1344 ETHIOPIC SYLLABLE TZEE
1345 ETHIOPIC SYLLABLE TZE
1346 ETHIOPIC SYLLABLE TZO
1347 ETHIOPIC SYLLABLE TZOA
1348 ETHIOPIC SYLLABLE FA
1349 ETHIOPIC SYLLABLE FU
134A ETHIOPIC SYLLABLE FI
134B ETHIOPIC SYLLABLE FAA
134C ETHIOPIC SYLLABLE FEE
134D ETHIOPIC SYLLABLE FE
134E ETHIOPIC SYLLABLE FO
134F ETHIOPIC SYLLABLE FWA
1350 ETHIOPIC SYLLABLE PA
1351 ETHIOPIC SYLLABLE PU
1352 ETHIOPIC SYLLABLE PI
1353 ETHIOPIC SYLLABLE PAA
1354 ETHIOPIC SYLLABLE PEE
1355 ETHIOPIC SYLLABLE PE
1356 ETHIOPIC SYLLABLE PO
1357 ETHIOPIC SYLLABLE PWA
1358 ETHIOPIC SYLLABLE RYA
1359 ETHIOPIC SYLLABLE MYA
135A ETHIOPIC SYLLABLE FYA
135F ETHIOPIC COMBINING GEMINATION MARK
1360 ETHIOPIC SECTION MARK
1361 ETHIOPIC WORDSPACE
1362 ETHIOPIC FULL STOP
1363 ETHIOPIC COMMA
1364 ETHIOPIC SEMICOLON
1365 ETHIOPIC COLON
1366 ETHIOPIC PREFACE COLON
1367 ETHIOPIC QUESTION MARK
1368 ETHIOPIC PARAGRAPH SEPARATOR
1369 ETHIOPIC DIGIT ONE
136A ETHIOPIC DIGIT TWO
136B ETHIOPIC DIGIT THREE
136C ETHIOPIC DIGIT FOUR
136D ETHIOPIC DIGIT FIVE
136E ETHIOPIC DIGIT SIX
136F ETHIOPIC DIGIT SEVEN
1370 ETHIOPIC DIGIT EIGHT
1371 ETHIOPIC DIGIT NINE
1372 ETHIOPIC NUMBER TEN
1373 ETHIOPIC NUMBER TWENTY
1374 ETHIOPIC NUMBER THIRTY
1375 ETHIOPIC NUMBER FORTY
1376 ETHIOPIC NUMBER FIFTY
1377 ETHIOPIC NUMBER SIXTY
1378 ETHIOPIC NUMBER SEVENTY
1379 ETHIOPIC NUMBER EIGHTY
137A ETHIOPIC NUMBER NINETY
137B ETHIOPIC NUMBER HUNDRED
137C ETHIOPIC NUMBER TEN THOUSAND
1380 ETHIOPIC SYLLABLE SEBATBEIT MWA
1381 ETHIOPIC SYLLABLE MWI
1382 ETHIOPIC SYLLABLE MWEE
1383 ETHIOPIC SYLLABLE MWE
1384 ETHIOPIC SYLLABLE SEBATBEIT BWA
1385 ETHIOPIC SYLLABLE BWI
1386 ETHIOPIC SYLLABLE BWEE
1387 ETHIOPIC SYLLABLE BWE
1388 ETHIOPIC SYLLABLE SEBATBEIT FWA
1389 ETHIOPIC SYLLABLE FWI
138A ETHIOPIC SYLLABLE FWEE
138B ETHIOPIC SYLLABLE FWE
138C ETHIOPIC SYLLABLE SEBATBEIT PWA
138D ETHIOPIC SYLLABLE PWI
138E ETHIOPIC SYLLABLE PWEE
138F ETHIOPIC SYLLABLE PWE
1390 ETHIOPIC TONAL MARK YIZET
1391 ETHIOPIC TONAL MARK DERET
1392 ETHIOPIC TONAL MARK RIKRIK
1393 ETHIOPIC TONAL MARK SHORT RIKRIK
1394 ETHIOPIC TONAL MARK DIFAT
1395 ETHIOPIC TONAL MARK KENAT
1396 ETHIOPIC TONAL MARK CHIRET
1397 ETHIOPIC TONAL MARK HIDET
1398 ETHIOPIC TONAL MARK DERET-HIDET
1399 ETHIOPIC TONAL MARK KURT
13A0 CHEROKEE LETTER A
13A1 CHEROKEE LETTER E
13A2 CHEROKEE LETTER I
13A3 CHEROKEE LETTER O
13A4 CHEROKEE LETTER U
13A5 CHEROKEE LETTER V
13A6 CHEROKEE LETTER GA
13A7 CHEROKEE LETTER KA
13A8 CHEROKEE LETTER GE
13A9 CHEROKEE LETTER GI
13AA CHEROKEE LETTER GO
13AB CHEROKEE LETTER GU
13AC CHEROKEE LETTER GV
13AD CHEROKEE LETTER HA
13AE CHEROKEE LETTER HE
13AF CHEROKEE LETTER HI
13B0 CHEROKEE LETTER HO
13B1 CHEROKEE LETTER HU
13B2 CHEROKEE LETTER HV
13B3 CHEROKEE LETTER LA
13B4 CHEROKEE LETTER LE
13B5 CHEROKEE LETTER LI
13B6 CHEROKEE LETTER LO
13B7 CHEROKEE LETTER LU
13B8 CHEROKEE LETTER LV
13B9 CHEROKEE LETTER MA
13BA CHEROKEE LETTER ME
13BB CHEROKEE LETTER MI
13BC CHEROKEE LETTER MO
13BD CHEROKEE LETTER MU
13BE CHEROKEE LETTER NA
13BF CHEROKEE LETTER HNA
13C0 CHEROKEE LETTER NAH
13C1 CHEROKEE LETTER NE
13C2 CHEROKEE LETTER NI
13C3 CHEROKEE LETTER NO
13C4 CHEROKEE LETTER NU
13C5 CHEROKEE LETTER NV
13C6 CHEROKEE LETTER QUA
13C7 CHEROKEE LETTER QUE
13C8 CHEROKEE LETTER QUI
13C9 CHEROKEE LETTER QUO
13CA CHEROKEE LETTER QUU
13CB CHEROKEE LETTER QUV
13CC CHEROKEE LETTER SA
13CD CHEROKEE LETTER S
13CE CHEROKEE LETTER SE
13CF CHEROKEE LETTER SI
13D0 CHEROKEE LETTER SO
13D1 CHEROKEE LETTER SU
13D2 CHEROKEE LETTER SV
13D3 CHEROKEE LETTER DA
13D4 CHEROKEE LETTER TA
13D5 CHEROKEE LETTER DE
13D6 CHEROKEE LETTER TE
13D7 CHEROKEE LETTER DI
13D8 CHEROKEE LETTER TI
13D9 CHEROKEE LETTER DO
13DA CHEROKEE LETTER DU
13DB CHEROKEE LETTER DV
13DC CHEROKEE LETTER DLA
13DD CHEROKEE LETTER TLA
13DE CHEROKEE LETTER TLE
13DF CHEROKEE LETTER TLI
13E0 CHEROKEE LETTER TLO
13E1 CHEROKEE LETTER TLU
13E2 CHEROKEE LETTER TLV
13E3 CHEROKEE LETTER TSA
13E4 CHEROKEE LETTER TSE
13E5 CHEROKEE LETTER TSI
13E6 CHEROKEE LETTER TSO
13E7 CHEROKEE LETTER TSU
13E8 CHEROKEE LETTER TSV
13E9 CHEROKEE LETTER WA
13EA CHEROKEE LETTER WE
13EB CHEROKEE LETTER WI
13EC CHEROKEE LETTER WO
13ED CHEROKEE LETTER WU
13EE CHEROKEE LETTER WV
13EF CHEROKEE LETTER YA
13F0 CHEROKEE LETTER YE
13F1 CHEROKEE LETTER YI
13F2 CHEROKEE LETTER YO
13F3 CHEROKEE LETTER YU
13F4 CHEROKEE LETTER YV
1400 CANADIAN SYLLABICS HYPHEN
1401 CANADIAN SYLLABICS E
1402 CANADIAN SYLLABICS AAI
1403 CANADIAN SYLLABICS I
1404 CANADIAN SYLLABICS II
1405 CANADIAN SYLLABICS O
1406 CANADIAN SYLLABICS OO
1407 CANADIAN SYLLABICS Y-CREE OO
1408 CANADIAN SYLLABICS CARRIER EE
1409 CANADIAN SYLLABICS CARRIER I
140A CANADIAN SYLLABICS A
140B CANADIAN SYLLABICS AA
140C CANADIAN SYLLABICS WE
140D CANADIAN SYLLABICS WEST-CREE WE
140E CANADIAN SYLLABICS WI
140F CANADIAN SYLLABICS WEST-CREE WI
1410 CANADIAN SYLLABICS WII
1411 CANADIAN SYLLABICS WEST-CREE WII
1412 CANADIAN SYLLABICS WO
1413 CANADIAN SYLLABICS WEST-CREE WO
1414 CANADIAN SYLLABICS WOO
1415 CANADIAN SYLLABICS WEST-CREE WOO
1416 CANADIAN SYLLABICS NASKAPI WOO
1417 CANADIAN SYLLABICS WA
1418 CANADIAN SYLLABICS WEST-CREE WA
1419 CANADIAN SYLLABICS WAA
141A CANADIAN SYLLABICS WEST-CREE WAA
141B CANADIAN SYLLABICS NASKAPI WAA
141C CANADIAN SYLLABICS AI
141D CANADIAN SYLLABICS Y-CREE W
141E CANADIAN SYLLABICS GLOTTAL STOP
141F CANADIAN SYLLABICS FINAL ACUTE
1420 CANADIAN SYLLABICS FINAL GRAVE
1421 CANADIAN SYLLABICS FINAL BOTTOM HALF RING
1422 CANADIAN SYLLABICS FINAL TOP HALF RING
1423 CANADIAN SYLLABICS FINAL RIGHT HALF RING
1424 CANADIAN SYLLABICS FINAL RING
1425 CANADIAN SYLLABICS FINAL DOUBLE ACUTE
1426 CANADIAN SYLLABICS FINAL DOUBLE SHORT VERTICAL STROKES
1427 CANADIAN SYLLABICS FINAL MIDDLE DOT
1428 CANADIAN SYLLABICS FINAL SHORT HORIZONTAL STROKE
1429 CANADIAN SYLLABICS FINAL PLUS
142A CANADIAN SYLLABICS FINAL DOWN TACK
142B CANADIAN SYLLABICS EN
142C CANADIAN SYLLABICS IN
142D CANADIAN SYLLABICS ON
142E CANADIAN SYLLABICS AN
142F CANADIAN SYLLABICS PE
1430 CANADIAN SYLLABICS PAAI
1431 CANADIAN SYLLABICS PI
1432 CANADIAN SYLLABICS PII
1433 CANADIAN SYLLABICS PO
1434 CANADIAN SYLLABICS POO
1435 CANADIAN SYLLABICS Y-CREE POO
1436 CANADIAN SYLLABICS CARRIER HEE
1437 CANADIAN SYLLABICS CARRIER HI
1438 CANADIAN SYLLABICS PA
1439 CANADIAN SYLLABICS PAA
143A CANADIAN SYLLABICS PWE
143B CANADIAN SYLLABICS WEST-CREE PWE
143C CANADIAN SYLLABICS PWI
143D CANADIAN SYLLABICS WEST-CREE PWI
143E CANADIAN SYLLABICS PWII
143F CANADIAN SYLLABICS WEST-CREE PWII
1440 CANADIAN SYLLABICS PWO
1441 CANADIAN SYLLABICS WEST-CREE PWO
1442 CANADIAN SYLLABICS PWOO
1443 CANADIAN SYLLABICS WEST-CREE PWOO
1444 CANADIAN SYLLABICS PWA
1445 CANADIAN SYLLABICS WEST-CREE PWA
1446 CANADIAN SYLLABICS PWAA
1447 CANADIAN SYLLABICS WEST-CREE PWAA
1448 CANADIAN SYLLABICS Y-CREE PWAA
1449 CANADIAN SYLLABICS P
144A CANADIAN SYLLABICS WEST-CREE P
144B CANADIAN SYLLABICS CARRIER H
144C CANADIAN SYLLABICS TE
144D CANADIAN SYLLABICS TAAI
144E CANADIAN SYLLABICS TI
144F CANADIAN SYLLABICS TII
1450 CANADIAN SYLLABICS TO
1451 CANADIAN SYLLABICS TOO
1452 CANADIAN SYLLABICS Y-CREE TOO
1453 CANADIAN SYLLABICS CARRIER DEE
1454 CANADIAN SYLLABICS CARRIER DI
1455 CANADIAN SYLLABICS TA
1456 CANADIAN SYLLABICS TAA
1457 CANADIAN SYLLABICS TWE
1458 CANADIAN SYLLABICS WEST-CREE TWE
1459 CANADIAN SYLLABICS TWI
145A CANADIAN SYLLABICS WEST-CREE TWI
145B CANADIAN SYLLABICS TWII
145C CANADIAN SYLLABICS WEST-CREE TWII
145D CANADIAN SYLLABICS TWO
145E CANADIAN SYLLABICS WEST-CREE TWO
145F CANADIAN SYLLABICS TWOO
1460 CANADIAN SYLLABICS WEST-CREE TWOO
1461 CANADIAN SYLLABICS TWA
1462 CANADIAN SYLLABICS WEST-CREE TWA
1463 CANADIAN SYLLABICS TWAA
1464 CANADIAN SYLLABICS WEST-CREE TWAA
1465 CANADIAN SYLLABICS NASKAPI TWAA
1466 CANADIAN SYLLABICS T
1467 CANADIAN SYLLABICS TTE
1468 CANADIAN SYLLABICS TTI
1469 CANADIAN SYLLABICS TTO
146A CANADIAN SYLLABICS TTA
146B CANADIAN SYLLABICS KE
146C CANADIAN SYLLABICS KAAI
146D CANADIAN SYLLABICS KI
146E CANADIAN SYLLABICS KII
146F CANADIAN SYLLABICS KO
1470 CANADIAN SYLLABICS | |
<gh_stars>1-10
#
# Copyright (c) 2018, 2021 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import copy
import inspect
from WBC import constants
from WBC import fermentables
from WBC.getparam import getparam
from WBC.addition import Addition, Opaque
from WBC.utils import *
from WBC.units import *
from WBC.units import _Mass, _Strength, _Temperature, _Volume, _Duration
from WBC.hop import Hop
from WBC.mash import Mash
from WBC.worter import Worter
from WBC import brewutils, timespec
from WBC.timespec import Timespec, Boil
def checkconfig():
    """Validate the runtime configuration; currently a no-op that succeeds."""
    return True
def _ismass(unit):
    """Return True if *unit* is a Mass quantity.

    Fix: renamed the parameter, which shadowed the builtin ``type``.
    """
    return istype(unit, Mass)
def _isvolume(unit):
    """Return True if *unit* is a Volume quantity.

    Fix: renamed the parameter, which shadowed the builtin ``type``.
    """
    return istype(unit, Volume)
def _ismassvolume(unit):
    """Return True if *unit* is a (Mass, Volume) ratio tuple.

    Fix: renamed the parameter, which shadowed the builtin ``type``.
    """
    return istupletype(unit, (Mass, Volume))
def _isvolumevolume(unit):
    """Return True if *unit* is a (Volume, Volume) ratio tuple.

    Fix: renamed the parameter, which shadowed the builtin ``type``.
    """
    return istupletype(unit, (Volume, Volume))
class WBC:
    """Namespace/marker class; carries no behavior in the visible code."""
    pass
class Recipe:
    def __init__(self, name, yeast, volume, boiltime = None):
        """Create a recipe.

        name:     recipe name (stored in the input dict)
        yeast:    yeast description (stored in the input dict)
        volume:   inherent recipe volume, or None for relative-only recipes
        boiltime: boil length, also registered with the timespec module
        """
        # volume may be None if the recipe contains only relative units
        if volume is not None:
            checktype(volume, Volume)
        input = {}
        input['name' ] = name
        input['yeast'] = yeast
        input['notes'] = {}
        input['notes']['water'] = None
        input['notes']['brewday'] = []
        input['notes']['recipe'] = []
        self.boiltime = input['boiltime'] = boiltime
        timespec.set_boiltime(boiltime)
        self.input = input
        self.volume_inherent = volume
        # volume_set / volume_scaled are user overrides (see set_volume*())
        self.volume_set = self.volume_scaled = None
        # names of inputs that require an inherent volume to resolve
        self.needinherent = []
        self.hops = []
        self.hops_recipeIBUBUGU = None
        self.ferms_in = []
        # the current "best guess" for additional extract needed
        # to reach final-strength target (for applicable recipes)
        self.fermentable_extadj = _Mass(0)
        self.final_strength = None
        # cached educated guess of the water
        # amount used in the forward calculation
        self.waterguess = None
        self.opaques = []
        self.input['stolen_wort'] = Worter()
        self._boiladj = _Mass(0)
        self.hopsdrunk = {'kettle':_Volume(0), 'fermentor':_Volume(0),
            'package':_Volume(0)}
        self.fermentables = []
        # 0 = calculate() not yet run; _final_volume() asserts on this
        self._calculatestatus = 0
        self.mash = Mash()
        # source locations already executed by _once()
        self._oncelst = []
        sysparams.processdefaults()
    def paramfile(self, filename):
        """Load system parameters from *filename*.

        NOTE(review): this calls ``Sysparams`` (capital S) while __init__
        calls ``sysparams.processdefaults()`` — confirm the capitalized
        name really exists in the utils namespace.
        """
        Sysparams.processfile(filename)

    # sentinel value for fermentable_bypercent(): "whatever % remains"
    THEREST= 'rest'
def _final_volume(self):
assert(self._calculatestatus > 0)
v = [self.volume_scaled, self.volume_set, self.volume_inherent ]
return ([x for x in v if x is not None] + [None])[0]
    def _final_extract(self):
        """Extract mass needed to reach the strength anchor, or None if unset."""
        if self.final_strength is None:
            return None
        w = Worter()
        w.set_volstrength(self._final_volume(), self.final_strength)
        return w.extract()
def _grain_absorption(self):
rv = getparam('grain_absorption')
absorp = rv[0] / rv[1]
return absorp
    def _boiloff(self):
        """Volume lost to evaporation: boiloff_perhour scaled by boil hours."""
        if self.boiltime is None:
            return _Volume(0)
        return _Volume(getparam('boiloff_perhour')
            * (self.boiltime/60.0))
    def _reference_temp(self):
        """Temperature at which volumes are referenced (ambient parameter)."""
        return getparam('ambient_temp')
def _needinherentvol(self, what):
if what not in self.needinherent:
self.needinherent.append(what)
#
# various scaling routines
#
    def _scale(self, what):
        """Scale a Mass/Volume from the inherent volume to the scaled volume.

        No-op when the recipe is not being scaled.
        """
        if self.volume_inherent is None or self.volume_scaled is None:
            return what
        assert(isinstance(what, Mass) or isinstance(what, Volume))
        scale = self.volume_scaled / self.volume_inherent
        # rebuild in the same unit class so the default unit is preserved
        return what.__class__(scale * what, what.defaultunit)
    def _xvol2x(self, x):
        """Resolve an (amount, per-volume) ratio tuple against the final volume."""
        assert(istupletype(x, (Mass, Volume))
            or istupletype(x, (Volume, Volume)))
        return x[0].__class__(x[0]/x[1] * self._final_volume(),
            x[0].defaultunit)
#
# other helpers
#
    def _once(self, callme, *args):
        """Invoke *callme(*args)* at most once per calling source location.

        The caller is identified by file/line/function of the stack frame
        above this one, so repeated calls from the same line are skipped.
        """
        cf = inspect.stack()[1]
        caller = cf[1] + '/' + str(cf[2]) + '/' + cf[3]
        if caller in self._oncelst:
            return
        self._oncelst.append(caller)
        callme(*args)
    def _addunit(self, checkfuns, unit, fname):
        """Validate *unit* against the accepted type-check functions and
        return the resolver used to scale it to the recipe volume.

        Raises PilotError when no check function accepts the unit.
        """
        # first check function that accepts the unit, else None
        rv = ([x for x in checkfuns if x(unit)] + [None])[0]
        if rv is None:
            raise PilotError('invalid input type for: ' + fname)
        if isinstance(unit, tuple):
            # ratio tuples are resolved against the final volume
            scale = self._xvol2x
        else:
            # absolute units scale with the recipe and need an inherent volume
            scale = self._scale
            self._needinherentvol(fname)
        return scale
#
# user interfaces
#
    def set_volume_and_scale(self, volume):
        """Set the final volume, scaling inherent ingredient amounts to it."""
        checktype(volume, Volume)
        self.volume_scaled = volume
    def set_volume(self, volume):
        """Set the final volume without scaling ingredient amounts."""
        checktype(volume, Volume)
        self.volume_set = volume
    # set opaque water notes to be printed with recipe
    def set_waternotes(self, waternotes):
        """Attach free-form water notes; warns when overwriting existing notes."""
        checktype(waternotes, str)
        if self.input['notes']['water'] is not None:
            warn('water notes already set')
        self.input['notes']['water'] = waternotes
    def add_brewdaynote(self, note):
        """Append a free-form brewday note (printed with the recipe)."""
        checktype(note, str)
        self.input['notes']['brewday'].append(note)
    def add_recipenote(self, note):
        """Append a free-form recipe note (printed with the recipe)."""
        checktype(note, str)
        self.input['notes']['recipe'].append(note)
#
# Hops.
#
    def _hopstore(self, hop, amount, resolver, time, cookie):
        """Record a hop addition; returns the Addition for further tweaking."""
        checktypes([(hop, Hop), (time, Timespec)])
        a = Addition(hop, amount, resolver, time, cookie = cookie)
        self.hops.append(a)
        return a
def hop_byunit(self, name, unit, time):
scale = self._addunit([_ismass, _ismassvolume], unit, __name__)
self._hopstore(name, unit, scale, time, 'm')
    # alpha acid mass
    def hop_byAA(self, hop, mass, time):
        """Add a hop by the mass of alpha acids it should contribute."""
        checktype(mass, Mass)
        self._needinherentvol('hop_byAA')
        # convert alpha-acid mass to total hop mass via the hop's AA fraction
        amount = _Mass(mass / hop.aa)
        self._hopstore(hop, amount, self._scale, time, 'm')
    # alpha acid mass per final volume
    def hop_byAAvolratio(self, hop, mv, time):
        """Add a hop by alpha-acid mass per final volume."""
        (mass, vol) = mv
        checktypes([(mass, Mass), (vol, Volume)])
        amount = (_Mass(mass / hop.aa), vol)
        self._hopstore(hop, amount, self._xvol2x, time, 'm')
def hop_byIBU(self, hop, IBU, time):
a = self._hopstore(hop, None, None, time, 'i')
a.info = IBU
def _setIBUBUGU(self, hop, time, value, what):
if self.hops_recipeIBUBUGU is not None:
raise PilotError('total IBU/BUGU specified >once')
checktypes([(hop, Hop), (time, Timespec)])
self.hops_recipeIBUBUGU = {
'hop': hop,
'time': time,
'value': value,
'type': what,
}
def hop_byrecipeIBU(self, hop, IBU, time):
if IBU > 120.0:
warn("Hop \"" + hop.name + "\" has high IBU ("
+ str(IBU) + ")\n")
self._setIBUBUGU(hop, time, IBU, 'IBU')
def hop_byrecipeBUGU(self, hop, BUGU, time):
if BUGU > 2.0:
warn("Hop \"" + hop.name + "\" has high BUGU ("
+ str(BUGU) + ")\n")
self._setIBUBUGU(hop, time, BUGU, 'BUGU')
    # opaque additions. not used for in-recipe calculations,
    # just printed out in timed additions.
    def _opaquestore(self, opaque, amount, resolver, time):
        """Record an opaque (non-calculated, print-only) addition."""
        checktype(time, Timespec)
        a = Addition(Opaque(opaque), amount, resolver, time)
        self.opaques.append(a)
def opaque_byunit(self, name, unit, time):
scale = self._addunit([_ismass, _isvolume,
_ismassvolume, _isvolumevolume], unit, __name__)
self._opaquestore(name, unit, scale, time)
def opaque_byopaque(self, opaque, ospec, time):
checktype(time, Timespec)
if ospec.__class__ != str:
raise PilotError('opaque spec must be a string')
self._opaquestore(opaque, ospec, None, time)
    def anchor_bystrength(self, strength):
        """Anchor fermentable amounts to a target final strength (set once)."""
        checktype(strength, Strength)
        if self.final_strength is not None:
            raise PilotError('final strength already set')
        self.final_strength = strength
        self.input['strength'] = strength
    def _fermstore(self, name, amount, resolver, time, cookie):
        """Record a fermentable addition; returns the Addition.

        Each fermentable may be specified at most once per timespec stage.
        """
        ferm = fermentables.Get(name)
        # duplicate check is case-insensitive on the fermentable name
        v = [x for x in self.ferms_in
            if x.obj.name.lower() == name.lower() and x.time == time ]
        if len(v) > 0:
            raise PilotError('fermentables may be specified max '
                + 'once per stage')
        checktypes([(ferm, fermentables.Fermentable), (time, Timespec)])
        a = Addition(ferm, amount, resolver, time, cookie = cookie)
        self.ferms_in.append(a)
        return a
def fermentable_byunit(self, name, unit, time):
scale = self._addunit([_ismass, _ismassvolume], unit, __name__)
self._fermstore(name, unit, scale, time, 'm')
    # percent of fermentable's mass, not extract's mass
    def fermentable_bypercent(self, name, percent, time):
        """Add a fermentable as a percentage of the grist, or as THEREST
        (the remainder after all explicit percentages)."""
        if percent != self.THEREST:
            if percent <= 0:
                raise PilotError('grain percentage must be '
                    + 'positive (it is a fun thing!)')
            # small epsilon tolerates float rounding in the running total
            if sum([x.get_amount() for x in self.ferms_in
                if x.cookie == 'p']) + percent > 100.0001:
                raise PilotError('captain, I cannot change the'
                    + ' laws of math; 100% fermentables max!')
        self._fermstore(name, percent, None, time,
            'p' if percent != self.THEREST else 'r')
    # indicate that we want to "borrow" some wort at the preboil stage
    # for e.g. building starters.
    def steal_preboil_wort(self, vol, strength):
        """Reserve preboil wort of the given volume and strength."""
        checktypes([(vol, Volume), (strength, Strength)])
        self.input['stolen_wort'].set_volstrength(vol, strength)
    def _fermentable_percentage(self, what, theoretical=False):
        """Extract yield %, discounted by mash efficiency unless theoretical."""
        f = what.obj
        percent = f.extract.cgai()
        # only mashed (converting) fermentables suffer mash inefficiency
        if f.conversion and not theoretical:
            percent *= getparam('mash_efficiency')/100.0
        return percent
    def _fermentable_extract(self, what, theoretical=False):
        """Extract mass obtained from a single fermentable addition."""
        return _Mass(what.get_amount()
            * self._fermentable_percentage(what, theoretical)/100.0)
def _fermentables_bytimespec(self, when):
spec = timespec.stage2timespec[when]
return [x for x in self.fermentables \
if x.time.__class__ in spec]
def _fermentables_massof(self, fermlist):
return sum(x.get_amount() for x in fermlist)
def _fermfilter(self, sel):
return [x for x in self.ferms_in if x.cookie in tuple(sel)]
    def _extract_bytimespec(self, stage, theoretical=False):
        """Total extract mass contributed by fermentables of *stage*."""
        assert(stage in Timespec.stages)
        return _Mass(sum([self._fermentable_extract(x, theoretical) \
            for x in self._fermentables_bytimespec(stage)]))
    def _sanity_check(self):
        """Internal consistency checks (currently none)."""
        # XXX: none at the time
        pass
    # set initial guesses for fermentables
    def _dofermentables_preprocess(self):
        """Validate by-percent fermentables and split the missing percentage
        evenly among the 'rest' entries."""
        rlst, plst = self._fermfilter('r'), self._fermfilter('p')
        if len(rlst + plst) == 0:
            return
        ptot = sum([x.get_amount() for x in plst])
        if len(rlst) == 0:
            # without 'rest', the explicit percentages must total 100%
            if abs(ptot - 100.) > .00001:
                raise PilotError('need 100% fermentables. '
                    + 'literally forgot "rest"?')
            return
        missing = 100. - ptot
        if missing > .000001:
            for f in rlst:
                f.set_amount(missing / len(rlst))
        # note: by-percent fermentables are 100% here.
        # all fermentables (if by-mass are specified)
        # might be over. we'll adjust the guess for
        # "rest" later so that sum of fermentables is 100%.
        assert(abs(sum([x.get_amount() for x in rlst+plst])
            - 100.) < .000001)
def _dofermentables_bypercent(self, ferms):
#
#
# Calculate fermentables given as percentages + strength.
#
# We do it iteratively, since analytically figuring out
# a loss-function is "difficult" (it depends on the
# strength, stage the fermentable is added at, amount
# of hop thirst, etc).
#
#
# Guess extract we get from "bymass" fermentables.
mext = _Mass(sum([self._fermentable_extract(x) for x in ferms]))
extract = self._final_extract() + self.fermentable_extadj
# set the amount of extract we need from by-percent
# fermentables (= total - yield_bymass)
# per-mass additions
if mext > extract:
raise PilotError('strength anchor and '
'by-mass addition mismatch (overshooting strength)')
extract -= mext
# produce one best-current-guess for fermentable masses.
def guess(f_in):
allp = self._fermfilter(('r', 'p'))
# solve for the total mass of fermentables
# we need to reach our extract goal:
#
# extract = yield1 * m1 + yield2 * m2 + ...
# where yieldn = whatever extract we get out of the
# mass (e.g. ~80% for honey, 100%
# for white sugar, masheff * extract
# for malts, ...)
# and mn = %n * totmass
# and then solve: totmass = extract / (sum(yieldn*pn))
thesum = sum([self._fermentable_percentage(x)/100.0
* x.get_amount()/100.0 for x in allp])
totmass = _Mass(extract / thesum)
f_out = []
f_out += f_in
# set the masses of each individual fermentable
for x in allp:
# limit mass to 0.1g accuracy
m = (int(10000*(x.get_amount()/100.0 * totmass))
/ 10000.0)
n = copy.copy(x)
# save original percent in info for later
n.info = x.get_amount()
n.set_amount(_Mass(m))
f_out.append(n)
return f_out
# handle the situation where we have:
# 1) mixed mass/percent fermentable quantities
# AND
# 2) one or more "rest" grains
#
# adjust the non-rest percentages to be as specified.
# not sure if this could be solved analytically instead of
# with iteration, but, as usual, my head started hurting
# when thinking about it.
#
# we need to both reduce "rest" until the sum of the
# percentages is 100% (otherwise the fixed percentages
# are | |
# -*- coding: utf-8 -*-
"""Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11wBui-tMpkLXlYGZAgE7-77MpWtpKRXP
Package Imports
"""
import numpy as np
import pandas as pd
from collections import Counter
# imports for Part II
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from numpy.linalg import norm
import matplotlib.pyplot as plt
"""# **Part I**
### 1. K-NearestNeighbour Algorithm
"""
def KNN(X_train, X_test, y_train, k=8):
    """Predict a label for each test row by k-nearest-neighbour majority vote.

    :type X_train: pandas.DataFrame
    :type X_test: pandas.DataFrame
    :type y_train: pandas.Series
    :rtype: list
    """
    train = X_train.to_numpy()
    test = X_test.to_numpy()
    labels = y_train.to_numpy()
    predictions = []
    for row in test:
        # Euclidean distance from this test row to every training row
        dists = np.sqrt(np.sum(np.square(np.subtract(train, row)), axis=1))
        nearest_labels = labels[dists.argsort()][:k]
        votes = Counter(nearest_labels)
        predictions.append(votes.most_common(1)[0][0])
    return predictions
"""### 2. Random Forest"""
def RandomForest(X_train, Y_train, X_test):
    """Train a from-scratch random forest and return predictions for X_test.

    :type X_train: pandas.DataFrame
    :type Y_train: pandas.Series
    :type X_test: numpy.ndarray
    :rtype: numpy.ndarray

    Forest of entropy-split decision trees; each tree sees a random row
    sample and a random feature subset.  np.random is seeded (50) for
    reproducibility.
    """
    class RandomForestClassifier:
        def __init__(self, X, y, max_samples, max_features=10, n_estimators=10, max_depth=6, min_samples_leaf=1):
            np.random.seed(50)
            self.X, self.y = X, y
            self.n_estimators = n_estimators # Number of Trees in a forest, default = 20
            self.max_depth = max_depth # Maximum depth of a tree, default = None.
            self.max_features = max_features # Maximum number of features to pass onto each tree.
            self.min_sample_leaf = min_samples_leaf # Minimum number of samples to be present in a leaf.
            self.max_samples = max_samples # Maximum random samples to draw from X.
            self.unique_elements = np.unique(self.y)
            self.trees = [self.grow_tree() for tree in range(n_estimators)]
        def grow_tree(self): # Convert to fit if to pass only X and y
            # random row sample (without replacement) and feature subset
            indices = np.random.permutation(len(self.y))[:self.max_samples]
            feature_indices = np.random.permutation(self.X.shape[1])[:self.max_features]
            tree = DecisionTree(self.X.iloc[indices], self.y.iloc[indices], self.unique_elements, self.max_features, feature_indices, indices=np.array(range(self.max_samples)), max_depth=self.max_depth, min_sample_leaf = self.min_sample_leaf)
            return tree
        def predict(self, X_test):
            # column-wise majority vote across the individual trees
            predicted = [tree.predict(X_test) for tree in self.trees]
            column_mode = pd.DataFrame(predicted)
            mode_df = []
            for x in column_mode.columns:
                mode = Counter(column_mode[x])
                mode = mode.most_common()[0][0]
                mode = mode_df.append(mode)
            self.random_forest_predictions = pd.DataFrame(mode_df)
            self.random_forest_predictions = (self.random_forest_predictions.reset_index(drop=True)).to_numpy()
            return self.random_forest_predictions
        def calculate_accuracy(self, y_test):
            """Percentage of predictions matching y_test (call predict() first)."""
            y_test = (y_test.reset_index(drop=True)).to_numpy()
            accuracy = np.sum(np.where(self.random_forest_predictions.flatten()==y_test.flatten(),1,0))/(len(y_test))
            return accuracy*100
    class DecisionTree:
        def __init__(self, X, y, master_unique_elements, max_features, feature_indices, indices, max_depth, min_sample_leaf):
            self.X = X
            self.y = y
            self.max_features = max_features
            self.indices = indices
            self.feature_indices = feature_indices
            self.max_depth = max_depth
            self.min_sample_leaf = min_sample_leaf
            self.m = len(indices) # Number of samples
            self.n = X.shape[1] # Number of features
            self.master_unique_elements = master_unique_elements
            self.unique_elements = list(set(self.y))
            # NOTE(review): counts classes in a de-duplicated list (always
            # 0/1) and appears unused anywhere below — confirm.
            self.target_count = [self.unique_elements.count(target) for target in self.master_unique_elements]
            self.score = float('inf') # Initiating a Inf Score
            self.classify = float('inf')
            # leaf when: node is pure, depth exhausted, or too few samples
            if len(np.unique(self.y.values[self.indices])) == 1 or (self.max_depth <= 0) or (len(self.indices) <= min_sample_leaf): #
                self.classify = self.classification(y.values[self.indices])
            else:
                self.grow_tree()
        def split_data(self):
            # values of the chosen split feature for this node's rows
            return self.X.values[self.indices, self.feature_index]
        def classification(self, y):
            # majority class of y
            unique_classes, counts = np.unique(y, return_counts=True)
            classification = unique_classes[counts.argmax()]
            return classification
        def grow_tree(self):
            for feature_index in range(self.max_features):
                self.best_split(feature_index)
            if self.score == float('inf') or self.max_depth <= 0:
                return
            data_split = self.split_data()
            left = np.nonzero(data_split <= self.split_threshold)
            right = np.nonzero(data_split > self.split_threshold)
            # NOTE(review): np.nonzero returns a 1-tuple, so len(left) is
            # always 1 and this guard never fires — likely meant len(left[0]).
            if len(left)== 0 or len(right) == 0:
                self.classify = self.classification(self.y.values[self.indices])
            left_indices = np.random.permutation(self.X.shape[1])[:self.max_features]
            right_indices = np.random.permutation(self.X.shape[1])[:self.max_features]
            self.left = DecisionTree(self.X, self.y, self.master_unique_elements, self.max_features, left_indices, self.indices[left], max_depth=self.max_depth-1, min_sample_leaf=self.min_sample_leaf)
            self.right = DecisionTree(self.X, self.y, self.master_unique_elements, self.max_features, right_indices, self.indices[right], max_depth=self.max_depth-1, min_sample_leaf=self.min_sample_leaf)
        def calculate_entropy(self, y):
            # Shannon entropy of the label distribution
            _, counts = np.unique(y, return_counts=True)
            prob = counts / counts.sum()
            entropy = sum(prob * -np.log2(prob))
            return entropy
        def calculate_column_entropy(self, below_threshold, above_threshold, y_data_below, y_data_above):
            # entropy of a candidate split, weighted by partition sizes
            n = len(below_threshold) + len(above_threshold)
            prob_below_threshold = len(below_threshold) / n
            prob_above_threshold = len(above_threshold) / n
            overall_entropy = (prob_below_threshold * self.calculate_entropy(y_data_below) + prob_above_threshold * self.calculate_entropy(y_data_above))
            return overall_entropy
        def best_split(self, feature_index):
            X, y = self.X.values[self.indices, feature_index], self.y.values[self.indices]
            sorted_indices = np.argsort(X)
            sort_x, sort_y = X[sorted_indices], y[sorted_indices]
            # Entropy Calculation
            for row in range(self.m-1):
                data_below = sort_x[:row+1]
                data_above = sort_x[row+1:]
                current_X = sort_x[row]
                y_data_below = sort_y[:row+1]
                y_data_above = sort_y[row+1:]
                current_score = self.calculate_column_entropy(data_below, data_above, y_data_below, y_data_above)
                # NOTE(review): compares the *unsorted* X at row/row-1 (and
                # wraps to X[-1] at row 0) — confirm this skip is intended.
                if X[row] == X[row-1]:
                    continue
                if current_score < self.score:
                    self.feature_index, self.score, self.split_threshold = feature_index, current_score, current_X
        def predict(self, X_test):
            X_test = np.array(X_test)
            predicted = np.array([self.predict_row(predict) for predict in X_test])
            return predicted
        def predict_row(self, predict):
            # leaf: return stored class (or the node's mode as fallback)
            if self.max_depth == 0 or self.classify != float('inf'):
                if self.classify != float('inf'):
                    return self.classify
                else:
                    return int(self.y[self.indices].mode().sample(1))
            predicted = self.left if predict[self.feature_index] <= self.split_threshold else self.right
            return predicted.predict_row(predict)
    random_forest_classifier = RandomForestClassifier(X_train, Y_train, 950, 30, 20, 11)
    y_pred = random_forest_classifier.predict(X_test)
    # print("Accuracy of Random Forest: " + str(random_forest_classifier.calculate_accuracy(y_test)))
    return y_pred
"""### 3. PCA"""
def PCA(X_train, N):
    """Project *X_train* onto its top-N principal components.

    :type X_train: numpy.ndarray
    :type N: int
    :rtype: numpy.ndarray of shape (n_samples, N)

    Fixes: the original projected the module-level globals ``data`` /
    ``data_features`` instead of the ``X_train`` argument (NameError when
    used standalone), and used ``reshape`` where a column-stack/transpose
    was needed, scrambling the projection matrix.
    """
    X = np.asarray(X_train)
    covariance_matrix = np.cov(X.T)
    eigen_values, eigen_vectors = np.linalg.eig(covariance_matrix)
    # pair each eigenvalue magnitude with its (column) eigenvector
    eigen_pairs = [(np.abs(eigen_values[i]), eigen_vectors[:, i])
                   for i in range(len(eigen_values))]
    # largest eigenvalue first; sort on the value only (vectors don't compare)
    eigen_pairs.sort(key=lambda pair: pair[0], reverse=True)
    # (n_features x N) projection matrix from the top-N eigenvectors
    projection = np.column_stack([eigen_pairs[i][1] for i in range(N)])
    return X.dot(projection)
"""### 4. K-Means Clustering"""
def Kmeans(X_train, N):
    """Cluster X_train into N groups with Lloyd's k-means (max 200 iterations).

    :type X_train: numpy.ndarray
    :type N: int
    :rtype: List[numpy.ndarray] — one array of points per cluster

    Fix: the original reused loop variable ``i`` for both the outer
    iteration loop and the inner centroid loops (harmless in Python but
    confusing); loop variables are now distinct.  Initial centroids are
    drawn from the data via np.random (seed externally for determinism).
    """
    points = np.array(X_train)
    # random initial centroids chosen from the data, without replacement
    centroids = points[np.random.choice(points.shape[0], N, replace=False)]
    for _ in range(200):
        old_centroids = centroids
        distance = np.zeros((points.shape[0], N))
        new_centroids = np.zeros((N, points.shape[1]))
        for j in range(N):
            # squared Euclidean distance of every point to centroid j
            distance[:, j] = np.square(norm(points - old_centroids[j, :], axis=1))
        classes = np.argmin(distance, axis=1)
        for j in range(N):
            # NOTE(review): an empty cluster yields a nan centroid here,
            # as in the original — confirm acceptable for the use case.
            new_centroids[j, :] = np.mean(points[classes == j, :], axis=0)
        centroids = new_centroids
        if np.all(old_centroids == centroids):
            break
    return [points[classes == i] for i in range(N)]
def Accuracy(y_true, y_pred):
    """Percentage of positions where y_pred equals y_true.

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    hits = sum(1 for actual, guess in zip(y_true, y_pred) if actual == guess)
    return hits / len(y_true) * 100
"""### 6. Recall"""
def Recall(y_true, y_pred):
    """Macro-averaged recall over the confusion-matrix rows (true classes).

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    cm = ConfusionMatrix(y_true, y_pred)
    _, num_classes = cm.shape
    per_class = [cm[label, label] / cm[label, :].sum()
                 for label in range(num_classes)]
    return sum(per_class) / num_classes
"""### 7. Precision"""
def Precision(y_true, y_pred):
    """Macro-averaged precision over the confusion-matrix columns (predictions).

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: float
    """
    cm = ConfusionMatrix(y_true, y_pred)
    num_classes, _ = cm.shape
    per_class = [cm[label, label] / cm[:, label].sum()
                 for label in range(num_classes)]
    return sum(per_class) / num_classes
"""### 8. Within Cluster Sum of Squares (WCSS)"""
def WCSS(clusters):
    """Within-cluster sum of squared distances to each cluster's centroid.

    :type clusters: List[numpy.ndarray]
    :rtype: float
    """
    total = 0
    for cluster in clusters:
        centroid = np.mean(cluster, axis=0)
        total += np.sum(np.square(np.subtract(centroid, cluster)))
    return total
"""# **Part II**
### Implementation of Supervised Algorithms: SVM, Logistics Regression, Decision Tree, and KNN using Scikit-learn
"""
def SklearnSupervisedLearning(X_train, Y_train, X_test):
    """Fit SVM, logistic regression, decision tree and KNN classifiers and
    return their test-set predictions.

    :type X_train: numpy.ndarray
    :type Y_train: numpy.ndarray
    :type X_test: numpy.ndarray
    :rtype: List[numpy.ndarray] — [svm, logistic, decision tree, knn]
    """
    # SVM with RBF kernel (local name avoids shadowing the sklearn.svm module)
    svm_clf = SVC(kernel='rbf', C=10, gamma=1)
    svm_pred = svm_clf.fit(X_train, Y_train).predict(X_test)
    # Logistic regression
    logit_clf = LogisticRegression(max_iter=10000, C=50)
    logistic_pred = logit_clf.fit(X_train, Y_train).predict(X_test)
    # Decision tree with gini criterion
    tree_clf = DecisionTreeClassifier(criterion="gini", random_state=100,
                                      max_depth=7, min_samples_leaf=5)
    decision_tree_pred = tree_clf.fit(X_train, Y_train).predict(X_test)
    # K-nearest neighbours
    knn_clf = KNeighborsClassifier(n_neighbors=8)
    knn_pred = knn_clf.fit(X_train, Y_train).predict(X_test)
    return [svm_pred, logistic_pred, decision_tree_pred, knn_pred]
"""### Ensemble model using the voting classifier"""
def SklearnVotingClassifier(X_train, Y_train, X_test):
    """Fit the four base classifiers plus a hard-voting ensemble and return
    all five prediction arrays.

    :type X_train: numpy.ndarray
    :type Y_train: numpy.ndarray
    :type X_test: numpy.ndarray
    :rtype: List[numpy.ndarray] — [logistic, decision tree, svm, knn, ensemble]

    Bug fix: every ``fit`` call in the original trained on a module-level
    global ``y_train`` instead of the ``Y_train`` parameter, so the
    function only worked by accident when such a global existed.
    """
    estimators = []
    svm_clf = SVC(kernel='rbf', C=10, gamma=1)
    estimators.append(('svm', svm_clf))
    svm_pred = svm_clf.fit(X_train, Y_train).predict(X_test)
    logistic_regression = LogisticRegression(max_iter=10000, C=50)
    estimators.append(('logistic', logistic_regression))
    logistic_pred = logistic_regression.fit(X_train, Y_train).predict(X_test)
    gini_tree = DecisionTreeClassifier(criterion="gini", random_state=42,
                                       max_depth=8, min_samples_leaf=5)
    estimators.append(('decision', gini_tree))
    decision_tree_pred = gini_tree.fit(X_train, Y_train).predict(X_test)
    knn = KNeighborsClassifier(n_neighbors=8)
    estimators.append(('knn', knn))
    knn_pred = knn.fit(X_train, Y_train).predict(X_test)
    # hard-voting ensemble over the same estimator specs
    ensemble = VotingClassifier(estimators)
    ensemble_pred = ensemble.fit(X_train, Y_train).predict(X_test)
    return [logistic_pred, decision_tree_pred, svm_pred, knn_pred, ensemble_pred]
"""### Confusion Matrix"""
def ConfusionMatrix(y_true, y_pred):
    """11x11 confusion matrix for labels 1..11 (row = true, column = predicted).

    :type y_true: numpy.ndarray
    :type y_pred: numpy.ndarray
    :rtype: numpy.ndarray
    """
    counts = np.zeros((12, 12))
    for actual, predicted in zip(y_true, y_pred):
        counts[int(actual)][int(predicted)] += 1
    # drop the unused row/column for label 0
    return counts[1:, 1:].copy()
def confusion_matrix_plot(confusion_matrix, ax):
    """Render one 11x11 confusion matrix as an annotated heatmap on *ax*."""
    ax.imshow(np.array(confusion_matrix), cmap=plt.get_cmap("Pastel1"), interpolation='nearest')
    # write the integer count into every cell
    for x in range(len(confusion_matrix)):
        for y in range(len(confusion_matrix[0])):
            ax.annotate(int(confusion_matrix[x][y]), xy=(y, x), horizontalalignment='center',verticalalignment='center')
    # tick labels 1..11 matching the class labels used by ConfusionMatrix
    ylabels=['1','2','3','4','5','6','7','8','9','10','11']
    xlabels=['1','2','3','4','5','6','7','8','9','10','11']
    ax.set_yticks(range(confusion_matrix.shape[0]))
    ax.set_yticklabels(ylabels)
    ax.set_xticks(range(confusion_matrix.shape[1]))
    ax.set_xticklabels(xlabels)
    # NOTE(review): mutates global matplotlib style on every call — confirm intended
    plt.style.use('classic')
def confusion_matrix_plots(y_preds_ensemble):
fig,(ax0,ax1, ax2,ax3, ax4) = plt.subplots(nrows=1, ncols=5, sharey=False, sharex=False, figsize=(40,50))
fig.patch.set_facecolor('xkcd:white')
ax0.set_title('Logistic Confusion Matrix')
ax1.set_title('Decision Tree Confusion Matrix')
ax2.set_title('SVM Confusion Matrix')
ax3.set_title('KNN Confusion Matrix')
ax4.set_title('Ensemble Confusion Matrix')
confusion_matrix_plot(ConfusionMatrix(y_test, | |
<reponame>zysszy/Recoder<filename>testDefect4jv_2.py<gh_stars>10-100
import os
import javalang
#from ast import nodes
from graphviz import Digraph
import json
import pickle
from tqdm import tqdm
import numpy as np
from run import *
from stringfycode import stringfyRoot
from copy import deepcopy
import time
import io
import subprocess
from Searchnode import Node
linenode = ['Statement_ter', 'BreakStatement_ter', 'ReturnStatement_ter', 'ContinueStatement', 'ContinueStatement_ter', 'LocalVariableDeclaration', 'condition', 'control', 'BreakStatement', 'ContinueStatement', 'ReturnStatement', "parameters", 'StatementExpression', 'return_type']
#os.environ["CUDA_VISIBLE_DEVICES"]="1, 4"
def getLocVar(node):
    """Collect (variable-name, declaring-node) pairs from the AST under *node*.

    Variables come from VariableDeclarator, FormalParameter and
    InferredFormalParameter nodes; the name is the first child of the
    node's 'name' child.

    Fix: the original repeated an identical extraction block three times,
    once per node kind; the duplication is collapsed into one branch.
    """
    varnames = []
    if node.name in ('VariableDeclarator', 'FormalParameter',
                     'InferredFormalParameter'):
        currnode = -1
        for x in node.child:
            if x.name == 'name':
                currnode = x
                break
        # like the original, raises if no 'name' child exists
        varnames.append((currnode.child[0].name, node))
    for x in node.child:
        varnames.extend(getLocVar(x))
    return varnames
# global preorder counter used by setid(); callers reset it before a walk
n = 0
def setid(root):
    """Assign depth-first preorder ids from the module-level counter ``n``."""
    global n
    root.id = n
    n += 1
    for x in root.child:
        setid(x)
def solveLongTree(root, subroot):
    """Trim *root* to a <=1000-token window around *subroot* and build
    variable rename/type maps.

    Returns (troot, vardic, typedic) where troot is either *root* itself
    (when small enough) or a synthetic node wrapping the window, vardic
    maps source names to normalized names (meth0/locN/parN) and typedic
    maps source names to their declared type names.
    """
    global n
    m = 'None'
    troot = 'None'
    # method name = last 'name' child of root (no break, matches original)
    for x in root.child:
        if x.name == 'name':
            m = x.child[0].name
    if len(root.getTreestr().strip().split()) >= 1000:
        # grow outwards from subroot until the enclosing node is too big
        tmp = subroot
        if len(tmp.getTreestr().split()) >= 1000:
            assert(0)
        lasttmp = None
        while True:
            if len(tmp.getTreestr().split()) >= 1000:
                break
            lasttmp = tmp
            tmp = tmp.father
        index = tmp.child.index(lasttmp)
        ansroot = Node(tmp.name, 0)
        ansroot.child.append(lasttmp)
        ansroot.num = 2 + len(lasttmp.getTreestr().strip().split())
        # greedily add siblings after/before while under the token budget
        while True:
            b = True
            afternode = tmp.child.index(ansroot.child[-1]) + 1
            if afternode < len(tmp.child) and ansroot.num + tmp.child[afternode].getNum() < 1000:
                b = False
                ansroot.child.append(tmp.child[afternode])
                ansroot.num += tmp.child[afternode].getNum()
            prenode = tmp.child.index(ansroot.child[0]) - 1
            # NOTE(review): preceding siblings are appended to the END of
            # ansroot.child, not prepended — confirm ordering is intended.
            if prenode >= 0 and ansroot.num + tmp.child[prenode].getNum() < 1000:
                b = False
                ansroot.child.append(tmp.child[prenode])
                ansroot.num += tmp.child[prenode].getNum()
            if b:
                break
        troot = ansroot
    else:
        troot = root
    # renumber the (possibly trimmed) tree and map its variables
    n = 0
    setid(troot)
    varnames = getLocVar(troot)
    fnum = -1
    vnum = -1
    vardic = {}
    vardic[m] = 'meth0'
    typedic = {}
    for x in varnames:
        if x[1].name == 'VariableDeclarator':
            vnum += 1
            vardic[x[0]] = 'loc' + str(vnum)
            # declared type lives two levels up for local declarations
            t = -1
            for s in x[1].father.father.child:
                if s.name == 'type':
                    t = s.child[0].child[0].child[0].name[:-4]
                    break
            assert(t != -1)
            typedic[x[0]] = t
        else:
            fnum += 1
            vardic[x[0]] = 'par' + str(fnum)
            # parameters carry their type as a direct child
            t = -1
            for s in x[1].child:
                if s.name == 'type':
                    t = s.child[0].child[0].child[0].name[:-4]
                    break
            assert(t != -1)
            typedic[x[0]] = t
    return troot, vardic, typedic
def addter(root):
    """Append '_ter' to the names of all leaf nodes under *root* (in place)."""
    if not root.child:
        root.name += "_ter"
    for child in root.child:
        addter(child)
    return
def setProb(r, p):
    """Set the ``possibility`` of *r* and every descendant to *p* (in place)."""
    r.possibility = p
    for child in r.child:
        setProb(child, p)
def getLineNode(root, block, add=True):
    """Collect statement-level ("line") nodes under *root*.

    Skips logging/assert statements; each collected node gets its
    enclosing block-path string attached as ``node.block``.
    """
    ans = []
    block = block + root.name
    for x in root.child:
        if x.name in linenode:
            # logging / assertion statements are not repair targets
            if 'info' in x.getTreestr() or 'assert' in x.getTreestr() or 'logger' in x.getTreestr() or 'LOGGER' in x.getTreestr() or 'system.out' in x.getTreestr().lower():
                continue
            x.block = block
            ans.append(x)
        else:
            # NOTE(review): ``s`` is computed but never used; the recursion
            # always passes the outer ``block`` — confirm that is intended.
            s = ""
            if not add:
                s = block
            else:
                s = block + root.name
            tmp = getLineNode(x, block)
            '''if x.name == 'then_statement' and tmp == []:
                print(tmp)
                print(x.father.printTree(x.father))
                assert(0)'''
            ans.extend(tmp)
    return ans
def getroottree(tokens, isex=False):
    """Rebuild a Node tree from a preorder token stream.

    A "^" token pops one level; any other token (string, or a
    (name, position) tuple) opens a new child node.  Ids are assigned in
    creation order, skipping the "^" markers.
    """
    root = Node(tokens[0], 0)
    currnode = root
    idx = 1
    for token in tokens[1:]:
        if token == "^":
            currnode = currnode.father
            continue
        if isinstance(token, tuple):
            child = Node(token[0], idx)
            child.position = token[1]
        else:
            child = Node(token, idx)
        child.father = currnode
        currnode.child.append(child)
        currnode = child
        idx += 1
    return root
def ismatch(root, subroot):
    """Return True when subroot's children match an ordered (possibly
    non-contiguous) subsequence of root's children by name, recursively."""
    index = 0
    for want in subroot.child:
        # advance to the next child of root with the wanted name
        while index < len(root.child) and root.child[index].name != want.name:
            index += 1
        if index == len(root.child):
            return False
        if not ismatch(root.child[index], want):
            return False
        index += 1
    return True
def findSubtree(root, subroot):
    """Return the first node (preorder) whose name equals subroot's and
    whose structure matches it per ismatch(); None when absent."""
    if root.name == subroot.name and ismatch(root, subroot):
        return root
    for child in root.child:
        found = findSubtree(child, subroot)
        if found:
            return found
    return None
def generateAST(tree):
    """Flatten a javalang AST into a preorder token list where '^' closes
    the most recent node.

    Strings are sanitized (spaces/colons stripped, quoted or tab-bearing
    text collapsed to "<string>"); lists and sets recurse; booleans
    become their str(); empty collections become "empty".
    """
    sub = []
    if not tree:
        return ['None', '^']
    if isinstance(tree, str):
        # sanitize literal text so it cannot break the token format
        tmpStr = tree
        tmpStr = tmpStr.replace(" ", "").replace(":", "")
        if "\t" in tmpStr or "'" in tmpStr or "\"" in tmpStr:
            tmpStr = "<string>"
        if len(tmpStr) == 0:
            tmpStr = "<empty>"
        if tmpStr[-1] == "^":
            tmpStr += "<>"
        sub.append(tmpStr)
        sub.append("^")
        return sub
    if isinstance(tree, list):
        if len(tree) == 0:
            sub.append("empty")
            sub.append("^")
        else:
            for ch in tree:
                subtree = generateAST(ch)
                sub.extend(subtree)
        return sub
    position = None
    if hasattr(tree, 'position'):
        position = tree.position
    curr = type(tree).__name__
    # NOTE(review): the if True/if False scaffolding below is debugging
    # residue; only the (curr, position) branch can ever run.
    if True:
        if False:
            assert(0)#sub.append((str(getLiteral(tree.children)))
        else:
            sub.append((curr, position))
            try:
                for x in tree.attrs:
                    if x == "documentation":
                        continue
                    # skip falsy attributes entirely
                    if not getattr(tree, x):
                        continue
                    '''if x == 'prefix_operators':
                        node = getattr(tree, x)
                        print(type(node))
                        print(len(node))
                        print(node[0])
                        assert(0)
                    if type(getattr(tree, x)).__name__ not in nodes:
                        print(type(getattr(tree, x)).__name__)
                        continue'''
                    sub.append(x)
                    node = getattr(tree, x)
                    if isinstance(node, list):
                        if len(node) == 0:
                            sub.append("empty")
                            sub.append("^")
                        else:
                            for ch in node:
                                subtree = generateAST(ch)
                                sub.extend(subtree)
                    elif isinstance(node, javalang.tree.Node):
                        subtree = generateAST(node)
                        sub.extend(subtree)
                    elif not node:
                        # unreachable: falsy attributes were skipped above
                        continue
                    elif isinstance(node, str):
                        # same string sanitization as the top-level case
                        tmpStr = node
                        tmpStr = tmpStr.replace(" ", "").replace(":", "")
                        if "\t" in tmpStr or "'" in tmpStr or "\"" in tmpStr:
                            tmpStr = "<string>"
                        if len(tmpStr) == 0:
                            tmpStr = "<empty>"
                        if tmpStr[-1] == "^":
                            tmpStr += "<>"
                        sub.append(tmpStr)
                        sub.append("^")
                    elif isinstance(node, set):
                        for ch in node:
                            subtree = generateAST(ch)
                            sub.extend(subtree)
                    elif isinstance(node, bool):
                        sub.append(str(node))
                        sub.append("^")
                    else:
                        print(type(node))
                        assert(0)
                    sub.append("^")
            except AttributeError:
                assert(0)
                pass
            sub.append('^')
            return sub
    else:
        print(curr)
        return sub
'''def setProb(root, subroot, prob):
root.possibility = max(min(max(root.possibility, prob), 0.98), 0.01)
index = 0
assert(len(subroot.child) <= len(root.child))
#print(len(subroot.child), len(root.child))
for x in subroot.child:
while root.child[index].name != x.name:
#print(root.child[index].name, x.name)
index += 1
setProb(root.child[index], x, prob)
index += 1'''
def getSubroot(treeroot):
    """Walk up from *treeroot* and return (line node, method node).

    The line node is the nearest ancestor (treeroot included) whose name is
    in the module-level ``linenode`` collection; the method node is the
    nearest ancestor named MethodDeclaration or ConstructorDeclaration.
    Either element may be None when no such ancestor exists.
    """
    lnode = None
    node = treeroot
    while node:
        if node.name in linenode:
            lnode = node
            break
        node = node.father
    mnode = None
    node = treeroot
    while node:
        if node.name in ('MethodDeclaration', 'ConstructorDeclaration'):
            mnode = node
            break
        node = node.father
    return lnode, mnode
def repair(treeroot, troot, oldcode, filepath, filepath2, patchpath, patchnum, isIf, mode, subroot, vardic, typedic, idxs, testmethods, idss, classname):
    """Generate candidate patches for one buggy location and validate them.

    Asks the model (via ``solveone``) for candidate edit actions, converts
    each action into Java source, splices it between the module-level
    ``precode`` and ``aftercode`` strings, parses the result with javalang,
    and runs the given defects4j test methods against it.  Every candidate
    that passes all tests is written to ``patchpath`` as patch<N>.txt.

    Returns the updated patch counter; stops once 5 patches are found.

    NOTE(review): relies on module globals precode/aftercode/patchdict/model
    and assumes a defects4j checkout in buggy2/ — confirm before reuse.
    """
    global aftercode
    global precode
    # candidate edit actions produced by the model for this location
    actionlist = solveone(troot.printTreeWithVar(troot, vardic), troot.getTreeProb(troot), model, subroot, vardic, typedic, idxs, idss, classname, mode)
    for x in actionlist:
        # skip actions that were already tried in an earlier call
        if x.strip() in patchdict:
            continue
        patchdict[x.strip()] = 1
        root = getroottree(x.split())
        code = stringfyRoot(root, isIf, mode)
        print(precode[-1000:])
        print(code)
        print(aftercode[:1000])
        # count the non-blank lines of the generated snippet
        # NOTE(review): this inner loop shadows the action variable x;
        # harmless because only `code` is used afterwards
        lnum = 0
        for x in code.splitlines():
            if x.strip() != "":
                lnum += 1
            else:
                continue
        print('lnum', lnum, mode)
        if lnum == 1 and 'if' in code:
            if mode == 0:
                continue
            # a bare one-line "if" opens a block: insert a closing brace at
            # the first point where the braces in aftercode balance out
            afterlines = aftercode.splitlines()
            lnum = 0
            rnum = 0
            for p, x in enumerate(afterlines):
                if '{' in x:
                    lnum += 1
                if '}' in x:
                    if lnum == 0:
                        aftercode = "\n".join(afterlines[:p] + ['}'] + afterlines[p:])
                        break
                    lnum -= 1
            tmpcode = precode + "\n" + code + aftercode
            tokens = javalang.tokenizer.tokenize(tmpcode)
            parser = javalang.parser.Parser(tokens)
        else:
            tmpcode = precode + "\n" + code + aftercode
            tokens = javalang.tokenizer.tokenize(tmpcode)
            parser = javalang.parser.Parser(tokens)
        try:
            tree = parser.parse()
        except:
            # unparsable candidate: discard it and try the next action
            print(code)
            continue
        open(filepath2, "w").write(tmpcode)  # NOTE(review): file handle never closed
        bugg = False
        for t in testmethods:
            cmd = 'defects4j test -w buggy2/ -t %s' % t.strip()
            Returncode = ""
            child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
            while_begin = time.time()
            while True:
                Flag = child.poll()  # None while the test process is still running
                print(Flag)
                if Flag == 0:
                    Returncode = child.stdout.readlines()
                    break
                elif Flag != 0 and time.time() - while_begin > 10:
                    # kill test runs that exceed 10 seconds
                    child.kill()
                    break
                else:
                    time.sleep(1)
            log = Returncode
            # defects4j reports success with a final "Failing tests: 0" line
            if len(log) > 0 and log[-1].decode('utf-8') == "Failing tests: 0\n":
                continue
            else:
                bugg = True
                break
        if not bugg:
            print('success')
            patchnum += 1
            wf = open(patchpath + 'patch' + str(patchnum) + ".txt", 'w')
            wf.write(filepath + "\n")
            wf.write("-" + oldcode + "\n")
            wf.write("+" + code + "\n")
            if patchnum >= 5:
                return patchnum
    return patchnum
def getNodeById(root, line):
    """Depth-first search for the first node positioned on *line*.

    Nodes named IfStatement or ForStatement never match themselves (their
    children are still searched).  Returns None when no node matches.
    """
    if root.position and root.position.line == line \
            and root.name not in ('IfStatement', 'ForStatement'):
        return root
    for child in root.child:
        found = getNodeById(child, line)
        if found:
            return found
    return None
def containID(root):
    """Return the source line numbers of *root* and all descendants, pre-order.

    Nodes without a position contribute nothing.
    """
    lines = []
    if root.position is not None:
        lines.append(root.position.line)
    for child in root.child:
        lines += containID(child)
    return lines
def getAssignMent(root):
if root.name == 'Assignment':
return root
for x in root.child:
t = getAssignMent(x)
if | |
"end is restoring the boresight"
self.moveBoresight(
self.begBoreXYDeg,
doWait = False,
)
if self.didTakeImage and (self.doWindow or doRestoreBoresight):
if self.sr.debug:
print "end is taking a final exposure"
exposeCmdDict = self.getExposeCmdDict(doWindow=False)
sr.startCmd(**exposeCmdDict)
def formatBinFactorArg(self):
    """Return the bin=N argument for an expose/centroid/findstars command,
    or "" when the bin factor is not settable (self.defBinFactor is None).
    """
    canSetBin = self.defBinFactor is not None
    return "bin=%d" % (self.binFactor,) if canSetBin else ""
def formatExposeArgs(self, doWindow=True):
    """Build the argument string for an exposure command.

    Inputs:
    - doWindow: if true, include the window argument (when permitted)
    Empty sub-arguments (unset bin factor, disabled windowing) are omitted.
    """
    pieces = [
        "time=%s" % (self.expTime,),
        self.formatBinFactorArg(),
        self.formatWindowArg(doWindow),
    ]
    return " ".join(piece for piece in pieces if piece)
def formatWindowArg(self, doWindow=True):
    """Format the window=... argument for an expose/centroid/findstars command.

    Inputs:
    - doWindow: if true, window the exposure (if permitted)
    Returns "" when windowing is disabled.  The upper-right corner gets an
    extra +1 offset unless the window coordinates are inclusive.
    """
    if not (doWindow and self.doWindow):
        return ""
    urOffset = self.windowOrigin if self.windowIsInclusive else self.windowOrigin + 1
    llx, lly = (self.window[ii] + self.windowOrigin for ii in (0, 1))
    urx, ury = (self.window[ii] + urOffset for ii in (2, 3))
    return "window=%d,%d,%d,%d" % (llx, lly, urx, ury)
def getInstInfo(self):
    """Obtains instrument data.

    Verifies the correct instrument and sets these attributes:
    - instScale: x,y image scale in unbinned pixels/degree
    - instCtr: x,y image center in unbinned pixels
    - instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
    - arcsecPerPixel: image scale in arcsec/unbinned pixel;
      average of x and y scales

    Raises ScriptError if wrong instrument.
    """
    sr = self.sr
    if self.tccInstPrefix and not sr.debug:
        # Make sure current instrument is correct
        try:
            currInstName = sr.getKeyVar(self.tccModel.inst)
        except sr.ScriptError:
            raise sr.ScriptError("current instrument unknown")
        if not currInstName.lower().startswith(self.tccInstPrefix.lower()):
            raise sr.ScriptError("%s is not the current instrument (%s)!" % (self.instName, currInstName))
        # query the TCC for the live instrument geometry
        self.instScale = sr.getKeyVar(self.tccModel.iimScale, ind=None)
        self.instCtr = sr.getKeyVar(self.tccModel.iimCtr, ind=None)
        self.instLim = sr.getKeyVar(self.tccModel.iimLim, ind=None)
    else:
        # debug mode (or no instrument prefix): use canned NA2 guider data
        # data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
        self.instScale = [-12066.6, 12090.5]  # unbinned pixels/deg
        self.instCtr = [240, 224]
        self.instLim = [0, 0, 524, 511]
    # mean of |x| and |y| scales, converted from pixels/deg to arcsec/pixel
    self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
def getEntryNum(self, wdg):
    """Return the numeric value of widget *wdg*.

    Raises ScriptError when the entry field is blank.
    """
    value = wdg.getNumOrNone()
    if value is None:
        raise self.sr.ScriptError(wdg.label + " not specified")
    return value
def getExposeCmdDict(self, doWindow=True):
    """Return the basic command-argument dict for an expose command.

    Includes actor, cmdStr and abortCmdStr.
    """
    return {
        "actor": self.gcamActor,
        "cmdStr": "expose " + self.formatExposeArgs(doWindow),
        "abortCmdStr": "abort",
    }
def graphFocusMeas(self, focPosFWHMList, extremeFocPos=None, extremeFWHM=None):
    """Graph measured FWHM vs. focus.

    Inputs:
    - focPosFWHMList: list of data items:
      - focus position (um)
      - measured FWHM (binned pixels)
    - extremeFocPos: extremes of focus position
    - extremeFWHM: extremes of FWHM
    extremes are an Extremes object with .minVal and .maxVal
    """
    numMeas = len(focPosFWHMList)
    if numMeas == 0:
        # nothing to plot yet
        return
    focList, fwhmList = zip(*focPosFWHMList)
    if not self.plotLine:
        # first measurement: create the matplotlib line ('bo' = blue circles)
        self.plotLine = self.plotAxis.plot(focList, fwhmList, 'bo')[0]
    else:
        # later measurements: update the existing line's data in place
        self.plotLine.set_data(focList[:], fwhmList[:])
    self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
def initAll(self):
    """Reset all shared state variables and disable the command buttons."""
    self.didTakeImage = False
    for attr in (
        "focDir", "currBoreXYDeg", "begBoreXYDeg", "instScale",
        "arcsecPerPixel", "instCtr", "instLim", "cmdMode",
        "focPosToRestore", "expTime", "absStarPos", "relStarPos",
        "binFactor",
        "window",  # LL pixel is 0, UL pixel is included
    ):
        setattr(self, attr, None)
    self.enableCmdBtns(False)
def logFitFWHM(self, name, focPos, fwhm):
    """Log one fitted-FWHM row: name, focus (um), FWHM (binned pixels),
    FWHM (arcsec).  fwhm may be None.
    """
    fwhmArcSec = None if fwhm is None else fwhm * self.arcsecPerPixel * self.binFactor
    cells = (
        formatNum(focPos, "%0.0f"),
        formatNum(fwhm, "%0.1f"),
        formatNum(fwhmArcSec, "%0.2f"),
    )
    self.logWdg.addMsg("%s\t%s" % (name, "\t".join(cells)))
def logStarMeas(self, name, focPos, starMeas):
    """Log a star measurement.

    The name should be less than 8 characters long.
    Any or all data fields in starMeas may be None.

    Inputs:
    - focPos: focus position, in um
    - starMeas: StarMeas object
    If fwhm is None, it is reported as NaN.
    """
    fwhm = starMeas.fwhm
    fwhmArcSec = None if fwhm is None else fwhm * self.arcsecPerPixel * self.binFactor
    if starMeas.ampl is None or starMeas.sky is None:
        skyPlusAmpl = None
    else:
        skyPlusAmpl = starMeas.ampl + starMeas.sky
    cells = (
        formatNum(focPos, "%0.0f"),
        formatNum(fwhm, "%0.1f"),
        formatNum(fwhmArcSec, "%0.2f"),
        formatNum(starMeas.sky, "%0.0f"),
        formatNum(starMeas.ampl, "%0.0f"),
        formatNum(skyPlusAmpl, "%0.0f"),
    )
    self.logWdg.addMsg("%s\t%s" % (name, "\t".join(cells)))
def recordUserParams(self, doStarPos=True):
    """Record user-set parameters relating to exposures but not to focus.

    Inputs:
    - doStarPos: if true: save star position and related information;
      warning: if doStarPos true then there must *be* a valid star position

    Always sets self.expTime, self.binFactor and self.centroidRadPix.
    When doStarPos is true, also sets self.absStarPos, self.relStarPos and
    self.window (windowed around the star when self.doWindow is set);
    otherwise those three are set to None.
    """
    self.expTime = self.getEntryNum(self.expTimeWdg)
    self.binFactor = self.dispBinFactor
    centroidRadArcSec = self.getEntryNum(self.centroidRadWdg)
    self.centroidRadPix = centroidRadArcSec / (self.arcsecPerPixel * self.binFactor)
    if not doStarPos:
        self.absStarPos = None
        self.relStarPos = None
        self.window = None
        return
    winRad = self.centroidRadPix * self.WinSizeMult
    self.absStarPos = [self.getEntryNum(self.starPosWdgSet[ii]) for ii in range(2)]
    if not self.doWindow:
        self.window = None
        self.relStarPos = self.absStarPos[:]
        return
    # clamp the window to the instrument limits; round to nearest pixel
    windowMinXY = [max(self.instLim[ii], int(0.5 + self.absStarPos[ii] - winRad)) for ii in range(2)]
    windowMaxXY = [min(self.instLim[ii - 2], int(0.5 + self.absStarPos[ii] + winRad)) for ii in range(2)]
    self.window = windowMinXY + windowMaxXY
    self.relStarPos = [self.absStarPos[ii] - windowMinXY[ii] for ii in range(2)]
def run(self, sr):
    """Run the focus script.

    Generator driven by the script runner *sr*.  Flow: initialize state,
    query instrument info, do any extra setup, then loop on user commands
    (Find / Measure) until Sweep is pressed, then run the focus sweep and
    restore the boresight / take a final full-frame image as needed.
    """
    self.initAll()
    # fake data for debug mode
    # iteration #, FWHM
    self.debugIterFWHM = (1, 2.0)
    self.getInstInfo()
    yield self.waitExtraSetup()
    # open image viewer window, if any
    if self.imageViewerTLName:
        self.tuiModel.tlSet.makeVisible(self.imageViewerTLName)
    self.sr.master.winfo_toplevel().lift()
    focPosFWHMList = []
    extremeFocPos = Extremes()
    extremeFWHM = Extremes()
    # check that the gcam actor is alive. This is important because
    # centroid commands can fail due to no actor or no star
    # so we want to halt in the former case
    yield sr.waitCmd(
        actor = self.gcamActor,
        cmdStr = "ping",
    )
    # command loop; repeat until error or user explicitly presses Stop
    if self.maxFindAmpl is None:
        btnStr = "Measure or Sweep"
    else:
        btnStr = "Find, Measure or Sweep"
    waitMsg = "Press %s to continue" % (btnStr,)
    testNum = 0
    while True:
        # wait for user to press the Expose or Sweep button
        # note: the only time they should be enabled is during this wait
        self.enableCmdBtns(True)
        sr.showMsg(waitMsg, RO.Constants.sevWarning)
        yield sr.waitUser()
        self.enableCmdBtns(False)
        if self.cmdMode == self.cmd_Sweep:
            break
        if testNum == 0:
            # first command of a session: reset the graph and write a header
            self.clearGraph()
            if self.maxFindAmpl is None:
                self.logWdg.addMsg("===== Measure =====")
            else:
                self.logWdg.addMsg("===== Find/Measure =====")
        testNum += 1
        focPos = float(self.centerFocPosWdg.get())
        if focPos is None:
            # NOTE(review): appears unreachable — float() never returns None
            # and raises ValueError on a blank field before this check
            raise sr.ScriptError("must specify center focus")
        yield self.waitSetFocus(focPos, False)
        if self.cmdMode == self.cmd_Measure:
            cmdName = "Meas"
            self.recordUserParams(doStarPos=True)
            yield self.waitCentroid()
        elif self.cmdMode == self.cmd_Find:
            cmdName = "Find"
            self.recordUserParams(doStarPos=False)
            yield self.waitFindStar()
            starData = sr.value
            if starData.xyPos is not None:
                sr.showMsg("Found star at %0.1f, %0.1f" % tuple(starData.xyPos))
                self.setStarPos(starData.xyPos)
        else:
            raise RuntimeError("Unknown command mode: %r" % (self.cmdMode,))
        # the last wait command left its measurement in sr.value
        starMeas = sr.value
        self.logStarMeas("%s %d" % (cmdName, testNum,), focPos, starMeas)
        fwhm = starMeas.fwhm
        if fwhm is None:
            waitMsg = "No star found! Fix and then press %s" % (btnStr,)
            self.setGraphRange(extremeFocPos=extremeFocPos)
        else:
            extremeFocPos.addVal(focPos)
            extremeFWHM.addVal(starMeas.fwhm)
            focPosFWHMList.append((focPos, fwhm))
            self.graphFocusMeas(focPosFWHMList, extremeFocPos, extremeFWHM)
            waitMsg = "%s done; press %s to continue" % (cmdName, btnStr,)
    # user pressed Sweep: run the focus sweep
    self.recordUserParams(doStarPos=True)
    yield self.waitFocusSweep()
    # restore the boresight if it was moved during the script
    doRestoreBoresight = self.begBoreXYDeg != self.currBoreXYDeg
    if doRestoreBoresight:
        yield self.moveBoresight(
            self.begBoreXYDeg,
            msgStr ="Restoring original boresight position",
            doWait = True,
        )
    # take a final full-frame image if exposures were windowed or the
    # boresight was moved
    if self.didTakeImage and (self.doWindow or doRestoreBoresight):
        self.didTakeImage = False # to prevent end from taking another image
        self.sr.showMsg("Taking a final image")
        exposeCmdDict = self.getExposeCmdDict(doWindow=False)
        yield sr.waitCmd(**exposeCmdDict)
def setCurrFocus(self, *args):
    """Copy the current secondary focus into the center-focus widget.

    Shows a warning and leaves the widget unchanged when the current
    focus is unknown.
    """
    currFocus = self.sr.getKeyVar(self.tccModel.secFocus, defVal=None)
    if currFocus is None:
        self.sr.showMsg(
            "Current focus not known",
            severity=RO.Constants.sevWarning,
        )
        return
    self.centerFocPosWdg.set(currFocus)
    self.sr.showMsg("")
def setGraphRange(self, extremeFocPos=None, extremeFWHM=None):
"""Sets the displayed range of the graph.
Inputs:
- extremeFocPos: focus extremes
- extremeFWHM: FWHM extremes
"""
# "setGraphRange(extremeFocPos=%s, extremeFWHM=%s)" | |
= feeders[feeders['id_f'] == inbound]
#st.write("handle*Id, feeders_1= ", feeders_1)
feeders_1['id_f_nf'] = feeders_1['id_f'] + '_' + feeders_1['id_nf']
# extract these outgoings from the FSU database
# if outbounds has ids not in fsu, this approach will not work
fsu_outbound = pd.merge(fsu, outbounds, how='inner', left_on='id', right_on='id_nf')
fsu_outbound['id_f_nf'] = fsu_outbound['id_f'] + '_' + fsu_outbound['id_nf']
#st.write("fsu_outbound= ", fsu_outbound)
fsu_pax = pd.merge(fsu_outbound, feeders_1, how='inner', left_on='id_f_nf', right_on='id_f_nf')
#st.write("top level: fsu_pax.shape: ", fsu_pax.shape)
#fsu_outbound.to_csv("outbound_cityGraph.csv", index=0)
fsu_pax.drop_duplicates(inplace=True)
#st.write("fsu_pax= ", fsu_pax)
# Compute connection time (inbound.IN - outbound.sch_dep)
id_f = fsu_pax.id_f_x
id_nf = fsu_pax.id_nf_x
# Node metadata
fsu_pax['dep_delay'] = (fsu_pax.OUT_DTMZ - fsu_pax.SCH_DEP_DTMZ) / 1e9 / 60
if only_keep_late_dep:
fsu_pax = fsu_pax[fsu_pax['dep_delay'] > 0]
dep_delay = (fsu_pax.OUT_DTMZ - fsu_pax.SCH_DEP_DTMZ) / 1e9 / 60
arr_delay = (fsu_pax.IN_DTMZ - fsu_pax.SCH_ARR_DTMZ) / 1e9 / 60 # outbound
flt_num = fsu_pax.FLT_NUM
tail = fsu_pax.TAIL
od = fsu_pax.OD
sch_dep_tmz = fsu_pax.SCH_DEP_TMZ
sch_arr_tmz = fsu_pax.SCH_ARR_TMZ
node_nf_dict = {'id':id_nf, 'arr_delay':arr_delay, 'dep_delay':dep_delay, 'od':od, 'FLT_NUM':flt_num, 'TAIL':tail, 'SCH_DEP_TMZ':sch_dep_tmz, 'SCH_ARR_TMZ':sch_arr_tmz}
d_nf = pd.DataFrame(node_nf_dict)
#st.write("d_nf= ", d_nf)
# Add feeder row
# Find inbound in FSU data
# fsu_inbound is a Series. Another way to access : fsu_inbound.loc['SCH_DEP_DTMZ',0]
#st.write("fsu_inbound= ", fsu_inbound)
# I removed the transpose. Not clear why.
#dep_delay = (fsu_inbound.OUT_DTMZ - fsu_inbound.transpose().SCH_DEP_DTMZ) / 1e9 / 60
dep_delay = (fsu_inbound.OUT_DTMZ - fsu_inbound.SCH_DEP_DTMZ) / 1e9 / 60
arr_delay = (fsu_inbound.IN_DTMZ - fsu_inbound.SCH_ARR_DTMZ) / 1e9 / 60 # outbound
dep_delay = dep_delay.values[0] # Must fix this. Not clear why needed here.
arr_delay = arr_delay.values[0]
od = fsu_inbound.OD.values[0] # Series
flt_num = fsu_inbound.FLT_NUM.values[0]
tail = fsu_inbound.TAIL.values[0]
#### IT is zero on second level of inbound flights to PTY. WHY? (2021-06-09)
#st.write("fsu_pax.shape: ", fsu_pax.shape)
if fsu_pax.shape[0] == 0:
continue
#st.write("fsu_inbound: ", fsu_inbound.SCH_DEP_TMZ)
#st.write("inbound: " , flight_id)
#st.write("fsu_pax: ", fsu_pax[['id','SCH_DEP_TMZ']])
#sch_dep_tmz = fsu_pax.SCH_DEP_TMZ.values[0]
#sch_arr_tmz = fsu_pax.SCH_ARR_TMZ.values[0]
sch_dep_tmz = fsu_inbound.SCH_DEP_TMZ.values[0]
sch_arr_tmz = fsu_inbound.SCH_ARR_TMZ.values[0]
row_f = {'id':inbound, 'arr_delay':arr_delay, 'dep_delay':dep_delay, 'od':od, 'lev':flight_id_level, 'FLT_NUM':flt_num, 'TAIL':tail, 'SCH_DEP_TMZ':sch_dep_tmz, 'SCH_ARR_TMZ':sch_arr_tmz}
#st.write(row_f)
#print(type(fsu_inbound))
d_nf.loc[-1] = row_f
# drop=True: do not keep the new index column created by default
node_df = d_nf.sort_index().reset_index(drop=True)
node_df.loc[:,'lev'] = flight_id_level
node_df.loc[1:,'lev'] = flight_id_level + 1
# The first node is the feeder
# All the other nodes are the outbounds
# Create Graph edges and metadata
id_f_nf = id_f + "_" + id_nf
# Why isn't IN_DTMZ a scalar like in the method handleCitiesGraph()?
available = (fsu_outbound.SCH_DEP_DTMZ - fsu_inbound.IN_DTMZ.values[0]) / 1e9 / 60
planned = (fsu_outbound.SCH_DEP_DTMZ - fsu_inbound.SCH_ARR_DTMZ.values[0]) / 1e9 / 60
if debug:
st.write("id: ", fsu_inbound.id)
st.write("inbound arrival: ", fsu_inbound.SCH_ARR_TMZ)
st.write("outbound departure: ", fsu_outbound[['id','SCH_DEP_TMZ']])
st.write("planned: ", planned);
#pax_id_nf = fsu_pax.id_nf_y
#pax_id_f = fsu_pax.id_f_y
#pax_avail = (fsu_pax.SCH_DEP_DTMZ - fsu_inbound.IN_DTMZ.values[0]) / 1e9 / 60
#pax_planned = (fsu_pax.SCH_DEP_DTMZ - fsu_inbound.SCH_ARR_DTMZ) / 1e9 / 60
#if debug:
#st.write("pax planned: ", pax_planned);
#st.write("planned, pax_planned: ", planned, pax_planned)
#st.write("available, pax_avail: ", available, pax_avail)
#dfx = pd.DataFrame([pax_id_f, pax_id_nf, pax_avail, pax_planned]).transpose()
#st.write("1 node_df: ", node_df)
fsux = fsu[fsu['id'] == '2019/10/01SJOPTY10:29459']
fsuy = fsu[fsu['id'] == '2019/10/01PTYTPA14:12393']
delta = planned - available # = IN - SCH_ARR
edge_nf_zip = zip(available, planned, delta)
id_f = fsu_pax['id_f_y']
id_nf = fsu_pax['id_nf_y']
id_f_nf = fsu_pax['id_f_nf']
edge_df = pd.DataFrame()
edge_df = pd.concat([edge_df, id_f_nf, id_f, id_nf], axis=1)
#st.write("ID, edge_df: ", edge_df.shape)
## Reorder the columns for clarity
edge_df['avail'] = available
edge_df['planned'] = planned
edge_df['delta'] = delta
edge_df['pax'] = fsu_pax.pax_nf
#st.write("ID, fsu_pax: ", fsu_pax.shape)
## EDGE correct. Now add metadata: avail, planned, delta
# Remove edges and nodes for flights with less available connection time than `delay`
# (I could either simplify the graph, or use brushing in the graph. Or both.)
# Let us do both. Simplification in this method, and brushing in Altair.
#st.write(node_df.columns)
#st.write(edge_df.columns)
# 1. find all nodes to remove
# The passengers that "could" miss their flights have less available time than needed.
# We keep the flights that potentially have the most impact on the network
#st.write("delay= ", delay)
ids_nf_to_keep = edge_df[edge_df['avail'] < delay]['id_nf_y']
#st.write("ID, ids_nf_to_keep: ", ids_nf_to_keep)
#st.write("ID, edge_df after filtering delays: ", edge_df.shape)
#st.write("ids_nf_to_keep: ", ids_nf_to_keep)
# 2. delete nodes from node DataFrame
#st.write("node_df: ", node_df)
#st.write("edge_df= ", edge_df)
#st.write("delay= ", delay)
node_df = node_df.set_index('id').loc[ids_nf_to_keep,:].reset_index()
#st.write("3 node_df: ", node_df) # EMPTY
# Add back the first row that is the feeder (it stays)
node_df.loc[-1] = row_f
node_df = node_df.sort_index().reset_index(drop=True)
# 3. delete edges from edge DataFrame
edge_df = edge_df.set_index('id_nf_y').loc[ids_nf_to_keep,:].reset_index()
#st.write("ID, last node_df: ", node_df)
#st.write("ID, last edge_df: ", edge_df)
#st.write("node_df: ", node_df)
#st.write("edge_df: ", edge_df)
#if nb_search > 0: st.stop() # DEBUGGING
#st.write(node_df.shape, edge_df.shape)
# Only the first ix
#st.write(node_df)
#st.write(edge_df)
if debug:
st.write("edge_df: ", edge_df)
#st.write("handle: node_df: ", node_df)
return node_df, edge_df
#---------------------------------------------------------
def handleCityGraphIdLev2_xxx(flight_id, keep_early_arr, id_list, fsu, bookings_f, feeders, is_print=True, delay=45, flight_id_level=0):
"""
Given an inbound flight to PTY return the corresponding outbound flighs
Return a tuple of Dataframes with node and edges
>>> Second tier flights <<<
Arguments
flight_id_level: level of flight_id in the graph network. The root has level zero. Children of flight_id have level 1, grandchildren of flight_id have level 2. Each leg of a flight increases the level by 1.
"""
#st.write("enter handleCityGraphId")
# I need to return two structures: nodes and edges.
# This pair of structures should be returned for each flight I am working with.
# Nodes are single IDs with arr and dep times, arr and dep delays.
# Edges are double IDs with PAX, rotation, and connection times.
#st.write("Enter handleCityGraph")
inbound = flight_id
#city_inbounds = findCity(bookings_f, city)
"""
if choice_ix == 'all':
min_ix = 0
max_ix = city_inbounds.shape[0]
else:
min_ix = choice_ix
max_ix = choice_ix+1
"""
min_ix, max_ix = 0, 1
# For each inbound flight, compute the corresponding outbound flights
for which_ix in range(min_ix, max_ix):
nodes = []
inbound = pd.DataFrame({'id':[inbound]}) # New, created from method argument
try:
nodes.append(inbound)
fsu_inbound = fsu[fsu['id'] == inbound['id'].values[0]]
except:
st.write("except")
continue
inbound_arr_delay = fsu_inbound.ARR_DELAY_MINUTES.values[0]
# if the arrival delay of the inbound is negative, the plane arrived early, and the
# passengers have time to connect
if keep_early_arr == False and inbound_arr_delay < 0:
#st.write("continue")
## Must keep keep_early_arrivals to TRUE for now. 2021-06-07.
continue
# just a collection of 'id_nf' ==> nodes
inbound = inbound['id'].values[0] # Series convert to list using .values
outbounds = findOutboundIds(id_list, inbound).to_frame()
outbounds['id_f'] = inbound
nodes.extend(outbounds['id_nf'].tolist()) # This is the list of nodes
edges = outbounds['id_nf'].to_frame('e2') # e2 is id_nf
edges['e1'] = inbound # e1 is id_f
edges['id'] = edges['e1'] + '_' + edges['e2']
# What is left to do is add the metadata to these lists
# Nodes: the data comes from FSU files
# Edges: the data comes from PAX files
# Create a unique id that combines inbound (feeder) and outbound flights
# This will allow me to merge two files with feeder/non-feeder columns
feeders_1 = feeders[feeders['id_f'] == inbound]
feeders_1['id_f_nf'] = feeders_1['id_f'] + '_' + feeders_1['id_nf']
# extract these outgoings from the FSU database
# if outbounds has ids not in fsu, this approach will not work
fsu_outbound = pd.merge(fsu, outbounds, how='inner', left_on='id', right_on='id_nf')
fsu_outbound['id_f_nf'] = fsu_outbound['id_f'] + '_' + fsu_outbound['id_nf']
fsu_pax = pd.merge(fsu_outbound, feeders_1, how='inner', left_on='id_f_nf', right_on='id_f_nf')
#st.write("top level: fsu_pax.shape: ", fsu_pax.shape)
fsu_pax.drop_duplicates(inplace=True)
# Compute connection time (inbound.IN - outbound.sch_dep)
id_f = fsu_pax.id_f_x
id_nf = fsu_pax.id_nf_x
# Node metadata
dep_delay = (fsu_pax.OUT_DTMZ - fsu_pax.SCH_DEP_DTMZ) / 1e9 / 60
arr_delay = (fsu_pax.IN_DTMZ - fsu_pax.SCH_ARR_DTMZ) / 1e9 / 60 # outbound
flt_num = fsu_pax.FLT_NUM
tail = fsu_pax.TAIL
od = fsu_pax.OD
sch_dep_tmz = fsu_pax.SCH_DEP_TMZ
sch_arr_tmz = fsu_pax.SCH_ARR_TMZ
node_nf_dict = {'id':id_nf, 'arr_delay':arr_delay, 'dep_delay':dep_delay, 'od':od, 'FLT_NUM':flt_num, 'TAIL':tail, 'SCH_DEP_TMZ':sch_dep_tmz, 'SCH_ARR_TMZ':sch_arr_tmz}
d_nf = pd.DataFrame(node_nf_dict)
#st.write("d_nf= ", d_nf)
# Add feeder row
# Find inbound in FSU data
# fsu_inbound is a Series. Another way to access : fsu_inbound.loc['SCH_DEP_DTMZ',0]
#st.write("fsu_inbound= ", fsu_inbound)
# I removed the transpose. Not clear why.
#dep_delay = (fsu_inbound.OUT_DTMZ - fsu_inbound.transpose().SCH_DEP_DTMZ) / 1e9 / 60
dep_delay = (fsu_inbound.OUT_DTMZ - fsu_inbound.SCH_DEP_DTMZ) / 1e9 / 60
arr_delay = (fsu_inbound.IN_DTMZ - fsu_inbound.SCH_ARR_DTMZ) / 1e9 / 60 # outbound
dep_delay = dep_delay.values[0] # Must | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# <NAME> (<EMAIL>)
from __future__ import print_function
from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
import struct
from math import pi
# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'
signed_zero_inf_nan_preserve_16 = 'nir_is_float_control_signed_zero_inf_nan_preserve(info->float_controls_execution_mode, 16)'
signed_zero_inf_nan_preserve_32 = 'nir_is_float_control_signed_zero_inf_nan_preserve(info->float_controls_execution_mode, 32)'
# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value. An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value. A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact. Such operations will only get
# applied to SSA values that do not have the exact bit set. This should be
# used by any optimizations that are not bit-for-bit exact. It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
# instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified. For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size. In the search half of the expression this indicates that it
# should only match that particular bit-size. In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicated that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").
# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
def lowered_sincos(c):
    """Return a nir_algebraic expression tree approximating sin/cos of `a`.

    *c* is the phase constant added after scaling the angle by 1/(2*pi).
    Based on the devmaster fast-and-accurate sine/cosine approximation
    (see URL in the comment above).
    """
    # wrap the scaled angle into [-1, 1)
    wrapped = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
    # parabolic first approximation
    parabola = ('fmul', ('fsub', wrapped, ('fmul', wrapped, ('fabs', wrapped))), 4.0)
    # refine: blend the parabola with its square, weighted by 0.225
    return ('ffma', ('ffma', parabola, ('fabs', parabola), ('fneg', parabola)), 0.225, parabola)
def intBitsToFloat(i):
    """Reinterpret the 32-bit unsigned integer *i* as an IEEE-754 binary32 float."""
    packed = struct.pack('!I', i)
    (value,) = struct.unpack('!f', packed)
    return value
# Transform table for the algebraic pass: each entry is
# (search-expression, replacement[, condition-string]).
optimizations = [
    # Strength-reduce multiplies and shifts involving power-of-two constants.
    (('imul', a, '#b(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
    (('imul', a, '#b(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
    (('ishl', a, '#b'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),
    # 32x32 -> 64 multiply simplification/lowering.
    (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
    (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
    (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
    (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
    # Integer division/modulus identities and power-of-two strength reduction.
    (('udiv', a, 1), a),
    (('idiv', a, 1), a),
    (('umod', a, 1), 0),
    (('imod', a, 1), 0),
    (('imod', a, -1), 0),
    (('irem', a, 1), 0),
    (('irem', a, -1), 0),
    (('udiv', a, '#b(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
    (('idiv', a, '#b(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), '!options->lower_bitops'),
    (('idiv', a, '#b(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), '!options->lower_bitops'),
    (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),
    (('imod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),
    (('imod', a, '#b(is_neg_power_of_two)'), ('ior', a, b)),
    (('irem', a, '#b(is_pos_power_of_two)'), ('bcsel', ('ige', a, 0), ('iand', a, ('isub', b, 1)), ('ior', a, ('ineg', b)))),
    (('irem', a, '#b(is_neg_power_of_two)'), ('bcsel', ('ige', a, 0), ('iand', a, ('inot', b)), ('ior', a, b))),
    # Double-negation and absolute-value simplifications.
    (('~fneg', ('fneg', a)), a),
    (('ineg', ('ineg', a)), a),
    (('fabs', ('fneg', a)), ('fabs', a)),
    (('fabs', ('u2f', a)), ('u2f', a)),
    (('iabs', ('iabs', a)), ('iabs', a)),
    (('iabs', ('ineg', a)), ('iabs', a)),
    (('f2b', ('fneg', a)), ('f2b', a)),
    (('i2b', ('ineg', a)), ('i2b', a)),
    # Additive identities.
    (('~fadd', a, 0.0), a),
    # a+0.0 is 'a' unless 'a' is denormal or -0.0. If it's only used by a
    # floating point instruction, they should flush any input denormals and we
    # can replace -0.0 with 0.0 if the float execution mode allows it.
    (('fadd(is_only_used_as_float)', 'a@16', 0.0), a, '!'+signed_zero_inf_nan_preserve_16),
    (('fadd(is_only_used_as_float)', 'a@32', 0.0), a, '!'+signed_zero_inf_nan_preserve_32),
    (('iadd', a, 0), a),
    (('usadd_4x8', a, 0), a),
    (('usadd_4x8', a, ~0), ~0),
    # Factor a common operand out of sums and bitwise combinations.
    (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
    (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
    (('iand', ('ior', a, b), ('ior', a, c)), ('ior', a, ('iand', b, c))),
    (('ior', ('iand', a, b), ('iand', a, c)), ('iand', a, ('ior', b, c))),
    # Cancellation of a value with its negation.
    (('~fadd', ('fneg', a), a), 0.0),
    (('iadd', ('ineg', a), a), 0),
    (('iadd', ('ineg', a), ('iadd', a, b)), b),
    (('iadd', a, ('iadd', ('ineg', a), b)), b),
    (('~fadd', ('fneg', a), ('fadd', a, b)), b),
    (('~fadd', a, ('fadd', ('fneg', a), b)), b),
    (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
    # Multiplicative identities.
    (('~fmul', a, 0.0), 0.0),
    # The only effect a*0.0 should have is when 'a' is infinity, -0.0 or NaN
    (('fmul', 'a@16', 0.0), 0.0, '!'+signed_zero_inf_nan_preserve_16),
    (('fmul', 'a@32', 0.0), 0.0, '!'+signed_zero_inf_nan_preserve_32),
    (('imul', a, 0), 0),
    (('umul_unorm_4x8', a, 0), 0),
    (('umul_unorm_4x8', a, ~0), a),
    (('~fmul', a, 1.0), a),
    # The only effect a*1.0 can have is flushing denormals. If it's only used by
    # a floating point instruction, they should flush any input denormals and
    # this multiplication isn't needed.
    (('fmul(is_only_used_as_float)', a, 1.0), a),
    (('imul', a, 1), a),
    (('fmul', a, -1.0), ('fneg', a)),
    (('imul', a, -1), ('ineg', a)),
    # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
    # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
    # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
    # If a != a: fsign(a)*a*a => 0*NaN*NaN => abs(NaN)*NaN
    (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
    (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
    # ffma identities with 0.0 / +-1.0 operands.
    (('~ffma', 0.0, a, b), b),
    (('ffma@16(is_only_used_as_float)', 0.0, a, b), b, '!'+signed_zero_inf_nan_preserve_16),
    (('ffma@32(is_only_used_as_float)', 0.0, a, b), b, '!'+signed_zero_inf_nan_preserve_32),
    (('~ffma', a, b, 0.0), ('fmul', a, b)),
    (('ffma@16', a, b, 0.0), ('fmul', a, b), '!'+signed_zero_inf_nan_preserve_16),
    (('ffma@32', a, b, 0.0), ('fmul', a, b), '!'+signed_zero_inf_nan_preserve_32),
    (('ffma', 1.0, a, b), ('fadd', a, b)),
    (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
    # flrp (linear interpolation) identities.
    (('~flrp', a, b, 0.0), a),
    (('~flrp', a, b, 1.0), b),
    (('~flrp', a, a, b), a),
    (('~flrp', 0.0, a, b), ('fmul', a, b)),
    # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
    (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
]
# Float sizes
for s in [16, 32, 64]:
optimizations.extend([
(('~flrp@{}'.format(s), a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp{}'.format(s)),
(('~flrp@{}'.format(s), a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp{}'.format(s)),
(('~flrp@{}'.format(s), ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', | |
if try_times > connect_try:
break
msg_from_client_str = str(msg_from_client.decode('utf-8'))
print(msg_from_client_str + " " + "try_time: " + str(try_times))
# try_times = try_times + 1
matched = re.match(legal_pattern, msg_from_client_str)
if matched is not None:
break
if not msg_from_client:
break
response = "403 " + "Message-error!"
conn.send(bytes(response, 'utf-8'))
msg_from_client = conn.recv(4096)
try_times = try_times + 1
# msg_from_client_str = str(msg_from_client.decode('utf-8'))
if matched is None:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
return
print("connect success!")
measure = matched.group()
pre_list = measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
measure_up = pre_list[0] + 'U' + pre_list[-1]
measure_write = pre_list[0] + 'W' + pre_list[-1]
lock.acquire()
# lock.release()
tmp_running = dictionary['running_number']
lock.release()
res_pool = pool_size - tmp_running
response = "400 " + pre_list[0] + " " + pre_list[-1] + " " + str(res_pool)
conn.send(bytes(response, 'utf-8'))
catched_job = pre_list[0]
catched_job = catched_job.lower()
if catched_job == 'xce':
aim_ns = 'xception-' + pre_list[-1] + '-' + pre_list[-1]
else:
aim_ns = catched_job + "-" + pre_list[-1] + "-" + pre_list[-1]
#/tfdata/k8snfs/setfix/
job_con_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, aim_ns)
# job_con_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, aim_ns)
job_config = load_config(job_con_path)
print("load job config success!!")
# allow_path = "/tfdata/k8snfs/%s/%s.json" % (aim_ns, measure_t)
allow_path = "/tfdata/k8snfs/setad2/%s/%s.json" % (aim_ns, measure_t)
# allow_path2 = "/tfdata/k8snfs/%s/%s_r.json" % (measure_t,measure_t)
allow_p, created = check_path(aim_ns)
print(allow_p)
if created:
allow_read = {}
# allow_readr = {}
allow_read['OK'] = True
allow_read['retry'] = job_config['retry']
save_config2(allow_read, allow_path)
# save_config2(allow_readr,allow_path2)
if not os.path.exists(allow_path):
allow_read = {}
# allow_readr = {}
allow_read['OK'] = True
allow_read['retry'] = job_config['retry']
save_config2(allow_read, allow_path)
ns_list = get_ns(v1)
ceshi_count = 0
ceshi_in = False
while True:
if ceshi_count > 35:
break
ns_list = get_ns(v1)
write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
key_write = write_ss.keys()
print(key_write[:])
write_inter = write_ss[key_write[0]]
write_items = list(write_inter)
print(write_items[:])
write_now = int(write_items[0]['modulate'])
if aim_ns not in ns_list and (write_now == 0):
ceshi_count += 1
time.sleep(15)
else:
ceshi_in = True
break
if not ceshi_in:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
print("namespace created error!")
return
result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
key = result.keys()
print(key)
result_inter = result[key[0]]
result_items = list(result_inter)
print(result_items)
trains_step = int(result_items[0]['training_step'])
tmp_item = dict(result_items[0])
key_tmp = tmp_item.keys()
if 'retry' not in key_tmp:
retry_now = int(job_config['retry'])
else:
retry_now = int(result_items[0]['retry'])
allow_read = load_config(allow_path)
print("Reload success!!")
allow_read['retry'] = retry_now
# 'ps_replicas': job.ps_replicas,
# 'worker_replicas': job.worker_replicas
if 'ps' not in key_tmp:
ps_now = int(job_config['ps_replicas'])
else:
ps_now = int(result_items[0]['ps'])
if 'worker' not in key_tmp:
worker_now = int(job_config['worker_replicas'])
else:
worker_now = int(result_items[0]['worker'])
allow_read['worker'] = worker_now
allow_read['ps'] = ps_now
save_config2(allow_read, allow_path)
print("save success!!")
result2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
key2 = result2.keys()
print(key2)
result_inter2 = result2[key2[0]]
result_items2 = list(result_inter2)
print(result_items2)
retry_top = int(result_items2[0]['retry'])
print(retry_top)
print(type(retry_top))
print(retry_now)
print(type(retry_now))
if retry_top != retry_now:
new_ps = int(result_items2[0]['ps'])
new_worker = int(result_items2[0]['worker'])
trains_step = math.ceil(trains_step * worker_now / new_worker)
allow_read = load_config(allow_path)
allow_read['retry'] = retry_top
allow_read['ps'] = new_ps
allow_read['worker'] = new_worker
save_config2(allow_read, allow_path)
print("saved successful!!")
print(trains_step)
modekk = 0
if trains_step <= 200:
step_items = [
{
'measurement': measure_t,
'tags': {
'task': int(pre_list[-1]),
'runtimes': int(pre_list[-1]),
'retry': int(retry_top)
},
'fields': {
'training_step': int(trains_step),
'ps': int(allow_read['ps']),
'worker': int(allow_read['worker'])
}
}
]
print("saved in db")
print(trains_step)
influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
print("Writed in db")
# conn.close()
# lock.acquire()
# # lock.release()
# tmp = dictionary['running_number']
# tmp = tmp - 1
# dictionary['running_number'] = tmp
# lock.release()
print("Do not need to predict,return")
modekk = 1
min_steps = math.ceil(trains_step * 0.2)
length = math.ceil(min_steps * 0.4)
print("Initial Config Success!" + "min_steps:" + str(min_steps))
time_start = time.time()
print("start to load data")
loss, max_loss, modekk_z = load_data_nnls(min_steps=min_steps, length=length, measure=measure, first=True)
if not loss:
conn.close()
lock.acquire()
# lock.release()
tmp = dictionary['running_number']
tmp = tmp - 1
dictionary['running_number'] = tmp
lock.release()
return
# loss_array = normalization(loss,max_loss)
result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
key = result.keys()
result_inter = result[key[0]]
result_items = list(result_inter)
trains_step = int(result_items[0]['training_step'])
step_to_train = trains_step
if trains_step <= 200:
modekk_z = 1
if modekk_z != 1:
print("Get data first time")
data_in, step_x = make_dataset_nnls(loss, max_loss)
step_to_train = predict_step_nnls(data_in, step_x, measure, trains_step, math.ceil(trains_step * 0.5))
else:
step_to_train = trains_step
res1 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
key1 = res1.keys()
res1_inter = res1[key1[0]]
res1_items = list(res1_inter)
retry = int(res1_items[0]['retry'])
allow_read = load_config(allow_path)
retry_now = int(allow_read['retry'])
if retry_now != retry:
new_ps = int(res1_items[0]['ps'])
new_worker = int(res1_items[0]['worker'])
step_to_train = math.ceil(step_to_train * int(allow_read['worker']) / new_worker)
allow_read['retry'] = retry
allow_read['ps'] = new_ps
allow_read['worker'] = new_worker
save_config2(allow_read, allow_path)
step_items = [
{
'measurement': measure_t,
'tags': {
'task': int(pre_list[-1]),
'runtimes': int(pre_list[-1]),
'retry': int(retry)
},
'fields': {
'training_step': step_to_train,
'ps': int(allow_read['ps']),
'worker': int(allow_read['worker'])
}
}
]
print("saved in db")
print(step_to_train)
influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
print("Writed in db")
print("First prdict cost time: " + str(time.time() - time_start))
iftrain = 0
time_total = 0
if modekk != 1:
modekk = modekk_z
while True:
if modekk == 1:
break
# selected_node = select_node(influx_client, measure_s)
res1 = influx_client.query(
"select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
key1 = res1.keys()
print(key1[:])
res1_inter = res1[key1[0]]
res1_items = list(res1_inter)
print(res1_items[:])
step_now = int(res1_items[0]['step'])
time_mean_list = [float(i['time_d']) for i in res1_items]
time_mean = np.mean(time_mean_list)
print(time_mean)
# time_sleep = predict_fre * time_mean
print(step_now)
ns_list = get_ns(v1)
print(ns_list)
print(aim_ns)
print(aim_ns in ns_list)
write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
key_write = write_ss.keys()
print(key_write[:])
write_inter = write_ss[key_write[0]]
write_items = list(write_inter)
print(write_items[:])
write_now = int(write_items[0]['modulate'])
if (aim_ns not in ns_list) and (write_now == 0):
time.sleep(15)
ns_list = get_ns(v1)
write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
key_write = write_ss.keys()
# print(key_write[:])
write_inter = write_ss[key_write[0]]
write_items = list(write_inter)
# print(write_items[:])
write_now = int(write_items[0]['modulate'])
if (aim_ns not in ns_list) and (write_now == 0):
print("namespace is missing")
break
pod_status = [i.status.phase for i in v1.list_namespaced_pod(aim_ns).items]
print(pod_status)
print("going on")
print(measure)
print(math.ceil(step_to_train * 0.85))
print(step_now)
write_ss = influx_client.query("select * from " + measure_write + " order by desc limit 1")
key_write = write_ss.keys()
write_inter = write_ss[key_write[0]]
write_items = list(write_inter)
write_now = int(write_items[0]['modulate'])
if ('Succeeded' in pod_status or 'Failed' in pod_status) and (write_now == 0):
print("Job is ended")
break
else:
time.sleep(3)
print("Job is going")
print(math.ceil(step_to_train * 0.85))
print(step_now)
panduan_going = math.ceil(step_to_train * 0.85)
print(type(step_now))
step_now = int(step_now)
print(type(step_now))
print(step_now)
if step_now >= panduan_going:
print("It need not to predict")
modekk = 1
break
else:
time.sleep(2)
print("Job is going to load")
time.sleep(2.5)
print(measure)
print(length)
print(type(length))
print("load data again")
if time_total >= predict_fre:
result = influx_client.query("select * from " + measure_t + " order by desc limit 1")
key = result.keys()
result_inter = result[key[0]]
result_items = list(result_inter)
trains_step = int(result_items[0]['training_step'])
if step_now >= trains_step - 3:
print("This process is ended!!")
break
# loss, max_loss = load_data_nnls(min_steps=min_steps, length=length, measure=measure, first=False)
loss,max_loss = load_data_nnls(min_steps=min_steps,length=length,measure=measure,first=False)
print("start to nnls process!!")
data_in,step_x = make_dataset_nnls(loss,max_loss)
step_to_train = predict_step_nnls(data_in,step_x,measure,trains_step,math.ceil(trains_step*0.5))
# step_to_train = step_predict(data=loss[:], model=model, input_dim=1, predict_step=10, time_step=20,
# div=0.01, top_step=trains_step, low_step=math.ceil(trains_step * 0.5),
# measure=measure)
res2 = influx_client.query("select * from " + measure_up + " order by desc limit 1")
key2 = list(res2.keys())
res2_inter = res2[key2[0]]
res2_items = list(res2_inter)
retry = int(res2_items[0]['retry'])
allow_read = load_config(allow_path)
retry_now = int(allow_read['retry'])
new_ps = int(allow_read['ps'])
new_worker = int(allow_read['worker'])
if retry_now != retry:
new_ps = int(res2_items[0]['ps'])
new_worker = int(res2_items[0]['worker'])
step_to_train = math.ceil(step_to_train * int(allow_read['worker']) / new_worker)
allow_read['retry'] = retry
allow_read['worker'] = new_worker
allow_read['ps'] = new_ps
save_config2(allow_read, allow_path)
step_items = [
{
'measurement': measure_t,
'tags': {
'task': int(pre_list[-1]),
'runtimes': int(pre_list[-1]),
'retry': int(retry)
},
'fields': {
'training_step': step_to_train,
'ps': new_ps,
'worker': new_worker
}
}
]
print(step_to_train)
influx_client.write_points(step_items, time_precision="ms", database="PREDICT")
print("Writed result in db")
iftrain = iftrain + 1
print("Predict " + str(iftrain) + " costs time: " + str(time.time() - time_start))
| |
<filename>BPG/photonic_core.py
import os
import sys
import bag
import bag.io
import abc
import yaml
import logging
import math
import string
from .logger import setup_logger
from pathlib import Path
from itertools import chain
# BAG imports
from bag.core import BagProject, create_tech_info, _import_class_from_str
from bag.layout.util import BBox
from bag.io.file import read_yaml, read_file
from bag.layout.core import BagLayout, DummyTechInfo
# BPG imports
import BPG
from BPG.content_list import ContentList
from BPG.geometry import BBoxMut
from typing import TYPE_CHECKING, List, Callable, Union, Tuple, Dict, Optional, Any
from BPG.bpg_custom_types import layer_or_lpp_type, dim_type
# Typing imports
if TYPE_CHECKING:
from BPG.objects import PhotonicRound, PhotonicPath
from bag.layout.objects import InstanceInfo
from bag.layout.core import TechInfo
try:
import cybagoa
except ImportError:
cybagoa = None
def _parse_yaml_file(fname):
    # type: (str) -> Dict[str, Any]
    """Parse YAML file with environment variable substitution.

    Parameters
    ----------
    fname : str
        yaml file name.

    Returns
    -------
    table : Dict[str, Any]
        the yaml file as a dictionary.

    Raises
    ------
    KeyError
        if the file references an environment variable that is not set
        (``string.Template.substitute`` raises on missing keys).
    """
    content = read_file(fname)
    # substitute environment variables
    content = string.Template(content).substitute(os.environ)
    # yaml.load() without an explicit Loader is deprecated since PyYAML 5.1 and
    # a TypeError in PyYAML 6.0; safe_load also avoids arbitrary object
    # construction from the config file. NOTE(review): if any tech config
    # relies on python-specific YAML tags, use Loader=yaml.FullLoader instead.
    return yaml.safe_load(content)
# From bag/core
class PhotonicBagProject(BagProject):
    """
    The main bag controller class.

    This class extracts user configuration variables and issues high level bag commands. Most config variables have
    defaults pointing to files in the BPG/examples/tech folder

    Parameters
    ----------
    bag_config_path : Optional[str]
        the bag configuration file path. If None, will attempt to read from
        environment variable BAG_CONFIG_PATH.
    port : Optional[int]
        the BAG server process port number. If not given, will read from port file.
    """

    def __init__(self, bag_config_path: Optional[str] = None, port: Optional[int] = None) -> None:
        BagProject.__init__(self, bag_config_path, port)
        # Init empty path variables to be set by user spec file
        self.log_path = None
        self.log_filename = 'output.log'
        self.project_dir: Optional[Path] = None
        self.scripts_dir: Optional[Path] = None
        self.data_dir: Optional[Path] = None
        self.content_dir: Optional[Path] = None
        self.lsf_path = None
        self.gds_path = None
        # Reset the global run configuration; load_spec_file_paths fills it in.
        BPG.run_settings.load_new_configuration(config_dict={})

    def load_spec_file_paths(self,
                             spec_file: str,
                             **kwargs: Dict[str, Any],
                             ):
        """Receives a specification file from the user and configures the project paths accordingly.

        Parameters
        ----------
        spec_file : str
            path to the YAML specification file to load.
        **kwargs :
            extra key/value pairs that override entries read from the spec file.

        Notes
        -----
        This method mutates the global ``BPG.run_settings`` configuration and
        sets ``tech_info`` / ``photonic_tech_info`` on this instance; the order
        of the steps below matters (tech info must exist before the photonic
        tech files are loaded, and logging must be set up before the tech
        overrides are logged).
        """
        specs = self.load_yaml(spec_file)
        specs.update(**kwargs)  # Update the read specs with any passed variables

        # If a new bag configuration is passed in the yaml, load it
        if 'bag_config_path' in specs:
            BPG.run_settings.update_configuration(self.load_yaml(specs['bag_config_path']))
        BPG.run_settings.update_configuration(specs)  # Update the base run_settings with anything from the yaml

        # Get root path for the project
        bag_work_dir = Path(os.environ['BAG_WORK_DIR'])

        # self.tech_info = create_tech_info(bag_config_path=BPG.run_settings['bag_config_path'])
        tech_params = _parse_yaml_file(BPG.run_settings['tech_config_path'])
        if 'class' in tech_params:
            tech_cls = _import_class_from_str(tech_params['class'])
            self.tech_info = tech_cls(tech_params)
        else:
            # just make a default tech_info object as place holder.
            print('*WARNING*: No TechInfo class defined. Using a dummy version.')
            self.tech_info = DummyTechInfo(tech_params)

        # BAG might reset the photonic_tech_info config, need to reset it
        self.photonic_tech_info = create_photonic_tech_info(
            bpg_config=BPG.run_settings['bpg_config'],
            tech_info=self.tech_info,
        )
        print(f"BPG.run_settings: {BPG.run_settings['bpg_config']}")
        self.photonic_tech_info.load_tech_files()
        # A spec-file-level photonic_tech_config_path overrides the bag-config one.
        if 'photonic_tech_config_path' in BPG.run_settings:
            self.photonic_tech_info = create_photonic_tech_info(
                bpg_config=dict(photonic_tech_config_path=BPG.run_settings['photonic_tech_config_path']),
                tech_info=self.tech_info,
            )
            self.photonic_tech_info.load_tech_files()

        # Setup relevant output files and directories
        if 'project_dir' in BPG.run_settings:
            self.project_dir = Path(BPG.run_settings['project_dir']).expanduser()
        else:
            default_path = Path(BPG.run_settings['database']['default_lib_path'])
            self.project_dir = default_path / BPG.run_settings['project_name']
        self.scripts_dir = self.project_dir / 'scripts'
        self.data_dir = self.project_dir / 'data'
        self.content_dir = self.project_dir / 'content'

        # If users provide paths to add provide them here
        if 'path_setup' in BPG.run_settings:
            for path in BPG.run_settings['path_setup']:
                if path not in sys.path:
                    sys.path.insert(0, path)
                    print(f'Adding {path} to python module search path')

        # Make the project directories if they do not exists
        self.project_dir.mkdir(exist_ok=True, parents=True)
        self.scripts_dir.mkdir(exist_ok=True)
        self.data_dir.mkdir(exist_ok=True)
        self.content_dir.mkdir(exist_ok=True)

        # Enable logging for BPG
        if 'logfile' in BPG.run_settings:
            # If logfile is specified in specs, dump all logs in that location
            log_path = bag_work_dir / BPG.run_settings['logfile']
            if log_path.is_dir():
                self.log_path = log_path
                self.log_filename = 'output.log'
            else:
                # A file path was given: split into directory and file name.
                self.log_path = log_path.parent
                self.log_filename = log_path.name
        else:
            self.log_path = self.project_dir
            self.log_filename = 'output.log'
        setup_logger(log_path=str(self.log_path), log_filename=str(self.log_filename))
        logging.info(f'PhotonicCoreLayout initialized from spec file: {spec_file}')

        # Overwrite tech parameters if specified in the spec file
        # Setup the abstract tech layermap
        if 'layermap' in BPG.run_settings:
            self.photonic_tech_info.layermap_path = bag_work_dir / BPG.run_settings['layermap']
            logging.info(f'loading layermap from {self.photonic_tech_info.layermap_path}')

        # Setup the dataprep procedure
        if 'dataprep' in BPG.run_settings:
            self.photonic_tech_info.dataprep_routine_filepath = bag_work_dir / BPG.run_settings['dataprep']
            logging.info(f'loading dataprep procedure from {self.photonic_tech_info.dataprep_routine_filepath}')
        if 'dataprep_params' in BPG.run_settings:
            self.photonic_tech_info.dataprep_parameters_filepath = bag_work_dir / BPG.run_settings['dataprep_params']
            logging.info(f'loading dataprep and DRC parameters from '
                         f'{self.photonic_tech_info.dataprep_parameters_filepath}')
        if 'dataprep_label_depth' in BPG.run_settings:
            self.photonic_tech_info.dataprep_label_depth = BPG.run_settings['dataprep_label_depth']
            logging.info(f'dataprep_label_depth set to '
                         f'{self.photonic_tech_info.dataprep_label_depth}')

        # Setup the lumerical export map
        if 'lsf_export_map' in BPG.run_settings:
            self.photonic_tech_info.lsf_export_path = bag_work_dir / BPG.run_settings['lsf_export_map']
            logging.info(f'loading lumerical export configuration from {self.photonic_tech_info.lsf_export_path}')

        # Now that paths are fully settled, load the tech files
        self.photonic_tech_info.load_tech_files()

        # Set the paths of the output files
        self.lsf_path = str(self.scripts_dir / BPG.run_settings['lsf_filename'])
        self.gds_path = str(self.data_dir / BPG.run_settings['gds_filename'])
        logging.info('loaded paths successfully')

    @staticmethod
    def load_yaml(filepath):
        """Setup standardized method for yaml loading (env-var substitution included)."""
        return _parse_yaml_file(filepath)
def create_photonic_tech_info(bpg_config: Dict,
                              tech_info: "TechInfo",
                              ) -> "PhotonicTechInfo":
    """Create PhotonicTechInfo object from the photonic tech config referenced in *bpg_config*."""
    if 'photonic_tech_config_path' not in bpg_config:
        raise ValueError('photonic_tech_config_path not defined in bag_config.yaml.')

    photonic_tech_params = _parse_yaml_file(bpg_config['photonic_tech_config_path'])

    if 'photonic_tech_class' not in photonic_tech_params:
        # No user-specified class: fall back to a placeholder implementation.
        print('*WARNING*: No PhotonicTechInfo class defined. Using a dummy version.')
        return DummyPhotonicTechInfo(photonic_tech_params,
                                     tech_info.resolution,
                                     tech_info.layout_unit)

    photonic_tech_cls = _import_class_from_str(photonic_tech_params['photonic_tech_class'])
    return photonic_tech_cls(photonic_tech_params,
                             tech_info.resolution,
                             tech_info.layout_unit)
class PhotonicBagLayout(BagLayout):
"""
This class contains layout information of a cell.
Parameters
----------
grid : :class:`bag.layout.routing.RoutingGrid`
the routing grid instance.
use_cybagoa : bool
True to use cybagoa package to accelerate layout.
"""
def __init__(self, grid, use_cybagoa=False):
    """Initialize the photonic layout container on top of the base BagLayout."""
    BagLayout.__init__(self, grid, use_cybagoa)
    # Add new features to be supported in content list
    self._round_list: List["PhotonicRound"] = []
    # Simulation, source and monitor object lists; emitted with the raw
    # content in finalize() (presumably for Lumerical export — see lsf paths).
    self._sim_list = []
    self._source_list = []
    self._monitor_list = []
    # The angle to rotate this master by upon finalization
    self._mod_angle = 0
    # TODO: fix bound box during rotation
    # Initialize the boundary of this cell with zero area at the origin
    self._bound_box = BBoxMut(0, 0, 0, 0, resolution=self._res, unit_mode=True)
@property
def mod_angle(self):
    """Angle (radians, in [0, pi/2]) that this master is rotated by during finalization."""
    return self._mod_angle
@mod_angle.setter
def mod_angle(self, val):
    """Set the finalization rotation angle, rejecting values outside [0, pi/2]."""
    below_range = val < 0
    above_range = val > math.pi / 2
    if below_range or above_range:
        raise ValueError(f"Angle {val} is not in modulo format")
    self._mod_angle = val
@property
def bound_box(self) -> BBoxMut:
    """Mutable bounding box of this cell; grown in finalize() by merging object boxes."""
    return self._bound_box
def finalize(self):
    # type: () -> None
    """Prevent any further changes to this layout.

    Applies the pending ``mod_angle`` rotation, collects the content of all
    valid geometry objects into ``self._raw_content``, computes whether the
    cell is empty, and merges object bounding boxes into ``self._bound_box``.
    """
    # TODO: change this to be a 'close to 0' check
    if self.mod_angle != 0:
        self.rotate_all_by(self.mod_angle)

    self._finalized = True

    # Get rectangle content, warning about (and skipping) zero-area rectangles.
    rect_list = []
    for obj in self._rect_list:
        if obj.valid:
            if not obj.bbox.is_physical():
                print('WARNING: rectangle with non-physical bounding box found.', obj.layer)
            else:
                rect_list.append(obj.content)

    # Filter out invalid geometries from every other shape list.
    path_list = []
    polygon_list = []
    blockage_list = []
    boundary_list = []
    via_list = []
    round_list = []
    sim_list = []
    for targ_list, obj_list in ((path_list, self._path_list),
                                (polygon_list, self._polygon_list),
                                (blockage_list, self._blockage_list),
                                (boundary_list, self._boundary_list),
                                (via_list, self._via_list),
                                (round_list, self._round_list),
                                (sim_list, self._sim_list)
                                ):
        for obj in obj_list:
            if obj.valid:
                targ_list.append(obj.content)

    # Via primitives are already content; append them directly.
    via_list.extend(self._via_primitives)

    # Get instance content.
    inst_list = []  # type: List[InstanceInfo]
    for obj in self._inst_list:
        if obj.valid:
            inst_list.append(self._format_inst(obj))

    # Assemble raw content list from all categories. The ordering here is part
    # of the content-list format consumed downstream; do not change it.
    self._raw_content = [inst_list,
                         self._inst_primitives,
                         rect_list,
                         via_list,
                         self._pin_list,
                         path_list,
                         blockage_list,
                         boundary_list,
                         polygon_list,
                         round_list,
                         sim_list,
                         self._source_list,
                         self._monitor_list
                         ]

    # The cell is empty iff no category produced any content.
    self._is_empty = not (inst_list or self._inst_primitives or rect_list or
                          blockage_list or boundary_list or via_list or
                          self._pin_list or path_list or polygon_list or
                          round_list or self._sim_list or self._source_list or
                          self._monitor_list)

    # Calculate the bounding box for the overall layout.
    for inst in self._inst_list:
        self._bound_box.merge(inst.bound_box)
    for rect in self._rect_list:
        self._bound_box.merge(rect.bound_box)
    if self._via_list:
        logging.warning("vias are currently not considered in master bounding box calculations")
    # if self._pin_list != []:
    #     logging.warning("pins are currently not considered in master bounding box calculations")
    for path in self._path_list:
        self._bound_box.merge(path.bound_box)
    if self._blockage_list:
        logging.warning("blockages are currently not considered in master bounding box calculations")
    if self._boundary_list:
        logging.warning("boundaries are currently not considered in master bounding box calculations")
    for poly in self._polygon_list:
        self._bound_box.merge(poly.bound_box)
    # 'rnd' avoids shadowing the builtin round(); the previous empty-list guard
    # around this loop was redundant.
    for rnd in self._round_list:
        self._bound_box.merge(rnd.bound_box)
def get_content(self,
lib_name: str,
cell_name: str,
rename_fun: Callable[[str], str],
) -> Union[ContentList, Tuple[str, 'cybagoa.PyOALayout']]:
"""
Returns a list describing geometries in this layout.
Parameters
----------
lib_name : str
the layout library name.
cell_name : str
the layout top level cell name.
rename_fun : Callable[[str], str]
the layout cell renaming function.
Returns
-------
content : Union[ContentList, Tuple[str, 'cybagoa.PyOALayout']]
a ContentList describing this | |
start: Time of the first log entry to be gathered. Defaults to 0.
:type start: int, optional
:param end: Time of the last log entry to be gathered. Defaults to current time.
:type end: int, optional
:param excludeInfo: Filter Info log level messages
:type excludeInfo: bool, optional
:param excludeWarning: Filter Warning log level messages
:type excludeWarning: bool, optional
:param excludeCritical: Filter Critical log level messages
:type excludeCritical: bool, optional
:param excludeError: Filter Error log level messages
:type excludeError: bool, optional
:param username: The name of the executing user
:type username: str
:return: A dictionary containing the requested log messages
:rtype: dict
"""
device_manager_service = DeviceManagerService()
return {
'data':
device_manager_service.get_log(
start, end, {
'info': excludeInfo,
'warning': excludeWarning,
'critical': excludeCritical,
'error': excludeError
})
}
@app.get('/api/bookings')
def get_booking_list(start: int = 0,
                     end: int = 2**32 - 1,
                     username: str = Depends(decode_token)):
    """
    Fetch the registered bookings from the postgreSQL database.

    :param start: The time of the first booking to be gathered. Defaults to 0.
    :type start: int, optional
    :param end: The time of the last booking to be gathered.
    :type end: int, optional
    :param username: The name of the executing user
    :type username: str
    :return: A list of all bookings
    :rtype: List[booking_info]
    """
    service = DeviceManagerService()
    bookings = service.get_bookings(start, end)
    return {'data': bookings}
@app.post('/api/bookings')
def book_device(bookingInfo: BookingModel,
                username: str = Depends(decode_token)):
    """
    Store a booking for a device in the postgreSQL database.

    :param bookingInfo: The booking information
    :type bookingInfo: BookingModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    booking_fields = (bookingInfo.name,
                      bookingInfo.user,
                      bookingInfo.device,
                      bookingInfo.start,
                      bookingInfo.end)
    DeviceManagerService().book_device(*booking_fields)
    return
@app.get('/api/bookings/device/{uuid}')
def get_device_booking_list(uuid: str,
                            start: int = 0,
                            # The previous default, datetime.now().timestamp(), was
                            # evaluated ONCE at import time, silently excluding any
                            # booking made after server start (and bound a float to an
                            # int-annotated parameter). Use the same far-future default
                            # as get_booking_list instead.
                            end: int = 2**32 - 1,
                            username: str = Depends(decode_token)):
    """
    Fetches a list of device bookings

    :param uuid: Internally assigned device uuid
    :type uuid: str
    :param start: The time of the first booking entry to be gathered. Defaults to 0.
    :type start: int, optional
    :param end: The time of the last booking entry to be gathered. Defaults to the
        maximum supported timestamp.
    :type end: int, optional
    :param username: The name of the executing user
    :type username: str
    :return: List of booking information objects for the device
    :rtype: List[BookingInfoWithNames]
    """
    device_manager_service = DeviceManagerService()
    return {
        'data': device_manager_service.get_device_bookings(uuid, start, end)
    }
@app.get('/api/bookings/{bookingID}')
def get_booking(bookingID: int, username: str = Depends(decode_token)):
    """
    Look up a single booking by its id.

    :param bookingID: The id of the requested booking
    :type bookingID: int
    :param username: The name of the executing user
    :type username: str
    :return: Information of the booking
    :rtype: BookingInfo
    """
    service = DeviceManagerService()
    entry = service.get_booking_entry(bookingID)
    return {'data': entry}
@app.delete('/api/bookings/{bookingID}')
def delete_booking(bookingID: int, username: str = Depends(decode_token)):
    """
    Delete a booking by id; only the owner or an administrator may do so.

    :param bookingID: The booking id
    :type bookingID: int
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    current_user = user.get_user_by_name(username)
    service = DeviceManagerService()
    entry = service.get_booking_entry(bookingID)
    is_owner = entry.user == current_user.id
    is_admin = current_user.role == 'admin'
    if not (is_owner or is_admin):
        raise HTTPException(
            403,
            "Can't delete the booking entry. Only the owning user or an administrator can delete a booking entry"
        )
    service.delete_booking_entry(bookingID)
    return
@app.get('/api/experiments')
def get_experiments(username: str = Depends(decode_token)):
    """
    List every stored experiment with its corresponding information.

    :param username: The name of the executing user
    :type username: str
    :return: A list of experiments
    :rtype: List[Experiment]
    """
    service = DeviceManagerService()
    return {'data': service.get_all_experiments()}
@app.post('/api/experiments')
def create_experiment(experiment: ExperimentBookingModel,
                      username: str = Depends(decode_token)):
    """
    Persist a new experiment in the postgreSQL database.

    :param experiment: The new experiment
    :type experiment: ExperimentBookingModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    owner_id = user.get_user_by_name(username).id
    service = DeviceManagerService()
    service.create_experiment(experiment.name, experiment.start,
                              experiment.end, owner_id, experiment.devices,
                              experiment.scriptID)
    return
@app.put('/api/experiments/edit/{experimentID}')
def edit_experiment(experimentID: int,
                    experiment: ExperimentBookingModel,
                    username: str = Depends(decode_token)):
    """
    Edit an already existing experiment.

    :param experimentID: The internal experiment id
    :type experimentID: int
    :param experiment: The experiment information object
    :type experiment: ExperimentBookingModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    # Removed leftover debug print of the full experiment payload; it leaked
    # request data to stdout on every edit.
    userID = user.get_user_by_name(username).id
    device_manager_service = DeviceManagerService()
    device_manager_service.edit_experiment(experimentID, experiment.name,
                                           experiment.start,
                                           experiment.end, userID,
                                           experiment.devices,
                                           experiment.scriptID)
    return
@app.delete('/api/experiments/{experimentID}')
def delete_experiment(experimentID: int,
                      username: str = Depends(decode_token)):
    """
    Remove an experiment; only the owner or an administrator may do so.

    :param experimentID: The internal id of the experiment to be deleted
    :type experimentID: str
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    current_user = user.get_user_by_name(username)
    service = DeviceManagerService()
    owner = get_experiment_user(experimentID)
    is_admin = current_user.role == 'admin'
    if owner != current_user.id and not is_admin:
        raise HTTPException(
            403,
            "Can't delete the experiment. Only the owning user or an administrator can delete an experiment"
        )
    service.delete_experiment(experimentID)
    return
@app.get('/api/scripts')
def get_user_scripts_info(username: str = Depends(decode_token)):
    """
    List the information of all user-scripts owned by the requesting user.

    :param username: The name of the executing user
    :type username: str
    :return: A list of script objects with content and information
    :rtype: List[Script]
    """
    current_user = user.get_user_by_name(username)
    service = DeviceManagerService()
    return {'data': service.get_user_scripts_info(current_user.id)}
@app.get('/api/scripts/{scriptID}')
def get_user_script(scriptID: int, username: str = Depends(decode_token)):
    """
    Fetch one user-script; only the owner or an administrator may read it.

    :param scriptID: The internally assigned script id
    :type scriptID: int
    :param username: The name of the executing user
    :type username: str
    :return: The script object containing the scripts content and information
    :rtype: Script
    """
    service = DeviceManagerService()
    current_user = user.get_user_by_name(username)
    info = service.get_user_script_info(scriptID)
    owns_script = info.user == current_user.id
    if not owns_script and current_user.role != 'admin':
        raise HTTPException(
            403,
            "Can't get the script. Only the owning user or an administrator can get a script"
        )
    return service.get_user_script(scriptID)
@app.post('/api/scripts')
def upload_user_script(script: ScriptModel,
                       username: str = Depends(decode_token)):
    """
    Store a new script object in the postgreSQL database.

    :param script: The script object containing content and additional information
    :type script: ScriptModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    current_user = user.get_user_by_name(username)
    service = DeviceManagerService()
    service.create_user_script(script.name, script.fileName, current_user.id,
                               script.data)
    return
@app.delete('/api/scripts/{scriptID}')
def delete_user_script(scriptID: int, username: str = Depends(decode_token)):
    """
    Delete a user-script; only the owner or an administrator may do so.

    :param scriptID: The id of the script
    :type scriptID: str
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    service = DeviceManagerService()
    current_user = user.get_user_by_name(username)
    info = service.get_user_script_info(scriptID)
    owns_script = info.user == current_user.id
    if not owns_script and current_user.role != 'admin':
        raise HTTPException(
            403,
            "Can't delete the script. Only the owning user or an administrator can delete a script"
        )
    service.delete_user_script(scriptID)
    return
@app.put('/api/scripts/{scriptID}/info')
def set_user_script_info(scriptID: int,
                         info: ScriptInfoModel,
                         username: str = Depends(decode_token)):
    """
    Update a script's metadata; only the owner or an administrator may do so.

    :param scriptID: The id of the script
    :type scriptID: int
    :param info: The object containing information and content of the script
    :type info: ScriptInfoModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    service = DeviceManagerService()
    current_user = user.get_user_by_name(username)
    stored_info = service.get_user_script_info(scriptID)
    owns_script = stored_info.user == current_user.id
    if not owns_script and current_user.role != 'admin':
        raise HTTPException(
            403,
            "Can't modify the script. Only the owning user or an administrator can modify a script"
        )
    service.set_user_script_info(scriptID, info.name, info.fileName,
                                 current_user.id)
    return
@app.put('/api/scripts/{scriptID}/')
def set_user_script(scriptID: int,
                    script: ScriptModel,
                    username: str = Depends(decode_token)):
    """
    Replace a script's content; only the owner or an administrator may do so.

    :param scriptID: The id of the script
    :type scriptID: int
    :param script: The object containing the script content and information
    :type script: ScriptModel
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    service = DeviceManagerService()
    current_user = user.get_user_by_name(username)
    stored_info = service.get_user_script_info(scriptID)
    owns_script = stored_info.user == current_user.id
    if not owns_script and current_user.role != 'admin':
        raise HTTPException(
            403,
            "Can't modify the script. Only the owning user or an administrator can modify a script"
        )
    service.set_user_script(scriptID, script.name, script.fileName,
                            current_user.id, script.data)
    return
@app.put('/api/experiments/{experimentID}/status')
async def control_experiment(status: Status,
                             experimentID: int,
                             username: str = Depends(decode_token)):
    """
    Start or stop an experiment depending on the requested status.

    :param status: The status whether the experiment is running or not
    :type status: bool
    :param experimentID: The id of the experiment
    :type experimentID: int
    :param username: The name of the executing user
    :type username: str
    :return: None
    """
    # Dispatch to the matching coroutine based on the requested state.
    action = start_experiment if status.running else stop_experiment
    await action(experimentID)
    return
# Todo allow authentication !
@app.websocket("/ws/experiments_status")
async def experiment_status_websocket(
websocket: WebSocket): # , username:str = Depends(decode_token)):
"""
Asynchronous function that forwards the experiment status via websocket
:param websocket: The websocket the information is transferred by
:type websocket: Websocket
:return: None
"""
pool = await get_redis_pool()
channels = await pool.subscribe('experiment_status')
| |
return m
class SpatialCrop:
    """
    General purpose cropper to produce a sub-volume region of interest (ROI).

    If a dimension of the expected ROI size is bigger than the input image size,
    that dimension is not cropped; the cropped result may therefore be smaller
    than the requested ROI, and crops of several images may differ in shape.
    Supports ND spatial (channel-first) data.

    The cropped region can be parameterised either by a spatial center plus a
    size, or by the start and end coordinates of the ROI.
    """
    def __init__(
        self,
        roi_center: Union[Sequence[int], np.ndarray, None] = None,
        roi_size: Union[Sequence[int], np.ndarray, None] = None,
        roi_start: Union[Sequence[int], np.ndarray, None] = None,
        roi_end: Union[Sequence[int], np.ndarray, None] = None,
    ) -> None:
        """
        Args:
            roi_center: voxel coordinates for center of the crop ROI.
            roi_size: size of the crop ROI, if a dimension of ROI size is bigger than image size,
                will not crop that dimension of the image.
            roi_start: voxel coordinates for start of the crop ROI.
            roi_end: voxel coordinates for end of the crop ROI, if a coordinate is out of image,
                use the end coordinate of image.

        Raises:
            ValueError: when neither (roi_center, roi_size) nor (roi_start, roi_end)
                is fully specified.
        """
        # Bug fix: coordinates were previously cast to np.int16, which silently
        # overflows for any coordinate > 32767 (common for large volumes);
        # int64 holds any realistic voxel coordinate.
        if roi_center is not None and roi_size is not None:
            roi_center = np.asarray(roi_center, dtype=np.int64)
            roi_size = np.asarray(roi_size, dtype=np.int64)
            roi_start_np = np.maximum(roi_center - np.floor_divide(roi_size, 2), 0)
            roi_end_np = np.maximum(roi_start_np + roi_size, roi_start_np)
        else:
            if roi_start is None or roi_end is None:
                raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.")
            roi_start_np = np.maximum(np.asarray(roi_start, dtype=np.int64), 0)
            roi_end_np = np.maximum(np.asarray(roi_end, dtype=np.int64), roi_start_np)
        # Allow for 1D by converting back to np.array (since np.maximum will convert to int)
        roi_start_np = roi_start_np if isinstance(roi_start_np, np.ndarray) else np.array([roi_start_np])
        roi_end_np = roi_end_np if isinstance(roi_end_np, np.ndarray) else np.array([roi_end_np])
        # Pre-compute one slice per spatial dimension.
        self.slices = [slice(s, e) for s, e in zip(roi_start_np, roi_end_np)]
    def __call__(self, img: Union[np.ndarray, torch.Tensor]):
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        sd = min(len(self.slices), len(img.shape[1:]))  # spatial dims
        slices = [slice(None)] + self.slices[:sd]
        return img[tuple(slices)]
class CenterSpatialCrop:
    """
    Crop at the center of image with specified ROI size.

    If a dimension of the expected ROI size is bigger than the input image size,
    that dimension is not cropped; the cropped result may therefore be smaller
    than the requested ROI.

    Args:
        roi_size: the spatial size of the crop region e.g. [224,224,128]
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            If its components have non-positive values, the corresponding size of input image will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
    """
    def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
        self.roi_size = roi_size
    def __call__(self, img: np.ndarray):
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.

        Raises:
            ValueError: when ``img`` is not 4-dimensional (channel, W, H, D).
        """
        # Bug fix: ``assert`` is stripped under ``python -O``; raise instead
        # (message translated to English from the original Chinese).
        if img.ndim != 4:
            raise ValueError("img must have 4 dims: (channel, W, H, D)")
        spatial_shape = img.shape[1:]
        roi_size = self.roi_size
        if isinstance(roi_size, int):
            roi_size = [roi_size] * len(spatial_shape)
        # Bug fix: implement the documented fallback — non-positive entries of
        # ``roi_size`` use the corresponding input image dimension.
        roi_size = [r if r > 0 else s for r, s in zip(roi_size, spatial_shape)]
        center = [i // 2 for i in spatial_shape]
        cropper = SpatialCrop(roi_center=center, roi_size=roi_size)
        return cropper(img)
def map_binary_to_indices(
    label: np.ndarray,
    image: Optional[np.ndarray] = None,
    image_threshold: float = 0.0,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the flattened foreground and background indices of label data.

    For example:
    ``label = np.array([[[0, 1, 1], [1, 0, 1], [1, 1, 0]]])``
    ``foreground indices = np.array([1, 2, 3, 5, 6, 7])`` and ``background indices = np.array([0, 4, 8])``

    Args:
        label: use the label data to get the foreground/background information.
        image: if image is not None, use ``label = 0 & image > image_threshold``
            to define background, so the output items will not map to all the
            voxels in the label.
        image_threshold: if `image` is given, use ``image > image_threshold``
            to determine the valid image content area and select background
            only inside this area.
    """
    # One-Hot data carries an explicit background channel at index 0 — drop it.
    if label.shape[0] > 1:
        label = label[1:]
    # Collapse the channel dim, then flatten to 1-D spatial indices.
    fg_mask = np.any(label, axis=0).ravel()
    fg_indices = np.nonzero(fg_mask)[0]
    if image is None:
        bg_indices = np.nonzero(~fg_mask)[0]
    else:
        # Background is restricted to voxels with actual image content.
        valid_mask = np.any(image > image_threshold, axis=0).ravel()
        bg_indices = np.nonzero(valid_mask & ~fg_mask)[0]
    return fg_indices, bg_indices
class RandCropByPosNegLabel:
"""
Crop random fixed sized regions with the center being a foreground or background voxel
based on the Pos Neg Ratio.
And will return a list of arrays for all the cropped images.
For example, crop two (3 x 3) arrays from (5 x 5) array with pos/neg=1::
[[[0, 0, 0, 0, 0],
[0, 1, 2, 1, 0], [[0, 1, 2], [[2, 1, 0],
[0, 1, 3, 0, 0], --> [0, 1, 3], [3, 0, 0],
[0, 0, 0, 0, 0], [0, 0, 0]] [0, 0, 0]]
[0, 0, 0, 0, 0]]]
If a dimension of the expected spatial size is bigger than the input image size,
will not crop that dimension. So the cropped result may be smaller than expected size, and the cropped
results of several images may not have exactly same shape.
Args:
spatial_size: the spatial size of the crop region e.g. [224, 224, 128].
if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
if its components have non-positive values, the corresponding size of `label` will be used.
for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`,
the spatial size of output data will be [32, 40, 40].
label: the label image that is used for finding foreground/background, if None, must set at
`self.__call__`. Non-zero indicates foreground, zero indicates background.
pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability
to pick a foreground voxel as a center rather than a background voxel.
neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability
to pick a foreground voxel as a center rather than a background voxel.
num_samples: number of samples (crop regions) to take in each list.
image: optional image data to help select valid area, can be same as `img` or another image array.
if not None, use ``label == 0 & image > image_threshold`` to select the negative
sample (background) center. So the crop center will only come from the valid image areas.
image_threshold: if enabled `image`, use ``image > image_threshold`` to determine
the valid image content areas.
fg_indices: if provided pre-computed foreground indices of `label`, will ignore above `image` and
`image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
a typical usage is to call `FgBgToIndices` transform first and cache the results.
bg_indices: if provided pre-computed background indices of `label`, will ignore above `image` and
`image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
a typical usage is to call `FgBgToIndices` transform first and cache the results.
Raises:
ValueError: When ``pos`` or ``neg`` are negative.
ValueError: When ``pos=0`` and ``neg=0``. Incompatible values.
"""
def __init__(
self,
spatial_size: Union[Sequence[int], int],
label: Optional[np.ndarray] = None,
pos: float = 1.0,
neg: float = 1.0,
num_samples: int = 1,
image: Optional[np.ndarray] = None,
image_threshold: float = 0.0,
random_state: np.random.RandomState = None,
) -> None:
self.spatial_size = spatial_size
self.label = label
if pos < 0 or neg < 0:
raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
| |
#! python
from math import isclose, pi, sqrt

from behave import given, then, when

from renderer.bolts import Color, Point, Tuple, Vector
from renderer.matrix import IdentityMatrix
EPSILON = 0.0001
#def determineValue(stringval):
# if stringval == 'π':
# return pi
# else:
# return float(stringval)
#def determineNumeric(stringval):
# fractional = stringval.split('/')
# assert len(fractional) < 3
# denominator = 1
# numerator = 1
# if len(fractional) == 2:
# denominator = determineValue(fractional[1])
# sqrtsplit = fractional[0].split('√')
# assert len(sqrtsplit) < 3
# if len(sqrtsplit) == 2:
# numerator *= sqrt(determineValue(sqrtsplit[1]))
# if len(sqrtsplit[0]) > 0:
# if len(sqrtsplit[0]) == 1 and sqrtsplit[0][0] == '-':
# numerator *= -1
# else:
# numerator *= determineValue(sqrtsplit[0])
# return numerator / denominator
# --- Steps: raw Tuple construction and component accessors ---------------
@given(u'{var:w} ← tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var, x, y, z, w):
    # Build a raw Tuple and store it under the scenario-scoped name ``var``.
    print(u'STEP: {} ← tuple({}, {}, {}, {})'.format(var, x, y, z, w))
    context.result[var] = Tuple(x,y,z,w)
    pass
@then(u'{var:w}.x = {val:g}')
def step_impl(context, var, val):
    # Index 0 of the stored tuple holds the x component.
    print(u'STEP: THEN {}.x = {}'.format(var,val))
    assert isclose(context.result[var][0], val), 'Expected {}, got {}'.format(val, context.result[var][0])
@then(u'{var:w}.y = {val:g}')
def step_impl(context, var, val):
    # Index 1 of the stored tuple holds the y component.
    print(u'STEP: THEN {}.y = {}'.format(var,val))
    assert isclose(context.result[var][1], val), 'Expected {}, got {}'.format(val, context.result[var][1])
@then(u'{var:w}.z = {val:g}')
def step_impl(context, var, val):
    # Index 2 of the stored tuple holds the z component.
    print(u'STEP: THEN {}.z = {}'.format(var,val))
    assert isclose(context.result[var][2], val), 'Expected {}, got {}'.format(val, context.result[var][2])
@then(u'{var:w}.w = {val:g}')
def step_impl(context, var, val):
    # Index 3 of the stored tuple holds the w component.
    print(u'STEP: THEN {}.w = {}'.format(var,val))
    assert isclose(context.result[var][3], val), 'Expected {}, got {}'.format(val, context.result[var][3])
# --- Steps: point/vector predicates and tuple arithmetic -----------------
@then(u'{var:w} is a point')
def step_impl(context, var):
    print(u'STEP: THEN {} is a point'.format(var))
    assert context.result[var].isPoint(), 'Expected isPoint({}), got False'.format(context.result[var])
@then(u'{var:w} is not a vector')
def step_impl(context, var):
    print(u'STEP: THEN {} is not a vector'.format(var))
    assert not context.result[var].isVector(), 'Expected !isVector({}), got True'.format(var)
@then(u'{var:w} is not a point')
def step_impl(context, var):
    print(u'STEP: THEN {} is not a point'.format(var))
    assert not context.result[var].isPoint(), 'Expected !isPoint({}), got True'.format(var)
@then(u'{var:w} is a vector')
def step_impl(context, var):
    print(u'STEP: THEN {} is a vector'.format(var))
    assert context.result[var].isVector(), 'Expected !isVector({}), got False'.format(var)
@given(u'{var:w} ← point({x:g}, {y:g}, {z:g})')
def step_impl(context, var, x, y, z):
    # Build a Point and store it under ``var``.
    print(u'STEP: {} ← point({}, {}, {})'.format(var, x, y, z))
    #SetupContext(context)
    context.result[var] = Point(x,y,z)
    pass
@then(u'{var1:w} + {var2:w} = tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var1, var2, x, y, z, w):
    # Addition of two stored tuples must equal the literal tuple.
    print(u'STEP: {} + {} = tuple({},{},{},{})'.format(var1, var2, x, y, z, w))
    expected = Tuple(x,y,z,w)
    result = context.result[var1] + context.result[var2]
    assert expected == result, 'Expected {} == {} + {} ({} = {} + {})'.format(expected,var1, var2,result,context.result[var1],context.result[var2])
@then(u'{var1:w} * {scalar:g} = tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var1, scalar, x, y, z, w):
    # Scalar multiplication of a stored tuple.
    print(u'STEP: {} * {} = tuple({}, {}, {}, {})'.format(var1, scalar, x, y, z, w))
    expected = Tuple(x,y,z,w)
    result = context.result[var1] * scalar
    assert expected == result, 'Expected {} == {} * {} ({} = {} * {})'.format(expected,var1,scalar,result,context.result[var1],scalar)
@then(u'{var1:w} / {scalar:g} = tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var1, scalar, x, y, z, w):
    # Scalar division of a stored tuple.
    print(u'STEP: {} / {} = tuple({}, {}, {}, {})'.format(var1, scalar, x, y, z, w))
    expected = Tuple(x,y,z,w)
    result = context.result[var1] / scalar
    assert expected == result, 'Expected {} == {} / {} ({} = {} / {})'.format(expected,var1,scalar,result,context.result[var1],scalar)
@then(u'-{var1:w} = tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var1, x, y, z, w):
    # Negation of a stored tuple.
    print(u'STEP: -{} = tuple({}, {}, {}, {})'.format(var1, x, y, z, w))
    expected = Tuple(x,y,z,w)
    result = -context.result[var1]
    assert expected == result, 'Expected {} == -{} ({})'.format(expected,var1,result)
@then(u'{var1:w} = tuple({x:g}, {y:g}, {z:g}, {w:g})')
def step_impl(context, var1, x, y, z, w):
    # Assert that the stored tuple ``var1`` equals the literal tuple.
    print(u'STEP: {} = tuple({}, {}, {}, {})'.format(var1, x, y, z, w))
    assert var1 in context.result, 'Tuple {} not found in context'.format(var1)
    expected = Tuple(x,y,z,w)
    # Bug fix: the failure message previously printed context.result['p']
    # (a hard-coded key) instead of the tuple actually under test.
    assert expected == context.result[var1], 'Expected {} == {} ({})'.format(expected,var1, context.result[var1])
@then(u'{var1:w} - {var2:w} = vector({x:g}, {y:g}, {z:g})')
def step_impl(context, var1, var2, x, y, z):
    # Subtraction of two stored values must yield the literal vector.
    print(u'STEP: {} - {} = vector({}, {}, {})'.format(var1, var2, x, y, z))
    expected = Vector(x,y,z)
    assert var1 in context.result, 'Expected to find {} in context'.format(var1)
    assert var2 in context.result, 'Expected to find {} in context'.format(var2)
    result = context.result[var1] - context.result[var2]
    assert expected == result, 'Expected {} == {} - {} ({} = {} - {})'.format(expected,var1,var2,result,context.result[var1],context.result[var2])
@then(u'{var1:w} - {var2:w} = point({x:g}, {y:g}, {z:g})')
def step_impl(context, var1, var2, x, y, z):
    # Subtraction of two stored values must yield the literal point.
    # Bug fix: the trace message said "vector" although this step checks a point.
    print(u'STEP: {} - {} = point({}, {}, {})'.format(var1, var2, x, y, z))
    expected = Point(x,y,z)
    result = context.result[var1] - context.result[var2]
    assert expected == result, 'Expected {} == {} - {} ({} = {} - {})'.format(expected,var1,var2,result,context.result[var1],context.result[var2])
# --- Steps: magnitude, normalization, dot and cross products -------------
@then(u'magnitude({var1:w}) = {val:S}')
def step_impl(context, var1, val):
    # ``val`` may contain √ / fraction notation; parsed by the shared helper.
    print(u'STEP: magnitude({}) = {}'.format(var1,val))
    expected = context.helpers['determineNumeric'](val)
    result = context.result[var1].magnitude()
    assert isclose(expected, result), 'Expected {} = magnitude({}) = {}'.format(expected, var1, result)
@then(u'normalize({var1:w}) = vector({x:g}, {y:g}, {z:g})')
def step_impl(context, var1, x, y, z):
    print(u'STEP: normalize({}) = vector({}, {}, {})'.format(var1, x, y, z))
    expected = Vector(x,y,z)
    result = context.result[var1].normalize()
    assert expected == result, 'Expected {} == normalize({}) = {}'.format( expected, var1, result )
@then(u'normalize({var1:w}) = approximately vector({x:g}, {y:g}, {z:g})')
def step_impl(context,var1,x,y,z):
    # Approximate comparison with an explicit tolerance instead of equality.
    print(u'STEP: normalize({}) = approximately vector({}, {}, {})'.format(var1,x,y,z))
    expected = Vector(x,y,z)
    result = context.result[var1].normalize()
    assert expected.compare(result,0.00001), 'Expected {} == normalize({}) = {}'.format( expected, var1, result )
@when(u'{var1:w} ← normalize({var2:w})')
def step_impl(context, var1, var2):
    # Store the normalized value of ``var2`` under ``var1``.
    print(u'STEP: {} ← normalize({})'.format(var1,var2))
    context.result[var1] = context.result[var2].normalize()
    pass
@then(u'dot({var1:w}, {var2:w}) = {val:g}')
def step_impl(context,var1,var2,val):
    print(u'STEP: dot({},{}) = {}'.format(var1,var2,val))
    assert isclose(val,context.result[var1].dot(context.result[var2])), 'Expected {} == dot({},{}) = dot({},{})'.format( val, var1, var2, context.result[var1], context.result[var2])
@then(u'cross({var1:w}, {var2:w}) = vector({x:g}, {y:g}, {z:g})')
def step_impl(context,var1,var2,x,y,z):
    print(u'STEP: cross({},{}) = vector({},{},{})'.format(var1,var2,x,y,z))
    expected = Vector(x,y,z)
    result = context.result[var1].cross(context.result[var2])
    assert expected == result, 'Expected {} == cross({},{}) = cross({},{}) = {}'.format(expected,var1,var2,context.result[var1],context.result[var2],result)
# --- Steps: Color construction, channel access and addition --------------
@given(u'{var1:w} ← color({red:g}, {green:g}, {blue:g})')
def step_impl(context, var1, red, green, blue):
    # Build a Color and store it under ``var1``.
    print(u'STEP: {} ← color({}, {}, {})'.format(var1,red,green,blue))
    #SetupContext(context)
    context.result[var1] = Color(red,green,blue)
    pass
@then(u'{var1:w}.red = {val:g}')
def step_impl(context, var1, val):
    # Colors are indexed by channel name rather than position.
    print(u'STEP: {}.red = {}'.format(context.result[var1],val))
    assert isclose( context.result[var1]['red'], val ), 'Expected {} == {}.red = {}'.format( val, var1, context.result[var1] )
@then(u'{var1:w}.green = {val:g}')
def step_impl(context, var1, val):
    print(u'STEP: {}.green = {}'.format(context.result[var1],val))
    assert isclose( context.result[var1]['green'], val ), 'Expected {} == {}.green = {}'.format( val, var1, context.result[var1] )
@then(u'{var1:w}.blue = {val:g}')
def step_impl(context, var1, val):
    print(u'STEP: {}.blue = {}'.format(context.result[var1],val))
    assert isclose( context.result[var1]['blue'], val ), 'Expected {} == {}.blue = {}'.format( val, var1, context.result[var1] )
@then(u'{var1:w} + {var2:w} = color({r:g}, {g:g}, {b:g})')
def step_impl(context,var1,var2,r,g,b):
    print(u'STEP: {} + {} = color({}, {}, {})'.format(var1,var2,r,g,b))
    expected = Color(r,g,b)
    result = context.result[var1] + context.result[var2]
    assert expected == result, 'Expected {} == {} + {} = {}'.format(expected, var1, var2, result)
@then(u'{var1:w} - {var2:w} = color({r:g}, {g:g}, {b:g})')
def step_impl(context,var1,var2,r,g,b):
    # Subtraction of two stored colors must yield the literal color.
    print(u'STEP: {} - {} = color({}, {}, {})'.format(var1,var2,r,g,b))
    expected = Color(r,g,b)
    result = context.result[var1] - context.result[var2]
    # Bug fix: the failure message previously claimed "+" for this
    # subtraction step (copy-paste from the addition step above).
    assert expected == result, 'Expected {} == {} - {} = {}'.format(expected, var1, var2, result)
# --- Steps: color scaling/products, reflection and computation checks ----
@then(u'{var:w} * 2 = color({r:g}, {g:g}, {b:g})')
def step_impl(context, var, r, g, b):
    # NOTE(review): the scalar 2 is hard-coded in the step pattern; consider
    # a {scalar:g} capture if other factors are ever needed.
    scalar = 2
    print(u'STEP: {} * {} = color({}, {}, {})'.format(var,scalar,r,g,b))
    expected = Color(r,g,b)
    result = context.result[var] * scalar
    assert expected == result, 'Expected {} == {} * {} = {}'.format(expected,var,scalar,result)
@then(u'{var:w} = color({r:g}, {g:g}, {b:g})')
def step_impl(context, var, r, g, b):
    print(u'STEP: Then {} = color({}, {}, {})'.format(var, r, g, b))
    expected = Color(r,g,b)
    result = context.result[var]
    assert expected.compare(result), 'Expected {} to be {}, found it to be {} instead'.format( var, expected, result )
@then(u'{var1:w} * {var2:w} = color({r:g}, {g:g}, {b:g})')
def step_impl(context, var1, var2, r, g, b):
    # Channel-wise (Hadamard) product of two stored colors.
    print(u'STEP: {} * {} = color({}, {}, {})'.format(var1,var2,r,g,b))
    expected = Color(r,g,b)
    result = context.result[var1].multiply(context.result[var2])
    assert expected == result, 'Expected {} == {} * {} = {} * {} = {}'.format(expected, var1, var2, context.result[var1], context.result[var2], result)
@when(u'{result:w} ← reflect({vectorvar:w}, {normalvar:w})')
def step_impl(context, result, vectorvar, normalvar):
    # Reflect the stored vector around the stored normal and store the result.
    print(u'STEP: When {} ← reflect(v, n)'.format(result, vectorvar, normalvar))
    assert vectorvar in context.result
    assert normalvar in context.result
    context.result[result] = context.result[vectorvar].reflect(context.result[normalvar])
@then(u'{var:w} = vector({x:S}, {y:S}, {z:S})')
def step_impl(context, var, x, y, z):
    # Components may contain √ / fraction notation; parsed by the shared helper.
    print(u'STEP: Then {} = vector({}, {}, {})'.format(var, x, y, z))
    assert var in context.result
    expected = Vector( context.helpers['determineNumeric'](x), context.helpers['determineNumeric'](y), context.helpers['determineNumeric'](z) )
    result = context.result[var]
    assert expected.compare(result), 'Expected {} to be {}, but found it is {}'.format(var, expected, result)
@then(u'{var1:w} = normalize({var2:w})')
def step_impl(context, var1, var2):
    print(u'STEP: Then {} = normalize({})'.format(var1, var2))
    assert var1 in context.result
    assert var2 in context.result
    expected = context.result[var1]
    result = context.result[var2].normalize()
    assert expected == result, 'Expected normalize({}) = {}, found it is {} instead'.format(var2, expected, result)
@given(u'{resultvar:w} ← vector({x:S}, {y:S}, {z:S})')
def step_impl(context, resultvar, x, y, z):
    # Build a Vector (components may use √ / fraction notation) under ``resultvar``.
    print(u'STEP: Given {} ← vector({}, {}, {})'.format(resultvar, x, y, z))
    #SetupContext(context)
    context.result[resultvar] = Vector( context.helpers['determineNumeric'](x), context.helpers['determineNumeric'](y), context.helpers['determineNumeric'](z) )
@then(u'{compsvar:w}.point = point({x:S}, {y:S}, {z:S})')
def step_impl(context, compsvar, x, y, z):
    # Check the precomputed intersection data's ``point`` field.
    print(u'STEP: Then {}.point = point({}, {}, {})'.format(compsvar, x, y, z))
    assert compsvar in context.result
    expected = Point( context.helpers['determineNumeric'](x), context.helpers['determineNumeric'](y), context.helpers['determineNumeric'](z) )
    result = context.result[compsvar]['point']
    assert expected.compare(result), 'Expected computation {} point is {}, found it as {}'.format(compsvar, expected, result)
@then(u'{compsvar}.eyev = vector({x:S}, {y:S}, {z:S})')
def step_impl(context, | |
**kwargs):
"""
Compute standard deviation of groups
For multiple groupings, the result will be a MultiSet
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self._calculate_all(GB_FUNCTIONS.GB_STD, *args, **kwargs)
#---------------------------------------------------------------
def nanstd(self, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
"""
return self._calculate_all(GB_FUNCTIONS.GB_NANSTD, *args, **kwargs)
#---------------------------------------------------------------
def var(self, *args, **kwargs):
"""
Compute variance of groups
For multiple groupings, the result will be a MultiSet
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self._calculate_all(GB_FUNCTIONS.GB_VAR, *args, **kwargs)
#---------------------------------------------------------------
def nanvar(self, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result will be a MultiSet
"""
return self._calculate_all(GB_FUNCTIONS.GB_NANVAR, *args, **kwargs)
#---------------------------------------------------------------
def rolling_sum(self, *args, window=3, **kwargs):
"""rolling sum for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_SUM, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_nansum(self, *args, window=3, **kwargs):
"""rolling nan sum for each group
Parameters
----------
window: optional, window size, defaults to 3
Returns
-------
Dataset same rows as original dataset
"""
return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_NANSUM, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_mean(self, *args, window=3, **kwargs):
    """Rolling mean computed independently within each group.

    Parameters
    ----------
    window : int, optional
        Rolling window size; defaults to 3.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_MEAN, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_nanmean(self, *args, window=3, **kwargs):
    """Rolling mean within each group, ignoring missing (NaN) values.

    Parameters
    ----------
    window : int, optional
        Rolling window size; defaults to 3.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_NANMEAN, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_count(self, *args, window=3, **kwargs):
    """Rolling count computed independently within each group.

    Parameters
    ----------
    window : int, optional
        Rolling window size; defaults to 3.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_COUNT, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_shift(self, *args, window=1, **kwargs):
    """Shift values within each group by `window` rows.

    Parameters
    ----------
    window : int, optional
        Shift amount; defaults to 1. May be negative to shift in the
        opposite direction.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_SHIFT, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def rolling_diff(self, *args, window=1, **kwargs):
    """Rolling difference computed independently within each group.

    Parameters
    ----------
    window : int, optional
        Rolling window size; defaults to 1.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    return self._calculate_all(GB_FUNCTIONS.GB_ROLLING_DIFF, *args, func_param=(window), **kwargs)
#---------------------------------------------------------------
def cumcount(self, *args, ascending=True, **kwargs):
    """Cumulative count: number each item in each group from 0 to len(group) - 1.

    Despite the similarity with the rolling_* methods (and the shared kernel
    below), this is a per-group enumeration, not a windowed count.

    Parameters
    ----------
    ascending : bool, default True
        When False, items are numbered from len(group) - 1 down to 0.

    Returns
    -------
    A single array, same size as the original grouping dict/categorical.
    If a filter was applied, integer sentinels will appear in those slots.
    """
    # The engine reuses the rolling-count kernel; the sign of the parameter
    # selects the numbering direction.
    param = 1 if ascending else -1
    # cumcount doesn't need an origdict, pass it in empty
    result = self.grouping._calculate_all({}, GB_FUNCTIONS.GB_ROLLING_COUNT, func_param=(param), keychain=self.gb_keychain, **kwargs)
    return result
#---------------------------------------------------------------
def cumsum(self, *args, filter=None, reset_filter=None, **kwargs):
    """Cumulative sum within each group.

    Parameters
    ----------
    filter : optional boolean mask of rows to include; falls back to the
        groupby's stored filter when omitted.
    reset_filter : optional boolean mask marking rows where the running
        total restarts.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    active_filter = self._filter if filter is None else filter
    return self._calculate_all(GB_FUNCTIONS.GB_CUMSUM, *args, func_param=(0.0, None, active_filter, reset_filter), **kwargs)
#---------------------------------------------------------------
def cumprod(self, *args, filter=None, reset_filter=None, **kwargs):
    """Cumulative product within each group.

    Parameters
    ----------
    filter : optional boolean mask of rows to include; falls back to the
        groupby's stored filter when omitted.
    reset_filter : optional boolean mask marking rows where the running
        product restarts.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    active_filter = self._filter if filter is None else filter
    return self._calculate_all(GB_FUNCTIONS.GB_CUMPROD, *args, func_param=(0.0, None, active_filter, reset_filter), **kwargs)
#---------------------------------------------------------------
def findnth(self, *args, filter=None, **kwargs):
    """FindNth -- operates on the group bins and takes no compute arguments.

    Parameters
    ----------
    filter : optional boolean mask of rows to include; falls back to the
        groupby's stored filter when omitted.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.
    """
    active_filter = self._filter if filter is None else filter
    return self._calculate_all(GB_FUNCTIONS.GB_FINDNTH, *args, func_param=(0.0, None, active_filter, None), **kwargs)
#---------------------------------------------------------------
def _ema_op(self, function, *args, time=None, decay_rate=1.0, filter=None, reset_filter=None, **kwargs):
    """Shared driver for the time-based EMA groupby functions.

    Validates the time/filter arguments, then forwards everything to the
    calculation engine.

    Parameters
    ----------
    function : GB_FUNCTIONS member selecting the EMA kernel to run.
    time : float or int array used to calculate time differences.
    decay_rate : float; see the concrete EMA function's formula, used as a half life.
    filter : optional, boolean mask array of included rows.
    reset_filter : optional, boolean mask array marking where the EMA restarts.

    Returns
    -------
    Dataset with the same number of rows as the original dataset.

    Raises
    ------
    ValueError
        If no time array is supplied, or its length differs from the filter's.
    """
    if time is None:
        raise ValueError("The 'time' kwarg is required when calling ema functions")
    if filter is None:
        filter = self._filter
    if filter is not None and len(time) != len(filter):
        raise ValueError(f"The 'time' array length {len(time)} must match the length of the filter")
    return self._calculate_all(function, *args, func_param=(decay_rate, time, filter, reset_filter), **kwargs)
#---------------------------------------------------------------
def ema_decay(self, *args, time=None, decay_rate=None, filter=None, reset_filter=None, **kwargs):
    """Exponential-moving-average decay for each group.

    Formula
    -------
    grp indexes the groupby group; i indexes rows of the original dataset:
        Output[i] = Column[i] + LastEma[grp] * exp(-decay_rate * (Time[i] - LastTime[grp]))
        LastEma[grp] = Output[i]
        LastTime[grp] = Time[i]

    Parameters
    ----------
    time : float or int array used to calculate the time difference
    decay_rate : float, required; see formula, used as a half life
    filter : optional, boolean mask array of included rows
    reset_filter : optional, boolean mask array

    Returns
    -------
    Dataset with the same number of rows as the original dataset.

    Example
    -------
    >>> np.log(2)/(1e3*100)
    6.9314718055994526e-06
    >>> aapl.groupby('sym')['delta'].ema_decay(time=aapl.time, decay_rate=np.log(2)/(1e3*100))[0]
    FastArray([ -3.11271882, 207.42784495, 257.39155897])
    """
    if decay_rate is None:
        raise ValueError("ema_decay function requires a kwarg 'decay_rate' floating point value as input")
    return self._ema_op(GB_FUNCTIONS.GB_EMADECAY, *args, time=time, decay_rate=decay_rate, filter=filter, reset_filter=reset_filter, **kwargs)
#---------------------------------------------------------------
def ema_normal(self, *args, time=None, decay_rate=None, filter=None, reset_filter=None, **kwargs):
    """Normalized exponential moving average for each group.

    Formula
    -------
    grp indexes the groupby group; i indexes rows of the original dataset:
        decayedWeight = exp(-decayRate * (Time[i] - LastTime[grp]))
        LastEma[grp] = Column[i] * (1 - decayedWeight) + LastEma[grp] * decayedWeight
        Output[i] = LastEma[grp]
        LastTime[grp] = Time[i]

    Parameters
    ----------
    time : numpy array (float or int), required; used to calculate the time difference
    decay_rate : float, required; see formula, used as a half life
    filter : optional, boolean mask array of included rows
    reset_filter : optional, boolean mask array

    Returns
    -------
    Dataset with the same number of rows as the original dataset.

    Example
    -------
    >>> ds = rt.Dataset({'test': rt.arange(10), 'group2': rt.arange(10) % 3})
    >>> ds.normal = ds.gb('group2')['test'].ema_normal(decay_rate=1.0, time=rt.arange(10))['test']
    >>> ds.weighted = ds.gb('group2')['test'].ema_weighted(decay_rate=0.5)['test']

    See Also
    --------
    ema_weighted
    ema_decay
    """
    if decay_rate is None:
        raise ValueError('ema_normal function requires a decay_rate floating point value')
    if time is None:
        raise ValueError('ema_normal function requires a time array. Use the "time" kwarg')
    if not isinstance(time, np.ndarray):
        raise ValueError('ema_normal function requires a time numpy array.')
    # The kernel cannot handle narrow integer dtypes (bool/int8/uint8/int16/uint16,
    # i.e. dtype.num < 5); widen them to int32 first.
    if time.dtype.num < 5:
        time = time.astype(np.int32)
    return self._ema_op(GB_FUNCTIONS.GB_EMANORMAL, *args, time=time, decay_rate=decay_rate, filter=filter, reset_filter=reset_filter, **kwargs)
#---------------------------------------------------------------
def ema_weighted(self, *args, decay_rate = None, filter = None, reset_filter=None, **kwargs):
"""
Ema decay for each group with constant decay value (no time parameter)
Formula
-------
grp loops over each item in a groupby group
i loops over eachitem in the original dataset
LastEma[grp] = Column[i] * (1 - decay_rate) + LastEma[grp] * decay_rate
Output[i] = LastEma[grp]
Parameters
----------
time: <not used>
decay_rate: see formula, used a half life
filter: optional, boolean mask array of included
reset_filter: optional, boolean mask array
Returns
-------
Dataset same rows as original dataset
Example
-------
>>> ds = rt.Dataset({'test': rt.arange(10), 'group2': rt.arange(10) % 3})
>>> ds.normal = ds.gb('group2')['test'].ema_normal(decay_rate=1.0, time=rt.arange(10))['test']
>>> ds.weighted = ds.gb('group2')['test'].ema_weighted(decay_rate=0.5)['test']
>>> ds
# test group2 normal weighted
- ---- ------ ------ --------
0 0 0 0.00 0.00
1 1 1 1.00 1.00
2 2 2 2.00 2.00
3 3 0 2.85 1.50
4 4 1 | |
X):
"""
Predicts the output given an array of instances.
Parameters
----------
X : (n, d) array like
The covariates on which to predict
Returns
-------
predictions : {(n,) array, (n,p) array}
The predicted mean outcomes
"""
if X is None:
X = np.empty((1, 0))
if self.fit_intercept:
X = add_constant(X, has_constant='add')
return np.matmul(X, self._param)
@property
def coef_(self):
    """
    Get the model's coefficients on the covariates.

    Returns
    -------
    coef_ : {(d,), (p, d)} nd array like
        The coefficients of the variables in the linear regression. If label y
        was p-dimensional, then the result is a matrix of coefficients, whose p-th
        row contains the coefficients corresponding to the p-th coordinate of the label.
    """
    # Drop the leading intercept parameter when an intercept was fit.
    params = self._param[1:] if self.fit_intercept else self._param
    # Multi-output parameters are stored (d, p); expose them as (p, d).
    return params if self._n_out == 0 else params.T
@property
def intercept_(self):
    """
    Get the intercept(s) (or 0 if no intercept was fit).

    Returns
    -------
    intercept_ : float or (p,) nd array like
        The intercept of the linear regression. If label y was p-dimensional,
        then the result is a vector whose p-th entry contains the intercept
        corresponding to the p-th coordinate of the label.
    """
    if self.fit_intercept:
        return self._param[0]
    # No intercept was fit: report zero(s) of the appropriate shape.
    return 0 if self._n_out == 0 else np.zeros(self._n_out)
@property
def _param_stderr(self):
    """
    The standard error of each parameter that was estimated.

    Returns
    -------
    _param_stderr : {(d (+1),) (d (+1), p)} nd array like
        The standard error of each parameter that was estimated.
    """
    if self._n_out == 0:
        # Single output: variances are the diagonal of one covariance matrix.
        return np.sqrt(np.clip(np.diag(self._param_var), 0, np.inf))
    # Multiple outputs: one covariance matrix per output; stack errors as columns.
    stderrs = [np.sqrt(np.clip(np.diag(cov), 0, np.inf)) for cov in self._param_var]
    return np.array(stderrs).T
@property
def coef_stderr_(self):
    """
    Gets the standard error of the fitted coefficients.

    Returns
    -------
    coef_stderr_ : {(d,), (p, d)} nd array like
        The standard error of the coefficients.
    """
    stderr = self._param_stderr
    # Skip the intercept's standard error when an intercept was fit.
    return stderr[1:].T if self.fit_intercept else stderr.T
@property
def intercept_stderr_(self):
    """
    Gets the standard error of the intercept(s) (or 0 if no intercept was fit).

    Returns
    -------
    intercept_stderr_ : float or (p,) nd array like
        The standard error of the intercept(s).
    """
    if not self.fit_intercept:
        # No intercept was fit: its error is identically zero.
        return 0 if self._n_out == 0 else np.zeros(self._n_out)
    return self._param_stderr[0]
def prediction_stderr(self, X):
    """
    Gets the standard error of the predictions.

    Parameters
    ----------
    X : (n, d) array like
        The covariates at which to predict

    Returns
    -------
    prediction_stderr : (n, p) array like
        The standard error of each coordinate of the output at each point we predict
    """
    if X is None:
        X = np.empty((1, 0))
    if self.fit_intercept:
        X = add_constant(X, has_constant='add')
    if self._n_out == 0:
        # Var(x'b) = x' V x evaluated row-wise; clip guards tiny negative values.
        return np.sqrt(np.clip(np.sum(np.matmul(X, self._param_var) * X, axis=1), 0, np.inf))
    per_output = [np.sqrt(np.clip(np.sum(np.matmul(X, cov) * X, axis=1), 0, np.inf))
                  for cov in self._param_var]
    return np.array(per_output).T
def coef__interval(self, alpha=.05):
    """
    Gets a confidence interval bounding the fitted coefficients.

    Parameters
    ----------
    alpha : float
        The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
        of the parameter distribution as confidence interval

    Returns
    -------
    coef__interval : {tuple ((p, d) array, (p,d) array), tuple ((d,) array, (d,) array)}
        The lower and upper bounds of the confidence interval of the coefficients
    """
    # Evaluate the point estimates and standard errors once, rather than
    # recomputing both properties for each of the two bounds.
    coefs = self.coef_
    errs = self.coef_stderr_
    lower = np.array([_safe_norm_ppf(alpha / 2, loc=p, scale=err)
                      for p, err in zip(coefs, errs)])
    upper = np.array([_safe_norm_ppf(1 - alpha / 2, loc=p, scale=err)
                      for p, err in zip(coefs, errs)])
    return lower, upper
def intercept__interval(self, alpha=.05):
    """
    Gets a confidence interval bounding the intercept(s) (or 0 if no intercept was fit).

    Parameters
    ----------
    alpha : float
        The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
        of the parameter distribution as confidence interval

    Returns
    -------
    intercept__interval : {tuple ((p,) array, (p,) array), tuple (float, float)}
        The lower and upper bounds of the confidence interval of the intercept(s)
    """
    lo_q = alpha / 2
    hi_q = 1 - alpha / 2
    if not self.fit_intercept:
        # No intercept fit: both bounds collapse to zero(s).
        return (0 if self._n_out == 0 else np.zeros(self._n_out)), \
            (0 if self._n_out == 0 else np.zeros(self._n_out))
    if self._n_out == 0:
        return _safe_norm_ppf(lo_q, loc=self.intercept_, scale=self.intercept_stderr_), \
            _safe_norm_ppf(hi_q, loc=self.intercept_, scale=self.intercept_stderr_)
    return np.array([_safe_norm_ppf(lo_q, loc=p, scale=err)
                     for p, err in zip(self.intercept_, self.intercept_stderr_)]), \
        np.array([_safe_norm_ppf(hi_q, loc=p, scale=err)
                  for p, err in zip(self.intercept_, self.intercept_stderr_)])
def predict_interval(self, X, alpha=.05):
    """
    Gets a confidence interval bounding the prediction.

    Parameters
    ----------
    X : (n, d) array like
        The covariates on which to predict
    alpha : float
        The confidence level. Will calculate the alpha/2-quantile and the (1-alpha/2)-quantile
        of the parameter distribution as confidence interval

    Returns
    -------
    prediction_intervals : {tuple ((n,) array, (n,) array), tuple ((n,p) array, (n,p) array)}
        The lower and upper bounds of the confidence intervals of the predicted mean outcomes
    """
    # Evaluate predictions and their standard errors once; previously both
    # were recomputed for each of the two bounds.
    preds = self.predict(X)
    stderrs = self.prediction_stderr(X)
    lower = np.array([_safe_norm_ppf(alpha / 2, loc=p, scale=err)
                      for p, err in zip(preds, stderrs)])
    upper = np.array([_safe_norm_ppf(1 - alpha / 2, loc=p, scale=err)
                      for p, err in zip(preds, stderrs)])
    return lower, upper
class StatsModelsLinearRegression(_StatsModelsWrapper):
"""
Class which mimics weighted linear regression from the statsmodels package.
However, unlike statsmodels WLS, this class also supports sample variances in addition to sample weights,
which enables more accurate inference when working with summarized data.
Parameters
----------
fit_intercept : bool (optional, default=True)
Whether to fit an intercept in this model
cov_type : string (optional, default="HC0")
    The covariance-estimator type used for inference; a statsmodels-style 'cov_type' value (e.g. "HC0")
"""
def __init__(self, fit_intercept=True, cov_type="HC0"):
    """Store the configuration; no fitting happens here.

    Parameters
    ----------
    fit_intercept : bool (optional, default=True)
        Whether to fit an intercept in this model.
    cov_type : string (optional, default="HC0")
        The covariance-estimator type used for inference.
    """
    self.cov_type = cov_type
    self.fit_intercept = fit_intercept
def _check_input(self, X, y, sample_weight, freq_weight, sample_var):
    """Check dimensions and other assertions, and apply the sample weights.

    Fills in defaults for the optional arguments, validates that frequency
    weights are integral and consistent with the supplied sample variances,
    and returns the weight-scaled design matrix, outcomes and variances.

    Returns
    -------
    tuple (weighted_X, weighted_y, freq_weight, sample_var)

    Raises
    ------
    AttributeError
        If any frequency weight is not an integer.
    """
    if X is None:
        X = np.empty((y.shape[0], 0))
    if self.fit_intercept:
        X = add_constant(X, has_constant='add')
    # set default values for None
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])
    if freq_weight is None:
        freq_weight = np.ones(y.shape[0])
    if sample_var is None:
        sample_var = np.zeros(y.shape)
    # check freq_weight should be integer and should be accompanied by sample_var
    if np.any(np.not_equal(np.mod(freq_weight, 1), 0)):
        raise AttributeError("Frequency weights must all be integers for inference to be valid!")
    # Shared warning texts (typos in the previous messages fixed:
    # "sample_wegiht" -> "sample_weight", "one observations" -> "one observation").
    nonzero_var_msg = (
        "Variance was set to non-zero for an observation with freq_weight=1! "
        "sample_var represents the variance of the original observations that are "
        "summarized in this sample. Hence, cannot have a non-zero variance if only "
        "one observation was summarized. Inference will be invalid!")
    zero_var_msg = (
        "Variance was set to zero for an observation with freq_weight>1! "
        "sample_var represents the variance of the original observations that are "
        "summarized in this sample. If it's zero, please use sample_weight instead "
        "to reflect the weight for each individual sample!")
    if sample_var.ndim < 2:
        if np.any(np.equal(freq_weight, 1) & np.not_equal(sample_var, 0)):
            warnings.warn(nonzero_var_msg)
        elif np.any(np.not_equal(freq_weight, 1) & np.equal(sample_var, 0)):
            warnings.warn(zero_var_msg)
    else:
        if np.any(np.equal(freq_weight, 1) & np.not_equal(np.sum(sample_var, axis=1), 0)):
            warnings.warn(nonzero_var_msg)
        elif np.any(np.not_equal(freq_weight, 1) & np.equal(np.sum(sample_var, axis=1), 0)):
            warnings.warn(zero_var_msg)
    # check array shape
    assert (X.shape[0] == y.shape[0] == sample_weight.shape[0] ==
            freq_weight.shape[0] == sample_var.shape[0]), "Input lengths not compatible!"
    if y.ndim >= 2:
        assert (y.ndim == sample_var.ndim and
                y.shape[1] == sample_var.shape[1]), "Input shapes not compatible: {}, {}!".format(
            y.shape, sample_var.shape)
    # weight X and y and sample_var
    weighted_X = X * np.sqrt(sample_weight).reshape(-1, 1)
    if y.ndim < 2:
        weighted_y = y * np.sqrt(sample_weight)
        sample_var = sample_var * sample_weight
    else:
        weighted_y = y * np.sqrt(sample_weight).reshape(-1, 1)
        sample_var = sample_var * (sample_weight.reshape(-1, 1))
    return weighted_X, weighted_y, freq_weight, sample_var
def fit(self, X, y, sample_weight=None, freq_weight=None, sample_var=None):
"""
Fits the model.
Parameters
----------
X : (N, d) nd array like
co-variates
y : {(N,), (N, p)} nd array like
output variable(s)
sample_weight : (N,) array like or None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (N, ) array like of integers or None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(N,), (N, p)} nd array like or None
Variance of the outcome(s) of the | |
= elements['font-style']
if fontStyle == 'italic' or fontStyle == 'oblique':
font.slant = 0.2
elif fontStyle == 'normal':
font.slant = 0.0
if 'font-weight' in elements:
fontWeight = elements['font-weight']
if fontWeight == 'normal':
weight = 400
elif fontWeight == 'bold':
weight = 700
elif fontWeight == 'bolder':
weight = 900
elif fontWeight == 'lighter':
weight = 200
else:
weight = int(fontWeight)
font.embolden = (float(weight) - 400.0)/2666.0
if 'text-align' in elements:
font.alignment = textAlignMap[elements['text-align']]
if 'text-anchor' in elements:
font.alignment = textAnchorMap[elements['text-anchor']]
if 'line-height' in elements:
try:
font.lineHeight = float(elements['line-height'])
except:
font.lineHeight = sizeFromString(elements['line-height'], font.size)/font.size
if 'textLength' in elements:
font.maxLength = sizeFromString(elements['textLength'], width)
if 'inline-size' in elements:
font.maxLength = sizeFromString(elements['inline-size'], width)
if 'opacity' in elements:
opacity = float(elements['opacity'])
if not group and not fill and not stroke:
raise Exception("Shape doesn't have a stroke or a fill.")
if hasAny or opacity != 1.0:
return Style(fill, stroke, font, opacity)
return None
def write(self, builder):
    """Serialize this style's fill and/or stroke as vector commands.

    Appends a FillPathCommand and/or StrokePathCommand (whichever are present)
    to the flatbuffer being built and returns the list of command offsets.
    """
    commandOffsets = []
    if self.fill:
        fillMaterial = builder.CreateString(self.fill.material)
        FillPathCommand.Start(builder)
        FillPathCommand.AddMaterial(builder, fillMaterial)
        # Effective opacity combines the shape-level and fill-level opacities.
        FillPathCommand.AddOpacity(builder, self.fill.opacity*self.opacity)
        FillPathCommand.AddFillRule(builder, self.fill.fillRule)
        fillOffset = FillPathCommand.End(builder)
        VectorCommand.Start(builder)
        VectorCommand.AddCommandType(builder, VectorCommandUnion.FillPathCommand)
        VectorCommand.AddCommand(builder, fillOffset)
        commandOffsets.append(VectorCommand.End(builder))
    if self.stroke:
        strokeMaterial = builder.CreateString(self.stroke.material)
        StrokePathCommand.Start(builder)
        StrokePathCommand.AddMaterial(builder, strokeMaterial)
        StrokePathCommand.AddOpacity(builder, self.stroke.opacity*self.opacity)
        StrokePathCommand.AddJoinType(builder, self.stroke.join)
        StrokePathCommand.AddCapType(builder, self.stroke.cap)
        StrokePathCommand.AddWidth(builder, self.stroke.width)
        StrokePathCommand.AddMiterLimit(builder, self.stroke.miterLimit)
        dashes = self.stroke.dashArray
        StrokePathCommand.AddDashArray(builder, CreateDashArray(builder,
            dashes[0], dashes[1], dashes[2], dashes[3]))
        strokeOffset = StrokePathCommand.End(builder)
        VectorCommand.Start(builder)
        VectorCommand.AddCommandType(builder, VectorCommandUnion.StrokePathCommand)
        VectorCommand.AddCommand(builder, strokeOffset)
        commandOffsets.append(VectorCommand.End(builder))
    return commandOffsets
class TextRange:
    """A contiguous run of characters drawn with a single style.

    Attributes
    ----------
    start : index of the first character in the run
    count : number of characters in the run
    position : (x, y) position of the run
    positionType : how the position should be interpreted
    style : the Style used to draw the run
    """
    def __init__(self, start, count, position, positionType, style):
        # Plain data holder; store all fields verbatim.
        self.style = style
        self.positionType = positionType
        self.position = position
        self.count = count
        self.start = start
def writeStartPath(builder, transform, simple):
    """Emit a StartPathCommand and return a fresh offset list containing it."""
    StartPathCommand.Start(builder)
    StartPathCommand.AddTransform(builder, transform.createMatrix33f(builder))
    StartPathCommand.AddSimple(builder, simple)
    startOffset = StartPathCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.StartPathCommand)
    VectorCommand.AddCommand(builder, startOffset)
    return [VectorCommand.End(builder)]
def writeEllipse(builder, transform, style, center, radius):
    """Write an ellipse shape followed by its style commands."""
    commands = writeStartPath(builder, transform, True)
    EllipseCommand.Start(builder)
    EllipseCommand.AddCenter(builder, CreateVector2f(builder, center[0], center[1]))
    EllipseCommand.AddRadius(builder, CreateVector2f(builder, radius[0], radius[1]))
    ellipseOffset = EllipseCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.EllipseCommand)
    VectorCommand.AddCommand(builder, ellipseOffset)
    commands.append(VectorCommand.End(builder))
    commands.extend(style.write(builder))
    return commands
def writeImage(builder, transform, style, upperLeft, size, location):
    """Write an image reference command; the image is named by its file stem."""
    imageName = os.path.splitext(os.path.basename(location))[0]
    nameOffset = builder.CreateString(imageName)
    lowerRight = (upperLeft[0] + size[0], upperLeft[1] + size[1])
    ImageCommand.Start(builder)
    ImageCommand.AddImage(builder, nameOffset)
    ImageCommand.AddUpperLeft(builder, CreateVector2f(builder, upperLeft[0], upperLeft[1]))
    ImageCommand.AddLowerRight(builder, CreateVector2f(builder, lowerRight[0], lowerRight[1]))
    # Only the opacity is taken from the style for images.
    ImageCommand.AddOpacity(builder, 1.0 if not style else style.opacity)
    ImageCommand.AddTransform(builder, transform.createMatrix33f(builder))
    imageOffset = ImageCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.ImageCommand)
    VectorCommand.AddCommand(builder, imageOffset)
    return [VectorCommand.End(builder)]
def writeLines(builder, transform, style, points, closePath = False):
    """Write a polyline (optionally closed) as move/line commands plus style."""
    if not points:
        raise Exception("No points available.")
    offsets = writeStartPath(builder, transform, False)

    def finishCommand(commandType, commandOffset):
        # Wrap a concrete command in a VectorCommand union and record it.
        VectorCommand.Start(builder)
        VectorCommand.AddCommandType(builder, commandType)
        VectorCommand.AddCommand(builder, commandOffset)
        offsets.append(VectorCommand.End(builder))

    firstX, firstY = points[0]
    MoveCommand.Start(builder)
    MoveCommand.AddPosition(builder, CreateVector2f(builder, firstX, firstY))
    finishCommand(VectorCommandUnion.MoveCommand, MoveCommand.End(builder))
    for lineX, lineY in points[1:]:
        LineCommand.Start(builder)
        LineCommand.AddEnd(builder, CreateVector2f(builder, lineX, lineY))
        finishCommand(VectorCommandUnion.LineCommand, LineCommand.End(builder))
    if closePath:
        ClosePathCommand.Start(builder)
        finishCommand(VectorCommandUnion.ClosePathCommand, ClosePathCommand.End(builder))
    offsets.extend(style.write(builder))
    return offsets
def parsePointList(pointStr, size):
    """Parse an SVG point-list string into (x, y) tuples resolved against size."""
    tokens = re.findall(r"[-+0-9.e]+(?:[eE][-+]?[0-9]+)?(?:cm|mm|Q|in|pc|pt|em|px|%)?", pointStr)
    # Consume tokens pairwise; a dangling odd token is ignored, as before.
    return [(sizeFromString(xTok, size[0]), sizeFromString(yTok, size[1]))
            for xTok, yTok in zip(tokens[::2], tokens[1::2])]
def writePolygon(builder, transform, style, pointStr, size):
    """Write an SVG polygon (a closed polyline) from a point-list string."""
    return writeLines(builder, transform, style, parsePointList(pointStr, size), True)
def writePolyline(builder, transform, style, pointStr, size):
    """Write an SVG polyline (open) from a point-list string."""
    return writeLines(builder, transform, style, parsePointList(pointStr, size))
def writePath(builder, transform, style, path, size, diagonalSize):
    """Convert an SVG path data ('d' attribute) string into vector commands.

    Runs a small state machine over the tokenized path string: a command
    letter ([mMzZlLhHvVcCsSqQtTaAbB]) selects the current drawing mode, and
    the numbers that follow are consumed according to that mode. Lowercase
    commands are relative to the current pen position; uppercase absolute.

    Parameters
    ----------
    builder: flatbuffer builder the commands are appended to
    transform: transform applied to the whole path
    style: Style whose fill/stroke commands are appended after the geometry
    path: the SVG path data string
    size: (width, height) used to resolve coordinate units/percentages
    diagonalSize: reference length for unit resolution (unused in this body)

    Returns
    -------
    List of VectorCommand offsets for the path geometry plus its style.

    Raises
    ------
    Exception: for the unimplemented bearing ('b'/'B') commands.
    """
    offsets = writeStartPath(builder, transform, False)
    # Tokenize into command letters and numbers (numbers may carry a unit suffix).
    tokens = re.findall(
        r"[mMzZlLhHvVcCsSqQtTaAbB]|[-+]?[0-9.]+(?:[eE][-+]?[0-9]+)?" \
        "(?:cm|mm|Q|in|pc|pt|em|px|deg|grad|rad|turn|%)?", path)
    pos = (0.0, 0.0)  # current pen position
    lastControlPos = None  # last cubic control point, reflected by 's'/'S'
    lastQuadraticPos = None  # last quadratic control point, reflected by 't'/'T'
    index = 0
    command = ''
    while index < len(tokens):
        # A token starting with '.', '-', '+' or a digit is a coordinate for the
        # active command; any other token switches the active command (below).
        if tokens[index][0] == '.' or tokens[index][0] == '-' or tokens[index][0] == '+' or \
                (ord(tokens[index][0]) >= ord('0') and ord(tokens[index][0]) <= ord('9')):
            x = sizeFromString(tokens[index], size[0])
            index += 1
            # Bearing and horizontal/vertical commands take a single number;
            # everything else reads an x/y pair.
            if command != 'b' and command != 'B' and command != 'h' and command != 'H' and \
                    command != 'v' and command != 'V':
                y = sizeFromString(tokens[index], size[1])
                index += 1
            if command == 'm' or command == 'M':
                # Move: reposition the pen. Per the SVG spec, extra coordinate
                # pairs after a move are implicit line-to commands.
                if command == 'm':
                    pos = (pos[0] + x, pos[1] + y)
                    command = 'l'
                else:
                    pos = (x, y)
                    command = 'L'
                MoveCommand.Start(builder)
                MoveCommand.AddPosition(builder, CreateVector2f(builder, pos[0], pos[1]))
                commandOffset = MoveCommand.End(builder)
                VectorCommand.Start(builder)
                VectorCommand.AddCommandType(builder, VectorCommandUnion.MoveCommand)
                VectorCommand.AddCommand(builder, commandOffset)
                offsets.append(VectorCommand.End(builder))
            elif command == 'l' or command == 'L' or command == 'h' or command == 'H' or \
                    command == 'v' or command == 'V':
                # Straight lines: full (l/L), horizontal (h/H), vertical (v/V).
                if command == 'l':
                    pos = (pos[0] + x, pos[1] + y)
                elif command == 'L':
                    pos = (x, y)
                elif command == 'h':
                    pos = (pos[0] + x, pos[1])
                elif command == 'H':
                    pos = (x, pos[1])
                elif command == 'v':
                    # Note: the single number read goes into x for h/H/v/V.
                    pos = (pos[0], pos[1] + x)
                elif command == 'V':
                    pos = (pos[0], x)
                LineCommand.Start(builder)
                LineCommand.AddEnd(builder, CreateVector2f(builder, pos[0], pos[1]))
                commandOffset = LineCommand.End(builder)
                VectorCommand.Start(builder)
                VectorCommand.AddCommandType(builder, VectorCommandUnion.LineCommand)
                VectorCommand.AddCommand(builder, commandOffset)
                offsets.append(VectorCommand.End(builder))
            elif command == 'c' or command == 'C' or command == 's' or command == 'S':
                # Cubic Bezier. For the smooth forms (s/S) the first control
                # point is the reflection of the previous control point.
                if command == 's' or command == 'S':
                    if lastControlPos:
                        diff = (lastControlPos[0] - pos[0], lastControlPos[1] - pos[1])
                        control1 = (pos[0] - diff[0], pos[1] - diff[1])
                    else:
                        # No previous curve: control point coincides with the pen.
                        control1 = pos
                    if command == 's':
                        control2 = (pos[0] + x, pos[1] + y)
                    else:
                        control2 = (x, y)
                elif command == 'c':
                    control1 = (pos[0] + x, pos[1] + y)
                    x = sizeFromString(tokens[index], size[0])
                    index += 1
                    y = sizeFromString(tokens[index], size[1])
                    index += 1
                    control2 = (pos[0] + x, pos[1] + y)
                elif command == 'C':
                    control1 = (x, y)
                    x = sizeFromString(tokens[index], size[0])
                    index += 1
                    y = sizeFromString(tokens[index], size[1])
                    index += 1
                    control2 = (x, y)
                # Read the end point (always present for all four forms).
                x = sizeFromString(tokens[index], size[0])
                index += 1
                y = sizeFromString(tokens[index], size[1])
                index += 1
                if command == 'c' or command == 's':
                    end = (pos[0] + x, pos[1] + y)
                else:
                    end = (x, y)
                BezierCommand.Start(builder)
                BezierCommand.AddControl1(builder, CreateVector2f(builder, control1[0],
                    control1[1]))
                BezierCommand.AddControl2(builder, CreateVector2f(builder, control2[0],
                    control2[1]))
                BezierCommand.AddEnd(builder, CreateVector2f(builder, end[0], end[1]))
                commandOffset = BezierCommand.End(builder)
                VectorCommand.Start(builder)
                VectorCommand.AddCommandType(builder, VectorCommandUnion.BezierCommand)
                VectorCommand.AddCommand(builder, commandOffset)
                offsets.append(VectorCommand.End(builder))
                pos = end
                # Remember the trailing control point for a following 's'/'S'.
                lastControlPos = control2
            elif command == 'q' or command == 'Q' or command == 't' or command == 'T':
                # Quadratic Bezier. For the smooth forms (t/T) the control
                # point is the reflection of the previous quadratic control.
                if command == 't' or command == 'T':
                    if lastQuadraticPos:
                        diff = (lastQuadraticPos[0] - pos[0], lastQuadraticPos[1] - pos[1])
                        control = (pos[0] - diff[0], pos[1] - diff[1])
                    else:
                        control = pos
                else:
                    # q/Q: the pair already read is the control point; the end
                    # point follows.
                    if command == 'q':
                        control = (pos[0] + x, pos[1] + y)
                    elif command == 'Q':
                        control = (x, y)
                    x = sizeFromString(tokens[index], size[0])
                    index += 1
                    y = sizeFromString(tokens[index], size[1])
                    index += 1
                if command == 'q' or command == 't':
                    end = (pos[0] + x, pos[1] + y)
                else:
                    end = (x, y)
                QuadraticCommand.Start(builder)
                QuadraticCommand.AddControl(builder, CreateVector2f(builder, control[0],
                    control[1]))
                QuadraticCommand.AddEnd(builder, CreateVector2f(builder, end[0], end[1]))
                commandOffset = QuadraticCommand.End(builder)
                VectorCommand.Start(builder)
                VectorCommand.AddCommandType(builder, VectorCommandUnion.QuadraticCommand)
                VectorCommand.AddCommand(builder, commandOffset)
                offsets.append(VectorCommand.End(builder))
                pos = end
                # Remember the control point for a following 't'/'T'.
                lastQuadraticPos = control
            elif command == 'a' or command == 'A':
                # Elliptical arc: radius pair, rotation, large-arc flag,
                # sweep flag, then the end point.
                radius = (x, y)
                rotation = angleFromString(tokens[index])
                index += 1
                largeArc = int(tokens[index]) != 0
                index += 1
                sweep = int(tokens[index]) != 0
                index += 1
                x = sizeFromString(tokens[index], size[0])
                index += 1
                y = sizeFromString(tokens[index], size[1])
                index += 1
                if command == 'a':
                    end = (pos[0] + x, pos[1] + y)
                else:
                    end = (x, y)
                ArcCommand.Start(builder)
                ArcCommand.AddRadius(builder, CreateVector2f(builder, radius[0], radius[1]))
                ArcCommand.AddRotation(builder, rotation)
                ArcCommand.AddLargeArc(builder, largeArc)
                ArcCommand.AddClockwise(builder, sweep)
                ArcCommand.AddEnd(builder, CreateVector2f(builder, end[0], end[1]))
                commandOffset = ArcCommand.End(builder)
                VectorCommand.Start(builder)
                VectorCommand.AddCommandType(builder, VectorCommandUnion.ArcCommand)
                VectorCommand.AddCommand(builder, commandOffset)
                offsets.append(VectorCommand.End(builder))
                pos = end
            elif command == 'b' or command == 'B':
                raise Exception('Bearing currently not implemented. ' \
                    'It is generally not implemented by other SVG renderers either.')
        elif tokens[index] == 'z' or tokens[index] == 'Z':
            # Close the current subpath; reflection state does not survive it.
            ClosePathCommand.Start(builder)
            commandOffset = ClosePathCommand.End(builder)
            VectorCommand.Start(builder)
            VectorCommand.AddCommandType(builder, VectorCommandUnion.ClosePathCommand)
            VectorCommand.AddCommand(builder, commandOffset)
            offsets.append(VectorCommand.End(builder))
            lastControlPos = None
            lastQuadraticPos = None
            index += 1
        else:
            # Reset the last control pos if not a curve command.
            if command != 'c' and command != 'C' and command != 's' and command != 'S':
                lastControlPos = None
            if command != 'q' and command != 'Q' and command != 't' and command != 'T':
                lastQuadraticPos = None
            command = tokens[index]
            index += 1
    offsets.extend(style.write(builder))
    return offsets
def writeRectangle(builder, transform, style, upperLeft, rectSize, radius):
    """Write a (possibly rounded) rectangle followed by its style commands."""
    commands = writeStartPath(builder, transform, True)
    lowerRight = (upperLeft[0] + rectSize[0], upperLeft[1] + rectSize[1])
    RectangleCommand.Start(builder)
    RectangleCommand.AddUpperLeft(builder, CreateVector2f(builder, upperLeft[0], upperLeft[1]))
    RectangleCommand.AddLowerRight(builder, CreateVector2f(builder, lowerRight[0], lowerRight[1]))
    RectangleCommand.AddCornerRadius(builder, CreateVector2f(builder, radius[0], radius[1]))
    rectOffset = RectangleCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.RectangleCommand)
    VectorCommand.AddCommand(builder, rectOffset)
    commands.append(VectorCommand.End(builder))
    commands.extend(style.write(builder))
    return commands
def writeText(builder, transform, font, text, rangeCount):
    """Serialize a text block header as a single TextCommand.

    builder: flatbuffers Builder used to serialize the command.
    transform: transform object providing createMatrix33f().
    font: font description (font name, alignment, maxLength, lineHeight).
    text: full text string of the block.
    rangeCount: number of TextRangeCommands that follow this command.
    Returns a single-element list with the vector-command offset.
    """
    # Strings must be created before TextCommand.Start (flatbuffers rule:
    # no nested allocations while a table is being built).
    textOffset = builder.CreateString(text)
    fontOffset = builder.CreateString(font.font)
    TextCommand.Start(builder)
    TextCommand.AddText(builder, textOffset)
    TextCommand.AddFont(builder, fontOffset)
    TextCommand.AddAlignment(builder, font.alignment)
    # 3.402823e+38 is approximately FLT_MAX, used as the sentinel for
    # "no maximum length".
    TextCommand.AddMaxLength(builder, font.maxLength if font.maxLength else 3.402823e+38)
    TextCommand.AddLineHeight(builder, font.lineHeight)
    TextCommand.AddTransform(builder, transform.createMatrix33f(builder))
    TextCommand.AddRangeCount(builder, rangeCount)
    commandOffset = TextCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.TextCommand)
    VectorCommand.AddCommand(builder, commandOffset)
    return [VectorCommand.End(builder)]
def writeTextRange(builder, textRange):
    """Serialize one styled character range of a text block as a TextRangeCommand.

    builder: flatbuffers Builder used to serialize the command.
    textRange: range descriptor (start, count, position, positionType, style).
    Returns a single-element list with the vector-command offset.
    """
    style = textRange.style
    # An offset of 0 means "no material" (no fill / no outline).
    fillMaterialOffset = 0
    fillOpacity = 0.0
    if style.fill:
        fillMaterialOffset = builder.CreateString(style.fill.material)
        fillOpacity = style.opacity*style.fill.opacity
    outlineMaterialOffset = 0
    outlineOpacity = 0.0
    outlineWidth = 0.0
    if style.stroke:
        outlineMaterialOffset = builder.CreateString(style.stroke.material)
        outlineOpacity = style.opacity*style.stroke.opacity
        # Converts the SVG stroke width into the renderer's font-relative
        # outline width; the 1.5 factor is presumably renderer-specific —
        # TODO confirm against the text renderer.
        sizeToWidthFactor = 1.5/style.font.size
        outlineWidth = style.stroke.width*sizeToWidthFactor
    TextRangeCommand.Start(builder)
    TextRangeCommand.AddStart(builder, textRange.start)
    TextRangeCommand.AddCount(builder, textRange.count)
    TextRangeCommand.AddPositionType(builder, textRange.positionType)
    TextRangeCommand.AddPosition(builder, \
        CreateVector2f(builder, textRange.position[0], textRange.position[1]))
    TextRangeCommand.AddFillMaterial(builder, fillMaterialOffset)
    TextRangeCommand.AddOutlineMaterial(builder, outlineMaterialOffset)
    TextRangeCommand.AddFillOpacity(builder, fillOpacity)
    TextRangeCommand.AddOutlineOpacity(builder, outlineOpacity)
    TextRangeCommand.AddSize(builder, style.font.size)
    TextRangeCommand.AddEmbolden(builder, style.font.embolden)
    TextRangeCommand.AddSlant(builder, style.font.slant)
    TextRangeCommand.AddOutlineWidth(builder, outlineWidth)
    # "Fuziness" [sic] is the generated API's spelling; fixed anti-alias fuzziness.
    TextRangeCommand.AddFuziness(builder, 1.0)
    commandOffset = TextRangeCommand.End(builder)
    VectorCommand.Start(builder)
    VectorCommand.AddCommandType(builder, VectorCommandUnion.TextRangeCommand)
    VectorCommand.AddCommand(builder, commandOffset)
    return [VectorCommand.End(builder)]
def readMaterials(node, materials, size, diagonalSize):
    """Register gradient material definitions found among the children of *node*.

    node: DOM element whose element children are scanned (e.g. an SVG <defs>).
    materials: material registry receiving the parsed gradients.
    size: (width, height) used to resolve relative coordinates.
    diagonalSize: diagonal length used for radial gradient radii.
    """
    for child in node.childNodes:
        if child.nodeType != xml.dom.Node.ELEMENT_NODE:
            continue
        tag = child.tagName
        if tag == 'linearGradient':
            materials.addLinearGradient(
                LinearGradientMaterial(child, size, materials))
        elif tag == 'radialGradient':
            materials.addRadialGradient(
                RadialGradientMaterial(child, size, diagonalSize, materials))
def readText(node, defaultFont, size, diagonalSize, materials, style = None):
    """Parse an SVG <text> element into (font, text, ranges).

    node: DOM element expected to be a <text> element (returns (None, None,
        None) otherwise).
    defaultFont: fallback font used by Style.create.
    size: (width, height) of the document for resolving coordinates.
    diagonalSize: diagonal length for relative size resolution.
    materials: material registry for style lookups.
    style: optional parent style to inherit from.
    Returns (font, text, ranges) where `text` is the concatenated, whitespace-
    normalized string and `ranges` is a list of TextRange objects covering it.
    """
    if not node or node.tagName != 'text':
        return None, None, None
    rootStyle = Style.create(node, materials, diagonalSize, style, defaultFont = defaultFont, \
        width = size[0], text = True)
    text = u""
    initialPosition = (0.0, 0.0)
    if node.hasAttribute('x') and node.hasAttribute('y'):
        initialPosition = (sizeFromString(node.getAttribute('x'), size[0]), \
            sizeFromString(node.getAttribute('y'), size[1]))
    # The first range anchors the whole block at the absolute start position.
    ranges = [TextRange(0, 0, initialPosition, TextPosition.Absolute, rootStyle)]
    for child in node.childNodes:
        if child.nodeType == xml.dom.Node.ELEMENT_NODE:
            # Styled sub-element (tspan-like): it may carry its own style and
            # positioning attributes.
            rangeStyle = Style.create(child, materials, diagonalSize, rootStyle, \
                defaultFont = defaultFont, width = size[0], text = True)
            curText = u""
            for nextChild in child.childNodes:
                if nextChild.nodeType == xml.dom.Node.TEXT_NODE:
                    # Python 2 needs an explicit unicode() conversion.
                    if sys.version_info < (3, 0):
                        textPiece = unicode(nextChild.data)
                    else:
                        textPiece = nextChild.data
                    if curText:
                        curText += ' '
                    curText += textPiece.strip()
            # Separate from already-accumulated text with a single space.
            if text:
                curText = ' ' + curText
            position = (0.0, 0.0)
            positionType = TextPosition.Offset
            # Absolute x/y positioning is only honored when no maxLength
            # (wrapping) is in effect.
            if not rootStyle.font.maxLength and child.hasAttribute('x') and \
                    child.hasAttribute('y'):
                position = (sizeFromString(child.getAttribute('x'), size[0]), \
                    sizeFromString(child.getAttribute('y'), size[1]))
                positionType = TextPosition.Absolute
            elif child.hasAttribute('dx') or child.hasAttribute('dy'):
                if child.hasAttribute('dx'):
                    position = (sizeFromString(child.getAttribute('dx'), size[0]), 0.0)
                if child.hasAttribute('dy'):
                    position = (position[0], sizeFromString(child.getAttribute('dy'), size[1]))
                positionType = TextPosition.Offset
            if curText:
                ranges.append(TextRange(len(text), len(curText), position, positionType, \
                    rangeStyle))
                text += curText
        elif child.nodeType == xml.dom.Node.TEXT_NODE:
            # Bare text directly inside <text>: inherits the root style.
            if sys.version_info < (3, 0):
                curText = unicode(child.data)
            else:
                curText = child.data
            curText = curText.strip()
            if curText:
                if text:
                    curText = ' ' + curText
                # Extend the previous range when it already uses the root
                # style instead of opening a new one.
                if ranges[-1].style == rootStyle:
                    ranges[-1].count += len(curText)
                else:
                    ranges.append(TextRange(len(text), len(curText), (0.0, 0.0), \
                        TextPosition.Offset, rootStyle))
                text += curText
    return rootStyle.font, text, ranges
def readShapes(node, defaultFont, materials, size, diagonalSize, transform, style = None):
commands = []
if node.tagName == 'g':
groupTransform = transform*Transform.fromNode(node)
groupStyle = Style.create(node, materials, diagonalSize, style, group = True, \
defaultFont = defaultFont, width = size[0])
for groupNode in node.childNodes:
if groupNode.nodeType == xml.dom.Node.ELEMENT_NODE:
commands.extend(readShapes(groupNode, defaultFont, materials, size, diagonalSize,
groupTransform, groupStyle))
elif node.tagName == 'circle':
commands.append(lambda builder,
transform = transform*Transform.fromNode(node),
style = Style.create(node, materials, diagonalSize, style),
center = (sizeFromString(node.getAttribute('cx'), size[0]),
sizeFromString(node.getAttribute('cy'), size[1])),
radius = sizeFromString(node.getAttribute('r'), diagonalSize):
writeEllipse(builder, transform, style, center, (radius, radius)))
elif node.tagName == 'ellipse':
commands.append(lambda builder,
transform = transform*Transform.fromNode(node),
style = Style.create(node, materials, diagonalSize, style),
center = (sizeFromString(node.getAttribute('cx'), size[0]),
sizeFromString(node.getAttribute('cy'), size[1])),
radius = (sizeFromString(node.getAttribute('rx'), diagonalSize),
sizeFromString(node.getAttribute('ry'), diagonalSize)):
writeEllipse(builder, transform, style, center, radius))
elif node.tagName == 'image':
commands.append(lambda builder,
transform = transform*Transform.fromNode(node),
style = Style.create(node, materials, diagonalSize, style),
upperLeft = (sizeFromString(node.getAttribute('x'), size[0]),
sizeFromString(node.getAttribute('y'), size[1])),
imageSize = (sizeFromString(node.getAttribute('width'), size[0]),
sizeFromString(node.getAttribute('height'), size[1])),
location = | |
''' This submodule provides basic preprocessing functionality to work with hyperspectral data/images.
E.g.
- Normalization
- Baseline Correction/Removal
- RGB-Image standardization
- Scatter correction (especially RMieS-correction)
- Data transformations from 3D to 2D and reverse
- ...
'''
# IMPORTS
#########
import numpy as np
# FUNCTIONS
###########
def _baseline_corr(data, lam=1000, p=0.05, n_iter=10):
''' Asymmetric least squares smoothing for baseline removal/correction.
Adapted from Eilers and Boelens (2005) and with optimized memory usage.
Two parameters: lam (lambda) for smoothness and p for asymmetry.
Generally for data with positive peaks 0.001 <= p <= 0.1 is a good choice and 10^2 <= lam <= 10^9
Although iteration number is fixed at 10 your mileage may vary if the weights do not converge in this time.
Returns the baseline corrected data.
'''
from scipy import sparse
from scipy.sparse.linalg import spsolve
data_length = len(data)
D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(data_length, data_length - 2))
D = lam * D.dot(D.T)
weights = np.ones(data_length)
W = sparse.spdiags(weights, 0, data_length, data_length)
for i in range(n_iter):
W.setdiag(weights)
Z = W + D
z = spsolve(Z, weights * data)
weights = p * (data > z) + (1 - p) * (data < z)
return z
def baseline_als(data, lam=1000, p=0.05, n_iter=10):
    '''Estimate baselines for one or many spectra via asymmetric least squares.

    Checks the input shape: a 1-D array is treated as a single spectrum and
    passed to _baseline_corr directly; a 2-D array of shape
    (number of spectra, data points) is corrected row by row.

    Returns the per-spectrum baseline estimate as a numpy array with the same
    leading shape as the input.
    Raises ValueError for inputs that are neither 1-D nor 2-D (the previous
    version only printed a message and silently returned None).
    '''
    data = np.asarray(data)
    if data.ndim == 1:
        return np.array(_baseline_corr(data, lam, p, n_iter))
    if data.ndim == 2:
        return np.array([_baseline_corr(row, lam, p, n_iter) for row in data])
    raise ValueError(
        'Data shape error! Please check your input values accordingly. '
        'Desired shape of (number of spectra, data points)')
# ---------------------------------------------------------------------------
# AS IMPLEMENTED BY <NAME> DURING THE INITIAL HACKATHON
# ---------------------------------------------------------------------------
from typing import Union as U, Tuple as T, Optional
from sklearn.decomposition import PCA
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix
import numpy as np
def emsc(spectra: np.ndarray,
         wavenumbers: np.ndarray,
         poly_order: Optional[int] = 2,
         reference: np.ndarray = None,
         constituents: np.ndarray = None,
         use_reference: bool = True,
         return_coefs: bool = False) -> U[np.ndarray, T[np.ndarray, np.ndarray]]:
    """
    Preprocess all spectra with EMSC (extended multiplicative signal correction).
    :param spectra: ndarray of shape [n_samples, n_channels]
    :param wavenumbers: ndarray of shape [n_channels]
    :param poly_order: order of polynomial (must be >= 0)
    None: EMSC without polynomial components
    :param reference: reference spectrum
    None: use average spectrum as reference;
    :param constituents: ndarray of shape [n_constituents, n_channels]
    Except constituents it can also take orthogonal vectors,
    for example from PCA.
    :param use_reference: if False not include reference in the model
    :param return_coefs: if True returns coefficients
    [n_samples, n_coeffs], where n_coeffs = 1 + len(constituents) + (order + 1).
    Order of returned coefficients:
    1) b*reference + # reference coeff
    k) c_0*constituent[0] + ... + c_k*constituent[k] + # constituents coeffs
    a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
    :return: preprocessed spectra, [coefficients]
    """
    if reference is None:
        reference = np.mean(spectra, axis=0)
    reference = reference[:, np.newaxis]

    # Normalize wavenumbers to [-1, 1] so the polynomial columns stay well
    # conditioned.
    half_rng = np.abs(wavenumbers[0] - wavenumbers[-1]) / 2
    normalized_wns = (wavenumbers - np.mean(wavenumbers)) / half_rng

    if poly_order is None:
        if constituents is None:
            # BUGFIX: this used to be `(reference)`, which is not a tuple.
            # With use_reference=False the later `columns[1:]` then silently
            # sliced rows off the reference array instead of dropping the
            # reference term from the model.
            columns = (reference,)
        else:
            columns = (reference, constituents.T)
    else:
        polynomial_columns = [np.ones(len(wavenumbers))]
        for j in range(1, poly_order + 1):
            polynomial_columns.append(normalized_wns ** j)
        polynomial_columns = np.stack(polynomial_columns, axis=1)
        if constituents is None:
            columns = (reference, polynomial_columns)
        else:
            columns = (reference, constituents.T, polynomial_columns)
    if not use_reference:
        columns = columns[1:]

    # spectrum = X*coefs + residues
    # least squares -> A = (X.T*X)^-1 * X.T; coefs = A * spectrum
    X = np.concatenate(columns, axis=1)
    A = np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T)

    spectra_columns = spectra.T
    coefs = np.dot(A, spectra_columns)
    residues = spectra_columns - np.dot(X, coefs)

    if use_reference:
        # Divide out the multiplicative term (coefs[0]) and add the reference
        # back so corrected spectra live on the reference's scale.
        preprocessed_spectra = residues / coefs[0] + reference
    else:
        preprocessed_spectra = residues.copy()

    if return_coefs:
        return preprocessed_spectra.T, coefs.T
    return preprocessed_spectra.T
def rep_emsc(spectra: np.ndarray,
             wavenumbers: np.ndarray,
             replicate: np.ndarray,
             poly_order: Optional[int] = 2,
             reference: np.ndarray = None,
             n_comp: int = 1,
             use_reference: bool = True,
             return_coefs: bool = False):
    """
    Preprocess all spectra with replicate EMSC.

    PCA loadings of the per-replicate mean spectra are used as extra EMSC
    constituents so that replicate-to-replicate variation is modeled out.

    :param spectra: ndarray of shape [n_samples, n_channels]
    :param wavenumbers: ndarray of shape [n_channels]
    :param replicate: ndarray of shape [n_samples]
    :param poly_order: order of polynomial
    None: EMSC without polynomial components
    :param reference: reference spectrum
    None: use average spectrum as reference;
    :param n_comp: number of principal components used for replicate correction
    :param use_reference: if False not include reference in the model
    :param return_coefs: if True returns coefficients
    [n_samples, n_coeffs], where n_coeffs = 1 + n_comp + (order + 1).
    Order of returned coefficients:
    1) b*reference + # reference coeff
    n) r_0*loading_rep[0] + ... + r_n*loading_rep[n] + # replicate coeffs
    a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
    :return: preprocessed spectra, [coefficients]
    """
    # Derive replicate loadings; cal_rep_matrix returns (means, loadings)
    # when do_PCA is True, and only the loadings feed the EMSC model.
    _, rep_loadings = cal_rep_matrix(spectra=spectra,
                                     wavenumbers=wavenumbers,
                                     replicate=replicate,
                                     do_PCA=True,
                                     n_comp=n_comp)
    return emsc(spectra=spectra,
                wavenumbers=wavenumbers,
                poly_order=poly_order,
                reference=reference,
                constituents=rep_loadings,
                use_reference=use_reference,
                return_coefs=return_coefs)
def cal_rep_matrix(spectra: np.ndarray,
                   wavenumbers: np.ndarray,
                   replicate: np.ndarray,
                   do_PCA: bool = False,
                   n_comp: int = 1):
    """
    Calculate the mean spectrum of each replicate group, optionally with PCA.
    :param spectra: ndarray of shape [n_samples, n_channels]
    :param wavenumbers: ndarray of shape [n_channels]
    :param replicate: ndarray of shape [n_samples]
    :param do_PCA: if True also returns PCA loadings of the group means
    :param n_comp: number of principal components used for replicate correction
    :return: mean spectra of each replicate, plus loadings when do_PCA is True
    """
    n_rep = len(replicate)
    # One replicate label per spectrum is required.
    assert n_rep == np.shape(spectra)[0]
    rep_uni = np.unique(replicate)
    rep_mean = np.stack(
        [np.mean(spectra[replicate == r, :], axis=0) for r in rep_uni],
        axis=0)
    if not do_PCA:
        return rep_mean
    # Cannot extract more components than there are samples.
    n_comp = np.min((n_rep, n_comp))
    model_pca = PCA(n_comp)
    centered = rep_mean - np.mean(rep_mean, axis=0)
    loadings = model_pca.fit(centered).components_
    return rep_mean, loadings
def cal_merit_lda(spectra: np.ndarray,
                  wavenumbers: np.ndarray,
                  replicate: np.ndarray,
                  label: np.ndarray):
    """
    Benchmark of replicate EMSC correction based on LDA classification.
    :param spectra: ndarray of shape [n_samples, n_channels]
    :param wavenumbers: ndarray of shape [n_channels] (unused; kept for the
        common merit-function signature)
    :param replicate: ndarray of shape [n_samples]
    :param label: ndarray of shape [n_samples]
    :return: mean sensitivity of leave-one-replicate-out cross-validation
    """
    splitter = LeaveOneGroupOut()
    y_true = []
    y_pred = []
    # Hold out one replicate group per fold and pool all predictions.
    for train_idx, test_idx in splitter.split(spectra, label, groups=replicate):
        model = LinearDiscriminantAnalysis()
        model.fit(spectra[train_idx], label[train_idx])
        y_pred.extend(model.predict(spectra[test_idx]))
        y_true.extend(label[test_idx])
    c_m = confusion_matrix(y_true, y_pred, labels=np.unique(label))
    # Per-class sensitivity = diagonal / row sums; return the mean over classes.
    return np.mean(np.diag(c_m) / np.sum(c_m, axis=1))
def rep_emsc_opt(spectra: np.ndarray,
                 wavenumbers: np.ndarray,
                 replicate: np.ndarray,
                 label: np.ndarray,
                 poly_order: Optional[int] = 2,
                 reference: np.ndarray = None,
                 n_comp_all: np.ndarray = (1, 2, 3),
                 use_reference: bool = True,
                 return_coefs: bool = False,
                 fun_merit=cal_merit_lda,
                 do_correction: bool = True):
    """
    Preprocess all spectra with replicate EMSC, with automatic optimization of n_comp
    :param spectra: ndarray of shape [n_samples, n_channels]
    :param wavenumbers: ndarray of shape [n_channels]
    :param replicate: ndarray of shape [n_samples]
    :param label: ndarray of shape [n_samples]
    :param poly_order: order of polynomial
    None: EMSC without polynomial components
    :param reference: reference spectrum
    None: use average spectrum as reference;
    :param n_comp_all: validated numbers of principal components
    used for replicate correction
    :param use_reference: if False not include reference in the model
    :param return_coefs: if True returns coefficients
    [n_samples, n_coeffs], where n_coeffs = 1 + n_comp + (order + 1).
    Order of returned coefficients:
    1) b*reference + # reference coeff
    n) r_0*loading_rep[0] + ... + r_n*loading_rep[n] + # replicate coeffs
    a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
    :param fun_merit: function used to calculate the merits
    benchmarking the goodness of replicate correction
    :param do_correction: if or not do replicate EMSC correction using optimal n_comp
    :return: [preprocessed spectra, [coefficients]], merits, opt_comp
    """
    uni_rep = np.unique(replicate)
    merits = []
    for n_comp in n_comp_all:
        # Cannot use more components than there are replicate groups;
        # candidates are assumed sorted ascending, so stop at the first
        # invalid one.
        if n_comp >= len(uni_rep): break
        prep_spectra = rep_emsc(spectra=spectra,
                                wavenumbers=wavenumbers,
                                replicate=replicate,
                                poly_order=poly_order,
                                reference=reference,
                                n_comp=n_comp,
                                use_reference=use_reference,
                                return_coefs=False)
        met = fun_merit(spectra=prep_spectra,
                        wavenumbers=wavenumbers,
                        replicate=replicate,
                        label=label)
        merits.append(met)
    # NOTE(review): assumes at least one candidate in n_comp_all is valid;
    # np.argmax on an empty list would raise — TODO confirm callers
    # guarantee len(uni_rep) > min(n_comp_all).
    opt_comp = n_comp_all[np.argmax(merits)]
    if do_correction:
        res = rep_emsc(spectra=spectra,
                       wavenumbers=wavenumbers,
                       replicate=replicate,
                       poly_order=poly_order,
                       reference=reference,
                       n_comp=opt_comp,
                       use_reference=use_reference,
                       return_coefs=return_coefs)
        return res, merits, opt_comp
    else:
        return merits, opt_comp
# ---------------------------------------------------------------------------------------------------
# The following Part | |
import time
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import pandas as pd
import sys
from matplotlib import gridspec
import matplotlib.pyplot as plt
import cartopy
import cartopy.io.shapereader as shpreader
import cartopy.crs as ccrs
import geopandas as gpd
def find_absbm3(id_target, target, net):
    """
    Find the SOM node closest to `target` under the L1 (absolute) distance.

    - passes in a real normalized data vector
    - identifies which SOM node is closest to the target vector
    - instead of Euclidean distance it uses the sum of absolute differences;
      any consistent measure of distance works

    id_target: identifier of the target row (unused; kept for interface
        compatibility with find_bmu).
    target: column vector of shape (nFeatures, 1).
    net: SOM weight array of shape (nX, nY, nFeatures).
    Returns (absbm, absbm_id): the winning weight vector reshaped to
    (nFeatures, 1) and its [x, y] grid index.
    """
    nFeatures = net.shape[2]
    # Vectorized L1 distance of every node to the target; replaces the
    # original O(nX*nY) Python double loop.
    dists = np.sum(np.abs(net - target.reshape(1, 1, nFeatures)), axis=2)
    # argmin scans row-major (x outer, y inner), matching the original
    # loop's tie-breaking order.
    x, y = np.unravel_index(np.argmin(dists), dists.shape)
    absbm_id = np.array([x, y])
    absbm = net[x, y, :].reshape(nFeatures, 1)
    return (absbm, absbm_id)
def find_bmu(id_target, target, net):
    """
    Find the best matching unit (BMU) for a given target vector in the SOM.

    id_target: identifier of the target row (unused; kept for interface
        compatibility with find_absbm3).
    target: column vector of shape (nFeatures, 1).
    net: SOM weight array of shape (nX, nY, nFeatures).
    Returns: (bmu, bmu_idx) where bmu is the high-dimensional BMU reshaped
    to (nFeatures, 1) and bmu_idx is its [x, y] index in the SOM grid.
    """
    nFeatures = net.shape[2]
    # Vectorized squared Euclidean distance of every node to the target;
    # replaces the original O(nX*nY) Python double loop.
    sq_dists = np.sum((net - target.reshape(1, 1, nFeatures)) ** 2, axis=2)
    # argmin scans row-major (x outer, y inner), matching the original
    # loop's tie-breaking order.
    x, y = np.unravel_index(np.argmin(sq_dists), sq_dists.shape)
    bmu_idx = np.array([x, y])
    bmu = net[x, y, :].reshape(nFeatures, 1)
    return (bmu, bmu_idx)
def generateReport(net, lstVars):
    """
    Reporter for the Self Organizing Map: tallies country membership per node.

    - runs at the end of all iterations
    - maps every country vector to its best matching unit
    - returns an (nXs, nYs) array with the count of countries in each cluster

    Relies on module-level globals: `nXs`, `nYs` (SOM grid size),
    `countries`, `nCountrys`, and `data` (features x countries) —
    TODO confirm these are set before calling.
    `lstVars` is accepted for interface symmetry but not used here.
    """
    nFeatures = net.shape[2]
    # BUGFIX: `dtype=np.int` — the np.int alias was deprecated in NumPy 1.20
    # and removed in 1.24; the builtin int is the documented replacement.
    cntGps = np.zeros([nXs, nYs], dtype=int)
    print("countries: \n", countries)
    vec = []
    for iRow in range(nCountrys):
        sov = countries[iRow]
        train = data[:, iRow].reshape(np.array([nFeatures, 1]))
        # absbm, absbm_id = find_absbm3(iRow, train, net)
        absbm, absbm_id = find_bmu(iRow, train, net)
        vec.append([iRow, absbm_id, absbm.T])
        iiX = absbm_id[0]
        iiY = absbm_id[1]
        cntGps[iiX, iiY] += 1
    # Append the net's own nodes so every cluster id appears at least once
    # in `vec` (the -1 below compensates for this padding).
    for x in range(net.shape[0]):
        for y in range(net.shape[1]):
            train = net[x, y, :].reshape(np.array([nFeatures, 1, 1]))
            iRow += 1
            absbm_id = str([x, y]).replace(',', '')
            absbm = train
            vec.append([iRow, absbm_id, absbm.T])
    unique, counts = np.unique(([str(vec[i][1]) for i in range(len(vec))]), return_counts=True)
    cntList = list(zip(unique, counts))
    cnt = [cntList[i][1] - 1 for i in range(len(cntList))]
    return cntGps
def generatePlot(iIter, net, cntGps, nColx):
    """
    this is the plotter for Self Organizing Maps
    - after every 1000 iterations (in this case) plots the status of the net
    - figures out which are the 3 most important features based on the estimated Self Organizing Map
    - takes each vector of the 3 by 3 by 4 net and figures out its color based on the 3 most important features
    - creates a rectangle for each element of the net, colors and annotates the rectangle
    - in the final plot, includes the count of the number of Countries in each cluster

    NOTE(review): also relies on module-level globals `axes`, `nCnt`,
    `lstVars`, `nXs`, `nYs` and sets the globals `countries` and
    `nFeatures` — TODO confirm all are initialized before calling.
    Returns the list of matplotlib patches added to axes[0].
    """
    global countries, nFeatures
    patch = []
    axes[0].clear()
    axes[0].annotate(('iter: %d out of %d' % (iIter+1,nCnt)),xy=(0.05,0.95),xycoords='axes fraction')
    nFeatures = net.shape[2]
    nPtMax = 5
    nColorMax = 3
    # RGB color needs at most 3 channels, even if there are more features.
    nColorPts = min(nFeatures,nColorMax)
    # Rank features by spread across the map (descending std): the most
    # discriminating features drive the cell colors and labels.
    orderFace = np.argsort(np.abs(net.std(axis=(0,1))))[::-1][:nFeatures]
    for x in range(1, net.shape[0] + 1):
        for y in range(1, net.shape[1] + 1):
            face = net[x-1,y-1,:]
            # print("face: ", face)
            faceX = []
            for i in range(nColorPts):
                faceX.append(face[orderFace[i]])
            varX = '1st>>'+str(orderFace[0]+1)+": " + \
                lstVars[orderFace[0]][:9]+' 2nd>>'+str(orderFace[1]+1)+": "+lstVars[orderFace[1]][:9]+ \
                ' 3rd>>'+str(orderFace[2]+1)+": "+lstVars[orderFace[2]][:9]+' 4th>>'+str(orderFace[3]+1)+ \
                ": "+lstVars[orderFace[3]][:9]
            axes[0].annotate(varX,xy=(0.05,0.015),xycoords='axes fraction',fontsize=9,fontweight='normal')
            # One colored cell per SOM node; color = top-3 feature values.
            rect = plt.Rectangle((0.05+(x-1.0)/8, 0.014+(y-0.68)/8), 1.0/8.0, 1.0/8.0, facecolor=faceX,edgecolor='gray')
            patch.append(axes[0].add_patch(rect))
            # Truncate feature values to 3 decimals for the cell label.
            face = [int(1000*face[i])/1000.0 for i in range(nFeatures)]
            strFace = ""
            for i in range(nColx):
                strFace+=(str(orderFace[i]+1)+": "+str(face[orderFace[i]])+'\n')
            strXYZ = "["+str(x)+","+str(y)+"]"
            # On the final iteration, append the country count per cluster.
            if iIter >= nCnt-1:
                cntGp = cntGps[x-1][y-1]
                strXYZ+= ": ("+str(cntGp)+")"
            strFace+=strXYZ
            #-----------------------------------------------------
            # Switch to black text on bright cells for readability.
            colorX = 'orange'
            if faceX[0]>0.7 or faceX[1]>0.6 or faceX[2]>0.75:
                colorX = 'black'
            # axes[0].annotate(strFace,xy=((x-0.78)/(nXs+0.20),(y-0.78)/(nYs+0.20)),xycoords='axes fraction',
            #     fontsize=7, color=colorX, fontweight='bold')
            axes[0].annotate(strFace,xy=((x-0.45)/(nXs+1),(y-0.55)/(nYs+1)),xycoords='axes fraction',
                fontsize=7, color=colorX, fontweight='bold')
    # Refresh the world map every 5 iterations and on the final one.
    if iIter >= nCnt-1 or iIter%5 == 0:
        # print("**************************** plot map for iIter: ", iIter)
        # print("\n*****plotMap")
        plotMap(plt, axes, orderFace)
    return patch
def plotMap(plt, axes, orderFace):
    """
    Draw the world map on axes[1], coloring each country by its SOM cluster.

    plt: matplotlib.pyplot module (passed in by the caller).
    axes: sequence of axes; axes[1] is the cartopy GeoAxes drawn on.
    orderFace: feature indices (most discriminating first) used as RGB channels.
    Relies on module-level globals: `iIter`, `nCnt`, `nXs`, `nYs`,
    `nCountrys`, `nFeatures`, `data`, `countries`, `net`, `reader`,
    `gpd`, `ccrs`, `cartopy` — TODO confirm all are initialized.
    """
    lstCntClusters = []
    if iIter >= nCnt-1 or iIter%5 == 0:
        axes[1].clear()
        #ax.add_feature(cartopy.feature.LAND)
        axes[1].add_feature(cartopy.feature.OCEAN)
        #ax.add_feature(cartopy.feature.COASTLINE)
        #ax.add_feature(cartopy.feature.BORDERS, linestyle='-', alpha=.5)
        #ax.add_feature(cartopy.feature.LAKES, alpha=0.95)
        #ax.add_feature(cartopy.feature.RIVERS)
        axes[1].set_extent([-150, 60, -25, 60])
        # BUGFIX: `dtype=np.int` — the np.int alias was deprecated in
        # NumPy 1.20 and removed in 1.24; the builtin int replaces it.
        cntGps = np.zeros([nXs,nYs],dtype=int)
        lstCountrys = [[[] for i in range(nYs)] for j in range(nXs)]
        vec=[]
        # Assign every country to its best matching unit.
        for iRow in range(nCountrys):
            train = data[:, iRow].reshape(np.array([nFeatures, 1]))
            # find its Best Matching Unit
            bmu, bmu_idx = find_bmu(iRow, train, net)
            vec.append([iRow,bmu_idx, bmu.T])
            iiRow = bmu_idx[0]
            iiCol = bmu_idx[1]
            strCountry = countries[iRow]
            # print("strCountry: ", strCountry)
            cntGps[iiRow,iiCol]+=1
            lstCountrys[iiRow][iiCol].append(countries[iRow])
            lstCntClusters.append([iiRow,iiCol])
        # On the final iteration, print cluster membership to the console.
        if iIter >= nCnt-1:
            for i in range(nYs):
                for j in range(nXs):
                    print("[",j+1,i+1,"] ",lstCountrys[j][i])
        for x in range(net.shape[0]):
            for y in range(net.shape[1]):
                train = net[x, y, :].reshape(np.array([nFeatures, 1]))
                # set to Best Matching Unit
                # bmu, bmu_idx = find_bmu(train, net, m)
                iRow+=1
                bmu_idx = str([x,y]).replace(',','')
                bmu = train
                vec.append([iRow,bmu_idx, bmu.T])
        unique, counts = np.unique(([str(vec[i][1]) for i in range(len(vec))]), return_counts=True)
        cntList = list(zip(unique,counts))
        cnt = [cntList[i][1] -1 for i in range(len(cntList))]
        Countries = reader.records()
        Countries1 = gpd.read_file('./world/TM_WORLD_BORDERS-0.3.shp')
        # qTest0/qTest1 select between the cartopy shapereader path and the
        # geopandas shapefile path (only one should be True).
        qTest0 = True
        if qTest0:
            nUsed = 0
            for country in Countries:
                # print("\n ============ country: ", country)
                sov = country.attributes['name']
                lab = country.attributes['adm0_a3']
                # print("sov: ", sov)
                # print("lab: ", lab)
                bounds = country.bounds
                # Label position from the bounding-box center, normalized to
                # axes fraction; Russia is special-cased so its label is not
                # pushed off-map by its longitude span.
                if lab != 'RUS':
                    x = ((bounds[0]+bounds[2])/2.0+178.0)/360.0
                else:
                    x = ((45.0+bounds[2])/2.0+178.0)/360.0
                y = ((bounds[1]+bounds[3])/2.0+59)/145.0
                valXY = (x,y)
                if sov in countries:
                    nUsed+=1
                    ind1 = countries.index(sov)
                    # print("ind1: ",ind1)
                    ind2 = lstCntClusters[ind1]
                    # print("ind2: ",ind2)
                    # Color the country by its cluster's top-3 feature values.
                    color = (net[ind2[0],ind2[1],0],net[ind2[0],ind2[1],1],net[ind2[0],ind2[1],2])
                    color = (net[ind2[0],ind2[1],orderFace[0]],net[ind2[0],ind2[1],orderFace[1]],net[ind2[0],ind2[1],orderFace[2]])
                    # print(">>>>**** sov, ind1, ind2, color, nUsed: ",sov, ind1, ind2, color, nUsed)
                    axes[1].add_geometries(country.geometry, ccrs.PlateCarree(),
                        facecolor = color, label=lab)
                    colorX = 'orange'
                    axes[1].annotate(lab,valXY,xycoords='axes fraction',fontsize=8,color=colorX,fontweight='bold')
                else:
                    # Countries absent from the dataset are drawn in grey.
                    axes[1].add_geometries(country.geometry, ccrs.PlateCarree(),
                        facecolor=(0.8, 0.8, 0.8), label=lab)
                    colorX = 'grey'
                    axes[1].annotate(lab,valXY,xycoords='axes fraction',fontsize=8,color=colorX,fontweight='bold')
        qTest1 = False
        if qTest1:
            nUsed = 0
            sovs = Countries1.NAME
            labs = Countries1.ISO3
            geos = Countries1.geometry
            nCountries = len(sovs)
            for iCountry in range(nCountries):
                sov = sovs[iCountry]
                lab = labs[iCountry]
                geo = geos[iCountry]
                bounds = geo.bounds
                print("sov: ", sov)
                print("lab: ", lab)
                print("bounds: ", bounds)
                if lab != 'RUS':
                    x = ((bounds[0]+bounds[2])/2.0+178.0)/360.0
                else:
                    x = ((45.0+bounds[2])/2.0+178.0)/360.0
                y = ((bounds[1]+bounds[3])/2.0+59)/145.0
                valXY = (x,y)
                if sov in countries:
                    nUsed+=1
                    ind1 = countries.index(sov)
                    # print("ind1: ",ind1)
                    ind2 = lstCntClusters[ind1]
                    # print("ind2: ",ind2)
                    color = (net[ind2[0],ind2[1],0],net[ind2[0],ind2[1],1],net[ind2[0],ind2[1],2])
                    color = (net[ind2[0],ind2[1],orderFace[0]],net[ind2[0],ind2[1],orderFace[1]],net[ind2[0],ind2[1],orderFace[2]])
                    # print(">>>>**** sov, ind1, ind2, color, nUsed: ",sov, ind1, ind2, color, nUsed)
                    axes[1].add_geometries(geo, ccrs.PlateCarree(), facecolor = color, label=lab)
                    colorX = 'orange'
                    axes[1].annotate(lab,valXY,xycoords='axes fraction',fontsize=8,color=colorX,fontweight='bold')
                else:
                    axes[1].add_geometries(geo, ccrs.PlateCarree(),
                        facecolor=(0.8, 0.8, 0.8), label=lab)
                    colorX = 'grey'
                    axes[1].annotate(lab,valXY,xycoords='axes fraction',fontsize=8,color=colorX,fontweight='bold')
        Countries.close()
        plt.gca().set_yticks([-60, -30, 0, 30, 60], crs=ccrs.PlateCarree())
        plt.gca().set_xticks(np.arange(-180,240,60), crs=ccrs.PlateCarree())
        plt.gca().gridlines()
    return
def decay_radius(initial_radius, i, time_constant):
    """Exponentially shrink the SOM neighborhood radius at iteration i."""
    decay_factor = np.exp(-i / time_constant)
    return initial_radius * decay_factor
def decay_learning_rate(initial_learning_rate, i, n_iterations):
    """Exponentially decay the SOM learning rate at iteration i."""
    decay_factor = np.exp(-i / n_iterations)
    return initial_learning_rate * decay_factor
def calculate_influence(distance, radius):
    """Gaussian neighborhood influence of a node at `distance` for the
    current `radius`."""
    denominator = 2 * (radius ** 2)
    return np.exp(-distance / denominator)
def getRawData():
"""
this routine pulls in the raw data
"""
import csv
df = pd.read_csv('./Data/factbook.csv', sep=';')
lstVars = list(df.columns[1:45])
vars1 = ['Area(sq km)','Birth rate(births/1000 population)','Death rate(deaths/1000 population)',
'Infant mortality rate(deaths/1000 live births)','Life expectancy at birth(years)','Population']
vars2 = ['Electricity - consumption(kWh)','Electricity - production(kWh)','Exports','Highways(km)','Imports',
'Internet users','Oil - consumption(bbl/day)','Oil - production(bbl/day)',
'Telephones - main lines in use','Telephones - mobile cellular']
vars3 = ['Debt - external','GDP','GDP - per capita','GDP - real growth rate(%)',
'Inflation rate (consumer prices)(%)']
lstVars = vars1+vars2+vars3
subset = df[lstVars].drop(0)
xCnt = list(len(subset.loc[pd.notnull(df[lstVars[i]])]) for i in range(len(lstVars)))
nCutOff = 160
lstVars = [lstVars[i] for i in range(len(lstVars)) if xCnt[i] > nCutOff]
subset = subset[lstVars]
print(lstVars)
for i in range(len(lstVars)):
print(i,lstVars[i])
subset = subset.loc[pd.notnull(df[lstVars[i]])]
countries = list(df['Country'][subset.index][1:])
print("countries: ",countries)
subset = subset.astype(float)
# subset = subset.convert_objects(convert_numeric=True)
groups = df[['Country']]
| |
# Source: tensorflow/python/framework/config.py (repo: Agoniii/tensorflow, gh_stars: 1-10)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for configuring TensorFlow execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.util.tf_export import tf_export
@tf_export('config.threading.get_intra_op_parallelism_threads')
def get_intra_op_parallelism_threads():
  """Returns the number of threads used within an individual op for parallelism.

  Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speed ups. A value of 0 means the system picks an
  appropriate number.

  Returns:
    Number of parallel threads
  """
  ctx = context.context()
  return ctx.intra_op_parallelism_threads
@tf_export('config.threading.set_intra_op_parallelism_threads')
def set_intra_op_parallelism_threads(num_threads):
  """Sets the number of threads used within an individual op for parallelism.

  Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speed ups. A value of 0 means the system picks an
  appropriate number.

  Args:
    num_threads: Number of parallel threads
  """
  ctx = context.context()
  ctx.intra_op_parallelism_threads = num_threads
@tf_export('config.threading.get_inter_op_parallelism_threads')
def get_inter_op_parallelism_threads():
  """Returns the number of threads used for parallelism between independent operations.

  Determines the number of threads used by independent non-blocking operations.
  0 means the system picks an appropriate number.

  Returns:
    Number of parallel threads
  """
  ctx = context.context()
  return ctx.inter_op_parallelism_threads
@tf_export('config.threading.set_inter_op_parallelism_threads')
def set_inter_op_parallelism_threads(num_threads):
  """Sets the number of threads used for parallelism between independent operations.

  Determines the number of threads used by independent non-blocking operations.
  0 means the system picks an appropriate number.

  Args:
    num_threads: Number of parallel threads
  """
  ctx = context.context()
  ctx.inter_op_parallelism_threads = num_threads
@tf_export('config.optimizer.get_jit')
def get_optimizer_jit():
  """Returns whether JIT compilation is enabled.

  Note that optimizations are only applied in graph mode, (within tf.function).

  Returns:
    If JIT compilation is enabled.
  """
  ctx = context.context()
  return ctx.optimizer_jit
@tf_export('config.optimizer.set_jit')
def set_optimizer_jit(enabled):
  """Enables or disables JIT compilation.

  Args:
    enabled: Whether to enable JIT compilation.
  """
  ctx = context.context()
  ctx.optimizer_jit = enabled
@tf_export('config.optimizer.get_experimental_options')
def get_optimizer_experimental_options():
  """Returns the currently configured experimental optimizer options.

  See `tf.config.optimizer.set_experimental_options` for the list of
  available options. Optimizations are only applied in graph mode (within
  `tf.function`), and as these options are experimental the list is subject
  to change.

  Returns:
    Dictionary of configured experimental optimizer options.
  """
  ctx = context.context()
  return ctx.get_optimizer_experimental_options()
@tf_export('config.optimizer.set_experimental_options')
def set_optimizer_experimental_options(options):
  """Sets experimental optimizer options.

  Note that optimizations are only applied in graph mode, i.e. within
  `tf.function`. In addition, as these are experimental options, the list
  is subject to change.

  Args:
    options: Dictionary of experimental optimizer options to configure.
      Valid keys:
      - layout_optimizer: Optimize tensor layouts
        e.g. This will try to use NCHW layout on GPU which is faster.
      - constant_folding: Fold constants
        Statically infer the value of tensors when possible, and materialize
        the result using constants.
      - shape_optimization: Simplify computations made on shapes.
      - remapping: Remap subgraphs onto more efficient implementations.
      - arithmetic_optimization: Simplify arithmetic ops with common
        sub-expression elimination and arithmetic simplification.
      - dependency_optimization: Control dependency optimizations. Remove
        redundant control dependencies, which may enable other optimization.
        This optimizer is also essential for pruning Identity and NoOp nodes.
      - loop_optimization: Loop optimizations.
      - function_optimization: Function optimizations and inlining.
      - debug_stripper: Strips debug-related nodes from the graph.
      - disable_model_pruning: Disable removal of unnecessary ops from the
        graph.
      - scoped_allocator_optimization: Try to allocate some independent Op
        outputs contiguously in order to merge or eliminate downstream Ops.
      - pin_to_host_optimization: Force small ops onto the CPU.
      - implementation_selector: Enable the swap of kernel implementations
        based on the device placement.
      - auto_mixed_precision: Change certain float32 ops to float16 on Volta
        GPUs and above. Without the use of loss scaling, this can cause
        numerical underflow (see
        `keras.mixed_precision.experimental.LossScaleOptimizer`).
      - disable_meta_optimizer: Disable the entire meta optimizer.
      - min_graph_nodes: The minimum number of nodes in a graph to be
        optimized. For smaller graphs, optimization is skipped.
  """
  ctx = context.context()
  ctx.set_optimizer_experimental_options(options)
@tf_export('config.get_soft_device_placement')
def get_soft_device_placement():
  """Returns whether soft device placement is enabled.

  When enabled, an op is placed on CPU if any of the following holds:
    1. there is no GPU implementation for the op
    2. no GPU devices are known or registered
    3. the op needs to co-locate with reftype input(s) which are on CPU

  Returns:
    True if soft placement is enabled, False otherwise.
  """
  ctx = context.context()
  return ctx.soft_device_placement
@tf_export('config.set_soft_device_placement')
def set_soft_device_placement(enabled):
  """Enables or disables soft device placement.

  When enabled, an op is placed on CPU if any of the following holds:
    1. there is no GPU implementation for the op
    2. no GPU devices are known or registered
    3. the op needs to co-locate with reftype input(s) which are on CPU

  Args:
    enabled: Whether to enable soft placement.
  """
  ctx = context.context()
  ctx.soft_device_placement = enabled
@tf_export('config.experimental.get_device_policy')
def get_device_policy():
  """Returns the current device policy as a string.

  The device policy controls how operations requiring inputs on a specific
  device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
  Only the policy for the current thread is reported; newly started threads
  use the default policy again.

  Returns:
    Current thread device policy: one of 'silent', 'silent_for_int32',
    'warn' or 'explicit'.

  Raises:
    ValueError: If the context holds an unrecognized policy constant.
  """
  # Map the context-level constants back to their public string names.
  policy_names = {
      context.DEVICE_PLACEMENT_SILENT: 'silent',
      context.DEVICE_PLACEMENT_SILENT_FOR_INT32: 'silent_for_int32',
      context.DEVICE_PLACEMENT_WARN: 'warn',
      context.DEVICE_PLACEMENT_EXPLICIT: 'explicit',
  }
  device_policy = context.context().device_policy
  if device_policy in policy_names:
    return policy_names[device_policy]
  raise ValueError('Not a valid device policy: %r' % device_policy)
@tf_export('config.experimental.set_device_policy')
def set_device_policy(device_policy):
  """Sets the device policy for the current thread.

  The device policy controls how operations requiring inputs on a specific
  device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
  When using the default, an appropriate policy is picked automatically and
  may change over time. Only the current thread is affected; newly started
  threads use the default policy again.

  Args:
    device_policy: A device policy.
      Valid values:
      - None: Switch to a system default.
      - 'warn': Copies the tensors which are not on the right device and
        logs a warning.
      - 'explicit': Raises an error if the placement is not as required.
      - 'silent': Silently copies the tensors. Note that this may hide
        performance problems as there is no notification provided when
        operations are blocked on the tensor being copied between devices.
      - 'silent_for_int32': silently copies `int32` tensors, raising errors
        on the other ones.

  Raises:
    ValueError: If an invalid `device_policy` is passed.
  """
  # Translate the public string names to context-level constants.
  policy_constants = {
      'silent': context.DEVICE_PLACEMENT_SILENT,
      'silent_for_int32': context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
      'warn': context.DEVICE_PLACEMENT_WARN,
      'explicit': context.DEVICE_PLACEMENT_EXPLICIT,
      None: None,
  }
  try:
    resolved = policy_constants[device_policy]
  except (KeyError, TypeError):
    # TypeError covers unhashable inputs; both are invalid policies.
    raise ValueError('Not a valid device policy: %r' % device_policy)
  context.context().device_policy = resolved
@tf_export('config.experimental.get_synchronous_execution')
def get_synchronous_execution():
  """Returns whether operations execute synchronously.

  TensorFlow can execute operations synchronously or asynchronously. If
  asynchronous execution is enabled, operations may return "non-ready"
  handles.

  Returns:
    True if the current thread executes operations synchronously.
  """
  current_mode = context.context().execution_mode
  return current_mode == context.SYNC
@tf_export('config.experimental.set_synchronous_execution')
def set_synchronous_execution(enable):
  """Specifies whether operations execute synchronously or asynchronously.

  TensorFlow can execute operations synchronously or asynchronously. If
  asynchronous execution is enabled, operations may return "non-ready"
  handles. When `enable` is None, an appropriate value is picked
  automatically; the value picked may change between TensorFlow releases.

  Args:
    enable: Whether operations should be dispatched synchronously.
      Valid values:
      - None: sets the system default.
      - True: executes each operation synchronously.
      - False: executes each operation asynchronously.
  """
  if enable is None:
    mode = None
  else:
    mode = context.SYNC if enable else context.ASYNC
  context.context().execution_mode = mode
@tf_export('config.experimental.list_physical_devices')
def list_physical_devices(device_type=None):
  """Returns the list of physical devices visible to the runtime.

  Physical devices are hardware devices locally present on the current
  machine. By default all discovered CPU and GPU devices are considered
  visible; this call allows querying the hardware prior to runtime
  initialization.

  The following example ensures the machine can see at least 1 GPU.

  >>> physical_devices = tf.config.experimental.list_physical_devices('GPU')
  >>> print("Num GPUs:", len(physical_devices))
  Num GPUs: ...

  Args:
    device_type: (optional) Device type to filter by such as "CPU" or "GPU".

  Returns:
    List of PhysicalDevice objects.
  """
  ctx = context.context()
  return ctx.list_physical_devices(device_type)
@tf_export('config.experimental.list_logical_devices')
def list_logical_devices(device_type=None):
"""Return a list of logical devices created by runtime.
Logical devices may correspond to physical devices or remote devices in the
cluster. Operations and tensors may | |
{'url': '/jobs/{id}'}
    def purge_command_queue(
            self, id, custom_headers=None, raw=False, **operation_config):
        """Delete all the pending commands for this device from the IoT hub.

        :param id: Device ID.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PurgeMessageQueueResult or ClientRawResponse if raw=true
        :rtype: ~service.models.PurgeMessageQueueResult or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the operation's metadata template ('/devices/{id}/commands').
        url = self.purge_command_queue.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Only HTTP 200 is treated as success; anything else is surfaced as CloudError.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PurgeMessageQueueResult', response)
        if raw:
            # Caller asked for the raw transport response alongside the model.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    purge_command_queue.metadata = {'url': '/devices/{id}/commands'}
    def get_twin(
            self, id, custom_headers=None, raw=False, **operation_config):
        """Get a device twin.

        Get a device twin. See
        https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
        for more information.

        :param id: Device ID.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: deserialized twin payload (plain object, see note below) or
         ClientRawResponse if raw=true
        :rtype: ~service.models.Twin or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the operation's metadata template ('/twins/{id}').
        url = self.get_twin.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        # @digimaun - change device param from 'Twin' to {object}
        # (local patch: deserialize to a generic object instead of the Twin model
        # so the full payload is preserved without model-driven filtering)
        if response.status_code == 200:
            deserialized = self._deserialize('{object}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_twin.metadata = {'url': '/twins/{id}'}
    def replace_twin(
            self, id, device_twin_info, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Replaces tags and desired properties of a device twin.

        Replaces a device twin. See
        https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
        for more information.

        :param id: Device ID.
        :type id: str
        :param device_twin_info: Device twin info
        :type device_twin_info: ~service.models.Twin
        :param if_match: ETag for optimistic concurrency; only replace when
         the server-side twin matches this ETag.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Twin or ClientRawResponse if raw=true
        :rtype: ~service.models.Twin or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the operation's metadata template ('/twins/{id}').
        url = self.replace_twin.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        # @digimaun - Change deserialize type to {object} from Twin
        # (local patch: serialize the input as a generic object so arbitrary
        # twin payloads are sent through without Twin-model filtering)
        body_content = self._serialize.body(device_twin_info, '{object}')
        # Construct and send request (PUT replaces the twin wholesale)
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            # NOTE(review): the response is still deserialized as the Twin model,
            # unlike the '{object}' request serialization above — confirm intent.
            deserialized = self._deserialize('Twin', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    replace_twin.metadata = {'url': '/twins/{id}'}
    def update_twin(
            self, id, device_twin_info, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Updates tags and desired properties of a device twin.

        Updates a device twin. See
        https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
        for more information.

        :param id: Device ID.
        :type id: str
        :param device_twin_info: Device twin info
        :type device_twin_info: ~service.models.Twin
        :param if_match: ETag for optimistic concurrency; only update when
         the server-side twin matches this ETag.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Twin or ClientRawResponse if raw=true
        :rtype: ~service.models.Twin or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the operation's metadata template ('/twins/{id}').
        url = self.update_twin.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        # NOTE(review): serialized as the Twin model here, whereas get_twin /
        # replace_twin were patched to use '{object}' — confirm this asymmetry
        # is intentional before regenerating this client.
        body_content = self._serialize.body(device_twin_info, 'Twin')
        # Construct and send request (PATCH merges into the existing twin)
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Twin', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update_twin.metadata = {'url': '/twins/{id}'}
    def get_module_twin(
            self, id, mid, custom_headers=None, raw=False, **operation_config):
        """Gets a module twin.

        Gets a module twin. See
        https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
        for more information.

        :param id: Device ID.
        :type id: str
        :param mid: Module ID.
        :type mid: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: deserialized twin payload (plain object, see note below) or
         ClientRawResponse if raw=true
        :rtype: ~service.models.Twin or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the metadata template ('/twins/{id}/modules/{mid}').
        url = self.get_module_twin.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
            'mid': self._serialize.url("mid", mid, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            # @digimaun - Change deserialize type to {object} from Twin
            # (local patch: keep the full payload as a generic object)
            deserialized = self._deserialize('{object}', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_module_twin.metadata = {'url': '/twins/{id}/modules/{mid}'}
    def replace_module_twin(
            self, id, mid, device_twin_info, if_match=None, custom_headers=None, raw=False, **operation_config):
        """Replaces tags and desired properties of a module twin.

        Replaces a module twin. See
        https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
        for more information.

        :param id: Device ID.
        :type id: str
        :param mid: Module ID.
        :type mid: str
        :param device_twin_info: Device twin info
        :type device_twin_info: ~service.models.Twin
        :param if_match: ETag for optimistic concurrency; only replace when
         the server-side twin matches this ETag.
        :type if_match: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Twin or ClientRawResponse if raw=true
        :rtype: ~service.models.Twin or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL from the metadata template ('/twins/{id}/modules/{mid}').
        url = self.replace_module_twin.metadata['url']
        path_format_arguments = {
            'id': self._serialize.url("id", id, 'str'),
            'mid': self._serialize.url("mid", mid, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 yields a unique, time-based id used for request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        # @digimaun - Change deserialize type to {object} from Twin
        # (local patch: serialize the input as a generic object so arbitrary
        # twin payloads are sent through without Twin-model filtering)
        body_content = self._serialize.body(device_twin_info, '{object}')
        # Construct and send request (PUT replaces the module twin wholesale)
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            # NOTE(review): response still deserialized as the Twin model, unlike
            # the '{object}' request serialization above — confirm intent.
            deserialized = self._deserialize('Twin', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    replace_module_twin.metadata = {'url': '/twins/{id}/modules/{mid}'}
def update_module_twin(
self, id, mid, device_twin_info, if_match=None, custom_headers=None, raw=False, **operation_config):
"""Updates tags and desired properties of a module twin.
Updates a module twin. See
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-device-twins
for more information.
:param id: Device ID.
:type id: str
:param | |
import sys
import re
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from scipy.spatial.distance import cdist
from colorama import Fore, Style
from kneed import KneeLocator
import copy
import time
import pickle
import os
def error_msg(error_msg, arg):
    """
    Helper function to display an error message and terminate the program.

    Input:
        error_msg: The error message describing the failure.
        arg: The argument the message refers to
             (values include - filename, selected action).
    Output:
        The formatted error message in red on the screen; the interpreter
        then exits with a non-zero status.
    """
    print("****************************")
    print(Fore.RED, end='')
    print(error_msg, ":", arg)
    print(Style.RESET_ALL, end='')
    print("****************************")
    # Exit with status 1 so shells and callers can detect the failure
    # (the previous exit code 0 incorrectly signalled success).
    sys.exit(1)
def printINFO(info):
    """
    Helper function to prompt the user for input.

    Input:
        info: The message that is to be displayed.
    Output:
        The message printed on screen in blue, with the color reset
        afterwards.
    """
    # Emit color-on, message + newline, color-off as a single write.
    print(f"{Fore.BLUE}{info}\n{Style.RESET_ALL}", end='')
# *****************************************************************************
# *****************************************************************************
# Helper Methods Start
def calculate_num_clusters(df, acl_weights):
    """
    Return the number of clusters (k) to use for the current input file.

    The k values below were precomputed offline with the elbow method
    (KneeLocator over weighted-KMeans distortions), so the expensive sweep
    is skipped at runtime and the known knee for the file is returned
    directly. The previous in-function elbow-method sweep was unreachable
    dead code (it sat after an unconditional return, and included a stray
    `np.seed = 0` that would not have seeded NumPy anyway) and has been
    removed.

    Input:
        df: The Pandas dataframe of the input file (ACL.json). Unused; kept
            for interface compatibility with the elbow-method variant.
        acl_weights: Per-row weights for weighted k-means. Unused; see above.
    Output:
        The precomputed k for the current global `file_name`.
    Raises:
        ValueError: If `file_name` is not one of the known input files.
    """
    files = ['IP_Access_List', 'Route_Filter_List', 'VRF', 'AS_Path_Access_List',
             'IKE_Phase1_Keys', 'IPsec_Phase2_Proposals', 'Routing_Policy']
    k_select_vals = [41, 17, 42, 5, 3, 2, 58]
    curr_file = file_name.split(".")[0]
    # .index raises ValueError for unknown files, preserving prior behavior.
    return k_select_vals[files.index(curr_file)]
def perform_kmeans_clustering(df, ns_weights):
    """
    Label every row of *df* with the K-means cluster it belongs to.

    Input:
        df: The Pandas data-frame of the input file (ACL.json).
        ns_weights: The weights of each named structure, used by the
            weighted k-means fit.
    Output:
        Sets the module-level `k_select` and adds a "kmeans_cluster_number"
        column to *df* holding each row's cluster label.
    Example:
        Row1 - Label 0 //Belongs to Cluster 0
        Row2 - Label 0 //Belongs to Cluster 0
        Row3 - Label 1 //Belongs to Cluster 1
    """
    global k_select
    k_select = calculate_num_clusters(df, ns_weights)
    feature_matrix = df[df.columns]
    model = KMeans(n_clusters=k_select)
    model.fit(feature_matrix, None, sample_weight=ns_weights)
    df["kmeans_cluster_number"] = pd.Series(model.labels_)
def extract_keys(the_dict, prefix=''):
    """
    Recursively gather the flattened key paths of a nested dictionary.

    Nested keys are joined with '='. Dict values are descended into; a list
    whose first element is a dict is descended into via that first element;
    any other value (scalar, non-dict list, empty list) terminates the path.

    Input:
        the_dict: The dictionary to flatten.
        prefix: Key path accumulated so far (used by the recursion).
    Output:
        A list of flattened key paths.
    Example:
        Consider {key1:value1, key2:{key3:value3}, key4:[value4],
                  key5:[{key6:{key7:value7}}]}
        The function returns [key1, key2=key3, key4, key5=key6=key7].
    """
    key_list = []
    for key, value in the_dict.items():
        new_prefix = key if len(prefix) == 0 else prefix + '=' + key
        if isinstance(value, dict):
            key_list.extend(extract_keys(value, new_prefix))
        elif isinstance(value, list) and value and isinstance(value[0], dict):
            # Only the first list element is inspected, matching the
            # original behavior.
            key_list.extend(extract_keys(value[0], new_prefix))
        else:
            # Scalars, non-dict lists and empty lists end the path here
            # (the old bare `except:` handled the empty-list case implicitly).
            key_list.append(new_prefix)
    return key_list
def get_uniques(data):
    """
    Count the unique elements of a list of (possibly unhashable) objects.

    Each element is serialized with json.dumps so dicts/lists can be used
    as dictionary keys.

    Input:
        data: A list whose unique entries we need to capture.
    Output:
        A pair (keys, values): the unique JSON-serialized entries in first-
        seen order, and the parallel list of their occurrence counts.
    """
    counts = {}
    for acl in data:
        serialized = json.dumps(acl)
        # dict.get collapses the old membership-test-then-reassign dance.
        counts[serialized] = counts.get(serialized, 0) + 1
    # dicts preserve insertion order, so these stay parallel and ordered.
    return list(counts.keys()), list(counts.values())
def overall_dict(data_final):
    """
    Count value frequencies for every flattened key path, per cluster.

    Input:
        data_final: A list of clusters; each cluster is a list of items
        whose first entry is a (possibly nested) dictionary or None.
    Example:
        {key1:{key2:value1, key3:value2, key4:{key5:value3}}
        {key6:{key7:value2}
        {key8:{key3:value3, key4:value5, key6:value3}}
    Output:
        One dict per cluster mapping each flattened key path (segments
        joined by '=') to a {value: occurrence_count} dict, e.g.
        [{
            key1=key2:{'value1':1},
            key1=key4=key5:{'value3':3},
            ...
        }]
    """
    overall_array = []
    for cluster in data_final:
        freq = {}
        for item in cluster:
            if item[0] is None:
                continue
            for path in extract_keys(item[0]):
                node = item[0]
                for part in path.split("="):
                    node = node[part]
                    if isinstance(node, list):
                        # Descend via the first element; flag empty lists.
                        node = node[0] if node else "#BUG#"
                counts = freq.setdefault(path, {})
                counts[node] = counts.get(node, 0) + 1
        overall_array.append(freq)
    return overall_array
def get_overall_dict(data_final):
    """
    Parses through the dictionary and appends the frequency with which the keys occur.

    Unlike the simpler `overall_dict`, this variant additionally expands
    every element of list-valued nodes (not just the first), tracks which
    flattened paths were already seen per item via `visited`, and uses
    `flag` to decide whether a scalar value still needs to be counted after
    the key walk.

    Input:
        A nested dictionary.
    Example:
        {key1:{key2:value1, key3:value2, key4:{key5:value3}}
        {key6:{key7:value2}
        {key8:{key3:value3, key4:value5, key6:value3}}
    Output:
        Returns a new array with the nested keys appended along with a tuple
        containing the unnested value along with the frequency count.
        [{
            key1=key2:{'value1':1},
            key1=key3:{'value2':2},
            key1=key4=key5:{'value3':3},
            key6=key7:{'value2':2},
            key8=key3:{'value3':3},
            key8=key4:{'value5':1},
            key8=key6:{'value3':1}
        }]
    """
    overall_array = []
    for data in data_final:
        overall = {}
        new_value = None
        flag = 0
        for item in data:
            # NOTE(review): "lines=name" is pre-marked visited so that key
            # path is always skipped — confirm this exclusion is intended.
            visited = {"lines=name":1}
            if item[0] is None:
                continue
            result = extract_keys(item[0])
            for element in result:
                value = item[0]
                for key in element.split("="):
                    if element not in visited:
                        visited[element] = 1
                        new_value = value[key]
                        flag = 0
                        if type(new_value) == list:
                            if len(new_value) > 0:
                                # Count every element of the list value under
                                # the remainder of the flattened path.
                                for list_data in new_value:
                                    if element not in overall:
                                        overall[element] = {}
                                    temp = element
                                    temp_val = list_data
                                    temp = temp.split("=", 1)[-1]
                                    while len(temp.split("=")) > 1:
                                        temp_val = temp_val[temp.split("=")[0]]
                                        temp = temp.split("=", 1)[-1]
                                    list_key = temp
                                    check = 0
                                    # Best-effort counting: lookup failures
                                    # are deliberately swallowed below.
                                    try:
                                        if type(temp_val[list_key]) == list:
                                            if temp_val[list_key][0] not in overall[element]:
                                                overall[element][temp_val[list_key][0]] = 1
                                                check = 1
                                        else:
                                            if temp_val[list_key] not in overall[element]:
                                                overall[element][temp_val[list_key]] = 1
                                                check = 1
                                    except:
                                        dummy=0
                                        '''
                                        do nothing
                                        '''
                                    try:
                                        if check == 0:
                                            if type(temp_val[list_key]) == list:
                                                if temp_val[list_key][0] in overall[element]:
                                                    overall[element][temp_val[list_key][0]] += 1
                                            else:
                                                if temp_val[list_key] in overall[element]:
                                                    overall[element][temp_val[list_key]] += 1
                                    except:
                                        dummy=0
                                    flag = 1
                                value = new_value
                            else:
                                '''
                                Type is not list
                                '''
                                value = new_value
                        else:
                            if flag == 0:
                                if element not in overall:
                                    overall[element] = {}
                                if new_value not in overall[element]:
                                    overall[element][new_value] = 1
                                else:
                                    overall[element][new_value] += 1
                # Scalar leaf not yet counted during the key walk.
                if flag == 0:
                    if element not in overall:
                        overall[element] = {}
                    if new_value not in overall[element]:
                        overall[element][new_value] = 1
                    else:
                        overall[element][new_value] += 1
        overall_array.append(overall)
    return overall_array
def calculate_z_score(arr):
    """
    Calculates the Z-score (uses mean) (or) Modified Z-score (uses median) of data-points.

    Input:
        arr: data points generated from parsing through the input file.
        Also considers the module-level Z_SCORE_FLAG that is set previously:
        0 (default) uses the Modified Z-score, 1 uses the standard Z-score.
    Output:
        The Z-score of the given data-points array.
        A single-element input is returned unchanged (no spread to measure).
        A zero spread (std == 0 or MAD == 0) yields an array of sentinel
        values 1000.0, which downstream code treats as "no significance".

    Note: the original implementation computed the per-point absolute
    deviations twice in the modified branch (`medians`/`med` were built and
    then discarded); that dead code has been removed.
    """
    if len(arr) == 1:
        # Not enough points to measure spread; hand back the input as-is.
        return arr
    if Z_SCORE_FLAG:
        # Standard Z-score: appropriate when the distribution is
        # (approximately) normal (bell curve).
        mean = np.mean(arr)
        std = np.std(arr)
        if std == 0:
            # Zero spread: emit the 1000.0 sentinel for every point.
            return np.ones(len(arr)) * 1000
        return [(val - mean) / std for val in arr]
    # Modified Z-score: median/MAD based, robust for skewed distributions.
    median_y = np.median(arr)
    median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in arr])
    if median_absolute_deviation_y == 0:
        return np.ones(len(arr)) * 1000
    # 0.6745 makes the MAD consistent with the standard deviation of a
    # normal distribution (75th-percentile constant).
    return [0.6745 * (y - median_y) / median_absolute_deviation_y for y in arr]
def calculate_signature_d(overall_arr):
"""
Uses Z-score to generate the signatures of data-points and also maps points on level of significance (include for
signature calculation, include for bug calculation, no significance).
If Z-score is equal to 1000.0 or in between sig_threshold and bug_threshold, no-significance.
If Z-score is >= sig_threshold, include for signature calculation.
If Z-score is <= bug_threshold, include for bug calculation.
Input:
The individual master-signature generated for each Cluster.
Output:
An array containing dictionaries marked with tags that represent the action that needs to | |
SMALL LIGATURE FF}', u'ff', decode=False)
self.register(u'\N{GREEK SMALL LETTER ALPHA}', u'\\alpha', mode='math')
self.register(u'\N{GREEK SMALL LETTER BETA}', u'\\beta', mode='math')
self.register(u'\N{GREEK SMALL LETTER GAMMA}', u'\\gamma', mode='math')
self.register(u'\N{GREEK SMALL LETTER DELTA}', u'\\delta', mode='math')
self.register(
u'\N{GREEK SMALL LETTER EPSILON}',
u'\\epsilon',
mode='math')
self.register(u'\N{GREEK SMALL LETTER ZETA}', u'\\zeta', mode='math')
self.register(u'\N{GREEK SMALL LETTER ETA}', u'\\eta', mode='math')
self.register(u'\N{GREEK SMALL LETTER THETA}', u'\\theta', mode='math')
self.register(u'\N{GREEK SMALL LETTER THETA}', u'\\texttheta',
package='textgreek', encode=False)
self.register(u'\N{GREEK SMALL LETTER IOTA}', u'\\iota', mode='math')
self.register(u'\N{GREEK SMALL LETTER KAPPA}', u'\\kappa', mode='math')
self.register(
u'\N{GREEK SMALL LETTER LAMDA}',
u'\\lambda',
mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK SMALL LETTER MU}', u'\\mu', mode='math')
self.register(u'\N{GREEK SMALL LETTER NU}', u'\\nu', mode='math')
self.register(u'\N{GREEK SMALL LETTER XI}', u'\\xi', mode='math')
self.register(
u'\N{GREEK SMALL LETTER OMICRON}',
u'\\omicron',
mode='math')
self.register(u'\N{GREEK SMALL LETTER PI}', u'\\pi', mode='math')
self.register(u'\N{GREEK SMALL LETTER RHO}', u'\\rho', mode='math')
self.register(u'\N{GREEK SMALL LETTER SIGMA}', u'\\sigma', mode='math')
self.register(u'\N{GREEK SMALL LETTER TAU}', u'\\tau', mode='math')
self.register(
u'\N{GREEK SMALL LETTER UPSILON}',
u'\\upsilon',
mode='math')
self.register(u'\N{GREEK SMALL LETTER PHI}', u'\\phi', mode='math')
self.register(u'\N{GREEK PHI SYMBOL}', u'\\varphi', mode='math')
self.register(u'\N{GREEK SMALL LETTER CHI}', u'\\chi', mode='math')
self.register(u'\N{GREEK SMALL LETTER PSI}', u'\\psi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMEGA}', u'\\omega', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER ALPHA}',
u'\\Alpha',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER BETA}', u'\\Beta', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER GAMMA}',
u'\\Gamma',
mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER DELTA}',
u'\\Delta',
mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER EPSILON}',
u'\\Epsilon',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ZETA}', u'\\Zeta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ETA}', u'\\Eta', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER THETA}',
u'\\Theta',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER IOTA}', u'\\Iota', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER KAPPA}',
u'\\Kappa',
mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER LAMDA}',
u'\\Lambda',
mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK CAPITAL LETTER MU}', u'\\Mu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER NU}', u'\\Nu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER XI}', u'\\Xi', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER OMICRON}',
u'\\Omicron',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PI}', u'\\Pi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER RHO}', u'\\Rho', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER SIGMA}',
u'\\Sigma',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER TAU}', u'\\Tau', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER UPSILON}',
u'\\Upsilon',
mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PHI}', u'\\Phi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER CHI}', u'\\Chi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PSI}', u'\\Psi', mode='math')
self.register(
u'\N{GREEK CAPITAL LETTER OMEGA}',
u'\\Omega',
mode='math')
self.register(u'\N{COPYRIGHT SIGN}', u'\\copyright')
self.register(u'\N{COPYRIGHT SIGN}', u'\\textcopyright')
self.register(u'\N{LATIN CAPITAL LETTER A WITH ACUTE}', u"\\'A")
self.register(u'\N{LATIN CAPITAL LETTER I WITH ACUTE}', u"\\'I")
self.register(u'\N{HORIZONTAL ELLIPSIS}', u'\\ldots')
self.register(u'\N{TRADE MARK SIGN}', u'^{TM}', mode='math')
self.register(
u'\N{TRADE MARK SIGN}',
u'\\texttrademark',
package='textcomp')
self.register(
u'\N{REGISTERED SIGN}',
u'\\textregistered',
package='textcomp')
# \=O and \=o will be translated into Ō and ō before we can
# match the full latex string... so decoding disabled for now
self.register(u'Ǭ', text_type(r'\textogonekcentered{\=O}'),
decode=False)
self.register(u'ǭ', text_type(r'\textogonekcentered{\=o}'),
decode=False)
self.register(u'ℕ', text_type(r'\mathbb{N}'), mode='math')
self.register(u'ℕ', text_type(r'\mathbb N'), mode='math', decode=False)
self.register(u'ℤ', text_type(r'\mathbb{Z}'), mode='math')
self.register(u'ℤ', text_type(r'\mathbb Z'), mode='math', decode=False)
self.register(u'ℚ', text_type(r'\mathbb{Q}'), mode='math')
self.register(u'ℚ', text_type(r'\mathbb Q'), mode='math', decode=False)
self.register(u'ℝ', text_type(r'\mathbb{R}'), mode='math')
self.register(u'ℝ', text_type(r'\mathbb R'), mode='math', decode=False)
self.register(u'ℂ', text_type(r'\mathbb{C}'), mode='math')
self.register(u'ℂ', text_type(r'\mathbb C'), mode='math', decode=False)
def register(self, unicode_text, latex_text, mode='text', package=None,
decode=True, encode=True):
"""Register a correspondence between *unicode_text* and *latex_text*.
:param str unicode_text: A unicode character.
:param str latex_text: Its corresponding LaTeX translation.
:param str mode: LaTeX mode in which the translation applies
(``'text'`` or ``'math'``).
:param str package: LaTeX package requirements (currently ignored).
:param bool decode: Whether this translation applies to decoding
(default: ``True``).
:param bool encode: Whether this translation applies to encoding
(default: ``True``).
"""
if mode == 'math':
# also register text version
self.register(unicode_text, u'$' + latex_text + u'$', mode='text',
package=package, decode=decode, encode=encode)
self.register(unicode_text,
text_type(r'\(') + latex_text + text_type(r'\)'),
mode='text', package=package,
decode=decode, encode=encode)
# XXX for the time being, we do not perform in-math substitutions
return
if package is not None:
# TODO implement packages
pass
# tokenize, and register unicode translation
self.lexer.reset()
self.lexer.state = 'M'
tokens = tuple(self.lexer.get_tokens(latex_text, final=True))
if decode:
if tokens not in self.unicode_map:
self.max_length = max(self.max_length, len(tokens))
self.unicode_map[tokens] = unicode_text
# also register token variant with brackets, if appropriate
# for instance, "\'{e}" for "\'e", "\c{c}" for "\c c", etc.
# note: we do not remove brackets (they sometimes matter,
# e.g. bibtex uses them to prevent lower case transformation)
if (len(tokens) == 2 and
tokens[0].name.startswith(u'control') and
tokens[1].name == u'chars'):
alt_tokens = (tokens[0], self.lexer.curlylefttoken, tokens[1],
self.lexer.curlyrighttoken)
if alt_tokens not in self.unicode_map:
self.max_length = max(self.max_length, len(alt_tokens))
self.unicode_map[alt_tokens] = u"{" + unicode_text + u"}"
if encode and unicode_text not in self.latex_map:
assert len(unicode_text) == 1
self.latex_map[unicode_text] = (latex_text, tokens)
# Module-level singleton translation tables shared by the codec classes
# below: one built with the standard LaTeX lexer, one with the
# unicode-input LaTeX lexer.
_LATEX_UNICODE_TABLE = LatexUnicodeTable(lexer.LatexIncrementalDecoder())
_ULATEX_UNICODE_TABLE = LatexUnicodeTable(
    lexer.UnicodeLatexIncrementalDecoder())
# incremental encoder does not need a buffer
# but decoder does
class LatexIncrementalEncoder(lexer.LatexIncrementalEncoder):
    """Translating incremental encoder for latex. Maintains a state to
    determine whether control spaces etc. need to be inserted.
    State ``'S'`` means space-eating mode (a control word was just
    emitted); ``'M'`` is the normal mode.
    """
    emptytoken = lexer.Token(u"unknown", u"")
    """The empty token."""
    table = _LATEX_UNICODE_TABLE
    """Translation table."""
    def __init__(self, errors='strict'):
        super(LatexIncrementalEncoder, self).__init__(errors=errors)
        self.reset()
    def reset(self):
        super(LatexIncrementalEncoder, self).reset()
        # start in normal (non space-eating) mode
        self.state = 'M'
    def get_space_bytes(self, bytes_):
        """Inserts space bytes in space eating mode.
        Returns a ``(space, bytes_)`` pair; the caller emits *space*
        before *bytes_*.
        """
        if self.state == 'S':
            # in space eating mode
            # control space needed?
            if bytes_.startswith(u' '):
                # replace by control space
                return u'\\ ', bytes_[1:]
            else:
                # insert space (it is eaten, but needed for separation)
                return u' ', bytes_
        else:
            return u'', bytes_
    def _get_latex_chars_tokens_from_char(self, c):
        # Translate a single character *c* into a (latex_text, tokens)
        # pair; may raise UnicodeEncodeError under errors='strict'.
        # if ascii, try latex equivalents
        # (this covers \, #, &, and other special LaTeX characters)
        if ord(c) < 128:
            try:
                return self.table.latex_map[c]
            except KeyError:
                pass
        # next, try input encoding
        # NOTE(review): self.inputenc is presumably provided by the base
        # lexer class -- not visible in this file; confirm.
        try:
            bytes_ = c.encode(self.inputenc, 'strict')
        except UnicodeEncodeError:
            pass
        else:
            return c, (lexer.Token(name=u'chars', text=c),)
        # next, try latex equivalents of common unicode characters
        try:
            return self.table.latex_map[c]
        except KeyError:
            # translation failed
            if self.errors == 'strict':
                raise UnicodeEncodeError(
                    "latex", # codec
                    c, # problematic input
                    0, 1, # location of problematic character
                    "don't know how to translate {0} into latex"
                    .format(repr(c)))
            elif self.errors == 'ignore':
                # drop the character entirely
                return u'', (self.emptytoken,)
            elif self.errors == 'replace':
                # use the \\char command
                # this assumes
                # \usepackage[T1]{fontenc}
                # \usepackage[utf8]{inputenc}
                bytes_ = u'{\\char' + str(ord(c)) + u'}'
                return bytes_, (lexer.Token(name=u'chars', text=bytes_),)
            elif self.errors == 'keep' and not self.binary_mode:
                # pass the character through unchanged (only allowed when
                # not producing bytes output)
                return c, (lexer.Token(name=u'chars', text=c),)
            else:
                raise ValueError(
                    "latex codec does not support {0} errors"
                    .format(self.errors))
    def get_latex_chars(self, unicode_, final=False):
        # Generator yielding latex text fragments for *unicode_*, one
        # character at a time, inserting separator spaces after control
        # words as needed.
        if not isinstance(unicode_, string_types):
            raise TypeError(
                "expected unicode for encode input, but got {0} instead"
                .format(unicode_.__class__.__name__))
        # convert character by character
        for pos, c in enumerate(unicode_):  # pos is currently unused
            bytes_, tokens = self._get_latex_chars_tokens_from_char(c)
            space, bytes_ = self.get_space_bytes(bytes_)
            # update state
            if tokens[-1].name == u'control_word':
                # we're eating spaces
                self.state = 'S'
            else:
                self.state = 'M'
            if space:
                yield space
            yield bytes_
class LatexIncrementalDecoder(lexer.LatexIncrementalDecoder):
    """Translating incremental decoder for LaTeX.
    Buffers lexer tokens and replaces known token sequences with their
    unicode equivalents from :attr:`table`.
    """
    table = _LATEX_UNICODE_TABLE
    """Translation table."""
    def __init__(self, errors='strict'):
        lexer.LatexIncrementalDecoder.__init__(self, errors=errors)
    def reset(self):
        lexer.LatexIncrementalDecoder.reset(self)
        # tokens that may still form the start of a table match
        self.token_buffer = []
    # python codecs API does not support multibuffer incremental decoders
    def getstate(self):
        raise NotImplementedError
    def setstate(self, state):
        raise NotImplementedError
    def get_unicode_tokens(self, chars, final=False):
        # Generator yielding unicode text for *chars*; known LaTeX token
        # sequences are translated via self.table.unicode_map, everything
        # else is decoded token by token.
        for token in self.get_tokens(chars, final=final):
            # at this point, token_buffer does not match anything
            self.token_buffer.append(token)
            # new token appended at the end, see if we have a match now
            # note: match is only possible at the *end* of the buffer
            # because all other positions have already been checked in
            # earlier iterations
            for i in range(len(self.token_buffer), 0, -1):
                last_tokens = tuple(self.token_buffer[-i:])  # last i tokens
                try:
                    unicode_text = self.table.unicode_map[last_tokens]
                except KeyError:
                    # no match: continue
                    continue
                else:
                    # match!! flush buffer, and translate last bit
                    # exclude last i tokens
                    for token in self.token_buffer[:-i]:
                        yield self.decode_token(token)
                    yield unicode_text
                    self.token_buffer = []
                    break
            # flush tokens that can no longer match (the table records the
            # longest registered sequence in max_length)
            while len(self.token_buffer) >= self.table.max_length:
                yield self.decode_token(self.token_buffer.pop(0))
        # also flush the buffer at the end
        if final:
            for token in self.token_buffer:
                yield self.decode_token(token)
            self.token_buffer = []
class LatexCodec(codecs.Codec):
    """Stateless codec wrapper.

    Subclasses plug in concrete incremental coder classes via the
    :attr:`IncrementalEncoder` / :attr:`IncrementalDecoder` attributes;
    each call builds a fresh incremental coder and runs it to completion.
    """

    IncrementalEncoder = None
    IncrementalDecoder = None

    def encode(self, unicode_, errors='strict'):
        """Convert unicode string to LaTeX bytes."""
        result = self.IncrementalEncoder(errors=errors).encode(
            unicode_, final=True)
        return result, len(unicode_)

    def decode(self, bytes_, errors='strict'):
        """Convert LaTeX bytes to unicode string."""
        result = self.IncrementalDecoder(errors=errors).decode(
            bytes_, final=True)
        return result, len(bytes_)
class UnicodeLatexIncrementalDecoder(LatexIncrementalDecoder):
    # Decoder variant for unicode LaTeX input: uses the table built from
    # the unicode lexer.  binary_mode=False presumably means the input is
    # str rather than bytes (flag consumed by the base lexer -- confirm).
    table = _ULATEX_UNICODE_TABLE
    binary_mode = False
class UnicodeLatexIncrementalEncoder(LatexIncrementalEncoder):
    # Encoder variant producing unicode LaTeX output.  With
    # binary_mode=False the 'keep' error mode becomes available (see
    # LatexIncrementalEncoder._get_latex_chars_tokens_from_char).
    table = _ULATEX_UNICODE_TABLE
    binary_mode = False
def find_latex(encoding):
"""Return a :class:`codecs.CodecInfo` instance for the requested
LaTeX *encoding*, which must be equal to ``latex``,
or to ``latex+<encoding>``
where ``<encoding>`` describes another encoding.
"""
if u'_' in encoding:
# Python 3.9 now normalizes "latex+latin1" to | |
text)
text = re_sub(r'№№?', r' № ', text)
# апостроф в начале или в конце строки - кавычки
text = re_sub(r"^'|'$", '"', text)
# если несколько символов ., ?, !, подряд, то если среди них есть
# ?, то меняем всё на него, если есть !, то на него, иначе ставим
# три точки
text = re_sub(r'[.?!]{2,}',
lambda x: ' ' + re_sub(r'.*\..*', '...',
re_sub(r'.*\!.*', '!',
re_sub(r'.*\?.*', '?',
x.group(0)))) + ' ',
text)
# === PERIODS ===
# ---------------
# --- names ---
# инициал: одна заглая буква; м.б. 1 или 2 инициала
# фамилия: с заглавной буквы; не меньше двух символов;
# если в середине дефис, то обе части фамилии
# с заглавной буквы и каждая не меньше двух символов
for re_lname, re_init in [
( r'[A-Z](?:[a-z]+-[A-Z])?[a-z]+' , r'[A-Z]\.'),
(r'[ЁА-Я](?:[ёа-я]+-[ЁА-Я])?[ёа-я]+', r'[ЁА-Я]\.')
]:
# инициалы в начале:
text = re_sub(r'\b({0})({0})? ?({1})\b'
.format(re_init, re_lname),
r' \g<1> \g<2> \g<3> ', text, flags=flags)
# инициалы в конце:
text = re_sub(r'\b({1}) ({0})({0})?\b'
.format(re_init, re_lname),
r' \g<1> \g<2> \g<3> ', text, flags=flags)
# --- end of sentence w/o space after period ---
        def process(match):
            # Split "word.word" into "word. word" (sentence boundary the
            # tokenizer missed) -- but only if the right-hand part is not a
            # known domain TLD and at least one side is a known word form
            # (wform_isknown is defined elsewhere in this module).
            a, b = match.groups()
            return a + '. ' + b if b.lower() not in [
                'com', 'org', 'edu', 'net', 'info',
                'de', 'cn', 'uk', 'ru', 'su', 'us', 'jp',
                'бг', 'бел', 'рф', 'срб', 'укр'
            ] and (wform_isknown(a) or wform_isknown(b)) else match.group(0)
text = re_sub(r'(\w+)\.(\w+)', process, text)
text = re_sub(r'(\w+)\.(\w+)', process, text) # sic!
# period just before a word
text = re_sub(r'(^|\W)\.(\w)', r'\g<1>. \g<2>', text)
# period before of quotation:
text = re_sub(r'(\w+)\.\s*(["`«„]\s*\b)', r'\g<1> . \g<2>', text)
# known bugs of russian nltk punkt:
text = re_sub(r'\b(я|театр|нас|прав)\.', r'\g<1> .', text)
# --- known shortcuts ---
'''
re_0 = r'\b'
re_1 = r'\b\.?\s*([ЁА-Я])?' # конец слова; дальше м.б. точка и/или
# заглавная буква через пробелы или без
# них
re_2 = r'\s*\.?\s*'
re_3 = r'\b\s*\.?\s*' # конец слова, после которого возможны
# пробелы и/или точка
re_4 = r'\s*'
re_5 = r'\s+'
#TODO: capitalization
for a, b in [(r'{0}[иИ]{4}т{2}д{1}', r'и так далее'),
(r'{0}[иИ]{4}т{2}п{1}', r'и тому подобное'),
(r'{0}[мМ]{3}б{1}', r'может быть'),
(r'{0}[тТ]{3}е{1}', r'то есть'),
(r'{0}[тТ]{2}к{1}', r'так как')]:
text = re_sub(a.format(re_0, re_1, re_2, re_3, re_4, re_5),
# если после сокращения идёт слово
# с заглавной буквы, то ставим перед ним точку
lambda x: ' {} {}'
.format(b, ('. ' + x.group(1))
if x.group(1) else ''),
text)
for a, b in [(r'{0}г-ж([аеиу]|ой){0}', r'госпож\g<1>'),
(r'{0}г-н([аеу]|ом)?{0}', r'господин\g<1>')]:
text = re_sub(a.format(re_0), ' {} '.format(b), text)
'''
re_0 = r'\b'
re_1 = r'\s*([ЁА-Я])?' # заглавная буква через пробелы или без них
re_2 = r'\b\s*\.?' # конец слова, после которого возможны пробелы
# и/или точка
re_3 = re_2 + r'\s*' # то же, что и re_2, но в конце ещё может быть
# пробел
re_4 = r'\s*'
re_5 = r'\s+'
#TODO: capitalization
for a, b in [(r'({0}[иИ]{4}т{2}д{2}){1}', r'и так далее'),
(r'({0}[иИ]{4}т{2}п{2}){1}', r'и тому подобное'),
(r'({0}[мМ]{3}б{2}){1}', r'может быть'),
(r'({0}[тТ]{3}е{2}){1}', r'то есть'),
(r'({0}[тТ]{3}к{2}){1}', r'так как')]:
text = re_sub(a.format(re_0, re_1, re_2, re_3, re_4, re_5),
# если после сокращения идёт слово
# с заглавной буквы, то ставим перед ним точку
lambda x: ' {} {}'
.format(self.add_shortcut(x.group(1),
b),
('. ' + x.group(2))
if x.group(2) else ''),
text)
for a, b in [(r'({0}г-ж([аеиу]|ой){0})', r'госпож'),
(r'({0}г-н([аеу]|ом)?{0})', r'господин')]:
text = re_sub(a.format(re_0),
lambda x: ' {} '.format(
self.add_shortcut(x.group(1),
b + (x.group(2)
if x.group(2) else
''))
),
text)
# === HYPHENS ===
# ---------------
# --- searching dashes between hyphens ---
def process(match):
# если один из токенов - наш тэг, то ничего не меняем
if self.CHAR_DELIM in [match.group(1), match.group(3)]:
return match.group(0)
token = match.group(2)
# сохраняем разделители
hyphens = re_findall('\W+', token)
res = ''
words = token.replace(' ', '').split('-')
test_word = '{}-{}'.format(words[0], words[1])
if len(words) == 2 and (
wform_isknown(test_word) or (
words[0].isdecimal() and words[1].isalpha()
)
):
return '{}-{}'.format(words[0], words[1])
# поиск: -i-
# [xxx....] 0
# [.xxx...] 1
# [xx.....][..xxx..] 2
# [_xx....][...xxx.] 3
# [__xx...][....xxx] 4
# [___xx..] 5
# [____xx.] 6
# [_____xx] 7
# проверяем на реальность тройные и двойные сочетания слов с дефисами
# и без них
len_words = len(words)
last_3 = len_words - 3
i = 0
maybehyphen = -1 # -1: absolutely never (i == 0)
# 0: never (word with hyphen have just been added)
# 1: maybe (known word have just been added)
# 2: highly likely (unknown word have just been added)
def add_word(i):
nonlocal res, words, maybehyphen
word = words[i]
word_lower = word.lower()
# если мы в самом начале или если у нас частица
if maybehyphen == -1:
res += ' ' + word
maybehyphen = 2 - wform_isknown(word)
# частые ошибки
elif word_lower in ['бы', 'же', 'ли']:
res += ' ' + word
maybehyphen = 0
# частые ошибки
elif word_lower == 'равно' and \
words[i-1].lower().replace('ё', 'е') == 'все':
res += ' ' + word
maybehyphen = 0
# если предыдущее слово - с дефисом, то ставим тире
elif maybehyphen == 0:
res += ' - ' + word
maybehyphen = 2 - wform_isknown(word)
else: # maybehyphen in [1, 2]
isknown = wform_isknown(word)
## если и предыдущее, и текущее слово известны
if maybehyphen == 1 and isknown:
## если автор не добавлял пробелов, то и мы не будем
#if hyphens[i-1] == '-': # safe... I think %)
# res += '-' + word
# #maybehyphen = 1
#else:
# res += ' - ' + word
# maybehyphen = 2
res += ' - ' + word
maybehyphen = 1
## если хотя бы одно слово неизвестно, то дефис
else:
res += '-' + word
while True:
has1more = i > 0
if i >= 2:
for word in [words[i - 2] + '-' + words[i - 1],
words[i - 2] + '' + words[i - 1]]:
if wform_isknown(word):
res += ' ' + word
has1more = False
maybehyphen = 0
break
else:
add_word(i - 2)
if i >= len_words:
if has1more:
add_word(i - 1)
break
if i <= last_3:
for word in [
words[i] + '-' + words[i + 1] + '-' + words[i + 2],
words[i] + '' + words[i + 1] + '-' + words[i + 2],
words[i] + '-' + words[i + 1] + '' + words[i + 2],
words[i] + '' + words[i + 1] + '' + words[i + 2]
]:
if wform_isknown(word):
if has1more:
add_word(i - 1)
res += ' ' + word
words = words[i + 3:]
len_words = len(words)
last_3 = len_words - 3
i = 0
maybehyphen = 0
break
else:
i += 1
else:
i += 1
#print('{:40}{}'.format('(' + token + ')', '(' + res + ' )'))
return res + ' '
# находим все слова c дефисами; с одной стороны от дефиса м.б. пробел
text = re_sub(r'(\{})?(\w+(?:(?:-| -|- )\w+)(\{})?)+'
.format(self.CHAR_DELIM, self.CHAR_DELIM),
process, text)
# дефис в начале русского слова = тире
text = re_sub(r'(^|[^0-9ЁА-Яёа-я])-([ЁА-Яёа-я])', '\g<1>- \g<2>',
text)
# дефис после знака препинания = тире
text = re_sub(r'([.!?])-(\s|$)', '\g<1> -\g<2>', text)
return text
def sent_tokenize(self, text, kill_empty=True):
"""Return sentence-tokenized copy of a *text*
:rtype: list
"""
text = text.replace('«', '``').replace('“', '``').replace('„', "``") \
.replace('»', "''").replace('”', "''").replace('‟', "''")
sents_ = nltk_sent_tokenize(text, language='russian')
re_ellipsis = re_compile(r'(\.\.\.)\s+([0-9A-ZЁА-Я])')
        def parse_el(sent):
            # Split *sent* at ellipsis-before-capital positions that punkt
            # did not treat as sentence boundaries: a temporary marker
            # (CHAR_DELIM + 'ellipsis' + CHAR_DELIM) is inserted between
            # "..." and a following digit/capital letter, then the string
            # is cut at every marker.  Returns the list of pieces.
            sents = []
            ellipsis = self.CHAR_DELIM + 'ellipsis' + self.CHAR_DELIM
            len_ellipsis = len(ellipsis)
            sent = re_ellipsis.sub(r'\g<1>{}\g<2>'.format(ellipsis), sent)
            i = 0
            while True:
                i = sent.find(ellipsis)
                if i == -1:
                    break
                sents.append(sent[:i])
                sent = sent[i + len_ellipsis:]
            if sent:
                sents.append(sent)
            return sents
        def notempty(text):
            # Truthy iff *text* contains at least one digit or word
            # character (re_search returns a match object or None).
            return re_search(r'[\d\w]', text)
sents, is_join_candidate = [], False
re_quot = re_compile(r'\d+' + '\\' + self.TAG_QUOTATION_END)
for sent in sents_:
match = re_quot.match(sent)
if sents and match:
quot = match.group(0)
sents[-1] += ' ' + quot
sent = sent[len(quot):]
if not notempty(sent):
sents[-1] += sent
#if sent:
# if is_join_candidate and sent[0] in '!?.':
# sents[-1] += sent
# else:
# sents.append(sent)
# is_join_candidate = sent[-1] in '!?.'
continue
for s_ in parse_el(sent):
for s in parse_el(s_):
if is_join_candidate and |