def sum(self, only_valid=True) -> ErrorValue:
    """Calculate the sum of pixels, not counting the masked ones if only_valid is True."""
    if not only_valid:
        mask = 1
    else:
        mask = self.mask
    return ErrorValue((self.intensity * mask).sum(),
                      ((self.error * mask) ** 2).sum() ** 0.5)
def _format_fields(cls, declared_fields: typing.List[tuple]):
    """Process declared fields and construct a list of tuples
    that can be fed into the dataclass constructor factory."""
    formatted_fields = []
    for declared_field in declared_fields:
        field_name = field_type = field_defn = None
        # Case when only (name), or "name", is specified
        if isinstance(declared_field, str) or len(declared_field) == 1:
            field_name = declared_field
            field_type = typing.Any
            field_defn = field(default=None)
        # Case when (name, type) are specified
        elif len(declared_field) == 2:
            field_name = declared_field[0]
            field_type = declared_field[1]
            field_defn = field(default=None)
        # Case when (name, type, field) are specified
        elif len(declared_field) == 3:
            field_name = declared_field[0]
            field_type = declared_field[1]
            # Process the definition and create a `field` object.
            # The definition will be of the form `{'required': False, 'default': 'John'}`
            assert isinstance(declared_field[2], dict)
            metadata = default = None
            if 'required' in declared_field[2] and declared_field[2]['required']:
                metadata = {'required': True}
            if 'default' in declared_field[2]:
                default = declared_field[2]['default']
            field_defn = field(default=default, metadata=metadata)
        formatted_fields.append((field_name, field_type, field_defn))
    return formatted_fields
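# Hypothetical usage sketch for _format_fields above. The owning class (here a
# made-up `Schema`) and the expected results are assumptions; `field` and
# `typing` are the same names the function itself relies on.
declared = [
    "id",                                                     # name only
    ("email", str),                                           # (name, type)
    ("name", str, {"required": True, "default": "John"}),     # (name, type, definition dict)
]
# formatted = Schema._format_fields(declared)
# -> [("id", typing.Any, field(default=None)),
#     ("email", str, field(default=None)),
#     ("name", str, field(default="John", metadata={"required": True}))]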
def sleep(self, time):
    """Sleep (no action) for *time* (in milliseconds)."""
    target = 'wait for %s' % str(time)
    self.device(text=target).wait.exists(timeout=time)
def s3_download(source, destination, exists_strategy=ExistsStrategy.RAISE, profile_name=None):
    """Copy a file from an S3 source to a local destination.

    Parameters
    ----------
    source : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    destination : str
    exists_strategy : {'raise', 'replace', 'abort'}
        What is done when the destination already exists?
        * `ExistsStrategy.RAISE` means a RuntimeError is raised,
        * `ExistsStrategy.REPLACE` means the local file is replaced,
        * `ExistsStrategy.ABORT` means the download is not done.
    profile_name : str, optional
        AWS profile

    Raises
    ------
    botocore.exceptions.NoCredentialsError
        Botocore is not able to find your credentials. Either specify
        profile_name or add the environment variables AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
        See https://boto3.readthedocs.io/en/latest/guide/configuration.html
    """
    if not isinstance(exists_strategy, ExistsStrategy):
        raise ValueError('exists_strategy \'{}\' is not in {}'
                         .format(exists_strategy, ExistsStrategy))
    session = boto3.Session(profile_name=profile_name)
    s3 = session.resource('s3')
    bucket_name, key = _s3_path_split(source)
    if os.path.isfile(destination):
        if exists_strategy is ExistsStrategy.RAISE:
            raise RuntimeError('File \'{}\' already exists.'.format(destination))
        elif exists_strategy is ExistsStrategy.ABORT:
            return
    s3.Bucket(bucket_name).download_file(key, destination)
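# Hedged usage sketch for s3_download: it assumes valid AWS credentials and
# that ExistsStrategy is importable from the same module; the bucket, key and
# local path below are placeholders.
# s3_download('s3://my-bucket/data/report.csv', '/tmp/report.csv',
#             exists_strategy=ExistsStrategy.REPLACE)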
def record_sets_list_by_dns_zone(zone_name, resource_group, top=None,
                                 recordsetnamesuffix=None, **kwargs):
    '''
    .. versionadded:: Fluorine

    Lists all record sets in a DNS zone.

    :param zone_name: The name of the DNS zone (without a terminating dot).
    :param resource_group: The name of the resource group.
    :param top: The maximum number of record sets to return. If not specified,
        returns up to 100 record sets.
    :param recordsetnamesuffix: The suffix label of the record set name that has
        to be used to filter the record set enumerations.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_dns.record_sets_list_by_dns_zone myzone testgroup
    '''
    result = {}
    dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
    try:
        record_sets = __utils__['azurearm.paged_object_to_list'](
            dnsconn.record_sets.list_by_dns_zone(
                zone_name=zone_name,
                resource_group_name=resource_group,
                top=top,
                recordsetnamesuffix=recordsetnamesuffix
            )
        )
        for record_set in record_sets:
            result[record_set['name']] = record_set
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
def _match_by_norm_func(l1, l2, norm_fn, dist_fn, thresh):
    """Matches elements in l1 and l2 using normalization functions.

    Splits the elements in each list into buckets given by the normalization
    function. If the same normalization value points to a bucket from the
    first list and a bucket from the second list, both with a single element,
    we consider the elements in the list as matching if the distance between
    them is less than (or equal to) the threshold.

    e.g. l1 = ['X1', 'Y1', 'Y2', 'Z5'], l2 = ['X1', 'Y3', 'Z1']
         norm_fn = lambda x: x[0]
         dist_fn = lambda e1, e2: 0 if e1 == e2 else 1
         thresh = 0

    The buckets will then be:
        l1_bucket = {'X': ['X1'], 'Y': ['Y1', 'Y2'], 'Z': ['Z5']}
        l2_bucket = {'X': ['X1'], 'Y': ['Y3'], 'Z': ['Z1']}

    For each normalized value:
        'X' -> consider 'X1' equal with 'X1' since the distance is equal to
               the threshold
        'Y' -> skip the lists since we have multiple possible matches
        'Z' -> consider 'Z1' and 'Z5' as different since the distance is
               greater than the threshold.

    Return:
        [('X1', 'X1')]
    """
    common = []
    l1_only_idx = set(range(len(l1)))
    l2_only_idx = set(range(len(l2)))
    buckets_l1 = _group_by_fn(enumerate(l1), lambda x: norm_fn(x[1]))
    buckets_l2 = _group_by_fn(enumerate(l2), lambda x: norm_fn(x[1]))
    for normed, l1_elements in buckets_l1.items():
        l2_elements = buckets_l2.get(normed, [])
        if not l1_elements or not l2_elements:
            continue
        _, (_, e1_first) = l1_elements[0]
        _, (_, e2_first) = l2_elements[0]
        match_is_ambiguous = not (
            len(l1_elements) == len(l2_elements)
            and (all(e2 == e2_first for (_, (_, e2)) in l2_elements)
                 or all(e1 == e1_first for (_, (_, e1)) in l1_elements))
        )
        if match_is_ambiguous:
            continue
        for (e1_idx, e1), (e2_idx, e2) in zip(l1_elements, l2_elements):
            if dist_fn(e1, e2) > thresh:
                continue
            l1_only_idx.remove(e1_idx)
            l2_only_idx.remove(e2_idx)
            common.append((e1, e2))
    l1_only = [l1[i] for i in l1_only_idx]
    l2_only = [l2[i] for i in l2_only_idx]
    return common, l1_only, l2_only
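# Illustrative call using the docstring's own data; the expected outputs are
# taken from the docstring's description, and a _group_by_fn helper (bucketing
# an iterable by a key function) is assumed to be defined in the same module.
# l1 = ['X1', 'Y1', 'Y2', 'Z5']
# l2 = ['X1', 'Y3', 'Z1']
# common, l1_only, l2_only = _match_by_norm_func(
#     l1, l2,
#     norm_fn=lambda x: x[0],
#     dist_fn=lambda e1, e2: 0 if e1 == e2 else 1,
#     thresh=0)
# Per the docstring: common -> [('X1', 'X1')], with the 'Y' and 'Z' entries
# left in l1_only / l2_only.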
def get_first_line(filepath, dialect):
    """Returns list of first line items of file filepath."""
    with open(filepath, "rb") as csvfile:
        csvreader = csv.reader(csvfile, dialect=dialect)
        for first_line in csvreader:
            break
    return first_line
def complete_info(self, text, line, begidx, endidx):
    """Completion for the info command."""
    opts = self.INFO_OPTS
    if not text:
        completions = opts
    else:
        completions = [f for f in opts if f.startswith(text)]
    return completions
def map_dual_axis(low, high, centre, dead_zone, hot_zone, value):
    """Map an axis with a central dead zone and hot zones at each end to a range from -1.0 to 1.0.

    This in effect uses two calls to map_single_axis, choosing whether to use centre and low, or
    centre and high, as the low and high values in that call based on which side of the centre
    value the input value falls. This is the call that handles mapping of values on regular
    joysticks where there's a centre point to which the physical control returns when no input
    is being made.

    :param low:
        The raw value corresponding to the strongest negative input (stick far left / down).
    :param high:
        The raw value corresponding to the strongest positive input (stick far right / up).
    :param centre:
        The raw value corresponding to the resting position of the axis when no user interaction
        is happening.
    :param dead_zone:
        The proportion of each (positive and negative) part of the motion away from the centre
        which should result in an output of 0.0.
    :param hot_zone:
        The proportion of each (positive and negative) part of the motion away from each extreme
        end of the range which should result in 1.0 or -1.0 being returned (depending on whether
        we're on the high or low side of the centre point).
    :param value:
        The raw value to map.
    :return:
        The filtered and clamped value, from -1.0 at low to 1.0 at high, with a centre as
        specified mapping to 0.0.
    """
    if value <= centre:
        return map_single_axis(centre, low, dead_zone, hot_zone, value)
    else:
        return map_single_axis(centre, high, dead_zone, hot_zone, value)
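# Illustrative sketch only: mapping a raw joystick reading with map_dual_axis.
# The raw range (0-1023, centre 512) and the 10% dead/hot zones are made-up
# values, and map_single_axis is assumed to be defined alongside the function.
# raw = 700
# position = map_dual_axis(low=0, high=1023, centre=512,
#                          dead_zone=0.1, hot_zone=0.1, value=raw)
# position lands somewhere between 0.0 and 1.0: past the dead zone on the
# positive side, but short of the hot zone that would clamp it to 1.0.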
def get(account):
    """Returns the class object identified by `account_id`

    Args:
        account (`int`, `str`): Unique ID of the account to load from database

    Returns:
        `Account` object if found, else None
    """
    account = Account.get(account)
    if not account:
        return None
    acct_type = AccountType.get(account.account_type_id).account_type
    account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type)
    return account_class(account)
def parse(cls, element):
    """Create a new Agent by parsing root.

    :param element: Element to be parsed into an Agent.
    :raises exceptions.ParseError: If element is not a valid agent.
    """
    if element.tag != cls.AGENT_TAG:
        raise exceptions.ParseError(
            u"Agent got unexpected tag {}; expected {}".format(element.tag, cls.AGENT_TAG))
    role = element.get(u"ROLE")
    if not role:
        raise exceptions.ParseError(u"Agent must have a ROLE attribute.")
    if role == u"OTHER":
        role = element.get(u"OTHERROLE") or role
    agent_type = element.get(u"TYPE")
    if agent_type == u"OTHER":
        agent_type = element.get(u"OTHERTYPE") or agent_type
    agent_id = element.get(u"ID")
    try:
        name = element.find(cls.NAME_TAG).text
    except AttributeError:
        name = None
    notes = [note.text for note in element.findall(cls.NOTE_TAG)]
    return cls(role, id=agent_id, type=agent_type, name=name, notes=notes)
def deserialize(self, value, **kwargs):
    """Return a deserialized copy of the tuple."""
    kwargs.update({'trusted': kwargs.get('trusted', False)})
    if self.deserializer is not None:
        return self.deserializer(value, **kwargs)
    if value is None:
        return None
    output_list = [self.prop.deserialize(val, **kwargs) for val in value]
    return self._class_container(output_list)
def _git_enable_branch(desired_branch):
    """Enable the desired branch name."""
    preserved_branch = _git_get_current_branch()
    try:
        if preserved_branch != desired_branch:
            _tool_run('git checkout ' + desired_branch)
        yield
    finally:
        if preserved_branch and preserved_branch != desired_branch:
            _tool_run('git checkout ' + preserved_branch)
def requires_application_json(f):
    """Decorator for enforcing the application/json Content-Type."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        from flask import request
        if request.get_json(silent=True) is None:
            er = ErrorResponse(description='Improper Content-Type header. Expecting "application/json"')
            return to_json_response(er), HTTPStatus.BAD_REQUEST
        else:
            return f(*args, **kwargs)
    return wrapped
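# Hedged usage sketch: applying requires_application_json to a Flask view. The
# app and route below are illustrative only; ErrorResponse and to_json_response
# are assumed to come from the same module as the decorator.
# from flask import Flask, jsonify, request
# app = Flask(__name__)
#
# @app.route('/items', methods=['POST'])
# @requires_application_json
# def create_item():
#     payload = request.get_json()
#     return jsonify(payload), 201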
def _JRStaeckelIntegrandSquared(u, E, Lz, I3U, delta, u0, sinh2u0, v0, sin2v0,
                                potu0v0, pot):  # potu0v0 = potentialStaeckel(u0, v0, pot, delta)
    """The J_R integrand: p^2_u(u) / 2 / delta^2"""
    sinh2u = nu.sinh(u) ** 2.
    dU = (sinh2u + sin2v0) * potentialStaeckel(u, v0, pot, delta) - (sinh2u0 + sin2v0) * potu0v0
    return E * sinh2u - I3U - dU - Lz ** 2. / 2. / delta ** 2. / sinh2u
def toggle_service_status(self, service_id):
    """Toggles the service status.

    :param int service_id: The id of the service to toggle
    """
    svc = self.client['Network_Application_Delivery_Controller_'
                      'LoadBalancer_Service']
    return svc.toggleStatus(id=service_id)
def cli_run_viz(source=None, outputpath="", theme="", verbose=False):
    """This application is a wrapper on the main ontospy-viz script. It generates docs for all
    models in the local library, using the Complex-html template.

    @todo allow to pass a custom folder..

    > python -m ontospy.viz.scripts.export_all -o ~/Desktop/test/ --theme random
    """
    if outputpath:
        if not (os.path.exists(outputpath)) or not (os.path.isdir(outputpath)):
            click.secho("WARNING: the -o option must include a valid directory path.", fg="red")
            sys.exit(0)
    else:
        from os.path import expanduser
        home = expanduser("~")
        outputpath = os.path.join(home, "ontospy-viz-multi")
    if source:
        source_folder = source[0]
        if not os.path.isdir(source_folder):
            click.secho("WARNING: '%s' is not a valid directory path." % source_folder, fg="red")
            sys.exit(0)
        files_list = [f for f in os.listdir(source_folder)
                      if os.path.isfile(os.path.join(source_folder, f))]
        click.secho("Exporting the directory: '%s'" % source_folder, fg="green")
        click.secho("----------", fg="green")
    else:
        click.secho("Exporting the local library: '%s'" % get_home_location(), fg="green")
        click.secho("----------", fg="green")
        files_list = get_localontologies()
        source_folder = get_home_location()
    report_pages = []
    for onto_name in files_list:
        full_uri = os.path.join(source_folder, onto_name)
        if theme:
            if theme == "random":
                _theme = random_theme()
            else:
                _theme = theme
        else:
            _theme = BOOTSWATCH_THEME_DEFAULT
        click.secho("Onto: <%s> Theme: '%s'" % (onto_name, _theme), fg="green")
        printDebug("Loading graph...", dim=True)
        g = Ontospy(os.path.join(source_folder, onto_name), verbose=verbose)
        if g.sources:  # if Ontospy graph has no valid 'sources' = file passed was not valid RDF
            printDebug("Building visualization...", dim=True)
            onto_name_safe = slugify(unicode(onto_name))
            onto_outputpath = os.path.join(outputpath, onto_name_safe)
            # note: single static files output path
            static_outputpath = os.path.join(outputpath, "static")
            # v = KompleteViz(g, theme=_theme)
            v = KompleteVizMultiModel(g, theme=_theme, static_url="../static/",
                                      output_path_static=static_outputpath)
            try:
                # note: onto_outputpath is wiped out each time as part of the build
                url = v.build(onto_outputpath)
                report_pages.append(
                    "<a href='%s/index.html' target='_blank'>%s</a> ('%s' theme)<br />"
                    % (onto_name_safe, onto_name, _theme))
            except:
                e = sys.exc_info()[0]
                printDebug("Error: " + str(e), "red")
                continue
    # generate a report page
    report_path = os.path.join(outputpath, "index.html")
    html = """
    <html>
    <head>
    <style media="screen">
    a {font-size: 20px; padding: 15px; text-transform: capitalize; text-decoration: none;}
    a:hover {text-decoration: underline;}
    </style>
    </head>
    <body>
    <h1>Ontospy-generated documentation:</h1>
    %s
    </body>
    </html>
    """
    with open(report_path, "w") as text_file:
        text_file.write(html % ("".join([x for x in report_pages])))
    # open report
    webbrowser.open("file:///" + report_path)
    raise SystemExit(1)
def build_sdist(source_dir, sdist_dir, config_settings=None):
    """Build an sdist from a source directory using PEP 517 hooks.

    :param str source_dir: Source directory containing pyproject.toml
    :param str sdist_dir: Target directory to place sdist in
    :param dict config_settings: Options to pass to build backend

    This is a blocking function which will run pip in a subprocess to install
    build requirements.
    """
    if config_settings is None:
        config_settings = {}
    requires, backend = _load_pyproject(source_dir)
    hooks = Pep517HookCaller(source_dir, backend)
    with BuildEnvironment() as env:
        env.pip_install(requires)
        reqs = hooks.get_requires_for_build_sdist(config_settings)
        env.pip_install(reqs)
        return hooks.build_sdist(sdist_dir, config_settings)
def subsystems(self):
    """Returns all subsystem types used by tasks in this goal, in no particular order."""
    ret = set()
    for task_type in self.task_types():
        ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
    return ret
def filter(resources, query):
    """Filter a list of resources according to a query expression.

    The search criteria specified in the query parameter has two parts:
      1. a VISA regular expression over a resource string.
      2. optional logical expression over attribute values
         (not implemented in this function, see below).

    .. note: The VISA regular expression syntax is not the same as the
             Python regular expression syntax. (see below)

    The regular expression is matched against the resource strings of resources
    known to the VISA Resource Manager. If the resource string matches the
    regular expression, the attribute values of the resource are then matched
    against the expression over attribute values. If the match is successful,
    the resource has met the search criteria and gets added to the list of
    resources found.

    By using the optional attribute expression, you can construct flexible
    and powerful expressions with the use of logical ANDs (&&), ORs (||),
    and NOTs (!). You can use equal (==) and unequal (!=) comparators to
    compare attributes of any type, and other inequality comparators
    (>, <, >=, <=) to compare attributes of numeric type. Use only global
    attributes in the attribute expression. Local attributes are not allowed
    in the logical expression part of the expr parameter.

        Symbol      Meaning
        ?           Matches any one character.
        \           Makes the character that follows it an ordinary character
                    instead of special character. For example, when a question
                    mark follows a backslash (\?), it matches the ? character
                    instead of any one character.
        [list]      Matches any one character from the enclosed list. You can
                    use a hyphen to match a range of characters.
        [^list]     Matches any character not in the enclosed list. You can use
                    a hyphen to match a range of characters.
        *           Matches 0 or more occurrences of the preceding character or
                    expression.
        +           Matches 1 or more occurrences of the preceding character or
                    expression.
        Exp|exp     Matches either the preceding or following expression. The or
                    operator | matches the entire expression that precedes or
                    follows it and not just the character that precedes or follows
                    it. For example, VXI|GPIB means (VXI)|(GPIB), not VX(I|G)PIB.
        (exp)       Grouping characters or expressions.

    :param resources: iterable of resources.
    :param query: query expression.
    """
    if '{' in query:
        query, _ = query.split('{')
        logger.warning('optional part of the query expression not supported. '
                       'See filter2')
    try:
        query = query.replace('?', '.')
        matcher = re.compile(query, re.IGNORECASE)
    except re.error:
        raise errors.VisaIOError(constants.VI_ERROR_INV_EXPR)
    return tuple(res for res in resources if matcher.match(res))
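# Illustrative sketch: filtering VISA-style resource strings with filter().
# The resource names are placeholders; only the '?' wildcard translation done
# in the implementation above is exercised here.
# resources = ['ASRL1::INSTR', 'ASRL2::INSTR', 'GPIB0::14::INSTR']
# filter(resources, 'ASRL?::INSTR')   # -> ('ASRL1::INSTR', 'ASRL2::INSTR')
# filter(resources, 'GPIB?*INSTR')    # -> ('GPIB0::14::INSTR',)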
def get_cpuinfo_field(self, field):
    """Search /proc/cpuinfo for a field and return its value, if found,
    otherwise None."""
    # Match a line like 'Hardware   : BCM2709':
    pattern = r'^' + field + r'\s+:\s+(.*)$'
    with open('/proc/cpuinfo', 'r') as infile:
        cpuinfo = infile.read().split('\n')
    for line in cpuinfo:
        match = re.search(pattern, line, flags=re.IGNORECASE)
        if match:
            return match.group(1)
    return None
def _upload_in_splits(self, destination_folder_id, source_path, preflight_check,
                      verbose=True, chunked_upload_threads=5):
    '''Since Box has a maximum file size limit (15 GB at time of writing),
    we need to split files larger than this into smaller parts, and chunk upload each part.'''
    file_size = os.stat(source_path).st_size
    split_size = BOX_MAX_FILE_SIZE
    # Make sure that the last split piece is still big enough for a chunked upload
    while file_size % split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
        split_size -= 1000
        if split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
            raise Exception('Lazy programming error')
    split_start_byte = 0
    part_count = 0
    uploaded_file_ids = []
    while split_start_byte < file_size:
        dest_file_name = '{0}.part{1}'.format(os.path.basename(source_path), part_count)
        prev_uploaded_file_ids = self.find_file(destination_folder_id, dest_file_name)
        if len(prev_uploaded_file_ids) == 1:
            if verbose:
                print('\nSkipping upload of split {0} of {1}; already exists'.format(
                    part_count + 1, math.ceil(file_size / split_size)))
            uploaded_file_ids.extend(prev_uploaded_file_ids)
        else:
            if verbose:
                print('\nUploading split {0} of {1}'.format(
                    part_count + 1, math.ceil(file_size / split_size)))
            uploaded_file_ids.append(self._chunked_upload(
                destination_folder_id, source_path,
                dest_file_name=dest_file_name,
                split_start_byte=split_start_byte,
                # Take the min of file_size - split_start_byte so that the last part of a
                # split doesn't read into the next split
                file_size=min(split_size, file_size - split_start_byte),
                preflight_check=preflight_check,
                verbose=verbose,
                upload_threads=chunked_upload_threads,
            ))
        part_count += 1
        split_start_byte += split_size
    return uploaded_file_ids
def get_app_guid(self, app_name):
    """Returns the GUID for the app instance with the given name."""
    summary = self.space.get_space_summary()
    for app in summary['apps']:
        if app['name'] == app_name:
            return app['guid']
def plot(self, filename, title=None, reciprocal=None, limits=None, dtype='rho',
         return_fig=False, **kwargs):
    """Standard plot of spectrum

    Parameters
    ----------
    filename : string
        Output filename. Include the ending to specify the filetype
        (usually .pdf or .png)
    title : string, optional
        Title for the plot
    reciprocal : :class:`reda.eis.plots.sip_response`, optional
        If another :class:`reda.eis.plots.sip_response` object is provided
        here, use this as the reciprocal spectrum.
    limits : dict, optional
        A dictionary which contains plot limits. See code example below.
    dtype : string, optional
        Determines if the data plotted included geometric factors ('rho')
        or not ('r'). Default: 'rho'
    return_fig : bool, optional
        If True, then do not delete the figure object after saving to file
        and return the figure object. Default: False
    **kwargs : dict
        kwargs is piped through to the _plot function

    Returns
    -------
    fig : :class:`matplotlib.Figure`
        The figure object. Only returned if return_fig is set to True

    Examples
    --------
    >>> from reda.eis.plots import sip_response
    >>> import numpy as np
    >>> frequencies = np.array([
    ...     1.00000e-03, 1.77827941e-03, 3.16227766e-03, 5.62341325e-03,
    ...     1.00000e-02, 1.77827941e-02, 3.16227766e-02, 5.62341325e-02,
    ...     1.00000e-01, 1.77827941e-01, 3.16227766e-01, 5.62341325e-01,
    ...     1.00000e+00, 1.77827941e+00, 3.16227766e+00, 5.62341325e+00,
    ...     1.00000e+01, 1.77827941e+01, 3.16227766e+01, 5.62341325e+01,
    ...     1.00000e+02, 1.77827941e+02, 3.16227766e+02, 5.62341325e+02,
    ...     1.00000e+03])
    >>> rcomplex = np.array([
    ...     49.34369772-0.51828971j, 49.11781581-0.59248806j,
    ...     48.85819872-0.6331137j, 48.58762806-0.62835135j,
    ...     48.33331113-0.57965851j, 48.11599009-0.50083533j,
    ...     47.94405036-0.41005275j, 47.81528917-0.32210768j,
    ...     47.72215469-0.24543425j, 47.65607773-0.18297794j,
    ...     47.60962191-0.13433101j, 47.57706229-0.09755774j,
    ...     47.55424286-0.07031682j, 47.53822912-0.05041399j,
    ...     47.52697253-0.03601005j, 47.51904718-0.02565412j,
    ...     47.51345965-0.01824266j, 47.50951606-0.01295546j,
    ...     47.50673042-0.00919217j, 47.50476152-0.0065178j,
    ...     47.50336925-0.00461938j, 47.50238442-0.00327285j,
    ...     47.50168762-0.00231829j, 47.50119454-0.00164187j,
    ...     47.50084556-0.00116268j])
    >>> spectrum = sip_response(frequencies=frequencies, rcomplex=rcomplex)
    >>> fig = spectrum.plot('spectrum.pdf', return_fig=True)
    """
    fig, axes = self._plot(reciprocal=reciprocal, limits=limits, title=title,
                           dtype=dtype, **kwargs)
    fig.savefig(filename, dpi=300)
    if return_fig:
        return fig
    else:
        plt.close(fig)
def attach_session(self, target_session=None):
    """``$ tmux attach-session`` aka alias: ``$ tmux attach``.

    Parameters
    ----------
    target_session : str
        name of the session. fnmatch(3) works.

    Raises
    ------
    :exc:`exc.BadSessionName`
    """
    session_check_name(target_session)
    tmux_args = tuple()
    if target_session:
        tmux_args += ('-t%s' % target_session,)
    proc = self.cmd('attach-session', *tmux_args)
    if proc.stderr:
        raise exc.LibTmuxException(proc.stderr)
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON,
               schema: dict = None) -> Any:
    """POST request on self.endpoint + url_path

    :param url_path: Url encoded path following the endpoint
    :param params: Url query string parameters dictionary
    :param rtype: Response type
    :param schema: Json Schema to validate response (optional, default None)
    :return:
    """
    if params is None:
        params = dict()
    client = API(self.endpoint.conn_handler(self.session, self.proxy))
    # get aiohttp response
    response = await client.requests_post(url_path, **params)
    # if schema supplied, validate response
    if schema is not None:
        await parse_response(response, schema)
    # return the chosen type
    if rtype == RESPONSE_AIOHTTP:
        return response
    elif rtype == RESPONSE_TEXT:
        return await response.text()
    elif rtype == RESPONSE_JSON:
        return await response.json()
def wraps(__fn, **kw):
    """Like ``functools.wraps``, with support for annotations."""
    kw['assigned'] = kw.get('assigned', WRAPPER_ASSIGNMENTS)
    return functools.wraps(__fn, **kw)
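# Minimal sketch of using the wraps() helper above as a drop-in replacement for
# functools.wraps; WRAPPER_ASSIGNMENTS is assumed to be defined in the same
# module (a tuple of attribute names that includes annotations).
# def logged(fn):
#     @wraps(fn)
#     def inner(*args, **kwargs):
#         print('calling', fn.__name__)
#         return fn(*args, **kwargs)
#     return inner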
def pack(self, value=None):
    """Pack the value as a binary representation.

    Returns:
        bytes: The binary representation.
    """
    if isinstance(value, type(self)):
        return value.pack()
    if value is None:
        value = self
    else:
        container = type(self)(items=None)
        container.extend(value)
        value = container
    bin_message = b''
    try:
        for item in value:
            bin_message += item.pack()
        return bin_message
    except exceptions.PackException as err:
        msg = "{} pack error: {}".format(type(self).__name__, err)
        raise exceptions.PackException(msg)
def update_stored_win32tz_map():
    """Downloads the cldr win32 timezone map and stores it in win32tz_map.py."""
    windows_zones_xml = download_cldr_win32tz_map_xml()
    source_hash = hashlib.md5(windows_zones_xml).hexdigest()
    if hasattr(windows_zones_xml, "decode"):
        windows_zones_xml = windows_zones_xml.decode("utf-8")
    map_zones = create_win32tz_map(windows_zones_xml)
    map_dir = os.path.dirname(os.path.abspath(__file__))
    map_filename = os.path.join(map_dir, "win32tz_map.py")
    if os.path.exists(map_filename):
        reload(win32tz_map)
        current_hash = getattr(win32tz_map, "source_hash", None)
        if current_hash == source_hash:
            return False
    map_file = open(map_filename, "w")
    comment = "Map between Windows and Olson timezones taken from %s" % (_CLDR_WINZONES_URL,)
    comment2 = "Generated automatically from datetime_tz.py"
    map_file.write("'''%s\n" % comment)
    map_file.write("%s'''\n" % comment2)
    map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % (source_hash))
    map_file.write("win32timezones = {\n")
    for win32_name, territory, olson_name, comment in map_zones:
        if territory == '001':
            map_file.write("  %r: %r, # %s\n" % (str(win32_name), str(olson_name), comment or ""))
        else:
            map_file.write("  %r: %r, # %s\n" % ((str(win32_name), str(territory)),
                                                 str(olson_name), comment or ""))
    map_file.write("}\n")
    map_file.close()
    return True
def ring2nest(nside, ipix):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ring2nest`."""
    ipix = np.atleast_1d(ipix).astype(np.int64, copy=False)
    return ring_to_nested(ipix, nside)
def dump(bqm, fp, vartype_header=False):
    """Dump a binary quadratic model to a string in COOrdinate format."""
    for triplet in _iter_triplets(bqm, vartype_header):
        fp.write('%s\n' % triplet)
def _get_callable(obj, of_class=None):
    """Get callable for an object and its full name.

    Supports:
    * functions
    * classes (jumps to __init__())
    * methods
    * @classmethod
    * @property

    :param obj: function | class
    :type obj: Callable
    :param of_class: Class that this method is a member of
    :type of_class: class | None
    :return: (qualname, Callable | None, Class | None). Callable is None for classes without __init__()
    :rtype: (str, Callable | None, Class | None)
    """
    # Cases
    o = obj
    if inspect.isclass(obj):
        try:
            o = obj.__init__
            of_class = obj
        except AttributeError:
            pass
    # Finish
    return qualname(obj), o, of_class
def __grabHotkey(self, key, modifiers, window):
    """Grab a specific hotkey in the given window."""
    logger.debug("Grabbing hotkey: %r %r", modifiers, key)
    try:
        keycode = self.__lookupKeyCode(key)
        mask = 0
        for mod in modifiers:
            mask |= self.modMasks[mod]
        window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync)
        if Key.NUMLOCK in self.modMasks:
            window.grab_key(keycode, mask | self.modMasks[Key.NUMLOCK], True,
                            X.GrabModeAsync, X.GrabModeAsync)
        if Key.CAPSLOCK in self.modMasks:
            window.grab_key(keycode, mask | self.modMasks[Key.CAPSLOCK], True,
                            X.GrabModeAsync, X.GrabModeAsync)
        if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
            window.grab_key(keycode,
                            mask | self.modMasks[Key.CAPSLOCK] | self.modMasks[Key.NUMLOCK],
                            True, X.GrabModeAsync, X.GrabModeAsync)
    except Exception as e:
        logger.warning("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
def write_f90(self):
    """Writes the F90 module file to the specified directory."""
    from os import path
    self._check_dir()
    # Find the list of executables that we actually need to write wrappers for.
    self._find_executables()
    lines = []
    lines.append("!!<summary>Auto-generated Fortran module for interaction with ctypes\n"
                 "!!through python. Generated for module {}.</summary>".format(self.module.name))
    lines.append("MODULE {}_c".format(self.module.name))
    # Some of the variables and parameters will have special kinds that need to be imported.
    # Check each of the executables to find additional dependencies.
    lines.append(" use {}".format(self.module.name))
    lines.append(" use ISO_C_BINDING")
    for modname in self.needs:
        lines.append(" use {}".format(modname))
    lines.append(" implicit none")
    lines.append("CONTAINS")
    # We want everything in these wrapper modules to be public, so we just exclude the 'private'.
    for execkey in self.uses:
        self._write_executable_f90(execkey, lines)
    lines.append("END MODULE {}_c".format(self.module.name))
    fullpath = path.join(self.f90path, "{}_c.f90".format(self.module.name))
    with open(fullpath, 'w') as f:
        f.write('\n'.join(lines))
def format_item(item, py=True):
    """:param py: python format or not"""
    # for non python format, just output itself,
    # so the result is `something` instead of `"something"`
    if not py:
        return unicode(item)
    if isinstance(item, (str, unicode)):
        # a long int is prefixed by a #
        if item.startswith('#'):
            return unicode(long(item[1:]))
        return u'"{0}"'.format(item)
    return unicode(item)
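# Illustrative sketch of format_item behaviour (Python 2 code, given the use of
# unicode/long above); the sample values are made up.
# format_item('hello')            -> u'"hello"'
# format_item('#12345678901')     -> u'12345678901'  (treated as a long int)
# format_item(3.5)                -> u'3.5'
# format_item('hello', py=False)  -> u'hello'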
def send(self, request, **kwargs):
    """Send a given PreparedRequest."""
    # Set defaults that the hooks can utilize to ensure they always have
    # the correct parameters to reproduce the previous request.
    kwargs.setdefault('stream', self.stream)
    kwargs.setdefault('verify', self.verify)
    kwargs.setdefault('cert', self.cert)
    kwargs.setdefault('proxies', self.proxies)
    # It's possible that users might accidentally send a Request object.
    # Guard against that specific failure case.
    if not isinstance(request, PreparedRequest):
        raise ValueError('You can only send PreparedRequests.')
    checked_urls = set()
    while request.url in self.redirect_cache:
        checked_urls.add(request.url)
        new_url = self.redirect_cache.get(request.url)
        if new_url in checked_urls:
            break
        request.url = new_url
    # Set up variables needed for resolve_redirects and dispatching of hooks
    allow_redirects = kwargs.pop('allow_redirects', True)
    stream = kwargs.get('stream')
    timeout = kwargs.get('timeout')
    verify = kwargs.get('verify')
    cert = kwargs.get('cert')
    proxies = kwargs.get('proxies')
    hooks = request.hooks
    # Get the appropriate adapter to use
    adapter = self.get_adapter(url=request.url)
    # Start time (approximately) of the request
    start = datetime.utcnow()
    # Send the request
    r = adapter.send(request, **kwargs)
    # Total elapsed time of the request (approximately)
    r.elapsed = datetime.utcnow() - start
    # Response manipulation hooks
    r = dispatch_hook('response', hooks, r, **kwargs)
    # Persist cookies
    if r.history:
        # If the hooks create history then we want those cookies too
        for resp in r.history:
            extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
    extract_cookies_to_jar(self.cookies, request, r.raw)
    # Redirect resolving generator.
    gen = self.resolve_redirects(r, request, stream=stream, timeout=timeout,
                                 verify=verify, cert=cert, proxies=proxies)
    # Resolve redirects if allowed.
    history = [resp for resp in gen] if allow_redirects else []
    # Shuffle things around if there's history.
    if history:
        # Insert the first (original) request at the start
        history.insert(0, r)
        # Get the last request made
        r = history.pop()
        r.history = history
    if not stream:
        r.content
    return r
def depth(self):
    """Compute the depth of the tree (depth of a leaf = 0)."""
    return self.fold_up(lambda n, fl, fg: max(fl + 1, fg + 1), lambda leaf: 0)
def open(self, name, mode='r', compression=None):
    """Open a file pointer. Note that a file is *always* opened in text
    mode. The method inherits its input parameters from the constructor
    of :class:`FileObject`."""
    if compression == 'use_ext':
        self.get_compression_type(name)
    else:
        self.ctype = compression
    if not self.ctype:
        self.fp = open(name, mode)
    elif self.ctype == 'gzip':
        self.fp = gzip.open(name, mode + 't')
    elif self.ctype == 'bzip2':
        try:
            # Python 3 supports opening bzip2 files in text mode;
            # therefore, we prefer to open them this way
            self.fp = bz2.open(name, mode + 't')
        except:
            # BZ2File opens a file in binary mode;
            # thus, we have to use codecs.getreader()
            # to be able to use it in text mode
            self.fp_extra = bz2.BZ2File(name, mode)
            if mode == 'r':
                self.fp = codecs.getreader('ascii')(self.fp_extra)
            else:  # mode == 'w'
                self.fp = codecs.getwriter('ascii')(self.fp_extra)
    else:  # self.ctype == 'lzma'
        # LZMA is available in Python 2 only if backports.lzma is installed;
        # Python 3 supports it by default
        assert lzma_present, 'LZMA compression is unavailable.'
        self.fp = lzma.open(name, mode=mode + 't')
def _check_deprecated(self, dest, kwargs):
    """Checks option for deprecation and issues a warning/error if necessary."""
    removal_version = kwargs.get('removal_version', None)
    if removal_version is not None:
        warn_or_error(
            removal_version=removal_version,
            deprecated_entity_description="option '{}' in {}".format(dest, self._scope_str()),
            deprecation_start_version=kwargs.get('deprecation_start_version', None),
            hint=kwargs.get('removal_hint', None),
            stacklevel=9999)
async def digital_write(self, pin, value):
    """Set the specified pin to the specified value.

    :param pin: pin number
    :param value: pin value
    :returns: No return value
    """
    # The command value is not a fixed value, but needs to be calculated
    # using the pin's port number
    port = pin // 8
    calculated_command = PrivateConstants.DIGITAL_MESSAGE + port
    mask = 1 << (pin % 8)
    # Calculate the value for the pin's position in the port mask
    if value == 1:
        PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] |= mask
    else:
        PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] &= ~mask
    # Assemble the command
    command = (calculated_command,
               PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] & 0x7f,
               (PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] >> 7) & 0x7f)
    await self._send_command(command)
def byte(self):
    """Return a byte representation of ControlFlags."""
    flags = (int(self._in_use) << 7
             | int(self._controller) << 6
             | int(self._bit5) << 5
             | int(self._bit4) << 4
             | int(self._used_before) << 1)
    return flags
def parse_pr_numbers(git_log_lines):
    """Parse PR numbers from commit messages. At GitHub those have the format:
        `here is the message (#1234)`
    where `1234` is the PR number."""
    prs = []
    for line in git_log_lines:
        pr_number = parse_pr_number(line)
        if pr_number:
            prs.append(pr_number)
    return prs
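# Illustrative sketch: feeding `git log --oneline`-style lines to
# parse_pr_numbers. It relies on the parse_pr_number helper assumed to live in
# the same module, and the commit messages below are made up.
# log_lines = [
#     'Fix flaky integration test (#1234)',
#     'Bump version',
#     'Add retry logic to the HTTP client (#1240)',
# ]
# parse_pr_numbers(log_lines)  # -> ['1234', '1240'] (or ints, depending on parse_pr_number)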
def listDataTiers(self, data_tier_name=""):
    """API to list data tiers known to DBS.

    :param data_tier_name: List details on that data tier (Optional)
    :type data_tier_name: str
    :returns: List of dictionaries containing the following keys (data_tier_id, data_tier_name, create_by, creation_date)
    """
    data_tier_name = data_tier_name.replace("*", "%")
    try:
        conn = self.dbi.connection()
        return self.dbsDataTierListDAO.execute(conn, data_tier_name.upper())
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except ValueError as ve:
        dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Data",
                            self.logger.exception, ve.message)
    except TypeError as te:
        dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input DataType",
                            self.logger.exception, te.message)
    except NameError as ne:
        dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Searching Key",
                            self.logger.exception, ne.message)
    except Exception as ex:
        sError = "DBSReaderModel/listDataTiers. %s\n. Exception trace: \n %s" % (
            ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
    finally:
        if conn:
            conn.close()
def list_certs(self, filters=None):
    """Retrieve loaded certificates.

    :param filters: retrieve only matching certificates (optional)
    :type filters: dict
    :return: list of installed certificates
    :rtype: list
    """
    _, cert_list = self.handler.streamed_request("list-certs", "list-cert", filters)
    return cert_list
def map2matrix(data_map, layout):
    r"""Map to Matrix

    This method transforms a 2D map to a 2D matrix

    Parameters
    ----------
    data_map : np.ndarray
        Input data map, 2D array
    layout : tuple
        2D layout of 2D images

    Returns
    -------
    np.ndarray 2D matrix

    Raises
    ------
    ValueError
        For invalid layout

    Examples
    --------
    >>> from modopt.base.transform import map2matrix
    >>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],
    ...               [10, 11, 14, 15]])
    >>> map2matrix(a, (2, 2))
    array([[ 0,  4,  8, 12],
           [ 1,  5,  9, 13],
           [ 2,  6, 10, 14],
           [ 3,  7, 11, 15]])
    """
    layout = np.array(layout)
    # Select n objects
    n_obj = np.prod(layout)
    # Get the shape of the images
    image_shape = (np.array(data_map.shape) // layout)[0]
    # Stack objects from map
    data_matrix = []
    for i in range(n_obj):
        lower = (image_shape * (i // layout[1]),
                 image_shape * (i % layout[1]))
        upper = (image_shape * (i // layout[1] + 1),
                 image_shape * (i % layout[1] + 1))
        data_matrix.append((data_map[lower[0]:upper[0],
                                     lower[1]:upper[1]]).reshape(image_shape ** 2))
    return np.array(data_matrix).T
def _py_func_with_gradient(func, inp, Tout, stateful=True, name=None, grad_func=None):
    """PyFunc defined as given by Tensorflow

    :param func: Custom Function
    :param inp: Function Inputs
    :param Tout: Output Type of our Custom Function
    :param stateful: Calculate Gradients when stateful is True
    :param name: Name of the PyFunction
    :param grad: Custom Gradient Function
    :return:
    """
    # Generate a random name in order to avoid conflicts with inbuilt names
    rnd_name = 'PyFuncGrad-' + '%0x' % getrandbits(30 * 4)
    # Register Tensorflow Gradient
    tf.RegisterGradient(rnd_name)(grad_func)
    # Get current graph
    g = tf.get_default_graph()
    # Add gradient override map
    with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def list_parse(name_list):
    """Parse a comma-separated list of values, or a filename (starting with @)
    containing a list value on each line."""
    if name_list and name_list[0] == '@':
        value = name_list[1:]
        if not os.path.exists(value):
            log.warning('The file %s does not exist' % value)
            return
        try:
            return [v.strip() for v in open(value, 'r').readlines()]
        except IOError as e:
            log.warning('reading %s failed: %s; ignoring this file' % (value, e))
    else:
        return [v.strip() for v in name_list.split(',')]
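# Illustrative sketch of the two list_parse input forms; the file name below is
# a placeholder.
# list_parse('alpha, beta, gamma')   # -> ['alpha', 'beta', 'gamma']
# list_parse('@hosts.txt')           # -> one stripped entry per line of hosts.txt, if it exists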
def get_pairs(self, format_string):
    """Tokenize a logging format string and extract field names from tokens.

    :param format_string: The logging format string.
    :returns: A generator of :class:`FormatStringToken` objects.
    """
    for token in self.get_tokens(format_string):
        match = self.name_pattern.search(token)
        name = match.group(1) if match else None
        yield FormatStringToken(name=name, text=token)
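# Hedged usage sketch for get_pairs, assuming `parser` is an instance of the
# format-string parser class this method belongs to and that FormatStringToken
# carries (name, text) attributes:
# for token in parser.get_pairs('%(asctime)s %(levelname)s %(message)s'):
#     print(token.name, repr(token.text))
# # e.g. 'asctime' '%(asctime)s', None ' ', 'levelname' '%(levelname)s', ...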
def total_memory(self, image='ubuntu'):
    '''Get the available ram of the docker machine in Kb'''
    try:
        ret = subprocess.check_output(
            f'''docker run -t {image} cat /proc/meminfo | grep MemTotal''',
            shell=True, stdin=subprocess.DEVNULL)
        # ret: MemTotal:       30208916 kB
        self.tot_mem = int(ret.split()[1])
    except Exception:
        # some systems do not have cat or grep
        self.tot_mem = None
    return self.tot_mem
def import_components_from_dataframe(network, dataframe, cls_name):
    """Import components from a pandas DataFrame.

    If columns are missing then defaults are used.
    If extra columns are added, these are left in the resulting component dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    cls_name : string
        Name of class of component

    Examples
    --------
    >>> network.import_components_from_dataframe(dataframe, "Line")
    """
    if cls_name == "Generator" and "source" in dataframe.columns:
        logger.warning("'source' for generators is deprecated, use 'carrier' instead.")
    if cls_name == "Generator" and "dispatch" in dataframe.columns:
        logger.warning("'dispatch' for generators is deprecated, use time-varing 'p_max_pu' for 'variable' and static 'p_max_pu' for 'flexible'.")
    if cls_name in ["Generator", "StorageUnit"] and "p_max_pu_fixed" in dataframe.columns:
        logger.warning("'p_max_pu_fixed' for generators is deprecated, use static 'p_max_pu' instead.")
    if cls_name in ["Generator", "StorageUnit"] and "p_min_pu_fixed" in dataframe.columns:
        logger.warning("'p_min_pu_fixed' for generators is deprecated, use static 'p_min_pu' instead.")
    if cls_name == "Bus" and "current_type" in dataframe.columns:
        logger.warning("'current_type' for buses is deprecated, use 'carrier' instead.")
    if cls_name == "Link" and "s_nom" in dataframe.columns:
        logger.warning("'s_nom*' for links is deprecated, use 'p_nom*' instead.")
    attrs = network.components[cls_name]["attrs"]
    static_attrs = attrs[attrs.static].drop("name")
    non_static_attrs = attrs[~attrs.static]
    # Clean dataframe and ensure correct types
    dataframe = pd.DataFrame(dataframe)
    dataframe.index = dataframe.index.astype(str)
    for k in static_attrs.index:
        if k not in dataframe.columns:
            dataframe[k] = static_attrs.at[k, "default"]
        else:
            if static_attrs.at[k, "type"] == 'string':
                dataframe[k] = dataframe[k].replace({np.nan: ""})
            dataframe[k] = dataframe[k].astype(static_attrs.at[k, "typ"])
    # check all the buses are well-defined
    for attr in ["bus", "bus0", "bus1"]:
        if attr in dataframe.columns:
            missing = dataframe.index[~dataframe[attr].isin(network.buses.index)]
            if len(missing) > 0:
                logger.warning("The following %s have buses which are not defined:\n%s",
                               cls_name, missing)
    non_static_attrs_in_df = non_static_attrs.index.intersection(dataframe.columns)
    old_df = network.df(cls_name)
    new_df = dataframe.drop(non_static_attrs_in_df, axis=1)
    if not old_df.empty:
        new_df = pd.concat((old_df, new_df), sort=False)
    if not new_df.index.is_unique:
        logger.error("Error, new components for {} are not unique".format(cls_name))
        return
    setattr(network, network.components[cls_name]["list_name"], new_df)
    # now deal with time-dependent properties
    pnl = network.pnl(cls_name)
    for k in non_static_attrs_in_df:
        # If reading in outputs, fill the outputs
        pnl[k] = pnl[k].reindex(columns=new_df.index,
                                fill_value=non_static_attrs.at[k, "default"])
        pnl[k].loc[:, dataframe.index] = dataframe.loc[:, k].values
    setattr(network, network.components[cls_name]["list_name"] + "_t", pnl)
def get_neighborhood_in_mask(image, mask, radius, physical_coordinates=False,
                             boundary_condition=None, spatial_info=False, get_gradient=False):
    """Get neighborhoods for voxels within mask.

    This converts a scalar image to a matrix with rows that contain neighbors
    around a center voxel

    ANTsR function: `getNeighborhoodInMask`

    Arguments
    ---------
    image : ANTsImage
        image to get values from
    mask : ANTsImage
        image indicating which voxels to examine. Each voxel > 0 will be used as the
        center of a neighborhood
    radius : tuple/list
        array of values for neighborhood radius (in voxels)
    physical_coordinates : boolean
        whether voxel indices and offsets should be in voxel or physical coordinates
    boundary_condition : string (optional)
        how to handle voxels in a neighborhood, but not in the mask.
        None : fill values with `NaN`
        `image` : use image value, even if not in mask
        `mean` : use mean of all non-NaN values for that neighborhood
    spatial_info : boolean
        whether voxel locations and neighborhood offsets should be returned along with pixel values.
    get_gradient : boolean
        whether a matrix of gradients (at the center voxel) should be returned in
        addition to the value matrix (WIP)

    Returns
    -------
    if spatial_info is False:
        if get_gradient is False:
            ndarray
                an array of pixel values where the number of rows is the size of the
                neighborhood and there is a column for each voxel
        else if get_gradient is True:
            dictionary w/ following key-value pairs:
                values : ndarray
                    array of pixel values where the number of rows is the size of the
                    neighborhood and there is a column for each voxel.
                gradients : ndarray
                    array providing the gradients at the center voxel of each
                    neighborhood
    else if spatial_info is True:
        dictionary w/ following key-value pairs:
            values : ndarray
                array of pixel values where the number of rows is the size of the
                neighborhood and there is a column for each voxel.
            indices : ndarray
                array providing the center coordinates for each neighborhood
            offsets : ndarray
                array providing the offsets from center for each voxel in a neighborhood

    Example
    -------
    >>> import ants
    >>> r16 = ants.image_read(ants.get_ants_data('r16'))
    >>> mask = ants.get_mask(r16)
    >>> mat = ants.get_neighborhood_in_mask(r16, mask, radius=(2,2))
    """
    if not isinstance(image, iio.ANTsImage):
        raise ValueError('image must be ANTsImage type')
    if not isinstance(mask, iio.ANTsImage):
        raise ValueError('mask must be ANTsImage type')
    if isinstance(radius, (int, float)):
        radius = [radius] * image.dimension
    if (not isinstance(radius, (tuple, list))) or (len(radius) != image.dimension):
        raise ValueError('radius must be tuple or list with length == image.dimension')
    boundary = 0
    if boundary_condition == 'image':
        boundary = 1
    elif boundary_condition == 'mean':
        boundary = 2
    libfn = utils.get_lib_fn('getNeighborhoodMatrix%s' % image._libsuffix)
    retvals = libfn(image.pointer, mask.pointer, list(radius),
                    int(physical_coordinates), int(boundary),
                    int(spatial_info), int(get_gradient))
    if not spatial_info:
        if get_gradient:
            retvals['values'] = np.asarray(retvals['values'])
            retvals['gradients'] = np.asarray(retvals['gradients'])
        else:
            retvals = np.asarray(retvals['matrix'])
    else:
        retvals['values'] = np.asarray(retvals['values'])
        retvals['indices'] = np.asarray(retvals['indices'])
        retvals['offsets'] = np.asarray(retvals['offsets'])
    return retvals
def selected_subcategory(self):
    """Obtain the subcategory selected by user.

    :returns: Metadata of the selected subcategory.
    :rtype: dict, None
    """
    item = self.lstSubcategories.currentItem()
    try:
        return definition(item.data(QtCore.Qt.UserRole))
    except (AttributeError, NameError):
        return None
def to_gremlin(self):
    """Return a unicode object with the Gremlin representation of this expression."""
    self.validate()
    edge_direction, edge_name = self.fold_scope_location.get_first_folded_edge()
    validate_safe_string(edge_name)
    inverse_direction_table = {'out': 'in', 'in': 'out'}
    inverse_direction = inverse_direction_table[edge_direction]
    base_location_name, _ = self.fold_scope_location.base_location.get_location_name()
    validate_safe_string(base_location_name)
    _, field_name = self.fold_scope_location.get_location_name()
    validate_safe_string(field_name)
    if not self.folded_ir_blocks:
        # There is no filtering nor type coercions applied to this @fold scope.
        # This template generates code like:
        #   (m.base.in_Animal_ParentOf == null) ?
        #       [] : (m.base.in_Animal_ParentOf.collect{entry -> entry.outV.next().uuid})
        template = (
            u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('
            u'm.{base_location_name}.{direction}_{edge_name}.collect{{'
            u'entry -> entry.{inverse_direction}V.next().{field_name}{maybe_format}'
            u'}}'
            u'))')
        filter_and_traverse_data = ''
    else:
        # There is filtering or type coercions in this @fold scope.
        # This template generates code like:
        #   (m.base.in_Animal_ParentOf == null) ?
        #       [] : (m.base.in_Animal_ParentOf
        #            .collect{entry -> entry.outV.next()}
        #            .findAll{it.alias.contains($wanted)}
        #            .collect{it.uuid})
        template = (
            u'((m.{base_location_name}.{direction}_{edge_name} == null) ? [] : ('
            u'm.{base_location_name}.{direction}_{edge_name}.collect{{'
            u'entry -> entry.{inverse_direction}V.next()'
            u'}}'
            u'.{filters_and_traverses}'
            u'.collect{{entry -> entry.{field_name}{maybe_format}}}'
            u'))')
        filter_and_traverse_data = u'.'.join(block.to_gremlin() for block in self.folded_ir_blocks)
    maybe_format = ''
    inner_type = strip_non_null_from_type(self.field_type.of_type)
    if GraphQLDate.is_same_type(inner_type):
        maybe_format = '.format("' + STANDARD_DATE_FORMAT + '")'
    elif GraphQLDateTime.is_same_type(inner_type):
        maybe_format = '.format("' + STANDARD_DATETIME_FORMAT + '")'
    template_data = {
        'base_location_name': base_location_name,
        'direction': edge_direction,
        'edge_name': edge_name,
        'field_name': field_name,
        'inverse_direction': inverse_direction,
        'maybe_format': maybe_format,
        'filters_and_traverses': filter_and_traverse_data,
    }
    return template.format(**template_data)
def zset(self, name, key, score=1):
    """Set the score of ``key`` from the zset ``name`` to ``score``

    Like **Redis.ZADD**

    :param string name: the zset name
    :param string key: the key name
    :param int score: the score for ranking
    :return: ``True`` if ``zset`` created a new score, otherwise ``False``
    :rtype: bool

    >>> ssdb.zset("zset_1", 'z', 1024)
    True
    >>> ssdb.zset("zset_1", 'a', 1024)
    False
    >>> ssdb.zset("zset_2", 'key_10', -4)
    >>> ssdb.zget("zset_2", 'key1')
    42
    """
    score = get_integer('score', score)
    return self.execute_command('zset', name, key, score)
def populate_observable(self, time, kind, dataset, **kwargs):
    """TODO: add documentation"""
    if kind in ['mesh', 'orb']:
        return
    if time == self.time and dataset in self.populated_at_time and 'pblum' not in kind:
        # then we've already computed the needed columns
        # TODO: handle the case of intensities already computed by
        # /different/ dataset (ie RVs computed first and filling intensities
        # and then lc requesting intensities with SAME passband/atm)
        return
    new_mesh_cols = getattr(self, '_populate_{}'.format(kind.lower()))(dataset, **kwargs)
    for key, col in new_mesh_cols.items():
        self.mesh.update_columns_dict({'{}:{}'.format(key, dataset): col})
    self.populated_at_time.append(dataset)
def var(self):
    """Compute the variance across images."""
    return self._constructor(self.values.var(axis=0, keepdims=True))
def get_billing_report_active_devices(self, month, **kwargs):  # noqa: E501
    """Get raw billing data of the active devices for the month.  # noqa: E501

    Fetch the raw billing data of the active devices for the currently authenticated
    commercial non-subtenant account. This is supplementary data for the billing report.
    The raw billing data of the active devices for subtenant accounts are included in
    their aggregator's raw billing data of the active devices. The endpoint returns the
    URL to download the gzipped CSV file. The first line is the header providing
    information on the active devices. For example, the ID of an active device.

    **Example usage:**
    curl -X GET https://api.us-east-1.mbedcloud.com/v3/billing-report-active-devices?month=2018-07 -H 'authorization: Bearer {api-key}'  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_billing_report_active_devices(month, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str month: Queried year and month of billing report. (required)
    :return: BillingReportRawDataResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_billing_report_active_devices_with_http_info(month, **kwargs)  # noqa: E501
    else:
        (data) = self.get_billing_report_active_devices_with_http_info(month, **kwargs)  # noqa: E501
        return data
def filter_reads ( self , input_bam , output_bam , metrics_file , paired = False , cpus = 16 , Q = 30 ) :
"""Remove duplicates , filter for > Q , remove multiple mapping reads .
For paired - end reads , keep only proper pairs .""" | nodups = re . sub ( "\.bam$" , "" , output_bam ) + ".nodups.nofilter.bam"
cmd1 = self . tools . sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}" . format ( cpus , input_bam , nodups , metrics_file )
cmd2 = self . tools . sambamba + ' view -t {0} -f bam --valid' . format ( cpus )
if paired :
cmd2 += ' -F "not (unmapped or mate_is_unmapped) and proper_pair'
else :
cmd2 += ' -F "not unmapped'
cmd2 += ' and not (secondary_alignment or supplementary) and mapping_quality >= {0}"' . format ( Q )
cmd2 += ' {0} |' . format ( nodups )
cmd2 += self . tools . sambamba + " sort -t {0} /dev/stdin -o {1}" . format ( cpus , output_bam )
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi" . format ( nodups )
cmd4 = "if [[ -s {0} ]]; then rm {0}; fi" . format ( nodups + ".bai" )
return [ cmd1 , cmd2 , cmd3 , cmd4 ] |
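A hedged sketch of how the four returned shell commands might be executed by a caller; the instance name ngstk, the file names, and the choice of running them with subprocess under bash are assumptions for illustration, not part of the source.
import subprocess

def run_filter(ngstk, in_bam, out_bam, metrics):
    # `ngstk` is assumed to be an instance of the class defining filter_reads above.
    cmds = ngstk.filter_reads(in_bam, out_bam, metrics, paired=True, cpus=8, Q=30)
    for cmd in cmds:
        # bash is required because the commands contain pipes and [[ ... ]] tests
        subprocess.run(cmd, shell=True, check=True, executable="/bin/bash")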
def run ( self ) :
"""Begins simultaneous generation / acquisition
: returns : numpy . ndarray - - read samples""" | try :
if self . aotask is None :
print u"You must arm the calibration first"
return
# acquire data and stop task , lock must have been release by
# previous reset
self . daq_lock . acquire ( )
self . aotask . StartTask ( )
self . aitask . StartTask ( )
# blocking read
data = self . aitask . read ( )
# write task should always be shorter than read
# self . aotask . WaitUntilTaskDone ( 10)
self . nacquired += 1
self . aitask . stop ( )
self . aotask . stop ( )
except :
print u'ERROR! TERMINATE!'
self . daq_lock . release ( )
self . stop ( )
raise
return data |
def geoadd ( self , key , longitude , latitude , member , * args , ** kwargs ) :
"""Add one or more geospatial items in the geospatial index represented
using a sorted set .
: rtype : int""" | return self . execute ( b'GEOADD' , key , longitude , latitude , member , * args , ** kwargs ) |
def source ( inp , features = None , top = None , chunksize = None , ** kw ) :
r"""Defines trajectory data source
This function defines input trajectories without loading them . You can pass
the resulting object into transformers such as : func : ` pyemma . coordinates . tica `
or clustering algorithms such as : func : ` pyemma . coordinates . cluster _ kmeans ` .
Then , the data will be streamed instead of being loaded , thus saving memory .
You can also use this function to construct the first stage of a data
processing : func : ` pipeline ` .
Parameters
inp : str ( file name ) or ndarray or list of strings ( file names ) or list of ndarrays or nested list of str | ndarray ( 1 level )
The inp file names or input data . Can be given in any of
these ways :
1 . File name of a single trajectory . It can have any of the molecular
dynamics trajectory formats or raw data formats specified in : py : func : ` load ` .
2 . List of trajectory file names . It can have any of the molecular
dynamics trajectory formats or raw data formats specified in : py : func : ` load ` .
3 . Molecular dynamics trajectory in memory as a numpy array of shape
( T , N , 3 ) with T time steps , N atoms each having three ( x , y , z )
spatial coordinates .
4 . List of molecular dynamics trajectories in memory , each given as a
numpy array of shape ( T _ i , N , 3 ) , where trajectory i has T _ i time
steps and all trajectories share the same number of atoms N .
5 . Trajectory of some features or order parameters in memory
as a numpy array of shape ( T , N ) with T time steps and N dimensions .
6 . List of trajectories of some features or order parameters in memory ,
each given as a numpy array of shape ( T _ i , N ) , where trajectory i
has T _ i time steps and all trajectories have N dimensions .
7 . List of NumPy array files ( . npy ) of shape ( T , N ) . Note these
arrays are not being loaded completely , but mapped into memory
( read - only ) .
8 . List of tabulated ASCII files of shape ( T , N ) .
9 . Nested lists ( one level deep ) , e . g . :
[ [ ' traj1_0 . xtc ' , ' traj1_1 . xtc ' ] , ' traj2 _ full . xtc ' , [ ' traj3_0 . xtc ' , . . . ] ]
Each inner list groups fragments that are treated as one joint trajectory .
features : MDFeaturizer , optional , default = None
a featurizer object specifying how molecular dynamics files should be
read ( e . g . intramolecular distances , angles , dihedrals , etc ) . This
parameter only makes sense if the input comes in the form of molecular
dynamics trajectories or data , and will otherwise create a warning and
have no effect .
top : str , mdtraj . Trajectory or mdtraj . Topology , optional , default = None
A topology file name . This is needed when molecular dynamics
trajectories are given and no featurizer is given .
In this case , only the Cartesian coordinates will be read . You can also pass an already
loaded mdtraj . Topology object . If it is an mdtraj . Trajectory object , the topology
will be extracted from it .
chunksize : int , default = None
Number of data frames to process at once . Choose a higher value here ,
to optimize thread usage and gain processing speed . If None is passed ,
use the default value of the underlying reader / data source . Choose zero to
disable chunking entirely .
Returns
reader : : class : ` DataSource < pyemma . coordinates . data . _ base . datasource . DataSource > ` object
See also
: func : ` pyemma . coordinates . load `
If your memory is big enough to load all features into memory , don ' t
bother using source - working in memory is faster !
: func : ` pyemma . coordinates . pipeline `
The data input is the first stage for your pipeline . Add other stages
to it and build a pipeline to analyze big data in streaming mode .
Examples
Create a reader for NumPy files :
> > > import numpy as np
> > > from pyemma . coordinates import source
>>> reader = source ( [ ' 001 . npy ' , ' 002 . npy ' ] ) # doctest : + SKIP
Create a reader for trajectory files and select some distance as feature :
> > > reader = source ( [ ' traj01 . xtc ' , ' traj02 . xtc ' ] , top = ' my _ structure . pdb ' ) # doctest : + SKIP
> > > reader . featurizer . add _ distances ( [ [ 0 , 1 ] , [ 5 , 6 ] ] ) # doctest : + SKIP
> > > calculated _ features = reader . get _ output ( ) # doctest : + SKIP
Create a reader for a CSV file :
> > > reader = source ( ' data . csv ' ) # doctest : + SKIP
Create a reader for huge NumPy in - memory arrays to process them in
huge chunks to avoid memory issues :
> > > data = np . random . random ( int ( 1e6 ) )
> > > reader = source ( data , chunksize = 1000)
> > > from pyemma . coordinates import cluster _ regspace
> > > regspace = cluster _ regspace ( reader , dmin = 0.1)
. . autoclass : : pyemma . coordinates . data . interface . ReaderInterface
: members :
: undoc - members :
. . rubric : : Methods
. . autoautosummary : : pyemma . coordinates . data . interface . ReaderInterface
: methods :
. . rubric : : Attributes
. . autoautosummary : : pyemma . coordinates . data . interface . ReaderInterface
: attributes :""" | from pyemma . coordinates . data . _base . iterable import Iterable
from pyemma . coordinates . data . util . reader_utils import create_file_reader
from pyemma . util . reflection import get_default_args
cs = _check_old_chunksize_arg ( chunksize , get_default_args ( source ) [ 'chunksize' ] , ** kw )
# CASE 1 : input is a string or list of strings
# check : if single string create a one - element list
if isinstance ( inp , _string_types ) or ( isinstance ( inp , ( list , tuple ) ) and ( any ( isinstance ( item , ( list , tuple , _string_types ) ) for item in inp ) or len ( inp ) == 0 ) ) :
reader = create_file_reader ( inp , top , features , chunksize = cs , ** kw )
elif isinstance ( inp , _np . ndarray ) or ( isinstance ( inp , ( list , tuple ) ) and ( any ( isinstance ( item , _np . ndarray ) for item in inp ) or len ( inp ) == 0 ) ) : # CASE 2 : input is a ( T , N , 3 ) array or list of ( T _ i , N , 3 ) arrays
# check : if single array , create a one - element list
# check : do all arrays have compatible dimensions ( * , N , 3 ) ? If not : raise ValueError .
# check : if single array , create a one - element list
# check : do all arrays have compatible dimensions ( * , N ) ? If not : raise ValueError .
# create MemoryReader
from pyemma . coordinates . data . data_in_memory import DataInMemory as _DataInMemory
reader = _DataInMemory ( inp , chunksize = cs , ** kw )
elif isinstance ( inp , Iterable ) :
inp . chunksize = cs
return inp
else :
raise ValueError ( 'unsupported type (%s) of input' % type ( inp ) )
return reader |
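To make input form 9 (fragmented trajectories) concrete, a small hedged sketch; the file and topology names are placeholders and the call is skipped in doctests.
from pyemma.coordinates import source  # doctest: +SKIP
# The two xtc pieces of trajectory 1 are grouped into one continuous trajectory;
# trajectory 2 is a single file. All file names below are placeholders.
reader = source(
    [['traj1_part0.xtc', 'traj1_part1.xtc'], 'traj2_full.xtc'],
    top='my_structure.pdb',
)  # doctest: +SKIP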