hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f717df2260eeca7af7e3eeca0a806d32faa65970
| 26,960
|
py
|
Python
|
coops_ndbc_obs_collector.py
|
saeed-moghimi-noaa/prep_obs_ca
|
6a23128326c5d8fa32f972ebd5ce8cd4aa626b08
|
[
"CC0-1.0"
] | null | null | null |
coops_ndbc_obs_collector.py
|
saeed-moghimi-noaa/prep_obs_ca
|
6a23128326c5d8fa32f972ebd5ce8cd4aa626b08
|
[
"CC0-1.0"
] | null | null | null |
coops_ndbc_obs_collector.py
|
saeed-moghimi-noaa/prep_obs_ca
|
6a23128326c5d8fa32f972ebd5ce8cd4aa626b08
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Read obs from NDBC and Co-opc
Generate csv data files
functions to write and read the data
Observations will be chosen based on the hurricane track to avoid more than neccessary download.
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2018, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "moghimis@gmail.com"
#
import pandas as pd
import numpy as np
#
from bs4 import BeautifulSoup
import requests
import lxml.html
import sys,os
#
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
from pyoos.collectors.coops.coops_sos import CoopsSos
from retrying import retry
import datetime
import dateparser
import cf_units
from io import BytesIO
from ioos_tools.ioos import collector2table
import pickle
import arrow
#
sys.path.append('/disks/NASARCHIVE/saeed_moghimi/opt/python-packages/')
import geopandas as gpd
from shapely.geometry import LineString
#####################################################
if 'base_info' in sys.modules:
del(sys.modules["base_info"])
from base_info import *
#
##### Fucnstions #####
def url_lister(url):
urls = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//a/@href'):
urls.append(link)
return urls
#################
def download(url, path, fname):
sys.stdout.write(fname + '\n')
if not os.path.isfile(path):
urlretrieve(
url,
filename=path,
reporthook=progress_hook(sys.stdout)
)
sys.stdout.write('\n')
sys.stdout.flush()
#################
def progress_hook(out):
"""
Return a progress hook function, suitable for passing to
urllib.retrieve, that writes to the file object *out*.
"""
def it(n, bs, ts):
got = n * bs
if ts < 0:
outof = ''
else:
# On the last block n*bs can exceed ts, so we clamp it
# to avoid awkward questions.
got = min(got, ts)
outof = '/%d [%d%%]' % (ts, 100 * got // ts)
out.write("\r %d%s" % (got, outof))
out.flush()
return it
#################
def get_nhc_storm_info (year,name):
"""
"""
print('Read list of hurricanes from NHC based on year')
if int(year) < 2008:
print (' ERROR: GIS Data is not available for storms before 2008 ')
sys.exit('Exiting .....')
url = 'http://www.nhc.noaa.gov/gis/archive_wsurge.php?year='+year
r = requests.get(url,headers=headers,verify=False)
soup = BeautifulSoup(r.content, 'lxml')
table = soup.find('table')
#table = [row.get_text().strip().split(maxsplit=1) for row in table.find_all('tr')]
tab = []
for row in table.find_all('tr'):
tmp = row.get_text().strip().split()
tab.append([tmp[0],tmp[-1]])
print (tab)
df = pd.DataFrame(
data=tab[:],
columns=['identifier', 'name'],
).set_index('name')
###############################
print(' > based on specific storm go fetch gis files')
hid = df.to_dict()['identifier'][name.upper()]
al_code = ('{}'+year).format(hid)
hurricane_gis_files = '{}_5day'.format(al_code)
return al_code,hurricane_gis_files
#################
#@retry(stop_max_attempt_number=5, wait_fixed=3000)
def download_nhc_gis_files(hurricane_gis_files):
"""
"""
base = os.path.abspath(
os.path.join(os.path.curdir, 'data', hurricane_gis_files)
)
if len (glob(base+'/*')) < 1:
nhc = 'http://www.nhc.noaa.gov/gis/forecast/archive/'
# We don't need the latest file b/c that is redundant to the latest number.
fnames = [
fname for fname in url_lister(nhc)
if fname.startswith(hurricane_gis_files) and 'latest' not in fname
]
if not os.path.exists(base):
os.makedirs(base)
for fname in fnames:
path1 = os.path.join(base, fname)
if not os.path.exists(path1):
url = '{}/{}'.format(nhc, fname)
download(url, path1,fname)
return base
#################################
def read_advisory_cones_info(hurricane_gis_files,base,year,code):
print(' > Read cones shape file ...')
cones, points = [], []
for fname in sorted(glob(os.path.join(base, '{}_*.zip'.format(hurricane_gis_files)))):
number = os.path.splitext(os.path.split(fname)[-1])[0].split('_')[-1]
# read cone shapefiles
if int(year) < 2014:
#al092008.001_5day_pgn.shp
divd = '.'
else:
divd = '-'
pgn = gpd.read_file(
('/{}'+divd+'{}_5day_pgn.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
cones.append(pgn)
#read points shapefiles
pts = gpd.read_file(
('/{}'+divd+'{}_5day_pts.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
# Only the first "obsevartion."
points.append(pts.iloc[0])
return cones,points,pts
#################
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_coops(start, end, sos_name, units, bbox,datum='NAVD', verbose=True):
"""
function to read COOPS data
We need to retry in case of failure b/c the server cannot handle
the high traffic during hurricane season.
"""
collector = CoopsSos()
collector.set_bbox(bbox)
collector.end_time = end
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
config = dict(
units=units,
sos_name=sos_name,
datum = datum, ###Saeed added ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
)
data = collector2table(
collector=collector,
config=config,
col='{} ({})'.format(sos_name, units.format(cf_units.UT_ISO_8859_1))
)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_ndbc(start, end, bbox , sos_name='waves',datum='MSL', verbose=True):
"""
function to read NBDC data
###################
sos_name = waves
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'sea_surface_wave_significant_height (m)',
'sea_surface_wave_peak_period (s)', 'sea_surface_wave_mean_period (s)',
'sea_surface_swell_wave_significant_height (m)',
'sea_surface_swell_wave_period (s)',
'sea_surface_wind_wave_significant_height (m)',
'sea_surface_wind_wave_period (s)', 'sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)',
'sea_surface_swell_wave_to_direction (degree)',
'sea_surface_wind_wave_to_direction (degree)',
'number_of_frequencies (count)', 'center_frequencies (Hz)',
'bandwidths (Hz)', 'spectral_energy (m**2/Hz)',
'mean_wave_direction (degree)', 'principal_wave_direction (degree)',
'polar_coordinate_r1 (1)', 'polar_coordinate_r2 (1)',
'calculation_method', 'sampling_rate (Hz)', 'name'])
sos_name = winds
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'depth (m)', 'wind_from_direction (degree)',
'wind_speed (m/s)', 'wind_speed_of_gust (m/s)',
'upward_air_velocity (m/s)', 'name'])
"""
#add remove from above
if sos_name == 'waves':
col = ['sea_surface_wave_significant_height (m)','sea_surface_wave_peak_period (s)',
'sea_surface_wave_mean_period (s)','sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)']
elif sos_name == 'winds':
col = ['wind_from_direction (degree)','wind_speed (m/s)',
'wind_speed_of_gust (m/s)','upward_air_velocity (m/s)']
#if sos_name == 'waves':
# col = ['sea_surface_wave_significant_height (m)']
#elif sos_name == 'winds':
# col = ['wind_speed (m/s)']
collector = NdbcSos()
collector.set_bbox(bbox)
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
collector.features = None
collector.end_time = start + datetime.timedelta(1)
response = collector.raw(responseFormat='text/csv')
df = pd.read_csv(BytesIO(response), parse_dates=True)
g = df.groupby('station_id')
df = dict()
for station in g.groups.keys():
df.update({station: g.get_group(station).iloc[0]})
df = pd.DataFrame.from_dict(df).T
station_dict = {}
for offering in collector.server.offerings:
station_dict.update({offering.name: offering.description})
names = []
for sta in df.index:
names.append(station_dict.get(sta, sta))
df['name'] = names
#override short time
collector.end_time = end
data = []
for k, row in df.iterrows():
station_id = row['station_id'].split(':')[-1]
collector.features = [station_id]
response = collector.raw(responseFormat='text/csv')
kw = dict(parse_dates=True, index_col='date_time')
obs = pd.read_csv(BytesIO(response), **kw).reset_index()
obs = obs.drop_duplicates(subset='date_time').set_index('date_time')
series = obs[col]
series._metadata = dict(
station=row.get('station_id'),
station_name=row.get('name'),
station_code=str(row.get('station_id').split(':')[-1]),
sensor=row.get('sensor_id'),
lon=row.get('longitude (degree)'),
lat=row.get('latitude (degree)'),
depth=row.get('depth (m)'),
)
data.append(series)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
def write_csv(obs_dir, name, year, table, data, label):
"""
examples
print(' > write csv files')
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
#label = 'coops_ssh'
#table = ssh_table
#data = ssh
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
os.makedirs(outd)
table.to_csv(os.path.join(outt,'table.csv'))
stations = table['station_code']
for ista in range(len(stations)):
sta = str(stations [ista])
fname = os.path.join(outd,sta+'.csv')
df = data[ista]
try:
#in case it is still a series like ssh
df = df.to_frame()
except:
pass
df.to_csv(fname)
fmeta = os.path.join(outd,sta)+'_metadata.csv'
metadata = pd.DataFrame.from_dict( data[ista]._metadata , orient="index")
metadata.to_csv(fmeta)
def read_csv(obs_dir, name, year, label):
"""
examples
print(' > write csv files')
write_csv(base_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(base_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(base_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(base_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
sys.exit('ERROR: check path to: ',outd )
table = pd.read_csv(os.path.join(outt,'table.csv')).set_index('station_name')
table['station_code'] = table['station_code'].astype('str')
stations = table['station_code']
data = []
metadata = []
for ista in range(len(stations)):
sta = stations [ista]
fname8 = os.path.join(outd,sta)+'.csv'
df = pd.read_csv(fname8,parse_dates = ['date_time']).set_index('date_time')
fmeta = os.path.join(outd,sta) + '_metadata.csv'
meta = pd.read_csv(fmeta, header=0, names = ['names','info']).set_index('names')
meta_dict = meta.to_dict()['info']
meta_dict['lon'] = float(meta_dict['lon'])
meta_dict['lat'] = float(meta_dict['lat'])
df._metadata = meta_dict
data.append(df)
return table,data
#################
def write_csv(obs_dir, name, year, table, data, label):
"""
examples
print(' > write csv files')
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
#label = 'coops_ssh'
#table = ssh_table
#data = ssh
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
os.makedirs(outd)
table.to_csv(os.path.join(outt,'table.csv'))
stations = table['station_code']
for ista in range(len(stations)):
sta = str(stations [ista])
fname = os.path.join(outd,sta+'.csv')
df = data[ista]
try:
#in case it is still a series like ssh
df = df.to_frame()
except:
pass
df.to_csv(fname)
fmeta = os.path.join(outd,sta)+'_metadata.csv'
metadata = pd.DataFrame.from_dict( data[ista]._metadata , orient="index")
metadata.to_csv(fmeta)
def read_csv(obs_dir, name, year, label):
"""
examples
print(' > write csv files')
write_csv(base_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(base_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(base_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(base_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
sys.exit('ERROR: check path to: ',outd )
table = pd.read_csv(os.path.join(outt,'table.csv')).set_index('station_name')
table['station_code'] = table['station_code'].astype('str')
stations = table['station_code']
data = []
metadata = []
for ista in range(len(stations)):
sta = stations [ista]
fname8 = os.path.join(outd,sta)+'.csv'
df = pd.read_csv(fname8,parse_dates = ['date_time']).set_index('date_time')
fmeta = os.path.join(outd,sta) + '_metadata.csv'
meta = pd.read_csv(fmeta, header=0, names = ['names','info']).set_index('names')
meta_dict = meta.to_dict()['info']
meta_dict['lon'] = float(meta_dict['lon'])
meta_dict['lat'] = float(meta_dict['lat'])
df._metadata = meta_dict
data.append(df)
return table,data
def write_high_water_marks(obs_dir, name, year):
url = 'https://stn.wim.usgs.gov/STNServices/HWMs/FilteredHWMs.json'
params = {'EventType': 2, # 2 for hurricane
'EventStatus': 0} # 0 for completed
default_filter = {"riverine": True,
"non_still_water": True}
nameyear = (name+year).lower()
out_dir = os.path.join(obs_dir,'hwm')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir,nameyear+'.csv')
usgs_json_file = os.path.join(out_dir,'usgs_hwm_tmp.json')
if not os.path.exists( usgs_json_file):
response = requests.get(url, params=params, headers=headers,verify=False)
response.raise_for_status()
json_data = json.loads(response.text)
with open(usgs_json_file, 'w') as outfile:
json.dump(json_data, outfile )
else:
with open(usgs_json_file) as json_file:
json_data = json.load(json_file)
hwm_stations = dict()
for data in json_data:
if 'elev_ft' in data.keys() and name.lower() in data['eventName'].lower():
hwm_stations[str(data['hwm_id'])] = data
log = pd.DataFrame.from_dict(hwm_stations)
hwm = []
ii = 0
for key in log.keys():
l0 = []
for key0 in log[key].keys() :
l0.append(log[key][key0])
hwm.append(l0)
#
hwm = np.array(hwm)
df = pd.DataFrame(data=hwm, columns=log[key].keys())
drop_poor = False
if drop_poor:
for i in range(len(df)):
tt = df.hwmQualityName[i]
if 'poor' in tt.lower():
df.hwmQualityName[i] = np.nan
df = df.dropna()
df['elev_m'] = pd.to_numeric(df['elev_ft']) * 0.3048 #in meter
#
df.to_csv(fname)
def get_all_data()
###############################################
###############################################
############ MAIN code Starts here ############
if False:
# not needed. will take from the storm specific obs list from coops and ndbc
obs_station_list_gen()
#
#######
# out dir
obs_dir = os.path.join(base_dirf,'obs')
if get_usgs_hwm:
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print(' > Get USGS HWM for ', name)
try:
write_high_water_marks(obs_dir, name, year)
except:
print (' > Get USGS HWM for ', name , ' ERROR ...')
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print( '******************************************************** \n\n\n\n\n\n')
#if bbox_from_best_track:
try:
#bbox_from_best_track = False
code,hurricane_gis_files = get_nhc_storm_info (year,name)
###############################################################################
#download gis zip files
base = download_nhc_gis_files(hurricane_gis_files)
# get advisory cones and track points
cones,pts_actual,points_actual = read_advisory_cones_info(hurricane_gis_files,base,year,code)
start = pts_actual[0] ['FLDATELBL']
end = pts_actual[-1]['FLDATELBL']
#start_txt_actual = ('20' + start[:-2]).replace('/','')
#end_txt_actual = ('20' + end [:-2]).replace('/','')
#print('\n\n\n\n\n\n ********************************************************')
#for key1 in pts_actual[0].keys():
# print( '***** pts_actual[0] [', key1, ']',pts_actual[0] [key1] , '*********')
#print( '******************************************************** \n\n\n\n\n\n')
start_dt = dateparser.parse(start,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) - obs_xtra_days
end_dt = dateparser.parse(end ,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) + obs_xtra_days
#try:
# # bbox_from_best_track:
# start_txt = start_txt_best
# end_txt = end_txt_best
# #bbox = bbox_best
#except:
# start_txt = start_txt_actual
# end_txt = end_txt_actual
#
#start_dt = arrow.get(start_txt, 'YYYYMMDDhh').datetime - obs_xtra_days
#end_dt = arrow.get(end_txt , 'YYYYMMDDhh').datetime + obs_xtra_days
#if False:
# get bbox from actual data
last_cone = cones[-1]['geometry'].iloc[0]
track = LineString([point['geometry'] for point in pts_actual])
lons_actual = track.coords.xy[0]
lats_actual = track.coords.xy[1]
bbox_actual = min(lons_actual)-2, min(lats_actual)-2, max(lons_actual)+2, max(lats_actual)+2
################################################################################
# Find the bounding box to search the data.
bbox_from_best_track = False
bbox = bbox_actual
except:
start_dt = storms[key]['start']
end_dt = storms[key]['end' ]
bounds = storms[key]['bbox' ]
if storms[key]['bbox'] is not None:
bbox = storms[key]['bbox']
#print('\n\n\n\n >>>>> Download and read all GIS data for Storm >',name, ' Year > ', year, '\n ** This is an old STORM !!!!!! \n\n\n\n')
#
# Note that the bounding box is derived from the track and the latest prediction cone.
strbbox = ', '.join(format(v, '.2f') for v in bbox)
#
# Note that the bounding box is derived from the track and the latest prediction cone.
strbbox = ', '.join(format(v, '.2f') for v in bbox)
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print('bbox: {}\nstart: {}\n end: {}'.format(strbbox, start_dt, end_dt))
print( '******************************************************** \n\n\n\n\n\n')
#
#########
if get_cops_wlev:
try:
print(' > Get water level information CO-OPS ... ')
# ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
datum = 'NAVD'
datum = 'MSL'
print ('datum=', datum )
ssh, ssh_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='water_surface_height_above_reference_datum',
units=cf_units.Unit('meters'),
datum = datum ,
bbox=bbox,
)
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
except:
print(' > Get water level information CO-OPS >>>> ERRORRRRR')
######
if get_cops_wind:
try:
print(' > Get wind information CO-OPS ... ')
wnd_obs, wnd_obs_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='wind_speed',
units=cf_units.Unit('m/s'),
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
except:
print(' > Get wind information CO-OPS >>> ERORRRR')
######
if get_ndbc_wind:
try:
print(' > Get wind ocean information (ndbc) ... ')
wnd_ocn, wnd_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='winds',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
except:
print(' > Get wind ocean information (ndbc) >>> ERRRORRRR')
######
if get_ndbc_wave:
try:
print(' > Get wave ocean information (ndbc) ... ')
wav_ocn, wav_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='waves',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
except:
print(' > Get wave ocean information (ndbc) >>> ERRORRRR ')
######
if False:
# test reading files
ssh_table1 , ssh1 = read_csv (obs_dir, name, year, label='coops_ssh' )
wnd_obs_table1, wnd_obs1 = read_csv (obs_dir, name, year, label='coops_wind')
wnd_ocn_table1, wnd_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wind' )
wav_ocn_table1, wav_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wave' )
#if False:
#
# back up script file
args=sys.argv
scr_name = args[0]
scr_dir = os.path.join(obs_dir, name+year)
os.system('cp -fr ' + scr_name + ' ' + scr_dir)
#
#with open(pick, "rb") as f:
# w = pickle.load(f)
#f = open(pick, "rb")
#w = pickle.load(f)
#if __name__ == "__main__":
# main()
if __name__ == "__main__":
get_all_data()
| 33.997478
| 155
| 0.541728
|
"""
Read obs from NDBC and Co-opc
Generate csv data files
functions to write and read the data
Observations will be chosen based on the hurricane track to avoid more than neccessary download.
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2018, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "moghimis@gmail.com"
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import requests
import lxml.html
import sys,os
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
from pyoos.collectors.coops.coops_sos import CoopsSos
from retrying import retry
import datetime
import dateparser
import cf_units
from io import BytesIO
from ioos_tools.ioos import collector2table
import pickle
import arrow
sys.path.append('/disks/NASARCHIVE/saeed_moghimi/opt/python-packages/')
import geopandas as gpd
from shapely.geometry import LineString
if not os.path.exists(base):
os.makedirs(base)
for fname in fnames:
path1 = os.path.join(base, fname)
if not os.path.exists(path1):
url = '{}/{}'.format(nhc, fname)
download(url, path1,fname)
return base
#################################
def read_advisory_cones_info(hurricane_gis_files,base,year,code):
print(' > Read cones shape file ...')
cones, points = [], []
for fname in sorted(glob(os.path.join(base, '{}_*.zip'.format(hurricane_gis_files)))):
number = os.path.splitext(os.path.split(fname)[-1])[0].split('_')[-1]
# read cone shapefiles
if int(year) < 2014:
#al092008.001_5day_pgn.shp
divd = '.'
else:
divd = '-'
pgn = gpd.read_file(
('/{}'+divd+'{}_5day_pgn.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
cones.append(pgn)
#read points shapefiles
pts = gpd.read_file(
('/{}'+divd+'{}_5day_pts.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
# Only the first "obsevartion."
points.append(pts.iloc[0])
return cones,points,pts
#################
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_coops(start, end, sos_name, units, bbox,datum='NAVD', verbose=True):
"""
function to read COOPS data
We need to retry in case of failure b/c the server cannot handle
the high traffic during hurricane season.
"""
collector = CoopsSos()
collector.set_bbox(bbox)
collector.end_time = end
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
config = dict(
units=units,
sos_name=sos_name,
datum = datum, ###Saeed added ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
)
data = collector2table(
collector=collector,
config=config,
col='{} ({})'.format(sos_name, units.format(cf_units.UT_ISO_8859_1))
)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_ndbc(start, end, bbox , sos_name='waves',datum='MSL', verbose=True):
"""
function to read NBDC data
###################
sos_name = waves
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'sea_surface_wave_significant_height (m)',
'sea_surface_wave_peak_period (s)', 'sea_surface_wave_mean_period (s)',
'sea_surface_swell_wave_significant_height (m)',
'sea_surface_swell_wave_period (s)',
'sea_surface_wind_wave_significant_height (m)',
'sea_surface_wind_wave_period (s)', 'sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)',
'sea_surface_swell_wave_to_direction (degree)',
'sea_surface_wind_wave_to_direction (degree)',
'number_of_frequencies (count)', 'center_frequencies (Hz)',
'bandwidths (Hz)', 'spectral_energy (m**2/Hz)',
'mean_wave_direction (degree)', 'principal_wave_direction (degree)',
'polar_coordinate_r1 (1)', 'polar_coordinate_r2 (1)',
'calculation_method', 'sampling_rate (Hz)', 'name'])
sos_name = winds
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'depth (m)', 'wind_from_direction (degree)',
'wind_speed (m/s)', 'wind_speed_of_gust (m/s)',
'upward_air_velocity (m/s)', 'name'])
"""
#add remove from above
if sos_name == 'waves':
col = ['sea_surface_wave_significant_height (m)','sea_surface_wave_peak_period (s)',
'sea_surface_wave_mean_period (s)','sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)']
elif sos_name == 'winds':
col = ['wind_from_direction (degree)','wind_speed (m/s)',
'wind_speed_of_gust (m/s)','upward_air_velocity (m/s)']
#if sos_name == 'waves':
# col = ['sea_surface_wave_significant_height (m)']
#elif sos_name == 'winds':
# col = ['wind_speed (m/s)']
collector = NdbcSos()
collector.set_bbox(bbox)
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
collector.features = None
collector.end_time = start + datetime.timedelta(1)
response = collector.raw(responseFormat='text/csv')
df = pd.read_csv(BytesIO(response), parse_dates=True)
g = df.groupby('station_id')
df = dict()
for station in g.groups.keys():
df.update({station: g.get_group(station).iloc[0]})
df = pd.DataFrame.from_dict(df).T
station_dict = {}
for offering in collector.server.offerings:
station_dict.update({offering.name: offering.description})
names = []
for sta in df.index:
names.append(station_dict.get(sta, sta))
df['name'] = names
#override short time
collector.end_time = end
data = []
for k, row in df.iterrows():
station_id = row['station_id'].split(':')[-1]
collector.features = [station_id]
response = collector.raw(responseFormat='text/csv')
kw = dict(parse_dates=True, index_col='date_time')
obs = pd.read_csv(BytesIO(response), **kw).reset_index()
obs = obs.drop_duplicates(subset='date_time').set_index('date_time')
series = obs[col]
series._metadata = dict(
station=row.get('station_id'),
station_name=row.get('name'),
station_code=str(row.get('station_id').split(':')[-1]),
sensor=row.get('sensor_id'),
lon=row.get('longitude (degree)'),
lat=row.get('latitude (degree)'),
depth=row.get('depth (m)'),
)
data.append(series)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
def write_csv(obs_dir, name, year, table, data, label):
"""
examples
print(' > write csv files')
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
#label = 'coops_ssh'
#table = ssh_table
#data = ssh
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
os.makedirs(outd)
table.to_csv(os.path.join(outt,'table.csv'))
stations = table['station_code']
for ista in range(len(stations)):
sta = str(stations [ista])
fname = os.path.join(outd,sta+'.csv')
df = data[ista]
try:
#in case it is still a series like ssh
df = df.to_frame()
except:
pass
df.to_csv(fname)
fmeta = os.path.join(outd,sta)+'_metadata.csv'
metadata = pd.DataFrame.from_dict( data[ista]._metadata , orient="index")
metadata.to_csv(fmeta)
def read_csv(obs_dir, name, year, label):
"""
examples
print(' > write csv files')
write_csv(base_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(base_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(base_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(base_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
sys.exit('ERROR: check path to: ',outd )
table = pd.read_csv(os.path.join(outt,'table.csv')).set_index('station_name')
table['station_code'] = table['station_code'].astype('str')
stations = table['station_code']
data = []
metadata = []
for ista in range(len(stations)):
sta = stations [ista]
fname8 = os.path.join(outd,sta)+'.csv'
df = pd.read_csv(fname8,parse_dates = ['date_time']).set_index('date_time')
fmeta = os.path.join(outd,sta) + '_metadata.csv'
meta = pd.read_csv(fmeta, header=0, names = ['names','info']).set_index('names')
meta_dict = meta.to_dict()['info']
meta_dict['lon'] = float(meta_dict['lon'])
meta_dict['lat'] = float(meta_dict['lat'])
df._metadata = meta_dict
data.append(df)
return table,data
#################
def write_csv(obs_dir, name, year, table, data, label):
"""
examples
print(' > write csv files')
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
#label = 'coops_ssh'
#table = ssh_table
#data = ssh
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
os.makedirs(outd)
table.to_csv(os.path.join(outt,'table.csv'))
stations = table['station_code']
for ista in range(len(stations)):
sta = str(stations [ista])
fname = os.path.join(outd,sta+'.csv')
df = data[ista]
try:
#in case it is still a series like ssh
df = df.to_frame()
except:
pass
df.to_csv(fname)
fmeta = os.path.join(outd,sta)+'_metadata.csv'
metadata = pd.DataFrame.from_dict( data[ista]._metadata , orient="index")
metadata.to_csv(fmeta)
def read_csv(obs_dir, name, year, label):
"""
examples
print(' > write csv files')
write_csv(base_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(base_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(base_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(base_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
sys.exit('ERROR: check path to: ',outd )
table = pd.read_csv(os.path.join(outt,'table.csv')).set_index('station_name')
table['station_code'] = table['station_code'].astype('str')
stations = table['station_code']
data = []
metadata = []
for ista in range(len(stations)):
sta = stations [ista]
fname8 = os.path.join(outd,sta)+'.csv'
df = pd.read_csv(fname8,parse_dates = ['date_time']).set_index('date_time')
fmeta = os.path.join(outd,sta) + '_metadata.csv'
meta = pd.read_csv(fmeta, header=0, names = ['names','info']).set_index('names')
meta_dict = meta.to_dict()['info']
meta_dict['lon'] = float(meta_dict['lon'])
meta_dict['lat'] = float(meta_dict['lat'])
df._metadata = meta_dict
data.append(df)
return table,data
def write_high_water_marks(obs_dir, name, year):
url = 'https://stn.wim.usgs.gov/STNServices/HWMs/FilteredHWMs.json'
params = {'EventType': 2, # 2 for hurricane
'EventStatus': 0} # 0 for completed
default_filter = {"riverine": True,
"non_still_water": True}
nameyear = (name+year).lower()
out_dir = os.path.join(obs_dir,'hwm')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir,nameyear+'.csv')
usgs_json_file = os.path.join(out_dir,'usgs_hwm_tmp.json')
if not os.path.exists( usgs_json_file):
response = requests.get(url, params=params, headers=headers,verify=False)
response.raise_for_status()
json_data = json.loads(response.text)
with open(usgs_json_file, 'w') as outfile:
json.dump(json_data, outfile )
else:
with open(usgs_json_file) as json_file:
json_data = json.load(json_file)
hwm_stations = dict()
for data in json_data:
if 'elev_ft' in data.keys() and name.lower() in data['eventName'].lower():
hwm_stations[str(data['hwm_id'])] = data
log = pd.DataFrame.from_dict(hwm_stations)
hwm = []
ii = 0
for key in log.keys():
l0 = []
for key0 in log[key].keys() :
l0.append(log[key][key0])
hwm.append(l0)
#
hwm = np.array(hwm)
df = pd.DataFrame(data=hwm, columns=log[key].keys())
drop_poor = False
if drop_poor:
for i in range(len(df)):
tt = df.hwmQualityName[i]
if 'poor' in tt.lower():
df.hwmQualityName[i] = np.nan
df = df.dropna()
df['elev_m'] = pd.to_numeric(df['elev_ft']) * 0.3048 #in meter
#
df.to_csv(fname)
def get_all_data()
###############################################
###############################################
############ MAIN code Starts here ############
if False:
# not needed. will take from the storm specific obs list from coops and ndbc
obs_station_list_gen()
#
#######
# out dir
obs_dir = os.path.join(base_dirf,'obs')
if get_usgs_hwm:
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print(' > Get USGS HWM for ', name)
try:
write_high_water_marks(obs_dir, name, year)
except:
print (' > Get USGS HWM for ', name , ' ERROR ...')
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print( '******************************************************** \n\n\n\n\n\n')
#if bbox_from_best_track:
try:
#bbox_from_best_track = False
code,hurricane_gis_files = get_nhc_storm_info (year,name)
###############################################################################
#download gis zip files
base = download_nhc_gis_files(hurricane_gis_files)
# get advisory cones and track points
cones,pts_actual,points_actual = read_advisory_cones_info(hurricane_gis_files,base,year,code)
start = pts_actual[0] ['FLDATELBL']
end = pts_actual[-1]['FLDATELBL']
#start_txt_actual = ('20' + start[:-2]).replace('/','')
#end_txt_actual = ('20' + end [:-2]).replace('/','')
#print('\n\n\n\n\n\n ********************************************************')
#for key1 in pts_actual[0].keys():
# print( '***** pts_actual[0] [', key1, ']',pts_actual[0] [key1] , '*********')
#print( '******************************************************** \n\n\n\n\n\n')
start_dt = dateparser.parse(start,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) - obs_xtra_days
end_dt = dateparser.parse(end ,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) + obs_xtra_days
#try:
# # bbox_from_best_track:
# start_txt = start_txt_best
# end_txt = end_txt_best
# #bbox = bbox_best
#except:
# start_txt = start_txt_actual
# end_txt = end_txt_actual
#
#start_dt = arrow.get(start_txt, 'YYYYMMDDhh').datetime - obs_xtra_days
#end_dt = arrow.get(end_txt , 'YYYYMMDDhh').datetime + obs_xtra_days
#if False:
# get bbox from actual data
last_cone = cones[-1]['geometry'].iloc[0]
track = LineString([point['geometry'] for point in pts_actual])
lons_actual = track.coords.xy[0]
lats_actual = track.coords.xy[1]
bbox_actual = min(lons_actual)-2, min(lats_actual)-2, max(lons_actual)+2, max(lats_actual)+2
################################################################################
# Find the bounding box to search the data.
bbox_from_best_track = False
bbox = bbox_actual
except:
start_dt = storms[key]['start']
end_dt = storms[key]['end' ]
bounds = storms[key]['bbox' ]
if storms[key]['bbox'] is not None:
bbox = storms[key]['bbox']
#print('\n\n\n\n >>>>> Download and read all GIS data for Storm >',name, ' Year > ', year, '\n ** This is an old STORM !!!!!! \n\n\n\n')
#
# Note that the bounding box is derived from the track and the latest prediction cone.
strbbox = ', '.join(format(v, '.2f') for v in bbox)
#
# Note that the bounding box is derived from the track and the latest prediction cone.
strbbox = ', '.join(format(v, '.2f') for v in bbox)
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print('bbox: {}\nstart: {}\n end: {}'.format(strbbox, start_dt, end_dt))
print( '******************************************************** \n\n\n\n\n\n')
#
#########
if get_cops_wlev:
try:
print(' > Get water level information CO-OPS ... ')
# ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
datum = 'NAVD'
datum = 'MSL'
print ('datum=', datum )
ssh, ssh_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='water_surface_height_above_reference_datum',
units=cf_units.Unit('meters'),
datum = datum ,
bbox=bbox,
)
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
except:
print(' > Get water level information CO-OPS >>>> ERRORRRRR')
######
if get_cops_wind:
try:
print(' > Get wind information CO-OPS ... ')
wnd_obs, wnd_obs_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='wind_speed',
units=cf_units.Unit('m/s'),
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
except:
print(' > Get wind information CO-OPS >>> ERORRRR')
######
if get_ndbc_wind:
try:
print(' > Get wind ocean information (ndbc) ... ')
wnd_ocn, wnd_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='winds',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
except:
print(' > Get wind ocean information (ndbc) >>> ERRRORRRR')
######
if get_ndbc_wave:
try:
print(' > Get wave ocean information (ndbc) ... ')
wav_ocn, wav_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='waves',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
except:
print(' > Get wave ocean information (ndbc) >>> ERRORRRR ')
######
if False:
# test reading files
ssh_table1 , ssh1 = read_csv (obs_dir, name, year, label='coops_ssh' )
wnd_obs_table1, wnd_obs1 = read_csv (obs_dir, name, year, label='coops_wind')
wnd_ocn_table1, wnd_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wind' )
wav_ocn_table1, wav_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wave' )
#if False:
#
# back up script file
args=sys.argv
scr_name = args[0]
scr_dir = os.path.join(obs_dir, name+year)
os.system('cp -fr ' + scr_name + ' ' + scr_dir)
#
#with open(pick, "rb") as f:
# w = pickle.load(f)
#f = open(pick, "rb")
#w = pickle.load(f)
#if __name__ == "__main__":
# main()
if __name__ == "__main__":
get_all_data()
| false
| true
|
f717df53a4c80dee569899eb3e7c32f7b58fef74
| 12,004
|
py
|
Python
|
main.py
|
lakmalniranga/OpenCV-average-color-detection
|
615ca69002d2bc37191c118247ddd8986f04edb1
|
[
"MIT"
] | null | null | null |
main.py
|
lakmalniranga/OpenCV-average-color-detection
|
615ca69002d2bc37191c118247ddd8986f04edb1
|
[
"MIT"
] | null | null | null |
main.py
|
lakmalniranga/OpenCV-average-color-detection
|
615ca69002d2bc37191c118247ddd8986f04edb1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
OpenCV Python image average color detection script. You can use this to finding darkest color.
Coded by : Lakmal Niranga. 2016
"""
import os
import cv2
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(790, 550)
Dialog.setSizeGripEnabled(True)
self.frame = QtGui.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(10, 10, 381, 281))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayoutWidget = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnImg1_pc = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_pc.setObjectName(_fromUtf8("btnImg1_pc"))
self.horizontalLayout.addWidget(self.btnImg1_pc)
self.btnImg1_cam = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_cam.setObjectName(_fromUtf8("btnImg1_cam"))
self.horizontalLayout.addWidget(self.btnImg1_cam)
self.label_img1 = QtGui.QLabel(self.frame)
self.label_img1.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img1.setText(_fromUtf8(""))
self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.label_img1.raise_()
self.frame_3 = QtGui.QFrame(Dialog)
self.frame_3.setGeometry(QtCore.QRect(400, 10, 381, 281))
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName(_fromUtf8("frame_3"))
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame_3)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnImg2_pc = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_pc.setObjectName(_fromUtf8("btnImg2_pc"))
self.horizontalLayout_2.addWidget(self.btnImg2_pc)
self.btnImg2_cam = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_cam.setObjectName(_fromUtf8("btnImg2_cam"))
self.horizontalLayout_2.addWidget(self.btnImg2_cam)
self.label_img2 = QtGui.QLabel(self.frame_3)
self.label_img2.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img2.setText(_fromUtf8(""))
self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
self.label_img2.setObjectName(_fromUtf8("label_img2"))
self.verticalLayoutWidget = QtGui.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 370, 771, 41))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.verticalLayoutWidget_2 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 410, 381, 143))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.colorbox_1 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.colorbox_1.setText(_fromUtf8(""))
self.colorbox_1.setObjectName(_fromUtf8("colorbox_1"))
self.verticalLayout_2.addWidget(self.colorbox_1)
self.lable_img1 = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img1.setFont(font)
self.lable_img1.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img1.setObjectName(_fromUtf8("lable_img1"))
self.verticalLayout_2.addWidget(self.lable_img1)
self.verticalLayoutWidget_3 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(400, 410, 381, 143))
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.colorbox_2 = QtGui.QLabel(self.verticalLayoutWidget_3)
self.colorbox_2.setText(_fromUtf8(""))
self.colorbox_2.setObjectName(_fromUtf8("colorbox_2"))
self.verticalLayout_3.addWidget(self.colorbox_2)
self.lable_img2 = QtGui.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img2.setFont(font)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.lable_img2.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img2.setObjectName(_fromUtf8("lable_img2"))
self.verticalLayout_3.addWidget(self.lable_img2)
self.btnComp = QtGui.QPushButton(Dialog)
self.btnComp.setGeometry(QtCore.QRect(310, 310, 171, 51))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.btnComp.setFont(font)
self.btnComp.setObjectName(_fromUtf8("btnComp"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "OpenCV Darkest Cloth Identifier", None))
self.btnImg1_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg1_cam.setText(_translate("Dialog", "Take image from camera", None))
self.btnImg2_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg2_cam.setText(_translate("Dialog", "Take image from camera", None))
self.label.setText(_translate("Dialog", "Most Suitable Average Color", None))
self.lable_img1.setText(_translate("Dialog", "", None))
self.lable_img2.setText(_translate("Dialog", "", None))
self.btnComp.setText(_translate("Dialog", "Compare", None))
self.btnImg1_pc.clicked.connect(self.openimg1)
self.btnImg2_pc.clicked.connect(self.openimg2)
self.btnComp.clicked.connect(self.compare_color)
self.btnImg1_cam.clicked.connect(self.cameraImg1)
self.btnImg2_cam.clicked.connect(self.cameraImg2)
avg1=None
avg2=None
def get_avg_color(self, img_path):
img = cv2.imread(img_path,cv2.IMREAD_COLOR)
img_width = img.shape[1]
img_height = img.shape[0]
rows_cols = 10
part_of_width = img_width/rows_cols
part_of_height = img_height/rows_cols
avg_B=0
avg_G=0
avg_R=0
for x in range(part_of_width,img_width-part_of_width,part_of_width):
for y in range(part_of_height,img_height-part_of_height,part_of_height):
color = img[y,x] #[y and x] - gives BGR
avg_B+=color[0]
avg_G+=color[1]
avg_R+=color[2]
cv2.circle(img,(x,y), 5, (0,0,0), -1) #[x and y]
return (avg_B/81,avg_G/81,avg_R/81)[::-1] #return tuple in BGR
def openimg1(self):
global avg1
img1_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img1_path))
avg1 = self.get_avg_color(str(img1_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
def openimg2(self):
global avg2
img2_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img2_path))
avg2 = self.get_avg_color(str(img2_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
def compare_color(self):
global avg1, avg2
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Critical)
try:
img1_avarage = sum(i for i in avg1)
img2_avarage = sum(i for i in avg2)
avg1_per = (float(img1_avarage)/(img1_avarage+img2_avarage))*100
avg2_per = (float(img2_avarage)/(img1_avarage+img2_avarage))*100
self.lable_img1.setText(str(round(100-avg1_per, 2)) + "%")
self.lable_img2.setText(str(round(100-avg2_per, 2)) + "%")
except NameError as e:
msgBox.setText("Please select images first!")
msgBox.setWindowTitle("Error")
msgBox.exec_()
def cameraImg1(self):
global avg1
cap = cv2.VideoCapture(0)
while(True):
global avg1
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image1.jpg"
cv2.imwrite(img_path, frame)
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img_path))
avg1 = self.get_avg_color(str(img_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
def cameraImg2(self):
global avg2
cap = cv2.VideoCapture(0)
while(True):
global avg2
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image2.jpg"
cv2.imwrite(img_path, frame)
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img_path))
avg2 = self.get_avg_color(str(img_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
Dialog.setFixedSize(790, 550)
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 43.02509
| 116
| 0.673692
|
import os
import cv2
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(790, 550)
Dialog.setSizeGripEnabled(True)
self.frame = QtGui.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(10, 10, 381, 281))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayoutWidget = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnImg1_pc = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_pc.setObjectName(_fromUtf8("btnImg1_pc"))
self.horizontalLayout.addWidget(self.btnImg1_pc)
self.btnImg1_cam = QtGui.QPushButton(self.horizontalLayoutWidget)
self.btnImg1_cam.setObjectName(_fromUtf8("btnImg1_cam"))
self.horizontalLayout.addWidget(self.btnImg1_cam)
self.label_img1 = QtGui.QLabel(self.frame)
self.label_img1.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img1.setText(_fromUtf8(""))
self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.label_img1.raise_()
self.frame_3 = QtGui.QFrame(Dialog)
self.frame_3.setGeometry(QtCore.QRect(400, 10, 381, 281))
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName(_fromUtf8("frame_3"))
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame_3)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 230, 361, 41))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.btnImg2_pc = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_pc.setObjectName(_fromUtf8("btnImg2_pc"))
self.horizontalLayout_2.addWidget(self.btnImg2_pc)
self.btnImg2_cam = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.btnImg2_cam.setObjectName(_fromUtf8("btnImg2_cam"))
self.horizontalLayout_2.addWidget(self.btnImg2_cam)
self.label_img2 = QtGui.QLabel(self.frame_3)
self.label_img2.setGeometry(QtCore.QRect(10, 10, 361, 211))
self.label_img2.setText(_fromUtf8(""))
self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
self.label_img2.setObjectName(_fromUtf8("label_img2"))
self.verticalLayoutWidget = QtGui.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 370, 771, 41))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.verticalLayoutWidget_2 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 410, 381, 143))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.colorbox_1 = QtGui.QLabel(self.verticalLayoutWidget_2)
self.colorbox_1.setText(_fromUtf8(""))
self.colorbox_1.setObjectName(_fromUtf8("colorbox_1"))
self.verticalLayout_2.addWidget(self.colorbox_1)
self.lable_img1 = QtGui.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img1.setFont(font)
self.lable_img1.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img1.setObjectName(_fromUtf8("lable_img1"))
self.verticalLayout_2.addWidget(self.lable_img1)
self.verticalLayoutWidget_3 = QtGui.QWidget(Dialog)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(400, 410, 381, 143))
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.colorbox_2 = QtGui.QLabel(self.verticalLayoutWidget_3)
self.colorbox_2.setText(_fromUtf8(""))
self.colorbox_2.setObjectName(_fromUtf8("colorbox_2"))
self.verticalLayout_3.addWidget(self.colorbox_2)
self.lable_img2 = QtGui.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.lable_img2.setFont(font)
self.label_img1.setObjectName(_fromUtf8("label_img1"))
self.horizontalLayoutWidget.raise_()
self.lable_img2.setAlignment(QtCore.Qt.AlignCenter)
self.lable_img2.setObjectName(_fromUtf8("lable_img2"))
self.verticalLayout_3.addWidget(self.lable_img2)
self.btnComp = QtGui.QPushButton(Dialog)
self.btnComp.setGeometry(QtCore.QRect(310, 310, 171, 51))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.btnComp.setFont(font)
self.btnComp.setObjectName(_fromUtf8("btnComp"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "OpenCV Darkest Cloth Identifier", None))
self.btnImg1_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg1_cam.setText(_translate("Dialog", "Take image from camera", None))
self.btnImg2_pc.setText(_translate("Dialog", "Select image from PC", None))
self.btnImg2_cam.setText(_translate("Dialog", "Take image from camera", None))
self.label.setText(_translate("Dialog", "Most Suitable Average Color", None))
self.lable_img1.setText(_translate("Dialog", "", None))
self.lable_img2.setText(_translate("Dialog", "", None))
self.btnComp.setText(_translate("Dialog", "Compare", None))
self.btnImg1_pc.clicked.connect(self.openimg1)
self.btnImg2_pc.clicked.connect(self.openimg2)
self.btnComp.clicked.connect(self.compare_color)
self.btnImg1_cam.clicked.connect(self.cameraImg1)
self.btnImg2_cam.clicked.connect(self.cameraImg2)
avg1=None
avg2=None
def get_avg_color(self, img_path):
img = cv2.imread(img_path,cv2.IMREAD_COLOR)
img_width = img.shape[1]
img_height = img.shape[0]
rows_cols = 10
part_of_width = img_width/rows_cols
part_of_height = img_height/rows_cols
avg_B=0
avg_G=0
avg_R=0
for x in range(part_of_width,img_width-part_of_width,part_of_width):
for y in range(part_of_height,img_height-part_of_height,part_of_height):
color = img[y,x]
avg_B+=color[0]
avg_G+=color[1]
avg_R+=color[2]
cv2.circle(img,(x,y), 5, (0,0,0), -1)
return (avg_B/81,avg_G/81,avg_R/81)[::-1]
def openimg1(self):
global avg1
img1_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img1_path))
avg1 = self.get_avg_color(str(img1_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
def openimg2(self):
global avg2
img2_path = QtGui.QFileDialog.getOpenFileName(Dialog, 'Open file', os.getcwd() ,"Image files (*.jpg *.gif)")
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img2_path))
avg2 = self.get_avg_color(str(img2_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
def compare_color(self):
global avg1, avg2
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Critical)
try:
img1_avarage = sum(i for i in avg1)
img2_avarage = sum(i for i in avg2)
avg1_per = (float(img1_avarage)/(img1_avarage+img2_avarage))*100
avg2_per = (float(img2_avarage)/(img1_avarage+img2_avarage))*100
self.lable_img1.setText(str(round(100-avg1_per, 2)) + "%")
self.lable_img2.setText(str(round(100-avg2_per, 2)) + "%")
except NameError as e:
msgBox.setText("Please select images first!")
msgBox.setWindowTitle("Error")
msgBox.exec_()
def cameraImg1(self):
global avg1
cap = cv2.VideoCapture(0)
while(True):
global avg1
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image1.jpg"
cv2.imwrite(img_path, frame)
self.label_img1.setScaledContents(True)
self.label_img1.setPixmap(QtGui.QPixmap(img_path))
avg1 = self.get_avg_color(str(img_path))
self.colorbox_1.setStyleSheet('background-color: rgb'+ str(avg1))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
def cameraImg2(self):
global avg2
cap = cv2.VideoCapture(0)
while(True):
global avg2
ret, frame = cap.read()
cv2.imshow('press S to take image | press C to cancel',frame)
k = cv2.waitKey(3) & 0xFF
if k == ord('s'):
img_path="image2.jpg"
cv2.imwrite(img_path, frame)
self.label_img2.setScaledContents(True)
self.label_img2.setPixmap(QtGui.QPixmap(img_path))
avg2 = self.get_avg_color(str(img_path))
self.colorbox_2.setStyleSheet('background-color: rgb'+ str(avg2))
break
if k == ord('c'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
Dialog.setFixedSize(790, 550)
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| true
| true
|
f717e0abb049f00234b9b3cbf5c4910e36250c30
| 1,028
|
py
|
Python
|
pyvortex/__init__.py
|
pankajkarman/pyvortex
|
ba92d9b7702c33218377ac88f3045e880339f3ad
|
[
"MIT"
] | 5
|
2021-01-12T16:52:45.000Z
|
2021-10-13T23:26:42.000Z
|
pyvortex/__init__.py
|
pankajkarman/pyvortex
|
ba92d9b7702c33218377ac88f3045e880339f3ad
|
[
"MIT"
] | 2
|
2020-12-18T15:16:37.000Z
|
2021-12-02T14:47:07.000Z
|
pyvortex/__init__.py
|
pankajkarman/pyvortex
|
ba92d9b7702c33218377ac88f3045e880339f3ad
|
[
"MIT"
] | 3
|
2021-01-12T16:52:18.000Z
|
2021-10-14T02:18:06.000Z
|
"""
This module consists of functions to calculate the [equivalent latitude](https://journals.ametsoc.org/doi/citedby/10.1175/1520-0469%282003%29060%3C0287%3ATELADT%3E2.0.CO%3B2) and edge of a polar vortex using [Nash criteria](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/96JD00066).
### Installation
```
pip install -U pyvortex
```
install the latest version using
```
pip install git+https://github.com/pankajkarman/pyvortex.git
```
### Usage
`pyvortex` is easy to use. Just import:
```python
import pyvortex as vr
```
#### Northern Hemisphere
Instantiate the `PolarVortex` class using:
```python
pol = PolarVortex(pv, uwind)
```
Get equivalent lqtitude for the provided vorticity data as:
```python
eql = pol.get_eql()
```
If you want to get both equivalent latitude and Vortex edge, just use:
```python
eql = pol.get_edge(min_eql=30)
```
#### Southern Hemisphere
Flip pv and uwind along latitude dimension and multiply pv by -1. All other things will be the same.
"""
from .pyvortex import PolarVortex
| 22.844444
| 287
| 0.737354
|
from .pyvortex import PolarVortex
| true
| true
|
f717e10070ef6a2f208a3ff8fb842d2f4dcf4f84
| 3,440
|
py
|
Python
|
simulation/simulation.py
|
cyberImperial/attack-graphs
|
40c5c2bcc3eaf01c484e51d8339d29da5154dd42
|
[
"MIT"
] | 18
|
2018-02-21T13:14:11.000Z
|
2021-07-25T05:15:56.000Z
|
simulation/simulation.py
|
BenDerPan/attack-graphs
|
40c5c2bcc3eaf01c484e51d8339d29da5154dd42
|
[
"MIT"
] | 70
|
2017-10-16T22:18:26.000Z
|
2020-05-11T14:01:06.000Z
|
simulation/simulation.py
|
BenDerPan/attack-graphs
|
40c5c2bcc3eaf01c484e51d8339d29da5154dd42
|
[
"MIT"
] | 14
|
2019-04-24T23:26:39.000Z
|
2021-12-03T09:36:13.000Z
|
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from topology.graph.graph import Graph
from topology.graph.graph import Node
from clint.textui import colored
import json
import ast
import time
from random import randrange
class Simulation():
"""
Class used to mock sniffer connections and ip discovery for running
simulations.
General description: The simulation module is lightweight and can easily
handle overlay topologies of magnitude of thousands. The simulations are
run on random overlay topologies with fixed number of nodes and edges.
Random packets get generated whenever the simulation module connection gets
a call within a fixed timeout of 0.5 seconds, whereas the scans are
generated within a timeout of 3 seconds.
"""
def __init__(self, conf_file, connection_timeout = 0.5, scan_timeout = 10):
"""
Construct a new simulation object from a given configuartion file.
:param conf_file: The configuration file must be a json that contains
a graph. For an example see: `confs/simple.json`
:param connection_timeout: packets get generated each
connection_timeout seconds
:param scan_timeout: the time to run a scan
"""
self.connection_timeout = connection_timeout
self.scan_timeout = scan_timeout
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, "confs")
with open(os.path.join(dir_path, conf_file), 'r') as f:
data = json.dumps(ast.literal_eval(f.read()))
self.conf = json.loads(data)
logger.info("Configuration successfully parsed...")
self.graph = Graph.from_json(self.conf)
logger.info("Graph successfully loaded...")
def connection(self):
"""
Return a Connection class. The internals of the topology module use
only the next function from the `libpcap` Python wrapper.
"""
def build_packet(src, dest):
time.sleep(self.connection_timeout)
return "header", {
"src" : str(src),
"dest" : str(dest)
}
class Connection():
def __init__(self, graph):
self.graph = graph
def next(self):
# return a new random packet sent between 2 nodes of the graph
link_idx = randrange(len(self.graph.edges))
for (n1, n2) in self.graph.edges:
if link_idx == 0:
return build_packet(n1.ip, n2.ip)
link_idx -= 1
logger.error("Simulated connection crashed.")
raise Exception("Malformed simulation graph!")
return Connection(self.graph)
def discovery_ip(self, ip):
"""
Function used as a seam instead of the original `discovery_ip` function.
See sniffer module for more details.
"""
logger.info(colored.cyan("Started scan."))
time.sleep(self.scan_timeout)
for node in self.graph.nodes:
if Node(ip) == node:
logger.info(colored.green("Successful scan."))
return node.running
logger.info("Failed scan.")
return {}
| 34.4
| 80
| 0.62936
|
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from topology.graph.graph import Graph
from topology.graph.graph import Node
from clint.textui import colored
import json
import ast
import time
from random import randrange
class Simulation():
def __init__(self, conf_file, connection_timeout = 0.5, scan_timeout = 10):
self.connection_timeout = connection_timeout
self.scan_timeout = scan_timeout
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, "confs")
with open(os.path.join(dir_path, conf_file), 'r') as f:
data = json.dumps(ast.literal_eval(f.read()))
self.conf = json.loads(data)
logger.info("Configuration successfully parsed...")
self.graph = Graph.from_json(self.conf)
logger.info("Graph successfully loaded...")
def connection(self):
def build_packet(src, dest):
time.sleep(self.connection_timeout)
return "header", {
"src" : str(src),
"dest" : str(dest)
}
class Connection():
def __init__(self, graph):
self.graph = graph
def next(self):
link_idx = randrange(len(self.graph.edges))
for (n1, n2) in self.graph.edges:
if link_idx == 0:
return build_packet(n1.ip, n2.ip)
link_idx -= 1
logger.error("Simulated connection crashed.")
raise Exception("Malformed simulation graph!")
return Connection(self.graph)
def discovery_ip(self, ip):
logger.info(colored.cyan("Started scan."))
time.sleep(self.scan_timeout)
for node in self.graph.nodes:
if Node(ip) == node:
logger.info(colored.green("Successful scan."))
return node.running
logger.info("Failed scan.")
return {}
| true
| true
|
f717e11a8a97f2e8f936ce7233ccad30aa232626
| 7,806
|
py
|
Python
|
examples/pwr_run/checkpointing/final_trace/top50/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/final_trace/top50/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/final_trace/top50/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 36
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.936709
| 118
| 0.691135
|
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 36
starting_epoch = 0
pid = os.getpid()
message = job_name + ' pid ' + str(pid)
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true
| true
|
f717e184728b5d47b6b0a24c1fd6cd16b391b36a
| 35,865
|
py
|
Python
|
pycromanager/zmq.py
|
ilyasdc/pycro-manager
|
5f0153e8a90104eb8715348c6eb22c4d8fdee477
|
[
"BSD-3-Clause"
] | null | null | null |
pycromanager/zmq.py
|
ilyasdc/pycro-manager
|
5f0153e8a90104eb8715348c6eb22c4d8fdee477
|
[
"BSD-3-Clause"
] | null | null | null |
pycromanager/zmq.py
|
ilyasdc/pycro-manager
|
5f0153e8a90104eb8715348c6eb22c4d8fdee477
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
from threading import Lock
class DataSocket:
"""
Wrapper for ZMQ socket that sends and recieves dictionaries
Includes ZMQ client, push, and pull sockets
"""
def __init__(self, context, port, type, debug=False, ip_address="127.0.0.1"):
# request reply socket
self._socket = context.socket(type)
self._debug = debug
# store these as wekrefs so that circular refs dont prevent garbage collection
self._java_objects = set()
self._port = port
self._close_lock = Lock()
self._closed = False
if type == zmq.PUSH:
if debug:
print("binding {}".format(port))
self._socket.bind("tcp://{}:{}".format(ip_address, port))
else:
if debug:
print("connecting {}".format(port))
self._socket.connect("tcp://{}:{}".format(ip_address, port))
def _register_java_object(self, object):
self._java_objects.add(object)
def _convert_np_to_python(self, d):
"""
recursively search dictionary and convert any values from numpy floats/ints to
python floats/ints so they can be json serialized
:return:
"""
if type(d) != dict:
return
for k, v in d.items():
if isinstance(v, dict):
self._convert_np_to_python(v)
elif type(v) == list:
for e in v:
self._convert_np_to_python(e)
elif np.issubdtype(type(v), np.floating):
d[k] = float(v)
elif np.issubdtype(type(v), np.integer):
d[k] = int(v)
def _make_array_identifier(self, entry):
"""
make a string to replace bytes data or numpy array in message, which encode data type if numpy
"""
# make up a random 32 bit int as the identifier
# TODO: change to simple counting
identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
# '@{some_number}_{bytes_per_pixel}'
# if its a numpy array, include bytes per pixel, otherwise just interpret it as raw byts
# TODO : I thinkg its always raw binary and the argument deserialization types handles conversion to java arrays
# This definitely could use some cleanup and simplification. Probably best to encode the data type here and remove
# argument deserialization types
return identifier, "@" + str(int(identifier)) + "_" + str(
0 if isinstance(entry, bytes) else entry.dtype.itemsize
)
def _remove_bytes(self, bytes_data, structure):
if isinstance(structure, list):
for i, entry in enumerate(structure):
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[i] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, entry)
elif isinstance(structure, dict):
for key in structure.keys():
entry = structure[key]
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[key] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, structure[key])
def send(self, message, timeout=0):
if message is None:
message = {}
# make sure any np types convert to python types so they can be json serialized
self._convert_np_to_python(message)
# Send binary data in seperate messages so it doesnt need to be json serialized
bytes_data = []
self._remove_bytes(bytes_data, message)
message_string = json.dumps(message)
if self._debug:
print("DEBUG, sending: {}".format(message))
# convert keys to byte array
key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
message_parts = [bytes(message_string, "iso-8859-1")] + [
item for keyval in key_vals for item in keyval
]
if timeout == 0:
self._socket.send_multipart(message_parts)
else:
start = time.time()
while 1000 * (time.time() - start) < timeout:
try:
self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
return True
except zmq.ZMQError:
pass # ignore, keep trying
return False
def _replace_bytes(self, dict_or_list, hash, value):
"""
Replace placeholders for byte arrays in JSON message with their actual values
"""
if isinstance(dict_or_list, dict):
for key in dict_or_list:
if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
hash_in_message = int(
dict_or_list[key].split("@")[1], 16
) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[key] = value
return
elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
self._replace_bytes(dict_or_list[key], hash, value)
elif isinstance(dict_or_list, list):
for i, entry in enumerate(dict_or_list):
if isinstance(entry, str) and "@" in dict_or_list[entry]:
hash_in_message = int(entry.split("@")[1], 16) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[i] = value
return
elif isinstance(entry, list) or isinstance(entry, dict):
self._replace_bytes(entry, hash, value)
def receive(self, timeout=0):
if timeout == 0:
reply = self._socket.recv_multipart()
else:
start = time.time()
reply = None
while 1000 * (time.time() - start) < timeout:
try:
reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
if reply is not None:
break
except zmq.ZMQError:
pass # ignore, keep trying
if reply is None:
return reply
message = json.loads(reply[0].decode("iso-8859-1"))
# replace any byte data placeholders with the byte data itself
for i in np.arange(1, len(reply), 2):
# messages come in pairs: first is hash, second it byte data
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
value = reply[i + 1]
self._replace_bytes(message, identity_hash, value)
if self._debug:
print("DEBUG, recieved: {}".format(message))
self._check_exception(message)
return message
def _check_exception(self, response):
if "type" in response and response["type"] == "exception":
raise Exception(response["value"])
def __del__(self):
self.close() # make sure it closes properly
def close(self):
with self._close_lock:
if not self._closed:
for java_object in self._java_objects:
java_object._close()
del java_object #potentially redundant, trying to fix closing race condition
self._java_objects = None
self._socket.close()
while not self._socket.closed:
time.sleep(0.01)
self._socket = None
if self._debug:
print('closed socket {}'.format(self._port))
self._closed = True
class Bridge:
"""
Create an object which acts as a client to a corresponding server (running in a Java process).
This enables construction and interaction with arbitrary java objects. Each bridge object should
be run using a context manager (i.e. `with Bridge() as b:`) or bridge.close() should be explicitly
called when finished
"""
DEFAULT_PORT = 4827
DEFAULT_TIMEOUT = 500
_EXPECTED_ZMQ_SERVER_VERSION = "4.2.0"
thread_local = threading.local()
def __new__(cls, *args, **kwargs):
"""
Only one instance of Bridge per a thread
"""
port = kwargs.get('port', Bridge.DEFAULT_PORT)
if hasattr(Bridge.thread_local, "bridge") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:
Bridge.thread_local.bridge_count[port] += 1
return Bridge.thread_local.bridge[port]
else:
if (not hasattr(Bridge.thread_local, "bridge_count")) or Bridge.thread_local.bridge_count is None:
Bridge.thread_local.bridge_count = {}
Bridge.thread_local.bridge_count[port] = 1
return super(Bridge, cls).__new__(cls)
def __init__(
self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,
debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT
):
"""
Parameters
----------
port : int
The port on which the bridge operates
convert_camel_case : bool
If True, methods for Java objects that are passed across the bridge
will have their names converted from camel case to underscores. i.e. class.methodName()
becomes class.method_name()
debug : bool
If True print helpful stuff for debugging
"""
self._ip_address = ip_address
self._port = port
self._closed = False
if not hasattr(self, "_context"):
Bridge._context = zmq.Context()
# if hasattr(self.thread_local, "bridge") and port in self.thread_local.bridge:
# return ### What was this supposed to do?
if not hasattr(Bridge.thread_local, "bridge") or Bridge.thread_local.bridge is None:
Bridge.thread_local.bridge = {}
Bridge.thread_local.bridge[port] = self # cache a thread-local version of the bridge
self._convert_camel_case = convert_camel_case
self._debug = debug
self._timeout = timeout
self._master_socket = DataSocket(
self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
)
self._master_socket.send({"command": "connect", "debug": debug})
self._class_factory = _JavaClassFactory()
reply_json = self._master_socket.receive(timeout=timeout)
if reply_json is None:
raise TimeoutError(
f"Socket timed out after {timeout} milliseconds. Is Micro-Manager running and is the ZMQ server on {port} option enabled?"
)
if reply_json["type"] == "exception":
raise Exception(reply_json["message"])
if "version" not in reply_json:
reply_json["version"] = "2.0.0" # before version was added
if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
warnings.warn(
"Version mistmatch between Java ZMQ server and Python client. "
"\nJava ZMQ server version: {}\nPython client expected version: {}"
"\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
Bridge.thread_local.bridge_count[self._port] -= 1
if Bridge.thread_local.bridge_count[self._port] == 0:
del Bridge.thread_local.bridge_count[self._port]
del Bridge.thread_local.bridge[self._port]
self._master_socket.close()
self._master_socket = None
self._closed = True
if len(Bridge.thread_local.bridge) == 0:
Bridge.thread_local.bridge = None
Bridge.thread_local.bridge_count = None
def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)
def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):
"""
Create a new instance of a an object on the Java side. Returns a Python "Shadow" of the object, which behaves
just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at
runtime using iPython autocomplete
Parameters
----------
classpath : str
Full classpath of the java object
new_socket : bool
If True, will create new java object on a new port so that blocking calls will not interfere
with the bridges master port
args : list
list of arguments to the constructor, if applicable
Returns
-------
Python "Shadow" to the Java object
"""
if args is None:
args = []
# classpath_minus_class = '.'.join(classpath.split('.')[:-1])
# query the server for constructors matching this classpath
message = {"command": "get-constructors", "classpath": classpath}
self._master_socket.send(message)
constructors = self._master_socket.receive()["api"]
methods_with_name = [m for m in constructors if m["name"] == classpath]
if len(methods_with_name) == 0:
raise Exception("No valid java constructor found with classpath {}".format(classpath))
valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
# Calling a constructor, rather than getting return from method
message = {
"command": "constructor",
"classpath": classpath,
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
"arguments": _package_arguments(valid_method_spec, args),
}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def get_java_class(self, classpath: str, new_socket: bool=False):
"""
Get an an object corresponding to a java class, for example to be used
when calling static methods on the class directly
Parameters
----------
classpath : str
Full classpath of the java object
new_socket : bool
If True, will create new java object on a new port so that blocking calls will not interfere
with the bridges master port
Returns
-------
Python "Shadow" to the Java class
"""
message = {"command": "get-class", "classpath": classpath}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def _connect_push(self, port):
"""
Connect a push socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
)
def _connect_pull(self, port):
"""
Connect to a pull socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
)
def get_magellan(self):
"""
return an instance of the Micro-Magellan API
"""
return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")
def get_core(self):
"""
Connect to CMMCore and return object that has its methods
:return: Python "shadow" object for micromanager core
"""
if hasattr(self, "core"):
return getattr(self, "core")
self.core = self.construct_java_object("mmcorej.CMMCore")
return self.core
def get_studio(self):
"""
return an instance of the Studio object that provides access to micro-manager Java APIs
"""
return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
"""
This class is responsible for generating subclasses of JavaObjectShadow. Each generated class is kept in a `dict`.
If a given class has already been generate once it will be returns from the cache rather than re-generating it.
"""
def __init__(self):
self.classes = {}
def create(
self, serialized_obj: dict, convert_camel_case: bool = True
) -> typing.Type["JavaObjectShadow"]:
"""Create a class (or return a class from the cache) based on the contents of `serialized_object` message."""
if serialized_obj["class"] in self.classes.keys(): # Return a cached class
return self.classes[serialized_obj["class"]]
else: # Generate a new class since it wasn't found in the cache.
_java_class: str = serialized_obj["class"]
python_class_name_translation = _java_class.replace(
".", "_"
) # Having periods in the name would be problematic.
_interfaces = serialized_obj["interfaces"]
static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
fields = {} # Create a dict of field names with getter and setter funcs.
for field in serialized_obj["fields"]:
fields[field] = property(
fget=lambda instance, Field=field: instance._access_field(Field),
fset=lambda instance, val, Field=field: instance._set_field(Field, val),
)
methods = {} # Create a dict of methods for the class by name.
methodSpecs = serialized_obj["api"]
method_names = set([m["name"] for m in methodSpecs])
# parse method descriptions to make python stand ins
for method_name in method_names:
params, methods_with_name, method_name_modified = _parse_arg_names(
methodSpecs, method_name, convert_camel_case
)
return_type = methods_with_name[0]["return-type"]
fn = lambda instance, *args, signatures_list=tuple(
methods_with_name
): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')
fn.__name__ = method_name_modified
fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
_java_class, method_name_modified
)
sig = inspect.signature(fn)
params = [
inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
] + params # Add `self` as the first argument.
return_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
else return_type
)
fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
methods[method_name_modified] = fn
newclass = type( # Dynamically create a class to shadow a java class.
python_class_name_translation, # Name, based on the original java name
(JavaObjectShadow,), # Inheritance
{
"__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
instance, socket, serialized_object, bridge
),
**static_attributes,
**fields,
**methods,
},
)
self.classes[_java_class] = newclass
return newclass
class JavaObjectShadow:
"""
Generic class for serving as a python interface for a java class using a zmq server backend
"""
_interfaces = (
None # Subclasses should fill these out. This class should never be directly instantiated.
)
_java_class = None
def __init__(self, socket, serialized_object, bridge: Bridge):
self._socket = socket
self._hash_code = serialized_object["hash-code"]
self._bridge = bridge
# register objects with bridge so it can tell Java side to release them before socket shuts down
socket._register_java_object(self)
self._closed = False
# atexit.register(self._close)
self._close_lock = Lock()
def _close(self):
with self._close_lock:
if self._closed:
return
if not hasattr(self, "_hash_code"):
return # constructor didnt properly finish, nothing to clean up on java side
message = {"command": "destructor", "hash-code": self._hash_code}
if self._bridge._debug:
"closing: {}".format(self)
self._socket.send(message)
reply_json = self._socket.receive()
if reply_json["type"] == "exception":
raise Exception(reply_json["value"])
self._closed = True
def __del__(self):
"""
Tell java side this object is garbage collected so it can do the same if needed
"""
self._close()
def _access_field(self, name):
"""
Return a python version of the field with a given name
:return:
"""
message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _set_field(self, name, value):
"""
Return a python version of the field with a given name
:return:
"""
message = {
"command": "set-field",
"hash-code": self._hash_code,
"name": name,
"value": _serialize_arg(value),
}
self._socket.send(message)
reply = self._deserialize(self._socket.receive())
def _translate_call(self, method_specs, fn_args: tuple, static: bool):
"""
Translate to appropriate Java method, call it, and return converted python version of its result
Parameters
----------
args :
args[0] is list of dictionaries of possible method specifications
kwargs :
hold possible polymorphic args, or none
"""
# args that are none are placeholders to allow for polymorphism and not considered part of the spec
# fn_args = [a for a in fn_args if a is not None]
valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
# args are good, make call through socket, casting the correct type if needed (e.g. int to float)
message = {
"command": "run-method",
"static": static,
"hash-code": self._hash_code,
"name": valid_method_spec["name"],
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
}
message["arguments"] = _package_arguments(valid_method_spec, fn_args)
if self._bridge._closed:
raise Exception('The Bridge used to create this has been closed. Are you trying to call it outside of a "with" block?')
self._socket.send(message)
recieved = self._socket.receive()
return self._deserialize(recieved)
def _deserialize(self, json_return):
"""
method_spec :
info about the method that called it
reply :
bytes that represents return
Returns
-------
An appropriate python type of the converted value
"""
if json_return["type"] == "exception":
raise Exception(json_return["value"])
elif json_return["type"] == "null":
return None
elif json_return["type"] == "primitive":
return json_return["value"]
elif json_return["type"] == "string":
return json_return["value"]
elif json_return["type"] == "list":
return [self._deserialize(obj) for obj in json_return["value"]]
elif json_return["type"] == "object":
if json_return["class"] == "JSONObject":
return json.loads(json_return["value"])
else:
raise Exception("Unrecognized return class")
elif json_return["type"] == "unserialized-object":
# inherit socket from parent object
return self._bridge.get_class(json_return)(
socket=self._socket, serialized_object=json_return, bridge=self._bridge
)
else:
return deserialize_array(json_return)
def deserialize_array(json_return):
"""
Convert a serialized java array to the appropriate numpy type
Parameters
----------
json_return
"""
if json_return["type"] in ["byte-array", "int-array", "short-array", "float-array"]:
decoded = json_return["value"]
if json_return["type"] == "byte-array":
return np.frombuffer(decoded, dtype="=u1").copy()
elif json_return["type"] == "double-array":
return np.frombuffer(decoded, dtype="=f8").copy()
elif json_return["type"] == "int-array":
return np.frombuffer(decoded, dtype="=u4").copy()
elif json_return["type"] == "short-array":
return np.frombuffer(decoded, dtype="=u2").copy()
elif json_return["type"] == "float-array":
return np.frombuffer(decoded, dtype="=f4").copy()
def _package_arguments(valid_method_spec, fn_args):
"""
Serialize function arguments and also include description of their Java types
Parameters
----------
valid_method_spec:
fn_args :
"""
arguments = []
for arg_type, arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
arguments.append(_serialize_arg(arg_val))
elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:
arguments.append(_serialize_arg(arg_val))
elif arg_val is None:
arguments.append(_serialize_arg(arg_val))
elif isinstance(arg_val, np.ndarray):
arguments.append(_serialize_arg(arg_val))
else:
arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))
return arguments
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
"""
Check if a single method specificiation is compatible with the arguments the function recieved
Parameters
----------
method_spec :
fn_args :
"""
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
"""
Compare python arguments to java arguments to find correct function to call
Parameters
----------
method_specs :
fn_args :
Returns
-------
one of the method_specs that is valid
"""
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
# subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
# Used for generating type hints in arguments
_CLASS_NAME_MAPPING = {
"byte[]": "uint8array",
"double[]": "float64_array",
"int[]": "uint32_array",
"short[]": "int16_array",
"char[]": "int16_array",
"float[]": "int16_array",
"long[]": "int16_array",
"java.lang.String": "string",
"boolean": "boolean",
"double": "float",
"float": "float",
"int": "int",
"long": "int",
"short": "int",
"void": "void",
}
#Used for deserializing java arrarys into numpy arrays
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"boolean[]": np.bool,
"byte[]": np.uint8,
"short[]": np.int16,
"char[]": np.uint16,
"float[]": np.float32,
"double[]": np.float64,
"int[]": np.int32,
"long[]": np.int64,
}
#used for figuring our which java methods to call and if python args match
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
#maybe could make these more specific to array type?
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"char[]": np.ndarray,
"float[]": np.ndarray,
"long[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "short[]", "char[]", "long[]", "boolean[]",
"java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
# Test basic bridge operations
import traceback
b = Bridge()
try:
s = b.get_studio()
except:
traceback.print_exc()
try:
c = b.get_core()
except:
traceback.print_exc()
a = 1
| 39.026115
| 138
| 0.597992
|
import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
from threading import Lock
class DataSocket:
def __init__(self, context, port, type, debug=False, ip_address="127.0.0.1"):
self._socket = context.socket(type)
self._debug = debug
self._java_objects = set()
self._port = port
self._close_lock = Lock()
self._closed = False
if type == zmq.PUSH:
if debug:
print("binding {}".format(port))
self._socket.bind("tcp://{}:{}".format(ip_address, port))
else:
if debug:
print("connecting {}".format(port))
self._socket.connect("tcp://{}:{}".format(ip_address, port))
def _register_java_object(self, object):
self._java_objects.add(object)
def _convert_np_to_python(self, d):
if type(d) != dict:
return
for k, v in d.items():
if isinstance(v, dict):
self._convert_np_to_python(v)
elif type(v) == list:
for e in v:
self._convert_np_to_python(e)
elif np.issubdtype(type(v), np.floating):
d[k] = float(v)
elif np.issubdtype(type(v), np.integer):
d[k] = int(v)
def _make_array_identifier(self, entry):
identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
return identifier, "@" + str(int(identifier)) + "_" + str(
0 if isinstance(entry, bytes) else entry.dtype.itemsize
)
def _remove_bytes(self, bytes_data, structure):
if isinstance(structure, list):
for i, entry in enumerate(structure):
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[i] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, entry)
elif isinstance(structure, dict):
for key in structure.keys():
entry = structure[key]
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[key] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, structure[key])
def send(self, message, timeout=0):
if message is None:
message = {}
self._convert_np_to_python(message)
bytes_data = []
self._remove_bytes(bytes_data, message)
message_string = json.dumps(message)
if self._debug:
print("DEBUG, sending: {}".format(message))
key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
message_parts = [bytes(message_string, "iso-8859-1")] + [
item for keyval in key_vals for item in keyval
]
if timeout == 0:
self._socket.send_multipart(message_parts)
else:
start = time.time()
while 1000 * (time.time() - start) < timeout:
try:
self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
return True
except zmq.ZMQError:
pass
return False
def _replace_bytes(self, dict_or_list, hash, value):
if isinstance(dict_or_list, dict):
for key in dict_or_list:
if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
hash_in_message = int(
dict_or_list[key].split("@")[1], 16
)
if hash == hash_in_message:
dict_or_list[key] = value
return
elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
self._replace_bytes(dict_or_list[key], hash, value)
elif isinstance(dict_or_list, list):
for i, entry in enumerate(dict_or_list):
if isinstance(entry, str) and "@" in dict_or_list[entry]:
hash_in_message = int(entry.split("@")[1], 16)
if hash == hash_in_message:
dict_or_list[i] = value
return
elif isinstance(entry, list) or isinstance(entry, dict):
self._replace_bytes(entry, hash, value)
def receive(self, timeout=0):
if timeout == 0:
reply = self._socket.recv_multipart()
else:
start = time.time()
reply = None
while 1000 * (time.time() - start) < timeout:
try:
reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
if reply is not None:
break
except zmq.ZMQError:
pass
if reply is None:
return reply
message = json.loads(reply[0].decode("iso-8859-1"))
for i in np.arange(1, len(reply), 2):
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
value = reply[i + 1]
self._replace_bytes(message, identity_hash, value)
if self._debug:
print("DEBUG, recieved: {}".format(message))
self._check_exception(message)
return message
def _check_exception(self, response):
if "type" in response and response["type"] == "exception":
raise Exception(response["value"])
def __del__(self):
self.close()
def close(self):
with self._close_lock:
if not self._closed:
for java_object in self._java_objects:
java_object._close()
del java_object
self._java_objects = None
self._socket.close()
while not self._socket.closed:
time.sleep(0.01)
self._socket = None
if self._debug:
print('closed socket {}'.format(self._port))
self._closed = True
class Bridge:
DEFAULT_PORT = 4827
DEFAULT_TIMEOUT = 500
_EXPECTED_ZMQ_SERVER_VERSION = "4.2.0"
thread_local = threading.local()
def __new__(cls, *args, **kwargs):
port = kwargs.get('port', Bridge.DEFAULT_PORT)
if hasattr(Bridge.thread_local, "bridge") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:
Bridge.thread_local.bridge_count[port] += 1
return Bridge.thread_local.bridge[port]
else:
if (not hasattr(Bridge.thread_local, "bridge_count")) or Bridge.thread_local.bridge_count is None:
Bridge.thread_local.bridge_count = {}
Bridge.thread_local.bridge_count[port] = 1
return super(Bridge, cls).__new__(cls)
def __init__(
self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,
debug: bool=False, ip_address: str="127.0.0.1", timeout: int=DEFAULT_TIMEOUT
):
self._ip_address = ip_address
self._port = port
self._closed = False
if not hasattr(self, "_context"):
Bridge._context = zmq.Context()
Bridge.thread_local.bridge = {}
Bridge.thread_local.bridge[port] = self
self._convert_camel_case = convert_camel_case
self._debug = debug
self._timeout = timeout
self._master_socket = DataSocket(
self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
)
self._master_socket.send({"command": "connect", "debug": debug})
self._class_factory = _JavaClassFactory()
reply_json = self._master_socket.receive(timeout=timeout)
if reply_json is None:
raise TimeoutError(
f"Socket timed out after {timeout} milliseconds. Is Micro-Manager running and is the ZMQ server on {port} option enabled?"
)
if reply_json["type"] == "exception":
raise Exception(reply_json["message"])
if "version" not in reply_json:
reply_json["version"] = "2.0.0"
if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
warnings.warn(
"Version mistmatch between Java ZMQ server and Python client. "
"\nJava ZMQ server version: {}\nPython client expected version: {}"
"\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
Bridge.thread_local.bridge_count[self._port] -= 1
if Bridge.thread_local.bridge_count[self._port] == 0:
del Bridge.thread_local.bridge_count[self._port]
del Bridge.thread_local.bridge[self._port]
self._master_socket.close()
self._master_socket = None
self._closed = True
if len(Bridge.thread_local.bridge) == 0:
Bridge.thread_local.bridge = None
Bridge.thread_local.bridge_count = None
def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)
def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):
if args is None:
args = []
message = {"command": "get-constructors", "classpath": classpath}
self._master_socket.send(message)
constructors = self._master_socket.receive()["api"]
methods_with_name = [m for m in constructors if m["name"] == classpath]
if len(methods_with_name) == 0:
raise Exception("No valid java constructor found with classpath {}".format(classpath))
valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
message = {
"command": "constructor",
"classpath": classpath,
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
"arguments": _package_arguments(valid_method_spec, args),
}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def get_java_class(self, classpath: str, new_socket: bool=False):
message = {"command": "get-class", "classpath": classpath}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def _connect_push(self, port):
return DataSocket(
self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
)
def _connect_pull(self, port):
return DataSocket(
self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
)
def get_magellan(self):
return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")
def get_core(self):
if hasattr(self, "core"):
return getattr(self, "core")
self.core = self.construct_java_object("mmcorej.CMMCore")
return self.core
def get_studio(self):
return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
def __init__(self):
self.classes = {}
def create(
self, serialized_obj: dict, convert_camel_case: bool = True
) -> typing.Type["JavaObjectShadow"]:
if serialized_obj["class"] in self.classes.keys():
return self.classes[serialized_obj["class"]]
else:
_java_class: str = serialized_obj["class"]
python_class_name_translation = _java_class.replace(
".", "_"
) # Having periods in the name would be problematic.
_interfaces = serialized_obj["interfaces"]
static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
fields = {} # Create a dict of field names with getter and setter funcs.
for field in serialized_obj["fields"]:
fields[field] = property(
fget=lambda instance, Field=field: instance._access_field(Field),
fset=lambda instance, val, Field=field: instance._set_field(Field, val),
)
methods = {} # Create a dict of methods for the class by name.
methodSpecs = serialized_obj["api"]
method_names = set([m["name"] for m in methodSpecs])
# parse method descriptions to make python stand ins
for method_name in method_names:
params, methods_with_name, method_name_modified = _parse_arg_names(
methodSpecs, method_name, convert_camel_case
)
return_type = methods_with_name[0]["return-type"]
fn = lambda instance, *args, signatures_list=tuple(
methods_with_name
): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')
fn.__name__ = method_name_modified
fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
_java_class, method_name_modified
)
sig = inspect.signature(fn)
params = [
inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
] + params # Add `self` as the first argument.
return_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
else return_type
)
fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
methods[method_name_modified] = fn
newclass = type( # Dynamically create a class to shadow a java class.
python_class_name_translation, # Name, based on the original java name
(JavaObjectShadow,), # Inheritance
{
"__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
instance, socket, serialized_object, bridge
),
**static_attributes,
**fields,
**methods,
},
)
self.classes[_java_class] = newclass
return newclass
class JavaObjectShadow:
_interfaces = (
None # Subclasses should fill these out. This class should never be directly instantiated.
)
_java_class = None
def __init__(self, socket, serialized_object, bridge: Bridge):
self._socket = socket
self._hash_code = serialized_object["hash-code"]
self._bridge = bridge
# register objects with bridge so it can tell Java side to release them before socket shuts down
socket._register_java_object(self)
self._closed = False
# atexit.register(self._close)
self._close_lock = Lock()
def _close(self):
with self._close_lock:
if self._closed:
return
if not hasattr(self, "_hash_code"):
return # constructor didnt properly finish, nothing to clean up on java side
message = {"command": "destructor", "hash-code": self._hash_code}
if self._bridge._debug:
"closing: {}".format(self)
self._socket.send(message)
reply_json = self._socket.receive()
if reply_json["type"] == "exception":
raise Exception(reply_json["value"])
self._closed = True
def __del__(self):
self._close()
def _access_field(self, name):
message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _set_field(self, name, value):
message = {
"command": "set-field",
"hash-code": self._hash_code,
"name": name,
"value": _serialize_arg(value),
}
self._socket.send(message)
reply = self._deserialize(self._socket.receive())
def _translate_call(self, method_specs, fn_args: tuple, static: bool):
# args that are none are placeholders to allow for polymorphism and not considered part of the spec
# fn_args = [a for a in fn_args if a is not None]
valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
# args are good, make call through socket, casting the correct type if needed (e.g. int to float)
message = {
"command": "run-method",
"static": static,
"hash-code": self._hash_code,
"name": valid_method_spec["name"],
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
}
message["arguments"] = _package_arguments(valid_method_spec, fn_args)
if self._bridge._closed:
raise Exception('The Bridge used to create this has been closed. Are you trying to call it outside of a "with" block?')
self._socket.send(message)
recieved = self._socket.receive()
return self._deserialize(recieved)
def _deserialize(self, json_return):
if json_return["type"] == "exception":
raise Exception(json_return["value"])
elif json_return["type"] == "null":
return None
elif json_return["type"] == "primitive":
return json_return["value"]
elif json_return["type"] == "string":
return json_return["value"]
elif json_return["type"] == "list":
return [self._deserialize(obj) for obj in json_return["value"]]
elif json_return["type"] == "object":
if json_return["class"] == "JSONObject":
return json.loads(json_return["value"])
else:
raise Exception("Unrecognized return class")
elif json_return["type"] == "unserialized-object":
# inherit socket from parent object
return self._bridge.get_class(json_return)(
socket=self._socket, serialized_object=json_return, bridge=self._bridge
)
else:
return deserialize_array(json_return)
def deserialize_array(json_return):
if json_return["type"] in ["byte-array", "int-array", "short-array", "float-array"]:
decoded = json_return["value"]
if json_return["type"] == "byte-array":
return np.frombuffer(decoded, dtype="=u1").copy()
elif json_return["type"] == "double-array":
return np.frombuffer(decoded, dtype="=f8").copy()
elif json_return["type"] == "int-array":
return np.frombuffer(decoded, dtype="=u4").copy()
elif json_return["type"] == "short-array":
return np.frombuffer(decoded, dtype="=u2").copy()
elif json_return["type"] == "float-array":
return np.frombuffer(decoded, dtype="=f4").copy()
def _package_arguments(valid_method_spec, fn_args):
arguments = []
for arg_type, arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
arguments.append(_serialize_arg(arg_val))
elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:
arguments.append(_serialize_arg(arg_val))
elif arg_val is None:
arguments.append(_serialize_arg(arg_val))
elif isinstance(arg_val, np.ndarray):
arguments.append(_serialize_arg(arg_val))
else:
arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))
return arguments
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
# subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
# Used for generating type hints in arguments
_CLASS_NAME_MAPPING = {
"byte[]": "uint8array",
"double[]": "float64_array",
"int[]": "uint32_array",
"short[]": "int16_array",
"char[]": "int16_array",
"float[]": "int16_array",
"long[]": "int16_array",
"java.lang.String": "string",
"boolean": "boolean",
"double": "float",
"float": "float",
"int": "int",
"long": "int",
"short": "int",
"void": "void",
}
#Used for deserializing java arrarys into numpy arrays
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"boolean[]": np.bool,
"byte[]": np.uint8,
"short[]": np.int16,
"char[]": np.uint16,
"float[]": np.float32,
"double[]": np.float64,
"int[]": np.int32,
"long[]": np.int64,
}
#used for figuring our which java methods to call and if python args match
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
#maybe could make these more specific to array type?
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"char[]": np.ndarray,
"float[]": np.ndarray,
"long[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "short[]", "char[]", "long[]", "boolean[]",
"java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
# Test basic bridge operations
import traceback
b = Bridge()
try:
s = b.get_studio()
except:
traceback.print_exc()
try:
c = b.get_core()
except:
traceback.print_exc()
a = 1
| true
| true
|
f717e197e7a1baf6c61fc8fc8503678d23f39768
| 2,360
|
py
|
Python
|
app2.py
|
GFDRR/mobility_app
|
27285a0691fabcc2cede6772a04bb98d29e636da
|
[
"MIT"
] | null | null | null |
app2.py
|
GFDRR/mobility_app
|
27285a0691fabcc2cede6772a04bb98d29e636da
|
[
"MIT"
] | null | null | null |
app2.py
|
GFDRR/mobility_app
|
27285a0691fabcc2cede6772a04bb98d29e636da
|
[
"MIT"
] | null | null | null |
import streamlit as st
import pandas as pd
import seaborn as sns
import pylab as plt
import datetime as dt
#import geopandas as gpd
df = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')
df.ds = pd.to_datetime(df.ds)
df = df.set_index('ds')
df['datetime'] = df.index.copy()
## Header
st.title('Mobility trends of states in India')
st.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')
default_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']
states = st.multiselect('Select a state',df.polygon_name.unique())
# Line plot
colors = 'rgbycmkrgbycmkrgbycmkrgbycmk'
f, ax = plt.subplots(figsize = [9,9])
for background_state in df.polygon_name.unique():
sns.lineplot(x=df.index[df.polygon_name == background_state], y=df["all_day_bing_tiles_visited_relative_change"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)
for n, state in enumerate(list(states)):
col = colors[n]
ax = sns.lineplot(x=df.index[df.polygon_name == state], y="all_day_bing_tiles_visited_relative_change", color = col,data=df[df.polygon_name == state], linewidth = 4)
plt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)
plt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)
plt.title('Percent users remaining in home grid cell all day', fontsize = 16);
st.write(f)
df
## Map
gdf = gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')
gdf = gdf[['NAME_1','geometry']]
income_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])
income_data = income_data.dropna()
income_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]
income_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')
income_data.nsdp_USD = income_data.nsdp_USD.astype(int)
gdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))
gdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]
gdf.head(1)
mydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])
f = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')
st.pyplot()
| 41.403509
| 198
| 0.747034
|
import streamlit as st
import pandas as pd
import seaborn as sns
import pylab as plt
import datetime as dt
df = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')
df.ds = pd.to_datetime(df.ds)
df = df.set_index('ds')
df['datetime'] = df.index.copy()
le('Mobility trends of states in India')
st.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')
default_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']
states = st.multiselect('Select a state',df.polygon_name.unique())
colors = 'rgbycmkrgbycmkrgbycmkrgbycmk'
f, ax = plt.subplots(figsize = [9,9])
for background_state in df.polygon_name.unique():
sns.lineplot(x=df.index[df.polygon_name == background_state], y=df["all_day_bing_tiles_visited_relative_change"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)
for n, state in enumerate(list(states)):
col = colors[n]
ax = sns.lineplot(x=df.index[df.polygon_name == state], y="all_day_bing_tiles_visited_relative_change", color = col,data=df[df.polygon_name == state], linewidth = 4)
plt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)
plt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)
plt.title('Percent users remaining in home grid cell all day', fontsize = 16);
st.write(f)
df
= gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')
gdf = gdf[['NAME_1','geometry']]
income_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])
income_data = income_data.dropna()
income_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]
income_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')
income_data.nsdp_USD = income_data.nsdp_USD.astype(int)
gdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))
gdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]
gdf.head(1)
mydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])
f = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')
st.pyplot()
| true
| true
|
f717e1a9e531045800c5e7a2a00ed7b1dc29c82c
| 2,569
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/media/v20180601preview/get_asset_encryption_key.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetAssetEncryptionKeyResult',
'AwaitableGetAssetEncryptionKeyResult',
'get_asset_encryption_key',
]
@pulumi.output_type
class GetAssetEncryptionKeyResult:
"""
The Asset Storage encryption key.
"""
def __init__(__self__, storage_encryption_key=None):
if storage_encryption_key and not isinstance(storage_encryption_key, str):
raise TypeError("Expected argument 'storage_encryption_key' to be a str")
pulumi.set(__self__, "storage_encryption_key", storage_encryption_key)
@property
@pulumi.getter(name="storageEncryptionKey")
def storage_encryption_key(self) -> Optional[str]:
"""
The Asset storage encryption key.
"""
return pulumi.get(self, "storage_encryption_key")
class AwaitableGetAssetEncryptionKeyResult(GetAssetEncryptionKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAssetEncryptionKeyResult(
storage_encryption_key=self.storage_encryption_key)
def get_asset_encryption_key(account_name: Optional[str] = None,
asset_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssetEncryptionKeyResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The Media Services account name.
:param str asset_name: The Asset name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['assetName'] = asset_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180601preview:getAssetEncryptionKey', __args__, opts=opts, typ=GetAssetEncryptionKeyResult).value
return AwaitableGetAssetEncryptionKeyResult(
storage_encryption_key=__ret__.storage_encryption_key)
| 37.779412
| 157
| 0.709225
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetAssetEncryptionKeyResult',
'AwaitableGetAssetEncryptionKeyResult',
'get_asset_encryption_key',
]
@pulumi.output_type
class GetAssetEncryptionKeyResult:
def __init__(__self__, storage_encryption_key=None):
if storage_encryption_key and not isinstance(storage_encryption_key, str):
raise TypeError("Expected argument 'storage_encryption_key' to be a str")
pulumi.set(__self__, "storage_encryption_key", storage_encryption_key)
@property
@pulumi.getter(name="storageEncryptionKey")
def storage_encryption_key(self) -> Optional[str]:
return pulumi.get(self, "storage_encryption_key")
class AwaitableGetAssetEncryptionKeyResult(GetAssetEncryptionKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAssetEncryptionKeyResult(
storage_encryption_key=self.storage_encryption_key)
def get_asset_encryption_key(account_name: Optional[str] = None,
asset_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAssetEncryptionKeyResult:
__args__ = dict()
__args__['accountName'] = account_name
__args__['assetName'] = asset_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:media/v20180601preview:getAssetEncryptionKey', __args__, opts=opts, typ=GetAssetEncryptionKeyResult).value
return AwaitableGetAssetEncryptionKeyResult(
storage_encryption_key=__ret__.storage_encryption_key)
| true
| true
|
f717e1e1dd47ba1807b3c7b1e5175c4d151b536b
| 447
|
py
|
Python
|
students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py
|
voronoff2803/ITMO_ICT_WebProgramming_2020
|
c59d8b2cdefe8b821049a2716733070983d08ad2
|
[
"MIT"
] | null | null | null |
students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py
|
voronoff2803/ITMO_ICT_WebProgramming_2020
|
c59d8b2cdefe8b821049a2716733070983d08ad2
|
[
"MIT"
] | null | null | null |
students/k3340/practical_works/Voronov Alexey/jango1/project_first_app/migrations/0002_person_vehicles.py
|
voronoff2803/ITMO_ICT_WebProgramming_2020
|
c59d8b2cdefe8b821049a2716733070983d08ad2
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-03 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='person',
name='vehicles',
field=models.ManyToManyField(through='project_first_app.Ownership', to='project_first_app.Vehicle'),
),
]
| 23.526316
| 112
| 0.63311
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='person',
name='vehicles',
field=models.ManyToManyField(through='project_first_app.Ownership', to='project_first_app.Vehicle'),
),
]
| true
| true
|
f717e28a218983e53bb05193c90627d19da33fc9
| 110
|
py
|
Python
|
CodeWars/7 Kyu/Average Array.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Average Array.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Average Array.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def avg_array(arrs):
x=[]
for i in zip(*arrs):
x.append(sum(i)/len(arrs))
return x
| 18.333333
| 34
| 0.5
|
def avg_array(arrs):
x=[]
for i in zip(*arrs):
x.append(sum(i)/len(arrs))
return x
| true
| true
|
f717e456d5d1b3e37be07a8321b8d5d0fadafa26
| 10,526
|
py
|
Python
|
kubernetes/client/models/v1_persistent_volume_claim_spec.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 1
|
2022-02-22T23:10:55.000Z
|
2022-02-22T23:10:55.000Z
|
kubernetes/client/models/v1_persistent_volume_claim_spec.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | 6
|
2021-09-13T19:03:02.000Z
|
2022-03-16T18:56:42.000Z
|
kubernetes/client/models/v1_persistent_volume_claim_spec.py
|
philipp-sontag-by/python
|
51c481692ab0d9c71b9dd96342bfa93b721b029d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'data_source_ref': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'data_source_ref': 'dataSourceRef',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, data_source_ref=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeClaimSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._data_source = None
self._data_source_ref = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if data_source_ref is not None:
self.data_source_ref = data_source_ref
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:return: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeClaimSpec.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def data_source(self):
"""Gets the data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source
@data_source.setter
def data_source(self, data_source):
"""Sets the data_source of this V1PersistentVolumeClaimSpec.
:param data_source: The data_source of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source = data_source
@property
def data_source_ref(self):
"""Gets the data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1TypedLocalObjectReference
"""
return self._data_source_ref
@data_source_ref.setter
def data_source_ref(self, data_source_ref):
"""Sets the data_source_ref of this V1PersistentVolumeClaimSpec.
:param data_source_ref: The data_source_ref of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1TypedLocalObjectReference
"""
self._data_source_ref = data_source_ref
@property
def resources(self):
"""Gets the resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1PersistentVolumeClaimSpec.
:param resources: The resources of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def selector(self):
"""Gets the selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:return: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1PersistentVolumeClaimSpec.
:param selector: The selector of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: V1LabelSelector
"""
self._selector = selector
@property
def storage_class_name(self):
"""Gets the storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:return: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
"""Sets the storage_class_name of this V1PersistentVolumeClaimSpec.
Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 # noqa: E501
:param storage_class_name: The storage_class_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
"""Gets the volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:return: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""Sets the volume_mode of this V1PersistentVolumeClaimSpec.
volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. # noqa: E501
:param volume_mode: The volume_mode of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_mode = volume_mode
@property
def volume_name(self):
"""Gets the volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:return: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:rtype: str
"""
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
"""Sets the volume_name of this V1PersistentVolumeClaimSpec.
VolumeName is the binding reference to the PersistentVolume backing this claim. # noqa: E501
:param volume_name: The volume_name of this V1PersistentVolumeClaimSpec. # noqa: E501
:type: str
"""
self._volume_name = volume_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimSpec):
return True
return self.to_dict() != other.to_dict()
| 33.845659
| 219
| 0.652005
|
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimSpec(object):
openapi_types = {
'access_modes': 'list[str]',
'data_source': 'V1TypedLocalObjectReference',
'data_source_ref': 'V1TypedLocalObjectReference',
'resources': 'V1ResourceRequirements',
'selector': 'V1LabelSelector',
'storage_class_name': 'str',
'volume_mode': 'str',
'volume_name': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'data_source': 'dataSource',
'data_source_ref': 'dataSourceRef',
'resources': 'resources',
'selector': 'selector',
'storage_class_name': 'storageClassName',
'volume_mode': 'volumeMode',
'volume_name': 'volumeName'
}
def __init__(self, access_modes=None, data_source=None, data_source_ref=None, resources=None, selector=None, storage_class_name=None, volume_mode=None, volume_name=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._data_source = None
self._data_source_ref = None
self._resources = None
self._selector = None
self._storage_class_name = None
self._volume_mode = None
self._volume_name = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if data_source is not None:
self.data_source = data_source
if data_source_ref is not None:
self.data_source_ref = data_source_ref
if resources is not None:
self.resources = resources
if selector is not None:
self.selector = selector
if storage_class_name is not None:
self.storage_class_name = storage_class_name
if volume_mode is not None:
self.volume_mode = volume_mode
if volume_name is not None:
self.volume_name = volume_name
@property
def access_modes(self):
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
self._access_modes = access_modes
@property
def data_source(self):
return self._data_source
@data_source.setter
def data_source(self, data_source):
self._data_source = data_source
@property
def data_source_ref(self):
return self._data_source_ref
@data_source_ref.setter
def data_source_ref(self, data_source_ref):
self._data_source_ref = data_source_ref
@property
def resources(self):
return self._resources
@resources.setter
def resources(self, resources):
self._resources = resources
@property
def selector(self):
return self._selector
@selector.setter
def selector(self, selector):
self._selector = selector
@property
def storage_class_name(self):
return self._storage_class_name
@storage_class_name.setter
def storage_class_name(self, storage_class_name):
self._storage_class_name = storage_class_name
@property
def volume_mode(self):
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
self._volume_mode = volume_mode
@property
def volume_name(self):
return self._volume_name
@volume_name.setter
def volume_name(self, volume_name):
self._volume_name = volume_name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1PersistentVolumeClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1PersistentVolumeClaimSpec):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
f717e46b95455cc849096cc7da73943ce2b7377f
| 2,104
|
py
|
Python
|
tests/test_searchalgo.py
|
Intelecy/chocolate
|
0ba4f6f0130eab851d32d5534241c8cac3f6666e
|
[
"BSD-3-Clause"
] | 105
|
2017-10-27T02:14:22.000Z
|
2022-01-13T12:57:05.000Z
|
tests/test_searchalgo.py
|
Intelecy/chocolate
|
0ba4f6f0130eab851d32d5534241c8cac3f6666e
|
[
"BSD-3-Clause"
] | 31
|
2017-10-03T13:41:35.000Z
|
2021-08-20T21:01:29.000Z
|
tests/test_searchalgo.py
|
areeh/chocolate
|
5f946cb9daf42c3ab44508648917d46bc105c2fc
|
[
"BSD-3-Clause"
] | 38
|
2017-10-05T20:19:42.000Z
|
2022-03-28T11:34:04.000Z
|
import unittest
from unittest.mock import MagicMock
from chocolate.space import *
from chocolate.base import SearchAlgorithm
class TestSearchAlgorithm(unittest.TestCase):
def setUp(self):
self.mock_conn = MagicMock(name="connection")
def test_space_none_none(self):
self.mock_conn.get_space.return_value = None
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, None)
def test_space_not_equal_nowrite(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, s2)
def test_space_not_equal_write(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, s2, clear_db=True)
self.mock_conn.clear.assert_called_with()
self.mock_conn.insert_space.assert_called_with(s2)
self.assertEqual(algo.space, s2)
def test_space_none_not_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = None
algo = SearchAlgorithm(self.mock_conn, s1)
self.mock_conn.insert_space.assert_called_with(s1)
self.assertEqual(algo.space, s1)
def test_space_not_none_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, None)
self.assertEqual(algo.space, s1)
def test_update_value(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, 9.0)
expected = {"_loss": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
def test_update_mapping(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, {"f1": 9.0})
expected = {"_loss_f1": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
| 30.941176
| 78
| 0.663023
|
import unittest
from unittest.mock import MagicMock
from chocolate.space import *
from chocolate.base import SearchAlgorithm
class TestSearchAlgorithm(unittest.TestCase):
def setUp(self):
self.mock_conn = MagicMock(name="connection")
def test_space_none_none(self):
self.mock_conn.get_space.return_value = None
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, None)
def test_space_not_equal_nowrite(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
self.assertRaises(RuntimeError, SearchAlgorithm, self.mock_conn, s2)
def test_space_not_equal_write(self):
s1 = Space({"a": uniform(1, 2)})
s2 = Space({"a": uniform(1, 3)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, s2, clear_db=True)
self.mock_conn.clear.assert_called_with()
self.mock_conn.insert_space.assert_called_with(s2)
self.assertEqual(algo.space, s2)
def test_space_none_not_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = None
algo = SearchAlgorithm(self.mock_conn, s1)
self.mock_conn.insert_space.assert_called_with(s1)
self.assertEqual(algo.space, s1)
def test_space_not_none_none(self):
s1 = Space({"a": uniform(1, 2)})
self.mock_conn.get_space.return_value = s1
algo = SearchAlgorithm(self.mock_conn, None)
self.assertEqual(algo.space, s1)
def test_update_value(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, 9.0)
expected = {"_loss": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
def test_update_mapping(self):
token = {"a": 0}
algo = SearchAlgorithm(self.mock_conn, None)
algo.update(token, {"f1": 9.0})
expected = {"_loss_f1": 9.0}
self.mock_conn.update_result.assert_called_with(token, expected)
| true
| true
|
f717e481bf8d3b27429577d92a41047f92b8a9d4
| 185
|
py
|
Python
|
exam_retake/grocery_shop/project/deliveries/food.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | 1
|
2021-05-27T07:59:17.000Z
|
2021-05-27T07:59:17.000Z
|
exam_retake/grocery_shop/project/deliveries/food.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | null | null | null |
exam_retake/grocery_shop/project/deliveries/food.py
|
PetkoAndreev/Python-OOP
|
2cc3094940cdf078f0ee60be938e883f843766e4
|
[
"MIT"
] | null | null | null |
from project.deliveries.product import Product
class Food(Product):
food_quantity: int = 15
def __init__(self, name: str):
super().__init__(name, self.food_quantity)
| 20.555556
| 50
| 0.708108
|
from project.deliveries.product import Product
class Food(Product):
food_quantity: int = 15
def __init__(self, name: str):
super().__init__(name, self.food_quantity)
| true
| true
|
f717e56f1c9229960dfeaed5d108ebdcab4bd8a6
| 3,760
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
sqoin/xdisk
|
7f93d461b0168f11512a9dcfd9cf133122157544
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
sqoin/xdisk
|
7f93d461b0168f11512a9dcfd9cf133122157544
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
sqoin/xdisk
|
7f93d461b0168f11512a9dcfd9cf133122157544
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00xdiskuser:\x00Documents:\x00xdisk:\x00xdisk:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/xdiskuser/Documents/xdisk/xdisk/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['xdisk-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.639344
| 1,817
| 0.72633
|
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00xdiskuser:\x00Documents:\x00xdisk:\x00xdisk:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/xdiskuser/Documents/xdisk/xdisk/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['xdisk-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true
| true
|
f717e5abe192eeacd489fb3abdcfc529c914593b
| 8,031
|
py
|
Python
|
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
src/tests/test_markdown2man.py
|
dante-signal31/markdown2man
|
ce57b905b01a6fb8fe6d3d0989af3a15f42c78cf
|
[
"BSD-3-Clause"
] | null | null | null |
""" Test for markdown2man launcher."""
import gzip
import os
import sys
import tempfile
import test_common.fs.ops as test_ops
from test_common.fs.temp import temp_dir
# TODO: Refactor project layout to leave tests folder out of src.
sys.path.append("src")
import src.markdown2man as markdown2man
def test_launcher_all_options_given(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_all_long_options_given(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "--manpage_section", "1", "--manpage_title",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_changed(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "2", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.2.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
expected_content = expected_content.replace(".TH \"cifra\" \"1\"",
".TH \"cifra\" \"2\"")
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_omitted(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_title_omitted(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_line = ".TH \"cifra\" \"1\" \"\" \"\" \"cifra\"\n"
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = [line.decode() for line in output_file.readlines()]
assert expected_line in recovered_content
def test_launcher_uncompressed(temp_dir):
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-u"]
expected_output_file = os.path.join(temp_dir, "cifra.1")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with open(expected_output_file) as output_file:
recovered_content = output_file.read()
assert recovered_content == expected_content
def test_launcher_different_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_folder}"]
expected_output_file = os.path.join(temp_output_folder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_different_non_existing_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
# Setup test.
temporal_markdown_file = os.path.join(temp_dir, "README.md")
temp_output_subfolder = os.path.join(temp_output_folder, "man/")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_subfolder}"]
expected_output_file = os.path.join(temp_output_subfolder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
# Perform test.
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
| 43.410811
| 102
| 0.696551
|
import gzip
import os
import sys
import tempfile
import test_common.fs.ops as test_ops
from test_common.fs.temp import temp_dir
sys.path.append("src")
import src.markdown2man as markdown2man
def test_launcher_all_options_given(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_all_long_options_given(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "--manpage_section", "1", "--manpage_title",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_changed(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "2", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.2.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
expected_content = expected_content.replace(".TH \"cifra\" \"1\"",
".TH \"cifra\" \"2\"")
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_section_omitted(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-t",
"cifra usage documentation"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_title_omitted(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra"]
expected_output_file = os.path.join(temp_dir, "cifra.1.gz")
recovered_content = ""
expected_line = ".TH \"cifra\" \"1\" \"\" \"\" \"cifra\"\n"
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = [line.decode() for line in output_file.readlines()]
assert expected_line in recovered_content
def test_launcher_uncompressed(temp_dir):
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-u"]
expected_output_file = os.path.join(temp_dir, "cifra.1")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with open(expected_output_file) as output_file:
recovered_content = output_file.read()
assert recovered_content == expected_content
def test_launcher_different_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
temporal_markdown_file = os.path.join(temp_dir, "README.md")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_folder}"]
expected_output_file = os.path.join(temp_output_folder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
def test_launcher_different_non_existing_output_folder(temp_dir):
with tempfile.TemporaryDirectory() as temp_output_folder:
temporal_markdown_file = os.path.join(temp_dir, "README.md")
temp_output_subfolder = os.path.join(temp_output_folder, "man/")
test_ops.copy_file("src/tests/resources/README.md", temporal_markdown_file)
command_args = [f"{temporal_markdown_file}", "cifra", "-s", "1", "-t",
"cifra usage documentation", "-f", f"{temp_output_subfolder}"]
expected_output_file = os.path.join(temp_output_subfolder, "cifra.1.gz")
recovered_content = ""
expected_content = ""
with open("src/tests/resources/cifra.1") as manpage:
expected_content = manpage.read()
assert not os.path.exists(expected_output_file)
markdown2man.main(command_args)
assert os.path.exists(expected_output_file)
with gzip.open(expected_output_file) as output_file:
recovered_content = "".join(line.decode() for line in output_file.readlines())
assert recovered_content == expected_content
| true
| true
|
f717e6e123e9b4e6acce7b7cd6d35c7024149784
| 104
|
py
|
Python
|
rules/tabs_spaces.py
|
Ahuge/Pepper
|
2afe398629d0505dfa1b5ad7d13eb68a3df695bf
|
[
"MIT"
] | null | null | null |
rules/tabs_spaces.py
|
Ahuge/Pepper
|
2afe398629d0505dfa1b5ad7d13eb68a3df695bf
|
[
"MIT"
] | 3
|
2015-10-16T00:58:27.000Z
|
2019-06-20T16:57:03.000Z
|
rules/tabs_spaces.py
|
Ahuge/Pepper
|
2afe398629d0505dfa1b5ad7d13eb68a3df695bf
|
[
"MIT"
] | null | null | null |
__author__ = 'Alex'
import re
def main(line):
sub = re.sub(r"(\t)", r" ", line)
return sub
| 13
| 40
| 0.548077
|
__author__ = 'Alex'
import re
def main(line):
sub = re.sub(r"(\t)", r" ", line)
return sub
| true
| true
|
f717e99028c5d14443a9263ee3de86569fca8475
| 377
|
py
|
Python
|
ppr-api/src/endpoints/api.py
|
gh2os/ppr
|
9f67321baa5bbb450ac5e06755e2838497a2cf96
|
[
"Apache-2.0"
] | null | null | null |
ppr-api/src/endpoints/api.py
|
gh2os/ppr
|
9f67321baa5bbb450ac5e06755e2838497a2cf96
|
[
"Apache-2.0"
] | 2
|
2020-03-18T23:26:53.000Z
|
2020-03-18T23:40:19.000Z
|
ppr-api/src/endpoints/api.py
|
gh2os/ppr
|
9f67321baa5bbb450ac5e06755e2838497a2cf96
|
[
"Apache-2.0"
] | null | null | null |
""" Set up all the endpoints for the web service. """
import fastapi
from . import financing_statement, healthcheck, search
router = fastapi.APIRouter()
router.include_router(healthcheck.router, prefix='/operations', tags=['Operations'])
router.include_router(financing_statement.router, tags=['Financing Statement'])
router.include_router(search.router, tags=['Search'])
| 29
| 84
| 0.777188
|
import fastapi
from . import financing_statement, healthcheck, search
router = fastapi.APIRouter()
router.include_router(healthcheck.router, prefix='/operations', tags=['Operations'])
router.include_router(financing_statement.router, tags=['Financing Statement'])
router.include_router(search.router, tags=['Search'])
| true
| true
|
f717e9994215a9e2f730997e5778606b01734396
| 2,349
|
py
|
Python
|
openspeech_cli/hydra_train.py
|
tqslj2/openspeech
|
10307587f08615224df5a868fb5249c68c70b12d
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-03-04T02:52:44.000Z
|
2022-03-04T02:52:44.000Z
|
openspeech_cli/hydra_train.py
|
tqslj2/openspeech
|
10307587f08615224df5a868fb5249c68c70b12d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
openspeech_cli/hydra_train.py
|
tqslj2/openspeech
|
10307587f08615224df5a868fb5249c68c70b12d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import hydra
import wandb
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_info
from openspeech.tokenizers import TOKENIZER_REGISTRY
from openspeech.datasets import DATA_MODULE_REGISTRY
from openspeech.dataclass.initialize import hydra_train_init
from openspeech.models import MODEL_REGISTRY
from openspeech.utils import parse_configs, get_pl_trainer
@hydra.main(config_path=os.path.join("..", "openspeech", "configs"), config_name="train")
def hydra_main(configs: DictConfig) -> None:
rank_zero_info(OmegaConf.to_yaml(configs))
pl.seed_everything(configs.trainer.seed)
logger, num_devices = parse_configs(configs)
data_module = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs)
data_module.prepare_data()
tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs)
data_module.setup(tokenizer=tokenizer)
model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=tokenizer)
trainer = get_pl_trainer(configs, num_devices, logger)
trainer.fit(model, data_module)
trainer.test()
if __name__ == '__main__':
hydra_train_init()
hydra_main()
| 39.15
| 90
| 0.787143
|
import os
import hydra
import wandb
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_info
from openspeech.tokenizers import TOKENIZER_REGISTRY
from openspeech.datasets import DATA_MODULE_REGISTRY
from openspeech.dataclass.initialize import hydra_train_init
from openspeech.models import MODEL_REGISTRY
from openspeech.utils import parse_configs, get_pl_trainer
@hydra.main(config_path=os.path.join("..", "openspeech", "configs"), config_name="train")
def hydra_main(configs: DictConfig) -> None:
rank_zero_info(OmegaConf.to_yaml(configs))
pl.seed_everything(configs.trainer.seed)
logger, num_devices = parse_configs(configs)
data_module = DATA_MODULE_REGISTRY[configs.dataset.dataset](configs)
data_module.prepare_data()
tokenizer = TOKENIZER_REGISTRY[configs.tokenizer.unit](configs)
data_module.setup(tokenizer=tokenizer)
model = MODEL_REGISTRY[configs.model.model_name](configs=configs, tokenizer=tokenizer)
trainer = get_pl_trainer(configs, num_devices, logger)
trainer.fit(model, data_module)
trainer.test()
if __name__ == '__main__':
hydra_train_init()
hydra_main()
| true
| true
|
f717eab9315eef9eda1defc31f9c5122f0ff1655
| 1,026
|
py
|
Python
|
Math/x^2 = y^3.py
|
vsriv90/mechanical_engineering
|
c922cdce1a595e9acb6a87cf415fb3685caf51a3
|
[
"MIT"
] | 1
|
2021-11-03T06:37:44.000Z
|
2021-11-03T06:37:44.000Z
|
Math/x^2 = y^3.py
|
vsriv90/mechanical_engineering
|
c922cdce1a595e9acb6a87cf415fb3685caf51a3
|
[
"MIT"
] | null | null | null |
Math/x^2 = y^3.py
|
vsriv90/mechanical_engineering
|
c922cdce1a595e9acb6a87cf415fb3685caf51a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# #### Show the common numbers for $x^2=y^3$
#
# [Link](https://www.quora.com/Is-64-the-first-perfect-square-and-a-perfect-cube-Is-it-the-only-one/answer/Alon-Amit?ch=3&share=e27e1c03&srid=iBLa) to Quora (Alon Amit's answer)
#
#
#
# In[1]:
import numpy
import sympy
import pandas
import csv
import matplotlib.pyplot as plt
import seaborn as sn # to draw plots
import plotly.express as px
# In[2]:
import keyword
print(keyword.kwlist) # A list of all 33 keywords in python
# In[68]:
list1 = [] # for all sqaured values
list2 = [] # for all cubed values
n = 100 # till what values of n to check
for i in range(0,n): # if i is in the above given range
j=i**2
k=i**3
list1.append(j) # add the squared values to list1
list2.append(k) # add the cubed values to list2
elem = sorted(set(list1) & set(list2)) # check if an element is on both "list1" and "list2"
print(elem) # print the list
# print(set(list1)) # if you want to see the list as a set
| 20.117647
| 177
| 0.674464
|
ess as px
# In[2]:
import keyword
print(keyword.kwlist) # A list of all 33 keywords in python
# In[68]:
list1 = [] # for all sqaured values
list2 = [] # for all cubed values
n = 100 # till what values of n to check
for i in range(0,n): # if i is in the above given range
j=i**2
k=i**3
list1.append(j) # add the squared values to list1
list2.append(k) # add the cubed values to list2
elem = sorted(set(list1) & set(list2)) # check if an element is on both "list1" and "list2"
print(elem) # print the list
# print(set(list1)) # if you want to see the list as a set
| true
| true
|
f717eb7deff235aa9cb2449ef700d1d63d624333
| 155
|
py
|
Python
|
utilities/printing.py
|
tarsqi/ttk
|
085007047ab591426d5c08b123906c070deb6627
|
[
"Apache-2.0"
] | 25
|
2016-02-28T16:42:57.000Z
|
2022-01-03T13:29:48.000Z
|
utilities/printing.py
|
tarsqi/ttk
|
085007047ab591426d5c08b123906c070deb6627
|
[
"Apache-2.0"
] | 84
|
2016-02-13T01:07:55.000Z
|
2021-04-06T18:57:36.000Z
|
utilities/printing.py
|
tarsqi/ttk
|
085007047ab591426d5c08b123906c070deb6627
|
[
"Apache-2.0"
] | 10
|
2016-05-30T14:35:59.000Z
|
2022-03-16T12:24:09.000Z
|
from __future__ import absolute_import
import pprint
def pp(stuff):
pretty_printer = pprint.PrettyPrinter(indent=3)
pretty_printer.pprint(stuff)
| 19.375
| 51
| 0.787097
|
from __future__ import absolute_import
import pprint
def pp(stuff):
pretty_printer = pprint.PrettyPrinter(indent=3)
pretty_printer.pprint(stuff)
| true
| true
|
f717ed1a92cd4103d8f8d7eecb5ad29aa817477f
| 8,388
|
py
|
Python
|
src/ingest_financials.py
|
ozacas/asxtrade
|
a3645ae526bfc7a546fdf2a39520feda99e3390a
|
[
"Apache-2.0"
] | 8
|
2021-03-20T13:12:25.000Z
|
2022-02-07T11:17:40.000Z
|
src/ingest_financials.py
|
ozacas/asxtrade
|
a3645ae526bfc7a546fdf2a39520feda99e3390a
|
[
"Apache-2.0"
] | 8
|
2021-03-07T03:23:46.000Z
|
2021-06-01T10:49:56.000Z
|
src/ingest_financials.py
|
ozacas/asxtrade
|
a3645ae526bfc7a546fdf2a39520feda99e3390a
|
[
"Apache-2.0"
] | 3
|
2020-12-08T10:22:23.000Z
|
2021-08-04T01:59:24.000Z
|
#!/usr/bin/python3
"""
Responsible for ingesting data related to the business performance over time. Data is placed into the asx_company_financial_metric
collection, ready for the core viewer app to use. Stocks whose financial details have been retrieved in the past month are skipped.
"""
import pymongo
import argparse
import yfinance as yf
import time
from utils import read_config
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from bson.objectid import ObjectId
def melt_dataframes(dfs: tuple) -> pd.DataFrame:
result = None
for df in filter(lambda df: df is not None and len(df) > 0, dfs):
df["metric"] = df.index
melted = pd.melt(df, id_vars=("metric"), var_name="date")
melted = melted.dropna(axis=0, how="any")
if len(melted) == 0:
continue
# print(melted)
# print(melted.shape)
if result is None:
result = melted
else:
result = result.append(melted)
if result is not None and "date" in result.columns:
# print(result)
result["date"] = pd.to_datetime(
result["date"], infer_datetime_format=True
) # format="%Y-%m-%d")
# print(result)
return result
def desired_stocks():
available_stocks = set(db.asx_company_details.distinct("asx_code"))
print(f"Found {len(available_stocks)} available stocks.")
gen_time = datetime.today() - timedelta(days=30)
month_ago = ObjectId.from_datetime(gen_time)
recently_updated_stocks = set(
[
rec["asx_code"]
for rec in db.asx_company_financial_metrics.find(
{"_id": {"$gte": month_ago}}
)
]
)
ret = available_stocks.difference(recently_updated_stocks)
print(f"Found {len(ret)} desired stocks to process.")
return ret
def update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:
"""
Add (or update) all financial metrics (ie. rows) for the specified asx_code in the specified dataframe
:rtype: the number of records updated/created is returned
"""
print(f"Updating {len(df)} financial metrics for {asx_code}")
n = 0
for t in df.itertuples():
d = {
"metric": t.metric,
"date": t.date,
"value": t.value,
"asx_code": t.asx_code,
}
assert t.asx_code == asx_code
result = db.asx_company_financial_metrics.update_one(
{"asx_code": asx_code, "date": t.date, "metric": t.metric},
{"$set": d},
upsert=True,
)
assert result is not None
assert isinstance(result, pymongo.results.UpdateResult)
assert result.matched_count == 1 or result.upserted_id is not None
n += 1
return n
def fetch_metrics(asx_code: str) -> pd.DataFrame:
"""
Using the excellent yfinance, we fetch all possible metrics of business performance for the specified stock code.
Returns a dataframe (possibly empty or none) representing each metric and its datapoints as separate rows
"""
assert len(asx_code) >= 3
ticker = yf.Ticker(asx_code + ".AX")
cashflow_df = ticker.cashflow
financial_df = ticker.financials
earnings_df = ticker.earnings
if set(earnings_df.columns) == set(["Earnings", "Revenue"]):
earnings_df.index = earnings_df.index.map(
str
) # convert years to str (maybe int)
earnings_df = earnings_df.transpose()
# print(earnings_df)
balance_sheet_df = ticker.balance_sheet
melted_df = melt_dataframes(
(cashflow_df, financial_df, earnings_df, balance_sheet_df)
)
return melted_df
def make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:
#print(new_quote)
d = {
"asx_code": asx_code,
"fetch_date": new_quote.Index,
"volume": new_quote.Volume,
"last_price": new_quote.Close,
"day_low_price": new_quote.Low,
"day_high_price": new_quote.High,
"open_price": new_quote.Open,
"error_code": "",
"error_descr": "",
# we dont set nan fields so that existing values (if any) are used ie. merge with existing data
# "annual_dividend_yield": np.nan, # no available data from yf.Ticker.history() although may be available elsewhere, but for now set to missing
# "annual_daily_volume": np.nan,
# "bid_price": np.nan,
"change_price": new_quote.change_price,
"change_in_percent": new_quote.change_in_percent,
}
return d
def fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:
assert db is not None
assert len(stock_to_fetch) >= 3
ticker = yf.Ticker(stock_to_fetch + ".AX")
df = ticker.history(period="max")
df.index = [d.strftime("%Y-%m-%d") for d in df.index]
# print(df)
available_dates = set(df.index)
available_quotes = list(db.asx_prices.find({"asx_code": stock_to_fetch}))
quoted_dates = set(
[q["fetch_date"] for q in available_quotes if not np.isnan(q["last_price"])]
)
assert set(df.columns) == set(
["Open", "High", "Low", "Close", "Volume", "Dividends", "Stock Splits"]
)
dates_to_fill = (
available_dates.difference(quoted_dates) if not force else available_dates
)
print(
"Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})".format(
len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force
)
)
if len(dates_to_fill) < 1:
return 0
df["change_price"] = df["Close"].diff()
df["change_in_percent"] = df["Close"].pct_change() * 100.0
gap_quotes_df = df.filter(dates_to_fill, axis=0)
# print(df)
n = 0
for new_quote in gap_quotes_df.itertuples():
d = make_asx_prices_dict(new_quote, stock_to_fetch)
result = db.asx_prices.update_one(
{"fetch_date": d["fetch_date"], "asx_code": d["asx_code"]},
{"$set": d},
upsert=True,
)
assert result is not None
# assert result.modified_count == 1 or result.upserted_id is not None
n += 1
assert n == len(gap_quotes_df)
return n
if __name__ == "__main__":
args = argparse.ArgumentParser(
description="Update financial performance metrics for ASX stocks using yfinance"
)
args.add_argument(
"--config",
help="Configuration file to use [config.json]",
type=str,
default="config.json",
)
args.add_argument(
"--fill-gaps",
help="Fill dates with no existing quotes for each stock (use --debug for a particular stock)",
action="store_true",
)
args.add_argument("--fail-fast", help="Stop on first error", action="store_true")
args.add_argument(
"--delay", help="Delay between stocks in seconds [30]", type=int, default=30
)
args.add_argument("--force", help="Overwrite existing data (if any)", action="store_true")
args.add_argument(
"--debug",
help="Try to fetch specified stock (for debugging)",
type=str,
required=False,
default=None,
)
a = args.parse_args()
config, password = read_config(a.config)
m = config.get("mongo")
mongo = pymongo.MongoClient(
m.get("host"), m.get("port"), username=m.get("user"), password=password
)
db = mongo[m.get("db")]
stock_codes = desired_stocks() if not a.debug else set([a.debug])
print(f"Updating financial metrics for {len(stock_codes)} stocks")
for asx_code in sorted(stock_codes):
print(f"Processing stock {asx_code}")
try:
melted_df = fetch_metrics(asx_code)
if melted_df is None or len(melted_df) < 1:
raise ValueError(f"No data available for {asx_code}... skipping")
melted_df["asx_code"] = asx_code
ret = update_all_metrics(melted_df, asx_code)
assert ret == len(melted_df)
if a.fill_gaps:
fill_stock_quote_gaps(db, asx_code, force=a.force)
# FALLTHRU...
time.sleep(a.delay)
except Exception as e:
print(f"WARNING: unable to download financials for {asx_code}")
print(str(e))
if a.fail_fast:
raise e
exit(0)
| 35.542373
| 152
| 0.625298
|
import pymongo
import argparse
import yfinance as yf
import time
from utils import read_config
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from bson.objectid import ObjectId
def melt_dataframes(dfs: tuple) -> pd.DataFrame:
result = None
for df in filter(lambda df: df is not None and len(df) > 0, dfs):
df["metric"] = df.index
melted = pd.melt(df, id_vars=("metric"), var_name="date")
melted = melted.dropna(axis=0, how="any")
if len(melted) == 0:
continue
if result is None:
result = melted
else:
result = result.append(melted)
if result is not None and "date" in result.columns:
result["date"] = pd.to_datetime(
result["date"], infer_datetime_format=True
)
return result
def desired_stocks():
available_stocks = set(db.asx_company_details.distinct("asx_code"))
print(f"Found {len(available_stocks)} available stocks.")
gen_time = datetime.today() - timedelta(days=30)
month_ago = ObjectId.from_datetime(gen_time)
recently_updated_stocks = set(
[
rec["asx_code"]
for rec in db.asx_company_financial_metrics.find(
{"_id": {"$gte": month_ago}}
)
]
)
ret = available_stocks.difference(recently_updated_stocks)
print(f"Found {len(ret)} desired stocks to process.")
return ret
def update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:
print(f"Updating {len(df)} financial metrics for {asx_code}")
n = 0
for t in df.itertuples():
d = {
"metric": t.metric,
"date": t.date,
"value": t.value,
"asx_code": t.asx_code,
}
assert t.asx_code == asx_code
result = db.asx_company_financial_metrics.update_one(
{"asx_code": asx_code, "date": t.date, "metric": t.metric},
{"$set": d},
upsert=True,
)
assert result is not None
assert isinstance(result, pymongo.results.UpdateResult)
assert result.matched_count == 1 or result.upserted_id is not None
n += 1
return n
def fetch_metrics(asx_code: str) -> pd.DataFrame:
assert len(asx_code) >= 3
ticker = yf.Ticker(asx_code + ".AX")
cashflow_df = ticker.cashflow
financial_df = ticker.financials
earnings_df = ticker.earnings
if set(earnings_df.columns) == set(["Earnings", "Revenue"]):
earnings_df.index = earnings_df.index.map(
str
)
earnings_df = earnings_df.transpose()
balance_sheet_df = ticker.balance_sheet
melted_df = melt_dataframes(
(cashflow_df, financial_df, earnings_df, balance_sheet_df)
)
return melted_df
def make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:
d = {
"asx_code": asx_code,
"fetch_date": new_quote.Index,
"volume": new_quote.Volume,
"last_price": new_quote.Close,
"day_low_price": new_quote.Low,
"day_high_price": new_quote.High,
"open_price": new_quote.Open,
"error_code": "",
"error_descr": "",
ange_in_percent,
}
return d
def fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:
assert db is not None
assert len(stock_to_fetch) >= 3
ticker = yf.Ticker(stock_to_fetch + ".AX")
df = ticker.history(period="max")
df.index = [d.strftime("%Y-%m-%d") for d in df.index]
available_dates = set(df.index)
available_quotes = list(db.asx_prices.find({"asx_code": stock_to_fetch}))
quoted_dates = set(
[q["fetch_date"] for q in available_quotes if not np.isnan(q["last_price"])]
)
assert set(df.columns) == set(
["Open", "High", "Low", "Close", "Volume", "Dividends", "Stock Splits"]
)
dates_to_fill = (
available_dates.difference(quoted_dates) if not force else available_dates
)
print(
"Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})".format(
len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force
)
)
if len(dates_to_fill) < 1:
return 0
df["change_price"] = df["Close"].diff()
df["change_in_percent"] = df["Close"].pct_change() * 100.0
gap_quotes_df = df.filter(dates_to_fill, axis=0)
n = 0
for new_quote in gap_quotes_df.itertuples():
d = make_asx_prices_dict(new_quote, stock_to_fetch)
result = db.asx_prices.update_one(
{"fetch_date": d["fetch_date"], "asx_code": d["asx_code"]},
{"$set": d},
upsert=True,
)
assert result is not None
n += 1
assert n == len(gap_quotes_df)
return n
if __name__ == "__main__":
args = argparse.ArgumentParser(
description="Update financial performance metrics for ASX stocks using yfinance"
)
args.add_argument(
"--config",
help="Configuration file to use [config.json]",
type=str,
default="config.json",
)
args.add_argument(
"--fill-gaps",
help="Fill dates with no existing quotes for each stock (use --debug for a particular stock)",
action="store_true",
)
args.add_argument("--fail-fast", help="Stop on first error", action="store_true")
args.add_argument(
"--delay", help="Delay between stocks in seconds [30]", type=int, default=30
)
args.add_argument("--force", help="Overwrite existing data (if any)", action="store_true")
args.add_argument(
"--debug",
help="Try to fetch specified stock (for debugging)",
type=str,
required=False,
default=None,
)
a = args.parse_args()
config, password = read_config(a.config)
m = config.get("mongo")
mongo = pymongo.MongoClient(
m.get("host"), m.get("port"), username=m.get("user"), password=password
)
db = mongo[m.get("db")]
stock_codes = desired_stocks() if not a.debug else set([a.debug])
print(f"Updating financial metrics for {len(stock_codes)} stocks")
for asx_code in sorted(stock_codes):
print(f"Processing stock {asx_code}")
try:
melted_df = fetch_metrics(asx_code)
if melted_df is None or len(melted_df) < 1:
raise ValueError(f"No data available for {asx_code}... skipping")
melted_df["asx_code"] = asx_code
ret = update_all_metrics(melted_df, asx_code)
assert ret == len(melted_df)
if a.fill_gaps:
fill_stock_quote_gaps(db, asx_code, force=a.force)
time.sleep(a.delay)
except Exception as e:
print(f"WARNING: unable to download financials for {asx_code}")
print(str(e))
if a.fail_fast:
raise e
exit(0)
| true
| true
|
f717eda1c497c2f501c58a071e0a58d22211d3f9
| 6,442
|
py
|
Python
|
src/waldur_slurm/serializers.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | 2
|
2017-01-20T15:26:25.000Z
|
2017-08-03T04:38:08.000Z
|
src/waldur_slurm/serializers.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | null | null | null |
src/waldur_slurm/serializers.py
|
opennode/nodeconductor-assembly-waldur
|
cad9966389dc9b52b13d2301940c99cf4b243900
|
[
"MIT"
] | null | null | null |
import re
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers as rf_serializers
from waldur_core.core import serializers as core_serializers
from waldur_core.structure import serializers as structure_serializers
from waldur_core.structure.permissions import _has_admin_access
from waldur_freeipa import models as freeipa_models
from . import models
class SlurmServiceSerializer(structure_serializers.ServiceOptionsSerializer):
class Meta:
secret_fields = ('hostname', 'username', 'port', 'gateway')
username = rf_serializers.CharField(
max_length=100, help_text=_('Administrative user'), default='root'
)
hostname = rf_serializers.CharField(
source='options.hostname', label=_('Hostname or IP address of master node')
)
default_account = rf_serializers.CharField(
source='options.default_account', label=_('Default SLURM account for user')
)
port = rf_serializers.IntegerField(source='options.port', required=False)
use_sudo = rf_serializers.BooleanField(
source='options.use_sudo',
default=False,
help_text=_('Set to true to activate privilege escalation'),
required=False,
)
gateway = rf_serializers.CharField(
source='options.gateway',
label=_('Hostname or IP address of gateway node'),
required=False,
)
firecrest_api_url = rf_serializers.CharField(
source='options.firecrest_api_url',
label=_('FirecREST API base URL'),
required=False,
)
class AllocationSerializer(
structure_serializers.BaseResourceSerializer,
core_serializers.AugmentedSerializerMixin,
):
username = rf_serializers.SerializerMethodField()
gateway = rf_serializers.SerializerMethodField()
homepage = rf_serializers.ReadOnlyField(source='service_settings.homepage')
def get_username(self, allocation):
request = self.context['request']
try:
profile = freeipa_models.Profile.objects.get(user=request.user)
return profile.username
except freeipa_models.Profile.DoesNotExist:
return None
def get_gateway(self, allocation):
options = allocation.service_settings.options
return options.get('gateway') or options.get('hostname')
class Meta(structure_serializers.BaseResourceSerializer.Meta):
model = models.Allocation
fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
'cpu_limit',
'cpu_usage',
'gpu_limit',
'gpu_usage',
'ram_limit',
'ram_usage',
'username',
'gateway',
'is_active',
'homepage',
)
read_only_fields = (
structure_serializers.BaseResourceSerializer.Meta.read_only_fields
+ (
'cpu_usage',
'gpu_usage',
'ram_usage',
'cpu_limit',
'gpu_limit',
'ram_limit',
'is_active',
)
)
extra_kwargs = dict(
url={'lookup_field': 'uuid', 'view_name': 'slurm-allocation-detail'},
cpu_limit={'validators': [MinValueValidator(0)]},
gpu_limit={'validators': [MinValueValidator(0)]},
ram_limit={'validators': [MinValueValidator(0)]},
)
def validate(self, attrs):
attrs = super(AllocationSerializer, self).validate(attrs)
# Skip validation on update
if self.instance:
return attrs
correct_name_regex = '^([%s]{1,63})$' % models.SLURM_ALLOCATION_REGEX
name = attrs.get('name')
if not re.match(correct_name_regex, name):
raise rf_serializers.ValidationError(
_(
"Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen"
)
% name
)
project = attrs['project']
user = self.context['request'].user
if not _has_admin_access(user, project):
raise rf_exceptions.PermissionDenied(
_('You do not have permissions to create allocation for given project.')
)
return attrs
class AllocationSetLimitsSerializer(rf_serializers.ModelSerializer):
cpu_limit = rf_serializers.IntegerField(min_value=-1)
gpu_limit = rf_serializers.IntegerField(min_value=-1)
ram_limit = rf_serializers.IntegerField(min_value=-1)
class Meta:
model = models.Allocation
fields = ('cpu_limit', 'gpu_limit', 'ram_limit')
class AllocationUserUsageCreateSerializer(rf_serializers.HyperlinkedModelSerializer):
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'user',
'username',
)
extra_kwargs = {
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AllocationUserUsageSerializer(rf_serializers.HyperlinkedModelSerializer):
full_name = rf_serializers.ReadOnlyField(source='user.full_name')
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'allocation',
'user',
'username',
'full_name',
)
extra_kwargs = {
'allocation': {
'lookup_field': 'uuid',
'view_name': 'slurm-allocation-detail',
},
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AssociationSerializer(rf_serializers.HyperlinkedModelSerializer):
allocation = rf_serializers.HyperlinkedRelatedField(
queryset=models.Allocation.objects.all(),
view_name='slurm-allocation-detail',
lookup_field='uuid',
)
class Meta:
model = models.Association
fields = (
'uuid',
'username',
'allocation',
)
| 31.42439
| 88
| 0.606023
|
import re
from django.core.validators import MinValueValidator
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers as rf_serializers
from waldur_core.core import serializers as core_serializers
from waldur_core.structure import serializers as structure_serializers
from waldur_core.structure.permissions import _has_admin_access
from waldur_freeipa import models as freeipa_models
from . import models
class SlurmServiceSerializer(structure_serializers.ServiceOptionsSerializer):
class Meta:
secret_fields = ('hostname', 'username', 'port', 'gateway')
username = rf_serializers.CharField(
max_length=100, help_text=_('Administrative user'), default='root'
)
hostname = rf_serializers.CharField(
source='options.hostname', label=_('Hostname or IP address of master node')
)
default_account = rf_serializers.CharField(
source='options.default_account', label=_('Default SLURM account for user')
)
port = rf_serializers.IntegerField(source='options.port', required=False)
use_sudo = rf_serializers.BooleanField(
source='options.use_sudo',
default=False,
help_text=_('Set to true to activate privilege escalation'),
required=False,
)
gateway = rf_serializers.CharField(
source='options.gateway',
label=_('Hostname or IP address of gateway node'),
required=False,
)
firecrest_api_url = rf_serializers.CharField(
source='options.firecrest_api_url',
label=_('FirecREST API base URL'),
required=False,
)
class AllocationSerializer(
structure_serializers.BaseResourceSerializer,
core_serializers.AugmentedSerializerMixin,
):
username = rf_serializers.SerializerMethodField()
gateway = rf_serializers.SerializerMethodField()
homepage = rf_serializers.ReadOnlyField(source='service_settings.homepage')
def get_username(self, allocation):
request = self.context['request']
try:
profile = freeipa_models.Profile.objects.get(user=request.user)
return profile.username
except freeipa_models.Profile.DoesNotExist:
return None
def get_gateway(self, allocation):
options = allocation.service_settings.options
return options.get('gateway') or options.get('hostname')
class Meta(structure_serializers.BaseResourceSerializer.Meta):
model = models.Allocation
fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
'cpu_limit',
'cpu_usage',
'gpu_limit',
'gpu_usage',
'ram_limit',
'ram_usage',
'username',
'gateway',
'is_active',
'homepage',
)
read_only_fields = (
structure_serializers.BaseResourceSerializer.Meta.read_only_fields
+ (
'cpu_usage',
'gpu_usage',
'ram_usage',
'cpu_limit',
'gpu_limit',
'ram_limit',
'is_active',
)
)
extra_kwargs = dict(
url={'lookup_field': 'uuid', 'view_name': 'slurm-allocation-detail'},
cpu_limit={'validators': [MinValueValidator(0)]},
gpu_limit={'validators': [MinValueValidator(0)]},
ram_limit={'validators': [MinValueValidator(0)]},
)
def validate(self, attrs):
attrs = super(AllocationSerializer, self).validate(attrs)
if self.instance:
return attrs
correct_name_regex = '^([%s]{1,63})$' % models.SLURM_ALLOCATION_REGEX
name = attrs.get('name')
if not re.match(correct_name_regex, name):
raise rf_serializers.ValidationError(
_(
"Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen"
)
% name
)
project = attrs['project']
user = self.context['request'].user
if not _has_admin_access(user, project):
raise rf_exceptions.PermissionDenied(
_('You do not have permissions to create allocation for given project.')
)
return attrs
class AllocationSetLimitsSerializer(rf_serializers.ModelSerializer):
cpu_limit = rf_serializers.IntegerField(min_value=-1)
gpu_limit = rf_serializers.IntegerField(min_value=-1)
ram_limit = rf_serializers.IntegerField(min_value=-1)
class Meta:
model = models.Allocation
fields = ('cpu_limit', 'gpu_limit', 'ram_limit')
class AllocationUserUsageCreateSerializer(rf_serializers.HyperlinkedModelSerializer):
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'user',
'username',
)
extra_kwargs = {
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AllocationUserUsageSerializer(rf_serializers.HyperlinkedModelSerializer):
full_name = rf_serializers.ReadOnlyField(source='user.full_name')
class Meta:
model = models.AllocationUserUsage
fields = (
'cpu_usage',
'ram_usage',
'gpu_usage',
'month',
'year',
'allocation',
'user',
'username',
'full_name',
)
extra_kwargs = {
'allocation': {
'lookup_field': 'uuid',
'view_name': 'slurm-allocation-detail',
},
'user': {
'lookup_field': 'uuid',
'view_name': 'user-detail',
},
}
class AssociationSerializer(rf_serializers.HyperlinkedModelSerializer):
allocation = rf_serializers.HyperlinkedRelatedField(
queryset=models.Allocation.objects.all(),
view_name='slurm-allocation-detail',
lookup_field='uuid',
)
class Meta:
model = models.Association
fields = (
'uuid',
'username',
'allocation',
)
| true
| true
|
f717efcb5290a464a7824eb3e23e80853f7e2668
| 1,230
|
py
|
Python
|
ddns_clienter_core/runtimes/address_providers/host_name.py
|
rexzhang/ddns-clienter
|
f170cb579d49df2aa4aa1f607bbcf088af9cd4a5
|
[
"MIT"
] | null | null | null |
ddns_clienter_core/runtimes/address_providers/host_name.py
|
rexzhang/ddns-clienter
|
f170cb579d49df2aa4aa1f607bbcf088af9cd4a5
|
[
"MIT"
] | null | null | null |
ddns_clienter_core/runtimes/address_providers/host_name.py
|
rexzhang/ddns-clienter
|
f170cb579d49df2aa4aa1f607bbcf088af9cd4a5
|
[
"MIT"
] | null | null | null |
import socket
from logging import getLogger
from .abs import AddressProviderAbs, AddressProviderException
logger = getLogger(__name__)
class AddressProviderHostName(AddressProviderAbs):
@property
def name(self):
return "hostname"
def _detect_ip_address(self) -> None:
try:
data = socket.getaddrinfo(self._address_c.parameter, 80)
except socket.gaierror as e:
message = "Detect IP Address failed, hostname:'{}', message:{}".format(
self._address_c.parameter, e
)
logger.error(message)
raise AddressProviderException(message)
for item in data:
if (
item[0] == socket.AF_INET
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv4
):
ip_address = item[4][0]
self.set_ipv4_address(ip_address)
continue
if (
item[0] == socket.AF_INET6
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv6
):
ip_address = item[4][0]
self.set_ipv6_address(ip_address)
continue
| 29.285714
| 83
| 0.550407
|
import socket
from logging import getLogger
from .abs import AddressProviderAbs, AddressProviderException
logger = getLogger(__name__)
class AddressProviderHostName(AddressProviderAbs):
@property
def name(self):
return "hostname"
def _detect_ip_address(self) -> None:
try:
data = socket.getaddrinfo(self._address_c.parameter, 80)
except socket.gaierror as e:
message = "Detect IP Address failed, hostname:'{}', message:{}".format(
self._address_c.parameter, e
)
logger.error(message)
raise AddressProviderException(message)
for item in data:
if (
item[0] == socket.AF_INET
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv4
):
ip_address = item[4][0]
self.set_ipv4_address(ip_address)
continue
if (
item[0] == socket.AF_INET6
and item[1] == socket.SOCK_STREAM
and self._address_c.ipv6
):
ip_address = item[4][0]
self.set_ipv6_address(ip_address)
continue
| true
| true
|
f717f02ea026ba8bb72ef7721a359f8e060f9f1e
| 1,289
|
py
|
Python
|
homeassistant/components/homekit/diagnostics.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/homekit/diagnostics.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/homekit/diagnostics.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Diagnostics support for HomeKit."""
from __future__ import annotations
from typing import Any
from pyhap.accessory_driver import AccessoryDriver
from pyhap.state import State
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import HomeKit
from .const import DOMAIN, HOMEKIT
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
homekit: HomeKit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
data: dict[str, Any] = {
"status": homekit.status,
"config-entry": {
"title": entry.title,
"version": entry.version,
"data": dict(entry.data),
"options": dict(entry.options),
},
}
if not hasattr(homekit, "driver"):
return data
driver: AccessoryDriver = homekit.driver
data.update(driver.get_accessories())
state: State = driver.state
data.update(
{
"client_properties": {
str(client): props for client, props in state.client_properties.items()
},
"config_version": state.config_version,
"pairing_id": state.mac,
}
)
return data
| 28.644444
| 87
| 0.644686
|
from __future__ import annotations
from typing import Any
from pyhap.accessory_driver import AccessoryDriver
from pyhap.state import State
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import HomeKit
from .const import DOMAIN, HOMEKIT
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
homekit: HomeKit = hass.data[DOMAIN][entry.entry_id][HOMEKIT]
data: dict[str, Any] = {
"status": homekit.status,
"config-entry": {
"title": entry.title,
"version": entry.version,
"data": dict(entry.data),
"options": dict(entry.options),
},
}
if not hasattr(homekit, "driver"):
return data
driver: AccessoryDriver = homekit.driver
data.update(driver.get_accessories())
state: State = driver.state
data.update(
{
"client_properties": {
str(client): props for client, props in state.client_properties.items()
},
"config_version": state.config_version,
"pairing_id": state.mac,
}
)
return data
| true
| true
|
f717f0755133f546a112ab7edad203a839137d37
| 3,107
|
py
|
Python
|
tests/test_rmsa.py
|
ReleaseTheSpice/optical-rl-gym
|
1913e19ba59dfd1e426d5783b68c045d2daf354a
|
[
"MIT"
] | null | null | null |
tests/test_rmsa.py
|
ReleaseTheSpice/optical-rl-gym
|
1913e19ba59dfd1e426d5783b68c045d2daf354a
|
[
"MIT"
] | null | null | null |
tests/test_rmsa.py
|
ReleaseTheSpice/optical-rl-gym
|
1913e19ba59dfd1e426d5783b68c045d2daf354a
|
[
"MIT"
] | null | null | null |
import os
import gym
from optical_rl_gym.envs.rmsa_env import shortest_path_first_fit, shortest_available_path_first_fit, \
least_loaded_path_first_fit, SimpleMatrixObservation
from optical_rl_gym.utils import evaluate_heuristic, random_policy
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
load = 50
logging.getLogger('rmsaenv').setLevel(logging.INFO)
seed = 20
episodes = 10
episode_length = 100
monitor_files = []
policies = []
# topology_name = 'gbn'
# topology_name = 'nobel-us'
# topology_name = 'germany50'
with open(os.path.join('..', 'examples', 'topologies', 'nsfnet_chen_eon_5-paths.h5'), 'rb') as f:
topology = pickle.load(f)
env_args = dict(topology=topology, seed=10, allow_rejection=True, load=load, mean_service_holding_time=25,
episode_length=episode_length, num_spectrum_resources=64, bit_rate_selection='discrete')
print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))
init_env = gym.make('RMSA-v0', **env_args)
env_rnd = SimpleMatrixObservation(init_env)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes)
print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f} {std_reward_rnd:>7.4f}')
print('\tBit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested)
print('\tRequest blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed)
print(init_env.topology.graph['throughput'])
# exit(0)
env_sp = gym.make('RMSA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp, shortest_path_first_fit, n_eval_episodes=episodes)
print('SP-FF:'.ljust(8), f'{mean_reward_sp:.4f} {std_reward_sp:<7.4f}')
print('\tBit rate blocking:', (env_sp.episode_bit_rate_requested - env_sp.episode_bit_rate_provisioned) / env_sp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sp.episode_services_processed - env_sp.episode_services_accepted) / env_sp.episode_services_processed)
env_sap = gym.make('RMSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes)
print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f} {std_reward_sap:.4f}')
print('\tBit rate blocking:', (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned) / env_sap.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sap.episode_services_processed - env_sap.episode_services_accepted) / env_sap.episode_services_processed)
env_llp = gym.make('RMSA-v0', **env_args)
mean_reward_llp, std_reward_llp = evaluate_heuristic(env_llp, least_loaded_path_first_fit, n_eval_episodes=episodes)
print('LLP-FF:'.ljust(8), f'{mean_reward_llp:.4f} {std_reward_llp:.4f}')
print('\tBit rate blocking:', (env_llp.episode_bit_rate_requested - env_llp.episode_bit_rate_provisioned) / env_llp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_llp.episode_services_processed - env_llp.episode_services_accepted) / env_llp.episode_services_processed)
| 51.783333
| 146
| 0.798519
|
import os
import gym
from optical_rl_gym.envs.rmsa_env import shortest_path_first_fit, shortest_available_path_first_fit, \
least_loaded_path_first_fit, SimpleMatrixObservation
from optical_rl_gym.utils import evaluate_heuristic, random_policy
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
load = 50
logging.getLogger('rmsaenv').setLevel(logging.INFO)
seed = 20
episodes = 10
episode_length = 100
monitor_files = []
policies = []
with open(os.path.join('..', 'examples', 'topologies', 'nsfnet_chen_eon_5-paths.h5'), 'rb') as f:
topology = pickle.load(f)
env_args = dict(topology=topology, seed=10, allow_rejection=True, load=load, mean_service_holding_time=25,
episode_length=episode_length, num_spectrum_resources=64, bit_rate_selection='discrete')
print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))
init_env = gym.make('RMSA-v0', **env_args)
env_rnd = SimpleMatrixObservation(init_env)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes)
print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f} {std_reward_rnd:>7.4f}')
print('\tBit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested)
print('\tRequest blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed)
print(init_env.topology.graph['throughput'])
env_sp = gym.make('RMSA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp, shortest_path_first_fit, n_eval_episodes=episodes)
print('SP-FF:'.ljust(8), f'{mean_reward_sp:.4f} {std_reward_sp:<7.4f}')
print('\tBit rate blocking:', (env_sp.episode_bit_rate_requested - env_sp.episode_bit_rate_provisioned) / env_sp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sp.episode_services_processed - env_sp.episode_services_accepted) / env_sp.episode_services_processed)
env_sap = gym.make('RMSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes)
print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f} {std_reward_sap:.4f}')
print('\tBit rate blocking:', (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned) / env_sap.episode_bit_rate_requested)
print('\tRequest blocking:', (env_sap.episode_services_processed - env_sap.episode_services_accepted) / env_sap.episode_services_processed)
env_llp = gym.make('RMSA-v0', **env_args)
mean_reward_llp, std_reward_llp = evaluate_heuristic(env_llp, least_loaded_path_first_fit, n_eval_episodes=episodes)
print('LLP-FF:'.ljust(8), f'{mean_reward_llp:.4f} {std_reward_llp:.4f}')
print('\tBit rate blocking:', (env_llp.episode_bit_rate_requested - env_llp.episode_bit_rate_provisioned) / env_llp.episode_bit_rate_requested)
print('\tRequest blocking:', (env_llp.episode_services_processed - env_llp.episode_services_accepted) / env_llp.episode_services_processed)
| true
| true
|
f717f2946a256963590e7fa9639348755efec876
| 244
|
py
|
Python
|
pay_using_try_catch.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
pay_using_try_catch.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
pay_using_try_catch.py
|
maainul/Paython
|
c72d7fff3b00bc4f379ca6f9dbef0678f01b55f9
|
[
"DOC"
] | null | null | null |
try:
Hours=input('Enter Hours:')
Rates=input('Enter Rates:')
if int(Hours)>40:
pay=40*int(Rates)+(int(Hours)-40)*(int(Rates)*1.5)
print(pay)
else:
pay=int(Hours)*int(Rates)
print(pay)
except:
print('Error,Please enter numeric input.')
| 20.333333
| 52
| 0.680328
|
try:
Hours=input('Enter Hours:')
Rates=input('Enter Rates:')
if int(Hours)>40:
pay=40*int(Rates)+(int(Hours)-40)*(int(Rates)*1.5)
print(pay)
else:
pay=int(Hours)*int(Rates)
print(pay)
except:
print('Error,Please enter numeric input.')
| false
| true
|
f717f359062fe9bbd5d9893e4b7b8942420830f7
| 1,037
|
py
|
Python
|
auctionbot/users/migrations/0002_auto_20171231_1027.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | null | null | null |
auctionbot/users/migrations/0002_auto_20171231_1027.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | 13
|
2019-12-12T03:07:55.000Z
|
2022-03-07T12:59:27.000Z
|
auctionbot/users/migrations/0002_auto_20171231_1027.py
|
netvigator/auctions
|
f88bcce800b60083a5d1a6f272c51bb540b8342a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-31 03:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
# ('markets', '0012_auto_20171220_1319'),
operations = [
migrations.AddField(
model_name='user',
name='cBio',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='user',
name='cLocation',
field=models.CharField(blank=True, max_length=30),
),
migrations.AddField(
model_name='user',
name='iMarket',
#field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='markets.Market', verbose_name='ebay market (default)'),
field=models.PositiveIntegerField(default=1, verbose_name='ebay market (default)'),
),
]
| 30.5
| 152
| 0.611379
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='cBio',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='user',
name='cLocation',
field=models.CharField(blank=True, max_length=30),
),
migrations.AddField(
model_name='user',
name='iMarket',
field=models.PositiveIntegerField(default=1, verbose_name='ebay market (default)'),
),
]
| true
| true
|
f717f4028211e6a9f3c853dde20a6d21323b607a
| 362
|
py
|
Python
|
examples/list_all_adb_devices.py
|
riquedev/WhatsAppManifest
|
bcbbd48f6f9152024a54172886876d3a725a3a62
|
[
"MIT"
] | 15
|
2020-03-11T17:31:12.000Z
|
2021-11-19T03:26:09.000Z
|
examples/list_all_adb_devices.py
|
riquedev/WhatsAppManifest
|
bcbbd48f6f9152024a54172886876d3a725a3a62
|
[
"MIT"
] | 5
|
2021-03-31T19:43:15.000Z
|
2022-03-12T00:18:38.000Z
|
examples/list_all_adb_devices.py
|
riquedev/WhatsAppManifest
|
bcbbd48f6f9152024a54172886876d3a725a3a62
|
[
"MIT"
] | 4
|
2020-03-11T01:52:57.000Z
|
2021-03-16T04:14:33.000Z
|
from WhatsAppManifest import ADB, Automator
# Note: We need the AdbServer class (even without using SSH) so that Automator can open the internal connection.
with ADB(use_ssh=False) as AdbServer:
automator = Automator(adb_server=AdbServer, adb_host="127.0.0.1", adb_port=5037)
for device in automator.list_devices(state=None):
help(device)
| 36.2
| 112
| 0.748619
|
from WhatsAppManifest import ADB, Automator
with ADB(use_ssh=False) as AdbServer:
automator = Automator(adb_server=AdbServer, adb_host="127.0.0.1", adb_port=5037)
for device in automator.list_devices(state=None):
help(device)
| true
| true
|
f717f4717d60ec922e24c1a81798c104320021d4
| 33,686
|
py
|
Python
|
scipy/interpolate/polyint.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/interpolate/polyint.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/interpolate/polyint.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.misc import factorial
from scipy.lib.six.moves import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"]
class KroghInterpolator(object):
"""
The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points,
optionally with specified derivatives at those points.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
Parameters
----------
xi : array_like, length N
Known x-coordinates
yi : array_like, N by R
Known y-coordinates, interpreted as vectors of length R,
or scalars if R=1. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
"""
def __init__(self, xi, yi):
"""Construct an interpolator passing through the specified points
The polynomial passes through all the pairs (xi,yi). One may additionally
specify a number of derivatives at each point xi; this is done by
repeating the value xi and specifying the derivatives as successive
yi values.
Parameters
----------
xi : array-like, length N
known x-coordinates
yi : array-like, N by R
known y-coordinates, interpreted as vectors of length R,
or scalars if R=1. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
self.xi = np.asarray(xi)
self.yi = np.asarray(yi)
if len(self.yi.shape)==1:
self.vector_valued = False
self.yi = self.yi[:,np.newaxis]
elif len(self.yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n = len(xi)
self.n = n
nn, r = self.yi.shape
if nn!=n:
raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn))
self.r = r
c = np.zeros((n+1,r))
c[0] = yi[0]
Vk = np.zeros((n,r))
for k in xrange(1,n):
s = 0
while s<=k and xi[k-s]==xi[k]:
s += 1
s -= 1
Vk[0] = yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s==0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def __call__(self,x):
"""Evaluate the polynomial at the point x
Parameters
----------
x : scalar or array-like of length N
Returns
-------
y : scalar, array of length R, array of length N, or array of length N by R
If x is a scalar, returns either a vector or a scalar depending on
whether the interpolator is vector-valued or scalar-valued.
If x is a vector, returns a vector of values.
"""
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
pi = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w = x - self.xi[k-1]
pi = w*pi
p = p + np.multiply.outer(pi,self.c[k])
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def derivatives(self,x,der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : None or integer
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be der by N by R. If x is a scalar,
the middle dimension will be dropped; if R is 1 then the
last dimension will be dropped.
Examples
--------
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
r = self.r
if der is None:
der = self.n
dern = min(self.n,der)
pi = np.zeros((n,m))
w = np.zeros((n,m))
pi[0] = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1]*pi[k-1]
p += np.multiply.outer(pi[k],self.c[k])
cn = np.zeros((max(der,n+1),m,r))
cn[:n+1,...] += self.c[:n+1,np.newaxis,:]
cn[0] = p
for k in xrange(1,n):
for i in xrange(1,n-k+1):
pi[i] = w[k+i-1]*pi[i-1]+pi[i]
cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
cn[k]*=factorial(k)
cn[n,...] = 0
if not self.vector_valued:
if scalar:
return cn[:der,0,0]
else:
return cn[:der,:,0]
else:
if scalar:
return cn[:der,0]
else:
return cn[:der]
def derivative(self,x,der):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : None or integer
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be N by R. If x is a scalar,
the middle dimension will be dropped; if R is 1 then the
last dimension will be dropped.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
return self.derivatives(x,der=der+1)[der]
def krogh_interpolate(xi,yi,x,der=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
optionally with specified derivatives at those points.
Evaluates the polynomial or some of its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on Krogh 1970, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation"
The polynomial passes through all the pairs (xi,yi). One may additionally
specify a number of derivatives at each point xi; this is done by
repeating the value xi and specifying the derivatives as successive
yi values.
Parameters
----------
xi : array_like, length N
known x-coordinates
yi : array_like, N by R
known y-coordinates, interpreted as vectors of length R,
or scalars if R=1
x : scalar or array_like of length N
Point or points at which to evaluate the derivatives
der : integer or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be the number of derivatives by N by R.
If x is a scalar, the middle dimension will be dropped; if
the yi are scalars then the last dimension will be dropped.
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
"""
P = KroghInterpolator(xi, yi)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of x values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None
The order of the polynomial to be used in the fitting; f will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
"""
if order is None:
order=degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(object):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None):
"""Construct an object capable of interpolating functions sampled at xi
The values yi need to be provided before the function is evaluated,
but none of the preprocessing depends on them, so rapid updates
are possible.
Parameters
----------
xi : array-like of length N
The x coordinates of the points the polynomial should pass through
yi : array-like N by R or None
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later.
"""
self.n = len(xi)
self.xi = np.asarray(xi)
if yi is not None and len(yi)!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.set_yi(yi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def set_yi(self, yi):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like N by R
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later.
"""
if yi is None:
self.yi = None
return
yi = np.asarray(yi)
if len(yi.shape)==1:
self.vector_valued = False
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n, r = yi.shape
if n!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.yi = yi
self.r = r
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like of length N1
The x coordinates of the points the polynomial should pass through
yi : array_like N1 by R or None
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued. If None the y values
will be supplied later. The yi should be specified if and only if
the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = np.asarray(yi)
if len(yi.shape)==1:
if self.vector_valued:
raise ValueError("Cannot extend dimension %d y vectors with scalars" % self.r)
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
n, r = yi.shape
if r!=self.r:
raise ValueError("Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r))
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi**=-1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : scalar or array-like of length M
Returns
-------
y : scalar or array-like of length R or length M or M by R
The shape of y depends on the shape of x and whether the
interpolator is vector-valued or scalar-valued.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by M, where N is the degree of the polynomial.
"""
scalar = _isscalar(x)
x = np.atleast_1d(x)
c = np.subtract.outer(x,self.xi)
z = c==0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis]
i, j = np.nonzero(z)
p[i] = self.yi[j]
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def barycentric_interpolate(xi, yi, x):
"""
Convenience function for polynomial interpolation
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
Parameters
----------
xi : array_like of length N
The x coordinates of the points the polynomial should pass through
yi : array_like N by R
The y coordinates of the points the polynomial should pass through;
if R>1 the polynomial is vector-valued.
x : scalar or array_like of length M
Returns
-------
y : scalar or array_like of length R or length M or M by R
The shape of y depends on the shape of x and whether the
interpolator is vector-valued or scalar-valued.
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class BarycentricInterpolator.
This is what this function uses internally.
"""
return BarycentricInterpolator(xi, yi)(x)
class PiecewisePolynomial(object):
"""Piecewise polynomial curve specified by points and derivatives
This class represents a curve that is a piecewise polynomial. It
passes through a list of points and has specified derivatives at
each point. The degree of the polynomial may very from segment to
segment, as may the number of derivatives available. The degree
should not exceed about thirty.
Appending points to the end of the curve is efficient.
"""
def __init__(self, xi, yi, orders=None, direction=None):
"""Construct a piecewise polynomial
Parameters
----------
xi : array-like of length N
a sorted list of x-coordinates
yi : list of lists of length N
yi[i] is the list of derivatives known at xi[i]
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
"""
yi0 = np.asarray(yi[0])
if len(yi0.shape)==2:
self.vector_valued = True
self.r = yi0.shape[1]
elif len(yi0.shape)==1:
self.vector_valued = False
self.r = 1
else:
raise ValueError("Each derivative must be a vector, not a higher-rank array")
self.xi = [xi[0]]
self.yi = [yi0]
self.n = 1
self.direction = direction
self.orders = []
self.polynomials = []
self.extend(xi[1:],yi[1:],orders)
def _make_polynomial(self,x1,y1,x2,y2,order,direction):
"""Construct the interpolating polynomial object
Deduces the number of derivatives to match at each end
from order and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
"""
n = order+1
n1 = min(n//2,len(y1))
n2 = min(n-n1,len(y2))
n1 = min(n-n2,len(y1))
if n1+n2!=n:
raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with length y1 or y2.")
xi = np.zeros(n)
if self.vector_valued:
yi = np.zeros((n,self.r))
else:
yi = np.zeros((n,))
xi[:n1] = x1
yi[:n1] = y1[:n1]
xi[n1:] = x2
yi[n1:] = y2[:n2]
return KroghInterpolator(xi,yi)
def append(self, xi, yi, order=None):
"""
Append a single point with derivatives to the PiecewisePolynomial
Parameters
----------
xi : float
yi : array_like
yi is the list of derivatives known at xi
order : integer or None
a polynomial order, or instructions to use the highest
possible order
"""
yi = np.asarray(yi)
if self.vector_valued:
if (len(yi.shape)!=2 or yi.shape[1]!=self.r):
raise ValueError("Each derivative must be a vector of length %d" % self.r)
else:
if len(yi.shape)!=1:
raise ValueError("Each derivative must be a scalar")
if self.direction is None:
self.direction = np.sign(xi-self.xi[-1])
elif (xi-self.xi[-1])*self.direction < 0:
raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
self.xi.append(xi)
self.yi.append(yi)
if order is None:
n1 = len(self.yi[-2])
n2 = len(self.yi[-1])
n = n1+n2
order = n-1
self.orders.append(order)
self.polynomials.append(self._make_polynomial(
self.xi[-2], self.yi[-2],
self.xi[-1], self.yi[-1],
order, self.direction))
self.n += 1
def extend(self, xi, yi, orders=None):
"""
Extend the PiecewisePolynomial by a list of points
Parameters
----------
xi : array_like of length N1
a sorted list of x-coordinates
yi : list of lists of length N1
yi[i] is the list of derivatives known at xi[i]
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
"""
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[i],orders)
else:
self.append(xi[i],yi[i],orders[i])
def __call__(self, x):
"""Evaluate the piecewise polynomial
Parameters
----------
x : scalar or array-like of length N
Returns
-------
y : scalar or array-like of length R or length N or N by R
"""
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos](x)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((m,self.r))
else:
y = np.zeros(m)
for i in xrange(self.n-1):
c = pos==i
y[c] = self.polynomials[i](x[c])
return y
def derivative(self, x, der):
"""
Evaluate a derivative of the piecewise polynomial
Parameters
----------
x : scalar or array_like of length N
der : integer
which single derivative to extract
Returns
-------
y : scalar or array_like of length R or length N or N by R
Notes
-----
This currently computes (using self.derivatives()) all derivatives
of the curve segment containing each x but returns only one.
"""
return self.derivatives(x,der=der+1)[der]
def derivatives(self, x, der):
"""
Evaluate a derivative of the piecewise polynomial
Parameters
----------
x : scalar or array_like of length N
der : integer
how many derivatives (including the function value as
0th derivative) to extract
Returns
-------
y : array_like of shape der by R or der by N or der by N by R
"""
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos].derivatives(x,der=der)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((der,m,self.r))
else:
y = np.zeros((der,m))
for i in xrange(self.n-1):
c = pos==i
y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
return y
def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0):
"""
Convenience function for piecewise polynomial interpolation
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : list of lists
yi[i] is the list of derivatives known at xi[i]. Of length N.
x : scalar or array_like
Of length M.
orders : int or list of ints
a list of polynomial orders, or a single universal order
der : int
Which single derivative to extract.
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
Construction of these piecewise polynomials can be an expensive process;
if you repeatedly evaluate the same polynomial, consider using the class
PiecewisePolynomial (which is what this function does).
"""
P = PiecewisePolynomial(xi, yi, orders)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))
def _find_derivatives(x, y):
# Determine the derivatives at the points y_k, d_k, by using
# PCHIP algorithm is:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
smk = np.sign(mk)
condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0/whmean[~condition]
# For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
# one of d_1 or m_0 is 0, then choose d_0 = 0
dk[0] = _edge_case(mk[0],dk[1])
dk[-1] = _edge_case(mk[-1],dk[-2])
return dk
def pchip(x, y):
"""PCHIP 1-d monotonic cubic interpolation
x and y are arrays of values used to approximate some function f, with
``y = f(x)``. This class factory function returns a callable class whose
``__call__`` method uses monotonic cubic, interpolation to find the value
of new points.
Parameters
----------
x : array
A 1D array of monotonically increasing real values. x cannot
include duplicate values (otherwise f is overspecified)
y : array
A 1-D array of real values. y's length along the interpolation
axis must be equal to the length of x.
Assumes x is sorted in monotonic order (e.g. ``x[1] > x[0]``).
Returns
-------
pchip : PiecewisePolynomial instance
The result of the interpolation.
"""
derivs = _find_derivatives(x,y)
return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None)
| 34.514344
| 206
| 0.591759
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.misc import factorial
from scipy.lib.six.moves import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"]
class KroghInterpolator(object):
def __init__(self, xi, yi):
self.xi = np.asarray(xi)
self.yi = np.asarray(yi)
if len(self.yi.shape)==1:
self.vector_valued = False
self.yi = self.yi[:,np.newaxis]
elif len(self.yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n = len(xi)
self.n = n
nn, r = self.yi.shape
if nn!=n:
raise ValueError("%d x values provided and %d y values; must be equal" % (n, nn))
self.r = r
c = np.zeros((n+1,r))
c[0] = yi[0]
Vk = np.zeros((n,r))
for k in xrange(1,n):
s = 0
while s<=k and xi[k-s]==xi[k]:
s += 1
s -= 1
Vk[0] = yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s==0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def __call__(self,x):
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
pi = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w = x - self.xi[k-1]
pi = w*pi
p = p + np.multiply.outer(pi,self.c[k])
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def derivatives(self,x,der=None):
if _isscalar(x):
scalar = True
m = 1
else:
scalar = False
m = len(x)
x = np.asarray(x)
n = self.n
r = self.r
if der is None:
der = self.n
dern = min(self.n,der)
pi = np.zeros((n,m))
w = np.zeros((n,m))
pi[0] = 1
p = np.zeros((m,self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1]*pi[k-1]
p += np.multiply.outer(pi[k],self.c[k])
cn = np.zeros((max(der,n+1),m,r))
cn[:n+1,...] += self.c[:n+1,np.newaxis,:]
cn[0] = p
for k in xrange(1,n):
for i in xrange(1,n-k+1):
pi[i] = w[k+i-1]*pi[i-1]+pi[i]
cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
cn[k]*=factorial(k)
cn[n,...] = 0
if not self.vector_valued:
if scalar:
return cn[:der,0,0]
else:
return cn[:der,:,0]
else:
if scalar:
return cn[:der,0]
else:
return cn[:der]
def derivative(self,x,der):
return self.derivatives(x,der=der+1)[der]
def krogh_interpolate(xi,yi,x,der=0):
P = KroghInterpolator(xi, yi)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
if order is None:
order=degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(object):
def __init__(self, xi, yi=None):
self.n = len(xi)
self.xi = np.asarray(xi)
if yi is not None and len(yi)!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.set_yi(yi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def set_yi(self, yi):
if yi is None:
self.yi = None
return
yi = np.asarray(yi)
if len(yi.shape)==1:
self.vector_valued = False
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
self.vector_valued = True
n, r = yi.shape
if n!=len(self.xi):
raise ValueError("yi dimensions do not match xi dimensions")
self.yi = yi
self.r = r
def add_xi(self, xi, yi=None):
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = np.asarray(yi)
if len(yi.shape)==1:
if self.vector_valued:
raise ValueError("Cannot extend dimension %d y vectors with scalars" % self.r)
yi = yi[:,np.newaxis]
elif len(yi.shape)>2:
raise ValueError("y coordinates must be either scalars or vectors")
else:
n, r = yi.shape
if r!=self.r:
raise ValueError("Cannot extend dimension %d y vectors with dimension %d y vectors" % (self.r, r))
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi**=-1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j]*=(self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi**=-1
def __call__(self, x):
scalar = _isscalar(x)
x = np.atleast_1d(x)
c = np.subtract.outer(x,self.xi)
z = c==0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis]
i, j = np.nonzero(z)
p[i] = self.yi[j]
if not self.vector_valued:
if scalar:
return p[0,0]
else:
return p[:,0]
else:
if scalar:
return p[0]
else:
return p
def barycentric_interpolate(xi, yi, x):
return BarycentricInterpolator(xi, yi)(x)
class PiecewisePolynomial(object):
def __init__(self, xi, yi, orders=None, direction=None):
yi0 = np.asarray(yi[0])
if len(yi0.shape)==2:
self.vector_valued = True
self.r = yi0.shape[1]
elif len(yi0.shape)==1:
self.vector_valued = False
self.r = 1
else:
raise ValueError("Each derivative must be a vector, not a higher-rank array")
self.xi = [xi[0]]
self.yi = [yi0]
self.n = 1
self.direction = direction
self.orders = []
self.polynomials = []
self.extend(xi[1:],yi[1:],orders)
def _make_polynomial(self,x1,y1,x2,y2,order,direction):
n = order+1
n1 = min(n//2,len(y1))
n2 = min(n-n1,len(y2))
n1 = min(n-n2,len(y1))
if n1+n2!=n:
raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with length y1 or y2.")
xi = np.zeros(n)
if self.vector_valued:
yi = np.zeros((n,self.r))
else:
yi = np.zeros((n,))
xi[:n1] = x1
yi[:n1] = y1[:n1]
xi[n1:] = x2
yi[n1:] = y2[:n2]
return KroghInterpolator(xi,yi)
def append(self, xi, yi, order=None):
yi = np.asarray(yi)
if self.vector_valued:
if (len(yi.shape)!=2 or yi.shape[1]!=self.r):
raise ValueError("Each derivative must be a vector of length %d" % self.r)
else:
if len(yi.shape)!=1:
raise ValueError("Each derivative must be a scalar")
if self.direction is None:
self.direction = np.sign(xi-self.xi[-1])
elif (xi-self.xi[-1])*self.direction < 0:
raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
self.xi.append(xi)
self.yi.append(yi)
if order is None:
n1 = len(self.yi[-2])
n2 = len(self.yi[-1])
n = n1+n2
order = n-1
self.orders.append(order)
self.polynomials.append(self._make_polynomial(
self.xi[-2], self.yi[-2],
self.xi[-1], self.yi[-1],
order, self.direction))
self.n += 1
def extend(self, xi, yi, orders=None):
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[i],orders)
else:
self.append(xi[i],yi[i],orders[i])
def __call__(self, x):
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos](x)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((m,self.r))
else:
y = np.zeros(m)
for i in xrange(self.n-1):
c = pos==i
y[c] = self.polynomials[i](x[c])
return y
def derivative(self, x, der):
return self.derivatives(x,der=der+1)[der]
def derivatives(self, x, der):
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos].derivatives(x,der=der)
else:
x = np.asarray(x)
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
if self.vector_valued:
y = np.zeros((der,m,self.r))
else:
y = np.zeros((der,m))
for i in xrange(self.n-1):
c = pos==i
y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
return y
def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0):
P = PiecewisePolynomial(xi, yi, orders)
if der==0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def _isscalar(x):
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
def _edge_case(m0, d1):
return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))
def _find_derivatives(x, y):
# Determine the derivatives at the points y_k, d_k, by using
# PCHIP algorithm is:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
smk = np.sign(mk)
condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0/whmean[~condition]
# For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
# one of d_1 or m_0 is 0, then choose d_0 = 0
dk[0] = _edge_case(mk[0],dk[1])
dk[-1] = _edge_case(mk[-1],dk[-2])
return dk
def pchip(x, y):
derivs = _find_derivatives(x,y)
return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None)
| true
| true
|
f717f5329e9080881c559dfd976b9a5f38d7606a
| 670
|
py
|
Python
|
680_Valid_Palindrome_II.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | 1
|
2019-12-12T20:16:08.000Z
|
2019-12-12T20:16:08.000Z
|
680_Valid_Palindrome_II.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | null | null | null |
680_Valid_Palindrome_II.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | null | null | null |
class Solution:
def validPalindrome(self, s: str) -> bool:
left, right = self.twopointer(0, len(s) - 1, s)
if left >= right :
return True
return self.valid(left + 1, right, s) or self.valid(left, right - 1, s)
def valid(self, left, right, s) :
l, r = self.twopointer(left, right, s)
if l >= r :
return True
else:
return False
def twopointer(self, left, right, s) :
while left < right :
if s[left] == s[right] :
left += 1
right -= 1
else :
return left, right
return left, right
| 30.454545
| 79
| 0.474627
|
class Solution:
def validPalindrome(self, s: str) -> bool:
left, right = self.twopointer(0, len(s) - 1, s)
if left >= right :
return True
return self.valid(left + 1, right, s) or self.valid(left, right - 1, s)
def valid(self, left, right, s) :
l, r = self.twopointer(left, right, s)
if l >= r :
return True
else:
return False
def twopointer(self, left, right, s) :
while left < right :
if s[left] == s[right] :
left += 1
right -= 1
else :
return left, right
return left, right
| true
| true
|
f717f561ebd073978d59e58a6e54a7189383291d
| 24,623
|
py
|
Python
|
tensorflow_probability/python/distributions/student_t_process.py
|
hendriksanta/probability
|
6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/student_t_process.py
|
hendriksanta/probability
|
6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/student_t_process.py
|
hendriksanta/probability
|
6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The StudentTProcess distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import multivariate_student_t
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'StudentTProcess',
]
def _add_diagonal_shift(matrix, shift):
return tf.linalg.set_diag(
matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')
def make_cholesky_factored_marginal_fn(jitter):
"""Construct a `marginal_fn` for use with `tfd.StudentTProcess`.
The returned function computes the Cholesky factorization of the input
covariance plus a diagonal jitter, and uses that for the `scale` of a
`tfd.MultivariateNormalLinearOperator`.
Args:
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Returns:
marginal_fn: A Python function that takes a location, covariance matrix,
optional `validate_args`, `allow_nan_stats` and `name` arguments, and
returns a `tfd.MultivariateNormalLinearOperator`.
"""
def marginal_fn(
df,
loc,
covariance,
validate_args=False,
allow_nan_stats=False,
name='marginal_distribution'):
squared_scale = ((df - 2.) / df)[
..., tf.newaxis, tf.newaxis] * covariance
scale = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(_add_diagonal_shift(squared_scale, jitter)),
is_non_singular=True,
name='StudentTProcessScaleLinearOperator')
return multivariate_student_t.MultivariateStudentTLinearOperator(
df=df,
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
return marginal_fn
class StudentTProcess(distribution.Distribution):
"""Marginal distribution of a Student's T process at finitely many points.
A Student's T process (TP) is an indexed collection of random variables, any
finite collection of which are jointly Multivariate Student's T. While this
definition applies to finite index sets, it is typically implicit that the
index set is infinite; in applications, it is often some finite dimensional
real or complex vector space. In such cases, the TP may be thought of as a
distribution over (real- or complex-valued) functions defined over the index
set.
Just as Student's T distributions are fully specified by their degrees of
freedom, location and scale, a Student's T process can be completely specified
by a degrees of freedom parameter, mean function and covariance function.
Let `S` denote the index set and `K` the space in
which each indexed random variable takes its values (again, often R or C).
The mean function is then a map `m: S -> K`, and the covariance function,
or kernel, is a positive-definite function `k: (S x S) -> K`. The properties
of functions drawn from a TP are entirely dictated (up to translation) by
the form of the kernel function.
This `Distribution` represents the marginal joint distribution over function
values at a given finite collection of points `[x[1], ..., x[N]]` from the
index set `S`. By definition, this marginal distribution is just a
multivariate Student's T distribution, whose mean is given by the vector
`[ m(x[1]), ..., m(x[N]) ]` and whose covariance matrix is constructed from
pairwise applications of the kernel function to the given inputs:
```none
| k(x[1], x[1]) k(x[1], x[2]) ... k(x[1], x[N]) |
| k(x[2], x[1]) k(x[2], x[2]) ... k(x[2], x[N]) |
| ... ... ... |
| k(x[N], x[1]) k(x[N], x[2]) ... k(x[N], x[N]) |
```
For this to be a valid covariance matrix, it must be symmetric and positive
definite; hence the requirement that `k` be a positive definite function
(which, by definition, says that the above procedure will yield PD matrices).
Note also we use a parameterization as suggested in [1], which requires `df`
to be greater than 2. This allows for the covariance for any finite
dimensional marginal of the TP (a multivariate Student's T distribution) to
just be the PD matrix generated by the kernel.
#### Mathematical Details
The probability density function (pdf) is a multivariate Student's T whose
parameters are derived from the TP's properties:
```none
pdf(x; df, index_points, mean_fn, kernel) = MultivariateStudentT(df, loc, K)
K = (df - 2) / df * (kernel.matrix(index_points, index_points) +
observation_noise_variance * eye(N))
loc = (x - mean_fn(index_points))^T @ K @ (x - mean_fn(index_points))
```
where:
* `df` is the degrees of freedom parameter for the TP.
* `index_points` are points in the index set over which the TP is defined,
* `mean_fn` is a callable mapping the index set to the TP's mean values,
* `kernel` is `PositiveSemidefiniteKernel`-like and represents the covariance
function of the TP,
* `observation_noise_variance` is a term added to the diagonal of the kernel
matrix. In the limit of `df` to `inf`, this represents the observation noise
of a gaussian likelihood.
* `eye(N)` is an N-by-N identity matrix.
#### Examples
##### Draw joint samples from a TP prior
```python
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tf.enable_v2_behavior()
tfd = tfp.distributions
psd_kernels = tfp.math.psd_kernels
num_points = 100
# Index points should be a collection (100, here) of feature vectors. In this
# example, we're using 1-d vectors, so we just need to reshape the output from
# np.linspace, to give a shape of (100, 1).
index_points = np.expand_dims(np.linspace(-1., 1., num_points), -1)
# Define a kernel with default parameters.
kernel = psd_kernels.ExponentiatedQuadratic()
tp = tfd.StudentTProcess(3., kernel, index_points)
samples = tp.sample(10)
# ==> 10 independently drawn, joint samples at `index_points`
noisy_tp = tfd.StudentTProcess(
df=3.,
kernel=kernel,
index_points=index_points)
noisy_samples = noisy_tp.sample(10)
# ==> 10 independently drawn, noisy joint samples at `index_points`
```
##### Optimize kernel parameters via maximum marginal likelihood.
```python
# Suppose we have some data from a known function. Note the index points in
# general have shape `[b1, ..., bB, f1, ..., fF]` (here we assume `F == 1`),
# so we need to explicitly consume the feature dimensions (just the last one
# here).
f = lambda x: np.sin(10*x[..., 0]) * np.exp(-x[..., 0]**2)
observed_index_points = np.expand_dims(np.random.uniform(-1., 1., 50), -1)
# Squeeze to take the shape from [50, 1] to [50].
observed_values = f(observed_index_points)
amplitude = tfp.util.TransformedVariable(
1., tfp.bijectors.Softplus(), dtype=np.float64, name='amplitude')
length_scale = tfp.util.TransformedVariable(
1., tfp.bijectors.Softplus(), dtype=np.float64, name='length_scale')
# Define a kernel with trainable parameters.
kernel = psd_kernels.ExponentiatedQuadratic(
amplitude=amplitude,
length_scale=length_scale)
tp = tfd.StudentTProcess(3., kernel, observed_index_points)
optimizer = tf.optimizers.Adam()
@tf.function
def optimize():
with tf.GradientTape() as tape:
loss = -tp.log_prob(observed_values)
grads = tape.gradient(loss, tp.trainable_variables)
optimizer.apply_gradients(zip(grads, tp.trainable_variables))
return loss
for i in range(1000):
nll = optimize()
if i % 100 == 0:
print("Step {}: NLL = {}".format(i, nll))
print("Final NLL = {}".format(nll))
```
#### References
[1]: Amar Shah, Andrew Gordon Wilson, and Zoubin Ghahramani. Student-t
Processes as Alternatives to Gaussian Processes. In _Artificial
Intelligence and Statistics_, 2014.
https://www.cs.cmu.edu/~andrewgw/tprocess.pdf
"""
@deprecation.deprecated_args(
'2021-06-26',
'`jitter` is deprecated; please use `marginal_fn` directly.',
'jitter')
def __init__(self,
df,
kernel,
index_points=None,
mean_fn=None,
observation_noise_variance=0.,
marginal_fn=None,
jitter=1e-6,
validate_args=False,
allow_nan_stats=False,
name='StudentTProcess'):
"""Instantiate a StudentTProcess Distribution.
Args:
df: Positive Floating-point `Tensor` representing the degrees of freedom.
Must be greater than 2.
kernel: `PositiveSemidefiniteKernel`-like instance representing the
TP's covariance function.
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the TP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to a `e`-dimensional multivariate Student's T. The batch
shape must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
mean_fn: Python `callable` that acts on `index_points` to produce a (batch
of) vector(s) of mean values at `index_points`. Takes a `Tensor` of
shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
broadcastable with `[b1, ..., bB]`. Default value: `None` implies
constant zero function.
observation_noise_variance: `float` `Tensor` representing (batch of)
scalar variance(s) of the noise in the Normal likelihood
distribution of the model. If batched, the batch shape must be
broadcastable with the shapes of all other batched parameters
(`kernel.batch_shape`, `index_points`, etc.).
Default value: `0.`
marginal_fn: A Python callable that takes a location, covariance matrix,
optional `validate_args`, `allow_nan_stats` and `name` arguments, and
returns a multivariate normal subclass of `tfd.Distribution`.
Default value: `None`, in which case a Cholesky-factorizing function is
is created using `make_cholesky_factorizing_marginal_fn` and the
`jitter` argument.
jitter: `float` scalar `Tensor` added to the diagonal of the covariance
matrix to ensure positive definiteness of the covariance matrix.
Default value: `1e-6`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
Default value: `False`.
name: Python `str` name prefixed to Ops created by this class.
Default value: "StudentTProcess".
Raises:
ValueError: if `mean_fn` is not `None` and is not callable.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[df, index_points, observation_noise_variance, jitter], tf.float32)
df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')
observation_noise_variance = tensor_util.convert_nonref_to_tensor(
observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
index_points = tensor_util.convert_nonref_to_tensor(
index_points, dtype=dtype, name='index_points')
jitter = tensor_util.convert_nonref_to_tensor(
jitter, dtype=dtype, name='jitter')
self._kernel = kernel
self._index_points = index_points
# Default to a constant zero function, borrowing the dtype from
# index_points to ensure consistency.
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
self._df = df
self._observation_noise_variance = observation_noise_variance
self._mean_fn = mean_fn
self._jitter = jitter
if marginal_fn is None:
self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)
else:
self._marginal_fn = marginal_fn
with tf.name_scope('init'):
super(StudentTProcess, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def _is_univariate_marginal(self, index_points):
"""True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Student T distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
"""
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal Student T '
'Process at `index_points` as a multivariate Student T. This makes '
'some methods, like `cdf` unavailable.')
return num_index_points == 1
def _compute_covariance(self, index_points):
kernel_matrix = self.kernel.matrix(index_points, index_points)
if self._is_univariate_marginal(index_points):
# kernel_matrix thus has shape [..., 1, 1]; squeeze off the last dims and
# tack on the observation noise variance.
return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +
self.observation_noise_variance)
else:
observation_noise_variance = tf.convert_to_tensor(
self.observation_noise_variance)
# We are compute K + obs_noise_variance * I. The shape of this matrix
# is going to be a broadcast of the shapes of K and obs_noise_variance *
# I.
broadcast_shape = distribution_util.get_broadcast_shape(
kernel_matrix,
# We pad with two single dimension since this represents a batch of
# scaled identity matrices.
observation_noise_variance[..., tf.newaxis, tf.newaxis])
kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)
return _add_diagonal_shift(
kernel_matrix, observation_noise_variance[..., tf.newaxis])
def get_marginal_distribution(self, index_points=None):
"""Compute the marginal over function values at `index_points`.
Args:
index_points: `float` `Tensor` representing finite (batch of) vector(s) of
points in the index set over which the TP is defined. Shape has the form
`[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature
dimensions and must equal `kernel.feature_ndims` and `e` is the number
(size) of index points in each batch. Ultimately this distribution
corresponds to a `e`-dimensional multivariate student t. The batch shape
must be broadcastable with `kernel.batch_shape` and any batch dims
yielded by `mean_fn`.
Returns:
marginal: a `StudentT` or `MultivariateStudentT` distribution,
according to whether `index_points` consists of one or many index
points, respectively.
"""
with self._name_and_control_scope('get_marginal_distribution'):
df = tf.convert_to_tensor(self.df)
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
# If we're sure the number of index points is 1, we can just construct a
# scalar Normal. This has computational benefits and supports things like
# CDF that aren't otherwise straightforward to provide.
if self._is_univariate_marginal(index_points):
squared_scale = (df - 2.) / df * covariance
scale = tf.sqrt(squared_scale)
# `loc` has a trailing 1 in the shape; squeeze it.
loc = tf.squeeze(loc, axis=-1)
return student_t.StudentT(
df=df,
loc=loc,
scale=scale,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
else:
return self._marginal_fn(
df=df,
loc=loc,
covariance=covariance,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
@property
def df(self):
return self._df
@property
def observation_noise_variance(self):
return self._observation_noise_variance
@property
def mean_fn(self):
return self._mean_fn
@property
def kernel(self):
return self._kernel
@property
def index_points(self):
return self._index_points
@property
def marginal_fn(self):
return self._marginal_fn
@property
def jitter(self):
return self._jitter
def _get_index_points(self, index_points=None):
"""Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
Rases:
ValueError: if `index_points` and `self._index_points` are both `None`.
"""
if self._index_points is None and index_points is None:
raise ValueError(
'This StudentTProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods.')
return (index_points if index_points is not None
else tf.convert_to_tensor(self._index_points))
def _log_prob(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_prob(value)
def _batch_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
return functools.reduce(tf.broadcast_dynamic_shape, [
tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape_tensor(),
tf.shape(self.observation_noise_variance),
tf.shape(self.df)
])
def _batch_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
return functools.reduce(
tf.broadcast_static_shape,
[index_points.shape[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape,
self.observation_noise_variance.shape,
self.df.shape])
def _event_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
if self._is_univariate_marginal(index_points):
return tf.constant([], dtype=tf.int32)
else:
# The examples index is one position to the left of the feature dims.
examples_index = -(self.kernel.feature_ndims + 1)
return tf.shape(index_points)[examples_index:examples_index + 1]
def _event_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
if self._is_univariate_marginal(index_points):
return tf.TensorShape([])
else:
# The examples index is one position to the left of the feature dims.
examples_index = -(self.kernel.feature_ndims + 1)
shape = index_points.shape[examples_index:examples_index + 1]
if tensorshape_util.rank(shape) is None:
return tf.TensorShape([None])
return shape
def _sample_n(self, n, seed=None, index_points=None):
return self.get_marginal_distribution(index_points).sample(n, seed=seed)
def _log_survival_function(self, value, index_points=None):
return self.get_marginal_distribution(
index_points).log_survival_function(value)
def _survival_function(self, value, index_points=None):
return self.get_marginal_distribution(index_points).survival_function(value)
def _log_cdf(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_cdf(value)
def _entropy(self, index_points=None):
return self.get_marginal_distribution(index_points).entropy()
def _mean(self, index_points=None):
return self.get_marginal_distribution(index_points).mean()
def _quantile(self, value, index_points=None):
return self.get_marginal_distribution(index_points).quantile(value)
def _stddev(self, index_points=None):
return tf.sqrt(self._variance(index_points=index_points))
def _variance(self, index_points=None):
index_points = self._get_index_points(index_points)
kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)
if self._is_univariate_marginal(index_points):
return (tf.squeeze(kernel_diag, axis=[-1]) +
self.observation_noise_variance)
else:
# We are computing diag(K + obs_noise_variance * I) = diag(K) +
# obs_noise_variance. We pad obs_noise_variance with a dimension in order
# to broadcast batch shapes of kernel_diag and obs_noise_variance (since
# kernel_diag has an extra dimension corresponding to the number of index
# points).
return kernel_diag + self.observation_noise_variance[..., tf.newaxis]
def _covariance(self, index_points=None):
# Using the result of get_marginal_distribution would involve an extra
# matmul, and possibly even an unnecessary cholesky first. We can avoid that
# by going straight through the kernel function.
return self._compute_covariance(self._get_index_points(index_points))
def _mode(self, index_points=None):
return self.get_marginal_distribution(index_points).mode()
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.df):
assertions.append(
assert_util.assert_greater(
self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),
message='`df` must be greater than 2.'))
return assertions
| 41.038333
| 92
| 0.699671
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import multivariate_student_t
from tensorflow_probability.python.distributions import student_t
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation
__all__ = [
'StudentTProcess',
]
def _add_diagonal_shift(matrix, shift):
return tf.linalg.set_diag(
matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')
def make_cholesky_factored_marginal_fn(jitter):
def marginal_fn(
df,
loc,
covariance,
validate_args=False,
allow_nan_stats=False,
name='marginal_distribution'):
squared_scale = ((df - 2.) / df)[
..., tf.newaxis, tf.newaxis] * covariance
scale = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(_add_diagonal_shift(squared_scale, jitter)),
is_non_singular=True,
name='StudentTProcessScaleLinearOperator')
return multivariate_student_t.MultivariateStudentTLinearOperator(
df=df,
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
return marginal_fn
class StudentTProcess(distribution.Distribution):
@deprecation.deprecated_args(
'2021-06-26',
'`jitter` is deprecated; please use `marginal_fn` directly.',
'jitter')
def __init__(self,
df,
kernel,
index_points=None,
mean_fn=None,
observation_noise_variance=0.,
marginal_fn=None,
jitter=1e-6,
validate_args=False,
allow_nan_stats=False,
name='StudentTProcess'):
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[df, index_points, observation_noise_variance, jitter], tf.float32)
df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')
observation_noise_variance = tensor_util.convert_nonref_to_tensor(
observation_noise_variance,
dtype=dtype,
name='observation_noise_variance')
index_points = tensor_util.convert_nonref_to_tensor(
index_points, dtype=dtype, name='index_points')
jitter = tensor_util.convert_nonref_to_tensor(
jitter, dtype=dtype, name='jitter')
self._kernel = kernel
self._index_points = index_points
if mean_fn is None:
mean_fn = lambda x: tf.zeros([1], dtype=dtype)
else:
if not callable(mean_fn):
raise ValueError('`mean_fn` must be a Python callable')
self._df = df
self._observation_noise_variance = observation_noise_variance
self._mean_fn = mean_fn
self._jitter = jitter
if marginal_fn is None:
self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)
else:
self._marginal_fn = marginal_fn
with tf.name_scope('init'):
super(StudentTProcess, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal Student T '
'Process at `index_points` as a multivariate Student T. This makes '
'some methods, like `cdf` unavailable.')
return num_index_points == 1
def _compute_covariance(self, index_points):
kernel_matrix = self.kernel.matrix(index_points, index_points)
if self._is_univariate_marginal(index_points):
return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +
self.observation_noise_variance)
else:
observation_noise_variance = tf.convert_to_tensor(
self.observation_noise_variance)
broadcast_shape = distribution_util.get_broadcast_shape(
kernel_matrix,
observation_noise_variance[..., tf.newaxis, tf.newaxis])
kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)
return _add_diagonal_shift(
kernel_matrix, observation_noise_variance[..., tf.newaxis])
def get_marginal_distribution(self, index_points=None):
with self._name_and_control_scope('get_marginal_distribution'):
df = tf.convert_to_tensor(self.df)
index_points = self._get_index_points(index_points)
covariance = self._compute_covariance(index_points)
loc = self._mean_fn(index_points)
# scalar Normal. This has computational benefits and supports things like
# CDF that aren't otherwise straightforward to provide.
if self._is_univariate_marginal(index_points):
squared_scale = (df - 2.) / df * covariance
scale = tf.sqrt(squared_scale)
loc = tf.squeeze(loc, axis=-1)
return student_t.StudentT(
df=df,
loc=loc,
scale=scale,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
else:
return self._marginal_fn(
df=df,
loc=loc,
covariance=covariance,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats,
name='marginal_distribution')
@property
def df(self):
return self._df
@property
def observation_noise_variance(self):
return self._observation_noise_variance
@property
def mean_fn(self):
return self._mean_fn
@property
def kernel(self):
return self._kernel
@property
def index_points(self):
return self._index_points
@property
def marginal_fn(self):
return self._marginal_fn
@property
def jitter(self):
return self._jitter
def _get_index_points(self, index_points=None):
if self._index_points is None and index_points is None:
raise ValueError(
'This StudentTProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods.')
return (index_points if index_points is not None
else tf.convert_to_tensor(self._index_points))
def _log_prob(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_prob(value)
def _batch_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
return functools.reduce(tf.broadcast_dynamic_shape, [
tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape_tensor(),
tf.shape(self.observation_noise_variance),
tf.shape(self.df)
])
def _batch_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
return functools.reduce(
tf.broadcast_static_shape,
[index_points.shape[:-(self.kernel.feature_ndims + 1)],
self.kernel.batch_shape,
self.observation_noise_variance.shape,
self.df.shape])
def _event_shape_tensor(self, index_points=None):
index_points = self._get_index_points(index_points)
if self._is_univariate_marginal(index_points):
return tf.constant([], dtype=tf.int32)
else:
examples_index = -(self.kernel.feature_ndims + 1)
return tf.shape(index_points)[examples_index:examples_index + 1]
def _event_shape(self, index_points=None):
index_points = (
index_points if index_points is not None else self._index_points)
if self._is_univariate_marginal(index_points):
return tf.TensorShape([])
else:
examples_index = -(self.kernel.feature_ndims + 1)
shape = index_points.shape[examples_index:examples_index + 1]
if tensorshape_util.rank(shape) is None:
return tf.TensorShape([None])
return shape
def _sample_n(self, n, seed=None, index_points=None):
return self.get_marginal_distribution(index_points).sample(n, seed=seed)
def _log_survival_function(self, value, index_points=None):
return self.get_marginal_distribution(
index_points).log_survival_function(value)
def _survival_function(self, value, index_points=None):
return self.get_marginal_distribution(index_points).survival_function(value)
def _log_cdf(self, value, index_points=None):
return self.get_marginal_distribution(index_points).log_cdf(value)
def _entropy(self, index_points=None):
return self.get_marginal_distribution(index_points).entropy()
def _mean(self, index_points=None):
return self.get_marginal_distribution(index_points).mean()
def _quantile(self, value, index_points=None):
return self.get_marginal_distribution(index_points).quantile(value)
def _stddev(self, index_points=None):
return tf.sqrt(self._variance(index_points=index_points))
def _variance(self, index_points=None):
index_points = self._get_index_points(index_points)
kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)
if self._is_univariate_marginal(index_points):
return (tf.squeeze(kernel_diag, axis=[-1]) +
self.observation_noise_variance)
else:
return kernel_diag + self.observation_noise_variance[..., tf.newaxis]
def _covariance(self, index_points=None):
return self._compute_covariance(self._get_index_points(index_points))
def _mode(self, index_points=None):
return self.get_marginal_distribution(index_points).mode()
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.df):
assertions.append(
assert_util.assert_greater(
self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),
message='`df` must be greater than 2.'))
return assertions
| true
| true
|
f717f5f0396b42207e68544fbe1af909acdf9d1d
| 520
|
py
|
Python
|
rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 234
|
2016-01-27T03:04:38.000Z
|
2022-02-25T20:13:43.000Z
|
rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 351
|
2016-04-06T16:55:33.000Z
|
2022-03-10T18:42:36.000Z
|
rest/test-credentials/test-calls-example-2/test-calls-example-2.6.x.py
|
Tshisuaka/api-snippets
|
52b50037d4af0f3b96adf76197964725a1501e96
|
[
"MIT"
] | 494
|
2016-03-30T15:28:20.000Z
|
2022-03-28T19:39:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
call = client.calls.create(
url="http://demo.twilio.com/docs/voice.xml",
to="+15005550003",
from_="+15005550006"
)
print(call.sid)
| 27.368421
| 72
| 0.755769
|
import os
from twilio.rest import Client
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
call = client.calls.create(
url="http://demo.twilio.com/docs/voice.xml",
to="+15005550003",
from_="+15005550006"
)
print(call.sid)
| true
| true
|
f717f64d749aac3930348898cd007d5ac9c4b917
| 1,314
|
py
|
Python
|
example/test_img_similarity.py
|
Pandinosaurus/img2vec
|
e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0
|
[
"MIT"
] | 1
|
2019-05-31T14:02:51.000Z
|
2019-05-31T14:02:51.000Z
|
example/test_img_similarity.py
|
Pandinosaurus/img2vec
|
e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0
|
[
"MIT"
] | null | null | null |
example/test_img_similarity.py
|
Pandinosaurus/img2vec
|
e80c2f46ee707fb95d7bd6944b5c224acc1ec8c0
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append("../img2vec_pytorch") # Adds higher directory to python modules path.
from img_to_vec import Img2Vec
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
input_path = './test_images'
print("Getting vectors for test images...\n")
img2vec = Img2Vec()
# For each test image, we store the filename and vector as key, value in a dictionary
pics = {}
for file in os.listdir(input_path):
filename = os.fsdecode(file)
img = Image.open(os.path.join(input_path, filename)).convert('RGB')
vec = img2vec.get_vec(img)
pics[filename] = vec
available_filenames = ", ".join(pics.keys())
pic_name = ""
while pic_name != "exit":
pic_name = str(input("\nWhich filename would you like similarities for?\nAvailable options: " + available_filenames + "\n"))
try:
sims = {}
for key in list(pics.keys()):
if key == pic_name:
continue
sims[key] = cosine_similarity(pics[pic_name].reshape((1, -1)), pics[key].reshape((1, -1)))[0][0]
d_view = [(v, k) for k, v in sims.items()]
d_view.sort(reverse=True)
for v, k in d_view:
print(v, k)
except KeyError as e:
print('Could not find filename %s' % e)
except Exception as e:
print(e)
| 29.2
| 128
| 0.642314
|
import sys
import os
sys.path.append("../img2vec_pytorch")
from img_to_vec import Img2Vec
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
input_path = './test_images'
print("Getting vectors for test images...\n")
img2vec = Img2Vec()
pics = {}
for file in os.listdir(input_path):
filename = os.fsdecode(file)
img = Image.open(os.path.join(input_path, filename)).convert('RGB')
vec = img2vec.get_vec(img)
pics[filename] = vec
available_filenames = ", ".join(pics.keys())
pic_name = ""
while pic_name != "exit":
pic_name = str(input("\nWhich filename would you like similarities for?\nAvailable options: " + available_filenames + "\n"))
try:
sims = {}
for key in list(pics.keys()):
if key == pic_name:
continue
sims[key] = cosine_similarity(pics[pic_name].reshape((1, -1)), pics[key].reshape((1, -1)))[0][0]
d_view = [(v, k) for k, v in sims.items()]
d_view.sort(reverse=True)
for v, k in d_view:
print(v, k)
except KeyError as e:
print('Could not find filename %s' % e)
except Exception as e:
print(e)
| true
| true
|
f717f6ee21c9fa11dd8b2998e6722883254a2f34
| 8,734
|
py
|
Python
|
testing/test_cde_io.py
|
eberharf/cfl
|
077b99a05824f1371ac47d76dfed6bb160222668
|
[
"BSD-3-Clause"
] | 6
|
2021-01-09T04:46:55.000Z
|
2022-03-19T22:27:13.000Z
|
testing/test_cde_io.py
|
eberharf/cfl
|
077b99a05824f1371ac47d76dfed6bb160222668
|
[
"BSD-3-Clause"
] | 12
|
2021-01-11T16:32:58.000Z
|
2022-03-19T13:21:30.000Z
|
testing/test_cde_io.py
|
eberharf/cfl
|
077b99a05824f1371ac47d76dfed6bb160222668
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
from shutil import Error
import unittest
import numpy as np
import tensorflow as tf
from cdes_for_testing import all_cdes
from cfl.dataset import Dataset
''' The following code runs all tests in CondExpInputTests on all implemented
CondExpXxxx classes.
'''
def make_cde_io_tests(cond_exp_class):
# generic test class for any CondExpBase descendant
# (passed in as cond_exp_class)
class CondExpIOTests(unittest.TestCase):
def setUp(self): # overriden unittest.TestCase method that will be
# called in initializaiton
self.data_info = { 'X_dims' : (10,3),
'Y_dims' : (10,2),
'Y_type' : 'continuous'}
self.params = { 'show_plot' : False,
'n_epochs' : 2}
self.ceb = cond_exp_class(self.data_info, self.params)
## INIT ###############################################################
def test_init_wrong_input_types(self):
data_info = 'str is bad'
params = 'these are not params'
self.assertRaises(AssertionError, cond_exp_class, data_info, params)
def test_init_wrong_data_info_keys(self):
data_info = {}
params = {}
self.assertRaises(AssertionError, cond_exp_class, data_info,
params)
def test_init_wrong_data_info_value_types(self):
data_info = {'X_dims' : None, 'Y_dims' : None, 'Y_type' : None}
params = {}
self.assertRaises(AssertionError, cond_exp_class, data_info,
params)
def test_init_wrong_data_info_values(self):
data_info = { 'X_dims' : (0,0),
'Y_dims' : (0,0),
'Y_type' : 'continuous'}
params = {}
self.assertRaises(AssertionError, cond_exp_class, data_info,
params)
data_info = { 'X_dims' : (10,3),
'Y_dims' : (12,2),
'Y_type' : 'continuous'}
params = {}
self.assertRaises(AssertionError, cond_exp_class, data_info,
params)
def test_init_correct_inputs(self):
data_info = {'X_dims' : (10,3),
'Y_dims' : (10,2),
'Y_type' : 'continuous'}
params = {}
ceb = cond_exp_class(data_info, params)
## SAVE_BLOCK #########################################################
def test_save_block_wrong_input_type(self):
path = 123
self.assertRaises(AssertionError, self.ceb.save_block, path)
def test_save_block_correct_input_type(self):
path = 'not/a/real/path'
self.ceb.save_block(path)
shutil.rmtree('not')
## LOAD_BLOCK #########################################################
def test_load_block_wrong_input_type(self):
path = 123
self.assertRaises(AssertionError, self.ceb.load_block, path)
def test_load_block_correct_input_type(self):
# should only be run after test_save_block_correct_input_type so
# there is something to load
path = 'not/a/real/path'
self.ceb.save_block(path)
self.ceb.load_block(path)
shutil.rmtree('not')
# check and reset state
assert self.ceb.trained, 'CDE should be trained after loading'
self.ceb.trained = False
### TRAIN ############################################################
def test_train_wrong_input_type(self):
dataset = 'this is not a Dataset'
prev_results = 'this is not a dict'
self.assertRaises(AssertionError, self.ceb.train, dataset,
prev_results)
def test_train_correct_input_type(self):
dataset = Dataset(X=np.ones(self.data_info['X_dims']),
Y=np.zeros(self.data_info['Y_dims']))
# what we expect from train outputs
tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
tshapes = {'train_loss' : (self.params['n_epochs'],),
'val_loss' : (self.params['n_epochs'],),
'pyx' : (self.data_info['Y_dims'])
}
for prev_results in [None, {}]:
# reset
self.ceb.trained = False
train_results = self.ceb.train(dataset, prev_results)
# check state
assert self.ceb.trained, 'CDE should be trained after loading'
# check outputs
assert set(train_results.keys())==set(tkeys), \
f'train should return dict with keys: {tkeys}'
for k in tshapes.keys():
assert tshapes[k]==np.array(train_results[k]).shape, \
f'expected {k} to have shape {tshapes[k]} but got \
{train_results[k].shape}'
def test_train_twice(self):
dataset = Dataset(X=np.ones(self.data_info['X_dims']),
Y=np.zeros(self.data_info['Y_dims']))
prev_results = None
# reset
self.ceb.trained = False
# what we expect from train outputs first time
tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']
train_results = self.ceb.train(dataset, prev_results)
# check state and outputs
assert self.ceb.trained, 'CDE should be trained after loading'
assert set(train_results.keys())==set(tkeys), \
f'train should return dict with keys: {tkeys}'
# what we expect from train outputs second time
tkeys = ['pyx']
train_results = self.ceb.train(dataset, prev_results)
# check state and outputs
assert self.ceb.trained, 'CDE should be trained after loading'
assert set(train_results.keys())==set(tkeys), \
f'train should return dict with keys: {tkeys}'
### PREDICT ##########################################################
def test_predict_wrong_input_type(self):
# artifically set CDE trained = True
self.ceb.trained = True
dataset = 'this is not a Dataset'
prev_results = 'this is not a dict'
self.assertRaises(AssertionError, self.ceb.predict, dataset,
prev_results)
def test_predict_correct_input_type(self):
dataset = Dataset(X=np.ones(self.data_info['X_dims']),
Y=np.zeros(self.data_info['Y_dims']))
prev_results = None
for prev_results in [None, {}]:
self.ceb.train(dataset, prev_results)
pred_results = self.ceb.predict(dataset, prev_results)
# check output
assert set(pred_results.keys())==set(['pyx']), f'pred_results \
keys should contain pyx, but contains {pred_results.keys()}'
assert pred_results['pyx'].shape==self.data_info['Y_dims'], \
f"expected {self.data_info['Y_dims']} but got \
{pred_results['pyx'].shape}"
### EVALUATE #########################################################
def test_evaluate_wrong_input_type(self):
# artifically set CDE trained = True
self.ceb.trained = True
dataset = 'this is not a Dataset'
prev_results = 'this is not a dict'
self.assertRaises(AssertionError, self.ceb.evaluate, dataset)
def test_evaluate_correct_input_type(self):
dataset = Dataset(X=np.ones(self.data_info['X_dims']),
Y=np.zeros(self.data_info['Y_dims']))
prev_results = None
self.ceb.train(dataset, prev_results)
score = self.ceb.evaluate(dataset)
assert score.shape==()
assert score.dtype==np.float32
### BUILD_MODEL ######################################################
def test_build_model(self):
assert isinstance(self.ceb._build_model(), tf.keras.Sequential)
return CondExpIOTests
for cond_exp_class in all_cdes:
class ConcreteIOTests(make_cde_io_tests(cond_exp_class)):
pass
| 39.342342
| 80
| 0.517174
|
import os
import shutil
from shutil import Error
import unittest
import numpy as np
import tensorflow as tf
from cdes_for_testing import all_cdes
from cfl.dataset import Dataset
def make_cde_io_tests(cond_exp_class):
class CondExpIOTests(unittest.TestCase):
def setUp(self):
self.data_info = { 'X_dims' : (10,3),
'Y_dims' : (10,2),
'Y_type' : 'continuous'}
self.params = { 'show_plot' : False,
'n_epochs' : 2}
self.ceb = cond_exp_class(self.data_info, self.params)
| true
| true
|
f717f76f6731c769b821c9ceaf17edbc8eba9b54
| 50,551
|
py
|
Python
|
python/ccxt/async_support/bitbay.py
|
mariuszskon/ccxt
|
13253de7346e33cd384f79abf7dfb64dcbfdc35f
|
[
"MIT"
] | 4
|
2021-09-24T09:18:36.000Z
|
2022-03-15T16:47:09.000Z
|
python/ccxt/async_support/bitbay.py
|
mariuszskon/ccxt
|
13253de7346e33cd384f79abf7dfb64dcbfdc35f
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/bitbay.py
|
mariuszskon/ccxt
|
13253de7346e33cd384f79abf7dfb64dcbfdc35f
|
[
"MIT"
] | 2
|
2021-10-01T21:51:37.000Z
|
2021-10-02T16:23:05.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitbay(Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'], # Malta
'rateLimit': 1000,
'has': {
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'fetchBalance': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTrades': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'3d': '259200',
'1w': '604800',
},
'hostname': 'bitbay.net',
'urls': {
'referral': 'https://auth.bitbay.net/ref/jHlbB4mIkdS1',
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://{hostname}/API/Public',
'private': 'https://{hostname}/API/Trading/tradingApi.php',
'v1_01Public': 'https://api.{hostname}/rest',
'v1_01Private': 'https://api.{hostname}/rest',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/en/private-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
'https://docs.bitbay.net/v1.0.1-en/reference',
],
'support': 'https://support.bitbay.net',
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
'v1_01Public': {
'get': [
'trading/ticker',
'trading/ticker/{symbol}',
'trading/stats',
'trading/orderbook/{symbol}',
'trading/transactions/{symbol}',
'trading/candle/history/{symbol}/{resolution}',
],
},
'v1_01Private': {
'get': [
'payments/withdrawal/{detailId}',
'payments/deposit/{detailId}',
'trading/offer',
'trading/config/{symbol}',
'trading/history/transactions',
'balances/BITBAY/history',
'balances/BITBAY/balance',
'fiat_cantor/rate/{baseId}/{quoteId}',
'fiat_cantor/history',
],
'post': [
'trading/offer/{symbol}',
'trading/config/{symbol}',
'balances/BITBAY/balance',
'balances/BITBAY/balance/transfer/{source}/{destination}',
'fiat_cantor/exchange',
],
'delete': [
'trading/offer/{symbol}/{id}/{side}/{price}',
],
'put': [
'balances/BITBAY/balance/{id}',
],
},
},
'fees': {
'trading': {
'maker': 0.0,
'taker': 0.1 / 100,
'percentage': True,
'tierBased': False,
},
'fiat': {
'maker': 0.30 / 100,
'taker': 0.43 / 100,
'percentage': True,
'tierBased': True,
'tiers': {
'taker': [
[0.0043, 0],
[0.0042, 1250],
[0.0041, 3750],
[0.0040, 7500],
[0.0039, 10000],
[0.0038, 15000],
[0.0037, 20000],
[0.0036, 25000],
[0.0035, 37500],
[0.0034, 50000],
[0.0033, 75000],
[0.0032, 100000],
[0.0031, 150000],
[0.0030, 200000],
[0.0029, 250000],
[0.0028, 375000],
[0.0027, 500000],
[0.0026, 625000],
[0.0025, 875000],
],
'maker': [
[0.0030, 0],
[0.0029, 1250],
[0.0028, 3750],
[0.0028, 7500],
[0.0027, 10000],
[0.0026, 15000],
[0.0025, 20000],
[0.0025, 25000],
[0.0024, 37500],
[0.0023, 50000],
[0.0023, 75000],
[0.0022, 100000],
[0.0021, 150000],
[0.0021, 200000],
[0.0020, 250000],
[0.0019, 375000],
[0.0018, 500000],
[0.0018, 625000],
[0.0017, 875000],
],
},
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'options': {
'fiatCurrencies': ['EUR', 'USD', 'GBP', 'PLN'],
},
'exceptions': {
'400': ExchangeError, # At least one parameter wasn't set
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds, # No enough money or crypto
# code 407 not specified are not specified in their docs
'408': InvalidOrder, # Invalid currency name
'501': AuthenticationError, # Invalid public key
'502': AuthenticationError, # Invalid sign
'503': InvalidNonce, # Invalid moment parameter. Request time doesn't match current server time
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AccountSuspended, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': BadSymbol, # Invalid market name
'FUNDS_NOT_SUFFICIENT': InsufficientFunds,
'OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS': InvalidOrder,
'OFFER_NOT_FOUND': OrderNotFound,
'OFFER_WOULD_HAVE_BEEN_PARTIALLY_FILLED': OrderImmediatelyFillable,
'ACTION_LIMIT_EXCEEDED': RateLimitExceeded,
'UNDER_MAINTENANCE': OnMaintenance,
'REQUEST_TIMESTAMP_TOO_OLD': InvalidNonce,
'PERMISSIONS_NOT_SUFFICIENT': PermissionDenied,
},
'commonCurrencies': {
'GGC': 'Global Game Coin',
},
})
async def fetch_markets(self, params={}):
response = await self.v1_01PublicGetTradingTicker(params)
fiatCurrencies = self.safe_value(self.options, 'fiatCurrencies', [])
#
# {
# status: 'Ok',
# items: {
# 'BSV-USD': {
# market: {
# code: 'BSV-USD',
# first: {currency: 'BSV', minOffer: '0.00035', scale: 8},
# second: {currency: 'USD', minOffer: '5', scale: 2}
# },
# time: '1557569762154',
# highestBid: '52.31',
# lowestAsk: '62.99',
# rate: '63',
# previousRate: '51.21',
# },
# },
# }
#
result = []
items = self.safe_value(response, 'items')
keys = list(items.keys())
for i in range(0, len(keys)):
key = keys[i]
item = items[key]
market = self.safe_value(item, 'market', {})
first = self.safe_value(market, 'first', {})
second = self.safe_value(market, 'second', {})
baseId = self.safe_string(first, 'currency')
quoteId = self.safe_string(second, 'currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(first, 'scale'),
'price': self.safe_integer(second, 'scale'),
}
fees = self.safe_value(self.fees, 'trading', {})
if self.in_array(base, fiatCurrencies) or self.in_array(quote, fiatCurrencies):
fees = self.safe_value(self.fees, 'fiat', {})
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
# todo: check that the limits have ben interpreted correctly
# todo: parse the fees page
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'active': None,
'maker': maker,
'taker': taker,
'limits': {
'amount': {
'min': self.safe_number(first, 'minOffer'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(second, 'minOffer'),
'max': None,
},
},
'info': item,
})
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
response = await self.v1_01PrivateGetTradingOffer(self.extend(request, params))
items = self.safe_value(response, 'items', [])
return self.parse_orders(items, None, since, limit, {'status': 'open'})
def parse_order(self, order, market=None):
#
# {
# market: 'ETH-EUR',
# offerType: 'Sell',
# id: '93d3657b-d616-11e9-9248-0242ac110005',
# currentAmount: '0.04',
# lockedAmount: '0.04',
# rate: '280',
# startAmount: '0.04',
# time: '1568372806924',
# postOnly: False,
# hidden: False,
# mode: 'limit',
# receivedAmount: '0.0',
# firstBalanceId: '5b816c3e-437c-4e43-9bef-47814ae7ebfc',
# secondBalanceId: 'ab43023b-4079-414c-b340-056e3430a3af'
# }
#
marketId = self.safe_string(order, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(order, 'time')
amount = self.safe_number(order, 'startAmount')
remaining = self.safe_number(order, 'currentAmount')
postOnly = self.safe_value(order, 'postOnly')
return self.safe_order({
'id': self.safe_string(order, 'id'),
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': self.safe_string(order, 'mode'),
'timeInForce': None,
'postOnly': postOnly,
'side': self.safe_string_lower(order, 'offerType'),
'price': self.safe_number(order, 'rate'),
'stopPrice': None,
'amount': amount,
'cost': None,
'filled': None,
'remaining': remaining,
'average': None,
'fee': None,
'trades': None,
})
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if symbol:
markets = [self.market_id(symbol)]
request['markets'] = markets
query = {'query': self.json(self.extend(request, params))}
response = await self.v1_01PrivateGetTradingHistoryTransactions(query)
#
# {
# status: 'Ok',
# totalRows: '67',
# items: [
# {
# id: 'b54659a0-51b5-42a0-80eb-2ac5357ccee2',
# market: 'BTC-EUR',
# time: '1541697096247',
# amount: '0.00003',
# rate: '4341.44',
# initializedBy: 'Sell',
# wasTaker: False,
# userAction: 'Buy',
# offerId: 'bd19804a-6f89-4a69-adb8-eb078900d006',
# commissionValue: null
# },
# ]
# }
#
items = self.safe_value(response, 'items')
result = self.parse_trades(items, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.v1_01PrivateGetBalancesBITBAYBalance(params)
balances = self.safe_value(response, 'balances')
if balances is None:
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_string(balance, 'lockedFunds')
account['free'] = self.safe_string(balance, 'availableFunds')
result[code] = account
return self.parse_balance(result, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
orderbook = await self.publicGetIdOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook, symbol)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
ticker = await self.publicGetIdTicker(self.extend(request, params))
timestamp = self.milliseconds()
baseVolume = self.safe_number(ticker, 'volume')
vwap = self.safe_number(ticker, 'vwap')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max'),
'low': self.safe_number(ticker, 'min'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_number(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
balanceCurrencies = []
if code is not None:
currency = self.currency(code)
balanceCurrencies.append(currency['id'])
request = {
'balanceCurrencies': balanceCurrencies,
}
if since is not None:
request['fromTime'] = since
if limit is not None:
request['limit'] = limit
request = self.extend(request, params)
response = await self.v1_01PrivateGetBalancesBITBAYHistory({'query': self.json(request)})
items = response['items']
return self.parse_ledger(items, None, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# FUNDS_MIGRATION
# {
# "historyId": "84ea7a29-7da5-4de5-b0c0-871e83cad765",
# "balance": {
# "id": "821ec166-cb88-4521-916c-f4eb44db98df",
# "currency": "LTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "LTC"
# },
# "detailId": null,
# "time": 1506128252968,
# "type": "FUNDS_MIGRATION",
# "value": 0.0009957,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.0009957, "available": 0.0009957, "locked": 0},
# "change": {"total": 0.0009957, "available": 0.0009957, "locked": 0}
# }
#
# CREATE_BALANCE
# {
# "historyId": "d0fabd8d-9107-4b5e-b9a6-3cab8af70d49",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244751,
# "type": "CREATE_BALANCE",
# "value": 0,
# "fundsBefore": {"total": null, "available": null, "locked": null},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": 0, "available": 0, "locked": 0}
# }
#
# BITCOIN_GOLD_FORK
# {
# "historyId": "2b4d52d3-611c-473d-b92c-8a8d87a24e41",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244778,
# "type": "BITCOIN_GOLD_FORK",
# "value": 0.00453512,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "change": {"total": 0.00453512, "available": 0.00453512, "locked": 0}
# }
#
# ADD_FUNDS
# {
# "historyId": "3158236d-dae5-4a5d-81af-c1fa4af340fb",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "8e83a960-e737-4380-b8bb-259d6e236faa",
# "time": 1520631178816,
# "type": "ADD_FUNDS",
# "value": 0.628405,
# "fundsBefore": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "change": {"total": 0.628405, "available": 0.628405, "locked": 0}
# }
#
# TRANSACTION_PRE_LOCKING
# {
# "historyId": "e7d19e0f-03b3-46a8-bc72-dde72cc24ead",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520706403868,
# "type": "TRANSACTION_PRE_LOCKING",
# "value": -0.1,
# "fundsBefore": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "change": {"total": 0, "available": -0.1, "locked": 0.1}
# }
#
# TRANSACTION_POST_OUTCOME
# {
# "historyId": "c4010825-231d-4a9c-8e46-37cde1f7b63c",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "bf2876bc-b545-4503-96c8-ef4de8233876",
# "time": 1520706404032,
# "type": "TRANSACTION_POST_OUTCOME",
# "value": -0.01771415,
# "fundsBefore": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "fundsAfter": {"total": 0.61522597, "available": 0.53294012, "locked": 0.08228585},
# "change": {"total": -0.01771415, "available": 0, "locked": -0.01771415}
# }
#
# TRANSACTION_POST_INCOME
# {
# "historyId": "7f18b7af-b676-4125-84fd-042e683046f6",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404035,
# "type": "TRANSACTION_POST_INCOME",
# "value": 628.78,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 628.78, "available": 628.78, "locked": 0},
# "change": {"total": 628.78, "available": 628.78, "locked": 0}
# }
#
# TRANSACTION_COMMISSION_OUTCOME
# {
# "historyId": "843177fa-61bc-4cbf-8be5-b029d856c93b",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404050,
# "type": "TRANSACTION_COMMISSION_OUTCOME",
# "value": -2.71,
# "fundsBefore": {"total": 766.06, "available": 766.06, "locked": 0},
# "fundsAfter": {"total": 763.35,"available": 763.35, "locked": 0},
# "change": {"total": -2.71, "available": -2.71, "locked": 0}
# }
#
# TRANSACTION_OFFER_COMPLETED_RETURN
# {
# "historyId": "cac69b04-c518-4dc5-9d86-e76e91f2e1d2",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520714886425,
# "type": "TRANSACTION_OFFER_COMPLETED_RETURN",
# "value": 0.00000196,
# "fundsBefore": {"total": 0.00941208, "available": 0.00941012, "locked": 0.00000196},
# "fundsAfter": {"total": 0.00941208, "available": 0.00941208, "locked": 0},
# "change": {"total": 0, "available": 0.00000196, "locked": -0.00000196}
# }
#
# WITHDRAWAL_LOCK_FUNDS
# {
# "historyId": "03de2271-66ab-4960-a786-87ab9551fc14",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522245654481,
# "type": "WITHDRAWAL_LOCK_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0.8, "locked": 0},
# "fundsAfter": {"total": 0.8, "available": 0, "locked": 0.8},
# "change": {"total": 0, "available": -0.8, "locked": 0.8}
# }
#
# WITHDRAWAL_SUBTRACT_FUNDS
# {
# "historyId": "b0308c89-5288-438d-a306-c6448b1a266d",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522246526186,
# "type": "WITHDRAWAL_SUBTRACT_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0, "locked": 0.8},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": -0.8, "available": 0, "locked": -0.8}
# }
#
# TRANSACTION_OFFER_ABORTED_RETURN
# {
# "historyId": "b1a3c075-d403-4e05-8f32-40512cdd88c0",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1522512298662,
# "type": "TRANSACTION_OFFER_ABORTED_RETURN",
# "value": 0.0564931,
# "fundsBefore": {"total": 0.44951311, "available": 0.39302001, "locked": 0.0564931},
# "fundsAfter": {"total": 0.44951311, "available": 0.44951311, "locked": 0},
# "change": {"total": 0, "available": 0.0564931, "locked": -0.0564931}
# }
#
# WITHDRAWAL_UNLOCK_FUNDS
# {
# "historyId": "0ed569a2-c330-482e-bb89-4cb553fb5b11",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "0c7be256-c336-4111-bee7-4eb22e339700",
# "time": 1527866360785,
# "type": "WITHDRAWAL_UNLOCK_FUNDS",
# "value": 0.05045,
# "fundsBefore": {"total": 0.86001578, "available": 0.80956578, "locked": 0.05045},
# "fundsAfter": {"total": 0.86001578, "available": 0.86001578, "locked": 0},
# "change": {"total": 0, "available": 0.05045, "locked": -0.05045}
# }
#
# TRANSACTION_COMMISSION_RETURN
# {
# "historyId": "07c89c27-46f1-4d7a-8518-b73798bf168a",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": null,
# "time": 1528304043063,
# "type": "TRANSACTION_COMMISSION_RETURN",
# "value": 0.6,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.6, "available": 0.6, "locked": 0},
# "change": {"total": 0.6, "available": 0.6, "locked": 0}
# }
#
timestamp = self.safe_integer(item, 'time')
balance = self.safe_value(item, 'balance', {})
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
change = self.safe_value(item, 'change', {})
amount = self.safe_number(change, 'total')
direction = 'in'
if amount < 0:
direction = 'out'
amount = -amount
id = self.safe_string(item, 'historyId')
# there are 2 undocumented api calls: (v1_01PrivateGetPaymentsDepositDetailId and v1_01PrivateGetPaymentsWithdrawalDetailId)
# that can be used to enrich the transfers with txid, address etc(you need to use info.detailId as a parameter)
referenceId = self.safe_string(item, 'detailId')
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
fundsBefore = self.safe_value(item, 'fundsBefore', {})
before = self.safe_number(fundsBefore, 'total')
fundsAfter = self.safe_value(item, 'fundsAfter', {})
after = self.safe_number(fundsAfter, 'total')
return {
'info': item,
'id': id,
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': 'ok',
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def parse_ledger_entry_type(self, type):
types = {
'ADD_FUNDS': 'transaction',
'BITCOIN_GOLD_FORK': 'transaction',
'CREATE_BALANCE': 'transaction',
'FUNDS_MIGRATION': 'transaction',
'WITHDRAWAL_LOCK_FUNDS': 'transaction',
'WITHDRAWAL_SUBTRACT_FUNDS': 'transaction',
'WITHDRAWAL_UNLOCK_FUNDS': 'transaction',
'TRANSACTION_COMMISSION_OUTCOME': 'fee',
'TRANSACTION_COMMISSION_RETURN': 'fee',
'TRANSACTION_OFFER_ABORTED_RETURN': 'trade',
'TRANSACTION_OFFER_COMPLETED_RETURN': 'trade',
'TRANSACTION_POST_INCOME': 'trade',
'TRANSACTION_POST_OUTCOME': 'trade',
'TRANSACTION_PRE_LOCKING': 'trade',
}
return self.safe_string(types, type, type)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# '1582399800000',
# {
# o: '0.0001428',
# c: '0.0001428',
# h: '0.0001428',
# l: '0.0001428',
# v: '4',
# co: '1'
# }
# ]
#
first = self.safe_value(ohlcv, 1, {})
return [
self.safe_integer(ohlcv, 0),
self.safe_number(first, 'o'),
self.safe_number(first, 'h'),
self.safe_number(first, 'l'),
self.safe_number(first, 'c'),
self.safe_number(first, 'v'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'resolution': self.timeframes[timeframe],
# 'from': 1574709092000, # unix timestamp in milliseconds, required
# 'to': 1574709092000, # unix timestamp in milliseconds, required
}
if limit is None:
limit = 100
duration = self.parse_timeframe(timeframe)
timerange = limit * duration * 1000
if since is None:
request['to'] = self.milliseconds()
request['from'] = request['to'] - timerange
else:
request['from'] = int(since)
request['to'] = self.sum(request['from'], timerange)
response = await self.v1_01PublicGetTradingCandleHistorySymbolResolution(self.extend(request, params))
#
# {
# "status":"Ok",
# "items":[
# ["1591503060000",{"o":"0.02509572","c":"0.02509438","h":"0.02509664","l":"0.02509438","v":"0.02082165","co":"17"}],
# ["1591503120000",{"o":"0.02509606","c":"0.02509515","h":"0.02509606","l":"0.02509487","v":"0.04971703","co":"13"}],
# ["1591503180000",{"o":"0.02509532","c":"0.02509589","h":"0.02509589","l":"0.02509454","v":"0.01332236","co":"7"}],
# ]
# }
#
items = self.safe_value(response, 'items', [])
return self.parse_ohlcvs(items, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# createOrder trades
#
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# }
#
# fetchMyTrades(private)
#
# {
# amount: "0.29285199",
# commissionValue: "0.00125927",
# id: "11c8203a-a267-11e9-b698-0242ac110007",
# initializedBy: "Buy",
# market: "ETH-EUR",
# offerId: "11c82038-a267-11e9-b698-0242ac110007",
# rate: "277",
# time: "1562689917517",
# userAction: "Buy",
# wasTaker: True,
# }
#
# fetchTrades(public)
#
# {
# id: 'df00b0da-e5e0-11e9-8c19-0242ac11000a',
# t: '1570108958831',
# a: '0.04776653',
# r: '0.02145854',
# ty: 'Sell'
# }
#
timestamp = self.safe_integer_2(trade, 'time', 't')
userAction = self.safe_string(trade, 'userAction')
side = 'buy' if (userAction == 'Buy') else 'sell'
wasTaker = self.safe_value(trade, 'wasTaker')
takerOrMaker = None
if wasTaker is not None:
takerOrMaker = 'taker' if wasTaker else 'maker'
priceString = self.safe_string_2(trade, 'rate', 'r')
amountString = self.safe_string_2(trade, 'amount', 'a')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
feeCost = self.safe_number(trade, 'commissionValue')
marketId = self.safe_string(trade, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
fee = None
if feeCost is not None:
feeCcy = market['base'] if (side == 'buy') else market['quote']
fee = {
'currency': feeCcy,
'cost': feeCost,
}
order = self.safe_string(trade, 'offerId')
# todo: check self logic
type = None
if order is not None:
type = 'limit' if order else 'market'
return {
'id': self.safe_string(trade, 'id'),
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
}
if since is not None:
request['fromTime'] = since - 1 # result does not include exactly `since` time therefore decrease by 1
if limit is not None:
request['limit'] = limit # default - 10, max - 300
response = await self.v1_01PublicGetTradingTransactionsSymbol(self.extend(request, params))
items = self.safe_value(response, 'items')
return self.parse_trades(items, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'offerType': side,
'amount': amount,
'mode': type,
}
if type == 'limit':
request['rate'] = price
price = float(price)
amount = float(amount)
response = await self.v1_01PrivatePostTradingOfferSymbol(self.extend(request, params))
#
# unfilled(open order)
#
# {
# status: 'Ok',
# completed: False, # can deduce status from here
# offerId: 'ce9cc72e-d61c-11e9-9248-0242ac110005',
# transactions: [], # can deduce order info from here
# }
#
# filled(closed order)
#
# {
# "status": "Ok",
# "offerId": "942a4a3e-e922-11e9-8c19-0242ac11000a",
# "completed": True,
# "transactions": [
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02196207",
# "amount": "0.27704177"
# }
# ]
# }
#
# partially-filled(open order)
#
# {
# "status": "Ok",
# "offerId": "d0ebefab-f4d7-11e9-8c19-0242ac11000a",
# "completed": False,
# "transactions": [
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02105901",
# "amount": "0.00975256"
# }
# ]
# }
#
timestamp = self.milliseconds() # the real timestamp is missing in the response
id = self.safe_string(response, 'offerId')
completed = self.safe_value(response, 'completed', False)
status = 'closed' if completed else 'open'
filled = 0
cost = None
transactions = self.safe_value(response, 'transactions')
trades = None
if transactions is not None:
trades = self.parse_trades(transactions, market, None, None, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'side': side,
'type': type,
'orderId': id,
})
cost = 0
for i in range(0, len(trades)):
filled = self.sum(filled, trades[i]['amount'])
cost = self.sum(cost, trades[i]['cost'])
remaining = amount - filled
return {
'id': id,
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'average': None,
'fee': None,
'trades': trades,
'clientOrderId': None,
}
async def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `side` parameter("buy" or "sell")')
price = self.safe_value(params, 'price')
if price is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `price` parameter(float or string)')
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'id': id,
'side': side,
'price': price,
}
# {status: 'Fail', errors: ['NOT_RECOGNIZED_OFFER_TYPE']} -- if required params are missing
# {status: 'Ok', errors: []}
return self.v1_01PrivateDeleteTradingOfferSymbolIdSidePrice(self.extend(request, params))
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
return self.safe_value(fiatCurrencies, currency, False)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
# request['bic'] = ''
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Private':
self.check_required_credentials()
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
nonce = str(self.milliseconds())
payload = None
if method != 'POST':
if query:
url += '?' + self.urlencode(query)
payload = self.apiKey + nonce
elif body is None:
body = self.json(query)
payload = self.apiKey + nonce + body
headers = {
'Request-Timestamp': nonce,
'Operation-Id': self.uuid(),
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512),
'Content-Type': 'application/json',
}
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'code' in response:
#
# bitbay returns the integer 'success': 1 key from their private API
# or an integer 'code' value from 0 to 510 and an error message
#
# {'success': 1, ...}
# {'code': 502, 'message': 'Invalid sign'}
# {'code': 0, 'message': 'offer funds not exceeding minimums'}
#
# 400 At least one parameter wasn't set
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
# 405 Order with self id doesn't exist
# 406 No enough money or crypto
# 408 Invalid currency name
# 501 Invalid public key
# 502 Invalid sign
# 503 Invalid moment parameter. Request time doesn't match current server time
# 504 Invalid method
# 505 Key has no permission for self action
# 506 Account locked. Please contact with customer service
# 509 The BIC/SWIFT is required for self currency
# 510 Invalid market name
#
code = self.safe_string(response, 'code') # always an integer
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
elif 'status' in response:
#
# {"status":"Fail","errors":["OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS"]}
#
status = self.safe_string(response, 'status')
if status == 'Fail':
errors = self.safe_value(response, 'errors')
feedback = self.id + ' ' + body
for i in range(0, len(errors)):
error = errors[i]
self.throw_exactly_matched_exception(self.exceptions, error, feedback)
raise ExchangeError(feedback)
| 41.266122
| 137
| 0.46369
|
rt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.precise import Precise
class bitbay(Exchange):
def describe(self):
return self.deep_extend(super(bitbay, self).describe(), {
'id': 'bitbay',
'name': 'BitBay',
'countries': ['MT', 'EU'],
'rateLimit': 1000,
'has': {
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'fetchBalance': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTrades': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'3d': '259200',
'1w': '604800',
},
'hostname': 'bitbay.net',
'urls': {
'referral': 'https://auth.bitbay.net/ref/jHlbB4mIkdS1',
'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
'www': 'https://bitbay.net',
'api': {
'public': 'https://{hostname}/API/Public',
'private': 'https://{hostname}/API/Trading/tradingApi.php',
'v1_01Public': 'https://api.{hostname}/rest',
'v1_01Private': 'https://api.{hostname}/rest',
},
'doc': [
'https://bitbay.net/public-api',
'https://bitbay.net/en/private-api',
'https://bitbay.net/account/tab-api',
'https://github.com/BitBayNet/API',
'https://docs.bitbay.net/v1.0.1-en/reference',
],
'support': 'https://support.bitbay.net',
'fees': 'https://bitbay.net/en/fees',
},
'api': {
'public': {
'get': [
'{id}/all',
'{id}/market',
'{id}/orderbook',
'{id}/ticker',
'{id}/trades',
],
},
'private': {
'post': [
'info',
'trade',
'cancel',
'orderbook',
'orders',
'transfer',
'withdraw',
'history',
'transactions',
],
},
'v1_01Public': {
'get': [
'trading/ticker',
'trading/ticker/{symbol}',
'trading/stats',
'trading/orderbook/{symbol}',
'trading/transactions/{symbol}',
'trading/candle/history/{symbol}/{resolution}',
],
},
'v1_01Private': {
'get': [
'payments/withdrawal/{detailId}',
'payments/deposit/{detailId}',
'trading/offer',
'trading/config/{symbol}',
'trading/history/transactions',
'balances/BITBAY/history',
'balances/BITBAY/balance',
'fiat_cantor/rate/{baseId}/{quoteId}',
'fiat_cantor/history',
],
'post': [
'trading/offer/{symbol}',
'trading/config/{symbol}',
'balances/BITBAY/balance',
'balances/BITBAY/balance/transfer/{source}/{destination}',
'fiat_cantor/exchange',
],
'delete': [
'trading/offer/{symbol}/{id}/{side}/{price}',
],
'put': [
'balances/BITBAY/balance/{id}',
],
},
},
'fees': {
'trading': {
'maker': 0.0,
'taker': 0.1 / 100,
'percentage': True,
'tierBased': False,
},
'fiat': {
'maker': 0.30 / 100,
'taker': 0.43 / 100,
'percentage': True,
'tierBased': True,
'tiers': {
'taker': [
[0.0043, 0],
[0.0042, 1250],
[0.0041, 3750],
[0.0040, 7500],
[0.0039, 10000],
[0.0038, 15000],
[0.0037, 20000],
[0.0036, 25000],
[0.0035, 37500],
[0.0034, 50000],
[0.0033, 75000],
[0.0032, 100000],
[0.0031, 150000],
[0.0030, 200000],
[0.0029, 250000],
[0.0028, 375000],
[0.0027, 500000],
[0.0026, 625000],
[0.0025, 875000],
],
'maker': [
[0.0030, 0],
[0.0029, 1250],
[0.0028, 3750],
[0.0028, 7500],
[0.0027, 10000],
[0.0026, 15000],
[0.0025, 20000],
[0.0025, 25000],
[0.0024, 37500],
[0.0023, 50000],
[0.0023, 75000],
[0.0022, 100000],
[0.0021, 150000],
[0.0021, 200000],
[0.0020, 250000],
[0.0019, 375000],
[0.0018, 500000],
[0.0018, 625000],
[0.0017, 875000],
],
},
},
'funding': {
'withdraw': {
'BTC': 0.0009,
'LTC': 0.005,
'ETH': 0.00126,
'LSK': 0.2,
'BCH': 0.0006,
'GAME': 0.005,
'DASH': 0.001,
'BTG': 0.0008,
'PLN': 4,
'EUR': 1.5,
},
},
},
'options': {
'fiatCurrencies': ['EUR', 'USD', 'GBP', 'PLN'],
},
'exceptions': {
'400': ExchangeError,
'401': InvalidOrder, # Invalid order type
'402': InvalidOrder, # No orders with specified currencies
'403': InvalidOrder, # Invalid payment currency name
'404': InvalidOrder, # Error. Wrong transaction type
'405': InvalidOrder, # Order with self id doesn't exist
'406': InsufficientFunds,
'408': InvalidOrder,
'501': AuthenticationError,
'502': AuthenticationError,
'503': InvalidNonce,
'504': ExchangeError, # Invalid method
'505': AuthenticationError, # Key has no permission for self action
'506': AccountSuspended, # Account locked. Please contact with customer service
# codes 507 and 508 are not specified in their docs
'509': ExchangeError, # The BIC/SWIFT is required for self currency
'510': BadSymbol, # Invalid market name
'FUNDS_NOT_SUFFICIENT': InsufficientFunds,
'OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS': InvalidOrder,
'OFFER_NOT_FOUND': OrderNotFound,
'OFFER_WOULD_HAVE_BEEN_PARTIALLY_FILLED': OrderImmediatelyFillable,
'ACTION_LIMIT_EXCEEDED': RateLimitExceeded,
'UNDER_MAINTENANCE': OnMaintenance,
'REQUEST_TIMESTAMP_TOO_OLD': InvalidNonce,
'PERMISSIONS_NOT_SUFFICIENT': PermissionDenied,
},
'commonCurrencies': {
'GGC': 'Global Game Coin',
},
})
async def fetch_markets(self, params={}):
response = await self.v1_01PublicGetTradingTicker(params)
fiatCurrencies = self.safe_value(self.options, 'fiatCurrencies', [])
#
# {
# status: 'Ok',
# items: {
# 'BSV-USD': {
# market: {
# code: 'BSV-USD',
# first: {currency: 'BSV', minOffer: '0.00035', scale: 8},
# second: {currency: 'USD', minOffer: '5', scale: 2}
# },
# time: '1557569762154',
# highestBid: '52.31',
# lowestAsk: '62.99',
# rate: '63',
# previousRate: '51.21',
# },
# },
# }
#
result = []
items = self.safe_value(response, 'items')
keys = list(items.keys())
for i in range(0, len(keys)):
key = keys[i]
item = items[key]
market = self.safe_value(item, 'market', {})
first = self.safe_value(market, 'first', {})
second = self.safe_value(market, 'second', {})
baseId = self.safe_string(first, 'currency')
quoteId = self.safe_string(second, 'currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(first, 'scale'),
'price': self.safe_integer(second, 'scale'),
}
fees = self.safe_value(self.fees, 'trading', {})
if self.in_array(base, fiatCurrencies) or self.in_array(quote, fiatCurrencies):
fees = self.safe_value(self.fees, 'fiat', {})
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
# todo: check that the limits have ben interpreted correctly
# todo: parse the fees page
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'active': None,
'maker': maker,
'taker': taker,
'limits': {
'amount': {
'min': self.safe_number(first, 'minOffer'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(second, 'minOffer'),
'max': None,
},
},
'info': item,
})
return result
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
response = await self.v1_01PrivateGetTradingOffer(self.extend(request, params))
items = self.safe_value(response, 'items', [])
return self.parse_orders(items, None, since, limit, {'status': 'open'})
def parse_order(self, order, market=None):
#
# {
# market: 'ETH-EUR',
# offerType: 'Sell',
# id: '93d3657b-d616-11e9-9248-0242ac110005',
# currentAmount: '0.04',
# lockedAmount: '0.04',
# rate: '280',
# startAmount: '0.04',
# time: '1568372806924',
# postOnly: False,
# hidden: False,
# mode: 'limit',
# receivedAmount: '0.0',
# firstBalanceId: '5b816c3e-437c-4e43-9bef-47814ae7ebfc',
# secondBalanceId: 'ab43023b-4079-414c-b340-056e3430a3af'
# }
#
marketId = self.safe_string(order, 'market')
symbol = self.safe_symbol(marketId, market, '-')
timestamp = self.safe_integer(order, 'time')
amount = self.safe_number(order, 'startAmount')
remaining = self.safe_number(order, 'currentAmount')
postOnly = self.safe_value(order, 'postOnly')
return self.safe_order({
'id': self.safe_string(order, 'id'),
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': self.safe_string(order, 'mode'),
'timeInForce': None,
'postOnly': postOnly,
'side': self.safe_string_lower(order, 'offerType'),
'price': self.safe_number(order, 'rate'),
'stopPrice': None,
'amount': amount,
'cost': None,
'filled': None,
'remaining': remaining,
'average': None,
'fee': None,
'trades': None,
})
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if symbol:
markets = [self.market_id(symbol)]
request['markets'] = markets
query = {'query': self.json(self.extend(request, params))}
response = await self.v1_01PrivateGetTradingHistoryTransactions(query)
#
# {
# status: 'Ok',
# totalRows: '67',
# items: [
# {
# id: 'b54659a0-51b5-42a0-80eb-2ac5357ccee2',
# market: 'BTC-EUR',
# time: '1541697096247',
# amount: '0.00003',
# rate: '4341.44',
# initializedBy: 'Sell',
# wasTaker: False,
# userAction: 'Buy',
# offerId: 'bd19804a-6f89-4a69-adb8-eb078900d006',
# commissionValue: null
# },
# ]
# }
#
items = self.safe_value(response, 'items')
result = self.parse_trades(items, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.v1_01PrivateGetBalancesBITBAYBalance(params)
balances = self.safe_value(response, 'balances')
if balances is None:
raise ExchangeError(self.id + ' empty balance response ' + self.json(response))
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_string(balance, 'lockedFunds')
account['free'] = self.safe_string(balance, 'availableFunds')
result[code] = account
return self.parse_balance(result, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
orderbook = await self.publicGetIdOrderbook(self.extend(request, params))
return self.parse_order_book(orderbook, symbol)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
ticker = await self.publicGetIdTicker(self.extend(request, params))
timestamp = self.milliseconds()
baseVolume = self.safe_number(ticker, 'volume')
vwap = self.safe_number(ticker, 'vwap')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max'),
'low': self.safe_number(ticker, 'min'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_number(ticker, 'average'),
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
balanceCurrencies = []
if code is not None:
currency = self.currency(code)
balanceCurrencies.append(currency['id'])
request = {
'balanceCurrencies': balanceCurrencies,
}
if since is not None:
request['fromTime'] = since
if limit is not None:
request['limit'] = limit
request = self.extend(request, params)
response = await self.v1_01PrivateGetBalancesBITBAYHistory({'query': self.json(request)})
items = response['items']
return self.parse_ledger(items, None, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# FUNDS_MIGRATION
# {
# "historyId": "84ea7a29-7da5-4de5-b0c0-871e83cad765",
# "balance": {
# "id": "821ec166-cb88-4521-916c-f4eb44db98df",
# "currency": "LTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "LTC"
# },
# "detailId": null,
# "time": 1506128252968,
# "type": "FUNDS_MIGRATION",
# "value": 0.0009957,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.0009957, "available": 0.0009957, "locked": 0},
# "change": {"total": 0.0009957, "available": 0.0009957, "locked": 0}
# }
#
# CREATE_BALANCE
# {
# "historyId": "d0fabd8d-9107-4b5e-b9a6-3cab8af70d49",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244751,
# "type": "CREATE_BALANCE",
# "value": 0,
# "fundsBefore": {"total": null, "available": null, "locked": null},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": 0, "available": 0, "locked": 0}
# }
#
# BITCOIN_GOLD_FORK
# {
# "historyId": "2b4d52d3-611c-473d-b92c-8a8d87a24e41",
# "balance": {
# "id": "653ffcf2-3037-4ebe-8e13-d5ea1a01d60d",
# "currency": "BTG",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTG"
# },
# "detailId": null,
# "time": 1508895244778,
# "type": "BITCOIN_GOLD_FORK",
# "value": 0.00453512,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "change": {"total": 0.00453512, "available": 0.00453512, "locked": 0}
# }
#
# ADD_FUNDS
# {
# "historyId": "3158236d-dae5-4a5d-81af-c1fa4af340fb",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "8e83a960-e737-4380-b8bb-259d6e236faa",
# "time": 1520631178816,
# "type": "ADD_FUNDS",
# "value": 0.628405,
# "fundsBefore": {"total": 0.00453512, "available": 0.00453512, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "change": {"total": 0.628405, "available": 0.628405, "locked": 0}
# }
#
# TRANSACTION_PRE_LOCKING
# {
# "historyId": "e7d19e0f-03b3-46a8-bc72-dde72cc24ead",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520706403868,
# "type": "TRANSACTION_PRE_LOCKING",
# "value": -0.1,
# "fundsBefore": {"total": 0.63294012, "available": 0.63294012, "locked": 0},
# "fundsAfter": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "change": {"total": 0, "available": -0.1, "locked": 0.1}
# }
#
# TRANSACTION_POST_OUTCOME
# {
# "historyId": "c4010825-231d-4a9c-8e46-37cde1f7b63c",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "bf2876bc-b545-4503-96c8-ef4de8233876",
# "time": 1520706404032,
# "type": "TRANSACTION_POST_OUTCOME",
# "value": -0.01771415,
# "fundsBefore": {"total": 0.63294012, "available": 0.53294012, "locked": 0.1},
# "fundsAfter": {"total": 0.61522597, "available": 0.53294012, "locked": 0.08228585},
# "change": {"total": -0.01771415, "available": 0, "locked": -0.01771415}
# }
#
# TRANSACTION_POST_INCOME
# {
# "historyId": "7f18b7af-b676-4125-84fd-042e683046f6",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404035,
# "type": "TRANSACTION_POST_INCOME",
# "value": 628.78,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 628.78, "available": 628.78, "locked": 0},
# "change": {"total": 628.78, "available": 628.78, "locked": 0}
# }
#
# TRANSACTION_COMMISSION_OUTCOME
# {
# "historyId": "843177fa-61bc-4cbf-8be5-b029d856c93b",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": "f5fcb274-0cc7-4385-b2d3-bae2756e701f",
# "time": 1520706404050,
# "type": "TRANSACTION_COMMISSION_OUTCOME",
# "value": -2.71,
# "fundsBefore": {"total": 766.06, "available": 766.06, "locked": 0},
# "fundsAfter": {"total": 763.35,"available": 763.35, "locked": 0},
# "change": {"total": -2.71, "available": -2.71, "locked": 0}
# }
#
# TRANSACTION_OFFER_COMPLETED_RETURN
# {
# "historyId": "cac69b04-c518-4dc5-9d86-e76e91f2e1d2",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1520714886425,
# "type": "TRANSACTION_OFFER_COMPLETED_RETURN",
# "value": 0.00000196,
# "fundsBefore": {"total": 0.00941208, "available": 0.00941012, "locked": 0.00000196},
# "fundsAfter": {"total": 0.00941208, "available": 0.00941208, "locked": 0},
# "change": {"total": 0, "available": 0.00000196, "locked": -0.00000196}
# }
#
# WITHDRAWAL_LOCK_FUNDS
# {
# "historyId": "03de2271-66ab-4960-a786-87ab9551fc14",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522245654481,
# "type": "WITHDRAWAL_LOCK_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0.8, "locked": 0},
# "fundsAfter": {"total": 0.8, "available": 0, "locked": 0.8},
# "change": {"total": 0, "available": -0.8, "locked": 0.8}
# }
#
# WITHDRAWAL_SUBTRACT_FUNDS
# {
# "historyId": "b0308c89-5288-438d-a306-c6448b1a266d",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "6ad3dc72-1d6d-4ec2-8436-ca43f85a38a6",
# "time": 1522246526186,
# "type": "WITHDRAWAL_SUBTRACT_FUNDS",
# "value": -0.8,
# "fundsBefore": {"total": 0.8, "available": 0, "locked": 0.8},
# "fundsAfter": {"total": 0, "available": 0, "locked": 0},
# "change": {"total": -0.8, "available": 0, "locked": -0.8}
# }
#
# TRANSACTION_OFFER_ABORTED_RETURN
# {
# "historyId": "b1a3c075-d403-4e05-8f32-40512cdd88c0",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": null,
# "time": 1522512298662,
# "type": "TRANSACTION_OFFER_ABORTED_RETURN",
# "value": 0.0564931,
# "fundsBefore": {"total": 0.44951311, "available": 0.39302001, "locked": 0.0564931},
# "fundsAfter": {"total": 0.44951311, "available": 0.44951311, "locked": 0},
# "change": {"total": 0, "available": 0.0564931, "locked": -0.0564931}
# }
#
# WITHDRAWAL_UNLOCK_FUNDS
# {
# "historyId": "0ed569a2-c330-482e-bb89-4cb553fb5b11",
# "balance": {
# "id": "3a7e7a1e-0324-49d5-8f59-298505ebd6c7",
# "currency": "BTC",
# "type": "CRYPTO",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "BTC"
# },
# "detailId": "0c7be256-c336-4111-bee7-4eb22e339700",
# "time": 1527866360785,
# "type": "WITHDRAWAL_UNLOCK_FUNDS",
# "value": 0.05045,
# "fundsBefore": {"total": 0.86001578, "available": 0.80956578, "locked": 0.05045},
# "fundsAfter": {"total": 0.86001578, "available": 0.86001578, "locked": 0},
# "change": {"total": 0, "available": 0.05045, "locked": -0.05045}
# }
#
# TRANSACTION_COMMISSION_RETURN
# {
# "historyId": "07c89c27-46f1-4d7a-8518-b73798bf168a",
# "balance": {
# "id": "ab43023b-4079-414c-b340-056e3430a3af",
# "currency": "EUR",
# "type": "FIAT",
# "userId": "a34d361d-7bad-49c1-888e-62473b75d877",
# "name": "EUR"
# },
# "detailId": null,
# "time": 1528304043063,
# "type": "TRANSACTION_COMMISSION_RETURN",
# "value": 0.6,
# "fundsBefore": {"total": 0, "available": 0, "locked": 0},
# "fundsAfter": {"total": 0.6, "available": 0.6, "locked": 0},
# "change": {"total": 0.6, "available": 0.6, "locked": 0}
# }
#
timestamp = self.safe_integer(item, 'time')
balance = self.safe_value(item, 'balance', {})
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
change = self.safe_value(item, 'change', {})
amount = self.safe_number(change, 'total')
direction = 'in'
if amount < 0:
direction = 'out'
amount = -amount
id = self.safe_string(item, 'historyId')
# there are 2 undocumented api calls: (v1_01PrivateGetPaymentsDepositDetailId and v1_01PrivateGetPaymentsWithdrawalDetailId)
# that can be used to enrich the transfers with txid, address etc(you need to use info.detailId as a parameter)
referenceId = self.safe_string(item, 'detailId')
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
fundsBefore = self.safe_value(item, 'fundsBefore', {})
before = self.safe_number(fundsBefore, 'total')
fundsAfter = self.safe_value(item, 'fundsAfter', {})
after = self.safe_number(fundsAfter, 'total')
return {
'info': item,
'id': id,
'direction': direction,
'account': None,
'referenceId': referenceId,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': 'ok',
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def parse_ledger_entry_type(self, type):
types = {
'ADD_FUNDS': 'transaction',
'BITCOIN_GOLD_FORK': 'transaction',
'CREATE_BALANCE': 'transaction',
'FUNDS_MIGRATION': 'transaction',
'WITHDRAWAL_LOCK_FUNDS': 'transaction',
'WITHDRAWAL_SUBTRACT_FUNDS': 'transaction',
'WITHDRAWAL_UNLOCK_FUNDS': 'transaction',
'TRANSACTION_COMMISSION_OUTCOME': 'fee',
'TRANSACTION_COMMISSION_RETURN': 'fee',
'TRANSACTION_OFFER_ABORTED_RETURN': 'trade',
'TRANSACTION_OFFER_COMPLETED_RETURN': 'trade',
'TRANSACTION_POST_INCOME': 'trade',
'TRANSACTION_POST_OUTCOME': 'trade',
'TRANSACTION_PRE_LOCKING': 'trade',
}
return self.safe_string(types, type, type)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# '1582399800000',
# {
# o: '0.0001428',
# c: '0.0001428',
# h: '0.0001428',
# l: '0.0001428',
# v: '4',
# co: '1'
# }
# ]
#
first = self.safe_value(ohlcv, 1, {})
return [
self.safe_integer(ohlcv, 0),
self.safe_number(first, 'o'),
self.safe_number(first, 'h'),
self.safe_number(first, 'l'),
self.safe_number(first, 'c'),
self.safe_number(first, 'v'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'resolution': self.timeframes[timeframe],
# 'from': 1574709092000, # unix timestamp in milliseconds, required
# 'to': 1574709092000, # unix timestamp in milliseconds, required
}
if limit is None:
limit = 100
duration = self.parse_timeframe(timeframe)
timerange = limit * duration * 1000
if since is None:
request['to'] = self.milliseconds()
request['from'] = request['to'] - timerange
else:
request['from'] = int(since)
request['to'] = self.sum(request['from'], timerange)
response = await self.v1_01PublicGetTradingCandleHistorySymbolResolution(self.extend(request, params))
#
# {
# "status":"Ok",
# "items":[
# ["1591503060000",{"o":"0.02509572","c":"0.02509438","h":"0.02509664","l":"0.02509438","v":"0.02082165","co":"17"}],
# ["1591503120000",{"o":"0.02509606","c":"0.02509515","h":"0.02509606","l":"0.02509487","v":"0.04971703","co":"13"}],
# ["1591503180000",{"o":"0.02509532","c":"0.02509589","h":"0.02509589","l":"0.02509454","v":"0.01332236","co":"7"}],
# ]
# }
#
items = self.safe_value(response, 'items', [])
return self.parse_ohlcvs(items, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# createOrder trades
#
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# }
#
# fetchMyTrades(private)
#
# {
# amount: "0.29285199",
# commissionValue: "0.00125927",
# id: "11c8203a-a267-11e9-b698-0242ac110007",
# initializedBy: "Buy",
# market: "ETH-EUR",
# offerId: "11c82038-a267-11e9-b698-0242ac110007",
# rate: "277",
# time: "1562689917517",
# userAction: "Buy",
# wasTaker: True,
# }
#
# fetchTrades(public)
#
# {
# id: 'df00b0da-e5e0-11e9-8c19-0242ac11000a',
# t: '1570108958831',
# a: '0.04776653',
# r: '0.02145854',
# ty: 'Sell'
# }
#
timestamp = self.safe_integer_2(trade, 'time', 't')
userAction = self.safe_string(trade, 'userAction')
side = 'buy' if (userAction == 'Buy') else 'sell'
wasTaker = self.safe_value(trade, 'wasTaker')
takerOrMaker = None
if wasTaker is not None:
takerOrMaker = 'taker' if wasTaker else 'maker'
priceString = self.safe_string_2(trade, 'rate', 'r')
amountString = self.safe_string_2(trade, 'amount', 'a')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
feeCost = self.safe_number(trade, 'commissionValue')
marketId = self.safe_string(trade, 'market')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
fee = None
if feeCost is not None:
feeCcy = market['base'] if (side == 'buy') else market['quote']
fee = {
'currency': feeCcy,
'cost': feeCost,
}
order = self.safe_string(trade, 'offerId')
# todo: check self logic
type = None
if order is not None:
type = 'limit' if order else 'market'
return {
'id': self.safe_string(trade, 'id'),
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
}
if since is not None:
request['fromTime'] = since - 1 # result does not include exactly `since` time therefore decrease by 1
if limit is not None:
request['limit'] = limit # default - 10, max - 300
response = await self.v1_01PublicGetTradingTransactionsSymbol(self.extend(request, params))
items = self.safe_value(response, 'items')
return self.parse_trades(items, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'offerType': side,
'amount': amount,
'mode': type,
}
if type == 'limit':
request['rate'] = price
price = float(price)
amount = float(amount)
response = await self.v1_01PrivatePostTradingOfferSymbol(self.extend(request, params))
#
# unfilled(open order)
#
# {
# status: 'Ok',
# completed: False, # can deduce status from here
# offerId: 'ce9cc72e-d61c-11e9-9248-0242ac110005',
# transactions: [], # can deduce order info from here
# }
#
# filled(closed order)
#
# {
# "status": "Ok",
# "offerId": "942a4a3e-e922-11e9-8c19-0242ac11000a",
# "completed": True,
# "transactions": [
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02195928",
# "amount": "0.00167952"
# },
# {
# "rate": "0.02196207",
# "amount": "0.27704177"
# }
# ]
# }
#
# partially-filled(open order)
#
# {
# "status": "Ok",
# "offerId": "d0ebefab-f4d7-11e9-8c19-0242ac11000a",
# "completed": False,
# "transactions": [
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02106404",
# "amount": "0.0019625"
# },
# {
# "rate": "0.02105901",
# "amount": "0.00975256"
# }
# ]
# }
#
timestamp = self.milliseconds() # the real timestamp is missing in the response
id = self.safe_string(response, 'offerId')
completed = self.safe_value(response, 'completed', False)
status = 'closed' if completed else 'open'
filled = 0
cost = None
transactions = self.safe_value(response, 'transactions')
trades = None
if transactions is not None:
trades = self.parse_trades(transactions, market, None, None, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'side': side,
'type': type,
'orderId': id,
})
cost = 0
for i in range(0, len(trades)):
filled = self.sum(filled, trades[i]['amount'])
cost = self.sum(cost, trades[i]['cost'])
remaining = amount - filled
return {
'id': id,
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'average': None,
'fee': None,
'trades': trades,
'clientOrderId': None,
}
async def cancel_order(self, id, symbol=None, params={}):
side = self.safe_string(params, 'side')
if side is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `side` parameter("buy" or "sell")')
price = self.safe_value(params, 'price')
if price is None:
raise ExchangeError(self.id + ' cancelOrder() requires a `price` parameter(float or string)')
await self.load_markets()
market = self.market(symbol)
tradingSymbol = market['baseId'] + '-' + market['quoteId']
request = {
'symbol': tradingSymbol,
'id': id,
'side': side,
'price': price,
}
# {status: 'Fail', errors: ['NOT_RECOGNIZED_OFFER_TYPE']} -- if required params are missing
# {status: 'Ok', errors: []}
return self.v1_01PrivateDeleteTradingOfferSymbolIdSidePrice(self.extend(request, params))
def is_fiat(self, currency):
fiatCurrencies = {
'USD': True,
'EUR': True,
'PLN': True,
}
return self.safe_value(fiatCurrencies, currency, False)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
method = None
currency = self.currency(code)
request = {
'currency': currency['id'],
'quantity': amount,
}
if self.is_fiat(code):
method = 'privatePostWithdraw'
# request['account'] = params['account'] # they demand an account number
# request['express'] = params['express'] # whatever it means, they don't explain
else:
method = 'privatePostTransfer'
if tag is not None:
address += '?dt=' + str(tag)
request['address'] = address
response = await getattr(self, method)(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
if api == 'public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params) + '.json'
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Public':
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
elif api == 'v1_01Private':
self.check_required_credentials()
query = self.omit(params, self.extract_params(path))
url += '/' + self.implode_params(path, params)
nonce = str(self.milliseconds())
payload = None
if method != 'POST':
if query:
url += '?' + self.urlencode(query)
payload = self.apiKey + nonce
elif body is None:
body = self.json(query)
payload = self.apiKey + nonce + body
headers = {
'Request-Timestamp': nonce,
'Operation-Id': self.uuid(),
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512),
'Content-Type': 'application/json',
}
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'moment': self.nonce(),
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'API-Key': self.apiKey,
'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if 'code' in response:
# 401 Invalid order type
# 402 No orders with specified currencies
# 403 Invalid payment currency name
# 404 Error. Wrong transaction type
# 405 Order with self id doesn't exist
# 504 Invalid method
# 505 Key has no permission for self action
# 506 Account locked. Please contact with customer service
# 509 The BIC/SWIFT is required for self currency
# 510 Invalid market name
#
code = self.safe_string(response, 'code') # always an integer
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
elif 'status' in response:
#
# {"status":"Fail","errors":["OFFER_FUNDS_NOT_EXCEEDING_MINIMUMS"]}
#
status = self.safe_string(response, 'status')
if status == 'Fail':
errors = self.safe_value(response, 'errors')
feedback = self.id + ' ' + body
for i in range(0, len(errors)):
error = errors[i]
self.throw_exactly_matched_exception(self.exceptions, error, feedback)
raise ExchangeError(feedback)
| true
| true
|
f717f7d7f8f771201fee15a195eb1be65208493e
| 207
|
py
|
Python
|
CodeHS/Unit 1/1.5/pancakes.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 1/1.5/pancakes.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
CodeHS/Unit 1/1.5/pancakes.py
|
nitrospam/APCSP2020
|
275f576036805d244c3244f3f3646951940c9575
|
[
"MIT"
] | null | null | null |
def place_3_balls():
put_ball()
put_ball()
put_ball()
def move_twice():
move()
move()
move()
place_3_balls()
move_twice()
place_3_balls()
move_twice()
place_3_balls()
move()
| 10.35
| 20
| 0.618357
|
def place_3_balls():
put_ball()
put_ball()
put_ball()
def move_twice():
move()
move()
move()
place_3_balls()
move_twice()
place_3_balls()
move_twice()
place_3_balls()
move()
| true
| true
|
f717f82fd0f391aa09b205b652a744cfaac5ba2d
| 188
|
py
|
Python
|
testData/search/childrenKeywordArgument.py
|
seandstewart/typical-pycharm-plugin
|
4f6ec99766239421201faae9d75c32fa0ee3565a
|
[
"MIT"
] | null | null | null |
testData/search/childrenKeywordArgument.py
|
seandstewart/typical-pycharm-plugin
|
4f6ec99766239421201faae9d75c32fa0ee3565a
|
[
"MIT"
] | null | null | null |
testData/search/childrenKeywordArgument.py
|
seandstewart/typical-pycharm-plugin
|
4f6ec99766239421201faae9d75c32fa0ee3565a
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
class A(BaseModel):
abc: str
class B(A):
abc: str
class C(A):
abc: str # expected
A(abc='cde')
B(abc='cde')
C(ab<caret>c='cde')
## count: 1
| 11.75
| 30
| 0.606383
|
from pydantic import BaseModel
class A(BaseModel):
abc: str
class B(A):
abc: str
class C(A):
abc: str
A(abc='cde')
B(abc='cde')
C(ab<caret>c='cde')
| false
| true
|
f717f87eae6eff378694a1f1173d6bf41dba6abe
| 505
|
py
|
Python
|
66-plus-one/66-plus-one.py
|
yuzhengcuhk/MyLeetcodeRecord
|
bd516c6f2946b922da53e587fc186935c6a8819c
|
[
"MIT"
] | 3
|
2022-02-07T12:47:43.000Z
|
2022-03-13T16:40:12.000Z
|
66-plus-one/66-plus-one.py
|
yuzhengcuhk/MyLeetcodeRecord
|
bd516c6f2946b922da53e587fc186935c6a8819c
|
[
"MIT"
] | null | null | null |
66-plus-one/66-plus-one.py
|
yuzhengcuhk/MyLeetcodeRecord
|
bd516c6f2946b922da53e587fc186935c6a8819c
|
[
"MIT"
] | null | null | null |
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
cnt = len(digits)
if digits[cnt-1] != 9:
digits[cnt-1] = digits[cnt-1] + 1
return digits
else:
for i in range(0, len(digits)):
digits[i] = str(digits[i])
intdig = ''.join(digits)
intdig = int(intdig) + 1
result = []
for item in str(intdig):
result.append(int(item))
return result
| 33.666667
| 54
| 0.463366
|
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
cnt = len(digits)
if digits[cnt-1] != 9:
digits[cnt-1] = digits[cnt-1] + 1
return digits
else:
for i in range(0, len(digits)):
digits[i] = str(digits[i])
intdig = ''.join(digits)
intdig = int(intdig) + 1
result = []
for item in str(intdig):
result.append(int(item))
return result
| true
| true
|
f717f8f11f852a6ff486c6c6a1bdbf3db226a42b
| 849
|
py
|
Python
|
Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | 1
|
2021-04-23T10:01:22.000Z
|
2021-04-23T10:01:22.000Z
|
Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | null | null | null |
Chapter_7_code/build/hector_quadrotor_controller_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
crepuscularlight/ROSbyExample
|
fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include".split(';') if "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gazebo_ros_control;hector_quadrotor_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhector_quadrotor_controller_gazebo".split(';') if "-lhector_quadrotor_controller_gazebo" != "" else []
PROJECT_NAME = "hector_quadrotor_controller_gazebo"
PROJECT_SPACE_DIR = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/devel/.private/hector_quadrotor_controller_gazebo"
PROJECT_VERSION = "0.3.5"
| 94.333333
| 319
| 0.829211
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include".split(';') if "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/src/hector_quadrotor/hector_quadrotor_controller_gazebo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gazebo_ros_control;hector_quadrotor_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhector_quadrotor_controller_gazebo".split(';') if "-lhector_quadrotor_controller_gazebo" != "" else []
PROJECT_NAME = "hector_quadrotor_controller_gazebo"
PROJECT_SPACE_DIR = "/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_7_code/devel/.private/hector_quadrotor_controller_gazebo"
PROJECT_VERSION = "0.3.5"
| true
| true
|
f717f935bd346a3daf5e4df75d97e3d4a4dd5155
| 1,221
|
py
|
Python
|
tests/test_persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 1
|
2017-11-26T17:57:59.000Z
|
2017-11-26T17:57:59.000Z
|
tests/test_persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 34
|
2015-03-23T10:28:59.000Z
|
2021-12-13T20:16:48.000Z
|
tests/test_persistentkv.py
|
fakedrake/WikipediaBase
|
ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb
|
[
"Apache-2.0"
] | 2
|
2015-05-17T00:56:45.000Z
|
2015-06-27T22:10:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_persistentkv
----------------------------------
Tests for `persistentkv` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import common
from wikipediabase import persistentkv as pkv
DATABASE = "/tmp/remove-me.db"
class TestPersistentkv(unittest.TestCase):
def setUp(self):
pass
def test_non_persist(self):
ps = pkv.PersistentDict(DATABASE)
ps['hello'] = "yes"
ps["bye"] = "no"
ps['\xe2\x98\x83snowman'.decode('utf8')] = "well"
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
del ps
# Test persistence
ps = pkv.PersistentDict(DATABASE)
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
self.assertEqual(ps['\xe2\x98\x83snowman'.decode('utf8')], "well")
del ps
# Test file dependency
os.remove(DATABASE)
ps = pkv.PersistentDict(DATABASE)
with self.assertRaises(KeyError):
bo = ps['hello'] == "yes"
def tearDown(self):
os.remove(DATABASE)
if __name__ == '__main__':
unittest.main()
| 22.611111
| 74
| 0.585586
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import common
from wikipediabase import persistentkv as pkv
DATABASE = "/tmp/remove-me.db"
class TestPersistentkv(unittest.TestCase):
def setUp(self):
pass
def test_non_persist(self):
ps = pkv.PersistentDict(DATABASE)
ps['hello'] = "yes"
ps["bye"] = "no"
ps['\xe2\x98\x83snowman'.decode('utf8')] = "well"
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
del ps
ps = pkv.PersistentDict(DATABASE)
self.assertEqual(ps['hello'], "yes")
self.assertEqual(ps['bye'], "no")
self.assertEqual(ps['\xe2\x98\x83snowman'.decode('utf8')], "well")
del ps
os.remove(DATABASE)
ps = pkv.PersistentDict(DATABASE)
with self.assertRaises(KeyError):
bo = ps['hello'] == "yes"
def tearDown(self):
os.remove(DATABASE)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f717f96bb0c2423c47e922ab54cdfb5493b76d10
| 2,593
|
py
|
Python
|
seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py
|
ranyadshalom/seinfeld_laugh_corpus
|
b1e1a5208d2d3499144743028205336f8ca34552
|
[
"MIT"
] | null | null | null |
seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py
|
ranyadshalom/seinfeld_laugh_corpus
|
b1e1a5208d2d3499144743028205336f8ca34552
|
[
"MIT"
] | 2
|
2018-09-04T05:32:22.000Z
|
2018-09-17T10:58:11.000Z
|
seinfeld_laugh_corpus/humor_recogniser/data_generation_scripts/word_prevalence_calc.py
|
ranyadshalom/seinfeld_laugh_corpus
|
b1e1a5208d2d3499144743028205336f8ca34552
|
[
"MIT"
] | null | null | null |
import argparse
import re
import sys
from collections import Counter
sys.path.append("..")
from ml_humor_recogniser import read_data
from screenplay import Line
def run(data, output):
screenplays = read_data(data)
txt = screenplays_to_txt(screenplays)
word_counts = get_word_counts(txt)
word_probabilities = get_probabilities(word_counts)
write_to_file(word_probabilities, output)
# TODO take care of UNKs
def screenplays_to_txt(screenplays):
result = ''
for screenplay in screenplays:
for line in screenplay:
if isinstance(line, Line):
result += ('\n' + line.txt)
return result
def get_word_counts(txt):
"""
Counts word occurrences in "txt".
The methodology of dealing with unknown words is to calculate a count of "UNK" by splitting the set of words, and
after counting words in the bigger set, every unknown word that appears in the smaller set will be counted as "UNK".
:param txt:
:return: a {'word':integer} dictionary that represents the number of times a word appears in the txt.
"""
counts = Counter()
all_words = re.split(r'[\s\,\.\?\!\;\:"]', txt.lower())
all_words = [w for w in all_words if w]
size = len(all_words)
most_words, rest = all_words[:int(size*0.9)], all_words[int(size*0.9):]
for word in most_words:
counts[word] += 1
for word in rest:
if word in counts:
counts[word] += 1
else:
counts['UNK'] += 1
return counts
def get_probabilities(word_counts):
probabilities = {}
total_num_of_words = sum((count for _, count in word_counts.items()))
for word in word_counts.keys():
probabilities[word] = word_counts[word] / total_num_of_words
return probabilities
def write_to_file(word_probabilities, output):
with open(output, 'w') as f:
for word, prob in word_probabilities.items():
f.write("%s %.9f\n" % (word, prob))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A script to calculate the probabilities of words occurring in a "
"screenplay.")
parser.add_argument('data', help='The folder where the training data is located. Training data is .merged '
'files, created by the data_merger.py module and contain screenplays, '
'laugh times & dialog times.')
parser.add_argument('output', help='Output file.')
args = parser.parse_args()
run(args.data, args.output)
| 32.822785
| 120
| 0.644042
|
import argparse
import re
import sys
from collections import Counter
sys.path.append("..")
from ml_humor_recogniser import read_data
from screenplay import Line
def run(data, output):
screenplays = read_data(data)
txt = screenplays_to_txt(screenplays)
word_counts = get_word_counts(txt)
word_probabilities = get_probabilities(word_counts)
write_to_file(word_probabilities, output)
def screenplays_to_txt(screenplays):
result = ''
for screenplay in screenplays:
for line in screenplay:
if isinstance(line, Line):
result += ('\n' + line.txt)
return result
def get_word_counts(txt):
counts = Counter()
all_words = re.split(r'[\s\,\.\?\!\;\:"]', txt.lower())
all_words = [w for w in all_words if w]
size = len(all_words)
most_words, rest = all_words[:int(size*0.9)], all_words[int(size*0.9):]
for word in most_words:
counts[word] += 1
for word in rest:
if word in counts:
counts[word] += 1
else:
counts['UNK'] += 1
return counts
def get_probabilities(word_counts):
probabilities = {}
total_num_of_words = sum((count for _, count in word_counts.items()))
for word in word_counts.keys():
probabilities[word] = word_counts[word] / total_num_of_words
return probabilities
def write_to_file(word_probabilities, output):
with open(output, 'w') as f:
for word, prob in word_probabilities.items():
f.write("%s %.9f\n" % (word, prob))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A script to calculate the probabilities of words occurring in a "
"screenplay.")
parser.add_argument('data', help='The folder where the training data is located. Training data is .merged '
'files, created by the data_merger.py module and contain screenplays, '
'laugh times & dialog times.')
parser.add_argument('output', help='Output file.')
args = parser.parse_args()
run(args.data, args.output)
| true
| true
|
f717f9a709ac6da00ce8729b7850f20a3de65921
| 59
|
py
|
Python
|
uiSimple.py
|
smithgoo/python3Learn
|
d0c066c10887db3942ca285b86ce464463998aad
|
[
"MIT"
] | 1
|
2019-05-30T08:08:34.000Z
|
2019-05-30T08:08:34.000Z
|
uiSimple.py
|
smithgoo/python3Learn
|
d0c066c10887db3942ca285b86ce464463998aad
|
[
"MIT"
] | null | null | null |
uiSimple.py
|
smithgoo/python3Learn
|
d0c066c10887db3942ca285b86ce464463998aad
|
[
"MIT"
] | null | null | null |
from _tkinter import *
root = tkinter.Tk()
root.mainloop()
| 19.666667
| 23
| 0.728814
|
from _tkinter import *
root = tkinter.Tk()
root.mainloop()
| true
| true
|
f717f9e8df46378c8a416ad5be38e9da22664eeb
| 798
|
py
|
Python
|
deal/_cli/_main.py
|
toonarmycaptain/deal
|
9dff86e1dc5c8607f02ded34b6d64e770f1959fa
|
[
"MIT"
] | null | null | null |
deal/_cli/_main.py
|
toonarmycaptain/deal
|
9dff86e1dc5c8607f02ded34b6d64e770f1959fa
|
[
"MIT"
] | null | null | null |
deal/_cli/_main.py
|
toonarmycaptain/deal
|
9dff86e1dc5c8607f02ded34b6d64e770f1959fa
|
[
"MIT"
] | null | null | null |
# built-in
from argparse import ArgumentParser
from types import MappingProxyType
from typing import Callable, Mapping, Sequence
# app
from ._lint import lint_command
from ._memtest import memtest_command
from ._stub import stub_command
from ._test import test_command
CommandsType = Mapping[str, Callable[[Sequence[str]], int]]
COMMANDS: CommandsType = MappingProxyType(dict(
lint=lint_command,
memtest=memtest_command,
stub=stub_command,
test=test_command,
))
def main(argv: Sequence[str], *, commands: CommandsType = COMMANDS) -> int:
parser = ArgumentParser(prog='python3 -m deal')
parser.add_argument('command', choices=sorted(commands))
args, unknown_argv = parser.parse_known_args(argv)
command = commands[args.command]
return command(unknown_argv)
| 27.517241
| 75
| 0.761905
|
from argparse import ArgumentParser
from types import MappingProxyType
from typing import Callable, Mapping, Sequence
from ._lint import lint_command
from ._memtest import memtest_command
from ._stub import stub_command
from ._test import test_command
CommandsType = Mapping[str, Callable[[Sequence[str]], int]]
COMMANDS: CommandsType = MappingProxyType(dict(
lint=lint_command,
memtest=memtest_command,
stub=stub_command,
test=test_command,
))
def main(argv: Sequence[str], *, commands: CommandsType = COMMANDS) -> int:
parser = ArgumentParser(prog='python3 -m deal')
parser.add_argument('command', choices=sorted(commands))
args, unknown_argv = parser.parse_known_args(argv)
command = commands[args.command]
return command(unknown_argv)
| true
| true
|
f717fa3b3ac3c83afb7de4e2d210b524c7409f46
| 2,049
|
py
|
Python
|
examples/extract_table_names.py
|
hugovk/sqlparse
|
3598bf4670b0f4d80b7ca0557f156aa8bf87add4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/extract_table_names.py
|
hugovk/sqlparse
|
3598bf4670b0f4d80b7ca0557f156aa8bf87add4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/extract_table_names.py
|
hugovk/sqlparse
|
3598bf4670b0f4d80b7ca0557f156aa8bf87add4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This example is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
#
# This example illustrates how to extract table names from nested
# SELECT statements.
#
# See:
# https://groups.google.com/forum/#!forum/sqlparse/browse_thread/thread/b0bd9a022e9d4895
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword, DML
def is_subselect(parsed):
if not parsed.is_group:
return False
for item in parsed.tokens:
if item.ttype is DML and item.value.upper() == 'SELECT':
return True
return False
def extract_from_part(parsed):
from_seen = False
for item in parsed.tokens:
if from_seen:
if is_subselect(item):
yield from extract_from_part(item)
elif item.ttype is Keyword:
return
else:
yield item
elif item.ttype is Keyword and item.value.upper() == 'FROM':
from_seen = True
def extract_table_identifiers(token_stream):
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
yield identifier.get_name()
elif isinstance(item, Identifier):
yield item.get_name()
# It's a bug to check for Keyword here, but in the example
# above some tables names are identified as keywords...
elif item.ttype is Keyword:
yield item.value
def extract_tables(sql):
stream = extract_from_part(sqlparse.parse(sql)[0])
return list(extract_table_identifiers(stream))
if __name__ == '__main__':
sql = """
select K.a,K.b from (select H.b from (select G.c from (select F.d from
(select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2;
"""
tables = ', '.join(extract_tables(sql))
print(f'Tables: {tables}')
| 29.695652
| 88
| 0.650073
|
ist, Identifier
from sqlparse.tokens import Keyword, DML
def is_subselect(parsed):
if not parsed.is_group:
return False
for item in parsed.tokens:
if item.ttype is DML and item.value.upper() == 'SELECT':
return True
return False
def extract_from_part(parsed):
from_seen = False
for item in parsed.tokens:
if from_seen:
if is_subselect(item):
yield from extract_from_part(item)
elif item.ttype is Keyword:
return
else:
yield item
elif item.ttype is Keyword and item.value.upper() == 'FROM':
from_seen = True
def extract_table_identifiers(token_stream):
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
yield identifier.get_name()
elif isinstance(item, Identifier):
yield item.get_name()
# above some tables names are identified as keywords...
elif item.ttype is Keyword:
yield item.value
def extract_tables(sql):
stream = extract_from_part(sqlparse.parse(sql)[0])
return list(extract_table_identifiers(stream))
if __name__ == '__main__':
sql = """
select K.a,K.b from (select H.b from (select G.c from (select F.d from
(select E.e from A, B, C, D, E), F), G), H), I, J, K order by 1,2;
"""
tables = ', '.join(extract_tables(sql))
print(f'Tables: {tables}')
| true
| true
|
f717faec2a4bce7642e1b452032a060d7e5853ec
| 3,970
|
py
|
Python
|
01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py
|
ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY
|
d04e39e7312f04307f12257157c19ea40da2f11a
|
[
"Apache-2.0"
] | 33
|
2020-09-01T20:10:28.000Z
|
2022-02-11T06:15:55.000Z
|
01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py
|
ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY
|
d04e39e7312f04307f12257157c19ea40da2f11a
|
[
"Apache-2.0"
] | null | null | null |
01-datamodeling/project02-data-modeling-with-cassandra/cassandra_mgr.py
|
ultranet1/DATA-ENGINEERING-NANODEGREE-UDACITY
|
d04e39e7312f04307f12257157c19ea40da2f11a
|
[
"Apache-2.0"
] | 64
|
2021-01-21T11:55:34.000Z
|
2022-03-10T08:14:11.000Z
|
from cassandra.cluster import Cluster
class CassandraMgr:
"""
Manage orerations with Apache Cassandra.
"""
def __init__(self, config):
"""
Constructor.
:param config: configuration of the cluster of Apache Cassandra -> ip, replicator factor, replication class and
key space.
"""
self.ip = config['ip']
self.replication_factor = config["replication_factor"]
self.replication_class = config["replication_class"]
self.key_space = config["key_space"]
self.cluster = Cluster(self.ip)
def connect(self):
"""
Create a connection from the configuration passed in class constructor.
Creates a Keyspace an returns a session.
:return: session.
"""
session = self.cluster.connect()
cql_create_keyspace = """
CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : '%s', 'replication_factor' : %s }
""" % (self.key_space, self.replication_class, self.replication_factor)
try:
session.execute(cql_create_keyspace)
except Exception as e:
print(e)
try:
session.set_keyspace(self.key_space )
except Exception as e:
print(e)
return session
def disconnect(self, session):
"""
Finalise the session and cluster shutdown.
:param session: session
"""
session.shutdown()
self.cluster.shutdown()
@staticmethod
def create_table(session, table, fields, primary_key):
"""
Create an Apache Cassandra table.
:param session: session.
:param table: table to create.
:param fields: fields of the table.
:param primary_key: primary key of the table.
"""
fields_string = ", ".join(fields)
query = "CREATE TABLE IF NOT EXISTS %s (%s , PRIMARY KEY %s)" % (table, fields_string, primary_key)
try:
session.execute(query)
except Exception as e:
print(e)
@staticmethod
def insert_cassandra_from_df(session, table, columns_table, df):
"""
Insert a pandas dataframe into a Cassandra table.
:param session: session.
:param table: table where insert rows.
:param columns_table: columns of the table.
:param df: pandas dataframe to insert into the table.
"""
query = CassandraMgr.get_insert_query(table, columns_table)
for index, row in df.iterrows():
session.execute(query, (row[x] for x in df.columns))
@staticmethod
def select(session, fields, table, filters):
"""
Make a select to an apache Cassandra table.
:param session: session.
:param fields: projection of the select statement
:param table: table
:param filters: filters of the WHERE clause.
:return: list of rows of the request.
"""
fields_string = ", ".join(fields)
query = "select %s from %s WHERE %s" % (fields_string, table, filters)
try:
rows = session.execute(query)
except Exception as e:
print(e)
return rows
@staticmethod
def get_insert_query(table: str, columns):
"""
Builds an INSERT statement string.
:param table: table
:param columns: columns to insert.
:return: string with INSERT query.
"""
query = "INSERT INTO %s (%s) " % (table, ", ".join(columns))
query = query + " VALUES (" + ", ".join(["%s"] * len(columns)) + ") "
return query
@staticmethod
def drop_table(session, table):
"""
Drop an Apache Cassandra table.
:param session: session.
:param table: table to drop.
"""
query = "drop table %s" % table
try:
session.execute(query)
except Exception as e:
print(e)
| 29.626866
| 119
| 0.58262
|
from cassandra.cluster import Cluster
class CassandraMgr:
def __init__(self, config):
self.ip = config['ip']
self.replication_factor = config["replication_factor"]
self.replication_class = config["replication_class"]
self.key_space = config["key_space"]
self.cluster = Cluster(self.ip)
def connect(self):
session = self.cluster.connect()
cql_create_keyspace = """
CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : '%s', 'replication_factor' : %s }
""" % (self.key_space, self.replication_class, self.replication_factor)
try:
session.execute(cql_create_keyspace)
except Exception as e:
print(e)
try:
session.set_keyspace(self.key_space )
except Exception as e:
print(e)
return session
def disconnect(self, session):
session.shutdown()
self.cluster.shutdown()
@staticmethod
def create_table(session, table, fields, primary_key):
fields_string = ", ".join(fields)
query = "CREATE TABLE IF NOT EXISTS %s (%s , PRIMARY KEY %s)" % (table, fields_string, primary_key)
try:
session.execute(query)
except Exception as e:
print(e)
@staticmethod
def insert_cassandra_from_df(session, table, columns_table, df):
query = CassandraMgr.get_insert_query(table, columns_table)
for index, row in df.iterrows():
session.execute(query, (row[x] for x in df.columns))
@staticmethod
def select(session, fields, table, filters):
fields_string = ", ".join(fields)
query = "select %s from %s WHERE %s" % (fields_string, table, filters)
try:
rows = session.execute(query)
except Exception as e:
print(e)
return rows
@staticmethod
def get_insert_query(table: str, columns):
query = "INSERT INTO %s (%s) " % (table, ", ".join(columns))
query = query + " VALUES (" + ", ".join(["%s"] * len(columns)) + ") "
return query
@staticmethod
def drop_table(session, table):
query = "drop table %s" % table
try:
session.execute(query)
except Exception as e:
print(e)
| true
| true
|
f717fc05188de674e02f5c99af90516ab0930a2f
| 814
|
py
|
Python
|
backend/server/apps/notes/migrations/0001_initial.py
|
Bonifase/django-react
|
ea18c3192ee28ce2291d6cabb08addd8cf8eb27e
|
[
"MIT"
] | 508
|
2020-10-05T14:03:16.000Z
|
2022-03-30T09:04:42.000Z
|
backend/server/apps/notes/migrations/0001_initial.py
|
Bonifase/django-react
|
ea18c3192ee28ce2291d6cabb08addd8cf8eb27e
|
[
"MIT"
] | 17
|
2020-12-10T08:23:55.000Z
|
2022-03-20T17:10:37.000Z
|
backend/server/apps/notes/migrations/0001_initial.py
|
Bonifase/django-react
|
ea18c3192ee28ce2291d6cabb08addd8cf8eb27e
|
[
"MIT"
] | 80
|
2020-12-23T13:59:14.000Z
|
2022-03-12T03:52:21.000Z
|
# Generated by Django 3.1.3 on 2020-11-09 10:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(blank=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 30.148148
| 124
| 0.637592
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(blank=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
f717fceec4380b1677eb124c6b56d04232940628
| 8,716
|
py
|
Python
|
tests/api_connexion/endpoints/test_task_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79
|
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
tests/api_connexion/endpoints/test_task_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153
|
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
tests/api_connexion/endpoints/test_task_endpoint.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23
|
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from datetime import datetime
from airflow import DAG
from airflow.models import DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
class TestTaskEndpoint(unittest.TestCase):
dag_id = "test_dag"
task_id = "op1"
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
DummyOperator(task_id=cls.task_id)
cls.dag = dag # type:ignore
dag_bag = DagBag(os.devnull, include_examples=False)
dag_bag.dags = {dag.dag_id: dag}
cls.app.dag_bag = dag_bag # type:ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
def setUp(self) -> None:
self.clean_db()
self.client = self.app.test_client() # type:ignore
def tearDown(self) -> None:
self.clean_db()
class TestGetTask(TestTaskEndpoint):
def test_should_respond_200(self):
expected = {
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_200_serialized(self):
# Create empty app with empty dagbag to check if DAG is read from db
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
app_serialized = app.create_app(testing=True)
dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
app_serialized.dag_bag = dag_bag
client = app_serialized.test_client()
SerializedDagModel.write_dag(self.dag)
expected = {
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
response = client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_404(self):
task_id = "xxxx_not_existing"
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetTasks(TestTaskEndpoint):
def test_should_respond_200(self):
expected = {
"tasks": [
{
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
],
"total_entries": 1,
}
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_404(self):
dag_id = "xxxx_not_existing"
response = self.client.get(f"/api/v1/dags/{dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks")
assert_401(response)
| 38.566372
| 109
| 0.58559
|
import os
import unittest
from datetime import datetime
from airflow import DAG
from airflow.models import DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
class TestTaskEndpoint(unittest.TestCase):
dag_id = "test_dag"
task_id = "op1"
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True)
create_user(
cls.app,
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions")
with DAG(cls.dag_id, start_date=datetime(2020, 6, 15), doc_md="details") as dag:
DummyOperator(task_id=cls.task_id)
cls.dag = dag
dag_bag = DagBag(os.devnull, include_examples=False)
dag_bag.dags = {dag.dag_id: dag}
cls.app.dag_bag = dag_bag
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test")
delete_user(cls.app, username="test_no_permissions")
def setUp(self) -> None:
self.clean_db()
self.client = self.app.test_client()
def tearDown(self) -> None:
self.clean_db()
class TestGetTask(TestTaskEndpoint):
def test_should_respond_200(self):
expected = {
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_200_serialized(self):
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
app_serialized = app.create_app(testing=True)
dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
app_serialized.dag_bag = dag_bag
client = app_serialized.test_client()
SerializedDagModel.write_dag(self.dag)
expected = {
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
response = client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_404(self):
task_id = "xxxx_not_existing"
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks/{task_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetTasks(TestTaskEndpoint):
def test_should_respond_200(self):
expected = {
"tasks": [
{
"class_ref": {
"class_name": "DummyOperator",
"module_path": "airflow.operators.dummy",
},
"depends_on_past": False,
"downstream_task_ids": [],
"end_date": None,
"execution_timeout": None,
"extra_links": [],
"owner": "airflow",
"pool": "default_pool",
"pool_slots": 1.0,
"priority_weight": 1.0,
"queue": "default",
"retries": 0.0,
"retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
"retry_exponential_backoff": False,
"start_date": "2020-06-15T00:00:00+00:00",
"task_id": "op1",
"template_fields": [],
"trigger_rule": "all_success",
"ui_color": "#e8f7e4",
"ui_fgcolor": "#000",
"wait_for_downstream": False,
"weight_rule": "downstream",
}
],
"total_entries": 1,
}
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
def test_should_respond_404(self):
dag_id = "xxxx_not_existing"
response = self.client.get(f"/api/v1/dags/{dag_id}/tasks", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks")
assert_401(response)
| true
| true
|
f717fd45456b4946ca8ef640da5053f02d8f3da7
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_simd.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_simd.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/numpy/core/tests/test_simd.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/70/91/d2/f5d75cdb5bed21a1004b1a4f5dbe1978b2a45ab6bedfa5d6b5bbb225e8
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/70/91/d2/f5d75cdb5bed21a1004b1a4f5dbe1978b2a45ab6bedfa5d6b5bbb225e8
| false
| true
|
f717fe26b70d466a7b574b0a4659b534cb647013
| 961
|
py
|
Python
|
testapp/app.py
|
movermeyer/Flask-Dropbox
|
bfc59c64a6a55b50cacb9b362ed520c50705778a
|
[
"BSD-3-Clause"
] | 22
|
2015-02-07T21:37:36.000Z
|
2021-12-06T07:12:49.000Z
|
testapp/app.py
|
movermeyer/Flask-Dropbox
|
bfc59c64a6a55b50cacb9b362ed520c50705778a
|
[
"BSD-3-Clause"
] | 33
|
2020-03-16T03:48:37.000Z
|
2021-08-02T03:40:08.000Z
|
testapp/app.py
|
movermeyer/Flask-Dropbox
|
bfc59c64a6a55b50cacb9b362ed520c50705778a
|
[
"BSD-3-Clause"
] | 6
|
2017-02-04T04:29:15.000Z
|
2021-12-06T07:12:51.000Z
|
import os
import sys
from flask import Flask
from flask.ext.dropbox import Dropbox
from flask.ext.lazyviews import LazyViews
from flask.ext.script import Manager
import settings
# Initialize and configure Flask app
app = Flask(__name__)
app.config.from_object(settings)
# Setup Dropbox and script extensions
dropbox = Dropbox(app)
dropbox.register_blueprint(url_prefix='/dropbox')
manager = Manager(app)
# Add test project views
views = LazyViews(app, 'testapp.views')
views.add('/', 'home')
views.add('/delete/<path:filename>', 'delete')
views.add('/download/<path:filename>', 'download', endpoint='download')
views.add('/files', 'files')
views.add('/media/<path:filename>',
'download',
defaults={'media': True},
endpoint='media')
views.add('/session/clear', 'session_clear')
views.add('/session/dump', 'session_dump')
views.add('/success/<path:filename>', 'success')
views.add('/upload', 'upload', methods=('GET', 'POST'))
| 27.457143
| 71
| 0.715921
|
import os
import sys
from flask import Flask
from flask.ext.dropbox import Dropbox
from flask.ext.lazyviews import LazyViews
from flask.ext.script import Manager
import settings
app = Flask(__name__)
app.config.from_object(settings)
dropbox = Dropbox(app)
dropbox.register_blueprint(url_prefix='/dropbox')
manager = Manager(app)
views = LazyViews(app, 'testapp.views')
views.add('/', 'home')
views.add('/delete/<path:filename>', 'delete')
views.add('/download/<path:filename>', 'download', endpoint='download')
views.add('/files', 'files')
views.add('/media/<path:filename>',
'download',
defaults={'media': True},
endpoint='media')
views.add('/session/clear', 'session_clear')
views.add('/session/dump', 'session_dump')
views.add('/success/<path:filename>', 'success')
views.add('/upload', 'upload', methods=('GET', 'POST'))
| true
| true
|
f7180103c1420b4319a7785c69d208a63ea1cce0
| 3,462
|
py
|
Python
|
Code/all-starter-code/bases.py
|
stasi815/CS-1.3-Core-Data-Structures
|
8586d92a841a80bbfbb0f4acfabda8552f04ff92
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/bases.py
|
stasi815/CS-1.3-Core-Data-Structures
|
8586d92a841a80bbfbb0f4acfabda8552f04ff92
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/bases.py
|
stasi815/CS-1.3-Core-Data-Structures
|
8586d92a841a80bbfbb0f4acfabda8552f04ff92
|
[
"MIT"
] | null | null | null |
#!python
import string
# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace
def decode(digits, base):
"""Decode given digits in given base to number in base 10.
digits: str -- string representation of number (in given base)
base: int -- base of given number
return: int -- integer representation of number (in base 10)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, f'base is out of range: {base}'
# Decode digits from any base (2 up to 36)
# for each digit, use the position or the index an the base to digit * base ** index
decimal_num = 0
digits = digits[::-1]
for i in range(len(digits)):
digit = int(digits[i], base=base)
decimal_num += digit * base ** i
return decimal_num
def encode(number, base):
"""Encode given number in base 10 to digits in given base.
number: int -- integer representation of number (in base 10)
base: int -- base to convert to
return: str -- string representation of number (in given base)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base <= 36, f'base is out of range: {base}'
# Handle unsigned numbers only for now
assert number >= 0, f'number is negative: {number}'
# binary (base 2)
# 10 -> 2:
# 10/2 = 5: 0
# 5/2 = 2: 1
# 2/2 = 1: 0
# 1/2 = 0: 1 - then read the remainders bottom up: 1010 = 1 * 2^3 + 0 * 2^2 + 1 * 2^1 + 0 * 2^0
# Encode number in any base (2 up to 36)
result = ""
while number > 0:
remainder = number % base
number -= remainder
number = number // base
if remainder > 9:
remainder = string.ascii_lowercase[remainder-10]
result = str(remainder) + result
return result
def convert(digits, base1, base2):
"""Convert given digits in base1 to digits in base2.
digits: str -- string representation of number (in base1)
base1: int -- base of given number
base2: int -- base to convert to
return: str -- string representation of number (in base2)"""
# Handle up to base 36 [0-9a-z]
assert 2 <= base1 <= 36, f'base1 is out of range: {base1}'
assert 2 <= base2 <= 36, f'base2 is out of range: {base2}'
# start by using decode to decoded digits in base 10 form
# use encode to turn base 10 digits into desired base form
# Convert digits from any base to any base (2 up to 36)
decoded_base10 = decode(digits, base1)
result = encode(decoded_base10, base2)
return result
def main():
"""Read command-line arguments and convert given digits between bases."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
# Convert given digits between bases
result = convert(digits, base1, base2)
print(f'{digits} in base {base1} is {result} in base {base2}')
else:
print(f'Usage: {sys.argv[0]} digits base1 base2')
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
| 34.62
| 115
| 0.634027
|
import string
def decode(digits, base):
assert 2 <= base <= 36, f'base is out of range: {base}'
decimal_num = 0
digits = digits[::-1]
for i in range(len(digits)):
digit = int(digits[i], base=base)
decimal_num += digit * base ** i
return decimal_num
def encode(number, base):
assert 2 <= base <= 36, f'base is out of range: {base}'
assert number >= 0, f'number is negative: {number}'
result = ""
while number > 0:
remainder = number % base
number -= remainder
number = number // base
if remainder > 9:
remainder = string.ascii_lowercase[remainder-10]
result = str(remainder) + result
return result
def convert(digits, base1, base2):
assert 2 <= base1 <= 36, f'base1 is out of range: {base1}'
assert 2 <= base2 <= 36, f'base2 is out of range: {base2}'
decoded_base10 = decode(digits, base1)
result = encode(decoded_base10, base2)
return result
def main():
import sys
args = sys.argv[1:]
if len(args) == 3:
digits = args[0]
base1 = int(args[1])
base2 = int(args[2])
result = convert(digits, base1, base2)
print(f'{digits} in base {base1} is {result} in base {base2}')
else:
print(f'Usage: {sys.argv[0]} digits base1 base2')
print('Converts digits from base1 to base2')
if __name__ == '__main__':
main()
| true
| true
|
f71801af019ea004db2031fbf73a7074a38968cc
| 6,665
|
py
|
Python
|
eval_ke.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 2,649
|
2018-08-03T14:18:00.000Z
|
2022-03-31T08:08:17.000Z
|
eval_ke.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 95
|
2018-08-13T01:46:03.000Z
|
2022-03-13T08:38:14.000Z
|
eval_ke.py
|
naviocean/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
[
"MIT"
] | 549
|
2018-08-06T08:09:22.000Z
|
2022-03-31T08:08:21.000Z
|
"""
Script for evaluating trained model on Keras (validate/test).
"""
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
"""
Parse python script parameters.
Returns:
-------
ArgumentParser
Resulted args.
"""
parser = argparse.ArgumentParser(
description="Evaluate a model for image classification (Keras)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--rec-train",
type=str,
default="../imgclsmob_data/imagenet_rec/train.rec",
help="the training data")
parser.add_argument(
"--rec-train-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/train.idx",
help="the index of training data")
parser.add_argument(
"--rec-val",
type=str,
default="../imgclsmob_data/imagenet_rec/val.rec",
help="the validation data")
parser.add_argument(
"--rec-val-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/val.idx",
help="the index of validation data")
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from github repo")
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="data type for training")
parser.add_argument(
"--resume",
type=str,
default="",
help="resume from previously saved parameters if not None")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers")
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="training batch size per device (CPU/GPU)")
parser.add_argument(
"--save-dir",
type=str,
default="",
help="directory of saved models and log-files")
parser.add_argument(
"--logging-file-name",
type=str,
default="train.log",
help="filename of training log")
parser.add_argument(
"--log-packages",
type=str,
default="keras, mxnet, tensorflow, tensorflow-gpu",
help="list of python packages for logging")
parser.add_argument(
"--log-pip-packages",
type=str,
default="keras, keras-mxnet, mxnet, mxnet-cu110",
help="list of pip packages for logging")
args = parser.parse_args()
return args
def test(net,
val_gen,
val_size,
batch_size,
num_gpus,
calc_weight_count=False,
extended_log=False):
"""
Main test routine.
Parameters:
----------
net : Model
Model.
val_gen : generator
Data loader.
val_size : int
Size of validation subset.
batch_size : int
Batch size.
num_gpus : int
Number of used GPUs.
calc_weight_count : bool, default False
Whether to calculate count of weights.
extended_log : bool, default False
Whether to log more precise accuracy values.
"""
keras.backend.set_learning_phase(0)
backend_agnostic_compile(
model=net,
loss="categorical_crossentropy",
optimizer=keras.optimizers.SGD(
lr=0.01,
momentum=0.0,
decay=0.0,
nesterov=False),
metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
num_gpus=num_gpus)
# net.summary()
tic = time.time()
score = net.evaluate_generator(
generator=val_gen,
steps=(val_size // batch_size),
verbose=True)
err_top1_val = 1.0 - score[1]
err_top5_val = 1.0 - score[2]
if calc_weight_count:
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
logging.info("Model: {} trainable parameters".format(weight_count))
if extended_log:
logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
top1=err_top1_val, top5=err_top5_val))
else:
logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
top1=err_top1_val, top5=err_top5_val))
logging.info("Time cost: {:.4f} sec".format(
time.time() - tic))
def main():
"""
Main body of script.
"""
args = parse_args()
_, log_file_exist = initialize_logging(
logging_dir_path=args.save_dir,
logging_file_name=args.logging_file_name,
script_args=args,
log_packages=args.log_packages,
log_pip_packages=args.log_pip_packages)
batch_size = prepare_ke_context(
num_gpus=args.num_gpus,
batch_size=args.batch_size)
net = prepare_model(
model_name=args.model,
use_pretrained=args.use_pretrained,
pretrained_model_file_path=args.resume.strip())
num_classes = net.classes if hasattr(net, "classes") else 1000
input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
train_data, val_data = get_data_rec(
rec_train=args.rec_train,
rec_train_idx=args.rec_train_idx,
rec_val=args.rec_val,
rec_val_idx=args.rec_val_idx,
batch_size=batch_size,
num_workers=args.num_workers,
input_image_size=input_image_size,
resize_inv_factor=args.resize_inv_factor,
only_val=True)
val_gen = get_data_generator(
data_iterator=val_data,
num_classes=num_classes)
val_size = 50000
assert (args.use_pretrained or args.resume.strip())
test(
net=net,
val_gen=val_gen,
val_size=val_size,
batch_size=batch_size,
num_gpus=args.num_gpus,
calc_weight_count=True,
extended_log=True)
if __name__ == "__main__":
main()
| 28.361702
| 118
| 0.616954
|
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
parser = argparse.ArgumentParser(
description="Evaluate a model for image classification (Keras)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--rec-train",
type=str,
default="../imgclsmob_data/imagenet_rec/train.rec",
help="the training data")
parser.add_argument(
"--rec-train-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/train.idx",
help="the index of training data")
parser.add_argument(
"--rec-val",
type=str,
default="../imgclsmob_data/imagenet_rec/val.rec",
help="the validation data")
parser.add_argument(
"--rec-val-idx",
type=str,
default="../imgclsmob_data/imagenet_rec/val.idx",
help="the index of validation data")
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from github repo")
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="data type for training")
parser.add_argument(
"--resume",
type=str,
default="",
help="resume from previously saved parameters if not None")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers")
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="training batch size per device (CPU/GPU)")
parser.add_argument(
"--save-dir",
type=str,
default="",
help="directory of saved models and log-files")
parser.add_argument(
"--logging-file-name",
type=str,
default="train.log",
help="filename of training log")
parser.add_argument(
"--log-packages",
type=str,
default="keras, mxnet, tensorflow, tensorflow-gpu",
help="list of python packages for logging")
parser.add_argument(
"--log-pip-packages",
type=str,
default="keras, keras-mxnet, mxnet, mxnet-cu110",
help="list of pip packages for logging")
args = parser.parse_args()
return args
def test(net,
val_gen,
val_size,
batch_size,
num_gpus,
calc_weight_count=False,
extended_log=False):
keras.backend.set_learning_phase(0)
backend_agnostic_compile(
model=net,
loss="categorical_crossentropy",
optimizer=keras.optimizers.SGD(
lr=0.01,
momentum=0.0,
decay=0.0,
nesterov=False),
metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
num_gpus=num_gpus)
tic = time.time()
score = net.evaluate_generator(
generator=val_gen,
steps=(val_size // batch_size),
verbose=True)
err_top1_val = 1.0 - score[1]
err_top5_val = 1.0 - score[2]
if calc_weight_count:
weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
logging.info("Model: {} trainable parameters".format(weight_count))
if extended_log:
logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
top1=err_top1_val, top5=err_top5_val))
else:
logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
top1=err_top1_val, top5=err_top5_val))
logging.info("Time cost: {:.4f} sec".format(
time.time() - tic))
def main():
args = parse_args()
_, log_file_exist = initialize_logging(
logging_dir_path=args.save_dir,
logging_file_name=args.logging_file_name,
script_args=args,
log_packages=args.log_packages,
log_pip_packages=args.log_pip_packages)
batch_size = prepare_ke_context(
num_gpus=args.num_gpus,
batch_size=args.batch_size)
net = prepare_model(
model_name=args.model,
use_pretrained=args.use_pretrained,
pretrained_model_file_path=args.resume.strip())
num_classes = net.classes if hasattr(net, "classes") else 1000
input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
train_data, val_data = get_data_rec(
rec_train=args.rec_train,
rec_train_idx=args.rec_train_idx,
rec_val=args.rec_val,
rec_val_idx=args.rec_val_idx,
batch_size=batch_size,
num_workers=args.num_workers,
input_image_size=input_image_size,
resize_inv_factor=args.resize_inv_factor,
only_val=True)
val_gen = get_data_generator(
data_iterator=val_data,
num_classes=num_classes)
val_size = 50000
assert (args.use_pretrained or args.resume.strip())
test(
net=net,
val_gen=val_gen,
val_size=val_size,
batch_size=batch_size,
num_gpus=args.num_gpus,
calc_weight_count=True,
extended_log=True)
if __name__ == "__main__":
main()
| true
| true
|
f71802a5127f7c7fb60315e16f2f50fa2f4a7235
| 1,504
|
py
|
Python
|
app/auth/views.py
|
Bchizi/Pitch-app
|
f52398d270e812eab70b66df9f7f80d579bab7d4
|
[
"CNRI-Python",
"Info-ZIP"
] | null | null | null |
app/auth/views.py
|
Bchizi/Pitch-app
|
f52398d270e812eab70b66df9f7f80d579bab7d4
|
[
"CNRI-Python",
"Info-ZIP"
] | null | null | null |
app/auth/views.py
|
Bchizi/Pitch-app
|
f52398d270e812eab70b66df9f7f80d579bab7d4
|
[
"CNRI-Python",
"Info-ZIP"
] | null | null | null |
from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "Login"
return render_template('auth/login.html',form =form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to one minute pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',form =form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
| 31.333333
| 165
| 0.696809
|
from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_user,logout_user,login_required
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "Login"
return render_template('auth/login.html',form =form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,firstname= form.firstname.data,lastname= form.lastname.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to one minute pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',form =form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
| true
| true
|
f71802e152bdc5a880b16e7aa88ec372a25c5854
| 193
|
py
|
Python
|
mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py
|
Momscode-Technologies/mani_sales
|
e3c8de6b50367bfd15adadf38c658e89559e71ab
|
[
"MIT"
] | null | null | null |
mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py
|
Momscode-Technologies/mani_sales
|
e3c8de6b50367bfd15adadf38c658e89559e71ab
|
[
"MIT"
] | null | null | null |
mani_sales/mani_sales/doctype/linked_suppliers/linked_suppliers.py
|
Momscode-Technologies/mani_sales
|
e3c8de6b50367bfd15adadf38c658e89559e71ab
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, jan and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class LinkedSuppliers(Document):
pass
| 21.444444
| 49
| 0.792746
|
from frappe.model.document import Document
class LinkedSuppliers(Document):
pass
| true
| true
|
f718043592242ea890cc97835b3f6db1a9ca2b43
| 322
|
py
|
Python
|
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
firmware/adafruit-circuitpython-bundle-5.x-mpy-20200915/examples/bd3491fs_simpletest.py
|
freeglow/microcontroller-cpy
|
5adfda49da6eefaece81be2a2f26122d68736355
|
[
"MIT"
] | null | null | null |
import board
import busio
import adafruit_bd3491fs
i2c = busio.I2C(board.SCL, board.SDA)
bd3491fs = adafruit_bd3491fs.BD3491FS(i2c)
bd3491fs.active_input = adafruit_bd3491fs.Input.A
bd3491fs.input_gain = adafruit_bd3491fs.Level.LEVEL_20DB
bd3491fs.channel_1_attenuation = 0
bd3491fs.channel_2_attenuation = 0
| 26.833333
| 57
| 0.810559
|
import board
import busio
import adafruit_bd3491fs
i2c = busio.I2C(board.SCL, board.SDA)
bd3491fs = adafruit_bd3491fs.BD3491FS(i2c)
bd3491fs.active_input = adafruit_bd3491fs.Input.A
bd3491fs.input_gain = adafruit_bd3491fs.Level.LEVEL_20DB
bd3491fs.channel_1_attenuation = 0
bd3491fs.channel_2_attenuation = 0
| true
| true
|
f71806245033ff31b7f8e029e27f81e487b11834
| 20,468
|
py
|
Python
|
cadnano/views/pathview/tools/pathselection.py
|
sherwoodyao/cadnano2.5
|
ce6ff019b88ee7728de947bd86b35861cf57848d
|
[
"BSD-3-Clause"
] | 69
|
2015-01-13T02:54:40.000Z
|
2022-03-27T14:25:51.000Z
|
cadnano/views/pathview/tools/pathselection.py
|
scholer/cadnano2.5
|
ce6ff019b88ee7728de947bd86b35861cf57848d
|
[
"BSD-3-Clause"
] | 127
|
2015-01-01T06:26:34.000Z
|
2022-03-02T12:48:05.000Z
|
cadnano/views/pathview/tools/pathselection.py
|
scholer/cadnano2.5
|
ce6ff019b88ee7728de947bd86b35861cf57848d
|
[
"BSD-3-Clause"
] | 48
|
2015-01-22T19:57:49.000Z
|
2022-03-27T14:27:53.000Z
|
# -*- coding: utf-8 -*-
import logging
from math import floor
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QPainterPath,
QKeyEvent,
QMouseEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsItemGroup,
QGraphicsPathItem,
QGraphicsSceneMouseEvent,
)
from cadnano.gui.palette import getPenObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview import (
PathRootItemT,
)
from cadnano.cntypes import (
Vec2T,
DocT
)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SelectionItemGroup(QGraphicsItemGroup):
"""SelectionItemGroup
Attributes:
getR (TYPE): Description
selectionbox (TYPE): SelectionBox
translateR (TYPE): Description
viewroot: Description
"""
def __init__(self, boxtype: QGraphicsItem,
constraint: str,
viewroot: PathRootItemT):
"""
Args:
boxtype: :class:`EndpointHandleSelectionBox` or
:class:`VirtualHelixHandleSelectionBox` instance
constraint: ``x`` or ``y``. Default to ``y`` (up and down)
viewroot: view root item and object parent
"""
super(SelectionItemGroup, self).__init__(viewroot)
self.viewroot: PathRootItemT = viewroot
self.setFiltersChildEvents(True)
# LOOK at Qt Source for deprecated code to replace this behavior
# self.setHandlesChildEvents(True) # commented out NC
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.setFlag(QGraphicsItem.ItemIsFocusable) # for keyPressEvents
self.setFlag(QGraphicsItem.ItemHasNoContents)
self._rect = QRectF()
self._PEN = getPenObj(styles.BLUE_STROKE,
styles.PATH_SELECTBOX_STROKE_WIDTH)
self.selectionbox = boxtype(self)
self._drag_enable = False
self._dragged = False
self._r0 = 0 # save original mousedown
self._r = 0 # latest position for moving
# self._lastKid = 0
# this keeps track of mousePressEvents within the class
# to aid in intellignetly removing items from the group
self._added_to_press_list = False
self._pending_to_add_dict = {}
if constraint == 'y':
self.getR = self.selectionbox.getY
self.translateR = self.selectionbox.translateY
else:
self.getR = self.selectionbox.getX
self.translateR = self.selectionbox.translateX
self._normal_select = True
self.setZValue(styles.ZPATHSELECTION)
# end def
# def paint(self, painter, option, widget):
# painter.drawRect(self.boundingRect())
# # end def
def pendToAdd(self, item):
"""
Args:
item (TYPE): Description
"""
self._pending_to_add_dict[item] = True
# end def
def isPending(self, item):
"""
Args:
item (TYPE): Description
Returns:
TYPE: Description
"""
return item in self._pending_to_add_dict
# end def
def document(self) -> DocT:
"""
Returns:
:class:`Document`
"""
return self.viewroot.document()
# end def
def pendToRemove(self, item):
"""
Args:
item (TYPE): Description
"""
if item in self._pending_to_add_dict:
del self._pending_to_add_dict[item]
# end def
def setNormalSelect(self, bool_val: bool):
"""
Args:
bool_val: Description
"""
self._normal_select = bool_val
# end def
def isNormalSelect(self) -> bool:
"""
Returns:
is it normal select?
"""
return self._normal_select
# end def
def processPendingToAddList(self):
"""
Adds to the local selection and the document if required
"""
doc = self.document()
p2add = self._pending_to_add_dict
# logger.debug("processPendingToAddList")
if len(p2add) > 0:
plist = list(self._pending_to_add_dict.keys())
for item in plist:
if p2add[item]:
p2add[item] = False
# logger.debug("just checking1", item, item.group(), item.parentItem())
self.addToGroup(item)
item.modelSelect(doc)
# end for
# logger.debug('finished')
self._pending_to_add_dict = {}
doc.updateStrandSelection()
# end def
def selectionLock(self):
"""
Returns:
TYPE: Description
"""
return self.viewroot.selectionLock()
# end def
def setSelectionLock(self, selection_group):
"""
Args:
selection_group (TYPE): Description
"""
self.viewroot.setSelectionLock(selection_group)
# end def
def keyPressEvent(self, event: QKeyEvent):
"""
Must intercept invalid input events. Make changes here
Args:
event (TYPE): Description
"""
key = event.key()
if key in [Qt.Key_Backspace, Qt.Key_Delete]:
self.selectionbox.deleteSelection()
self.clearSelection(False)
return QGraphicsItemGroup.keyPressEvent(self, event)
else:
return QGraphicsItemGroup.keyPressEvent(self, event)
# end def
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
"""Handler for user mouse press.
Args:
event: Contains item, scene, and screen
coordinates of the the event, and previous event.
"""
# self.show()
if event.button() != Qt.LeftButton:
return QGraphicsItemGroup.mousePressEvent(self, event)
else:
self._drag_enable = True
# required to get the itemChanged event to work
# correctly for this
self.setSelected(True)
# self.selectionbox.resetTransform()
self.selectionbox.resetPosition()
self.selectionbox.refreshPath()
# self.selectionbox.resetTransform()
self.selectionbox.resetPosition()
self.selectionbox.show()
# for some reason we need to skip the first mouseMoveEvent
self._dragged = False
if self._added_to_press_list is False:
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return QGraphicsItemGroup.mousePressEvent(self, event)
# end def
def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
"""
Args:
event: Description
"""
if self._drag_enable is True:
# map the item to the scene coordinates
# to help keep coordinates uniform
rf = self.getR(self.mapFromScene(QPointF(event.scenePos())))
# for some reason we need to skip the first mouseMoveEvent
if self._dragged is False:
self._dragged = True
self._r0 = rf
# end if
else:
delta = self.selectionbox.delta(rf, self._r0)
self.translateR(delta)
# logger.debug('mouse move path selectionbox', delta, rf, self._r0)
# end else
self._r = rf
# end if
else:
QGraphicsItemGroup.mouseMoveEvent(self, event)
# end else
# end def
def customMouseRelease(self, event: QMouseEvent):
"""
Args:
event: Description
"""
self.selectionbox.setParentItem(self.viewroot)
self.selectionbox.hide()
self.selectionbox.resetTransform()
self._drag_enable = False
# now do stuff
if not (self._r0 == 0 and self._r == 0):
modifiers = event.modifiers()
self.selectionbox.processSelectedItems(self._r0, self._r, modifiers)
# end if
self._r0 = 0 # reset
self._r = 0 # reset
self.setFocus() # needed to get keyPresses post a move
self._added_to_press_list = False
# end def
def resetSelection(self):
"""Summary
Returns:
TYPE: Description
"""
self._pending_to_add_dict = {}
self._added_to_press_list = False
self.clearSelection(False)
self.setSelectionLock(None)
self.selectionbox.setParentItem(self.viewroot)
self.setParentItem(self.viewroot)
# end def
def clearSelection(self, value):
"""value is for keyPressEvents
Arguments:
value (QVariant): resolves in Python as an integer
"""
if value == False: # noqa
self.selectionbox.hide()
self.selectionbox.resetPosition()
self.removeSelectedItems()
self.viewroot.setSelectionLock(None)
self.clearFocus() # this is to disable delete keyPressEvents
self.prepareGeometryChange()
self._rect.setWidth(0)
# self._rect = QRectF()
# end if
else:
self.setFocus() # this is to get delete keyPressEvents
self.update(self.boundingRect())
# end def
def itemChange(self, change, value):
"""docstring for itemChange
Arguments:
change (GraphicsItemChange): see http://doc.qt.io/qt-5/qgraphicsitem.html#GraphicsItemChange-enum
value (QVariant): resolves in Python as an integer
"""
# logger.debug("ps itemChange")
if change == QGraphicsItem.ItemSelectedChange:
# logger.debug("isc", value)
if value == False: # noqa
self.clearSelection(False)
return False
else:
return True
elif change == QGraphicsItem.ItemChildAddedChange:
# logger.debug("icac")
if self._added_to_press_list is False:
# logger.debug("kid added")
self.setFocus() # this is to get delete keyPressEvents
self.selectionbox.boxParent()
# self.setParentItem(self.selectionbox.boxParent())
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return
return QGraphicsItemGroup.itemChange(self, change, value)
# end def
def removeChild(self, child):
"""
remove only the child and ask it to
restore it's original parent
Args:
child (TYPE): Description
"""
doc = self.document()
self.removeFromGroup(child)
child.modelDeselect(doc)
# end def
def removeSelectedItems(self):
"""docstring for removeSelectedItems
"""
doc = self.document()
for item in self.childItems():
self.removeFromGroup(item)
item.modelDeselect(doc)
# end for
doc.updateStrandSelection()
# end def
def setBoundingRect(self, rect):
"""Summary
Args:
rect (TYPE): Description
Returns:
TYPE: Description
"""
self.prepareGeometryChange()
self._rect = rect
# end def
def boundingRect(self):
"""Summary
Returns:
TYPE: Description
"""
return self._rect
# end class
class VirtualHelixHandleSelectionBox(QGraphicsPathItem):
"""
docstring for VirtualHelixHandleSelectionBox
"""
_HELIX_HEIGHT = styles.PATH_HELIX_HEIGHT + styles.PATH_HELIX_PADDING
_RADIUS = styles.VIRTUALHELIXHANDLEITEM_RADIUS
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.BLUE_STROKE, _PEN_WIDTH)
def __init__(self, item_group: SelectionItemGroup):
"""
The item_group.parentItem() is expected to be a partItem
Args:
item_group (TYPE): Description
"""
super(VirtualHelixHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = None
self._pos0 = QPointF()
# end def
def getY(self, pos):
"""Summary
Args:
pos (TYPE): Description
Returns:
TYPE: Description
"""
pos = self._item_group.mapToScene(QPointF(pos))
return pos.y()
# end def
def translateY(self, delta):
"""Summary
Args:
delta (TYPE): Description
Returns:
TYPE: Description
"""
self.setY(delta)
# end def
def refreshPath(self):
"""Summary
Returns:
TYPE: Description
"""
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
# end def
def painterPath(self):
"""Summary
Returns:
TYPE: Description
"""
i_g = self._item_group
# the childrenBoundingRect is necessary to get this to work
rect = self.mapRectFromItem(i_g, i_g.childrenBoundingRect())
radius = self._RADIUS
path = QPainterPath()
path.addRoundedRect(rect, radius, radius)
path.moveTo(rect.right(), rect.center().y())
path.lineTo(rect.right() + radius / 2, rect.center().y())
return path
# end def
def processSelectedItems(self, r_start, r_end, modifiers):
"""docstring for processSelectedItems
Args:
r_start (TYPE): Description
r_end (TYPE): Description
modifiers (TYPE): Description
"""
margin = styles.VIRTUALHELIXHANDLEITEM_RADIUS
delta = (r_end - r_start) # r delta
mid_height = (self.boundingRect().height()) / 2 - margin
helix_height = self._HELIX_HEIGHT
if abs(delta) < mid_height: # move is too short for reordering
return
if delta > 0: # moved down, delta is positive
indexDelta = int((delta - mid_height) / helix_height)
else: # moved up, delta is negative
indexDelta = int((delta + mid_height) / helix_height)
# sort on y to determine the extremes of the selection group
items = sorted(self._item_group.childItems(), key=lambda vhhi: vhhi.y())
part_item = items[0].partItem()
part_item.reorderHelices([item.idNum() for item in items],
indexDelta)
# part_item.reorderHelices(items[0].idNum(),
# items[-1].idNum(),
# indexDelta)
part_item.updateStatusBar("")
# end def
def boxParent(self):
"""Summary
Returns:
TYPE: Description
"""
temp = self._item_group.childItems()[0].partItem()
self.setParentItem(temp)
return temp
# end def
def deleteSelection(self):
"""
Delete selection operates outside of the documents a virtual helices
are not actually selected in the model
"""
vh_handle_items = self._item_group.childItems()
u_s = self._item_group.document().undoStack()
u_s.beginMacro("delete Virtual Helices")
for vhhi in vh_handle_items:
part = vhhi.part()
part.removeVirtualHelix(vhhi.idNum())
u_s.endMacro()
# end def
def bounds(self):
"""Summary
Returns:
TYPE: Description
"""
return self._bounds
# end def
def delta(self, yf, y0):
"""Summary
Args:
yf (TYPE): Description
y0 (TYPE): Description
Returns:
TYPE: Description
"""
return yf - y0
# end def
def resetPosition(self):
"""Summary
Returns:
TYPE: Description
"""
self.setPos(self._pos0)
# end def
# end class
class EndpointHandleSelectionBox(QGraphicsPathItem):
"""Summary
"""
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.SELECTED_COLOR, _PEN_WIDTH)
_BASE_WIDTH = styles.PATH_BASE_WIDTH
def __init__(self, item_group: SelectionItemGroup):
"""The item_group.parentItem() is expected to be a partItem
Args:
item_group: Description
"""
super(EndpointHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = (0, 0)
self._pos0 = QPointF()
# end def
def getX(self, pos: QPointF) -> float:
"""
Args:
pos: Description
Returns:
``x`` position
"""
return pos.x()
# end def
def translateX(self, delta: float):
"""
Args:
delta: Description
"""
children = self._item_group.childItems()
if children:
p_i = children[0].partItem()
str = "+%d" % delta if delta >= 0 else "%d" % delta
p_i.updateStatusBar(str)
self.setX(self._BASE_WIDTH * delta)
# end def
def resetPosition(self):
"""
"""
self.setPos(self._pos0)
def delta(self, xf: float, x0: float) -> float:
"""
Args:
xf: Description
x0: Description
Returns:
change distance
"""
bound_l, bound_h = self._bounds
delta = int(floor((xf - x0) / self._BASE_WIDTH))
if delta > 0 and delta > bound_h:
delta = bound_h
elif delta < 0 and abs(delta) > bound_l:
delta = -bound_l
return delta
def refreshPath(self):
"""
"""
temp_low, temp_high = self._item_group.viewroot.document().getSelectionBounds()
self._bounds = (temp_low, temp_high)
# logger.debug("rp:", self._bounds)
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
# end def
def painterPath(self) -> QPainterPath:
"""
Returns:
:class:`QPainterPath`
"""
bw = self._BASE_WIDTH
i_g = self._item_group
# the childrenBoundingRect is necessary to get this to work
rect_IG = i_g.childrenBoundingRect()
rect = self.mapRectFromItem(i_g, rect_IG)
if rect.width() < bw:
rect.adjust(-bw / 4, 0, bw / 2, 0)
path = QPainterPath()
path.addRect(rect)
self._item_group.setBoundingRect(rect_IG)
# path.addRoundedRect(rect, radius, radius)
# path.moveTo(rect.right(),\
# rect.center().y())
# path.lineTo(rect.right() + radius / 2,\
# rect.center().y())
return path
# end def
def processSelectedItems(self, r_start: float, r_end: float, modifiers):
"""
Args:
r_start: Description
r_end: Description
modifiers (TYPE): Description
"""
delta = self.delta(r_end, r_start)
# TODO reenable do_maximize?????
# if modifiers & Qt.AltModifier:
# do_maximize = True
# else:
# do_maximize = False
self._item_group.viewroot.document().resizeSelection(delta)
# end def
def deleteSelection(self):
"""Summary
Returns:
TYPE: Description
"""
self._item_group.document().deleteStrandSelection()
def boxParent(self) -> QGraphicsItem:
"""Get the parent :class:`ProxyParentItem`
Returns:
:class:`ProxyParentItem`
"""
temp = self._item_group.childItems()[0].partItem().proxy()
self.setParentItem(temp)
return temp
# end def
def bounds(self) -> Vec2T:
"""
Returns:
the bounds
"""
return self._bounds
# end def
# end class
| 28.467316
| 109
| 0.569572
|
import logging
from math import floor
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QPainterPath,
QKeyEvent,
QMouseEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsItemGroup,
QGraphicsPathItem,
QGraphicsSceneMouseEvent,
)
from cadnano.gui.palette import getPenObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview import (
PathRootItemT,
)
from cadnano.cntypes import (
Vec2T,
DocT
)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SelectionItemGroup(QGraphicsItemGroup):
def __init__(self, boxtype: QGraphicsItem,
constraint: str,
viewroot: PathRootItemT):
super(SelectionItemGroup, self).__init__(viewroot)
self.viewroot: PathRootItemT = viewroot
self.setFiltersChildEvents(True)
Flag(QGraphicsItem.ItemIsSelectable)
self.setFlag(QGraphicsItem.ItemIsFocusable)
self.setFlag(QGraphicsItem.ItemHasNoContents)
self._rect = QRectF()
self._PEN = getPenObj(styles.BLUE_STROKE,
styles.PATH_SELECTBOX_STROKE_WIDTH)
self.selectionbox = boxtype(self)
self._drag_enable = False
self._dragged = False
self._r0 = 0
self._r = 0
self._added_to_press_list = False
self._pending_to_add_dict = {}
if constraint == 'y':
self.getR = self.selectionbox.getY
self.translateR = self.selectionbox.translateY
else:
self.getR = self.selectionbox.getX
self.translateR = self.selectionbox.translateX
self._normal_select = True
self.setZValue(styles.ZPATHSELECTION)
pendToAdd(self, item):
self._pending_to_add_dict[item] = True
def isPending(self, item):
return item in self._pending_to_add_dict
def document(self) -> DocT:
return self.viewroot.document()
def pendToRemove(self, item):
if item in self._pending_to_add_dict:
del self._pending_to_add_dict[item]
def setNormalSelect(self, bool_val: bool):
self._normal_select = bool_val
def isNormalSelect(self) -> bool:
return self._normal_select
def processPendingToAddList(self):
doc = self.document()
p2add = self._pending_to_add_dict
if len(p2add) > 0:
plist = list(self._pending_to_add_dict.keys())
for item in plist:
if p2add[item]:
p2add[item] = False
self.addToGroup(item)
item.modelSelect(doc)
self._pending_to_add_dict = {}
doc.updateStrandSelection()
def selectionLock(self):
return self.viewroot.selectionLock()
def setSelectionLock(self, selection_group):
self.viewroot.setSelectionLock(selection_group)
def keyPressEvent(self, event: QKeyEvent):
key = event.key()
if key in [Qt.Key_Backspace, Qt.Key_Delete]:
self.selectionbox.deleteSelection()
self.clearSelection(False)
return QGraphicsItemGroup.keyPressEvent(self, event)
else:
return QGraphicsItemGroup.keyPressEvent(self, event)
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
if event.button() != Qt.LeftButton:
return QGraphicsItemGroup.mousePressEvent(self, event)
else:
self._drag_enable = True
self.setSelected(True)
self.selectionbox.resetPosition()
self.selectionbox.refreshPath()
self.selectionbox.resetPosition()
self.selectionbox.show()
self._dragged = False
if self._added_to_press_list is False:
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return QGraphicsItemGroup.mousePressEvent(self, event)
def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
if self._drag_enable is True:
rf = self.getR(self.mapFromScene(QPointF(event.scenePos())))
if self._dragged is False:
self._dragged = True
self._r0 = rf
else:
delta = self.selectionbox.delta(rf, self._r0)
self.translateR(delta)
self._r = rf
else:
QGraphicsItemGroup.mouseMoveEvent(self, event)
def customMouseRelease(self, event: QMouseEvent):
self.selectionbox.setParentItem(self.viewroot)
self.selectionbox.hide()
self.selectionbox.resetTransform()
self._drag_enable = False
if not (self._r0 == 0 and self._r == 0):
modifiers = event.modifiers()
self.selectionbox.processSelectedItems(self._r0, self._r, modifiers)
self._r0 = 0
self._r = 0
self.setFocus()
self._added_to_press_list = False
def resetSelection(self):
self._pending_to_add_dict = {}
self._added_to_press_list = False
self.clearSelection(False)
self.setSelectionLock(None)
self.selectionbox.setParentItem(self.viewroot)
self.setParentItem(self.viewroot)
def clearSelection(self, value):
if value == False:
self.selectionbox.hide()
self.selectionbox.resetPosition()
self.removeSelectedItems()
self.viewroot.setSelectionLock(None)
self.clearFocus()
self.prepareGeometryChange()
self._rect.setWidth(0)
else:
self.setFocus()
self.update(self.boundingRect())
def itemChange(self, change, value):
if change == QGraphicsItem.ItemSelectedChange:
if value == False:
self.clearSelection(False)
return False
else:
return True
elif change == QGraphicsItem.ItemChildAddedChange:
if self._added_to_press_list is False:
self.setFocus()
self.selectionbox.boxParent()
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return
return QGraphicsItemGroup.itemChange(self, change, value)
def removeChild(self, child):
doc = self.document()
self.removeFromGroup(child)
child.modelDeselect(doc)
def removeSelectedItems(self):
doc = self.document()
for item in self.childItems():
self.removeFromGroup(item)
item.modelDeselect(doc)
doc.updateStrandSelection()
def setBoundingRect(self, rect):
self.prepareGeometryChange()
self._rect = rect
def boundingRect(self):
return self._rect
class VirtualHelixHandleSelectionBox(QGraphicsPathItem):
_HELIX_HEIGHT = styles.PATH_HELIX_HEIGHT + styles.PATH_HELIX_PADDING
_RADIUS = styles.VIRTUALHELIXHANDLEITEM_RADIUS
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.BLUE_STROKE, _PEN_WIDTH)
def __init__(self, item_group: SelectionItemGroup):
super(VirtualHelixHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = None
self._pos0 = QPointF()
def getY(self, pos):
pos = self._item_group.mapToScene(QPointF(pos))
return pos.y()
def translateY(self, delta):
self.setY(delta)
def refreshPath(self):
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
def painterPath(self):
i_g = self._item_group
rect = self.mapRectFromItem(i_g, i_g.childrenBoundingRect())
radius = self._RADIUS
path = QPainterPath()
path.addRoundedRect(rect, radius, radius)
path.moveTo(rect.right(), rect.center().y())
path.lineTo(rect.right() + radius / 2, rect.center().y())
return path
def processSelectedItems(self, r_start, r_end, modifiers):
margin = styles.VIRTUALHELIXHANDLEITEM_RADIUS
delta = (r_end - r_start)
mid_height = (self.boundingRect().height()) / 2 - margin
helix_height = self._HELIX_HEIGHT
if abs(delta) < mid_height:
return
if delta > 0:
indexDelta = int((delta - mid_height) / helix_height)
else:
indexDelta = int((delta + mid_height) / helix_height)
items = sorted(self._item_group.childItems(), key=lambda vhhi: vhhi.y())
part_item = items[0].partItem()
part_item.reorderHelices([item.idNum() for item in items],
indexDelta)
part_item.updateStatusBar("")
def boxParent(self):
temp = self._item_group.childItems()[0].partItem()
self.setParentItem(temp)
return temp
def deleteSelection(self):
vh_handle_items = self._item_group.childItems()
u_s = self._item_group.document().undoStack()
u_s.beginMacro("delete Virtual Helices")
for vhhi in vh_handle_items:
part = vhhi.part()
part.removeVirtualHelix(vhhi.idNum())
u_s.endMacro()
def bounds(self):
return self._bounds
def delta(self, yf, y0):
return yf - y0
def resetPosition(self):
self.setPos(self._pos0)
class EndpointHandleSelectionBox(QGraphicsPathItem):
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.SELECTED_COLOR, _PEN_WIDTH)
_BASE_WIDTH = styles.PATH_BASE_WIDTH
def __init__(self, item_group: SelectionItemGroup):
super(EndpointHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = (0, 0)
self._pos0 = QPointF()
def getX(self, pos: QPointF) -> float:
return pos.x()
def translateX(self, delta: float):
children = self._item_group.childItems()
if children:
p_i = children[0].partItem()
str = "+%d" % delta if delta >= 0 else "%d" % delta
p_i.updateStatusBar(str)
self.setX(self._BASE_WIDTH * delta)
def resetPosition(self):
self.setPos(self._pos0)
def delta(self, xf: float, x0: float) -> float:
bound_l, bound_h = self._bounds
delta = int(floor((xf - x0) / self._BASE_WIDTH))
if delta > 0 and delta > bound_h:
delta = bound_h
elif delta < 0 and abs(delta) > bound_l:
delta = -bound_l
return delta
def refreshPath(self):
temp_low, temp_high = self._item_group.viewroot.document().getSelectionBounds()
self._bounds = (temp_low, temp_high)
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
def painterPath(self) -> QPainterPath:
bw = self._BASE_WIDTH
i_g = self._item_group
rect_IG = i_g.childrenBoundingRect()
rect = self.mapRectFromItem(i_g, rect_IG)
if rect.width() < bw:
rect.adjust(-bw / 4, 0, bw / 2, 0)
path = QPainterPath()
path.addRect(rect)
self._item_group.setBoundingRect(rect_IG)
return path
def processSelectedItems(self, r_start: float, r_end: float, modifiers):
delta = self.delta(r_end, r_start)
self._item_group.viewroot.document().resizeSelection(delta)
def deleteSelection(self):
self._item_group.document().deleteStrandSelection()
def boxParent(self) -> QGraphicsItem:
temp = self._item_group.childItems()[0].partItem().proxy()
self.setParentItem(temp)
return temp
def bounds(self) -> Vec2T:
return self._bounds
| true
| true
|
f7180731325d42a74cf0349d5377f43a897a9155
| 12,937
|
py
|
Python
|
lama/elastix/invert_transforms.py
|
MiaRatkovic/LAMA
|
3ccfed0864001c8c270861e23cc81bc43d7d25c9
|
[
"Apache-2.0"
] | 6
|
2016-08-15T22:07:02.000Z
|
2022-02-17T04:22:58.000Z
|
lama/elastix/invert_transforms.py
|
MiaRatkovic/LAMA
|
3ccfed0864001c8c270861e23cc81bc43d7d25c9
|
[
"Apache-2.0"
] | 25
|
2019-12-05T02:02:20.000Z
|
2021-09-08T01:39:17.000Z
|
lama/elastix/invert_transforms.py
|
MiaRatkovic/LAMA
|
3ccfed0864001c8c270861e23cc81bc43d7d25c9
|
[
"Apache-2.0"
] | 5
|
2019-12-05T00:15:29.000Z
|
2021-07-06T05:24:54.000Z
|
from pathlib import Path
import tempfile
import os
import subprocess
from collections import defaultdict
from multiprocessing import Pool
from os.path import join, abspath, isfile
from typing import Union, List, Dict
from logzero import logger as logging
import yaml
from lama import common
from lama.common import cfg_load
from lama.registration_pipeline.validate_config import LamaConfig
from lama.elastix import (ELX_TRANSFORM_NAME, ELX_PARAM_PREFIX, PROPAGATE_LABEL_TRANFORM,
PROPAGATE_IMAGE_TRANSFORM, PROPAGATE_CONFIG, RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR)
LABEL_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '0',
'FixedInternalImagePixelType': 'short',
'MovingInternalImagePixelType': 'short',
'ResultImagePixelType': 'unsigned char',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
IMAGE_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '3',
'FixedInternalImagePixelType': 'float',
'MovingInternalImagePixelType': 'float',
'ResultImagePixelType': 'float',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
def batch_invert_transform_parameters(config: Union[Path, LamaConfig],
clobber=True, new_log:bool=False):
"""
Create new elastix TransformParameter files that can then be used by transformix to invert labelmaps, stats etc
Parameters
----------
config
path to original reg pipeline config file
clobber
if True overwrite inverted parameters present
new_log:
Whether to create a new log file. If called from another module, logging may happen there
"""
common.test_installation('elastix')
if isinstance(config, (Path, str)):
config = LamaConfig(config)
threads = str(config['threads'])
if new_log:
common.init_logging(config / 'invert_transforms.log')
reg_dirs = get_reg_dirs(config)
# Get the image basenames from the first stage registration folder (usually rigid)
# ignore images in non-relevent folder that may be present
volume_names = [x.stem for x in common.get_file_paths(reg_dirs[0], ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR])]
inv_outdir = config.mkdir('inverted_transforms')
stages_to_invert = defaultdict(list)
jobs: List[Dict] = []
reg_stage_dir: Path
for i, vol_id in enumerate(volume_names):
for reg_stage_dir in reg_dirs:
if not reg_stage_dir.is_dir():
logging.error('cannot find {}'.format(reg_stage_dir))
raise FileNotFoundError(f'Cannot find registration dir {reg_stage_dir}')
inv_stage_dir = inv_outdir / reg_stage_dir.name
specimen_stage_reg_dir = reg_stage_dir / vol_id
specimen_stage_inversion_dir = inv_stage_dir / vol_id
transform_file = common.getfile_startswith(specimen_stage_reg_dir, ELX_TRANSFORM_NAME)
parameter_file = common.getfile_startswith(reg_stage_dir, ELX_PARAM_PREFIX)
# Create the folder to put the specimen inversion parameter files in.
inv_stage_dir.mkdir(exist_ok=True)
# Add the stage to the inversion order config (in reverse order), if not already.
if reg_stage_dir.name not in stages_to_invert['label_propagation_order']:
stages_to_invert['label_propagation_order'].insert(0, reg_stage_dir.name)
if clobber:
common.mkdir_force(specimen_stage_inversion_dir) # Overwrite any inversion file that exist for a single specimen
# Each registration directory contains a metadata file, which contains the relative path to the fixed volume
reg_metadata = cfg_load(specimen_stage_reg_dir / common.INDV_REG_METADATA)
fixed_volume = (specimen_stage_reg_dir / reg_metadata['fixed_vol']).resolve()
# Invert the Transform parameters with options for normal image inversion
job = {
'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
'parameter_file': abspath(parameter_file),
'transform_file': transform_file,
'fixed_volume': fixed_volume,
'param_file_output_name': 'inversion_parameters.txt',
'image_replacements': IMAGE_REPLACEMENTS,
'label_replacements': LABEL_REPLACEMENTS,
'image_transform_file': PROPAGATE_IMAGE_TRANSFORM,
'label_transform_file': PROPAGATE_LABEL_TRANFORM,
'clobber': clobber,
'threads': threads
}
jobs.append(job)
# By putting each inverison job (a single job per registration stage) we can speed things up a bit
# If we can get multithreded inversion in elastix we can remove this python multithreading
pool = Pool(8)
try:
pool.map(_invert_transform_parameters, jobs)
except KeyboardInterrupt:
print('terminating inversion')
pool.terminate()
pool.join()
# TODO: Should we replace the need for this invert.yaml?
reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
stages_to_invert['registration_directory'] = str(reg_dir) # Doc why we need this
# Create a yaml config file so that inversions can be run seperatley
invert_config = config['inverted_transforms'] / PROPAGATE_CONFIG
with open(invert_config, 'w') as yf:
yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
def _invert_transform_parameters(args: Dict):
"""
Generate a single inverted elastix transform parameter file. This can then be used to invert labels, masks etc.
If any of the step fail, return as subsequent steps will also fail. The logging of failures is handled
within each function
"""
# If we have both the image and label inverted transforms, don't do anything if noclobber is True
clobber = args['clobber']
threads = args['threads']
image_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['image_transform_file']))
label_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['label_transform_file']))
if not clobber and isfile(label_transform_param_path) and isfile(image_transform_param_path):
logging.info('skipping {} as noclobber is True and inverted parameter files exist')
return
# Modify the elastix registration input parameter file to enable inversion (Change metric and don't write image results)
inversion_params = abspath(join(args['specimen_stage_inversion_dir'], args['param_file_output_name'])) # The elastix registration parameters used for inversion
make_elastix_inversion_parameter_file(abspath(args['parameter_file']), inversion_params, args['image_replacements']) # I don't think we need the replacements here!!!!!!!!
# Do the inversion, making the inverted TransformParameters file
fixed_vol = args['fixed_volume']
forward_tform_file = abspath(args['transform_file'])
invert_param_dir = args['specimen_stage_inversion_dir']
if not invert_elastix_transform_parameters(fixed_vol, forward_tform_file, inversion_params, invert_param_dir, threads):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting normal volumes
image_inverted_tform = abspath(join(args['specimen_stage_inversion_dir'], 'TransformParameters.0.txt'))
if not _modify_inverted_tform_file(image_inverted_tform, image_transform_param_path):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting label volumes
# replace the parameter in the image file with label-specific parameters and save in new file. No need to
# generate one from scratch
if not make_elastix_inversion_parameter_file(image_transform_param_path, label_transform_param_path, args['label_replacements']):
return
_modify_inverted_tform_file(label_transform_param_path)
def get_reg_dirs(config: LamaConfig) -> List[Path]:
"""
Get the registration output directories paths in the order they were made
"""
reg_stages = []
for i, reg_stage in enumerate(config['registration_stage_params']):
stage_id = reg_stage['stage_id']
stage_dir = config['root_reg_dir'] / stage_id
reg_stages.append(stage_dir)
return reg_stages
def make_elastix_inversion_parameter_file(elx_param_file: Path, newfile_name: str, replacements: Dict):
"""
Modifies the elastix input parameter file that was used in the original transformation.
Adds DisplacementMagnitudePenalty (which is needed for inverting)
Turns off writing the image results at the end as we only need an inverted output file.
Also changes interpolation order in the case of inverting labels
Parameters
----------
elx_param_file: str
path to elastix input parameter file
newfile_name: str
path to save modified parameter file to
"""
try:
with open(elx_param_file) as old, open(newfile_name, "w") as new:
for line in old:
if line.startswith("(Metric "):
line = '(Metric "DisplacementMagnitudePenalty")\n'
if line.startswith('(WriteResultImage '):
line = '(WriteResultImage "false")\n'
if line.startswith('WriteResultImageAfterEachResolution '):
continue
try:
param_name = line.split()[0][1:]
except IndexError:
continue # comment?
if param_name in replacements:
value = replacements[param_name]
try:
int(value)
except ValueError:
# Not an int, neeed quotes
line = '({} "{}")\n'.format(param_name, value)
else:
# An int, no quotes
line = '({} {})\n'.format(param_name, value)
new.write(line)
except IOError as e:
logging.error("Error modifying the elastix parameter file: {}".format(e))
return False
return True
def invert_elastix_transform_parameters(fixed: Path, tform_file: Path, param: Path, outdir: Path, threads: str):
"""
Invert the transform and get a new transform file
"""
if not common.test_installation('elastix'):
raise OSError('elastix not installed')
cmd = ['elastix',
'-t0', tform_file,
'-p', param,
'-f', fixed,
'-m', fixed,
'-out', outdir,
'-threads', threads # 11/09/18. This was set to 1. Can iversions take advantage of multithreading?
]
try:
subprocess.check_output(cmd)
except (Exception, subprocess.CalledProcessError) as e:
msg = f'Inverting transform file failed. cmd: {cmd}\n{str(e)}:'
logging.error(msg)
logging.exception(msg)
return False
return True
def _modify_inverted_tform_file(elx_tform_file: Path, newfile_name: str=None):
"""
Remove "NoInitialTransform" from the output transform parameter file
Set output image format to unsigned char. Writes out a modified elastix transform parameter file
that can be used for inverting volumes
Parameters
----------
elx_tform_file: str
path to elastix transform file
newfile_mame: str
path to save modified transform file
"""
if not newfile_name: # Write to temporary file before overwriting
new_file = tempfile.NamedTemporaryFile().name
else:
new_file = newfile_name
try:
with open(new_file, "w+") as new_tform_param_fh, open(elx_tform_file, "r") as tform_param_fh:
for line in tform_param_fh:
if line.startswith('(InitialTransformParametersFileName'):
line = '(InitialTransformParametersFileName "NoInitialTransform")\n'
new_tform_param_fh.write(line)
new_tform_param_fh.close()
tform_param_fh.close()
except IOError:
logging.warning("Error reading or writing transform files {}".format(elx_tform_file))
return False
return True
# def is_euler_stage(tform_param):
# """
# Return True if the registration used to create this param file was a Euler transform. Can't currently invert
# Euler transforms with this method, and is usually not required
# :param tform_param:
# :return:
# """
# with open(tform_param, 'r') as fh:
# line = fh.readline()
# if 'EulerTransform' in line:
# return True
# else:
# return False
| 38.84985
| 175
| 0.678287
|
from pathlib import Path
import tempfile
import os
import subprocess
from collections import defaultdict
from multiprocessing import Pool
from os.path import join, abspath, isfile
from typing import Union, List, Dict
from logzero import logger as logging
import yaml
from lama import common
from lama.common import cfg_load
from lama.registration_pipeline.validate_config import LamaConfig
from lama.elastix import (ELX_TRANSFORM_NAME, ELX_PARAM_PREFIX, PROPAGATE_LABEL_TRANFORM,
PROPAGATE_IMAGE_TRANSFORM, PROPAGATE_CONFIG, RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR)
LABEL_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '0',
'FixedInternalImagePixelType': 'short',
'MovingInternalImagePixelType': 'short',
'ResultImagePixelType': 'unsigned char',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
IMAGE_REPLACEMENTS = {
'FinalBSplineInterpolationOrder': '3',
'FixedInternalImagePixelType': 'float',
'MovingInternalImagePixelType': 'float',
'ResultImagePixelType': 'float',
'WriteTransformParametersEachResolution': 'false',
'WriteResultImageAfterEachResolution': 'false'
}
def batch_invert_transform_parameters(config: Union[Path, LamaConfig],
clobber=True, new_log:bool=False):
common.test_installation('elastix')
if isinstance(config, (Path, str)):
config = LamaConfig(config)
threads = str(config['threads'])
if new_log:
common.init_logging(config / 'invert_transforms.log')
reg_dirs = get_reg_dirs(config)
volume_names = [x.stem for x in common.get_file_paths(reg_dirs[0], ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR])]
inv_outdir = config.mkdir('inverted_transforms')
stages_to_invert = defaultdict(list)
jobs: List[Dict] = []
reg_stage_dir: Path
for i, vol_id in enumerate(volume_names):
for reg_stage_dir in reg_dirs:
if not reg_stage_dir.is_dir():
logging.error('cannot find {}'.format(reg_stage_dir))
raise FileNotFoundError(f'Cannot find registration dir {reg_stage_dir}')
inv_stage_dir = inv_outdir / reg_stage_dir.name
specimen_stage_reg_dir = reg_stage_dir / vol_id
specimen_stage_inversion_dir = inv_stage_dir / vol_id
transform_file = common.getfile_startswith(specimen_stage_reg_dir, ELX_TRANSFORM_NAME)
parameter_file = common.getfile_startswith(reg_stage_dir, ELX_PARAM_PREFIX)
inv_stage_dir.mkdir(exist_ok=True)
if reg_stage_dir.name not in stages_to_invert['label_propagation_order']:
stages_to_invert['label_propagation_order'].insert(0, reg_stage_dir.name)
if clobber:
common.mkdir_force(specimen_stage_inversion_dir)
reg_metadata = cfg_load(specimen_stage_reg_dir / common.INDV_REG_METADATA)
fixed_volume = (specimen_stage_reg_dir / reg_metadata['fixed_vol']).resolve()
job = {
'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
'parameter_file': abspath(parameter_file),
'transform_file': transform_file,
'fixed_volume': fixed_volume,
'param_file_output_name': 'inversion_parameters.txt',
'image_replacements': IMAGE_REPLACEMENTS,
'label_replacements': LABEL_REPLACEMENTS,
'image_transform_file': PROPAGATE_IMAGE_TRANSFORM,
'label_transform_file': PROPAGATE_LABEL_TRANFORM,
'clobber': clobber,
'threads': threads
}
jobs.append(job)
pool = Pool(8)
try:
pool.map(_invert_transform_parameters, jobs)
except KeyboardInterrupt:
print('terminating inversion')
pool.terminate()
pool.join()
reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
stages_to_invert['registration_directory'] = str(reg_dir)
invert_config = config['inverted_transforms'] / PROPAGATE_CONFIG
with open(invert_config, 'w') as yf:
yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
def _invert_transform_parameters(args: Dict):
clobber = args['clobber']
threads = args['threads']
image_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['image_transform_file']))
label_transform_param_path = abspath(join(args['specimen_stage_inversion_dir'], args['label_transform_file']))
if not clobber and isfile(label_transform_param_path) and isfile(image_transform_param_path):
logging.info('skipping {} as noclobber is True and inverted parameter files exist')
return
# Modify the elastix registration input parameter file to enable inversion (Change metric and don't write image results)
inversion_params = abspath(join(args['specimen_stage_inversion_dir'], args['param_file_output_name']))
make_elastix_inversion_parameter_file(abspath(args['parameter_file']), inversion_params, args['image_replacements'])
# Do the inversion, making the inverted TransformParameters file
fixed_vol = args['fixed_volume']
forward_tform_file = abspath(args['transform_file'])
invert_param_dir = args['specimen_stage_inversion_dir']
if not invert_elastix_transform_parameters(fixed_vol, forward_tform_file, inversion_params, invert_param_dir, threads):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting normal volumes
image_inverted_tform = abspath(join(args['specimen_stage_inversion_dir'], 'TransformParameters.0.txt'))
if not _modify_inverted_tform_file(image_inverted_tform, image_transform_param_path):
return
# Get the resulting TransformParameters file, and create a transform file suitable for inverting label volumes
# replace the parameter in the image file with label-specific parameters and save in new file. No need to
# generate one from scratch
if not make_elastix_inversion_parameter_file(image_transform_param_path, label_transform_param_path, args['label_replacements']):
return
_modify_inverted_tform_file(label_transform_param_path)
def get_reg_dirs(config: LamaConfig) -> List[Path]:
reg_stages = []
for i, reg_stage in enumerate(config['registration_stage_params']):
stage_id = reg_stage['stage_id']
stage_dir = config['root_reg_dir'] / stage_id
reg_stages.append(stage_dir)
return reg_stages
def make_elastix_inversion_parameter_file(elx_param_file: Path, newfile_name: str, replacements: Dict):
try:
with open(elx_param_file) as old, open(newfile_name, "w") as new:
for line in old:
if line.startswith("(Metric "):
line = '(Metric "DisplacementMagnitudePenalty")\n'
if line.startswith('(WriteResultImage '):
line = '(WriteResultImage "false")\n'
if line.startswith('WriteResultImageAfterEachResolution '):
continue
try:
param_name = line.split()[0][1:]
except IndexError:
continue # comment?
if param_name in replacements:
value = replacements[param_name]
try:
int(value)
except ValueError:
# Not an int, neeed quotes
line = '({} "{}")\n'.format(param_name, value)
else:
# An int, no quotes
line = '({} {})\n'.format(param_name, value)
new.write(line)
except IOError as e:
logging.error("Error modifying the elastix parameter file: {}".format(e))
return False
return True
def invert_elastix_transform_parameters(fixed: Path, tform_file: Path, param: Path, outdir: Path, threads: str):
if not common.test_installation('elastix'):
raise OSError('elastix not installed')
cmd = ['elastix',
'-t0', tform_file,
'-p', param,
'-f', fixed,
'-m', fixed,
'-out', outdir,
'-threads', threads # 11/09/18. This was set to 1. Can iversions take advantage of multithreading?
]
try:
subprocess.check_output(cmd)
except (Exception, subprocess.CalledProcessError) as e:
msg = f'Inverting transform file failed. cmd: {cmd}\n{str(e)}:'
logging.error(msg)
logging.exception(msg)
return False
return True
def _modify_inverted_tform_file(elx_tform_file: Path, newfile_name: str=None):
if not newfile_name: # Write to temporary file before overwriting
new_file = tempfile.NamedTemporaryFile().name
else:
new_file = newfile_name
try:
with open(new_file, "w+") as new_tform_param_fh, open(elx_tform_file, "r") as tform_param_fh:
for line in tform_param_fh:
if line.startswith('(InitialTransformParametersFileName'):
line = '(InitialTransformParametersFileName "NoInitialTransform")\n'
new_tform_param_fh.write(line)
new_tform_param_fh.close()
tform_param_fh.close()
except IOError:
logging.warning("Error reading or writing transform files {}".format(elx_tform_file))
return False
return True
# def is_euler_stage(tform_param):
# """
# Return True if the registration used to create this param file was a Euler transform. Can't currently invert
# Euler transforms with this method, and is usually not required
# :param tform_param:
# :return:
# """
| true
| true
|
f718082ff8a1b480495d2fe2964e1b8479a5f70b
| 3,677
|
py
|
Python
|
tests/python/unittest/test_tir_ptx_ldmatrix.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
tests/python/unittest/test_tir_ptx_ldmatrix.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863
|
2017-08-17T19:55:50.000Z
|
2019-11-04T17:18:41.000Z
|
tests/python/unittest/test_tir_ptx_ldmatrix.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
arch = tvm.contrib.nvcc.get_target_compute_version()
major, minor = tvm.contrib.nvcc.parse_compute_version(arch)
if major * 10 + minor < 75:
# Require at least SM75
return
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else: # num == 4
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
| 36.04902
| 100
| 0.536035
|
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
arch = tvm.contrib.nvcc.get_target_compute_version()
major, minor = tvm.contrib.nvcc.parse_compute_version(arch)
if major * 10 + minor < 75:
return
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
| true
| true
|
f71808b9f205aac0b404b9509ad046d9f41b7eab
| 12,174
|
py
|
Python
|
google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v10/services/services/conversion_value_rule_set_service/transports/grpc.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import (
conversion_value_rule_set_service,
)
from .base import ConversionValueRuleSetServiceTransport, DEFAULT_CLIENT_INFO
class ConversionValueRuleSetServiceGrpcTransport(
ConversionValueRuleSetServiceTransport
):
"""gRPC backend transport for ConversionValueRuleSetService.
Service to manage conversion value rule sets.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_conversion_value_rule_sets(
self,
) -> Callable[
[
conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest
],
conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse,
]:
r"""Return a callable for the mutate conversion value rule
sets method over gRPC.
Creates, updates or removes conversion value rule
sets. Operation statuses are returned.
Returns:
Callable[[~.MutateConversionValueRuleSetsRequest],
~.MutateConversionValueRuleSetsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_conversion_value_rule_sets" not in self._stubs:
self._stubs[
"mutate_conversion_value_rule_sets"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.ConversionValueRuleSetService/MutateConversionValueRuleSets",
request_serializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest.serialize,
response_deserializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse.deserialize,
)
return self._stubs["mutate_conversion_value_rule_sets"]
def close(self):
self.grpc_channel.close()
__all__ = ("ConversionValueRuleSetServiceGrpcTransport",)
| 43.634409
| 122
| 0.637013
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v10.services.types import (
conversion_value_rule_set_service,
)
from .base import ConversionValueRuleSetServiceTransport, DEFAULT_CLIENT_INFO
class ConversionValueRuleSetServiceGrpcTransport(
ConversionValueRuleSetServiceTransport
):
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def mutate_conversion_value_rule_sets(
self,
) -> Callable[
[
conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest
],
conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse,
]:
if "mutate_conversion_value_rule_sets" not in self._stubs:
self._stubs[
"mutate_conversion_value_rule_sets"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.ConversionValueRuleSetService/MutateConversionValueRuleSets",
request_serializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsRequest.serialize,
response_deserializer=conversion_value_rule_set_service.MutateConversionValueRuleSetsResponse.deserialize,
)
return self._stubs["mutate_conversion_value_rule_sets"]
def close(self):
self.grpc_channel.close()
__all__ = ("ConversionValueRuleSetServiceGrpcTransport",)
| true
| true
|
f7180a3e45377a91711d6c8fa67895d8d860641f
| 1,780
|
py
|
Python
|
mlprocessors/consolecapture.py
|
flatironinstitute/mountaintools
|
d5680599381e0810c4aa5b309b9ef9ec7f2d1b25
|
[
"Apache-2.0"
] | 2
|
2019-11-07T14:09:02.000Z
|
2021-09-23T01:09:04.000Z
|
mountaintools/mlprocessors/consolecapture.py
|
flatironinstitute/spikeforest_old
|
d9470194dc906b949178b9c44d14aea57a1f6c27
|
[
"Apache-2.0"
] | 13
|
2019-05-04T09:34:53.000Z
|
2019-06-23T07:05:58.000Z
|
mountaintools/mlprocessors/consolecapture.py
|
flatironinstitute/spikeforest_old
|
d9470194dc906b949178b9c44d14aea57a1f6c27
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:21.000Z
|
2021-09-23T01:07:21.000Z
|
from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
def __init__(self, file1: Any, file2: Any):
self.file1 = file1
self.file2 = file2
def write(self, data: str) -> None:
self.file1.write(data)
self.file2.write(data)
def flush(self) -> None:
self.file1.flush()
self.file2.flush()
class ConsoleCapture():
def __init__(self):
self._console_out = ''
self._tmp_fname = None
self._file_handle = None
self._time_start = None
self._time_stop = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def start_capturing(self) -> None:
self._tmp_fname = tempfile.mktemp(suffix='.txt')
self._file_handle = open(self._tmp_fname, 'w')
sys.stdout = Logger2(self._file_handle, self._original_stdout)
sys.stderr = Logger2(self._file_handle, self._original_stderr)
self._time_start = time.time()
def stop_capturing(self) -> None:
assert self._tmp_fname is not None
self._time_stop = time.time()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._file_handle.close()
with open(self._tmp_fname, 'r') as f:
self._console_out = f.read()
os.unlink(self._tmp_fname)
def addToConsoleOut(self, txt: str) -> None:
self._file_handle.write(txt)
def runtimeInfo(self) -> dict:
assert self._time_start is not None
return dict(
start_time=self._time_start - 0,
end_time=self._time_stop - 0,
elapsed_sec=self._time_stop - self._time_start
)
def consoleOut(self) -> str:
return self._console_out
| 28.709677
| 70
| 0.626404
|
from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
def __init__(self, file1: Any, file2: Any):
self.file1 = file1
self.file2 = file2
def write(self, data: str) -> None:
self.file1.write(data)
self.file2.write(data)
def flush(self) -> None:
self.file1.flush()
self.file2.flush()
class ConsoleCapture():
def __init__(self):
self._console_out = ''
self._tmp_fname = None
self._file_handle = None
self._time_start = None
self._time_stop = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def start_capturing(self) -> None:
self._tmp_fname = tempfile.mktemp(suffix='.txt')
self._file_handle = open(self._tmp_fname, 'w')
sys.stdout = Logger2(self._file_handle, self._original_stdout)
sys.stderr = Logger2(self._file_handle, self._original_stderr)
self._time_start = time.time()
def stop_capturing(self) -> None:
assert self._tmp_fname is not None
self._time_stop = time.time()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._file_handle.close()
with open(self._tmp_fname, 'r') as f:
self._console_out = f.read()
os.unlink(self._tmp_fname)
def addToConsoleOut(self, txt: str) -> None:
self._file_handle.write(txt)
def runtimeInfo(self) -> dict:
assert self._time_start is not None
return dict(
start_time=self._time_start - 0,
end_time=self._time_stop - 0,
elapsed_sec=self._time_stop - self._time_start
)
def consoleOut(self) -> str:
return self._console_out
| true
| true
|
f7180a3fe9d499c15ed0c134b63d4d7772dbd786
| 3,357
|
py
|
Python
|
profiles_project/settings.py
|
LaiZiSen/profiles_REST_API_course
|
83662a33b3a318dc7e52c5d56b577e4863ed7c5d
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
LaiZiSen/profiles_REST_API_course
|
83662a33b3a318dc7e52c5d56b577e4863ed7c5d
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
LaiZiSen/profiles_REST_API_course
|
83662a33b3a318dc7e52c5d56b577e4863ed7c5d
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9%&3aa&mz9nkbfr0!b(^9a^((@_wbd&m3f$3wbyseq9ai9m!^v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = ['ec2-18-117-223-244.us-east-2.compute.amazonaws.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| 26.226563
| 91
| 0.699136
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '9%&3aa&mz9nkbfr0!b(^9a^((@_wbd&m3f$3wbyseq9ai9m!^v'
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = ['ec2-18-117-223-244.us-east-2.compute.amazonaws.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| true
| true
|
f7180b932ca3d3eac6846747dbbfa23652d97e6d
| 1,458
|
py
|
Python
|
sa/profiles/AlliedTelesis/AT8100/get_version.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
sa/profiles/AlliedTelesis/AT8100/get_version.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
sa/profiles/AlliedTelesis/AT8100/get_version.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# AlliedTelesis.AT8100.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
import re
class Script(BaseScript):
name = "AlliedTelesis.AT8100.get_version"
cache = True
interface = IGetVersion
rx_plat = re.compile(
r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n", re.MULTILINE
)
rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE)
rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE)
def execute_cli(self):
v = self.cli("show system")
match1 = self.rx_plat.search(v)
match2 = self.rx_boot.search(v)
match3 = self.rx_version.search(v)
return {
"vendor": "Allied Telesis",
"platform": match1.group("platform"),
"version": match3.group("version"),
"attributes": {
"Boot PROM": match2.group("bootprom"),
"HW version": match1.group("hardware"),
"Serial Number": match1.group("serial"),
},
}
| 34.714286
| 98
| 0.504801
|
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
import re
class Script(BaseScript):
name = "AlliedTelesis.AT8100.get_version"
cache = True
interface = IGetVersion
rx_plat = re.compile(
r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n", re.MULTILINE
)
rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE)
rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE)
def execute_cli(self):
v = self.cli("show system")
match1 = self.rx_plat.search(v)
match2 = self.rx_boot.search(v)
match3 = self.rx_version.search(v)
return {
"vendor": "Allied Telesis",
"platform": match1.group("platform"),
"version": match3.group("version"),
"attributes": {
"Boot PROM": match2.group("bootprom"),
"HW version": match1.group("hardware"),
"Serial Number": match1.group("serial"),
},
}
| true
| true
|
f7180d0a373f4396eb239868bbafcb05362fb150
| 18,059
|
py
|
Python
|
CodeIA/venv/Lib/site-packages/imblearn/pipeline.py
|
Finasty-lab/IA-Python
|
286113504906fec11a5aa5fd1d12e38536b1c859
|
[
"Apache-2.0"
] | 2
|
2017-06-15T04:49:43.000Z
|
2020-06-20T02:29:29.000Z
|
CodeIA/venv/Lib/site-packages/imblearn/pipeline.py
|
Finasty-lab/IA-Python
|
286113504906fec11a5aa5fd1d12e38536b1c859
|
[
"Apache-2.0"
] | 3
|
2016-07-26T09:39:44.000Z
|
2020-06-20T02:29:30.000Z
|
CodeIA/venv/Lib/site-packages/imblearn/pipeline.py
|
Finasty-lab/IA-Python
|
286113504906fec11a5aa5fd1d12e38536b1c859
|
[
"Apache-2.0"
] | null | null | null |
"""
The :mod:`imblearn.pipeline` module implements utilities to build a
composite estimator, as a chain of transforms, samples and estimators.
"""
# Adapted from scikit-learn
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD
from sklearn import pipeline
from sklearn.base import clone
from sklearn.utils import _print_elapsed_time
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import check_memory
__all__ = ["Pipeline", "make_pipeline"]
class Pipeline(pipeline.Pipeline):
"""Pipeline of transforms and resamples with a final estimator.
Sequentially apply a list of transforms, sampling, and a final estimator.
Intermediate steps of the pipeline must be transformers or resamplers,
that is, they must implement fit, transform and sample methods.
The samplers are only applied during fit.
The final estimator only needs to implement fit.
The transformers and samplers in the pipeline can be cached using
``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing
fit/transform/fit_resample) that are chained, in the order in which
they are chained, with the last object an estimator.
memory : Instance of joblib.Memory or str, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
See Also
--------
make_pipeline : Helper function to make pipeline.
Notes
-----
See :ref:`sphx_glr_auto_examples_pipeline_plot_pipeline_classification.py`
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split as tts
>>> from sklearn.decomposition import PCA
>>> from sklearn.neighbors import KNeighborsClassifier as KNN
>>> from sklearn.metrics import classification_report
>>> from imblearn.over_sampling import SMOTE
>>> from imblearn.pipeline import Pipeline # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> pca = PCA()
>>> smt = SMOTE(random_state=42)
>>> knn = KNN()
>>> pipeline = Pipeline([('smt', smt), ('pca', pca), ('knn', knn)])
>>> X_train, X_test, y_train, y_test = tts(X, y, random_state=42)
>>> pipeline.fit(X_train, y_train) # doctest: +ELLIPSIS
Pipeline(...)
>>> y_hat = pipeline.predict(X_test)
>>> print(classification_report(y_test, y_hat))
precision recall f1-score support
<BLANKLINE>
0 0.87 1.00 0.93 26
1 1.00 0.98 0.99 224
<BLANKLINE>
accuracy 0.98 250
macro avg 0.93 0.99 0.96 250
weighted avg 0.99 0.98 0.98 250
<BLANKLINE>
"""
# BaseEstimator interface
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (
hasattr(t, "fit")
or hasattr(t, "fit_transform")
or hasattr(t, "fit_resample")
) or not (hasattr(t, "transform") or hasattr(t, "fit_resample")):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample (but not both) or be a string 'passthrough' "
"'%s' (type %s) doesn't)" % (t, type(t))
)
if hasattr(t, "fit_resample") and (
hasattr(t, "fit_transform") or hasattr(t, "transform")
):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample."
" '%s' implements both)" % (t)
)
if isinstance(t, pipeline.Pipeline):
raise TypeError(
"All intermediate steps of the chain should not be"
" Pipelines"
)
# We allow last estimator to be None as an identity transformation
if (
estimator is not None
and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit or be "
"the string 'passthrough'. '%s' (type %s) doesn't"
% (estimator, type(estimator))
)
def _iter(
self, with_final=True, filter_passthrough=True, filter_resample=True
):
"""Generate (idx, (name, trans)) tuples from self.steps.
When `filter_passthrough` is `True`, 'passthrough' and None
transformers are filtered out. When `filter_resample` is `True`,
estimator with a method `fit_resample` are filtered out.
"""
it = super()._iter(with_final, filter_passthrough)
if filter_resample:
return filter(lambda x: not hasattr(x[-1], "fit_resample"), it)
else:
return it
# Estimator interface
def _fit(self, X, y=None, **fit_params):
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(pipeline._fit_transform_one)
fit_resample_one_cached = memory.cache(_fit_resample_one)
fit_params_steps = {
name: {} for name, step in self.steps if step is not None
}
for pname, pval in fit_params.items():
if '__' not in pname:
raise ValueError(
"Pipeline.fit does not accept the {} parameter. "
"You can pass parameters to specific steps of your "
"pipeline using the stepname__parameter format, e.g. "
"`Pipeline.fit(X, y, logisticregression__sample_weight"
"=sample_weight)`.".format(pname))
step, param = pname.split("__", 1)
fit_params_steps[step][param] = pval
for (step_idx,
name,
transformer) in self._iter(with_final=False,
filter_passthrough=False,
filter_resample=False):
if (transformer is None or transformer == 'passthrough'):
with _print_elapsed_time('Pipeline',
self._log_message(step_idx)):
continue
try:
# joblib >= 0.12
mem = memory.location
except AttributeError:
mem = memory.cachedir
finally:
cloned_transformer = clone(transformer) if mem else transformer
# Fit or load from cache the current transfomer
if hasattr(cloned_transformer, "transform") or hasattr(
cloned_transformer, "fit_transform"
):
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer, X, y, None,
message_clsname='Pipeline',
message=self._log_message(step_idx),
**fit_params_steps[name]
)
elif hasattr(cloned_transformer, "fit_resample"):
X, y, fitted_transformer = fit_resample_one_cached(
cloned_transformer, X, y,
message_clsname='Pipeline',
message=self._log_message(step_idx),
**fit_params_steps[name]
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
if self._final_estimator == "passthrough":
return X, y, {}
return X, y, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit the model.
Fit all the transforms/samplers one after the other and
transform/sample the data, then fit the transformed/sampled
data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : Pipeline
This estimator.
"""
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
self._final_estimator.fit(Xt, yt, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_transform on
transformed data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
"""
last_step = self._final_estimator
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
elif hasattr(last_step, "fit_transform"):
return last_step.fit_transform(Xt, yt, **fit_params)
else:
return last_step.fit(Xt, yt, **fit_params).transform(Xt)
def fit_resample(self, X, y=None, **fit_params):
"""Fit the model and sample with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_resample on transformed
data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
yt : array-like of shape (n_samples, n_transformed_features)
Transformed target.
"""
last_step = self._final_estimator
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
elif hasattr(last_step, "fit_resample"):
return last_step.fit_resample(Xt, yt, **fit_params)
@if_delegate_has_method(delegate="_final_estimator")
def fit_predict(self, X, y=None, **fit_params):
"""Apply `fit_predict` of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted target.
"""
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][-1].fit_predict(Xt, yt, **fit_params)
return y_pred
def _fit_resample_one(sampler,
X,
y,
message_clsname='',
message=None,
**fit_params):
with _print_elapsed_time(message_clsname, message):
X_res, y_res = sampler.fit_resample(X, y, **fit_params)
return X_res, y_res, sampler
def make_pipeline(*steps, **kwargs):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators
A list of estimators.
memory : None, str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
See Also
--------
imblearn.pipeline.Pipeline : Class for creating a pipeline of
transforms with a final estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
memory = kwargs.pop("memory", None)
verbose = kwargs.pop('verbose', False)
if kwargs:
raise TypeError(
'Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0])
)
return Pipeline(
pipeline._name_estimators(steps), memory=memory, verbose=verbose
)
| 39.173536
| 79
| 0.5966
|
"""
The :mod:`imblearn.pipeline` module implements utilities to build a
composite estimator, as a chain of transforms, samples and estimators.
"""
from sklearn import pipeline
from sklearn.base import clone
from sklearn.utils import _print_elapsed_time
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import check_memory
__all__ = ["Pipeline", "make_pipeline"]
class Pipeline(pipeline.Pipeline):
"""Pipeline of transforms and resamples with a final estimator.
Sequentially apply a list of transforms, sampling, and a final estimator.
Intermediate steps of the pipeline must be transformers or resamplers,
that is, they must implement fit, transform and sample methods.
The samplers are only applied during fit.
The final estimator only needs to implement fit.
The transformers and samplers in the pipeline can be cached using
``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
it to 'passthrough' or ``None``.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing
fit/transform/fit_resample) that are chained, in the order in which
they are chained, with the last object an estimator.
memory : Instance of joblib.Memory or str, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
See Also
--------
make_pipeline : Helper function to make pipeline.
Notes
-----
See :ref:`sphx_glr_auto_examples_pipeline_plot_pipeline_classification.py`
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split as tts
>>> from sklearn.decomposition import PCA
>>> from sklearn.neighbors import KNeighborsClassifier as KNN
>>> from sklearn.metrics import classification_report
>>> from imblearn.over_sampling import SMOTE
>>> from imblearn.pipeline import Pipeline # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> pca = PCA()
>>> smt = SMOTE(random_state=42)
>>> knn = KNN()
>>> pipeline = Pipeline([('smt', smt), ('pca', pca), ('knn', knn)])
>>> X_train, X_test, y_train, y_test = tts(X, y, random_state=42)
>>> pipeline.fit(X_train, y_train) # doctest: +ELLIPSIS
Pipeline(...)
>>> y_hat = pipeline.predict(X_test)
>>> print(classification_report(y_test, y_hat))
precision recall f1-score support
<BLANKLINE>
0 0.87 1.00 0.93 26
1 1.00 0.98 0.99 224
<BLANKLINE>
accuracy 0.98 250
macro avg 0.93 0.99 0.96 250
weighted avg 0.99 0.98 0.98 250
<BLANKLINE>
"""
# BaseEstimator interface
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (
hasattr(t, "fit")
or hasattr(t, "fit_transform")
or hasattr(t, "fit_resample")
) or not (hasattr(t, "transform") or hasattr(t, "fit_resample")):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample (but not both) or be a string 'passthrough' "
"'%s' (type %s) doesn't)" % (t, type(t))
)
if hasattr(t, "fit_resample") and (
hasattr(t, "fit_transform") or hasattr(t, "transform")
):
raise TypeError(
"All intermediate steps of the chain should "
"be estimators that implement fit and transform or "
"fit_resample."
" '%s' implements both)" % (t)
)
if isinstance(t, pipeline.Pipeline):
raise TypeError(
"All intermediate steps of the chain should not be"
" Pipelines"
)
if (
estimator is not None
and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit or be "
"the string 'passthrough'. '%s' (type %s) doesn't"
% (estimator, type(estimator))
)
def _iter(
self, with_final=True, filter_passthrough=True, filter_resample=True
):
"""Generate (idx, (name, trans)) tuples from self.steps.
When `filter_passthrough` is `True`, 'passthrough' and None
transformers are filtered out. When `filter_resample` is `True`,
estimator with a method `fit_resample` are filtered out.
"""
it = super()._iter(with_final, filter_passthrough)
if filter_resample:
return filter(lambda x: not hasattr(x[-1], "fit_resample"), it)
else:
return it
# Estimator interface
def _fit(self, X, y=None, **fit_params):
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(pipeline._fit_transform_one)
fit_resample_one_cached = memory.cache(_fit_resample_one)
fit_params_steps = {
name: {} for name, step in self.steps if step is not None
}
for pname, pval in fit_params.items():
if '__' not in pname:
raise ValueError(
"Pipeline.fit does not accept the {} parameter. "
"You can pass parameters to specific steps of your "
"pipeline using the stepname__parameter format, e.g. "
"`Pipeline.fit(X, y, logisticregression__sample_weight"
"=sample_weight)`.".format(pname))
step, param = pname.split("__", 1)
fit_params_steps[step][param] = pval
for (step_idx,
name,
transformer) in self._iter(with_final=False,
filter_passthrough=False,
filter_resample=False):
if (transformer is None or transformer == 'passthrough'):
with _print_elapsed_time('Pipeline',
self._log_message(step_idx)):
continue
try:
# joblib >= 0.12
mem = memory.location
except AttributeError:
mem = memory.cachedir
finally:
cloned_transformer = clone(transformer) if mem else transformer
# Fit or load from cache the current transfomer
if hasattr(cloned_transformer, "transform") or hasattr(
cloned_transformer, "fit_transform"
):
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer, X, y, None,
message_clsname='Pipeline',
message=self._log_message(step_idx),
**fit_params_steps[name]
)
elif hasattr(cloned_transformer, "fit_resample"):
X, y, fitted_transformer = fit_resample_one_cached(
cloned_transformer, X, y,
message_clsname='Pipeline',
message=self._log_message(step_idx),
**fit_params_steps[name]
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
if self._final_estimator == "passthrough":
return X, y, {}
return X, y, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit the model.
Fit all the transforms/samplers one after the other and
transform/sample the data, then fit the transformed/sampled
data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : Pipeline
This estimator.
"""
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
self._final_estimator.fit(Xt, yt, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_transform on
transformed data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
"""
last_step = self._final_estimator
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
elif hasattr(last_step, "fit_transform"):
return last_step.fit_transform(Xt, yt, **fit_params)
else:
return last_step.fit(Xt, yt, **fit_params).transform(Xt)
def fit_resample(self, X, y=None, **fit_params):
"""Fit the model and sample with the final estimator.
Fits all the transformers/samplers one after the other and
transform/sample the data, then uses fit_resample on transformed
data with the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like of shape (n_samples, n_transformed_features)
Transformed samples.
yt : array-like of shape (n_samples, n_transformed_features)
Transformed target.
"""
last_step = self._final_estimator
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
elif hasattr(last_step, "fit_resample"):
return last_step.fit_resample(Xt, yt, **fit_params)
@if_delegate_has_method(delegate="_final_estimator")
def fit_predict(self, X, y=None, **fit_params):
"""Apply `fit_predict` of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted target.
"""
Xt, yt, fit_params = self._fit(X, y, **fit_params)
with _print_elapsed_time('Pipeline',
self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][-1].fit_predict(Xt, yt, **fit_params)
return y_pred
def _fit_resample_one(sampler,
X,
y,
message_clsname='',
message=None,
**fit_params):
with _print_elapsed_time(message_clsname, message):
X_res, y_res = sampler.fit_resample(X, y, **fit_params)
return X_res, y_res, sampler
def make_pipeline(*steps, **kwargs):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Parameters
----------
*steps : list of estimators
A list of estimators.
memory : None, str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
See Also
--------
imblearn.pipeline.Pipeline : Class for creating a pipeline of
transforms with a final estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
memory = kwargs.pop("memory", None)
verbose = kwargs.pop('verbose', False)
if kwargs:
raise TypeError(
'Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0])
)
return Pipeline(
pipeline._name_estimators(steps), memory=memory, verbose=verbose
)
| false
| true
|
f7180d77b8c3f9bb8f6b2d45b2d1f43aa01a1d41
| 673
|
py
|
Python
|
net/wyun/tests/basic/test_basicfunction.py
|
michaelyin/im2markup-prep
|
0613e4f77f1b50084a85e5c0b1511c9ae007211d
|
[
"Apache-2.0"
] | 3
|
2018-04-19T13:51:33.000Z
|
2020-10-04T12:35:50.000Z
|
net/wyun/tests/basic/test_basicfunction.py
|
michaelyin/im2markup-prep
|
0613e4f77f1b50084a85e5c0b1511c9ae007211d
|
[
"Apache-2.0"
] | null | null | null |
net/wyun/tests/basic/test_basicfunction.py
|
michaelyin/im2markup-prep
|
0613e4f77f1b50084a85e5c0b1511c9ae007211d
|
[
"Apache-2.0"
] | 1
|
2018-11-22T08:44:11.000Z
|
2018-11-22T08:44:11.000Z
|
import unittest
from net.wyun.mer.basicfunction import BasicFunction
class TestBasicFunction(unittest.TestCase):
def setUp(self):
self.func = BasicFunction()
def test_1(self):
self.assertTrue(True)
def test_2(self):
self.assertTrue(True)
def test_3(self):
self.assertEqual(self.func.state, 0)
def test_4(self):
self.func.increment_state()
self.assertEqual(self.func.state, 1)
def test_5(self):
self.func.increment_state()
self.func.increment_state()
self.func.clear_state()
self.assertEqual(self.func.state, 0)
if __name__ == '__main__':
unittest.main()
| 20.393939
| 52
| 0.649331
|
import unittest
from net.wyun.mer.basicfunction import BasicFunction
class TestBasicFunction(unittest.TestCase):
def setUp(self):
self.func = BasicFunction()
def test_1(self):
self.assertTrue(True)
def test_2(self):
self.assertTrue(True)
def test_3(self):
self.assertEqual(self.func.state, 0)
def test_4(self):
self.func.increment_state()
self.assertEqual(self.func.state, 1)
def test_5(self):
self.func.increment_state()
self.func.increment_state()
self.func.clear_state()
self.assertEqual(self.func.state, 0)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7180f9594e73d237384205187769766c8cda637
| 13,033
|
py
|
Python
|
google-cloud-sdk/lib/surface/compute/ssh.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/surface/compute/ssh.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | 4
|
2020-07-21T12:51:46.000Z
|
2022-01-22T10:29:25.000Z
|
google-cloud-sdk/lib/surface/compute/ssh.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | 1
|
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for SSHing into an instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import iap_tunnel
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute import ssh_utils
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.util.ssh import containers
from googlecloudsdk.command_lib.util.ssh import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core.util import retry
def AddCommandArg(parser):
parser.add_argument(
'--command',
help="""\
A command to run on the virtual machine.
Runs the command on the target instance and then exits.
""")
def AddSSHArgs(parser):
"""Additional flags and positional args to be passed to *ssh(1)*."""
parser.add_argument(
'--ssh-flag',
action='append',
help="""\
Additional flags to be passed to *ssh(1)*. It is recommended that flags
be passed using an assignment operator and quotes. This flag will
replace occurences of ``%USER%'' and ``%INSTANCE%'' with their
dereferenced values. Example:
$ {command} example-instance --zone=us-central1-a --ssh-flag="-vvv" --ssh-flag="-L 80:%INSTANCE%:80"
is equivalent to passing the flags ``--vvv'' and ``-L
80:162.222.181.197:80'' to *ssh(1)* if the external IP address of
'example-instance' is 162.222.181.197.
If connecting to the instance's external IP, then %INSTANCE% is replaced
with that, otherwise it is replaced with the internal IP.
""")
parser.add_argument(
'user_host',
completer=completers.InstancesCompleter,
metavar='[USER@]INSTANCE',
help="""\
Specifies the instance to SSH into.
``USER'' specifies the username with which to SSH. If omitted,
the user login name is used. If using OS Login, USER will be replaced
by the OS Login user.
``INSTANCE'' specifies the name of the virtual machine instance to SSH
into.
""")
parser.add_argument(
'ssh_args',
nargs=argparse.REMAINDER,
help="""\
Flags and positionals passed to the underlying ssh implementation.
""",
example="""\
$ {command} example-instance --zone=us-central1-a -- -vvv -L 80:%INSTANCE%:80
""")
def AddContainerArg(parser):
parser.add_argument(
'--container',
help="""\
The name or ID of a container inside of the virtual machine instance
to connect to. This only applies to virtual machines that are using
a Google Container-Optimized virtual machine image. For more
information, see [](https://cloud.google.com/compute/docs/containers).
""")
def AddInternalIPArg(group):
group.add_argument(
'--internal-ip',
default=False,
action='store_true',
help="""\
Connect to instances using their internal IP addresses rather than their
external IP addresses. Use this to connect from one instance to another
on the same VPC network, over a VPN connection, or between two peered
VPC networks.
For this connection to work, you must configure your networks and
firewall to allow SSH connections to the internal IP address of
the instance to which you want to connect.
To learn how to use this flag, see
[](https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances).
""")
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Ssh(base.Command):
"""SSH into a virtual machine instance."""
category = base.TOOLS_CATEGORY
@staticmethod
def Args(parser):
"""Set up arguments for this command.
Args:
parser: An argparse.ArgumentParser.
"""
ssh_utils.BaseSSHCLIHelper.Args(parser)
AddCommandArg(parser)
AddSSHArgs(parser)
AddContainerArg(parser)
flags.AddZoneFlag(
parser, resource_type='instance', operation_type='connect to')
routing_group = parser.add_mutually_exclusive_group()
AddInternalIPArg(routing_group)
iap_tunnel.AddSshTunnelArgs(parser, routing_group)
def Run(self, args):
"""See ssh_utils.BaseSSHCLICommand.Run."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
ssh_helper = ssh_utils.BaseSSHCLIHelper()
ssh_helper.Run(args)
user, instance_name = ssh_utils.GetUserAndInstance(args.user_host)
instance_ref = instance_flags.SSH_INSTANCE_RESOLVER.ResolveResources(
[instance_name], compute_scope.ScopeEnum.ZONE, args.zone,
holder.resources,
scope_lister=instance_flags.GetInstanceZoneScopeLister(client))[0]
instance = ssh_helper.GetInstance(client, instance_ref)
project = ssh_helper.GetProject(client, instance_ref.project)
host_keys = ssh_helper.GetHostKeysFromGuestAttributes(client, instance_ref,
instance, project)
if not host_keys and host_keys is not None:
# Only display this message if there was an attempt to retrieve
# host keys but it was unsuccessful. If Guest Attributes is disabled,
# there is no attempt to retrieve host keys.
log.status.Print('Unable to retrieve host keys from instance metadata. '
'Continuing.')
expiration, expiration_micros = ssh_utils.GetSSHKeyExpirationFromArgs(args)
if args.plain:
use_oslogin = False
else:
public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)
# If there is an '@' symbol in the user_host arg, the user is requesting
# to connect as a specific user. This may get overridden by OS Login.
username_requested = '@' in args.user_host
user, use_oslogin = ssh.CheckForOsloginAndGetUser(
instance, project, user, public_key, expiration_micros,
self.ReleaseTrack(), username_requested=username_requested)
iap_tunnel_args = iap_tunnel.SshTunnelArgs.FromArgs(
args, self.ReleaseTrack(), instance_ref,
ssh_utils.GetExternalInterface(instance, no_raise=True))
internal_address = ssh_utils.GetInternalIPAddress(instance)
if iap_tunnel_args:
# IAP Tunnel only uses instance_address for the purpose of --ssh-flag
# substitution. In this case, dest_addr doesn't do much, it just matches
# against entries in the user's ssh_config file. It's best to use
# something unique to avoid false positive matches, thus we use
# HostKeyAlias.
instance_address = internal_address
dest_addr = ssh_utils.HostKeyAlias(instance)
elif args.internal_ip:
instance_address = internal_address
dest_addr = instance_address
else:
instance_address = ssh_utils.GetExternalIPAddress(instance)
dest_addr = instance_address
remote = ssh.Remote(dest_addr, user)
identity_file = None
options = None
if not args.plain:
identity_file = ssh_helper.keys.key_file
options = ssh_helper.GetConfig(ssh_utils.HostKeyAlias(instance),
args.strict_host_key_checking,
host_keys_to_add=host_keys)
extra_flags = ssh.ParseAndSubstituteSSHFlags(args, remote, instance_address,
internal_address)
remainder = []
if args.ssh_args:
remainder.extend(args.ssh_args)
# Transform args.command into arg list or None if no command
command_list = args.command.split(' ') if args.command else None
tty = containers.GetTty(args.container, command_list)
remote_command = containers.GetRemoteCommand(args.container, command_list)
# Do not include default port since that will prevent users from
# specifying a custom port (b/121998342).
ssh_cmd_args = {'remote': remote,
'identity_file': identity_file,
'options': options,
'extra_flags': extra_flags,
'remote_command': remote_command,
'tty': tty,
'iap_tunnel_args': iap_tunnel_args,
'remainder': remainder}
cmd = ssh.SSHCommand(**ssh_cmd_args)
if args.dry_run:
log.out.Print(' '.join(cmd.Build(ssh_helper.env)))
return
if args.plain or use_oslogin:
keys_newly_added = False
else:
keys_newly_added = ssh_helper.EnsureSSHKeyExists(
client, remote.user, instance, project, expiration=expiration)
if keys_newly_added:
poller = ssh_utils.CreateSSHPoller(remote, identity_file, options,
iap_tunnel_args,
extra_flags=extra_flags)
log.status.Print('Waiting for SSH key to propagate.')
# TODO(b/35355795): Don't force_connect
try:
poller.Poll(ssh_helper.env, force_connect=True)
except retry.WaitException:
raise ssh_utils.NetworkError()
if args.internal_ip:
ssh_helper.PreliminarilyVerifyInstance(instance.id, remote, identity_file,
options)
# Errors from SSH itself result in an ssh.CommandError being raised
return_code = cmd.Run(ssh_helper.env, force_connect=True)
if return_code:
# This is the return code of the remote command. Problems with SSH itself
# will result in ssh.CommandError being raised above.
sys.exit(return_code)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SshBeta(Ssh):
"""SSH into a virtual machine instance (Beta)."""
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SshAlpha(SshBeta):
"""SSH into a virtual machine instance (Alpha)."""
def DetailedHelp():
"""Construct help text based on the command release track."""
detailed_help = {
'brief': 'SSH into a virtual machine instance',
'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
Note, this command does not work when connecting to Windows VMs. To
connect to a Windows instance using a command-line method, refer to this
guide: https://cloud.google.com/compute/docs/instances/connecting-to-instance#windows_cli
The default network comes preconfigured to allow ssh access to
all VMs. If the default network was edited, or if not using the
default network, you may need to explicitly enable ssh access by adding
a firewall-rule:
$ gcloud compute firewall-rules create --network=NETWORK \
default-allow-ssh --allow=tcp:22
{command} ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone=us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone=us-central1-a --command="ps -ejH"
If you are using the Google Container-Optimized virtual machine image,
you can SSH into one of your containers with:
$ {command} example-instance --zone=us-central1-a --container=CONTAINER
You can limit the allowed time to ssh. For example, to allow a key to be
used through 2019:
$ {command} example-instance --zone=us-central1-a --ssh-key-expiration="2020-01-01T00:00:00:00Z"
Or alternatively, allow access for the next two minutes:
$ {command} example-instance --zone=us-central1-a --ssh-key-expire-after=2m
""",
}
return detailed_help
Ssh.detailed_help = DetailedHelp()
| 38.559172
| 108
| 0.682115
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import sys
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import iap_tunnel
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute import ssh_utils
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.command_lib.util.ssh import containers
from googlecloudsdk.command_lib.util.ssh import ssh
from googlecloudsdk.core import log
from googlecloudsdk.core.util import retry
def AddCommandArg(parser):
parser.add_argument(
'--command',
help="""\
A command to run on the virtual machine.
Runs the command on the target instance and then exits.
""")
def AddSSHArgs(parser):
parser.add_argument(
'--ssh-flag',
action='append',
help="""\
Additional flags to be passed to *ssh(1)*. It is recommended that flags
be passed using an assignment operator and quotes. This flag will
replace occurences of ``%USER%'' and ``%INSTANCE%'' with their
dereferenced values. Example:
$ {command} example-instance --zone=us-central1-a --ssh-flag="-vvv" --ssh-flag="-L 80:%INSTANCE%:80"
is equivalent to passing the flags ``--vvv'' and ``-L
80:162.222.181.197:80'' to *ssh(1)* if the external IP address of
'example-instance' is 162.222.181.197.
If connecting to the instance's external IP, then %INSTANCE% is replaced
with that, otherwise it is replaced with the internal IP.
""")
parser.add_argument(
'user_host',
completer=completers.InstancesCompleter,
metavar='[USER@]INSTANCE',
help="""\
Specifies the instance to SSH into.
``USER'' specifies the username with which to SSH. If omitted,
the user login name is used. If using OS Login, USER will be replaced
by the OS Login user.
``INSTANCE'' specifies the name of the virtual machine instance to SSH
into.
""")
parser.add_argument(
'ssh_args',
nargs=argparse.REMAINDER,
help="""\
Flags and positionals passed to the underlying ssh implementation.
""",
example="""\
$ {command} example-instance --zone=us-central1-a -- -vvv -L 80:%INSTANCE%:80
""")
def AddContainerArg(parser):
parser.add_argument(
'--container',
help="""\
The name or ID of a container inside of the virtual machine instance
to connect to. This only applies to virtual machines that are using
a Google Container-Optimized virtual machine image. For more
information, see [](https://cloud.google.com/compute/docs/containers).
""")
def AddInternalIPArg(group):
group.add_argument(
'--internal-ip',
default=False,
action='store_true',
help="""\
Connect to instances using their internal IP addresses rather than their
external IP addresses. Use this to connect from one instance to another
on the same VPC network, over a VPN connection, or between two peered
VPC networks.
For this connection to work, you must configure your networks and
firewall to allow SSH connections to the internal IP address of
the instance to which you want to connect.
To learn how to use this flag, see
[](https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances).
""")
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Ssh(base.Command):
category = base.TOOLS_CATEGORY
@staticmethod
def Args(parser):
ssh_utils.BaseSSHCLIHelper.Args(parser)
AddCommandArg(parser)
AddSSHArgs(parser)
AddContainerArg(parser)
flags.AddZoneFlag(
parser, resource_type='instance', operation_type='connect to')
routing_group = parser.add_mutually_exclusive_group()
AddInternalIPArg(routing_group)
iap_tunnel.AddSshTunnelArgs(parser, routing_group)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
ssh_helper = ssh_utils.BaseSSHCLIHelper()
ssh_helper.Run(args)
user, instance_name = ssh_utils.GetUserAndInstance(args.user_host)
instance_ref = instance_flags.SSH_INSTANCE_RESOLVER.ResolveResources(
[instance_name], compute_scope.ScopeEnum.ZONE, args.zone,
holder.resources,
scope_lister=instance_flags.GetInstanceZoneScopeLister(client))[0]
instance = ssh_helper.GetInstance(client, instance_ref)
project = ssh_helper.GetProject(client, instance_ref.project)
host_keys = ssh_helper.GetHostKeysFromGuestAttributes(client, instance_ref,
instance, project)
if not host_keys and host_keys is not None:
# Only display this message if there was an attempt to retrieve
# host keys but it was unsuccessful. If Guest Attributes is disabled,
# there is no attempt to retrieve host keys.
log.status.Print('Unable to retrieve host keys from instance metadata. '
'Continuing.')
expiration, expiration_micros = ssh_utils.GetSSHKeyExpirationFromArgs(args)
if args.plain:
use_oslogin = False
else:
public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)
# If there is an '@' symbol in the user_host arg, the user is requesting
# to connect as a specific user. This may get overridden by OS Login.
username_requested = '@' in args.user_host
user, use_oslogin = ssh.CheckForOsloginAndGetUser(
instance, project, user, public_key, expiration_micros,
self.ReleaseTrack(), username_requested=username_requested)
iap_tunnel_args = iap_tunnel.SshTunnelArgs.FromArgs(
args, self.ReleaseTrack(), instance_ref,
ssh_utils.GetExternalInterface(instance, no_raise=True))
internal_address = ssh_utils.GetInternalIPAddress(instance)
if iap_tunnel_args:
# IAP Tunnel only uses instance_address for the purpose of --ssh-flag
# substitution. In this case, dest_addr doesn't do much, it just matches
instance_address = internal_address
dest_addr = ssh_utils.HostKeyAlias(instance)
elif args.internal_ip:
instance_address = internal_address
dest_addr = instance_address
else:
instance_address = ssh_utils.GetExternalIPAddress(instance)
dest_addr = instance_address
remote = ssh.Remote(dest_addr, user)
identity_file = None
options = None
if not args.plain:
identity_file = ssh_helper.keys.key_file
options = ssh_helper.GetConfig(ssh_utils.HostKeyAlias(instance),
args.strict_host_key_checking,
host_keys_to_add=host_keys)
extra_flags = ssh.ParseAndSubstituteSSHFlags(args, remote, instance_address,
internal_address)
remainder = []
if args.ssh_args:
remainder.extend(args.ssh_args)
command_list = args.command.split(' ') if args.command else None
tty = containers.GetTty(args.container, command_list)
remote_command = containers.GetRemoteCommand(args.container, command_list)
ssh_cmd_args = {'remote': remote,
'identity_file': identity_file,
'options': options,
'extra_flags': extra_flags,
'remote_command': remote_command,
'tty': tty,
'iap_tunnel_args': iap_tunnel_args,
'remainder': remainder}
cmd = ssh.SSHCommand(**ssh_cmd_args)
if args.dry_run:
log.out.Print(' '.join(cmd.Build(ssh_helper.env)))
return
if args.plain or use_oslogin:
keys_newly_added = False
else:
keys_newly_added = ssh_helper.EnsureSSHKeyExists(
client, remote.user, instance, project, expiration=expiration)
if keys_newly_added:
poller = ssh_utils.CreateSSHPoller(remote, identity_file, options,
iap_tunnel_args,
extra_flags=extra_flags)
log.status.Print('Waiting for SSH key to propagate.')
try:
poller.Poll(ssh_helper.env, force_connect=True)
except retry.WaitException:
raise ssh_utils.NetworkError()
if args.internal_ip:
ssh_helper.PreliminarilyVerifyInstance(instance.id, remote, identity_file,
options)
# Errors from SSH itself result in an ssh.CommandError being raised
return_code = cmd.Run(ssh_helper.env, force_connect=True)
if return_code:
# This is the return code of the remote command. Problems with SSH itself
# will result in ssh.CommandError being raised above.
sys.exit(return_code)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SshBeta(Ssh):
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SshAlpha(SshBeta):
def DetailedHelp():
detailed_help = {
'brief': 'SSH into a virtual machine instance',
'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
Note, this command does not work when connecting to Windows VMs. To
connect to a Windows instance using a command-line method, refer to this
guide: https://cloud.google.com/compute/docs/instances/connecting-to-instance#windows_cli
The default network comes preconfigured to allow ssh access to
all VMs. If the default network was edited, or if not using the
default network, you may need to explicitly enable ssh access by adding
a firewall-rule:
$ gcloud compute firewall-rules create --network=NETWORK \
default-allow-ssh --allow=tcp:22
{command} ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone=us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone=us-central1-a --command="ps -ejH"
If you are using the Google Container-Optimized virtual machine image,
you can SSH into one of your containers with:
$ {command} example-instance --zone=us-central1-a --container=CONTAINER
You can limit the allowed time to ssh. For example, to allow a key to be
used through 2019:
$ {command} example-instance --zone=us-central1-a --ssh-key-expiration="2020-01-01T00:00:00:00Z"
Or alternatively, allow access for the next two minutes:
$ {command} example-instance --zone=us-central1-a --ssh-key-expire-after=2m
""",
}
return detailed_help
Ssh.detailed_help = DetailedHelp()
| true
| true
|
f7180fdca7de8d265e0d9027890060eff6ecc433
| 3,563
|
py
|
Python
|
playground/algorithms/ddpg.py
|
brandontrabucco/playground
|
069be961aaecb45d75f12f4a71cfa65d7152ea8a
|
[
"MIT"
] | 3
|
2019-12-06T19:22:22.000Z
|
2020-01-20T01:57:26.000Z
|
playground/algorithms/ddpg.py
|
brandontrabucco/playground
|
069be961aaecb45d75f12f4a71cfa65d7152ea8a
|
[
"MIT"
] | null | null | null |
playground/algorithms/ddpg.py
|
brandontrabucco/playground
|
069be961aaecb45d75f12f4a71cfa65d7152ea8a
|
[
"MIT"
] | null | null | null |
"""Author: Brandon Trabucco, Copyright 2019, MIT License"""
from playground.algorithms.algorithm import Algorithm
import tensorflow as tf
class DDPG(Algorithm):
def __init__(
self,
policy,
target_policy,
qf,
target_qf,
replay_buffer,
reward_scale=1.0,
discount=0.99,
observation_key="observation",
batch_size=32,
update_every=1,
update_after=0,
logger=None,
logging_prefix="ddpg/"
):
# train a policy using the deep deterministic policy gradient
Algorithm.__init__(
self,
replay_buffer,
batch_size=batch_size,
update_every=update_every,
update_after=update_after,
logger=logger,
logging_prefix=logging_prefix)
# each neural network is probabilistic
self.policy = policy
self.target_policy = target_policy
self.qf = qf
self.target_qf = target_qf
# select into the observation dictionary
self.observation_key = observation_key
# control some parameters that are important for ddpg
self.reward_scale = reward_scale
self.discount = discount
def update_algorithm(
self,
observations,
actions,
rewards,
next_observations,
terminals
):
# select from the observation dictionary
observations = observations[self.observation_key]
next_observations = next_observations[self.observation_key]
# build a tape to collect gradients from the policy and critics
with tf.GradientTape(persistent=True) as tape:
mean_actions, log_pi = self.policy.expected_value(observations)
next_mean_actions, next_log_pi = self.target_policy.expected_value(
next_observations)
# build the q function target value
inputs = tf.concat([next_observations, next_mean_actions], -1)
target_qf_value = self.target_qf(inputs)[..., 0]
self.record("target_qf_value", tf.reduce_mean(target_qf_value).numpy())
qf_targets = tf.stop_gradient(
self.reward_scale * rewards + terminals * self.discount * (
target_qf_value))
self.record("qf_targets", tf.reduce_mean(qf_targets).numpy())
# build the q function loss
inputs = tf.concat([observations, actions], -1)
qf_value = self.qf(inputs)[..., 0]
self.record("qf_value", tf.reduce_mean(qf_value).numpy())
qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))
self.record("qf_loss", qf_loss.numpy())
# build the policy loss
inputs = tf.concat([observations, mean_actions], -1)
policy_qf_value = self.qf(inputs)[..., 0]
self.record("policy_qf_value", tf.reduce_mean(policy_qf_value).numpy())
policy_loss = -tf.reduce_mean(policy_qf_value)
self.record("policy_loss", policy_loss.numpy())
# back prop gradients
self.policy.apply_gradients(
self.policy.compute_gradients(policy_loss, tape))
self.qf.apply_gradients(
self.qf.compute_gradients(qf_loss, tape))
# soft update target parameters
self.target_policy.soft_update(self.policy.get_weights())
self.target_qf.soft_update(self.qf.get_weights())
| 35.989899
| 83
| 0.609879
|
from playground.algorithms.algorithm import Algorithm
import tensorflow as tf
class DDPG(Algorithm):
def __init__(
self,
policy,
target_policy,
qf,
target_qf,
replay_buffer,
reward_scale=1.0,
discount=0.99,
observation_key="observation",
batch_size=32,
update_every=1,
update_after=0,
logger=None,
logging_prefix="ddpg/"
):
Algorithm.__init__(
self,
replay_buffer,
batch_size=batch_size,
update_every=update_every,
update_after=update_after,
logger=logger,
logging_prefix=logging_prefix)
self.policy = policy
self.target_policy = target_policy
self.qf = qf
self.target_qf = target_qf
self.observation_key = observation_key
self.reward_scale = reward_scale
self.discount = discount
def update_algorithm(
self,
observations,
actions,
rewards,
next_observations,
terminals
):
observations = observations[self.observation_key]
next_observations = next_observations[self.observation_key]
with tf.GradientTape(persistent=True) as tape:
mean_actions, log_pi = self.policy.expected_value(observations)
next_mean_actions, next_log_pi = self.target_policy.expected_value(
next_observations)
inputs = tf.concat([next_observations, next_mean_actions], -1)
target_qf_value = self.target_qf(inputs)[..., 0]
self.record("target_qf_value", tf.reduce_mean(target_qf_value).numpy())
qf_targets = tf.stop_gradient(
self.reward_scale * rewards + terminals * self.discount * (
target_qf_value))
self.record("qf_targets", tf.reduce_mean(qf_targets).numpy())
inputs = tf.concat([observations, actions], -1)
qf_value = self.qf(inputs)[..., 0]
self.record("qf_value", tf.reduce_mean(qf_value).numpy())
qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))
self.record("qf_loss", qf_loss.numpy())
inputs = tf.concat([observations, mean_actions], -1)
policy_qf_value = self.qf(inputs)[..., 0]
self.record("policy_qf_value", tf.reduce_mean(policy_qf_value).numpy())
policy_loss = -tf.reduce_mean(policy_qf_value)
self.record("policy_loss", policy_loss.numpy())
self.policy.apply_gradients(
self.policy.compute_gradients(policy_loss, tape))
self.qf.apply_gradients(
self.qf.compute_gradients(qf_loss, tape))
self.target_policy.soft_update(self.policy.get_weights())
self.target_qf.soft_update(self.qf.get_weights())
| true
| true
|
f71810217649e9ce7c57c78566cb40789c548173
| 3,089
|
py
|
Python
|
google/ads/google_ads/v0/proto/resources/gender_view_pb2.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v0/proto/resources/gender_view_pb2.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v0/proto/resources/gender_view_pb2.py
|
jwygoda/google-ads-python
|
863892b533240cb45269d9c2cceec47e2c5a8b68
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v0/proto/resources/gender_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/resources/gender_view.proto',
package='google.ads.googleads.v0.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\017GenderViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'),
serialized_pb=_b('\n9google/ads/googleads_v0/proto/resources/gender_view.proto\x12!google.ads.googleads.v0.resources\"#\n\nGenderView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\xfc\x01\n%com.google.ads.googleads.v0.resourcesB\x0fGenderViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3')
)
_GENDERVIEW = _descriptor.Descriptor(
name='GenderView',
full_name='google.ads.googleads.v0.resources.GenderView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v0.resources.GenderView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=131,
)
DESCRIPTOR.message_types_by_name['GenderView'] = _GENDERVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenderView = _reflection.GeneratedProtocolMessageType('GenderView', (_message.Message,), dict(
DESCRIPTOR = _GENDERVIEW,
__module__ = 'google.ads.googleads_v0.proto.resources.gender_view_pb2'
,
__doc__ = """A gender view.
Attributes:
resource_name:
The resource name of the gender view. Gender view resource
names have the form: ``customers/{customer_id}/genderViews/{a
d_group_id}_{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.GenderView)
))
_sym_db.RegisterMessage(GenderView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.135802
| 488
| 0.767886
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/resources/gender_view.proto',
package='google.ads.googleads.v0.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\017GenderViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'),
serialized_pb=_b('\n9google/ads/googleads_v0/proto/resources/gender_view.proto\x12!google.ads.googleads.v0.resources\"#\n\nGenderView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\xfc\x01\n%com.google.ads.googleads.v0.resourcesB\x0fGenderViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3')
)
_GENDERVIEW = _descriptor.Descriptor(
name='GenderView',
full_name='google.ads.googleads.v0.resources.GenderView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v0.resources.GenderView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=131,
)
DESCRIPTOR.message_types_by_name['GenderView'] = _GENDERVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenderView = _reflection.GeneratedProtocolMessageType('GenderView', (_message.Message,), dict(
DESCRIPTOR = _GENDERVIEW,
__module__ = 'google.ads.googleads_v0.proto.resources.gender_view_pb2'
,
__doc__ = """A gender view.
Attributes:
resource_name:
The resource name of the gender view. Gender view resource
names have the form: ``customers/{customer_id}/genderViews/{a
d_group_id}_{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.GenderView)
))
_sym_db.RegisterMessage(GenderView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
f71810c4ecead08a935a56fcd5fb6be0cdf8d125
| 130
|
py
|
Python
|
pythreshold/global_th/entropy/__init__.py
|
pedrogalher/pythreshold
|
135e42fb4be1ff4d4c52ea05daca84be1acaa0fc
|
[
"MIT"
] | null | null | null |
pythreshold/global_th/entropy/__init__.py
|
pedrogalher/pythreshold
|
135e42fb4be1ff4d4c52ea05daca84be1acaa0fc
|
[
"MIT"
] | null | null | null |
pythreshold/global_th/entropy/__init__.py
|
pedrogalher/pythreshold
|
135e42fb4be1ff4d4c52ea05daca84be1acaa0fc
|
[
"MIT"
] | null | null | null |
from .pun import pun_threshold
from .kapur import kapur_threshold, kapur_multithreshold
from .johannsen import johannsen_threshold
| 43.333333
| 56
| 0.876923
|
from .pun import pun_threshold
from .kapur import kapur_threshold, kapur_multithreshold
from .johannsen import johannsen_threshold
| true
| true
|
f71811f3f0271780fcc16a9db631d5dee72d81ba
| 1,078
|
py
|
Python
|
src/python/pants/backend/codegen/jaxb/targets.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/jaxb/targets.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/jaxb/targets.py
|
stuhood/pants
|
107b8335a03482516f64aefa98aadf9f5278b2ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JaxbJavaPackage(StringField):
"""Java package (com.company.package) in which to generate the output Java files.
If unspecified, Pants guesses it from the file path leading to the schema (xsd) file. This guess
is accurate only if the .xsd file is in a path like `.../com/company/package/schema.xsd`. Pants
looks for packages that start with 'com', 'org', or 'net'.
"""
alias = "package"
class JaxbLanguage(StringField):
"""The language to use, which currently can only be `java`."""
alias = "language"
valid_choices = ("java",)
default = "java"
value: str
class JaxbLibrary(Target):
"""A Java library generated from JAXB xsd files."""
alias = "jaxb_library"
core_fields = (*COMMON_JVM_FIELDS, Sources, JaxbJavaPackage, JaxbLanguage)
v1_only = True
| 31.705882
| 100
| 0.71243
|
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JaxbJavaPackage(StringField):
alias = "package"
class JaxbLanguage(StringField):
alias = "language"
valid_choices = ("java",)
default = "java"
value: str
class JaxbLibrary(Target):
alias = "jaxb_library"
core_fields = (*COMMON_JVM_FIELDS, Sources, JaxbJavaPackage, JaxbLanguage)
v1_only = True
| true
| true
|
f718134ad71e50e6db3ca760f1916747c1d91ee2
| 4,100
|
py
|
Python
|
tests/plot_time_space.py
|
folk85/gen_turb
|
4390938c4cefae334e95414f83b9c484991bff67
|
[
"MIT"
] | 1
|
2020-09-10T07:42:29.000Z
|
2020-09-10T07:42:29.000Z
|
tests/plot_time_space.py
|
folk85/gen_turb
|
4390938c4cefae334e95414f83b9c484991bff67
|
[
"MIT"
] | null | null | null |
tests/plot_time_space.py
|
folk85/gen_turb
|
4390938c4cefae334e95414f83b9c484991bff67
|
[
"MIT"
] | 1
|
2019-08-08T20:08:49.000Z
|
2019-08-08T20:08:49.000Z
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib as m
import matplotlib.pyplot as plt
from scipy.fftpack import *
from plot_spectr import *
def main_routine():
print(os.getcwd())
nfile = './store.dat'
#Read the file by blocks to reduce required memory
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
#repeat for each timesteps
nk = 64*64 *64
ntimes = nel / nk
def get_nel(nfile):
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
return nel
def plot_spectr(uin,vin,win):
alpha = 1.339e0
L = 1.0e-1
sigma = 1.0e+1
# x,y,z = np.genfromtxt('tests/spectr.dat',unpack=True)
# x,y,z = np.genfromtxt('../hita/spectrum.dat',unpack=True)
# x1,y1,z1 = np.genfromtxt('../hita/spectrum_32.dat',unpack=True)
uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)
nk = int(round(np.size(uvel)**(1./3.)))
nel = nk
ufft = fftn(uvel.reshape(nk,nk,nk))
vfft = fftn(vvel.reshape(nk,nk,nk))
wfft = fftn(wvel.reshape(nk,nk,nk))
muu = ufft*np.conj(ufft) / nel**6
mvv = vfft*np.conj(vfft) / nel**6
mww = wfft*np.conj(wfft) / nel**6
# calc std
umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])
std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])
sigma = np.sqrt(np.sum(std_i[:]**2))
print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)
dx = 10.
k = np.arange(-nk//2,nk//2)*dx
k = np.roll(k,nk//2)
spectrum = np.zeros(nk)
count = np.zeros(nk)
# ?np.meshgrid(k,k,k)
X,Y,Z = np.meshgrid(k,k,k)
r = np.sqrt(X**2+Y**2+Z**2) #*dx
# print(np.shape(r),r.min(),r.max(),k.max(),r[:,0,0])
for i,ki in enumerate(k[:nk//2]):
t = np.where((r<=ki+dx/2)&(r>ki-dx/2))
spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)
count[i] = np.size(t[0])
spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)
font = {'family': 'Droid Sans',
'weight': 'normal',
'size': 12}
m.rc('axes',linewidth=2)
m.rc('font',**font)
m.rc('lines',markeredgewidth=1.0)
f,ax = plt.subplots()
xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)
xf = np.exp(xf)
ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)
ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)
# ax.loglog(x,y,'bx')
# ax.loglog(x1,y1,'ro')
ax.set_xlabel(u'$k, 1/м$',size='large')
ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')
plt.grid()
plt.tight_layout()
plt.show()
del(f)
del(ax)
plt.clf()
Rij_x=(ufft*np.conj(ufft)) # compute velo. correlation tensor
Rij_y=(vfft*np.conj(vfft))
Rij_z=(wfft*np.conj(wfft))
R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;
R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;
R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;
NFFT=np.size(ufft,1)
R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.
# R11 = R11[:np.size(ufft)//2+1]
R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0
R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0
R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0
R22 = (R1_22+R2_22+R3_22)/3.0e0
# R22 = R22(1:size(u_fft)/2+1);
Lx = 2.0*np.pi*1.0e-1
r = np.linspace(0,Lx,NFFT)/(Lx/2);
l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])
l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])
print("Integral Length Scale Longitudal: %g"%(l11))
print("Integral Length Scale Tangent: %g"%(l22))
f,ax = plt.subplots(1)
ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')
ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')
ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))
ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))
plt.legend()
plt.tight_layout()
ax.set_xlabel(u'$r$')
ax.set_ylabel(u'$R_{11}, R_{22}$')
plt.grid()
plt.show()
return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]
def Ek(k,alpha=1.339,L=0.01,sigma=10.):
tmp = (alpha * L * k) **2
tmp = sigma*sigma*L * tmp * tmp * 5.5e+1/ (27.0 * np.pi * (1.0 + tmp)**(1.7e+1/6.0e0))
return tmp
if __name__ == '__main__':
main_routine()
| 30.37037
| 96
| 0.580488
|
import os
import numpy as np
import matplotlib as m
import matplotlib.pyplot as plt
from scipy.fftpack import *
from plot_spectr import *
def main_routine():
print(os.getcwd())
nfile = './store.dat'
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
nk = 64*64 *64
ntimes = nel / nk
def get_nel(nfile):
with open(nfile,'r') as f:
nel = sum(1 for _ in f)
f.close()
return nel
def plot_spectr(uin,vin,win):
alpha = 1.339e0
L = 1.0e-1
sigma = 1.0e+1
uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)
nk = int(round(np.size(uvel)**(1./3.)))
nel = nk
ufft = fftn(uvel.reshape(nk,nk,nk))
vfft = fftn(vvel.reshape(nk,nk,nk))
wfft = fftn(wvel.reshape(nk,nk,nk))
muu = ufft*np.conj(ufft) / nel**6
mvv = vfft*np.conj(vfft) / nel**6
mww = wfft*np.conj(wfft) / nel**6
umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])
std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])
sigma = np.sqrt(np.sum(std_i[:]**2))
print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)
dx = 10.
k = np.arange(-nk//2,nk//2)*dx
k = np.roll(k,nk//2)
spectrum = np.zeros(nk)
count = np.zeros(nk)
X,Y,Z = np.meshgrid(k,k,k)
r = np.sqrt(X**2+Y**2+Z**2)
for i,ki in enumerate(k[:nk//2]):
t = np.where((r<=ki+dx/2)&(r>ki-dx/2))
spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)
count[i] = np.size(t[0])
spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)
font = {'family': 'Droid Sans',
'weight': 'normal',
'size': 12}
m.rc('axes',linewidth=2)
m.rc('font',**font)
m.rc('lines',markeredgewidth=1.0)
f,ax = plt.subplots()
xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)
xf = np.exp(xf)
ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)
ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)
ax.set_xlabel(u'$k, 1/м$',size='large')
ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')
plt.grid()
plt.tight_layout()
plt.show()
del(f)
del(ax)
plt.clf()
Rij_x=(ufft*np.conj(ufft))
Rij_y=(vfft*np.conj(vfft))
Rij_z=(wfft*np.conj(wfft))
R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;
R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;
R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;
NFFT=np.size(ufft,1)
R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.
R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0
R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0
R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0
R22 = (R1_22+R2_22+R3_22)/3.0e0
Lx = 2.0*np.pi*1.0e-1
r = np.linspace(0,Lx,NFFT)/(Lx/2);
l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])
l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])
print("Integral Length Scale Longitudal: %g"%(l11))
print("Integral Length Scale Tangent: %g"%(l22))
f,ax = plt.subplots(1)
ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')
ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')
ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))
ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))
plt.legend()
plt.tight_layout()
ax.set_xlabel(u'$r$')
ax.set_ylabel(u'$R_{11}, R_{22}$')
plt.grid()
plt.show()
return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]
def Ek(k,alpha=1.339,L=0.01,sigma=10.):
tmp = (alpha * L * k) **2
tmp = sigma*sigma*L * tmp * tmp * 5.5e+1/ (27.0 * np.pi * (1.0 + tmp)**(1.7e+1/6.0e0))
return tmp
if __name__ == '__main__':
main_routine()
| true
| true
|
f71813e7fa972b662bb12978d9498a527f879572
| 60
|
py
|
Python
|
tacotron2/__init__.py
|
samia-mmx/T2_PT
|
25ed08791f72492440e9a796d37c5e67a51aaf05
|
[
"BSD-3-Clause"
] | null | null | null |
tacotron2/__init__.py
|
samia-mmx/T2_PT
|
25ed08791f72492440e9a796d37c5e67a51aaf05
|
[
"BSD-3-Clause"
] | null | null | null |
tacotron2/__init__.py
|
samia-mmx/T2_PT
|
25ed08791f72492440e9a796d37c5e67a51aaf05
|
[
"BSD-3-Clause"
] | null | null | null |
from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
| 30
| 59
| 0.883333
|
from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
| true
| true
|
f718170478283f6fd995f6b98c28ab10f3a084fa
| 5,620
|
py
|
Python
|
google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v4/proto/enums/change_status_resource_type_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/change_status_resource_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/change_status_resource_type.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\035ChangeStatusResourceTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nEgoogle/ads/googleads_v4/proto/enums/change_status_resource_type.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\x90\x02\n\x1c\x43hangeStatusResourceTypeEnum\"\xef\x01\n\x18\x43hangeStatusResourceType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08\x41\x44_GROUP\x10\x03\x12\x0f\n\x0b\x41\x44_GROUP_AD\x10\x04\x12\x16\n\x12\x41\x44_GROUP_CRITERION\x10\x05\x12\x0c\n\x08\x43\x41MPAIGN\x10\x06\x12\x16\n\x12\x43\x41MPAIGN_CRITERION\x10\x07\x12\x08\n\x04\x46\x45\x45\x44\x10\t\x12\r\n\tFEED_ITEM\x10\n\x12\x11\n\rAD_GROUP_FEED\x10\x0b\x12\x11\n\rCAMPAIGN_FEED\x10\x0c\x12\x19\n\x15\x41\x44_GROUP_BID_MODIFIER\x10\rB\xf2\x01\n!com.google.ads.googleads.v4.enumsB\x1d\x43hangeStatusResourceTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE = _descriptor.EnumDescriptor(
name='ChangeStatusResourceType',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum.ChangeStatusResourceType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_AD', index=3, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_CRITERION', index=4, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN', index=5, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_CRITERION', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED', index=7, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED_ITEM', index=8, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_FEED', index=9, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_FEED', index=10, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_BID_MODIFIER', index=11, number=13,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=168,
serialized_end=407,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE)
_CHANGESTATUSRESOURCETYPEENUM = _descriptor.Descriptor(
name='ChangeStatusResourceTypeEnum',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=407,
)
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE.containing_type = _CHANGESTATUSRESOURCETYPEENUM
DESCRIPTOR.message_types_by_name['ChangeStatusResourceTypeEnum'] = _CHANGESTATUSRESOURCETYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusResourceTypeEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusResourceTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _CHANGESTATUSRESOURCETYPEENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.change_status_resource_type_pb2'
,
__doc__ = """Container for enum describing supported resource types for the
ChangeStatus resource.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum)
))
_sym_db.RegisterMessage(ChangeStatusResourceTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 41.62963
| 1,005
| 0.775801
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/change_status_resource_type.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\035ChangeStatusResourceTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\nEgoogle/ads/googleads_v4/proto/enums/change_status_resource_type.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"\x90\x02\n\x1c\x43hangeStatusResourceTypeEnum\"\xef\x01\n\x18\x43hangeStatusResourceType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08\x41\x44_GROUP\x10\x03\x12\x0f\n\x0b\x41\x44_GROUP_AD\x10\x04\x12\x16\n\x12\x41\x44_GROUP_CRITERION\x10\x05\x12\x0c\n\x08\x43\x41MPAIGN\x10\x06\x12\x16\n\x12\x43\x41MPAIGN_CRITERION\x10\x07\x12\x08\n\x04\x46\x45\x45\x44\x10\t\x12\r\n\tFEED_ITEM\x10\n\x12\x11\n\rAD_GROUP_FEED\x10\x0b\x12\x11\n\rCAMPAIGN_FEED\x10\x0c\x12\x19\n\x15\x41\x44_GROUP_BID_MODIFIER\x10\rB\xf2\x01\n!com.google.ads.googleads.v4.enumsB\x1d\x43hangeStatusResourceTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE = _descriptor.EnumDescriptor(
name='ChangeStatusResourceType',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum.ChangeStatusResourceType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP', index=2, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_AD', index=3, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_CRITERION', index=4, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN', index=5, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_CRITERION', index=6, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED', index=7, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED_ITEM', index=8, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_FEED', index=9, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_FEED', index=10, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_BID_MODIFIER', index=11, number=13,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=168,
serialized_end=407,
)
_sym_db.RegisterEnumDescriptor(_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE)
_CHANGESTATUSRESOURCETYPEENUM = _descriptor.Descriptor(
name='ChangeStatusResourceTypeEnum',
full_name='google.ads.googleads.v4.enums.ChangeStatusResourceTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=407,
)
_CHANGESTATUSRESOURCETYPEENUM_CHANGESTATUSRESOURCETYPE.containing_type = _CHANGESTATUSRESOURCETYPEENUM
DESCRIPTOR.message_types_by_name['ChangeStatusResourceTypeEnum'] = _CHANGESTATUSRESOURCETYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChangeStatusResourceTypeEnum = _reflection.GeneratedProtocolMessageType('ChangeStatusResourceTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _CHANGESTATUSRESOURCETYPEENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.change_status_resource_type_pb2'
,
__doc__ = """Container for enum describing supported resource types for the
ChangeStatus resource.
""",
))
_sym_db.RegisterMessage(ChangeStatusResourceTypeEnum)
DESCRIPTOR._options = None
| true
| true
|
f718190eca4cc66afac5d11490eec0b6d1f694cf
| 10,310
|
py
|
Python
|
tests/unit/Stories.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/Stories.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/Stories.py
|
rashmi43/platform-engine
|
dd9a22742bc8dc43a530ea5edef39b3c35db57c1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pathlib
import time
from asyncy.Stories import MAX_BYTES_LOGGING, Stories
from asyncy.utils import Dict, Resolver
from pytest import mark
def test_stories_init(app, logger, story):
assert story.entrypoint == app.stories['hello.story']['entrypoint']
assert story.app == app
assert story.name == 'hello.story'
assert story.logger == logger
assert story.execution_id is not None
assert story.results == {}
def test_stories_get_tmp_dir(story):
story.execution_id = 'ex'
assert story.get_tmp_dir() == '/tmp/story.ex'
def test_stories_create_tmp_dir(patch, story):
patch.object(pathlib, 'Path')
patch.object(story, 'get_tmp_dir')
# Yes, called twice to ensure the dir is created just once.
story.create_tmp_dir()
story.create_tmp_dir()
story.get_tmp_dir.assert_called_once()
pathlib.Path.assert_called_with(story.get_tmp_dir())
pathlib.Path().mkdir.assert_called_with(
parents=True, mode=0o700, exist_ok=True)
@mark.parametrize('long', [True, False])
def test_get_str_for_logging(long):
def make_string(length):
out = ''
for i in range(0, length):
out += 'a'
return out
test_str = 'hello world'
if long:
test_str = make_string(1024)
actual_val = Stories.get_str_for_logging(test_str)
if long:
assert actual_val == f'{test_str[:MAX_BYTES_LOGGING]} ... ' \
f'({1024-MAX_BYTES_LOGGING} bytes truncated)'
else:
assert actual_val == 'hello world'
def test_stories_line(magic, story):
story.tree = magic()
line = story.line('1')
assert line == story.tree['1']
def test_stories_line_none(magic, story):
story.tree = magic()
line = story.line(None)
assert line is None
def test_stories_first_line(patch, story):
story.entrypoint = '16'
story.tree = {'23': {'ln': '23'}, '16': {'ln': '16'}}
result = story.first_line()
assert result == '16'
def test_stories_function_line_by_name(patch, story):
patch.object(story, 'line')
ret = story.function_line_by_name('execute')
story.line.assert_called_with(
story.app.stories[story.name]['functions']['execute'])
assert ret == story.line()
def test_stories_resolve(patch, logger, story):
patch.object(Resolver, 'resolve')
story.context = 'context'
result = story.resolve('args')
assert result == 'args'
def test_command_arguments_list(patch, story):
patch.object(Stories, 'resolve', return_value='something')
obj = {'$OBJECT': 'string', 'string': 'string'}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj, encode=True)
assert result == ['something']
def test_command_arguments_list_none(patch, story):
"""
Ensures that when an argument resolves to None it is used literally
"""
patch.object(Stories, 'resolve', return_value=None)
obj = {'$OBJECT': 'path', 'paths': ['literal']}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj)
assert result == ['literal']
def test_stories_start_line(patch, story):
patch.object(time, 'time')
story.start_line('1')
assert story.results['1'] == {'start': time.time()}
def test_stories_end_line(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1')
assert story.results['1']['output'] is None
assert story.results['1']['end'] == time.time()
assert story.results['1']['start'] == 'start'
def test_stories_end_line_output(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='output')
assert story.results['1']['output'] == 'output'
def test_stories_end_line_output_assign(patch, story):
patch.object(Dict, 'set')
story.results = {'1': {'start': 'start'}}
assign = {'paths': ['x']}
story.end_line('1', output='output', assign=assign)
assert story.results['1']['output'] == 'output'
Dict.set.assert_called_with(story.context, assign['paths'], 'output')
def test_stories_end_line_output_as_list(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=['a', 'b'])
assert story.results['1']['output'] == ['a', 'b']
def test_stories_end_line_output_as_json_no_auto_convert(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='{"key":"value"}')
assert story.results['1']['output'] == '{"key":"value"}'
def test_stories_end_line_output_as_sting(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=' foobar\n\t')
assert story.results['1']['output'] == ' foobar\n\t'
def test_stories_end_line_output_as_bytes(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=b'output')
assert story.results['1']['output'] == b'output'
@mark.parametrize('input,output', [
(None, 'null'),
(False, 'false'),
(True, 'true'),
('string', "'string'"),
("st'ring", "'st\'ring'"),
(1, "'1'"),
({'foo': 'bar'}, "'{\"foo\": \"bar\"}'"),
(['foobar'], "'[\"foobar\"]'"),
])
def test_stories_encode(story, input, output):
assert story.encode(input) == output
def test_stories_argument_by_name_empty(story):
assert story.argument_by_name({}, 'foo') is None
def test_stories_argument_by_name_lookup(patch, story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {'$OBJECT': 'string', 'string': 'bar'}
}
]
}
patch.object(story, 'resolve')
story.argument_by_name(line, 'foo')
story.resolve.assert_called_with(line['args'][0]['argument'], encode=False)
def test_stories_argument_by_name_missing(patch, story):
line = {'args': []}
assert story.argument_by_name(line, 'foo') is None
def test_stories_prepare(story):
story.prepare(None)
def test_stories_prepare_context(story, app):
story.app = app
context = {'app': app.app_context}
story.prepare(context=context)
assert story.environment == app.environment
assert story.context == context
def test_stories_next_block_simple(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['4']
def test_stories_next_block_as_lines(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_where_next_block_is_block(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4', 'enter': '4'},
'4': {'ln': '4', 'parent': '3'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_only_block(patch, story):
story.tree = {
'2': {'ln': '2'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_context_for_function_call(story):
assert story.context_for_function_call({}, {}) == {}
def test_stories_context_for_function_call_with_args(story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'string',
'string': 'bar'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'string',
'string': 'bar1'
}
}
]
}
function_line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
}
]
}
assert story.context_for_function_call(line, function_line) == {
'foo': 'bar',
'foo1': 'bar1'
}
def test_stories_next_block_nested(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['7']
def test_stories_next_block_last_line(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_next_block_nested_inner(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7', 'parent': '2', 'next': '8'},
'8': {'ln': '8', 'parent': '2'}
}
assert isinstance(story, Stories)
assert story.tree['7'] == story.next_block(story.line('4'))
| 28.169399
| 79
| 0.562949
|
import pathlib
import time
from asyncy.Stories import MAX_BYTES_LOGGING, Stories
from asyncy.utils import Dict, Resolver
from pytest import mark
def test_stories_init(app, logger, story):
assert story.entrypoint == app.stories['hello.story']['entrypoint']
assert story.app == app
assert story.name == 'hello.story'
assert story.logger == logger
assert story.execution_id is not None
assert story.results == {}
def test_stories_get_tmp_dir(story):
story.execution_id = 'ex'
assert story.get_tmp_dir() == '/tmp/story.ex'
def test_stories_create_tmp_dir(patch, story):
patch.object(pathlib, 'Path')
patch.object(story, 'get_tmp_dir')
story.create_tmp_dir()
story.create_tmp_dir()
story.get_tmp_dir.assert_called_once()
pathlib.Path.assert_called_with(story.get_tmp_dir())
pathlib.Path().mkdir.assert_called_with(
parents=True, mode=0o700, exist_ok=True)
@mark.parametrize('long', [True, False])
def test_get_str_for_logging(long):
def make_string(length):
out = ''
for i in range(0, length):
out += 'a'
return out
test_str = 'hello world'
if long:
test_str = make_string(1024)
actual_val = Stories.get_str_for_logging(test_str)
if long:
assert actual_val == f'{test_str[:MAX_BYTES_LOGGING]} ... ' \
f'({1024-MAX_BYTES_LOGGING} bytes truncated)'
else:
assert actual_val == 'hello world'
def test_stories_line(magic, story):
story.tree = magic()
line = story.line('1')
assert line == story.tree['1']
def test_stories_line_none(magic, story):
story.tree = magic()
line = story.line(None)
assert line is None
def test_stories_first_line(patch, story):
story.entrypoint = '16'
story.tree = {'23': {'ln': '23'}, '16': {'ln': '16'}}
result = story.first_line()
assert result == '16'
def test_stories_function_line_by_name(patch, story):
patch.object(story, 'line')
ret = story.function_line_by_name('execute')
story.line.assert_called_with(
story.app.stories[story.name]['functions']['execute'])
assert ret == story.line()
def test_stories_resolve(patch, logger, story):
patch.object(Resolver, 'resolve')
story.context = 'context'
result = story.resolve('args')
assert result == 'args'
def test_command_arguments_list(patch, story):
patch.object(Stories, 'resolve', return_value='something')
obj = {'$OBJECT': 'string', 'string': 'string'}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj, encode=True)
assert result == ['something']
def test_command_arguments_list_none(patch, story):
patch.object(Stories, 'resolve', return_value=None)
obj = {'$OBJECT': 'path', 'paths': ['literal']}
result = story.command_arguments_list([obj])
Stories.resolve.assert_called_with(obj)
assert result == ['literal']
def test_stories_start_line(patch, story):
patch.object(time, 'time')
story.start_line('1')
assert story.results['1'] == {'start': time.time()}
def test_stories_end_line(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1')
assert story.results['1']['output'] is None
assert story.results['1']['end'] == time.time()
assert story.results['1']['start'] == 'start'
def test_stories_end_line_output(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='output')
assert story.results['1']['output'] == 'output'
def test_stories_end_line_output_assign(patch, story):
patch.object(Dict, 'set')
story.results = {'1': {'start': 'start'}}
assign = {'paths': ['x']}
story.end_line('1', output='output', assign=assign)
assert story.results['1']['output'] == 'output'
Dict.set.assert_called_with(story.context, assign['paths'], 'output')
def test_stories_end_line_output_as_list(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=['a', 'b'])
assert story.results['1']['output'] == ['a', 'b']
def test_stories_end_line_output_as_json_no_auto_convert(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output='{"key":"value"}')
assert story.results['1']['output'] == '{"key":"value"}'
def test_stories_end_line_output_as_sting(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=' foobar\n\t')
assert story.results['1']['output'] == ' foobar\n\t'
def test_stories_end_line_output_as_bytes(patch, story):
patch.object(time, 'time')
story.results = {'1': {'start': 'start'}}
story.end_line('1', output=b'output')
assert story.results['1']['output'] == b'output'
@mark.parametrize('input,output', [
(None, 'null'),
(False, 'false'),
(True, 'true'),
('string', "'string'"),
("st'ring", "'st\'ring'"),
(1, "'1'"),
({'foo': 'bar'}, "'{\"foo\": \"bar\"}'"),
(['foobar'], "'[\"foobar\"]'"),
])
def test_stories_encode(story, input, output):
assert story.encode(input) == output
def test_stories_argument_by_name_empty(story):
assert story.argument_by_name({}, 'foo') is None
def test_stories_argument_by_name_lookup(patch, story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {'$OBJECT': 'string', 'string': 'bar'}
}
]
}
patch.object(story, 'resolve')
story.argument_by_name(line, 'foo')
story.resolve.assert_called_with(line['args'][0]['argument'], encode=False)
def test_stories_argument_by_name_missing(patch, story):
line = {'args': []}
assert story.argument_by_name(line, 'foo') is None
def test_stories_prepare(story):
story.prepare(None)
def test_stories_prepare_context(story, app):
story.app = app
context = {'app': app.app_context}
story.prepare(context=context)
assert story.environment == app.environment
assert story.context == context
def test_stories_next_block_simple(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['4']
def test_stories_next_block_as_lines(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_where_next_block_is_block(patch, story):
story.tree = {
'2': {'ln': '2', 'next': '3'},
'3': {'ln': '3', 'next': '4', 'enter': '4'},
'4': {'ln': '4', 'parent': '3'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['3']
def test_stories_next_block_only_block(patch, story):
story.tree = {
'2': {'ln': '2'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_context_for_function_call(story):
assert story.context_for_function_call({}, {}) == {}
def test_stories_context_for_function_call_with_args(story):
line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'string',
'string': 'bar'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'string',
'string': 'bar1'
}
}
]
}
function_line = {
'args': [
{
'$OBJECT': 'argument',
'name': 'foo',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
},
{
'$OBJECT': 'argument',
'name': 'foo1',
'argument': {
'$OBJECT': 'type',
'type': 'string'
}
}
]
}
assert story.context_for_function_call(line, function_line) == {
'foo': 'bar',
'foo1': 'bar1'
}
def test_stories_next_block_nested(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) == story.tree['7']
def test_stories_next_block_last_line(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4'}
}
assert isinstance(story, Stories)
assert story.next_block(story.line('2')) is None
def test_stories_next_block_nested_inner(patch, story):
story.tree = {
'2': {'ln': '2', 'enter': '3', 'next': '3'},
'3': {'ln': '3', 'parent': '2', 'next': '4'},
'4': {'ln': '4', 'enter': '5', 'parent': '2', 'next': '5'},
'5': {'ln': '5', 'parent': '4', 'next': '6'},
'6': {'ln': '6', 'parent': '4', 'next': '7'},
'7': {'ln': '7', 'parent': '2', 'next': '8'},
'8': {'ln': '8', 'parent': '2'}
}
assert isinstance(story, Stories)
assert story.tree['7'] == story.next_block(story.line('4'))
| true
| true
|
f7181a51ac70864c0872ec1652625be1aa4f459a
| 3,736
|
py
|
Python
|
code/UNET_lowered.py
|
sagnik1511/U-Net-Lowered-with-keras
|
364336b244ece288a52cf76df451501a665e745a
|
[
"MIT"
] | 6
|
2021-06-14T14:42:48.000Z
|
2021-06-14T15:16:22.000Z
|
code/UNET_lowered.py
|
sagnik1511/U-Net-Reduced-with-TF-keras
|
364336b244ece288a52cf76df451501a665e745a
|
[
"MIT"
] | null | null | null |
code/UNET_lowered.py
|
sagnik1511/U-Net-Reduced-with-TF-keras
|
364336b244ece288a52cf76df451501a665e745a
|
[
"MIT"
] | 2
|
2021-12-16T12:40:36.000Z
|
2022-02-04T23:10:09.000Z
|
# -*- coding: utf-8 -*-
"""
UNET LOwered Model :
This customized UNet Model has been generated lowering the filters to their 25% .
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import optimizers
import numpy as np
def UNet(input_shape):
keras.backend.clear_session()
inputs = Input(input_shape)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')
return model
| 54.941176
| 131
| 0.677195
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import optimizers
import numpy as np
def UNet(input_shape):
keras.backend.clear_session()
inputs = Input(input_shape)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)
model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')
return model
| true
| true
|
f7181b13ca73f4b482d5f775d442f82f8780cd58
| 20,330
|
py
|
Python
|
modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py
|
Rippling/modin
|
b2cf1d5fc704803a1ce6699e9a373dc7abeb409e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py
|
Rippling/modin
|
b2cf1d5fc704803a1ce6699e9a373dc7abeb409e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py
|
Rippling/modin
|
b2cf1d5fc704803a1ce6699e9a373dc7abeb409e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from .expr import (
InputRefExpr,
LiteralExpr,
OpExpr,
AggregateExpr,
build_if_then_else,
build_row_idx_filter_expr,
)
from .calcite_algebra import (
CalciteBaseNode,
CalciteInputRefExpr,
CalciteInputIdxExpr,
CalciteScanNode,
CalciteProjectionNode,
CalciteFilterNode,
CalciteAggregateNode,
CalciteCollation,
CalciteSortNode,
CalciteJoinNode,
CalciteUnionNode,
)
from .df_algebra import (
FrameNode,
MaskNode,
GroupbyAggNode,
TransformNode,
JoinNode,
UnionNode,
SortNode,
FilterNode,
)
from collections import abc
from pandas.core.dtypes.common import _get_dtype
class CalciteBuilder:
class CompoundAggregate:
def __init__(self, builder, arg):
self._builder = builder
self._arg = arg
def gen_proj_exprs(self):
return []
def gen_agg_exprs(self):
pass
def gen_reduce_expr(self):
pass
class StdAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
expr = self._builder._translate(self._arg.mul(self._arg))
return {self._quad_name: expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
null_expr = LiteralExpr(None)
count_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype
)
count_m_1_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(1)),
null_expr,
count_expr.sub(LiteralExpr(1)),
count_expr._dtype,
)
# sqrt((sum(x * x) - sum(x) * sum(x) / n) / (n - 1))
return (
qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))
.truediv(count_m_1_or_null)
.pow(LiteralExpr(0.5))
)
class SkewAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._cube_name = self._arg.column + "__cube__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._cube_sum_name = self._arg.column + "__cube_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
quad_expr = self._builder._translate(self._arg.mul(self._arg))
cube_expr = self._builder._translate(
self._arg.mul(self._arg).mul(self._arg)
)
return {self._quad_name: quad_expr, self._cube_name: cube_expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
csum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._cube_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._cube_sum_name: csum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)
csum_expr._dtype = self._sum_dtype
mean_expr = sum_expr.truediv(count_expr)
# n * sqrt(n - 1) / (n - 2)
# * (sum(x ** 3) - 3 * mean * sum(x * x) + 2 * mean * mean * sum(x))
# / (sum(x * x) - mean * sum(x)) ** 1.5
part1 = count_expr.mul(
count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))
).truediv(count_expr.sub(LiteralExpr(2)))
part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(
mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))
)
part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))
skew_expr = part1.mul(part2).truediv(part3)
# The result is NULL if n <= 2
return build_if_then_else(
count_expr.le(LiteralExpr(2)),
LiteralExpr(None),
skew_expr,
skew_expr._dtype,
)
_compound_aggregates = {"std": StdAggregate, "skew": SkewAggregate}
class InputContext:
_simple_aggregates = {
"sum": "SUM",
"mean": "AVG",
"max": "MAX",
"min": "MIN",
"size": "COUNT",
"count": "COUNT",
}
_no_arg_aggregates = {"size"}
def __init__(self, input_frames, input_nodes):
self.input_nodes = input_nodes
self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}
self.input_offsets = {}
self.replacements = {}
offs = 0
for frame in input_frames:
self.input_offsets[frame] = offs
offs += len(frame._table_cols)
# Materialized frames have additional 'rowid' column
if isinstance(frame._op, FrameNode):
offs += 1
def replace_input_node(self, frame, node, new_cols):
self.replacements[frame] = new_cols
def _idx(self, frame, col):
assert (
frame in self.input_offsets
), f"unexpected reference to {frame.id_str()}"
offs = self.input_offsets[frame]
if frame in self.replacements:
return self.replacements[frame].index(col) + offs
if col == "__rowid__":
if not isinstance(self.frame_to_node[frame], CalciteScanNode):
raise NotImplementedError(
"rowid can be accessed in materialized frames only"
)
return len(frame._table_cols) + offs
assert (
col in frame._table_cols
), f"unexpected reference to '{col}' in {frame.id_str()}"
return frame._table_cols.index(col) + offs
def ref(self, frame, col):
return CalciteInputRefExpr(self._idx(frame, col))
def ref_idx(self, frame, col):
return CalciteInputIdxExpr(self._idx(frame, col))
def input_ids(self):
return [x.id for x in self.input_nodes]
def translate(self, expr):
"""Copy those parts of expr tree that have input references
and translate all references into CalciteInputRefExr"""
return self._maybe_copy_and_translate_expr(expr)
def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):
if isinstance(expr, InputRefExpr):
if ref_idx:
return self.ref_idx(expr.modin_frame, expr.column)
else:
return self.ref(expr.modin_frame, expr.column)
if isinstance(expr, AggregateExpr):
expr = expr.copy()
if expr.agg in self._no_arg_aggregates:
expr.operands = []
else:
expr.operands[0] = self._maybe_copy_and_translate_expr(
expr.operands[0], True
)
expr.agg = self._simple_aggregates[expr.agg]
return expr
copied = False
for i, op in enumerate(getattr(expr, "operands", [])):
new_op = self._maybe_copy_and_translate_expr(op)
if new_op != op:
if not copied:
expr = expr.copy()
expr.operands[i] = new_op
return expr
class InputContextMgr:
def __init__(self, builder, input_frames, input_nodes):
self.builder = builder
self.input_frames = input_frames
self.input_nodes = input_nodes
def __enter__(self):
self.builder._input_ctx_stack.append(
self.builder.InputContext(self.input_frames, self.input_nodes)
)
return self.builder._input_ctx_stack[-1]
def __exit__(self, type, value, traceback):
self.builder._input_ctx_stack.pop()
type_strings = {
int: "INTEGER",
bool: "BOOLEAN",
}
def __init__(self):
self._input_ctx_stack = []
def build(self, op):
CalciteBaseNode.reset_id()
self.res = []
self._to_calcite(op)
return self.res
def _input_ctx(self):
return self._input_ctx_stack[-1]
def _set_input_ctx(self, op):
input_frames = getattr(op, "input", [])
input_nodes = [self._to_calcite(x._op) for x in input_frames]
return self.InputContextMgr(self, input_frames, input_nodes)
def _set_tmp_ctx(self, input_frames, input_nodes):
return self.InputContextMgr(self, input_frames, input_nodes)
def _ref(self, frame, col):
return self._input_ctx().ref(frame, col)
def _ref_idx(self, frame, col):
return self._input_ctx().ref_idx(frame, col)
def _translate(self, exprs):
if isinstance(exprs, abc.Iterable):
return [self._input_ctx().translate(x) for x in exprs]
return self._input_ctx().translate(exprs)
def _push(self, node):
self.res.append(node)
def _last(self):
return self.res[-1]
def _input_nodes(self):
return self._input_ctx().input_nodes
def _input_node(self, idx):
return self._input_nodes()[idx]
def _input_ids(self):
return self._input_ctx().input_ids()
def _to_calcite(self, op):
# This context translates input operands and setup current
# input context to translate input references (recursion
# over tree happens here).
with self._set_input_ctx(op):
if isinstance(op, FrameNode):
self._process_frame(op)
elif isinstance(op, MaskNode):
self._process_mask(op)
elif isinstance(op, GroupbyAggNode):
self._process_groupby(op)
elif isinstance(op, TransformNode):
self._process_transform(op)
elif isinstance(op, JoinNode):
self._process_join(op)
elif isinstance(op, UnionNode):
self._process_union(op)
elif isinstance(op, SortNode):
self._process_sort(op)
elif isinstance(op, FilterNode):
self._process_filter(op)
else:
raise NotImplementedError(
f"CalciteBuilder doesn't support {type(op).__name__}"
)
return self.res[-1]
def _process_frame(self, op):
self._push(CalciteScanNode(op.modin_frame))
def _process_mask(self, op):
if op.row_indices is not None:
raise NotImplementedError("row indices masking is not yet supported")
frame = op.input[0]
# select rows by rowid
rowid_col = self._ref(frame, "__rowid__")
condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)
self._push(CalciteFilterNode(condition))
# mask is currently always applied over scan, it means
# we need additional projection to remove rowid column
fields = frame._table_cols
exprs = [self._ref(frame, col) for col in frame._table_cols]
self._push(CalciteProjectionNode(fields, exprs))
def _process_groupby(self, op):
frame = op.input[0]
# Aggregation's input should always be a projection and
# group key columns should always go first
proj_cols = op.by.copy()
for col in frame._table_cols:
if col not in op.by:
proj_cols.append(col)
proj_exprs = [self._ref(frame, col) for col in proj_cols]
# Add expressions required for compound aggregates
compound_aggs = {}
for agg, expr in op.agg_exprs.items():
if expr.agg in self._compound_aggregates:
compound_aggs[agg] = self._compound_aggregates[expr.agg](
self, expr.operands[0]
)
extra_exprs = compound_aggs[agg].gen_proj_exprs()
proj_cols.extend(extra_exprs.keys())
proj_exprs.extend(extra_exprs.values())
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, proj_cols)
group = [self._ref_idx(frame, col) for col in op.by]
fields = op.by.copy()
aggs = []
for agg, expr in op.agg_exprs.items():
if agg in compound_aggs:
extra_aggs = compound_aggs[agg].gen_agg_exprs()
fields.extend(extra_aggs.keys())
aggs.extend(extra_aggs.values())
else:
fields.append(agg)
aggs.append(self._translate(expr))
node = CalciteAggregateNode(fields, group, aggs)
self._push(node)
if compound_aggs:
self._input_ctx().replace_input_node(frame, node, fields)
proj_cols = op.by.copy()
proj_exprs = [self._ref(frame, col) for col in proj_cols]
proj_cols.extend(op.agg_exprs.keys())
for agg in op.agg_exprs:
if agg in compound_aggs:
proj_exprs.append(compound_aggs[agg].gen_reduce_expr())
else:
proj_exprs.append(self._ref(frame, agg))
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
if op.groupby_opts["sort"]:
collation = [CalciteCollation(col) for col in group]
self._push(CalciteSortNode(collation))
def _process_transform(self, op):
fields = list(op.exprs.keys())
exprs = self._translate(op.exprs.values())
self._push(CalciteProjectionNode(fields, exprs))
def _process_join(self, op):
left = op.input[0]
right = op.input[1]
assert (
op.on is not None
), "Merge with unspecified 'on' parameter is not supported in the engine"
for col in op.on:
assert (
col in left._table_cols and col in right._table_cols
), f"Column '{col}'' is missing in one of merge operands"
""" Join, only equal-join supported """
cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]
if len(cmps) > 1:
condition = OpExpr("AND", cmps, _get_dtype(bool))
else:
condition = cmps[0]
node = CalciteJoinNode(
left_id=self._input_node(0).id,
right_id=self._input_node(1).id,
how=op.how,
condition=condition,
)
self._push(node)
"""Projection for both frames"""
fields = []
exprs = []
conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)
"""First goes 'on' column then all left columns(+suffix for conflicting names)
but 'on' then all right columns(+suffix for conflicting names) but 'on'"""
on_idx = [-1] * len(op.on)
for c in left.columns:
if c in op.on:
on_idx[op.on.index(c)] = len(fields)
suffix = op.suffixes[0] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(left, c))
for c in right.columns:
if c not in op.on:
suffix = op.suffixes[1] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(right, c))
self._push(CalciteProjectionNode(fields, exprs))
# TODO: current input translation system doesn't work here
# because there is no frame to reference for index computation.
# We should build calcite tree to keep references to input
# nodes and keep scheme in calcite nodes. For now just use
# known index on_idx.
if op.sort is True:
"""Sort by key column"""
collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]
self._push(CalciteSortNode(collation))
def _process_union(self, op):
self._push(CalciteUnionNode(self._input_ids(), True))
def _process_sort(self, op):
frame = op.input[0]
# Sort should be applied to projections.
if not isinstance(self._input_node(0), CalciteProjectionNode):
proj = CalciteProjectionNode(
frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, frame._table_cols)
nulls = op.na_position.upper()
collations = []
for col, asc in zip(op.columns, op.ascending):
ascending = "ASCENDING" if asc else "DESCENDING"
collations.append(
CalciteCollation(self._ref_idx(frame, col), ascending, nulls)
)
self._push(CalciteSortNode(collations))
def _process_filter(self, op):
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
| 37.302752
| 87
| 0.588096
|
from .expr import (
InputRefExpr,
LiteralExpr,
OpExpr,
AggregateExpr,
build_if_then_else,
build_row_idx_filter_expr,
)
from .calcite_algebra import (
CalciteBaseNode,
CalciteInputRefExpr,
CalciteInputIdxExpr,
CalciteScanNode,
CalciteProjectionNode,
CalciteFilterNode,
CalciteAggregateNode,
CalciteCollation,
CalciteSortNode,
CalciteJoinNode,
CalciteUnionNode,
)
from .df_algebra import (
FrameNode,
MaskNode,
GroupbyAggNode,
TransformNode,
JoinNode,
UnionNode,
SortNode,
FilterNode,
)
from collections import abc
from pandas.core.dtypes.common import _get_dtype
class CalciteBuilder:
class CompoundAggregate:
def __init__(self, builder, arg):
self._builder = builder
self._arg = arg
def gen_proj_exprs(self):
return []
def gen_agg_exprs(self):
pass
def gen_reduce_expr(self):
pass
class StdAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
expr = self._builder._translate(self._arg.mul(self._arg))
return {self._quad_name: expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
null_expr = LiteralExpr(None)
count_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype
)
count_m_1_or_null = build_if_then_else(
count_expr.eq(LiteralExpr(1)),
null_expr,
count_expr.sub(LiteralExpr(1)),
count_expr._dtype,
)
return (
qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))
.truediv(count_m_1_or_null)
.pow(LiteralExpr(0.5))
)
class SkewAggregate(CompoundAggregate):
def __init__(self, builder, arg):
assert isinstance(arg, InputRefExpr)
super().__init__(builder, arg)
self._quad_name = self._arg.column + "__quad__"
self._cube_name = self._arg.column + "__cube__"
self._sum_name = self._arg.column + "__sum__"
self._quad_sum_name = self._arg.column + "__quad_sum__"
self._cube_sum_name = self._arg.column + "__cube_sum__"
self._count_name = self._arg.column + "__count__"
def gen_proj_exprs(self):
quad_expr = self._builder._translate(self._arg.mul(self._arg))
cube_expr = self._builder._translate(
self._arg.mul(self._arg).mul(self._arg)
)
return {self._quad_name: quad_expr, self._cube_name: cube_expr}
def gen_agg_exprs(self):
count_expr = self._builder._translate(AggregateExpr("count", self._arg))
sum_expr = self._builder._translate(AggregateExpr("sum", self._arg))
self._sum_dtype = sum_expr._dtype
qsum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._quad_name),
dtype=sum_expr._dtype,
)
csum_expr = AggregateExpr(
"SUM",
self._builder._ref_idx(self._arg.modin_frame, self._cube_name),
dtype=sum_expr._dtype,
)
return {
self._sum_name: sum_expr,
self._quad_sum_name: qsum_expr,
self._cube_sum_name: csum_expr,
self._count_name: count_expr,
}
def gen_reduce_expr(self):
count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)
count_expr._dtype = _get_dtype(int)
sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)
sum_expr._dtype = self._sum_dtype
qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)
qsum_expr._dtype = self._sum_dtype
csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)
csum_expr._dtype = self._sum_dtype
mean_expr = sum_expr.truediv(count_expr)
part1 = count_expr.mul(
count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))
).truediv(count_expr.sub(LiteralExpr(2)))
part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(
mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))
)
part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))
skew_expr = part1.mul(part2).truediv(part3)
return build_if_then_else(
count_expr.le(LiteralExpr(2)),
LiteralExpr(None),
skew_expr,
skew_expr._dtype,
)
_compound_aggregates = {"std": StdAggregate, "skew": SkewAggregate}
class InputContext:
_simple_aggregates = {
"sum": "SUM",
"mean": "AVG",
"max": "MAX",
"min": "MIN",
"size": "COUNT",
"count": "COUNT",
}
_no_arg_aggregates = {"size"}
def __init__(self, input_frames, input_nodes):
self.input_nodes = input_nodes
self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}
self.input_offsets = {}
self.replacements = {}
offs = 0
for frame in input_frames:
self.input_offsets[frame] = offs
offs += len(frame._table_cols)
if isinstance(frame._op, FrameNode):
offs += 1
def replace_input_node(self, frame, node, new_cols):
self.replacements[frame] = new_cols
def _idx(self, frame, col):
assert (
frame in self.input_offsets
), f"unexpected reference to {frame.id_str()}"
offs = self.input_offsets[frame]
if frame in self.replacements:
return self.replacements[frame].index(col) + offs
if col == "__rowid__":
if not isinstance(self.frame_to_node[frame], CalciteScanNode):
raise NotImplementedError(
"rowid can be accessed in materialized frames only"
)
return len(frame._table_cols) + offs
assert (
col in frame._table_cols
), f"unexpected reference to '{col}' in {frame.id_str()}"
return frame._table_cols.index(col) + offs
def ref(self, frame, col):
return CalciteInputRefExpr(self._idx(frame, col))
def ref_idx(self, frame, col):
return CalciteInputIdxExpr(self._idx(frame, col))
def input_ids(self):
return [x.id for x in self.input_nodes]
def translate(self, expr):
return self._maybe_copy_and_translate_expr(expr)
def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):
if isinstance(expr, InputRefExpr):
if ref_idx:
return self.ref_idx(expr.modin_frame, expr.column)
else:
return self.ref(expr.modin_frame, expr.column)
if isinstance(expr, AggregateExpr):
expr = expr.copy()
if expr.agg in self._no_arg_aggregates:
expr.operands = []
else:
expr.operands[0] = self._maybe_copy_and_translate_expr(
expr.operands[0], True
)
expr.agg = self._simple_aggregates[expr.agg]
return expr
copied = False
for i, op in enumerate(getattr(expr, "operands", [])):
new_op = self._maybe_copy_and_translate_expr(op)
if new_op != op:
if not copied:
expr = expr.copy()
expr.operands[i] = new_op
return expr
class InputContextMgr:
def __init__(self, builder, input_frames, input_nodes):
self.builder = builder
self.input_frames = input_frames
self.input_nodes = input_nodes
def __enter__(self):
self.builder._input_ctx_stack.append(
self.builder.InputContext(self.input_frames, self.input_nodes)
)
return self.builder._input_ctx_stack[-1]
def __exit__(self, type, value, traceback):
self.builder._input_ctx_stack.pop()
type_strings = {
int: "INTEGER",
bool: "BOOLEAN",
}
def __init__(self):
self._input_ctx_stack = []
def build(self, op):
CalciteBaseNode.reset_id()
self.res = []
self._to_calcite(op)
return self.res
def _input_ctx(self):
return self._input_ctx_stack[-1]
def _set_input_ctx(self, op):
input_frames = getattr(op, "input", [])
input_nodes = [self._to_calcite(x._op) for x in input_frames]
return self.InputContextMgr(self, input_frames, input_nodes)
def _set_tmp_ctx(self, input_frames, input_nodes):
return self.InputContextMgr(self, input_frames, input_nodes)
def _ref(self, frame, col):
return self._input_ctx().ref(frame, col)
def _ref_idx(self, frame, col):
return self._input_ctx().ref_idx(frame, col)
def _translate(self, exprs):
if isinstance(exprs, abc.Iterable):
return [self._input_ctx().translate(x) for x in exprs]
return self._input_ctx().translate(exprs)
def _push(self, node):
self.res.append(node)
def _last(self):
return self.res[-1]
def _input_nodes(self):
return self._input_ctx().input_nodes
def _input_node(self, idx):
return self._input_nodes()[idx]
def _input_ids(self):
return self._input_ctx().input_ids()
def _to_calcite(self, op):
with self._set_input_ctx(op):
if isinstance(op, FrameNode):
self._process_frame(op)
elif isinstance(op, MaskNode):
self._process_mask(op)
elif isinstance(op, GroupbyAggNode):
self._process_groupby(op)
elif isinstance(op, TransformNode):
self._process_transform(op)
elif isinstance(op, JoinNode):
self._process_join(op)
elif isinstance(op, UnionNode):
self._process_union(op)
elif isinstance(op, SortNode):
self._process_sort(op)
elif isinstance(op, FilterNode):
self._process_filter(op)
else:
raise NotImplementedError(
f"CalciteBuilder doesn't support {type(op).__name__}"
)
return self.res[-1]
def _process_frame(self, op):
self._push(CalciteScanNode(op.modin_frame))
def _process_mask(self, op):
if op.row_indices is not None:
raise NotImplementedError("row indices masking is not yet supported")
frame = op.input[0]
# select rows by rowid
rowid_col = self._ref(frame, "__rowid__")
condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)
self._push(CalciteFilterNode(condition))
# mask is currently always applied over scan, it means
# we need additional projection to remove rowid column
fields = frame._table_cols
exprs = [self._ref(frame, col) for col in frame._table_cols]
self._push(CalciteProjectionNode(fields, exprs))
def _process_groupby(self, op):
frame = op.input[0]
# Aggregation's input should always be a projection and
proj_cols = op.by.copy()
for col in frame._table_cols:
if col not in op.by:
proj_cols.append(col)
proj_exprs = [self._ref(frame, col) for col in proj_cols]
compound_aggs = {}
for agg, expr in op.agg_exprs.items():
if expr.agg in self._compound_aggregates:
compound_aggs[agg] = self._compound_aggregates[expr.agg](
self, expr.operands[0]
)
extra_exprs = compound_aggs[agg].gen_proj_exprs()
proj_cols.extend(extra_exprs.keys())
proj_exprs.extend(extra_exprs.values())
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, proj_cols)
group = [self._ref_idx(frame, col) for col in op.by]
fields = op.by.copy()
aggs = []
for agg, expr in op.agg_exprs.items():
if agg in compound_aggs:
extra_aggs = compound_aggs[agg].gen_agg_exprs()
fields.extend(extra_aggs.keys())
aggs.extend(extra_aggs.values())
else:
fields.append(agg)
aggs.append(self._translate(expr))
node = CalciteAggregateNode(fields, group, aggs)
self._push(node)
if compound_aggs:
self._input_ctx().replace_input_node(frame, node, fields)
proj_cols = op.by.copy()
proj_exprs = [self._ref(frame, col) for col in proj_cols]
proj_cols.extend(op.agg_exprs.keys())
for agg in op.agg_exprs:
if agg in compound_aggs:
proj_exprs.append(compound_aggs[agg].gen_reduce_expr())
else:
proj_exprs.append(self._ref(frame, agg))
proj = CalciteProjectionNode(proj_cols, proj_exprs)
self._push(proj)
if op.groupby_opts["sort"]:
collation = [CalciteCollation(col) for col in group]
self._push(CalciteSortNode(collation))
def _process_transform(self, op):
fields = list(op.exprs.keys())
exprs = self._translate(op.exprs.values())
self._push(CalciteProjectionNode(fields, exprs))
def _process_join(self, op):
left = op.input[0]
right = op.input[1]
assert (
op.on is not None
), "Merge with unspecified 'on' parameter is not supported in the engine"
for col in op.on:
assert (
col in left._table_cols and col in right._table_cols
), f"Column '{col}'' is missing in one of merge operands"
cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]
if len(cmps) > 1:
condition = OpExpr("AND", cmps, _get_dtype(bool))
else:
condition = cmps[0]
node = CalciteJoinNode(
left_id=self._input_node(0).id,
right_id=self._input_node(1).id,
how=op.how,
condition=condition,
)
self._push(node)
fields = []
exprs = []
conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)
on_idx = [-1] * len(op.on)
for c in left.columns:
if c in op.on:
on_idx[op.on.index(c)] = len(fields)
suffix = op.suffixes[0] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(left, c))
for c in right.columns:
if c not in op.on:
suffix = op.suffixes[1] if c in conflicting_cols else ""
fields.append(c + suffix)
exprs.append(self._ref(right, c))
self._push(CalciteProjectionNode(fields, exprs))
# TODO: current input translation system doesn't work here
if op.sort is True:
collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]
self._push(CalciteSortNode(collation))
def _process_union(self, op):
self._push(CalciteUnionNode(self._input_ids(), True))
def _process_sort(self, op):
frame = op.input[0]
if not isinstance(self._input_node(0), CalciteProjectionNode):
proj = CalciteProjectionNode(
frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]
)
self._push(proj)
self._input_ctx().replace_input_node(frame, proj, frame._table_cols)
nulls = op.na_position.upper()
collations = []
for col, asc in zip(op.columns, op.ascending):
ascending = "ASCENDING" if asc else "DESCENDING"
collations.append(
CalciteCollation(self._ref_idx(frame, col), ascending, nulls)
)
self._push(CalciteSortNode(collations))
def _process_filter(self, op):
condition = self._translate(op.condition)
self._push(CalciteFilterNode(condition))
| true
| true
|
f7181b87434e6a3a078b7f233f6a61d24e5fe9cc
| 3,374
|
py
|
Python
|
data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
# monkey patch django.
# This patch make sure that we use real threads to get the ident which
# is going to happen if we are using gevent or eventlet.
# -- patch taken from gunicorn
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ['--app', '--loader', '--config']
keep_base_opts = False
def get_version(self):
return 'celery %s\ndjango-celery %s' % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ['CELERY_BROKER_URL'] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if '--settings=' in arg:
_, settings_module = arg.split('=')
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
| 31.53271
| 74
| 0.590101
|
from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ['--app', '--loader', '--config']
keep_base_opts = False
def get_version(self):
return 'celery %s\ndjango-celery %s' % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ['CELERY_BROKER_URL'] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if '--settings=' in arg:
_, settings_module = arg.split('=')
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
| true
| true
|
f7181bc2790949201ed0b3f57763455f00d8b77a
| 28,933
|
py
|
Python
|
Simulador.py
|
edrhat/simulator
|
d243443c84ccb3e4efa880990d11b395125d16d3
|
[
"MIT"
] | null | null | null |
Simulador.py
|
edrhat/simulator
|
d243443c84ccb3e4efa880990d11b395125d16d3
|
[
"MIT"
] | null | null | null |
Simulador.py
|
edrhat/simulator
|
d243443c84ccb3e4efa880990d11b395125d16d3
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import messagebox
import tkinter as tk
from tkinter import ttk
#IMAGENS DEFEITO: 240X240
class Tela:
def fechar(self, event):
janela.destroy()
exit()
def fecharPc(self, event):
self.lb_simulador.place_forget()
self.imgFundo.place_forget()
self.imgg2.place_forget()
self.lbGabinete.config(bg="white")
lbMonitor.place(x=100, y=30)
self.imggg.place_forget()
self.imgg3.place_forget()
self.imgg4.place_forget()
self.imgg5.place_forget()
self.imgg6.place_forget()
self.imgg7.place_forget()
self.imgg8.place_forget()
self.imgg9.place_forget()
def __init__(self, master):
global lbMonitor
monitor = PhotoImage(file="monitor.png")
lbMonitor = Label(image=monitor)
lbMonitor.monitor = monitor
lbMonitor.place(x=100, y=30)
gabinete = PhotoImage(file="gabinete.png")
self.lbGabinete = Label(janela, image=gabinete)
self.lbGabinete.gabinete = gabinete
self.lbGabinete.place(x=970, y=285)
self.lbGabinete.bind("<Enter>", self.abrirPc)
self.lbGabinete.bind("<Leave>", self.fecharPc)
self.lbGabinete.bind("<Button-1>", self.defeitos)
teclado = PhotoImage(file="teclado.png")
lbTeclado = Label(janela, image=teclado)
lbTeclado.teclado = teclado
lbTeclado.place(x=50, y=530)
delete = PhotoImage(file="delete.png")
lbDelete = Label(janela, image=delete)
lbDelete.delete = delete
lbDelete.config(bg="red")
lbDelete.bind("<Button-1>", self.bios)
lbDelete.place(x=842, y=722)
self.sair = Button(janela, text="[X]")
self.sair["font"] = ("Arial", "15")
self.sair.config(bg="red", foreground="white")
self.sair.place(x=1200, y=30)
self.sair.bind("<Button-1>", self.fechar)
def defeitos(self, event):
janela2 = Tk()
self.p = Label(janela2, text="O computador liga normalmente mas não aparece nada\n no monitor. Quais peças devem ser testadas ?")
self.p["font"] = ("Lucida console", "30")
self.p.config(bg="black", foreground="limegreen")
self.p.place(x=140, y=30)
img_monitor = PhotoImage(master=janela2, file="monitor2.png")
self.monitor2 = Label(janela2, image=img_monitor)
self.monitor2.img_monitor = img_monitor
self.monitor2.place(x=120,y=200)
img_placa = PhotoImage(master=janela2, file="placa2.png")
self.placa = Label(janela2, image=img_placa)
self.placa.img_placa = img_placa
self.placa.place(x=420,y=200)
img_hd = PhotoImage(master=janela2, file="hd2.png")
self.hd = Label(janela2, image=img_hd)
self.hd.img_hd = img_hd
self.hd.place(x=720,y=200)
img_gpu = PhotoImage(master=janela2, file="gpu2.png")
self.gpu = Label(janela2, image=img_gpu)
self.gpu.img_gpu = img_gpu
self.gpu.place(x=1020,y=200)
janela.title("Simulador de defeitos")
janela2.geometry("1400x830+50+5")
def abrirPc(self, event):
global lbMonitor
self.lb_simulador = Label(janela, text="Clique para iniciar\n simulador de defeitos")
self.lb_simulador["font"] = ("Arial", "20")
self.lb_simulador.config(bg="black", foreground="white")
self.lb_simulador.place(x=970, y=210)
lbMonitor.place(x=1800, y=10)
fundobranco = PhotoImage(file="fundobranco.png")
self.imgFundo = Label(janela, image=fundobranco)
self.imgFundo.fundobranco = fundobranco
self.imgFundo.config(bg="white")
self.imgFundo.place(x=80,y=30)
gabineteAberto = PhotoImage(file="gabineteAberto.png")
self.imggg = Label(janela, image=gabineteAberto)
self.imggg.gabineteAberto = gabineteAberto
self.lbGabinete.config(bg="green")
self.imggg.place(x=60,y=100)
hd = PhotoImage(file="hd.png")
self.imgg2 = Label(janela, image=hd)
self.imgg2.hd = hd
self.imgg2.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg2.place(x=500,y=30)
fonte = PhotoImage(file="fonte.png")
self.imgg3 = Label(janela, image=fonte)
self.imgg3.fonte = fonte
self.imgg3.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg3.place(x=650,y=30)
cpu = PhotoImage(file="cpu.png")
self.imgg4 = Label(janela, image=cpu)
self.imgg4.cpu = cpu
self.imgg4.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg4.place(x=800,y=30)
placa = PhotoImage(file="placa.png")
self.imgg5 = Label(janela, image=placa)
self.imgg5.placa = placa
self.imgg5.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg5.place(x=500,y=200)
memoria = PhotoImage(file="memoria.png")
self.imgg6 = Label(janela, image=memoria)
self.imgg6.memoria = memoria
self.imgg6.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg6.place(x=650,y=200)
sata = PhotoImage(file="sata.png")
self.imgg7 = Label(janela, image=sata)
self.imgg7.sata = sata
self.imgg7.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg7.place(x=800,y=200)
cooler = PhotoImage(file="cooler.png")
self.imgg8 = Label(janela, image=cooler)
self.imgg8.cooler = cooler
self.imgg8.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg8.place(x=500,y=370)
gpu = PhotoImage(file="gpu.png")
self.imgg9 = Label(janela, image=gpu)
self.imgg9.gpu = gpu
self.imgg9.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg9.place(x=650,y=370)
def bios(self, event):
janela2 = tk.Tk()
#Label inicial
p1 = tk.Label(janela2,foreground="white",background="#00008B",text="CMOS Setup Utility - Copyright (C) 1984-1999 Award Software")
p1["font"] = ("Lucida Console","18")
p1.pack(pady=7,padx=7,ipady=20,ipadx=7)
linhaH = tk.Label(janela2,foreground="white",background="#00008B",text="____________________________________________________________________")
linhaH["font"] = ("Lucida Console","18")
linhaH.place(x=0,y=60)
linhaV = tk.Label(janela2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n")
linhaV["font"] = ("Lucida Console","12")
linhaV.place(x=470,y=90)
#Label 1
self.p2 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Standard CMOS Features")
self.p2["font"] = ("Lucida Console","15")
self.p2.place(x=80, y=100)
#Label 2
self.p3 = tk.Label(janela2,foreground="yellow",background="red",text="> Advanced BIOS Features")
self.p3["font"] = ("Lucida Console","15")
self.p3.place(x=80, y=140)
self.p3.bind("<Button-1>", self.bios2)
#Label 3
p4 = tk.Label(janela2, foreground="#FFD700",background="#00008B",text="> Advanced Chipset Features")
p4["font"] = ("Lucida Console","15")
p4.place(x=80, y=180)
#Label 4
p5 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Integrated Peripherials")
p5["font"] = ("Lucida Console","15")
p5.place(x=80, y=220)
#Label 5
p6 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Power Management Setup")
p6["font"] = ("Lucida Console","15")
p6.place(x=80, y=260)
#Label 6
p7 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PnP/PCI Configurations")
p7["font"] = ("Lucida Console","15")
p7.place(x=80, y=300)
#Label 7
p8 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PC Health Status")
p8["font"] = ("Lucida Console","15")
p8.place(x=80, y=340)
#///////////////////////////////////////////////////////////////////////////////////
#Label 8
p9 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Frequency/Voltage Control")
p9["font"] = ("Lucida Console","15")
p9.place(x=520, y=100)
#Label 9
p10 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Fail-Safe Defaults")
p10["font"] = ("Lucida Console","15")
p10.place(x=520, y=140)
#Label 10
p11 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Optimized Defaults")
p11["font"] = ("Lucida Console","15")
p11.place(x=520, y=180)
#Label 11
p12 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set Supervisor Password")
p12["font"] = ("Lucida Console","15")
p12.place(x=520, y=220)
#Label 12
p13 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set User Password")
p13["font"] = ("Lucida Console","15")
p13.place(x=520, y=260)
#Label 13
p14 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Save & Exit Setup")
p14["font"] = ("Lucida Console","15")
p14.place(x=520, y=300)
#Label 14
p15 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Exit Without Saving")
p15["font"] = ("Lucida Console","15")
p15.place(x=520, y=300)
#Esc
esc = tk.Label(janela2,foreground="white",background="#00008B",text="Esc : Quit")
esc["font"] = ("Lucida Console","15")
esc.place(x=23, y=470)
#F10
f10 = tk.Label(janela2,foreground="white",background="#00008B",text="F10 : Save & Exit Setup")
f10["font"] = ("Lucida Console","15")
f10.place(x=23, y=498)
#Rodapé
rodape = tk.Label(janela2, text="Time, Date, Hard Disk Type. . .")
rodape["font"] = ("Helvetica","16")
rodape.configure(background="#00008B", foreground="#FFD700")
rodape.place(x=280,y=580)
janela2.title("BIOS")
janela2.geometry("880x640+200+30")
janela2.config(bg="#00008B")
janela2.config(cursor="hand2")
janela2.resizable(width=False, height=False)
janela2.mainloop()
def fecharBios(self, event):
janela2.destroy()
def bios2(self, event):
jan2= tk.Tk()
jan2.configure(bg="#00008B")
jan2.geometry('880x700+200+20')
jan2.config(cursor="hand2")
jan2.resizable(width=False, height=False)
jan2.title("Ordem de Boot")
#Label inicial
self.lb1 = tk.Label(jan2,foreground="white",background="#00008B",text="Phoenix - Award BIOS CMOS Setup Utility\nAdvanced BIOS Features")
self.lb1["font"] = ("Lucida Console","18")
self.lb1.pack(pady=7,padx=7,ipady=15,ipadx=7)
#Linha horizontal
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="____________________________________________________________________________")
self.l1["font"] = ("Lucida Console","18")
self.l1.place(x=0,y=70)
#Linha vertical
self.l2 = tk.Label(jan2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|")
self.l2["font"] = ("Lucida Console","15")
self.l2.place(x=630, y=95)
#Label 1
self.lb3 = tk.Label(jan2,foreground="white",background="#00008B",text="Virus Warning")
self.lb3["font"] = ("Lucida Console","15")
self.lb3.place(x=30, y=100)
self.lb4 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.lb4["font"] = ("Lucida Console","15")
self.lb4.place(x=400, y=100)
self.lb5 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L1 Cache")
self.lb5["font"] = ("Lucida Console","15")
self.lb5.place(x=30, y=130)
self.lb6 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb6["font"] = ("Lucida Console","15")
self.lb6.place(x=400, y=130)
self.lb7 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L2 Cache")
self.lb7["font"] = ("Lucida Console","15")
self.lb7.place(x=30, y=160)
self.lb8 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb8["font"] = ("Lucida Console","15")
self.lb8.place(x=400, y=160)
self.lb9 = tk.Label(jan2,foreground="white",background="#00008B",text="Quick Power On Self Test")
self.lb9["font"] = ("Lucida Console","15")
self.lb9.place(x=30, y=190)
self.lb10 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb10["font"] = ("Lucida Console","15")
self.lb10.place(x=400, y=190)
self.l11 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD Boot Sprite")
self.l11["font"] = ("Lucida Console","15")
self.l11.place(x=30, y=220)
self.l12 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l12["font"] = ("Lucida Console","15")
self.l12.place(x=400, y=220)
self.l13 = tk.Label(jan2,foreground="white",background="#00008B",text="First Boot Device")
self.l13["font"] = ("Lucida Console","15")
self.l13.place(x=30, y=250)
self.l14 = tk.Label(jan2,foreground="#FFD700",background="red",text="CD-ROM")
self.l14["font"] = ("Lucida Console","15")
self.l14.place(x=400, y=250)
self.l14.bind("<Button-1>", self.boot)
self.l15 = tk.Label(jan2,foreground="white",background="#00008B",text="Second Boot Device")
self.l15["font"] = ("Lucida Console","15")
self.l15.place(x=30, y=280)
self.l16 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="HDD-0")
self.l16["font"] = ("Lucida Console","15")
self.l16.place(x=400, y=280)
self.l17 = tk.Label(jan2,foreground="white",background="#00008B",text="Third Boot Device")
self.l17["font"] = ("Lucida Console","15")
self.l17.place(x=30, y=310)
self.l18 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l18["font"] = ("Lucida Console","15")
self.l18.place(x=400, y=310)
self.l19 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Other Device")
self.l19["font"] = ("Lucida Console","15")
self.l19.place(x=30, y=340)
self.l20 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l20["font"] = ("Lucida Console","15")
self.l20.place(x=400, y=340)
self.l21 = tk.Label(jan2,foreground="white",background="#00008B",text="Swap Floppy Seek")
self.l21["font"] = ("Lucida Console","15")
self.l21.place(x=30, y=370)
self.l22 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l22["font"] = ("Lucida Console","15")
self.l22.place(x=400, y=370)
self.l23 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up Floppy Seek")
self.l23["font"] = ("Lucida Console","15")
self.l23.place(x=30, y=400)
self.l24 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l24["font"] = ("Lucida Console","15")
self.l24.place(x=400, y=400)
self.l25 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up NumLock Status")
self.l25["font"] = ("Lucida Console","15")
self.l25.place(x=30, y=430)
self.l26 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="On")
self.l26["font"] = ("Lucida Console","15")
self.l26.place(x=400, y=430)
self.l27 = tk.Label(jan2,foreground="white",background="#00008B",text="Gate A20 Option")
self.l27["font"] = ("Lucida Console","15")
self.l27.place(x=30, y=460)
self.l28 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Normal")
self.l28["font"] = ("Lucida Console","15")
self.l28.place(x=400, y=460)
self.l29 = tk.Label(jan2,foreground="white",background="#00008B",text="Typematic Rate Setting")
self.l29["font"] = ("Lucida Console","15")
self.l29.place(x=30, y=490)
self.l30 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l30["font"] = ("Lucida Console","15")
self.l30.place(x=400, y=490)
self.l31 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Rate (Chars/Sec)")
self.l31["font"] = ("Lucida Console","15")
self.l31.place(x=9, y=520)
self.l32 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="6")
self.l32["font"] = ("Lucida Console","15")
self.l32.place(x=400, y=520)
self.l33 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Delay (Msec)")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=9, y=550)
self.l34 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="250")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=550)
self.l33 = tk.Label(jan2,foreground="white",background="#00008B",text="Security Option")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=30, y=580)
self.l34 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Setup")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="OS Select For DRAM > 64MB")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=580)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Non-OS2")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD S.M.A.R.T. Capability")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=610)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=610)
self.l37 = tk.Label(jan2,foreground="white",background="#00008B",text="_____________________________________________________________________________________")
self.l37["font"] = ("Lucida Console","15")
self.l37.place(x=0, y=630)
self.f10 = tk.Label(jan2,foreground="white",background="#00008B",text="F10: Save & Exit")
self.f10["font"] = ("Lucida Console","15")
self.f10.place(x=25, y=665)
self.l38 = tk.Label(jan2,foreground="white",background="#00008B",text="Item Help")
self.l38["font"] = ("Lucida Console","15")
self.l38.place(x=705, y=120)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="---------------------------------")
self.l1["font"] = ("Lucida Console","15")
self.l1.place(x=640, y=152)
self.p17 = tk.Label(jan2,foreground="white",background="#00008B",text="-Menu Level >")
self.p17["font"] = ("Lucida Console","15")
self.p17.place(x=650, y=180)
jan2.mainloop()
def boot(self,event):
messagebox.showinfo("WINDOWS 10", "Iniciando instalação...")
#tux = PhotoImage(file="tux.png")
#self.img0 = Label(janela, image=tux)
#self.img0.tux = tux
#self.img0.place(x=20, y=800)
w1 = PhotoImage(file="w1.png")
self.img1 = Label(janela, image=w1)
self.img1.w1 = w1
self.img1.place(x=123, y=50)
abnts = ["(Português Brasil ABNT-2)", "(Português Brasil ABNT)"]
abnt = ttk.Combobox(values=abnts)
abnt.set("(Português Brasil ABNT)")
abnt.place(x=412, y=262, width=337, height=22)
btAvancar = PhotoImage(file="btAvancar.png")
self.img2 = Label(janela, image=btAvancar)
self.img2.btAvancar = btAvancar
self.img2.place(x=740, y=324)
self.img2.bind("<Button-1>", self.avancar)
def avancar(self, event):
w2 = PhotoImage(file="w2.png")
self.img3 = Label(janela, image=w2)
self.img3.w2 = w2
self.img3.place(x=123, y=50)
btInstalar = PhotoImage(file="btInstalar.png")
self.img4 = Label(janela, image=btInstalar)
self.img4.btInstalar = btInstalar
self.img4.place(x=400, y=205)
self.img4.bind("<Button-1>", self.instalar)
def instalar(self, event):
w3 = PhotoImage(file="w3.png")
self.img5 = Label(janela, image=w3)
self.img5.w3 = w3
self.img5.place(x=113, y=52)
chave = PhotoImage(file="chave.png")
self.img6 = Label(janela, image=chave)
self.img6.chave = chave
self.img6.place(x=485, y=290)
self.img6.bind("<Button-1>", self.chaveW)
btAvancar2 = PhotoImage(file="btAvancar2.png")
self.img7 = Label(janela, image=btAvancar2)
self.img7.btAvancar2 = btAvancar2
self.img7.place(x=726, y=300)
self.img7.bind("<Button-1>", self.avancar2)
def chaveW(self, event):
self.img6.config(bg="lightblue")
def avancar2(self, event):
w4 = PhotoImage(file="w4.png")
self.img8 = Label(janela, image=w4)
self.img8.w4 = w4
self.img8.place(x=112, y=49)
btAvancar3 = PhotoImage(file="btAvancar3.png")
self.img9 = Label(janela, image=btAvancar3)
self.img9.btAvancar3 = btAvancar3
self.img9.place(x=726, y=300)
self.img9.bind("<Button-1>", self.avancar3)
def avancar3(self, event):
w5 = PhotoImage(file="w5.png")
self.img10 = Label(janela, image=w5)
self.img10.w5 = w5
self.img10.place(x=112, y=49)
btAvancar4 = PhotoImage(file="btAvancar4.png")
self.img11 = Label(janela, image=btAvancar4)
self.img11.btAvancar4 = btAvancar4
self.img11.place(x=726, y=305)
self.img11.bind("<Button-1>", self.avancar4)
def avancar4(self, event):
w6 = PhotoImage(file="w6.png")
self.img12 = Label(janela, image=w6)
self.img12.w6 = w6
self.img12.place(x=112, y=49)
personalizada = PhotoImage(file="personalizada.png")
self.img13 = Label(janela, image=personalizada)
self.img13.personalizada = personalizada
self.img13.place(x=206, y=205)
self.img13.bind("<Button-1>", self.avancar5)
def avancar5(self, event):
w7 = PhotoImage(file="w7.png")
self.img14 = Label(janela, image=w7)
self.img14.w7 = w7
self.img14.place(x=112, y=49)
formatar = PhotoImage(file="formatar.png")
self.img15 = Label(janela, image=formatar)
self.img15.formatar = formatar
self.img15.place(x=460, y=238)
self.img15.bind("<Button-1>", self.formatarW)
btAvancar6 = PhotoImage(file="btAvancar6.png")
self.img16 = Label(janela, image=btAvancar6)
self.img16.btAvancar6 = btAvancar6
self.img16.place(x=726, y=310)
self.img16.bind("<Button-1>", self.avancar6)
def formatarW(self, event):
messagebox.showwarning("Formatação Windows 10", "TODOS OS DADOS DESSA PARTIÇÃO SERÃO EXCLUÍDOS !!")
def avancar6(self, event):
w8 = PhotoImage(file="w8.png")
self.img18 = Label(janela, image=w8)
self.img18.w8 = w8
self.img18.place(x=112, y=49)
self.img18.bind("<Button-1>", self.win)
def win(self, event):
w9 = PhotoImage(file="w9.png")
self.img19 = Label(janela, image=w9)
self.img19.w9 = w9
self.img19.place(x=112, y=49)
self.img19.bind("<Button-1>", self.win10)
def win10(self, event):
w10 = PhotoImage(file="w10.png")
self.img20 = Label(janela, image=w10)
self.img20.w10 = w10
self.img20.place(x=112, y=49)
iniciar = PhotoImage(file="iniciar.png")
self.img21 = Label(janela, image=iniciar)
self.img21.iniciar = iniciar
self.img21.place(x=112, y=354)
self.img21.bind("<Enter>", self.gerenciador)
self.img21.bind("<Leave>", self.fecharGerenciador)
chrome = PhotoImage(file="chrome.png")
self.img23 = Label(janela, image=chrome)
self.img23.chrome = chrome
self.img23.place(x=600, y=100)
self.img23.bind("<Enter>", self.chrome)
self.img23.bind("<Leave>", self.chromeSair)
winrar = PhotoImage(file="winrar.png")
self.img26 = Label(janela, image=winrar)
self.img26.winrar = winrar
self.img26.place(x=700, y=100)
self.img26.bind("<Enter>", self.winrar)
self.img26.bind("<Leave>", self.winrarSair)
reader = PhotoImage(file="reader.png")
self.img27 = Label(janela, image=reader)
self.img27.reader = reader
self.img27.place(x=600, y=200)
self.img27.bind("<Enter>", self.reader)
self.img27.bind("<Leave>", self.readerSair)
driver = PhotoImage(file="driver.png")
self.img28 = Label(janela, image=driver)
self.img28.driver = driver
self.img28.place(x=700, y=200)
self.img28.bind("<Enter>", self.driver)
self.img28.bind("<Leave>", self.driverSair)
def reader(self, event):
telaReader = PhotoImage(file="telaReader.png")
self.img27 = Label(janela, image=telaReader)
self.img27.telaReader = telaReader
self.img27.place(x=150, y=80)
def driver(self, event):
telaDriver = PhotoImage(file="telaDriver.png")
self.img28 = Label(janela, image=telaDriver)
self.img28.telaDriver = telaDriver
self.img28.place(x=150, y=80)
def chrome(self, event):
telaChrome = PhotoImage(file="telaChrome.png")
self.img24 = Label(janela, image=telaChrome)
self.img24.telaChrome = telaChrome
self.img24.place(x=150, y=80)
def winrar(self, event):
telaWinrar = PhotoImage(file="telaWinrar.png")
self.img26 = Label(janela, image=telaWinrar)
self.img26.telaWinrar = telaWinrar
self.img26.place(x=150, y=80)
def chromeSair(self, event):
self.img24.place(x=1900, y=80)
def driverSair(self, event):
self.img28.place(x=1900, y=80)
def readerSair(self, event):
self.img27.place(x=1900, y=80)
def winrarSair(self, event):
self.img26.place(x=1900, y=80)
def gerenciador(self, event):
gerenciador = PhotoImage(file="gerenciador.png")
self.img22 = Label(janela, image=gerenciador)
self.img22.gerenciador = gerenciador
self.img22.place(x=112, y=54)
def fecharGerenciador(self, event):
self.img22.place(x=1900, y=0)
janela = Tk()
Tela(janela)
janela.title("Simulador Formatação")
janela.geometry("1400x830+50+5")
janela.resizable(width=False, height=False)
janela.config(bg="white")
janela.config(cursor="hand2")
janela.iconbitmap("placa2.ico")
janela.mainloop()
| 35.544226
| 169
| 0.571631
|
from tkinter import *
from tkinter import messagebox
import tkinter as tk
from tkinter import ttk
class Tela:
def fechar(self, event):
janela.destroy()
exit()
def fecharPc(self, event):
self.lb_simulador.place_forget()
self.imgFundo.place_forget()
self.imgg2.place_forget()
self.lbGabinete.config(bg="white")
lbMonitor.place(x=100, y=30)
self.imggg.place_forget()
self.imgg3.place_forget()
self.imgg4.place_forget()
self.imgg5.place_forget()
self.imgg6.place_forget()
self.imgg7.place_forget()
self.imgg8.place_forget()
self.imgg9.place_forget()
def __init__(self, master):
global lbMonitor
monitor = PhotoImage(file="monitor.png")
lbMonitor = Label(image=monitor)
lbMonitor.monitor = monitor
lbMonitor.place(x=100, y=30)
gabinete = PhotoImage(file="gabinete.png")
self.lbGabinete = Label(janela, image=gabinete)
self.lbGabinete.gabinete = gabinete
self.lbGabinete.place(x=970, y=285)
self.lbGabinete.bind("<Enter>", self.abrirPc)
self.lbGabinete.bind("<Leave>", self.fecharPc)
self.lbGabinete.bind("<Button-1>", self.defeitos)
teclado = PhotoImage(file="teclado.png")
lbTeclado = Label(janela, image=teclado)
lbTeclado.teclado = teclado
lbTeclado.place(x=50, y=530)
delete = PhotoImage(file="delete.png")
lbDelete = Label(janela, image=delete)
lbDelete.delete = delete
lbDelete.config(bg="red")
lbDelete.bind("<Button-1>", self.bios)
lbDelete.place(x=842, y=722)
self.sair = Button(janela, text="[X]")
self.sair["font"] = ("Arial", "15")
self.sair.config(bg="red", foreground="white")
self.sair.place(x=1200, y=30)
self.sair.bind("<Button-1>", self.fechar)
def defeitos(self, event):
janela2 = Tk()
self.p = Label(janela2, text="O computador liga normalmente mas não aparece nada\n no monitor. Quais peças devem ser testadas ?")
self.p["font"] = ("Lucida console", "30")
self.p.config(bg="black", foreground="limegreen")
self.p.place(x=140, y=30)
img_monitor = PhotoImage(master=janela2, file="monitor2.png")
self.monitor2 = Label(janela2, image=img_monitor)
self.monitor2.img_monitor = img_monitor
self.monitor2.place(x=120,y=200)
img_placa = PhotoImage(master=janela2, file="placa2.png")
self.placa = Label(janela2, image=img_placa)
self.placa.img_placa = img_placa
self.placa.place(x=420,y=200)
img_hd = PhotoImage(master=janela2, file="hd2.png")
self.hd = Label(janela2, image=img_hd)
self.hd.img_hd = img_hd
self.hd.place(x=720,y=200)
img_gpu = PhotoImage(master=janela2, file="gpu2.png")
self.gpu = Label(janela2, image=img_gpu)
self.gpu.img_gpu = img_gpu
self.gpu.place(x=1020,y=200)
janela.title("Simulador de defeitos")
janela2.geometry("1400x830+50+5")
def abrirPc(self, event):
global lbMonitor
self.lb_simulador = Label(janela, text="Clique para iniciar\n simulador de defeitos")
self.lb_simulador["font"] = ("Arial", "20")
self.lb_simulador.config(bg="black", foreground="white")
self.lb_simulador.place(x=970, y=210)
lbMonitor.place(x=1800, y=10)
fundobranco = PhotoImage(file="fundobranco.png")
self.imgFundo = Label(janela, image=fundobranco)
self.imgFundo.fundobranco = fundobranco
self.imgFundo.config(bg="white")
self.imgFundo.place(x=80,y=30)
gabineteAberto = PhotoImage(file="gabineteAberto.png")
self.imggg = Label(janela, image=gabineteAberto)
self.imggg.gabineteAberto = gabineteAberto
self.lbGabinete.config(bg="green")
self.imggg.place(x=60,y=100)
hd = PhotoImage(file="hd.png")
self.imgg2 = Label(janela, image=hd)
self.imgg2.hd = hd
self.imgg2.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg2.place(x=500,y=30)
fonte = PhotoImage(file="fonte.png")
self.imgg3 = Label(janela, image=fonte)
self.imgg3.fonte = fonte
self.imgg3.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg3.place(x=650,y=30)
cpu = PhotoImage(file="cpu.png")
self.imgg4 = Label(janela, image=cpu)
self.imgg4.cpu = cpu
self.imgg4.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg4.place(x=800,y=30)
placa = PhotoImage(file="placa.png")
self.imgg5 = Label(janela, image=placa)
self.imgg5.placa = placa
self.imgg5.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg5.place(x=500,y=200)
memoria = PhotoImage(file="memoria.png")
self.imgg6 = Label(janela, image=memoria)
self.imgg6.memoria = memoria
self.imgg6.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg6.place(x=650,y=200)
sata = PhotoImage(file="sata.png")
self.imgg7 = Label(janela, image=sata)
self.imgg7.sata = sata
self.imgg7.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg7.place(x=800,y=200)
cooler = PhotoImage(file="cooler.png")
self.imgg8 = Label(janela, image=cooler)
self.imgg8.cooler = cooler
self.imgg8.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg8.place(x=500,y=370)
gpu = PhotoImage(file="gpu.png")
self.imgg9 = Label(janela, image=gpu)
self.imgg9.gpu = gpu
self.imgg9.config(bg="green")
self.lbGabinete.config(bg="green")
self.imgg9.place(x=650,y=370)
def bios(self, event):
janela2 = tk.Tk()
p1 = tk.Label(janela2,foreground="white",background="#00008B",text="CMOS Setup Utility - Copyright (C) 1984-1999 Award Software")
p1["font"] = ("Lucida Console","18")
p1.pack(pady=7,padx=7,ipady=20,ipadx=7)
linhaH = tk.Label(janela2,foreground="white",background="#00008B",text="____________________________________________________________________")
linhaH["font"] = ("Lucida Console","18")
linhaH.place(x=0,y=60)
linhaV = tk.Label(janela2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n")
linhaV["font"] = ("Lucida Console","12")
linhaV.place(x=470,y=90)
self.p2 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Standard CMOS Features")
self.p2["font"] = ("Lucida Console","15")
self.p2.place(x=80, y=100)
self.p3 = tk.Label(janela2,foreground="yellow",background="red",text="> Advanced BIOS Features")
self.p3["font"] = ("Lucida Console","15")
self.p3.place(x=80, y=140)
self.p3.bind("<Button-1>", self.bios2)
p4 = tk.Label(janela2, foreground="#FFD700",background="#00008B",text="> Advanced Chipset Features")
p4["font"] = ("Lucida Console","15")
p4.place(x=80, y=180)
p5 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Integrated Peripherials")
p5["font"] = ("Lucida Console","15")
p5.place(x=80, y=220)
p6 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Power Management Setup")
p6["font"] = ("Lucida Console","15")
p6.place(x=80, y=260)
p7 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PnP/PCI Configurations")
p7["font"] = ("Lucida Console","15")
p7.place(x=80, y=300)
p8 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> PC Health Status")
p8["font"] = ("Lucida Console","15")
p8.place(x=80, y=340)
p9 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Frequency/Voltage Control")
p9["font"] = ("Lucida Console","15")
p9.place(x=520, y=100)
p10 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Fail-Safe Defaults")
p10["font"] = ("Lucida Console","15")
p10.place(x=520, y=140)
p11 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Load Optimized Defaults")
p11["font"] = ("Lucida Console","15")
p11.place(x=520, y=180)
p12 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set Supervisor Password")
p12["font"] = ("Lucida Console","15")
p12.place(x=520, y=220)
p13 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="> Set User Password")
p13["font"] = ("Lucida Console","15")
p13.place(x=520, y=260)
p14 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Save & Exit Setup")
p14["font"] = ("Lucida Console","15")
p14.place(x=520, y=300)
p15 = tk.Label(janela2,foreground="#FFD700",background="#00008B",text="Exit Without Saving")
p15["font"] = ("Lucida Console","15")
p15.place(x=520, y=300)
esc = tk.Label(janela2,foreground="white",background="#00008B",text="Esc : Quit")
esc["font"] = ("Lucida Console","15")
esc.place(x=23, y=470)
f10 = tk.Label(janela2,foreground="white",background="#00008B",text="F10 : Save & Exit Setup")
f10["font"] = ("Lucida Console","15")
f10.place(x=23, y=498)
rodape = tk.Label(janela2, text="Time, Date, Hard Disk Type. . .")
rodape["font"] = ("Helvetica","16")
rodape.configure(background="#00008B", foreground="#FFD700")
rodape.place(x=280,y=580)
janela2.title("BIOS")
janela2.geometry("880x640+200+30")
janela2.config(bg="#00008B")
janela2.config(cursor="hand2")
janela2.resizable(width=False, height=False)
janela2.mainloop()
def fecharBios(self, event):
janela2.destroy()
def bios2(self, event):
jan2= tk.Tk()
jan2.configure(bg="#00008B")
jan2.geometry('880x700+200+20')
jan2.config(cursor="hand2")
jan2.resizable(width=False, height=False)
jan2.title("Ordem de Boot")
self.lb1 = tk.Label(jan2,foreground="white",background="#00008B",text="Phoenix - Award BIOS CMOS Setup Utility\nAdvanced BIOS Features")
self.lb1["font"] = ("Lucida Console","18")
self.lb1.pack(pady=7,padx=7,ipady=15,ipadx=7)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="____________________________________________________________________________")
self.l1["font"] = ("Lucida Console","18")
self.l1.place(x=0,y=70)
self.l2 = tk.Label(jan2,foreground="white",background="#00008B",text="|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|\n|")
self.l2["font"] = ("Lucida Console","15")
self.l2.place(x=630, y=95)
self.lb3 = tk.Label(jan2,foreground="white",background="#00008B",text="Virus Warning")
self.lb3["font"] = ("Lucida Console","15")
self.lb3.place(x=30, y=100)
self.lb4 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.lb4["font"] = ("Lucida Console","15")
self.lb4.place(x=400, y=100)
self.lb5 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L1 Cache")
self.lb5["font"] = ("Lucida Console","15")
self.lb5.place(x=30, y=130)
self.lb6 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb6["font"] = ("Lucida Console","15")
self.lb6.place(x=400, y=130)
self.lb7 = tk.Label(jan2,foreground="white",background="#00008B",text="CPU L2 Cache")
self.lb7["font"] = ("Lucida Console","15")
self.lb7.place(x=30, y=160)
self.lb8 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb8["font"] = ("Lucida Console","15")
self.lb8.place(x=400, y=160)
self.lb9 = tk.Label(jan2,foreground="white",background="#00008B",text="Quick Power On Self Test")
self.lb9["font"] = ("Lucida Console","15")
self.lb9.place(x=30, y=190)
self.lb10 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.lb10["font"] = ("Lucida Console","15")
self.lb10.place(x=400, y=190)
self.l11 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD Boot Sprite")
self.l11["font"] = ("Lucida Console","15")
self.l11.place(x=30, y=220)
self.l12 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l12["font"] = ("Lucida Console","15")
self.l12.place(x=400, y=220)
self.l13 = tk.Label(jan2,foreground="white",background="#00008B",text="First Boot Device")
self.l13["font"] = ("Lucida Console","15")
self.l13.place(x=30, y=250)
self.l14 = tk.Label(jan2,foreground="#FFD700",background="red",text="CD-ROM")
self.l14["font"] = ("Lucida Console","15")
self.l14.place(x=400, y=250)
self.l14.bind("<Button-1>", self.boot)
self.l15 = tk.Label(jan2,foreground="white",background="#00008B",text="Second Boot Device")
self.l15["font"] = ("Lucida Console","15")
self.l15.place(x=30, y=280)
self.l16 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="HDD-0")
self.l16["font"] = ("Lucida Console","15")
self.l16.place(x=400, y=280)
self.l17 = tk.Label(jan2,foreground="white",background="#00008B",text="Third Boot Device")
self.l17["font"] = ("Lucida Console","15")
self.l17.place(x=30, y=310)
self.l18 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l18["font"] = ("Lucida Console","15")
self.l18.place(x=400, y=310)
self.l19 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Other Device")
self.l19["font"] = ("Lucida Console","15")
self.l19.place(x=30, y=340)
self.l20 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l20["font"] = ("Lucida Console","15")
self.l20.place(x=400, y=340)
self.l21 = tk.Label(jan2,foreground="white",background="#00008B",text="Swap Floppy Seek")
self.l21["font"] = ("Lucida Console","15")
self.l21.place(x=30, y=370)
self.l22 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l22["font"] = ("Lucida Console","15")
self.l22.place(x=400, y=370)
self.l23 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up Floppy Seek")
self.l23["font"] = ("Lucida Console","15")
self.l23.place(x=30, y=400)
self.l24 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l24["font"] = ("Lucida Console","15")
self.l24.place(x=400, y=400)
self.l25 = tk.Label(jan2,foreground="white",background="#00008B",text="Boot Up NumLock Status")
self.l25["font"] = ("Lucida Console","15")
self.l25.place(x=30, y=430)
self.l26 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="On")
self.l26["font"] = ("Lucida Console","15")
self.l26.place(x=400, y=430)
self.l27 = tk.Label(jan2,foreground="white",background="#00008B",text="Gate A20 Option")
self.l27["font"] = ("Lucida Console","15")
self.l27.place(x=30, y=460)
self.l28 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Normal")
self.l28["font"] = ("Lucida Console","15")
self.l28.place(x=400, y=460)
self.l29 = tk.Label(jan2,foreground="white",background="#00008B",text="Typematic Rate Setting")
self.l29["font"] = ("Lucida Console","15")
self.l29.place(x=30, y=490)
self.l30 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Disabled")
self.l30["font"] = ("Lucida Console","15")
self.l30.place(x=400, y=490)
self.l31 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Rate (Chars/Sec)")
self.l31["font"] = ("Lucida Console","15")
self.l31.place(x=9, y=520)
self.l32 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="6")
self.l32["font"] = ("Lucida Console","15")
self.l32.place(x=400, y=520)
self.l33 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="x Typematic Delay (Msec)")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=9, y=550)
self.l34 = tk.Label(jan2,foreground="#1E90FF",background="#00008B",text="250")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=550)
self.l33 = tk.Label(jan2,foreground="white",background="#00008B",text="Security Option")
self.l33["font"] = ("Lucida Console","15")
self.l33.place(x=30, y=580)
self.l34 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Setup")
self.l34["font"] = ("Lucida Console","15")
self.l34.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="OS Select For DRAM > 64MB")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=580)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Non-OS2")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=580)
self.l35 = tk.Label(jan2,foreground="white",background="#00008B",text="HDD S.M.A.R.T. Capability")
self.l35["font"] = ("Lucida Console","15")
self.l35.place(x=30, y=610)
self.l36 = tk.Label(jan2,foreground="#FFD700",background="#00008B",text="Enabled")
self.l36["font"] = ("Lucida Console","15")
self.l36.place(x=400, y=610)
self.l37 = tk.Label(jan2,foreground="white",background="#00008B",text="_____________________________________________________________________________________")
self.l37["font"] = ("Lucida Console","15")
self.l37.place(x=0, y=630)
self.f10 = tk.Label(jan2,foreground="white",background="#00008B",text="F10: Save & Exit")
self.f10["font"] = ("Lucida Console","15")
self.f10.place(x=25, y=665)
self.l38 = tk.Label(jan2,foreground="white",background="#00008B",text="Item Help")
self.l38["font"] = ("Lucida Console","15")
self.l38.place(x=705, y=120)
self.l1 = tk.Label(jan2,foreground="white",background="#00008B",text="---------------------------------")
self.l1["font"] = ("Lucida Console","15")
self.l1.place(x=640, y=152)
self.p17 = tk.Label(jan2,foreground="white",background="#00008B",text="-Menu Level >")
self.p17["font"] = ("Lucida Console","15")
self.p17.place(x=650, y=180)
jan2.mainloop()
def boot(self,event):
messagebox.showinfo("WINDOWS 10", "Iniciando instalação...")
w1 = PhotoImage(file="w1.png")
self.img1 = Label(janela, image=w1)
self.img1.w1 = w1
self.img1.place(x=123, y=50)
abnts = ["(Português Brasil ABNT-2)", "(Português Brasil ABNT)"]
abnt = ttk.Combobox(values=abnts)
abnt.set("(Português Brasil ABNT)")
abnt.place(x=412, y=262, width=337, height=22)
btAvancar = PhotoImage(file="btAvancar.png")
self.img2 = Label(janela, image=btAvancar)
self.img2.btAvancar = btAvancar
self.img2.place(x=740, y=324)
self.img2.bind("<Button-1>", self.avancar)
def avancar(self, event):
w2 = PhotoImage(file="w2.png")
self.img3 = Label(janela, image=w2)
self.img3.w2 = w2
self.img3.place(x=123, y=50)
btInstalar = PhotoImage(file="btInstalar.png")
self.img4 = Label(janela, image=btInstalar)
self.img4.btInstalar = btInstalar
self.img4.place(x=400, y=205)
self.img4.bind("<Button-1>", self.instalar)
def instalar(self, event):
w3 = PhotoImage(file="w3.png")
self.img5 = Label(janela, image=w3)
self.img5.w3 = w3
self.img5.place(x=113, y=52)
chave = PhotoImage(file="chave.png")
self.img6 = Label(janela, image=chave)
self.img6.chave = chave
self.img6.place(x=485, y=290)
self.img6.bind("<Button-1>", self.chaveW)
btAvancar2 = PhotoImage(file="btAvancar2.png")
self.img7 = Label(janela, image=btAvancar2)
self.img7.btAvancar2 = btAvancar2
self.img7.place(x=726, y=300)
self.img7.bind("<Button-1>", self.avancar2)
def chaveW(self, event):
self.img6.config(bg="lightblue")
def avancar2(self, event):
w4 = PhotoImage(file="w4.png")
self.img8 = Label(janela, image=w4)
self.img8.w4 = w4
self.img8.place(x=112, y=49)
btAvancar3 = PhotoImage(file="btAvancar3.png")
self.img9 = Label(janela, image=btAvancar3)
self.img9.btAvancar3 = btAvancar3
self.img9.place(x=726, y=300)
self.img9.bind("<Button-1>", self.avancar3)
def avancar3(self, event):
w5 = PhotoImage(file="w5.png")
self.img10 = Label(janela, image=w5)
self.img10.w5 = w5
self.img10.place(x=112, y=49)
btAvancar4 = PhotoImage(file="btAvancar4.png")
self.img11 = Label(janela, image=btAvancar4)
self.img11.btAvancar4 = btAvancar4
self.img11.place(x=726, y=305)
self.img11.bind("<Button-1>", self.avancar4)
def avancar4(self, event):
w6 = PhotoImage(file="w6.png")
self.img12 = Label(janela, image=w6)
self.img12.w6 = w6
self.img12.place(x=112, y=49)
personalizada = PhotoImage(file="personalizada.png")
self.img13 = Label(janela, image=personalizada)
self.img13.personalizada = personalizada
self.img13.place(x=206, y=205)
self.img13.bind("<Button-1>", self.avancar5)
def avancar5(self, event):
w7 = PhotoImage(file="w7.png")
self.img14 = Label(janela, image=w7)
self.img14.w7 = w7
self.img14.place(x=112, y=49)
formatar = PhotoImage(file="formatar.png")
self.img15 = Label(janela, image=formatar)
self.img15.formatar = formatar
self.img15.place(x=460, y=238)
self.img15.bind("<Button-1>", self.formatarW)
btAvancar6 = PhotoImage(file="btAvancar6.png")
self.img16 = Label(janela, image=btAvancar6)
self.img16.btAvancar6 = btAvancar6
self.img16.place(x=726, y=310)
self.img16.bind("<Button-1>", self.avancar6)
def formatarW(self, event):
messagebox.showwarning("Formatação Windows 10", "TODOS OS DADOS DESSA PARTIÇÃO SERÃO EXCLUÍDOS !!")
def avancar6(self, event):
w8 = PhotoImage(file="w8.png")
self.img18 = Label(janela, image=w8)
self.img18.w8 = w8
self.img18.place(x=112, y=49)
self.img18.bind("<Button-1>", self.win)
def win(self, event):
w9 = PhotoImage(file="w9.png")
self.img19 = Label(janela, image=w9)
self.img19.w9 = w9
self.img19.place(x=112, y=49)
self.img19.bind("<Button-1>", self.win10)
def win10(self, event):
w10 = PhotoImage(file="w10.png")
self.img20 = Label(janela, image=w10)
self.img20.w10 = w10
self.img20.place(x=112, y=49)
iniciar = PhotoImage(file="iniciar.png")
self.img21 = Label(janela, image=iniciar)
self.img21.iniciar = iniciar
self.img21.place(x=112, y=354)
self.img21.bind("<Enter>", self.gerenciador)
self.img21.bind("<Leave>", self.fecharGerenciador)
chrome = PhotoImage(file="chrome.png")
self.img23 = Label(janela, image=chrome)
self.img23.chrome = chrome
self.img23.place(x=600, y=100)
self.img23.bind("<Enter>", self.chrome)
self.img23.bind("<Leave>", self.chromeSair)
winrar = PhotoImage(file="winrar.png")
self.img26 = Label(janela, image=winrar)
self.img26.winrar = winrar
self.img26.place(x=700, y=100)
self.img26.bind("<Enter>", self.winrar)
self.img26.bind("<Leave>", self.winrarSair)
reader = PhotoImage(file="reader.png")
self.img27 = Label(janela, image=reader)
self.img27.reader = reader
self.img27.place(x=600, y=200)
self.img27.bind("<Enter>", self.reader)
self.img27.bind("<Leave>", self.readerSair)
driver = PhotoImage(file="driver.png")
self.img28 = Label(janela, image=driver)
self.img28.driver = driver
self.img28.place(x=700, y=200)
self.img28.bind("<Enter>", self.driver)
self.img28.bind("<Leave>", self.driverSair)
def reader(self, event):
telaReader = PhotoImage(file="telaReader.png")
self.img27 = Label(janela, image=telaReader)
self.img27.telaReader = telaReader
self.img27.place(x=150, y=80)
def driver(self, event):
telaDriver = PhotoImage(file="telaDriver.png")
self.img28 = Label(janela, image=telaDriver)
self.img28.telaDriver = telaDriver
self.img28.place(x=150, y=80)
def chrome(self, event):
telaChrome = PhotoImage(file="telaChrome.png")
self.img24 = Label(janela, image=telaChrome)
self.img24.telaChrome = telaChrome
self.img24.place(x=150, y=80)
def winrar(self, event):
telaWinrar = PhotoImage(file="telaWinrar.png")
self.img26 = Label(janela, image=telaWinrar)
self.img26.telaWinrar = telaWinrar
self.img26.place(x=150, y=80)
def chromeSair(self, event):
self.img24.place(x=1900, y=80)
def driverSair(self, event):
self.img28.place(x=1900, y=80)
def readerSair(self, event):
self.img27.place(x=1900, y=80)
def winrarSair(self, event):
self.img26.place(x=1900, y=80)
def gerenciador(self, event):
gerenciador = PhotoImage(file="gerenciador.png")
self.img22 = Label(janela, image=gerenciador)
self.img22.gerenciador = gerenciador
self.img22.place(x=112, y=54)
def fecharGerenciador(self, event):
self.img22.place(x=1900, y=0)
janela = Tk()
Tela(janela)
janela.title("Simulador Formatação")
janela.geometry("1400x830+50+5")
janela.resizable(width=False, height=False)
janela.config(bg="white")
janela.config(cursor="hand2")
janela.iconbitmap("placa2.ico")
janela.mainloop()
| true
| true
|
f7181c55922ded847f3c093a97e05cf3a83a7542
| 502
|
py
|
Python
|
descarteslabs/vectors/exceptions.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | 167
|
2017-03-23T22:16:58.000Z
|
2022-03-08T09:19:30.000Z
|
descarteslabs/vectors/exceptions.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | 93
|
2017-03-23T22:11:40.000Z
|
2021-12-13T18:38:53.000Z
|
descarteslabs/vectors/exceptions.py
|
carderne/descarteslabs-python
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
[
"Apache-2.0"
] | 46
|
2017-03-25T19:12:14.000Z
|
2021-08-15T18:04:29.000Z
|
class VectorException(Exception):
"""Base exception for Vector operations"""
pass
class WaitTimeoutError(VectorException):
"""The timeout period for a wait operation has been exceeded"""
pass
class FailedJobError(VectorException):
"""Used to indicate that an asynchronous job has failed"""
pass
class InvalidQueryException(VectorException):
"""The submitted query is invalid"""
pass
# FailedCopyError, use the FailedJobError
FailedCopyError = FailedJobError
| 18.592593
| 67
| 0.737052
|
class VectorException(Exception):
pass
class WaitTimeoutError(VectorException):
pass
class FailedJobError(VectorException):
pass
class InvalidQueryException(VectorException):
pass
FailedCopyError = FailedJobError
| true
| true
|
f7181d74255d1aac4659dd861c34a79c119960a0
| 2,097
|
py
|
Python
|
permutation.py
|
kaixindelele/self_demo
|
cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af
|
[
"Apache-2.0"
] | null | null | null |
permutation.py
|
kaixindelele/self_demo
|
cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af
|
[
"Apache-2.0"
] | null | null | null |
permutation.py
|
kaixindelele/self_demo
|
cdde94de6d7fa2beb4d0cc9d14eedcb6228cf0af
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 14:56:07 2018
f(n) = n * f(n-1)
and if a is a string variable
a = "hello"
b = a
b = b+" world"
print(b): hello world
print(a): hello
so "=" equal "copy"
and creat a new date
@author: lele
"""
a = "1234"
#def permutation(a,size,n):
# if n == 1:
# print(new_array)
# return
# for i in range(size):
# pass
#
#def main(a):
# a = input("please input a string of integer:")
# permutation(a,sizeof(a)/sizeof(int),n)
print("size:",size)
ls = range(1,size+1)
minimum = 0
for figure in ls:
minimum += figure * (10 ** (size-1-ls.index(figure)))
maximum = list(str(minimum))
maximum.reverse()
maximum = "".join(maximum)
def swap(temp,a,b):
temp = list(temp)
temp[a],temp[b] = temp[b],temp[a]
return temp
#temp_ls = list(str(minimum))
temp_ls = list("123")
size = len(temp_ls)
print("a:",a)
print("original temp_ls:",temp_ls)
count = 0
while(1):
if("".join(temp_ls) == maximum):
break
for i in range(size):
if(temp_ls[size-i-1]>temp_ls[size-i-2]):
roi = temp_ls[size-i-2:]
a = size-i-2
a_value = temp_ls[a]
second = []
for j in roi:
if(j>a_value):
second.append(j)
print("second",second)
b_value = min(second)
b = temp_ls.index(b_value)
print("a",a)
print("b",b)
temp_ls = swap(temp_ls,a,b)
print("swap:",temp_ls)
rest = temp_ls[size-i-1:]
print("rest",rest)
rest.reverse()
temp_ls[size-i-1:] = rest
print("finally temp_ls",temp_ls)
count += 1
print("count:",count)
print("--------------")
break
| 19.063636
| 58
| 0.44206
|
a = "1234"
print("size:",size)
ls = range(1,size+1)
minimum = 0
for figure in ls:
minimum += figure * (10 ** (size-1-ls.index(figure)))
maximum = list(str(minimum))
maximum.reverse()
maximum = "".join(maximum)
def swap(temp,a,b):
temp = list(temp)
temp[a],temp[b] = temp[b],temp[a]
return temp
temp_ls = list("123")
size = len(temp_ls)
print("a:",a)
print("original temp_ls:",temp_ls)
count = 0
while(1):
if("".join(temp_ls) == maximum):
break
for i in range(size):
if(temp_ls[size-i-1]>temp_ls[size-i-2]):
roi = temp_ls[size-i-2:]
a = size-i-2
a_value = temp_ls[a]
second = []
for j in roi:
if(j>a_value):
second.append(j)
print("second",second)
b_value = min(second)
b = temp_ls.index(b_value)
print("a",a)
print("b",b)
temp_ls = swap(temp_ls,a,b)
print("swap:",temp_ls)
rest = temp_ls[size-i-1:]
print("rest",rest)
rest.reverse()
temp_ls[size-i-1:] = rest
print("finally temp_ls",temp_ls)
count += 1
print("count:",count)
print("--------------")
break
| true
| true
|
f7181e6bab2403dd9cc3515a9e46f280c4a1f683
| 4,961
|
py
|
Python
|
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 2
|
2022-03-02T13:46:05.000Z
|
2022-03-05T12:31:28.000Z
|
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 5
|
2022-02-22T14:49:48.000Z
|
2022-03-19T10:43:08.000Z
|
airbyte-integrations/connectors/source-smartsheets/source_smartsheets/source.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 1
|
2022-03-11T06:21:24.000Z
|
2022-03-11T06:21:24.000Z
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
# helpers
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema
# main class definition
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet)) # make it subscriptable
logger.info(f"Starting syncing spreadsheet {sheet['name']}")
logger.info(f"Row count: {sheet['totalRowCount']}")
for row in sheet["rows"]:
# convert all data to string as it is only expected format in schema
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
| 37.583333
| 131
| 0.610966
|
import json
from datetime import datetime
from typing import Dict, Generator
import smartsheet
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
def get_prop(col_type: str) -> Dict[str, any]:
props = {
"TEXT_NUMBER": {"type": "string"},
"DATE": {"type": "string", "format": "date"},
"DATETIME": {"type": "string", "format": "date-time"},
}
return props.get(col_type, {"type": "string"})
def get_json_schema(sheet: Dict) -> Dict:
column_info = {i["title"]: get_prop(i["type"]) for i in sheet["columns"]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": column_info,
}
return json_schema
class SourceSmartsheets(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
try:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
smartsheet_client.errors_as_exceptions(True)
smartsheet_client.Sheets.get_sheet(spreadsheet_id)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
if isinstance(e, smartsheet.exceptions.ApiError):
err = e.error.result
code = 404 if err.code == 1006 else err.code
reason = f"{err.name}: {code} - {err.message} | Check your spreadsheet ID."
else:
reason = str(e)
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED)
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
streams = []
smartsheet_client = smartsheet.Smartsheet(access_token)
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet))
sheet_json_schema = get_json_schema(sheet)
logger.info(f"Running discovery on sheet: {sheet['name']} with {spreadsheet_id}")
stream = AirbyteStream(name=sheet["name"], json_schema=sheet_json_schema)
stream.supported_sync_modes = ["full_refresh"]
streams.append(stream)
except Exception as e:
raise Exception(f"Could not run discovery: {str(e)}")
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
access_token = config["access_token"]
spreadsheet_id = config["spreadsheet_id"]
smartsheet_client = smartsheet.Smartsheet(access_token)
for configured_stream in catalog.streams:
stream = configured_stream.stream
properties = stream.json_schema["properties"]
if isinstance(properties, list):
columns = tuple(key for dct in properties for key in dct.keys())
elif isinstance(properties, dict):
columns = tuple(i for i in properties.keys())
else:
logger.error("Could not read properties from the JSONschema in this stream")
name = stream.name
try:
sheet = smartsheet_client.Sheets.get_sheet(spreadsheet_id)
sheet = json.loads(str(sheet))
logger.info(f"Starting syncing spreadsheet {sheet['name']}")
logger.info(f"Row count: {sheet['totalRowCount']}")
for row in sheet["rows"]:
values = tuple(str(i["value"]) if "value" in i else "" for i in row["cells"])
try:
data = dict(zip(columns, values))
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
except Exception as e:
logger.error(f"Unable to encode row into an AirbyteMessage with the following error: {e}")
except Exception as e:
logger.error(f"Could not read smartsheet: {name}")
raise e
logger.info(f"Finished syncing spreadsheet with ID: {spreadsheet_id}")
| true
| true
|
f7181ee86ad8cc7e5af71dcdfa13dd1e97cf1945
| 4,658
|
py
|
Python
|
python/download-all-data.py
|
wizzardz/vehicle-statistics-india
|
a54f84460ce3129d170510ce2c33799008b1a7a6
|
[
"Apache-2.0"
] | null | null | null |
python/download-all-data.py
|
wizzardz/vehicle-statistics-india
|
a54f84460ce3129d170510ce2c33799008b1a7a6
|
[
"Apache-2.0"
] | null | null | null |
python/download-all-data.py
|
wizzardz/vehicle-statistics-india
|
a54f84460ce3129d170510ce2c33799008b1a7a6
|
[
"Apache-2.0"
] | null | null | null |
import urllib.request
import json
import sys
import os
# specify the url format for downloading the json data
url_format = 'https://data.gov.in/node/{0}/datastore/export/json'
years = [2011, 2009, 2006, 2004, 2002]
# default data for constructing the urls for each States and union teritories
json_string = json.dumps({
"Data": [{
"Andaman and Nicobar Islands": [89524, 100624, 100681, 100729, 100794]
}, {
"Chandigarh": [89529, 100629, 100682, 100730, 100795]
}, {
"Dadra And Nagar Haveli": [
89531, 100626, 100683, 100731, 100796
]
}, {
"Daman and Diu": [89532, 100627, 100684, 100732, 100797]
}, {
"Delhi": [89533, 100628,
100685, 100733, 100798
]
}, {
"Lakshadweep": [89539, 100629, 100686, 100734, 100799]
}, {
"Puducherry": [89546, 100630, 100687, 100735, 100800]
}, {
"Bihar": [89528, 100599, 100656, 100704, 100769]
}, {
"Chhattisgarh": [
89530, 100600, 100657, 100705, 100770
]
}, {
"Goa": [89534, 100601, 100658, 100706, 100771]
}, {
"Gujarat": [89535, 100602, 100659, 100706, 100772]
}, {
"Haryana": [89536, 100603, 100660, 100708, 100773]
}, {
"Himachal Pradesh": [
89537, 100604, 100661, 100709, 100774
]
}, {
"Jammu and Kashmir": [
89555, 100605, 100662, 100710, 100775
]
}, {
"Jharkhand": [89556, 100606,
100663, 100711, 100776
]
}, {
"Karnataka": [89557, 100607,
100664, 100712, 100777
]
}, {
"Kerala": [89538, 100608, 100665, 100713, 100778]
}, {
"Madhya Pradesh": [
89558, 100609, 100666, 100714, 100779
]
}, {
"Maharashtra": [89540, 100610,
100667, 100715, 100780
]
}, {
"Manipur": [89541, 100611, 100668, 100716, 100781]
}, {
"Meghalaya": [89542, 100612,
100669, 100717, 100782
]
}, {
"Mizoram": [89543, 100613, 100670, 100718, 100783]
}, {
"Nagaland": [89544, 100614, 100671, 100719, 100784]
}, {
"Odisha": [89545, 100615, 100672, 100720, 100785]
}, {
"Punjab": [89547, 100616, 100673, 100721, 100786]
}, {
"Rajasthan": [89548, 100617,
100674, 100722, 100787
]
}, {
"Sikkim": [89549, 100618, 100675, 100723, 100788]
}, {
"Tamil Nadu": [89550, 100619,
100676, 100724, 100789
]
}, {
"Tripura": [89551, 100620, 100677, 100725, 100790]
}, {
"Uttarakhand": [89553, 100621,
100678, 100726, 100791
]
}, {
"Uttar Pradesh": [89552, 100622,
100679, 100727, 100792
]
}, {
"West Bengal": [89554, 100623, 100680, 100728, 100793]
}]
})
# loads the default data in josn format
state_data = json.loads(json_string)
# check whether an url data is specified through an input file, if thats
# the case then overwrite the default data by the input file
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as json_file:
state_data = json.loads(json_file.read())
failed_urls = ''
# iterates through each data for downloading the json content
for state in state_data["Data"]:
# get the name of the state, ideally the key is same as that of
# state/union teritory
state_name = ''
for key in state.keys():
state_name = key
# initialises the index for downloading the data
index = 0
# for a state, download the json data for each year
for identifer in state[state_name]:
url = url_format.format(identifer)
try:
downloaded_data = ''
with urllib.request.urlopen(url) as response:
downloaded_data = response.read().decode('utf-8')
fille_name = '{0}/{1}.json'.format(state_name, years[index])
os.makedirs(os.path.dirname(fille_name), exist_ok=True)
with open(fille_name, "w") as output_file:
output_file.write(downloaded_data)
print(
'Downloading completed for {0}-{1}'.format(state_name, str(years[index])))
index += 1
except Exception as e:
failed_urls += "{0} - {1}\n".format(state_name, url)
if len(failed_urls) > 0:
with open("failedurl.txt", 'w') as f:
f.write(failed_urls)
print('Failed url details has been written to failedurl.txt')
| 30.246753
| 84
| 0.542078
|
import urllib.request
import json
import sys
import os
url_format = 'https://data.gov.in/node/{0}/datastore/export/json'
years = [2011, 2009, 2006, 2004, 2002]
json_string = json.dumps({
"Data": [{
"Andaman and Nicobar Islands": [89524, 100624, 100681, 100729, 100794]
}, {
"Chandigarh": [89529, 100629, 100682, 100730, 100795]
}, {
"Dadra And Nagar Haveli": [
89531, 100626, 100683, 100731, 100796
]
}, {
"Daman and Diu": [89532, 100627, 100684, 100732, 100797]
}, {
"Delhi": [89533, 100628,
100685, 100733, 100798
]
}, {
"Lakshadweep": [89539, 100629, 100686, 100734, 100799]
}, {
"Puducherry": [89546, 100630, 100687, 100735, 100800]
}, {
"Bihar": [89528, 100599, 100656, 100704, 100769]
}, {
"Chhattisgarh": [
89530, 100600, 100657, 100705, 100770
]
}, {
"Goa": [89534, 100601, 100658, 100706, 100771]
}, {
"Gujarat": [89535, 100602, 100659, 100706, 100772]
}, {
"Haryana": [89536, 100603, 100660, 100708, 100773]
}, {
"Himachal Pradesh": [
89537, 100604, 100661, 100709, 100774
]
}, {
"Jammu and Kashmir": [
89555, 100605, 100662, 100710, 100775
]
}, {
"Jharkhand": [89556, 100606,
100663, 100711, 100776
]
}, {
"Karnataka": [89557, 100607,
100664, 100712, 100777
]
}, {
"Kerala": [89538, 100608, 100665, 100713, 100778]
}, {
"Madhya Pradesh": [
89558, 100609, 100666, 100714, 100779
]
}, {
"Maharashtra": [89540, 100610,
100667, 100715, 100780
]
}, {
"Manipur": [89541, 100611, 100668, 100716, 100781]
}, {
"Meghalaya": [89542, 100612,
100669, 100717, 100782
]
}, {
"Mizoram": [89543, 100613, 100670, 100718, 100783]
}, {
"Nagaland": [89544, 100614, 100671, 100719, 100784]
}, {
"Odisha": [89545, 100615, 100672, 100720, 100785]
}, {
"Punjab": [89547, 100616, 100673, 100721, 100786]
}, {
"Rajasthan": [89548, 100617,
100674, 100722, 100787
]
}, {
"Sikkim": [89549, 100618, 100675, 100723, 100788]
}, {
"Tamil Nadu": [89550, 100619,
100676, 100724, 100789
]
}, {
"Tripura": [89551, 100620, 100677, 100725, 100790]
}, {
"Uttarakhand": [89553, 100621,
100678, 100726, 100791
]
}, {
"Uttar Pradesh": [89552, 100622,
100679, 100727, 100792
]
}, {
"West Bengal": [89554, 100623, 100680, 100728, 100793]
}]
})
state_data = json.loads(json_string)
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as json_file:
state_data = json.loads(json_file.read())
failed_urls = ''
for state in state_data["Data"]:
state_name = ''
for key in state.keys():
state_name = key
index = 0
for identifer in state[state_name]:
url = url_format.format(identifer)
try:
downloaded_data = ''
with urllib.request.urlopen(url) as response:
downloaded_data = response.read().decode('utf-8')
fille_name = '{0}/{1}.json'.format(state_name, years[index])
os.makedirs(os.path.dirname(fille_name), exist_ok=True)
with open(fille_name, "w") as output_file:
output_file.write(downloaded_data)
print(
'Downloading completed for {0}-{1}'.format(state_name, str(years[index])))
index += 1
except Exception as e:
failed_urls += "{0} - {1}\n".format(state_name, url)
if len(failed_urls) > 0:
with open("failedurl.txt", 'w') as f:
f.write(failed_urls)
print('Failed url details has been written to failedurl.txt')
| true
| true
|
f71820aea4c4ecfde9e0adb936a156185abd7e94
| 10,068
|
py
|
Python
|
constph/gromos_factory.py
|
bbraunsfeld/const_pH_gromos
|
6ef02da6fc0f451aa0082b726926c6fccabf324b
|
[
"MIT"
] | null | null | null |
constph/gromos_factory.py
|
bbraunsfeld/const_pH_gromos
|
6ef02da6fc0f451aa0082b726926c6fccabf324b
|
[
"MIT"
] | 1
|
2021-09-17T18:17:39.000Z
|
2021-09-17T18:17:39.000Z
|
constph/gromos_factory.py
|
bbraunsfeld/const_pH_gromos
|
6ef02da6fc0f451aa0082b726926c6fccabf324b
|
[
"MIT"
] | null | null | null |
import datetime
from os import stat
class GromosFactory:
"""Class to build the string needed to create a Gromos input file (*.imd), a make_script fiel (*.arg) and a job file (*.job)"""
def __init__(self, configuration: dict, structure: str) -> None:
self.configuration = configuration
self.structure = structure
def _get_search_run_parameters(self):
prms = {}
for key in self.configuration["search_run"]["search_parameters"]:
prms[key] = self.configuration["search_run"]["search_parameters"][key]
return prms
def _get_production_run_parameters(self):
prms = {}
for key in self.configuration["production_run"]["production_parameters"]:
prms[key] = self.configuration["production_run"]["production_parameters"][key]
return prms
def generate_Gromos_search_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "search":
gromos_search_script += (
self._get_Gromos_search_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def generate_Gromos_production_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "production":
gromos_search_script += (
self._get_Gromos_production_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def _get_Gromos_input_header(self, env: str) -> str:
date = datetime.date.today()
header = f"""TITLE
Automatically generated input file for {env} run with constph
Version {date}
END
"""
return header
def _get_Gromos_search_body(self) -> str:
NSM = self.configuration["search_run"]["search_parameters"]["NSM"]
NSTLIM = self.configuration["search_run"]["search_parameters"]["NSTLIM"]
DT = self.configuration["search_run"]["search_parameters"]["dt"]
ATMNR1 = self.configuration["search_run"]["search_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["search_run"]["search_parameters"]["ATMNR2"]
NTWX = self.configuration["search_run"]["search_parameters"]["NTWX"]
NTWE = self.configuration["search_run"]["search_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["search_run"]["search_parameters"]["NSTATES"]
OFFSETS = "0 " * int(NSTATES)
SIGMA = self.configuration["search_run"]["search_parameters"]["sigma"]
ASTEPS = self.configuration["search_run"]["search_parameters"]["asteps"]
BSTEPS = self.configuration["search_run"]["search_parameters"]["bsteps"]
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
0 0
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 {ASTEPS} {BSTEPS}
END"""
return body
def _get_Gromos_production_body(self) -> str:
NSM = self.configuration["production_run"]["_parameters"]["NSM"]
NSTLIM = self.configuration["production_run"]["production_parameters"]["NSTLIM"]
DT = self.configuration["production_run"]["production_parameters"]["dt"]
ATMNR1 = self.configuration["production_run"]["production_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["production_run"]["production_parameters"]["ATMNR2"]
NTWX = self.configuration["production_run"]["production_parameters"]["NTWX"]
NTWE = self.configuration["production_run"]["production_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["production_run"]["production_parameters"]["NSTATES"]
OFFSETS = "0 {new_offset}"
SIGMA = self.configuration["production_run"]["production_parameters"]["sigma"]
EMIN = "found in search"
EMAX = "found in search"
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
{EMAX} {EMIN}
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 0 0
END"""
return body
| 29.786982
| 131
| 0.527712
|
import datetime
from os import stat
class GromosFactory:
def __init__(self, configuration: dict, structure: str) -> None:
self.configuration = configuration
self.structure = structure
def _get_search_run_parameters(self):
prms = {}
for key in self.configuration["search_run"]["search_parameters"]:
prms[key] = self.configuration["search_run"]["search_parameters"][key]
return prms
def _get_production_run_parameters(self):
prms = {}
for key in self.configuration["production_run"]["production_parameters"]:
prms[key] = self.configuration["production_run"]["production_parameters"][key]
return prms
def generate_Gromos_search_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "search":
gromos_search_script += (
self._get_Gromos_search_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def generate_Gromos_production_input(self, env: str) -> str:
gromos_search_script = self._get_Gromos_input_header(env)
if env == "production":
gromos_search_script += (
self._get_Gromos_production_body()
)
else:
raise NotImplementedError(f"Something went wrong with {env} input.")
return gromos_search_script
def _get_Gromos_input_header(self, env: str) -> str:
date = datetime.date.today()
header = f"""TITLE
Automatically generated input file for {env} run with constph
Version {date}
END
"""
return header
def _get_Gromos_search_body(self) -> str:
NSM = self.configuration["search_run"]["search_parameters"]["NSM"]
NSTLIM = self.configuration["search_run"]["search_parameters"]["NSTLIM"]
DT = self.configuration["search_run"]["search_parameters"]["dt"]
ATMNR1 = self.configuration["search_run"]["search_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["search_run"]["search_parameters"]["ATMNR2"]
NTWX = self.configuration["search_run"]["search_parameters"]["NTWX"]
NTWE = self.configuration["search_run"]["search_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["search_run"]["search_parameters"]["NSTATES"]
OFFSETS = "0 " * int(NSTATES)
SIGMA = self.configuration["search_run"]["search_parameters"]["sigma"]
ASTEPS = self.configuration["search_run"]["search_parameters"]["asteps"]
BSTEPS = self.configuration["search_run"]["search_parameters"]["bsteps"]
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
0 0
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 {ASTEPS} {BSTEPS}
END"""
return body
def _get_Gromos_production_body(self) -> str:
NSM = self.configuration["production_run"]["_parameters"]["NSM"]
NSTLIM = self.configuration["production_run"]["production_parameters"]["NSTLIM"]
DT = self.configuration["production_run"]["production_parameters"]["dt"]
ATMNR1 = self.configuration["production_run"]["production_parameters"]["ATMNR1"]
ATMNR2 = self.configuration["production_run"]["production_parameters"]["ATMNR2"]
NTWX = self.configuration["production_run"]["production_parameters"]["NTWX"]
NTWE = self.configuration["production_run"]["production_parameters"]["NTWE"]
FORM = "4"
NSTATES = self.configuration["production_run"]["production_parameters"]["NSTATES"]
OFFSETS = "0 {new_offset}"
SIGMA = self.configuration["production_run"]["production_parameters"]["sigma"]
EMIN = "found in search"
EMAX = "found in search"
body = f"""SYSTEM
# NPM NSM
1 {NSM}
END
STEP
# NSTLIM T DT
{NSTLIM} 0 {DT}
END
BOUNDCOND
# NTB NDFMIN
1 3
END
MULTIBATH
# NTBTYP:
# weak-coupling: use weak-coupling scheme
# nose-hoover: use Nose Hoover scheme
# nose-hoover-chains: use Nose Hoover chains scheme
# NUM: number of chains in Nose Hoover chains scheme
# !! only specify NUM when needed !!
# NBATHS: number of temperature baths to couple to
# NTBTYP
0
# NBATHS
2
# TEMP0(1 ... NBATHS) TAU(1 ... NBATHS)
300 0.1
300 0.1
# DOFSET: number of distinguishable sets of d.o.f.
2
# LAST(1 ... DOFSET) COMBATH(1 ... DOFSET) IRBATH(1 ... DOFSET)
{ATMNR1} 1 1 {ATMNR2} 2 2
END
PRESSURESCALE
# COUPLE SCALE COMP TAUP VIRIAL
2 1 0.0007624 0.5 2
# SEMIANISOTROPIC COUPLINGS(X, Y, Z)
1 1 2
# PRES0(1...3,1...3)
0.06102 0 0
0 0.06102 0
0 0 0.06102
END
FORCE
# NTF array
# bonds angles imp. dihe charge nonbonded
0 1 1 1 1 1
# NEGR NRE(1) NRE(2) ... NRE(NEGR)
2
{ATMNR1} {ATMNR2}
END
COVALENTFORM
# NTBBH NTBAH NTBDN
0 0 0
END
CONSTRAINT
# NTC
3
# NTCP NTCP0(1)
1 0.0001
# NTCS NTCS0(1)
1 0.0001
END
PAIRLIST
# algorithm NSNB RCUTP RCUTL SIZE TYPE
1 5 0.8 1.4 0.4 0
END
NONBONDED
# NLRELE
1
# APPAK RCRF EPSRF NSLFEXCL
0 1.4 78.5 1
# NSHAPE ASHAPE NA2CLC TOLA2 EPSLS
3 1.4 2 1e-10 0
# NKX NKY NKZ KCUT
10 10 10 100
# NGX NGY NGZ NASORD NFDORD NALIAS NSPORD
32 32 32 3 2 3 4
# NQEVAL FACCUR NRDGRD NWRGRD
100000 1.6 0 0
# NLRLJ SLVDNS
0 33.3
END
INITIALISE
# Default values for NTI values: 0
# NTIVEL NTISHK NTINHT NTINHB
0 0 0 0
# NTISHI NTIRTC NTICOM
0 0 0
# NTISTI
0
# IG TEMPI
210185 0
END
COMTRANSROT
# NSCM
1000
END
PRINTOUT
#NTPR: print out energies, etc. every NTPR steps
#NTPP: =1 perform dihedral angle transition monitoring
# NTPR NTPP
500 0
END
WRITETRAJ
# NTWX NTWSE NTWV NTWF NTWE NTWG NTWB
{NTWX} 0 0 0 {NTWE} 0 0
END
AEDS
# AEDS
1
# ALPHLJ ALPHCRF FORM NUMSTATES
0 0 {FORM} {NSTATES}
# EMAX EMIN
{EMAX} {EMIN}
# EIR [1..NUMSTATES]
{OFFSETS}
# NTIAEDSS RESTREMIN BMAXTYPE BMAX ASTEPS BSTEPS
1 1 {SIGMA} 2 0 0
END"""
return body
| true
| true
|
f718217d51a3402d72204f81cd749070c51ae9c6
| 387
|
py
|
Python
|
borsa/asgi.py
|
bozcani/borsa-scraper-app
|
56c767a9b6d6c9be40046aa03763f13465860f6f
|
[
"MIT"
] | 3
|
2020-02-06T10:05:29.000Z
|
2020-04-18T10:11:37.000Z
|
borsa/asgi.py
|
bozcani/borsa
|
56c767a9b6d6c9be40046aa03763f13465860f6f
|
[
"MIT"
] | 10
|
2020-02-06T08:50:13.000Z
|
2020-04-25T12:17:17.000Z
|
borsa/asgi.py
|
bozcani/borsa-scraper-app
|
56c767a9b6d6c9be40046aa03763f13465860f6f
|
[
"MIT"
] | 1
|
2020-02-06T07:40:06.000Z
|
2020-02-06T07:40:06.000Z
|
"""
ASGI config for borsa project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'borsa.settings')
application = get_asgi_application()
| 22.764706
| 78
| 0.782946
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'borsa.settings')
application = get_asgi_application()
| true
| true
|
f71821ed4d2b1e66e27c2cefdf134e9907e7d2b1
| 8,869
|
py
|
Python
|
src/python/main/segeval/ml/PercentageTest.py
|
anna-ka/segmentation.evaluation
|
b7eddc9067fc773f3d040dd5eef33dabac07abc0
|
[
"BSD-3-Clause"
] | 1
|
2017-05-09T06:16:58.000Z
|
2017-05-09T06:16:58.000Z
|
src/python/main/segeval/ml/PercentageTest.py
|
anna-ka/segmentation.evaluation
|
b7eddc9067fc773f3d040dd5eef33dabac07abc0
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/main/segeval/ml/PercentageTest.py
|
anna-ka/segmentation.evaluation
|
b7eddc9067fc773f3d040dd5eef33dabac07abc0
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Tests the WindowDiff evaluation metric.
.. moduleauthor:: Chris Fournier <chris.m.fournier@gmail.com>
'''
#===============================================================================
# Copyright (c) 2011-2012, Chris Fournier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import unittest
from decimal import Decimal
from .Percentage import percentage, pairwise_percentage, \
find_boundary_position_freqs
from ..data.Samples import KAZANTSEVA2012_G5, KAZANTSEVA2012_G2, \
COMPLETE_AGREEMENT, LARGE_DISAGREEMENT
from .. import convert_positions_to_masses
class TestPercentage(unittest.TestCase):
'''
Test segmentation percentage.
'''
# pylint: disable=R0904
def test_identical(self):
'''
Test whether identical segmentations produce 1.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),1.0)
def test_no_boundaries(self):
'''
Test whether no segments versus some segments produce 0.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_all_boundaries(self):
'''
Test whether all segments versus some segments produces 2/12, or 0.167.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.1666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.1666666666666666666666666667'))
def test_all_and_no_boundaries(self):
'''
Test whether all segments versus no segments produces 0.0.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_translated_boundary(self):
'''
Test whether 2/3 total segments participate in mis-alignment produces
0.33.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.3333333333333333333333333333'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.3333333333333333333333333333'))
def test_extra_boundary(self):
'''
Test whether 1/3 segments that are non-existent produces 0.66.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.6666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.6666666666666666666666666667'))
def test_full_miss_and_misaligned(self):
'''
Test whether a full miss and a translated boundary out of 4 produces
0.25.
'''
# pylint: disable=C0324
segs_a = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b), Decimal('0.25'))
self.assertEqual(percentage(segs_b, segs_a), Decimal('0.25'))
class TestPairwisePercentage(unittest.TestCase):
# pylint: disable=R0904
'''
Test permuted pairwise percentage.
'''
def test_kazantseva2012_g5(self):
'''
Calculate permuted pairwise percentage on Group 5 from the dataset
collected in Kazantseva (2012).
'''
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G5),
(Decimal('0.1621263635243898401793138635'),
Decimal('0.1788409781886208812486660585'),
Decimal('0.03198409547946276978304443503'),
Decimal('0.03650576180519474391025947712')))
def test_kazantseva2012_g2(self):
'''
Calculate mean permuted pairwise percentage on Group 2 from the dataset
collected in Kazantseva (2012).
'''
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G2),
(Decimal('0.3398087832646656176067940768'),
Decimal('0.1948481072924021072633034332'),
Decimal('0.03796578491543144325163024138'),
Decimal('0.02515478248611697670879150623')))
def test_large_disagreement(self):
'''
Calculate mean permuted pairwise percentage on a theoretical dataset
containing large disagreement.
'''
self.assertEqual(pairwise_percentage(LARGE_DISAGREEMENT),
(0.0,
0.0,
0.0,
0.0))
def test_complete_agreement(self):
'''
Calculate mean permuted pairwise percentage on a theoretical dataset
containing complete agreement.
'''
self.assertEqual(pairwise_percentage(COMPLETE_AGREEMENT),
(1.0,
0.0,
0.0,
0.0))
class TestPercentageUtils(unittest.TestCase):
# pylint: disable=R0904
'''
Test utility functions used to calculate percentage.
'''
def test_find_seg_positions(self):
'''
Test segmentation position frequency counting.
'''
# pylint: disable=C0324
seg_positions = find_boundary_position_freqs([[1,2,3,3,2,1],
[1,2,2,4,2,1]])
self.assertEqual(seg_positions, { 1: 2,
3: 2,
5: 1,
6: 1,
9: 2,
11: 2})
| 42.033175
| 80
| 0.56658
|
import unittest
from decimal import Decimal
from .Percentage import percentage, pairwise_percentage, \
find_boundary_position_freqs
from ..data.Samples import KAZANTSEVA2012_G5, KAZANTSEVA2012_G2, \
COMPLETE_AGREEMENT, LARGE_DISAGREEMENT
from .. import convert_positions_to_masses
class TestPercentage(unittest.TestCase):
def test_identical(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),1.0)
def test_no_boundaries(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_all_boundaries(self):
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.1666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.1666666666666666666666666667'))
def test_all_and_no_boundaries(self):
segs_a = convert_positions_to_masses(
[1,2,3,4,5,6,7,8,9,10,11,12,13])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(percentage(segs_a, segs_b),0)
self.assertEqual(percentage(segs_b, segs_a),0)
def test_translated_boundary(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.3333333333333333333333333333'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.3333333333333333333333333333'))
def test_extra_boundary(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,1,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b),
Decimal('0.6666666666666666666666666667'))
self.assertEqual(percentage(segs_b, segs_a),
Decimal('0.6666666666666666666666666667'))
def test_full_miss_and_misaligned(self):
segs_a = convert_positions_to_masses(
[1,1,1,1,2,2,2,2,3,3,3,3,3])
segs_b = convert_positions_to_masses(
[1,1,1,1,1,2,3,3,4,4,4,4,4])
self.assertEqual(percentage(segs_a, segs_b), Decimal('0.25'))
self.assertEqual(percentage(segs_b, segs_a), Decimal('0.25'))
class TestPairwisePercentage(unittest.TestCase):
def test_kazantseva2012_g5(self):
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G5),
(Decimal('0.1621263635243898401793138635'),
Decimal('0.1788409781886208812486660585'),
Decimal('0.03198409547946276978304443503'),
Decimal('0.03650576180519474391025947712')))
def test_kazantseva2012_g2(self):
self.assertEqual(pairwise_percentage(KAZANTSEVA2012_G2),
(Decimal('0.3398087832646656176067940768'),
Decimal('0.1948481072924021072633034332'),
Decimal('0.03796578491543144325163024138'),
Decimal('0.02515478248611697670879150623')))
def test_large_disagreement(self):
self.assertEqual(pairwise_percentage(LARGE_DISAGREEMENT),
(0.0,
0.0,
0.0,
0.0))
def test_complete_agreement(self):
self.assertEqual(pairwise_percentage(COMPLETE_AGREEMENT),
(1.0,
0.0,
0.0,
0.0))
class TestPercentageUtils(unittest.TestCase):
def test_find_seg_positions(self):
seg_positions = find_boundary_position_freqs([[1,2,3,3,2,1],
[1,2,2,4,2,1]])
self.assertEqual(seg_positions, { 1: 2,
3: 2,
5: 1,
6: 1,
9: 2,
11: 2})
| true
| true
|
f7182279ccd3d16543495752c131fb1fcf6fbcc0
| 5,356
|
py
|
Python
|
torchmetrics/regression/pearson.py
|
lucadiliello/metrics
|
e98fbafd2af5d217596958f9cfe6152543a00b7f
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/regression/pearson.py
|
lucadiliello/metrics
|
e98fbafd2af5d217596958f9cfe6152543a00b7f
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/regression/pearson.py
|
lucadiliello/metrics
|
e98fbafd2af5d217596958f9cfe6152543a00b7f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
from torchmetrics.metric import Metric
def _final_aggregation(
means_x: Tensor,
means_y: Tensor,
vars_x: Tensor,
vars_y: Tensor,
corrs_xy: Tensor,
nbs: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Aggregate the statistics from multiple devices.
Formula taken from here: `Aggregate the statistics from multiple devices`_
"""
# assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
nb = n1 + n2
mean_x = (n1 * mx1 + n2 * mx2) / nb
mean_y = (n1 * my1 + n2 * my2) / nb
var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)
var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)
corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)
corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)
corr_xy = (corr1 + corr2) / (n1 + n2)
mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
return var_x, var_y, corr_xy, nb
class PearsonCorrcoef(Metric):
r"""
Computes `Pearson Correlation Coefficient`_:
.. math::
P_{corr}(x,y) = \frac{cov(x,y)}{\sigma_x \sigma_y}
Where :math:`y` is a tensor of target values, and :math:`x` is a
tensor of predictions.
Forward accepts
- ``preds`` (float tensor): ``(N,)``
- ``target``(float tensor): ``(N,)``
Args:
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import PearsonCorrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> pearson = PearsonCorrcoef()
>>> pearson(preds, target)
tensor(0.9849)
"""
is_differentiable = True
higher_is_better = None # both -1 and 1 are optimal
preds: List[Tensor]
target: List[Tensor]
mean_x: Tensor
mean_y: Tensor
var_x: Tensor
var_y: Tensor
corr_xy: Tensor
n_total: Tensor
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.add_state("mean_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("mean_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("corr_xy", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("n_total", default=torch.tensor(0.0), dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
"""Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
def compute(self) -> Tensor:
"""Computes pearson correlation coefficient over state."""
if self.mean_x.numel() > 1: # multiple devices, need further reduction
var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
| 37.71831
| 120
| 0.626027
|
from typing import Any, List, Optional, Tuple
import torch
from torch import Tensor
from torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update
from torchmetrics.metric import Metric
def _final_aggregation(
means_x: Tensor,
means_y: Tensor,
vars_x: Tensor,
vars_y: Tensor,
corrs_xy: Tensor,
nbs: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]
for i in range(1, len(means_x)):
mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]
nb = n1 + n2
mean_x = (n1 * mx1 + n2 * mx2) / nb
mean_y = (n1 * my1 + n2 * my2) / nb
var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)
var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)
corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)
corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)
corr_xy = (corr1 + corr2) / (n1 + n2)
mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb
return var_x, var_y, corr_xy, nb
class PearsonCorrcoef(Metric):
is_differentiable = True
higher_is_better = None
preds: List[Tensor]
target: List[Tensor]
mean_x: Tensor
mean_y: Tensor
var_x: Tensor
var_y: Tensor
corr_xy: Tensor
n_total: Tensor
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.add_state("mean_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("mean_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_x", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("var_y", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("corr_xy", default=torch.tensor(0.0), dist_reduce_fx=None)
self.add_state("n_total", default=torch.tensor(0.0), dist_reduce_fx=None)
def update(self, preds: Tensor, target: Tensor) -> None:
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(
preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
def compute(self) -> Tensor:
if self.mean_x.numel() > 1:
var_x, var_y, corr_xy, n_total = _final_aggregation(
self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total
)
else:
var_x = self.var_x
var_y = self.var_y
corr_xy = self.corr_xy
n_total = self.n_total
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)
| true
| true
|
f718227f7c7f9f79e60bd34c13ee360426d8cedb
| 12,071
|
py
|
Python
|
8.SAC/SAC-continuous.py
|
Lizhi-sjtu/DRL-code-pytorch
|
2ca05f4ed64d2d032e161fc3a2d2a68c818c4337
|
[
"MIT"
] | 2
|
2022-03-27T01:56:48.000Z
|
2022-03-31T05:02:39.000Z
|
8.SAC/SAC-continuous.py
|
Lizhi-sjtu/DRL-code-pytorch
|
2ca05f4ed64d2d032e161fc3a2d2a68c818c4337
|
[
"MIT"
] | null | null | null |
8.SAC/SAC-continuous.py
|
Lizhi-sjtu/DRL-code-pytorch
|
2ca05f4ed64d2d032e161fc3a2d2a68c818c4337
|
[
"MIT"
] | null | null | null |
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Normal
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width, max_action):
super(Actor, self).__init__()
self.max_action = max_action
self.l1 = nn.Linear(state_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.mean_layer = nn.Linear(hidden_width, action_dim)
self.log_std_layer = nn.Linear(hidden_width, action_dim)
def forward(self, x, deterministic=False, with_logprob=True):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
mean = self.mean_layer(x)
log_std = self.log_std_layer(x) # We output the log_std to ensure that std=exp(log_std)>0
log_std = torch.clamp(log_std, -20, 2)
std = torch.exp(log_std)
dist = Normal(mean, std) # Generate a Gaussian distribution
if deterministic: # When evaluating,we use the deterministic policy
a = mean
else:
a = dist.rsample() # reparameterization trick: mean+std*N(0,1)
if with_logprob: # The method refers to Open AI Spinning up, which is more stable.
log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)
log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
else:
log_pi = None
a = self.max_action * torch.tanh(a) # Use tanh to compress the unbounded Gaussian distribution into a bounded action interval.
return a, log_pi
class Critic(nn.Module): # According to (s,a), directly calculate Q(s,a)
def __init__(self, state_dim, action_dim, hidden_width):
super(Critic, self).__init__()
# Q1
self.l1 = nn.Linear(state_dim + action_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.l3 = nn.Linear(hidden_width, 1)
# Q2
self.l4 = nn.Linear(state_dim + action_dim, hidden_width)
self.l5 = nn.Linear(hidden_width, hidden_width)
self.l6 = nn.Linear(hidden_width, 1)
def forward(self, s, a):
s_a = torch.cat([s, a], 1)
q1 = F.relu(self.l1(s_a))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(s_a))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim):
self.max_size = int(1e6)
self.count = 0
self.size = 0
self.s = np.zeros((self.max_size, state_dim))
self.a = np.zeros((self.max_size, action_dim))
self.r = np.zeros((self.max_size, 1))
self.s_ = np.zeros((self.max_size, state_dim))
self.dw = np.zeros((self.max_size, 1))
def store(self, s, a, r, s_, dw):
self.s[self.count] = s
self.a[self.count] = a
self.r[self.count] = r
self.s_[self.count] = s_
self.dw[self.count] = dw
self.count = (self.count + 1) % self.max_size # When the 'count' reaches max_size, it will be reset to 0.
self.size = min(self.size + 1, self.max_size) # Record the number of transitions
def sample(self, batch_size):
index = np.random.choice(self.size, size=batch_size) # Randomly sampling
batch_s = torch.tensor(self.s[index], dtype=torch.float)
batch_a = torch.tensor(self.a[index], dtype=torch.float)
batch_r = torch.tensor(self.r[index], dtype=torch.float)
batch_s_ = torch.tensor(self.s_[index], dtype=torch.float)
batch_dw = torch.tensor(self.dw[index], dtype=torch.float)
return batch_s, batch_a, batch_r, batch_s_, batch_dw
class SAC(object):
def __init__(self, state_dim, action_dim, max_action):
self.max_action = max_action
self.hidden_width = 256 # The number of neurons in hidden layers of the neural network
self.batch_size = 256 # batch size
self.GAMMA = 0.99 # discount factor
self.TAU = 0.005 # Softly update the target network
self.lr = 3e-4 # learning rate
self.adaptive_alpha = True # Whether to automatically learn the temperature alpha
if self.adaptive_alpha:
# Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
self.target_entropy = -action_dim
# We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
self.log_alpha = torch.zeros(1, requires_grad=True)
self.alpha = self.log_alpha.exp()
self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)
else:
self.alpha = 0.2
self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)
self.critic = Critic(state_dim, action_dim, self.hidden_width)
self.critic_target = copy.deepcopy(self.critic)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)
def choose_action(self, s, deterministic=False):
s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)
a, _ = self.actor(s, deterministic, False) # When choosing actions, we do not need to compute log_pi
return a.data.numpy().flatten()
def learn(self, relay_buffer):
batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size) # Sample a batch
with torch.no_grad():
batch_a_, log_pi_ = self.actor(batch_s_) # a' from the current policy
# Compute target Q
target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)
target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)
# Compute current Q
current_Q1, current_Q2 = self.critic(batch_s, batch_a)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Freeze critic networks so you don't waste computational effort
for params in self.critic.parameters():
params.requires_grad = False
# Compute actor loss
a, log_pi = self.actor(batch_s)
Q1, Q2 = self.critic(batch_s, a)
Q = torch.min(Q1, Q2)
actor_loss = (self.alpha * log_pi - Q).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Unfreeze critic networks
for params in self.critic.parameters():
params.requires_grad = True
# Update alpha
if self.adaptive_alpha:
# We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
self.alpha = self.log_alpha.exp()
# Softly update target networks
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)
def evaluate_policy(env, agent):
times = 3 # Perform three evaluations and calculate the average
evaluate_reward = 0
for _ in range(times):
s = env.reset()
done = False
episode_reward = 0
while not done:
a = agent.choose_action(s, deterministic=True) # We use the deterministic policy during the evaluating
s_, r, done, _ = env.step(a)
episode_reward += r
s = s_
evaluate_reward += episode_reward
return int(evaluate_reward / times)
def reward_adapter(r, env_index):
if env_index == 0: # Pendulum-v1
r = (r + 8) / 8
elif env_index == 1: # BipedalWalker-v3
if r <= -100:
r = -1
return r
if __name__ == '__main__':
env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
env_index = 0
env = gym.make(env_name[env_index])
env_evaluate = gym.make(env_name[env_index]) # When evaluating the policy, we need to rebuild an environment
number = 1
seed = 0
# Set random seed
env.seed(seed)
env.action_space.seed(seed)
env_evaluate.seed(seed)
env_evaluate.action_space.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
max_episode_steps = env._max_episode_steps # Maximum number of steps per episode
print("env={}".format(env_name[env_index]))
print("state_dim={}".format(state_dim))
print("action_dim={}".format(action_dim))
print("max_action={}".format(max_action))
print("max_episode_steps={}".format(max_episode_steps))
agent = SAC(state_dim, action_dim, max_action)
replay_buffer = ReplayBuffer(state_dim, action_dim)
# Build a tensorboard
writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))
max_train_steps = 3e6 # Maximum number of training steps
random_steps = 25e3 # Take the random actions in the beginning for the better exploration
evaluate_freq = 5e3 # Evaluate the policy every 'evaluate_freq' steps
evaluate_num = 0 # Record the number of evaluations
evaluate_rewards = [] # Record the rewards during the evaluating
total_steps = 0 # Record the total steps during the training
while total_steps < max_train_steps:
s = env.reset()
episode_steps = 0
done = False
while not done:
episode_steps += 1
if total_steps < random_steps: # Take the random actions in the beginning for the better exploration
a = env.action_space.sample()
else:
a = agent.choose_action(s)
s_, r, done, _ = env.step(a)
r = reward_adapter(r, env_index) # Adjust rewards for better performance
# When dead or win or reaching the max_episode_steps, done will be Ture, we need to distinguish them;
# dw means dead or win,there is no next state s';
# but when reaching the max_episode_steps,there is a next state s' actually.
if done and episode_steps != max_episode_steps:
dw = True
else:
dw = False
replay_buffer.store(s, a, r, s_, dw) # Store the transition
s = s_
if total_steps >= random_steps:
agent.learn(replay_buffer)
# Evaluate the policy every 'evaluate_freq' steps
if (total_steps + 1) % evaluate_freq == 0:
evaluate_num += 1
evaluate_reward = evaluate_policy(env_evaluate, agent)
evaluate_rewards.append(evaluate_reward)
print("evaluate_num:{} \t evaluate_reward:{}".format(evaluate_num, evaluate_reward))
writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)
# Save the rewards
if evaluate_num % 10 == 0:
np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))
total_steps += 1
| 42.65371
| 147
| 0.618341
|
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Normal
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width, max_action):
super(Actor, self).__init__()
self.max_action = max_action
self.l1 = nn.Linear(state_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.mean_layer = nn.Linear(hidden_width, action_dim)
self.log_std_layer = nn.Linear(hidden_width, action_dim)
def forward(self, x, deterministic=False, with_logprob=True):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
mean = self.mean_layer(x)
log_std = self.log_std_layer(x)
log_std = torch.clamp(log_std, -20, 2)
std = torch.exp(log_std)
dist = Normal(mean, std)
if deterministic:
a = mean
else:
a = dist.rsample()
if with_logprob:
log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)
log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
else:
log_pi = None
a = self.max_action * torch.tanh(a)
return a, log_pi
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width):
super(Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.l3 = nn.Linear(hidden_width, 1)
self.l4 = nn.Linear(state_dim + action_dim, hidden_width)
self.l5 = nn.Linear(hidden_width, hidden_width)
self.l6 = nn.Linear(hidden_width, 1)
def forward(self, s, a):
s_a = torch.cat([s, a], 1)
q1 = F.relu(self.l1(s_a))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(s_a))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim):
self.max_size = int(1e6)
self.count = 0
self.size = 0
self.s = np.zeros((self.max_size, state_dim))
self.a = np.zeros((self.max_size, action_dim))
self.r = np.zeros((self.max_size, 1))
self.s_ = np.zeros((self.max_size, state_dim))
self.dw = np.zeros((self.max_size, 1))
def store(self, s, a, r, s_, dw):
self.s[self.count] = s
self.a[self.count] = a
self.r[self.count] = r
self.s_[self.count] = s_
self.dw[self.count] = dw
self.count = (self.count + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
index = np.random.choice(self.size, size=batch_size)
batch_s = torch.tensor(self.s[index], dtype=torch.float)
batch_a = torch.tensor(self.a[index], dtype=torch.float)
batch_r = torch.tensor(self.r[index], dtype=torch.float)
batch_s_ = torch.tensor(self.s_[index], dtype=torch.float)
batch_dw = torch.tensor(self.dw[index], dtype=torch.float)
return batch_s, batch_a, batch_r, batch_s_, batch_dw
class SAC(object):
def __init__(self, state_dim, action_dim, max_action):
self.max_action = max_action
self.hidden_width = 256
self.batch_size = 256
self.GAMMA = 0.99
self.TAU = 0.005
self.lr = 3e-4
self.adaptive_alpha = True
if self.adaptive_alpha:
self.target_entropy = -action_dim
self.log_alpha = torch.zeros(1, requires_grad=True)
self.alpha = self.log_alpha.exp()
self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)
else:
self.alpha = 0.2
self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)
self.critic = Critic(state_dim, action_dim, self.hidden_width)
self.critic_target = copy.deepcopy(self.critic)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)
def choose_action(self, s, deterministic=False):
s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)
a, _ = self.actor(s, deterministic, False)
return a.data.numpy().flatten()
def learn(self, relay_buffer):
batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size)
with torch.no_grad():
batch_a_, log_pi_ = self.actor(batch_s_)
# Compute target Q
target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)
target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)
# Compute current Q
current_Q1, current_Q2 = self.critic(batch_s, batch_a)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Freeze critic networks so you don't waste computational effort
for params in self.critic.parameters():
params.requires_grad = False
a, log_pi = self.actor(batch_s)
Q1, Q2 = self.critic(batch_s, a)
Q = torch.min(Q1, Q2)
actor_loss = (self.alpha * log_pi - Q).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
for params in self.critic.parameters():
params.requires_grad = True
if self.adaptive_alpha:
alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
self.alpha = self.log_alpha.exp()
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)
def evaluate_policy(env, agent):
times = 3
evaluate_reward = 0
for _ in range(times):
s = env.reset()
done = False
episode_reward = 0
while not done:
a = agent.choose_action(s, deterministic=True)
s_, r, done, _ = env.step(a)
episode_reward += r
s = s_
evaluate_reward += episode_reward
return int(evaluate_reward / times)
def reward_adapter(r, env_index):
if env_index == 0:
r = (r + 8) / 8
elif env_index == 1:
if r <= -100:
r = -1
return r
if __name__ == '__main__':
env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
env_index = 0
env = gym.make(env_name[env_index])
env_evaluate = gym.make(env_name[env_index])
number = 1
seed = 0
env.seed(seed)
env.action_space.seed(seed)
env_evaluate.seed(seed)
env_evaluate.action_space.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
max_episode_steps = env._max_episode_steps
print("env={}".format(env_name[env_index]))
print("state_dim={}".format(state_dim))
print("action_dim={}".format(action_dim))
print("max_action={}".format(max_action))
print("max_episode_steps={}".format(max_episode_steps))
agent = SAC(state_dim, action_dim, max_action)
replay_buffer = ReplayBuffer(state_dim, action_dim)
writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))
max_train_steps = 3e6
random_steps = 25e3
evaluate_freq = 5e3
evaluate_num = 0
evaluate_rewards = []
total_steps = 0
while total_steps < max_train_steps:
s = env.reset()
episode_steps = 0
done = False
while not done:
episode_steps += 1
if total_steps < random_steps:
a = env.action_space.sample()
else:
a = agent.choose_action(s)
s_, r, done, _ = env.step(a)
r = reward_adapter(r, env_index)
# but when reaching the max_episode_steps,there is a next state s' actually.
if done and episode_steps != max_episode_steps:
dw = True
else:
dw = False
replay_buffer.store(s, a, r, s_, dw)
s = s_
if total_steps >= random_steps:
agent.learn(replay_buffer)
if (total_steps + 1) % evaluate_freq == 0:
evaluate_num += 1
evaluate_reward = evaluate_policy(env_evaluate, agent)
evaluate_rewards.append(evaluate_reward)
print("evaluate_num:{} \t evaluate_reward:{}".format(evaluate_num, evaluate_reward))
writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)
if evaluate_num % 10 == 0:
np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))
total_steps += 1
| true
| true
|
f71823696e8d384656f678616b79009f7bcd95a6
| 14,225
|
py
|
Python
|
esrally/client.py
|
Kua-Fu/rally
|
7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae
|
[
"Apache-2.0"
] | 1,577
|
2016-04-19T12:38:58.000Z
|
2022-03-31T07:18:25.000Z
|
esrally/client.py
|
Kua-Fu/rally
|
7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae
|
[
"Apache-2.0"
] | 1,079
|
2016-04-19T12:09:16.000Z
|
2022-03-31T05:38:50.000Z
|
esrally/client.py
|
Kua-Fu/rally
|
7c58ef6f81f618fbc142dfa58b0ed00a5b05fbae
|
[
"Apache-2.0"
] | 300
|
2016-04-19T18:27:12.000Z
|
2022-03-23T07:54:16.000Z
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextvars
import logging
import time
import certifi
import urllib3
from esrally import doc_link, exceptions
from esrally.utils import console, convert
class RequestContextManager:
"""
Ensures that request context span the defined scope and allow nesting of request contexts with proper propagation.
This means that we can span a top-level request context, open sub-request contexts that can be used to measure
individual timings and still measure the proper total time on the top-level request context.
"""
def __init__(self, request_context_holder):
self.ctx_holder = request_context_holder
self.ctx = None
self.token = None
async def __aenter__(self):
self.ctx, self.token = self.ctx_holder.init_request_context()
return self
@property
def request_start(self):
return self.ctx["request_start"]
@property
def request_end(self):
return self.ctx["request_end"]
async def __aexit__(self, exc_type, exc_val, exc_tb):
# propagate earliest request start and most recent request end to parent
request_start = self.request_start
request_end = self.request_end
self.ctx_holder.restore_context(self.token)
# don't attempt to restore these values on the top-level context as they don't exist
if self.token.old_value != contextvars.Token.MISSING:
self.ctx_holder.update_request_start(request_start)
self.ctx_holder.update_request_end(request_end)
self.token = None
return False
class RequestContextHolder:
"""
Holds request context variables. This class is only meant to be used together with RequestContextManager.
"""
request_context = contextvars.ContextVar("rally_request_context")
def new_request_context(self):
return RequestContextManager(self)
@classmethod
def init_request_context(cls):
ctx = {}
token = cls.request_context.set(ctx)
return ctx, token
@classmethod
def restore_context(cls, token):
cls.request_context.reset(token)
@classmethod
def update_request_start(cls, new_request_start):
meta = cls.request_context.get()
# this can happen if multiple requests are sent on the wire for one logical request (e.g. scrolls)
if "request_start" not in meta:
meta["request_start"] = new_request_start
@classmethod
def update_request_end(cls, new_request_end):
meta = cls.request_context.get()
meta["request_end"] = new_request_end
@classmethod
def on_request_start(cls):
cls.update_request_start(time.perf_counter())
@classmethod
def on_request_end(cls):
cls.update_request_end(time.perf_counter())
@classmethod
def return_raw_response(cls):
ctx = cls.request_context.get()
ctx["raw_response"] = True
class EsClientFactory:
"""
Abstracts how the Elasticsearch client is created. Intended for testing.
"""
def __init__(self, hosts, client_options):
self.hosts = hosts
self.client_options = dict(client_options)
self.ssl_context = None
self.logger = logging.getLogger(__name__)
masked_client_options = dict(client_options)
if "basic_auth_password" in masked_client_options:
masked_client_options["basic_auth_password"] = "*****"
if "http_auth" in masked_client_options:
masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)
# we're using an SSL context now and it is not allowed to have use_ssl present in client options anymore
if self.client_options.pop("use_ssl", False):
# pylint: disable=import-outside-toplevel
import ssl
self.logger.info("SSL support: on")
self.client_options["scheme"] = "https"
# ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
# but can be disabled via the verify_mode property later on.
self.ssl_context = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH, cafile=self.client_options.pop("ca_certs", certifi.where())
)
if not self.client_options.pop("verify_certs", True):
self.logger.info("SSL certificate verification: off")
# order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
self.ssl_context.verify_mode = ssl.CERT_NONE
self.ssl_context.check_hostname = False
self.logger.warning(
"User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
"benchmark. Disabling urllib warnings now to avoid a logging storm. "
"See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details."
)
# disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
# advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
urllib3.disable_warnings()
else:
self.ssl_context.verify_mode = ssl.CERT_REQUIRED
self.ssl_context.check_hostname = True
self.logger.info("SSL certificate verification: on")
# When using SSL_context, all SSL related kwargs in client options get ignored
client_cert = self.client_options.pop("client_cert", False)
client_key = self.client_options.pop("client_key", False)
if not client_cert and not client_key:
self.logger.info("SSL client authentication: off")
elif bool(client_cert) != bool(client_key):
self.logger.error("Supplied client-options contain only one of client_cert/client_key. ")
defined_client_ssl_option = "client_key" if client_key else "client_cert"
missing_client_ssl_option = "client_cert" if client_key else "client_key"
console.println(
"'{}' is missing from client-options but '{}' has been specified.\n"
"If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
"Read the documentation at {}\n".format(
missing_client_ssl_option,
defined_client_ssl_option,
console.format.link(doc_link("command_line_reference.html#client-options")),
)
)
raise exceptions.SystemSetupError(
"Cannot specify '{}' without also specifying '{}' in client-options.".format(
defined_client_ssl_option, missing_client_ssl_option
)
)
elif client_cert and client_key:
self.logger.info("SSL client authentication: on")
self.ssl_context.load_cert_chain(certfile=client_cert, keyfile=client_key)
else:
self.logger.info("SSL support: off")
self.client_options["scheme"] = "http"
if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
self.logger.info("HTTP basic authentication: on")
self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
else:
self.logger.info("HTTP basic authentication: off")
if self._is_set(self.client_options, "compressed"):
console.warn("You set the deprecated client option 'compressed‘. Please use 'http_compress' instead.", logger=self.logger)
self.client_options["http_compress"] = self.client_options.pop("compressed")
if self._is_set(self.client_options, "http_compress"):
self.logger.info("HTTP compression: on")
else:
self.logger.info("HTTP compression: off")
if self._is_set(self.client_options, "enable_cleanup_closed"):
self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))
def _is_set(self, client_opts, k):
try:
return client_opts[k]
except KeyError:
return False
def create(self):
# pylint: disable=import-outside-toplevel
import elasticsearch
return elasticsearch.Elasticsearch(hosts=self.hosts, ssl_context=self.ssl_context, **self.client_options)
def create_async(self):
# pylint: disable=import-outside-toplevel
import io
import aiohttp
import elasticsearch
from elasticsearch.serializer import JSONSerializer
import esrally.async_connection
class LazyJSONSerializer(JSONSerializer):
def loads(self, s):
meta = RallyAsyncElasticsearch.request_context.get()
if "raw_response" in meta:
return io.BytesIO(s)
else:
return super().loads(s)
async def on_request_start(session, trace_config_ctx, params):
RallyAsyncElasticsearch.on_request_start()
async def on_request_end(session, trace_config_ctx, params):
RallyAsyncElasticsearch.on_request_end()
trace_config = aiohttp.TraceConfig()
trace_config.on_request_start.append(on_request_start)
trace_config.on_request_end.append(on_request_end)
# ensure that we also stop the timer when a request "ends" with an exception (e.g. a timeout)
trace_config.on_request_exception.append(on_request_end)
# override the builtin JSON serializer
self.client_options["serializer"] = LazyJSONSerializer()
self.client_options["trace_config"] = trace_config
class VerifiedAsyncTransport(elasticsearch.AsyncTransport):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# skip verification at this point; we've already verified this earlier with the synchronous client.
# The async client is used in the hot code path and we use customized overrides (such as that we don't
# parse response bodies in some cases for performance reasons, e.g. when using the bulk API).
self._verified_elasticsearch = True
class RallyAsyncElasticsearch(elasticsearch.AsyncElasticsearch, RequestContextHolder):
pass
return RallyAsyncElasticsearch(
hosts=self.hosts,
transport_class=VerifiedAsyncTransport,
connection_class=esrally.async_connection.AIOHttpConnection,
ssl_context=self.ssl_context,
**self.client_options,
)
def wait_for_rest_layer(es, max_attempts=40):
"""
Waits for ``max_attempts`` until Elasticsearch's REST API is available.
:param es: Elasticsearch client to use for connecting.
:param max_attempts: The maximum number of attempts to check whether the REST API is available.
:return: True iff Elasticsearch's REST API is available.
"""
# assume that at least the hosts that we expect to contact should be available. Note that this is not 100%
# bullet-proof as a cluster could have e.g. dedicated masters which are not contained in our list of target hosts
# but this is still better than just checking for any random node's REST API being reachable.
expected_node_count = len(es.transport.hosts)
logger = logging.getLogger(__name__)
for attempt in range(max_attempts):
logger.debug("REST API is available after %s attempts", attempt)
# pylint: disable=import-outside-toplevel
import elasticsearch
try:
# see also WaitForHttpResource in Elasticsearch tests. Contrary to the ES tests we consider the API also
# available when the cluster status is RED (as long as all required nodes are present)
es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
return True
except elasticsearch.ConnectionError as e:
if "SSL: UNKNOWN_PROTOCOL" in str(e):
raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
else:
logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
time.sleep(3)
except elasticsearch.TransportError as e:
# cluster block, x-pack not initialized yet, our wait condition is not reached
if e.status_code in (503, 401, 408):
logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
time.sleep(3)
else:
logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
raise e
return False
| 44.873817
| 139
| 0.664745
|
import contextvars
import logging
import time
import certifi
import urllib3
from esrally import doc_link, exceptions
from esrally.utils import console, convert
class RequestContextManager:
def __init__(self, request_context_holder):
self.ctx_holder = request_context_holder
self.ctx = None
self.token = None
async def __aenter__(self):
self.ctx, self.token = self.ctx_holder.init_request_context()
return self
@property
def request_start(self):
return self.ctx["request_start"]
@property
def request_end(self):
return self.ctx["request_end"]
async def __aexit__(self, exc_type, exc_val, exc_tb):
request_start = self.request_start
request_end = self.request_end
self.ctx_holder.restore_context(self.token)
if self.token.old_value != contextvars.Token.MISSING:
self.ctx_holder.update_request_start(request_start)
self.ctx_holder.update_request_end(request_end)
self.token = None
return False
class RequestContextHolder:
request_context = contextvars.ContextVar("rally_request_context")
def new_request_context(self):
return RequestContextManager(self)
@classmethod
def init_request_context(cls):
ctx = {}
token = cls.request_context.set(ctx)
return ctx, token
@classmethod
def restore_context(cls, token):
cls.request_context.reset(token)
@classmethod
def update_request_start(cls, new_request_start):
meta = cls.request_context.get()
if "request_start" not in meta:
meta["request_start"] = new_request_start
@classmethod
def update_request_end(cls, new_request_end):
meta = cls.request_context.get()
meta["request_end"] = new_request_end
@classmethod
def on_request_start(cls):
cls.update_request_start(time.perf_counter())
@classmethod
def on_request_end(cls):
cls.update_request_end(time.perf_counter())
@classmethod
def return_raw_response(cls):
ctx = cls.request_context.get()
ctx["raw_response"] = True
class EsClientFactory:
def __init__(self, hosts, client_options):
self.hosts = hosts
self.client_options = dict(client_options)
self.ssl_context = None
self.logger = logging.getLogger(__name__)
masked_client_options = dict(client_options)
if "basic_auth_password" in masked_client_options:
masked_client_options["basic_auth_password"] = "*****"
if "http_auth" in masked_client_options:
masked_client_options["http_auth"] = (masked_client_options["http_auth"][0], "*****")
self.logger.info("Creating ES client connected to %s with options [%s]", hosts, masked_client_options)
if self.client_options.pop("use_ssl", False):
# pylint: disable=import-outside-toplevel
import ssl
self.logger.info("SSL support: on")
self.client_options["scheme"] = "https"
# ssl.Purpose.CLIENT_AUTH allows presenting client certs and can only be enabled during instantiation
# but can be disabled via the verify_mode property later on.
self.ssl_context = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH, cafile=self.client_options.pop("ca_certs", certifi.where())
)
if not self.client_options.pop("verify_certs", True):
self.logger.info("SSL certificate verification: off")
# order matters to avoid ValueError: check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED
self.ssl_context.verify_mode = ssl.CERT_NONE
self.ssl_context.check_hostname = False
self.logger.warning(
"User has enabled SSL but disabled certificate verification. This is dangerous but may be ok for a "
"benchmark. Disabling urllib warnings now to avoid a logging storm. "
"See https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings for details."
)
# disable: "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly \
# advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings"
urllib3.disable_warnings()
else:
self.ssl_context.verify_mode = ssl.CERT_REQUIRED
self.ssl_context.check_hostname = True
self.logger.info("SSL certificate verification: on")
# When using SSL_context, all SSL related kwargs in client options get ignored
client_cert = self.client_options.pop("client_cert", False)
client_key = self.client_options.pop("client_key", False)
if not client_cert and not client_key:
self.logger.info("SSL client authentication: off")
elif bool(client_cert) != bool(client_key):
self.logger.error("Supplied client-options contain only one of client_cert/client_key. ")
defined_client_ssl_option = "client_key" if client_key else "client_cert"
missing_client_ssl_option = "client_cert" if client_key else "client_key"
console.println(
"'{}' is missing from client-options but '{}' has been specified.\n"
"If your Elasticsearch setup requires client certificate verification both need to be supplied.\n"
"Read the documentation at {}\n".format(
missing_client_ssl_option,
defined_client_ssl_option,
console.format.link(doc_link("command_line_reference.html#client-options")),
)
)
raise exceptions.SystemSetupError(
"Cannot specify '{}' without also specifying '{}' in client-options.".format(
defined_client_ssl_option, missing_client_ssl_option
)
)
elif client_cert and client_key:
self.logger.info("SSL client authentication: on")
self.ssl_context.load_cert_chain(certfile=client_cert, keyfile=client_key)
else:
self.logger.info("SSL support: off")
self.client_options["scheme"] = "http"
if self._is_set(self.client_options, "basic_auth_user") and self._is_set(self.client_options, "basic_auth_password"):
self.logger.info("HTTP basic authentication: on")
self.client_options["http_auth"] = (self.client_options.pop("basic_auth_user"), self.client_options.pop("basic_auth_password"))
else:
self.logger.info("HTTP basic authentication: off")
if self._is_set(self.client_options, "compressed"):
console.warn("You set the deprecated client option 'compressed‘. Please use 'http_compress' instead.", logger=self.logger)
self.client_options["http_compress"] = self.client_options.pop("compressed")
if self._is_set(self.client_options, "http_compress"):
self.logger.info("HTTP compression: on")
else:
self.logger.info("HTTP compression: off")
if self._is_set(self.client_options, "enable_cleanup_closed"):
self.client_options["enable_cleanup_closed"] = convert.to_bool(self.client_options.pop("enable_cleanup_closed"))
def _is_set(self, client_opts, k):
try:
return client_opts[k]
except KeyError:
return False
def create(self):
import elasticsearch
return elasticsearch.Elasticsearch(hosts=self.hosts, ssl_context=self.ssl_context, **self.client_options)
def create_async(self):
import io
import aiohttp
import elasticsearch
from elasticsearch.serializer import JSONSerializer
import esrally.async_connection
class LazyJSONSerializer(JSONSerializer):
def loads(self, s):
meta = RallyAsyncElasticsearch.request_context.get()
if "raw_response" in meta:
return io.BytesIO(s)
else:
return super().loads(s)
async def on_request_start(session, trace_config_ctx, params):
RallyAsyncElasticsearch.on_request_start()
async def on_request_end(session, trace_config_ctx, params):
RallyAsyncElasticsearch.on_request_end()
trace_config = aiohttp.TraceConfig()
trace_config.on_request_start.append(on_request_start)
trace_config.on_request_end.append(on_request_end)
trace_config.on_request_exception.append(on_request_end)
self.client_options["serializer"] = LazyJSONSerializer()
self.client_options["trace_config"] = trace_config
class VerifiedAsyncTransport(elasticsearch.AsyncTransport):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# The async client is used in the hot code path and we use customized overrides (such as that we don't
self._verified_elasticsearch = True
class RallyAsyncElasticsearch(elasticsearch.AsyncElasticsearch, RequestContextHolder):
pass
return RallyAsyncElasticsearch(
hosts=self.hosts,
transport_class=VerifiedAsyncTransport,
connection_class=esrally.async_connection.AIOHttpConnection,
ssl_context=self.ssl_context,
**self.client_options,
)
def wait_for_rest_layer(es, max_attempts=40):
expected_node_count = len(es.transport.hosts)
logger = logging.getLogger(__name__)
for attempt in range(max_attempts):
logger.debug("REST API is available after %s attempts", attempt)
# pylint: disable=import-outside-toplevel
import elasticsearch
try:
# see also WaitForHttpResource in Elasticsearch tests. Contrary to the ES tests we consider the API also
# available when the cluster status is RED (as long as all required nodes are present)
es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
return True
except elasticsearch.ConnectionError as e:
if "SSL: UNKNOWN_PROTOCOL" in str(e):
raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
else:
logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
time.sleep(3)
except elasticsearch.TransportError as e:
# cluster block, x-pack not initialized yet, our wait condition is not reached
if e.status_code in (503, 401, 408):
logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
time.sleep(3)
else:
logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
raise e
return False
| true
| true
|
f71823bb0a9512d2ba7d2b03e46696bf17185a01
| 2,937
|
py
|
Python
|
cloudfeaster/privacy.py
|
simonsdave/clf
|
643ce7e6ba9bd47c35b235cb24264dbc9024367c
|
[
"MIT"
] | 4
|
2015-12-17T17:32:23.000Z
|
2022-01-02T20:31:08.000Z
|
cloudfeaster/privacy.py
|
simonsdave/clf
|
643ce7e6ba9bd47c35b235cb24264dbc9024367c
|
[
"MIT"
] | 61
|
2015-05-25T10:16:55.000Z
|
2022-01-15T23:49:38.000Z
|
cloudfeaster/privacy.py
|
simonsdave/clf
|
643ce7e6ba9bd47c35b235cb24264dbc9024367c
|
[
"MIT"
] | 2
|
2015-12-10T18:18:10.000Z
|
2021-01-30T15:29:13.000Z
|
"""This module exists as a place to centralize functionality and
configuration related to privacy.
"""
import hashlib
import logging
class RedactingFormatter(object):
"""
Credits - this formatter was heavily inspired by https://relaxdiego.com/2014/07/logging-in-python.html
"""
@classmethod
def install_for_all_handlers(self, crawl_args):
# :TODO: can this be configured when configuring logging
# this is inspired by https://gist.github.com/acdha/9238791
for handler in logging.root.handlers:
handler.setFormatter(self(handler.formatter, crawl_args))
def __init__(self, original_formatter, crawl_args):
self.original_formatter = original_formatter
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def format(self, record):
msg = self.original_formatter.format(record)
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def __getattr__(self, attr):
return getattr(self.original_formatter, attr)
class RedactingFilter(logging.Filter):
def __init__(self, crawl_args):
super(RedactingFilter, self).__init__()
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def filter(self, record):
record.msg = self.redact(record.msg)
if isinstance(record.args, dict):
for k in record.args.keys():
record.args[k] = self._redact(record.args[k])
else:
record.args = tuple(self._redact(arg) for arg in record.args)
return True
def _redact(self, msg):
msg = None
# isinstance(msg, basestring) and msg or str(msg)
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def hash_crawl_arg(crawl_arg):
"""Take a crawl argument (ie. an identifying or authenticating factor)
and create a hash. Hash will have the form <hash function name>:<hash digest>.
"""
hash = hashlib.sha256(str(crawl_arg).encode('utf-8'))
return '{hash_name}:{hash_digest}'.format(hash_name=hash.name, hash_digest=hash.hexdigest())
| 36.7125
| 106
| 0.657133
|
import hashlib
import logging
class RedactingFormatter(object):
@classmethod
def install_for_all_handlers(self, crawl_args):
for handler in logging.root.handlers:
handler.setFormatter(self(handler.formatter, crawl_args))
def __init__(self, original_formatter, crawl_args):
self.original_formatter = original_formatter
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def format(self, record):
msg = self.original_formatter.format(record)
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def __getattr__(self, attr):
return getattr(self.original_formatter, attr)
class RedactingFilter(logging.Filter):
def __init__(self, crawl_args):
super(RedactingFilter, self).__init__()
self._patterns_and_replacements = []
for crawl_arg in crawl_args:
replacement = hash_crawl_arg(crawl_arg)
self._patterns_and_replacements.append((crawl_arg, replacement))
pattern = '"' + '", "'.join(crawl_arg) + '"'
replacement = '"' + '", "'.join(hash_crawl_arg(crawl_arg)) + '"'
self._patterns_and_replacements.append((pattern, replacement))
def filter(self, record):
record.msg = self.redact(record.msg)
if isinstance(record.args, dict):
for k in record.args.keys():
record.args[k] = self._redact(record.args[k])
else:
record.args = tuple(self._redact(arg) for arg in record.args)
return True
def _redact(self, msg):
msg = None
for (pattern, replacement) in self._patterns_and_replacements:
msg = msg.replace(pattern, replacement)
return msg
def hash_crawl_arg(crawl_arg):
hash = hashlib.sha256(str(crawl_arg).encode('utf-8'))
return '{hash_name}:{hash_digest}'.format(hash_name=hash.name, hash_digest=hash.hexdigest())
| true
| true
|
f718246c9b97a010fbc0d6588245bd4852b549f4
| 1,163
|
py
|
Python
|
www/tests/test_import.py
|
sejalseth/brython
|
0b59368eac40a3b1eef7b13f2102b18cb5629687
|
[
"BSD-3-Clause"
] | 5,926
|
2015-01-01T07:45:08.000Z
|
2022-03-31T12:34:38.000Z
|
www/tests/test_import.py
|
sejalseth/brython
|
0b59368eac40a3b1eef7b13f2102b18cb5629687
|
[
"BSD-3-Clause"
] | 1,728
|
2015-01-01T01:09:12.000Z
|
2022-03-30T23:25:22.000Z
|
www/tests/test_import.py
|
sejalseth/brython
|
0b59368eac40a3b1eef7b13f2102b18cb5629687
|
[
"BSD-3-Clause"
] | 574
|
2015-01-02T01:36:10.000Z
|
2022-03-26T10:18:48.000Z
|
import simple
class Simple2:
def __init__(self):
self.info = "SimpleClass2"
class Simple3(simple.Simple):
def __init__(self):
simple.Simple.__init__(self)
text = "text in simple"
assert simple.text == text
_s = simple.Simple()
_s3 = Simple3()
assert _s.info == _s3.info
import recursive_import
_s = recursive_import.myClass()
assert str(_s) == "success!"
import from_import_test.b
assert from_import_test.b.v == 1
import from_import_test.c
assert from_import_test.c.v == 1
# test of keyword "global" in functions of an imported module
import global_in_imported
assert global_in_imported.X == 15
from delegator import Delegator
delegate = Delegator([])
# issue 768
import modtest
# issue 1261
import colorsys
colorsys.ONE_THIRD # no AttributeError
from colorsys import *
try:
ONE_THIRD
raise Exception("should have raised NameError")
except NameError:
pass
# use "__getattr__" and "__dir__" at module level (PEP 562)
assert simple.strange == "a strange name"
assert dir(simple) == ["Simple", "text", "strange", "unknown"]
# issue 1483
from foobar import *
assert str(Foo()) == "foo"
print('passed all tests')
| 18.758065
| 62
| 0.72485
|
import simple
class Simple2:
def __init__(self):
self.info = "SimpleClass2"
class Simple3(simple.Simple):
def __init__(self):
simple.Simple.__init__(self)
text = "text in simple"
assert simple.text == text
_s = simple.Simple()
_s3 = Simple3()
assert _s.info == _s3.info
import recursive_import
_s = recursive_import.myClass()
assert str(_s) == "success!"
import from_import_test.b
assert from_import_test.b.v == 1
import from_import_test.c
assert from_import_test.c.v == 1
import global_in_imported
assert global_in_imported.X == 15
from delegator import Delegator
delegate = Delegator([])
import modtest
import colorsys
colorsys.ONE_THIRD
from colorsys import *
try:
ONE_THIRD
raise Exception("should have raised NameError")
except NameError:
pass
assert simple.strange == "a strange name"
assert dir(simple) == ["Simple", "text", "strange", "unknown"]
from foobar import *
assert str(Foo()) == "foo"
print('passed all tests')
| true
| true
|
f71824b21dd9aad49d682f6da45462de71c6c6b0
| 403
|
py
|
Python
|
mozillians/api/urls.py
|
caktus/mozillians
|
312eb5d993b60092fa4f8eb94548c1db4b21fa01
|
[
"BSD-3-Clause"
] | null | null | null |
mozillians/api/urls.py
|
caktus/mozillians
|
312eb5d993b60092fa4f8eb94548c1db4b21fa01
|
[
"BSD-3-Clause"
] | null | null | null |
mozillians/api/urls.py
|
caktus/mozillians
|
312eb5d993b60092fa4f8eb94548c1db4b21fa01
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import include, patterns, url
from tastypie.api import Api
import mozillians.groups.api
import mozillians.users.api
v1_api = Api(api_name='v1')
v1_api.register(mozillians.users.api.UserResource())
v1_api.register(mozillians.groups.api.GroupResource())
v1_api.register(mozillians.groups.api.SkillResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),)
| 23.705882
| 54
| 0.769231
|
from django.conf.urls import include, patterns, url
from tastypie.api import Api
import mozillians.groups.api
import mozillians.users.api
v1_api = Api(api_name='v1')
v1_api.register(mozillians.users.api.UserResource())
v1_api.register(mozillians.groups.api.GroupResource())
v1_api.register(mozillians.groups.api.SkillResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),)
| true
| true
|
f71826ef8902c67bed889b8698b64504138920f2
| 10,060
|
py
|
Python
|
graspologic/layouts/render.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | 1
|
2021-07-06T15:36:27.000Z
|
2021-07-06T15:36:27.000Z
|
graspologic/layouts/render.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | null | null | null |
graspologic/layouts/render.py
|
tliu68/graspologic
|
d1cf7678bc63ab9769828a82a90f66bf1dfa0eff
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import networkx as nx
from typing import Any, Dict, List, Optional, Tuple
from graspologic.layouts.classes import NodePosition
import matplotlib.pyplot as plt
def _calculate_x_y_domain(
positions: List[NodePosition],
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
"""calculate the overall x/y domain, converting to a square
so we can have a consistent scale
"""
min_x = min_y = float("inf")
max_x = max_y = float("-inf")
for node_position in positions:
min_x = min(min_x, node_position.x - node_position.size)
max_x = max(max_x, node_position.x + node_position.size)
min_y = min(min_y, node_position.y - node_position.size)
max_y = max(max_y, node_position.y + node_position.size)
x_delta = max_x - min_x
y_delta = max_y - min_y
max_delta = max(x_delta, y_delta)
if max_delta == x_delta:
difference = (max_delta - y_delta) / 2
min_y = min_y - difference
max_y = max_y + difference
elif max_delta == y_delta:
difference = (max_delta - x_delta) / 2
min_x = min_x - difference
max_x = max_x + difference
return (min_x, max_x), (min_y, max_y)
def _scale_value(
domain: Tuple[float, float], data_range: Tuple[float, float], value: float
) -> float:
return data_range[0] + (data_range[1] - data_range[0]) * (
(value - domain[0]) / (domain[1] - domain[0])
)
def _scale_node_sizes_for_rendering(
sizes: List[float],
spatial_domain: Tuple[float, float],
spatial_range: Tuple[float, float],
dpi: float,
):
"""scale the size again to match the rendered pixel range
we would expect this to be handled by the underlying viz framework, but it isn't, size is specified
as the bounding box in points of the rendered output, so we need to transform our size to match.
There are 72 points per inch. Multiplying by 72 / dpi converts from pixels to points.
"""
spatial_domain = (0, spatial_domain[1] - spatial_domain[0])
return list(
map(
lambda s: _scale_value(spatial_domain, spatial_range, s * 2 * 72.0 / dpi)
** 2,
sizes,
)
)
def _draw_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_alpha: float,
edge_line_width: float,
edge_alpha: float,
figure_width: float,
figure_height: float,
vertex_line_width: float = 0.01,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
if len(positions) != len(graph.nodes()):
raise ValueError(
f"The number of positions provided {len(positions)} is not the same as the "
f"number of nodes in the graph {len(graph.nodes())}"
)
for position in positions:
if position.node_id not in graph:
raise ValueError(
f"The node position provided for {position.node_id} references a node "
f"not found in our graph"
)
plt.rcParams["figure.dpi"] = dpi # TODO, test at different dpi
plt.clf()
figure = plt.gcf()
ax = plt.gca()
ax.set_axis_off()
figure.set_size_inches(figure_width, figure_height)
window_extent_width = ax.get_window_extent().width
x_domain, y_domain = _calculate_x_y_domain(positions)
position_map = {position.node_id: position for position in positions}
node_positions = {
position.node_id: (position.x, position.y) for position in positions
}
vertices = []
vertex_sizes = []
node_color_list = []
edge_color_list = []
for node in graph.nodes():
vertices.append(node)
vertex_sizes.append(position_map[node].size)
node_color_list.append(node_colors[node])
vertex_sizes = _scale_node_sizes_for_rendering(
vertex_sizes, x_domain, (0, window_extent_width), dpi
)
for source, target in graph.edges():
edge_color_list.append(node_colors[source])
ax.set_xbound(x_domain)
ax.set_xlim(x_domain)
ax.set_ybound(y_domain)
ax.set_ylim(y_domain)
nx.draw_networkx_edges(
graph,
pos=node_positions,
alpha=edge_alpha,
width=edge_line_width,
edge_color=edge_color_list,
arrows=arrows,
ax=ax,
)
nx.draw_networkx_nodes(
graph,
pos=node_positions,
nodelist=vertices,
node_color=node_color_list,
alpha=vertex_alpha,
linewidths=vertex_line_width,
node_size=vertex_sizes,
node_shape=vertex_shape,
ax=ax,
)
def show_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 500,
):
"""
Renders and displays a graph.
Attempts to display it via the platform-specific display library such as TkInter
Edges will be displayed with the same color as the source node.
Parameters
----------
graph : nx.Graph
The graph to be displayed. If the networkx Graph contains only nodes, no
edges will be displayed.
positions : List[:class:`graspologic.layouts.NodePosition`]
The positionsfor every node in the graph.
node_colors : Dict[Any, str]
A mapping of node id to colors. Must contain an entry for every node in the
graph.
vertex_line_width : float
Line width of vertex outline. Default is``0.01``.
vertex_alpha : float
Alpha (transparency) of vertices in visualization. Default is``0.55``.
edge_line_width : float
Line width of edge. Default is``0.5``.
edge_alpha : float
Alpha (transparency) of edges in visualization. Default is``0.02``.
figure_width : float
Width of figure. Default is ``15.0``.
figure_height : float
eight of figure. Default is``15.0``.
light_background : bool
Light background or dark background. Default is``True``.
vertex_shape : str
Matplotlib Marker for the vertex shape. See
`https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_
for a list of allowed values . Default is ``o`` (i.e: a circle)
arrows : bool
For directed graphs, if ``True``, draw arrow heads. Default is ``False``
dpi : float
Dots per inch of the figure. Default is ``500``.
"""
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
plt.gcf().set_facecolor(facecolor)
plt.show()
plt.close("all")
def save_graph(
output_path: str,
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
"""
Renders a graph to file.
Edges will be displayed with the same color as the source node.
Parameters
----------
output_path : str
The output path to write the rendered graph to. Suggested file extension is
``.png``.
graph : nx.Graph
The graph to be displayed. If the networkx Graph contains only nodes, no
edges will be displayed.
positions : List[:class:`graspologic.layouts.NodePosition`]
The positionsfor every node in the graph.
node_colors : Dict[Any, str]
A mapping of node id to colors. Must contain an entry for every node in the
graph.
vertex_line_width : float
Line width of vertex outline. Default is``0.01``.
vertex_alpha : float
Alpha (transparency) of vertices in visualization. Default is``0.55``.
edge_line_width : float
Line width of edge. Default is``0.5``.
edge_alpha : float
Alpha (transparency) of edges in visualization. Default is``0.02``.
figure_width : float
Width of figure. Default is ``15.0``.
figure_height : float
eight of figure. Default is``15.0``.
light_background : bool
Light background or dark background. Default is``True``.
vertex_shape : str
Matplotlib Marker for the vertex shape. See
`https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_
for a list of allowed values . Default is ``o`` (i.e: a circle)
arrows : bool
For directed graphs, if ``True``, draw arrow heads. Default is ``False``
dpi : float
Dots per inch of the figure. Default is ``100``.
Returns
-------
"""
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
plt.savefig(output_path, facecolor=facecolor)
plt.close("all")
| 31.53605
| 103
| 0.641451
|
import networkx as nx
from typing import Any, Dict, List, Optional, Tuple
from graspologic.layouts.classes import NodePosition
import matplotlib.pyplot as plt
def _calculate_x_y_domain(
positions: List[NodePosition],
) -> Tuple[Tuple[float, float], Tuple[float, float]]:
min_x = min_y = float("inf")
max_x = max_y = float("-inf")
for node_position in positions:
min_x = min(min_x, node_position.x - node_position.size)
max_x = max(max_x, node_position.x + node_position.size)
min_y = min(min_y, node_position.y - node_position.size)
max_y = max(max_y, node_position.y + node_position.size)
x_delta = max_x - min_x
y_delta = max_y - min_y
max_delta = max(x_delta, y_delta)
if max_delta == x_delta:
difference = (max_delta - y_delta) / 2
min_y = min_y - difference
max_y = max_y + difference
elif max_delta == y_delta:
difference = (max_delta - x_delta) / 2
min_x = min_x - difference
max_x = max_x + difference
return (min_x, max_x), (min_y, max_y)
def _scale_value(
domain: Tuple[float, float], data_range: Tuple[float, float], value: float
) -> float:
return data_range[0] + (data_range[1] - data_range[0]) * (
(value - domain[0]) / (domain[1] - domain[0])
)
def _scale_node_sizes_for_rendering(
sizes: List[float],
spatial_domain: Tuple[float, float],
spatial_range: Tuple[float, float],
dpi: float,
):
spatial_domain = (0, spatial_domain[1] - spatial_domain[0])
return list(
map(
lambda s: _scale_value(spatial_domain, spatial_range, s * 2 * 72.0 / dpi)
** 2,
sizes,
)
)
def _draw_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_alpha: float,
edge_line_width: float,
edge_alpha: float,
figure_width: float,
figure_height: float,
vertex_line_width: float = 0.01,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
if len(positions) != len(graph.nodes()):
raise ValueError(
f"The number of positions provided {len(positions)} is not the same as the "
f"number of nodes in the graph {len(graph.nodes())}"
)
for position in positions:
if position.node_id not in graph:
raise ValueError(
f"The node position provided for {position.node_id} references a node "
f"not found in our graph"
)
plt.rcParams["figure.dpi"] = dpi
plt.clf()
figure = plt.gcf()
ax = plt.gca()
ax.set_axis_off()
figure.set_size_inches(figure_width, figure_height)
window_extent_width = ax.get_window_extent().width
x_domain, y_domain = _calculate_x_y_domain(positions)
position_map = {position.node_id: position for position in positions}
node_positions = {
position.node_id: (position.x, position.y) for position in positions
}
vertices = []
vertex_sizes = []
node_color_list = []
edge_color_list = []
for node in graph.nodes():
vertices.append(node)
vertex_sizes.append(position_map[node].size)
node_color_list.append(node_colors[node])
vertex_sizes = _scale_node_sizes_for_rendering(
vertex_sizes, x_domain, (0, window_extent_width), dpi
)
for source, target in graph.edges():
edge_color_list.append(node_colors[source])
ax.set_xbound(x_domain)
ax.set_xlim(x_domain)
ax.set_ybound(y_domain)
ax.set_ylim(y_domain)
nx.draw_networkx_edges(
graph,
pos=node_positions,
alpha=edge_alpha,
width=edge_line_width,
edge_color=edge_color_list,
arrows=arrows,
ax=ax,
)
nx.draw_networkx_nodes(
graph,
pos=node_positions,
nodelist=vertices,
node_color=node_color_list,
alpha=vertex_alpha,
linewidths=vertex_line_width,
node_size=vertex_sizes,
node_shape=vertex_shape,
ax=ax,
)
def show_graph(
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 500,
):
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
plt.gcf().set_facecolor(facecolor)
plt.show()
plt.close("all")
def save_graph(
output_path: str,
graph: nx.Graph,
positions: List[NodePosition],
node_colors: Dict[Any, str],
vertex_line_width: float = 0.01,
vertex_alpha: float = 0.55,
edge_line_width: float = 0.5,
edge_alpha: float = 0.02,
figure_width: float = 15.0,
figure_height: float = 15.0,
light_background: bool = True,
vertex_shape: str = "o",
arrows: bool = False,
dpi: int = 100,
):
_draw_graph(
graph=graph,
positions=positions,
node_colors=node_colors,
vertex_line_width=vertex_line_width,
vertex_alpha=vertex_alpha,
edge_line_width=edge_line_width,
edge_alpha=edge_alpha,
figure_width=figure_width,
figure_height=figure_height,
vertex_shape=vertex_shape,
arrows=arrows,
dpi=dpi,
)
ax = plt.gca()
if light_background:
facecolor = ax.get_facecolor()
else:
facecolor = "#030303"
plt.savefig(output_path, facecolor=facecolor)
plt.close("all")
| true
| true
|
f7182817f87774a0db791bb29d67b66f78795e8a
| 500
|
py
|
Python
|
multimodalDetection/splitAudio.py
|
Rubik90/TFM_AG
|
5e836245d0704122f2a0d47413e93bf53d966ca0
|
[
"MIT"
] | 1
|
2021-02-16T18:32:38.000Z
|
2021-02-16T18:32:38.000Z
|
multimodalDetection/splitAudio.py
|
Rubik90/TFM_AG
|
5e836245d0704122f2a0d47413e93bf53d966ca0
|
[
"MIT"
] | null | null | null |
multimodalDetection/splitAudio.py
|
Rubik90/TFM_AG
|
5e836245d0704122f2a0d47413e93bf53d966ca0
|
[
"MIT"
] | 1
|
2021-02-16T18:32:39.000Z
|
2021-02-16T18:32:39.000Z
|
import subprocess
import os
import shutil
def split(audio,output):
command = f"ffmpeg -i '{audio}' -f segment -segment_time 0.0416666666666667 -c copy {output}"
subprocess.call(command,shell=True)
split('audio.wav','out%03d.wav')
elements = os.listdir('./')
if(os.path.isdir("./audioChunks/")):
shutil.rmtree("./audioChunks/")
os.mkdir("./audioChunks/")
for element in elements:
if element[:3]=="out":
shutil.copyfile('./'+element, './audioChunks/'+element)
os.remove(element)
| 26.315789
| 97
| 0.688
|
import subprocess
import os
import shutil
def split(audio,output):
command = f"ffmpeg -i '{audio}' -f segment -segment_time 0.0416666666666667 -c copy {output}"
subprocess.call(command,shell=True)
split('audio.wav','out%03d.wav')
elements = os.listdir('./')
if(os.path.isdir("./audioChunks/")):
shutil.rmtree("./audioChunks/")
os.mkdir("./audioChunks/")
for element in elements:
if element[:3]=="out":
shutil.copyfile('./'+element, './audioChunks/'+element)
os.remove(element)
| true
| true
|
f718289e90dd9a48a67856934c04c619c34d7999
| 252
|
py
|
Python
|
practice_exam/1070.py
|
omar115/code_for_Kids
|
3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd
|
[
"MIT"
] | null | null | null |
practice_exam/1070.py
|
omar115/code_for_Kids
|
3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd
|
[
"MIT"
] | null | null | null |
practice_exam/1070.py
|
omar115/code_for_Kids
|
3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd
|
[
"MIT"
] | 2
|
2021-01-08T03:52:46.000Z
|
2021-04-01T19:16:12.000Z
|
x = int(input())
if x % 2 == 0:
y = x + 1
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
else:
y = x
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
| 11.454545
| 16
| 0.353175
|
x = int(input())
if x % 2 == 0:
y = x + 1
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
else:
y = x
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
y = y + 2
print(y)
| true
| true
|
f718293c3ac36f83b257d72add6c9f74092f29f6
| 2,148
|
py
|
Python
|
src/git_portfolio/use_cases/gh.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
src/git_portfolio/use_cases/gh.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
src/git_portfolio/use_cases/gh.py
|
staticdev/github-portfolio
|
850461eed8160e046ee16664ac3dbc19e3ec0965
|
[
"MIT"
] | null | null | null |
"""Base Github use case."""
from __future__ import annotations
import traceback
from typing import Any
import git_portfolio.config_manager as cm
import git_portfolio.github_service as gs
import git_portfolio.responses as res
class GhUseCase:
"""Github use case."""
def __init__(
self,
config_manager: cm.ConfigManager,
github_service: gs.AbstractGithubService,
github_repo: str = "",
) -> None:
"""Initializer."""
self.config_manager = config_manager
self.github_service = github_service
self.github_repo = github_repo
self.responses: list[res.Response] = []
def call_github_service(
self, method: str, *args: Any, **kwargs: Any
) -> res.Response:
"""Handle error from github_service and return response."""
response: res.Response
try:
method_to_call = getattr(self.github_service, method)
output = method_to_call(*args, **kwargs)
response = res.ResponseSuccess(output)
except gs.GithubServiceError as gse:
response = res.ResponseFailure(res.ResponseTypes.RESOURCE_ERROR, str(gse))
except Exception:
error_msg = (
"An unexpected error occured. Please report at "
"https://github.com/staticdev/git-portfolio/issues/new "
f"with the following info:\n{traceback.format_exc()}"
)
response = res.ResponseFailure(res.ResponseTypes.SYSTEM_ERROR, error_msg)
self.responses.append(response)
return response
def action(self, github_repo: str, *args: Any, **kwargs: Any) -> None:
"""Execute some action in a repo."""
raise NotImplementedError # pragma: no cover
def execute(self, *args: Any, **kwargs: Any) -> list[res.Response]:
"""Execute GitHubUseCase."""
if self.github_repo:
self.action(self.github_repo, *args, **kwargs)
else:
for github_repo in self.config_manager.config.github_selected_repos:
self.action(github_repo, *args, **kwargs)
return self.responses
| 35.8
| 86
| 0.635475
|
from __future__ import annotations
import traceback
from typing import Any
import git_portfolio.config_manager as cm
import git_portfolio.github_service as gs
import git_portfolio.responses as res
class GhUseCase:
def __init__(
self,
config_manager: cm.ConfigManager,
github_service: gs.AbstractGithubService,
github_repo: str = "",
) -> None:
self.config_manager = config_manager
self.github_service = github_service
self.github_repo = github_repo
self.responses: list[res.Response] = []
def call_github_service(
self, method: str, *args: Any, **kwargs: Any
) -> res.Response:
response: res.Response
try:
method_to_call = getattr(self.github_service, method)
output = method_to_call(*args, **kwargs)
response = res.ResponseSuccess(output)
except gs.GithubServiceError as gse:
response = res.ResponseFailure(res.ResponseTypes.RESOURCE_ERROR, str(gse))
except Exception:
error_msg = (
"An unexpected error occured. Please report at "
"https://github.com/staticdev/git-portfolio/issues/new "
f"with the following info:\n{traceback.format_exc()}"
)
response = res.ResponseFailure(res.ResponseTypes.SYSTEM_ERROR, error_msg)
self.responses.append(response)
return response
def action(self, github_repo: str, *args: Any, **kwargs: Any) -> None:
raise NotImplementedError
def execute(self, *args: Any, **kwargs: Any) -> list[res.Response]:
if self.github_repo:
self.action(self.github_repo, *args, **kwargs)
else:
for github_repo in self.config_manager.config.github_selected_repos:
self.action(github_repo, *args, **kwargs)
return self.responses
| true
| true
|
f7182afb3ce7fb885646e29465ecd1cff61e5f5f
| 1,224
|
py
|
Python
|
src/test/python/apache/thermos/common/test_pathspec.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | 1
|
2016-05-01T20:17:24.000Z
|
2016-05-01T20:17:24.000Z
|
src/test/python/apache/thermos/common/test_pathspec.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
src/test/python/apache/thermos/common/test_pathspec.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from apache.thermos.common.path import TaskPath
def test_legacy_task_roots():
assert TaskPath().given(task_id='foo').getpath('checkpoint_path').startswith(
TaskPath.DEFAULT_CHECKPOINT_ROOT)
assert TaskPath(root='/var/lib/mesos').given(task_id='foo').getpath('checkpoint_path').startswith(
'/var/lib/mesos')
def test_legacy_log_dirs():
assert TaskPath().given(task_id='foo').getpath('process_logbase') == os.path.join(
TaskPath.DEFAULT_CHECKPOINT_ROOT, 'logs', 'foo')
assert TaskPath(log_dir='sloth_love_chunk').given(task_id='foo').getpath(
'process_logbase') == 'sloth_love_chunk'
| 36
| 100
| 0.75
|
import os
from apache.thermos.common.path import TaskPath
def test_legacy_task_roots():
assert TaskPath().given(task_id='foo').getpath('checkpoint_path').startswith(
TaskPath.DEFAULT_CHECKPOINT_ROOT)
assert TaskPath(root='/var/lib/mesos').given(task_id='foo').getpath('checkpoint_path').startswith(
'/var/lib/mesos')
def test_legacy_log_dirs():
assert TaskPath().given(task_id='foo').getpath('process_logbase') == os.path.join(
TaskPath.DEFAULT_CHECKPOINT_ROOT, 'logs', 'foo')
assert TaskPath(log_dir='sloth_love_chunk').given(task_id='foo').getpath(
'process_logbase') == 'sloth_love_chunk'
| true
| true
|
f7182bad3db9d7c2c8ceff11acfdcada87264c50
| 1,951
|
py
|
Python
|
ex35.py
|
sansanaye12/python_exercises
|
cfe13b34dba4414f421fd2302a9d620e4d32b987
|
[
"MIT"
] | null | null | null |
ex35.py
|
sansanaye12/python_exercises
|
cfe13b34dba4414f421fd2302a9d620e4d32b987
|
[
"MIT"
] | null | null | null |
ex35.py
|
sansanaye12/python_exercises
|
cfe13b34dba4414f421fd2302a9d620e4d32b987
|
[
"MIT"
] | null | null | null |
from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice=input("> ")
if "0" in choice or "1" in choice:
how_much=int(choice)
else:
dead("Man,learn to type a number.")
if how_much < 50:
print("Nice,you're not greedy,you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved=False
while True:
choice=input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved=True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He,it,whatever stares at you and you go insane.")
print("Do you flee for your life or eat your heads?")
choice=input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice=input('> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| 31.983607
| 68
| 0.579703
|
from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice=input("> ")
if "0" in choice or "1" in choice:
how_much=int(choice)
else:
dead("Man,learn to type a number.")
if how_much < 50:
print("Nice,you're not greedy,you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved=False
while True:
choice=input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved=True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He,it,whatever stares at you and you go insane.")
print("Do you flee for your life or eat your heads?")
choice=input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice=input('> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| false
| true
|
f7182bb3084036979b843ba667b570839f34a904
| 430
|
py
|
Python
|
weather__openweathermap__pyowm.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
weather__openweathermap__pyowm.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
weather__openweathermap__pyowm.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
API_KEY = '87c7712a9b72646a269102230858837b'
place = 'Магнитогорск'
# pip install pyowm
import pyowm
owm = pyowm.OWM(API_KEY)
observation = owm.weather_at_place(place)
w = observation.get_weather()
temperature = w.get_temperature('celsius')['temp']
status = w.get_status()
print('Температура: {} °C'.format(temperature))
print('Небо: {}'.format(status))
| 21.5
| 50
| 0.725581
|
__author__ = 'ipetrash'
API_KEY = '87c7712a9b72646a269102230858837b'
place = 'Магнитогорск'
import pyowm
owm = pyowm.OWM(API_KEY)
observation = owm.weather_at_place(place)
w = observation.get_weather()
temperature = w.get_temperature('celsius')['temp']
status = w.get_status()
print('Температура: {} °C'.format(temperature))
print('Небо: {}'.format(status))
| true
| true
|
f7182c6753abe0014d9e87b3ad738768a634aa00
| 10,275
|
py
|
Python
|
redis_simple_cache/test_rediscache.py
|
tru-software/redis-simple-cache
|
6480b38aa5efc022d91cfb87b168e75c03be181a
|
[
"BSD-3-Clause"
] | null | null | null |
redis_simple_cache/test_rediscache.py
|
tru-software/redis-simple-cache
|
6480b38aa5efc022d91cfb87b168e75c03be181a
|
[
"BSD-3-Clause"
] | null | null | null |
redis_simple_cache/test_rediscache.py
|
tru-software/redis-simple-cache
|
6480b38aa5efc022d91cfb87b168e75c03be181a
|
[
"BSD-3-Clause"
] | null | null | null |
# SimpleCache Tests
# ~~~~~~~~~~~~~~~~~~~
from datetime import timedelta
from rediscache import SimpleCache, RedisConnect, cache_it, cache_it_json, CacheMissException, ExpiredKeyException, \
DoNotCache
from unittest import TestCase, main
import time
class ComplexNumber(object): # used in pickle test
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
class SimpleCacheTest(TestCase):
def setUp(self):
self.c = SimpleCache(10) # Cache that has a maximum limit of 10 keys
self.assertIsNotNone(self.c.connection)
self.redis = RedisConnect().connect()
def test_expire(self):
quick_c = SimpleCache()
quick_c.store("foo", "bar", expire=1)
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
quick_c.store("foo", "bar", expire=timedelta(seconds=1))
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
def test_miss(self):
self.assertRaises(CacheMissException, self.c.get, "blablabla")
def test_kwargs_decorator(self):
@cache_it_json(cache=self.c)
def add_it(a, b=10, c=5):
return a + b + c
add_it(3)
self.assertEqual(add_it(3), 18)
add_it(5, b=7)
self.assertEqual(add_it(5, b=7), 17)
add_it(6, c=3)
self.assertEqual(add_it(6, c=3), 19)
def test_store_retrieve(self):
self.c.store("foo", "bar")
foo = self.c.get("foo")
self.assertEqual(foo, "bar")
def test_json(self):
payload = {"example": "data"}
self.c.store_json("json", payload)
self.assertEqual(self.c.get_json("json"), payload)
def test_pickle(self):
payload = ComplexNumber(3, 4)
self.c.store_pickle("pickle", payload)
self.assertEqual(self.c.get_pickle("pickle"), payload)
def test_decorator(self):
self.redis.flushall()
mutable = []
@cache_it(cache=self.c)
def append(n):
mutable.append(n)
return mutable
append(1)
len_before = len(mutable)
mutable_cached = append(1)
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_do_not_cache(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
raise DoNotCache(result)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
self.assertEqual(r4, (10 * 20))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_reraised(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
try:
raise DoNotCache(result)
except DoNotCache as e:
raise e
except Exception:
pass
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r4, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_wrapping_exception(self):
@cache_it(cache=self.c)
def test_no_cache(n):
try:
result = n / 0
except ZeroDivisionError as e:
raise DoNotCache(e)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
self.assertTrue(isinstance(r1, ZeroDivisionError))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_json(self):
import random
mutable = {}
@cache_it_json(cache=self.c)
def set_key(n):
mutable[str(random.random())] = n
return mutable
set_key('a')
len_before = len(mutable)
mutable_cached = set_key('a')
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_complex_type(self):
import math
@cache_it(cache=self.c)
def add(x, y):
return ComplexNumber(x.real + y.real, x.imag + y.imag)
result = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
result_cached = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
self.assertNotEqual(id(result), id(result_cached))
self.assertEqual(result, result_cached)
self.assertEqual(result, complex(3, 4) + complex(4, 5))
def test_cache_limit(self):
for i in range(100):
self.c.store("foo%d" % i, "foobar")
self.assertTrue(len(self.c) <= 10)
self.assertTrue(len(self.c.keys()) <= 10)
def test_flush(self):
connection = self.c.connection
connection.set("will_not_be_deleted", '42')
self.c.store("will_be_deleted", '10')
len_before = len(self.c)
len_keys_before = len(connection.keys(self.c.make_key("*")))
self.c.flush()
len_after = len(self.c)
len_keys_after = connection.get("will_not_be_deleted")
self.assertTrue(len_before > 0)
self.assertEqual(len_after, 0)
self.assertTrue(len_keys_before > 0)
self.assertEqual(len_keys_after, '42')
self.assertEqual(connection.get("will_not_be_deleted"), '42')
connection.delete("will_not_be_deleted")
def test_flush_namespace(self):
self.redis.flushall()
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
len_keys_before = len(self.c.keys())
self.c.flush_namespace('foo')
len_keys_after = len(self.c.keys())
self.assertEqual((len_keys_before - len_keys_after), 2)
self.assertEqual(self.c.get('fii'), 'bur')
self.assertRaises(CacheMissException, self.c.get, "foo:one")
self.assertRaises(CacheMissException, self.c.get, "foo:two")
self.c.flush()
def test_flush_multiple(self):
c1 = SimpleCache(10, namespace=__name__)
c2 = SimpleCache(10)
c1.store("foo", "bar")
c2.store("foo", "bar")
c1.flush()
self.assertEqual(len(c1), 0)
self.assertEqual(len(c2), 1)
c2.flush()
def test_expire_all_in_set(self):
self.c.store("foo", "bir")
self.c.store("fuu", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_all_in_set(), (3, 3))
self.assertRaises(ExpiredKeyException, self.c.get, "foo")
self.assertRaises(ExpiredKeyException, self.c.get, "fuu")
self.assertRaises(ExpiredKeyException, self.c.get, "fii")
self.assertTrue(self.c.isexpired("foo"))
self.assertTrue(self.c.isexpired("fuu"))
self.assertTrue(self.c.isexpired("fii"))
def test_expire_namespace(self):
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_namespace('foo'), (3, 2))
self.assertRaises(ExpiredKeyException, self.c.get, "foo:one")
self.assertRaises(ExpiredKeyException, self.c.get, "foo:two")
self.assertTrue(self.c.isexpired("foo:one"))
self.assertTrue(self.c.isexpired("foo:two"))
self.assertTrue(self.c.isexpired("fii") > 0)
self.c.flush()
def test_mget(self):
self.c.store("a1", "a")
self.c.store("a2", "aa")
self.c.store("a3", "aaa")
d = self.c.mget(["a1", "a2", "a3"])
self.assertEqual(d["a1"], "a")
self.assertEqual(d["a2"], "aa")
self.assertEqual(d["a3"], "aaa")
def test_mget_nonexistant_key(self):
self.c.store("b1", "b")
self.c.store("b3", "bbb")
d = self.c.mget(["b1", "b2", "b3"])
self.assertEqual(d["b1"], "b")
self.assertTrue("b2" not in d)
self.assertEqual(d["b3"], "bbb")
def test_mget_expiry(self):
self.c.store("c1", "c")
self.c.store("c2", "cc", expire=1)
self.c.store("c3", "ccc")
time.sleep(1.1)
d = self.c.mget(["c1", "c2", "c3"])
self.assertEqual(d["c1"], "c")
self.assertTrue("c2" not in d)
self.assertEqual(d["c3"], "ccc")
def test_mget_json(self):
payload_a1 = {"example_a1": "data_a1"}
payload_a2 = {"example_a2": "data_a2"}
self.c.store_json("json_a1", payload_a1)
self.c.store_json("json_a2", payload_a2)
d = self.c.mget_json(["json_a1", "json_a2"])
self.assertEqual(d["json_a1"], payload_a1)
self.assertEqual(d["json_a2"], payload_a2)
def test_mget_json_nonexistant_key(self):
payload_b1 = {"example_b1": "data_b1"}
payload_b3 = {"example_b3": "data_b3"}
self.c.store_json("json_b1", payload_b1)
self.c.store_json("json_b3", payload_b3)
d = self.c.mget_json(["json_b1", "json_b2", "json_b3"])
self.assertEqual(d["json_b1"], payload_b1)
self.assertTrue("json_b2" not in d)
self.assertEqual(d["json_b3"], payload_b3)
def test_invalidate_key(self):
self.c.store("d1", "d")
self.c.store("d2", "dd")
self.c.store("d3", "ddd")
self.c.invalidate("d2")
d = self.c.mget(["d1", "d2", "d3"])
self.assertEqual(d["d1"], "d")
self.assertTrue("d2" not in d)
self.assertEqual(d["d3"], "ddd")
def tearDown(self):
self.c.flush()
if __name__ == '__main__':
main()
| 33.252427
| 117
| 0.588808
|
from datetime import timedelta
from rediscache import SimpleCache, RedisConnect, cache_it, cache_it_json, CacheMissException, ExpiredKeyException, \
DoNotCache
from unittest import TestCase, main
import time
class ComplexNumber(object):
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
class SimpleCacheTest(TestCase):
def setUp(self):
self.c = SimpleCache(10)
self.assertIsNotNone(self.c.connection)
self.redis = RedisConnect().connect()
def test_expire(self):
quick_c = SimpleCache()
quick_c.store("foo", "bar", expire=1)
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
quick_c.store("foo", "bar", expire=timedelta(seconds=1))
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
def test_miss(self):
self.assertRaises(CacheMissException, self.c.get, "blablabla")
def test_kwargs_decorator(self):
@cache_it_json(cache=self.c)
def add_it(a, b=10, c=5):
return a + b + c
add_it(3)
self.assertEqual(add_it(3), 18)
add_it(5, b=7)
self.assertEqual(add_it(5, b=7), 17)
add_it(6, c=3)
self.assertEqual(add_it(6, c=3), 19)
def test_store_retrieve(self):
self.c.store("foo", "bar")
foo = self.c.get("foo")
self.assertEqual(foo, "bar")
def test_json(self):
payload = {"example": "data"}
self.c.store_json("json", payload)
self.assertEqual(self.c.get_json("json"), payload)
def test_pickle(self):
payload = ComplexNumber(3, 4)
self.c.store_pickle("pickle", payload)
self.assertEqual(self.c.get_pickle("pickle"), payload)
def test_decorator(self):
self.redis.flushall()
mutable = []
@cache_it(cache=self.c)
def append(n):
mutable.append(n)
return mutable
append(1)
len_before = len(mutable)
mutable_cached = append(1)
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_do_not_cache(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
raise DoNotCache(result)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
self.assertEqual(r4, (10 * 20))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_reraised(self):
@cache_it(cache=self.c)
def test_no_cache(n):
result = n * 10
try:
raise DoNotCache(result)
except DoNotCache as e:
raise e
except Exception:
pass
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
r2 = test_no_cache(10)
r3 = test_no_cache(30)
r4 = test_no_cache(20)
self.assertEqual(r1, (10 * 20))
self.assertEqual(r4, (10 * 20))
self.assertEqual(r2, (10 * 10))
self.assertEqual(r3, (10 * 30))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_do_not_cache_wrapping_exception(self):
@cache_it(cache=self.c)
def test_no_cache(n):
try:
result = n / 0
except ZeroDivisionError as e:
raise DoNotCache(e)
keys_before = len(self.c.keys())
r1 = test_no_cache(20)
self.assertTrue(isinstance(r1, ZeroDivisionError))
keys_after = len(self.c.keys())
self.assertEqual(keys_before, keys_after)
def test_decorator_json(self):
import random
mutable = {}
@cache_it_json(cache=self.c)
def set_key(n):
mutable[str(random.random())] = n
return mutable
set_key('a')
len_before = len(mutable)
mutable_cached = set_key('a')
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_complex_type(self):
import math
@cache_it(cache=self.c)
def add(x, y):
return ComplexNumber(x.real + y.real, x.imag + y.imag)
result = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
result_cached = add(ComplexNumber(3, 4), ComplexNumber(4, 5))
self.assertNotEqual(id(result), id(result_cached))
self.assertEqual(result, result_cached)
self.assertEqual(result, complex(3, 4) + complex(4, 5))
def test_cache_limit(self):
for i in range(100):
self.c.store("foo%d" % i, "foobar")
self.assertTrue(len(self.c) <= 10)
self.assertTrue(len(self.c.keys()) <= 10)
def test_flush(self):
connection = self.c.connection
connection.set("will_not_be_deleted", '42')
self.c.store("will_be_deleted", '10')
len_before = len(self.c)
len_keys_before = len(connection.keys(self.c.make_key("*")))
self.c.flush()
len_after = len(self.c)
len_keys_after = connection.get("will_not_be_deleted")
self.assertTrue(len_before > 0)
self.assertEqual(len_after, 0)
self.assertTrue(len_keys_before > 0)
self.assertEqual(len_keys_after, '42')
self.assertEqual(connection.get("will_not_be_deleted"), '42')
connection.delete("will_not_be_deleted")
def test_flush_namespace(self):
self.redis.flushall()
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
len_keys_before = len(self.c.keys())
self.c.flush_namespace('foo')
len_keys_after = len(self.c.keys())
self.assertEqual((len_keys_before - len_keys_after), 2)
self.assertEqual(self.c.get('fii'), 'bur')
self.assertRaises(CacheMissException, self.c.get, "foo:one")
self.assertRaises(CacheMissException, self.c.get, "foo:two")
self.c.flush()
def test_flush_multiple(self):
c1 = SimpleCache(10, namespace=__name__)
c2 = SimpleCache(10)
c1.store("foo", "bar")
c2.store("foo", "bar")
c1.flush()
self.assertEqual(len(c1), 0)
self.assertEqual(len(c2), 1)
c2.flush()
def test_expire_all_in_set(self):
self.c.store("foo", "bir")
self.c.store("fuu", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_all_in_set(), (3, 3))
self.assertRaises(ExpiredKeyException, self.c.get, "foo")
self.assertRaises(ExpiredKeyException, self.c.get, "fuu")
self.assertRaises(ExpiredKeyException, self.c.get, "fii")
self.assertTrue(self.c.isexpired("foo"))
self.assertTrue(self.c.isexpired("fuu"))
self.assertTrue(self.c.isexpired("fii"))
def test_expire_namespace(self):
self.c.store("foo:one", "bir")
self.c.store("foo:two", "bor")
self.c.store("fii", "bur")
self.assertEqual(self.c.expire_namespace('foo'), (3, 2))
self.assertRaises(ExpiredKeyException, self.c.get, "foo:one")
self.assertRaises(ExpiredKeyException, self.c.get, "foo:two")
self.assertTrue(self.c.isexpired("foo:one"))
self.assertTrue(self.c.isexpired("foo:two"))
self.assertTrue(self.c.isexpired("fii") > 0)
self.c.flush()
def test_mget(self):
self.c.store("a1", "a")
self.c.store("a2", "aa")
self.c.store("a3", "aaa")
d = self.c.mget(["a1", "a2", "a3"])
self.assertEqual(d["a1"], "a")
self.assertEqual(d["a2"], "aa")
self.assertEqual(d["a3"], "aaa")
def test_mget_nonexistant_key(self):
self.c.store("b1", "b")
self.c.store("b3", "bbb")
d = self.c.mget(["b1", "b2", "b3"])
self.assertEqual(d["b1"], "b")
self.assertTrue("b2" not in d)
self.assertEqual(d["b3"], "bbb")
def test_mget_expiry(self):
self.c.store("c1", "c")
self.c.store("c2", "cc", expire=1)
self.c.store("c3", "ccc")
time.sleep(1.1)
d = self.c.mget(["c1", "c2", "c3"])
self.assertEqual(d["c1"], "c")
self.assertTrue("c2" not in d)
self.assertEqual(d["c3"], "ccc")
def test_mget_json(self):
payload_a1 = {"example_a1": "data_a1"}
payload_a2 = {"example_a2": "data_a2"}
self.c.store_json("json_a1", payload_a1)
self.c.store_json("json_a2", payload_a2)
d = self.c.mget_json(["json_a1", "json_a2"])
self.assertEqual(d["json_a1"], payload_a1)
self.assertEqual(d["json_a2"], payload_a2)
def test_mget_json_nonexistant_key(self):
payload_b1 = {"example_b1": "data_b1"}
payload_b3 = {"example_b3": "data_b3"}
self.c.store_json("json_b1", payload_b1)
self.c.store_json("json_b3", payload_b3)
d = self.c.mget_json(["json_b1", "json_b2", "json_b3"])
self.assertEqual(d["json_b1"], payload_b1)
self.assertTrue("json_b2" not in d)
self.assertEqual(d["json_b3"], payload_b3)
def test_invalidate_key(self):
self.c.store("d1", "d")
self.c.store("d2", "dd")
self.c.store("d3", "ddd")
self.c.invalidate("d2")
d = self.c.mget(["d1", "d2", "d3"])
self.assertEqual(d["d1"], "d")
self.assertTrue("d2" not in d)
self.assertEqual(d["d3"], "ddd")
def tearDown(self):
self.c.flush()
if __name__ == '__main__':
main()
| true
| true
|
f7182cf422c1ce203e5951c6e5cd92b91dbb91ce
| 4,237
|
py
|
Python
|
chatbot_env/Lib/site-packages/nltk/test/unit/lm/test_counter.py
|
rakmakan/Chatbot
|
d04bc1526b56961a16c25148d9ef18c4f157e9c4
|
[
"MIT"
] | 10
|
2021-05-31T07:18:08.000Z
|
2022-03-19T09:20:11.000Z
|
chatbot_env/Lib/site-packages/nltk/test/unit/lm/test_counter.py
|
rakmakan/Chatbot
|
d04bc1526b56961a16c25148d9ef18c4f157e9c4
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
chatbot_env/Lib/site-packages/nltk/test/unit/lm/test_counter.py
|
rakmakan/Chatbot
|
d04bc1526b56961a16c25148d9ef18c4f157e9c4
|
[
"MIT"
] | 7
|
2015-09-30T03:00:44.000Z
|
2021-06-04T05:34:39.000Z
|
# Natural Language Toolkit: Language Model Unit Tests
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Ilia Kurenkov <ilia.kurenkov@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import unittest
from nltk import FreqDist
from nltk.lm import NgramCounter
from nltk.util import everygrams
class NgramCounterTests(unittest.TestCase):
"""Tests for NgramCounter that only involve lookup, no modification."""
@classmethod
def setUpClass(cls):
text = [list("abcd"), list("egdbe")]
cls.trigram_counter = NgramCounter(
(everygrams(sent, max_len=3) for sent in text)
)
cls.bigram_counter = NgramCounter(
(everygrams(sent, max_len=2) for sent in text)
)
def test_N(self):
self.assertEqual(self.bigram_counter.N(), 16)
self.assertEqual(self.trigram_counter.N(), 21)
def test_counter_len_changes_with_lookup(self):
self.assertEqual(len(self.bigram_counter), 2)
_ = self.bigram_counter[50]
self.assertEqual(len(self.bigram_counter), 3)
def test_ngram_order_access_unigrams(self):
self.assertEqual(self.bigram_counter[1], self.bigram_counter.unigrams)
def test_ngram_conditional_freqdist(self):
expected_trigram_contexts = [
("a", "b"),
("b", "c"),
("e", "g"),
("g", "d"),
("d", "b"),
]
expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)]
bigrams = self.trigram_counter[2]
trigrams = self.trigram_counter[3]
self.assertCountEqual(expected_bigram_contexts, bigrams.conditions())
self.assertCountEqual(expected_trigram_contexts, trigrams.conditions())
def test_bigram_counts_seen_ngrams(self):
b_given_a_count = 1
unk_given_b_count = 1
self.assertEqual(b_given_a_count, self.bigram_counter[["a"]]["b"])
self.assertEqual(unk_given_b_count, self.bigram_counter[["b"]]["c"])
def test_bigram_counts_unseen_ngrams(self):
z_given_b_count = 0
self.assertEqual(z_given_b_count, self.bigram_counter[["b"]]["z"])
def test_unigram_counts_seen_words(self):
expected_count_b = 2
self.assertEqual(expected_count_b, self.bigram_counter["b"])
def test_unigram_counts_completely_unseen_words(self):
unseen_count = 0
self.assertEqual(unseen_count, self.bigram_counter["z"])
class NgramCounterTrainingTests(unittest.TestCase):
def setUp(self):
self.counter = NgramCounter()
def test_empty_string(self):
test = NgramCounter("")
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_empty_list(self):
test = NgramCounter([])
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_None(self):
test = NgramCounter(None)
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_train_on_unigrams(self):
words = list("abcd")
counter = NgramCounter([[(w,) for w in words]])
self.assertFalse(bool(counter[3]))
self.assertFalse(bool(counter[2]))
self.assertCountEqual(words, counter[1].keys())
def test_train_on_illegal_sentences(self):
str_sent = ["Check", "this", "out", "!"]
list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]]
with self.assertRaises(TypeError):
NgramCounter([str_sent])
with self.assertRaises(TypeError):
NgramCounter([list_sent])
def test_train_on_bigrams(self):
bigram_sent = [("a", "b"), ("c", "d")]
counter = NgramCounter([bigram_sent])
self.assertFalse(bool(counter[3]))
def test_train_on_mix(self):
mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)]
counter = NgramCounter([mixed_sent])
unigrams = ["h"]
bigram_contexts = [("a",), ("c",)]
trigram_contexts = [("e", "f")]
self.assertCountEqual(unigrams, counter[1].keys())
self.assertCountEqual(bigram_contexts, counter[2].keys())
self.assertCountEqual(trigram_contexts, counter[3].keys())
| 31.857143
| 83
| 0.626623
|
import unittest
from nltk import FreqDist
from nltk.lm import NgramCounter
from nltk.util import everygrams
class NgramCounterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
text = [list("abcd"), list("egdbe")]
cls.trigram_counter = NgramCounter(
(everygrams(sent, max_len=3) for sent in text)
)
cls.bigram_counter = NgramCounter(
(everygrams(sent, max_len=2) for sent in text)
)
def test_N(self):
self.assertEqual(self.bigram_counter.N(), 16)
self.assertEqual(self.trigram_counter.N(), 21)
def test_counter_len_changes_with_lookup(self):
self.assertEqual(len(self.bigram_counter), 2)
_ = self.bigram_counter[50]
self.assertEqual(len(self.bigram_counter), 3)
def test_ngram_order_access_unigrams(self):
self.assertEqual(self.bigram_counter[1], self.bigram_counter.unigrams)
def test_ngram_conditional_freqdist(self):
expected_trigram_contexts = [
("a", "b"),
("b", "c"),
("e", "g"),
("g", "d"),
("d", "b"),
]
expected_bigram_contexts = [("a",), ("b",), ("d",), ("e",), ("c",), ("g",)]
bigrams = self.trigram_counter[2]
trigrams = self.trigram_counter[3]
self.assertCountEqual(expected_bigram_contexts, bigrams.conditions())
self.assertCountEqual(expected_trigram_contexts, trigrams.conditions())
def test_bigram_counts_seen_ngrams(self):
b_given_a_count = 1
unk_given_b_count = 1
self.assertEqual(b_given_a_count, self.bigram_counter[["a"]]["b"])
self.assertEqual(unk_given_b_count, self.bigram_counter[["b"]]["c"])
def test_bigram_counts_unseen_ngrams(self):
z_given_b_count = 0
self.assertEqual(z_given_b_count, self.bigram_counter[["b"]]["z"])
def test_unigram_counts_seen_words(self):
expected_count_b = 2
self.assertEqual(expected_count_b, self.bigram_counter["b"])
def test_unigram_counts_completely_unseen_words(self):
unseen_count = 0
self.assertEqual(unseen_count, self.bigram_counter["z"])
class NgramCounterTrainingTests(unittest.TestCase):
def setUp(self):
self.counter = NgramCounter()
def test_empty_string(self):
test = NgramCounter("")
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_empty_list(self):
test = NgramCounter([])
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_None(self):
test = NgramCounter(None)
self.assertNotIn(2, test)
self.assertEqual(test[1], FreqDist())
def test_train_on_unigrams(self):
words = list("abcd")
counter = NgramCounter([[(w,) for w in words]])
self.assertFalse(bool(counter[3]))
self.assertFalse(bool(counter[2]))
self.assertCountEqual(words, counter[1].keys())
def test_train_on_illegal_sentences(self):
str_sent = ["Check", "this", "out", "!"]
list_sent = [["Check", "this"], ["this", "out"], ["out", "!"]]
with self.assertRaises(TypeError):
NgramCounter([str_sent])
with self.assertRaises(TypeError):
NgramCounter([list_sent])
def test_train_on_bigrams(self):
bigram_sent = [("a", "b"), ("c", "d")]
counter = NgramCounter([bigram_sent])
self.assertFalse(bool(counter[3]))
def test_train_on_mix(self):
mixed_sent = [("a", "b"), ("c", "d"), ("e", "f", "g"), ("h",)]
counter = NgramCounter([mixed_sent])
unigrams = ["h"]
bigram_contexts = [("a",), ("c",)]
trigram_contexts = [("e", "f")]
self.assertCountEqual(unigrams, counter[1].keys())
self.assertCountEqual(bigram_contexts, counter[2].keys())
self.assertCountEqual(trigram_contexts, counter[3].keys())
| true
| true
|
f7182d31dac31bfb93476460c912fb64e81d7691
| 15,236
|
py
|
Python
|
ambari-server/target/classes/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | 3
|
2016-12-01T15:55:11.000Z
|
2016-12-01T15:56:38.000Z
|
ambari-server/target/classes/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-server/target/classes/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
|
willwill1101/ambari
|
3bed8e0abd0b6f60f15ffd4fa0035b5a57cf81e1
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from utils import get_bare_principal
from resource_management.libraries.functions.get_hdp_version import get_hdp_version
from resource_management.libraries.functions.is_empty import is_empty
import status_params
from resource_management.core.logger import Logger
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)
# Version being upgraded/downgraded to
version = default("/commandParams/version", None)
# Version that is CURRENT.
current_version = default("/hostLevelParams/current_version", None)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
upgrade_direction = default("/commandParams/upgrade_direction", None)
# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
hostname = config['hostname']
# default kafka parameters
kafka_home = '/usr/lib/kafka/'
kafka_bin = kafka_home+'/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_user_nofile_limit = config['configurations']['kafka-env']['kafka_user_nofile_limit']
kafka_user_nproc_limit = config['configurations']['kafka-env']['kafka_user_nproc_limit']
# parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
kafka_home = '/usr/hdp/current/kafka-broker/'
kafka_bin = kafka_home+'bin/kafka'
conf_dir = "/usr/hdp/current/kafka-broker/config"
kafka_user = config['configurations']['kafka-env']['kafka_user']
kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
kafka_pid_dir = status_params.kafka_pid_dir
kafka_pid_file = kafka_pid_dir+"/kafka.pid"
# This is hardcoded on the kafka bash process lifecycle on which we have no control over
kafka_managed_pid_dir = "/var/run/kafka"
kafka_managed_log_dir = "/var/log/kafka"
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
kafka_hosts.sort()
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts.sort()
if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
log4j_props = config['configurations']['kafka-log4j']['content']
else:
log4j_props = None
if 'ganglia_server_host' in config['clusterHostInfo'] and \
len(config['clusterHostInfo']['ganglia_server_host'])>0:
ganglia_installed = True
ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
ganglia_report_interval = 60
else:
ganglia_installed = False
metric_collector_host = ""
metric_collector_port = ""
metric_collector_protocol = ""
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_host' in config['configurations']['cluster-env']:
metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
else:
metric_collector_host = ams_collector_hosts[0]
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
pass
# Security-related params
security_enabled = config['configurations']['cluster-env']['security_enabled']
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and hdp_stack_version != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] and compare_versions(hdp_stack_version, '2.3') >= 0:
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
else:
kafka_kerberos_params = ''
# *********************** RANGER PLUGIN CHANGES ***********************
# ranger host
# **********************************************************************
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
is_supported_kafka_ranger = config['configurations']['kafka-env']['is_supported_kafka_ranger']
#ranger kafka properties
if has_ranger_admin and is_supported_kafka_ranger:
enable_ranger_kafka = config['configurations']['ranger-kafka-plugin-properties']['ranger-kafka-plugin-enabled']
enable_ranger_kafka = not is_empty(enable_ranger_kafka) and enable_ranger_kafka.lower() == 'yes'
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
xa_audit_db_flavor = xa_audit_db_flavor.lower() if xa_audit_db_flavor else None
xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_kafka'
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
ranger_kafka_security = config['configurations']['ranger-kafka-security']
ranger_kafka_security_attrs = config['configuration_attributes']['ranger-kafka-security']
ranger_kafka_policymgr_ssl = config['configurations']['ranger-kafka-policymgr-ssl']
ranger_kafka_policymgr_ssl_attrs = config['configuration_attributes']['ranger-kafka-policymgr-ssl']
policy_user = config['configurations']['ranger-kafka-plugin-properties']['policy_user']
ranger_plugin_config = {
'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
}
kafka_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': ranger_plugin_config,
'description': 'kafka repo',
'name': repo_name,
'repositoryType': 'kafka',
'type': 'kafka',
'assetType': '1'
}
#For curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
jdbc_symlink_name = "mysql-jdbc-driver.jar"
jdbc_jar_name = "mysql-connector-java.jar"
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
jdbc_jar_name = "ojdbc6.jar"
jdbc_symlink_name = "oracle-jdbc-driver.jar"
colon_count = xa_db_host.count(':')
if colon_count == 2 or colon_count == 0:
audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
else:
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
jdbc_jar_name = "postgresql.jar"
jdbc_symlink_name = "postgres-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
jdbc_jar_name = "sqljdbc4.jar"
jdbc_symlink_name = "mssql-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
jdbc_jar_name = "sajdbc4.jar"
jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
driver_curl_target = format("{kafka_home}libs/{jdbc_jar_name}")
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
xa_audit_hdfs_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
ssl_keystore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
hdp_version = get_hdp_version('kafka-broker')
setup_ranger_env_sh_source = format('/usr/hdp/{hdp_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
#For SQLA explicitly disable audit to DB for Ranger
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
| 52.537931
| 185
| 0.774416
|
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from utils import get_bare_principal
from resource_management.libraries.functions.get_hdp_version import get_hdp_version
from resource_management.libraries.functions.is_empty import is_empty
import status_params
from resource_management.core.logger import Logger
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)
version = default("/commandParams/version", None)
current_version = default("/hostLevelParams/current_version", None)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
upgrade_direction = default("/commandParams/upgrade_direction", None)
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
hostname = config['hostname']
kafka_home = '/usr/lib/kafka/'
kafka_bin = kafka_home+'/bin/kafka'
conf_dir = "/etc/kafka/conf"
limits_conf_dir = "/etc/security/limits.d"
zookeeper_connect = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_user_nofile_limit = config['configurations']['kafka-env']['kafka_user_nofile_limit']
kafka_user_nproc_limit = config['configurations']['kafka-env']['kafka_user_nproc_limit']
if Script.is_hdp_stack_greater_or_equal("2.2"):
kafka_home = '/usr/hdp/current/kafka-broker/'
kafka_bin = kafka_home+'bin/kafka'
conf_dir = "/usr/hdp/current/kafka-broker/config"
kafka_user = config['configurations']['kafka-env']['kafka_user']
kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
kafka_pid_dir = status_params.kafka_pid_dir
kafka_pid_file = kafka_pid_dir+"/kafka.pid"
kafka_managed_pid_dir = "/var/run/kafka"
kafka_managed_log_dir = "/var/log/kafka"
user_group = config['configurations']['cluster-env']['user_group']
java64_home = config['hostLevelParams']['java_home']
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
kafka_hosts.sort()
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts.sort()
if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
log4j_props = config['configurations']['kafka-log4j']['content']
else:
log4j_props = None
if 'ganglia_server_host' in config['clusterHostInfo'] and \
len(config['clusterHostInfo']['ganglia_server_host'])>0:
ganglia_installed = True
ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
ganglia_report_interval = 60
else:
ganglia_installed = False
metric_collector_host = ""
metric_collector_port = ""
metric_collector_protocol = ""
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_host' in config['configurations']['cluster-env']:
metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
else:
metric_collector_host = ams_collector_hosts[0]
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
metric_collector_protocol = 'https'
else:
metric_collector_protocol = 'http'
pass
security_enabled = config['configurations']['cluster-env']['security_enabled']
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and hdp_stack_version != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] and compare_versions(hdp_stack_version, '2.3') >= 0:
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
else:
kafka_kerberos_params = ''
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
is_supported_kafka_ranger = config['configurations']['kafka-env']['is_supported_kafka_ranger']
if has_ranger_admin and is_supported_kafka_ranger:
enable_ranger_kafka = config['configurations']['ranger-kafka-plugin-properties']['ranger-kafka-plugin-enabled']
enable_ranger_kafka = not is_empty(enable_ranger_kafka) and enable_ranger_kafka.lower() == 'yes'
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
xa_audit_db_flavor = xa_audit_db_flavor.lower() if xa_audit_db_flavor else None
xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_kafka'
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
ranger_kafka_security = config['configurations']['ranger-kafka-security']
ranger_kafka_security_attrs = config['configuration_attributes']['ranger-kafka-security']
ranger_kafka_policymgr_ssl = config['configurations']['ranger-kafka-policymgr-ssl']
ranger_kafka_policymgr_ssl_attrs = config['configuration_attributes']['ranger-kafka-policymgr-ssl']
policy_user = config['configurations']['ranger-kafka-plugin-properties']['policy_user']
ranger_plugin_config = {
'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
}
kafka_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': ranger_plugin_config,
'description': 'kafka repo',
'name': repo_name,
'repositoryType': 'kafka',
'type': 'kafka',
'assetType': '1'
}
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
jdbc_symlink_name = "mysql-jdbc-driver.jar"
jdbc_jar_name = "mysql-connector-java.jar"
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
jdbc_jar_name = "ojdbc6.jar"
jdbc_symlink_name = "oracle-jdbc-driver.jar"
colon_count = xa_db_host.count(':')
if colon_count == 2 or colon_count == 0:
audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
else:
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
jdbc_jar_name = "postgresql.jar"
jdbc_symlink_name = "postgres-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
jdbc_jar_name = "sqljdbc4.jar"
jdbc_symlink_name = "mssql-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
jdbc_jar_name = "sajdbc4.jar"
jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
driver_curl_target = format("{kafka_home}libs/{jdbc_jar_name}")
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
xa_audit_hdfs_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
ssl_keystore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
hdp_version = get_hdp_version('kafka-broker')
setup_ranger_env_sh_source = format('/usr/hdp/{hdp_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
import functools
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
| true
| true
|
f7182d6adde17e18d0bef7bffd4aa49b2b45e58d
| 22,427
|
py
|
Python
|
sdk/python/pulumi_azure_native/storagesync/v20190601/registered_server.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/storagesync/v20190601/registered_server.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/storagesync/v20190601/registered_server.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['RegisteredServerArgs', 'RegisteredServer']
@pulumi.input_type
class RegisteredServerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
storage_sync_service_name: pulumi.Input[str],
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RegisteredServer resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] storage_sync_service_name: Name of Storage Sync Service resource.
:param pulumi.Input[str] agent_version: Registered Server Agent Version
:param pulumi.Input[str] cluster_id: Registered Server clusterId
:param pulumi.Input[str] cluster_name: Registered Server clusterName
:param pulumi.Input[str] friendly_name: Friendly Name
:param pulumi.Input[str] last_heart_beat: Registered Server last heart beat
:param pulumi.Input[str] server_certificate: Registered Server Certificate
:param pulumi.Input[str] server_id: Registered Server serverId
:param pulumi.Input[str] server_os_version: Registered Server OS Version
:param pulumi.Input[str] server_role: Registered Server serverRole
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_sync_service_name", storage_sync_service_name)
if agent_version is not None:
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if last_heart_beat is not None:
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if server_certificate is not None:
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id is not None:
pulumi.set(__self__, "server_id", server_id)
if server_os_version is not None:
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role is not None:
pulumi.set(__self__, "server_role", server_role)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageSyncServiceName")
def storage_sync_service_name(self) -> pulumi.Input[str]:
"""
Name of Storage Sync Service resource.
"""
return pulumi.get(self, "storage_sync_service_name")
@storage_sync_service_name.setter
def storage_sync_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_sync_service_name", value)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server Agent Version
"""
return pulumi.get(self, "agent_version")
@agent_version.setter
def agent_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_version", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server clusterId
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server clusterName
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server last heart beat
"""
return pulumi.get(self, "last_heart_beat")
@last_heart_beat.setter
def last_heart_beat(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_heart_beat", value)
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server Certificate
"""
return pulumi.get(self, "server_certificate")
@server_certificate.setter
def server_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_certificate", value)
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server serverId
"""
return pulumi.get(self, "server_id")
@server_id.setter
def server_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_id", value)
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server OS Version
"""
return pulumi.get(self, "server_os_version")
@server_os_version.setter
def server_os_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_os_version", value)
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[pulumi.Input[str]]:
"""
Registered Server serverRole
"""
return pulumi.get(self, "server_role")
@server_role.setter
def server_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_role", value)
class RegisteredServer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Registered Server resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] agent_version: Registered Server Agent Version
:param pulumi.Input[str] cluster_id: Registered Server clusterId
:param pulumi.Input[str] cluster_name: Registered Server clusterName
:param pulumi.Input[str] friendly_name: Friendly Name
:param pulumi.Input[str] last_heart_beat: Registered Server last heart beat
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] server_certificate: Registered Server Certificate
:param pulumi.Input[str] server_id: Registered Server serverId
:param pulumi.Input[str] server_os_version: Registered Server OS Version
:param pulumi.Input[str] server_role: Registered Server serverRole
:param pulumi.Input[str] storage_sync_service_name: Name of Storage Sync Service resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegisteredServerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Registered Server resource.
:param str resource_name: The name of the resource.
:param RegisteredServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegisteredServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegisteredServerArgs.__new__(RegisteredServerArgs)
__props__.__dict__["agent_version"] = agent_version
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["last_heart_beat"] = last_heart_beat
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["server_certificate"] = server_certificate
__props__.__dict__["server_id"] = server_id
__props__.__dict__["server_os_version"] = server_os_version
__props__.__dict__["server_role"] = server_role
if storage_sync_service_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_sync_service_name'")
__props__.__dict__["storage_sync_service_name"] = storage_sync_service_name
__props__.__dict__["discovery_endpoint_uri"] = None
__props__.__dict__["last_operation_name"] = None
__props__.__dict__["last_workflow_id"] = None
__props__.__dict__["management_endpoint_uri"] = None
__props__.__dict__["monitoring_configuration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_location"] = None
__props__.__dict__["server_management_error_code"] = None
__props__.__dict__["service_location"] = None
__props__.__dict__["storage_sync_service_uid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagesync/v20190601:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20170605preview:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20170605preview:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20180402:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180402:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20180701:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180701:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20181001:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20181001:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20190201:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190201:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20190301:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190301:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20191001:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20191001:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20200301:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200301:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20200901:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200901:RegisteredServer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RegisteredServer, __self__).__init__(
'azure-native:storagesync/v20190601:RegisteredServer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegisteredServer':
"""
Get an existing RegisteredServer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RegisteredServerArgs.__new__(RegisteredServerArgs)
__props__.__dict__["agent_version"] = None
__props__.__dict__["cluster_id"] = None
__props__.__dict__["cluster_name"] = None
__props__.__dict__["discovery_endpoint_uri"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["last_heart_beat"] = None
__props__.__dict__["last_operation_name"] = None
__props__.__dict__["last_workflow_id"] = None
__props__.__dict__["management_endpoint_uri"] = None
__props__.__dict__["monitoring_configuration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_location"] = None
__props__.__dict__["server_certificate"] = None
__props__.__dict__["server_id"] = None
__props__.__dict__["server_management_error_code"] = None
__props__.__dict__["server_os_version"] = None
__props__.__dict__["server_role"] = None
__props__.__dict__["service_location"] = None
__props__.__dict__["storage_sync_service_uid"] = None
__props__.__dict__["type"] = None
return RegisteredServer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server Agent Version
"""
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server clusterId
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server clusterName
"""
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> pulumi.Output[Optional[str]]:
"""
Resource discoveryEndpointUri
"""
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server last heart beat
"""
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> pulumi.Output[Optional[str]]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> pulumi.Output[Optional[str]]:
"""
Management Endpoint Uri
"""
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> pulumi.Output[Optional[str]]:
"""
Monitoring Configuration
"""
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> pulumi.Output[Optional[str]]:
"""
Resource Location
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server Certificate
"""
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server serverId
"""
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementErrorCode")
def server_management_error_code(self) -> pulumi.Output[Optional[int]]:
"""
Registered Server Management Error Code
"""
return pulumi.get(self, "server_management_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server OS Version
"""
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server serverRole
"""
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> pulumi.Output[Optional[str]]:
"""
Service Location
"""
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> pulumi.Output[Optional[str]]:
"""
Registered Server storageSyncServiceUid
"""
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 43.295367
| 1,633
| 0.661925
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['RegisteredServerArgs', 'RegisteredServer']
@pulumi.input_type
class RegisteredServerArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
storage_sync_service_name: pulumi.Input[str],
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_sync_service_name", storage_sync_service_name)
if agent_version is not None:
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name is not None:
pulumi.set(__self__, "cluster_name", cluster_name)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if last_heart_beat is not None:
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if server_certificate is not None:
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id is not None:
pulumi.set(__self__, "server_id", server_id)
if server_os_version is not None:
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role is not None:
pulumi.set(__self__, "server_role", server_role)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageSyncServiceName")
def storage_sync_service_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "storage_sync_service_name")
@storage_sync_service_name.setter
def storage_sync_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_sync_service_name", value)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "agent_version")
@agent_version.setter
def agent_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_version", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_heart_beat")
@last_heart_beat.setter
def last_heart_beat(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_heart_beat", value)
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "server_certificate")
@server_certificate.setter
def server_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_certificate", value)
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "server_id")
@server_id.setter
def server_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_id", value)
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "server_os_version")
@server_os_version.setter
def server_os_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_os_version", value)
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "server_role")
@server_role.setter
def server_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_role", value)
class RegisteredServer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: RegisteredServerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegisteredServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_version: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
last_heart_beat: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_certificate: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
server_os_version: Optional[pulumi.Input[str]] = None,
server_role: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegisteredServerArgs.__new__(RegisteredServerArgs)
__props__.__dict__["agent_version"] = agent_version
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["last_heart_beat"] = last_heart_beat
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["server_certificate"] = server_certificate
__props__.__dict__["server_id"] = server_id
__props__.__dict__["server_os_version"] = server_os_version
__props__.__dict__["server_role"] = server_role
if storage_sync_service_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_sync_service_name'")
__props__.__dict__["storage_sync_service_name"] = storage_sync_service_name
__props__.__dict__["discovery_endpoint_uri"] = None
__props__.__dict__["last_operation_name"] = None
__props__.__dict__["last_workflow_id"] = None
__props__.__dict__["management_endpoint_uri"] = None
__props__.__dict__["monitoring_configuration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_location"] = None
__props__.__dict__["server_management_error_code"] = None
__props__.__dict__["service_location"] = None
__props__.__dict__["storage_sync_service_uid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagesync/v20190601:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20170605preview:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20170605preview:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20180402:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180402:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20180701:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180701:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20181001:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20181001:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20190201:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190201:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20190301:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190301:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20191001:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20191001:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20200301:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200301:RegisteredServer"), pulumi.Alias(type_="azure-native:storagesync/v20200901:RegisteredServer"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200901:RegisteredServer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RegisteredServer, __self__).__init__(
'azure-native:storagesync/v20190601:RegisteredServer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegisteredServer':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RegisteredServerArgs.__new__(RegisteredServerArgs)
__props__.__dict__["agent_version"] = None
__props__.__dict__["cluster_id"] = None
__props__.__dict__["cluster_name"] = None
__props__.__dict__["discovery_endpoint_uri"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["last_heart_beat"] = None
__props__.__dict__["last_operation_name"] = None
__props__.__dict__["last_workflow_id"] = None
__props__.__dict__["management_endpoint_uri"] = None
__props__.__dict__["monitoring_configuration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_location"] = None
__props__.__dict__["server_certificate"] = None
__props__.__dict__["server_id"] = None
__props__.__dict__["server_management_error_code"] = None
__props__.__dict__["server_os_version"] = None
__props__.__dict__["server_role"] = None
__props__.__dict__["service_location"] = None
__props__.__dict__["storage_sync_service_uid"] = None
__props__.__dict__["type"] = None
return RegisteredServer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementErrorCode")
def server_management_error_code(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "server_management_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.