hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61cec369b3732fca5135012a654d9aa0eab32326 | 7,944 | py | Python | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/utils/losses_pytorch.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | import sys, os
sys.path.append("./midlevel-reps")
from visualpriors.taskonomy_network import TaskonomyDecoder
import torch
import torch.nn.functional as F
import torch.nn as nn
# Small constant — presumably a numerical stabiliser for loss denominators
# (e.g. IoU-style losses); usage not visible in this chunk, TODO confirm.
SMOOTH = 1e-6
# Mapping from output-channel count to the Taskonomy task names that use it.
# `None` marks the task whose channel count is dataset-dependent.
CHANNELS_TO_TASKS = {
    1: ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', ],
    2: ['curvature', 'principal_curvature'],
    3: ['autoencoding', 'denoising', 'normal', 'inpainting', 'rgb', 'normals'],
    128: ['segment_unsup2d', 'segment_unsup25d'],
    1000: ['class_object'],
    None: ['segment_semantic']
}
# Inverted lookup: task name -> number of output channels.
TASKS_TO_CHANNELS = {}
for n, tasks in CHANNELS_TO_TASKS.items():
    for task in tasks:
        TASKS_TO_CHANNELS[task] = n
# Task groupings: image-to-image tasks vs. single-vector (feed-forward) tasks.
PIX_TO_PIX_TASKS = ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', 'curvature', 'autoencoding', 'denoising', 'normal', 'inpainting', 'segment_unsup2d', 'segment_unsup25d', 'segment_semantic', ]
FEED_FORWARD_TASKS = ['class_object', 'class_scene', 'room_layout', 'vanishing_point']
SINGLE_IMAGE_TASKS = PIX_TO_PIX_TASKS + FEED_FORWARD_TASKS
| 45.136364 | 276 | 0.656596 |
61cf7342efb940a3f5d7c9b44e90c3d3f4d12610 | 21,205 | py | Python | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 4 | 2020-09-14T07:20:19.000Z | 2021-04-22T14:23:04.000Z | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 5 | 2021-03-17T17:02:27.000Z | 2021-08-31T10:09:38.000Z | src/trails/flow_model.py | BenDickens/trails | a89a1a901c7be38cdcb7a59339587e518ab8f14d | [
"MIT"
] | 3 | 2020-09-07T07:35:28.000Z | 2021-04-22T14:23:39.000Z | import os,sys
# Standard library
import time
import subprocess
import shutil
import pathlib
from multiprocessing import Pool, cpu_count

# Third-party
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import pygeos
import pyproj  # used by convert_crs() below but was previously never imported (NameError at runtime)
from osgeo import gdal
from tqdm import tqdm
import igraph as ig
import contextily as ctx
from rasterstats import zonal_stats
import pylab as pl
from IPython import display
import seaborn as sns
code_path = (pathlib.Path(__file__).parent.absolute())
gdal.SetConfigOption("OSM_CONFIG_FILE", os.path.join(code_path,'..','..',"osmconf.ini"))
from shapely.wkb import loads
data_path = os.path.join('..','data')
from simplify import *
from extract import railway,ferries,mainRoads,roads
from population_OD import create_bbox,create_grid
pd.options.mode.chained_assignment = None
def closest_node(node, nodes):
    """Return the index of the row in ``nodes`` nearest to ``node``.

    Distance is squared Euclidean (monotonic with Euclidean, so the
    argmin is identical and the sqrt is skipped).

    Args:
        node: 1-D coordinate array.
        nodes: 2-D array of candidate coordinates, one row per node.

    Returns:
        Index of the nearest row in ``nodes``.
    """
    offsets = nodes - node
    squared_distances = (offsets ** 2).sum(axis=1)
    return np.argmin(squared_distances)
def load_network(osm_path, mainroad=True):
    """Build a cleaned, topologically simplified road network from an OSM extract.

    Args:
        osm_path: path to a ``.osm.pbf`` file.
        mainroad (bool, optional): only extract main roads instead of all
            roads. Defaults to True.

    Returns:
        Network object with cleaned edges/nodes, ids, topology, distances
        and travel times.
    """
    edges = mainRoads(osm_path) if mainroad else roads(osm_path)
    network = Network(edges=edges)
    # Cleaning pipeline; the order matters (e.g. ids must exist before
    # topology is derived, distances are computed after edges are merged).
    pipeline = (
        clean_roundabouts,
        split_edges_at_nodes,
        add_endpoints,
        add_ids,
        add_topology,
        drop_hanging_nodes,
        merge_edges,
        reset_ids,
        add_distances,
        merge_multilinestrings,
        fill_attributes,
        add_travel_time,
    )
    for step in pipeline:
        network = step(network)
    return network
def get_gdp_values(gdf,data_path):
    """Extract the summed GDP per geometry via zonal statistics on a GDP raster.

    Note: temporarily converts the pygeos geometries to shapely (as required
    by rasterstats) and converts them back before returning, so
    ``gdf['geometry']`` is mutated in place during the call.

    Args:
        gdf: dataframe with a pygeos 'geometry' column.
        data_path: root data directory containing 'global_gdp/GDP_2015.tif'.

    Returns:
        list: summed GDP per geometry; raster cells with no coverage
        (``None`` results) are replaced by 0.
    """
    world_pop = os.path.join(data_path,'global_gdp','GDP_2015.tif')
    # rasterstats expects shapely geometries, not pygeos
    gdf['geometry'] = gdf.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
    gdp = list(item['sum'] for item in zonal_stats(gdf.geometry,world_pop,
            stats="sum"))
    # no raster coverage -> None -> treat as zero GDP
    gdp = [x if x is not None else 0 for x in gdp]
    # restore pygeos geometries
    gdf['geometry'] = pygeos.from_shapely(gdf.geometry)
    return gdp
def country_grid_gdp_filled(trans_network,country,data_path,rough_grid_split=100,from_main_graph=False):
    """Create a regular grid over a country's network, clipped to its borders,
    with GDP and GDP density attached to each cell.

    Args:
        trans_network: a network object exposing ``.nodes``, or — when
            ``from_main_graph`` is True — a dataframe of edge geometries.
        country: ISO3 country code used to look up the GADM boundary.
        data_path: root data directory (expects GADM/gadm36_levels.gpkg and
            the global GDP raster used by get_gdp_values).
        rough_grid_split (int, optional): approximate number of grid cells
            covering the network envelope. Defaults to 100.
        from_main_graph (bool, optional): whether ``trans_network`` is already
            a geometry dataframe. Defaults to False.

    Returns:
        pandas.DataFrame: grid cells with 'centroid', 'km2', 'gdp' and
        'gdp_area' (GDP density) columns; cells without GDP are dropped.
    """
    if from_main_graph==True:
        node_df = trans_network.copy()
        envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
        # cell edge length so that ~rough_grid_split cells cover the envelope
        height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
    else:
        node_df = trans_network.nodes.copy()
        node_df.geometry,approximate_crs = convert_crs(node_df)
        envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
        height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
    gdf_admin = pd.DataFrame(create_grid(create_bbox(node_df),height),columns=['geometry'])
    #load data and convert to pygeos
    country_shape = gpd.read_file(os.path.join(data_path,'GADM','gadm36_levels.gpkg'),layer=0)
    country_shape = pd.DataFrame(country_shape.loc[country_shape.GID_0==country])
    country_shape.geometry = pygeos.from_shapely(country_shape.geometry)
    # clip grid cells to the country outline and drop cells fully outside it
    gdf_admin = pygeos.intersection(gdf_admin,country_shape.geometry)
    gdf_admin = gdf_admin.loc[~pygeos.is_empty(gdf_admin.geometry)]
    gdf_admin['centroid'] = pygeos.centroid(gdf_admin.geometry)
    gdf_admin['km2'] = area(gdf_admin)
    gdf_admin['gdp'] = get_gdp_values(gdf_admin,data_path)
    # keep only economically active cells, then compute GDP density
    gdf_admin = gdf_admin.loc[gdf_admin.gdp > 0].reset_index()
    gdf_admin['gdp_area'] = gdf_admin.gdp/gdf_admin['km2']
    return gdf_admin
def convert_crs(gdf,current_crs="epsg:4326"):
    """Reproject pygeos geometries between WGS84 and an approximate local UTM CRS.

    When the input is EPSG:4326, the target UTM zone is estimated from the
    centroid of the *first* geometry; otherwise geometries are projected
    back to EPSG:4326.

    NOTE(review): requires ``pyproj`` to be imported at module level.

    Args:
        gdf: dataframe with a pygeos 'geometry' column (assumed non-empty).
        current_crs (str, optional): CRS of the input. Defaults to "epsg:4326".

    Returns:
        tuple: (reprojected geometry array, target CRS string)
    """
    if current_crs == "epsg:4326":
        lat = pygeos.geometry.get_y(pygeos.centroid(gdf['geometry'].iloc[0]))
        lon = pygeos.geometry.get_x(pygeos.centroid(gdf['geometry'].iloc[0]))
        # formula below based on :https://gis.stackexchange.com/a/190209/80697
        approximate_crs = "epsg:" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
    else:
        approximate_crs = "epsg:4326"
    #from pygeos/issues/95
    geometries = gdf['geometry']
    coords = pygeos.get_coordinates(geometries)
    transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
    new_coords = transformer.transform(coords[:, 0], coords[:, 1])
    result = pygeos.set_coordinates(geometries.copy(), np.array(new_coords).T)
    return result,approximate_crs
def area(gdf, km=True):
    """Compute geometry areas after reprojecting to an approximate local CRS.

    Args:
        gdf: dataframe with a pygeos 'geometry' column in EPSG:4326.
        km (bool, optional): return square kilometres instead of square
            metres. Defaults to True.

    Returns:
        Array of areas (km^2 when ``km`` is True, otherwise m^2).
    """
    projected_geometries = convert_crs(gdf)[0]
    divisor = 1e6 if km else 1
    return pygeos.area(projected_geometries) / divisor
def create_OD(gdf_admin,country_name,data_path):
    """Downscale a national IO table to regions and build an origin-destination matrix.

    Runs the external `mrio_disaggregate` (libmrio) binary in a per-country
    scratch directory (so several countries can run concurrently), using
    regional GDP density as the disaggregation proxy.

    NOTE(review): relies on a ``get_basetable`` helper defined elsewhere in
    this project (not visible in this file chunk).

    Args:
        gdf_admin: grid dataframe with a 'gdp_area' column (a 'NAME_1'
            region column is added if missing).
        country_name: ISO3 country code.
        data_path: root data directory.

    Returns:
        tuple: (OD dataframe, OD dict keyed by (origin, destination) region
        names, list of sector codes, gdf_admin with 'import'/'export'
        columns and the region column renamed to 'name').
    """
    # create list of sectors
    sectors = [chr(i).upper() for i in range(ord('a'),ord('o')+1)]
    # add a region column if not existing yet.
    if 'NAME_1' not in gdf_admin.columns:
        gdf_admin['NAME_1'] = ['reg'+str(x) for x in list(gdf_admin.index)]
    # prepare paths to downscale a country. We give a country its own directory
    # to allow for multiple unique countries running at the same time
    downscale_basepath = os.path.join(code_path,'..','..','downscale_od')
    downscale_countrypath = os.path.join(code_path,'..','..','run_downscale_od_{}'.format(country_name))
    # copy downscaling method into the country directory
    shutil.copytree(downscale_basepath,downscale_countrypath)
    # save national IO table as basetable for downscaling
    get_basetable(country_name,data_path).to_csv(os.path.join(downscale_countrypath,'basetable.csv'),
            sep=',',header=False,index=False)
    # create proxy table with GDP values per region/area
    proxy_reg = pd.DataFrame(gdf_admin[['NAME_1','gdp_area']])
    proxy_reg['year'] = 2016
    proxy_reg = proxy_reg[['year','NAME_1','gdp_area']]
    proxy_reg.columns = ['year','id','gdp_area']
    proxy_reg.to_csv(os.path.join(downscale_countrypath,'proxy_reg.csv'),index=False)
    # indices.csv declares the (country, sector) index used by libmrio
    indices = pd.DataFrame(sectors,columns=['sector'])
    indices['name'] = country_name
    indices = indices.reindex(['name','sector'],axis=1)
    indices.to_csv(os.path.join(downscale_countrypath,'indices.csv'),index=False,header=False)
    # prepare yaml file: patch the country id and the list of target regions in place
    yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "r")
    list_of_lines = yaml_file.readlines()
    list_of_lines[6] = ' - id: {}\n'.format(country_name)
    list_of_lines[8] = ' into: [{}] \n'.format(','.join(['reg'+str(x) for x in list(gdf_admin.index)]))
    yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "w")
    yaml_file.writelines(list_of_lines)
    yaml_file.close()
    # run libmrio
    p = subprocess.Popen([os.path.join(downscale_countrypath,'mrio_disaggregate'), 'settings_basic.yml'],
            cwd=os.path.join(downscale_countrypath))
    p.wait()
    # create OD matrix from libmrio results
    OD = pd.read_csv(os.path.join(downscale_countrypath,'output.csv'),header=None)
    OD.columns = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
    OD.index = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
    # aggregate over sectors so rows/columns are regions only
    OD = OD.groupby(level=0,axis=0).sum().groupby(level=0,axis=1).sum()
    # rescale OD values (x5/365) — presumably annual -> daily; TODO confirm rationale
    OD = (OD*5)/365
    OD_dict = OD.stack().to_dict()
    gdf_admin['import'] = list(OD.sum(axis=1))
    gdf_admin['export'] = list(OD.sum(axis=0))
    gdf_admin = gdf_admin.rename({'NAME_1': 'name'}, axis='columns')
    # and remove country folder again to avoid clutter in the directory
    shutil.rmtree(downscale_countrypath)
    return OD,OD_dict,sectors,gdf_admin
def prepare_network_routing(transport_network):
    """Turn the network edges into a directed edge set with routing attributes.

    Adds the generalised cost ('GC'), the per-segment capacity ('max_flow'),
    and zero-initialised 'flow' and 'wait_time' columns used by the
    iterative flow model.

    Args:
        transport_network: network object exposing an ``edges`` dataframe
            with a 'highway' column.

    Returns:
        pandas.DataFrame: directed edges ready for graph construction.
    """
    gdf_roads = make_directed(transport_network.edges)
    gdf_roads = gdf_roads.rename(columns={"highway": "infra_type"})
    gdf_roads['GC'] = gdf_roads.apply(gc_function,axis=1)
    gdf_roads['max_flow'] = gdf_roads.apply(set_max_flow,axis=1)
    gdf_roads['flow'] = 0
    gdf_roads['wait_time'] = 0
    return gdf_roads
def create_graph(gdf_roads):
    """Build a directed igraph graph from the edge dataframe and keep only
    its giant (largest connected) component.

    Args:
        gdf_roads: directed edge dataframe with 'from_id', 'to_id' and 'id'
            columns plus arbitrary edge attributes.

    Returns:
        tuple: (giant-component graph, the reordered edge dataframe indexed
        by edge 'id'; note the dataframe is indexed *after* graph creation).
    """
    # igraph's TupleList expects (from, to, attrs...) column order
    gdf_in = gdf_roads.reindex(['from_id','to_id'] + [x for x in list(gdf_roads.columns) if x not in ['from_id','to_id']],axis=1)
    g = ig.Graph.TupleList(gdf_in.itertuples(index=False), edge_attrs=list(gdf_in.columns)[2:],directed=True)
    sg = g.clusters().giant()
    gdf_in.set_index('id',inplace=True)
    return sg,gdf_in
def nearest_network_node_list(gdf_admin,gdf_nodes,sg):
    """Map every admin region to the id of its nearest network node.

    Only nodes that belong to the (giant-component) graph ``sg`` are
    considered, so every returned node is routable.

    Args:
        gdf_admin: region dataframe with 'name' and pygeos 'centroid' columns.
        gdf_nodes: node dataframe with 'id' and pygeos 'geometry' columns.
        sg: igraph graph whose vertex 'name' attribute holds node ids.

    Returns:
        dict: region name -> nearest node id.
    """
    gdf_nodes = gdf_nodes.loc[gdf_nodes.id.isin(sg.vs['name'])]
    gdf_nodes.reset_index(drop=True,inplace=True)
    nodes = {}
    for admin_ in gdf_admin.itertuples():
        # idxmin over the distances from the region centroid to every node
        nodes[admin_.name] = gdf_nodes.iloc[pygeos.distance((admin_.centroid),gdf_nodes.geometry).idxmin()].id
    return nodes
def set_max_flow(segment):
    """Return the maximum freight flow capacity for a road segment.

    Capacity model (per the original author's notes):
      * standard lane capacity = 1000 passenger vehicles per lane per hour
      * passenger-vehicle equivalent of a truck: 3.5
      * average truck load: 8 tonnes at a median value of 2,000 USD/tonne
        -> median truck value = 8 * 2000 = 16,000 USD
      * 30% of trips run empty, so only 70% of capacity carries freight
    The base capacity is then scaled by road class: trunk/motorway x4,
    primary x2, secondary x1, tertiary and all other roads x0.5.

    Args:
        segment: edge record exposing an ``infra_type`` attribute
            (e.g. a pandas row).

    Returns:
        float: maximum flow for the segment (value terms per hour).
    """
    empty_trip_correction = 0.7  # share of capacity available for loaded trips
    standard_max_flow = 1000 / 3.5 * 16000 * empty_trip_correction
    # Road-class multipliers; `*_link` ramps inherit their parent class.
    # Unknown classes fall back to the smallest multiplier.
    multipliers = {
        'trunk': 4, 'trunk_link': 4,
        'motorway': 4, 'motorway_link': 4,
        'primary': 2, 'primary_link': 2,
        'secondary': 1, 'secondary_link': 1,
        'tertiary': 0.5, 'tertiary_link': 0.5,
    }
    return standard_max_flow * multipliers.get(segment.infra_type, 0.5)
def gc_function(segment):
    """Generalised cost (GC) of traversing a road segment (initial estimate).

    GC = 0.57*WaitT + 0.49*TrvlT + 1*Trate + 0.44*stddev, where the tariff
    rate (Trate) depends on road class. At this first-assignment stage the
    waiting time is 0 and the travel-time std. deviation is fixed at 1
    (see ``update_gc_function`` for the congestion-aware update).

    The original repeated the full cost expression in every branch; here the
    branches only pick Trate and the formula appears once.

    Args:
        segment: edge record with an ``infra_type`` attribute and a
            ``'time'`` item (travel time), e.g. a pandas row.

    Returns:
        float: generalised cost of the segment.
    """
    wait_time = 0  # no congestion assumed before the first flow assignment
    if segment.infra_type in ['primary', 'primary_link']:
        trate = 0.5
    elif segment.infra_type in ['secondary', 'secondary_link']:
        trate = 1
    elif segment.infra_type in ['tertiary', 'tertiary_link']:
        trate = 1.5
    else:
        trate = 2
    # GC = WaitT + TrvlT + Trate + stddev (weighted)
    return 0.57 * wait_time + 0.49 * segment['time'] + 1 * trate + 0.44 * 1
def update_gc_function(segment):
    """Update a segment's generalised cost from its currently assigned flow.

    If the assigned flow exceeds capacity, the waiting time grows by one
    unit; otherwise any accumulated waiting time decays by one unit (never
    below zero). The generalised cost is then
    GC = 0.57*WaitT + 0.49*TrvlT + 1*Trate + 0.44*stddev.

    Note: mutates ``segment['wait_time']`` in place.

    Args:
        segment: mutable mapping (e.g. an igraph edge) with 'flow',
            'max_flow', 'wait_time', 'time' and 'infra_type' entries.

    Returns:
        float: updated generalised cost of the segment.
    """
    if segment['flow'] > segment['max_flow']:
        segment['wait_time'] += 1
    elif segment['wait_time'] > 0:
        # BUGFIX: the original evaluated `segment['wait_time'] - 1` without
        # assigning the result, so congestion penalties never decayed once
        # an edge had been overloaded.
        segment['wait_time'] -= 1
    else:
        segment['wait_time'] = 0

    if segment['infra_type'] in ['primary', 'primary_link']:
        trate = 0.5
    elif segment['infra_type'] in ['secondary', 'secondary_link']:
        trate = 1
    elif segment['infra_type'] in ['tertiary', 'tertiary_link']:
        trate = 1.5
    else:
        trate = 2
    # GC = WaitT + TrvlT + Trate + stddev (weighted)
    return 0.57 * segment['wait_time'] + 0.49 * segment['time'] + 1 * trate + 0.44 * 1
def run_flow_analysis(country,transport_network,gdf_admin,OD_dict,notebook=False):
    """Iteratively assign OD flows to the network until (almost) all edges
    are below their maximum capacity.

    Each iteration recomputes the generalised cost (adding waiting time on
    congested edges), reroutes every OD pair along shortest-GC paths and
    accumulates flows. Convergence: >99% of edges below max flow, or 100
    iterations. Progress is plotted each iteration and the final plot is
    saved to the figures directory.

    Args:
        country: country code, used to name the output figure.
        transport_network: network object with ``edges`` and ``nodes``.
        gdf_admin: region dataframe with 'name' and 'centroid' columns.
        OD_dict: dict mapping (origin_name, destination_name) -> flow value.
        notebook (bool, optional): update a single inline figure instead of
            drawing interactively. Defaults to False.

    Returns:
        pandas.DataFrame: edges with final 'flow', 'max_flow', 'wait_time'
        and 'overflow' (= flow/max_flow) columns.
    """
    plt.rcParams['figure.figsize'] = [5, 5]
    gdf_roads = prepare_network_routing(transport_network)
    sg,gdf_in = create_graph(gdf_roads)
    nearest_node = nearest_network_node_list(gdf_admin,transport_network.nodes,sg)
    dest_nodes = [sg.vs['name'].index(nearest_node[x]) for x in list(nearest_node.keys())]

    # this is where the iterations goes
    iterator = 0
    optimal = False
    max_iter = 100
    save_fits = []
    if not notebook:
        plt.ion() ## Note this correction

    # run flow optimization model
    while not optimal:
        # update cost function per segment, dependent on flows from previous
        # iteration (the original wrapped this in a redundant
        # immediately-invoked lambda)
        sg.es['GC'] = [update_gc_function(segment) for segment in list(sg.es)]
        sg.es['flow'] = 0

        # (re-)assess shortest paths between all regions
        for admin_orig in (list(gdf_admin.name)):
            paths = sg.get_shortest_paths(sg.vs[sg.vs['name'].index(nearest_node[admin_orig])],dest_nodes,weights='GC',output="epath")
            for path,admin_dest in zip(paths,list(gdf_admin.name)):
                flow_value = OD_dict[(admin_orig,admin_dest)]
                sg.es[path]['flow'] = [x + flow_value for x in sg.es[path]['flow']]

        # share of edges whose flow fits within their capacity
        fitting_edges = (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es))
        save_fits.append(fitting_edges)

        # if at least 99% of roads are below max flow, we say its good enough
        # (reuse fitting_edges instead of recomputing the whole sum, as the
        # original did)
        if fitting_edges > 0.99:
            optimal = True
        iterator += 1

        # when running the code in a notebook, the figure updates instead of a new figure each iteration
        if notebook:
            pl.plot(save_fits)
            display.display(pl.gcf())
            display.clear_output(wait=True)
        else:
            plt.plot(save_fits)
            plt.xlabel('# iteration')
            plt.ylabel('Share of edges below maximum flow')
            plt.show()
            plt.pause(0.0001) #Note this correction
        if iterator == max_iter:
            break

    # save output
    plt.savefig(os.path.join(code_path,'..','..','figures','{}_flow_modelling.png'.format(country)))
    gdf_in['flow'] = pd.DataFrame(sg.es['flow'],columns=['flow'],index=sg.es['id'])
    gdf_in['max_flow'] = pd.DataFrame(sg.es['max_flow'],columns=['max_flow'],index=sg.es['id'])
    gdf_in['wait_time'] = pd.DataFrame(sg.es['wait_time'],columns=['wait_time'],index=sg.es['id'])
    gdf_in['overflow'] = gdf_in['flow'].div(gdf_in['max_flow'])
    return gdf_in
def plot_OD_matrix(OD):
    """Plot the origin-destination matrix as a heatmap (colour scale capped
    at 1e5 so a few very large flows do not wash out the rest).

    Args:
        OD: square pandas DataFrame of flows between regions.
    """
    plt.rcParams['figure.figsize'] = [20, 15]
    sns.heatmap(OD,vmin=0,vmax=1e5,cmap='Reds')
def plot_results(gdf_in):
    """Plot modelled flows on a basemap alongside a bar chart of the share
    of distance on overloaded edges per road type.

    Note: converts ``gdf_in['geometry']`` from pygeos to shapely in place.

    Args:
        gdf_in: edge dataframe with 'flow', 'max_flow', 'distance',
            'infra_type' and pygeos 'geometry' columns.
    """
    gdf_in['geometry'] = gdf_in.geometry.apply(lambda x : loads(pygeos.to_wkb(x)))
    gdf_plot = gpd.GeoDataFrame(gdf_in)
    gdf_plot.crs = 4326
    gdf_plot = gdf_plot.to_crs(3857)  # web-mercator for the basemap tiles
    plt.rcParams['figure.figsize'] = [20, 10]
    fig, axes = plt.subplots(1, 2)
    for iter_,ax in enumerate(axes.flatten()):
        if iter_ == 0:
            gdf_plot.loc[gdf_plot.flow>1].plot(ax=ax,column='flow',legend=False,cmap='Reds',linewidth=3) #loc[gdf_plot.flow>1]
            ctx.add_basemap(ax, source=ctx.providers.Stamen.TonerLite,zoom=15)
            ax.set_axis_off()
            ax.set_title('Flows along the network')
        else:
            # NOTE(review): the filter `max_flow>1` looks intended to select
            # overloaded edges (flow > max_flow) — confirm with the author.
            # BUGFIX: pandas' plot accessor takes `kind='bar'`, not
            # `type='bar'` (the original keyword is not a valid argument).
            pd.DataFrame(gdf_in.loc[gdf_in.max_flow>1].groupby(
                'infra_type').sum()['distance']/gdf_in.groupby('infra_type').sum()['distance']).dropna().sort_values(
                by='distance',ascending=False).plot(kind='bar',color='red',ax=ax)
            ax.set_ylabel('Percentage of edges > max flow')
            ax.set_xlabel('Road type')
    #plt.show(block=True)
def country_run(country,data_path=os.path.join('C:\\','Data'),plot=False,save=True):
    """Run the pipeline for one country: build the network and the GDP grid.

    The OD creation, flow analysis, saving and plotting steps are currently
    disabled (commented out below) — only the network and the GDP grid are
    produced.

    Args:
        country: ISO3 country code (expects '<country>.osm.pbf' under
            data_path/country_osm).
        data_path: root data directory. Defaults to a Windows 'C:/Data' path.
        plot (bool, optional): plot results (currently unused). Defaults to False.
        save (bool, optional): save outputs (currently unused). Defaults to True.
    """
    osm_path = os.path.join(data_path,'country_osm','{}.osm.pbf'.format(country))
    transport_network = load_network(osm_path)
    print('NOTE: Network created')
    gdf_roads = prepare_network_routing(transport_network)
    sg = create_graph(gdf_roads)[0]
    main_graph = pd.DataFrame(list(sg.es['geometry']),columns=['geometry'])
    gdf_admin = country_grid_gdp_filled(main_graph,country,data_path,rough_grid_split=100,from_main_graph=True)
    print('NOTE: GDP values extracted')
    # OD,OD_dict,sectors,gdf_admin = create_OD(gdf_admin,country,data_path)
    # print('NOTE: OD created')
    # gdf_out = run_flow_analysis(country,transport_network,gdf_admin,OD_dict)
    # print('NOTE: Flow analysis finished')
    # if save:
    # gdf_admin['geometry'] = gdf_admin.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
    # gdf_out = gdf_out.loc[~gdf_out.max_flow.isna()].reset_index(drop=True)
    # gdf_out_save = gdf_out.copy()
    # gdf_out_save['geometry'] = gdf_out_save.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
    # gpd.GeoDataFrame(gdf_admin.drop('centroid',axis=1)).to_file(
    # os.path.join(code_path,'..','..','data',
    # '{}.gpkg'.format(country)),layer='grid',driver='GPKG')
    # gpd.GeoDataFrame(gdf_out_save).to_file(os.path.join('..','..','data',
    # '{}.gpkg'.format(country)),layer='network',driver='GPKG')
    # if plot:
    # plot_results(gdf_out)
if __name__ == '__main__':
    #country_run(sys.argv[1],os.path.join('C:\\','Data'),plot=False)
    #country_run(sys.argv[1],os.path.join(code_path,'..','..','Data'),plot=False)
    #data_path = os.path.join('C:\\','Data')
    # BUGFIX: use short-circuiting `and` instead of bitwise `&`. With `&`
    # both operands are always evaluated, so running the script with no
    # arguments raised IndexError on sys.argv[1] instead of printing the
    # usage hint below.
    if len(sys.argv) > 1 and len(sys.argv[1]) == 3:
        # a 3-character argument is treated as an ISO3 country code
        country_run(sys.argv[1])
    elif len(sys.argv) > 1 and len(sys.argv[1]) > 3:
        # a longer argument is treated as a continent name; run every
        # country of that continent in parallel
        glob_info = pd.read_excel(os.path.join('/scistor','ivm','eks510','projects','trails','global_information.xlsx'))
        glob_info = glob_info.loc[glob_info.continent==sys.argv[1]]
        countries = list(glob_info.ISO_3digit)
        if len(countries) == 0:
            print('FAILED: Please write the continents as follows: Africa, Asia, Central-America, Europe, North-America,Oceania, South-America')
        with Pool(cpu_count()) as pool:
            pool.map(country_run,countries,chunksize=1)
    else:
        print('FAILED: Either provide an ISO3 country name or a continent name')
61cffba0eebf31780c12f21faf032f94e065f6a5 | 1,238 | py | Python | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | 4 | 2021-01-20T15:45:35.000Z | 2021-07-09T02:15:31.000Z | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | 6 | 2020-08-02T23:31:07.000Z | 2021-09-22T19:19:50.000Z | offsite/core/utils.py | wh1te909/backup-offsite | 694f773583eb825b44ff20c51598ac9e1106cd32 | [
"MIT"
] | null | null | null | from channels.auth import AuthMiddlewareStack
from knox.auth import TokenAuthentication
from django.contrib.auth.models import AnonymousUser
from channels.db import database_sync_to_async
def KnoxAuthMiddlewareStack(inner):
    """Wrap a Channels application with Knox token authentication on top of
    the standard Django auth middleware stack.

    (PEP 8 / E731: defined with ``def`` instead of assigning a lambda to a
    name, so tracebacks and repr show a proper function name.)
    """
    return KnoxAuthMiddleware(AuthMiddlewareStack(inner))
| 25.265306 | 86 | 0.697092 |
61d14e7bc92cdd86e7f3f92f3039ee396ac2a457 | 6,841 | py | Python | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | unik/indexing.py | balbasty/unik | 7b8b2a0989495eec7bc0db6c672ce904cbcb1063 | [
"MIT"
] | null | null | null | """Access / change tensor shape."""
import tensorflow as tf
import numpy as np
from .magik import tensor_compat
from .alloc import zeros_like
from .types import has_tensor, as_tensor, cast, dtype
from .shapes import shape, reshape, flatten, transpose, unstack
from ._math_for_indexing import cumprod, minimum, maximum
from ._utils import pop
| 35.262887 | 77 | 0.582517 |
61d192d69ecdae0462072ff12464ac90f01f69d0 | 1,478 | py | Python | aleph/views/alerts_api.py | adikadashrieq/aleph | acc03197c10e511a279ae3a05120187223f173d2 | [
"MIT"
] | 1 | 2019-06-18T21:35:59.000Z | 2019-06-18T21:35:59.000Z | aleph/views/alerts_api.py | heartofstone/aleph | d66b6615d2bfa10c291c63754f53b468de8bebde | [
"MIT"
] | null | null | null | aleph/views/alerts_api.py | heartofstone/aleph | d66b6615d2bfa10c291c63754f53b468de8bebde | [
"MIT"
] | null | null | null | from flask import Blueprint, request
from aleph.core import db
from aleph.model import Alert
from aleph.search import DatabaseQueryResult
from aleph.views.forms import AlertSchema
from aleph.views.serializers import AlertSerializer
from aleph.views.util import require, obj_or_404
from aleph.views.util import parse_request
from aleph.views.context import tag_request
blueprint = Blueprint('alerts_api', __name__)
| 31.446809 | 71 | 0.750338 |
61d210a06894e407303586520efa2e44fe445461 | 11,283 | py | Python | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | run.py | Acforest/LogPrompt | 199766cea9988bc6e8b1c71352b090da68bbb71d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to train and evaluate either a regular supervised model or a PET/iPET model on
one of the supported tasks and datasets.
"""
import os
import log
import pet
import torch
import argparse
from pet.config import load_configs
from pet.tasks import PROCESSORS, UNLABELED_SET, TRAIN_SET, DEV_SET, TEST_SET, METRICS, DEFAULT_METRICS, load_examples
from pet.utils import eq_div
from pet.wrapper import WRAPPER_TYPES, MODEL_CLASSES
logger = log.get_logger('root')
if __name__ == "__main__":
main()
| 58.46114 | 119 | 0.660197 |
61d24122d7792980c0b72c95b9dc3ec6c9efd631 | 2,282 | py | Python | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/253384/national_data_science_bowl_2-master/alexcode/code/model.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z |
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
def center_normalize(x):
    """Online sample-wise activation: centre the tensor on its mean and
    scale by its standard deviation."""
    mean = K.mean(x)
    std = K.std(x)
    return (x - mean) / std
| 34.575758 | 82 | 0.659509 |
61d29e48cb817ece86e476bffbf91b00d5532c33 | 8,685 | py | Python | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | BuildDeb.py | KOLANICH/GraalVM_deb_packages_CI | f41786b4daa11efebe24402f5000111137365b4f | [
"Apache-2.0",
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import sys
import struct
import re
import os
from itertools import chain
import warnings
import tarfile
import sh
from tqdm import tqdm
from pydebhelper import *
from getLatestVersionAndURLWithGitHubAPI import getTargets
config = OrderedDict()
config["llvm"] = {
"descriptionLong": "LLVM engine for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/llvm/",
"rip": {
"bin": ["lli"],
"other": ["jre/languages/llvm"]
}
}
config["js"] = {
"descriptionLong": "JavaScript engine & node.js runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/js/",
"rip": {
"bin": ["js", "node", "npm"],
"other": ["jre/languages/js", "jre/lib/graalvm/graaljs-launcher.jar"]
}
}
config["python"] = {
"descriptionLong": "python runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/python/",
"rip": {
"bin": ["graalpython"],
"other": ["jre/languages/python", "jre/lib/graalvm/graalpython-launcher.jar", "LICENSE_GRAALPYTHON", "jre/languages/python/LICENSE_GRAALPYTHON"]
}
}
config["ruby"] = {
"descriptionLong": "ruby runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/ruby/",
"rip": {
"bin": ["truffleruby", "ruby", "bundle", "bundler", "gem", "irb", "rake", "rdoc", "ri"],
"other": ["jre/languages/ruby", "jre/lib/boot/truffleruby-services.jar", "jre/lib/graalvm/truffleruby-launcher.jar", "LICENSE_TRUFFLERUBY.md", "3rd_party_licenses_truffleruby.txt"]
}
}
config["r"] = {
"descriptionLong": "R runtime for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/languages/R/",
"rip": {
"bin": ["R", "Rscript"],
"other": ["jre/languages/R", "LICENSE_FASTR", "3rd_party_licenses_fastr.txt"]
}
}
config["gu"] = {
"descriptionLong": "Package manager for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/graal-updater/",
"rip": {
"bin": ["gu"],
"other": ["jre/lib/installer", "bin/gu"]
}
}
config["polyglot"] = {
"descriptionLong": "Polyglot for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/polyglot/",
"rip": {
"bin": ["polyglot"],
"other": ["jre/lib/polyglot"]
}
}
config["samples"] = {
"descriptionLong": "Example code for GraalVM",
"homepage": "https://www.graalvm.org/",
"rip": {
"other": ["sample"]
}
}
config["visualvm"] = {
"descriptionLong": "VisualVM for GraalVM",
"homepage": "https://www.graalvm.org/docs/reference-manual/tools/#heap-viewer",
"rip": {
"bin": ["jvisualvm"],
"other": ["lib/visualvm"]
}
}
# fd directory of the current process; whitelisted so firejail keeps it accessible
currentProcFileDescriptors = Path("/proc") / str(os.getpid()) / "fd"
# sandboxed (firejail) command runner executing in the foreground
fj = sh.firejail.bake(noblacklist=str(currentProcFileDescriptors), _fg=True)
# sandboxed aria2c download command: resumable, certificate-checked, 16 parallel connections per file
aria2c = fj.aria2c.bake(_fg=True, **{"continue": "true", "check-certificate": "true", "enable-mmap": "true", "optimize-concurrent-downloads": "true", "j": 16, "x": 16, "file-allocation": "falloc"})
# matches GraalVM release tags such as `vm-19.3.0` or `vm-19.3.0-rc1`
vmTagRx = re.compile("^vm-((?:\\d+\\.){2}\\d+(?:-rc\\d+))?$")
# regex fragments used to identify GraalVM CE release titles and linux-amd64 asset file names
vmTitleMarker = "GraalVM Community Edition .+$"
platformMarker = "linux-amd64"
versionFileNameMarker = "[\\w\\.-]+"
releaseFileNameMarker = versionFileNameMarker + "-" + platformMarker
if __name__ == "__main__":
    doBuild()
| 32.773585 | 691 | 0.687737 |
61d2ae9ec01343c7273afc66fcb5912f5895801a | 6,267 | py | Python | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | mergify_engine/utils.py | Madhu-1/mergify-engine | 9ca4f4697cc825230b1584f5587f10393cabc971 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import hashlib
import hmac
import logging
import shutil
import subprocess
import sys
import tempfile
from billiard import current_process
import celery.app.log
import daiquiri
import github
import redis
from mergify_engine import config
LOG = daiquiri.getLogger(__name__)
global REDIS_CONNECTION_CACHE
REDIS_CONNECTION_CACHE = None
global REDIS_CONNECTION_HTTP_CACHE
REDIS_CONNECTION_HTTP_CACHE = None
def unicode_truncate(s, length, encoding="utf-8"):
    """Truncate *s* so that its encoded form fits in at most *length* bytes.

    A multi-byte character cut in half at the boundary is dropped rather
    than raising a decoding error.

    :param s: The string to truncate.
    :param length: The maximum length in number of bytes, not characters.
    :param encoding: Codec used to measure the byte length.
    """
    encoded = s.encode(encoding)
    return encoded[:length].decode(encoding, errors="ignore")
CELERY_EXTRAS_FORMAT = (
"%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s "
"[%(task_id)s] "
"%(name)s%(extras)s: %(message)s%(color_stop)s"
)
github.PullRequest.PullRequest.log = property(GithubPullRequestLog)
def Github(*args, **kwargs):
    """Factory for a PyGithub client bound to the configured GitHub domain.

    Forces ``base_url`` to ``https://api.<GITHUB_DOMAIN>`` so the engine
    also works against non-github.com (Enterprise-style) domains.
    """
    kwargs["base_url"] = "https://api.%s" % config.GITHUB_DOMAIN
    return github.Github(*args, **kwargs)
| 27.977679 | 88 | 0.6322 |
61d440e6d71c032e6b0102e0319c9ad174f35ff4 | 1,750 | py | Python | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | milefrienddb/models/vehicles.py | jcrjaci/mil_test | ed54f55c5aacd8ffd110b7c173422dbd0cac631f | [
"MIT"
] | null | null | null | """Vehicle's app models."""
import uuid
from django.db import models
from .clients import Client
| 38.043478 | 87 | 0.737714 |
61d6182a3cde9be8c7c4791931417d4e0d9e7b55 | 187 | py | Python | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | ejercicio_4.py | Laurardila440/taller-de-secuencias | 9db216d2431661e0777273fc5b8360a316d7dbd2 | [
"Apache-2.0"
] | null | null | null | """
Inputs
    compra (purchase amount) --> float --> c
Outputs
    descuento (15% discount) --> float --> d
"""
c=float(input("digite compra"))
# "black box": apply a flat 15% discount to the purchase amount
d=(c*0.15)
total=(c-d)
# Output: amount to pay after the discount (user-facing text kept in Spanish)
print("el total a pagar es de :"+str(total))
| 14.384615 | 44 | 0.641711 |
61d6aa3833e84422d5bd54157900ea1d35ffca0b | 878 | py | Python | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 429.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | #LeetCode problem 429: N-ary Tree Level Order Traversal
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
| 26.606061 | 61 | 0.544419 |
61d7cc4850de782acf97ce8fd6bae60d5d5eb06f | 544 | py | Python | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | PyhonServer/app-client.py | sixfourtwo/auhack19 | 65b94c6cbdbfbd50e355c12b8ca2792b3b086321 | [
"Apache-2.0"
] | null | null | null | # importing the requests library
import requests
import json
# API endpoint of the local watermarking service
URL = "http://127.0.0.1:80/water_mark"
# Payload sent to the API:
#   "data"      - the content to watermark (picture data in real use)
#   "tagString" - the text to embed into the picture
data = {
    "data":"This is the original text",
    "tagString":" Yesyesyes"
}
PARAMS = json.dumps(data)
rPost = requests.post(url = URL, data = PARAMS) # send it as JSON (comment translated from Danish)
data1 = json.loads(rPost.text)
#print("waterMarked data: " + rPost.text )
print("DATA: \n" + data1["data"])
| 22.666667 | 66 | 0.6875 |
61d90f523acdcf1af2ba8df7242ffe2e2fdeac93 | 9,827 | py | Python | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | 1 | 2018-02-01T05:17:13.000Z | 2018-02-01T05:17:13.000Z | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | null | null | null | memnet.py | 404akhan/memnet | a8cf9e0a480575d9d36de6fa3357f667d64e0b05 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn, autograd
from torch.utils.data import DataLoader
from babi import BabiDataset, pad_collate
from torch.nn.utils import clip_grad_norm
# cuDNN autotuning: pick the fastest kernels for fixed input shapes.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.fastest = True
# Training hyper-parameters for the bAbI question-answering task.
HIDDEN_DIM = 100
BATCH_SIZE = 100
NUM_EPOCHS = 250
LOG_FILE = "memnet.txt"
if __name__ == '__main__':
    # --- setup: dataset, loss, model (CUDA required), optimizer -------------
    dataset = BabiDataset()
    vocab_size = len(dataset.QA.VOCAB)
    criterion = nn.CrossEntropyLoss(size_average=False)
    # RecurrentEntityNetwork is defined elsewhere (not visible in this chunk);
    # 130 is presumably a maximum sequence length -- TODO confirm.
    model = RecurrentEntityNetwork(HIDDEN_DIM, 130, vocab_size)
    model.cuda()
    early_stopping_counter = 0
    best_accuracy = 0
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
    for epoch in range(NUM_EPOCHS):
        # --- training pass ---------------------------------------------------
        dataset.set_mode('train')
        train_loader = DataLoader(
            dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pad_collate
        )
        model.train()
        if early_stopping_counter < 20:
            total_accuracy = 0
            num_batches = 0
            for batch_idx, data in enumerate(train_loader):
                optimizer.zero_grad()
                contexts, questions, answers = data
                contexts = autograd.Variable(contexts.long().cuda())
                questions = autograd.Variable(questions.long().cuda())
                answers = autograd.Variable(answers.cuda())
                outputs = model(contexts, questions)
                # Manual L2 weight penalty (coefficient 0.001) added to the
                # cross-entropy loss.
                l2_loss = 0
                for name, param in model.named_parameters():
                    l2_loss += 0.001 * torch.sum(param * param)
                loss = criterion(outputs, answers) + l2_loss
                predictions = F.softmax(outputs).data.max(1)[1]
                correct = predictions.eq(answers.data).cpu().sum()
                acc = correct * 100. / len(contexts)
                loss.backward()
                clip_grad_norm(model.parameters(), 40)
                total_accuracy += acc
                num_batches += 1
                if batch_idx % 20 == 0:
                    print('[Epoch %d] [Training] loss : %f, acc : %f, batch_idx : %d' % (
                        epoch, loss.data[0], total_accuracy / num_batches, batch_idx
                    ))
                optimizer.step()
            # --- validation pass (only while early stopping has not fired) ---
            dataset.set_mode('valid')
            valid_loader = DataLoader(
                dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
            )
            model.eval()
            total_accuracy = 0
            num_batches = 0
            for batch_idx, data in enumerate(valid_loader):
                contexts, questions, answers = data
                contexts = autograd.Variable(contexts.long().cuda())
                questions = autograd.Variable(questions.long().cuda())
                answers = autograd.Variable(answers.cuda())
                outputs = model(contexts, questions)
                l2_loss = 0
                for name, param in model.named_parameters():
                    l2_loss += 0.001 * torch.sum(param * param)
                loss = criterion(outputs, answers) + l2_loss
                predictions = F.softmax(outputs).data.max(1)[1]
                correct = predictions.eq(answers.data).cpu().sum()
                acc = correct * 100. / len(contexts)
                total_accuracy += acc
                num_batches += 1
            total_accuracy = total_accuracy / num_batches
            # Track the best validation accuracy; reset the early-stopping
            # counter on improvement, otherwise let it grow toward 20.
            if total_accuracy > best_accuracy:
                best_accuracy = total_accuracy
                best_state = model.state_dict()
                early_stopping_counter = 0
            else:
                early_stopping_counter += 1
            print('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy))
            with open(LOG_FILE, 'a') as fp:
                fp.write('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy) + '\n')
            if total_accuracy == 1.0:
                break
        else:
            print('Early Stopping at Epoch %d, Valid Accuracy : %f' % (epoch, best_accuracy))
            break
    # --- test pass -----------------------------------------------------------
    dataset.set_mode('test')
    test_loader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
    )
    test_acc = 0
    num_batches = 0
    for batch_idx, data in enumerate(test_loader):
        contexts, questions, answers = data
        contexts = autograd.Variable(contexts.long().cuda())
        questions = autograd.Variable(questions.long().cuda())
        answers = autograd.Variable(answers.cuda())
        # NOTE(review): this updates a temporary copy returned by
        # state_dict() and has no effect on the live model; the intent was
        # probably model.load_state_dict(best_state) -- confirm and fix.
        model.state_dict().update(best_state)
        outputs = model(contexts, questions)
        l2_loss = 0
        for name, param in model.named_parameters():
            l2_loss += 0.001 * torch.sum(param * param)
        loss = criterion(outputs, answers) + l2_loss
        predictions = F.softmax(outputs).data.max(1)[1]
        correct = predictions.eq(answers.data).cpu().sum()
        acc = correct * 100. / len(contexts)
        test_acc += acc
        num_batches += 1
    print('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches))
    with open(LOG_FILE, 'a') as fp:
fp.write('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches) + '\n') | 37.083019 | 110 | 0.623792 |
61d93349709f00bb603d8566d8afdb83080026fb | 3,444 | py | Python | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 11 | 2019-10-14T02:05:38.000Z | 2022-03-10T14:10:22.000Z | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 29 | 2019-09-02T05:49:40.000Z | 2022-02-26T00:57:54.000Z | tests/test_tba.py | StanfordAHA/Lake | 34df001db107e1a0824b7fdb05b9f2145bf49a3e | [
"BSD-3-Clause"
] | 1 | 2021-04-16T20:26:13.000Z | 2021-04-16T20:26:13.000Z | from lake.models.tba_model import TBAModel
from lake.modules.transpose_buffer_aggregation import TransposeBufferAggregation
from lake.passes.passes import lift_config_reg
import magma as m
from magma import *
import fault
import tempfile
import kratos as k
import random as rand
import pytest
# Allow running this test module directly (test_tba is defined above in the
# full file; not visible in this chunk).
if __name__ == "__main__":
    test_tba()
| 29.947826 | 94 | 0.594948 |
61da398102287561106f2583dbf3dd6a0d400ea3 | 1,442 | py | Python | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | 1 | 2021-06-11T17:24:05.000Z | 2021-06-11T17:24:05.000Z | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | null | null | null | 2018/02/py/run.py | Bigsby/aoc | 409fefbb0467628fa298288064acb622bb53ee58 | [
"CC0-1.0"
] | null | null | null | #! /usr/bin/python3
import sys, os, time
from typing import List, Tuple
from itertools import combinations
if __name__ == "__main__":
main() | 25.75 | 72 | 0.615811 |
61da655b21d56bf52e1b1c392472699b90dc9b53 | 275 | py | Python | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | text_directions.py | ErnestoPena/Game-Of-Life.-Python | 046bca4ae1c69e08efed4f45057b097653410ec8 | [
"MIT"
] | null | null | null | import pygame | 45.833333 | 122 | 0.632727 |
61db9a4dde565ed6cc9ccd45b6a858a56b15b618 | 515 | py | Python | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | 1 | 2015-11-05T04:35:07.000Z | 2015-11-05T04:35:07.000Z | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | null | null | null | brummi/config.py | fredreichbier/brummi | d833e6dc0b74c8bddea225c785b3cba463b13ecd | [
"MIT"
] | null | null | null | import pkg_resources
from . import BrummiRepository
DEFAULTS = {
'templates': pkg_resources.resource_filename('brummi', 'templates'),
'out_path': 'docs',
}
| 23.409091 | 72 | 0.615534 |
61de22931c74120ebd50c4d032782c041e459df7 | 730 | py | Python | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | kittiground/__init__.py | JeremyBYU/polylidar-kitti | bfad0dc4a74e136d2841dccf3ccc05d982f18a8e | [
"MIT"
] | null | null | null | from pathlib import Path
import numpy as np
# Location of this module and its package directory; used to resolve the
# bundled default configuration file.
THIS_FILE = Path(__file__)
THIS_DIR = THIS_FILE.parent
DEFAULT_CONFIG_FILE = THIS_DIR / 'config' / 'default.yaml'
# Width/height of the visual screens
IMG_WIDTH = 1242
IMG_HEIGHT = 375
# INTRINSICS = np.array([[649.51905284, 0.00000000, 620.50000000],
#                        [0.00000000, 649.51905284, 374.50000000],
#                        [0.00000000, 0.00000000, 1.00000000]])
# 4x4 homogeneous transform; presumably the camera extrinsics used by the
# visualization -- TODO confirm the frame convention against the caller.
EXTRINSICS = np.array([[0.99960128, 0.00806920, -0.02705864, -0.07041882],
                       [-0.01559983, -0.64094650, -0.76742702, 7.50137898],
                       [-0.02353566, 0.76754314, -0.64056507, 8.23519670],
                       [0.00000000, 0.00000000, 0.00000000, 1.00000000]])
| 38.421053 | 75 | 0.610959 |
61df694948c2ba5c7d34c79e97268eab5f090a30 | 3,272 | py | Python | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | 23 | 2015-08-25T12:31:44.000Z | 2021-12-15T03:18:12.000Z | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | null | null | null | palette/core/color_transfer.py | SuziKim/PaletteSelection | cfc0052996b5c8dc1da2d6e30798dd1fed138ebe | [
"MIT"
] | 7 | 2017-07-27T10:57:36.000Z | 2022-02-22T06:51:44.000Z | # -*- coding: utf-8 -*-
## @package palette.core.color_transfer
#
# Color transfer.
# @author tody
# @date 2015/09/16
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
from palette.core.lab_slices import LabSlice, LabSlicePlot, Lab2rgb_py
## Color transfer for ab coordinates.
## Simple plotter for ABTransfer.
| 37.181818 | 93 | 0.652812 |
61dfafddb5a99f013e5962a29c6779ac49a5f150 | 1,447 | py | Python | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | null | null | null | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | 1 | 2020-07-04T16:27:25.000Z | 2020-07-04T16:27:25.000Z | CursoEmVideoPython/desafio95.py | miguelabreuss/scripts_python | cf33934731a9d1b731672d4309aaea0a24ae151a | [
"MIT"
] | null | null | null | scoult = dict()
# Scouting tool: collect per-match goal counts for several players, then
# print a summary table and per-player details. User-facing strings are in
# Portuguese and some carry pre-existing encoding damage (e.g. the key
# 'Nmero partidas', originally 'Número partidas'); they must stay as-is
# because they are also used as dictionary keys. `scoult` is initialised
# just above this chunk.
gols = list()
time = list()  # list of player dicts ("time" = Portuguese for "team")
temp = 0
while True:
    scoult['Jogador'] = str(input('Qual o nome do jogador: '))
    scoult['Nmero partidas'] = int(input('Quantas partidas foram jogadas? '))
    for i in range(0,scoult['Nmero partidas']):
        gols.append(int(input(f'Quantos gols foram marcados na partida {i+1} de {scoult["Jogador"]}? ')))
    scoult['Gols'] = gols[:]
    # Accumulate the goals of every match into 'Total de gols'.
    for i in range(0,scoult['Nmero partidas']):
        if i == 0:
            scoult['Total de gols'] = gols[i]
        else:
            scoult['Total de gols'] += gols[i]
    time.append(scoult.copy())
    gols.clear()
    # NOTE(review): an empty answer also satisfies `in 'Nn'` and ends the loop.
    if str(input('Deseja continuar [S/N]? ')) in 'Nn':
        break
print('-' * 50)
print('-' * 50)
print('{:^50}'.format('TABELO PERFORMANCE'))
print('-' * 50)
print('{:<5}{:<15}{:<25}{:<5}'.format('cod', 'Jogador', 'Gols', 'Total'))
for e in time:
    print('{:<5}{:<15}{:<25}{:<5}'.format(temp, e['Jogador'], str(e['Gols']), e['Total de gols']))
    temp += 1
print('-' * 50)
# Interactive drill-down: 999 quits, any other code indexes into `time`.
while True:
    temp = int(input('De aual jogador voc deseja mais detalhes? [cod] 999 p/ sair. '))
    if temp == 999:
        break
    else:
        print(f'-- Performance do jogador: {time[temp]["Jogador"]}')
        for i in range(0,time[temp]["Nmero partidas"]):
            print(f' => Na partida {i+1} {time[temp]["Jogador"]} marcou {time[temp]["Gols"][i]} vez(es).')
        print(f'Foi um total de {time[temp]["Total de gols"]} gols')
61dfc58457362e0a41be0f73d8c2ed155141035c | 428 | py | Python | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | 1 | 2020-10-20T07:41:51.000Z | 2020-10-20T07:41:51.000Z | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | Section07_Bridge/script-Bridge.py | enriqueescobar-askida/Kinito.Python | e4c5521e771c4de0ceaf81776a4a61f7de01edb4 | [
"MIT"
] | null | null | null | # Circles and squares
# Each can be rendered in vector or raster form
from Section07_Bridge.Brigde.Circle import Circle
from Section07_Bridge.Brigde.RasterRenderer import RasterRenderer
from Section07_Bridge.Brigde.VectorRenderer import VectorRenderer
if __name__ == '__main__':
raster = RasterRenderer()
vector = VectorRenderer()
circle = Circle(vector, 5)
circle.draw()
circle.resize(2)
circle.draw()
| 30.571429 | 65 | 0.766355 |
61e1ff665914cfb40790ee569edb6f9cb201dad5 | 3,668 | py | Python | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/On-Policy/A2C/DISCOVER_A2C.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import init
| 37.050505 | 104 | 0.638768 |
61e3abea3e991562a75549fe727c93817d1999de | 3,400 | py | Python | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | user/beaninfo_Global.py | dvdrm/gd | c004724344577bb608fa0611d10c16b211995f72 | [
"Apache-2.0"
] | null | null | null | from telethon import events, Button
from .login import user
from .. import jdbot
from ..bot.utils import cmd, TASK_CMD,split_list, press_event
from ..diy.utils import read, write
import asyncio
import re
| 33.009709 | 134 | 0.516471 |
61e4ce6929b3cf5b02ae69957ea9065425f62a24 | 2,409 | py | Python | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | null | null | null | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | null | null | null | samples/boxcheck.py | cg2v/box_requests | 024141a263c8918962957b75d208c532c7b853f0 | [
"Apache-2.0"
] | 1 | 2018-10-23T15:48:00.000Z | 2018-10-23T15:48:00.000Z | #!/usr/bin/python
import box_requests
import requests
import os
import sys
import time
import socket
import optparse
import logging
# NOTE: Python 2 source (print statement below).
# Command-line options: -v prints the resolved username, -d enables debug logs.
o=optparse.OptionParser()
o.add_option('-v', '--verbose', action="store_true", dest="verbose",
             default=False, help="Display username on success")
o.add_option('-d', '--debug', action="store_true", dest="debug",
             default=False, help="Enable debug logging")
# Root logger writes to stderr with a verbose, location-aware format.
rl=logging.getLogger()
sh=logging.StreamHandler(sys.stderr)
fm=logging.Formatter("%(asctime)s %(name)s [%(levelname)s] %(filename)s:%(lineno)d:%(funcName)s %(message)s")
sh.setFormatter(fm)
rl.addHandler(sh)
(options, args) = o.parse_args()
if options.debug:
    sh.setLevel(logging.DEBUG)
    rl.setLevel(logging.DEBUG)
# Probe the Box API with the stored token; any failure is routed through
# logfailure()/checktime() (defined elsewhere in this file; not visible here).
try:
    with box_requests.boxsession("/var/local/box/boxtoken.dat") as bs:
        ok=False
        try:
            resp=bs.request("GET", "/2.0/users/me")
        except requests.ConnectionError:
            # Only report once the problem has persisted for several days.
            if not checktime("/var/local/box/boxtoken.dat", 7):
                logfailure("Some sort of network problem occured, and has prevented the refresh\nprocess for several days.")
        except requests.Timeout:
            if not checktime("/var/local/box/boxtoken.dat", 7):
                logfailure("Some sort of network problem occured, and has prevented the refresh\nprocess for several days.")
        except ValueError:
            logfailure("This failure seems to be due to a programming error")
        except box_requests.BoxAPIError:
            logfailure("Box rejected the credentials, they may already be invalid")
        except:
            # Catch-all: report an unclassified failure.
            logfailure(None)
        else:
            if options.verbose:
                print "Current user is {0}".format(resp["login"])
except OSError:
    logfailure("The credentials are missing or could not be loaded")
except box_requests.BoxTokenError:
    logfailure("The credentials are missing or could not be loaded")
finally:
    rl.removeHandler(sh)
| 33.929577 | 120 | 0.688252 |
61e6e408c9d358e1ba90de75b214eb2a33ce5303 | 2,663 | py | Python | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | sourcecode/src/vx/lha/Main.py | ivarvb/LHA | b3b7613180d533468edf762195922b73c70c525c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Ivar
"""
from Description import *
from Classification import *
if __name__ == "__main__":
    # NOTE(review): these two assignments are dead -- they are overwritten
    # below (after `template`) with the dataset_2 paths.
    inputdir = "../../../../data/LHA/dataset_1"
    outputdir = inputdir+"/csv/exp/"+Util.now()
    # Feature-extraction jobs: RAD descriptors over segmentation windows and
    # LBP descriptors (radius 5 and 10) over masks, each at tile sizes
    # 100/200/300; every job erodes the mask by 30 first.
    template = [
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100},
            "erode":[30]
        },
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200},
            "erode":[30]
        },
        {
            "name":"RAD",
            "imagedir":"images_cleaned",
            "maskdir":"seg/seg_window",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300, "radius":5},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"100",
            "parameters":{"tile_size":100, "radius":10},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"200",
            "parameters":{"tile_size":200, "radius":10},
            "erode":[30]
        },
        {
            "name":"LBP",
            "imagedir":"images_cleaned",
            "maskdir":"masks",
            "masksubsetdir":"300",
            "parameters":{"tile_size":300, "radius":10},
            "erode":[30]
        },
    ]
    inputdir = "../../../../data/LHA/dataset_2"
    outputdir = inputdir+"/csv/exp/"+Util.now()
    # Run feature extraction then classification; both come from the star
    # imports above (Util.now() presumably timestamps the output directory).
    Description.start(inputdir, outputdir, template)
    Classification.start(outputdir, outputdir)
    print("Complete in {}".format(outputdir))
| 23.156522 | 56 | 0.437852 |
61e6fadc19dca2b7aaa1c0e67b41806d94ed6219 | 12,263 | py | Python | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | 6 | 2021-10-21T14:13:25.000Z | 2021-12-26T12:22:51.000Z | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | null | null | null | pyemits/core/ml/regression/trainer.py | thompson0012/PyEmits | 9cb6fbf27ca7e8952ed5aca26118055e04492c23 | [
"Apache-2.0"
] | null | null | null | from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet, Ridge, Lasso, BayesianRidge, HuberRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from pyemits.core.ml.base import BaseTrainer, BaseWrapper, NeuralNetworkWrapperBase
from pyemits.common.config_model import BaseConfig, KerasSequentialConfig, TorchLightningSequentialConfig
from pyemits.common.data_model import RegressionDataModel
from pyemits.common.py_native_dtype import SliceableDeque
from pyemits.common.validation import raise_if_value_not_contains
from typing import List, Dict, Optional, Union, Any
from pyemits.core.ml.regression.nn import TorchLightningWrapper
RegModelContainer = {
'RF': RandomForestRegressor,
'GBDT': GradientBoostingRegressor,
# 'HGBDT': HistGradientBoostingRegressor,
'AdaBoost': AdaBoostRegressor,
'MLP': MLPRegressor,
'ElasticNet': ElasticNet,
'Ridge': Ridge,
'Lasso': Lasso,
'BayesianRidge': BayesianRidge,
'Huber': HuberRegressor,
'XGBoost': XGBRegressor,
'LightGBM': LGBMRegressor
}
| 40.471947 | 125 | 0.644133 |
61e7231e5da397e138846e32322894665e310b28 | 7,092 | py | Python | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | 2 | 2018-12-21T12:55:21.000Z | 2019-05-29T06:35:58.000Z | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | 8 | 2019-12-16T21:08:36.000Z | 2021-03-31T18:58:35.000Z | network_core/network_graph.py | markusgl/SocialCompanion | e816af21c600b33dbcac25d088d4d75957d0349a | [
"MIT"
] | null | null | null | """
knowledge graph representation using neo4j
this class uses py2neo with will be the final version
"""
import os
import json
from py2neo import Graph, Relationship, NodeMatcher, Node
from network_core.ogm.node_objects import Me, Contact, Misc
USERTYPE = "User"
CONTACTTYPE = "Contact"
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
relationships = {'freund': 'FRIEND',
'schwester': 'SISTER',
'bruder': 'BROTHER',
'mutter': 'MOTHER',
'vater': 'FATHER',
'tochter': 'DAUGHTER',
'sohn': 'SON',
'enkel': 'GRANDCHILD',
'enkelin': 'GRANDCHILD'}
| 30.437768 | 129 | 0.563593 |
61e83a0c30e6a67dbfeb574d16e0f027af82160b | 538 | py | Python | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | 1 | 2021-08-03T17:34:31.000Z | 2021-08-03T17:34:31.000Z | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | randutils/lists.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | import numpy as np
from .chance import by_chance
from .exceptions import EmptyListError
| 18.551724 | 53 | 0.689591 |
61ea28b84ee81d7761635919c06d71cde4b781c4 | 2,355 | py | Python | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | src/train_and_evaluate.py | rajeevteejwal/mlops_wine_quality | 970ce27712932ca535309230da69fc5c29d82c38 | [
"MIT"
] | null | null | null | import os
import pandas as pd
from sklearn.linear_model import ElasticNet
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
import argparse
import numpy as np
import json
import joblib
from get_data import read_config
# CLI entry point: read the config path (default params.yaml) and run the
# pipeline (train_and_evaluate is defined above in the full file; not
# visible in this chunk).
if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument("--config",default="params.yaml")
    parsed_args = args.parse_args()
    train_and_evaluate(config_path=parsed_args.config)
| 31.824324 | 77 | 0.675159 |
61ebdb6920b4b4c3e3a8b0b2f9c1a74ed61083fb | 961 | py | Python | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | examples/plot_magnitudes.py | zsiciarz/pygcvs | ed5522ab9cf9237592a6af7a0bc8cad079afeb67 | [
"MIT"
] | null | null | null | """
Visualisation of maximum/minimum magnitude for GCVS stars.
"""
import sys
import matplotlib.pyplot as plot
from pygcvs import read_gcvs
if __name__ == '__main__':
    # The GCVS catalogue file (iii.dat) is the single CLI argument.
    try:
        gcvs_file = sys.argv[1]
    except IndexError:
        print('Usage: python plot_magnitudes.py <path to iii.dat>')
    else:
        min_magnitudes = []
        max_magnitudes = []
        for star in read_gcvs(gcvs_file):
            # Keep only stars with both magnitudes present.
            # NOTE(review): a magnitude of exactly 0.0 is falsy and would be
            # skipped too -- an explicit `is not None` check may be intended.
            if star['min_magnitude'] and star['max_magnitude']:
                min_magnitudes.append(star['min_magnitude'])
                max_magnitudes.append(star['max_magnitude'])
        plot.title('GCVS variable star magnitudes')
        plot.plot(min_magnitudes, max_magnitudes, 'ro')
        plot.xlabel('Min magnitude')
        plot.ylabel('Max magnitude')
        # invert axes because brightest stars have lowest magnitude value
        plot.gca().invert_xaxis()
        plot.gca().invert_yaxis()
        plot.savefig('magnitudes.png')
| 30.03125 | 73 | 0.64204 |
61ebe9703928c7c7be701af932bf4a612970dd3f | 382 | py | Python | InvenTree/stock/migrations/0028_auto_20200421_0724.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 656 | 2017-03-29T22:06:14.000Z | 2022-03-30T11:23:52.000Z | InvenTree/stock/migrations/0028_auto_20200421_0724.py | ArakniD/InvenTree | 0ebf2ebd832b2d736e895abe054ca56bfd1cc477 | [
"MIT"
] | 1,545 | 2017-04-10T23:26:04.000Z | 2022-03-31T18:32:10.000Z | InvenTree/stock/migrations/0028_auto_20200421_0724.py | fablabbcn/InvenTree | 1d7ea7716cc96c6ffd151c822b01cd1fb5dcfecd | [
"MIT"
] | 196 | 2017-03-28T03:06:21.000Z | 2022-03-28T11:53:29.000Z | # Generated by Django 3.0.5 on 2020-04-21 07:24
from django.db import migrations
| 20.105263 | 48 | 0.604712 |
61ec2ee4a4b5c284984cd0be3baf3b3ee50702c4 | 1,595 | py | Python | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | weideshop/urls.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | [
"BSD-2-Clause"
] | null | null | null | """weideshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from weideshop.products.views import CatalogueListView,CatalogueDetailView
from weideshop.public.views import IndexView
# Root URL routing table for the weideshop project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'^$',IndexView.as_view(), name='home'),  # landing page
    url(r'^catalog/$', CatalogueListView.as_view(), name='catalogue'),  # product catalogue
    # url(r'^catalog/(?P<product_slug>[-\w]+)/$', CatalogueDetailView.as_view(), name='detail'),
    url(r'^category/', include('weideshop.products.urls', namespace='products-app', app_name='products')),
]
# Serve static and media files through Django (effective in development).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Mount django-debug-toolbar only when DEBUG is on.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
] | 37.093023 | 106 | 0.717241 |
61ed3298ce258d1708cb601b97ca2bb3d32448c9 | 18,023 | py | Python | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | 1 | 2020-01-02T04:31:11.000Z | 2020-01-02T04:31:11.000Z | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | null | null | null | netor/tinydb/scripts/netorconf.py | aegiacometti/neto | 4169a93a4d789facfe9a41d214b1a6c15e8f2fb9 | [
"Apache-2.0"
] | 1 | 2021-02-23T04:34:48.000Z | 2021-02-23T04:34:48.000Z | #!/usr/bin/env python3
import os
import sys
import configparser
import fileinput
import netorlogging
import datetime
from shutil import copyfile
def _netor_config():
    """
    Update the Netor home directory in the configuration files and scripts.

    Useful when keeping two independent Netor installations in separate
    directories. If the directory pointed to by ``$NETOR`` exists and the
    user confirms it, the ``netor_home_directory`` value in
    ``netor/netor.config`` is rewritten and the Ansible configuration plus
    the TinyDB helper scripts are updated as well:

    # netor/tinydb/scripts/listdb.py
    # netor/tinydb/scripts/pushcustdb.py
    # netor/tinydb/scripts/worker.py
    # netor/tinydb/scripts/switchdb.py

    It also updates the ``hosts_file`` variable in the following bash scripts:

    # bin/netor-ping
    # bin/netor-traceroute

    :return: nothing (most paths terminate the process via ``sys.exit``)
    """
    _NETOR_HOME_DIRECTORY = os.getenv('NETOR')
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    netor_config_path_name = _NETOR_HOME_DIRECTORY + "netor/netor.config"
    config.read(netor_config_path_name)
    # When $NETOR points at an existing directory, ask whether to keep it.
    if os.path.isdir(_NETOR_HOME_DIRECTORY):
        answer = input("\nDefault \"$NETOR/netor\" directory found at:\n" + str(_NETOR_HOME_DIRECTORY) +
                       "\nDo you want to keep it (y/n): ").lower()
        if answer == "y":
            print("Keeping same configuration\n")
            try:
                # Persist the confirmed home directory back into the config.
                config['Netor']['netor_home_directory'] = _NETOR_HOME_DIRECTORY
            except KeyError:
                # Section missing: the configuration skeleton was never cloned.
                print("\nConfiguration files do no exist, clone the previous directory before start the changes\n")
                sys.exit(1)
            with open(netor_config_path_name, 'w') as configfile:
                config.write(configfile)
            _update_ansible(_NETOR_HOME_DIRECTORY)
            tinydb_log_file = config['TinyDB']['tinydb_log_file']
            # _update_config is defined elsewhere in this module (not visible
            # in this chunk); it patches the helper scripts listed above.
            _update_config(tinydb_log_file, __file__, _NETOR_HOME_DIRECTORY)
            sys.exit()
        elif answer == "n":
            # Changing the directory requires editing $NETOR and restarting.
            print('If you want to change the $NETOR directory, you must first update the $NETOR environment variable')
            print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
            print('NETOR=\"/my/dir/netor/\"')
            print('Restart the system and execute this script again')
        else:
            # The "/n" typo is in the original user-facing string; left as-is.
            print("Invalid option/n")
            sys.exit()
    else:
        print("\nDefault \"$NETOR/netor\" NOT found")
        print('Set $NETOR environment value by adding/changing the line at the end of the file /etc/environment')
        print('NETOR=\"/my/dir/netor/\"')
        print('Restart the system and execute this script again')
def _update_ansible(netor_home_directory):
    """
    Point the user's Ansible configuration at the Netor inventory.

    Rewrites ``~/.ansible.cfg`` in place through
    ``replace_static_vars_scripts`` (defined elsewhere in this module; not
    visible in this chunk): sets the inventory path to
    ``<home>netor/ansible/hosts`` and enables the paramiko transport with
    relaxed host-key checking.

    :param netor_home_directory: Netor home directory used in the rewritten paths
    :return: nothing
    """
    ansible_config_file = os.environ['HOME'] + '/.ansible.cfg'
    # Handle a still-commented "#inventory" line as well as an active one
    # (second call below).
    replace_static_vars_scripts(ansible_config_file, '#inventory ', '= ' + netor_home_directory +
                                'netor/ansible/hosts', '', '')
    replace_static_vars_scripts(ansible_config_file, 'transport', ' = paramiko', '', '')
    replace_static_vars_scripts(ansible_config_file, 'host_key_auto_add', ' = True', '', '')
    replace_static_vars_scripts(ansible_config_file, 'host_key_checking', ' = False', '', '')
    replace_static_vars_scripts(ansible_config_file, 'inventory = ', netor_home_directory +
                                'netor/ansible/hosts', '', '')
    print('\nNetor home directory replaced in Ansible.')
def _backup_filename(new_netor_home_directory, filename):
"""
Create a backup of the specified configuration file
:param new_netor_home_directory: it is the actual new Neto home directory to be updated on files
:param filename: file name to backup
:return: nothing
"""
print('\nBacking up ' + filename + ' to ' + new_netor_home_directory + 'netor/salt/backup/')
source = new_netor_home_directory + 'netor/salt/config/' + filename
destination = new_netor_home_directory + 'netor/salt/backup/' + filename + "_" + \
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
copyfile(source, destination)
def _create_master_config_file(new_netor_home_directory, filename):
    """
    Create a new Salt master configuration file from scratch.

    The file is written to <home>/netor/salt/config/<filename> and every
    embedded path is rebuilt from the supplied Netor home directory.
    Any existing file is truncated.

    :param new_netor_home_directory: Netor home directory used to build the file paths
    :param filename: name of the configuration file to create
    :return: nothing
    """
    full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
    # '+w' creates/truncates the file for reading and writing.
    file = open(full_path_filename, '+w')
    file.write('# for salt-sproxy\n')
    file.write('use_existing_proxy: true\n')
    file.write('##### Large-scale tuning settings #####\n')
    file.write('##########################################\n')
    file.write('#max_open_files: 100000\n')
    file.write('\n')
    # Open mode / auto-accept: insecure but convenient defaults for Netor.
    file.write('##### Security settings #####\n')
    file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
    file.write('# authentication, this is only intended for highly secure environments or for\n')
    file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
    file.write('# you do so at your own risk!\n')
    file.write('open_mode: True\n')
    file.write('\n')
    file.write('# Enable auto_accept, this setting will automatically accept all incoming\n')
    file.write('# public keys from the minions. Note that this is insecure.\n')
    file.write('auto_accept: True\n')
    file.write('\n')
    file.write('# The path to the master\'s configuration file.\n')
    file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/master\n')
    file.write('\n')
    file.write('# Directory used to store public key data:\n')
    file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/master\n')
    file.write('\n')
    file.write('##### File Server settings #####\n')
    file.write('file_roots:\n')
    file.write('  base:\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
    file.write('\n')
    file.write('##### Pillar settings #####\n')
    file.write('pillar_roots:\n')
    file.write('  base:\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/states/\n')
    # Slack engine block: the token placeholder must be filled in by the user.
    file.write('engines:\n')
    file.write('  - slack:\n')
    file.write('      token: YOUR-TOKEN-GOES-HERE\n')
    file.write('      control: true\n')
    file.write('      fire_all: False\n')
    file.write('######## CREATE YOUR OWN POLICIES FOR COMMAND PERMISSIONS ########\n')
    file.write('      groups:\n')
    file.write('        default:\n')
    file.write('          users:\n')
    file.write('            - \'*\'\n')
    file.write('          commands:\n')
    file.write('            - \'*\'\n')
    file.close()
def _update_master_config_file(new_netor_home_directory, filename):
    """
    Refresh the Salt master configuration file.

    The current file is first backed up and then regenerated from scratch.

    :param new_netor_home_directory: directory tree holding the file
    :param filename: name of the configuration file
    :return: nothing
    """
    # Preserve the current file before it is overwritten.
    _backup_filename(new_netor_home_directory, filename)
    # TODO: update the existing file in place instead of recreating it.
    _create_master_config_file(new_netor_home_directory, filename)
def _create_minion_config_file(new_netor_home_directory, filename):
"""
Create Salt minion configuration file.
:param new_netor_home_directory: Location where the file will be located
:param filename: file name
:return: nothing
"""
full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
file = open(full_path_filename, '+w')
file.write('##### Primary configuration settings #####\n')
file.write('master: localhost\n')
file.write('\n')
file.write('# The path to the minion\'s configuration file.\n')
file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/minion\n')
file.write('# The directory to store the pki information in\n')
file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/minion\n')
file.write('\n')
file.write('##### File Directory Settings #####\n')
file.write('file_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'neto/salt/config/pillar/states/\n')
file.write('\n')
file.write('pillar_roots:\n')
file.write(' base:\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
file.write(' - ' + new_netor_home_directory + 'netor/salt/config/pillar/ states /\n')
file.write('\n')
file.write('###### Security settings #####\n')
file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
file.write('# authentication, this is only intended for highly secure environments or for\n')
file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
file.write('# you do so at your own risk!\n')
file.write('open_mode: True\n')
file.close()
def _update_minion_config_file(new_netor_home_directory, filename):
    """
    Refresh the Salt minion configuration file.

    The current file is first backed up and then regenerated from scratch.

    :param new_netor_home_directory: directory tree holding the file
    :param filename: name of the configuration file
    :return: nothing
    """
    # Preserve the current file before it is overwritten.
    _backup_filename(new_netor_home_directory, filename)
    # TODO: update the existing file in place instead of recreating it.
    _create_minion_config_file(new_netor_home_directory, filename)
def _create_proxy_config_file(new_netor_home_directory, filename):
    """
    Create a new Salt proxy configuration file from scratch.

    The file is written to <home>/netor/salt/config/<filename> and every
    embedded path is rebuilt from the supplied Netor home directory.

    :param new_netor_home_directory: Netor home directory used to build the file paths
    :param filename: name of the configuration file to create
    :return: nothing
    """
    full_path_filename = new_netor_home_directory + 'netor/salt/config/' + filename
    # '+w' creates/truncates the file for reading and writing.
    file = open(full_path_filename, '+w')
    file.write('##### Primary configuration settings #####\n')
    file.write('\n')
    file.write('master: localhost\n')
    file.write('conf_file: ' + new_netor_home_directory + 'netor/salt/config/proxy\n')
    # Salt mine: publish these network facts from the proxied devices.
    file.write('mine_enabled: true # not required, but nice to have\n')
    file.write('mine_functions:\n')
    file.write('  net.ipaddrs: []\n')
    file.write('  net.lldp: []\n')
    file.write('  net.mac: []\n')
    file.write('  net.arp: []\n')
    file.write('  net.interfaces: []\n')
    file.write('mine_interval: 5\n')
    file.write('\n')
    file.write('###### Thread settings #####\n')
    file.write('multiprocessing: false\n')
    file.write('\n')
    file.write('##### File Directory Settings #####\n')
    file.write('file_roots:\n')
    file.write('  base:\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
    file.write('pillar_roots:\n')
    file.write('  base:\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/\n')
    file.write('    - ' + new_netor_home_directory + 'netor/salt/config/pillar/\n')
    file.write('\n')
    file.write('###### Security settings #####\n')
    file.write('###########################################\n')
    file.write('# Enable "open mode", this mode still maintains encryption, but turns off\n')
    file.write('# authentication, this is only intended for highly secure environments or for\n')
    file.write('# the situation where your keys end up in a bad state. If you run in open mode\n')
    file.write('# you do so at your own risk!\n')
    file.write('open_mode: True\n')
    file.write('# The directory to store the pki information in\n')
    file.write('pki_dir: ' + new_netor_home_directory + 'netor/salt/config/pki/proxy # not required - this separates '
               'the proxy keys into a different directory\n')
    file.close()
def _update_proxy_config_file(new_netor_home_directory, filename):
    """
    Refresh the Salt proxy configuration file.

    The current file is first backed up and then regenerated from scratch.

    :param new_netor_home_directory: directory tree holding the file
    :param filename: name of the configuration file
    :return: nothing
    """
    # Preserve the current file before it is overwritten.
    _backup_filename(new_netor_home_directory, filename)
    # TODO: update the existing file in place instead of recreating it.
    _create_proxy_config_file(new_netor_home_directory, filename)
def _file_update_redirect(new_netor_home_directory, filename):
    """
    Dispatch a configuration-file update to the matching Salt handler.

    Exits the process with status 1 when the filename matches none of the
    known Salt roles.

    :param new_netor_home_directory: directory tree holding the files
    :param filename: file name to update
    :return: nothing
    """
    # Checked in this order, mirroring master -> minion -> proxy.
    handlers = (
        ('master', _update_master_config_file),
        ('minion', _update_minion_config_file),
        ('proxy', _update_proxy_config_file),
    )
    for token, handler in handlers:
        if token in filename:
            handler(new_netor_home_directory, filename)
            return
    print('\nError while checking Salt master, minion and proxy configuration files')
    sys.exit(1)
def _file_create_redirect(new_netor_home_directory, filename):
    """
    Dispatch a configuration-file creation to the matching Salt handler.

    Exits the process with status 1 when the filename matches none of the
    known Salt roles.

    :param new_netor_home_directory: Netor home directory where the file is created
    :param filename: file name to create
    :return: nothing
    """
    # Checked in this order, mirroring master -> minion -> proxy.
    creators = (
        ('master', _create_master_config_file),
        ('minion', _create_minion_config_file),
        ('proxy', _create_proxy_config_file),
    )
    for token, creator in creators:
        if token in filename:
            creator(new_netor_home_directory, filename)
            return
    print('\nError while checking Salt master, minion and proxy configuration files')
    sys.exit(1)
def _create_update_master_minion_proxy(new_netor_home_directory, filename):
    """
    Update a Salt configuration file, or create it when it does not exist.

    :param new_netor_home_directory: Netor home directory used in the process
    :param filename: configuration file name ('master', 'minion' or 'proxy')
    :return: nothing
    """
    # NOTE(review): the existence check looks in 'netor/salt/' while the
    # create/update helpers operate on 'netor/salt/config/' - confirm this
    # path difference is intended.
    existing_file = new_netor_home_directory + 'netor/salt/' + filename
    action = _file_update_redirect if os.path.isfile(existing_file) else _file_create_redirect
    action(new_netor_home_directory, filename)
def _update_config(tinydb_log_file, __file__, new_netor_home_directory):
    """
    Regenerate the Salt master, minion and proxy configuration files and
    print the manual follow-up steps the operator still has to perform.

    :param tinydb_log_file: filename that receives the log message on completion
    :param __file__: name of the script sending the message to the log
    :param new_netor_home_directory: the new Netor home directory to apply
    :return: nothing
    """
    for config_name in ('master', 'minion', 'proxy'):
        _create_update_master_minion_proxy(new_netor_home_directory, config_name)
    print('\nNetor home directory replaced in salt master, minion and proxy.')
    print("\nAdd or modified if necessary " + new_netor_home_directory + "bin to your .profile")
    print("  vi $HOME/.profile")
    print("  PATH=\"$PATH:" + new_netor_home_directory + "bin\n")
    print("\nAdd or modified if necessary " + new_netor_home_directory + " to /etc/environment")
    print("  sudo vi /etc/environment")
    print("  NETOR=\"$PATH:" + new_netor_home_directory)
    print("\nLogoff session or restart system, and login again.")
    print("\nATTENTION: If you are using Salt restart the daemons with \"netor-salt-restart\"\n")
    netorlogging.log_msg(tinydb_log_file, __file__,
                         "Netconf executed. Neto.config and static vars in scripts updated. ")
def replace_static_vars_scripts(filename, search, replace, delimiter, extra):
    """
    Rewrite a file in place, substituting every line containing ``search``
    with ``search + delimiter + replace + extra + delimiter``.

    :param filename: file to rewrite
    :param search: substring that marks a line for replacement
    :param replace: replacement text appended after the search pattern
    :param delimiter: optional delimiter wrapped around the replacement
    :param extra: optional extra path information
    :return: nothing
    """
    # The replacement line is the same for every match, so build it once.
    replacement_line = search + delimiter + replace + extra + delimiter
    try:
        # fileinput with inplace=True redirects stdout into the file,
        # so the print() calls below write the new file content.
        for line in fileinput.input(filename, inplace=True):
            if search in line:
                print(replacement_line, end="\n")
            else:
                print(line, end="")
    except FileNotFoundError:
        print("\nERROR File not found " + filename)
        print("Manually find systemd folder and file " + filename.split("/")[-1] +
              " and modify the parameter \"" + search + "\" in the file to point to " + replace + "\n")
    except PermissionError:
        print("\nERROR Permission denied to modify file " + filename)
        print("Manually modify the parameter -\"" + search + "\" in the file to point to " + replace)
def check_netor_config(netor_home_directory):
    """
    Verify that the Netor home directory and its ``netor.config`` file exist.

    Exits the process with status 1 when either is missing.

    :param netor_home_directory: directory expected to contain netor/netor.config
    :return: nothing
    """
    config_file = netor_home_directory + "netor/netor.config"
    if not os.path.isdir(netor_home_directory) or not os.path.isfile(config_file):
        print("Neto home directory or config file not found.\nRun configuration script (netor-config).")
        sys.exit(1)
if __name__ == '__main__':
    # Script entry point: run the interactive Netor configuration flow.
    _netor_config()
    print()
| 42.011655 | 119 | 0.668146 |
61edb2c25c99c318b707a55fcdfcaaf007b47999 | 4,780 | py | Python | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/api/mutations/test_check_repository_by_commit.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | import pytest
from zoo.auditing.models import Issue
from zoo.auditing.check_discovery import Effort, Kind, Severity
pytestmark = pytest.mark.django_db
query = """
mutation test ($input: CheckRepositoryByCommitInput!) {
checkRepositoryByCommit (input: $input) {
allCheckResults {
isFound
kindKey
status
details
severity
effort
title
description
}
}
}
"""
| 34.142857 | 88 | 0.604393 |
61ee17d15d59c91dd4a80c2ec70be31d3dc1095f | 134 | py | Python | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | null | null | null | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | null | null | null | start/hello_world.py | nguyenductamlhp/tensorflow_demo | 7c4b55dff80dd435806a1b22dee6eb32ae39c02d | [
"MIT"
] | 1 | 2018-11-05T06:40:09.000Z | 2018-11-05T06:40:09.000Z | # -*- coding: utf-8 -*-
import tensorflow as tf
# Build a constant op holding the greeting (TF1 graph-mode API).
hello = tf.constant('Hello, TensorFlow!')
# A TF1 Session executes the graph; note tf.Session was removed in TF2.
sess = tf.Session()
print(sess.run(hello))
| 19.142857 | 41 | 0.671642 |
61f0c4fd22f5b70221a5b58b1db5553ecb4e26b8 | 755 | py | Python | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | behavioral/command/logic/generators/trader_generator.py | Kozak24/Patterns | 351d5c11f7c64ce5d58db37b6715fc8f7d31945a | [
"MIT"
] | null | null | null | from random import randint
from typing import Optional
from behavioral.command.data import Trader
from behavioral.command.logic.generators import ItemsGenerator
| 30.2 | 98 | 0.717881 |
61f42117264c7b5f1ae0b590ff1b7ddfa85808ba | 11,119 | py | Python | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | 2 | 2019-02-12T17:59:41.000Z | 2019-10-27T03:36:08.000Z | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | null | null | null | test3.py | dg1223/GestureRecognition | 07078b0b8340c8b94f42414efe0ed36158e8c0ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 00:07:11 2015
@author: Shamir
"""
for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
print 'i = ', i
gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
#dataset = os.listdir(sourcePath + gesture)[0] # Train, Cross Validation, Test
copy = False
AngVel_array = []
for k in range(len(os.listdir(sourcePath + gesture))):
sensor = os.listdir(sourcePath + gesture)[k] # Sensor15, Sensor16, Sensor17, Sensor18, Sensor19
sensorFolder = os.listdir(sourcePath + gesture + backslash + sensor)
print sensorFolder
for l in range(len(sensorFolder)):
csvfile = sourcePath + gesture + backslash + sensor + backslash + sensorFolder[l] # full filepath
readFile = pandas.read_csv(csvfile, header = None)
readFile.values[1:] = readFile.values[1:].astype(float)
velocityAlpha = ['Precession_' + sensor[6:]]
velocityBeta = ['Nutation_' + sensor[6:]]
velocityGamma = ['Spin_' + sensor[6:]]
#print velocityAlpha
velocityAlpha = np.asarray(velocityAlpha)
velocityBeta = np.asarray(velocityBeta)
velocityGamma = np.asarray(velocityGamma)
#time = np.shape(readFile.values)[1] / frequency_euc
if copy == True:
print 'This is the If phase'
for m in range(1, len(readFile.values)): # for every two files ???
## need to add code to check if number_of_rows matches
precession, nutation, spin = 0, 0, 0
for n in range(0, np.shape(readFile.values)[1] - 5, 3):
alpha = n
beta = n + 1
gamma = n + 2
alphaNext = n + 3
betaNext = n + 4
gammaNext = n + 5
try:
precession += euclidean(readFile.values[m, alpha], readFile.values[m, alphaNext])
#print 'precession = ', precession
nutation += euclidean(readFile.values[m, beta], readFile.values[m, betaNext])
spin += euclidean(readFile.values[m, gamma], readFile.values[m, gammaNext])
except ValueError:
#print '1st catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
break
valid_data = CalculateValidData() # Exclude missing values (we exclude 6 more values to remain within a safer margin)
time = valid_data / frequency_euc
precessionVelocity = precession/time
#print 'precessionVelocity = ', precessionVelocity
nutationVelocity = nutation/time
spinVelocity = spin/time
for n in range(0, np.shape(readFile.values)[1] - 3, 3):
alpha = n
beta = n + 1
gamma = n + 2
try:
readFile.values[m, alpha] = (precessionVelocity * np.sin(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) + (nutationVelocity * np.cos(readFile.values[m, gamma])) # alpha component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) - (nutationVelocity * np.sin(readFile.values[m, gamma])) # beta component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, beta])) * spinVelocity # gamma compomemt
except ValueError:
#print '2nd catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
continue
averageAlpha = np.sum(readFile.values[m, range(0, valid_data, 3)]) / time
averageBeta = np.sum(readFile.values[m, range(1, valid_data, 3)]) / time
averageGamma = np.sum(readFile.values[m, range(2, valid_data, 3)]) / time
velocityAlpha = np.vstack((velocityAlpha, averageAlpha))
#print 'filename, m, velocityAlpha = ', csvfile[-6:], m, velocityAlpha
velocityBeta = np.vstack((velocityBeta, averageBeta))
velocityGamma = np.vstack((velocityGamma, averageGamma))
columnSize = len(velocityAlpha)
angular_velocity = np.zeros((len(velocityAlpha), 3))
angular_velocity = angular_velocity.astype(str) # to avoid string to float conversion error
# Return the column vectors in a single 2D array
angular_velocity[:,0] = velocityAlpha.reshape(1, columnSize)
angular_velocity[:,1] = velocityBeta.reshape (1, columnSize)
angular_velocity[:,2] = velocityGamma.reshape(1, columnSize)
AngVel_array = np.hstack((AngVel_array, angular_velocity))
#print 'AngVel_array = ', AngVel_array
else:
print 'This is the Else phase'
for m in range(1, len(readFile.values)): # for every two files
## need to add code to check if number_of_rows matches
precession, nutation, spin = 0, 0, 0
for n in range(0, np.shape(readFile.values)[1] - 5, 3):
alpha = n
beta = n + 1
gamma = n + 2
alphaNext = n + 3
betaNext = n + 4
gammaNext = n + 5
try:
precession += euclidean(readFile.values[m, alpha], readFile.values[m, alphaNext])
nutation += euclidean(readFile.values[m, beta], readFile.values[m, betaNext])
spin += euclidean(readFile.values[m, gamma], readFile.values[m, gammaNext])
except ValueError:
#print '1st catch (copy = False) at print file, m, n = ', csvfile[-6:], m, n
continue
valid_data = CalculateValidData()
time = valid_data / frequency_euc
precessionVelocity = precession/time
nutationVelocity = nutation/time
spinVelocity = spin/time
#print 'precession,nutation,spinVelocity = ', precessionVelocity, nutationVelocity, spinVelocity
for n in range(0, np.shape(readFile.values)[1] - 3, 3):
alpha = n
beta = n + 1
gamma = n + 2
try:
readFile.values[m, alpha] = (precessionVelocity * np.sin(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) + (nutationVelocity * np.cos(readFile.values[m, gamma])) # alpha component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, gamma]) * np.sin(readFile.values[m, beta])) - (nutationVelocity * np.sin(readFile.values[m, gamma])) # beta component
readFile.values[m, beta] = (precessionVelocity * np.cos(readFile.values[m, beta])) * spinVelocity # gamma compomemt
except ValueError:
#print '2nd catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
continue
averageAlpha = np.sum(readFile.values[m, range(0, valid_data, 3)]) / time
#print 'averageAlpha = ', averageAlpha
averageBeta = np.sum(readFile.values[m, range(1, valid_data, 3)]) / time
averageGamma = np.sum(readFile.values[m, range(2, valid_data, 3)]) / time
velocityAlpha = np.vstack((velocityAlpha, averageAlpha))
#print 'filename, m, velocityAlpha = ', csvfile[-6:], m, velocityAlpha
velocityBeta = np.vstack((velocityBeta, averageBeta))
velocityGamma = np.vstack((velocityGamma, averageGamma))
columnSize = len(velocityAlpha)
angular_velocity = np.zeros((len(velocityAlpha), 3))
angular_velocity = angular_velocity.astype(str)
# Return the column vectors in a single 2D array
angular_velocity[:,0] = velocityAlpha.reshape(1, columnSize)
angular_velocity[:,1] = velocityBeta.reshape (1, columnSize)
angular_velocity[:,2] = velocityGamma.reshape(1, columnSize)
AngVel_array = angular_velocity.copy()
#print 'AngVel_array = ', AngVel_array
copy = True
# Create complete file structure/dataframe
if i == 0:
fullFile4 = DataFrame(AngVel_array)
else:
AngVel_array = DataFrame(AngVel_array)
fullFile4 = pandas.concat([fullFile4, AngVel_array], join = 'inner')
| 63.176136 | 229 | 0.461462 |
61f65e88bb74b76264401d01893c2004742b5044 | 1,919 | py | Python | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null | build.py | micklenguyen/hw2-scripting | 3603a2c4d7518890eacc4f071f347f90dd295ee6 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
main() | 24.922078 | 97 | 0.730589 |
61f94a0bece7deb448882a08f6a458e64ef93c8e | 35,113 | py | Python | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | null | null | null | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | 1 | 2022-01-28T10:06:28.000Z | 2022-01-31T14:51:52.000Z | src/jote/jote.py | InformaticsMatters/data-manager-job-tester | f8915e005f16685d159535a2455628eb1d7ac518 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Informatics Matters Job Tester (JOTE).
Get help running this utility with 'jote --help'
"""
import argparse
import os
import shutil
import stat
from stat import S_IRGRP, S_IRUSR, S_IWGRP, S_IWUSR
import subprocess
import sys
from typing import Any, Dict, List, Optional, Tuple
from munch import DefaultMunch
import yaml
from yamllint import linter
from yamllint.config import YamlLintConfig
from decoder import decoder
from .compose import get_test_root, INSTANCE_DIRECTORY, DEFAULT_TEST_TIMEOUT_M
from .compose import Compose
# Where can we expect to find Job definitions?
_DEFINITION_DIRECTORY: str = "data-manager"
# What's the default manifest file?
_DEFAULT_MANIFEST: str = os.path.join(_DEFINITION_DIRECTORY, "manifest.yaml")
# Where can we expect to find test data?
_DATA_DIRECTORY: str = "data"

# Our yamllint configuration file,
# located in the same directory as this module.
_YAMLLINT_FILE: str = os.path.join(os.path.dirname(__file__), "jote.yamllint")

# Read the version file (once, at import time).
_VERSION_FILE: str = os.path.join(os.path.dirname(__file__), "VERSION")
with open(_VERSION_FILE, "r", encoding="utf-8") as file_handle:
    _VERSION = file_handle.read().strip()

# Job image types (lower-case)
_IMAGE_TYPE_SIMPLE: str = "simple"
_IMAGE_TYPE_NEXTFLOW: str = "nextflow"
_DEFAULT_IMAGE_TYPE: str = _IMAGE_TYPE_SIMPLE

# User HOME directory.
# Used to check for nextflow files if nextflow is executed.
# The user CANNOT have any of their own nextflow config.
_USR_HOME: str = os.environ.get("HOME", "")
def _lint(definition_filename: str) -> bool:
    """Lints the provided job definition file.

    :param definition_filename: path of the YAML job definition to lint
    :returns: True when the file passes yamllint, False otherwise
    """
    if not os.path.isfile(_YAMLLINT_FILE):
        print(f"! The yamllint file ({_YAMLLINT_FILE}) is missing")
        return False

    with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
        errors = linter.run(definition_file, YamlLintConfig(file=_YAMLLINT_FILE))

    if errors:
        # We're given a 'generator' and we don't know if there are errors
        # until we iterate over it. So here we print an initial error message
        # on the first error.
        found_errors: bool = False
        for error in errors:
            if not found_errors:
                # Fixed: this message previously interpolated the file OBJECT
                # (definition_file) instead of the filename.
                print(f'! Job definition "{definition_filename}" fails yamllint:')
                found_errors = True
            print(error)
        if found_errors:
            return False

    return True
def _validate_schema(definition_filename: str) -> bool:
    """Validate a job-definition file against the decoder's schema.

    :param definition_filename: path of the YAML job definition
    :returns: True when the definition complies with the schema
    """
    with open(definition_filename, "rt", encoding="UTF-8") as def_file:
        loaded: Optional[Dict[str, Any]] = yaml.load(def_file, Loader=yaml.FullLoader)
    assert loaded

    # A non-None response from the decoder is an error message.
    schema_error: Optional[str] = decoder.validate_job_schema(loaded)
    if not schema_error:
        return True

    print(f'! Job definition "{definition_filename}"' " does not comply with schema")
    print("! Full response follows:")
    print(schema_error)
    return False
def _validate_manifest_schema(manifest_filename: str) -> bool:
    """Validate a manifest file against the decoder's manifest schema.

    :param manifest_filename: path of the YAML manifest
    :returns: True when the manifest complies with the schema
    """
    with open(manifest_filename, "rt", encoding="UTF-8") as manifest_file:
        manifest_content: Optional[Dict[str, Any]] = yaml.load(
            manifest_file, Loader=yaml.FullLoader
        )
    assert manifest_content

    # A non-None response from the decoder is an error message.
    schema_error: Optional[str] = decoder.validate_manifest_schema(manifest_content)
    if not schema_error:
        return True

    print(f'! Manifest "{manifest_filename}"' " does not comply with schema")
    print("! Full response follows:")
    print(schema_error)
    return False
def _check_cwd() -> bool:
    """Verify the current working directory looks like a job repository.

    Both the job-definition directory and the test-data directory must be
    present; an error is printed for the first missing one.

    :returns: True when all expected directories exist
    """
    for required in (_DEFINITION_DIRECTORY, _DATA_DIRECTORY):
        if os.path.isdir(required):
            continue
        print(f'! Expected directory "{required}"' " but it is not here")
        return False
    return True
def _load(manifest_filename: str, skip_lint: bool) -> Tuple[List[DefaultMunch], int]:
    """Loads definition files listed in the manifest
    and extracts the definitions that contain at least one test. The
    definition blocks for those that have tests (ignored or otherwise)
    are returned along with a count of the number of tests found
    (ignored or otherwise).

    If there was a problem loading the files an empty list and
    -ve count is returned (-1 for schema/manifest failures, -2 for lint
    failures).
    """
    # Prefix manifest filename with definition directory if required...
    manifest_path: str = (
        manifest_filename
        if manifest_filename.startswith(f"{_DEFINITION_DIRECTORY}/")
        else os.path.join(_DEFINITION_DIRECTORY, manifest_filename)
    )
    if not os.path.isfile(manifest_path):
        print(f'! The manifest file is missing ("{manifest_path}")')
        return [], -1
    if not _validate_manifest_schema(manifest_path):
        return [], -1

    with open(manifest_path, "r", encoding="UTF-8") as manifest_file:
        manifest: Dict[str, Any] = yaml.load(manifest_file, Loader=yaml.FullLoader)
    # NOTE(review): if the manifest is empty, 'manifest_munch' is never bound
    # and the loop below raises NameError - confirm whether an empty manifest
    # should be rejected earlier.
    if manifest:
        manifest_munch: DefaultMunch = DefaultMunch.fromDict(manifest)

    # Iterate through the named files...
    job_definitions: List[DefaultMunch] = []
    num_tests: int = 0
    for jd_filename in manifest_munch["job-definition-files"]:
        # Does the definition comply with the dschema?
        # No options here - it must.
        jd_path: str = os.path.join(_DEFINITION_DIRECTORY, jd_filename)
        if not _validate_schema(jd_path):
            return [], -1
        # YAML-lint the definition?
        if not skip_lint:
            if not _lint(jd_path):
                return [], -2

        with open(jd_path, "r", encoding="UTF-8") as jd_file:
            job_def: Dict[str, Any] = yaml.load(jd_file, Loader=yaml.FullLoader)
        if job_def:
            jd_munch: DefaultMunch = DefaultMunch.fromDict(job_def)
            for jd_name in jd_munch.jobs:
                if jd_munch.jobs[jd_name].tests:
                    num_tests += len(jd_munch.jobs[jd_name].tests)
            # NOTE(review): 'num_tests' is cumulative across files, so once an
            # earlier file has contributed tests, later test-less definitions
            # are appended too - confirm whether a per-file count was intended.
            if num_tests:
                jd_munch.definition_filename = jd_filename
                job_definitions.append(jd_munch)

    return job_definitions, num_tests
def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
    """Copy every declared test input file into the test project directory.

    Each input must live under the repository's data directory and must
    exist on disk; the first violation aborts the copy.

    :param test_inputs: input file paths, relative to the repository root
    :param project_path: destination (simulated project) directory
    :returns: True when every file was copied
    """
    print(f'# Copying inputs (from "${{PWD}}/{_DATA_DIRECTORY}")...')

    required_prefix: str = f"{_DATA_DIRECTORY}/"
    for input_file in test_inputs:
        print(f"# + {input_file}")
        if not input_file.startswith(required_prefix):
            print("! FAILURE")
            print(f'! Input file {input_file} must start with "{required_prefix}"')
            return False
        if not os.path.isfile(input_file):
            print("! FAILURE")
            print(f"! Missing input file {input_file} ({input_file})")
            return False
        shutil.copy(input_file, project_path)

    print("# Copied")
    return True
def _check(
    t_compose: Compose, output_checks: DefaultMunch, fix_permissions: bool
) -> bool:
    """Run the declared checks against the Job's output files.

    Supported check types are 'exists' and 'lineCount'. 'fix_permissions'
    is forwarded to the 'exists' check, which inverts its permission
    expectation accordingly.

    :returns: True when every check on every output passes
    """
    assert t_compose
    assert isinstance(t_compose, Compose)
    assert output_checks
    assert isinstance(output_checks, List)

    print("# Checking...")

    for output_check in output_checks:
        output_name: str = output_check.name
        print(f"# - {output_name}")

        expected_file: str = os.path.join(
            t_compose.get_test_project_path(), output_name
        )

        for check in output_check.checks:
            check_type: str = next(iter(check.keys()))
            if check_type == "exists":
                passed = _check_exists(
                    output_name, expected_file, check.exists, fix_permissions
                )
            elif check_type == "lineCount":
                passed = _check_line_count(output_name, expected_file, check.lineCount)
            else:
                print("! FAILURE")
                print(f"! Unknown output check type ({check_type})")
                return False
            if not passed:
                return False

    print("# Checked")
    return True
def _run_nextflow(
    command: str, project_path: str, timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
) -> Tuple[int, str, str]:
    """Runs nextflow in the project directory returning the exit code,
    stdout and stderr.

    :param command: the shell command line to execute
    :param project_path: directory to run the command in
    :param timeout_minutes: hard time limit for the test run
    """
    assert command
    assert project_path

    # The user cannot have a nextflow config in their home directory.
    # Nextflow looks here and any config will be merged with the test config.
    if _USR_HOME:
        home_config: str = os.path.join(_USR_HOME, ".nextflow", "config")
        if os.path.exists(home_config) and os.path.isfile(home_config):
            print("! FAILURE")
            print(
                "! A nextflow test but"
                f" you have your own config file ({home_config})"
            )
            print("! You cannot test Jobs and have your own config file")
            # A non-zero exit code with empty output signals failure.
            return 1, "", ""

    print('# Executing the test ("nextflow")...')
    print(f'# Execution directory is "{project_path}"')
    cwd = os.getcwd()
    # The chdir is process-wide; it is restored in the 'finally' below even
    # if subprocess.run raises (e.g. subprocess.TimeoutExpired).
    os.chdir(project_path)
    try:
        test = subprocess.run(
            command,
            shell=True,
            check=False,
            capture_output=True,
            timeout=timeout_minutes * 60,
        )
    finally:
        os.chdir(cwd)

    return test.returncode, test.stdout.decode("utf-8"), test.stderr.decode("utf-8")
def _test(
    args: argparse.Namespace,
    filename: str,
    collection: str,
    job: str,
    job_definition: DefaultMunch,
) -> Tuple[int, int, int, int]:
    """Runs the tests for a specific Job definition returning the number
    of tests passed, skipped (due to run-level), ignored and failed.

    Args:
        args: The parsed command-line namespace (test/run-level/dry-run
            and related switches are read from here).
        filename: The job-definition filename (for reporting).
        collection: The collection the job belongs to (for reporting).
        job: The job name (for reporting).
        job_definition: The (Munch) job definition whose 'tests' are run.

    Returns:
        Tuple of (passed, skipped, ignored, failed) test counts.
    """
    assert job_definition
    assert isinstance(job_definition, DefaultMunch)
    # The test status, assume success
    tests_passed: int = 0
    tests_skipped: int = 0
    tests_ignored: int = 0
    tests_failed: int = 0
    # Resolve the container image, optionally overriding the tag
    # from the command line.
    if args.image_tag:
        print(f"W Replacing image tag. Using '{args.image_tag}'")
        job_image: str = f"{job_definition.image.name}:{args.image_tag}"
    else:
        job_image = f"{job_definition.image.name}:{job_definition.image.tag}"
    # Image resource requirements, with conservative defaults.
    job_image_memory: str = job_definition.image["memory"]
    if job_image_memory is None:
        job_image_memory = "1Gi"
    job_image_cores: int = job_definition.image["cores"]
    if job_image_cores is None:
        job_image_cores = 1
    job_project_directory: str = job_definition.image["project-directory"]
    job_working_directory: str = job_definition.image["working-directory"]
    if "type" in job_definition.image:
        job_image_type: str = job_definition.image["type"].lower()
    else:
        job_image_type = _DEFAULT_IMAGE_TYPE
    # Does the image need the (group write) permissions
    # of files it creates fixing? Default is 'no'.
    # If 'yes' (true) the DM is expected to fix the permissions of the
    # generated files once the job has finished.
    job_image_fix_permissions: bool = False
    if "fix-permissions" in job_definition.image:
        job_image_fix_permissions = job_definition.image["fix-permissions"]
    for job_test_name in job_definition.tests:
        # If a job test has been named,
        # skip this test if it doesn't match.
        # We do not include this test in the count.
        if args.test and not args.test == job_test_name:
            continue
        _print_test_banner(collection, job, job_test_name)
        # The status changes to False if any
        # part of this block fails.
        test_status: bool = True
        # NOTE(review): this f-string has no placeholder and always prints
        # '(unknown)'; it presumably should interpolate the (otherwise
        # unused) 'filename' parameter -- TODO confirm.
        print(f"> definition filename=(unknown)")
        # Does the test have an 'ignore' declaration?
        # Obey it unless the test is named explicitly -
        # i.e. if th user has named a specific test, run it.
        if "ignore" in job_definition.tests[job_test_name]:
            if args.test:
                print("W Ignoring the ignore: property (told to run this test)")
            else:
                print('W Ignoring test (found "ignore")')
                tests_ignored += 1
                continue
        # Does the test have a 'run-level' declaration?
        # If so, is it higher than the run-level specified?
        if args.test:
            print("W Ignoring any run-level check (told to run this test)")
        else:
            if "run-level" in job_definition.tests[job_test_name]:
                run_level = job_definition.tests[job_test_name]["run-level"]
                print(f"> run-level={run_level}")
                if run_level > args.run_level:
                    print(f'W Skipping test (test is "run-level: {run_level}")')
                    tests_skipped += 1
                    continue
            else:
                print("> run-level=Undefined")
        # Render the command for this test.
        # First extract the variables and values from 'options'
        # and then 'inputs'.
        job_variables: Dict[str, Any] = {}
        for variable in job_definition.tests[job_test_name].options:
            job_variables[variable] = job_definition.tests[job_test_name].options[
                variable
            ]
        # If the option variable's declaration is 'multiple'
        # it must be handled as a list, e.g. it might be declared like this: -
        #
        # The double-comment is used
        # to avoid mypy getting upset by the 'type' line...
        #
        # # properties:
        # #   fragments:
        # #     title: Fragment molecules
        # #     multiple: true
        # #     mime-types:
        # #     - chemical/x-mdl-molfile
        # #     type: file
        #
        # We only pass the basename of the input to the command decoding
        # i.e. strip the source directory.
        # A list of input files (relative to this directory)
        # We populate this with everything we find declared as an input
        input_files: List[str] = []
        # Process every 'input'
        if job_definition.tests[job_test_name].inputs:
            for variable in job_definition.tests[job_test_name].inputs:
                # Test variable must be known as an input or option.
                # Is the variable an option (otherwise it's an input)
                variable_is_option: bool = False
                variable_is_input: bool = False
                if variable in job_definition.variables.options.properties:
                    variable_is_option = True
                elif variable in job_definition.variables.inputs.properties:
                    variable_is_input = True
                if not variable_is_option and not variable_is_input:
                    print("! FAILURE")
                    print(
                        f"! Test variable ({variable})"
                        + " not declared as input or option"
                    )
                    # Record but do no further processing
                    tests_failed += 1
                    test_status = False
                # Is it declared as a list?
                value_is_list: bool = False
                if variable_is_option:
                    if job_definition.variables.options.properties[variable].multiple:
                        value_is_list = True
                else:
                    if job_definition.variables.inputs.properties[variable].multiple:
                        value_is_list = True
                # Add each value or just one value
                # (depending on whether it's a list)
                if value_is_list:
                    job_variables[variable] = []
                    for value in job_definition.tests[job_test_name].inputs[variable]:
                        job_variables[variable].append(os.path.basename(value))
                        input_files.append(value)
                else:
                    value = job_definition.tests[job_test_name].inputs[variable]
                    job_variables[variable] = os.path.basename(value)
                    input_files.append(value)
        decoded_command: str = ""
        test_environment: Dict[str, str] = {}
        if test_status:
            # Jote injects Job variables that are expected.
            # 'DM_' variables are injected by the Data Manager,
            # other are injected by Jote.
            # - DM_INSTANCE_DIRECTORY
            job_variables["DM_INSTANCE_DIRECTORY"] = INSTANCE_DIRECTORY
            # - CODE_DIRECTORY
            job_variables["CODE_DIRECTORY"] = os.getcwd()
            # Has the user defined any environment variables in the test?
            # If so they must exist, although we don't care about their value.
            # Extract them here to pass to the test.
            if "environment" in job_definition.tests[job_test_name]:
                for env_name in job_definition.tests[job_test_name].environment:
                    env_value: Optional[str] = os.environ.get(env_name, None)
                    if env_value is None:
                        print("! FAILURE")
                        print("! Test environment variable is not defined")
                        print(f"! variable={env_name}")
                        # Record but do no further processing
                        tests_failed += 1
                        test_status = False
                        break
                    test_environment[env_name] = env_value
        if test_status:
            # Get the raw (encoded) command from the job definition...
            raw_command: str = job_definition.command
            # Decode it using our variables...
            decoded_command, test_status = decoder.decode(
                raw_command,
                job_variables,
                "command",
                decoder.TextEncoding.JINJA2_3_0,
            )
            if not test_status:
                print("! FAILURE")
                print("! Failed to render command")
                print(f"! error={decoded_command}")
                # Record but do no further processing
                tests_failed += 1
                test_status = False
        # Create the test directories, docker-compose file
        # and copy inputs...
        t_compose: Optional[Compose] = None
        job_command: str = ""
        project_path: str = ""
        if test_status:
            # The command must not contain new-lines.
            # So split then join the command.
            assert decoded_command
            job_command = "".join(decoded_command.splitlines())
            print(f"> image={job_image}")
            print(f"> image-type={job_image_type}")
            print(f"> command={job_command}")
            # Create the project
            t_compose = Compose(
                collection,
                job,
                job_test_name,
                job_image,
                job_image_type,
                job_image_memory,
                job_image_cores,
                job_project_directory,
                job_working_directory,
                job_command,
                test_environment,
                args.run_as_user,
            )
            project_path = t_compose.create()
            if input_files:
                # Copy the data into the test's project directory.
                # Data's expected to be found in the Job's 'inputs'.
                test_status = _copy_inputs(input_files, project_path)
        # Run the container
        # (either via docker-compose or, for nextflow images, directly).
        if test_status and not args.dry_run:
            timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M
            if "timeout-minutes" in job_definition.tests[job_test_name]:
                timeout_minutes = job_definition.tests[job_test_name]["timeout-minutes"]
            exit_code: int = 0
            out: str = ""
            err: str = ""
            if job_image_type in [_IMAGE_TYPE_SIMPLE]:
                # Run the image container
                assert t_compose
                exit_code, out, err = t_compose.run(timeout_minutes)
            elif job_image_type in [_IMAGE_TYPE_NEXTFLOW]:
                # Run nextflow directly
                assert job_command
                assert project_path
                exit_code, out, err = _run_nextflow(
                    job_command, project_path, timeout_minutes
                )
            else:
                print("! FAILURE")
                print(f"! unsupported image-type ({job_image_type}")
                test_status = False
            if test_status:
                expected_exit_code: int = job_definition.tests[
                    job_test_name
                ].checks.exitCode
                if exit_code != expected_exit_code:
                    print("! FAILURE")
                    print(
                        f"! exit_code={exit_code}"
                        f" expected_exit_code={expected_exit_code}"
                    )
                    print("! Test stdout follows...")
                    print(out)
                    print("! Test stderr follows...")
                    print(err)
                    test_status = False
            if args.verbose:
                print(out)
        # Inspect the results
        # (only if successful so far)
        if (
            test_status
            and not args.dry_run
            and job_definition.tests[job_test_name].checks.outputs
        ):
            assert t_compose
            test_status = _check(
                t_compose,
                job_definition.tests[job_test_name].checks.outputs,
                job_image_fix_permissions,
            )
        # Clean-up
        # (only on success - failed material is kept for inspection)
        if test_status and not args.keep_results:
            assert t_compose
            t_compose.delete()
        # Count?
        if test_status:
            print("- SUCCESS")
            tests_passed += 1
        else:
            tests_failed += 1
        # Told to stop on first failure?
        if not test_status and args.exit_on_failure:
            break
    return tests_passed, tests_skipped, tests_ignored, tests_failed
def _wipe() -> None:
    """Remove every artefact produced by previous test runs."""
    root = get_test_root()
    if not os.path.isdir(root):
        # Nothing to do - no test material has been created.
        return
    shutil.rmtree(root)
def arg_check_run_level(value: str) -> int:
    """Validate an argparse run-level argument.

    The value must parse as an integer in the range 1..100 (inclusive);
    out-of-range values raise argparse.ArgumentTypeError.
    """
    level = int(value)
    if level < 1:
        raise argparse.ArgumentTypeError("Minimum value is 1")
    if level > 100:
        raise argparse.ArgumentTypeError("Maximum value is 100")
    return level
def arg_check_run_as_user(value: str) -> int:
    """Validate an argparse run-as-user argument.

    The value must parse as an integer in the range 0..65535 (inclusive) -
    a valid POSIX user ID; out-of-range values raise
    argparse.ArgumentTypeError.
    """
    user_id = int(value)
    if user_id < 0:
        raise argparse.ArgumentTypeError("Minimum value is 0")
    if user_id > 65_535:
        raise argparse.ArgumentTypeError("Maximum value is 65535")
    return user_id
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
def main() -> int:
    """The console script entry-point. Called when jote is executed
    or from __main__.py, which is used by the installed console script.

    Returns 0 on success. Note that all failure paths currently go through
    arg_parser.error(), which raises SystemExit(2) rather than returning.
    """
    # Build a command-line parser
    # and process the command-line...
    arg_parser: argparse.ArgumentParser = argparse.ArgumentParser(
        description="Data Manager Job Tester",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "-m",
        "--manifest",
        help="The manifest file.",
        default=_DEFAULT_MANIFEST,
        type=str,
    )
    arg_parser.add_argument(
        "-c",
        "--collection",
        help="The Job collection to test. If not"
        " specified the Jobs in all collections"
        " will be candidates for testing.",
    )
    arg_parser.add_argument(
        "-j",
        "--job",
        help="The Job to test. If specified the collection"
        " is required. If not specified all the Jobs"
        " that match the collection will be"
        " candidates for testing.",
    )
    arg_parser.add_argument(
        "--image-tag",
        # Fix: 'rather then' -> 'rather than' in the user-facing help text.
        help="An image tag to use rather than the one defined in the job definition.",
    )
    arg_parser.add_argument(
        "-t",
        "--test",
        help="A specific test to run. If specified the job"
        " is required. If not specified all the Tests"
        " that match the collection will be"
        " candidates for testing.",
    )
    arg_parser.add_argument(
        "-r",
        "--run-level",
        help="The run-level of the tests you want to"
        " execute. All tests at or below this level"
        " will be executed, a value from 1 to 100",
        default=1,
        type=arg_check_run_level,
    )
    arg_parser.add_argument(
        "-u",
        "--run-as-user",
        help="A user ID to run the tests as. If not set"
        " your user ID is used to run the test"
        " containers.",
        type=arg_check_run_as_user,
    )
    arg_parser.add_argument(
        "-d",
        "--dry-run",
        action="store_true",
        help="Setting this flag will result in jote"
        " simply parsing the Job definitions"
        " but not running any of the tests."
        # Fix: 'It is can be used' -> 'It can be used'.
        " It can be used to check the syntax of"
        " your definition file and its test commands"
        " and data.",
    )
    arg_parser.add_argument(
        "-k",
        "--keep-results",
        action="store_true",
        help="Normally all material created to run each"
        " test is removed when the test is"
        " successful",
    )
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", help="Displays test stdout"
    )
    arg_parser.add_argument(
        "--version", action="store_true", help="Displays jote version"
    )
    arg_parser.add_argument(
        "-x",
        "--exit-on-failure",
        action="store_true",
        help="Normally jote reports test failures but"
        " continues with the next test."
        " Setting this flag will force jote to"
        " stop when it encounters the first failure",
    )
    arg_parser.add_argument(
        "-s",
        "--skip-lint",
        action="store_true",
        help="Normally jote runs the job definition"
        " files against the prevailing lint"
        " configuration of the repository under test."
        " Using this flag skips that step",
    )
    arg_parser.add_argument(
        "-w",
        "--wipe",
        action="store_true",
        # Fix: 'does nto run' -> 'does not run'.
        help="Wipe does not run any tests, it simply"
        " wipes the repository clean of jote"
        " test material. It would be wise"
        " to run this once you have finished testing."
        " Using this negates the effect of any other"
        " option.",
    )
    arg_parser.add_argument(
        "-a",
        "--allow-no-tests",
        action="store_true",
        help="Normally jote expects to run tests"
        " and if you have no tests jote will fail."
        " To prevent jote complaining about the lack"
        " of tests you can use this option.",
    )
    args: argparse.Namespace = arg_parser.parse_args()
    # If a version's been asked for act on it and then leave
    if args.version:
        print(_VERSION)
        return 0
    # Cross-option validation (argparse cannot express these dependencies).
    if args.test and args.job is None:
        arg_parser.error("--test requires --job")
    if args.job and args.collection is None:
        arg_parser.error("--job requires --collection")
    if args.wipe and args.keep_results:
        arg_parser.error("Cannot use --wipe and --keep-results")
    # Args are OK if we get here.
    total_passed_count: int = 0
    total_skipped_count: int = 0
    total_ignore_count: int = 0
    total_failed_count: int = 0
    # Check CWD
    if not _check_cwd():
        print("! FAILURE")
        print("! The directory does not look correct")
        arg_parser.error("Done (FAILURE)")
    # Told to wipe?
    # If so wipe, and leave.
    if args.wipe:
        _wipe()
        print("Done [Wiped]")
        return 0
    print(f'# Using manifest "{args.manifest}"')
    # Load all the files we can and then run the tests.
    job_definitions, num_tests = _load(args.manifest, args.skip_lint)
    if num_tests < 0:
        print("! FAILURE")
        print("! Definition file has failed yamllint")
        arg_parser.error("Done (FAILURE)")
    msg: str = "test" if num_tests == 1 else "tests"
    print(f"# Found {num_tests} {msg}")
    if args.collection:
        print(f'# Limiting to Collection "{args.collection}"')
    if args.job:
        print(f'# Limiting to Job "{args.job}"')
    if args.test:
        print(f'# Limiting to Test "{args.test}"')
    if job_definitions:
        # Fix: initialise num_failed before the loops. The exit-on-failure
        # checks below read it, and previously it was only assigned inside
        # 'if job_definition.jobs[job_name].tests:', risking a NameError
        # when the first matching job declared no tests.
        num_failed: int = 0
        # There is at least one job-definition with a test
        # Now process all the Jobs that have tests...
        for job_definition in job_definitions:
            # If a collection's been named,
            # skip this file if it's not the named collection
            collection: str = job_definition.collection
            if args.collection and not args.collection == collection:
                continue
            for job_name in job_definition.jobs:
                # If a Job's been named,
                # skip this test if the job does not match
                if args.job and not args.job == job_name:
                    continue
                if job_definition.jobs[job_name].tests:
                    num_passed, num_skipped, num_ignored, num_failed = _test(
                        args,
                        job_definition.definition_filename,
                        collection,
                        job_name,
                        job_definition.jobs[job_name],
                    )
                    total_passed_count += num_passed
                    total_skipped_count += num_skipped
                    total_ignore_count += num_ignored
                    total_failed_count += num_failed
                    # Break out of this loop if told to stop on failures
                    if num_failed > 0 and args.exit_on_failure:
                        break
            # Break out of this loop if told to stop on failures
            if num_failed > 0 and args.exit_on_failure:
                break
    # Success or failure?
    # It's an error to find no tests.
    print(" ---")
    dry_run: str = "[DRY RUN]" if args.dry_run else ""
    summary: str = (
        f"passed={total_passed_count}"
        f" skipped={total_skipped_count}"
        f" ignored={total_ignore_count}"
        f" failed={total_failed_count}"
    )
    failed: bool = False
    if total_failed_count:
        # NOTE(review): arg_parser.error() raises SystemExit(2), so the
        # assignment below is unreachable and the function cannot actually
        # return 1. Kept for now to preserve the established exit status.
        arg_parser.error(f"Done (FAILURE) {summary} {dry_run}")
        failed = True
    elif total_passed_count == 0 and not args.allow_no_tests:
        # NOTE(review): unreachable assignment, as above.
        arg_parser.error(
            f"Done (FAILURE) {summary}" f" (at least one test must pass)" f" {dry_run}"
        )
        failed = True
    else:
        print(f"Done (OK) {summary} {dry_run}")
    # Automatically wipe.
    # If there have been no failures
    # and not told to keep directories.
    if total_failed_count == 0 and not args.keep_results:
        _wipe()
    return 1 if failed else 0
# -----------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Run the tester and hand any non-zero status back to the shell.
    _STATUS: int = main()
    if _STATUS:
        sys.exit(_STATUS)
| 35.183367 | 100 | 0.584741 |
61f95f027b40f870a2f775166934b53fdd79358c | 18,636 | py | Python | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | src/cred_manage/bitwarden.py | areese801/cred_manage | 01f08ddc0b954c36e27ce1d6407f087c8aff0d4f | [
"MIT"
] | null | null | null | """
Subclass of the BaseCredContainer used for reading secrets from bitwarden password manager.
This class wraps the bitwarden CLI. See: https://bitwarden.com/help/article/cli/#using-an-api-key
Note that only the Enterprise version of bitwarden can (supported) hit the REST API.
In contrast, the API key that can be found under the "My Account" page can be used to log into the cli tool
"""
from cred_manage.flat_file import FlatFileCredContainer
from cred_manage.base_cred_container import CredContainerBase
import json
import getpass
import os
import subprocess
import uuid
from shutil import which
from packaging import version
def make_bitwarden_container(api_key_flat_file:str = '/.credentials/bw_api.json'):
    """
    Factory function to return a BitwardenCredContainer object, instantiated using data
    read from a flat file. See 'View API Key' button at https://vault.bitwarden.com/#/settings/account

    Args:
        api_key_flat_file (str): The flat file that contains the API details
            as a JSON object.

    Raises:
        FileNotFoundError: If api_key_flat_file does not exist.

    Returns:
        BitwardenCredContainer
    """
    # Validate that the api key flat file actually exists
    if not os.path.isfile(api_key_flat_file):
        raise FileNotFoundError(f"Cannot read the bitwarden API key out of the file '{api_key_flat_file}' because it does not exist!")
    # Read the contents of the flat file
    file_cred_obj = FlatFileCredContainer(
        file_path=api_key_flat_file,
        allow_broad_permissions=False)  # This is very stubborn about reading a file that isn't locked down properly
    file_contents = file_cred_obj.read()
    j = json.loads(file_contents)
    # Assumes the JSON object's keys match BitwardenCredContainer's
    # constructor parameters -- TODO confirm against that class.
    o = BitwardenCredContainer(**j)
    return o
| 41.972973 | 247 | 0.6481 |
61f9d61ddf16dfe982de5cd443717f5e39b05a82 | 7,027 | py | Python | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | 6 | 2021-02-18T05:18:17.000Z | 2022-02-19T02:49:32.000Z | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | null | null | null | transforms/waveform.py | koukyo1994/kaggle-rfcx | c3573d014d99312b58882e7b939de6c1055129b1 | [
"MIT"
] | 2 | 2021-02-18T11:31:50.000Z | 2022-02-19T02:49:07.000Z | import colorednoise as cn
import librosa
import numpy as np
| 31.231111 | 104 | 0.605237 |
61fa26c1e849bd6b6249a17cb7a588c5997af757 | 438 | py | Python | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | null | null | null | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | 1 | 2020-12-27T14:59:35.000Z | 2020-12-27T14:59:35.000Z | Back/generalchatroom/models.py | sadeghjafari5528/404- | 0499b93cc473ec4def96d95364180eb4f4dafb11 | [
"MIT"
] | 2 | 2020-10-30T08:08:32.000Z | 2020-10-30T20:47:51.000Z | from django.db import models
from registeration.models import User
from chatroom.models import Chatroom
| 36.5 | 83 | 0.773973 |
61fa91668b7e930a4d4c6429b8910bfdb88b86e5 | 1,095 | py | Python | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 169 | 2015-01-16T12:48:23.000Z | 2021-12-09T16:00:13.000Z | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 26 | 2015-01-23T16:30:28.000Z | 2018-07-07T09:14:18.000Z | plyplus/test/test_trees.py | rubycandy/test-plyplus | ced9377e6c26dcf308dd9f480411af9c8dbe9c56 | [
"MIT"
] | 53 | 2015-01-22T20:20:10.000Z | 2021-12-05T13:39:57.000Z | from __future__ import absolute_import
import unittest
import logging
import copy
import pickle
from plyplus.plyplus import STree
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
unittest.main()
| 24.886364 | 76 | 0.613699 |
61fae1b5b671ac52f912549b4f9c186cb38b0495 | 1,563 | py | Python | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null | misaligned.py | clean-code-craft-tcq-2/test-failer-in-py-yashaswin-mayya | 1861f2db11a508e9c1e2f7ce351d11d87c0c734c | [
"MIT"
] | null | null | null |
MAJOR_COLORS = ["White", "Red", "Black", "Yellow", "Violet"]
MINOR_COLORS = ["Blue", "Orange", "Green", "Brown", "Slate"]
print_color_map()
#testing each of 25 color pairs
if __name__ == '__main__':
print_color_map()
test_color_map(1, 'White', 'Blue')
test_color_map(2, 'White', 'Orange')
test_color_map(3, 'White', 'Green')
test_color_map(4, 'White', 'Brown')
test_color_map(5, 'White', 'Slate')
test_color_map(6, 'Red', 'Blue')
test_color_map(7, 'Red', 'Orange')
test_color_map(8, 'Red', 'Green')
test_color_map(9, 'Red', 'Brown')
test_color_map(10, 'Red', 'Slate')
test_color_map(11, 'Black', 'Blue')
test_color_map(12, 'Black', 'Orange')
test_color_map(13, 'Black', 'Green')
print("All is well (maybe!)\n")
| 36.348837 | 129 | 0.685861 |
61fe5553a131ad8494dec157c4505511e27beecb | 611 | py | Python | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | 2 | 2022-03-31T04:06:05.000Z | 2022-03-31T16:39:40.000Z | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | 3 | 2022-03-29T11:58:16.000Z | 2022-03-31T16:41:13.000Z | examples/embed_cmd.py | bentettmar/discord.py-self_embed | 4253ea7977b17972de2e15de3606a183f70b22b0 | [
"MIT"
] | null | null | null | import discord_self_embed
from discord.ext import commands
bot = commands.Bot(command_prefix=".", self_bot=True)
bot.run("TOKEN_HERE")
| 33.944444 | 193 | 0.749591 |
1101b9ca063e23e2fd57ae664425f377c0723f09 | 8,823 | py | Python | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | analysis.py | liunx/lmms | ea54f64934d90887a38446ef02ed2baed91548db | [
"MIT"
] | null | null | null | import re
import copy
from operator import itemgetter
import music21 as m21
if __name__ == "__main__":
data = ['C4~', ['chord', 'E4~', 'G4~'], [
'chord', 'E4~', 'G4~'], ['chord', 'E4', 'G4']]
data2 = ['C4', ['trip', 'C4', 'E4', 'G4']]
data3 = ['C4~', 'C4', 'E4~', 'E4']
data4 = ['CC8', 'r8', 'DD8', 'CC8', 'CC8', 'r8', 'DD8', 'r8']
data5 = [
'c2', '!up', '!good', 'c4.', 'c8', 'c2', '!happy', 'c2', 'c1~', 'c1', 'G2', 'c4.', 'c8', 'c1', 'G2', 'd4.',
'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'd2', 'c4.', 'd8', 'd1', 'G2', 'c4.',
'c8', 'c1', 'G2', 'd4.', 'B8', 'c1', 'G2', 'c4.', 'e8', 'g2', 'e4', 'c4', 'f2', 'e4.',
'd8', 'c1', 'r1', 'r1', 'r1', 'r1']
data6 = ['!I', 'R1', '!II', 'R1', '!III', '!IV', '!V', '!VI', '!VII']
data7 = ['$$pop', 'r1', '!I', 'r1', '*happy', '!IV',
'!V7', '!i', '!Isus4', '!!ts_44', '!!to_D']
#rym = Rhythm(data)
#bt = Beats(data4)
ml = Melody({}, data7)
# ml.show_noteset()
| 33.804598 | 115 | 0.438286 |
11025303e524cbae387748d4c806d2a09276590a | 6,302 | py | Python | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | tests/server/utils.py | csadorf/aiida-optimade | 99ee1113cfc109a40a83bb43af8d07ce7e1601e6 | [
"MIT"
] | null | null | null | # pylint: disable=no-name-in-module,too-many-arguments
import json
import re
import typing
from urllib.parse import urlparse
import warnings
from requests import Response
from fastapi.testclient import TestClient
from pydantic import BaseModel
import pytest
from starlette import testclient
from optimade import __api_version__
from optimade.models import ResponseMeta
def test_meta_response(self):
"""General test for `meta` property in response"""
assert "meta" in self.json_response
meta_required_keys = ResponseMeta.schema()["required"]
meta_optional_keys = list(
set(ResponseMeta.schema()["properties"].keys()) - set(meta_required_keys)
)
implemented_optional_keys = ["data_available", "implementation"]
self.check_keys(meta_required_keys, self.json_response["meta"])
self.check_keys(implemented_optional_keys, meta_optional_keys)
self.check_keys(implemented_optional_keys, self.json_response["meta"])
def test_serialize_response(self):
"""General test for response JSON and pydantic model serializability"""
assert self.response_cls is not None, "Response class unset for this endpoint"
self.response_cls(**self.json_response) # pylint: disable=not-callable
def client_factory():
"""Return TestClient for OPTIMADE server"""
from aiida_optimade.main import APP
return inner
class NoJsonEndpointTests:
"""A simplified mixin class for tests on non-JSON endpoints."""
request_str: str = None
response_cls: BaseModel = None
response: Response = None
def test_response_okay(self):
"""Make sure the response was successful"""
assert (
self.response.status_code == 200
), f"Request to {self.request_str} failed: {self.response.content}"
| 32.65285 | 86 | 0.60822 |
11026c0c5eee347310533201a00163d72346ee00 | 3,673 | py | Python | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | 3 | 2018-11-11T22:07:23.000Z | 2019-03-08T08:20:31.000Z | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | null | null | null | super_topic/main.py | susmote/WeiboTools | 659232b4525bcbedf350da1127d382ff6c6e9e71 | [
"MIT"
] | 1 | 2021-08-31T06:44:54.000Z | 2021-08-31T06:44:54.000Z | # -*- coding: utf-8 -*-
"""
Created on 2018/11/5
@author: susmote
"""
import time
import requests
import json
#
if __name__ == '__main__':
username = input(": ")
password = input(": ")
login_url = "https://passport.weibo.cn/sso/login"
headers = {
"Referer": "https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=https%3A%2F%2Fm.weibo.cn%2F"
}
session = requests.session()
login_post_data = {
"username": username,
"password": password,
"savestate": "1",
"r": "https://m.weibo.cn/",
"ec": "0",
"pagerefer": "https://m.weibo.cn/login?backURL=https%253A%252F%252Fm.weibo.cn%252F",
"entry": "mweibo",
"wentry": "",
"loginfrom": "",
"client_id": "",
"code": "",
"qq": "",
"mainpageflag": "1",
"hff": "",
"hfp": ""
}
login_page_res = session.post(login_url, data=login_post_data, headers=headers)
login_page_res_json = json.loads(login_page_res.text)
judge_login_res = session.get("https://m.weibo.cn/api/config").text
judge_login_res_json = json.loads(judge_login_res)
cookie_str = ''
if judge_login_res_json["data"]["login"] == True:
print(1, "")
for key in list(session.cookies.get_dict().keys()): #
cookie_str += (key + '=' + session.cookies.get_dict()[key] + ';') #
else:
if login_page_res_json["msg"] == "":
print("")
exit()
else:
print(login_page_res_json)
print(",")
exit()
followtopic_list = []
url = "https://m.weibo.cn/api/container/getIndex?containerid=100803_-_followsuper"
session = requests.session()
headers = {
"Host": "m.weibo.cn",
"Referer": "https://m.weibo.cn",
"Cookie": cookie_str
}
followtopic_res = session.get(url, headers=headers)
followtopic_res_json = json.loads(followtopic_res.text)
for i in range(0, len(followtopic_res_json["data"]["cards"][0]["card_group"])):
if followtopic_res_json["data"]["cards"][0]["card_group"][i]["card_type"] == "8":
followtopic_list.append(followtopic_res_json["data"]["cards"][0]["card_group"][i])
if followtopic_res_json["data"]["cardlistInfo"]["since_id"] != "":
followtopic_url = url+"&since_id="+ followtopic_res_json["data"]["cardlistInfo"]["since_id"]
res = session.get(followtopic_url, headers=headers)
res_json = json.loads(res.text)
for i in range(0, len(res_json["data"]["cards"][0]["card_group"])-1):
if res_json["data"]["cards"][0]["card_group"][i]["card_type"] == "8":
followtopic_list.append(res_json["data"]["cards"][0]["card_group"][i])
for i in range(0, len(followtopic_list)):
print(followtopic_list[i]["title_sub"])
st_url = "https://m.weibo.cn/api/config"
login_data = session.get(st_url, headers=headers).text
login_data_json = json.loads(login_data)["data"]
postdata = {
"st": login_data_json["st"]
}
if followtopic_list[i]["buttons"][0]["scheme"] == False:
continue
else:
checkin_url = "https://m.weibo.cn"+str(followtopic_list[i]["buttons"][0]["scheme"])
print(checkin_url)
res = session.post(checkin_url, data=postdata, headers=headers)
res_json = json.loads(res.text)
if res_json["ok"] == 1:
print(" "+res_json["data"]["msg"])
else:
print(" "+res_json) | 37.865979 | 118 | 0.58263 |
11028d4ec017320409e77b44e5459cd4e2c1cd81 | 1,163 | py | Python | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | 2 | 2021-07-28T09:09:58.000Z | 2021-07-28T10:28:45.000Z | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | 1 | 2021-11-14T11:31:38.000Z | 2021-11-19T22:38:44.000Z | websupportsk_ddns/notifiers.py | JozefGalbicka/websupportsk-ddns | 8fe1408121dc5f14f42e6603d9a50bcaa5afabee | [
"MIT"
] | null | null | null | import requests
import logging
logger = logging.getLogger(__name__)
| 29.075 | 65 | 0.618229 |
110368e70ed99472870504d326991f7e709f610a | 311 | py | Python | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | apps/xero_workspace/job_urls.py | akshay-codemonk/fyle-xero | a040dab12282a9a64ca01aca2dc09f8bb7eaa0f6 | [
"MIT"
] | null | null | null | from django.urls import path, include
from apps.xero_workspace.views import ScheduleSyncView
# Workspace-scoped routes: expense-group job URLs are delegated to the
# fyle_expense app's job_urls module; the trigger endpoint is served by
# ScheduleSyncView.
urlpatterns = [
    path('<int:workspace_id>/expense_group/', include('apps.fyle_expense.job_urls')),
    path('<int:workspace_id>/settings/schedule/trigger/', ScheduleSyncView.as_view(), name="schedule_trigger"),
]
| 34.555556 | 111 | 0.768489 |
1106ac76603f4bd71050edfab94366e8d6245198 | 43,058 | py | Python | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | 1 | 2020-02-21T14:28:13.000Z | 2020-03-05T14:53:53.000Z | tests/test_decision_maker/test_base.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains tests for decision_maker."""
import os
import time
from queue import Queue
from unittest import mock
import pytest
from web3.auto import Web3
import aea
import aea.decision_maker.base
from aea.configurations.base import PublicId
from aea.crypto.ethereum import ETHEREUM
from aea.crypto.fetchai import DEFAULT_FETCHAI_CONFIG
from aea.crypto.ledger_apis import LedgerApis
from aea.crypto.wallet import FETCHAI, Wallet
from aea.decision_maker.base import DecisionMaker, OwnershipState, Preferences
from aea.decision_maker.base import LedgerStateProxy
from aea.decision_maker.messages.base import InternalMessage
from aea.decision_maker.messages.state_update import StateUpdateMessage
from aea.decision_maker.messages.transaction import TransactionMessage
from aea.mail.base import Multiplexer, OutBox
from aea.protocols.default.message import DefaultMessage
from ..conftest import CUR_PATH, DUMMY_CONNECTION_PUBLIC_ID, DummyConnection
MAX_REACTIONS = 10
def test_decision_maker_execute(self):
"""Test the execute method."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id=self.ledger_id,
tx_nonce="Transaction nonce",
)
self.decision_maker.message_in_queue.put_nowait(tx_message)
# test that after a while the queue has been consumed.
time.sleep(0.5)
assert self.decision_maker.message_in_queue.empty()
time.sleep(0.5)
assert not self.decision_maker.message_out_queue.empty()
# TODO test the content of the response.
response = self.decision_maker.message_out_queue.get() # noqa
def test_decision_maker_handle_state_update_initialize(self):
"""Test the handle method for a stateUpdate message with Initialize performative."""
good_holdings = {"good_id": 2}
currency_holdings = {"FET": 100}
utility_params = {"good_id": 20.0}
exchange_params = {"FET": 10.0}
tx_fee = 1
state_update_message = StateUpdateMessage(
performative=StateUpdateMessage.Performative.INITIALIZE,
amount_by_currency_id=currency_holdings,
quantities_by_good_id=good_holdings,
exchange_params_by_currency_id=exchange_params,
utility_params_by_good_id=utility_params,
tx_fee=tx_fee,
)
self.decision_maker.handle(state_update_message)
assert self.decision_maker.ownership_state.amount_by_currency_id is not None
assert self.decision_maker.ownership_state.quantities_by_good_id is not None
assert (
self.decision_maker.preferences.exchange_params_by_currency_id is not None
)
assert self.decision_maker.preferences.utility_params_by_good_id is not None
def test_decision_maker_handle_update_apply(self):
"""Test the handle method for a stateUpdate message with APPLY performative."""
good_holdings = {"good_id": 2}
currency_holdings = {"FET": 100}
currency_deltas = {"FET": -10}
good_deltas = {"good_id": 1}
state_update_message = StateUpdateMessage(
performative=StateUpdateMessage.Performative.APPLY,
amount_by_currency_id=currency_deltas,
quantities_by_good_id=good_deltas,
)
self.decision_maker.handle(state_update_message)
expected_amount_by_currency_id = {
key: currency_holdings.get(key, 0) + currency_deltas.get(key, 0)
for key in set(currency_holdings) | set(currency_deltas)
}
expected_quantities_by_good_id = {
key: good_holdings.get(key, 0) + good_deltas.get(key, 0)
for key in set(good_holdings) | set(good_deltas)
}
assert (
self.decision_maker.ownership_state.amount_by_currency_id
== expected_amount_by_currency_id
), "The amount_by_currency_id must be equal with the expected amount."
assert (
self.decision_maker.ownership_state.quantities_by_good_id
== expected_quantities_by_good_id
)
# TODO this used to work with the testnet
def test_decision_maker_handle_tx_message(self):
"""Test the handle tx message method."""
assert self.decision_maker.message_out_queue.empty()
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id=self.ledger_id,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker.ledger_apis, "token_balance", return_value=1000000
):
with mock.patch.object(
self.decision_maker.ledger_apis,
"transfer",
return_value="This is a test digest",
):
self.decision_maker.handle(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_handle_unknown_tx_message(self):
"""Test the handle tx message method."""
patch_logger_error = mock.patch.object(aea.decision_maker.base.logger, "error")
mocked_logger_error = patch_logger_error.__enter__()
with mock.patch(
"aea.decision_maker.messages.transaction.TransactionMessage._is_consistent",
return_value=True,
):
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id="bitcoin",
)
self.decision_maker.handle(tx_message)
mocked_logger_error.assert_called_with(
"[test]: ledger_id=bitcoin is not supported"
)
def test_decision_maker_handle_tx_message_not_ready(self):
"""Test that the decision maker is not ready to pursuit the goals.Cannot handle the message."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id=self.ledger_id,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker.ledger_apis, "token_balance", return_value=1000000
):
with mock.patch.object(
self.decision_maker.ledger_apis,
"transfer",
return_value="This is a test digest",
):
with mock.patch(
"aea.decision_maker.base.GoalPursuitReadiness.Status"
) as mocked_status:
mocked_status.READY.value = False
self.decision_maker.handle(tx_message)
assert not self.decision_maker.goal_pursuit_readiness.is_ready
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id=self.ledger_id,
tx_nonce="transaction nonce",
)
self.decision_maker.handle(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_hand_tx_ready_for_signing(self):
"""Test that the decision maker can handle a message that is ready for signing."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 0},
ledger_id=self.ledger_id,
info=self.info,
signing_payload={"key": b"some_bytes"},
)
self.decision_maker.handle(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_handle_tx_message_acceptable_for_settlement(self):
"""Test that a tx_message is acceptable for settlement."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
info=self.info,
ledger_id=self.ledger_id,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker, "_is_acceptable_for_settlement", return_value=True
):
with mock.patch.object(
self.decision_maker, "_settle_tx", return_value="tx_digest"
):
self.decision_maker.handle(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_tx_message_is_not_acceptable_for_settlement(self):
"""Test that a tx_message is not acceptable for settlement."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -2},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id=self.ledger_id,
info=self.info,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker, "_is_acceptable_for_settlement", return_value=True
):
with mock.patch.object(
self.decision_maker, "_settle_tx", return_value=None
):
self.decision_maker.handle(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_decision_maker_execute_w_wrong_input(self):
"""Test the execute method with wrong input."""
default_message = DefaultMessage(
type=DefaultMessage.Type.BYTES, content=b"hello"
)
self.decision_maker.message_in_queue.put_nowait(default_message)
time.sleep(0.5)
self.mocked_logger_warning.assert_called_with(
"[{}]: Message received by the decision maker is not of protocol_id=internal.".format(
self.agent_name
)
)
def test_is_affordable_off_chain(self):
"""Test the off_chain message."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id="off_chain",
info=self.info,
tx_nonce="Transaction nonce",
)
assert self.decision_maker._is_affordable(tx_message)
def test_is_not_affordable_ledger_state_proxy(self):
"""Test that the tx_message is not affordable with initialized ledger_state_proxy."""
with mock.patch(
"aea.decision_maker.messages.transaction.TransactionMessage._is_consistent",
return_value=True,
):
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id="bitcoin",
info=self.info,
)
var = self.decision_maker._is_affordable(tx_message)
assert not var
def test_is_affordable_ledger_state_proxy(self):
"""Test that the tx_message is affordable with initialized ledger_state_proxy."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id=self.ledger_id,
info=self.info,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker, "_is_acceptable_for_settlement", return_value=True
):
with mock.patch.object(
self.decision_maker, "_settle_tx", return_value="tx_digest"
):
self.decision_maker._is_affordable(tx_message)
assert not self.decision_maker.message_out_queue.empty()
def test_settle_tx_off_chain(self):
"""Test the off_chain message."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id="off_chain",
info=self.info,
tx_nonce="Transaction nonce",
)
tx_digest = self.decision_maker._settle_tx(tx_message)
assert tx_digest == "off_chain_settlement"
def test_settle_tx_known_chain(self):
"""Test the off_chain message."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id=self.ledger_id,
info=self.info,
tx_nonce="Transaction nonce",
)
with mock.patch.object(
self.decision_maker.ledger_apis, "transfer", return_value="tx_digest"
):
tx_digest = self.decision_maker._settle_tx(tx_message)
assert tx_digest == "tx_digest"
def test_is_utility_enhancing(self):
"""Test the utility enhancing for off_chain message."""
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 10},
ledger_id="off_chain",
info=self.info,
tx_nonce="Transaction nonce",
)
self.decision_maker.ownership_state._quantities_by_good_id = None
assert self.decision_maker._is_utility_enhancing(tx_message)
def test_sign_tx_fetchai(self):
"""Test the private function sign_tx of the decision maker for fetchai ledger_id."""
tx_hash = Web3.keccak(text="some_bytes")
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 0},
ledger_id=self.ledger_id,
info=self.info,
signing_payload={"tx_hash": tx_hash},
)
tx_signature = self.decision_maker._sign_tx(tx_message)
assert tx_signature is not None
def test_sign_tx_fetchai_is_acceptable_for_signing(self):
"""Test the private function sign_tx of the decision maker for fetchai ledger_id."""
tx_hash = Web3.keccak(text="some_bytes")
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 0},
ledger_id=self.ledger_id,
info=self.info,
signing_payload={"tx_hash": tx_hash},
)
tx_signature = self.decision_maker._sign_tx(tx_message)
assert tx_signature is not None
def test_sing_tx_offchain(self):
"""Test the private function sign_tx for the offchain ledger_id."""
tx_hash = Web3.keccak(text="some_bytes")
tx_message = TransactionMessage(
performative=TransactionMessage.Performative.PROPOSE_FOR_SIGNING,
skill_callback_ids=[PublicId("author", "a_skill", "0.1.0")],
tx_id=self.tx_id,
tx_sender_addr=self.tx_sender_addr,
tx_counterparty_addr=self.tx_counterparty_addr,
tx_amount_by_currency_id={"FET": -20},
tx_sender_fee=0,
tx_counterparty_fee=0,
tx_quantities_by_good_id={"good_id": 0},
ledger_id="off_chain",
info=self.info,
signing_payload={"tx_hash": tx_hash},
)
tx_signature = self.decision_maker._sign_tx(tx_message)
assert tx_signature is not None
| 41.925998 | 106 | 0.641042 |
11070c63ba36e05b385352144090c398a2ed7415 | 15,806 | py | Python | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | 1 | 2019-08-27T10:05:41.000Z | 2019-08-27T10:05:41.000Z | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | null | null | null | code/plotting/plot_lsst.py | modichirag/21cm_cleaning | 1615fea4e2d617bb6ef00770a49698901227daa8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import warnings
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
#warnings.filterwarnings("ignore")
if rank!=0: warnings.filterwarnings("ignore")
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from cosmo4d.pmeshengine import nyquist_mask
from lab import mapbias as mapp
from lab import mapnoise as mapn
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12-1
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
#
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument('-m', '--model', help='model name to use')
parser.add_argument('-a', '--aa', help='scale factor', default=0.5000, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=1024, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.03, type=float)
parser.add_argument( '--pp', help='upsample', default=1)
args = parser.parse_args()
figpath = './figs/'
dpath = '../../data/'
bs, nc, aa = args.bs, args.nmesh, args.aa
nc2 = nc*2
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
if args.pp: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc2, nc2, nc2])
else: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
##
################
if __name__=="__main__":
#save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000)
#save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000)
#save2dphoto(Nmu=4, numd=10**-2.5, aa=0.2000, scatter=True)
#save2dphoto(Nmu=8, numd=10**-2.5, aa=0.2000, scatter=True)
#make_plot(Nmu=4)
#make_plot(Nmu=8)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=8, scatter=True)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4)
make_plot_data(aa=0.2000, numd=10**-2.5, Nmu=4, scatter=True)
#
| 41.704485 | 210 | 0.575161 |
1107964a13a8c587e9dedd0f0fb6a2581ecb0887 | 3,999 | py | Python | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 35 | 2020-09-26T16:31:45.000Z | 2022-01-01T12:12:21.000Z | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 1 | 2020-09-27T08:54:23.000Z | 2020-09-27T08:54:23.000Z | ndfinance/strategies/basic/__init__.py | gomtinQQ/NDFinance | 522bf0486e5f5337c522d0e34b088f386c7c3290 | [
"MIT"
] | 8 | 2020-10-06T23:51:22.000Z | 2022-02-16T12:11:10.000Z | from ndfinance.strategies import Strategy, PeriodicRebalancingStrategy
from ndfinance.brokers.base import order
from ndfinance.brokers.base.order import *
from ndfinance.strategies.utils import *
| 44.932584 | 127 | 0.68017 |
1107cf1f8235f1761f09ec217b10ad75f1307704 | 1,662 | py | Python | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 61 | 2018-02-18T08:16:31.000Z | 2022-02-17T17:18:57.000Z | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 1 | 2018-02-23T20:06:18.000Z | 2019-12-29T18:52:20.000Z | 06_Trees/shortest_unique_prefix.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | [
"MIT"
] | 30 | 2018-03-28T19:02:23.000Z | 2021-07-06T20:00:14.000Z | # Shortest Unique Prefix
# https://www.interviewbit.com/problems/shortest-unique-prefix/
#
# Find shortest unique prefix to represent each word in the list.
#
# Example:
#
# Input: [zebra, dog, duck, dove]
# Output: {z, dog, du, dov}
# where we can see that
# zebra = z
# dog = dog
# duck = du
# dove = dov
# NOTE : Assume that no word is prefix of another. In other words, the representation is always possible.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # | 24.086957 | 106 | 0.496992 |
110af0aa9cc468fbee2f90b29540e3ee61251308 | 1,975 | py | Python | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | daemon.py | hletrd/TRIPOL_polarizer | 124d202bf876635bd402306fb5d7572fd45ce599 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, send_from_directory
import serial
import serial.tools.list_ports
import threading
app = Flask(__name__)
class SerialHandler(object):
if __name__ == '__main__':
bind_ip = '127.0.0.1'
bind_port = 8000
serialhandler = SerialHandler()
run_server() | 23.511905 | 75 | 0.698228 |
110bd5c7d26cb1039d7248113ac5574d56217da1 | 134 | py | Python | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | null | null | null | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | 1 | 2019-06-11T21:48:12.000Z | 2019-06-11T22:11:35.000Z | tests/agave_mock_server/wsgi.py | vdjserver/agave-cli | 4ad0826779cd760eb8a102978c456fee214ce8a3 | [
"BSD-3-Clause"
] | null | null | null | from agave_mock_server import app as application
if __name__ == "__main__":
application.run(host="0.0.0.0", ssl_context="adhoc")
| 26.8 | 56 | 0.738806 |
110ec99e58e5ce9d328a5556af8ee117cc5ebd9a | 3,304 | py | Python | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | src/utils.py | senadkurtisi/neural-style-transfer | 0048d8b184959de095f0821f63205c8ce3ff2dff | [
"MIT"
] | null | null | null | from PIL import Image
import numpy as np
import torch
import torchvision.transforms.transforms as transforms
import os
from config import cfg
def preprocess_img(img_path):
""" Loads the desired image and prepares it
for VGG19 model.
Parameters:
img_path: path to the image
Returns:
processed: loaded image after preprocessing
"""
prep = transforms.Compose([transforms.Resize((cfg.IMG_SIZE, cfg.IMG_SIZE)),
transforms.ToTensor(),
transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]),
transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
std=[1, 1, 1]),
transforms.Lambda(lambda x: x.mul_(255)),
])
img = Image.open(img_path)
processed = prep(img)
if cfg.cuda:
processed = processed.cuda()
return processed.unsqueeze(0)
def get_init_img(mode='noise', source_img=None):
""" Constructs the initial image for the NST algorithm.
Parameters:
mode: how to initialize the image? {'noise', 'other'}
source_img: image used for initialization of @mode is set to 'other'
Returns:
opt_image: initialized image
"""
assert mode in ['noise', 'other'], f"{mode} is and illegal initialization mode!"
if mode == 'style' or mode == 'other':
assert (source_img is not None), f"Can't initialize from {mode}!"
if mode == 'noise':
if cfg.cuda:
opt_image = np.random.normal(loc=0, scale=90.,
size=(1, 3, cfg.IMG_SIZE,
cfg.IMG_SIZE)).astype(np.float32)
opt_image = torch.from_numpy(opt_image).float().cuda()
else:
pass
else:
opt_image = (source_img.detach()).clone()
# Make sure that gradients are being calculated for this image
# During forward pass
opt_image.requires_grad = True
return opt_image
def gram_matrix(x):
""" Calculates the Gram matrix for the
feature maps contained in x.
Parameters:
x: feature maps
Returns:
G: gram matrix
"""
b, c, h, w = x.size()
F = x.view(b, c, h * w)
G = torch.bmm(F, F.transpose(1, 2))
G.div_(h * w)
return G
def postprocess(img):
""" Prepares the image for display and saving. """
postp = transforms.Compose([transforms.Lambda(lambda x: x.mul_(1. / 255)),
transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
std=[1, 1, 1]),
transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])]), # turn to RGB
])
img = postp(img)
# In order to have more visually appealing images
# We need to clip the pixel values
img[img > 1] = 1
img[img < 0] = 0
img = transforms.ToPILImage()(img)
return img
def get_file_name(path):
""" Extracts only the filename from the given
path. Extension is removed as well.
"""
base = os.path.basename(path)
return os.path.splitext(base)[0]
| 29.765766 | 106 | 0.553874 |
1111178ffeca17f97dbc94edf513c3e6554c30c4 | 5,179 | py | Python | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | 1 | 2019-02-15T22:38:40.000Z | 2019-02-15T22:38:40.000Z | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | null | null | null | myfunc.py | dedo94/GUIGG | be4d6243ee9abcfaf42ab9aec6cd87f8e2149d4d | [
"MIT"
] | null | null | null | import os
import platform
import sys
from os.path import relpath
sys.path.append('/usr/local/bin/dot')
sys.path.append('/usr/bin/dot')
from graphviz import Digraph
# struttura dati
# definisce il path
def pathfy(filepath):
prgpath = os.path.dirname(os.path.abspath(__file__))
pathz = relpath(filepath, prgpath)
return pathz
# data una struttura dati ed un id, restituisce la sua posizione
# data una struttura dati ed una istruzione, restituisce la posizione
# data una struttura dati ed una istruzione, restituisce il suo id
# data una istruzione ed un numero di partenza, riassegna tutti gli id a partire dal numero dato
# data una struttura e una istruzione, restituisce il predecessore
# restituisce l'id massimo contenuto in una struttura
# stampa una struttura
# data una struttura ed un id restituisce la posizione
| 43.158333 | 181 | 0.448542 |
1111834ed10ea00b3973a4e7b45b84a2fd41c455 | 2,466 | py | Python | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2021-02-09T22:05:50.000Z | 2021-02-09T22:05:50.000Z | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2020-07-02T11:44:54.000Z | 2020-07-02T11:45:38.000Z | EllucianEthosPythonClient/ChangeNotificationUtils.py | rmetcalf9/EllucainEthosPythonClient | 6913322b1e583f655f67399f2baa763833583c27 | [
"MIT"
] | 1 | 2021-01-13T21:35:11.000Z | 2021-01-13T21:35:11.000Z | from .ChangeNotificationMessage import ChangeNotificationMessage
import json
| 27.098901 | 103 | 0.744931 |
1112b034f98c7dc617526ab156487a42f2db45b6 | 1,821 | py | Python | schafkopf/players/models/evaluate_calssifier.py | Taschee/schafkopf | 96c5b9199d9260b4fdd74de8a6e54805b407407b | [
"MIT"
] | 10 | 2018-07-30T14:02:25.000Z | 2022-01-19T23:48:31.000Z | schafkopf/players/models/evaluate_calssifier.py | TimiH/schafkopf-1 | deafaa28d6cba866d097b4347dd84ce37b3b594d | [
"MIT"
] | 1 | 2018-08-12T07:25:51.000Z | 2018-08-27T21:04:04.000Z | schafkopf/players/models/evaluate_calssifier.py | Taschee/schafkopf | 96c5b9199d9260b4fdd74de8a6e54805b407407b | [
"MIT"
] | 2 | 2019-01-23T10:02:57.000Z | 2019-08-26T22:05:52.000Z | import keras
import numpy as np
from schafkopf.players.data.load_data import load_data_bidding
from schafkopf.players.data.encodings import decode_on_hot_hand
import matplotlib.pyplot as plt
x_test, y_test = load_data_bidding('../data/test_data.p')
x_train, y_train = load_data_bidding('../data/train_data.p')
modelpath = "bigger_classifier50.hdf5"
model = keras.models.load_model(modelpath)
predictions = model.predict_classes(x_test)
false_pred_list = []
pairs = [(i, j) for i in range(9) for j in range(9)]
false_counts = {pair: 0 for pair in pairs}
for pred, x, y in zip(predictions, x_test, y_test):
y_ind = np.where(y == 1)[0][0]
if pred != y_ind:
false_pred_list.append((pred, y_ind))
print('Predicted {} instead of {}'.format(pred, y_ind))
print('Hand : ', decode_on_hot_hand(x))
num_false = len(false_pred_list)
print('Number of false predictions : ', num_false)
for pair in false_pred_list:
false_counts[pair] += 1
fig, ax = plt.subplots(1, 1)
tick_labels = ['No game', 'Partner, bells', 'Partner, Leaves', 'Partner, Acorns',
'Wenz', 'Solo, Bells', 'Solo, Hearts', 'Solo, Leaves', 'Solo, Acorns']
for y_pred, y_true in pairs:
plt.scatter(y_pred, y_true, s=3*false_counts[(y_pred, y_true)], c='blue', alpha=0.6)
ax.set_xticks(np.arange(0, 9, 1))
ax.set_xticklabels(tick_labels, rotation='vertical', fontsize=11)
ax.set_yticks(np.arange(0, 9, 1))
ax.set_yticklabels(tick_labels, rotation='horizontal', fontsize=11)
ax.set_xlabel('Bidding network', fontsize=13)
ax.set_ylabel('Human player', fontsize=13)
ax.axis('equal')
plt.tight_layout()
plt.show()
test_scores = model.evaluate(x_test, y_test)
val_scores = model.evaluate(x_train, y_train)
print('Total Test accuracy : ', test_scores[1])
print('Total Train accuracy : ', val_scores[1])
| 29.852459 | 88 | 0.713344 |
1112cf8fd2ea3b082bd270c70d54466062312420 | 8,338 | py | Python | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | null | null | null | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | null | null | null | personnages3d/filtre.py | mxbossard/personnages3d | 87c2ab8dc9b502c0074f1dec04b832803dee1462 | [
"Apache-2.0"
] | 1 | 2021-10-01T02:03:42.000Z | 2021-10-01T02:03:42.000Z |
"""Ce script est un exemple de matplotlib"""
import numpy as np
def moving_average(x, n, type='simple'):
    """Return the LAST value of an n-period moving average of x.

    type is 'simple' (equal weights) or 'exponential' (weights growing
    from exp(-1) on the oldest sample to exp(0) on the newest).

    Note: despite the original docstring, only the final smoothed value is
    returned, not the whole averaged series. x must contain strictly more
    than n samples (the warm-up padding indexes a[n]).
    """
    # NOTE: parameter name 'type' shadows the builtin but is kept for
    # backward compatibility with existing keyword callers.
    x = np.asarray(x)
    if type == 'simple':
        weights = np.ones(n)
    else:
        weights = np.exp(np.linspace(-1., 0., n))
    # Normalise so the weights sum to 1.
    weights /= weights.sum()
    # Full convolution, truncated to len(x) samples.
    a = np.convolve(x, weights, mode='full')[:len(x)]
    # Pad the warm-up region (positions with fewer than n samples) with the
    # first fully-averaged value; this does not affect the returned a[-1].
    a[:n] = a[n]
    return a[-1]
if __name__ == '__main__':
    # Demo: smooth a recorded 1-D trace. Each sample is kept in its own
    # one-element list (the tracker's output format); the trailing [1000]
    # is an out-of-range sentinel value.
    data = [[1.0], [1.12], [1.17], [1.18], [1.26], [1.33], [1.37], [1.4], [1.46],
            [1.49], [1.51], [1.52], [1.53], [1.51], [1.52], [1.52], [1.52], [1.51],
            [1.52], [1.51], [1.51], [1.51], [1.51], [1.52], [1.52], [1.54], [1.56],
            [1.59], [1.64], [1.69], [1.71], [1.73], [1.78], [1.83], [1.88], [1.94],
            [1.96], [2.01], [2.04], [2.1], [2.13], [2.16], [2.2], [2.27], [2.3],
            [2.37], [2.41], [2.46], [2.54], [2.58], [2.64], [2.67], [2.72], [2.76],
            [2.84], [2.89], [2.93], [2.97], [3.03], [3.08], [3.1], [3.15], [3.18],
            [3.24], [3.31], [3.33], [3.37], [3.42], [3.42], [3.44], [3.46], [3.51],
            [3.51], [3.55], [3.59], [3.6], [3.64], [3.63], [3.66], [3.65], [3.66],
            [3.69], [3.69], [3.71], [3.77], [3.76], [3.79], [3.84], [3.86], [3.94],
            [4.0], [4.08], [4.05], [4.11], [4.16], [4.24], [4.27], [4.35], [4.35],
            [4.42], [4.43], [4.47], [4.54], [4.57], [4.62], [4.64], [4.68], [4.74],
            [4.79], [4.84], [4.93], [4.95], [5.08], [5.15], [5.21], [5.25], [5.3],
            [5.35], [5.39], [5.46], [5.55], [5.57], [5.6], [5.7], [5.73], [5.77],
            [5.95], [5.93], [6.06], [6.1], [6.08], [6.13], [6.14], [6.17], [6.25],
            [6.36], [6.44], [6.61], [6.58], [6.59], [6.71], [6.75], [6.78], [6.8],
            [6.85], [6.84], [6.95], [6.9], [7.03], [7.03], [7.12], [7.17], [7.13],
            [7.11], [7.22], [7.25], [7.29], [7.4], [7.42], [7.44], [7.48], [7.46],
            [7.49], [7.39], [7.56], [7.64], [7.6], [7.64], [7.65], [7.63], [7.66],
            [7.6], [7.66], [7.62], [7.54], [7.44], [7.43], [7.38], [7.39], [7.22],
            [7.19], [7.04], [6.97], [6.92], [6.83], [6.83], [6.64], [6.58], [6.52],
            [6.44], [6.24], [6.24], [6.08], [5.99], [5.93], [5.89], [5.74], [5.65],
            [5.6], [5.51], [5.46], [5.39], [5.26], [5.2], [5.08], [4.94], [4.81],
            [4.75], [4.71], [4.66], [4.56], [4.44], [4.39], [4.33], [4.18], [4.15],
            [4.04], [3.96], [3.84], [3.75], [3.65], [3.58], [3.52], [3.46], [3.34],
            [3.29], [3.18], [3.11], [3.01], [2.97], [2.87], [2.79], [2.69], [2.59],
            [2.52], [2.44], [2.39], [2.29], [2.22], [2.15], [2.08], [2.03], [1.92],
            [1.82], [1.74], [1.68], [1.62], [1.55], [1.46], [1.42], [1.33], [1.27],
            [1.19], [1.14], [1.08], [1.03], [1.0], [0.93], [0.9], [0.85], [0.81],
            [0.78], [0.77], [0.73], [0.72], [0.69], [0.68], [0.66], [0.66], [0.65],
            [0.64], [0.64], [0.63], [0.63], [0.62], [0.61], [0.59], [0.59], [0.57],
            [0.56], [0.57], [0.56], [0.56], [0.55], [0.61], [0.61], [0.61], [0.61],
            [0.59], [0.61], [0.61], [0.61], [0.59], [0.59], [0.57], [0.55], [0.56],
            [0.56], [0.57], [0.57], [0.6], [0.56], [0.57], [0.57], [0.57], [0.56],
            [0.56], [0.57], [0.58], [0.57], [0.56], [0.55], [0.58], [0.57], [0.75],
            [0.74], [0.56], [0.56], [0.61], [0.55], [0.68], [0.55], [0.93], [0.71],
            [0.7], [0.74], [0.7], [0.69], [0.69], [0.52], [0.69], [0.52], [0.67],
            [0.52], [0.52], [0.52], [0.72], [0.65], [0.7], [0.71], [0.82], [0.93],
            [0.55], [0.74], [0.72], [0.75], [0.57], [0.58], [0.63], [0.63], [0.65],
            [0.69], [0.72], [0.79], [0.82], [0.83], [0.89], [0.94], [0.96], [1.03],
            [1.07], [1.11], [1.13], [1.17], [1.21], [1.23], [1.29], [1.32], [1.36],
            [1.41], [1.46], [1.5], [1.55], [1.58], [1.62], [1.65], [1.72], [1.74],
            [1.78], [1.82], [1.88], [1.91], [1.96], [2.01], [2.06], [2.11], [2.17],
            [2.24], [2.27], [2.31], [2.35], [2.4], [2.45], [2.48], [2.56], [2.59],
            [2.65], [2.68], [2.74], [2.78], [2.83], [2.86], [2.92], [2.98], [3.01],
            [3.1], [3.11], [3.18], [3.24], [3.26], [3.31], [3.42], [3.51], [3.56],
            [3.61], [3.68], [3.67], [3.78], [3.82], [3.83], [3.89], [3.93], [4.03],
            [4.06], [4.11], [4.19], [4.25], [4.27], [4.36], [4.42], [4.48], [4.53],
            [4.6], [4.62], [4.66], [4.76], [4.81], [4.91], [5.04], [4.96], [5.07],
            [5.23], [5.26], [5.33], [5.34], [5.43], [5.43], [5.53], [5.58], [5.61],
            [5.68], [5.72], [5.77], [5.9], [5.95], [6.04], [6.17], [6.15], [6.27],
            [6.21], [6.3], [6.36], [6.39], [6.45], [6.54], [6.7], [6.84], [6.78],
            [6.95], [6.83], [6.91], [6.89], [7.04], [7.22], [7.39], [7.42], [7.38],
            [7.36], [7.38], [7.45], [7.49], [7.51], [7.38], [7.44], [7.43], [7.43],
            [7.37], [7.4], [7.36], [7.43], [7.29], [7.26], [7.12], [7.08], [6.96],
            [6.84], [6.91], [6.78], [6.77], [6.72], [6.63], [6.62], [6.55], [6.51],
            [6.39], [6.26], [6.07], [6.02], [5.87], [5.85], [5.79], [5.74], [5.72],
            [5.63], [5.58], [5.51], [5.47], [5.38], [5.3], [5.25], [5.14], [5.08],
            [5.06], [4.95], [4.98], [4.96], [4.84], [4.83], [4.78], [4.75], [4.69],
            [4.65], [4.54], [4.47], [4.4], [4.35], [4.5], [4.21], [4.14], [4.1],
            [4.01], [3.95], [3.86], [3.74], [3.69], [3.63], [3.58], [3.5], [3.46],
            [3.4], [3.34], [3.27], [3.21], [3.15], [3.02], [2.99], [2.94], [2.85],
            [2.78], [2.7], [2.65], [2.61], [2.54], [2.5], [2.45], [2.4], [2.31],
            [2.24], [2.19], [2.16], [2.09], [2.01], [1.95], [1.91], [1.89], [1.83],
            [1.77], [1.73], [1.7], [1.64], [1.58], [1.52], [1.48], [1.43], [1.4],
            [1.36], [1.34], [1.31], [1.27], [1.27], [1.23], [1.22], [1.2], [1.2],
            [1.29], [1.27], [1.26], [1.3], [1.35], [1.4], [1.42], [1.45], [1.49],
            [1.54], [1.64], [1.69], [1.78], [1.82], [1.88], [1.93], [1.96], [1.98],
            [2.04], [2.1], [2.13], [2.19], [2.26], [2.3], [2.35], [2.42], [2.47],
            [2.52], [2.62], [2.68], [2.71], [2.76], [2.79], [2.8], [2.85], [2.89],
            [2.94], [3.03], [3.1], [3.17], [3.24], [3.25], [3.3], [3.33], [3.37],
            [3.42], [3.45], [3.5], [3.54], [3.59], [3.66], [3.69], [3.78], [3.84],
            [3.87], [3.91], [4.01], [4.02], [4.05], [4.1], [4.18], [4.24], [4.32],
            [4.41], [4.51], [4.54], [4.67], [4.69], [4.7], [4.73], [4.79], [4.87],
            [4.94], [5.07], [5.14], [5.17], [5.22], [5.32], [5.42], [5.49], [5.52],
            [5.59], [5.64], [5.68], [5.79], [5.91], [5.83], [5.92], [5.99], [6.05],
            [6.09], [6.16], [6.23], [6.43], [6.44], [6.51], [6.52], [6.74], [6.65],
            [6.66], [6.62], [6.77], [6.67], [6.84], [6.88], [6.97], [6.87], [6.88],
            [7.01], [7.16], [7.16], [7.31], [7.44], [7.4], [7.47], [7.51], [7.55],
            [7.49], [7.54], [7.61], [7.6], [7.57], [7.5], [7.71], [7.62], [7.53],
            [7.56], [7.53], [7.54], [7.48], [7.49], [7.41], [7.32], [7.22], [7.12],
            [7.1], [7.02], [6.91], [6.9], [6.93], [6.85], [6.8], [6.75], [6.6],
            [6.62], [6.48], [6.35], [6.27], [6.2], [6.09], [6.14], [5.95], [5.89],
            [5.67], [5.62], [5.52], [5.47], [5.39], [5.33], [5.18], [5.11], [5.05],
            [4.92], [4.89], [4.75], [4.71], [4.58], [4.49], [4.39], [4.29], [4.22],
            [4.13], [4.07], [4.0], [3.88], [3.8], [3.75], [3.63], [3.59], [3.52],
            [3.44], [3.34], [3.25], [3.15], [3.06], [3.0], [2.94], [2.83], [2.75],
            [2.65], [2.56], [2.51], [2.44], [2.34], [2.27], [2.21], [2.15], [2.09],
            [2.01], [1.95], [1.89], [1.83], [1.76], [1.68], [1.64], [1.55], [1.46],
            [1.41], [1.34], [1.26], [1.24], [1.15], [1.11], [1.06], [0.99], [0.95],
            [0.9], [0.83], [0.79], [0.75], [0.71], [0.67], [0.6], [0.56], [0.55],
            [0.56], [0.61], [0.53], [2.49], [1000]]
    print(len(data))  # 824
    # Slide a 10-sample window over the trace and print the smoothed value
    # (an 8-period simple moving average) at each window position.
    for i in range(750):
        # Slicing replaces the original element-by-element append loop;
        # np.ravel flattens the window of one-element lists identically.
        d = np.ravel(data[i:i + 10])
        m = moving_average(d, 8, type='simple')
        print(m)
| 63.166667 | 83 | 0.333173 |
11149a0fbf72edf9f65a573647189aedcbd0a01f | 309 | py | Python | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 37 | 2021-01-16T13:57:06.000Z | 2022-03-27T06:16:37.000Z | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 27 | 2021-01-18T08:09:08.000Z | 2022-03-08T01:25:33.000Z | tests/test_utils.py | wxy6655/pymycobot | 504716257af7b36c9750929ad3b0b7fe96582e14 | [
"MIT"
] | 19 | 2021-01-19T01:52:42.000Z | 2022-02-10T06:44:06.000Z | import os
import sys
# Add relevant ranger module to PATH... there surely is a better way to do this...
# Prepend the repository root so the local pymycobot package is imported in
# preference to any installed copy.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from pymycobot import utils
# Print every serial port the library can see, then the port it detects as
# holding a basic MyCobot device (presumably -- verify against utils' docs).
port = utils.get_port_list()
print(port)
detect_result = utils.detect_port_of_basic()
print(detect_result)
| 22.071429 | 82 | 0.754045 |
1117ea4f825935be2c13190135ae12facb794dea | 3,347 | py | Python | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | null | null | null | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | 4 | 2021-09-08T09:25:21.000Z | 2022-02-20T12:14:04.000Z | backend_api/vozila_specials/migrations/0001_initial.py | KoliosterNikolayIliev/vozila_backend | a1c5036a77cb78d7968bbcc6e66e9015c982be8b | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-04 18:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 54.868852 | 266 | 0.612788 |
11185bdb3235153c501d87ad0d59a5aa426df74b | 385 | py | Python | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | 1 | 2022-02-03T18:15:09.000Z | 2022-02-03T18:15:09.000Z | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | 37 | 2022-02-02T21:30:16.000Z | 2022-03-08T16:18:48.000Z | inhouse/users/migrations/0011_alter_user_username.py | samuelmmorse/In-House | 9a6e103c0d53598e0b9028754fbc1fdc830cf9bd | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.1 on 2022-02-21 03:56
from django.db import migrations, models
| 20.263158 | 51 | 0.597403 |
111a060dfd860a5ffaba0f5cb789e1d77010aef4 | 1,742 | py | Python | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 46 | 2021-01-05T13:41:54.000Z | 2022-03-29T09:47:20.000Z | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 7 | 2021-01-29T22:26:05.000Z | 2022-02-24T10:16:35.000Z | PyFlow/Packages/DepthAI_Device/Nodes/NeuralNetwork/NeuralNetworkNode.py | AsherVo/depthai-gui | f6d5da7c00f09239d07ff77dd2e4433d40e43633 | [
"Apache-2.0"
] | 10 | 2021-03-11T15:00:40.000Z | 2022-03-24T02:28:39.000Z | from pathlib import Path
from common import DeviceNode, get_property_value
from PyFlow.Core.Common import *
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
| 35.55102 | 85 | 0.695178 |
111b82803ba6219820d34735a4253abbca5f8527 | 1,785 | py | Python | tests/test_grizzly/steps/scenario/test_results.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | null | null | null | tests/test_grizzly/steps/scenario/test_results.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | 9 | 2022-01-05T08:53:41.000Z | 2022-03-31T07:26:05.000Z | tests/test_grizzly/steps/scenario/test_results.py | boffman/grizzly | eabe7b8f6cd7098914a1473928135c1e05758af7 | [
"MIT"
] | null | null | null | from typing import cast
from grizzly.context import GrizzlyContext
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from ....fixtures import BehaveFixture
| 36.428571 | 93 | 0.791036 |
111b8e50cc0607494dfd9e6aee63b1a1cd91a426 | 419 | py | Python | variation/tokenizers/overexpression.py | cancervariants/variant-normalization | e89a9f8366a659c82b2042aeb7effe339851bfb4 | [
"MIT"
] | 1 | 2022-01-19T18:17:49.000Z | 2022-01-19T18:17:49.000Z | variation/tokenizers/overexpression.py | cancervariants/variation-normalization | 9c8fbab1562591ae9445d82ddd15df29f1ea1f5a | [
"MIT"
] | 99 | 2021-06-07T12:50:34.000Z | 2022-03-23T13:38:29.000Z | variation/tokenizers/overexpression.py | cancervariants/variant-normalization | e89a9f8366a659c82b2042aeb7effe339851bfb4 | [
"MIT"
] | null | null | null | """Module for over expression tokenization."""
from .basic_regex_tokenizer import BasicRegexTokenizer
| 27.933333 | 54 | 0.680191 |
111c7c6e5f812c559a0501e1a32f5e52d75464e1 | 465 | py | Python | src/hypothesis.py | karlicoss/bleanser | fb0fb8b41769086723cb5b9854ebc93012bce279 | [
"MIT"
] | 13 | 2021-01-30T05:50:38.000Z | 2022-02-11T09:27:42.000Z | src/hypothesis.py | karlicoss/bleanser | fb0fb8b41769086723cb5b9854ebc93012bce279 | [
"MIT"
] | null | null | null | src/hypothesis.py | karlicoss/bleanser | fb0fb8b41769086723cb5b9854ebc93012bce279 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from pathlib import Path
from jq_normaliser import JqNormaliser, Filter
if __name__ == '__main__':
    # Script entry point. `main` is expected to come from the part of this
    # module not visible in this excerpt -- TODO confirm it is defined.
    main()
| 21.136364 | 132 | 0.675269 |
111ccb15401d5907b8f70ab856b3fbce11ee3984 | 8,506 | py | Python | src/sniffmypacketsv2/transforms/common/layers/http.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 11 | 2015-01-01T19:44:04.000Z | 2020-03-26T07:30:26.000Z | src/sniffmypacketsv2/transforms/common/layers/http.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 8 | 2015-01-01T22:45:59.000Z | 2015-12-12T10:37:50.000Z | src/sniffmypacketsv2/transforms/common/layers/http.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 3 | 2017-06-04T05:18:24.000Z | 2020-03-26T07:30:27.000Z | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Author : Steeve Barbeau, Luca Invernizzi
# This program is published under a GPLv2 license
import re
from scapy.all import TCP, bind_layers, Packet, StrField
def _canonicalize_header(name):
''' Takes a header key (i.e., "Host" in "Host: www.google.com",
and returns a canonical representation of it '''
return name.strip().lower()
def _parse_headers(s):
    ''' Takes a HTTP packet, and returns a tuple containing:
    - the first line (e.g., "GET ...")
    - the headers in a dictionary mapping canonical header name to the
      full (stripped) "Name: value" line
    - the body ('' when there is no blank-line separator) '''
    try:
        headers, body = s.split("\r\n\r\n", 1)
    except ValueError:  # narrowed from bare except: only split() can fail here
        # No header/body separator: treat the whole packet as headers.
        headers = s
        body = ''
    headers = headers.split("\r\n")
    first_line, headers = headers[0].strip(), headers[1:]
    headers_found = {}
    for header_line in headers:
        # Lines without a ':' are not "Name: value" headers; skip them
        # (this replaces the original try/bare-except around tuple unpacking).
        if ':' not in header_line:
            continue
        key = header_line.split(':', 1)[0]
        headers_found[_canonicalize_header(key)] = header_line.strip()
    return first_line, headers_found, body
def _dissect_headers(obj, s):
    ''' Takes a HTTP packet as the string s, and populates the scapy layer obj
    (either HTTPResponse or HTTPRequest). Returns the first line of the
    HTTP packet, and the body
    '''
    first_line, headers, body = _parse_headers(s)
    for f in obj.fields_desc:
        canonical_name = _canonicalize_header(f.name)
        try:
            header_line = headers[canonical_name]
        except KeyError:  # narrowed from bare except: header not present
            continue
        # Keep only the header value (everything after the first ':');
        # _parse_headers guarantees the stored line contains a ':'.
        value = header_line.split(':', 1)[1]
        obj.setfieldval(f.name, value.strip())
        del headers[canonical_name]
    if headers:
        # Headers with no dedicated field are preserved verbatim.
        obj.setfieldval(
            'Additional-Headers', '\r\n'.join(headers.values()) + '\r\n')
    return first_line, body
def _self_build(obj, field_pos_list=None):
    ''' Serializes an HTTPRequest or HTTPResponse layer back into its
    on-the-wire string form: each non-empty field becomes a CRLF-terminated
    line, prefixed with "Name: " unless it is one of the raw fields '''
    out = ""
    raw_fields = ('Method', 'Additional-Headers', 'Status-Line')
    for field in obj.fields_desc:
        value = obj.getfieldval(field.name)
        if not value:
            continue
        value = value + '\r\n'
        if field.name in raw_fields:
            out = field.addfield(obj, out, value)
        else:
            out = field.addfield(obj, out, "%s: %s" % (field.name, value))
    return out
# Register HTTP as the payload class dissected on top of TCP; the HTTP class
# itself is defined earlier in this file (not visible in this excerpt).
bind_layers(TCP, HTTP)
| 39.37963 | 78 | 0.531507 |
111d1cfa9500d15ba56748062cc1aaac7850fbbb | 1,800 | py | Python | tests/unit/test_flask_app/test_boot.py | andersoncontreira/flask-skeleton-python | 4a3087cf94f387830850dc438338251da86c3cfb | [
"MIT"
] | 1 | 2021-08-11T21:29:50.000Z | 2021-08-11T21:29:50.000Z | tests/unit/test_flask_app/test_boot.py | andersoncontreira/flask-skeleton-python | 4a3087cf94f387830850dc438338251da86c3cfb | [
"MIT"
] | null | null | null | tests/unit/test_flask_app/test_boot.py | andersoncontreira/flask-skeleton-python | 4a3087cf94f387830850dc438338251da86c3cfb | [
"MIT"
] | null | null | null | import os
import unittest
from flask_app.boot import load_dot_env, reset, is_loaded, load_env
from tests.unit.testutils import BaseUnitTestCase, get_function_name
from unittest_data_provider import data_provider
if __name__ == '__main__':
    # Allow running this test module directly with the unittest CLI runner.
    unittest.main()
| 34.615385 | 108 | 0.632778 |
111dff9ff8122d79f8e2380fe35f3b04555c5059 | 805 | py | Python | quadratic.py | Varanasi-Software-Junction/Python-repository-for-basics | 01128ccb91866cb1abb6d8abf035213f722f5750 | [
"MIT"
] | 2 | 2021-07-14T11:01:58.000Z | 2021-07-14T11:02:01.000Z | quadratic.py | Maurya232Abhishek/Python-repository-for-basics | 3dcec5c529a0847df07c9dcc1424675754ce6376 | [
"MIT"
] | 4 | 2021-04-09T10:14:06.000Z | 2021-04-13T10:25:58.000Z | quadratic.py | Maurya232Abhishek/Python-repository-for-basics | 3dcec5c529a0847df07c9dcc1424675754ce6376 | [
"MIT"
] | 2 | 2021-07-11T08:17:30.000Z | 2021-07-14T11:10:58.000Z | x=[0,1,2,3,4]
# Observed values for the sample x points defined just above.
y=[1,1.8,1.3,2.5,6.3]
# Fit a quadratic to (x, y) and print the result; QuadraticRegression is
# defined elsewhere in this file (not visible in this excerpt).
print(QuadraticRegression(x,y))
| 25.967742 | 129 | 0.468323 |
111e4289c2fc2ba12c2caeb52c1314824fc19de1 | 5,591 | py | Python | config/atcoder-tools/custom_code_generator.py | ay65535/dotfiles-sei40kr | 32a930b0b3f08b15038c28f14e11b5f4ccf367fd | [
"MIT"
] | null | null | null | config/atcoder-tools/custom_code_generator.py | ay65535/dotfiles-sei40kr | 32a930b0b3f08b15038c28f14e11b5f4ccf367fd | [
"MIT"
] | null | null | null | config/atcoder-tools/custom_code_generator.py | ay65535/dotfiles-sei40kr | 32a930b0b3f08b15038c28f14e11b5f4ccf367fd | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional
from atcodertools.codegen.code_style_config import CodeStyleConfig
from atcodertools.codegen.models.code_gen_args import CodeGenArgs
from atcodertools.codegen.template_engine import render
from atcodertools.fmtprediction.models.format import (Format, ParallelPattern,
Pattern, SingularPattern,
TwoDimensionalPattern)
from atcodertools.fmtprediction.models.type import Type
from atcodertools.fmtprediction.models.variable import Variable
def main(args: CodeGenArgs) -> str:
    """Entry point for atcoder-tools code generation.

    Builds the template parameters from the predicted input format and the
    code style config, then renders the template together with the problem
    constants (mod and the yes/no answer strings).
    """
    params = RustCodeGenerator(args.format, args.config).generate_parameters()
    return render(
        args.template,
        mod=args.constants.mod,
        yes_str=args.constants.yes_str,
        no_str=args.constants.no_str,
        **params
    )
| 34.94375 | 117 | 0.609909 |
111f8861f8a268462a9c22cdca35e2db764c3102 | 16,873 | py | Python | crypto_futures_py/binance_futures.py | bear2u/CryptoFuturesPy | 9cfbf5f3a32b35a8a7cd53c2a3ded55d7b3c78d0 | [
"MIT"
] | 7 | 2020-08-23T19:02:33.000Z | 2022-03-24T15:48:18.000Z | crypto_futures_py/binance_futures.py | bear2u/CryptoFuturesPy | 9cfbf5f3a32b35a8a7cd53c2a3ded55d7b3c78d0 | [
"MIT"
] | null | null | null | crypto_futures_py/binance_futures.py | bear2u/CryptoFuturesPy | 9cfbf5f3a32b35a8a7cd53c2a3ded55d7b3c78d0 | [
"MIT"
] | 1 | 2021-09-15T04:17:04.000Z | 2021-09-15T04:17:04.000Z | """
This module contains an implementation for Binance Futures (BinanceFuturesExchangeHandler)
"""
from __future__ import annotations
import pandas as pd
import typing
import json
import logging
import pandas as pd
from datetime import datetime
from dataclasses import dataclass
from . import futurespy as fp
from . import AbstractExchangeHandler
| 37.412417 | 122 | 0.551295 |
112006d175386d520393db8a5fbf90e3082f8290 | 39 | py | Python | orbit/__init__.py | CrossNox/orbit | 1c803596b3477a91c122093c1f6e34333b6db42e | [
"Apache-2.0"
] | null | null | null | orbit/__init__.py | CrossNox/orbit | 1c803596b3477a91c122093c1f6e34333b6db42e | [
"Apache-2.0"
] | null | null | null | orbit/__init__.py | CrossNox/orbit | 1c803596b3477a91c122093c1f6e34333b6db42e | [
"Apache-2.0"
] | null | null | null | name = 'orbit'
__version__ = '1.0.10'
| 9.75 | 22 | 0.615385 |
11207abb79cc5820b7575449526dbb965b477f82 | 1,971 | py | Python | repsim/kernels/kernel_base.py | wrongu/representational-similarity | adca614053973def176044437e6a064c04943ce0 | [
"MIT"
] | 2 | 2022-03-23T21:24:21.000Z | 2022-03-24T04:18:30.000Z | repsim/kernels/kernel_base.py | wrongu/representational-similarity | adca614053973def176044437e6a064c04943ce0 | [
"MIT"
] | 3 | 2022-03-23T19:35:58.000Z | 2022-03-24T04:20:29.000Z | repsim/kernels/kernel_base.py | wrongu/representational-similarity | adca614053973def176044437e6a064c04943ce0 | [
"MIT"
] | 1 | 2022-03-23T19:16:19.000Z | 2022-03-23T19:16:19.000Z | import torch
from typing import Union, Iterable
def center(k: torch.Tensor) -> torch.Tensor:
    """Double-center a Gram matrix.

    Conjugates k by the centering matrix H = I - (1/n) * ones, so that if
    k[i, j] = dot(x_i, x_j), the result is dot(x_i - mu_x, x_j - mu_x).

    :param k: a n by n Gram matrix of inner products between xs
    :return: a n by n centered matrix
    :raises ValueError: if k is not square
    """
    n = k.shape[0]
    if k.shape != (n, n):
        raise ValueError(
            f"Expected k to be nxn square matrix, but it has size {k.size()}"
        )
    identity = torch.eye(n, device=k.device, dtype=k.dtype)
    ones = torch.ones((n, n), device=k.device, dtype=k.dtype)
    centering = identity - ones / n
    return centering @ k @ centering
| 31.790323 | 93 | 0.598681 |
11207edc04bb4169510f36f55f71d608452b5ac2 | 6,253 | py | Python | add_sensors.py | gve-sw/gve_devnet_meraki_sensor_deployment | 7add073bf3e2728f811ea8f5da80c138e3067af7 | [
"RSA-MD"
] | null | null | null | add_sensors.py | gve-sw/gve_devnet_meraki_sensor_deployment | 7add073bf3e2728f811ea8f5da80c138e3067af7 | [
"RSA-MD"
] | null | null | null | add_sensors.py | gve-sw/gve_devnet_meraki_sensor_deployment | 7add073bf3e2728f811ea8f5da80c138e3067af7 | [
"RSA-MD"
] | null | null | null | #!/usr/bin/env python3
"""Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied."""
import csv
import sys
from meraki_functions import *
from env import *
# --- Claim each sensor from sensors.csv into its Meraki network, then name
# --- and locate it; any API failure aborts the whole run.
sensors_file = open("sensors.csv", "r")
csv_reader = csv.DictReader(sensors_file)
networks = {} #dictionary will map network names to network ids
networks_to_organizations = {} #dictionary will map networks to their organizations
for row in csv_reader:
    org_id = getOrgID(base_url, headers, row["organization"])
    if org_id is None:
        # BUG FIX: the original format string had no {} placeholder, so the
        # organization name passed to .format() was silently dropped.
        print("No organization exists with the name {}".format(row["organization"]))
        sys.exit(1)
    net_id = getNetworkID(base_url, headers, org_id, row["network"])
    if net_id is None:
        print("No network exists in the organization with ID {} with the name {}".format(org_id, row["network"]))
        sys.exit(1)
    networks[row["network"]] = net_id #the key row["network"] is the network name
    networks_to_organizations[net_id] = org_id
    serial = row["serial"]
    status_code = claimDevicesToNetwork(base_url, headers, net_id, [serial])
    if status_code != 200:
        print("{} Error".format(status_code))
        print("There was an error adding the device: {} to the network with ID {}.".format(serial, net_id))
        sys.exit(1)
    sensor_details = {
        "name": row["name"],
        "address": row["location"]
    }
    status_code = editDeviceDetails(base_url, headers, serial, sensor_details)
    if status_code != 200:
        print("{} Error".format(status_code))
        print("There was an error editing the device {} with these details: {}".format(serial, sensor_details))
        sys.exit(1)
    print("Sensor {} was added to network {}".format(serial, row["network"]))
sensors_file.close()
# --- Map each alert profile name to the list of sensor serials it covers ---
sensor_profile_file = open("sensors_to_profiles.csv", "r")
csv_reader = csv.DictReader(sensor_profile_file)
sensors_to_profiles = {} #dictionary will map the sensors to the alert profiles they need
for row in csv_reader:
    alert_profile = row["alert_profile"]
    serial = row["sensor_serial"]
    # setdefault creates the list the first time a profile is seen, then
    # appends -- equivalent to the original if/else membership check.
    sensors_to_profiles.setdefault(alert_profile, []).append(serial)
sensor_profile_file.close()
# --- Build the nested mapping of alert recipients per network/profile ---
alert_recipients_file = open("alert_recipients.csv", "r")
csv_reader = csv.DictReader(alert_recipients_file)
profiles_to_recipients = {} #dictionary will map the alert profiles to the alert recipients for that profile - this will be a nested dictionary
'''
Data structure example
profiles_to_recipients = {
    "network name": {
        "alert profile": ["email", "email", "email"],
        "alert profile": ["email", "email", "email"]
    }
}
'''
for row in csv_reader:
    profile_name = row["alert_profile"]
    net_name = row["network"]
    recipient = row["email"] #the recipient is defined by an email address
    # Chained setdefault replaces the nested if/else membership checks:
    # create the per-network dict and per-profile list on first sight,
    # then append the recipient.
    profiles_to_recipients.setdefault(net_name, {}).setdefault(profile_name, []).append(recipient)
alert_recipients_file.close()
# --- Create one temperature alert profile per row of alert_profiles.csv ---
alert_profile_file = open("alert_profiles.csv", "r")
csv_reader = csv.DictReader(alert_profile_file)
for row in csv_reader:
    temp_threshold = row["temp_threshold"]
    temp_duration = row["temp_duration"]
    profile_name = row["name"]
    net_name = row["network"]
    net_id = networks[net_name]
    # (an unused org_id lookup was removed here)
    serials = sensors_to_profiles[profile_name]
    alert_profile_details = {
        "name": profile_name,
        "scheduleId": "",
        "conditions": [
            {
                "type": "temperature",
                "unit": "fahrenheit",
                "direction": "+",
                "threshold": temp_threshold,
                "duration": temp_duration
            }
        ],
        "recipients": {
            "emails": profiles_to_recipients[net_name][profile_name],
            "snmp": False,
            "allAdmins": False,
            "smsNumbers": [],
            "httpServerIds": [],
            "pushUserIds": []
        },
        "serials": serials
    }
    status_code = createAlertProfile(base_url, headers, net_id, alert_profile_details)
    if status_code != 201:
        print("Error {}".format(status_code))
        print("There was an issue creating the alert profile: {} to the network with ID {}".format(alert_profile_details, net_id))
        # BUG FIX: the success message below used to print unconditionally,
        # even after a failure. Skip it and move on to the next profile
        # (this loop, unlike the earlier ones, does not abort the run).
        continue
    print("Alert profile {} was added to the network {}".format(profile_name, net_name))
alert_profile_file.close()
| 40.869281 | 277 | 0.689909 |
1120e9e47c16ba9929729ce5750b83aea2535437 | 663 | py | Python | BasicOperations/01_01_PyQt4/tableDoubleClicked.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | 1 | 2018-07-02T13:54:49.000Z | 2018-07-02T13:54:49.000Z | BasicOperations/01_01_PyQt4/tableDoubleClicked.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | null | null | null | BasicOperations/01_01_PyQt4/tableDoubleClicked.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | [
"MIT"
] | 3 | 2016-05-28T15:13:02.000Z | 2021-04-10T06:04:25.000Z | from PyQt4.QtGui import *
from PyQt4.QtCore import *
if __name__ == '__main__':
    import sys
    # Demo: show a 1024x768 top-level widget hosting a MyTabView
    # (MyTabView is defined earlier in this file, not visible here).
    app = QApplication(sys.argv)
    w = QWidget()
    w.resize(1024, 768)
    v = MyTabView(w)
    w.show()
app.exec_() | 25.5 | 70 | 0.618401 |
11212e7dd8a44a25b511bd3b53859f4b2622a99f | 2,868 | py | Python | pyboss/cogs/commands.py | ajayat/pyboss | 42615d8b3af1601b3d7572e669d692cf3c537703 | [
"MIT"
] | 2 | 2021-07-27T21:46:40.000Z | 2021-10-31T20:31:05.000Z | pyboss/cogs/commands.py | ajayat/pyboss | 42615d8b3af1601b3d7572e669d692cf3c537703 | [
"MIT"
] | 1 | 2021-06-25T12:17:45.000Z | 2021-06-25T12:18:54.000Z | pyboss/cogs/commands.py | ajayat/pyboss | 42615d8b3af1601b3d7572e669d692cf3c537703 | [
"MIT"
] | 2 | 2021-06-24T18:28:25.000Z | 2021-06-25T12:20:47.000Z | import logging
from itertools import cycle
import discord
from discord.ext import commands, tasks
from pyboss.controllers.guild import GuildController
from .utils import youtube
from .utils.checkers import is_guild_owner
logger = logging.getLogger(__name__)
| 30.510638 | 78 | 0.611576 |
1121313e46cf1f2e2cb2bc3065f395b37613a84b | 19,860 | py | Python | legacy/glucose-insulin.py | IllumiNate411/SBINNs | 37e68ce97a997090d17a3d487de77aa9059bfc91 | [
"Apache-2.0"
] | 23 | 2020-07-15T07:41:15.000Z | 2022-02-10T23:09:03.000Z | legacy/glucose-insulin.py | IllumiNate411/SBINNs | 37e68ce97a997090d17a3d487de77aa9059bfc91 | [
"Apache-2.0"
] | 2 | 2021-06-20T20:41:52.000Z | 2022-02-09T19:26:10.000Z | legacy/glucose-insulin.py | IllumiNate411/SBINNs | 37e68ce97a997090d17a3d487de77aa9059bfc91 | [
"Apache-2.0"
] | 21 | 2020-07-15T07:41:17.000Z | 2022-03-03T12:01:37.000Z | import tensorflow as tf
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
import seaborn as sns
import time
from utilities import neural_net, fwd_gradients, heaviside, \
tf_session, mean_squared_error, relative_error
if __name__ == "__main__":
layers = [1] + 6*[6*30] + [6]
meal_t = [300., 650., 1100., 2000.]
meal_q = [60e3, 40e3, 50e3, 100e3]
# function that returns dx/dt
# function that returns dx/dt
# time points
t_star = np.arange(0, 3000, 1.0)
N = t_star.shape[0]
N_eqns = N
N_data = N // 5
k = 1./120.
Vp = 3.0
Vi = 11.0
Vg2 = 10.0*10.0
S0 = 12.0*Vp
S1 = 4.0*Vi
S2 = 110.0*Vg2
S3 = 0.0
S4 = 0.0
S5 = 0.0
# initial condition
x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()
# solve ODE
S_star = odeint(f, x0, t_star)
S_star /= Vg2 # scaling by Vg^2
IG_star = np.array([intake(t, k) for t in t_star]) / Vg2
t_train = t_star[:,None]
S_train = S_star
# two-point data must be given for all the species
# 1st: initial at t=0; 2nd: any point between (0,T]
N0 = 0
N1 = N - 1
idx_data = np.concatenate([np.array([N0]),
np.random.choice(np.arange(1, N-1), size=N_data, replace=False),
np.array([N-1]),
np.array([N1])])
idx_eqns = np.concatenate([np.array([N0]),
np.random.choice(np.arange(1, N-1), size=N_eqns-2, replace=False),
np.array([N-1])])
meal_tq = [np.array([N_eqns*[x] for x in meal_t]).T,
np.array([N_eqns*[x/Vg2] for x in meal_q]).T]
model = HiddenPathways(t_train[idx_data],
S_train[idx_data,:],
t_train[idx_eqns],
layers,
meal_tq)
model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-3)
model.train(num_epochs=25000, batch_size=N_eqns, learning_rate=1e-4)
model.train(num_epochs=10000, batch_size=N_eqns, learning_rate=1e-5)
# NN prediction
meal_tq = [np.array([N*[x] for x in meal_t]).T,
np.array([N*[x/Vg2] for x in meal_q]).T]
S_pred = model.predict(t_star[:,None], meal_tq)
plotting(t_star, S_star, S_pred, idx_data, Vg2)
print('k = %.6f' % ( model.sess.run(model.k) ) )
print('Rm = %.6f' % ( model.sess.run(model.Rm)*Vg2 ) )
print('Vg = %.6f' % ( model.sess.run(model.Vg) ) )
print('C1 = %.6f' % ( model.sess.run(model.C1)*Vg2 ) )
print('a1 = %.6f' % ( model.sess.run(model.a1) ) )
print('Ub = %.6f' % ( model.sess.run(model.Ub)*Vg2 ) )
print('C2 = %.6f' % ( model.sess.run(model.C2)*Vg2 ) )
print('U0 = %.6f' % ( model.sess.run(model.U0)*Vg2 ) )
print('Um = %.6f' % ( model.sess.run(model.Um)*Vg2 ) )
print('C3 = %.6f' % ( model.sess.run(model.C3)*Vg2 ) )
print('C4 = %.6f' % ( model.sess.run(model.C4)*Vg2 ) )
print('Vi = %.6f' % ( model.sess.run(model.Vi) ) )
print('E = %.6f' % ( model.sess.run(model.E) ) )
print('ti = %.6f' % ( model.sess.run(model.ti) ) )
print('beta = %.6f' % ( model.sess.run(model.beta) ) )
print('Rg = %.6f' % ( model.sess.run(model.Rg)*Vg2 ) )
print('alpha = %.6f' % ( model.sess.run(model.alpha) ) )
print('Vp = %.6f' % ( model.sess.run(model.Vp) ) )
print('C5 = %.6f' % ( model.sess.run(model.C5)*Vg2 ) )
print('tp = %.6f' % ( model.sess.run(model.tp) ) )
print('td = %.6f' % ( model.sess.run(model.td) ) )
# Prediction based on inferred parameters
# k = 0.007751
# Vp = 0.707807
# Vi = 2.689281
# S0 = 12.0*Vp
# S1 = 4.0*Vi
# S2 = 110.0*Vg2
# S3 = 0.0
# S4 = 0.0
# S5 = 0.0
# x0 = np.array([S0, S1, S2, S3, S4, S5]).flatten()
# S_pred = odeint(f_pred, x0, t_star)
# S_pred /= Vg2
# IG_pred = np.array([intake(t, k) for t in t_star]) / Vg2
# S_pred = np.append(S_pred[:,:], IG_pred[:,None], axis=1)
# plotting(t_star, S_star, S_pred, idx_data, Vg2, forecast=True)
# savefig('./figures/Glycolytic', crop = False) | 42.801724 | 128 | 0.533938 |
112171429cd4a9b17bf8545cbd8d125eb2b1fe8d | 775 | py | Python | papounet_diet/food_items/migrations/0002_auto_20201112_0653.py | Fabrice-64/OC_Project_8 | 4776a2fa6edffa5c965340b384083c2618f091d9 | [
"BSD-2-Clause"
] | null | null | null | papounet_diet/food_items/migrations/0002_auto_20201112_0653.py | Fabrice-64/OC_Project_8 | 4776a2fa6edffa5c965340b384083c2618f091d9 | [
"BSD-2-Clause"
] | null | null | null | papounet_diet/food_items/migrations/0002_auto_20201112_0653.py | Fabrice-64/OC_Project_8 | 4776a2fa6edffa5c965340b384083c2618f091d9 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-12 06:53
from django.db import migrations, models
| 24.21875 | 67 | 0.550968 |
11218cc7070af833cd314550b9c5614c1e90bc7d | 511 | py | Python | example.py | nils-wisiol/ntru | 9e8848c8883ffd61724604a7ad6dc39498331e2d | [
"MIT"
] | 1 | 2020-03-12T15:09:55.000Z | 2020-03-12T15:09:55.000Z | example.py | nils-wisiol/ntru | 9e8848c8883ffd61724604a7ad6dc39498331e2d | [
"MIT"
] | 1 | 2020-02-06T19:25:46.000Z | 2020-02-06T19:25:46.000Z | example.py | nils-wisiol/ntru | 9e8848c8883ffd61724604a7ad6dc39498331e2d | [
"MIT"
] | null | null | null | from ciphers import StreamlinedNTRUPrime
# choose your parameters
# p (ring degree), q (modulus) and w (weight); these values match the
# published sntrup761 parameter set.
p, q, w = 761, 4591, 286
print('Streamlined NTRU Prime Example for', f'p={p}, q={q}, w={w}')
print('-' * 50)
# Fixed seed keeps the demo deterministic across runs.
cipher = StreamlinedNTRUPrime(p, q, w, seed=1337)
print('Generating key pair ... ')
pk, sk = cipher.generate_keys()
print('En/decrypting...')
# Round-trip check: decrypting the encryption must reproduce the message.
message = cipher.random_small_poly(w, None, cipher.modulus_r)
assert message == cipher.decrypt(cipher.encrypt(message, pk), sk), 'En/decryption failed.'
print('Successfully en/decrypted.')
| 28.388889 | 90 | 0.712329 |
1125ca0aabe90d99b7a8c515657de5d0e0420e5b | 3,721 | py | Python | netease_cloud_music_cache_export.py | simonchen/netease_cloud_music_cache_export | 2e5538c13f4bf1af0a1e138010b111462370fa69 | [
"MIT"
] | null | null | null | netease_cloud_music_cache_export.py | simonchen/netease_cloud_music_cache_export | 2e5538c13f4bf1af0a1e138010b111462370fa69 | [
"MIT"
] | null | null | null | netease_cloud_music_cache_export.py | simonchen/netease_cloud_music_cache_export | 2e5538c13f4bf1af0a1e138010b111462370fa69 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os, sys, shutil, re
# ID3V2, ID3V3
if __name__ == '__main__':
    # CLI: <script> <cache folder> <output_folder>
    # (Python 2 source -- note the ``print`` statements.)
    if len(sys.argv) < 3:
        print 'Usage: %s [cache folder] [output_folder]' %sys.argv[0]
        sys.exit(0)
    input_dir = sys.argv[1]
    output_dir = sys.argv[2]
    # Create the destination directory on first use.
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    files = os.listdir(input_dir)
    for file in files:
        src_path = os.path.join(input_dir, file)
        if os.path.isdir(src_path): continue # don't process directory
        # NOTE(review): ``MP3`` and ``_print`` are not defined in the visible
        # portion of this file -- presumably declared above; confirm.
        mp3 = MP3(src_path)
        if mp3.is_valid():
            title = mp3.title()
            # Replace characters that are illegal in Windows file names.
            title_repl = re.sub('[\\\/\:\*\?"<>|]', '_', title)
            dest_path = os.path.join(output_dir, title_repl+'.mp3')
            _print(src_path, '>>', dest_path)
            shutil.copy(src_path, dest_path)
| 32.356522 | 145 | 0.508465 |
112717e60382646da056e160e879beb3deb10306 | 4,123 | py | Python | word_ladder/utilities.py | RacingTadpole/boggle | 2185da9e204e2d1ed686ccaac76d0d73396408fb | [
"MIT"
] | null | null | null | word_ladder/utilities.py | RacingTadpole/boggle | 2185da9e204e2d1ed686ccaac76d0d73396408fb | [
"MIT"
] | null | null | null | word_ladder/utilities.py | RacingTadpole/boggle | 2185da9e204e2d1ed686ccaac76d0d73396408fb | [
"MIT"
] | null | null | null | from typing import Dict, Iterable, Iterator, List, Sequence, Optional, Tuple
from word_ladder.types import WordDict
from word_ladder.rung import Rung
def get_word_with_letter_missing(word: str, position: int) -> str:
    """Return *word* with the letter at *position* replaced by ``'?'``.

    >>> get_word_with_letter_missing('dog', 0)
    '?og'
    >>> get_word_with_letter_missing('dog', 1)
    'd?g'
    >>> get_word_with_letter_missing('dog', 2)
    'do?'
    """
    # The general slice covers the first and last positions too
    # (word[:0] == '' and word[position + 1:] == '' at the edges), so the
    # original special-case branches were redundant.
    return f'{word[:position]}?{word[position + 1:]}'
def get_neighbors(word: str, words: WordDict) -> Iterable[str]:
    """Find every dictionary word exactly one letter away from *word*.

    Arguments:
        word: The word whose neighbours to look up.
        words: Mapping from wildcard patterns (e.g. ``'d?g'``) to the
            words that match them.

    Returns:
        The frozen set of neighbouring words (including *word* itself
        when it is in the dictionary).

    >>> words = {'?og': ['dog', 'log', 'fog'], 'd?g': ['dog', 'dig'], 'do?': ['dog'], 'l?g': ['log'], 'lo?': ['log']}
    >>> sorted(get_neighbors('dig', words))
    ['dig', 'dog']
    >>> sorted(get_neighbors('fog', words))
    ['dog', 'fog', 'log']
    """
    # Annotation fix: a frozenset is returned, which is not a Sequence;
    # Iterable is the honest, backward-compatible contract.
    return frozenset(
        neighbor
        for position in range(len(word))
        for neighbor in words.get(get_word_with_letter_missing(word, position), [])
    )
def get_all_previous_words(rung: Rung) -> Tuple[str, ...]:
    """Collect the words of *rung* and of every earlier rung.

    >>> rung_0 = Rung(None, ['dig'], {})
    >>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin')}
    >>> words = ['dob', 'don', 'dug', 'fin', 'fog', 'log']
    >>> rung_1 = Rung(rung_0, words, path)
    >>> sorted(get_all_previous_words(rung_1))
    ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
    """
    # Annotation fix: ``Tuple[str]`` means a 1-tuple; the function returns a
    # tuple of arbitrary length, i.e. ``Tuple[str, ...]``.
    return tuple(rung.words) + (get_all_previous_words(rung.previous) if rung.previous else ())
def get_next_rung(previous_rung: Rung, words: WordDict) -> Rung:
    """Build the next rung of the ladder from *previous_rung*.

    Each word of the previous rung is mapped to the neighbours that have
    not yet appeared on any earlier rung.

    >>> from word_ladder.compile_words import add_to_words_dict
    >>> words = {}
    >>> for w in ['dog', 'log', 'fog', 'dig', 'dug', 'dim', 'don', 'dob', 'lug', 'fin']:
    ...     words = add_to_words_dict(words, w)
    >>> rung = Rung(None, ['dog', 'fig'], {})
    >>> next_rung = get_next_rung(rung, words)
    >>> {k: sorted(v) for k,v in next_rung.path.items()}
    {'dog': ['dig', 'dob', 'don', 'dug', 'fog', 'log'], 'fig': ['dig', 'fin', 'fog']}
    >>> sorted(next_rung.words)
    ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log']
    """
    seen = get_all_previous_words(previous_rung)
    path = {}
    for source_word in previous_rung.words:
        path[source_word] = tuple(
            neighbor
            for neighbor in get_neighbors(source_word, words)
            if neighbor not in seen
        )
    # The new rung's word set is the union of all successor tuples.
    next_words = frozenset(
        candidate for successors in path.values() for candidate in successors
    )
    return Rung(previous_rung, next_words, path)
def get_ladders(rung: Rung, word: str) -> Sequence[List[str]]:
    """Reconstruct every ladder through the rungs that ends at *word*.

    >>> rung_0 = Rung(None, ['dig'], {})
    >>> rung_1 = Rung(rung_0, ['dog', 'log', 'fig', 'din'], {'dig': ('dog', 'log', 'fig', 'din')})
    >>> words = ['dig', 'dob', 'don', 'dug', 'fin', 'fog', 'log', 'din']
    >>> path = {'dog': ('log', 'fog', 'dig', 'dug', 'don', 'dob'), 'fig': ('dig', 'fog', 'fin'), 'din': ('dig', 'fin')}
    >>> rung_2 = Rung(rung_1, words, path)
    >>> get_ladders(rung_2, 'fin')
    [['dig', 'fig', 'fin'], ['dig', 'din', 'fin']]
    """
    if not rung.previous:
        return [[word]]
    # The words one rung earlier that can step to *word* are the keys of
    # ``rung.path`` whose successor tuple contains it.  The original called
    # ``keys_for_value``, which is not defined or imported anywhere in this
    # module (a latent NameError); the lookup is inlined here with the
    # semantics pinned by the doctest above.
    predecessors = [
        previous_word
        for previous_word, successors in rung.path.items()
        if word in successors
    ]
    return [
        ladder + [word]
        for previous_word in predecessors
        for ladder in get_ladders(rung.previous, previous_word)
    ]
| 37.825688 | 119 | 0.551298 |
1127e97d3747a0a490202eaf8f996051a3a32f10 | 194 | py | Python | nawrapper/__init__.py | xzackli/nawrapper | f67c02b48d04ed35ab05a378b9884fefd9d07d7f | [
"MIT"
] | null | null | null | nawrapper/__init__.py | xzackli/nawrapper | f67c02b48d04ed35ab05a378b9884fefd9d07d7f | [
"MIT"
] | 9 | 2019-08-27T11:52:37.000Z | 2021-09-21T05:13:25.000Z | nawrapper/__init__.py | xzackli/nawrapper | f67c02b48d04ed35ab05a378b9884fefd9d07d7f | [
"MIT"
] | 1 | 2020-07-07T14:31:43.000Z | 2020-07-07T14:31:43.000Z | """Package init file.
We want the user to get everything right away upon `import nawrapper as nw`.
"""
from .power import *
from .maptools import *
from .covtools import *
from . import planck
| 21.555556 | 76 | 0.731959 |
11290c9f84712a0ff71c67c6213ef09024350d23 | 923 | py | Python | apps/plea/migrations/0015_datavalidation.py | uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | 4c625b13fa2826bdde083a0270dcea1791f6dc18 | [
"MIT"
] | 3 | 2015-12-22T16:37:14.000Z | 2018-01-22T18:44:38.000Z | apps/plea/migrations/0015_datavalidation.py | uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | 4c625b13fa2826bdde083a0270dcea1791f6dc18 | [
"MIT"
] | 145 | 2015-03-04T11:17:50.000Z | 2022-03-21T12:10:13.000Z | apps/plea/migrations/0015_datavalidation.py | uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | 4c625b13fa2826bdde083a0270dcea1791f6dc18 | [
"MIT"
] | 3 | 2015-12-29T14:59:12.000Z | 2021-04-11T06:24:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 34.185185 | 114 | 0.600217 |
1129de727312bd65742888ccbd67f3c84eed3c48 | 498 | py | Python | src/test/resources/outputs/blueprints/openstack/lamp/plugins/overrides/openstack/wrapper.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | null | null | null | src/test/resources/outputs/blueprints/openstack/lamp/plugins/overrides/openstack/wrapper.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | 3 | 2015-12-04T15:27:22.000Z | 2016-04-08T11:32:43.000Z | src/test/resources/outputs/blueprints/openstack/lamp/plugins/overrides/openstack/wrapper.py | alien4cloud/alien4cloud-cloudify4-provider | 97faee855255eb0c3ce25bb3075c29acd11a63c5 | [
"Apache-2.0"
] | 16 | 2015-01-29T10:05:09.000Z | 2019-06-24T19:23:54.000Z | from cloudify import ctx
from cloudify.decorators import operation
from a4c_common.wrapper_util import (USE_EXTERNAL_RESOURCE_KEY,handle_external_resource,handle_resource_ids)
from openstack import with_cinder_client
from openstack.volume import create
| 31.125 | 108 | 0.843373 |
112a43415fecd46be252ae4a4ada95880e19dcdf | 571 | py | Python | drift_report/domain/model_repository.py | rufusnufus/drift-report-plugin | 37019491a82e3478d6bfc718962a477266e1fa26 | [
"Apache-2.0"
] | null | null | null | drift_report/domain/model_repository.py | rufusnufus/drift-report-plugin | 37019491a82e3478d6bfc718962a477266e1fa26 | [
"Apache-2.0"
] | null | null | null | drift_report/domain/model_repository.py | rufusnufus/drift-report-plugin | 37019491a82e3478d6bfc718962a477266e1fa26 | [
"Apache-2.0"
] | null | null | null | from typing import Iterator, List, Optional
from drift_report.domain.model import Model
MODEL_REPO = ModelRepository()
| 24.826087 | 67 | 0.628722 |
112b2b33af67e21e163e5b4e0d7e900b33eee428 | 1,407 | py | Python | day_2/day_2.py | DillonHirth/advent_of_code | 3af280134757945958f816c5c1522c8b7178c290 | [
"MIT"
] | null | null | null | day_2/day_2.py | DillonHirth/advent_of_code | 3af280134757945958f816c5c1522c8b7178c290 | [
"MIT"
] | null | null | null | day_2/day_2.py | DillonHirth/advent_of_code | 3af280134757945958f816c5c1522c8b7178c290 | [
"MIT"
] | null | null | null | # PART 1
# Part 1: "forward" adds to horizontal position; "down"/"up" change depth.
with open('input.txt') as input_file:
    x_pos = 0
    y_pos = 0
    for line in input_file:
        direction = line.split(' ')[0]
        distance = int(line.split(' ')[1])
        if direction == "forward":
            x_pos += distance
        elif direction == "down":
            y_pos += distance
        else:
            # Any other command word (i.e. "up") decreases depth.
            y_pos -= distance
    # Answer: horizontal position times depth.
    print(x_pos * y_pos)
# PART 2
# Part 2: "down"/"up" adjust the aim; "forward" moves horizontally and
# changes depth by aim * distance.
with open('input.txt') as input_file:
    x_pos = 0
    y_pos = 0
    aim_vector = 0
    for line in input_file:
        direction = line.split(' ')[0]
        distance = int(line.split(' ')[1])
        # Debug output: state before applying this command.
        print("*******************************************")
        print("direction:", direction)
        print("distance:", distance)
        print("old_aim:", aim_vector)
        print("old_x_pos:", x_pos)
        print("old_y_pos:", y_pos)
        print("---------------------------")
        if direction == "forward":
            x_pos += distance
            y_pos += aim_vector * distance
        elif direction == "down":
            aim_vector += distance
        else:
            aim_vector -= distance
        # Debug output: state after applying this command.
        print("direction:", direction)
        print("distance:", distance)
        print("aim:", aim_vector)
        print("x_pos:", x_pos)
        print("y_pos:", y_pos)
        print("*******************************************")
# Final state for part 2.
print("x_pos:", x_pos)
print("y_pos:", y_pos)
print("x*y:", x_pos * y_pos) | 30.586957 | 60 | 0.484009 |
112cd4d3258cf792afd7adf62c3d6949ad17d6fc | 794 | py | Python | example.py | macieydev/cosmopy | b039d07dd0cbdd52d1c5aeac79c3bfeccb186284 | [
"MIT"
] | null | null | null | example.py | macieydev/cosmopy | b039d07dd0cbdd52d1c5aeac79c3bfeccb186284 | [
"MIT"
] | null | null | null | example.py | macieydev/cosmopy | b039d07dd0cbdd52d1c5aeac79c3bfeccb186284 | [
"MIT"
] | null | null | null | from typing import Optional
from pydantic import BaseModel
from cosmopy.model import CosmosModel
if __name__ == "__main__":
    # Demo of the model CRUD API.
    # NOTE(review): ``Car`` and ``Engine`` are not defined in the visible
    # portion of this file -- presumably model classes declared above; confirm.
    passat = Car(make="VW", model="Passat")
    print(f"Car: {passat}")
    passat.save()
    # Mutate a field and save again; save() appears to return the stored entity.
    passat.model = "Golf"
    golf = passat.save()
    print(f"Model changed: {golf}")
    passat = Car(make="VW", model="Passat", engine=Engine(hp=100, volume=1600))
    passat.save()
    print(f"New passat: {passat}")
    # Query on a nested attribute via a double-underscore lookup.
    cars_100_hp = Car.query(engine__hp=100)
    print(f"Cars with 100 HP: {cars_100_hp}")
    cars = Car.all()
    print(f"All cars: {cars}")
    # Clean up everything created above.
    for c in cars:
        print(f"Deleting: {c}")
        c.delete()
| 19.365854 | 79 | 0.625945 |
112cd54188922444eead3eae29559f88d75ef846 | 6,882 | py | Python | quadcopter/script/extras/sdre_mavros.py | Bibbidi-Babbidi-Boo/SDRE-based-Cooperative-UAV-Landing-on-High-speed-targets | 515fb38120990cb707521da1a5870721b0ee842a | [
"MIT"
] | 2 | 2020-12-30T16:24:27.000Z | 2021-05-29T08:06:54.000Z | quadcopter/script/extras/sdre_mavros.py | Bibbidi-Babbidi-Boo/SDRE-based-Cooperative-UAV-Landing-on-High-speed-targets | 515fb38120990cb707521da1a5870721b0ee842a | [
"MIT"
] | null | null | null | quadcopter/script/extras/sdre_mavros.py | Bibbidi-Babbidi-Boo/SDRE-based-Cooperative-UAV-Landing-on-High-speed-targets | 515fb38120990cb707521da1a5870721b0ee842a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import tf
import scipy.linalg as la
import numpy as np
from math import *
import mavros_msgs.srv
from mavros_msgs.msg import AttitudeTarget
from nav_msgs.msg import Odometry
from std_msgs.msg import *
from test.msg import *
from geometry_msgs.msg import *
from mavros_msgs.msg import *
from quadcopter.msg import *
import time
import control.matlab as mb
# ROS node and MAVROS attitude-setpoint publisher.
rospy.init_node('sdre', anonymous=True)
pub = rospy.Publisher("/mavros/setpoint_raw/attitude", AttitudeTarget, queue_size=10)
# Attitude state (radians) -- presumably updated by callbacks elsewhere; confirm.
roll = 0.0
pitch = 0.0
yaw = 0.0
msg = AttitudeTarget()
#[10,-10,0] #ardu
# Goal position (global frame) and its body-frame counterpart.
goal = np.array([50.0, 5.0, 0.0])
goal_body = np.array([0.0, 0.0, 0.0])
# Vehicle position state.
x = 0.0
y = 0.0
z = 0.0
error_head_prev = 0.0
# Camera mount pitch and half field-of-view angles (radians).
camera_mount = 0.785398
horizontal = 1.04719/2
vertical = 1.04719/2
vel_rover = [0,0,0]
# 6x6 state matrix; the 1s at (0,1), (2,3), (4,5) chain each position to
# its velocity (double-integrator structure per axis).
A = np.array([[0, 1, 0, 0, 0, 0]
    ,[0, 0, 0, 0, 0, 0]
    ,[0, 0, 0, 1, 0, 0]
    ,[0, 0, 0, 0, 0, 0]
    ,[0, 0, 0, 0, 0, 1]
    ,[0, 0, 0, 0, 0, 0]])
# Timestamp of the previous iteration.
now_p = time.time()
#### msg.x in mavros is -y in gazebo
'''
def ReceiveTar(data):
global goal, x, y, z, roll, pitch, yaw, camera_mount, horizontal, vertical
xt_image=data.contour.center.x
yt_image=data.contour.center.y
xt_image -= 250
yt_image -= 250
width=data.contour.width
height=data.contour.height
if(width<30 or height<30):
goal[0] = goal[0] + vel_rover[0]*0.1
goal[1] = goal[1] + vel_rover[1]*0.1
ro
#rospy.loginfo("DATA %s %s",xt_image,yt_image)
else:
d_xbound = 2*(z/sin(camera_mount))*tan(horizontal)
x_ppm = d_xbound/500
d_ybound = z/tan(camera_mount-vertical) - z/tan(camera_mount+vertical)
y_ppm = d_ybound/500
x_origin = x + (z/tan(camera_mount))*cos(yaw) #In global frame
y_origin = y + (z/tan(camera_mount))*sin(yaw)
yt_image = -yt_image
xt_image = xt_image*x_ppm
yt_image = yt_image*y_ppm
x_new = x_origin + xt_image*cos(yaw-np.pi/2) - yt_image*sin(yaw-np.pi/2)
y_new = y_origin + xt_image*sin(yaw-np.pi/2) + yt_image*cos(yaw-np.pi/2)
#x_new = x - x_prev*cos(yaw)
#y_new = y - y_prev*sin(yaw)
goal[0] = x_new
goal[1] = y_new
rospy.loginfo("POSN %s %s %s %s ", x_new, y_new, x, y)
'''
if __name__ == '__main__':
    # NOTE(review): ``listener`` is not defined in the visible portion of this
    # file -- presumably the ROS subscriber/control loop declared above; confirm.
    try:
        listener()
    # Raised on Ctrl-C / node shutdown; exit quietly.
    except rospy.ROSInterruptException:
        pass
112cd847b88d905fd3029731c809d6d6df124316 | 1,233 | py | Python | scripts/metwork_topics.py | metwork-framework/ressources | 5d88c13b823089c965a5a2204b981da8b652757c | [
"BSD-3-Clause"
] | null | null | null | scripts/metwork_topics.py | metwork-framework/ressources | 5d88c13b823089c965a5a2204b981da8b652757c | [
"BSD-3-Clause"
] | 47 | 2018-09-12T14:30:00.000Z | 2021-09-09T12:26:10.000Z | scripts/metwork_topics.py | metwork-framework/ressources | 5d88c13b823089c965a5a2204b981da8b652757c | [
"BSD-3-Clause"
] | 2 | 2018-10-15T09:15:31.000Z | 2019-03-13T14:25:48.000Z | #!/usr/bin/env python3
import argparse
import asyncio
import json
from aiohttp import ClientSession, BasicAuth, ClientTimeout
import os
import aiohttp_github_helpers as h
# Optional GitHub credentials from the environment; AUTH stays None (i.e.
# unauthenticated requests) when either variable is missing.
GITHUB_USER = os.environ.get('GITHUB_USER', None)
GITHUB_PASS = os.environ.get('GITHUB_PASS', None)
TIMEOUT = ClientTimeout(total=20)
AUTH = None
if GITHUB_USER is not None and GITHUB_PASS is not None:
    AUTH = BasicAuth(GITHUB_USER, GITHUB_PASS)
# NOTE(review): TIMEOUT, ORG and TOPICS_TO_EXCLUDE are unused in the visible
# code -- presumably consumed by helpers stripped from this dump; confirm.
ORG = "metwork-framework"
TOPICS_TO_EXCLUDE = ["testrepo"]
# Command line: <owner> <name> [--json]
parser = argparse.ArgumentParser(description='get repo topics')
parser.add_argument('owner', type=str, help='repo owner')
parser.add_argument('name', type=str, help='repo name')
parser.add_argument('--json', action='store_true',
                    help='if set, format the output as a json list')
args = parser.parse_args()
# NOTE(review): ``get_repo_topics`` is not defined in the visible portion of
# this file -- presumably an async helper declared above; confirm.
loop = asyncio.get_event_loop()
reply = loop.run_until_complete(get_repo_topics(args.owner, args.name))
loop.close()
# Emit either a JSON list or one topic per line.
if args.json:
    print(json.dumps(reply))
else:
    for topic in reply:
        print(topic)
| 30.825 | 71 | 0.739659 |
112f282e3098cdc6a98d1c6bbec33fdd6b4350c1 | 23,588 | py | Python | test2.py | juanmed/singleshot6Dpose | a32d5159d557451ac3ed710ca7d4da6f7c64ff52 | [
"MIT"
] | 5 | 2019-03-27T08:40:07.000Z | 2021-01-08T05:44:46.000Z | test2.py | juanmed/singleshot6Dpose | a32d5159d557451ac3ed710ca7d4da6f7c64ff52 | [
"MIT"
] | null | null | null | test2.py | juanmed/singleshot6Dpose | a32d5159d557451ac3ed710ca7d4da6f7c64ff52 | [
"MIT"
] | 1 | 2019-07-11T09:20:25.000Z | 2019-07-11T09:20:25.000Z | # import support libraries
import os
import time
import numpy as np
# import main working libraries
import cv2
import torch
from torch.autograd import Variable
from torchvision import transforms
from PIL import Image
# import app libraries
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
# estimate bounding box
#@torch.no_grad
if __name__ == '__main__':
    import sys
    # CLI: datacfg cfgfile weightfile imagefile
    if (len(sys.argv) == 5):
        datacfg_file = sys.argv[1] # data file
        cfgfile_file = sys.argv[2] # yolo network file
        weightfile_file = sys.argv[3] # weights file
        imgfile_file = sys.argv[4] # image file
        # NOTE(review): ``test`` is not defined in the visible portion of this
        # file -- presumably the evaluation routine declared above; confirm.
        test(datacfg_file, cfgfile_file, weightfile_file, imgfile_file)
    else:
        print('Usage:')
print(' python valid.py datacfg cfgfile weightfile imagefile') | 40.115646 | 187 | 0.598186 |
112fbb6bacb51a637008a37470e77beab2c5a20e | 4,028 | py | Python | nba/model/utils.py | mattdhart/GBling | ed868dccfcaf7588e7a1297f2294fd62b62e43be | [
"Apache-2.0"
] | null | null | null | nba/model/utils.py | mattdhart/GBling | ed868dccfcaf7588e7a1297f2294fd62b62e43be | [
"Apache-2.0"
] | null | null | null | nba/model/utils.py | mattdhart/GBling | ed868dccfcaf7588e7a1297f2294fd62b62e43be | [
"Apache-2.0"
] | null | null | null |
# Full NBA team name -> abbreviation.
# NOTE(review): both "Charlotte Bobcats" (pre-2014 name) and
# "Charlotte Hornets" map to "CHA" here, while oddsshark_team_id_lookup
# below only knows the Bobcats name -- confirm that is intended.
team_abbr_lookup = {
    "Toronto Raptors": "TOR",
    "Brooklyn Nets": "BRK",
    "New York Knicks": "NYK",
    "Boston Celtics": "BOS",
    "Philadelphia 76ers": "PHI",
    "Indiana Pacers": "IND",
    "Chicago Bulls": "CHI",
    "Cleveland Cavaliers": "CLE",
    "Detroit Pistons": "DET",
    "Milwaukee Bucks": "MIL",
    "Miami Heat": "MIA",
    "Washington Wizards": "WAS",
    "Charlotte Bobcats": "CHA",
    "Charlotte Hornets": "CHA",
    "Atlanta Hawks": "ATL",
    "Orlando Magic": "ORL",
    "Oklahoma City Thunder": "OKC",
    "Portland Trail Blazers": "POR",
    "Minnesota Timberwolves": "MIN",
    "Denver Nuggets": "DEN",
    "Utah Jazz": "UTA",
    "Los Angeles Clippers": "LAC",
    "Golden State Warriors": "GSW",
    "Phoenix Suns": "PHO",
    "Sacramento Kings": "SAC",
    "Los Angeles Lakers": "LAL",
    "San Antonio Spurs": "SAS",
    "Houston Rockets": "HOU",
    "Memphis Grizzlies": "MEM",
    "Dallas Mavericks": "DAL",
    "New Orleans Pelicans": "NOP"
}
# Abbreviation -> full team name (inverse of the above; only the current
# "Charlotte Hornets" name is kept for "CHA").
abbr_team_lookup = {
    "TOR": "Toronto Raptors",
    "BRK": "Brooklyn Nets",
    "NYK": "New York Knicks",
    "BOS": "Boston Celtics",
    "PHI": "Philadelphia 76ers",
    "IND": "Indiana Pacers",
    "CHI": "Chicago Bulls",
    "CLE": "Cleveland Cavaliers",
    "DET": "Detroit Pistons",
    "MIL": "Milwaukee Bucks",
    "MIA": "Miami Heat",
    "WAS": "Washington Wizards",
    "CHA": "Charlotte Hornets",
    "ATL": "Atlanta Hawks",
    "ORL": "Orlando Magic",
    "OKC": "Oklahoma City Thunder",
    "POR": "Portland Trail Blazers",
    "MIN": "Minnesota Timberwolves",
    "DEN": "Denver Nuggets",
    "UTA": "Utah Jazz",
    "LAC": "Los Angeles Clippers",
    "GSW": "Golden State Warriors",
    "PHO": "Phoenix Suns",
    "SAC": "Sacramento Kings",
    "LAL": "Los Angeles Lakers",
    "SAS": "San Antonio Spurs",
    "HOU": "Houston Rockets",
    "MEM": "Memphis Grizzlies",
    "DAL": "Dallas Mavericks",
    "NOP": "New Orleans Pelicans"
}
# Full team name -> oddsshark.com internal team id.
oddsshark_team_id_lookup = {
    "Toronto Raptors": 20742,
    "Brooklyn Nets": 20749,
    "New York Knicks": 20747,
    "Boston Celtics": 20722,
    "Philadelphia 76ers": 20731,
    "Indiana Pacers": 20737,
    "Chicago Bulls": 20732,
    "Cleveland Cavaliers": 20735,
    "Detroit Pistons": 20743,
    "Milwaukee Bucks": 20725,
    "Miami Heat": 20726,
    "Washington Wizards": 20746,
    "Charlotte Bobcats": 20751,
    "Atlanta Hawks": 20734,
    "Orlando Magic": 20750,
    "Oklahoma City Thunder": 20728,
    "Portland Trail Blazers": 20748,
    "Minnesota Timberwolves": 20744,
    "Denver Nuggets": 20723,
    "Utah Jazz": 20738,
    "Los Angeles Clippers": 20736,
    "Golden State Warriors": 20741,
    "Phoenix Suns": 20730,
    "Sacramento Kings": 20745,
    "Los Angeles Lakers": 20739,
    "San Antonio Spurs": 20724,
    "Houston Rockets": 20740,
    "Memphis Grizzlies": 20729,
    "Dallas Mavericks": 20727,
    "New Orleans Pelicans": 20733
}
# oddsshark.com city/display label -> full team name.
oddsshark_city_lookup = {
    "Toronto": "Toronto Raptors",
    "Brooklyn": "Brooklyn Nets",
    "New York": "New York Knicks",
    "Boston": "Boston Celtics",
    "Philadelphia": "Philadelphia 76ers",
    "Indiana": "Indiana Pacers",
    "Chicago": "Chicago Bulls",
    "Cleveland": "Cleveland Cavaliers",
    "Detroit": "Detroit Pistons",
    "Milwaukee": "Milwaukee Bucks",
    "Miami": "Miami Heat",
    "Washington": "Washington Wizards",
    "Charlotte": "Charlotte Hornets",
    "Atlanta": "Atlanta Hawks",
    "Orlando": "Orlando Magic",
    "Oklahoma City": "Oklahoma City Thunder",
    "Portland": "Portland Trail Blazers",
    "Minnesota": "Minnesota Timberwolves",
    "Denver": "Denver Nuggets",
    "Utah": "Utah Jazz",
    "LA Clippers": "Los Angeles Clippers",
    "Golden State": "Golden State Warriors",
    "Phoenix": "Phoenix Suns",
    "Sacramento": "Sacramento Kings",
    "LA Lakers": "Los Angeles Lakers",
    "San Antonio": "San Antonio Spurs",
    "Houston": "Houston Rockets",
    "Memphis": "Memphis Grizzlies",
    "Dallas": "Dallas Mavericks",
    "New Orleans": "New Orleans Pelicans"
}
| 29.188406 | 45 | 0.606753 |
11309186f51ff34a8ed70944cd3fd480bd97b840 | 335 | py | Python | FCMDemo_server/main.py | charsyam/AndroidFCMDemo | 67e3bb2fbbdd1bb7ba5e194d064b9f9fc62d5697 | [
"MIT"
] | null | null | null | FCMDemo_server/main.py | charsyam/AndroidFCMDemo | 67e3bb2fbbdd1bb7ba5e194d064b9f9fc62d5697 | [
"MIT"
] | null | null | null | FCMDemo_server/main.py | charsyam/AndroidFCMDemo | 67e3bb2fbbdd1bb7ba5e194d064b9f9fc62d5697 | [
"MIT"
] | null | null | null | from flask import Flask, request
import redis
# Flask application object; route handlers are not visible in this fragment.
app = Flask(__name__)
# Redis client using the library's default connection parameters.
rconn = redis.StrictRedis()
| 18.611111 | 45 | 0.683582 |
11317909c862fb2dd860a82e60e0abd38c112ba8 | 4,832 | py | Python | unn/linalg.py | hacksparr0w/unn | 8ee8a8a07034fa65d33de1750fb38e93ec8ed60c | [
"MIT"
] | 1 | 2021-07-28T09:15:19.000Z | 2021-07-28T09:15:19.000Z | unn/linalg.py | hacksparr0w/unn | 8ee8a8a07034fa65d33de1750fb38e93ec8ed60c | [
"MIT"
] | null | null | null | unn/linalg.py | hacksparr0w/unn | 8ee8a8a07034fa65d33de1750fb38e93ec8ed60c | [
"MIT"
] | null | null | null | import random
from collections import namedtuple
# Lightweight record describing a matrix's dimensions: (rows, columns).
MatrixShape = namedtuple("MatrixShape", ["rows", "columns"])
| 27.611429 | 79 | 0.548013 |
1131872ab4a0cec6debce22fccdd6997732871ab | 3,975 | py | Python | src/tfx_helper/local.py | dlabsai/tfx-helper | 74a05ffeaa14fdc0866d063e36114f7d654a5ae9 | [
"MIT"
] | null | null | null | src/tfx_helper/local.py | dlabsai/tfx-helper | 74a05ffeaa14fdc0866d063e36114f7d654a5ae9 | [
"MIT"
] | null | null | null | src/tfx_helper/local.py | dlabsai/tfx-helper | 74a05ffeaa14fdc0866d063e36114f7d654a5ae9 | [
"MIT"
] | null | null | null | import os.path
from typing import Any, Iterable, Mapping, Optional, Tuple
import tfx.v1 as tfx
from absl import logging
from ml_metadata.proto import metadata_store_pb2
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.types.channel import Channel
from .base import BasePipelineHelper
from .interface import DEFAULT_CUSTOM_CONFIG, Resources
| 35.176991 | 84 | 0.646289 |
113207da50dd87a4fba010e2037a5449d9f802b7 | 7,956 | py | Python | flash_services/utils.py | textbook/flash_services | 9422f48f62dd0cbef4ad5e593513de357496ed72 | [
"0BSD"
] | 2 | 2016-05-05T20:09:45.000Z | 2017-09-29T08:52:56.000Z | flash_services/utils.py | textbook/flash_services | 9422f48f62dd0cbef4ad5e593513de357496ed72 | [
"0BSD"
] | 27 | 2016-04-18T08:32:47.000Z | 2021-11-25T11:05:15.000Z | flash_services/utils.py | textbook/flash_services | 9422f48f62dd0cbef4ad5e593513de357496ed72 | [
"0BSD"
] | null | null | null | """Useful utility functions for services."""
import functools
import logging
import re
from datetime import datetime, timezone
from inspect import Parameter, Signature

from dateutil.parser import parse
from humanize import naturaldelta, naturaltime
logger = logging.getLogger(__name__)
WORDS = {'1': 'one', '2': 'two', '3': 'three', '4': 'four', '5': 'five',
'6': 'six', '7': 'seven', '8': 'eight', '9': 'nine', '10': 'ten'}
NUMBERS = re.compile(r'\b([1-9]|10)\b')
def _numeric_words(text):
"""Replace numbers 1-10 with words.
Arguments:
text (:py:class:`str`): The text to replace numbers in.
Returns:
:py:class:`str`: The new text containing words.
"""
return NUMBERS.sub(lambda m: WORDS[m.group()], text)
def friendlier(func):
"""Replace numbers to make functions friendlier.
Arguments:
func: The function to wrap.
Returns:
A wrapper function applying :py:func:`_numeric_words`.
"""
def wrapper(*args, **kwargs):
"""Wrapper function to apply _numeric_words."""
result = func(*args, **kwargs)
try:
return _numeric_words(result)
except TypeError:
return result
return wrapper
# Wrap the humanize helpers so their output also spells out numbers 1-10.
naturaldelta = friendlier(naturaldelta)  # pylint: disable=invalid-name
naturaltime = friendlier(naturaltime)  # pylint: disable=invalid-name
def elapsed_time(start, end):
    """Calculate the elapsed time for a service activity.

    Arguments:
        start (:py:class:`str`): The activity start time.
        end (:py:class:`str`): The activity end time.

    Returns:
        :py:class:`tuple`: The start and end times (as UTC timestamps)
          and the humanized elapsed time.

    """
    start_time = safe_parse(start)
    end_time = safe_parse(end)
    if start_time is None or end_time is None:
        logger.warning('failed to generate elapsed time')
        text = 'elapsed time not available'
    else:
        # Reuse the datetimes parsed above instead of parsing each string
        # a second time (the original called ``parse`` again on both).
        text = 'took {}'.format(naturaldelta(end_time - start_time))
    return to_utc_timestamp(start_time), to_utc_timestamp(end_time), text
def to_utc_timestamp(date_time):
    """Convert a naive or timezone-aware datetime to UTC timestamp.

    Arguments:
        date_time (:py:class:`datetime.datetime`): The datetime to
          convert; naive values are interpreted as UTC.

    Returns:
        :py:class:`int`: The timestamp (in seconds), or ``None`` when no
        datetime is supplied.

    """
    if date_time is None:
        return None
    if date_time.tzname() is None:
        # Treat naive datetimes as UTC before taking the timestamp.
        aware = date_time.replace(tzinfo=timezone.utc)
    else:
        aware = date_time
    return int(round(aware.timestamp(), 0))
def safe_parse(time):
    """Parse a string without throwing an error.

    Arguments:
        time (:py:class:`str`): The string to parse.

    Returns:
        :py:class:`datetime.datetime`: The parsed datetime, or ``None``
        when the input is missing or unparseable.

    """
    if time is None:
        return None
    try:
        parsed = parse(time)
    except (OverflowError, ValueError):
        # Out-of-range or malformed input is reported as None.
        return None
    return parsed
def occurred(at_):
    """Calculate when a service event occurred.

    Arguments:
        at_ (:py:class:`str`): When the event occurred.

    Returns:
        :py:class:`str`: The humanized occurrence time.

    """
    try:
        moment = parse(at_)
    except (TypeError, ValueError):
        logger.warning('failed to parse occurrence time %r', at_)
        return 'time not available'
    utc_now = datetime.now(tz=timezone.utc)
    try:
        seconds_ago = (utc_now - moment).total_seconds()
    except TypeError:
        # *at_* parsed to a naive datetime; compare against a naive "now".
        seconds_ago = (datetime.now() - moment).total_seconds()
    return naturaltime(seconds_ago)
def health_summary(builds):
    """Summarise the health of a project based on builds.

    The first build with a decisive outcome wins: a pass yields ``'ok'``,
    a crash or failure yields ``'error'``; with no decisive build the
    summary is ``'neutral'``.

    Arguments:
        builds (:py:class:`list`): List of builds.

    Returns:
        :py:class:`str`: The health summary.

    """
    # NOTE(review): ``Outcome`` is not defined or imported in the visible
    # portion of this module, and estimate_time compares outcomes against
    # plain strings ('working', 'passed') -- confirm the two representations
    # agree.
    for build in builds:
        if build['outcome'] in {Outcome.PASSED}:
            return 'ok'
        elif build['outcome'] in {Outcome.CRASHED, Outcome.FAILED}:
            return 'error'
        else:
            continue
    # No build had a decisive outcome.
    return 'neutral'
def estimate_time(builds):
    """Update the working build with an estimated completion time.

    Takes a simple average over the previous builds, using those
    whose outcome is ``'passed'``.

    Arguments:
        builds (:py:class:`list`): All builds.

    """
    # Find the first in-progress build among the four most recent ones.
    working = None
    for position, build in enumerate(builds[:4]):
        if build['outcome'] == 'working':
            working = build
            break
    if working is None:
        return  # no in-progress builds
    if working.get('started_at') is None:
        working['elapsed'] = 'estimate not available'
        return
    # Durations of completed, successful builds older than the working one.
    durations = [
        build['duration'] for build in builds[position + 1:]
        if build['outcome'] == 'passed' and build['duration'] is not None
    ]
    if not durations:
        working['elapsed'] = 'estimate not available'
        return
    average_duration = int(sum(durations) / float(len(durations)))
    finish = working['started_at'] + average_duration
    remaining = (datetime.fromtimestamp(finish) -
                 datetime.now()).total_seconds()
    if remaining >= 0:
        working['elapsed'] = '{} left'.format(naturaldelta(remaining))
    else:
        working['elapsed'] = 'nearly done'
GITHUB_ISSUE = re.compile(r'''
    (?:                     # one of:
        fix(?:e(?:s|d))?        # fix, fixes or fixed
        | close(?:s|d)?         # close, closes or closed
        | resolve(?:s|d)?       # resolve, resolves or resolved
    )\s*(?:[^/]+/[^#]+)?    # the account and repository name
    \#\d+                   # the issue number
''', re.IGNORECASE + re.VERBOSE)
"""Pattern for commit comment issue ID format, per `GitHub documentation`_.
.. _GitHub documentation: https://help.github.com/articles/closing-issues-via-commit-messages/
"""
TRACKER_STORY = re.compile(r'''
    \[(?:
        (?:
            finish(?:e(?:s|d))?     # finish, finishes or finished
            | complete(?:s|d)?      # complete, completes or completed
            | fix(?:e(?:s|d))?      # fix, fixes or fixed
        )?
        \s*\#\d+\s*             # the story ID
    )+\]
''', re.IGNORECASE + re.VERBOSE)
"""Pattern for commit hook story ID format, per `Tracker documentation`_.
.. _Tracker documentation: https://www.pivotaltracker.com/help/api/rest/v5#Source_Commits
"""
def remove_tags(commit_message):
    """Remove issue/tracker tags from a commit message.

    Note:
        Currently implemented for :py:class:`~.Tracker` and
        :py:class:`~.GitHub` commit messages.

    Arguments:
        commit_message (:py:class:`str`): The commit message.

    Returns:
        :py:class:`str`: The message with tags removed.

    """
    # Strip GitHub issue references first, then Tracker story tags.
    stripped = commit_message
    for pattern in (GITHUB_ISSUE, TRACKER_STORY):
        stripped = pattern.sub('', stripped)
    return stripped.strip()
def required_args(attrs):
    """Extract the required arguments from a class's attrs.

    Required names come from two places: keyword-only ``__init__``
    parameters without defaults, and the class's ``REQUIRED`` attribute.

    Arguments:
        attrs (:py:class:`dict`): The attributes of a class.

    Returns:
        :py:class:`set`: The required arguments.

    """
    init_args = set()
    if '__init__' in attrs:
        sig = Signature.from_callable(attrs['__init__'])
        init_args = {
            name
            for name, param in sig.parameters.items()
            if param.kind == Parameter.KEYWORD_ONLY
            and param.default is Parameter.empty
        }
    # Normalise REQUIRED to a set: the original aliased one shared set to
    # both names and called the unbound ``set.union``, which assumed
    # REQUIRED was already a set instance.
    attr_args = set(attrs.get('REQUIRED', ()))
    return attr_args | init_args
def provided_args(attrs):
    """Extract the provided arguments from a class's attrs.

    Arguments:
        attrs (:py:class:`dict`): The attributes of a class.

    Returns:
        :py:class:`set`: The provided arguments (empty when the class
        declares no ``PROVIDED`` attribute).

    """
    try:
        return attrs['PROVIDED']
    except KeyError:
        return set()
| 27.434483 | 94 | 0.616013 |
11327c7421ed7b895a1170478e90b2ac25d66a3a | 1,233 | py | Python | d16.py | JasperGeurtz/aoc-2020 | 976b54016364e24fdf827b6e60edae82e9458277 | [
"MIT"
] | 1 | 2021-01-03T12:08:39.000Z | 2021-01-03T12:08:39.000Z | d16.py | JasperGeurtz/aoc-2020 | 976b54016364e24fdf827b6e60edae82e9458277 | [
"MIT"
] | null | null | null | d16.py | JasperGeurtz/aoc-2020 | 976b54016364e24fdf827b6e60edae82e9458277 | [
"MIT"
] | null | null | null | import utils
# Input has three blank-line-separated sections: rules, your ticket,
# nearby tickets.
m = utils.opener.raw("input/16.txt")
rm, tm, om = m.split("\n\n")
# Parse each rule line "name: a-b or c-d" into name -> [[a, b], [c, d]].
rules = {}
for line in rm.split("\n"):
    name, expr = line.split(": ")
    rules[name] = [[int(q) for q in x.split("-")] for x in expr.split(" or ")]
myticket = [int(x) for x in tm.split("\n")[1].split(",")]
# All tickets: own ticket plus the nearby ones ([1:-1] skips the section
# header and the final line -- presumably a trailing blank; confirm).
tickets = [[int(q) for q in x.split(",")] for x in tm.split("\n")[1:] + om.split("\n")[1:-1]]
# Part 1: sum values that satisfy no rule at all and drop those tickets.
# NOTE(review): ``tickets.remove(t)`` would raise ValueError if one ticket
# contained more than one invalid value -- confirm the input never does.
s1 = 0
for t in tickets[:]:
    for v in t:
        if not any([r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1] for r in rules.values()]):
            s1 += v
            tickets.remove(t)
print("1:", s1)
# Part 2: start with every column index possible for every rule, then
# eliminate indices whose value violates the rule on some valid ticket.
possible = {}
for rule in rules:
    possible[rule] = set(range(len(myticket)))
for t in tickets:
    for i, v in enumerate(t):
        for rname, r in rules.items():
            if not (r[0][0] <= v <= r[0][1] or r[1][0] <= v <= r[1][1]):
                if i in possible[rname]:
                    possible[rname].remove(i)
# Repeatedly pin the rule with the fewest candidates, then remove its index
# from all remaining candidate sets.
# NOTE(review): ``min`` over sets uses subset (partial) ordering, not size,
# and ``val.remove`` raises KeyError when the index is absent -- this works
# only if a unique singleton always exists at each step; confirm.
found = {}
while possible:
    k, v = min(possible.items(), key=lambda item: item[1])
    found[k] = list(v)[0]
    del possible[k]
    for val in possible.values():
        val.remove(found[k])
# Answer: product of own-ticket values for the "departure" fields.
s2 = 1
for k, v in found.items():
    if k.startswith("departure"):
        s2 *= myticket[v]
print("2:", s2)
| 25.6875 | 97 | 0.518248 |
11331886bdb42648eba47c6e484600231ff9a470 | 4,931 | py | Python | run_portfolio.py | drewvolpe/vc_modeling | 5ba33e41e3c1ffad212d1a0a1abb585b2c384221 | [
"MIT"
] | 1 | 2020-07-12T09:16:37.000Z | 2020-07-12T09:16:37.000Z | run_portfolio.py | drewvolpe/vc_modeling | 5ba33e41e3c1ffad212d1a0a1abb585b2c384221 | [
"MIT"
] | null | null | null | run_portfolio.py | drewvolpe/vc_modeling | 5ba33e41e3c1ffad212d1a0a1abb585b2c384221 | [
"MIT"
] | null | null | null |
from collections import Counter
import random
import math
###
# Parameters of assumptions
###
# How many initial investments and avg check size
num_seed_rounds = 50
invested_per_seed_round = 0.5
# Probabilities of different outcomes (prob, outcome multiple)
outcome_probs_seed = [ [0.01, 100], # N% chance of Mx return
[0.03, 20],
[0.03, 10],
[0.03, 6],
[0.25, 1],
[0.65, 0]]
follow_on_pct = 0.5 # % of deals in which fund invests in next round
invested_per_follow_on = 1.0 # avg size of follow-on investment
outcome_probs_follow = [ [0.02, 30],
[0.06, 15],
[0.06, 8],
[0.06, 4],
[0.30, 1],
[0.50, 0]]
# number of simulated portfolios to generate
num_simulations = 10000
# constants
fund_size = (num_seed_rounds * invested_per_seed_round) +\
(num_seed_rounds * follow_on_pct * invested_per_follow_on)
###
# Classes
###
###
# Funcs
##
def validate_params():
    """Sanity-check that each outcome table's probabilities sum to 1.

    Raises:
        Exception: if either outcome probability column does not sum
            (within floating point tolerance) to 1.
    """
    # The probabilities are floats, so compare with a tolerance rather than
    # exact equality: a sum like 0.01 + 0.03 + ... can differ from 1.0 by
    # rounding error and make an exact ``!= 1.0`` check fail spuriously.
    if not math.isclose(sum(x[0] for x in outcome_probs_seed), 1.0):
        raise Exception("Seed probabilities don't add to 1! ")
    if not math.isclose(sum(x[0] for x in outcome_probs_follow), 1.0):
        raise Exception("Follow on probabilities don't add to 1! ")
###
# main()
###
if __name__ == "__main__":
    # for dev
    # random.seed(31331)
    print('starting...')
    print('validating params...')
    validate_params()
    # Echo the headline fund parameters before simulating.
    print('Parameters')
    print(' $%0.0fm fund which makes %s $%sm seed investments.' %\
        ( fund_size, num_seed_rounds, invested_per_seed_round))
    print(' Follows on with $%sm, %s of the time.' % (invested_per_follow_on, follow_on_pct))
    print('')
    # NOTE(review): ``run_simulations`` is not defined in the visible portion
    # of this file -- presumably declared above; confirm.  (Also note the typo
    # "simluation" in the user-visible string below.)
    print('Running portfolio simluation...')
    run_simulations(num_simulations)
    print('done.')
| 28.33908 | 98 | 0.589536 |