text stringlengths 38 1.54M |
|---|
from string import ascii_letters, digits
from django.contrib import admin
from django.db import transaction
from django.db.models import Count
from django.db.utils import IntegrityError
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from ..forms.registrationlink import RegistrationLinkAdminForm
from ..models.courses import CourseRegistration
from ..models.events import EventRegistration
from ..models.orderables import OrderableRegistration
from ..models.registrationlink import RegistrationLink
from ..models.subjects import SubjectType
from ..utils import attributes
from .export import AdminExportMixin
from .filters import SchoolYearListFilter, SubjectTypeListFilter
@admin.register(RegistrationLink)
class RegistrationLinkAdmin(AdminExportMixin, admin.ModelAdmin):
    """Admin UI for RegistrationLink objects.

    A registration link carries a long random slug that lets users register
    for a subject (course/event/orderable) outside the normal registration
    window.
    """

    # Maps a SubjectType constant to the concrete registration model whose
    # admin changelist get_registrations_link() points at.
    _registration_models = {
        SubjectType.COURSE: CourseRegistration,
        SubjectType.EVENT: EventRegistration,
        SubjectType.ORDERABLE: OrderableRegistration,
    }
    list_display = (
        "id",
        "name",
        "get_link",
        "subject_type",
        "reg_from",
        "reg_to",
        "get_registrations_link",
    )
    list_filter = (
        ("school_year", SchoolYearListFilter),
        "subject_type__subject_type",
        ("subject_type", SubjectTypeListFilter),
    )
    filter_horizontal = ("subject_variants",)

    def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
        # On the very first "add" submit, only the subject type is posted
        # (csrf token + subject_type + submit button == 3 items); redirect
        # back to the add form with the chosen subject type in the query
        # string so get_form() can build the full form.
        if not object_id and request.method == "POST" and len(request.POST) == 3:
            return HttpResponseRedirect(
                "{}?subject_type={}".format(
                    request.path,
                    request.POST.get("subject_type", ""),
                )
            )
        return super().changeform_view(request, object_id, form_url, extra_context)

    def get_form(self, request, obj, **kwargs):
        """Return a form class bound to the resolved school year and subject type."""
        # set school year
        if obj:
            request.school_year = obj.school_year
        # get subject type
        try:
            # first try request.POST (user may want to change subject type)
            request.subject_type = SubjectType.objects.get(id=int(request.POST.get("subject_type")))
        except (SubjectType.DoesNotExist, TypeError, ValueError):
            if obj:
                # use subject type from object
                request.subject_type = obj.subject_type
            else:
                # try to get subject type from request.GET
                try:
                    request.subject_type = SubjectType.objects.get(
                        id=int(request.GET.get("subject_type")),
                    )
                except (SubjectType.DoesNotExist, TypeError, ValueError):
                    request.subject_type = None
        if request.subject_type:
            # Build a form subclass with school_year / subject_type baked in
            # as class attributes.
            kwargs["form"] = type(
                RegistrationLinkAdminForm.__name__,
                (RegistrationLinkAdminForm,),
                {
                    "school_year": request.school_year,
                    "subject_type": request.subject_type,
                },
            )
        else:
            # Subject type not known yet: only ask for it.
            kwargs["fields"] = ["subject_type"]
        return super().get_form(request, obj, **kwargs)

    def save_form(self, request, form, change):
        obj = super().save_form(request, form, change)
        obj.school_year = request.school_year
        return obj

    def save_model(self, request, obj, form, change):
        if change:
            obj.save()
        else:
            obj.school_year = request.school_year
            # Retry on the (unlikely) random-slug uniqueness collision.
            while not obj.id:
                try:
                    with transaction.atomic():
                        obj.slug = get_random_string(64, ascii_letters + digits)
                        obj.save()
                except IntegrityError:
                    pass

    @attributes(short_description=_("registration link"))
    def get_link(self, obj):
        # format_html() escapes its arguments; the previous
        # mark_safe('...'.format(...)) construction bypassed escaping.
        return format_html(
            '<a href="{url}" title="{title}" target="_blank">{url}</a>',
            url=obj.link,
            title=_("registration link"),
        )

    def get_queryset(self, request):
        return (
            super()
            .get_queryset(request)
            .select_related("subject_type")
            .annotate(registrations_count=Count("registrations"))
        )

    @attributes(short_description=_("registrations"))
    def get_registrations_link(self, obj):
        """Link to the changelist of registrations created through this link."""
        registration_model = self._registration_models[obj.subject_type.subject_type]
        # format_html() already returns a SafeString; wrapping it in
        # mark_safe() was redundant.
        return format_html(
            '<a href="{url}">{count}</a>',
            url=reverse(
                "admin:{}_{}_changelist".format(
                    registration_model._meta.app_label,
                    registration_model._meta.model_name,
                )
            )
            + "?registration_link__id__exact={}".format(obj.id),
            count=obj.registrations_count,
        )
|
# Diamond-inheritance demo: shows that super() follows the MRO of the
# *instance's* class, not the static base of the defining class.
class A:
    """Root of the diamond; terminates the cooperative __init__ chain."""
    def __init__(self):
        print("A.__init__")


class B(A):
    def __init__(self):
        print("B.__init__")
        # Inside E(), B's super() resolves to A (next in E's MRO).
        print(super().__init__)
        super().__init__()


class C(A):
    def __init__(self):
        print("C.__init__")
        # Inside E(), C's super() is B -- NOT its static base A.
        print(super().__init__)
        super().__init__()


class D(C):
    def __init__(self):
        print("D.__init__")
        print(super().__init__)
        super().__init__()


class E(D, B):
    def __init__(self):
        print("E.__init__")
        print(super().__init__)
        super().__init__()


# C3 linearization: [E, D, C, B, A, object]
print(E.mro())
e = E()
print(e)
"""
explicitly call constructor
use keyword args **kwargs
can't know what super() will give you - determined during runtime
"""
|
# -*- coding:utf-8 -*-
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    # Return a list of lists; each inner list is one root-to-leaf path whose
    # node values sum to expectNumber.
    def FindPath(self, root, expectNumber):
        """Collect all root-to-leaf paths summing to expectNumber (BFS).

        Matching paths found later in the level-order traversal are inserted
        at the front, preserving the original result ordering.
        """
        from collections import deque
        if root is None:
            return []
        ret = []  # result paths
        # Two parallel queues: nodes to visit and the value-path to each.
        # deque gives O(1) popleft instead of the original O(n) `del list[0]`.
        nodes = deque([root])
        paths = deque([[root.val]])
        while nodes:
            node = nodes.popleft()
            path = paths.popleft()
            # Leaf: check the accumulated sum.
            if node.left is None and node.right is None:
                if sum(path) == expectNumber:
                    ret.insert(0, path)
            # Enqueue children with extended copies of the path.
            if node.left:
                nodes.append(node.left)
                paths.append(path + [node.left.val])
            if node.right:
                nodes.append(node.right)
                paths.append(path + [node.right.val])
        return ret
if __name__ == '__main__':
    # Smoke instantiation only; no tree is built or traversed here.
    s = Solution()
from five import grok
from plone.directives import dexterity, form
from dkiscm.jobmatrix.content.jobgroup import IJobGroup
# Grok looks up page templates in the local 'templates' directory.
grok.templatedir('templates')


class Index(dexterity.DisplayForm):
    """Default display view for IJobGroup content, registered via grok."""
    grok.context(IJobGroup)          # bind this view to IJobGroup objects
    grok.require('zope2.View')       # standard Zope view permission
    grok.template('jobgroup_view')   # renders templates/jobgroup_view.pt
    grok.name('view')                # registered under the name 'view'
|
#!/usr/bin/python3
# RA, 2018-10-26
## ================== IMPORTS :
import os
import sklearn.neighbors
import geopy.distance
import numpy as np
import builtins
import json
import inspect
import pickle
import time
import urllib.request, urllib.parse
import base64
import re
import datetime as dt
from collections import defaultdict
## ==================== NOTES :
pass
## ==================== INPUT :
# Input file name templates (filled via .format()).
IFILE = {
    'request-routes' : "request_cache/explicit/routes_{lang}.json",
    'request-stops' : "request_cache/explicit/UV/stops_{ID}-{Dir}_{lang}.json",
    'bus-network' : "compute_cache/UV/bus-network.json",
    'compute-knn' : "compute_cache/UV/stops-knn.pkl",
}
## =================== OUTPUT :
# Output file name templates; the caches are read and written in place.
OFILE = {
    'request-routes' : IFILE['request-routes'],
    'request-stops' : IFILE['request-stops'],
    'bus-network' : IFILE['bus-network'],
    'compute-knn' : IFILE['compute-knn'],
    'wget-generic-cache' : "request_cache/wget/UV/{ID}.dat",
}
# Create output directories
for f in OFILE.values() : os.makedirs(os.path.dirname(f), exist_ok=True)
## ==================== PARAM :
PARAM_PRODUCTION = False
# Global behavior knobs; mutated at runtime in one place (knn rebuild).
PARAM = {
    'pref-lang' : 'tw',
    'logged-open' : not PARAM_PRODUCTION,
    'wget-max-calls': 15,
    'wget-throttle-seconds' : (0.1 if PARAM_PRODUCTION else 1),
    'wget-always-reuse-file' : True,
    'url-routes' : {
        'en' : "https://ibus.tbkc.gov.tw/KSBUSN/NewAPI/RealRoute.ashx?type=GetRoute&Lang=En",
        'tw' : "https://ibus.tbkc.gov.tw/KSBUSN/NewAPI/RealRoute.ashx?type=GetRoute&Lang=Cht",
    },
    # If True, should override 'wget-always-reuse-file'
    'force-fetch-request-routes' : False,
    'url-routestops' : {
        'en' : "https://ibus.tbkc.gov.tw/KSBUSN/NewAPI/RealRoute.ashx?type=GetStop&Data={ID}_,{Dir}&Lang=En",
        'tw' : "https://ibus.tbkc.gov.tw/KSBUSN/NewAPI/RealRoute.ashx?type=GetStop&Data={ID}_,{Dir}&Lang=Cht",
        # Note: 'Dir' is the direction there=1 and back=2, elsewhere keyed by 'GoBack' or 'Goback' instead
    },
    # If True, should override 'wget-always-reuse-file'
    'force-fetch-request-routestops' : False,
    'force-recompute-bus-network' : False,
    'force-recompute-knn' : False,
    'url-eta' :{
        'en' : "https://ibus.tbkc.gov.tw/KSBUSN/newAPI/CrossRoute.ashx?stopname={stopname}&Lang=En",
        'tw' : "https://ibus.tbkc.gov.tw/KSBUSN/newAPI/CrossRoute.ashx?stopname={stopname}&Lang=Cht",
    },
}
## ====================== AUX :
# https://stackoverflow.com/questions/34491808/how-to-get-the-current-scripts-code-in-python
# Full source text of this module (kept for provenance/debugging).
THIS = inspect.getsource(inspect.getmodule(inspect.currentframe()))
# Log which files are opened
def logged_open(filename, mode='r', *argv, **kwargs) :
    """Drop-in replacement for open() that logs (mode, filename) first."""
    print("({}):\t{}".format(mode, filename))
    return builtins.open(filename, mode, *argv, **kwargs)
# Activate this function?
# NOTE: deliberately shadows the builtin open() module-wide when enabled.
if PARAM.get('logged-open') : open = logged_open
# Class to fetch files from WWW
class wget :
    """Fetch a URL with optional on-disk caching, throttling and a hard
    call limit. The response body ends up in self.bytes."""
    # Total number of live HTTP requests made so far (class-wide counter)
    number_of_calls = 0
    # Sentinel meaning "derive the cache filename from the URL"
    CACHE = '/'
    def __init__(self, url, filename=CACHE) :
        # filename: explicit cache path, the CACHE sentinel, or a falsy
        # value to disable caching entirely.
        assert(url), "Illegal URL parameter"
        # Encode potential Chinese characters
        url = urllib.parse.quote(url, safe=':/?&=')
        if (filename == wget.CACHE) :
            # Derive a filesystem-safe cache name from the URL itself
            # https://stackoverflow.com/a/295150
            filename = OFILE['wget-generic-cache'].format(ID=base64.urlsafe_b64encode(url.encode('utf-8')).decode('utf-8'))
        if filename and PARAM['wget-always-reuse-file'] :
            # Serve from the cache when the file already exists
            if os.path.isfile(filename) :
                with open(filename, 'rb') as f :
                    self.bytes = f.read()
                return
        wget.number_of_calls = wget.number_of_calls + 1
        if (wget.number_of_calls > PARAM['wget-max-calls']) :
            raise RuntimeError("Call limit exceeded for wget")
        # Be polite to the server
        time.sleep(PARAM['wget-throttle-seconds'])
        with urllib.request.urlopen(url) as response :
            self.bytes = response.read()
        if filename :
            try :
                with open(filename, 'wb') as f :
                    f.write(self.bytes)
            except IOError as e :
                # Best-effort cache write; failing to cache is non-fatal.
                pass
# Index a list _I_ of dict's by the return value of key_func
def reindex_by_key(I, key_func) :
    """Group a list of dicts by key_func(record).

    Per group, each field collects its values in a list; fields whose values
    all serialize identically (via json) collapse to the single value.
    Returns plain nested dicts (keys coerced to strings by the json
    round-trip).
    """
    grouped = defaultdict(lambda: defaultdict(list))
    for record in I :
        bucket = grouped[key_func(record)]
        for (field, value) in record.items() :
            bucket[field].append(value)
    for bucket in grouped.values() :
        for (field, values) in bucket.items() :
            serialized = {json.dumps(v) for v in values}
            if len(serialized) == 1 :
                bucket[field] = next(iter(values))
    # json round-trip converts the nested defaultdicts to plain dicts
    return json.loads(json.dumps(grouped))
# "Turn" a JSON structure, rekey by id_key
# Assume: I[lang] is a list of dict's, where each has a field id_key
def lang_reform(I, id_key) :
    """Re-key {lang: [records]} by each record's id_key field.

    Each field becomes a {lang: value} dict; fields whose value is the same
    in every language collapse to the bare value. Returns plain nested
    dicts (via a json round-trip).
    """
    merged = defaultdict(lambda: defaultdict(dict))
    for (lang, records) in I.items() :
        for record in records :
            entry = merged[record[id_key]]
            for (field, value) in record.items() :
                entry[field][lang] = value
    for entry in merged.values() :
        for (field, by_lang) in entry.items() :
            distinct = set(by_lang.values())
            if len(distinct) == 1 :
                entry[field] = distinct.pop()
    # json round-trip converts the defaultdicts to plain dicts
    return json.loads(json.dumps(merged))
# Metric for (lat, lon) coordinates
def geodesic(a, b) :
    """Geodesic distance between (lat, lon) pairs a and b, in meters."""
    return geopy.distance.geodesic(a, b).m
## ================== CLASSES :
class RoutesMeta :
    """Route metadata for all bus routes, merged across languages."""
    def routes_init(self) :
        # Download (or reuse the cached copy of) the route list for each
        # language, then merge per-language records into self.routes,
        # keyed by route 'ID'.
        routes_by_lang = { }
        for (lang, url) in PARAM['url-routes'].items() :
            filename = IFILE['request-routes'].format(lang=lang)
            if PARAM['force-fetch-request-routes'] :
                with open(filename, 'wb') as f :
                    f.write(wget(url).bytes)
            else :
                wget(url, filename)
            with open(filename, 'r') as f :
                routes_by_lang[lang] = json.load(f)
        self.routes = lang_reform(routes_by_lang, 'ID')
        # An entry of self.routes now looks like this:
        # (assuming route_id_key is 'ID')
        #
        # self.routes['1431'] == {
        # 'ID': '1431',
        # 'nameZh': {'en': '0 North', 'tw': '0北'},
        # 'gxcode': '001',
        # 'ddesc': {'en': 'Golden Lion Lake Station<->Golden Lion Lake Station', 'tw': '金獅湖站-金獅湖站'},
        # 'departureZh': {'en': 'Golden Lion Lake Station', 'tw': '金獅湖站'},
        # 'destinationZh': {'en': 'MRT Yanchengpu Station', 'tw': '捷運鹽埕埔站'},
        # 'RouteType': {'en': 'General Line', 'tw': '一般公車'},
        # 'MasterRouteName': ' ',
        # 'MasterRouteNo': '0',
        # 'MasterRouteDesc': {'en': 'MRT Yanchengpu Station<->MRT Yanchengpu Station', 'tw': '捷運鹽埕站-捷運鹽埕站'},
        # 'routes': '2',
        # 'ProviderName': {'en': 'Han Cheng Bus Company', 'tw': '漢程客運'},
        # 'ProviderWebsite': 'http://www.ibus.com.tw/city-bus/khh/',
        # 'TimeTableUrl': 'http://ibus.com.tw/timetable/0N.pdf'
        # }
    def __init__(self) :
        self.routes_init()
        assert(self.routes)
class BusNetwork :
    """Routes plus stops/platforms for the whole network, built from the
    per-route stop listings and cached to disk as one JSON file."""
    def init(self) :
        # Fast path: reuse the cached network file unless a recompute is forced.
        if os.path.isfile(IFILE['bus-network']) and not PARAM['force-recompute-bus-network'] :
            try :
                with open(IFILE['bus-network'], 'r') as f :
                    network = json.load(f)
                (self.routes, self.stops) = (network['routes'], network['stops'])
                return
            except json.JSONDecodeError as e :
                # Corrupt cache: fall through and rebuild from scratch.
                pass
        routesmeta = RoutesMeta()
        # self.routes[route_id][direction] is a list of stop SIDs
        self.routes = defaultdict(dict)
        # self.stops is a dict of all stops/platforms keyed by SID
        # (keyed by language first, until the lang_reform() below)
        self.stops = defaultdict(dict)
        for (lang, preurl) in PARAM['url-routestops'].items():
            for (i, route) in routesmeta.routes.items() :
                nroutes = int(route['routes'])
                assert(nroutes in [1, 2])
                self.routes[i] = route
                self.routes[i]['Dir'] = { }
                for Dir in ['1', '2'][:nroutes] :
                    url = preurl.format(ID=i, Dir=Dir)
                    filename = IFILE['request-stops'].format(ID=i, Dir=Dir, lang=lang)
                    if PARAM['force-fetch-request-routestops'] :
                        with open(filename, 'wb') as f :
                            f.write(wget(url).bytes)
                    else :
                        wget(url, filename)
                    # Special cases:
                    #
                    # As of 2018-10-27, the following routes get an error response:
                    # 1602-1, 2173-2, 2482-1, 371-1
                    #
                    # http://southeastbus.com/index/kcg/Time/248.htm
                    if (i == '2482') and (int(Dir) == 1) : continue
                    # https://www.crowntaxi.com.tw/news.aspx?ID=49
                    if (i == '331') and (int(Dir) == 1) : continue
                    # http://southeastbus.com/index/kcg/Time/37.htm
                    if (i == '371') and (int(Dir) == 1) : continue
                    # 'http://southeastbus.com/index/kcg/Time/O7A.htm
                    if (i == '1602') and (int(Dir) == 1) : continue
                    with open(filename, 'r') as f :
                        try :
                            J = json.load(f)
                        except json.decoder.JSONDecodeError as e :
                            print(route)
                            raise
                    if not (type(J) is list) :
                        raise RuntimeWarning("Expect a list in JSON response here")
                    # The SIDs of stops along this route will be written here in correct order
                    self.routes[i]['Dir'][Dir] = []
                    for stop in sorted(J, key=(lambda r : int(r['seqNo']))) :
                        n = stop['SID']
                        self.routes[i]['Dir'][Dir].append(n)
                        del stop['seqNo']
                        # Special cases:
                        if True :
                            # Presumably error in data in route 2482
                            # Assign Id of 'MRT Wukuaicuo Station (Jhongjheng 1st Rd.)'
                            if (n == '4536') : stop['Id'] = '0062'
                            # Assign id of 'MRT Martial Arts Stadium Station'
                            if (n == '3522') : stop['Id'] = '0004'
                            # Special case (apparent data error in route 351-2)
                            if (n == '15883') : stop['Id'] = '3215'
                            # In route 50-2
                            # Resolve 'Id' of 'MRT Sizihwan Station (LRT Hamasen)'
                            if (n == '2120') : stop['Id'] = '9203'
                            # In route 701-2
                            # Resolve 'Id' of 'Chengcing Lake Baseball Field'
                            if (n == '2542') : stop['Id'] = '3535'
                            # In route 7-1 and 7-2
                            # Resolve 'Id' of 'Houjin Junior High School (MRT Houjin Station)'
                            if (n == '4583') : stop['Id'] = '7010'
                            if (n == '4680') : stop['Id'] = '7010'
                            # In route 731-1
                            # Resolve 'Id' of 'MRT Metropolitan Park Station'
                            if (n == '5135') : stop['Id'] = '7508'
                            # ETC... WHATEVER, WE SIMPLY NULLIFY THE 'Id' FIELD
                            # (this deliberately overrides all assignments above)
                            stop['Id'] = None
                        if n in self.stops[lang] :
                            # The same SID seen on two routes must agree field-by-field
                            if not (self.stops[lang][n] == stop) :
                                print("Ex. A:", self.stops[lang][n])
                                print("Ex. B:", stop)
                                raise RuntimeError("Data inconsistency")
                        else :
                            self.stops[lang][n] = stop
        # Rekey by the Stop ID
        self.stops = { lang : stops.values() for (lang, stops) in self.stops.items() }
        self.stops = lang_reform(self.stops, 'SID')
        # Now, to each stop append the list of incident routes
        for n in self.stops.keys() :
            self.stops[n]['routes'] = defaultdict(list)
        for (i, r) in self.routes.items() :
            for (Dir, stops) in r['Dir'].items() :
                for n in stops :
                    self.stops[n]['routes'][Dir].append(i)
        # Convert all defaultdict to dict
        self.routes = json.loads(json.dumps(self.routes))
        self.stops = json.loads(json.dumps(self.stops))
        # Save to disk
        with open(OFILE['bus-network'], 'w') as f :
            json.dump({ 'routes' : self.routes, 'stops' : self.stops }, f)
    def __init__(self) :
        self.init()
    def group_by_name(stops) :
        # NOTE(review): no 'self' parameter -- this is invoked as a plain
        # function through the class (e.g. BusOracle.group_by_name(...));
        # consider marking it @staticmethod.
        # Groups stops by "tw-name / en-name" and derives 'distance-min' /
        # 'distance-tty' summaries where a 'distance' field is present.
        assert(type(stops) is dict)
        stops = reindex_by_key(stops.values(), (lambda s: "{} / {}".format(s['nameZh']['tw'], s['nameZh']['en'])))
        for (k, stop) in stops.items() :
            if not ('distance' in stop) : continue
            d = stop['distance']
            if type(d) is list:
                # Several platforms merged: show the distance range
                stops[k]['distance-min'] = int(min(d))
                stops[k]['distance-tty'] = "{}~{}m".format(int(min(d)), int(max(d)))
            else:
                stops[k]['distance-min'] = int(d)
                stops[k]['distance-tty'] = "{}m".format(int(d))
        return stops
    def get_routes_through(self, ids) :
        # For each direction '1'/'2', the sorted set of route IDs serving any stop in ids.
        return { Dir : sorted(set(sum([ self.stops[i]['routes'].get(Dir, []) for i in ids ], []))) for Dir in ['1', '2'] }
class BusNetworkKNN(BusNetwork) :
    """BusNetwork augmented with a k-nearest-neighbor index over stop
    coordinates (BallTree with a geodesic metric, cached via pickle)."""
    def __init__(self) :
        BusNetwork.__init__(self)
        self.init_knn()
    def init_knn(self) :
        # Build the BallTree (and cache it), or load the cached pickle.
        if PARAM['force-recompute-knn'] or (not os.path.isfile(IFILE['compute-knn'])) :
            (I, X) = zip(*[ (i, (float(s['latitude']), float(s['longitude']))) for (i, s) in self.stops.items() ])
            self.knn = {
                'SIDs' : I,
                # geodesic metric => query distances are in meters
                'tree' : sklearn.neighbors.BallTree(X, leaf_size=30, metric='pyfunc', func=geodesic),
            }
            with open(OFILE['compute-knn'], 'wb') as f :
                pickle.dump(self.knn, f, pickle.HIGHEST_PROTOCOL)
        else :
            try :
                with open(IFILE['compute-knn'], 'rb') as f :
                    self.knn = pickle.load(f)
            except EOFError as e :
                # Truncated/corrupt pickle: force a one-time rebuild
                PARAM['force-recompute-knn'] = True
                self.init_knn()
    def get_nearest_stops(self, pos, k=10) :
        """Return the k stops nearest to pos, as {SID: stop-with-distance}."""
        # Note: assume a single sample pos, i.e. pos = (lat, lon)
        (dist, ind) = self.knn['tree'].query(np.asarray(pos).reshape(1, -1), k=k)
        (dist, ind) = (dist.flatten(), ind.flatten())
        # Convert ind to stop IDs
        ind = [ self.knn['SIDs'][n] for n in ind ]
        # Get the complete nearest stops info
        stops = [ self.stops[j] for j in ind ]
        # Append the 'distance' info to each nearest stop
        # NOTE(review): this loop rebinds the parameter k and mutates the
        # shared self.stops entries in place -- confirm both are intended.
        for (k, d) in enumerate(dist) :
            stops[k]['distance'] = d
        # Index stops by ID
        stops = dict(zip(ind, stops))
        return stops
class BusOracle(BusNetworkKNN) :
    """Answers estimated-time-of-arrival queries by stop name or location."""
    def __init__(self) :
        BusNetworkKNN.__init__(self)
    # Returns an iterable over bus ETA's in chronological order
    def eta_by_stopname(self, stopname, force_fetch=PARAM_PRODUCTION) :
        # Fetch the live ETA feed for the stop (cached unless force_fetch).
        url = PARAM['url-eta'][PARAM['pref-lang']].format(stopname=stopname)
        eta_groups = json.loads(wget(url, (None if force_fetch else wget.CACHE)).bytes)
        ETA_INFO = []
        for group in eta_groups :
            for car in group['Info'] :
                #print(car['Pathid'], car['Goback'], car['Time'])
                route = self.routes[car['Pathid']]
                # Direction '1' heads to the route's destination, '2' back to its departure
                car_dest = { '1' : route['destinationZh'], '2' : route['departureZh'] }[car['Goback']]
                if (type(car_dest) is dict) : car_dest = car_dest[PARAM['pref-lang']]
                car_dest = car_dest.strip()
                route_name = route['nameZh']
                if (type(route_name) is dict) : route_name = route_name[PARAM['pref-lang']]
                route_name = route_name.strip()
                eta = car['Time'].strip()
                try :
                    # Is the ETA in the format Hour:Minute?
                    eta = dt.timedelta(
                        hours = int(re.match(r'(?P<hour>\d+):(?P<min>\d+)', eta).group('hour')),
                        minutes = int(re.match(r'(?P<hour>\d+):(?P<min>\d+)', eta).group('min'))
                    ) + dt.datetime.combine(dt.date.today(), dt.time())
                except AttributeError :
                    try :
                        # Is ETA in the format XMinutes?
                        eta = dt.timedelta(
                            minutes = int(re.match(r'(?P<min>^\d+)', eta).group('min'))
                        ) + dt.datetime.now()
                    except AttributeError :
                        # Unparsable: keep the raw string as-is
                        pass
                ETA_INFO.append( (eta, route_name, car_dest, route, car ) )
        # Parsed datetimes first (chronological), then the unparsed raw strings
        ETA_INFO_1 = sorted(filter(lambda ei : type(ei[0]) is dt.datetime, ETA_INFO))
        ETA_INFO_2 = sorted(filter(lambda ei : not type(ei[0]) is dt.datetime, ETA_INFO))
        for (eta, route_name, car_dest, route, car) in ETA_INFO_1 :
            yield {
                'eta' : "{0:%H:%M}".format(eta),
                'route_name' : route_name,
                'car_dest' : car_dest,
                'route' : route,
                'car' : car
            }
        for (eta, route_name, car_dest, route, car) in ETA_INFO_2 :
            yield {
                'eta' : eta,
                'route_name' : route_name,
                'car_dest' : car_dest,
                'route' : route,
                'car' : car
            }
        return
    def eta_by_loc(self, loc, k=20) :
        """ETAs for the k stops nearest to loc, sorted by walking distance."""
        return sorted(
            [
                {
                    'stop' : s,
                    'etas' : list(self.eta_by_stopname(s['nameZh'][PARAM['pref-lang']]))
                }
                for (j, s) in BusOracle.group_by_name(self.get_nearest_stops(loc, k=k)).items()
            ],
            key=(lambda stop_etas : stop_etas['stop']['distance-min'])
        )
## ===================== WORK :
## ==================== TESTS :
def test_000() :
    """Exercise the wget call counter / throttle (hits the network)."""
    for i in range(10) :
        print("Executing wget call #{}".format(i+1))
        wget("https://www.google.com/")
def test_001() :
    """Fetch and print the merged route metadata."""
    R = RoutesMeta()
    assert(R.routes)
    print("All bus routes:")
    print(R.routes)
def test_002() :
    """Build the full network and print a sample of routes and stops."""
    S = BusNetwork()
    print("Some bus trajectories:")
    for r in list(S.routes.items())[0:10] :
        print(r)
    print("Some platforms:")
    for s in list(S.stops.items())[0:10] :
        print(s)
def test_003() :
    """Query the KNN index around a fixed coordinate."""
    S = BusNetworkKNN()
    (lat, lon) = (22.63279, 120.33447)
    print("Finding bus stops closest to (lat, lon) = ({}, {})...".format(lat, lon))
    kS = S.get_nearest_stops((lat, lon), 10)
    for s in json.loads(json.dumps(kS)).values() :
        assert(type(s['distance']) is float)
    for (i, s) in kS.items() :
        print("{}m -- {} (SID: {})".format(int(round(s['distance'])), s['nameZh']['en'], i))
    print("Grouped by name:")
    for (j, s) in BusNetworkKNN.group_by_name(kS).items() :
        print(j, s)
def test_004() :
    """Print live ETAs for the stops nearest a fixed coordinate."""
    oracle = BusOracle()
    (lat, lon) = (22.63279, 120.33447)
    kS = BusOracle.group_by_name(oracle.get_nearest_stops((lat, lon), k=20))
    for (j, s) in kS.items() :
        (dist, dist_nice, stopname) = (s['distance'], s['distance-tty'], s['nameZh'][PARAM['pref-lang']])
        print("")
        print("{} ({})".format(stopname, dist_nice))
        print("")
        for eta_info in oracle.eta_by_stopname(stopname) :
            print('[{}] "{}" ~~> {}'.format(eta_info['eta'], eta_info['route_name'], eta_info['car_dest']))
def test_005() :
    """End-to-end: ETAs by location via eta_by_loc()."""
    (lat, lon) = (22.63279, 120.33447)
    ETA = BusOracle().eta_by_loc((lat, lon))
    for stop_etas in ETA :
        print("")
        print("{} ({})".format(stop_etas['stop']['nameZh'][PARAM['pref-lang']], stop_etas['stop']['distance-tty']))
        print("")
        for eta in stop_etas['etas'] :
            print('[{}] "{}" ~~> {}'.format(eta['eta'], eta['route_name'], eta['car_dest']))
def tests() :
    test_005()
## ==================== ENTRY :
# Module is import-only by design; tests() must be called explicitly.
if (__name__ == "__main__") :
    raise RuntimeWarning("Please include this file as a module")
|
# -*- python -*-
load("//tools/skylark:py.bzl", "py_binary")
load("//tools/skylark:drake_py.bzl", "drake_py_unittest")
load("//tools/lint:lint.bzl", "add_lint_tests")

package(default_visibility = ["//visibility:public"])

# Used by :python_env.bzl.
config_setting(
    name = "linux",
    values = {"cpu": "k8"},
)

# Make this helper script available to rules in other packages.
exports_files([
    "py_env_runner.py",
])

drake_py_unittest(
    name = "pathutils_test",
    data = [
        ":pathutils.bzl",
    ],
    deps = [
        "@bazel_tools//tools/python/runfiles",
    ],
)

add_lint_tests()
|
#
# $Id$
#
"""module to do div, grad, curl (but not 'all that') for pencil-code data.
"""
import numpy as N
from .der import *
from sys import exit
def div(f, dx, dy, dz):
    """
    Take the divergence of a pencil-code vector array.

    f must be a 4-D array f[mvar, mz, my, mx]; dx/dy/dz are grid spacings.
    Raises ValueError on a wrong-rank input.
    """
    if f.ndim != 4:
        # Raise with the message instead of print + bare raise, so callers
        # actually see why the input was rejected.
        raise ValueError(
            "div: must have vector 4-D array f[mvar,mz,my,mx] for divergence")
    return xder(f[0, ...], dx) + yder(f[1, ...], dy) + zder(f[2, ...], dz)
def grad(f, dx, dy, dz):
    """
    Take the gradient of a pencil code scalar array.

    f must be a 3-D array f[mz, my, mx]; returns shape (3,) + f.shape.
    (Previous docstring wrongly said "curl".)
    """
    if f.ndim != 3:
        raise ValueError(
            "grad: must have scalar 3-D array f[mz,my,mx] for gradient")
    # Avoid shadowing the function name with the result array.
    g = N.empty((3,) + f.shape)
    g[0, ...] = xder(f, dx)
    g[1, ...] = yder(f, dy)
    g[2, ...] = zder(f, dz)
    return g
def curl(f, dx, dy, dz, run2D=False):
    """
    Take the curl of a pencil code vector array.

    23-fev-2009/dintrans+morin: introduced the run2D parameter to deal
    with pure 2-D snapshots (solved the (x,z)-plane pb)

    Raises ValueError when f's leading axis is not of size 3.
    """
    if f.shape[0] != 3:
        raise ValueError(
            "curl: must have vector 4-D array f[3,mz,my,mx] for curl")
    curl = N.empty_like(f)
    if dy != 0. and dz != 0.:
        # 3-D case
        curl[0, ...] = yder(f[2, ...], dy) - zder(f[1, ...], dz)
        curl[1, ...] = zder(f[0, ...], dz) - xder(f[2, ...], dx)
        curl[2, ...] = xder(f[1, ...], dx) - yder(f[0, ...], dy)
    elif dy == 0.:
        # 2-D case in the (x,z)-plane
        # f[...,nz,1,nx] if run2D=False or f[...,nz,nx] if run2D=True
        curl[0, ...] = zder(f, dz, run2D)[0, ...] - xder(f, dx)[2, ...]
    else:
        # 2-D case in the (x,y)-plane
        # f[...,1,ny,nx] if run2D=False or f[...,ny,nx] if run2D=True
        curl[0, ...] = xder(f, dx)[1, ...] - yder(f, dy)[0, ...]
    return curl
def curl2(f, dx, dy, dz):
    """
    take the double curl of a pencil code vector array.
    CARTESIAN COORDINATES ONLY!!

    f must be a 4-D array f[3, mz, my, mx]; raises ValueError otherwise.
    """
    if f.ndim != 4 or f.shape[0] != 3:
        raise ValueError(
            "curl2: must have vector 4-D array f[3,mz,my,mx] for curl2")
    curl2 = N.empty(f.shape)
    # curl(curl f) = grad(div f) - laplacian(f), expanded component-wise
    curl2[0, ...] = xder(yder(f[1, ...], dy) + zder(f[2, ...], dz), dx) \
        - yder2(f[0, ...], dy) - zder2(f[0, ...], dz)
    curl2[1, ...] = yder(xder(f[0, ...], dx) + zder(f[2, ...], dz), dy) \
        - xder2(f[1, ...], dx) - zder2(f[1, ...], dz)
    curl2[2, ...] = zder(xder(f[0, ...], dx) + yder(f[1, ...], dy), dz) \
        - xder2(f[2, ...], dx) - yder2(f[2, ...], dy)
    return curl2
def del2(f, dx, dy, dz):
    """taken from pencil code's sub.f90
    ! calculate del2 (the Laplacian), d^2/dx^2 + d^2/dy^2 + d^2/dz^2,
    ! of a scalar
    (Previous docstring was a copy of del6's and described del6.)
    """
    return xder2(f, dx) + yder2(f, dy) + zder2(f, dz)
def del6(f, dx, dy, dz):
    """taken from pencil code's sub.f90
    ! calculate del6 (defined here as d^6/dx^6 + d^6/dy^6 + d^6/dz^6, rather
    ! than del2^3) of a scalar for hyperdiffusion
    """
    # Sum the three sixth-order derivatives in one expression.
    return xder6(f, dx) + yder6(f, dy) + zder6(f, dz)
|
import codecs
import re
import sys
from collections import OrderedDict
from fnmatch import fnmatch
from invisibleroads_macros.configuration import (
RawCaseSensitiveConfigParser, format_settings, load_relative_settings,
load_settings, make_absolute_paths, make_relative_paths, save_settings)
from invisibleroads_macros.descriptor import cached_property
from invisibleroads_macros.disk import (
are_same_path, get_absolute_path, link_path)
from invisibleroads_macros.exceptions import BadPath
from invisibleroads_macros.log import (
filter_nested_dictionary, format_path, get_log, parse_nested_dictionary,
parse_nested_dictionary_from)
from invisibleroads_macros.shell import make_executable
from invisibleroads_macros.table import normalize_key
from invisibleroads_macros.text import has_whitespace, unicode_safely
from os import getcwd, walk
from os.path import basename, dirname, isabs, join
from pyramid.settings import asbool
from six import text_type
from .exceptions import (
DataParseError, DataTypeError, ToolConfigurationNotFound,
ToolConfigurationNotValid, ToolNotFound, ToolNotSpecified)
from .symmetries import (
prepare_path_argument, suppress, COMMAND_LINE_JOIN, SCRIPT_EXTENSION)
from .types import get_data_type, RESERVED_ARGUMENT_NAMES
# Section headers look like "crosscompute <tool name>"
TOOL_NAME_PATTERN = re.compile(r'crosscompute\s*(.*)')
# A brace-delimited argument placeholder, braces included
ARGUMENT_PATTERN = re.compile(r'(\{\s*.+?\s*\})')
# The bare name inside a brace-delimited placeholder
ARGUMENT_NAME_PATTERN = re.compile(r'\{\s*(.+?)\s*\}')
# "key = value" (optionally "--key = value")
# NOTE(review): the trailing lazy (.+?) with no anchor matches only one
# character of the value under re.match -- confirm callers use fullmatch
# or only need the groups' presence.
ARGUMENT_SETTING_PATTERN = re.compile(r'(--)?(.+?)\s*=\s*(.+?)')
L = get_log(__name__)
class ResultConfiguration(object):
    """Reads and writes the configuration files stored in a result folder.

    Files handled: f.cfg (tool location, plus an 'f' link to the tool's
    configuration folder), x.cfg (result arguments), y.cfg (result
    properties), and the x / x-debugger run scripts.
    """

    def __init__(self, result_folder, quiet=False):
        # quiet: suppress echoing of saved settings to stdout
        self.result_folder = result_folder
        self.quiet = quiet

    def save_tool_location(self, tool_definition, tool_id=None):
        """Record where the tool configuration lives (f.cfg + 'f' link)."""
        configuration_folder = tool_definition['configuration_folder']
        # Linking may fail (e.g. same path); treat as non-fatal
        with suppress(ValueError):
            link_path(join(self.result_folder, 'f'), configuration_folder)
        tool_location = {
            'configuration_folder': configuration_folder,
            'tool_name': tool_definition['tool_name'],
        }
        if tool_id:
            tool_location['tool_id'] = tool_id
        d = {'tool_location': tool_location}
        if not self.quiet:
            print(format_settings(d))
            print('')
        # Persist the folder as the relative 'f' link created above
        tool_location['configuration_folder'] = 'f'
        return save_settings(join(self.result_folder, 'f.cfg'), d)

    def save_result_arguments(
            self, tool_definition, result_arguments, environment=None,
            external_folders=None):
        """Render and persist the result arguments (x.cfg)."""
        d = {'result_arguments': OrderedDict((
            k, get_data_type(k).render(v)
        ) for k, v in result_arguments.items())}
        if environment:
            d['environment_variables'] = environment
        if not self.quiet:
            print(format_settings(d))
            print('')
        # Drop private (underscore-prefixed) and reserved entries
        d = filter_nested_dictionary(d, lambda x: x.startswith(
            '_') or x in RESERVED_ARGUMENT_NAMES)
        d = make_relative_paths(d, self.result_folder, external_folders or [
            tool_definition['configuration_folder'],
        ])
        return save_settings(join(self.result_folder, 'x.cfg'), d)

    def save_result_properties(self, result_properties):
        """Persist the result properties (y.cfg)."""
        d = {'result_properties': result_properties}
        if not self.quiet:
            print(format_settings(d))
        d = filter_nested_dictionary(d, lambda x: x.startswith('_'))
        d = make_relative_paths(d, self.result_folder)
        return save_settings(join(self.result_folder, 'y.cfg'), d)

    def save_result_scripts(self, tool_definition, result_arguments):
        """Write the command script and, for python tools, a pudb variant."""
        command_template = tool_definition['command_template']
        self.save_script(
            'x', 'command', command_template, tool_definition,
            result_arguments)
        if command_template.startswith('python'):
            # Swap the interpreter for the matching pudb debugger entry point
            debugger_command = 'pudb' if sys.version_info[0] < 3 else 'pudb3'
            debugger_template = ' '.join([
                debugger_command] + command_template.split(' ', 1)[1:])
            self.save_script(
                'x-debugger', 'debugger', debugger_template, tool_definition,
                result_arguments)

    def save_script(
            self, script_name, command_name, command_template, tool_definition,
            result_arguments):
        """Render command_template into an executable shell script."""
        target_path = join(self.result_folder, script_name + SCRIPT_EXTENSION)
        command_parts = [
            'cd "%s"' % tool_definition['configuration_folder'],
            render_command(command_template.replace(
                '\n', ' %s\n' % COMMAND_LINE_JOIN), result_arguments)]
        with codecs.open(target_path, 'w', encoding='utf-8') as target_file:
            target_file.write('\n'.join(command_parts) + '\n')
        if not self.quiet:
            print(command_name + '_path = %s' % format_path(target_path))
        return make_executable(target_path)

    @cached_property
    def tool_definition(self):
        # Lazily resolved from the saved tool location (f.cfg)
        return load_tool_definition(join(self.result_folder, 'f.cfg'))

    @cached_property
    def result_arguments(self):
        # Lazily parsed from x.cfg against the tool definition
        return load_result_arguments(join(
            self.result_folder, 'x.cfg'), self.tool_definition)

    @cached_property
    def result_properties(self):
        # Lazily parsed from y.cfg
        return load_result_properties(join(self.result_folder, 'y.cfg'))
def find_tool_definition_by_name(folder, default_tool_name=None):
    """Walk folder recursively and collect tool definitions from *.ini files.

    Returns {unique_tool_name: tool_definition}; name clashes are resolved
    by _get_unique_tool_name().
    """
    tool_definition_by_name = {}
    folder = unicode_safely(folder)
    default_tool_name = unicode_safely(default_tool_name)
    for root_folder, folder_names, file_names in walk(folder):
        # The top folder falls back to the caller-supplied default name;
        # subfolders default to their own basename
        if are_same_path(root_folder, folder):
            tool_name = default_tool_name or basename(folder)
        else:
            tool_name = basename(root_folder)
        for file_name in file_names:
            if not fnmatch(file_name, '*.ini'):
                continue
            try:
                tool_configuration_path = get_absolute_path(
                    file_name, root_folder)
            except BadPath:
                L.warning('link skipped (%s)' % join(root_folder, file_name))
                continue
            # NOTE(review): the inner loop rebinds tool_name, so a later
            # .ini in the same folder sees the previous file's last tool
            # name as its default -- confirm intended.
            for tool_name, tool_definition in load_tool_definition_by_name(
                    tool_configuration_path, tool_name).items():
                tool_name = _get_unique_tool_name(
                    tool_name, tool_definition_by_name)
                tool_definition_by_name[tool_name] = tool_definition
    return tool_definition_by_name
def find_tool_definition(folder=None, tool_name='', default_tool_name=''):
    """Return the single tool definition matching tool_name under folder.

    Raises ToolConfigurationNotFound when no configuration exists,
    ToolNotSpecified when several tools exist and no name was given,
    and ToolNotFound when the given name matches none of them.
    """
    tool_definition_by_name = find_tool_definition_by_name(
        folder or getcwd(), default_tool_name)
    if not tool_definition_by_name:
        raise ToolConfigurationNotFound(
            'Tool configuration not found. Run this command in a folder '
            'with a tool configuration file or in a parent folder.')
    if len(tool_definition_by_name) == 1:
        return list(tool_definition_by_name.values())[0]
    if not tool_name:
        raise ToolNotSpecified('Tool not specified. {}'.format(
            format_available_tools(tool_definition_by_name)))
    # (Removed dead `tool_name = tool_name or ...keys()[0]`: unreachable
    # after the check above, and dict views are not indexable on Python 3.)
    try:
        tool_definition = tool_definition_by_name[tool_name]
    except KeyError:
        raise ToolNotFound('Tool not found ({}). {}'.format(
            tool_name, format_available_tools(tool_definition_by_name)))
    return tool_definition
def load_tool_definition_by_name(
        tool_configuration_path, default_tool_name=None):
    """Parse one .ini file into {tool_name: tool_definition}.

    Sections that are not valid crosscompute sections are skipped silently;
    sections with invalid definitions are skipped with a warning.
    """
    tool_definition_by_name = {}
    configuration = RawCaseSensitiveConfigParser()
    configuration.read(tool_configuration_path, 'utf-8')
    configuration_folder = dirname(tool_configuration_path)
    for section_name in configuration.sections():
        try:
            tool_name = _parse_tool_name(section_name, default_tool_name)
        except ToolConfigurationNotValid as e:
            # Not a crosscompute section; ignore it
            continue
        try:
            tool_definition = _parse_tool_definition(dict(configuration.items(
                section_name)), configuration_folder, tool_name)
        except ToolConfigurationNotValid as e:
            L.warning('tool skipped (configuration_path=%s, tool_name=%s) ' % (
                tool_configuration_path, tool_name) + str(e))
            continue
        tool_definition_by_name[tool_name] = tool_definition
    return tool_definition_by_name
def load_tool_definition(result_configuration_path):
    """Resolve the tool definition referenced by a result's f.cfg."""
    s = load_settings(result_configuration_path, 'tool_location')
    try:
        tool_configuration_folder = s['configuration_folder']
        tool_name = s['tool_name']
    except KeyError:
        raise ToolConfigurationNotFound
    if not isabs(tool_configuration_folder):
        # Relative folders are resolved against the f.cfg location
        result_configuration_folder = dirname(result_configuration_path)
        tool_configuration_folder = join(
            result_configuration_folder, tool_configuration_folder)
    return find_tool_definition(tool_configuration_folder, tool_name)
def load_result_arguments(result_configuration_path, tool_definition):
    """Load result arguments, dropping unparseable ones with a warning."""
    result_configuration_folder = dirname(result_configuration_path)
    results_folder = dirname(result_configuration_folder)
    external_folders = [
        tool_definition['configuration_folder'],
        results_folder]
    arguments = load_relative_settings(
        result_configuration_path, 'result_arguments', external_folders)
    # target_folder is managed by the caller, never taken from the file
    arguments.pop('target_folder', None)
    try:
        parsed_arguments = parse_data_dictionary_from(
            arguments, result_configuration_folder, external_folders,
            tool_definition)
    except DataParseError as e:
        # keep the values that parsed; warn about and drop the rest
        parsed_arguments = e.value_by_key
        for argument_name, error_message in e.message_by_name.items():
            L.warning(
                'argument skipped (' +
                'configuration_path=%s ' % result_configuration_path +
                'argument_name=%s ' % argument_name +
                'error_message=%s)' % error_message)
            del parsed_arguments[argument_name]
    return parsed_arguments
def load_result_properties(result_configuration_path):
    """Return the result_properties section as a one-level nested dict."""
    raw_properties = load_relative_settings(
        result_configuration_path, 'result_properties')
    return parse_nested_dictionary_from(raw_properties, max_depth=1)
def format_available_tools(tool_definition_by_name):
    """Format a count header followed by one tool name per line."""
    tool_names = '\n'.join(tool_definition_by_name)
    return '{} available:\n{}'.format(
        len(tool_definition_by_name), tool_names)
def parse_data_dictionary(
        text, root_folder, external_folders=None, tool_definition=None):
    """Parse raw text into a validated data dictionary.

    Keys are tokens without ':' or spaces; values are validated by
    parse_data_dictionary_from.
    """
    raw_dictionary = parse_nested_dictionary(
        text, is_key=lambda x: ':' not in x and ' ' not in x)
    return parse_data_dictionary_from(
        raw_dictionary, root_folder, external_folders, tool_definition)
def parse_data_dictionary_from(
        raw_dictionary, root_folder, external_folders=None,
        tool_definition=None):
    """Validate each argument value against its data type.

    Reserved argument names pass through untouched. On any parse failure,
    raises DataParseError carrying the per-key error messages plus the
    partially parsed dictionary.
    """
    parsed = make_absolute_paths(raw_dictionary, root_folder, external_folders)
    error_by_key = OrderedDict()
    for key, value in parsed.items():
        if key in RESERVED_ARGUMENT_NAMES:
            continue
        data_type = get_data_type(key)
        try:
            if tool_definition:
                default_value = get_default_value(key, tool_definition)
            else:
                default_value = None
            parsed[key] = data_type.parse_safely(value, default_value)
        except DataTypeError as e:
            # keep the raw value; record why it failed
            error_by_key[key] = text_type(e)
    if error_by_key:
        raise DataParseError(error_by_key, parsed)
    return parsed
def get_default_key(key, tool_definition):
    """Return the tool-definition key holding a default for *key*.

    Candidates are checked in order: x.<key>, x.<key>_path, <key>,
    <key>_path. Returns None when none is present.
    """
    for prefix in 'x.', '':
        for candidate in (prefix + key, prefix + key + '_path'):
            if candidate in tool_definition:
                return candidate
def get_default_value(key, tool_definition):
    """Return the parsed default for *key*, or None if no default exists.

    Inline defaults (<key>) are parsed directly; *_path defaults are
    loaded from disk by the data type.
    """
    data_type = get_data_type(key)
    for prefix in 'x.', '':
        direct_key = prefix + key
        if direct_key in tool_definition:
            return data_type.parse_safely(tool_definition[direct_key])
        path_key = prefix + key + '_path'
        if path_key in tool_definition:
            return data_type.load(tool_definition[path_key])
def render_command(command_template, result_arguments):
    """Render a command string, quoting empty or whitespace-bearing values."""
    quote_pattern = re.compile(r"""["'].*["']""")
    rendered_by_name = {}
    for argument_name, argument_value in result_arguments.items():
        rendered = get_data_type(argument_name).render(argument_value)
        if argument_name.endswith(('_path', '_folder')):
            rendered = prepare_path_argument(rendered)
        needs_quotes = not rendered or (
            has_whitespace(rendered) and not quote_pattern.match(rendered))
        if needs_quotes:
            rendered = '"%s"' % rendered
        rendered_by_name[argument_name] = rendered
    return command_template.format(**rendered_by_name)
def _get_unique_tool_name(tool_name, existing_tool_names):
suggested_tool_name = tool_name
i = 2
while True:
if suggested_tool_name not in existing_tool_names:
break
suggested_tool_name = '%s-%s' % (tool_name, i)
i += 1
return suggested_tool_name
def _parse_tool_name(configuration_section_name, default_tool_name=None):
    """Extract and normalize a tool name from a section name.

    Raises ToolConfigurationNotValid when the section is not a tool
    section at all.
    """
    match = TOOL_NAME_PATTERN.match(configuration_section_name)
    if not match:
        raise ToolConfigurationNotValid
    raw_name = match.group(1).strip() or default_tool_name or ''
    return normalize_key(raw_name, word_separator='-')
def _parse_tool_definition(value_by_key, configuration_folder, tool_name):
    """Build a tool definition dict from a raw configuration section.

    Raises ToolConfigurationNotValid when a required key is missing or any
    non-flag value is blank after path resolution.
    """
    try:
        d = _parse_tool_arguments(value_by_key)
    except KeyError as e:
        # a required key (e.g. the command template) is absent
        raise ToolConfigurationNotValid('%s required' % e)
    d['configuration_folder'] = configuration_folder
    d['tool_name'] = tool_name
    d['show_raw_output'] = asbool(value_by_key.get('show_raw_output'))
    # Resolve relative paths in place, then reject empty values.
    for k, v in make_absolute_paths(d, configuration_folder).items():
        if k in ('argument_names', 'show_raw_output'):
            continue  # non-string bookkeeping entries; cannot be stripped
        v = v.strip()
        if not v:
            message = 'value required for %s' % k
            if k.endswith('_path'):
                message += ' which must be a file in ' + configuration_folder
            raise ToolConfigurationNotValid(message)
        d[unicode_safely(k)] = unicode_safely(v)  # normalize to text type
    return d
def _parse_tool_arguments(value_by_key):
    """Normalize the command template and collect its argument names.

    Splits command_template on argument placeholders, rewriting each to a
    '{name}' (or '--name={name}') form, and records inline defaults under
    an 'x.' prefixed key when no default is already configured.
    Raises KeyError when 'command_template' is missing.
    """
    d = value_by_key.copy()
    terms, argument_names = [], []
    for term in ARGUMENT_PATTERN.split(value_by_key['command_template']):
        name_match = ARGUMENT_NAME_PATTERN.match(term)
        if name_match:
            argument_name = name_match.group(1)
            setting_match = ARGUMENT_SETTING_PATTERN.match(argument_name)
            if setting_match:
                # Placeholder carries an inline default value.
                # NOTE(review): the exact placeholder syntax is defined by the
                # ARGUMENT_* patterns elsewhere in this module — confirm there.
                prefix, argument_name, argument_value = setting_match.groups()
                term = '--%s={%s}' % (
                    argument_name, argument_name,
                ) if prefix else '{%s}' % argument_name
                argument_key = get_default_key(argument_name, value_by_key)
                if not argument_key:
                    # no default configured; keep the inline one under 'x.'
                    d['x.%s' % argument_name] = argument_value
            else:
                term = '{%s}' % argument_name
            argument_names.append(argument_name)
        terms.append(term.strip())
    d['command_template'] = ' '.join(terms).strip()
    d['argument_names'] = argument_names
    return d
|
from django.contrib.auth import get_user_model
from django.shortcuts import render, get_object_or_404
from .models import Customer
from .models import EnergyData
from rest_framework import status
from .serializer import NodeSerializer
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.views.generic.base import TemplateView, View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.edit import FormView
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.utils import formats, timezone
from django.http import JsonResponse
from django.shortcuts import render
from django.views.generic import View
from rest_framework.views import APIView
from rest_framework.response import Response
# Resolve the configured user model once at import time.
User = get_user_model()
class HomeView(View):
    """Render the chart page with a hard-coded customer count."""

    def get(self, request, *args, **kwargs):
        context = {"customer": 10}
        return render(request, 'charts.html', context)
def get_data(request, *args, **kwargs):
    """Return dummy sales/customer figures as a JSON response."""
    return JsonResponse({
        "sales": 100,
        "customers": 10,
    })
class ChartData(APIView):
    """Serve chart data; endpoint is open (no auth or permission checks)."""
    authentication_classes = []
    permission_classes = []

    def get(self, request, format=None):
        # first bar shows the live user count; the rest are placeholders
        gs_count = User.objects.all().count()
        labels = ["Users", "Blue", "Yellow", "Green", "Purple", "Orange"]
        default_items = [gs_count, 34, 23, 32, 12, 2]
        return Response({
            "labels": labels,
            "default": default_items,
        })
#def index(request):
# all_nodes = Customer.objects.all()
# context = {
# 'all_nodes':all_nodes,
# }
# return render(request, 'energy/index.html', context)
#def detail(request, customer_id):
# customer = get_object_or_404(Customer, id=customer_id)
# print (Customer)
# return render(request, 'energy/detail.html', {'customer': customer})
#class APINodeView(APIView):
# def get(self,request):
# all_nodes = Customer.objects.all()
# serializer = NodeSerializer(all_nodes, many=True)
# return Response(serializer.data)
# def post(self,request, format=None):
# serializer = NodeSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
from operator import methodcaller
from readers import FileReader
# Sentinel node names in the orbit-map input.
COM = "COM"  # chain terminator: walking parents stops when COM is reached
YOU = "YOU"  # our starting object
SAN = "SAN"  # Santa's object
def main():
    """Print the number of orbital transfers needed to reach Santa."""
    raw_orbits = [
        line.strip().split(")")
        for line in FileReader.read_input_as_list()
    ]
    # "A)B" means B orbits A, so map each child to its parent
    orbits = {child: parent for parent, child in raw_orbits}
    you_planets = set_of_planets_to_home(YOU, orbits, set())
    santa_planets = set_of_planets_to_home(SAN, orbits, set())
    # symmetric difference minus the two endpoints themselves
    print(f"Total number jumps to santa: {len(you_planets ^ santa_planets) - 2}")
def set_of_planets_to_home(planet, orbits, planets):
    """Collect every object on the path from *planet* down to COM.

    Adds each visited object (including *planet* itself) to *planets* and
    returns that set. Iterative rewrite of the original recursion.
    """
    current = planet
    while current in orbits:
        planets.add(current)
        if orbits[current] == COM:
            print(f"{len(planets)} planets to home")
            return planets
        current = orbits[current]
    # only reachable when the chain breaks before COM (malformed input)
    print(f"This is odd, did not expect to get here! Processing: {current}")
    return planets
# Script entry point.
if __name__ == "__main__":
    main()
|
from peachpy.x86_64 import *
from peachpy import *
from peachpy.c.types import *
# =================================================
# ADDITION
# =================================================
# Packed (SIMD) add instruction per element type; signed and unsigned share
# the same integer opcode because wrap-around addition is sign-agnostic.
avx_vector_add_map = {
    Yep8s : VPADDB,
    Yep8u : VPADDB,
    Yep16s: VPADDW,
    Yep16u: VPADDW,
    Yep32s: VPADDD,
    Yep32u: VPADDD,
    Yep64s: VPADDQ,
    Yep64u: VPADDQ,
    Yep32f: VADDPS,
    Yep64f: VADDPD
}
# Scalar add: general-purpose ADD for integers, AVX scalar adds for floats.
avx_scalar_add_map = {
    Yep8s : ADD,
    Yep8u : ADD,
    Yep16s: ADD,
    Yep16u: ADD,
    Yep32s: ADD,
    Yep32u: ADD,
    Yep64s: ADD,
    Yep64u: ADD,
    Yep32f: VADDSS,
    Yep64f: VADDSD
}
# =================================================
# SUBTRACTION
# =================================================
# Packed (SIMD) subtract instruction per element type.
avx_vector_sub_map = {
    Yep8s : VPSUBB,
    Yep8u : VPSUBB,
    Yep16s: VPSUBW,
    Yep16u: VPSUBW,
    Yep32s: VPSUBD,
    Yep32u: VPSUBD,
    Yep64s: VPSUBQ,
    Yep64u: VPSUBQ,
    Yep32f: VSUBPS,
    Yep64f: VSUBPD
}
# Scalar subtract: general-purpose SUB for integers, AVX scalar subs for floats.
avx_scalar_sub_map = {
    Yep8s : SUB,
    Yep8u : SUB,
    Yep16s: SUB,
    Yep16u: SUB,
    Yep32s: SUB,
    Yep32u: SUB,
    Yep64s: SUB,
    Yep64u: SUB,
    Yep32f: VSUBSS,
    Yep64f: VSUBSD
}
# =================================================
# MULTIPLICATION
# =================================================
# Packed multiply returning the LOW half of each product (16/32-bit only).
avx_vector_mult_low_map = {
    Yep16s : VPMULLW,
    Yep16u : VPMULLW,
    Yep32s : VPMULLD,
    Yep32u : VPMULLD
}
# Packed multiply returning the HIGH half; signedness matters here, hence
# the separate signed/unsigned opcodes (16-bit only).
avx_vector_mult_high_map = {
    Yep16s : VPMULHW,
    Yep16u : VPMULHUW
}
# Packed floating-point multiply.
avx_vector_mult_map = {
    Yep32f : VMULPS,
    Yep64f : VMULPD
}
# Scalar multiply: IMUL for integers, AVX scalar multiplies for floats.
avx_scalar_mult_map = {
    Yep8s : IMUL,
    Yep8u : IMUL,
    Yep16s : IMUL,
    Yep16u : IMUL,
    Yep32s : IMUL,
    Yep32u : IMUL,
    Yep64s : IMUL,
    Yep64u : IMUL,
    Yep32f : VMULSS,
    Yep64f : VMULSD
}
# =================================================
# MAXIMUM/MINIMUM
# =================================================
# Packed maximum; signed (..S.) vs unsigned (..U.) opcodes per element type.
avx_vector_max_map = {
    Yep8s : VPMAXSB,
    Yep8u : VPMAXUB,
    Yep16s : VPMAXSW,
    Yep16u : VPMAXUW,
    Yep32s : VPMAXSD,
    Yep32u : VPMAXUD,
    Yep64s : VPMAXSQ,
    Yep64u : VPMAXUQ,
    Yep32f : VMAXPS,
    Yep64f : VMAXPD
}
# Scalar maximum: no single-instruction form for integers (None), AVX
# scalar max for floats.
avx_scalar_max_map = {
    Yep8s : None,
    Yep8u : None,
    Yep16s : None,
    Yep16u : None,
    Yep32s : None,
    Yep32u : None,
    Yep64s : None,
    Yep64u : None,
    Yep32f : VMAXSS,
    Yep64f : VMAXSD
}
# Packed minimum; mirrors the maximum map above.
avx_vector_min_map = {
    Yep8s : VPMINSB,
    Yep8u : VPMINUB,
    Yep16s : VPMINSW,
    Yep16u : VPMINUW,
    Yep32s : VPMINSD,
    Yep32u : VPMINUD,
    Yep64s : VPMINSQ,
    Yep64u : VPMINUQ,
    Yep32f : VMINPS,
    Yep64f : VMINPD
}
# Scalar minimum: integers unsupported (None), AVX scalar min for floats.
avx_scalar_min_map = {
    Yep8s : None,
    Yep8u : None,
    Yep16s : None,
    Yep16u : None,
    Yep32s : None,
    Yep32u : None,
    Yep64s : None,
    Yep64u : None,
    Yep32f : VMINSS,
    Yep64f : VMINSD
}
# =================================================
# UNPACK
# =================================================
# Interleave the HIGH halves of two vectors, keyed by (narrow, wide) type pair.
avx_high_unpack_map = {
    (Yep8s, Yep16s) : VPUNPCKHBW,
    (Yep8u, Yep16u) : VPUNPCKHBW,
    (Yep16s, Yep32s): VPUNPCKHWD,
    (Yep16u, Yep32u): VPUNPCKHWD,
    (Yep32s, Yep64s): VPUNPCKHDQ,
    (Yep32u, Yep64u): VPUNPCKHDQ
}
# Interleave the LOW halves of two vectors, keyed by (narrow, wide) type pair.
avx_low_unpack_map = {
    (Yep8s, Yep16s) : VPUNPCKLBW,
    (Yep8u, Yep16u) : VPUNPCKLBW,
    (Yep16s, Yep32s): VPUNPCKLWD,
    (Yep16u, Yep32u): VPUNPCKLWD,
    (Yep32s, Yep64s): VPUNPCKLDQ,
    (Yep32u, Yep64u): VPUNPCKLDQ
}
# =================================================
# MOV
# =================================================
# Aligned packed moves: integer VMOVDQA, float VMOVAPS/VMOVAPD.
avx_vector_aligned_mov_map = {
    Yep8s : VMOVDQA,
    Yep8u : VMOVDQA,
    Yep16s : VMOVDQA,
    Yep16u : VMOVDQA,
    Yep32s : VMOVDQA,
    Yep32u : VMOVDQA,
    Yep64s : VMOVDQA,
    Yep64u : VMOVDQA,
    Yep32f : VMOVAPS,
    Yep64f : VMOVAPD,
}
# Unaligned packed moves: integer VMOVDQU, float VMOVUPS/VMOVUPD.
avx_vector_unaligned_mov_map = {
    Yep8s : VMOVDQU,
    Yep8u : VMOVDQU,
    Yep16s: VMOVDQU,
    Yep16u: VMOVDQU,
    Yep32s: VMOVDQU,
    Yep32u: VMOVDQU,  # was missing; unsigned uses the same integer move
    Yep64s: VMOVDQU,
    Yep64u: VMOVDQU,  # was missing; unsigned uses the same integer move
    Yep32f: VMOVUPS,
    Yep64f: VMOVUPD
}
# Packed widening moves, keyed by (narrow, wide) type pair:
# sign-extend (VPMOVSX*) for signed types, zero-extend (VPMOVZX*) for unsigned.
avx_vector_movsx_map = {
    (Yep8s, Yep16s) : VPMOVSXBW,
    (Yep8u, Yep16u) : VPMOVZXBW,
    (Yep16s, Yep32s): VPMOVSXWD,
    (Yep16u, Yep32u): VPMOVZXWD,
    (Yep32s, Yep64s): VPMOVSXDQ,
    (Yep32u, Yep64u): VPMOVZXDQ
}
# Scalar moves: general-purpose MOV for integers, AVX scalar moves for floats.
avx_scalar_mov_map = {
    Yep8s : MOV,
    Yep8u : MOV,
    Yep16s : MOV,
    Yep16u : MOV,
    Yep32s : MOV,
    Yep32u : MOV,
    Yep64s : MOV,
    Yep64u : MOV,
    Yep32f : VMOVSS,
    Yep64f : VMOVSD
}
# Scalar widening moves: MOVSX/MOVSXD sign-extends, MOVZX zero-extends;
# 32->64 unsigned is a plain MOV (32-bit writes zero the upper half).
avx_scalar_movsx_map = {
    (Yep8s, Yep16s) : MOVSX,
    (Yep8u, Yep16u) : MOVZX,
    (Yep16s, Yep32s): MOVSX,
    (Yep16u, Yep32u): MOVZX,
    (Yep32s, Yep64s): MOVSXD,
    (Yep32u, Yep64u): MOV
}
# GPR -> XMM moves for integers (VMOVD/VMOVQ by width); floats already live
# in XMM registers, hence None.
avx_scalar_reg_to_vector_reg_mov_map = {
    Yep8s : VMOVD,
    Yep8u : VMOVD,
    Yep16s : VMOVD,
    Yep16u : VMOVD,
    Yep32s : VMOVD,
    Yep32u : VMOVD,
    Yep64s : VMOVQ,
    Yep64u : VMOVQ,
    Yep32f : None,
    Yep64f : None
}
# =================================================
# BROADCAST
# =================================================
# Broadcast one element to all lanes, keyed by element type.
avx_broadcast_map = {
    Yep8s : VPBROADCASTB,
    Yep8u : VPBROADCASTB,
    Yep16s : VPBROADCASTW,
    Yep16u : VPBROADCASTW,
    Yep32s : VPBROADCASTD,
    Yep32u : VPBROADCASTD,
    Yep64s : VPBROADCASTQ,
    Yep64u : VPBROADCASTQ,
    Yep32f : VBROADCASTSS,
    Yep64f : VBROADCASTSD
}
# =================================================
# REGS
# =================================================
# Scalar register class per element type: width-matched GPR for integers,
# XMM for floats.
avx_scalar_register_map = {
    Yep8s : GeneralPurposeRegister8,
    Yep8u : GeneralPurposeRegister8,
    Yep16s : GeneralPurposeRegister16,
    Yep16u : GeneralPurposeRegister16,
    Yep32s : GeneralPurposeRegister32,
    Yep32u : GeneralPurposeRegister32,
    Yep64s : GeneralPurposeRegister64,
    Yep64u : GeneralPurposeRegister64,
    Yep32f : XMMRegister,
    Yep64f : XMMRegister
}
|
# author: Hendrik Werner s4549775
# author: Constantin Blach s4329872
from datetime import datetime
from random import randint
import socket
from typing import Tuple
from bTCP.exceptions import ChecksumMismatch
from bTCP.message import BTCPMessage, MessageFactory
from bTCP.state_machine import State, StateMachine
class Client(StateMachine):
    """bTCP client state machine.

    Drives the three-way handshake (Closed -> SynSent -> Established), a
    sliding-window transfer of input_bytes, and connection teardown via
    FinSent/FinReceived before reaching Finished.
    """

    def __init__(
        self,
        sock: socket.socket,
        input_bytes: bytes,
        destination_address: Tuple[str, int],
        window: int,
        timeout: float,
        retry_limit: int,
        output_file: str,
    ):
        self.closed = Client.Closed(self)
        self.syn_sent = Client.SynSent(self)
        self.established = Client.Established(self, input_bytes, timeout)
        self.fin_sent = Client.FinSent(self, retry_limit)
        self.fin_received = Client.FinReceived(self, retry_limit)
        self.finished = Client.Finished(self)
        self.state = self.closed
        self.destination_address = destination_address
        self.expected_syn = 0       # next syn number expected from the server
        self.factory = MessageFactory(0, window)
        self.highest_ack = 0        # highest ack number the server has sent us
        self.output_file = bytes(output_file, "utf-8")
        self.server_window = 0      # learned from the server's SYN-ACK
        self.sock = sock
        self.sock.settimeout(timeout)
        self.stream_id = 0
        self.syn_number = 0

    def accept_ack(self, ack: int):
        """Record *ack* only if it advances the highest ack seen so far."""
        self.highest_ack = ack if ack > self.highest_ack else self.highest_ack

    class Closed(State):
        def run(self):
            """Pick a random initial syn number and stream id, start handshake."""
            sm = self.state_machine
            # NOTE(review): syn range [0, 2**8] is much smaller than the
            # stream-id range [0, 2**32] — confirm this is intended.
            sm.syn_number = randint(0, 2 ** 8)
            stream_id = randint(0, 2 ** 32)
            sm.stream_id = stream_id
            sm.factory.stream_id = stream_id
            return sm.syn_sent

    class SynSent(State):
        def run(self):
            """Send SYN and wait for a matching SYN-ACK; retry on failure."""
            sm = self.state_machine
            sm.sock.sendto(
                sm.factory.syn_message(
                    sm.syn_number, sm.expected_syn, sm.output_file
                ).to_bytes(),
                sm.destination_address,
            )
            try:
                synack_message = BTCPMessage.from_bytes(sm.sock.recv(1016))
            except socket.timeout:
                self.log_error("timed out")
                return sm.syn_sent
            except ChecksumMismatch:
                self.log_error("checksum mismatch")
                return sm.syn_sent
            if not (
                synack_message.header.id == sm.stream_id and
                synack_message.header.syn and
                synack_message.header.ack
            ):
                self.log_error("wrong message received")
                return sm.syn_sent
            # handshake succeeded: adopt the server's window and numbers
            sm.server_window = synack_message.header.window_size
            sm.accept_ack(synack_message.header.ack_number)
            sm.expected_syn = synack_message.header.syn_number + 1
            sm.syn_number += 1
            sm.sock.sendto(
                sm.factory.ack_message(
                    sm.syn_number, sm.expected_syn
                ).to_bytes(),
                sm.destination_address,
            )
            print("Connection established")
            return sm.established

    class Established(State):
        def __init__(
            self,
            state_machine: StateMachine,
            input_bytes: bytes,
            timeout: float,
        ):
            super().__init__(state_machine)
            self.input_bytes = input_bytes
            # syn_number -> (message, last_send_timestamp) of unacked messages
            self.messages = {}
            self.timeout = timeout

        def run(self):
            """Send data within the server's window, collect acks, retransmit."""
            sm = self.state_machine
            # fill the send window with fresh payload chunks
            while (
                self.input_bytes and
                sm.syn_number < sm.highest_ack + sm.server_window
            ):
                data = self.input_bytes[:BTCPMessage.payload_size]
                self.input_bytes = self.input_bytes[
                    BTCPMessage.payload_size:
                ]
                message = sm.factory.message(
                    sm.syn_number, sm.expected_syn, data
                )
                sm.sock.sendto(message.to_bytes(), sm.destination_address)
                self.messages[sm.syn_number] = (
                    message, datetime.now().timestamp(),
                )
                sm.syn_number += 1
            # drain acknowledgements until everything sent so far is acked
            while sm.highest_ack < sm.syn_number:
                try:
                    message = BTCPMessage.from_bytes(sm.sock.recv(1016))
                except socket.timeout:
                    self.log_error("timed out")
                    break
                except ChecksumMismatch:
                    self.log_error("checksum mismatch")
                    continue
                if message.header.id != sm.stream_id:
                    continue
                # cumulative ack: drop everything below the acked number
                for syn_nr in range(sm.highest_ack, message.header.ack_number):
                    del self.messages[syn_nr]
                sm.accept_ack(message.header.ack_number)
                if message.header.fin:
                    sm.expected_syn += 1
                    return sm.fin_received
            # retransmit messages whose last send timed out
            for syn_nr in self.messages:
                message, timestamp = self.messages[syn_nr]
                now = datetime.now().timestamp()
                if now - timestamp > self.timeout:
                    message.header.ack_number = sm.expected_syn
                    sm.sock.sendto(message.to_bytes(), sm.destination_address)
                    self.messages[syn_nr] = (message, now)
            if self.input_bytes or sm.highest_ack < sm.syn_number:
                return sm.established
            return sm.fin_sent

    class FinSent(State):
        def __init__(
            self,
            state_machine: StateMachine,
            retry_limit: int,
        ):
            super().__init__(state_machine)
            self.retries = retry_limit

        def run(self):
            """Send FIN, await FIN-ACK, then confirm with a final ACK."""
            sm = self.state_machine
            if self.retries <= 0:
                self.log_error("retry limit reached")
                return sm.finished
            self.retries -= 1
            # (fixed: removed stale `global syn_number` / `global expected_syn`
            # declarations — this state only uses sm attributes)
            sm.sock.sendto(
                sm.factory.fin_message(
                    sm.syn_number, sm.expected_syn
                ).to_bytes(),
                sm.destination_address,
            )
            try:
                finack_message = BTCPMessage.from_bytes(sm.sock.recv(1016))
            except socket.timeout:
                self.log_error("timed out")
                return sm.fin_sent
            except ChecksumMismatch:
                self.log_error("checksum mismatch")
                return sm.fin_sent
            if not (
                finack_message.header.id == sm.stream_id and
                finack_message.header.fin and
                finack_message.header.ack and
                finack_message.header.syn_number == sm.expected_syn
            ):
                self.log_error("wrong message received")
                return sm.fin_sent
            sm.accept_ack(finack_message.header.ack_number)
            sm.syn_number += 1
            sm.expected_syn += 1
            sm.sock.sendto(
                sm.factory.ack_message(
                    sm.syn_number, sm.expected_syn
                ).to_bytes(),
                sm.destination_address,
            )
            return sm.finished

    class FinReceived(State):
        def __init__(
            self,
            state_machine: StateMachine,
            retry_limit: int,
        ):
            super().__init__(state_machine)
            self.retries = retry_limit

        def run(self):
            """Answer the server's FIN with FIN-ACK and wait for its ACK."""
            sm = self.state_machine
            if self.retries <= 0:
                self.log_error("retry limit reached")
                return sm.finished
            self.retries -= 1
            sm.sock.sendto(
                sm.factory.finack_message(
                    sm.syn_number, sm.expected_syn
                ).to_bytes(),
                sm.destination_address,
            )
            try:
                ack_message = BTCPMessage.from_bytes(sm.sock.recv(1016))
            except socket.timeout:
                self.log_error("timed out")
                return sm.fin_received
            except ChecksumMismatch:
                self.log_error("checksum mismatch")
                return sm.fin_received
            if not (
                ack_message.header.ack and
                ack_message.header.id == sm.stream_id and
                # fixed: was bare `expected_syn`, which raised NameError
                ack_message.header.syn_number == sm.expected_syn
            ):
                self.log_error("wrong message received")
                return sm.fin_received
            return sm.finished

    class Finished(State):
        pass
|
#!/usr/bin/env python3
def parse_note(string, note_search, note_dict):
    """Split a chord string into (canonical note, normalized chord suffix)."""
    note = get_note(string, note_search, note_dict)
    chord = get_chord(string, note).lower().replace(" ", "")
    return note, chord
def get_note(string, note_search, note_dict):
    """Return the canonical name of the note prefixing *string*, or None."""
    for candidate in note_search:
        prefix = string[:len(candidate)].capitalize()
        if prefix == candidate:
            return note_dict[candidate]
def get_chord(string, note):
    """Drop the leading note name and return the remaining chord text."""
    return string[len(note):]
# Canonical note names; flats are normalized to their enharmonic sharps.
note_dict = {
    "C": "C", "C#": "C#", "Db": "C#",
    "D": "D", "D#": "D#", "Eb": "D#",
    "E": "E", "F": "F",
    "F#": "F#", "Gb": "F#",
    "G": "G", "G#": "G#", "Ab": "G#",
    "A": "A", "A#": "A#", "Bb": "A#",
    "B": "B",
}
# Two-character names first so "C#"/"Db" match before plain "C"/"D".
notes_search_order = ["C#", "Db", "D#", "Eb", "F#", "Gb", "G#", "Ab", "A#", "Bb", "C", "D", "E", "F", "G", "A", "B"]
def main():
    """Prompt for a chord and print its parsed (note, suffix) pair."""
    # (fixed: removed unused locals `note` and `pattern`)
    user_input = input("Skriv ackord: ")
    print(parse_note(user_input, notes_search_order, note_dict))
# Script entry point.
if __name__ == "__main__":
    main()
|
##################################################################
#----------------- Initial conditions for modRSW -----------------
# (T. Kent: amttk@leeds.ac.uk)
##################################################################
'''
Functions generate different initial conditions described below for modRSW model with
and without bottom topography...
INPUT ARGS:
# x: mesh coords
# Neq: number of equations (variables) - 4 w/o topography, 5 w/ topography
# Nk: no. of cells in mesh
# H0: reference (scaled) height
# L: length of domain
# A: amplitude
# V: velocity scale
OUTPUT:
# U0: array of initial data, size (Neq,Nk)
##################################################################
DESCRIPTIONS:
Rotation, no topography:
<init_cond_1>
--- sinusoidal waves in h and u, zero v and r.
<init_cond_2>
--- Rossby adj with disturbed height profile:
--- Exact step in h, zero u, v, and r.
<init_cond_3>
--- Rossby adj with disturbed height profile:
--- Smoothed step in h, zero u, v, and r.
<init_cond_4>
--- Rossby adj with disturbed v-velocity profile:
--- Single jet in v, flat h profile, zero u and r.
<init_cond_5>
--- Rossby adj with disturbed v-velocity profile:
--- Double jet in v, flat h profile, zero u and r.
<init_cond_6>
--- Rossby adj with disturbed v-velocity profile:
--- Quadruple jet in v, flat h profile, zero u and r.
<init_cond_6_1>
--- Rossby adj with disturbed v-velocity profile:
--- Quadruple jet in v, flat h=1 profile, u = constant \ne 0, and zero r.
Topography, no rotation:
<init_cond_topog>
--- single parabolic ridge
<init_cond_topog4>
--- 4 parabolic ridges
<init_cond_topog_cos>
--- superposition of sinusoids, as used in thesis chapter 6
'''
###############################################################
import numpy as np
###############################################################
def init_cond_1(x,Nk,Neq,H0,L,A,V):
    """Sinusoidal waves in h and u; v and r start at zero.

    Returns U0 of shape (Neq, Nk): cell-averaged first-order FV data for
    the conserved variables (h, hu, hr, hv).
    """
    k = 2*np.pi  # base wavenumber
    h = H0 + A*np.sin(2*k*x/L)
    u = A*np.sin(1*k*x/L)
    r = np.zeros(len(x))
    v = np.zeros(len(x))

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_2(x,Nk,Neq,H0,L,A,V):
    """Rossby adjustment: exact top-hat step in h; u, v, r start at zero."""
    from f_modRSW import heaviside
    # exact (discontinuous) top-hat between 0.25L and 0.75L
    step_up = heaviside(x-0.25*L)
    step_down = heaviside(x-0.75*L)
    h = H0 + A*(0.5*step_up - 0.5*step_down)
    u = np.zeros(len(x))
    r = np.zeros(len(x))
    v = np.zeros(len(x))

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_3(x,Nk,Neq,H0,L,A,V):
    """Rossby adjustment: tanh-smoothed top-hat in h; u, v, r start at zero."""
    gam = 100  # steepness of the smoothed step
    f1 = 1-np.tanh(gam*(x-0.75*L))
    f2 = 1-np.tanh(gam*(x-0.25*L))
    h = H0 + A*(0.5*f1 - 0.5*f2)
    u = np.zeros(len(x))
    r = np.zeros(len(x))
    v = np.zeros(len(x))

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_4(x,Nk,Neq,H0,L,A,V):
    """Rossby adjustment: single transverse jet in v; flat h, zero u and r."""
    h = H0*np.ones(len(x))
    u = np.zeros(len(x))
    r = np.zeros(len(x))
    Lj = 0.1*L  # jet width
    v = V*(1+np.tanh(4*(x-0.5*L)/Lj + 2))*(1-np.tanh(4*(x-0.5*L)/Lj - 2))/4

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_5(x,Nk,Neq,H0,L,A,V):
    """Rossby adjustment: double transverse jet in v; flat h, zero u and r."""
    h = H0*np.ones(len(x))
    u = np.zeros(len(x))
    r = np.zeros(len(x))
    Lj = 0.1*L  # jet width
    jet_plus = V*(1+np.tanh(4*(x-0.75*L)/Lj + 2))*(1-np.tanh(4*(x-0.75*L)/Lj - 2))/4
    jet_minus = V*(1+np.tanh(4*(x-0.25*L)/Lj + 2))*(1-np.tanh(4*(x-0.25*L)/Lj - 2))/4
    v = jet_plus - jet_minus

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_5_1(x,Nk,Neq,H0,L,A,V):
    """Double transverse jet in v with constant u = 0.5; flat h, zero r."""
    h = H0*np.ones(len(x))
    u = 0.5*np.ones(len(x))  # nonzero constant zonal velocity
    r = np.zeros(len(x))
    Lj = 0.1*L  # jet width
    jet_plus = V*(1+np.tanh(4*(x-0.75*L)/Lj + 2))*(1-np.tanh(4*(x-0.75*L)/Lj - 2))/4
    jet_minus = V*(1+np.tanh(4*(x-0.25*L)/Lj + 2))*(1-np.tanh(4*(x-0.25*L)/Lj - 2))/4
    v = jet_plus - jet_minus

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_6(x,Nk,Neq,H0,L,A,V):
    """Quadruple transverse jet in v; flat h, zero u and r.

    NOTE(review): jet width Lj and centres (0.2..0.8) are not scaled by L
    here, unlike init_cond_4/5 — confirm the domain is assumed unit length.
    """
    h = H0*np.ones(len(x))
    u = np.zeros(len(x))
    r = np.zeros(len(x))
    Lj = 0.05  # jet width (absolute, not scaled by L)
    jet_08 = (1+np.tanh(4*(x-0.8)/Lj + 2))*(1-np.tanh(4*(x-0.8)/Lj - 2))/4
    jet_02 = (1+np.tanh(4*(x-0.2)/Lj + 2))*(1-np.tanh(4*(x-0.2)/Lj - 2))/4
    jet_06 = (1+np.tanh(4*(x-0.6)/Lj + 2))*(1-np.tanh(4*(x-0.6)/Lj - 2))/4
    jet_04 = (1+np.tanh(4*(x-0.4)/Lj + 2))*(1-np.tanh(4*(x-0.4)/Lj - 2))/4
    v = V*(jet_08-jet_02+jet_06-jet_04)

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_6_1(x,Nk,Neq,H0,L,A,V):
    """Quadruple transverse jet in v with constant u = 0.5; flat h, zero r."""
    h = H0*np.ones(len(x))
    u = 0.5*np.ones(len(x))  # nonzero constant zonal velocity
    r = np.zeros(len(x))
    Lj = 0.05  # jet width (absolute, not scaled by L)
    jet_08 = (1+np.tanh(4*(x-0.8)/Lj + 2))*(1-np.tanh(4*(x-0.8)/Lj - 2))/4
    jet_02 = (1+np.tanh(4*(x-0.2)/Lj + 2))*(1-np.tanh(4*(x-0.2)/Lj - 2))/4
    jet_06 = (1+np.tanh(4*(x-0.6)/Lj + 2))*(1-np.tanh(4*(x-0.6)/Lj - 2))/4
    jet_04 = (1+np.tanh(4*(x-0.4)/Lj + 2))*(1-np.tanh(4*(x-0.4)/Lj - 2))/4
    v = V*(jet_08-jet_02+jet_06-jet_04)

    def cell_avg(node_values):
        # average nodal values onto the Nk cells (piecewise constant FV)
        return 0.5*(node_values[0:Nk] + node_values[1:Nk+1])

    U0 = np.zeros((Neq,Nk))
    U0[0,:] = cell_avg(h)    # h
    U0[1,:] = cell_avg(h*u)  # hu
    U0[2,:] = cell_avg(h*r)  # hr
    U0[3,:] = cell_avg(h*v)  # hv
    return U0
###############################################################
def init_cond_topog(x,Nk,Neq,H0,L,A,V):
    """Single parabolic ridge topography; uniform depth with hu = 1.

    Returns (U0, B): U0 holds cell-averaged (h, hu, hr) and B the
    cell-averaged bottom topography. h is clipped at zero where the ridge
    would pierce the free surface.
    """
    ic1 = H0*np.ones(len(x))
    ic2 = 1./ic1  # so that hu = 1 everywhere
    # (fixed: removed dead `ic2 = np.zeros(...)` store that was immediately
    # overwritten)
    ic3 = np.zeros(len(x))
    # single hill: parabola of height bc centred at L*xp with half-width a,
    # clipped at zero away from the ridge
    bc = 0.5
    xp = 0.1
    a = 0.05*L
    B = np.maximum(0, bc*(1 - ((x - L*xp)**2)/a**2))
    # (fixed: removed redundant second np.maximum(0, B) — B is already
    # clipped above)
    U0 = np.zeros((Neq,Nk))
    B = 0.5*(B[0:Nk] + B[1:Nk+1]); # b
    U0[0,:] = np.maximum(0, 0.5*(ic1[0:Nk] + ic1[1:Nk+1]) - B) # h
    U0[1,:] = 0.5*(ic1[0:Nk]*ic2[0:Nk] + ic1[1:Nk+1]*ic2[1:Nk+1]) # hu
    U0[2,:] = 0.5*(ic1[0:Nk]*ic3[0:Nk] + ic1[1:Nk+1]*ic3[1:Nk+1]) # hr
    return U0, B
###############################################################
def init_cond_topog4(x,Nk,Neq,H0,L,A,V):
    """Four parabolic ridges topography; uniform depth with hu = 1.

    Returns (U0, B): U0 holds cell-averaged (h, hu, hr) and B the
    cell-averaged bottom topography. h is clipped at zero where a ridge
    would pierce the free surface.
    """
    ic1 = H0*np.ones(len(x))
    ic2 = 1/ic1  # so that hu = 1 everywhere
    # (fixed: removed dead `ic2 = np.zeros(...)` store that was immediately
    # overwritten)
    ic3 = np.zeros(len(x))
    # 4 hills: parabolas of height bc at 0.25/0.45/0.65/0.85 of L*xp scaling,
    # each with half-width a; the envelope is their pointwise maximum
    bc = 0.4
    xp = 0.5
    a = 0.025*L
    B = np.maximum(bc*(1 - ((x - L*0.25*xp)**2)/a**2), bc*(1 - ((x - L*0.45*xp)**2)/a**2))
    B = np.maximum(B, bc*(1 - ((x - L*0.65*xp)**2)/a**2))
    B = np.maximum(B, bc*(1 - ((x - L*0.85*xp)**2)/a**2))
    B = np.maximum(0,B)  # clip negative parabola tails to flat bottom
    U0 = np.zeros((Neq,Nk))
    B = 0.5*(B[0:Nk] + B[1:Nk+1]); # b
    U0[0,:] = np.maximum(0, 0.5*(ic1[0:Nk] + ic1[1:Nk+1]) - B) # h
    U0[1,:] = 0.5*(ic1[0:Nk]*ic2[0:Nk] + ic1[1:Nk+1]*ic2[1:Nk+1]) # hu
    U0[2,:] = 0.5*(ic1[0:Nk]*ic3[0:Nk] + ic1[1:Nk+1]*ic3[1:Nk+1]) # hr
    return U0, B
###############################################################
def init_cond_topog_cos(x,Nk,Neq,H0,L,A,V):
    """Topography from superposed cosines (thesis ch. 6); hu = 1, flat depth."""
    ic1 = H0*np.ones(len(x))
    ic2 = 1/ic1  # so that hu = 1 everywhere
    ic3 = np.zeros(len(x))
    k = 2*np.pi
    xp = 0.1
    waven = [2,4,6]
    amps = [0.2, 0.1, 0.2]  # local amplitudes (the A argument is unused here)
    B = amps[0]*(1+np.cos(k*(waven[0]*(x-xp)-0.5)))+ amps[1]*(1+np.cos(k*(waven[1]*(x-xp)-0.5)))+ amps[2]*(1+np.cos(k*(waven[2]*(x-xp)-0.5)))
    B = 0.5*B
    # flatten the topography outside the central bump region: zero everything
    # left of the first minimum and right of the last one
    flat_indices = np.where(B <= np.min(B)+1e-10)[0]
    B[:flat_indices[0]] = 0
    B[flat_indices[-1]:] = 0
    U0 = np.zeros((Neq,Nk))
    B = 0.5*(B[0:Nk] + B[1:Nk+1])  # cell-averaged topography
    U0[0,:] = np.maximum(0, 0.5*(ic1[0:Nk] + ic1[1:Nk+1]) - B) # h
    U0[1,:] = 0.5*(ic1[0:Nk]*ic2[0:Nk] + ic1[1:Nk+1]*ic2[1:Nk+1]) # hu
    U0[2,:] = 0.5*(ic1[0:Nk]*ic3[0:Nk] + ic1[1:Nk+1]*ic3[1:Nk+1]) # hr
    return U0, B
###############################################################
|
import time
import numpy as np
import pandas as pd
import xgboost as xgb
import shap
import itertools
import scipy
#_________________________________
# Source data: run transshipment_trips.sql and transshipment_loitering.sql,
# exporting the results as the CSV files read below.
encounter = pd.read_csv('transshipment_trips.csv')
loitering = pd.read_csv('transshipment_loitering.csv')
#_________________________________
# Assemble one row of one-hot predictors per trip.
# (fixed: renamed `all` -> `trips`; the original shadowed the builtin all())
trips = encounter.dropna(subset=['carrier_flag_group','neighbor_flag_group', 'time_at_sea', 'neighbor_vessel_class']).copy()
trips.reset_index(inplace=True, drop=True)
# data frame for predictors (one row per trip)
foo = trips.groupby('gfw_trip_id').first()
# one-hot encode time at sea
tas = trips.time_at_sea.unique()
for value in tas:
    foo[value] = [1 if x == value else 0 for x in foo.time_at_sea]
# one-hot encode flags of carrier vessels
carrier_flags = trips.carrier_flag_group.unique()
for value in carrier_flags:
    foo[value] = [1 if x == value else 0 for x in foo.carrier_flag_group]
foo = foo[np.append(tas, carrier_flags)]
# one-hot encode flags of encountered fishing vessels
bar = pd.DataFrame()
bar['gfw_trip_id'] = trips.gfw_trip_id
neighbor_flags = trips.neighbor_flag_group.unique()
for value in neighbor_flags:
    bar['with_' + value] = [1 if x == value else 0 for x in trips.neighbor_flag_group]
# one-hot encode vessel class of encountered fishing vessels
neighbor_vessels = trips.neighbor_vessel_class.unique()
for value in neighbor_vessels:
    bar['with_' + value] = [1 if x == value else 0 for x in trips.neighbor_vessel_class]
# summarize within trip id (0: no encounter, 1: encountered)
bar = bar.groupby('gfw_trip_id').sum()
bar = bar.applymap(lambda x: (x > 0)*1) #convert n > 0 to n = 1
foo = foo.merge(bar, left_index=True, right_index=True)
# loitering indicator per trip (1 if any loitering event, else 0)
foo['loitering'] = loitering.groupby('gfw_trip_id').count().ssvid
foo['loitering'] = foo.loitering.fillna(0)
foo['loitering'] = [1 if x > 0 else 0 for x in foo.loitering]
foo['no_loitering'] = 1 - foo.loitering
# get the subset of trips that have a port risk assessment
bar = trips.groupby('gfw_trip_id').first()
bar['n_total_to'] = bar.to_iuu_no + bar.to_iuu_low + bar.to_iuu_med + bar.to_iuu_high
## for labor abuse
## bar['n_total_to'] = bar.to_la_no + bar.to_la_low + bar.to_la_med + bar.to_la_high
obs = foo[bar.n_total_to > 0].copy()
# weighted risk score: low/med/high visits add, no-risk visits subtract
obs['risk_score'] = 1/3*bar.to_iuu_low + 2/3*bar.to_iuu_med + bar.to_iuu_high - bar.to_iuu_no
## for labor abuse
## obs['risk_score'] = 1/3*bar.to_la_low + 2/3*bar.to_la_med + bar.to_la_high - bar.to_la_no
obs['type'] = 'obs'
# training input for the model that predicts missing port risk scores
x_obs = obs.drop(columns=['risk_score', 'type']).copy()
y_obs = obs.risk_score.astype('float')
dtrain = xgb.DMatrix(data=x_obs,label=y_obs)
dtrain = xgb.DMatrix(data=x_obs,label=y_obs)
# fit model
params = {'eta':0.01, 'min_child_weight':1, 'max_depth':10, 'colsample_bytree':0.6}
n_trees = 300
evals_result = {}
bst = xgb.train(params=params, dtrain=dtrain, num_boost_round=n_trees, evals=[(dtrain, 'train')],
verbose_eval=50, evals_result=evals_result)
#______________________________________
# predict
# score the trips that lack an observed port risk rating
bar = foo.drop(obs.index).copy()
x = xgb.DMatrix(bar)
y_pred = bst.predict(x)
y_pred = pd.DataFrame(y_pred, columns=['risk_score'], index=bar.index)
bar['risk_score'] = y_pred
bar['type'] = 'pred'
# combine observed and predicted risk scores
foo = pd.concat([obs, bar])
# add coordinates
# NOTE(review): indexing both event tables by trip id lets the risk_score
# assignment align by label; events whose trip received no score are dropped.
encounter.set_index('gfw_trip_id', inplace=True)
encounter['risk_score'] = foo.risk_score
encounter.dropna(subset=['risk_score'], inplace=True)
x = encounter[['lon_mean', 'lat_mean', 'risk_score']].copy()
loitering.set_index('gfw_trip_id', inplace=True)
loitering['risk_score'] = foo.risk_score
loitering.dropna(subset=['risk_score'], inplace=True)
y = loitering[['lon_mean', 'lat_mean', 'risk_score']].copy()
xy = pd.concat([x, y])
# class boundaries: score < 0 -> 0, 0 <= score < 2 -> 1, score >= 2 -> 2
threshold = [0,2]
xy['risk_class'] = [0 if x < threshold[0] else 1 if x < threshold[1] else 2 for x in xy.risk_score]
#________________________________________________
# SHAP interaction values
# pairwise attribution tensor of shape (n_samples, n_features, n_features)
explainer = shap.TreeExplainer(bst)
shap_value = explainer.shap_interaction_values(x_obs)
#________________________________
# feature importance
# Positional column groups of x_obs: TAS membership, own flag group,
# encountered flag group, encountered gear type, loitering indicator.
# NOTE(review): these slices assume the exact column order produced above —
# re-verify if columns are ever added or reordered.
is_tas_idx = slice(0,5,1)
is_flag_idx = slice(5,10,1)
with_flag_idx = slice(10,15,1)
with_gear_idx = slice(15,23,1)
loitering_idx = slice(23,25,1)
# every unordered pair of feature groups, including a group with itself
col_idx = list(itertools.combinations_with_replacement([is_tas_idx, is_flag_idx, with_flag_idx, with_gear_idx, loitering_idx],2))
col_name = list(itertools.combinations_with_replacement(['is_tas', 'is_flag', 'with_flag', 'with_gear', 'loitering'],2))
# per-sample total interaction attribution between each pair of groups
foo = [[shap_value[i,x1,x2].sum() for (x1,x2) in col_idx] for i in range(x_obs.shape[0])]
foo = pd.DataFrame(foo)
foo.columns = col_name
# summarize
importance = pd.DataFrame()
importance['mean'] = foo.abs().mean(axis=0)
importance['sd'] = foo.abs().std(axis=0)
importance['se'] = foo.abs().sem(axis=0)
importance['lower'] = foo.abs().quantile(q=0.025, axis=0)
importance['upper'] = foo.abs().quantile(q=0.975, axis=0)
#___________________________
# effect of features when present
# model baseline
X = xgb.DMatrix(x_obs)
y_pred = bst.predict(X)
base = np.mean(y_pred)
# Group pairs again, but with the encountered-flag and gear groups expanded
# to their individual columns (positions 10..22); mixing slices and bare ints
# is fine because shap_value[i, x1, x2] accepts either.
col_idx = list(itertools.combinations_with_replacement([is_tas_idx, is_flag_idx,
    10,11,12,13,14,15,16,17,18,19,20,21,22,loitering_idx],2))
col_name = list(itertools.combinations_with_replacement(['is_tas', 'is_flag', 'with_china', 'with_group3',
    'with_group2', 'with_other', 'with_group1', 'with_squid_jigger',
    'with_set_longline', 'with_drifting_longline', 'with_pots_and_traps',
    'with_trawlers', 'with_purse_seine', 'with_pole_and_line',
    'with_set_gillnet', 'loitering'],2))
foo = [[shap_value[i,x1,x2].sum() for (x1,x2) in col_idx] for i in range(x_obs.shape[0])]
foo = pd.DataFrame(foo)
foo.columns = col_name
## combination of features
a = list(itertools.combinations_with_replacement(x_obs.columns,2))
b1 = list(itertools.combinations(x_obs.columns[is_tas_idx],2))
b2 = list(itertools.combinations(x_obs.columns[is_flag_idx],2))
b3 = list(itertools.combinations(x_obs.columns[loitering_idx],2))
# drop pairs within the mutually exclusive one-hot groups
combo = list(set(a).difference(set(b1 + b2 + b3)))
# corresponding class
# NOTE(review): `x` maps each x_obs column position to its display name; it is
# shadowed by the loop variable `x` below, which is safe only because combo2
# is fully materialized first.
x = ['is_tas']*5 + ['is_flag']*5 + ['with_china', 'with_group3',
    'with_group2', 'with_other', 'with_group1', 'with_squid_jigger',
    'with_set_longline', 'with_drifting_longline', 'with_pots_and_traps',
    'with_trawlers', 'with_purse_seine', 'with_pole_and_line',
    'with_set_gillnet'] + ['loitering']*2
combo2 = [(x[x_obs.columns.get_loc(x1)],x[x_obs.columns.get_loc(x2)]) for (x1,x2) in combo]
# select solo features
solo_idx = list(np.where([x1==x2 for (x1,x2) in combo])[0])
# summarize
mean = []; sd = []; se = []; lower = []; upper = []
for x in solo_idx:
    bar = foo[combo2[x]] # SHAP values of solo features
    true_idx = list(np.where(x_obs[combo[x][0]]==1)[0]) # find where it is true
    bar2 = bar[true_idx]
    if len(bar2) > 1:
        # shift by the baseline so effects read as predicted risk scores
        mean.append(np.mean(bar2) + base)
        sd.append(np.std(bar2))
        se.append(scipy.stats.sem(bar2))
        lower.append(np.quantile(bar2, 0.025) + base)
        upper.append(np.quantile(bar2, 0.975) + base)
    if len(bar2) <= 1:
        mean.append(np.nan)
        sd.append(np.nan)
        se.append(np.nan)
        lower.append(np.nan)
        upper.append(np.nan)
solo_effect = pd.DataFrame()
solo_effect['mean'] = mean
solo_effect['sd'] = sd
solo_effect['se'] = se
solo_effect['lower'] = lower
solo_effect['upper'] = upper
solo_effect.index = [combo[x][0] for x in solo_idx]
solo_effect.dropna(inplace=True)
# select combo features
combo_idx = list(np.where([x1!=x2 for (x1,x2) in combo])[0])
# summarize
mean = []; sd = []; se = []; lower = []; upper = []
for x in combo_idx:
    # each member's main effect plus the shared interaction term
    bar1 = np.array(foo[(combo2[x][0], combo2[x][0])]) + np.array(foo[combo2[x]])
    bar2 = np.array(foo[(combo2[x][1], combo2[x][1])]) + np.array(foo[combo2[x]])
    # NOTE(review): the interaction term is included in both bar1 and bar2, so
    # it is counted twice in the sum — confirm this is intended.
    bar = bar1 + bar2
    # find where it is true
    true_idx = list(np.where(np.logical_and(x_obs[combo[x][0]]==1, x_obs[combo[x][1]]==1))[0])
    bar2 = bar[true_idx]
    if len(bar2) > 1:
        mean.append(np.mean(bar2) + base)
        sd.append(np.std(bar2))
        se.append(scipy.stats.sem(bar2))
        lower.append(np.quantile(bar2, 0.025) + base)
        upper.append(np.quantile(bar2, 0.975) + base)
    if len(bar2) <= 1:
        mean.append(np.nan)
        sd.append(np.nan)
        se.append(np.nan)
        lower.append(np.nan)
        upper.append(np.nan)
combo_effect = pd.DataFrame()
combo_effect['mean'] = mean
combo_effect['sd'] = sd
combo_effect['se'] = se
combo_effect['lower'] = lower
combo_effect['upper'] = upper
combo_effect.index = [combo[x] for x in combo_idx]
combo_effect.dropna(inplace=True)
# final table of baseline-shifted effects for solo and paired features
effect = pd.concat([solo_effect, combo_effect])
|
# Print the first character of the input line that occurs exactly once
# (prints nothing when every character repeats).
text = input()
for ch in text:
    if text.count(ch) == 1:
        print(ch)
        break
|
# -*- coding: utf-8 -*-
#
# __init__.py
# ========================
# A low-entropy nucleic/amino acid
# sequencing masking library.
# ========================
#
# Copyright 2017 Joseph Szymborski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import log
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
def overlapping_windows(sequence, L):
    """
    Returns overlapping windows of size `L` from sequence `sequence`

    :param sequence: the nucleotide or protein sequence to scan over
    :param L: the length of the windows to yield
    """
    # One window per position that still has L residues ahead of it; a
    # sequence shorter than L yields no windows at all.
    return [sequence[start:start + L]
            for start, _ in enumerate(sequence)
            if start + L <= len(sequence)]
def compute_rep_vector(sequence, N):
    """
    Computes the repetition vector (as seen in Wooton, 1993) from a
    given sequence of a biopolymer with `N` possible residues.

    :param sequence: the nucleotide or protein sequence to generate a repetition vector for.
    :param N: the total number of possible residues in the biopolymer `sequence` belongs to.
    """
    # Collect the distinct residues in first-appearance order, capped at N.
    seen = []
    for residue in sequence:
        if residue in seen:
            continue
        seen.append(residue)
        if len(seen) == N:
            break
    # Count each distinct residue, then zero-pad so the vector has length N.
    counts = [sequence.count(residue) for residue in seen]
    counts.extend([0] * (N - len(counts)))
    return sorted(counts, reverse=True)
def complexity(sequence, N):
    """
    Computes the Shannon Entropy of a given sequence of a
    biopolymer with `N` possible residues. See (Wooton, 1993)
    for more.

    :param sequence: the nucleotide or protein sequence whose Shannon Entropy is to calculated.
    :param N: the total number of possible residues in the biopolymer `sequence` belongs to.
    """
    L = len(sequence)
    # Entropy in base N over the residue frequencies; zero counts are
    # skipped since lim p->0 of -p*log(p) is 0.
    return sum(-(n / L) * log(n / L, N)
               for n in compute_rep_vector(sequence, N)
               if n != 0)
def mask_low_complexity(seq_rec, maskchar="x", N=20, L=12):
    """
    Masks low-complexity nucleic/amino acid sequences with
    a given mask character.

    Windows whose Shannon entropy falls more than one standard deviation
    below the mean window entropy are masked; a residue is masked in the
    output when more than 3 of the windows covering it were masked.

    :param seq_rec: a Biopython Sequence Record
    :param maskchar: Character to mask low-complexity residues with.
    :param N: Number of residues to expect in the sequence. (20 for AA, 4 for DNA)
    :param L: Length of sliding window that reads the sequence.
    :returns: tuple of (masked SeqRecord, per-residue list of window votes)
    """
    windows = overlapping_windows(seq_rec.seq, L)
    # BUG FIX: complexity() expects the raw window sequence (it derives the
    # repetition vector itself). The original passed the repetition vector,
    # computing the entropy *of the count vector* instead of the window.
    # Compute each window's entropy once, directly from the window.
    window_complexity_pairs = [(window, complexity(window, N)) for window in windows]
    complexities = np.array([pair[1] for pair in window_complexity_pairs])
    avg_complexity = complexities.mean()
    std_complexity = complexities.std()
    # One standard deviation below the mean (the original took
    # min(mean+sd, mean-sd), which is always mean-sd since sd >= 0).
    k1_cutoff = avg_complexity - std_complexity
    # alignment[i] collects, for residue i, the character each covering
    # window contributed (either the residue itself or the mask character).
    alignment = [[] for _ in range(len(seq_rec.seq))]
    for window_offset, (window, window_complexity) in enumerate(window_complexity_pairs):
        if window_complexity < k1_cutoff:
            window = maskchar * L
        for residue_offset, residue in enumerate(window):
            alignment[window_offset + residue_offset].append(residue)
    new_seq = []
    for residue_array in alignment:
        # mask a residue when more than 3 of its covering windows masked it
        if residue_array.count(maskchar) > 3:
            new_seq.append(maskchar)
        else:
            new_seq.append(residue_array[0])
    new_seq = "".join(new_seq)
    return (SeqRecord(Seq(new_seq), seq_rec.id, description=seq_rec.description), alignment)
|
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from flask.ext import restful
import flask
from api import helpers
import auth
import model
import util
from main import api_v1
@api_v1.resource('/project/', endpoint='api.project.list')
class ProjectListAPI(restful.Resource):
  @auth.login_required
  def get(self):
    """List the signed-in user's projects with a pagination cursor."""
    dbs, cursor = model.Project.get_dbs(user_key=auth.current_user_key())
    return helpers.make_response(dbs, model.Project.FIELDS, cursor)
@api_v1.resource('/project/<string:project_key>/', endpoint='api.project')
class ProjectAPI(restful.Resource):
  @auth.login_required
  def get(self, project_key):
    """Return one project owned by the signed-in user, or a 404 response."""
    project_db = ndb.Key(urlsafe=project_key).get()
    # Treat other users' projects exactly like missing ones.
    owned = project_db and project_db.user_key == auth.current_user_key()
    if not owned:
      helpers.make_not_found_exception('Project %s not found' % project_key)
    return helpers.make_response(project_db, model.Project.FIELDS)
###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/project/', endpoint='api.admin.project.list')
class AdminProjectListAPI(restful.Resource):
  @auth.admin_required
  def get(self):
    """Admin listing: the specific projects named by `project_keys`, or a
    paginated list of all projects when no keys are given.
    """
    project_keys = util.param('project_keys', list)
    if project_keys:
      project_db_keys = [ndb.Key(urlsafe=k) for k in project_keys]
      project_dbs = ndb.get_multi(project_db_keys)
      # BUG FIX: was `model.project.FIELDS` (lowercase), which would raise
      # AttributeError at runtime; every other endpoint in this module uses
      # `model.Project.FIELDS`.
      return helpers.make_response(project_dbs, model.Project.FIELDS)
    project_dbs, project_cursor = model.Project.get_dbs()
    return helpers.make_response(project_dbs, model.Project.FIELDS, project_cursor)
@api_v1.resource('/admin/project/<string:project_key>/', endpoint='api.admin.project')
class AdminProjectAPI(restful.Resource):
  @auth.admin_required
  def get(self, project_key):
    """Admin view of a single project; 404 when the key resolves to nothing."""
    entity = ndb.Key(urlsafe=project_key).get()
    if not entity:
      helpers.make_not_found_exception('Project %s not found' % project_key)
    return helpers.make_response(entity, model.Project.FIELDS)
|
#encoding=utf-8
from gensim.models import word2vec

if __name__ == '__main__':
    # Train a 50-dimensional word2vec model on the text8-format corpus, then
    # print the terms most similar to the query word with their similarities.
    corpus = word2vec.Text8Corpus(u'data/words.txt')
    model = word2vec.Word2Vec(corpus, size=50)
    for term, score in model.most_similar(u"北京"):
        print(term, score)
|
# 1. Read the number of test cases.
case_count = int(input())
# 2. Each case is a line of space-separated integers; print "#<case> <max>".
for case_no in range(1, case_count + 1):
    values = [int(tok) for tok in input().split()]
    print(f'#{case_no} {max(values)}')
#!/usr/bin/python3
from main import *
from matplotlib import pyplot as plt
import numpy as np
import argparse
def rainbow_colors(n):
    """Return `n` evenly spaced colors sampled from the gist_rainbow map."""
    cmap = plt.cm.gist_rainbow
    return [cmap(t) for t in np.linspace(0, 1, n)]
def get_args():
    """Parse CLI options; `-f/--filter` limits the plot to one party."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '-f', '--filter', default='',
        help='filter on a specific party')
    return cli.parse_args()
# TODO: Refactor, extract functions, use arguments instead of relying on functions in main.py
def plot(args):
    """Plot, per parliamentary year, the percentage of votes in which each
    pair of parties' majorities voted the same way.

    :param args: argparse namespace; ``args.filter`` optionally restricts
        the plotted pairs to those involving a single party.
    """
    # year ids like "20022003" .. "20172018"
    yearids = ["20" + str(n).rjust(2, "0") + str(n+1).rjust(2, "0") for n in range(2,18)]
    print("Plotting for year {} through {}".format(yearids[0], yearids[-1]))
    # normalize the old party code "FP" to "L"
    replace_name = lambda x: x if x != 'FP' else 'L'
    agreements_by_year = {}
    n_votings = {}
    for yearid in yearids:
        votings = get_votings(yearid)
        agreements_by_year[yearid] = get_agreements(votings, replace_name=replace_name)
        n_votings[yearid] = len(votings)
    party_pairs = agreements_by_year[yearids[-1]].keys()
    # Set to None or "" to not filter by party
    filter_party = args.filter #"M"
    def includes_party(pair):
        # keep "A-B" pairs containing the filter party; drop malformed pairs
        # that begin with "-"
        has_party = filter_party in pair.split("-") if filter_party else True
        return pair[0] != "-" and has_party
    party_pairs = sorted(list(filter(includes_party, party_pairs)))
    print("Party pairs: {}".format(party_pairs))
    # Set a colormap that is easier to differentiate from
    plt.gca().set_prop_cycle('color', rainbow_colors(len(party_pairs)))
    lines = []
    for party_pair in party_pairs:
        agreements = []
        for yearid in sorted(agreements_by_year.keys()):
            try:
                agreements.append(100 * agreements_by_year[yearid][party_pair] / n_votings[yearid])
            except KeyError:
                # If one of the parties in the pair is missing from data
                agreements.append(None)
        lines.append(plt.plot(range(len(yearids)), agreements, label=party_pair))
    plt.title("Percentage of polls where a pair of parties respective majority voted the same")
    plt.style.use('ggplot')
    plt.xlabel("Year")
    plt.xlim(0, len(yearids)-1)
    plt.xticks(range(len(yearids)), [yid[:-2] for yid in yearids])
    plt.ylabel("")
    plt.ylim(0, 100)
    plt.yticks(np.linspace(0, 100, 6), ["", "20%", "40%", "60%", "80%", "100%"])
    plt.legend()
    plt.tight_layout()
    plt.grid()
    plt.setp(lines, linewidth=3)
    # NOTE(review): plt.show() takes no positional list of lines in current
    # matplotlib; passing `lines` relies on the old positional `block`
    # argument — confirm intended matplotlib version.
    plt.show(lines)
if __name__ == "__main__":
    # Parse the CLI options and render the agreement plot.
    plot(get_args())
|
class Human():
    '''A person with a name and a body weight in kilograms.'''

    def __init__(self, name, weight):
        """Initialize the person's name and weight."""
        print("__init__실행")
        self.name = name
        self.weight = weight

    def __str__(self):
        """Return a readable description of this person.

        BUG FIX: the original body was only a docstring, so __str__ returned
        None and str()/print() on an instance raised TypeError.
        """
        return "{}({}kg)".format(self.name, self.weight)

    def eat(self):
        # BUG FIX: the original mutated the module-level `person` instead of
        # `self`, so the method only worked for that one global instance.
        self.weight += 0.1
        print("{}가 먹어서{}kg이 되었습니다".format(self.name, self.weight))

    def walk(self):
        # BUG FIX: same global-instance bug as eat(); operate on `self`.
        self.weight -= 0.1
        print("{}가 걸어서{}가 되었습니다".format(self.name, self.weight))
# Demo: create a person and show the initial attributes.
person = Human("사람", 60.5)
print(person.name)
print(person.weight)
import threading
from .cli import Console
from .concurrent import AtomicMemoryReference
from .concurrent import MemoryUpdater
class MemvisController(object):
    """Wires a background memory updater to a console view of a process."""

    def __init__(self, pid, width=26, height=10, start_address=None, use_ptrace=True, convert_ascii=True):
        self.pid = pid
        self.memory_reference = AtomicMemoryReference()
        self.memory_updater = MemoryUpdater(
            pid, self.memory_reference, use_ptrace)
        # Default the view to the target's stack pointer when no explicit
        # start address was requested.
        if start_address is not None:
            self.start_address = start_address
        else:
            self.start_address = self.memory_updater.get_stack_pointer()
        self.console = Console(
            pid, self.start_address, self.memory_reference, page_height=height, page_width=width,
            convert_ascii=convert_ascii)

    def start(self):
        """Run the updater, block on the console UI, then stop the updater."""
        self.memory_updater.start()
        self.console.start()
        self.memory_updater.stop()
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
from dash.dependencies import Input,Output,State
import pandas as pd
import sqlalchemy
import random
from server import app
from global_var import label_table,mean_table,db_0_50,get_source_show_table,get_show_table_id,get_show_db
import make_model
from listing_page import transe_df_col
# TODO model 적용 뒤, query에 쓰기 위해 사용될 column
def source_id_table_pair(df):
    """Extract pair rows from a similarity DataFrame.

    :param df: frame with SOURCE_ID_1/2, SOURCE_1/2 and SIM_LEVEL columns
    :returns: list of [id1, id2, source1, source2, sim_level] lists, one per row

    Replaces the original index-by-index append loop with a single zip over
    the relevant columns (same output, idiomatic and O(n) without repeated
    list indexing).
    """
    cols = ['SOURCE_ID_1', 'SOURCE_ID_2', 'SOURCE_1', 'SOURCE_2', 'SIM_LEVEL']
    return [list(row) for row in zip(*(df[c] for c in cols))]
# Layout for the "adapt" page: choose a model, set a probability threshold,
# review predicted pairs class-by-class, hand-label them, and save to the DB.
# Hidden Divs (style={'display':'none'}) are used as client-side state stores.
adapt_page=html.Div([
    html.A(' 1 . select model to adapt'),
    dcc.Dropdown(
        id='adapt_model_select_drop',
        options=[{'label':'logistic_regression','value':'log'},
            {'label':'random_forest','value':'rf'},
            {'label':'GBC','value':'gbc'},
            {'label':'ensemble','value':'ens'},
            {'label':'ECLF','value':'eclf'}
        ]
    ),
    html.Button('select model',id='adapt_model_sel_btn'),
    html.Div(id='selected_model'),
    html.Div(id='adapt_process',style={'display':'none'}),
    html.Br(),
    html.A(' 2. threshold 를 정하세요 '),
    html.Br(),
    html.A(' same class는 100%에서 (100-threshold)% 사이의 pair이고, '),
    html.Br(),
    html.A(' unsure class는 (100-threshold) 와 threshold 사이, '),
    html.Br(),
    html.A(' differernt 는 threshold보다 작은 class 입니다. '),
    html.Br(),
    dcc.Input(id='adapt_thresh',type='number'),
    html.Button('adapt thresh',id='adapt_thresh_btn'),
    html.Div(id='adapt_thresh_var',style={'display':'none'}),
    html.Br(),
    html.A(' 3. select class want to see, and click select class button'),
    dcc.Dropdown(
        id='select_class',
        options=[{'label':'same','value':1},
            {'label':'different','value':0},
            {'label':'unsure','value':2}
        ]
    ),
    html.Button('select class',id='select_class_btn'),
    # NOTE(review): the id 'seleced_class_var' is misspelled but referenced
    # consistently by the callbacks below, so it must not be renamed in
    # isolation.
    html.Div(id='seleced_class_var'),
    html.Br(),
    html.Div(id='adapt_show_table'),
    html.Br(),
    html.Button('yes',id='adapt_yes',n_clicks_timestamp=0),
    html.Button('no',id='adapt_no',n_clicks_timestamp=0),
    html.Button('unsure',id='adapt_unsure',n_clicks_timestamp=0),
    html.Button('back',id='adapt_back',n_clicks_timestamp=0),
    html.Button('reset',id='adapt_reset',n_clicks_timestamp=0),
    html.Div(id='adapt_list_var',style={'display':'none'}),
    html.Br(),
    html.Button('save to db',id='save_to_db'),
    html.Div(id='save_db_div'),
    html.Div(id='predict_var',style={'display':'none'})
])
# After fitting the model on labeled data, the model predicts the other
# (not yet assessed mean-table) data.
@app.callback(
    Output('adapt_process','children'),
    [Input('selected_model','children')]
)
def adapting_model(value):
    """Sample 100 unlabeled feature pairs from the DB and stash them, with the
    chosen model name, as JSON in the hidden `adapt_process` div.
    """
    #print(value)
    if value is not None:
        #df_set=make_model.get_data_to_db_for_statistic(db_0_50,mean_table,label_table)
        test_set=pd.DataFrame()
        # pairs that have features but no label yet (anti-join on the label table)
        query=(""" select A.SOURCE_ID_1,A.SOURCE_ID_2
        from ci_dev.SIM_FEATURES_test A
        left JOIN ci_dev.features_lable B
        on A.SOURCE_ID_1 = B.SOURCE_ID_1 and A.SOURCE_ID_2=B.SOURCE_ID_2
        where B.source_id_1 is null and A.source_1!=A.source_2 and A.pair_source='phonenum_B' """)
        # and A.SOURCE_1=B.SOURCE_1 and A.source_2=B.source_2
        print('get id!')
        test_set_id=pd.read_sql_query(query,db_0_50)
        test_zip=zip(list(test_set_id['SOURCE_ID_1']),list(test_set_id['SOURCE_ID_2']))
        test_zip_list=list(test_zip)
        # a random sample keeps the manual labeling batch small
        # NOTE(review): random.sample raises ValueError when fewer than 100
        # unlabeled pairs remain — confirm that is acceptable here.
        test_pair=random.sample(test_zip_list,100)
        #test_pair=test_pair[0:100]
        test_set_query=(""" select * from ci_dev.SIM_FEATURES_test where (source_id_1,source_id_2) in {} and source_1!=source_2""".format(tuple(test_pair)))
        test_set=pd.read_sql(test_set_query,db_0_50)
        test_set=source_id_table_pair(test_set)
        print(test_set)
        #print(test_set[1])
        res=[value,test_set]
        res=pd.Series(res).to_json(orient='values')
        print(res)
        return res
@app.callback(
    Output('predict_var','children'),
    [Input('adapt_process','children')]
)
def predict_set(value):
    """Fit the selected model on the labeled data and score the sampled
    pairs; store the positive-class probabilities as JSON.
    """
    if value is not None:
        df=pd.read_json(value,orient='values')
        # df[0][0] is the model key, df[0][1] the sampled pair rows
        val=df[0][0]
        test_set=pd.DataFrame()
        df_set=make_model.get_data_to_db_for_statistic(db_0_50,mean_table,label_table)
        # NOTE(review): an unknown `val` leaves `model` unbound and raises
        # NameError at predict time — confirm the dropdown guarantees a match.
        if val == 'rf':
            model = make_model.random_forest(df_set,test_set,0)[0]
        elif val == 'log':
            model = make_model.logistic_score(df_set,test_set,0)[0]
        elif val == 'gbc':
            model = make_model.GBC(df_set,test_set,0)[0]
        elif val == 'ens':
            model = make_model.ENSE(df_set,test_set,0)[0]
        elif val == 'eclf':
            model = make_model.ECLF(df_set,test_set,0)[0]
        # rebuild (id1, id2, source1, source2) tuples for the feature query
        src_id=[]
        for idx in range(0,len(df[0][1])):
            src_id.append((df[0][1][idx][0],df[0][1][idx][1],df[0][1][idx][2],df[0][1][idx][3]))
        #TODO
        query=(""" select * from ci_dev.SIM_FEATURES_test where """+
        """ (source_id_1,source_id_2,source_1,source_2) in {} """.format(tuple(src_id)))
        src_df=pd.read_sql_query(query,db_0_50)
        res_test=make_model.make_set(src_df)
        # probability of the positive ("same") class per pair
        res=model.predict_proba(res_test)[:,1]
        return pd.Series(res).to_json(orient='values')
@app.callback(
    Output('adapt_thresh_var','children'),
    [Input('adapt_thresh_btn','n_clicks')],
    [State('adapt_thresh','value'),
    State('predict_var','children'),
    State('adapt_process','children')]
)
def set_thresh(n_clicks,value,pre,src):
    """Bucket each sampled pair into same/different/unsure by threshold.

    `value` is a percentage: probabilities above (100-value)% are "same",
    below value% are "different", everything else "unsure". Returns the
    three lists of pair indices serialized as JSON.

    (Cleanup: removed the unused yes/no/unsure_adapt_arr locals and the
    pointless `add = idx` alias from the original.)
    """
    print('??')
    if n_clicks is not None:
        thresh_var=value/100
        source=pd.read_json(src,orient='values')[0][1]
        predict=pd.read_json(pre,orient='values')[0]
        class_true=[]
        class_false=[]
        class_unsure=[]
        for idx in range(0,len(source)):
            if predict[idx]>(1-thresh_var):
                class_true.append(idx)
            elif predict[idx]<thresh_var:
                class_false.append(idx)
            else:
                class_unsure.append(idx)
        print('true')
        print(len(class_true))
        print('false')
        print(len(class_false))
        print('unsure')
        print(len(class_unsure))
        res=[class_true,class_false,class_unsure]
        return pd.Series(res).to_json(orient='values')
@app.callback(
    Output('seleced_class_var','children'),
    [Input('select_class_btn','n_clicks')],
    [State('select_class','value')]
)
def set_class(n_clicks,value):
    # Store the selected class (1=same, 0=different, 2=unsure) once the
    # "select class" button has been clicked at least once.
    if n_clicks is not None:
        return value
@app.callback(
    Output('adapt_show_table','children'),
    [Input('seleced_class_var','children'),
    Input('adapt_list_var','children')],
    [State('predict_var','children'),
    State('adapt_process','children'),
    State('adapt_thresh_var','children')]
)
def make_adapt_table(select_cls,arr,pre,src,clas):
    """Render the next unreviewed pair of the selected class as a two-row
    table (one row per source record), color-coded by predicted probability.
    """
    if select_cls is not None:
        # restore the per-class label history; its length is how many pairs
        # of that class have already been reviewed
        if arr is not None:
            init=pd.read_json(arr,orient='values')
            yes_adapt_arr=list(init.loc[0])
            yes_adapt_arr=[x for x in yes_adapt_arr if x ==1 or x==0 or x==2]
            no_adapt_arr=list(init.loc[1])
            no_adapt_arr=[x for x in no_adapt_arr if x ==1 or x==0 or x==2]
            unsure_adapt_arr=list(init.loc[2])
            unsure_adapt_arr=[x for x in unsure_adapt_arr if x ==1 or x==0 or x==2]
        else:
            yes_adapt_arr=[]
            no_adapt_arr=[]
            unsure_adapt_arr=[]
        # NOTE(review): orient='value' (sic) — pandas documents 'values';
        # verify this works on the deployed pandas version.
        dff=pd.read_json(clas,orient='value')
        pred=list(pd.read_json(pre,orient='value')[0])
        sourc=pd.read_json(src,orient='values')[0][1]
        tr=list(dff.loc[0])
        ne=list(dff.loc[1])
        un=list(dff.loc[2])
        # number of already-labeled items = index of the next pair to show
        if select_cls==1:
            page_len=len(yes_adapt_arr)
        elif select_cls==0:
            page_len=len(no_adapt_arr)
        else:
            page_len=len(unsure_adapt_arr)
        print(page_len)
        if select_cls ==1:
            flag=tr[page_len]
        elif select_cls == 0:
            flag=ne[page_len]
        else:
            flag=un[page_len]
        flag=int(flag)
        src=sourc[flag]
        prob=pred[flag]
        #print(src)
        #print(prob)
        #print(predict)
        # resolve the display table, its id column and its DB per source
        source_f=get_source_show_table(src[2])
        source_id_f=get_show_table_id(source_f)
        f_db=get_show_db(source_f)
        source_s=get_source_show_table(src[3])
        source_id_s=get_show_table_id(source_s)
        s_db=get_show_db(source_s)
        # NOTE(review): %-formatted SQL; inputs come from our own DB here,
        # but parameterized queries would still be safer.
        adapt_table_query_f=""" select * from %s where %s = %d """%(source_f,source_id_f,src[0])
        adapt_table_query_s=""" select * from %s where %s = %d """%(source_s,source_id_s,src[1])
        adapt_table_f=pd.read_sql_query(adapt_table_query_f,f_db)
        adapt_table_f=transe_df_col(adapt_table_f,source_f)
        adapt_table_s=pd.read_sql_query(adapt_table_query_s,s_db)
        adapt_table_s=transe_df_col(adapt_table_s,source_s)
        adapt_table=pd.concat([adapt_table_f,adapt_table_s],sort=False)
        # first row green; second row light green (prob>=0.5) or red (<0.5)
        style_list=[{
            "if" : {"row_index":0},
            "backgroundColor": "#00cc00",
        }]
        if prob>=0.5:
            style_list.append({
                "if" : {"row_index":1},
                "backgroundColor": "#ccffcc"
            })
        elif prob<0.5:
            style_list.append({
                "if" : {"row_index":1},
                "backgroundColor": "#ff4d4d"
            })
        return html.Div([
            html.A('SOURCE 1 = %s'%(source_f)),
            html.Br(),
            html.A('SOURCE 2 = %s'%(source_s)),
            dt.DataTable(
                columns= [{"name":n,"id":n} for n in adapt_table.columns],
                data= adapt_table.to_dict('rows'),
                style_data_conditional=style_list,
                style_table={'overflowX': 'scroll'}
            )
        ])
@app.callback(
    Output('adapt_list_var','children'),
    [Input('adapt_yes','n_clicks_timestamp'),
    Input('adapt_no','n_clicks_timestamp'),
    Input('adapt_back','n_clicks_timestamp'),
    Input('adapt_reset','n_clicks_timestamp'),
    Input('adapt_unsure','n_clicks_timestamp')
    ],
    [State('seleced_class_var','children'),
    State('adapt_list_var','children')]
)
def adapt_list_set(yes,no,back,reset,unsure,clas,sel):
    """Update the per-class label lists in response to the yes/no/unsure/
    back/reset buttons and return them serialized as JSON.

    (Cleanup: the original restored the lists from `sel` twice in two
    identical back-to-back blocks; the second decode simply overwrote the
    first, so one restore suffices.)
    """
    if (yes != 0 or no !=0 or unsure!=0) and clas is not None :
        # restore the three label lists from the stored JSON state, keeping
        # only valid labels (1/0/2)
        if sel is not None:
            init=pd.read_json(sel,orient='values')
            yes_adapt_arr=[x for x in list(init.loc[0]) if x ==1 or x==0 or x==2]
            no_adapt_arr=[x for x in list(init.loc[1]) if x ==1 or x==0 or x==2]
            unsure_adapt_arr=[x for x in list(init.loc[2]) if x ==1 or x==0 or x==2]
        else:
            yes_adapt_arr=[]
            no_adapt_arr=[]
            unsure_adapt_arr=[]
        # pick the list for the currently selected class
        if clas ==1:
            res=yes_adapt_arr
        elif clas==0:
            res=no_adapt_arr
        else:
            res=unsure_adapt_arr
        print(res)
        # the most recent click timestamp tells us which button fired
        flag=max(yes,no,back,reset,unsure)
        if flag==0:
            flag=-1
        if flag==yes:
            res.append(1)
        elif flag==no:
            res.append(0)
        elif flag==back:
            res.pop()
        elif flag==unsure:
            res.append(2)
        else:
            # reset (or no button yet): clear the current class's list
            res=[]
        if clas ==1:
            yes_adapt_arr=res
        elif clas==0:
            no_adapt_arr=res
        else:
            unsure_adapt_arr=res
        res_re=[yes_adapt_arr,no_adapt_arr,unsure_adapt_arr]
        return pd.Series(res_re).to_json(orient='values')
@app.callback(
    Output('save_db_div','children'),
    [Input('save_to_db','n_clicks')],
    [
    State('adapt_process','children'),
    State('adapt_thresh_var','children'),
    State('adapt_list_var','children')]
)
def save_adapt_listing_to_db(n_clicks,src,clas,arr):
    """Persist every hand-assigned label to the label table; duplicate rows
    (IntegrityError) are silently skipped.
    """
    if n_clicks is not None:
        # NOTE(review): orient='value' (sic) — pandas documents 'values'.
        dff=pd.read_json(clas,orient='value')
        sourc=list(pd.read_json(src,orient='values')[0][1])
        # restore the per-class label histories (1/0/2 entries only)
        if arr is not None:
            init=pd.read_json(arr,orient='values')
            yes_adapt_arr=list(init.loc[0])
            yes_adapt_arr=[x for x in yes_adapt_arr if x ==1 or x==0 or x==2]
            no_adapt_arr=list(init.loc[1])
            no_adapt_arr=[x for x in no_adapt_arr if x ==1 or x==0 or x==2]
            unsure_adapt_arr=list(init.loc[2])
            unsure_adapt_arr=[x for x in unsure_adapt_arr if x ==1 or x==0 or x==2]
        else:
            yes_adapt_arr=[]
            no_adapt_arr=[]
            unsure_adapt_arr=[]
        # per-class index lists produced by set_thresh
        tr=list(dff.loc[0])
        ne=list(dff.loc[1])
        un=list(dff.loc[2])
        # NOTE(review): these INSERTs are built with %-formatting; values come
        # from our own DB/UI, but parameterized execution would be safer.
        if yes_adapt_arr != []:
            for idx in range(0,len(yes_adapt_arr)):
                flag=int(tr[idx])
                insert_ad_query_yes= (""" INSERT INTO %s """%(label_table)+
                """ VALUES (%s,%s,'%s','%s',%d,%d) """
                %(sourc[flag][0],sourc[flag][1],
                str(sourc[flag][2]),str(sourc[flag][3]),
                yes_adapt_arr[idx],sourc[flag][4]))
                print(insert_ad_query_yes)
                try:
                    db_0_50.execute(insert_ad_query_yes)
                except sqlalchemy.exc.IntegrityError:
                    # row already labeled; keep the existing label
                    pass
        if no_adapt_arr !=[]:
            for idx in range(0,len(no_adapt_arr)):
                flag=int(ne[idx])
                insert_ad_query_no=(""" INSERT INTO %s """%(label_table)+
                """ VALUES (%s,%s,'%s','%s',%d,%d) """
                %(sourc[flag][0],sourc[flag][1],
                str(sourc[flag][2]),str(sourc[flag][3]),
                no_adapt_arr[idx],sourc[flag][4]))
                print(insert_ad_query_no)
                try:
                    db_0_50.execute(insert_ad_query_no)
                except sqlalchemy.exc.IntegrityError:
                    pass
        if unsure_adapt_arr !=[]:
            for idx in range(0,len(unsure_adapt_arr)):
                flag=int(un[idx])
                insert_ad_query_un=(""" INSERT INTO %s """%(label_table)+
                """ VALUES (%s,%s,'%s','%s',%d,%d) """
                %(sourc[flag][0],sourc[flag][1],
                str(sourc[flag][2]),str(sourc[flag][3]),
                unsure_adapt_arr[idx],sourc[flag][4]))
                print(insert_ad_query_un)
                try:
                    db_0_50.execute(insert_ad_query_un)
                except sqlalchemy.exc.IntegrityError:
                    pass
|
# Generated by Django 3.1 on 2020-08-09 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1: converts the nullable `metadata` field on
    # Comment and on historicalcomment to models.JSONField. Generated file —
    # avoid hand-editing the operations.
    dependencies = [
        ('comments', '0005_auto_20200424_1144'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='metadata',
            field=models.JSONField(null=True),
        ),
        migrations.AlterField(
            model_name='historicalcomment',
            name='metadata',
            field=models.JSONField(null=True),
        ),
    ]
|
import keras
import keras.backend as K
from keras.models import Model
from keras.layers import (Input, Convolution2D, Activation, BatchNormalization,
merge, GlobalAveragePooling2D, Dense, Dropout)
from keras.regularizers import l2
from rme.datasets import cifar10, cifar100, svhn, mnist, preprocessing
from rme.callbacks import Step
def bottleneck_layer(x, num_channels, kernel_size, l2_reg, stride=1,
                     first=False, name=''):
    '''
    Resnet preactivation bottleneck layer with 1x1xn, 3x3xn, 1x1x4n convolution
    layers.
    '''
    def preact(tensor, idx):
        # BN followed by ReLU, tagged with the sub-layer index for unique names
        tensor = BatchNormalization(name='%s_bn%d' % (name, idx))(tensor)
        return Activation('relu', name='%s_relu%d' % (name, idx))(tensor)

    # The stack entry point is already preactivated upstream.
    out = x if first else preact(x, 1)
    # 1x1 conv squeezes the channel count down to the bottleneck width.
    out = Convolution2D(num_channels, 1, 1,
                        border_mode='same', init='he_normal',
                        W_regularizer=l2(l2_reg), bias=False,
                        name=name + '_conv1')(out)
    # 3x3 conv carries the (optional) stride, as in Facebook's implementation.
    out = preact(out, 2)
    out = Convolution2D(num_channels, kernel_size, kernel_size,
                        subsample=(stride, stride),
                        border_mode='same', init='he_normal',
                        W_regularizer=l2(l2_reg), bias=False,
                        name=name + '_conv2')(out)
    # 1x1 conv expands back out to four times the bottleneck channels.
    out = preact(out, 3)
    out = Convolution2D(num_channels*4, 1, 1,
                        border_mode='same', init='he_normal',
                        W_regularizer=l2(l2_reg), bias=False,
                        name=name + '_conv3')(out)
    return out
def two_conv_layer(x, num_channels, kernel_size, l2_reg, stride=1,
                   first=False, name=''):
    '''
    Regular resnet preactivation two convolution 3x3 layer.
    '''
    def conv3x3(tensor, suffix, strides):
        return Convolution2D(num_channels, kernel_size, kernel_size,
                             subsample=strides,
                             border_mode='same', init='he_normal',
                             W_regularizer=l2(l2_reg), bias=False,
                             name=name + suffix)(tensor)

    if first:
        # The stack entry point is already preactivated upstream.
        out = x
    else:
        out = BatchNormalization(name=name + '_bn1')(x)
        out = Activation('relu', name=name + '_relu1')(out)
    # Only the first conv carries the (optional) downsampling stride.
    out = conv3x3(out, '_conv1', (stride, stride))
    out = BatchNormalization(name=name + '_bn2')(out)
    out = Activation('relu', name=name + '_relu2')(out)
    out = conv3x3(out, '_conv2', (1, 1))
    return out
def residual_block(x, num_channels, kernel_size, l2_reg, bottleneck, stride=1,
                   first=False, name=''):
    '''
    Resnet residual block. Output is the sum of the layer's output and the
    input (shortcut connection).

    :param bottleneck: use the 1x1/3x3/1x1 bottleneck unit instead of two
        3x3 convolutions.
    :param stride: downsampling stride applied inside the unit (and to the
        shortcut projection, when one is needed).
    :param first: the block sits right after the stem; skip the leading
        BN-ReLU since the input is already activated.
    '''
    if bottleneck:
        out = bottleneck_layer(x, num_channels, kernel_size, l2_reg,
                               stride=stride, first=first, name=name)
        # if first:
        #     # Shortcut needs mapping for the first bottleneck layer
        #     x = Convolution2D(num_channels * 4, 1, 1,
        #                       border_mode='valid', init='he_normal',
        #                       W_regularizer=l2(l2_reg), bias=False,
        #                       name=name + '_shortcut_proj')(x)
    else:
        out = two_conv_layer(x, num_channels, kernel_size, l2_reg,
                             stride=stride, first=first, name=name)
    # B-type shortcut: identity when shapes match, otherwise a (preactivated)
    # 1x1 projection to the new size.
    out_shape = K.int_shape(out)
    if out_shape == K.int_shape(x): # Identity mapping
        shortcut = x
    else: # If dimensions change, we project the input to the new size
        if first:
            # Do not apply BN-ReLU
            shortcut = x
        else:
            shortcut = BatchNormalization(name=name + '_shortcut_bn')(x)
            shortcut = Activation('relu', name=name + '_shortcut_relu')(shortcut)
        shortcut = Convolution2D(out_shape[-1], 1, 1, subsample=(stride, stride),
                                 border_mode='valid',
                                 init='he_normal', W_regularizer=l2(l2_reg),
                                 bias=False, name=name + '_shortcut_conv')(shortcut)
    out = merge([shortcut, out], mode='sum', name=name + '_sum')
    return out
def downsample_block(x, num_channels, kernel_size, l2_reg, bottleneck,
                     name=''):
    '''
    Resnet residual block that downsamples the feature maps.

    The BN-ReLU preactivation is shared by the residual branch and the
    shortcut projection, so both paths see the same activated input.
    '''
    # Perform pre-activation for both the residual and the projection
    x = BatchNormalization(name=name+'_shared_bn')(x)
    x = Activation('relu', name=name+'_shared_relu')(x)
    if bottleneck:
        out = bottleneck_layer(x, num_channels, kernel_size, l2_reg,
                               stride=2, first=True, name=name)
        # The output channels is 4x bigger on this case
        num_channels = num_channels * 4
    else:
        out = two_conv_layer(x, num_channels, kernel_size, l2_reg,
                             stride=2, first=True, name=name)
    # Projection on the shortcut
    # Pre-activated conv
    proj = Convolution2D(num_channels, 1, 1, subsample=(2, 2),
                         border_mode='valid', init='he_normal',
                         W_regularizer=l2(l2_reg), bias=False,
                         name=name + '_shortcut_proj')(x)
    # proj = AveragePooling2D((1, 1), (2, 2))(x)
    out = merge([proj, out], mode='sum', name=name + '_sum')
    return out
def block_stack(x, num_channels, num_blocks, kernel_size, l2_reg, bottleneck,
                first=False, name=''):
    '''
    Stack of ResNet residual units sharing one feature-map size.

    The leading unit either keeps the resolution (``first=True``, used for
    the stack right after the stem) or downsamples with stride 2; the
    remaining ``num_blocks - 1`` units are plain residual blocks.
    '''
    if first:
        out = residual_block(x, num_channels, kernel_size, l2_reg, bottleneck,
                             first=True, name=name + '_resblock1')
    else:
        out = residual_block(x, num_channels, kernel_size, l2_reg, bottleneck,
                             stride=2, name=name + '_downsample')
    # Units 2..num_blocks run at the (possibly reduced) resolution.
    for idx in range(2, num_blocks + 1):
        out = residual_block(out, num_channels, kernel_size, l2_reg, bottleneck,
                             name=name + '_resblock%d' % idx)
    return out
def model(dataset, num_blocks=18, width=1, bottleneck=True, l2_reg=1e-4):
    '''
    Pre-activation ResNet. Supports both regular and bottleneck residual
    units and uses B-type shortcuts: identity where input and output
    shapes match, otherwise a 1x1 (possibly strided) projection.

    [1] He et al. `Deep Residual Learning for Image Recognition`:
    https://arxiv.org/abs/1512.03385
    [2] He et al. `Identity Mappings in Deep Residual Networks`:
    https://arxiv.org/abs/1603.05027
    '''
    if dataset == 'cifar10':
        x = Input((32, 32, 3))
    else:
        raise ValueError('Model is not defined for dataset: %s' % dataset)
    # Stem: 3x3 convolution followed by BN-ReLU.
    o = Convolution2D(16, 3, 3, border_mode='same', init='he_normal',
                      W_regularizer=l2(l2_reg), bias=False)(x)
    o = BatchNormalization()(o)
    o = Activation('relu')(o)
    # Three stacks; only the first keeps the input resolution.
    widths = [16 * width, 32 * width, 64 * width]
    for i, (n, is_first) in enumerate(zip(widths, [True, False, False])):
        o = block_stack(o, n, num_blocks, 3, l2_reg, bottleneck,
                        first=is_first, name='stack%d' % (i + 1))
    # Final BN-ReLU, global pooling, and softmax classifier head.
    o = BatchNormalization(name='last_bn')(o)
    o = Activation('relu', name='last_relu')(o)
    o = GlobalAveragePooling2D()(o)
    o = Dense(10)(o)
    o = Activation('softmax')(o)
    return Model(input=x, output=o)
def preprocess_data(train_set, valid_set, test_set, dataset):
    '''Apply the per-dataset preprocessing to each of the three splits.'''
    if dataset != 'cifar10':
        raise ValueError('Preprocessing not defined for dataset: %s' % dataset)
    return (cifar10.preprocess(train_set),
            cifar10.preprocess(valid_set),
            cifar10.preprocess(test_set))
def default_args(dataset):
    '''
    Default training hyper-parameters for a dataset.

    Returns an empty dict (after printing a warning) when no defaults are
    defined for ``dataset``.
    '''
    if dataset == 'cifar10':
        return {'lr': 0.1, 'epochs': 164, 'batch_size': 64}
    print('Default args not defined for dataset: %s' % dataset)
    return {}
def schedule(dataset, lr):
    '''
    Learning-rate schedule: divide ``lr`` by 10 at epochs 82 and 123.
    '''
    if dataset != 'cifar10':
        raise ValueError('Schedule not defined for dataset: %s' % dataset)
    return Step([82, 123], [lr, lr / 10, lr / 100])
|
def on_enter(event_data):
    """
    Callback run when the parking state is entered.

    Clears the active observation, closes the dome, then sends the mount
    home and parks it.
    """
    pocs = event_data.model
    # State to transition to once parking completes.
    pocs.next_state = 'parked'
    pocs.say("Parking Huntsman.")
    # Clear any current observation
    pocs.observatory.current_observation = None
    pocs.observatory.close_dome()
    pocs.say("I'm takin' it on home and then parking.")
    pocs.observatory.mount.home_and_park()
|
# NOTE(review): Python 2 only -- uses print statements, urllib.urlretrieve
# and `except Exception, e` syntax.
import cv2
import urllib
import sys, os
import xml.etree.ElementTree as ET

# Paths are resolved relative to this script's directory.
pwd = os.path.abspath(os.path.dirname(__file__))
image_dir_name = pwd + '/../images/'
tmp_folder = pwd + '/../tmp/'

if __name__ == '__main__':
    # Each subdirectory of pwd is treated as one label holding the XML
    # annotation files for that label.
    for x in os.walk(pwd):
        if (x[0] != pwd):
            label = x[0].split('/')[-1]
            print 'Writing bbox images for %s' % label
            # Build image-id -> download-URL map from <label>_imageurls.txt.
            image_urls_mapping = {}
            with open(label + "_imageurls.txt") as f:
                for line in f:
                    if line.strip():
                        (key, val) = line.split()
                        image_urls_mapping[key] = val
            print 'Image URL mapping created for %s' % label
            count = 0
            for xml_filename in x[2]:
                if not xml_filename.endswith('.xml'): continue
                tree = ET.parse(label + '/' + xml_filename)
                root = tree.getroot()
                filename = root.findall('./filename')[0].text
                local_filename = tmp_folder + '%s.JPEG' % filename
                img = cv2.imread(local_filename)
                # Download the image if it is not cached locally yet.
                if img is None:
                    url = image_urls_mapping.get(filename)
                    if url is not None:
                        try:
                            urllib.urlretrieve(url, local_filename)
                            img = cv2.imread(local_filename)
                        except Exception,e:
                            print "Error downloading image file at url: %s" % url
                            print e
                if img is not None:
                    # Only the first annotated object's bounding box is used.
                    xmin = int(root.findall('./object/bndbox/xmin')[0].text)
                    ymin = int(root.findall('./object/bndbox/ymin')[0].text)
                    xmax = int(root.findall('./object/bndbox/xmax')[0].text)
                    ymax = int(root.findall('./object/bndbox/ymax')[0].text)
                    count = count + 1
                    # Crop (numpy indexing is [y, x]) and save the crop.
                    bbox_img = img[ymin:ymax, xmin:xmax]
                    bbox_filename = image_dir_name + label + '_' + filename + "_bbox.jpg"
                    cv2.imwrite(bbox_filename, bbox_img)
                    if count % 50 == 0:
                        print '%s bbox images downloaded so far' % str(count)
                else:
                    print local_filename + ' could not be read'
    print 'Done'
    print 'You can now delete the folder %s' % tmp_folder
|
# BOJ 10845 (queue): answer push/front/back/size/empty/pop queries.
import sys
from collections import deque

input = sys.stdin.readline

n = int(input())
# BUGFIX (perf): `del q[0]` on a list is O(n) per pop; deque.popleft() is O(1).
q = deque()
for _ in range(n):
    cmd = input().split()
    if cmd[0] == 'push':
        q.append(cmd[1])
    elif cmd[0] == 'front':
        print(q[0] if q else -1)
    elif cmd[0] == 'back':
        print(q[-1] if q else -1)
    elif cmd[0] == 'size':
        print(len(q))
    elif cmd[0] == 'empty':
        print(0 if q else 1)
    else:  # 'pop'
        if q:
            print(q.popleft())
        else:
            print(-1)
from flask import Flask, request
import json
import uuid

from util import Address

app = Flask(__name__)
# Registry of known peer endpoints as (ip, port) tuples, shared by the
# handlers below.  NOTE(review): module-level in-memory state -- not
# persisted and not shared across multiple worker processes.
addresses = set()
@app.route("/allocate", methods=["POST"])
def allocate():
    """Register the caller's (ip, port) pair and return all known addresses."""
    endpoint = (request.remote_addr, request.form.get("port"))
    addresses.add(endpoint)
    payload = {"addresses": list(addresses)}
    return json.dumps(payload)
@app.route("/deallocate", methods=["POST"])
def deallocate():
    """Remove the caller's (ip, port) pair from the registry.

    BUGFIX: uses ``discard`` instead of ``remove`` so deallocating an
    unknown address no longer raises KeyError (an HTTP 500).
    """
    ip = request.remote_addr
    port = request.form.get("port")
    addresses.discard((ip, port))
    return "ok"
@app.route("/keep_alive", methods=["GET"])
def keep_alive():
    """Return the current address list without modifying it."""
    payload = {"addresses": list(addresses)}
    return json.dumps(payload)
if __name__ == '__main__':
    # Bind on all interfaces so remote hosts can reach the service.
    app.run("0.0.0.0", port=1140)
|
# # lua bindings shootout
# The MIT License (MIT)
#
# Copyright (c) 2018 ThePhD
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
import csv
import json
import math
import random
import bisect
import os
import argparse
import fnmatch
import sys
def parse_csv(c, data_point_names, time_scales):
    """Placeholder CSV parser: returns empty benchmark containers.

    NOTE(review): unlike parse_json this yields no 'min'/'max' heuristics,
    so draw_graph cannot run on CSV input yet.
    """
    return {}, {}
def parse_json(j, data_point_names, time_scales):
    """
    Parse a Google-Benchmark JSON document into per-benchmark records.

    Entries whose ``name`` equals their ``base_name`` are raw repetition
    data points; entries named ``<base_name>_<stat>`` (mean, stddev, ...)
    are aggregate statistics.  All times are normalized to seconds.

    Args:
        j: decoded JSON dict with a top-level "benchmarks" array.
        data_point_names: list of (field_name, lower_is_better) tuples;
            the first entry drives the final sort.
        time_scales: list of (unit, unit_name, to_seconds, from_seconds)
            tuples used to normalize each entry's ``time_unit``.

    Returns:
        (benchmarks, heuristics): ``benchmarks`` is a list of dicts sorted
        by mean; ``heuristics`` holds the global data-point "min"/"max"
        in seconds.
    """
    timescale_units = [x[0] for x in time_scales]
    benchmarks = []
    benchmark_heuristics = {
        "min": sys.float_info.max,
        "max": sys.float_info.min
    }
    for j_benchmark in j["benchmarks"]:
        name = j_benchmark['name']
        base_name = j_benchmark['base_name']
        potential_targets = [b for b in benchmarks if b['name'] == base_name]
        if len(potential_targets) < 1:
            benchmarks.append({
                "name": base_name,
                "data": {},
                "statistics": {},
                "name_index": {},
                "color_index": {}
            })
            benchmark = benchmarks[-1]
        else:
            # BUGFIX: previously this always used benchmarks[-1], which
            # attaches the entry to the wrong benchmark when entries for
            # one base_name are not contiguous in the input.
            benchmark = potential_targets[0]
        data = benchmark["data"]
        statistics = benchmark["statistics"]
        for point_name_lower in data_point_names:
            point_name = point_name_lower[0]
            if point_name not in data:
                data[point_name] = []
        # Normalize this entry's time unit to seconds.
        time_unit = j_benchmark['time_unit']
        unit_index = timescale_units.index(time_unit)
        to_seconds_multiplier = time_scales[unit_index][2]
        if name == base_name:
            # Raw data point: append to each tracked field and update the
            # global min/max heuristics.
            for point_name_lower in data_point_names:
                point_name = point_name_lower[0]
                point_adjusted = j_benchmark[point_name] * to_seconds_multiplier
                data[point_name].append(point_adjusted)
                benchmark_heuristics["max"] = max(
                    benchmark_heuristics["max"], point_adjusted)
                benchmark_heuristics["min"] = min(
                    benchmark_heuristics["min"], point_adjusted)
        else:
            # Aggregate statistic: strip the "<base_name>_" prefix to get
            # the statistic's name (mean, median, stddev, ...).
            statistic_name = name.replace(base_name + "_", "")
            statistic = statistics.setdefault(statistic_name, {})
            for point_name_lower in data_point_names:
                point_name = point_name_lower[0]
                statistic[point_name] = (
                    j_benchmark[point_name] * to_seconds_multiplier)

    def name_sorter(b):
        return b["name"]

    def mean_sorter(b):
        data_point_name = data_point_names[0][0]
        return b["statistics"]["mean"][data_point_name]

    # First sort by name so the name/color index assignment is stable.
    benchmarks.sort(key=name_sorter)
    for bi, b in enumerate(benchmarks):
        b["name_index"] = bi
        ci = b["color_index"]
        if len(data_point_names) < 2:
            # Single data point: color by benchmark.
            ci[data_point_names[0][0]] = bi
        else:
            # Multiple data points: color by data point.
            for dpi, dp in enumerate(data_point_names):
                ci[dp[0]] = dpi
    # Then order by mean so graphs read lowest-to-highest (reversed when
    # lower values are better).
    lower_is_better = data_point_names[0][1]
    benchmarks.sort(key=mean_sorter, reverse=lower_is_better)
    return benchmarks, benchmark_heuristics
def draw_graph(name, benchmarks, benchmark_heuristics, data_point_names,
               time_scales):
    """
    Render one horizontal bar (mean +/- stddev) plus a jittered scatter of
    raw samples per benchmark and data point.

    Returns the matplotlib (figure, axes) pair; the caller saves it.
    """
    # initialize figures
    figures, axes = plt.subplots()
    # get the values of the time scale to perform bisecting
    time_scale_values_from_seconds = [x[2] for x in time_scales]
    benchmarks_max = benchmark_heuristics["max"]
    benchmarks_min = benchmark_heuristics["min"]
    # (color, hatch) pairs so adjacent bars stay distinguishable
    # yapf: disable
    data_point_aesthetics = [
        ('#a6cee3', '/'),
        ('#f255bb', 'O'),
        ('#00c9ab', '\\'),
        ('#b15928', 'o'),
        ('#33a02c', '.'),
        ('#fb9a99', '*'),
        ('#e31a1c', '+'),
        ('#fdbf6f', 'x'),
        ('#ff7f00', '|'),
        ('#cab2d6', None),
        ('#6a3d9a', '-'),
        ('#ffff99', 'xx'),
        ('#f5f5f5', '..'),
        ('#1f78b4', '||'),
        ('#b2df8a', '**'),
        ('#cc33cc', '--')
    ]
    # yapf: enable
    # transpose data into forms we need
    benchmark_names = [b["name"] for b in benchmarks]
    bars = []
    scatters = []
    num_data_points = len(data_point_names)
    bar_padding = 0.15
    bar_height = 0.35
    bar_all_sizes = bar_height * num_data_points + bar_padding
    quarter_bar_height = bar_height * 0.25
    bar_y_positions = []
    # draw mean-based bars with error indicators
    # and draw scatter-plot points
    for bi, benchmark in enumerate(benchmarks):
        for di, data_point_name_lower in enumerate(data_point_names):
            data_point_name = data_point_name_lower[0]
            bar_y = (bi * bar_all_sizes) + (di * bar_height) + (
                bar_padding * 0.5)
            bar_y_positions.append(bar_y)
            mean = benchmark["statistics"]["mean"][data_point_name]
            stddev = benchmark["statistics"]["stddev"][data_point_name]
            color_index = benchmark["color_index"][data_point_name]
            aesthetics = data_point_aesthetics[color_index]
            color = aesthetics[0]
            # darken the fill color to derive the bar edge color
            colorhsv = matplotlib.colors.rgb_to_hsv(
                matplotlib.colors.hex2color(color))
            colorhsv[2] *= 0.6
            edgecolor = matplotlib.colors.hsv_to_rgb(colorhsv)
            hatch = aesthetics[1]
            bar = axes.barh(
                bar_y,
                mean,
                height=bar_height,
                xerr=stddev,
                linewidth=0.2,
                edgecolor=edgecolor,
                color=color,
                hatch=hatch,
                align='edge',
                error_kw={
                    "capsize": 5.0,
                    "mew": 1.2,
                    "ecolor": 'black',
                },
                alpha=0.82)
            bars.append(bar)
            # the scatter plot should be semi-transparent in color...
            xscatter = benchmark["data"][data_point_name]
            xscatter_len = len(xscatter)
            # jitter samples vertically inside the bar's band
            yscatter = [
                bar_y + random.uniform(quarter_bar_height,
                                       bar_height - quarter_bar_height)
                for _ in xscatter
            ]
            # fade the dots as the sample count grows
            scatter_alpha = 0.20 if xscatter_len < 11 else 0.10 if xscatter_len < 101 else 0.05 if xscatter_len < 1001 else 0.002
            scatter = axes.scatter(
                xscatter,
                yscatter,
                color=color,
                edgecolor='black',
                linewidth=0.5,
                alpha=scatter_alpha)
            scatters.append(scatter)
    # pick the largest time unit that still fits the data
    xscaleindex = bisect.bisect_left(time_scale_values_from_seconds,
                                     benchmarks_max)
    xscale = time_scales[xscaleindex - 1]

    def time_axis_formatting(value, pos):
        # Convert seconds to the chosen display unit for the tick labels.
        if value == 0:
            return '0'
        if value.is_integer():
            return '{0:.0f}'.format(value * xscale[3])
        return '{0:.2f}'.format(value * xscale[3])

    absoluterange = benchmarks_max - benchmarks_min
    axes.set_xlim([0, benchmarks_max + (absoluterange * 0.25)])
    axes.xaxis.set_major_formatter(
        mticker.FuncFormatter(time_axis_formatting))
    # have ticks drawn from base of bar graph
    # to text labels
    y_ticks = [((y + 0.5) * bar_all_sizes)
               for y in range(0, int(len(bar_y_positions) / num_data_points))]
    axes.set_yticks(y_ticks)
    # label each group (each cluster along the x axes)
    # with the names of the benchmarks we ran
    axes.set_yticklabels(benchmark_names)
    # if we have 2 or more data points,
    # a legend will help us label it all
    if (num_data_points > 1):
        # BUGFIX: mismatched parentheses previously produced a one-element
        # list containing a generator, which matplotlib cannot use as
        # legend labels; build a list of strings instead.
        legend_texts = [
            data_point_name[0] + ('- lower=good'
                                  if data_point_name[1] else 'higher=good')
            for data_point_name in data_point_names
        ]
        # retrieve the color/shape of the bar as a reference so we can construct
        bar_style_references = [bar[0] for bar in bars]
        # make legend
        axes.legend(bar_style_references, legend_texts)
        axes.set_xlabel('measured in ' + xscale[1])
    else:
        # no need to put a legend, it's basically fine as-is
        data_point_name = data_point_names[0]
        legend_text = (data_point_name[0], 'lower is better'
                       if data_point_name[1] else 'higher is better')
        axes.set_xlabel(legend_text[0] + ' measured in ' + xscale[1] +
                        ' - ' + legend_text[1])
    # set the benchmark name, typically derived from the file name
    axes.set_title(name)
    # get a nice, clean layout
    figures.tight_layout()
    # make sure to adjust top and bottoms
    figures.subplots_adjust(bottom=0.2)
    return figures, axes
def main():
    """
    CLI entry point: parse a Google-Benchmark json/csv file and save a
    bar/scatter graph next to the input (or at --output).
    """
    parser = argparse.ArgumentParser(
        description=
        'Generate graphs from a Google-Benchmark compatible json/csv listing of data'
    )
    parser.add_argument(
        '-i',
        '--input',
        nargs='?',
        default='ptrptr_benchmarks.json',
        type=argparse.FileType('r'))
    parser.add_argument('-f', '--input_format', nargs='?')
    parser.add_argument('-o', '--output', nargs='?')
    parser.add_argument(
        '-d', '--data_point_names', nargs='+', default=['real_time'])
    parser.add_argument('-l', '--lower', nargs='+', default=['real_time'])
    args = parser.parse_args()

    # Infer the format from the file extension when not given explicitly.
    args.input_format = args.input_format or ("csv" if fnmatch.fnmatch(
        args.input.name, "*.csv") else "json")
    if not args.output:
        directoryname, filename = os.path.split(args.input.name)
        file = os.path.splitext(filename)[0]
        args.output = os.path.join(directoryname, file + ".png")
    if len(args.data_point_names) < 1:
        print(
            "You must specify 1 or more valid data point names",
            file=sys.stderr)
        sys.exit(1)
    # Pair each data-point field with whether lower values are better.
    data_point_names = [(dpn, dpn in args.lower)
                        for dpn in args.data_point_names]
    # Fixed seed so the scatter jitter is reproducible between runs.
    random.seed(1782905257495843795)
    name = os.path.split(args.input.name)[1]
    name = os.path.splitext(name)[0]
    # (unit, unit name, unit->seconds, seconds->unit) conversion table.
    # BUGFIX: a microsecond is 1e-6 s; the table previously used 1e-5.
    time_scales = [
        ("fs", "femtoseconds", 1e-15, 1e+15),
        ("ps", "picoseconds", 1e-12, 1e+12),
        ("ns", "nanoseconds", 1e-9, 1e+9),
        ("µs", "microseconds", 1e-6, 1e+6),
        ("us", "microseconds", 1e-6, 1e+6),
        ("ms", "milliseconds", .001, 1000),
        ("s", "seconds", 1, 1),
        ("m", "minutes", 60, 1 / 60),
        ("h", "hours", 60 * 60, (1 / 60) / 60),
    ]
    is_csv = args.input_format == "csv"
    is_json = args.input_format == "json"
    if (not is_csv and not is_json):
        print(
            "You must specify either 'json' or 'csv' as the format.",
            file=sys.stderr)
        sys.exit(1)
    if is_csv:
        benchmarks, benchmark_heuristics = parse_csv(
            csv.reader(args.input), data_point_names, time_scales)
    else:
        benchmarks, benchmark_heuristics = parse_json(
            json.load(args.input), data_point_names, time_scales)
    draw_graph(name, benchmarks, benchmark_heuristics, data_point_names,
               time_scales)
    plt.savefig(args.output, bbox_inches='tight', transparent=False)
if __name__ == "__main__":
    # Script entry point.
    main()
"""data process tools"""
from __future__ import annotations
import csv
from typing import List, Literal
from src.schema import InputExample
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a comma-delimited file into a list of row lists.

        Args:
            input_file: path of the file to read.
            quotechar: csv quote character; defaults to '"'.
                BUGFIX: this argument was previously accepted but ignored.

        Note: despite the historical name, the delimiter is ',' -- the
        data files handled here are CSVs.
        """
        with open(input_file, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",", quotechar=quotechar or '"')
            return [line for line in reader]
class AgNewsDataProcessor(DataProcessor):
    """Converter for the AG News classification data set.

    Rows are expected as (label, title, description); the first (header)
    row is skipped.  Train/dev/test all share the same file layout.
    """

    def get_labels(self):
        # Closed label set for this data set.
        # NOTE(review): labels read from the csv arrive as str while these
        # are ints -- confirm the mismatch is intended downstream.
        return [1, 2, 3, 4]

    def get_examples(self, file: str) -> List[InputExample]:
        """Read one csv file and wrap each data row in an InputExample."""
        rows = self._read_tsv(file)
        examples: List[InputExample] = []
        for index, (label, title, description) in enumerate(rows[1:]):
            examples.append(InputExample(
                guid=f'guid-{index}',
                text_a=title,
                text_b=description,
                label=label,
            ))
        return examples

    def get_train_examples(self, data_dir) -> List[InputExample]:
        return self.get_examples(data_dir)

    def get_dev_examples(self, data_dir) -> List[InputExample]:
        return self.get_examples(data_dir)

    def get_test_examples(self, data_dir):
        return self.get_examples(data_dir)
|
import cv2
import numpy
import numpy as np
import pywt
import math
import scipy
import matplotlib.pyplot as plt
import os
import bisect
import GaussianMixtureClassifier
import CrossCorrelation
from xlwt import Workbook
class Localize:
    """
    Localize forged regions in a video from per-block correlation values.

    Pipeline: Thershold() binarizes each correlation frame, PreProcessing()
    denoises it, Localization() tracks the largest connected component per
    frame, and DrawBox() marks regions that persist across frames.
    """

    def __init__(self, CorrelationValues=None):
        # BUGFIX: the class previously defined __init__ twice; Python keeps
        # only the last definition, so the zero-argument constructor was
        # silently unavailable.  A default argument restores both forms.
        if CorrelationValues is None:
            CorrelationValues = []
        self.CorrelationValues = CorrelationValues

    def SetCorrelationValues(self):
        # Reset the stored correlation values.
        self.CorrelationValues = []

    def Thershold(self):
        '''
        operation: for each frame threshold the correlation value to 1 if value>=0.9 otherwise 0
        '''
        self.BinarizedCorrelationValues = []
        for Blocks in range(0, len(self.CorrelationValues)):
            # One-cell zero border around the frame (hence the +2 sizes and
            # the row+1/col+1 offsets below).
            BinarizedCorrelationValues = [[0 for i in range(len(self.CorrelationValues[Blocks][0]) + 2)] for j in
                                          range(len(self.CorrelationValues[Blocks]) + 2)]
            for row in range(0, len(self.CorrelationValues[Blocks])):  # for each correlation values frame
                for col in range(0,
                                 len(self.CorrelationValues[Blocks][row])):  # for each correlation value in the frame
                    binaryvalue = 1
                    if (self.CorrelationValues[Blocks][row][col] < .9):  # check if value <0.9 to threshold it to 0
                        binaryvalue = 0
                    BinarizedCorrelationValues[row + 1][col + 1] = binaryvalue  # store the thresholded value
            self.BinarizedCorrelationValues.append(
                BinarizedCorrelationValues)  # store all the binarized frames of blocks in the list
        self.PreProcessing()

    def PreProcessing(self):
        '''
        operation:
        remove noises from correlation values frame in all video frames
        '''
        self.SmoothedCorrelationValues = []
        for Blocks in range(0, len(self.BinarizedCorrelationValues)):
            SmoothedCorrelationValues = CrossCorrelation.CrossCorrelation.NoiseFilter(
                self.BinarizedCorrelationValues[Blocks])  # remove noise from frame
            self.SmoothedCorrelationValues.append(
                SmoothedCorrelationValues)  # list of lists to store the smoothed correlation values

    def ConnectedComponent(self, Frame):
        '''
        :param Frame: 2D array of correlation value
        :operation: find connected components
        :return: connected components
        '''
        blocks = np.uint8(Frame)
        ret, thresh = cv2.threshold(blocks, 0, 255, 0)
        # NOTE(review): the three-value unpack matches the OpenCV 3.x
        # findContours API; OpenCV 4.x returns only (contours, hierarchy).
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return contours

    def DrawBox(self, ForgedFrames, Video):
        '''
        :param ForgedFrames: list of lists of frame number and coordinates of forged region
        :param Video: video frames
        :operation: draw rectangle around the forged region if the video is forged
        :return: video and result
        '''
        Result = "Original"
        if len(ForgedFrames) > 0:
            Result = "Forged"
        for i in range(0, len(ForgedFrames)):
            # Average the box over the run of frames so the rectangle
            # drawn on every frame of the run is stable.
            avgx = 0
            avgy = 0
            avgw = 0
            avgh = 0
            k = ForgedFrames[i]
            for j in range(0, len(k)):
                avgx += k[j][1][0]
                avgy += k[j][1][1]
                avgw += k[j][1][2]
                avgh += k[j][1][3]
            avgx = int(avgx / len(k))
            avgy = int(avgy / len(k))
            avgw = int(avgw / len(k))
            avgh = int(avgh / len(k))
            for j in range(0, len(k)):
                cv2.rectangle(Video[k[j][0]], (avgx, avgy), (avgx + avgw, avgy + avgh), (0, 255, 0),
                              2)
        return Video, Result

    def Localization(self, Video):
        '''
        :param Video: actual video frames
        :operation: localize the forged region in the video
        :return: video and result
        '''
        tmp = []
        List_forged = []
        for Blocks in range(0, len(self.SmoothedCorrelationValues)):
            contours = self.ConnectedComponent(self.SmoothedCorrelationValues[Blocks])  # find all objects in the frame
            idx = 0
            MaxArea = 0
            for contour in range(0, len(contours), 1):
                area = cv2.contourArea(contours[contour])
                if (MaxArea < area):  # take the largest object in the frame
                    MaxArea = area
                    idx = contour
            if (MaxArea < 100):  # ignore very small objects
                # Run of detections ended: keep it only if long enough.
                if len(tmp) > 10:
                    List_forged.append(tmp)
                tmp = []
            else:
                x, y, w, h = cv2.boundingRect(contours[idx])  # find coordinates
                coor = [(x * 8), (y * 8), w * 8, h * 8]  # map block coordinates back to frame pixels (factor 8)
                tmp.append([Blocks + 1, coor])
        # Flush a run that extends to the last frame.
        if (len(tmp) > 10):
            List_forged.append(tmp)
        return self.DrawBox(List_forged, Video)  # draw rectangle
|
#!/usr/bin/env python3
# Copyright (c) 2015 Alex Richardson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# This software was developed at the University of Cambridge Computer
# Laboratory with support from a grant from Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import unittest
import os
import sys
import tempfile
sys.path.insert(0, os.path.abspath(".."))
import linkerwrapper
def getIrCommand(realCmd):
    """Build the IR-generation command for ``realCmd`` and strip the
    executable's directory so tests compare just the program name."""
    wrapper = linkerwrapper.ArWrapper(realCmd)
    wrapper.computeWrapperCommand()
    # we only want the executable name not the full path
    wrapper.generateIrCommand[0] = os.path.basename(wrapper.generateIrCommand[0])
    return list(wrapper.generateIrCommand)
class TestArWrapper(unittest.TestCase):
    """Checks the llvm-link command lines generated by the `ar` wrapper."""

    @classmethod
    def setUpClass(cls):
        # Work inside a temp dir and pre-create the bitcode file the
        # wrapper expects to find next to foo.o.
        cls.tempdir = tempfile.TemporaryDirectory()
        # print(cls.tempdir.name)
        os.chdir(cls.tempdir.name)
        # print(os.getcwd())
        cls.bitcodeFile = open(os.path.join(cls.tempdir.name, 'foo.o.bc'), 'x')

    @classmethod
    def tearDownClass(cls):
        # Leave the temp dir before it is removed, then clean up.
        os.chdir('/')
        cls.bitcodeFile.close()
        cls.tempdir.cleanup()

    # TODO: handle multiple definitions
    def testBasic(self):
        # Both 'cqs' and 'r' ar modes map to the same llvm-link call.
        command = getIrCommand("ar cqs foo foo.o".split())
        self.assertEqual(command, "llvm-link -libmd foo.o.bc -o foo.bc".split())
        command = getIrCommand("ar r foo foo.o".split())
        self.assertEqual(command, "llvm-link -libmd foo.o.bc -o foo.bc".split())

    def testMultipleDef(self):
        # Duplicate inputs are currently passed through verbatim.
        command = getIrCommand("ar r foo foo.o foo.o".split())
        self.assertEqual(command, "llvm-link -libmd foo.o.bc foo.o.bc -o foo.bc".split())
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
# Generated by Django 2.2.5 on 2019-11-12 04:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Vacuna model and adds an
    # optional many-to-many 'vacuna' field on Cuestionario.
    # NOTE(review): generated code -- do not hand-edit operations that
    # have already been applied to a database.

    dependencies = [
        ('cuestionario', '0003_auto_20191028_2010'),
    ]

    operations = [
        migrations.CreateModel(
            name='Vacuna',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='cuestionario',
            name='vacuna',
            field=models.ManyToManyField(blank=True, to='cuestionario.Vacuna'),
        ),
    ]
|
#!/usr/bin/env python
import os
import path
import platform
import subprocess
import sys
nuget_base = None
def find_tool(package, path='tools', platform_path=None):
    """
    Locate a tool inside an installed NuGet package.

    Args:
        package: NuGet package id (e.g. 'grpc.tools').
        path: sub-directory of the package's version folder to look in.
        platform_path: when set, the tool name to resolve inside the
            per-platform folder (a '.exe' suffix is also tried); when
            None, the bare `path` directory is returned.

    Returns:
        Absolute path of the tool or directory.  Exits the process with a
        diagnostic message on any failure.
    """
    global nuget_base
    if nuget_base is None:
        # Resolve the global packages folder once and cache it.
        if 'NUGET_PACKAGES_BASE' in os.environ:
            nuget_base = os.environ['NUGET_PACKAGES_BASE']
            if not os.path.exists(nuget_base):
                print("Couldn't find NuGet packages at", nuget_base)
                sys.exit(1)
        else:
            try:
                nuget_base = subprocess.run(
                    ['nuget', 'config', 'globalPackagesFolder'],
                    stdout=subprocess.PIPE, universal_newlines=True,
                    check=True).stdout.strip()
            except Exception:
                import traceback
                traceback.print_exc()
                print("Couldn't run NuGet")
                sys.exit(1)
            if not nuget_base:
                # Fall back to the conventional package locations.
                if os.path.exists(os.path.expanduser('~/.nuget/packages')):
                    nuget_base = os.path.expanduser('~/.nuget/packages')
                elif os.path.exists('packages'):
                    nuget_base = os.path.abspath('packages')
                else:
                    print("Couldn't locate NuGet packages")
                    sys.exit(1)
    pkg_path = os.path.join(nuget_base, package)
    if not os.path.exists(pkg_path):
        print("Package {0} not found. You can install it with `nuget install {0}`.".format(package))
        sys.exit(2)
    versions = os.listdir(pkg_path)
    if len(versions) == 0:
        print("Package {0} not found. You can install it with `nuget install {0}`.".format(package))
        sys.exit(2)
    elif len(versions) == 1:
        ver_path = os.path.join(pkg_path, versions[0])
    else:
        try:
            import semver
        except ImportError:
            print("You have more than one version of package {0}. Please install the Python semver package (`pip install semver`) so we can figure out which one to use.".format(package))
            sys.exit(3)
        # BUGFIX: semver.parse() returns a plain dict -- it cannot carry a
        # `_pathname` attribute and dicts do not sort, so version selection
        # raised.  Keep comparable (VersionInfo, dirname) pairs instead.
        parsed_versions = []
        for version in versions:
            try:
                parsed_versions.append(
                    (semver.VersionInfo.parse(version), version))
            except ValueError:
                pass  # ignore directories that are not valid semver
        if len(parsed_versions) == 0:
            print("Package {0} not found. You can install it with `nuget install {0}`.".format(package))
            sys.exit(2)
        parsed_versions.sort(key=lambda pair: pair[0])
        ver_path = os.path.join(pkg_path, parsed_versions[-1][1])
    if not platform_path:
        return os.path.join(ver_path, path)
    # Map this machine onto NuGet's tools/<platform>_<arch> layout.
    uname = platform.uname()
    platform_part = uname.system.lower()
    if platform_part == 'darwin':
        platform_part = 'macosx'
    elif platform_part.startswith('linux'):
        platform_part = 'linux'
    if uname.machine.endswith('64'):
        platform_part += '_x64'
    else:
        platform_part += '_x32'
    full_path = os.path.join(ver_path, path, platform_part, platform_path)
    if os.path.exists(full_path):
        return full_path
    elif os.path.exists(full_path + '.exe'):
        return full_path + '.exe'
    else:
        # NOTE(review): message interpolates the platform folder, not the
        # tool name -- confirm that is intended.
        print("Tool {1} of package {0} not found. You may be able to install it with `nuget install {0}`.".format(package, platform_part))
        sys.exit(4)
# Resolve protoc, the protobuf include files, and the C# gRPC plugin from
# the installed NuGet packages.
protoc = find_tool('grpc.tools', platform_path='protoc')
protobuf_tools = find_tool('google.protobuf.tools')
grpc_plugin = find_tool('grpc.tools', platform_path='grpc_csharp_plugin')

os.makedirs('ListenField/Client', exist_ok=True)

cmd = [
    protoc,
    '--plugin=protoc-gen-grpc=' + grpc_plugin,
    '-I', '../protos', '-I', protobuf_tools,
    '--csharp_out=.',
    # XXX the plugin generates all grpc files in one output directory!
    '--grpc_out=ListenField/Client',
    '--csharp_opt=base_namespace=',
    '../protos/repo/common-rpc.proto',
    '../protos/repo/common-types.proto',
    '../protos/repo/catalog-rpc.proto',
    '../protos/repo/catalog-types.proto',
    '../protos/api.proto',
    '../protos/auth.proto',
]
print(*cmd)
# BUGFIX: propagate protoc failures instead of silently ignoring the
# return code.
sys.exit(subprocess.run(cmd).returncode)
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
import PartituraToROM
import ROMtoVHDL
import ReadWriteFiles
def getVhdl():
    """
    Ensure the working folder (with its required files) exists; when it
    already does, run the score -> ROM -> VHDL conversions and write both
    outputs into it.
    """
    result = ReadWriteFiles.createUseFolder()
    if not result[1]:
        # Folder (or its inputs) was not ready; nothing to convert yet.
        # NOTE(review): result[1] looks like a readiness flag -- confirm.
        return
    folder = result[0]
    rom = PartituraToROM.linesToROM(folder)
    ReadWriteFiles.writeToFile(folder, ReadWriteFiles.ROM, rom)
    vhdl = ROMtoVHDL.getCodeFromFolder(folder)
    ReadWriteFiles.writeToFile(folder, ReadWriteFiles.VHDL, vhdl)
if (__name__ == "__main__"):
    # Run the conversion when executed as a script.
    getVhdl()
|
# mnist.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 10/13/15
#
# Description : Testing SP with MNIST.
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
"""
Testing SP with MNIST.
G{packagetree mHTM}
"""
__docformat__ = 'epytext'
# Native imports
import cPickle, time, os
from itertools import izip
# Third party imports
import numpy as np
from numpy.lib.stride_tricks import as_strided
# import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
# from scipy.misc import imread
# import matplotlib.gridspec as gridspec
# from sklearn.feature_extraction.image import extract_patches_2d
from joblib import Parallel, delayed
# Program imports
from mHTM.region import SPRegion
from mHTM.datasets.loader import load_mnist, MNISTCV
def get_windows(x, window_size):
    """
    Split a 2D array into non-overlapping square windows.

    Args:
        x: 2D array whose side lengths are divisible by ``window_size``.
        window_size: side length of each square window.

    Returns:
        2D array with one flattened window per row, of shape
        ``(nwindows, window_size ** 2)``.
    """
    # BUGFIX: use floor division so the shape stays integral under
    # Python 3 (true division yields floats, which as_strided rejects).
    shape = tuple(np.array(x.shape) // window_size) + (window_size, window_size)
    # Step window_size elements between windows; original strides within.
    strides = tuple(np.array(x.strides) * window_size) + x.strides
    windows = as_strided(x, shape=shape, strides=strides)
    # Flatten the window grid and each window's contents.
    return windows.reshape(shape[0] * shape[1], shape[2] * shape[3])
def plot_weights(input, weights, nrows, ncols, shape, out_path=None,
                 show=True):
    """
    Plot the input image beside a grid of weight visualizations.

    NOTE(review): depends on matplotlib's `plt`/`gridspec` and on
    `imread`, whose imports are commented out at the top of this module --
    re-enable them before calling.  The `nrows/2` grid index also relies
    on Python 2 integer division.
    """
    # Construct the basic plot
    fig = plt.figure(figsize=(16, 8))
    gs = gridspec.GridSpec(nrows, ncols + 2, wspace=0, hspace=0.5)
    # Input image, vertically centered in the first column.
    ax = plt.subplot(gs[nrows/2, 0])
    ax.imshow(input.reshape(shape), cmap=plt.get_cmap('gray'), vmin=0,
              vmax=1, interpolation='none')
    ax.axis('off')
    # Arrow graphic between the input and the weight grid.
    ax = plt.subplot(gs[nrows/2, 1])
    ax.imshow(imread('arrow2.png'), cmap=plt.get_cmap('gray'))
    ax.axis('off')
    # Add all of the figures to the grid
    for i, weight_set in enumerate(weights):
        row = i / ncols
        col = i + 2 - row * ncols
        ax = plt.subplot(gs[row, col])
        ax.imshow(weight_set.reshape(shape), cmap=plt.get_cmap('gray'), vmin=0,
                  vmax=1, interpolation='none')
        ax.axis('off')
    fig.patch.set_visible(False)
    # Save the plot
    # fig.set_size_inches(19.20, 10.80)
    if out_path is not None:
        # plt.savefig(out_path, format=out_path.split('.')[-1], dpi = 100)
        plt.savefig(out_path, format=out_path.split('.')[-1])
    # Show the plot and close it after the user is done
    if show: plt.show()
    plt.close()
def fit_grid():
    """
    Use a grid technique with many SPs.

    Second stage of a two-level hierarchy: loads the previously-trained
    per-patch spatial poolers (sp0-*.pkl) and trains one large SP (sp1-0.pkl)
    over the concatenation of their outputs. Python 2 only (print statements,
    xrange, izip).
    """
    # Output directory for pickled SPs (Windows-style relative path).
    p = 'results\\mnist_filter'
    # try:
    # os.makedirs(p)
    # except OSError:
    # pass
    np.random.seed(123456789)
    # kargs = {
    # 'ninputs': 9,
    # 'ncolumns': 100,
    # 'nsynapses': 5,
    # 'random_permanence': True,
    # 'pinc':0.03, 'pdec':0.05,
    # 'seg_th': 3,
    # 'nactive': 10,
    # 'duty_cycle': 100,
    # 'max_boost': 10,
    # 'global_inhibition': True,
    # 'trim': 1e-4
    # }
    # Top-level SP: input is 100 columns per patch x 26^2 patches.
    kargs2 = {
        'ninputs': 100 * (26 ** 2),
        'ncolumns': 2048,
        'nsynapses': 1000,
        'random_permanence': True,
        'pinc':0.03, 'pdec':0.05,
        'seg_th': 5,
        'nactive': 20,
        'duty_cycle': 100,
        'max_boost': 10,
        'global_inhibition': True,
        'trim': 1e-4
    }
    # Get the data
    (tr_x, tr_y), (te_x, te_y) = get_data()
    # 26x26 overlapping 3x3 patches per 28x28 image.
    nwindows = 26 ** 2
    # # Make the SPs
    # sps = [SPRegion(**kargs) for _ in xrange(nwindows)]
    # # Train the SPs
    # nepochs = 10
    # t = time.time()
    # for i in xrange(nepochs):
    # print i
    # for j, x in enumerate(tr_x):
    # print '\t{0}'.format(j)
    # nx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(
    # nwindows, 9)
    # for xi, sp in izip(nx, sps):
    # sp.step(xi)
    # t1 = time.time() - t
    # print t1
    # # Save this batch of SPs
    # for i, sp in enumerate(sps):
    # sp.learn = False
    # sp.save(os.path.join(p, 'sp0-{0}.pkl'.format(i)))
    # Make the top level SP
    sp2 = SPRegion(**kargs2)
    # Get the SPs
    # NOTE(review): `load` is not imported in this file's visible imports, and
    # `sp[2] == '0'` matches the '0' in filenames like 'sp0-...' by position.
    sps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']
    # Train the top SP
    nepochs = 10
    t = time.time()
    for i in xrange(nepochs):
        print i
        for j, x in enumerate(tr_x):
            print '\t{0}'.format(j)
            # All 3x3 patches of the image, one row per patch.
            nx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(
                nwindows, 9)
            # Concatenated boolean output of every patch-level SP.
            output = np.array(np.zeros(100 * nwindows), dtype='bool')
            for k, (xi, sp) in enumerate(izip(nx, sps)):
                sp.step(xi)
                output[k*100:(k*100)+100] = sp.y[:, 0]
            sp2.step(output)
    t2 = time.time() - t
    print t2
    # Save the top SP
    sp2.learn = False
    sp2.save(os.path.join(p, 'sp1-0.pkl'))
def score_grid():
    """
    Classify with the gridded SP.

    Rebuilds per-synapse feature vectors from the saved two-level SP hierarchy
    for the train and test sets, then scores a linear SVM. Python 2 only
    (xrange, izip, print statement, cPickle).
    """
    p = 'results\\mnist_filter'
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    # Get the SPs
    # NOTE(review): `load` is not among this file's visible imports.
    sps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']
    sp2 = load(os.path.join(p, 'sp1-0.pkl'))
    nwindows = 26 ** 2
    nfeat = 100 * nwindows
    # w = [sp2.p[sp2.syn_map == j] for j in xrange(nfeat)]
    # ms = max(wi.shape[0] for wi in w)
    # with open(os.path.join(p, 'data.pkl'), 'wb') as f:
    # cPickle.dump((w, ms), f, cPickle.HIGHEST_PROTOCOL)
    # ms is the max number of synapses mapped to any single input bit.
    with open(os.path.join(p, 'data.pkl'), 'rb') as f:
        w, ms = cPickle.load(f)
    # Get training data
    # NOTE(review): the loop below rebinds `x` and `w` (the loop variable and
    # the unpickled value above), which works but is easy to misread.
    tr_x2 = np.zeros((tr_x.shape[0], nfeat))
    for i, x in enumerate(tr_x):
        nx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(
            nwindows, 9)
        # Concatenated boolean outputs of the patch-level SPs.
        x = np.array(np.zeros(nfeat), dtype='bool')
        for j, (xi, sp) in enumerate(izip(nx, sps)):
            sp.step(xi)
            x[j*100:(j*100)+100] = sp.y[:, 0]
        # Permanence-weighted active inputs of the top SP.
        y = sp2.p * x[sp2.syn_map]
        w = np.zeros((nfeat, ms))
        for j in xrange(nfeat):
            a = y[sp2.syn_map == j]
            w[j][:a.shape[0]] = a
        # One mean value per input bit becomes the SVM feature.
        tr_x2[i] = np.mean(w, 1)
    # Get testing data (same pipeline as above).
    te_x2 = np.zeros((te_x.shape[0], nfeat))
    for i, x in enumerate(te_x):
        nx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(
            nwindows, 9)
        x = np.array(np.zeros(nfeat), dtype='bool')
        for j, (xi, sp) in enumerate(izip(nx, sps)):
            sp.step(xi)
            x[j*100:(j*100)+100] = sp.y[:, 0]
        y = sp2.p * x[sp2.syn_map]
        w = np.zeros((nfeat, ms))
        for j in xrange(nfeat):
            a = y[sp2.syn_map == j]
            w[j][:a.shape[0]] = a
        te_x2[i] = np.mean(w, 1)
    # Classify
    clf = LinearSVC(random_state=123456789)
    clf.fit(tr_x2, tr_y)
    print 'SVM Accuracy : {0:2.2f} %'.format(clf.score(te_x2, te_y) * 100)
def execute(sp, tr, te):
    """Fit *sp* on the training split, then persist its predictions.

    Trains the spatial pooler on *tr*, computes its outputs for both the
    training and testing splits, and stores them under the "predictions" key
    via the SP's own save mechanism.
    """
    sp.fit(tr)
    predictions = (sp.predict(tr), sp.predict(te))
    sp._save_data("predictions", predictions)
def first_level(log_dir, ntrain=800, ntest=200, nsplits=1, seed=123456789):
    """Train one small SP per non-overlapping 7x7 window of MNIST, in parallel.

    Splits each 28x28 digit into 16 windows, builds per-window train/test
    tensors, and runs `execute` for each SP with joblib. Labels for the chosen
    split are pickled to ``log_dir/labels.pkl``. Python 2 only (xrange, izip,
    ``.gen.next()``).
    """
    # Details of the filter
    win_size = 7
    total_win_size = win_size * win_size
    nwindows = 16
    # SP arguments
    kargs = {
        'ninputs': total_win_size,
        'ncolumns': 200,
        'nactive': 50,
        'global_inhibition': True,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': seed,
        'nsynapses': 35,
        'seg_th': 5,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'pwindow': 0.5,
        'random_permanence': True,
        'nepochs': 10,
        'log_dir': os.path.join(log_dir, '1-1')
    }
    # Get the data
    (tr_x, tr_y), (te_x, te_y) = load_mnist()
    x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y))
    # Split the data for CV: take the first fold's index arrays.
    tr, te = MNISTCV(tr_y, te_y, ntrain, ntest, nsplits, seed).gen.next()
    tr, te = tr[:ntrain], te[:ntest]
    # Store the labels to disk
    with open(os.path.join(log_dir, 'labels.pkl'), 'wb') as f:
        cPickle.dump((y[tr], y[te]), f, cPickle.HIGHEST_PROTOCOL)
    # Free what is no longer needed before building the big tensors.
    del tr_y; del te_y; del y;
    # Build the training data: (window, sample, flattened-window-bits).
    train_data = np.zeros((nwindows, ntrain, total_win_size), dtype='bool')
    for i in xrange(ntrain):
        xi = x[tr[i]]
        for j, window in enumerate(get_windows(xi.reshape(28, 28), win_size)):
            train_data[j, i] = window
    # Build the testing data
    test_data = np.zeros((nwindows, ntest, total_win_size), dtype='bool')
    for i in xrange(ntest):
        xi = x[te[i]]
        for j, window in enumerate(get_windows(xi.reshape(28, 28), win_size)):
            test_data[j, i] = window
    del tr_x; del te_x; del x
    # Make the SPs
    sps = [SPRegion(**kargs) for _ in xrange(nwindows)]
    # Execute the SPs in parallel, one job per window.
    Parallel(n_jobs=-1)(delayed(execute)(sp, tr, te) for sp, tr, te in izip(
        sps, train_data, test_data))
def second_level(log_dir, seed=123456789):
    """Train the top-level SP on the concatenated first-level predictions.

    Reads each first-level SP's ``predictions.pkl`` from subdirectories of
    *log_dir*, concatenates them into one boolean feature matrix per split,
    trains a large SP with a linear-SVM classifier, and prints the test score.
    Python 2 only (print statement).
    """
    # Get the paths to the data
    paths = []
    for d in os.listdir(log_dir):
        p = os.path.join(log_dir, d)
        if os.path.isdir(p): paths.append(os.path.join(p, 'predictions.pkl'))
    # Keep only the 16 first-level SP outputs, in a stable order.
    paths = sorted(paths)[:16]
    # Read in the first item, to determine the shape of the data
    tr, te = SPRegion.load_data(paths[0])
    ntrain, ntest = len(tr), len(te)
    n_base_cols = tr.shape[-1]
    ninputs = n_base_cols * len(paths)
    # Read in all of the data: one contiguous slice per first-level SP.
    tr_x = np.zeros((ntrain, ninputs), dtype='bool')
    te_x = np.zeros((ntest, ninputs), dtype='bool')
    for i, p in enumerate(paths):
        tr, te = SPRegion.load_data(p)
        tr_x[:, i*n_base_cols:(i+1)*n_base_cols] = tr
        te_x[:, i*n_base_cols:(i+1)*n_base_cols] = te
    # Read in the labels
    tr_y, te_y = SPRegion.load_data(os.path.join(log_dir, 'labels.pkl'))
    # SP arguments
    ncolumns = 4096
    kargs = {
        'ninputs': ninputs,
        'ncolumns': ncolumns,
        'nactive': int(ncolumns*0.2),
        'global_inhibition': True,
        'trim': 1e-4,
        'disable_boost': True,
        'seed': seed,
        'nsynapses': 100,
        'seg_th': 0,
        'syn_th': 0.5,
        'pinc': 0.001,
        'pdec': 0.001,
        'pwindow': 0.5,
        'random_permanence': True,
        'nepochs': 10,
        'log_dir': os.path.join(log_dir, '2-1'),
        'clf': LinearSVC(random_state=seed)
    }
    # Create the SP
    sp = SPRegion(**kargs)
    # Train the SP
    sp.fit(tr_x, tr_y)
    # Score the SP
    print sp.score(te_x, te_y)
def parallel_grid(log_dir, ntrain=800, ntest=200, nsplits=1, seed=123456789):
    """Run the full two-level experiment in a fresh timestamped directory.

    Parameters
    ----------
    log_dir : base directory; each run creates a YYYYmmdd-HHMMSS subdirectory.
    ntrain, ntest : number of training / testing samples.
    nsplits : number of CV splits.
    seed : seed for numpy's RNG and both SP levels.
    """
    # Make a new directory for this experiment
    new_dir = os.path.join(log_dir, time.strftime('%Y%m%d-%H%M%S',
        time.localtime()))
    os.makedirs(new_dir)
    # Seed numpy
    np.random.seed(seed)
    # Execute the first level
    # Bug fix: forward the caller's arguments — the original re-hard-coded the
    # default values, silently ignoring whatever the caller passed.
    first_level(new_dir, ntrain=ntrain, ntest=ntest, nsplits=nsplits, seed=seed)
    # Execute the second level
    second_level(new_dir, seed=seed)
if __name__ == '__main__':
    # Results land under ~/scratch/grid; each run gets a timestamped subdir.
    out_path = os.path.join(os.path.expanduser('~'), 'scratch', 'grid')
    parallel_grid(out_path)
    # second_level(r'C:\Users\james\scratch\grid\20160611-153835')
|
# Scratch work for a key-and-lock puzzle: collect the coordinates of the key's
# teeth (1s) and the lock's holes (0s), then rotate the key 90 degrees and
# record the rotated teeth.
key = [[0, 0, 0], [1, 0, 0], [0, 1, 1]]
lock = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
M = 3
check = []
check_rot = []
# Coordinates of the key's teeth.
for y in range(M):
    for x in range(M):
        if key[y][x] == 1:
            check.append((y, x))
N = 3
target = []
# Coordinates of the lock's holes.
for y in range(N):
    for x in range(N):
        if lock[y][x] == 0:
            target.append((y, x))
print(target)
print(check)
for i in key:
    print(i)
# Rotate the key grid 90 degrees.
key = [[key[j][i] for j in range(M -1, -1, -1)] for i in range(M)]
# Teeth coordinates after one rotation.
for y in range(M):
    for x in range(M):
        if key[y][x] == 1:
            check_rot.append((y, x))
print()
for i in key:
    print(i)
# Rotate again (second 90-degree turn).
key = [[key[j][i] for j in range(M -1, -1, -1)] for i in range(M)]
print()
for i in key:
    print(i)
# NOTE(review): the next two lines look like debugging leftovers — they clobber
# the computed `target` list and the rotated `key` grid.
target = [(), ()]
key = 3
print(target)
print(check)
print(check_rot)
#print(rot)
# 1. Try rotating 90 degrees relative to `check`.
"""2, 1 -> 1, 0
2, 2 -> 2, 0
1, 0 -> 0, 1
print(target)"""
|
# Celsius/Fahrenheit converter: ask for the source scale and the value, then
# print the converted temperature rounded to two decimals. Any answer other
# than "F" (case-insensitive) is treated as Celsius.
escala = input("qual escala? (C/F) ")
vt = float(input("valor da temperatura: "))
if escala.upper() == "F":
    # Fahrenheit -> Celsius
    print(round((5 * (vt - 32)) / 9, 2))
else:
    # Celsius -> Fahrenheit
    print(round((vt * 9 / 5) + 32, 2))
#!/usr/bin/env python
import numpy as np
from rosplane_msgs.msg import State
from rosplane_msgs.msg import Controller_Commands
from rosflight_msgs.msg import Command
from pdb import set_trace as pause
import yaml
import rospy
class autopilot():
    """ROS fixed-wing autopilot (UAV-book style successive-loop closure).

    Subscribes to truth state and high-level controller commands, runs the
    lateral (course -> roll -> aileron) and longitudinal (altitude state
    machine -> pitch/throttle -> elevator) loops at 100 Hz, and publishes
    surface deflections + throttle on /fixedwing/command. Python 2 only
    (print statements).
    """
    def __init__(self):
        # set up timing variables
        self.hz = 100.
        self.rate = rospy.Rate(self.hz)
        self.Ts = 1/self.hz
        # Sentinels: empty lists mean "no state / no command received yet".
        self.t = []
        self.Va_c=[]
        # Command/limit constants (radians).
        self.theta_c_max = 30*np.pi/180.
        self.phi_max = 45*np.pi/180.
        # Altitude state-machine thresholds (meters) and climb throttle.
        self.altitude_takeoff_zone = 10
        self.altitude_hold_zone = 5
        self.climb_out_throttle = 1.0
        self.altitude_state = 0
        # One integrator/error pair per PID loop:
        # 1: airspeed-with-pitch, 2: course, 3: altitude,
        # 4: airspeed-with-throttle, 5: roll, 6: pitch.
        self.integrator_1 = 0
        self.integrator_2 = 0
        self.integrator_3 = 0
        self.integrator_4 = 0
        self.integrator_5 = 0
        self.integrator_6 = 0
        self.error_1 = 0
        self.error_2 = 0
        self.error_3 = 0
        self.error_4 = 0
        self.error_5 = 0
        self.error_6 = 0
        # Dirty-derivative states for the airspeed loops and altitude rate.
        self.ap_differentiator_ = 0
        self.at_differentiator_ = 0
        self.hdot = 0
        self.hdot_d = 0
        self.h_d = 0
        # Dirty-derivative time constant.
        self.tau = 5
        self.commands = Command()
        # load param file
        # NOTE(review): hard-coded absolute path, and yaml.load without an
        # explicit Loader (unsafe on untrusted files / deprecated in PyYAML 5+).
        self.P = yaml.load(open('/home/nmd89/git/nathan/flight_dynamics/final_project/rosplane_ws/src/rosplane/rosplane/param/aerosonde.yaml'))
        # modified param values so they match the simulation
        self.P['AS_PITCH_KP'] = 0.0
        self.P['BETA_KP'] = 0.0
        self.P['BETA_KI'] = 0.0
        self.P['COURSE_KI'] = 0.0
        self.P['PITCH_KP'] = 1.0
        self.P['PITCH_KD'] = -0.17
        # trim values for control surfaces and throttle
        self.delta_a = self.P['TRIM_A']
        self.delta_e = self.P['TRIM_E']
        self.delta_r = self.P['TRIM_R']
        self.delta_t = self.P['TRIM_T']
        # subscribe to the MAV states and controller_commands
        rospy.Subscriber('/fixedwing/truth', State, self.get_states)
        rospy.Subscriber('/fixedwing/controller_commands', Controller_Commands, self.get_commands)
        # publish the commanded surface deflections
        self.pub = rospy.Publisher('/fixedwing/command', Command, queue_size=1)
        # Busy-wait until the first state and first command arrive.
        # NOTE(review): these loops spin without sleeping, pegging a core.
        check=1
        while not self.t:
            if check:
                print 'waiting for states'
                check=0
        print 'states received'
        check=1
        while not self.Va_c:
            if check:
                print 'waiting for commands'
                check=0
        print 'commands received'
    def get_states(self, msg):
        """Cache the latest truth state from the /fixedwing/truth topic."""
        self.position = msg.position
        # NED convention: altitude is the negated down component.
        self.h = -self.position[2]
        self.Va = msg.Va
        self.alpha = msg.alpha
        self.beta = msg.beta
        self.phi = msg.phi
        self.theta = msg.theta
        self.psi = msg.psi
        self.chi = msg.chi
        self.p = msg.p
        self.q = msg.q
        self.r = msg.r
        self.Vg = msg.Vg
        self.wn = msg.wn
        self.we = msg.we
        self.chi_w = msg.chi_deg
        self.psi_w = msg.psi_deg
        self.t = msg.header.stamp.secs + msg.header.stamp.nsecs * 1e-9
        self.sec = msg.header.stamp.secs
    def get_commands(self, msg):
        """Cache the latest high-level commands (airspeed, altitude, course)."""
        self.Va_c = msg.Va_c
        self.h_c = msg.h_c
        self.chi_c = msg.chi_c
        self.phi_ff = msg.phi_ff
    def course_hold(self):
        """Outer lateral loop: course error -> commanded roll angle phi_c."""
        error = self.chi_c - self.chi
        # Reset the integrator on large course errors to avoid windup during turns.
        if np.abs(error)>15*np.pi/180.:
            self.integrator_2 = 0
        else:
            # Trapezoidal integration.
            self.integrator_2 = self.integrator_2+(self.Ts/2.)*(error+self.error_2)
        up = self.P['COURSE_KP']*error
        ui = self.P['COURSE_KI']*self.integrator_2
        # Yaw rate r used as the derivative term.
        ud = self.P['COURSE_KD']*self.r
        self.phi_c = self.sat(up+ui+ud, self.phi_max, -self.phi_max)
        # Integrator anti-windup via back-calculation.
        if not self.P['COURSE_KI']==0:
            phi_c_unsat = up+ui+ud
            k_antiwindup = self.Ts/self.P['COURSE_KI']
            self.integrator_2 = self.integrator_2 + k_antiwindup*(self.phi_c-phi_c_unsat)
        self.error_2 = error
    def roll_hold(self):
        """Inner lateral loop: roll error -> aileron deflection delta_a."""
        error = self.phi_c - self.phi
        self.integrator_5 = self.integrator_5+(self.Ts/2.)*(error+self.error_5)
        up = self.P['ROLL_KP']*error
        ui = self.P['ROLL_KI']*self.integrator_5
        ud = self.P['ROLL_KD']*self.p
        # NOTE(review): delta_a is saturated with the roll-angle limit phi_max,
        # not an aileron deflection limit — confirm this is intended.
        self.delta_a = self.sat(up+ui+ud, self.phi_max, -self.phi_max)
        if not self.P['ROLL_KI']==0:
            delta_a_unsat = up+ui+ud
            k_antiwindup = self.Ts/self.P['ROLL_KI']
            self.integrator_5 = self.integrator_5 + k_antiwindup*(self.delta_a-delta_a_unsat)
        self.error_5 = error
    def pitch_hold(self):
        """Inner longitudinal loop: pitch error -> elevator deflection delta_e."""
        error = self.theta_c - self.theta
        self.integrator_6 = self.integrator_6+(self.Ts/2.)*(error+self.error_6)
        up = self.P['PITCH_KP']*error
        ui = self.P['PITCH_KI']*self.integrator_6
        ud = self.P['PITCH_KD']*self.q
        # NOTE(review): the integral term `ui` is omitted from the saturated
        # output here (up+ud), unlike every other loop — confirm intended.
        self.delta_e = self.sat(up+ud, self.phi_max, -self.phi_max)
        # NOTE(review): this guard tests ROLL_KI but the anti-windup below uses
        # PITCH_KI — a zero PITCH_KI with nonzero ROLL_KI divides by zero.
        if not self.P['ROLL_KI']==0:
            delta_e_unsat = up+ui+ud + self.P['TRIM_E']
            k_antiwindup = self.Ts/self.P['PITCH_KI']
            self.integrator_6 = self.integrator_6 + k_antiwindup*(self.delta_e-delta_e_unsat)
        self.error_6 = error
    def airspeed_with_pitch_hold(self):
        """Climb/descend loop: airspeed error -> commanded pitch theta_c."""
        error = self.Va_c - self.Va
        self.integrator_1 = self.integrator_1 + (self.Ts/2.)*(error + self.error_1)
        # Dirty (band-limited) derivative of the error.
        self.ap_differentiator_ = (2.0*self.tau - self.Ts)/(2.0*self.tau + self.Ts)*self.ap_differentiator_ + (2.0/(2.0*self.tau + self.Ts))*(error - self.error_1);
        up = self.P['AS_PITCH_KP']*error
        ui = self.P['AS_PITCH_KI']*self.integrator_1
        ud = self.P['AS_PITCH_KD']*self.ap_differentiator_
        self.theta_c = self.sat(up+ui+ud, 20.*np.pi/180., -25.*np.pi/180.)
        # implement integrator antiwindup
        if not self.P['AS_PITCH_KI']==0:
            theta_c_unsat = up + ui + ud;
            k_antiwindup = self.Ts/self.P['AS_PITCH_KI']
            self.integrator_1 = self.integrator_1 + k_antiwindup*(self.theta_c-theta_c_unsat);
        # update error
        self.error_1 = error
    def airspeed_with_throttle_hold(self):
        """Cruise loop: airspeed error -> throttle delta_t (about trim)."""
        error = self.Va_c - self.Va
        self.integrator_4 = self.integrator_4 + (self.Ts/2.)*(error + self.error_4)
        self.at_differentiator_ = (2.0*self.tau - self.Ts)/(2.0*self.tau + self.Ts)*self.at_differentiator_ + (2.0/(2.0*self.tau + self.Ts))*(error - self.error_4);
        up = self.P['AS_THR_KP']*error
        ui = self.P['AS_THR_KI']*self.integrator_4
        ud = self.P['AS_THR_KD']*self.at_differentiator_
        # Throttle is clamped to [0, 1].
        self.delta_t = self.sat(self.P['TRIM_T']+up+ui+ud,1,0)
        if not self.P['AS_THR_KI']==0:
            delta_t_unsat = self.P['TRIM_T']+up+ui+ud
            k_antiwindup = self.Ts/self.P['AS_THR_KI']
            self.integrator_4 = self.integrator_4 + k_antiwindup*(self.delta_t-delta_t_unsat)
        self.error_4 = error
    def altitude_hold(self):
        """Altitude loop: altitude error -> commanded pitch theta_c."""
        error = self.h_c - self.h
        self.integrator_3 = self.integrator_3 + (self.Ts/2.)*(error+self.error_3)
        # NOTE(review): the derivative gain term here is (2/2*(tau+Ts)) which
        # multiplies, not divides, by (tau+Ts). By analogy with the other dirty
        # derivatives it should read 2/(2*self.tau+self.Ts) — confirm.
        self.hdot = (2*self.tau-self.Ts)/(2*self.tau+self.Ts)*self.hdot+(2/2*(self.tau+self.Ts))*(error-self.error_3)
        up = self.P['ALT_KP']*error
        ui = self.P['ALT_KI']*self.integrator_3
        ud = self.P['ALT_KD']*self.hdot
        self.theta_c = self.sat(up+ui+ud, self.theta_c_max, -self.theta_c_max)
        if not self.P['ALT_KI']==0:
            theta_c_unsat = up+ui+ud
            k_antiwindup = self.Ts/self.P['ALT_KI']
            self.integrator_3 = self.integrator_3 + k_antiwindup*(self.theta_c-theta_c_unsat)
        self.error_3 = error
    def sat(self, in_, up_limit, low_limit):
        """Clamp *in_* to the closed interval [low_limit, up_limit]."""
        if in_ > up_limit:
            out = up_limit
        elif in_ < low_limit:
            out = low_limit
        else:
            out = in_
        return out
    def autopilot_uavbook(self):
        """Run one control step and publish the resulting actuator commands.

        Longitudinal control uses a 4-state machine: 1 take-off, 2 climb,
        3 descend, 4 altitude hold (state 0 just initializes).
        """
        self.course_hold()
        self.delta_r = 0
        self.roll_hold()
        if self.altitude_state==0: # initialize state machine
            #print 'initializing state machine...'
            if self.h<=self.altitude_takeoff_zone:
                self.altitude_state = 1
            elif self.h<=self.h_c-self.altitude_hold_zone:
                self.altitude_state = 2
            elif self.h>=self.h_c+self.altitude_hold_zone:
                self.altitude_state = 3
            else:
                self.altitude_state = 4
        if self.altitude_state==1: # in take-off zone
            #print 'taking off...'
            # Wings level, full throttle, fixed pitch-up.
            self.phi_c = 0
            self.roll_hold()
            self.delta_t = self.climb_out_throttle
            self.theta_c = 15*np.pi/180.#self.theta_c_m
            if self.h>=self.altitude_takeoff_zone:
                self.altitude_state = 2
        if self.altitude_state==2: # climb zone
            #print 'climbing...'
            self.delta_t = self.climb_out_throttle
            self.airspeed_with_pitch_hold()
            if self.h>=self.h_c-self.altitude_takeoff_zone:
                self.altitude_state = 4
            if self.h<=self.altitude_takeoff_zone:
                self.altitude_state = 1
        if self.altitude_state==3: # descend zone
            #print 'descending...'
            # Idle throttle; regulate airspeed with pitch.
            self.delta_t = 0
            self.airspeed_with_pitch_hold()
            if self.h<=self.h_c+self.altitude_hold_zone:
                self.altitude_state = 4
        if self.altitude_state==4: # altitude hold zone
            #print 'holding altitude...'
            self.airspeed_with_throttle_hold()
            self.altitude_hold()
            if self.h<=self.h_c-self.altitude_hold_zone:
                self.altitude_state = 2
            if self.h>=self.h_c+self.altitude_hold_zone:
                self.altitude_state = 3
        self.pitch_hold()
        self.delta_t = self.sat(self.delta_t,1,0)
        # Command message convention: x=aileron, y=elevator, z=rudder, F=throttle.
        self.commands.x = self.delta_a
        self.commands.y = self.delta_e
        self.commands.z = self.delta_r
        self.commands.F = self.delta_t
        self.pub.publish(self.commands)
    def run(self):
        """Spin the control loop at self.hz until ROS shuts down."""
        while not rospy.is_shutdown():
            self.autopilot_uavbook()
            self.rate.sleep()
if __name__=="__main__":
    # Node blocks in the autopilot constructor until states/commands arrive.
    rospy.init_node('autopilot',anonymous=True)
    ap = autopilot()
    ap.run()
|
#!/usr/bin/env python
# coding: utf-8
# In[11]:
import matplotlib.pyplot as plt
import numpy as np
import re
if __name__=="__main__":
    # Interactive helper: load a two-column (false-alarm rate, detection rate)
    # text file and save its ROC curve as a PNG next to the source file.
    # NOTE(review): assumes `address` ends with a path separator.
    address = input('address: ')
    filename = input('txt file name: ')
    k = input('K:')
    data = input('Test data:')
    d = np.loadtxt(address+filename)
    X = d[:,0]  # false alarm rate
    Y = d[:,1]  # detection rate
    plt.plot(X,Y)
    plt.title('ROC curve, data ='+data+', k = '+k)
    plt.xlabel('false alarm rate')
    plt.ylabel('detection rate')
    # Replace the '.txt' extension with '.png'.
    plt.savefig(address+filename[:-4]+'.png')
    # Keep the process alive until the user confirms.
    input('press to end')
|
from ..models import Spell
from ..serializers import SpellSerializer, SpellLocalSerializer
from ._dofus_viewset import DofusViewSet
class SpellViewSet(DofusViewSet):
    """API viewset exposing Spell objects.

    All behavior comes from DofusViewSet, which presumably chooses between
    `default_serializer` and `local_serializer` (localized output) — confirm
    against the base class.
    """
    model_class = Spell
    default_serializer = SpellSerializer
    local_serializer = SpellLocalSerializer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = 'wyx'
@time = 2019-04-08 21:28
@annotation = ''
"""
import cv2 as cv
import numpy as np
import util
"""
BGR和灰度图的转换使用 cv.COLOR_BGR2GRAY
BGR和HSV的转换使用 cv.COLOR_BGR2HSV
H表示色彩/色度,取值范围 [0,179]
S表示饱和度,取值范围 [0,255]
V表示亮度,取值范围 [0,255]
"""
# Color-space demo: show a grayscale conversion of a still image, then track
# blue objects in the live webcam feed by thresholding in HSV space.
img = util.load_img('img/messi5.jpg')
img2gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
util.show(img2gray)
cap = cv.VideoCapture(0)
while True:
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    # To find the HSV range for a color, convert a pure BGR sample, e.g.:
    #   green = np.uint8([[[0, 255, 0]]])
    #   hsv_green = cv.cvtColor(green, cv.COLOR_BGR2HSV)
    # then use [H-10, 100, 100] and [H+10, 255, 255] as the bounds.
    # define range of blue color in HSV
    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    # Binary mask of pixels inside the blue range.
    mask = cv.inRange(hsv, lower_blue, upper_blue)
    # Bitwise-AND mask and original image
    res = cv.bitwise_and(frame, frame, mask=mask)
    cv.imshow('frame', frame)
    cv.imshow('mask', mask)
    cv.imshow('res', res)
    k = cv.waitKey(5) & 0xFF
    if k == 27:  # ESC quits
        break
# Bug fix: release the camera device — the original leaked the capture handle.
cap.release()
cv.destroyAllWindows()
|
def rmv_b(country):
    """Remove the "b'" artifact that str() leaves on a bytes object.

    The data files are read in binary mode, so stringified rows carry a
    leading "b'" before the country name; strip every occurrence.
    """
    cleaned = country.replace("b'", "")
    return cleaned
def print_result():
    """Dump the per-year results (global *finallist*) as CSV.

    Writes one header row plus one row per entry of the module-level
    ``finallist`` (a list of dicts) to ``result_per_years.txt``.
    """
    # `with` guarantees the handle is flushed and closed — the original
    # opened the file and never closed it.
    with open("result_per_years.txt", "w") as f:
        f.write("country, population, percent, suicide_nbr, year\n")
        for line in finallist:
            f.write(str(line['country']) + ',' + str(line['population']) + ',' + str(line['percent']) + ',' + str(line['suicide_nbr']) + ',' + str(line['year']) + '\n')
def get_data(country, res_pib_country):
    """Look up *country* in the GDP rows and return its 34th CSV field.

    Each row is stringified, split on commas, and its first field is cleaned
    of the "b'" bytes artifact before comparison. Returns '' when the country
    is not found.
    """
    for row in res_pib_country:
        fields = str(row).split(",")
        fields[0] = fields[0].replace("b'", "")
        if fields[0] == country:
            return fields[33]
    return ""
def merge_data():
    """Merge result_per_country.txt with result_pib_country.txt.

    Keeps only the significant columns (country, average suicide rate,
    average GDP) and writes them to ``result_final.txt``.
    """
    # Open both input files; the first row of each is a header.
    with open('result_per_country.txt','rb') as f:
        res_per_country = list(f)
    del res_per_country[0]
    with open('result_pib_country.txt','rb') as f:
        res_pib_country = list(f)
    del res_pib_country[0]
    # Create the output file with its header.
    # `with` closes the handle — the original leaked it (and its unused
    # `finalist` local has been dropped).
    with open("result_final.txt", "w") as out:
        out.write("country,average_suicide,average_pib\n")
        # Walk result_per_country and join against the GDP file.
        for elem in res_per_country:
            line = str(elem).split(",")
            line[0] = rmv_b(line[0])
            # Fetch the GDP row for this country.
            data = get_data(line[0], res_pib_country)
            # Only countries with usable GDP data are written out; the
            # trailing 3 characters of `data` are a bytes-repr artifact.
            if len(data) > 1:
                out.write(line[0] + ',' + line[1] + '%,' + data[:(len(data)-3)] + '\n')
merge_data() |
# clize -- A command-line argument parser for Python
# Copyright (C) 2011-2015 by Yann Kaiser <kaiser.yann@gmail.com>
# See COPYING for details.
from sigtools import support
from clize import parser, errors, Parameter
from clize.extra import parameters
from clize.tests import util
@util.testfunc
def check_repr(self, sig_str, annotation, str_rep):
    """Build a CliSignature from *sig_str* (binding `a` to *annotation*)
    and assert its string representation equals *str_rep*."""
    sig = support.s(sig_str, locals={'a': annotation})
    csig = parser.CliSignature.from_signature(sig)
    self.assertEqual(str(csig), str_rep)
@check_repr
class RepTests(object):
    """Representation fixtures: (signature string, annotation, expected str).

    Each attribute is fed to `check_repr`; the tuples are also reused as the
    first element of the fixtures in the test classes below.
    """
    # `mapped`: CLI value -> Python value, with per-value help text.
    mapped_basic = ('par:a', parameters.mapped([
        ('greeting', ['hello'], 'h1'),
        ('parting', ['goodbye'], 'h2'),
        ]), 'par')
    mapped_force_icase = ('par:a', parameters.mapped([
        (1, ['thing'], 'h')
        ], case_sensitive=False), 'par')
    mapped_force_scase = ('par:a', parameters.mapped([
        (1, ['Thing'], 'h'),
        ], case_sensitive=True), 'par')
    # Keys differing only by case imply case sensitivity...
    mapped_imply_scase = ('par:a', parameters.mapped([
        (1, ['thing'], 'h'),
        (2, ['Thing'], 'h'),
        ]), 'par')
    # ...so forcing case-insensitivity with such keys is an error.
    mapped_bad_icase = ('par:a', parameters.mapped([
        (1, ['thing'], 'h'),
        (2, ['Thing'], 'h'),
        ], case_sensitive=False), 'par')
    # `one_of`: restrict a parameter to a closed set of strings.
    oneof_basic = 'par:a', parameters.one_of('hello', 'goodbye', 'bye'), 'par'
    oneof_help = (
        'par:a', parameters.one_of(('hello', 'h1'), ('bye', 'h2')), 'par')
    # `multi`: repeatable option with optional min/max occurrence bounds.
    multi_basic = '*, par:a', parameters.multi(), '--par=STR'
    multi_req = '*, par:a', parameters.multi(1), '--par=STR'
    multi_min = '*, par:a', parameters.multi(2), '--par=STR'
    multi_max = '*, par:a', parameters.multi(max=2), '--par=STR'
    multi_bound = '*, par:a', parameters.multi(min=2, max=3), '--par=STR'
    multi_conv = '*, par:a', (parameters.multi(), int), '--par=INT'
    multi_last_opt = (
        '*args, par:a', (parameters.multi(), Parameter.L),
        '--par=STR [args...]')
@util.testfunc
def annotated_sigtests(self, sig_info, in_args, args, kwargs):
    """Parse *in_args* against the signature in *sig_info* and assert the
    resulting positional/keyword arguments match *args* / *kwargs*."""
    sig_str, annotation, str_rep = sig_info
    sig = support.s(sig_str, locals={'a': annotation})
    csig = parser.CliSignature.from_signature(sig)
    ba = csig.read_arguments(in_args)
    self.assertEqual(ba.args, args)
    self.assertEqual(ba.kwargs, kwargs)
@util.testfunc
def annotated_sigerror_tests(self, sig_info, in_args,
                             exc=errors.BadArgumentFormat):
    """Assert that parsing *in_args* against the signature in *sig_info*
    raises *exc* (BadArgumentFormat by default)."""
    sig_str, annotation, str_rep = sig_info
    sig = support.s(sig_str, locals={'a': annotation})
    csig = parser.CliSignature.from_signature(sig)
    self.assertRaises(exc, csig.read_arguments, in_args)
@annotated_sigtests
class MappedTests(object):
    """Successful parses for `parameters.mapped`: (fixture, argv, args, kwargs)."""
    exact_1 = RepTests.mapped_basic, ['hello'], ['greeting'], {}
    exact_2 = RepTests.mapped_basic, ['goodbye'], ['parting'], {}
    # Matching is case-insensitive by default.
    isec_1 = RepTests.mapped_basic, ['HElLo'], ['greeting'], {}
    isec_2 = RepTests.mapped_basic, ['GoODByE'], ['parting'], {}
    forced_icase_1 = RepTests.mapped_force_icase, ['thing'], [1], {}
    forced_icase_2 = RepTests.mapped_force_icase, ['ThiNG'], [1], {}
    # Case-differing keys imply exact matching.
    implied_scase_1 = RepTests.mapped_imply_scase, ['thing'], [1], {}
    implied_scase_2 = RepTests.mapped_imply_scase, ['Thing'], [2], {}
    forced_scase_1 = RepTests.mapped_force_scase, ['Thing'], [1], {}
    def test_show_list(self):
        """Passing the 'list' value triggers the value-listing helper."""
        sig = support.s('par:a', locals={'a': RepTests.mapped_basic[1]})
        csig = parser.CliSignature.from_signature(sig)
        ba = csig.read_arguments(['list'])
        par = csig.positional[0]
        self.assertEqual(par.show_list, ba.func)
        # Compare whitespace-insensitively via split().
        self.assertEqual(
            """name: Possible values for par:
            hello h1
            goodbye h2""".split(),
            ba.func('name', *ba.args, **ba.kwargs).split())
@annotated_sigerror_tests
class MappedErrorTests(object):
    """Failing parses for `parameters.mapped`: (fixture, argv[, exception])."""
    not_found = RepTests.mapped_basic, ['dog']
    forced_scase = RepTests.mapped_force_scase, ['thing']
    # NOTE(review): name is likely a typo for `bad_icase` (cf. the fixture).
    bas_icase = RepTests.mapped_bad_icase, ['anything'], ValueError
@annotated_sigtests
class OneOfTests(object):
    """Successful parses for `parameters.one_of`: (fixture, argv, args, kwargs)."""
    exact = RepTests.oneof_basic, ('hello',), ['hello'], {}
    # Matching is case-insensitive by default.
    icase = RepTests.oneof_basic, ('Hello',), ['hello'], {}
    def test_show_list(self):
        """Passing the 'list' value triggers the value-listing helper."""
        sig = support.s('par:a', locals={'a': RepTests.oneof_help[1]})
        csig = parser.CliSignature.from_signature(sig)
        ba = csig.read_arguments(['list'])
        par = csig.positional[0]
        self.assertEqual(par.show_list, ba.func)
        # Compare whitespace-insensitively via split().
        self.assertEqual(
            """name: Possible values for par:
            hello h1
            bye h2""".split(),
            ba.func('name', *ba.args, **ba.kwargs).split())
@annotated_sigtests
class MultiTests(object):
    """Successful parses for `parameters.multi`: (fixture, argv, args, kwargs)."""
    # No occurrences yields an empty list, not a missing argument.
    basic_none = RepTests.multi_basic, (), [], {'par': []}
    basic_one = RepTests.multi_basic, ('--par=one',), [], {'par': ['one']}
    basic_two = (
        RepTests.multi_basic, ('--par=one', '--par', 'two'),
        [], {'par': ['one', 'two']})
    # Each occurrence goes through the converter (int here).
    conv = RepTests.multi_conv, ('--par=1', '--par', '2'), [], {'par': [1, 2]}
    req_met = (RepTests.multi_req, ('--par=1',), [], {'par': ['1']})
    min_met = (
        RepTests.multi_min, ('--par=1', '--par=2'), [], {'par': ['1', '2']})
    max_met_1 = RepTests.multi_max, (), [], {'par': []}
    max_met_2 = RepTests.multi_max, ('--par=1',), [], {'par': ['1']}
    max_met_3 = (
        RepTests.multi_max, ('--par=1', '--par=2'), [], {'par': ['1', '2']})
    # Parameter.L: occurrences after the first stop option parsing.
    last_opt = (
        RepTests.multi_last_opt, ('--par=1', '--par=2'),
        ['--par=2'], {'par': ['1']})
@annotated_sigerror_tests
class MultiErrorTests(object):
    """Failing parses for `parameters.multi`: (fixture, argv, exception)."""
    req_not_met = RepTests.multi_req, (), errors.MissingRequiredArguments
    min_not_met_1 = (
        RepTests.multi_min, ('--par=one',), parameters.NotEnoughValues)
    min_not_met_2 = (
        RepTests.multi_min, ('--par', 'one'), parameters.NotEnoughValues)
    max_passed_1 = (
        RepTests.multi_max, ('--par=1', '--par=2', '--par=3'),
        parameters.TooManyValues)
    max_passed_2 = (
        RepTests.multi_max, ('--par=1', '--par=2', '--par=3', '--par=4'),
        parameters.TooManyValues)
    def test_message(self):
        """Check the wording of the too-few/too-many error messages.

        NOTE(review): if the expected exception is NOT raised these try
        blocks fall through silently — an `else: self.fail(...)` would make
        that failure visible.
        """
        sig_str, annotation, str_rep = RepTests.multi_bound
        sig = support.s(sig_str, locals={'a': annotation})
        csig = parser.CliSignature.from_signature(sig)
        try:
            csig.read_arguments(('--par=1',))
        except parameters.NotEnoughValues as e:
            self.assertEqual(e.message, "Received too few values for --par")
        try:
            csig.read_arguments(('--par=1', '--par=2', '--par=3', '--par=4'))
        except parameters.TooManyValues as e:
            self.assertEqual(e.message, "Received too many values for --par")
|
##################################################
# Import modules
import random
import torch.optim.lr_scheduler
import matplotlib.pyplot as plt
import data
import metrics
import seq2seq
import train
##################################################
# Functions
def print_raw_sentences(name_list, raw_sentences_list):
    """Print parallel sentences sample by sample.

    For each sample index, prints one '<name> -> <space-joined tokens>' line
    per language in *name_list*, followed by a blank separator line.

    Parameters
    ----------
    name_list : display name per language, aligned with *raw_sentences_list*.
    raw_sentences_list : one list of token-list sentences per language, all
        of equal length (zip stops at the shortest, where the original
        index-based loop would have raised IndexError).
    """
    # zip over languages replaces the range(len(...)) double-index loop.
    for sentences in zip(*raw_sentences_list):
        for name, sentence in zip(name_list, sentences):
            print('{} -> {}'.format(name, ' '.join(sentence)))
        print()
def plot_loss(loss_list, loss_val_list):
    """Plot training vs. validation loss per epoch and display the figure."""
    epochs = range(len(loss_list))
    plt.plot(epochs, loss_list, label='training')
    plt.plot(epochs, loss_val_list, label='validating')
    plt.legend()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()
##################################################
# Prepare data
en_lang, zh_lang = data.load_parallel_en_zh()
n_train = int(en_lang.n_sentences * 0.95)
n_val = en_lang.n_sentences - n_train
index = list(range(en_lang.n_sentences))
r = random.Random(0)
r.shuffle(index)
index_train = index[:n_train]
source_sentences = en_lang.to_sentences([en_lang.raw_sentences[i] for i in index_train], eos=False)
target_sentences = zh_lang.to_sentences([zh_lang.raw_sentences[i] for i in index_train], eos=True)
index_val = index[n_train:][:n_val]
source_sentences_val = en_lang.to_sentences([en_lang.raw_sentences[i] for i in index_val], eos=False)
target_sentences_val = zh_lang.to_sentences([zh_lang.raw_sentences[i] for i in index_val], eos=True)
# View data
print('{} training samples, {} validating samples'.format(len(source_sentences), len(source_sentences_val)))
print()
print_raw_sentences(
['en', 'zh'],
[en_lang.to_raw_sentences(source_sentences[:5]),
zh_lang.to_raw_sentences(target_sentences[:5])])
##################################################
# Overfit on small dataset
n_small = 100
model_s = seq2seq.Seq2seq('gru', 'bilinear', 1, en_lang.n_words, 100, 200, zh_lang.n_words, 100, 300)
optimizer_s = torch.optim.Adam(model_s.parameters(), lr=0.01)
scheduler_s = torch.optim.lr_scheduler.ExponentialLR(optimizer_s, 1.0)
# Train
loss_list_s, loss_val_list_s = train.train(
model_s, optimizer_s, scheduler_s,
source_sentences[:n_small], target_sentences[:n_small],
source_sentences_val[:n_small], target_sentences_val[:n_small],
teacher_force_rate=1.0, n_epochs=40, batch_size=500)
# Loss curve
plot_loss(loss_list_s, loss_val_list_s)
# Predict
target_sentences_g = train.predict_greedy(
model_s, source_sentences[:n_small], target_max_length=10, batch_size=500)
# Show
print_raw_sentences(
['en', 'zh', 'greedy'],
[en_lang.to_raw_sentences(source_sentences[:10]),
zh_lang.to_raw_sentences(target_sentences[:10]),
zh_lang.to_raw_sentences(target_sentences_g[:10])])
##################################################
# Run on full dataset
model = seq2seq.Seq2seq('gru', 'bilinear', 2, en_lang.n_words, 200, 400, zh_lang.n_words, 200, 600)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.9)
# Train
loss_list, loss_val_list = train.train(
model, optimizer, scheduler,
source_sentences, target_sentences,
source_sentences_val, target_sentences_val,
teacher_force_rate=1.0, n_epochs=40, batch_size=500)
# Loss curve
plot_loss(loss_list, loss_val_list)
# Predict
target_sentences_val_g = train.predict_greedy(
model, source_sentences_val, target_max_length=10, batch_size=500)
target_sentences_val_b = train.predict_beam(
model, source_sentences_val[:1000], target_max_length=10, beam_size=10)
# Show
print_raw_sentences(
['en', 'zh', 'greedy', 'beam'],
[en_lang.to_raw_sentences(source_sentences_val[:20]),
zh_lang.to_raw_sentences(target_sentences_val[:20]),
zh_lang.to_raw_sentences(target_sentences_val_g[:20]),
zh_lang.to_raw_sentences(target_sentences_val_b[:20])])
# BLEU score
raw_target_sentences_val = zh_lang.to_raw_sentences(target_sentences_val)
raw_target_sentences_val_g = zh_lang.to_raw_sentences(target_sentences_val_g)
raw_target_sentences_val_b = zh_lang.to_raw_sentences(target_sentences_val_b)
bleu_g = metrics.bleu(raw_target_sentences_val_g, raw_target_sentences_val)
bleu_b = metrics.bleu(raw_target_sentences_val_b, raw_target_sentences_val)
print('bleu score (greedy): {}, bleu score (beam): {}'.format(bleu_g, bleu_b))
print()
# Attention
##################################################
|
import random
from CybORG.Shared.Actions import DiscoverRemoteSystems, DiscoverNetworkServices, ExploitRemoteService, PrivilegeEscalate, Impact
class HeuristicRed():
def __init__(self, session=0):
self.parameters = {
'session':session,
'agent':'Red',
}
self.killchain = [DiscoverNetworkServices,ExploitRemoteService,
PrivilegeEscalate,Impact]
self.last_action = None
self.history = []
self.active_ip = None
self.known_subnets = set()
self.unexplored_subnets = set()
self.ip_map = {}
self.ip_status = {}
def get_action(self,obs):
success = obs['success']
if success == False:
# Needs to be failure first because unknown (initial obs) counts as true
self._process_last_action_failure()
else:
self._process_last_action_success() if self.last_action else None
self._process_new_ips(obs)
action = self._advance_killchain()
return action
def _process_last_action_success(self):
action = self.last_action
name = self.last_action.__class__.__name__
if name == 'DiscoverRemoteSystems':
subnet = action.subnet
self.unexplored_subnets.remove(subnet)
elif name in ('DiscoverNetworkServices','ExploitRemoteService'):
# Advance killchain
ip = action.ip_address
self.ip_status[ip] += 1
else:
# Get ip from hostname and advance killchain
ip = self._get_ip(action.hostname)
self.ip_status[ip] += 1 if self.ip_status[ip] < 3 else 0
def _process_last_action_failure(self):
action = self.last_action
name = self.last_action.__class__.__name__
if name in ('PrivilegeEscalate','Impact'):
ip = self._get_ip(action.hostname)
self.ip_status[ip] = 1
elif name == 'ExploitRemoteService':
# Assuming host is Defender
self.ip_status[action.ip_address] = 3
else:
raise NotImplementedError('Scans are not supposed to fail.')
def _process_new_ips(self,obs):
for hostid in obs:
if hostid == 'success':
continue
host = obs[hostid]
for interface in host.get('Interface',[]):
subnet = interface.get('Subnet')
if (subnet not in self.known_subnets) and (subnet is not None):
self.known_subnets.add(subnet)
self.unexplored_subnets.add(subnet)
ip = interface.get('IP Address')
assert ip is not None
if ip not in self.ip_status:
self.ip_status[ip] = 0
sysinfo = host.get('System info')
hostname = sysinfo.get('Hostname') if sysinfo else None
if ip not in self.ip_map:
self.ip_map[ip] = hostname
elif self.ip_map[ip] is None:
self.ip_map[ip] = hostname
def _advance_killchain(self):
if self.unexplored_subnets:
subnet = random.choice(list(self.unexplored_subnets))
action = DiscoverRemoteSystems(subnet=subnet,**self.parameters)
else:
ip = self._choose_ip()
action = self._choose_exploit(ip)
self.last_action = action
self.history.append(action)
return action
    def _choose_ip(self):
        """Return the IP currently being worked on, switching targets when done.

        The active IP is kept while its killchain status is below 3, or while
        it maps to 'Op_Server0' (so Impact can be repeated on the op server).
        """
        if self.active_ip is None:
            # First call: pick any known host at random.
            self.active_ip = random.choice(list(self.ip_status.keys()))
        ip = self.active_ip
        status = self.ip_status[ip]
        if (status < 3) or (self.ip_map[ip] == 'Op_Server0'):
            pass
        else:
            # Current target exhausted: pick another host with steps left.
            valid_ips = [ip for ip in self.ip_status if self.ip_status[ip] < 3]
            # NOTE(review): if no host qualifies, ip becomes None and the
            # assert below fails -- confirm callers never reach that state.
            ip = self.active_ip = random.choice(valid_ips) if valid_ips else None
        self.active_ip = ip
        assert ip in self.ip_status
        return ip
def _choose_exploit(self,ip):
status = self.ip_status[ip]
command = self.killchain[status]
if status < 2:
action = command(ip_address=ip,**self.parameters)
else:
hostname = self.ip_map[ip]
action = command(hostname=hostname,**self.parameters)
return action
def _get_ip(self,hostname):
for ip in self.ip_map:
if self.ip_map[ip] == hostname:
break
else:
raise NotImplementedError('Hostname missing from ip_map')
return ip
|
"""Testing the diverses algorithms to shuffler."""
from collections import defaultdict
from random import randrange
def test_shuffler(shuffler, deck='abcd', n=10000):
    """Shuffle `deck` n times and report whether every permutation's observed
    frequency falls within 10% of the uniform expectation.

    NOTE: Python 2 module -- `print("...").format(...)` and the bare `print`
    statements below rely on print being a statement, not a function.
    """
    counts = defaultdict(int)
    for _ in xrange(n):
        input = list(deck)
        shuffler(input)
        counts["".join(input)] += 1
    e = n * 1. / factorial(len(deck)) # expected value
    ok = all((0.9 <= counts[item] / e <= 1.1) for item in counts)
    name = shuffler.__name__
    print("{0}({1}) {2}").format(name, deck, ("ok" if ok else "**** BAD ****"))
    print " ",
    for item, count in sorted(counts.items()):
        print("{0}:{1:4.1f}").format(item, count * 100. / n)
    print
def factorial(n):
    "Iteratively compute n!; values below 2 yield 1."
    result = 1
    while n > 1:
        result *= n
        n -= 1
    return result
def shuffle(deck):
"Knuth's algorithm P"
N = len(deck)
for i in xrange(N-1):
swap(deck, i, randrange(i, N))
def shuffle1(deck):
    "Teacher's of Peter algorithm: random swaps until every index was involved."
    size = len(deck)
    touched = [False] * size
    while not all(touched):
        a, b = randrange(size), randrange(size)
        touched[a] = touched[b] = True
        swap(deck, a, b)
def shuffle2(deck):
"modifified version of shuffle 1"
N = len(deck)
swapped = [False] * N
while not all(swapped):
i, j = randrange(N), randrange(N)
swapped[i] = True
swap(deck, i, j)
def shuffle3(deck):
    "Another modification in shuffle 1: one unrestricted random swap per position."
    size = len(deck)
    i = 0
    while i < size:
        swap(deck, i, randrange(size))
        i += 1
def swap(deck, i, j):
    "Exchange the elements at positions i and j in place."
    tmp = deck[i]
    deck[i] = deck[j]
    deck[j] = tmp
def test_shufflers(shufflers=[shuffle, shuffle1], decks=['abc', 'ab', 'abcd']):
    "compare the shufflers"
    # NOTE(review): mutable default arguments -- harmless because they are
    # never mutated, but fragile if that ever changes.
    for deck in decks:
        print
        for f in shufflers:
            test_shuffler(f, deck)
def main():
    """Entry point: compare all four shuffler implementations."""
    test_shufflers([shuffle, shuffle1, shuffle2, shuffle3])
if __name__ == "__main__":
    main()
|
import pandas as pd
import numpy as np
from sklearn.metrics import precision_score
from BAYES import GAUSSIAN,N_GAUSSIAN
import matplotlib.pyplot as plt
from MySelect import SelectByChi2
# Feature-selection experiment: over 100 random resamples of the voice
# dataset, select k features by chi2 (k = 1..20), fit a hand-rolled Gaussian
# naive-Bayes classifier, then plot mean weighted precision against k.
n=3000
p=0.7
R=np.zeros(20)  # accumulated precision, one slot per k
read_data = pd.read_csv('E:/PY/voice/voice.csv')  # hoisted: the file never changes between runs
for i in range(100):
    data = read_data.sample(n)
    train_data = data.sample(frac=p)
    test_data = data.drop([d for d in train_data.index])
    train_label = train_data['label'].values
    train_X = train_data.drop(['label'], axis=1).values
    feature_name=data.drop(['label'],axis=1).columns.values
    test_label = test_data['label'].values
    test_X = test_data.drop(['label'], axis=1).values
    for k in range(1,21):
        select_train_X, select_test_X, select_feature=SelectByChi2(train_X,train_label,test_X,feature_name,k)
        # Per-class statistics for the Gaussian model.
        male_train=select_train_X[train_label=='male']
        # BUGFIX: the sample count is shape[0] (rows); the original used
        # shape[1] (number of selected features), which made the class
        # counts and priors meaningless.
        male_count=male_train.shape[0]
        male_mean=male_train.mean(axis=0)
        male_var=male_train.var(axis=0)
        male_cov=np.cov(male_train,rowvar=False)
        female_train=select_train_X[train_label=='female']
        female_count=female_train.shape[0]
        female_mean=female_train.mean(axis=0)
        female_var=female_train.var(axis=0)
        female_cov=np.cov(female_train,rowvar=False)
        male_prior=male_count/select_train_X.shape[0]
        female_prior=female_count/select_train_X.shape[0]
        predict=GAUSSIAN(male_mean,male_var,male_prior,female_mean,female_var,female_prior,select_test_X)
        # Encode labels numerically: male -> 0, female -> 1.
        test_y=np.zeros(len(test_label))
        test_y[(test_label=='male')]=0
        test_y[(test_label=='female')]=1
        y_label=['male','female']
        R[k-1]+=precision_score(test_y,predict,average='weighted')
    print(i)
R=R/100  # mean over the 100 runs
print(R)
# Bar chart of mean precision versus number of selected features.
ax1 = plt.subplot(1,1,1,facecolor='white')
plt.rcParams['font.sans-serif']=['SimHei']  # a font able to render the CJK labels
x_index = np.arange(20)+1
bar_width=0.35
rects1 = plt.bar(x_index, R, width=bar_width,alpha=0.4, color='r')
plt.xticks(x_index, np.arange(1,21,1))
plt.tight_layout()
ax1.set_title('声音预测准确率与特征选择特征数关系')
ax1.set_xlabel('特征选择特征数')
ax1.set_ylabel('准确率')
ax1.set_xlim(0,22)
ax1.set_ylim(0.5,1)
plt.show()
def main():
    ''' Evaluate the Laplace equation finite sum solution.

    Builds the odd-term Fourier series for the potential in a unit square
    with one unit-potential edge, then draws a contour plot and a 3-D
    surface of the summed solution.
    '''
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

    n_tot = 51                    # Number of terms to use in sum
    n = np.arange(1, n_tot+1, 2)  # Odd-terms only
    N = 50                        # Grid size
    L = 1.                        # System length
    h = L / (N-1)                 # Grid spacing
    phi0 = 1.                     # Potential on the driven boundary

    x = np.arange(N)*h
    y = np.arange(N)*h
    phi_n = np.empty((len(n), N, N))

    ## MAIN LOOP ##
    for idx, n_ in enumerate(n):
        # Each term is separable in x and y, so an outer product over the
        # grid replaces the original O(N^2) Python double loop.
        sin_x = np.sin(n_*np.pi*x/L)
        sinh_y = np.sinh(n_*np.pi*y/L) / np.sinh(n_*np.pi)
        phi_n[idx, :, :] = phi0 * 4./(np.pi*n_) * np.outer(sin_x, sinh_y)
    phi_tot = np.sum(phi_n, axis=0)

    levels = np.linspace(0, 1, 11)
    xx, yy = np.meshgrid(x, y)
    # In meshgrid, xx is the columns and yy are the rows, which is opposite of phi_tot
    ct = plt.contour(x, y, np.flipud(np.rot90(phi_tot)), levels)
    plt.clabel(ct, fmt='%1.2f')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title(f'Potential of finite sum with n = {n_tot}')

    fig = plt.figure()
    # BUGFIX: fig.gca(projection='3d') was removed in Matplotlib 3.6;
    # add_subplot is the supported way to request a 3-D axes.
    ax = fig.add_subplot(projection='3d')
    ax.plot_surface(yy, xx, phi_tot, rstride=1, cstride=1, cmap=cm.hot)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel(r'$\Phi(x, y)$')
    plt.show()
if __name__ == '__main__':
    main()
from j.osh import *
import pandas as pd
from PIL import Image
from random import shuffle
from random import randint
import numpy
import uuid
def load_csv():
    """
    Loads the CSV file for reading coordinates.

    Returns:
        pandas.DataFrame read from ./coords.csv (relative to the CWD).
    """
    return pd.read_csv('coords.csv')
def load_img(filename):
    """
    Open the image at `filename` with PIL and return it (lazy-loaded).
    """
    return Image.open(filename)
def crop_img(img, x1, x2, y1, y2, z):
    """
    Select frame z of a multi-frame image and crop it to the given box.
    """
    img.seek(z)
    return img.crop((x1, y1, x2, y2))
def n_frames(img):
    """
    Returns the number of frames in an image stack.

    Seeks forward until PIL signals end-of-stack; any other error
    propagates to the caller.
    """
    img.seek(0)
    count = 1
    while True:
        try:
            img.seek(count)
        # BUGFIX: the original bare `except:` swallowed every error;
        # PIL signals "past the last frame" specifically with EOFError.
        except EOFError:
            break
        count += 1
    return count
def save_image(image):
    """
    Will save an image to file, then return its filename.

    The image is rescaled from 16-bit ('I' mode) to 8-bit grayscale before
    saving as JPEG under cropped/ with a random 6-hex-char name.
    """
    # generate unique token
    unq = 'cropped/' + str(uuid.uuid4().hex[:6].upper()) + '.jpg'
    # ensure cropped exists
    # NOTE(review): ensure_dir comes from the star-import of j.osh above.
    ensure_dir("cropped/")
    # Force 32-bit integer interpretation, then map 16-bit values into 0-255.
    # assumes the source TIFF frames hold 16-bit data -- TODO confirm
    image.mode = 'I'
    image.point(lambda i:i*(1./256)).convert('L').save(unq)
    return unq
def add_crop_to_row(cropped_img, malignant, x1, x2, y1, y2, z):
    """
    Save a cropped image to disk and describe it as a one-row list of dicts.
    """
    saved_name = save_image(cropped_img)
    row = {'image':saved_name, 'malignant':malignant,
           'x1':x1, 'x2':x2, 'y1':y1, 'y2':y2, 'z':z,
           'image_size':cropped_img.size}
    return [row]
def crop_malignant(img, x1, x2, y1, y2, z1, z2):
    """
    Crop the malignant region out of every slice in [z1, z2] (inclusive)
    and return one row per slice.
    """
    rows = []
    for z in range(int(z1), int(z2)+1):
        piece = crop_img(img, x1, x2, y1, y2, z)
        rows += add_crop_to_row(piece, 1, x1, x2, y1, y2, z)
    return rows
def random_z(img):
    """
    Return a random, valid frame index within the image stack.
    """
    last_frame = n_frames(img) - 1
    return randint(0, last_frame)
def gen_crop_coords(img, x1, x2, y1, y2, z, minx=50, miny=50, maxx=200, maxy=200):
    """
    Will offset a given images x and y coordinates to give a random crop.

    Picks a random crop box on the opposite side of the image from the
    given (malignant) box, both horizontally and vertically.

    NOTE(review): the z, minx, miny, maxx and maxy parameters are unused.
    NOTE(review): randint raises ValueError when its range is empty, which
    can happen for boxes near the image border -- confirm inputs.
    """
    # get original crop size
    orig_crop_size = (abs(x1-x2), abs(y1-y2))
    # calculate a new crop size that is either big or small depending on size of orig crop
    div_const = 5
    # NOTE(review): true division -- new_crop_size holds floats, so the
    # returned X2/Y2 are floats as well.
    new_crop_size = ((img.size[0] - orig_crop_size[0])/div_const, (img.size[1] - orig_crop_size[1])/div_const)
    # initialize to zero
    X1, X2, Y1, Y2 = 0, 0, 0, 0
    padding = 8
    x_bound = img.size[0]/2
    if x1 > x_bound and x2 > x_bound:
        # generate to left
        X1 = randint(padding, int(img.size[0]-new_crop_size[0]-max(x1, x2))-padding)
        X2 = X1 + new_crop_size[0]
    else:
        # generate to right
        X1 = randint(int(max(x1, x2))+padding, int(img.size[0]-new_crop_size[0])-padding)
        X2 = X1 + new_crop_size[0]
    y_bound = img.size[1]/2
    if y1 > y_bound and y2 > y_bound:
        # generate below
        Y1 = randint(padding, int(img.size[1]-new_crop_size[1]-max(y1, y2))-padding)
        Y2 = Y1 + new_crop_size[1]
    else:
        # generate above
        Y1 = randint(int(max(y1, y2))+padding, int(img.size[1]-new_crop_size[1])-padding)
        Y2 = Y1 + new_crop_size[1]
    return X1, X2, Y1, Y2
def crop_benign(img, x1, x2, y1, y2, z1, z2):
    """
    Will crop out random benign images - need to edit this!!!!

    Takes three random crops away from the malignant box and returns one
    row (malignant=0) per crop.
    """
    rows = []
    for _ in range(0, 3):
        # Random slice, then a crop box on the far side of the lesion.
        z = random_z(img)
        X1, X2, Y1, Y2 = gen_crop_coords(img, x1, x2, y1, y2, z)
        piece = crop_img(img, X1, X2, Y1, Y2, z)
        rows += add_crop_to_row(piece, 0, X1, X2, Y1, Y2, z)
    return rows
if __name__ == '__main__':
    # Build the training dataset: for every row in coords.csv, crop the
    # annotated malignant region from each z-slice plus random benign crops,
    # and write the combined index to dataset.csv.
    # load data frame
    df = load_csv()
    # output
    output = []
    # Loop over rows in data frame
    for index, row in df.iterrows():
        # extract data
        img_name, x1, x2, y1, y2, z1, z2 = row["Patient"], row["x1"], row["x2"], row["y1"], row["y2"], row["z1"], row["z2"]
        img = load_img("raw/"+img_name+"_raw.tif")
        # crop malignant
        output += crop_malignant(img, x1, x2, y1, y2, z1, z2)
        # crop benign
        output += crop_benign(img, x1, x2, y1, y2, z1, z2)
    # create dataframe
    result = pd.DataFrame(output)
    # save dataframe
    result.to_csv("dataset.csv")
    print("script done.")
import DataBaseHandler
import ModelHandler
import time
from sklearn.linear_model import LinearRegression
# Train a linear-regression model on historical systolic blood-pressure
# samples, then re-tune and re-export it once per hour as new samples arrive.
model = LinearRegression()
sys_samples = DataBaseHandler.get_samples(type="systolicBloodPressure")
x_train, y_train = ModelHandler.get_samples_to_nparray(sys_samples)
ModelHandler.train_model(model, x_train, y_train)
# NOTE(review): infinite polling loop; runs until the process is killed.
while True:
    new_systolic_samples = ModelHandler.get_new_systolic_samples_from_API()
    if new_systolic_samples is not None:
        x_tune, y_tune = ModelHandler.get_samples_to_nparray(new_systolic_samples)
        ModelHandler.tune_model(model, x_tune, y_tune)
        # Export both ONNX (for serving) and pickle (for Python reuse).
        ModelHandler.save_as_onnx(model, "MyModel.onnx")
        ModelHandler.save_model_as_pickle(model, "MyModel.pkl")
    time.sleep(60*60) # One hour.
|
#!/usr/bin/env python2
# coding=utf-8
from __future__ import print_function
import json
import requests
from config import global_config
from bddown_core import Pan, GetFilenameError
from util import logger
def export(links):
    """Send every file behind each pan.baidu.com share link to aria2.

    `pan.info` appears to yield (download_link, filename, remaining_count)
    per file; keep pulling until the share reports no files left -- TODO
    confirm against bddown_core.Pan.
    """
    for link in links:
        pan = Pan(link)
        count = 1
        while count != 0:
            link, filename, count = pan.info
            if not filename and not link:
                raise GetFilenameError("无法获取下载地址或文件名!")
            export_single(filename, link)
def export_single(filename, link):
    """Submit one download (direct link + output filename) to aria2 via JSON-RPC.

    Raises:
        JsonrpcError: when the JSON-RPC endpoint cannot be reached.
    """
    jsonrpc_path = global_config.jsonrpc
    jsonrpc_user = global_config.jsonrpc_user
    jsonrpc_pass = global_config.jsonrpc_pass
    if not jsonrpc_path:
        print("请设置config.ini中的jsonrpc选项")
        exit(1)
    # aria2.addUri request; the spoofed User-Agent and Referer headers are
    # required by pan.baidu.com for direct downloads.
    jsonreq = json.dumps(
        [{
            "jsonrpc": "2.0",
            "method": "aria2.addUri",
            "id": "qwer",
            "params": [
                [link],
                {
                    "out": filename,
                    "header": "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"
                    "\r\nReferer:http://pan.baidu.com/disk/home"
                }]
        }]
    )
    try:
        # Basic auth only when both credentials are configured.
        if jsonrpc_user and jsonrpc_pass:
            response = requests.post(url=jsonrpc_path, data=jsonreq, auth=(jsonrpc_user, jsonrpc_pass))
        else:
            response = requests.post(url=jsonrpc_path, data=jsonreq)
        logger.debug(response.text, extra={"type": "jsonreq", "method": "POST"})
    except requests.ConnectionError as urle:
        print(urle)
        raise JsonrpcError("jsonrpc无法连接,请检查jsonrpc地址是否有误!")
    if response.ok:
        print("已成功添加到jsonrpc\n")
class JsonrpcError(Exception):
    """Raised when the aria2 JSON-RPC endpoint cannot be reached."""
    pass
|
import unittest
from simplecache import SimpleCache
from unittest import TestCase
class BasicTests(TestCase):
    """Behavioural tests for SimpleCache's max_items eviction policy."""
    def test_basic_functionality(self):
        # Fill the cache exactly to capacity and verify lookups and length.
        sc = SimpleCache(max_items=3)
        sc[1] = 'a'
        sc[2] = 'b'
        sc[3] = 'c'
        self.assertEqual(sc[1], 'a')
        self.assertEqual(sc[2], 'b')
        self.assertEqual(sc[3], 'c')
        self.assertEqual(len(sc), 3)
        # One more insert must evict an entry (expected: key 1) while the
        # size stays at max_items.
        sc[4] = 'd'
        self.assertEqual(sc[4], 'd')
        self.assertEqual(len(sc), 3)
        self.assertFalse(1 in sc)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# coding=utf-8
'''
Basic Network Library.
'''
import logging
import os.path
import sys
import urllib
import urllib2
import StringIO
import gzip
logger = logging.getLogger('listenone.' + __name__)
########################################
# network
########################################
def chunk_report(bytes_so_far, chunk_size, total_size):
    """Write an in-place download-progress line to stdout; newline at 100%."""
    percent = round(float(bytes_so_far) / total_size * 100, 2)
    sys.stdout.write(
        "Downloaded %d of %d bytes (%0.2f%%)\r" %
        (bytes_so_far, total_size, percent))
    if bytes_so_far >= total_size:
        sys.stdout.write('\n')
def chunk_read(response, chunk_size=8192, report_hook=None):
    """Read an urllib2-style response to completion in chunks.

    Args:
        response: object with .read(n) and .info().getheader('Content-Length')
            (Python 2 httplib API -- TODO confirm if porting to Python 3).
        chunk_size: bytes requested per read.
        report_hook: optional callable(bytes_so_far, chunk_size, total_size)
            invoked after every non-empty chunk.

    Returns:
        The full body as a single string.
    """
    total_size = int(response.info().getheader('Content-Length').strip())
    bytes_so_far = 0
    # PERF: collect chunks and join once -- repeated `total += chunk`
    # is quadratic in the number of chunks.
    chunks = []
    while 1:
        chunk = response.read(chunk_size)
        bytes_so_far += len(chunk)
        if not chunk:
            break
        chunks.append(chunk)
        if report_hook:
            report_hook(bytes_so_far, chunk_size, total_size)
    return ''.join(chunks)
def h(
        url, v=None, progress=False, extra_headers=None,
        post_handler=None, return_post=False):
    '''
    base http request

    url: target URL
    v: optional dict of POST parameters (GET when omitted)
    progress: show progress information
    extra_headers: optional dict merged over the default headers
    post_handler: optional callable(response, result) run after the read
    return_post: return post_handler's result instead of the body
    '''
    logger.debug('fetching url:' + url)
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) ' + \
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86' + \
        ' Safari/537.36'
    headers = {'User-Agent': user_agent}
    # BUGFIX(idiom): extra_headers previously defaulted to a shared mutable
    # dict; None is the safe sentinel and behaves identically.
    if extra_headers:
        headers.update(extra_headers)
    data = urllib.urlencode(v) if v else None
    req = urllib2.Request(url, data, headers)
    response = urllib2.urlopen(req)
    if progress:
        result = chunk_read(response, report_hook=chunk_report)
    else:
        result = response.read()
    # Transparently decompress gzip-encoded bodies.
    if response.info().get('Content-Encoding') == 'gzip':
        buf = StringIO.StringIO(result)
        f = gzip.GzipFile(fileobj=buf)
        result = f.read()
    if post_handler:
        post_result = post_handler(response, result)
        if return_post:
            return post_result
    return result
def w(url, path, overwrite=False):
    '''
    Download url to path, skipping the download entirely when the file
    already exists unless overwrite is set.
    '''
    if os.path.isfile(path) and not overwrite:
        return
    content = h(url, progress=True)
    with open(path, 'wb') as out:
        out.write(content)
|
# This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from mechdatas import resources, MENU, coins, commands
alive = True
credit = 0.0
def print_report():
    """Print the machine's remaining resources and earned money."""
    report = (f"Water: {resources['water']}ml\n"
              f"Milk: {resources['milk']}ml\n"
              f"Coffee: {resources['coffee']}g\n"
              f"Money: ${resources['money']}")
    print(report)
def operation(name):
    """Sell the named drink: collect coins, check payment, deduct ingredients."""
    global credit
    drink = MENU[name]
    print("Please insert coins.")
    new_credit = get_credit()
    credit += new_credit
    if credit < drink['cost']:
        # NOTE(review): only the newly inserted coins are "refunded"; any
        # prior balance is kept -- confirm this is the intended behaviour.
        credit -= new_credit
        print("Sorry that's not enough money. Money refunded.")
    else:
        resources['money'] += drink['cost']
        credit -= drink['cost']
        credit = round(credit, 2)
        # `credit` now holds the leftover balance reported as change.
        print(f"Here is {credit} in change.")
        ingredient = drink['ingredients']
        for i in ingredient:
            resources[i] -= ingredient[i]
        print(f"Here is your {name} ☕. Enjoy!")
def get_credit():
    """Prompt for each coin type and return the total value inserted."""
    total = 0.0
    for coin, worth in coins.items():
        answer = input(f"how many {coin}?:")
        # Re-prompt until the user types a non-negative integer.
        while not answer.isdigit():
            answer = input(f"how many {coin}?:")
        total += float(answer) * worth
    return total
def check_ressources(drink):
    """Return True when every ingredient for `drink` is in stock,
    printing one message per missing ingredient otherwise."""
    needed = drink['ingredients']
    filled = True
    for item, amount in needed.items():
        if amount > resources[item]:
            print(f"Sorry there is not enough {item}.")
            filled = False
    return filled
def turn_off():
    """Stop the main service loop by clearing the module-level flag."""
    global alive
    alive = False
# Service loop: keep serving until turn_off() clears `alive`.
while alive:
    cmd = input("What would you like? (espresso/latte/cappuccino): ")
    # Re-prompt until the input matches a known command (from mechdatas).
    while cmd not in commands:
        cmd = input("What would you like? (espresso/latte/cappuccino): ")
    if cmd == "off":
        turn_off()
    elif cmd == "report":
        print_report()
    elif cmd == "credit":
        # Show the customer's unspent balance.
        print(credit)
    elif cmd == "latte" or cmd == "espresso" or cmd == "cappuccino":
        if check_ressources(MENU[cmd]):
            operation(cmd)
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# NOTE(review): legacy Django URLconf -- patterns() and string view names
# were removed in Django 1.10; this file requires Django <= 1.9.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'androidforensics.views.home', name='home'),
    #url(r'^apk/', include('apk.urls')),
    url(r'^$', 'apk.views.index', name='index'),
    url(r'^apk/(?P<apk_id>\d+)/classes_menu.json$', 'apk.views.classes_menu_json', name='classes_menu_json'),
    url(r'^apk/(?P<apk_id>\d+)/source.zip$', 'apk.views.classes_zip', name='classes_zip'),
    url(r'^apk/menu.json$', 'apk.views.menu_json', name='menu_json'),
    # NOTE(review): duplicate r'^$' pattern -- 'appindex' can never match
    # because 'index' above already claims the root URL.
    url(r'^$', 'apk.views.appindex', name='appindex'),
    url(r'^showclass/(?P<class_id>\d+)$', 'apk.views.showclass', name='showclass'),
    url(r'^showapk/(?P<apk_id>\d+)$', 'apk.views.showapk', name='showapk'),
    url(r'^dissect/(?P<apk_id>\d+)$', 'apk.views.dissect', name='dissect'),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
"""
利用Grabcut图像分割进行背景替换
"""
import cv2 as cv
import numpy as np
src = cv.imread("../images/master.jpg")
h, w = src.shape[:2]
background = cv.imread("images/land.jpg")
background = cv.resize(background, (w, h))
cv.imshow("input", src)
cv.imshow("background", background)
# 分割,得到mask区域
h, w, ch = src.shape
mask = np.zeros(src.shape[:2], dtype=np.uint8)
rect = (53,12,w-100,h-12)
bgdmodel = np.zeros((1,65),np.float64)
fgdmodel = np.zeros((1,65),np.float64)
cv.grabCut(src,mask,rect,bgdmodel,fgdmodel,5,mode=cv.GC_INIT_WITH_RECT)
mask2 = np.where((mask==1) + (mask==3), 255, 0).astype('uint8')
# 高斯模糊
se = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
cv.dilate(mask2, se, mask2)
mask2 = cv.GaussianBlur(mask2, (5, 5), 0)
cv.imshow('background-mask',mask2)
# 虚化背景
background = cv.GaussianBlur(background, (0, 0), 15)
# 混合图像
result = np.zeros((h, w, ch), dtype=np.uint8)
for row in range(h):
for col in range(w):
w1 = mask2[row, col] / 255.0
b, g, r = src[row, col]
b1,g1,r1 = background[row, col]
b = (1.0-w1) * b1 + b * w1
g = (1.0-w1) * g1 + g * w1
r = (1.0-w1) * r1 + r * w1
result[row, col] = (b, g, r)
cv.imshow("result", result)
cv.waitKey(0)
cv.destroyAllWindows() |
#!/usr/bin/env python3
from collections import namedtuple
Instruction = namedtuple("Instruction", ["opcode", "value"])
OPCODES = {
"acc": lambda pc, acc, value: (pc + 1, acc + value),
"jmp": lambda pc, acc, value: (pc + value, acc),
"nop": lambda pc, acc, value: (pc + 1, acc)
}
def parse_instructions(raw):
    """Turn lines like "jmp -4" into Instruction(opcode, int value) tuples."""
    return [Instruction(op, int(arg))
            for op, arg in (line.split(" ") for line in raw)]
def run_program(instructions, halt_on_loop=False):
    """Run the program until pc leaves the code (or, when halt_on_loop is
    set, until an instruction is about to execute a second time).

    Returns (accumulator, final pc).
    """
    pc, acc = 0, 0
    visited = set()
    program_length = len(instructions)
    while pc < program_length:
        if halt_on_loop and pc in visited:
            break
        visited.add(pc)
        opcode, value = instructions[pc]
        pc, acc = OPCODES[opcode](pc, acc, value)
    return acc, pc
|
import PIL
from PIL import Image
import os
# Target output width in pixels; height is scaled proportionally.
mywidth = 2000
# Source and destination folders for the batch resize.
source_dir ='C:/Users/admin/Desktop/image'
destination_dir ='C:/Users/admin/Desktop/image1'
def resize_pic(old_pic, new_pic, width=None):
    """Resize an image to `width` pixels wide (default: module-level
    `mywidth`), preserving its aspect ratio, and save it to new_pic.
    """
    if width is None:
        width = mywidth
    img = Image.open(old_pic)
    # Scale the height by the same factor as the width.
    scale = width / float(img.size[0])
    new_height = int(float(img.size[1]) * scale)
    # BUGFIX: PIL.Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
    # same filter under its current name.
    img = img.resize((width, new_height), Image.LANCZOS)
    img.save(new_pic)
def entire_directory(source_dir,desti_dir):
    """Resize every file in source_dir into desti_dir (same filenames),
    printing a running count."""
    for count, entry in enumerate(os.listdir(source_dir), start=1):
        resize_pic(source_dir + "/" + entry, desti_dir + "/" + entry)
        print(count,"done")
# Kick off the batch resize for the configured folders.
entire_directory(source_dir,destination_dir)
import ctypes
import unittest
lib = ctypes.CDLL(".libs/topology_cyclecloud.so")
class Test(unittest.TestCase):
    """Exercises topo_get_node_addr from the compiled CycleCloud topology plugin.

    NOTE: Python 2 style -- (c_char_p * 1)("\0" * 512) buffers and
    assertEquals do not work unchanged on Python 3.
    """
    def test_basic(self):
        # Write a nodearray,placementgroup,hostname CSV with deliberately
        # mixed line endings (and no trailing newline on the last row).
        with open("test.csv", "w") as fw:
            fw.write("execute,pg1,ip-0A000000\r\n")
            fw.write("execute,pg0,ip-0A000001\n")
            fw.write("execute,pg0,ip-0A010004")
        def _topo_get_node_addr(nodename, nodearray, placementgroup, hostname):
            # Output buffers the C function fills with address and path.
            paddr = (ctypes.c_char_p * 1)("\0" * 512)
            ppath = (ctypes.c_char_p * 1)("\0" * 512)
            ret = lib.topo_get_node_addr(nodename, paddr, ppath)
            self.assertEquals(ret, 0)
            self.assertEquals(paddr[0], '%s.%s.%s' % (nodearray, placementgroup, hostname))
            self.assertEquals(ppath[0], 'switch.switch.node')
        _topo_get_node_addr("ip-0A000001", "execute", "pg0", "ip-0A000001")
        _topo_get_node_addr("ip-0A000000", "execute", "pg1", "ip-0A000000")
        _topo_get_node_addr("ip-0A010004", "execute", "pg0", "ip-0A010004")
if __name__ == "__main__":
unittest.main() |
class Theater:
    """A cinema venue together with the schedule of movies it shows."""
    def __init__(self, name):
        # Filled in later with MovieTime entries (movie + showtimes).
        self.movietimes = []
        self.name = name
class Movie:
    """A film: its title, running time in minutes, and genre."""
    def __init__(self, name, duration, genre):
        self.genre = genre
        self.duration = duration
        self.name = name
class MovieTime:
    """Pairs one movie with the list of times it is playing."""
    def __init__(self, movie, times):
        self.times = times
        self.movie = movie
# Sample data: three movies and three theaters with their schedules.
movies = [
    Movie("Star Wars", 125, "scifi"),
    Movie("Shaun of the Dead", 100, "romzomcom"),
    Movie("Citizen Kane", 119, "drama")
]
theaters = [
    Theater("McMenamin's Old St. Francis Theater"),
    Theater("Tin Pan Theater"),
    Theater("Tower Theater")
]
# McMenamin's is showing Star Wars and Shaun of the Dead
theaters[0].movietimes.append(MovieTime(movies[0], ["7pm", "9pm", "10pm"]))
theaters[0].movietimes.append(MovieTime(movies[1], ["5pm", "8pm"]))
# Tin Pan is showing Shaun of the Dead and Citizen Kane
theaters[1].movietimes.append(MovieTime(movies[1], ["2pm", "5pm"]))
theaters[1].movietimes.append(MovieTime(movies[2], ["6pm", "8pm", "10pm"]))
# Tower is showing all three
theaters[2].movietimes.append(MovieTime(movies[0], ["3pm"]))
theaters[2].movietimes.append(MovieTime(movies[1], ["5pm", "7pm"]))
theaters[2].movietimes.append(MovieTime(movies[2], ["6pm", "7pm", "8pm"]))
def print_theater(theater):
    """Print a theater's name and every movie it shows with its showtimes."""
    print(f'{theater.name} is showing:')
    for showing in theater.movietimes:
        movie = showing.movie
        times = " ".join(showing.times)
        print(f'  {movie.name} ({movie.genre}, {movie.duration} minutes): {times}')
# Main code: report the schedule of every theater.
for t in theaters:
    print_theater(t)
"""
============================
Author:柠檬班-木森
Time:2020/3/4 15:13
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
# Reference answers for the homework assignment
"""
1、完成上课手机类继承的代码
2、有一组数据,如下格式:
{'case_id': 1, 'method': 'post', 'url': '/member/login', 'data': '123', 'actual': '不通过','excepted': '通过'},
定义一个如下的类,请通过setattr将上面字典中的键值对,
分别设置为类的属性和属性值,键作为属性名,对应的值作为属性值
"""
# -----------------------------第一题:------------------------------------------------
class PhoneV1(object):
    """First-generation phone: can only place calls."""
    def phone(self):
        print("打电话的功能")
# Requirement 2 -- V2: feature phone (adds music and SMS on top of V1)
class PhoneV2(PhoneV1):
    """Feature phone: inherits calling from PhoneV1, adds music and SMS."""
    def music(self):
        print("听音乐的功能")
    def send_msg(self):
        print("发送信息的功能")
class PhoneV3(PhoneV2):
    """Smartphone: inherits V2's features, adds payments and games."""
    def pay(self):
        print("支付功能")
    def game(self):
        print("玩游戏的功能")
# -----------------------------第二题:------------------------------------------------
# The raw test-case record to expose as class attributes.
datas = {'case_id': 1, 'method': 'post', 'url': '/member/login', 'data': '123', 'actual': '不通过', 'excepted': '通过'}
class CaseData:
    """Empty holder class; attributes are attached dynamically below."""
    pass
# Promote each key/value pair of the dict to a class attribute on CaseData.
for key, value in datas.items():
    setattr(CaseData, key, value)
#
# print(CaseData.case_id)
# print(CaseData.method)
# print(CaseData.url)
# print(CaseData.data)
import json
from predict import get_result, get_model
from flask import request, Response
class Server:
    """Thin wrapper binding a loaded model to Flask request handlers."""
    # Shared, lazily-initialized model; call set_model() before predict().
    model = None
    def set_model(self):
        """Load the model once via predict.get_model()."""
        self.model = get_model()
    def server_running(self):
        """Health-check endpoint body."""
        return 'Server is running...'
    def predict(self):
        """Run inference for the file named in the request's JSON body.

        Expects a body like {"filename": ...}; requires an active Flask
        request context.
        """
        incoming = request.get_json()
        filename = incoming['filename']
        print (filename)
        prediction = get_result(filename, self.model)
        print (prediction)
        resp = Response(prediction)
        return resp
|
from song_etl import song_etl
from log_etl import log_etl
def main():
    """Run both ETL pipelines: songs first, then logs."""
    song_etl()
    log_etl()
if __name__ == '__main__':
    main()
|
# Generated by Django 3.0.3 on 2020-03-13 07:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: constrains User.role to admin/shop/customer choices
    (default 'shop')."""
    dependencies = [
        ('eadmin', '0003_auto_20200225_0400'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='role',
            field=models.CharField(choices=[('admin', 'Admin'), ('shop', 'Shop'), ('customer', 'Customer')], default='shop', max_length=10, verbose_name='User Role'),
        ),
    ]
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.models import User, Group
from .models import Project
from .models import Document
from .models import Sheet
from .models import Style
from .models import Task
from .models import Member
from .models import Setting
# Create your views here.
def hello(request):
    """Smoke-test endpoint."""
    return HttpResponse("Hello world!")
def users(request):
    """Return every auth user as a JSON array of field dicts."""
    return JsonResponse(list(User.objects.all().values()), safe=False)
def projects(request):
    """List the projects owned by the user given by ?id=."""
    owner = request.GET.get('id')
    rows = Project.objects.filter(user_id=owner).values()
    return JsonResponse(list(rows), safe=False)
def documents(request):
    """List the documents of the project given by ?id=."""
    project = request.GET.get('id')
    rows = Document.objects.filter(project_id=project).values()
    return JsonResponse(list(rows), safe=False)
def sheets(request):
    """List the sheets of the document given by ?id=."""
    document = request.GET.get('id')
    rows = Sheet.objects.filter(document_id=document).values()
    return JsonResponse(list(rows), safe=False)
def styles(request):
    """List the styles of the sheet given by ?id=."""
    sheet = request.GET.get('id')
    rows = Style.objects.filter(sheet_id=sheet).values()
    return JsonResponse(list(rows), safe=False)
def tasks(request):
    """List the tasks of the sheet given by ?id=."""
    sheet = request.GET.get('id')
    rows = Task.objects.filter(sheet_id=sheet).values()
    return JsonResponse(list(rows), safe=False)
def members(request):
    """List the members of the project given by ?id=."""
    project = request.GET.get('id')
    rows = Member.objects.filter(project_id=project).values()
    return JsonResponse(list(rows), safe=False)
#!/usr/bin/env python
import sys
import base64
import time
import hashlib
import binascii
import re
class Authenticator(object):
    """Authenticator class which generates unique one time use password.

    NOTE(review): the password derivation below is homegrown and does not
    follow RFC 6238 (TOTP) -- it double-hashes with SHA1, slices a hex
    digest, and parses it base 32; tokens are not interoperable with
    standard authenticator apps.
    """

    def __init__(self, secret: str):
        """Creates a new Authenticator instance.

        Args:
            secret (str):
                User secret which is used in generating one
                time password.

        Raises:
            TypeError: if secret is not an ascii, alphabetic string.
            ValueError: if secret has fewer than 8 characters.

        Returns:
            Authenticator instance.
        """
        self._secret = secret
        self.__check_secret(secret)

    def __is_ascii(self, secret: str):
        # True when every character fits in 7-bit ASCII.
        return all(ord(c) < 128 for c in secret)

    def __is_alpha(self, secret: str):
        # True when every character is alphabetic.
        return all(c.isalpha() for c in secret)

    def __check_secret(self, secret: str):
        """Normalize the secret (strip spaces, uppercase, truncate to a
        multiple of 8 characters) and validate it, storing the result in
        self._secret."""
        if not isinstance(secret, str):
            raise TypeError("You must set a str variable as secret!")
        if not self.__is_ascii(secret):
            raise TypeError("You must set an ascii str variable as secret!")
        secret_without_spaces = self.remove_spaces(secret)
        self._secret = self.to_upper_case(secret_without_spaces)
        secret_length = len(self._secret)
        if secret_length < 8:
            raise ValueError("You must set a secret of minimum 8 characters!")
        index = secret_length % 8
        # BUGFIX: only truncate when the length is NOT already a multiple
        # of 8. The old code ran self._secret[:-0] whenever the length was
        # a multiple of 8 (e.g. 16 chars), silently emptying the secret.
        if index:
            self._secret = self._secret[:-index]
        if not self.__is_alpha(self._secret):
            raise TypeError("All characters in the secret must be alphabetic!")

    def remove_spaces(self, secret: str) -> str:
        """Removes empty spaces and non-word characters from given string.

        Args:
            secret (str):
                User secret which is used in generating one
                time password.

        Returns:
            String without empty spaces.
        """
        secret_without_spaces = secret.replace(" ", "")
        secret_without_spaces = re.sub(r"\W", "", secret_without_spaces)
        return secret_without_spaces

    def to_upper_case(self, secret_without_spaces: str) -> str:
        """Updates given string to uppercase without changing.

        Args:
            secret_without_spaces (str):
                User secret which is used in generating one
                time password.

        Returns:
            String in uppercase.
        """
        return secret_without_spaces.upper()

    def decode_with_base32(self, upper_case_secret: str) -> bytes:
        """Creates a new Base32 decoded value from given string.

        Args:
            upper_case_secret (str):
                User secret which is used in generating one
                time password.

        Returns:
            Base32 decoded value.
        """
        return base64.b32decode(upper_case_secret)

    def current_timestamp(self) -> float:
        """Returns the current UNIX time in seconds."""
        return time.time()

    def create_hmac(self, secret: bytes, input: float) -> str:
        """Creates the hash value which is used in creating one time password.

        Args:
            secret (bytes):
                Base32-decoded user secret.
            input (float):
                The value of current UNIX time divided by the delay time.

        Returns:
            SHA1 hash value as a hex string.
        """
        input_str = repr(input).encode("ascii")
        input_hash = hashlib.sha1(secret + input_str).hexdigest().encode("ascii")
        return hashlib.sha1(secret + input_hash).hexdigest()

    def one_time_password(self, delay_time: float = 30.0) -> int:
        """Creates one time password using secret which must be set in constructor.

        Args:
            delay_time (float):
                Optional time interval for token availability.

        Returns:
            One time password as a 6-digit-range int (despite the name,
            callers receive an int, not a str).
        """
        secret_without_spaces = self.remove_spaces(self._secret)
        upper_case_secret = self.to_upper_case(secret_without_spaces)
        secret = self.decode_with_base32(upper_case_secret)
        input = self.current_timestamp() / delay_time
        hmac = self.create_hmac(secret, input)
        # Dynamic truncation: last hex digit picks a 4-char window.
        offset = ord(hmac[len(hmac) - 1]) & 0x0F
        hex_four_characters = binascii.hexlify(hmac[offset : offset + 4].encode())
        password = int(hex_four_characters, 32) % 1000000
        return password
|
# Visual intro to maximum-margin classification: many lines separate the
# blobs equally well, but only one maximizes the margin.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns; sns.set()
# BUGFIX: the private module sklearn.datasets.samples_generator was removed
# in scikit-learn 0.24; make_blobs is importable from sklearn.datasets.
from sklearn.datasets import make_blobs

# Two well-separated 2-D blobs.
X,y = make_blobs(n_samples=100, centers =2, random_state=0, cluster_std=0.50)
plt.scatter(X[:,0], X[:,1], c=y, s=50, cmap='summer');
#plt.show()

# Several candidate separating lines, all consistent with the data.
xfit = np.linspace(-1,3,5)
plt.scatter(X[:,0], X[:,1], c=y, s=30, cmap='summer')
plt.plot([0.6],[2.1], 'x', color='black', markeredgewidth=4, markersize=12)
for m,b in [(1,0.65), (0.5,1.6), (-0.2,2.9)]:
    plt.plot(xfit, m*xfit+b, '-k')
plt.xlim(-1,3,5)
#plt.show()

# The same lines with their margins shaded: the SVM picks the widest one.
xfit = np.linspace(-1,3,5)
plt.scatter(X[:,0], X[:,1], c=y, s=30, cmap='summer')
plt.plot([0.6],[2.1], 'x', color='black', markeredgewidth=4, markersize=12)
for m,b,d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
    yfit= m*xfit+b
    plt.plot(xfit, yfit, '-k')
    plt.fill_between(xfit, yfit-d, yfit+d, edgecolor='none', color ='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5)
plt.show()
"""
Interactive simulation for Monte Hall problem
"""
import random
import simplegui
# global constants
CANVAS_WIDTH = 540
CANVAS_HEIGHT = 180
CONTROL_WIDTH = 100
CENTER_VERT = 0.3    # NOTE(review): unused in the visible code
CENTER_HORIZ = 0.8   # NOTE(review): unused in the visible code
MIN_DOORS = 3
MAX_DOORS = 10
# Game states: pick a door -> stay/switch -> reveal and restart.
SELECT = 0
CHOOSE = 1
SHOW = 2
class MontyHallGUI:
    """
    Interactive demo for Monty Hall

    NOTE: CodeSkulptor/Python 2 code -- simplegui, integer division for
    door geometry, and list-like range() in process_door do not work
    unchanged on Python 3.
    """
    def __init__(self):
        """
        Initialize the simulation
        """
        self._frame = simplegui.create_frame("Monty Hall problem", CANVAS_WIDTH,
                                             CANVAS_HEIGHT, CONTROL_WIDTH)
        self._frame.set_canvas_background("White")
        self._frame.add_button("Clear", self.clear, CONTROL_WIDTH)
        self._frame.add_button("Add door", self.add_door, CONTROL_WIDTH)
        self._frame.set_mouseclick_handler(self.click)
        self._frame.add_label("")
        self._win_label = self._frame.add_label("Wins = 0")
        self._lose_label = self._frame.add_label("Loses = 0")
        self.clear()
        self._frame.set_draw_handler(self.draw)
        self._frame.start()
    def clear(self):
        """
        Clear the simulation: reset state, scores and door count.
        """
        self._game_state = SELECT
        self._wins = 0
        self._loses = 0
        self._num_doors = MIN_DOORS
        self._win_label.set_text("Wins = " + str(self._wins))
        self._lose_label.set_text("Loses = " + str(self._loses))
        self._prize_door = random.randrange(self._num_doors)
    def click(self, pos):
        """
        Convert a canvas click to a door number, reject invalid click when staying or switching
        """
        # Integer division (Python 2) maps the x coordinate to a door index.
        door_width = CANVAS_WIDTH / self._num_doors
        door_num = min(pos[0] / door_width, self._num_doors)
        if self._game_state == SELECT:
            self.process_door(door_num)
        elif self._game_state == CHOOSE:
            # Only the already-selected door (stay) or the offered door
            # (switch) is a legal click in this state.
            if door_num == self._selected_door or door_num == self._show_door:
                self.process_door(door_num)
        elif self._game_state == SHOW:
            self.process_door(door_num)
    def process_door(self, door_num):
        """
        Process a valid door number based on state
        """
        if self._game_state == SELECT:
            self._game_state = CHOOSE
            self._selected_door = door_num
            # _show_door is the alternative offered for switching: the prize
            # door when the first pick was wrong, otherwise a random other
            # door. NOTE: range(...).remove(...) requires Python 2.
            if door_num == self._prize_door:
                show_doors = range(self._num_doors)
                show_doors.remove(door_num)
                self._show_door = random.choice(show_doors)
            else:
                self._show_door = self._prize_door
        elif self._game_state == CHOOSE:
            # The clicked door is the final answer; score it.
            if door_num == self._prize_door:
                self._wins += 1
                self._win_label.set_text("Wins = " + str(self._wins))
            else:
                self._loses += 1
                self._lose_label.set_text("Loses = " + str(self._loses))
            self._game_state = SHOW
        elif self._game_state == SHOW:
            # Any click starts a new round with a fresh prize door.
            self._prize_door = random.randrange(self._num_doors)
            self._game_state = SELECT
    def add_door(self):
        """
        Add a door to the simulation (capped at MAX_DOORS)
        """
        self._num_doors = min(self._num_doors + 1, MAX_DOORS)
    def draw_door(self, canvas, door_pos, color):
        """
        Draw a single door with position and color
        """
        door_width = CANVAS_WIDTH / self._num_doors
        canvas.draw_polygon([door_pos,
                             [door_pos[0] + door_width, door_pos[1]],
                             [door_pos[0] + door_width, door_pos[1] + CANVAS_HEIGHT],
                             [door_pos[0], door_pos[1] + CANVAS_HEIGHT]],
                            4, "Black", color)
    def draw(self, canvas):
        """
        Draw the doors, color-coded by game state
        """
        door_width = CANVAS_WIDTH / self._num_doors
        for door_num in range(self._num_doors):
            door_pos = [door_width * door_num, 0]
            if self._game_state == SELECT:
                self.draw_door(canvas, door_pos, "White")
            elif self._game_state == CHOOSE:
                # Highlight the two clickable doors (stay or switch).
                if door_num == self._selected_door:
                    self.draw_door(canvas, door_pos, "LightGreen")
                elif door_num == self._show_door:
                    self.draw_door(canvas, door_pos, "LightGreen")
                else:
                    self.draw_door(canvas, door_pos, "LightGray")
            elif self._game_state == SHOW:
                # Reveal the prize door in gold.
                if door_num == self._prize_door:
                    self.draw_door(canvas, door_pos, "Gold")
                else:
                    self.draw_door(canvas, door_pos, "LightGray")
# Launch the interactive simulation.
MontyHallGUI()
# Drive a local Chrome instance to Google's home page and type a query.
from selenium import webdriver
# NOTE(review): expects a ChromeDriver binary at ./chromedriver and the
# Selenium 3 API -- find_element_by_xpath was removed in Selenium 4.
browser = webdriver.Chrome('./chromedriver')
browser.get('http://www.google.com')
# This XPath is tied to one specific revision of Google's markup and will
# break whenever the page layout changes -- TODO confirm it still matches.
searchBar = browser.find_element_by_xpath(
    '//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input')
searchBar.send_keys('weather')
|
from .card import Card
from .cardset import CardSet, Hand, Trick
from .deck import CardStack, Deck
from .trump import NonTrump, Trump |
import yaml
import os

# Rewrite scrapinghub.yml with the `apikey` taken from the environment.
### read the current config
with open('scrapinghub.yml', 'r') as f:
    # safe_load: config files need plain data types only, and this avoids
    # yaml.load's arbitrary-object construction / Loader boilerplate
    d = yaml.safe_load(f)
# KeyError here is deliberate: a missing APIKEY should fail loudly
d['apikey'] = os.environ['APIKEY']
### write the updated config back to the same file
with open('scrapinghub.yml', 'w') as f:
    yaml.dump(d, f, default_flow_style=False)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial creation of the UserProfile model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('A_Value', models.BooleanField(default=False)),
                ('B_Value', models.BooleanField(default=False)),
                ('C_Value', models.BooleanField(default=False)),
                ('D_Value', models.BooleanField(default=False)),
                ('Address', models.CharField(max_length=500, blank=True, null=True)),
                # NOTE(review): decimal_places=False evaluates as 0 -- probably
                # meant decimal_places=0; storing phone numbers as decimals
                # also drops leading zeros. Historical migration, left as-is.
                ('Phone', models.DecimalField(decimal_places=False, max_digits=10)),
                # OneToOneField without on_delete is only valid on Django < 2.0
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import yaml
from pathlib import Path

# Strip the volatile 'lastlocation' and 'uuid' keys from every .yml file
# in the current directory, rewriting each file in place.
path = Path('.')
for userDataPath in path.iterdir():
    if (userDataPath.suffix == '.yml'):
        with userDataPath.open() as oldData:
            # safe_load: data-only YAML; also avoids the deprecation
            # warning (PyYAML 5) / TypeError (PyYAML 6) that plain
            # yaml.load() without a Loader produces
            dataMap = yaml.safe_load(oldData)
        dataMap.pop('lastlocation', None)
        dataMap.pop('uuid', None)
        # userDataPath.name is relative to the cwd, which is also the
        # directory being scanned, so this overwrites the source file;
        # the context manager closes the handle even if dump() raises
        with open(userDataPath.name, 'w') as newData:
            yaml.dump(dataMap, newData, default_flow_style=False)
print('done')
|
import urllib.request
import hashlib
import random

# Chinese mobile-number prefixes to probe against the local service.
PREFIX_LIST = [
    186, 158, 135, 159,
    136, 150, 137, 138,
    187, 151, 182, 152,
    139, 183, 188, 134,
    185, 189, 180, 157,
    155, 156, 131, 132,
    133, 130, 181, 176,
    177, 153, 184, 178,
    173, 147, 175, 199,
    166, 170, 198, 171,
    191, 145, 165, 172
]

for prefix in PREFIX_LIST:
    # random 11-digit number starting with this prefix
    mobile = str(prefix * 100000000 + random.randrange(100000000))
    # 'digest' (was 'hash') avoids shadowing the built-in hash()
    digest = hashlib.md5(mobile.encode('utf-8')).hexdigest()
    print(mobile, urllib.request.urlopen("http://localhost:8000/" + digest).read())
#
# Kirk Fay
# Artificial Intelligence
#
# Train a single-layer perceptron (neurolab) on 2-D labeled points read
# from perceptron_data.txt, plotting the raw data and the training error.
import numpy as np
import matplotlib.pyplot as plt
import neurolab as nl
# each row of the file: feature1 feature2 label
text = np.loadtxt('perceptron_data.txt')
# Separate datapoints and labels
data = text[:, :2]
# labels reshaped to a column vector, as neurolab expects one row per sample
labels = text[:, 2].reshape((text.shape[0], 1))
print(data)
print(labels)
plt.figure()
plt.scatter(data[:,0], data[:,1])
plt.xlabel('Dimension 1')
plt.ylabel('Dimension 2')
plt.title('Input data')
plt.show()
# Define minimum and maximum values for each dimension
# NOTE(review): assumes both features lie in [0, 1] -- confirm against
# the contents of perceptron_data.txt.
dim1_min, dim1_max, dim2_min, dim2_max = 0, 1, 0, 1
n_output = labels.shape[1]
print(n_output)
D1 = [dim1_min, dim1_max]
D2 = [dim2_min, dim2_max]
# newp builds a single-layer perceptron over the two input ranges
perceptron = nl.net.newp([D1, D2], n_output)
# train() returns the per-epoch error progression
error_progress = perceptron.train(data, labels, epochs=4, lr=0.01)
plt.figure()
plt.plot(error_progress)
plt.xlabel('epochs')
plt.ylabel('Training error')
plt.title('Training progress')
plt.grid()
plt.show()
import numpy as np
import random
from csv import reader
import math
import copy
from scipy.optimize import minimize
# Load a CSV file
def loadCSV(filename):
    """Read *filename* as CSV and return its rows as lists of strings.

    The handle is managed with a context manager so it is closed even on
    error (the original opened the file and never closed it).
    """
    with open(filename, "rt") as csvfile:
        return list(reader(csvfile))
# Stochastic gradient descent (SGD)
def SGD(X, Y, C, lRate, epochs):
    """Train a linear SVM weight vector with SGD over the hinge loss.

    X : 2-D numpy array, one sample per row
    Y : 1-D array of +/-1 labels aligned with X
    C : penalty constant forwarded to hinge()
    lRate : initial learning rate (decayed every epoch)
    epochs : number of full passes over the shuffled data
    Returns the learned weight vector (numpy array after first update).
    """
    # initialize weight vector
    # (starts as a plain list; becomes an ndarray after the first w - ... step)
    w = [0.0 for i in range(len(X[0]))]
    for epoch in range(epochs):
        print('Epoch: ', epoch)
        d = .5
        # decaying schedule -- note lRate is reassigned each epoch, so the
        # decay compounds on the already-decayed rate rather than the
        # initial one; presumably intentional -- TODO confirm
        lRate = lRate / (1 + ((lRate * epoch) / d))
        # lRate = lRate / (1 + epoch)
        # shuffle X, Y based on the same random seed
        s = np.arange(X.shape[0])
        np.random.shuffle(s)
        X = X[s]
        Y = Y[s]
        for i in range(len(X)):
            x = X[i]
            # per-sample hinge-loss subgradient step
            a = hinge(w, x, Y[i], C)
            w = w - (lRate * a)
        print('\n')
    return w
def hinge(w, X, Y, C):
    """Return the averaged hinge-loss subgradient step for one sample batch.

    The scalar label Y and sample X are promoted to length-1 batches; for
    each sample the contribution is w when the margin is satisfied
    (1 - y * <x, w> <= 0) and w - C*y*x otherwise, averaged over the batch.
    """
    Y = np.array([Y])
    X = np.array([X])
    margins = 1 - (Y * np.dot(X, w))
    accumulated = np.zeros(len(w))
    for idx, margin in enumerate(margins):
        if max(0, margin) == 0:
            # margin satisfied: regularization term only
            accumulated += w
        else:
            # violated margin: include the hinge subgradient
            accumulated += w - (C * Y[idx] * X[idx])
    accumulated /= len(Y)
    return accumulated
def dualSVM(X, Y):
    """Solve the dual soft-margin SVM problem with SLSQP.

    X : 2-D numpy array of samples (rows), Y : 1-D array of +/-1 labels.
    Returns the scipy OptimizeResult; the optimal alphas are in .x.
    """
    Xlen = len(X)
    # Label-scaled Gram matrix: XXYY[i, j] = <x_i, x_j> * y_i * y_j.
    # Vectorized -- replaces the original O(n^2) pair of Python loops
    # with two C-level matrix products producing identical values.
    XXYY = np.dot(X, X.T) * np.outer(Y, Y)
    A = Y[:]
    # Box constraints on alpha; upper bound is the prescribed C value
    bounds = [(0, 100/873)] * Xlen
    # Equality constraint sum_i alpha_i * y_i = 0, with its (constant) jacobian
    constraints = {'type': 'eq', 'fun': lambda alpha: np.dot(A, alpha), 'jac': lambda alpha: A}
    x0 = np.random.rand(Xlen)
    # Minimize the (convex quadratic) negated dual objective
    weights = minimize(lambda alpha: .5 * np.dot(alpha.T, np.dot(XXYY, alpha)) - np.sum(alpha), x0,
                       jac=lambda alpha: np.dot(alpha.T, XXYY) - np.ones(alpha.shape[0]),
                       constraints=constraints, method='SLSQP', bounds=bounds)
    return weights
def dualWeights(X, Y, alpha):
    """Recover the primal weights and bias from a dual SVM solution.

    Returns a vector [w_1, ..., w_d, b] where w = sum_i alpha_i*y_i*x_i
    and b is the mean residual of Y - Xw.
    """
    # w = sum over samples of alpha_i * y_i * x_i
    weights = np.array(np.sum(alpha * Y * X.T, 1))
    # bias: mean residual (the original copied `weights` element-by-element
    # into an identical temp array first -- that loop was redundant)
    bias = np.mean(Y - np.dot(X, weights))
    coeffs = np.append(weights, bias)
    return coeffs
|
from numpy import zeros, array_equal
from ubcs_auxiliary import precision_sleep, interupt_sleep
from time import time
import random
# def test_precision_sleep():
# """ runs multiple tests with fixed maximum allowed error (1 ms) """
# precision = 10.0e-4
# for i in range(100):
# sleep_t = random.randint(1,20)/1000.0
# t1 = time(); precision_sleep(sleep_t); t2 = time(); dt = t2-t1
# # run tests
# assert ((dt - sleep_t) < precision)
|
class Solution:
    def pairSum(self, head: Optional[ListNode]) -> int:
        """Return the maximum twin sum of a linked list of even length.

        Node i is the twin of node (n-1-i); collect every value, then
        pair each first-half value with its mirror from the back.
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        best = 0
        for i in range(len(values) // 2):
            best = max(best, values[i] + values[-1 - i])
        return best
# https://codeforces.com/contest/63/problem/A
def single_integer():
    """Read one line from stdin and parse it as an int."""
    return int(input())


def multi_integer():
    """Read one line of whitespace-separated ints (as a map object)."""
    return map(int, input().split())


def string():
    """Read one raw line."""
    return input()


def multi_string():
    """Read one line split on whitespace."""
    return input().split()


n = single_integer()
row = dict()
# evacuation priority: lower value leaves the ship earlier;
# women and children share the same priority
status_dict = {
    "rat": 1,
    "woman": 2,
    "child": 2,
    "man": 3,
    "captain": 4
}
for _ in range(n):
    name, status = multi_string()
    # group names by priority, keeping input order within each group;
    # setdefault replaces the original's manual "if key present" branch
    row.setdefault(status_dict[status], []).append(name)
for priority in sorted(row):
    for name in row[priority]:
        print(name)
import click
import easy_workflow_manager as ewm


# CLI entry point: pull the latest origin changes into the given branch
# (or, with the default empty argument, whatever branch ewm resolves).
@click.command()
@click.option(
    '--pop-stash', '-p', 'pop_stash', is_flag=True, default=False,
    help='Do a `git stash pop` at the end if a stash was made'
)
@click.argument('branch', nargs=1, default='')
def main(branch, pop_stash):
    """Get latest changes from origin into branch"""
    # update_branch returns a truthy value on success
    success = ewm.update_branch(branch=branch, pop_stash=pop_stash)
    if success:
        print('\nSuccessfully updated {} branch locally'.format(branch))


if __name__ == '__main__':
    main()
|
# Generated by Django 3.1 on 2020-08-25 14:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint Hive.apiary_id at colony.Apiary with DO_NOTHING on delete."""

    dependencies = [
        ('colony', '0004_auto_20200822_1611'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hive',
            name='apiary_id',
            # DO_NOTHING leaves rows referencing a deleted Apiary untouched;
            # referential integrity is then enforced (or not) by the database.
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='colony.apiary', verbose_name='Apiary'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-29 12:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
class Migration(migrations.Migration):
    """Create AuditSurveyReport / AuditSurveyTemplate and link them.

    Reports store their answers, and templates their question structure,
    in MySQL JSON columns.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('datalive_cust_veh', '0045_vehicle_service_due_odo'),
        ('datalive_vehicle_check', '0015_auto_20171203_0839'),
    ]
    operations = [
        migrations.CreateModel(
            name='AuditSurveyReport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('date', models.DateField()),
                ('data', django_mysql.models.JSONField(default=dict)),
                # SET_NULL keeps reports when their author account is removed
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('depot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='audit_reports', to='datalive_cust_veh.VehicleGroup')),
            ],
        ),
        migrations.CreateModel(
            name='AuditSurveyTemplate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('template', django_mysql.models.JSONField(default=dict)),
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='audit_templates', to='datalive_cust_veh.Customer')),
            ],
        ),
        # added after both models exist to avoid a forward reference
        migrations.AddField(
            model_name='auditsurveyreport',
            name='template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reports', to='datalive_vehicle_check.AuditSurveyTemplate'),
        ),
    ]
|
# Read three integers and print them in ascending order:
# smallest first, then the middle value, then the largest.
a=int(input("valor"))
b=int(input("valor"))
c=int(input("valor"))
menor, medio, mayor = sorted((a, b, c))
print(menor)
print(medio)
print(mayor)
"""test.py - integrating xinput.XInputJoystick with pygame for Windows + Xbox 360 controller
Windows Xbox 360 cannot use pygame events for the left and right trigger. The axis doesn't come through distinctly.
This alternative corrects that issue, and adds functions unique to the Xbox controller.
General approach:
1. Detect joysticks.
2. Detect Windows.
3. Detect Xbox 360 controller.
4. Set up the joystick device platform+controller option.
For non-Windows controllers use pygame's joystick.Joystick as usual.
For Xbox 360 controller on Windows use xinput.XInputJoystick:
1. Do "joystick = xinput.XInputJoystick()".
2. Do "if WINDOWS_XBOX_360: joystick.dispatch_events()" each game tick to poll the device.
3. Handle pygame events as usual.
References:
https://github.com/r4dian/Xbox-360-Controller-for-Python
http://support.xbox.com/en-US/xbox-360/accessories/controllers
"""
import xinput
import platform
import pygame
import numpy
from operator import attrgetter
from pygame.locals import *
__version__ = '1.0.0'
class quadCTRL:
    """
    Joystick front-end for quadcopter control.

    Prefers xinput.XInputJoystick for an Xbox 360 pad on Windows (whose
    triggers do not come through pygame events correctly) and falls back
    to pygame's joystick everywhere else. DetectAction() polls events and
    returns the current [right_x, right_y, left_y, left_x] stick vector.
    """
    def __init__(self):
        """Detect the platform/controller combination and set up the device."""
        pygame.init()
        pygame.joystick.init()
        # Initialize a joystick object: grabs the first joystick
        self.max_fps = 60
        self.clock = pygame.time.Clock()
        # two-axis state for each stick, updated in DetectAction()
        self.right_stick = numpy.array(numpy.zeros(2), float)
        self.left_stick = numpy.array(numpy.zeros(2), float)
        #self.memos_stick = numpy.array(numpy.zeros(4), float)
        PLATFORM = platform.uname()[0].upper()
        WINDOWS_PLATFORM = PLATFORM == 'WINDOWS'
        self.WINDOWS_XBOX_360 = False
        JOYSTICK_NAME = ''
        joysticks = xinput.XInputJoystick.enumerate_devices()
        device_numbers = list(map(attrgetter('device_number'), joysticks))
        self.joystick = None
        if device_numbers:
            self.joystick = pygame.joystick.Joystick(device_numbers[0])
            JOYSTICK_NAME = self.joystick.get_name().upper()
            print('Joystick: {} using "{}" device'.format(PLATFORM, JOYSTICK_NAME))
            if 'XBOX 360' in JOYSTICK_NAME and WINDOWS_PLATFORM:
                # Windows + Xbox 360: use xinput so triggers work properly
                self.WINDOWS_XBOX_360 = True
                self.joystick = xinput.XInputJoystick(device_numbers[0])
                print('Using xinput.XInputJoystick')
            else:
                # put other logic here for handling platform + device type in the event loop
                print('Using pygame joystick')
                self.joystick.init()
        else:
            # xinput found nothing: try pygame's first joystick anyway
            print('Try default pygame joystick')
            self.joystick = pygame.joystick.Joystick(0)
            self.joystick.init()
            JOYSTICK_NAME = self.joystick.get_name()
            print('Joystick: {} using "{}" device'.format(PLATFORM, JOYSTICK_NAME))
    def stick_center_snap(self, value, snap=0.2):
        # Feeble attempt to compensate for calibration and loose stick.
        # Values inside the +/-snap dead zone are treated as centered.
        if value >= snap or value <= -snap:
            return value
        else:
            return 0.0
    def DetectAction(self):
        """Poll controller events; return [right_x, right_y, left_y, left_x]."""
        self.clock.tick(self.max_fps)
        if self.WINDOWS_XBOX_360:
            # xinput devices must be polled explicitly each tick
            self.joystick.dispatch_events()
            for e in pygame.event.get():
                #print('event: {}'.format(pygame.event.event_name(e.type)))
                if e.type == JOYAXISMOTION:
                    #print('JOYAXISMOTION: axis {}, value {}'.format(e.axis, e.value))
                    # NOTE(review): axis numbers and sign conventions differ
                    # between this branch (3/4, inverted) and the pygame
                    # branch below (3/2) -- presumably matching each
                    # backend's axis layout; confirm on hardware.
                    if e.axis == 3:
                        self.right_stick[0] = self.stick_center_snap(e.value * -1)
                        #return self.right_stick
                    elif e.axis == 4:
                        self.right_stick[1] = self.stick_center_snap(e.value * -1)
                        #return self.right_stick
                    elif e.axis == 0:
                        self.left_stick[1] = self.stick_center_snap(e.value)
                    elif e.axis == 1:
                        self.left_stick[0] = self.stick_center_snap(e.value)
            return numpy.concatenate((self.right_stick,self.left_stick))
        else:
            for e in pygame.event.get():
                #print('event: {}'.format(pygame.event.event_name(e.type)))
                if e.type == JOYAXISMOTION:
                    #print('JOYAXISMOTION: axis {}, value {}'.format(e.axis, e.value))
                    if e.axis == 3:
                        self.right_stick[0] = self.stick_center_snap(e.value)
                        #return self.right_stick
                    elif e.axis == 2:
                        self.right_stick[1] = self.stick_center_snap(e.value * -1)
                        #return self.right_stick
                    elif e.axis == 0:
                        self.left_stick[1] = self.stick_center_snap(e.value)
                    elif e.axis == 1:
                        self.left_stick[0] = self.stick_center_snap(e.value)
            return numpy.concatenate((self.right_stick,self.left_stick))
    def Destroyer(self):
        """Shut pygame down and release the controller."""
        pygame.quit()
# Generated by Django 3.2.7 on 2021-09-04 19:36
from django.db import migrations, models, transaction
import django.db.models.deletion
from django.contrib.auth.models import User
import random
def create_users(apps, schema_editor):
    """Seed five demo accounts with their Information and Skills rows.

    All passwords are 'factored2021'; skill levels are random in 1-99.
    NOTE(review): this uses the *current* User model instead of the
    historical one (apps.get_model) so that set_password() is available;
    it will break if the concrete user model later diverges.
    """
    with transaction.atomic():
        users = [
            {
                'first_name': 'Sebastián',
                'last_name': 'Castañeda',
                'email': 'sebastian@mail.com',
                'username': 'sebastianc',
                'position': 'Software Developer'
            },
            {
                'first_name': 'Valentina',
                'last_name': 'Ibarra',
                'email': 'valentina@mail.com',
                'username': 'valentinai',
                'position': 'Recruiter'
            },
            {
                'first_name': 'Sara',
                'last_name': 'Hincapié',
                'email': 'sara@mail.com',
                'username': 'sarah',
                'position': 'Product Owner'
            },
            {
                'first_name': 'Juan',
                'last_name': 'Ciro',
                'email': 'juan@mail.com',
                'username': 'juanc',
                'position': 'CTO'
            },
            {
                'first_name': 'David',
                'last_name': 'Leal',
                'email': 'david@mail.com',
                'username': 'david',
                'position': 'Tech Lead'
            }
        ]
        Information = apps.get_model('profiles', 'Information')
        Skills = apps.get_model('profiles', 'Skills')
        for user in users:
            new_user = User()
            new_user.first_name = user['first_name']
            new_user.last_name = user['last_name']
            new_user.email = user['email']
            new_user.username = user['username']
            new_user.set_password('factored2021')
            new_user.save()
            new_information = Information()
            # reuse the pk of the row just saved instead of re-querying
            # the database by username (the original issued two redundant
            # SELECTs per user)
            new_information.user_id = new_user.id
            new_information.position = user['position']
            new_information.avatar = 'https://avatars.dicebear.com/api/avataaars/' + user['username'] + '.svg'
            new_information.save()
            new_skills = Skills()
            new_skills.user_id = new_user.id
            # same randint call order as the original, so seeded RNG output
            # is unchanged
            for skill_field in ('python_xp', 'javascript_xp', 'sql_xp',
                                'java_xp', 'spark_xp', 'html_xp', 'others_xp'):
                setattr(new_skills, skill_field, random.randint(1, 99))
            new_skills.save()
class Migration(migrations.Migration):
    """Create Information and Skills (keyed 1:1 on auth.User) and seed data."""

    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Information',
            fields=[
                # the user FK doubles as the primary key (true 1:1 table);
                # PROTECT blocks deleting a User that still has a profile
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, primary_key=True, serialize=False, to='auth.user')),
                ('position', models.CharField(max_length=100)),
                ('avatar', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Skills',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, primary_key=True, serialize=False, to='auth.user')),
                ('python_xp', models.IntegerField()),
                ('javascript_xp', models.IntegerField()),
                ('sql_xp', models.IntegerField()),
                ('java_xp', models.IntegerField()),
                ('spark_xp', models.IntegerField()),
                ('html_xp', models.IntegerField()),
                ('others_xp', models.IntegerField()),
            ],
        ),
        # data seeding runs after both tables exist
        migrations.RunPython(create_users)
    ]
|
import simplejson as json
import yaml

# Convert the overpassQL syntax definition from YAML to indented JSON.
# Raw strings keep the Windows path separators literal; the runtime path
# values are unchanged ('\s' and '\o' happen not to be escapes, but r'...'
# makes the intent explicit).
with open(r'overpass\syntaxes\overpassQL.yaml', 'r') as origin:
    # safe_load: data-only YAML, and required anyway on PyYAML >= 6 where
    # yaml.load() without a Loader argument raises TypeError
    data = yaml.safe_load(origin)
with open(r"overpass\syntaxes\overpassQL.json", "w") as target:
    target.write(json.dumps(data, indent=3 * ' '))
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import logging
import time
import requests
from datetime import datetime
from googleapiclient.discovery import build_from_document
logger = logging.getLogger(__name__)
class SupportCase:
    """
    Represent a Google Cloud Support Case.
    Attributes
    ----------
    case_number : str
        a unique string of numbers that is the id for the case
    resource_name : str
        a unique string including the org or project id and the case id, examples:
            organizations/12345/cases/67890
            projects/12345/cases/67890
    case_title : str
        the title the user gave the case when they created it
    description : str
        the user"s description of the case as provided in the support ticket
    escalated : bool
        whether or not a case has been escalated. This field doesn"t exist in
        the response until after a case has been escalated. True means the case
        is escalated
    case_creator : str
        name of the user that opened the support case
    create_time : str
        timestamp of when the case was created
    update_time : str
        timestamp of the last update made to the case
    priority : str
        the current priority of the case, represented as P0, P1, P2, P3, or P4
        (translated from the API's S0-S4 severity values)
    state : str
        the status of the support ticket. Can be NEW, IN_PROGRESS_GOOGLE_SUPPORT,
        ACTION_REQUIRED, SOLUTION_PROVIDED, or CLOSED
    comment_list : list
        all public comments made on the case, as the comment resource objects
        returned by the API (not plain strings). Comments are sorted
        with newest comments at the top
    """
    def __init__(self, caseobj):
        """
        Parameters
        ----------
        caseobj : json
            json for an individual case
        """
        MAX_RETRIES = 3
        API_KEY = os.environ.get("API_KEY")
        # Get our discovery doc and build our service
        r = requests.get(
            f"https://cloudsupport.googleapis.com/$discovery/rest?key={API_KEY}&labels=V2_TRUSTED_TESTER&version=v2beta",
            timeout=5)
        r.raise_for_status()
        support_service = build_from_document(r.json())
        # the case id is the numeric tail of ".../cases/<id>"
        self.case_number = re.search("(?:cases/)([0-9]+)", caseobj["name"])[1]
        self.resource_name = caseobj["name"]
        self.case_title = caseobj["displayName"]
        self.description = caseobj["description"]
        # "escalated" is absent from the payload until a case is escalated
        if "escalated" in caseobj:
            self.escalated = caseobj["escalated"]
        else:
            self.escalated = False
        self.case_creator = caseobj["creator"]["displayName"]
        # convert the RFC3339 "Z" suffix into an offset fromisoformat accepts
        self.create_time = str(
            datetime.fromisoformat(caseobj["createTime"].replace("Z",
                                                                 "+00:00")))
        self.update_time = str(
            datetime.fromisoformat(caseobj["updateTime"].replace("Z",
                                                                 "+00:00")))
        # expose the API's S0-S4 severity as a P0-P4 priority
        self.priority = caseobj["severity"].replace("S", "P")
        self.state = caseobj["state"]
        self.comment_list = []
        # page through every comment on the case
        case_comments = support_service.cases().comments()
        request = case_comments.list(parent=self.resource_name)
        while request is not None:
            try:
                comments = request.execute(num_retries=MAX_RETRIES)
            except BrokenPipeError as e:
                # NOTE(review): on a persistent BrokenPipeError this loop
                # retries the same page forever -- consider a retry cap
                error_message = f"{e} : {datetime.now()}"
                logger.error(error_message)
                time.sleep(1)
            else:
                if "comments" in comments:
                    for comment in comments["comments"]:
                        self.comment_list.append(comment)
                # list_next returns None when there are no more pages
                request = case_comments.list_next(request, comments)
|
# -*- coding: utf-8 -*-
"""
admin page schema module.
"""
from pyrin.api.schema.structs import ResultSchema
from pyrin.admin.interface import AbstractAdminPage
from pyrin.admin.exceptions import InvalidAdminPageTypeError
class AdminSchema(ResultSchema):
    """
    admin schema class.

    extends ResultSchema with per-admin-page computed columns.
    """
    def __init__(self, admin, **options):
        """
        initializes an instance of AdminSchema.
        note that at least one of keyword arguments must be provided.
        :param AbstractAdminPage admin: related admin page instance.
        :keyword SECURE_TRUE | SECURE_FALSE readable: specifies that any column or attribute
                                                      which has `allow_read=False` or its name
                                                      starts with underscore `_`, should not
                                                      be included in result dict. defaults to
                                                      `SECURE_TRUE` if not provided.
                                                      it will be used only for entity
                                                      conversion.
        :keyword dict[str, list[str]] | list[str] columns: column names to be included in result.
                                                           it could be a list of column names.
                                                           for example:
                                                           `columns=['id', 'name', 'age']`
                                                           but if you want to include
                                                           relationships, then columns for each
                                                           entity must be provided in a key for
                                                           that entity class name.
                                                           for example if there is `CarEntity` and
                                                           `PersonEntity`, it should be like this:
                                                           `columns=dict(CarEntity=
                                                                         ['id', 'name'],
                                                                         PersonEntity=
                                                                         ['id', 'age'])`
                                                           if provided column names are not
                                                           available in result, they will be
                                                           ignored.
        :note columns: dict[str entity_class_name, list[str column_name]] | list[str column_name]
        :keyword dict[str, dict[str, str]] | dict[str, str] rename: column names that must be
                                                                    renamed in the result.
                                                                    it could be a dict with keys
                                                                    as original column names and
                                                                    values as new column names
                                                                    that should be exposed instead
                                                                    of original column names.
                                                                    for example:
                                                                    `rename=dict(age='new_age',
                                                                                 name='new_name')`
                                                                    but if you want to include
                                                                    relationships, then you must
                                                                    provide a dict containing
                                                                    entity class name as key and
                                                                    for value, another dict
                                                                    containing original column
                                                                    names as keys, and column
                                                                    names that must be exposed
                                                                    instead of original names,
                                                                    as values. for example
                                                                    if there is `CarEntity` and `
                                                                    PersonEntity`, it should be
                                                                    like this:
                                                                    `rename=
                                                                     dict(CarEntity=
                                                                          dict(name='new_name'),
                                                                          PersonEntity=
                                                                          dict(age='new_age')`
                                                                    then, the value of `name`
                                                                    column in result will be
                                                                    returned as `new_name` column.
                                                                    and also value of `age` column
                                                                    in result will be returned as
                                                                    'new_age' column. if provided
                                                                    rename columns are not
                                                                    available in result, they
                                                                    will be ignored.
        :note rename: dict[str entity_class_name, dict[str original_column, str new_column]] |
                      dict[str original_column, str new_column]
        :keyword dict[str, list[str]] | list[str] exclude: column names to be excluded from
                                                           result. it could be a list of column
                                                           names. for example:
                                                           `exclude=['id', 'name', 'age']`
                                                           but if you want to include
                                                           relationships, then columns for each
                                                           entity must be provided in a key for
                                                           that entity class name.
                                                           for example if there is `CarEntity`
                                                           and `PersonEntity`, it should be
                                                           like this:
                                                           `exclude=dict(CarEntity=
                                                                         ['id', 'name'],
                                                                         PersonEntity=
                                                                         ['id', 'age'])`
                                                           if provided excluded columns are not
                                                           available in result, they will be
                                                           ignored.
        :note exclude: dict[str entity_class_name, list[str column_name]] | list[str column_name]
        :keyword int depth: a value indicating the depth for conversion.
                            for example if entity A has a relationship with
                            entity B and there is a list of B in A, if `depth=0`
                            is provided, then just columns of A will be available
                            in result dict, but if `depth=1` is provided, then all
                            B entities in A will also be included in the result dict.
                            actually, `depth` specifies that relationships in an
                            entity should be followed by how much depth.
                            note that, if `columns` is also provided, it is required to
                            specify relationship property names in provided columns.
                            otherwise they won't be included even if `depth` is provided.
                            defaults to `default_depth` value of database config store.
                            please be careful on increasing `depth`, it could fail
                            application if set to higher values. choose it wisely.
                            normally the maximum acceptable `depth` would be 2 or 3.
                            there is a hard limit for max valid `depth` which is set
                            in `ConverterMixin.MAX_DEPTH` class variable. providing higher
                            `depth` value than this limit, will cause an error.
                            it will be used only for entity conversion.
        :keyword bool indexed: specifies that list results must
                               include an extra field as row index.
                               the name of the index field and the initial value
                               of index could be provided by `index_name` and
                               `start_index` respectively. `indexed` keyword has
                               only effect if the returning result contains a list
                               of objects.
        :keyword str index_name: name of the extra field to contain
                                 the row index of each result. if not provided
                                 defaults to `row_num` value.
        :keyword int start_index: the initial value of row index. if not
                                  provided, starts from 1.
        :raises InvalidAdminPageTypeError: invalid admin page type error.
        :raises SecureBooleanIsRequiredError: secure boolean is required error.
        :raises InvalidStartIndexError: invalid start index error.
        """
        if not isinstance(admin, AbstractAdminPage):
            raise InvalidAdminPageTypeError('Input parameter [{admin}] is '
                                            'not an instance of [{base}].'
                                            .format(admin=admin, base=AbstractAdminPage))
        super().__init__(**options)
        # keep a reference to the owning admin page; it supplies the
        # computed-column methods used in get_computed_row_columns()
        self._admin = admin
    def get_computed_row_columns(self, row, **options):
        """
        gets a dict containing all computed columns to be added to the result.
        note that the result dict should not contain any `BaseEntity` or
        `ROW_RESULT` values, otherwise a max recursion error may occur.
        :param ROW_RESULT row: the actual row result to be processed.
        :rtype: dict
        """
        # one entry per admin-page method, keyed by the method name
        result = {}
        for method in self._admin.method_names:
            result[method] = self._admin.call_method(method, row)
        return result
|
import numpy as np
def quantizeMatrix(original_values, scale, zero_point):
    """Affine-quantize real values into the uint8 range [0, 255].

    quantized = round(clamp(zero_point + original / scale, 0, 255));
    np.clip replaces the original's nested maximum/minimum pair.
    Returns a float array of the rounded quantized levels.
    """
    transformed_val = zero_point + original_values / scale
    return np.around(np.clip(transformed_val, 0, 255))
def getFinalScale(real_multiplier):
    """Convert a real multiplier in (0, 1] to Q31 fixed point plus a shift.

    Returns (quantized_value, nudge_factor) such that
    real_multiplier ~= quantized_value / 2**(31 + nudge_factor).
    Raises ValueError for multipliers outside the representable range.
    """
    if(real_multiplier > 1 or real_multiplier < 0):
        raise ValueError("Scale is outside of the required range: ", real_multiplier)
    if real_multiplier == 0:
        # the original looped forever on 0: doubling can never reach 0.5
        raise ValueError("Scale is outside of the required range: ", real_multiplier)
    nudge_factor = 0
    # normalize into [0.5, 1) so the fixed-point value uses full precision
    while (real_multiplier < 0.5):
        real_multiplier *= 2
        nudge_factor += 1
    quantized_value = round(real_multiplier * (2**31))
    if(quantized_value > (2**31)):
        raise ValueError("Something went wrong with scale quantization: ", quantized_value)
    if (quantized_value == 2**31):
        # rounding hit exactly 1.0 in Q31: halve and shift one less.
        # Integer division keeps the return type int (the original's /=
        # silently turned the result into a float here).
        quantized_value //= 2
        nudge_factor -= 1
    if (nudge_factor < 0):
        raise ValueError("Something went wrong with scale quantization: ", nudge_factor)
    if (quantized_value >= (2**31)):
        raise ValueError("Something went wrong with scale quantization: ", quantized_value)
    return(quantized_value, nudge_factor)
def findScaleAndZeroPoint(max, min):
    """Compute the affine (scale, zero_point) mapping [min, max] to uint8.

    scale converts one quantized step back to a real increment; the zero
    point is the (rounded, clamped) quantized level of real value 0.
    Note: the parameter names shadow the builtins max/min inside this
    body -- kept unchanged for interface compatibility.
    """
    qmin = 0
    qmax = 255
    scale = (max - min) / (qmax - qmin)
    # the (possibly fractional) level that real 0 would map to
    initial_zero_point = qmin - min / scale
    # round first, then clamp into [qmin, qmax]: the median of the three
    # values is exactly the clamped result
    nudged_zero_point = sorted((qmin, round(initial_zero_point), qmax))[1]
    return(scale, nudged_zero_point)
# Demo: fully-quantized 1x4 @ 4x4 matrix multiply, compared to float result.
right = np.random.rand(4,4)
left = np.random.rand(1,4)
result = np.dot(left, right)
print("Input: \n", right)
print("Weights: ", left)
print("Result: ", result)
minimum_values = []
maximum_values = []
# each range is widened to include 0 so the real value 0 is exactly
# representable after quantization
minimum_values.append(np.amin(np.array([0, np.amin(right)])))
maximum_values.append(np.amax(np.array([0, np.amax(right)])))
minimum_values.append(np.amin(np.array([0, np.amin(left)])))
maximum_values.append(np.amax(np.array([0, np.amax(left)])))
minimum_values.append(np.amin(np.array([0, np.amin(result)])))
maximum_values.append(np.amax(np.array([0, np.amax(result)])))
print("Mins: ", minimum_values)
print("Maxs: ", maximum_values)
quantized_scales = []
quantized_zeros = []
qmin = 0
qmax = 255
# one (scale, zero_point) pair per tensor: input, weights, output
for layer_index in range(len(minimum_values)):
    scale, nudged_zero_point = findScaleAndZeroPoint(maximum_values[layer_index], minimum_values[layer_index])
    quantized_scales.append(scale)
    quantized_zeros.append(nudged_zero_point)
print("Scales: ", quantized_scales)
print("Zeroes: ", quantized_zeros)
quantized_right = quantizeMatrix(right, quantized_scales[0], quantized_zeros[0])
quantized_left = quantizeMatrix(left, quantized_scales[1], quantized_zeros[1])
# combined rescaling factor that maps the uint8 product onto the output scale
real_multiplier = quantized_scales[1] * quantized_scales[0] / quantized_scales[2]
quantized_scale, shift_amount = getFinalScale(real_multiplier)
print("Scaled scaling constant: ", quantized_scale, " shifted ", shift_amount, " times.")
print("All offline work done now! The following calculations will be done one the fly in the NN.")
# The zero point addition (subtraction) can be optimized.
acc = np.dot(quantized_left - quantized_zeros[1], quantized_right - quantized_zeros[0])
acc = acc * quantized_scale
# dividing by 2^(31+shift) undoes the Q31 fixed-point scaling plus the
# normalization shifts applied inside getFinalScale
acc = acc / 2**(31+shift_amount)
acc = np.round(acc)
acc = acc + quantized_zeros[2]
quantized_result = acc
dequantized_result = quantized_scales[2]*(quantized_result - quantized_zeros[2])
print("Result is: ", quantized_result)
print("Dequantized result is: ", dequantized_result)
print("Difference from the real result is: %0.3f%%" % np.average(np.abs(result-dequantized_result)/result*100))
"""
To see how the scale quantization works
#real_value = scale * (quantized_value - zero_point)
print("Correct result: ",0.2 * (22 - 2))
quantized_scale, shift_amount = getFinalScale(0.2)
print(quantized_scale * (22 - 2) / (2**31 * 2**shift_amount))
"""
# -*- coding: utf-8 -*-
##############################################################################
#Author:QQ173782910
##############################################################################
CLIENT_NAME = 'wrobot'#NOTE: this is the project name
DEBUG='1'
L_no=['newscontent','text_contents']#secondary-text fields that are skipped during checking
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Simple "bataille" (war) card game
import random
# deck description: [suits, ranks]; the rank index defines card strength
cartes = [
    [
        "trefle", "pique", "coeur", "carreau"
    ],
    [
        "7", "8", "9", "10", "valet", "reine", "roi", "as"
    ]]
def tirerCarte(cartesDictionnaire):
    """Draw one distinct card for each of the two players.

    cartesDictionnaire is [suits, ranks]; returns a list of two dicts
    with keys "couleur" and "valeur". Player 2's draw is repeated until
    it differs from player 1's, so both never hold the same card.
    """
    # player 1 draws a card (the original indexed the module-level global
    # `cartes` here; using the parameter makes the function self-contained)
    couleurRandom1 = cartesDictionnaire[0][random.randint(0, len(cartesDictionnaire[0]) - 1)]
    valeurRandom1 = cartesDictionnaire[1][random.randint(0, len(cartesDictionnaire[1]) - 1)]
    while True:
        # player 2 draws a card
        couleurRandom2 = cartesDictionnaire[0][random.randint(0, len(cartesDictionnaire[0]) - 1)]
        valeurRandom2 = cartesDictionnaire[1][random.randint(0, len(cartesDictionnaire[1]) - 1)]
        # value equality (==) instead of the original's `is` identity test,
        # which only worked because both operands came from the same list
        # objects; the original's extra in-loop re-draw was dead work (the
        # next loop iteration overwrote it) and has been removed
        if valeurRandom1 != valeurRandom2 or couleurRandom1 != couleurRandom2:
            break
    # return the two cards as a list of two dictionaries
    carteJoueur1 = {
        "couleur": couleurRandom1,
        "valeur": valeurRandom1
    }
    carteJoueur2 = {
        "couleur": couleurRandom2,
        "valeur": valeurRandom2
    }
    return [carteJoueur1, carteJoueur2]
def bataille(listeCartes):
    """Play one round of "bataille": draw a card per player and print the winner.

    Args:
        listeCartes: deck as [suits, values]; values are ordered weakest-first,
            so the higher index in listeCartes[1] wins.
    """
    # BUG FIX: was tirerCarte(cartes) — use the parameter, not the global.
    cartesDictionnaire = tirerCarte(listeCartes)
    # Player A takes the first card, player B the second.
    j1 = cartesDictionnaire[0]
    j2 = cartesDictionnaire[1]
    # Rank each card by its position in the ordered value list.
    indexJ1 = listeCartes[1].index(j1["valeur"])
    indexJ2 = listeCartes[1].index(j2["valeur"])
    print("Le joueur A a tire la carte", j1["valeur"], "de", j1["couleur"])
    print("Le joueur B a tire la carte", j2["valeur"], "de", j2["couleur"])
    if indexJ1 > indexJ2:
        print("Le joueur A a gagne")
    elif indexJ1 < indexJ2:
        print("Le joueur B a gagne")
    else:
        print("Bataille !")

# Play one game.
bataille(cartes)
|
#player number two
#
# RTcmix granular-synthesis score. `x` is the running start time (seconds);
# each section schedules GRANULATE notes and advances x by a random step.
# Depends on the project modules stereoInsurance/glassesGran and rtcmix.
from rtcmix import *
import random
import math
from stereoInsurance import stereoInsurance
#stereoInsurance(left, right, minPan, maxPan) where left and right are values 0-7
from glassesGran import granularGlasses
# granularGlasses(start, dur, grainTrans, chanX, chanY, searchTuple)
# comment/uncomment for send to stereo
#set_option("device = Soundflower (16ch)")
rtsetparams(44100, 8)
load("GRANULATE")
#set_option("clobber = on", "play = off")
#rtoutput("/Users/anstepp/Desktop/glassesTwo.aif")
random.seed(400) # 400
x = 0
while x < 500:
    duration = 30
    # Section 1: 0 <= x < 20 — 'weizen' glasses, transposed down 3.
    if x < 20:
        minPan = 7
        maxPan = 9
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        searchTuple = ('weizen', )
        transposition = -3
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        # NOTE(review): `is True` assumes stereoInsurance returns the bool
        # singleton; kept as-is throughout to preserve behavior.
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(6, 7)
    # Section 2: 20 < x < 40 — 'snifter' glasses.
    while 20 < x < 40:
        duration = 25
        minPan = 6  # BUG FIX: was `minpan = 6` (typo) which left minPan stuck at 7
        maxPan = 10
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        searchTuple = ('snifter', )
        transposition = -2
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(5, 7)
    # Section 3: 40 < x < 60 — 'tulip' glasses.
    # NOTE(review): no `duration` assignment here, so the previous section's
    # value (25) carries over — confirm this is intentional.
    while 40 < x < 60:
        minPan = 0
        maxPan = 5
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        searchTuple = ('tulip', )
        transposition = -1
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(4, 7)
    # Section 4: 60 < x < 80 — 'chalice' glasses.
    # NOTE(review): minPan is inherited from the previous section (0).
    while 60 < x < 80:
        maxPan = 6
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        searchTuple = ('chalice', )
        transposition = 0
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(4, 6)
    # Section 5: 80 < x < 100 — all glasses, shorter grains.
    while 80 < x < 100:
        duration = 10
        maxPan = 7
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        searchTuple = ('all', )
        transposition = 1
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(3, 5)
    # Silence: 100 < x < 220 — advance time without scheduling anything.
    while 100 < x < 220:
        x += 1
    # Section 6: 220 < x < 310.
    while 220 < x < 310:
        duration = 5
        maxPan = 7
        searchTuple = ('all', )
        transposition = 2
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(4, 9)
    # Section 7: 310 < x < 430.
    while 310 < x < 430:
        minPan = 3
        maxPan = 5
        searchTuple = ('all', )
        transposition = 3
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(4, 9)
    # Section 8: 430 < x < 500 — long closing grains, random downward transposition.
    while 430 < x < 500:
        duration = 45
        minPan = 4
        maxPan = 6
        transposition = random.choice([-4, -3])
        searchTuple = ('all', )
        left = random.randint(minPan, maxPan)
        right = random.randint(minPan, maxPan)
        testVar, xChan, yChan = stereoInsurance(left, right, minPan, maxPan)
        if testVar is True:
            granularGlasses(x, duration, transposition, xChan, yChan, searchTuple)
        x += random.uniform(4, 9)
# Generated by Django 3.2.5 on 2021-08-05 16:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Deposition_RF log table
    for RF-sputtering deposition runs. Generated code — do not hand-edit logic."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Deposition_RF',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the default is a *fixed* date captured when the
                # migration was generated (presumably from a date.today()-style
                # model default) — it will not track the current date.
                ('day', models.DateField(default=datetime.date(2021, 8, 6), verbose_name='Date')),
                ('user', models.CharField(max_length=20, verbose_name='User')),
                ('material', models.CharField(choices=[('SiO2', 'SiO2'), ('Si', 'Si')], default='SiO2', max_length=10, verbose_name='Material')),
                ('power', models.IntegerField(default=250, verbose_name='Power (W)')),
                ('pressure', models.FloatField(default=0.84, verbose_name='Pressure (mTorr)')),
                ('mfc_flow', models.FloatField(default=10.0, verbose_name='MFC flow (sccm)')),
                # Default deposition time: 30 minutes.
                ('deposition_time', models.DurationField(default=datetime.timedelta(seconds=1800), verbose_name='Deposition time')),
                ('thickness', models.FloatField(verbose_name='Thickness (nm)')),
                ('comment', models.CharField(max_length=256, verbose_name=' Comment')),
            ],
        ),
    ]
|
# Test script for CounterACT: verifies connectivity to the m3sp server.
import urllib.request
import json
import logging

logging.info('===>Starting m3sp Test Script')
# Server configuration fields will be available in the 'params' dictionary
# (injected by the CounterACT Connect framework at run time).
base_url = params['connect_m3sp_url']
headers = {
    'Content-Type': "application/json",
    'charset': 'utf-8',
    'User-Agent': "FSCT/1.16.2020"
}
request = urllib.request.Request(base_url, headers=headers)
# Use a context manager so the HTTP connection is closed deterministically
# (the original leaked the response object).
with urllib.request.urlopen(request) as resp:
    status = resp.getcode()
# Return the 'response' dictionary, must have a 'succeeded' field.
response = {}
if status == 200:
    response['succeeded'] = True
    response['result_msg'] = 'Successfully connected.'  # fixed typo: was "Successfull"
else:
    response['succeeded'] = False
    response['result_msg'] = 'Could not connect to m3sp Server'
logging.info('===>Ending m3sp Test Script')
|
import torch
import torch.nn as nn
#from utils import ExitBlock
from pthflops import count_ops
import torchvision.models as models
import numpy as np
#import config
class ConvBasic(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block (no conv bias)."""

    def __init__(self, nIn, nOut, kernel=3, stride=1,
                 padding=1):
        super(ConvBasic, self).__init__()
        conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride,
                         padding=padding, bias=False)
        # In-place ReLU keeps memory usage down.
        self.net = nn.Sequential(conv, nn.BatchNorm2d(nOut), nn.ReLU(True))

    def forward(self, x):
        """Apply conv, batch-norm and ReLU to `x` and return the result."""
        return self.net(x)
def conv_bn(inp, oup, stride):
    """3x3 conv (pad 1, no bias) + BatchNorm + ReLU6, MobileNet-style.

    BUG FIX: this function was defined twice, identically, back to back;
    the redundant duplicate definition has been removed.
    """
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )
def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) conv + BatchNorm + ReLU6, MobileNet-style."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
class ExitBlock(nn.Module):
    """
    Early-exit head: a small branch (optional conv + pooling, then a linear
    classifier) attached mid-network so inference can stop early once the
    branch's classification confidence reaches a threshold.
    """
    def __init__(self, n_classes, input_shape, exit_type, device):
        super(ExitBlock, self).__init__()
        _, channel, width, height = input_shape
        """
        Args:
            n_classes: (int) number of output classes.
            input_shape: (tuple) (batch, channel, width, height) of the feature
                map that arrives at this branch.
            exit_type: (str) "conv" adds a conv+pool stage before the linear
                classifier; anything else uses only adaptive pooling + fc.
            device: torch device the branch's modules and probe tensor live on.
        """
        self.expansion = 1  # NOTE(review): unused here — presumably kept for parity with resnet-style blocks; confirm
        self.device = device
        self.layers = nn.ModuleList()
        # creates a random input sample to find out input shape in order to define the model architecture.
        x = torch.rand(1, channel, width, height).to(device)
        # Optional downsampling stage: stride-2 conv followed by 2x2 avg-pool.
        self.conv = nn.Sequential(
            ConvBasic(channel, channel, kernel=3, stride=2, padding=1),
            nn.AvgPool2d(2),)
        #gives the opportunity to add conv layers in the branch, or only fully-connected layers
        if (exit_type == "conv"):
            self.layers.append(self.conv)
        else:
            self.layers.append(nn.AdaptiveAvgPool2d(2))
        # Run the probe tensor through the branch to learn the flattened size.
        feature_shape = nn.Sequential(*self.layers).to(device)(x).shape
        total_neurons = feature_shape[1]*feature_shape[2]*feature_shape[3] # computes the input neurons of the fc layer
        self.layers = self.layers.to(device)
        self.classifier = nn.Linear(total_neurons , n_classes).to(device) # final fully-connected classification head
    def forward(self, x):
        # Apply the branch stages, flatten, then classify.
        for layer in self.layers:
            x = layer(x)
        x = x.view(x.size(0), -1)
        return self.classifier(x)
class B_MobileNet(nn.Module):
    """MobileNetV2 backbone with early-exit side branches (BranchyNet style).

    ExitBlock heads are inserted between feature blocks, either at explicit
    `branches_positions` or where cumulative FLOPs cross thresholds derived
    from `distribution` ("pareto", "fine", "linear", or golden-ratio fallback).
    At inference (`forward`), computation stops at the first branch whose
    softmax confidence exceeds `p_tar`.
    """
    def __init__(self, n_classes: int,
                 pretrained: bool, n_branches: int, img_dim: int,
                 exit_type: str, device, branches_positions=None, distribution="linear"):
        super(B_MobileNet, self).__init__()
        self.n_classes = n_classes
        self.pretrained = pretrained
        self.n_branches = n_branches
        self.img_dim = img_dim
        self.exit_type = exit_type
        self.branches_positions = branches_positions
        self.distribution = distribution
        self.softmax = nn.Softmax(dim=1)
        self.device = device
        self.model = self.initialize_model()
        self.n_blocks = len(list(self.model.features))
        self.insertBranches()
    def initialize_model(self):
        """Load (optionally pretrained) MobileNetV2 and resize its classifier head."""
        model = models.mobilenet_v2(pretrained=self.pretrained)
        model.classifier[1] = nn.Linear(model.classifier[1].in_features, self.n_classes)
        return model.to(self.device)
    def countFlops(self):
        """Return (per-block FLOPs dict, cumulative FLOPs dict, total FLOPs)
        for the backbone's feature blocks, keyed 1..n_blocks."""
        x = torch.rand(1, 3, self.img_dim, self.img_dim).to(self.device)
        flops_count_dict = {}
        flops_acc_dict = {}
        total_flops = 0
        for i, layer in enumerate(self.model.features, 1):
            ops, all_data = count_ops(layer, x, print_readable=False, verbose=False)
            x = layer(x)  # advance the probe so the next block sees the right shape
            flops_count_dict[i] = ops
            total_flops += ops
            flops_acc_dict[i] = total_flops
        return flops_count_dict, flops_acc_dict, total_flops
    def set_thresholds(self, total_flops):
        """Compute cumulative-FLOP thresholds at which branches are inserted,
        according to the configured `distribution`."""
        gold_rate = 1.61803398875
        flop_margin = 1.0 / (self.n_branches+1)
        self.threshold = []
        self.percentage_threshold = []
        for i in range(self.n_branches):
            if (self.distribution == 'pareto'):
                self.threshold.append(total_flops * (1 - (0.8**(i+1))))
                self.percentage_threshold.append(1 - (0.8**(i+1)))
            elif (self.distribution == 'fine'):
                self.threshold.append(total_flops * (1 - (0.95**(i+1))))
                self.percentage_threshold.append(1 - (0.95**(i+1)))
            elif (self.distribution == 'linear'):
                self.threshold.append(total_flops * flop_margin * (i+1))
                self.percentage_threshold.append(flop_margin * (i+1))
            else:
                # BUG FIX: was `self.num_ee`, an attribute that is never defined
                # (AttributeError at runtime); use n_branches, consistent with
                # the percentage_threshold line below.
                self.threshold.append(total_flops * (gold_rate**(i - self.n_branches)))
                self.percentage_threshold.append(gold_rate**(i - self.n_branches))
    def is_suitable_for_exit(self, i, flop_count):
        """True when block `i` (with cumulative FLOPs `flop_count`) should host a branch."""
        if (self.branches_positions is None):
            return self.stage_id < self.n_branches and flop_count >= self.threshold[self.stage_id]
        else:
            return i in self.branches_positions
    def add_early_exit(self, layer):
        """Close the current stage and attach an ExitBlock sized to its output."""
        self.stages.append(nn.Sequential(*self.layers))
        # Probe with a random input to learn the feature shape at this depth.
        x = torch.rand(1, 3, self.img_dim, self.img_dim).to(self.device)
        feature_shape = nn.Sequential(*self.stages)(x).shape
        self.exits.append(ExitBlock(self.n_classes, feature_shape, self.exit_type, self.device))
        self.stage_id += 1
        self.layers = nn.ModuleList()
    def insertBranches(self):
        """Partition the backbone into stages and insert the early-exit branches."""
        self.stages = nn.ModuleList()
        self.exits = nn.ModuleList()
        self.layers = nn.ModuleList()
        self.stage_id = 0
        flops_count_dict, flops_acc_dict, total_flops = self.countFlops()
        self.set_thresholds(total_flops)
        for i, layer in enumerate(self.model.features, 1):
            if (self.is_suitable_for_exit(i, flops_acc_dict[i])):
                self.add_early_exit(layer)
            else:
                self.layers.append(layer)
        self.stages.append(nn.Sequential(*self.layers))
        self.fully_connected = self.model.classifier
    def forwardTrain(self, x):
        """Run every stage and branch; return (outputs, confidences, classes)
        with one entry per branch plus one for the final classifier."""
        output_list, conf_list, class_list = [], [], []
        for i, exitBlock in enumerate(self.exits):
            x = self.stages[i](x)
            output_branch = exitBlock(x)
            output_list.append(output_branch)
            conf, infered_class = torch.max(self.softmax(output_branch), 1)
            conf_list.append(conf)
            class_list.append(infered_class)
        x = self.stages[-1](x)
        x = x.mean(3).mean(2)  # global average pool over H and W
        output = self.fully_connected(x)
        infered_conf, infered_class = torch.max(self.softmax(output), 1)
        output_list.append(output)
        conf_list.append(infered_conf)
        class_list.append(infered_class)
        return output_list, conf_list, class_list
    def forwardEval(self, x, p_tar):
        """Early-exit inference: return the first branch output whose softmax
        confidence exceeds `p_tar`, along with the confidences seen so far."""
        output_list, conf_list, class_list = [], [], []
        for i, exitBlock in enumerate(self.exits):
            x = self.stages[i](x)
            output_branch = exitBlock(x)
            conf, infered_class = torch.max(self.softmax(output_branch), 1)
            if (conf.item() > p_tar):
                return output_branch, conf_list, infered_class
            else:
                output_list.append(output_branch)
                conf_list.append(conf.item())
                class_list.append(infered_class)
        # NOTE(review): when no branch is confident enough this returns the raw
        # features `x` without running the final stage/classifier (unlike
        # forwardTrain) — confirm the caller expects that; behavior kept as-is.
        return x, conf_list, None
    def forward(self, x, p_tar=0.5):
        """Inference entry point; delegates to forwardEval with threshold `p_tar`."""
        return self.forwardEval(x, p_tar)
|
# Demo of Python string methods. (Explanatory comments translated from Russian.)
string_1 = "Терпение и труд - все перетрут!"
sub_string_1 = "труд"
string_2 = "Bema"
sep_string_2 = "B,e,m,a"
numlist = ['1', '2', '3']
separator = ', '
character = 'p'
unicode_char = ord(character)
string = "Python is awesome"
new_string = string.center(24)
# BUG FIX: this variable was named `str`, shadowing the builtin type.
tab_string = 'xyz\t12345\tabc'
result = tab_string.expandtabs()
random_string = ' this is good '
string_4 = 'This is good '
string_5 = ' Master of Manicure, '
string_6 = "ThIs ShOuLd Be MiXeD cAsEd."
string_7 = 'BUSINESS LADY'
text = "program is fun"
width = 15

if __name__ == '__main__':
    print(string_1.find(sub_string_1, 3))  # lowest index of the substring (search from pos 3)
    print(string_1.rfind(sub_string_1, 4))  # highest index of the substring
    print(string_1.index(sub_string_1, 5))  # like find(), but raises ValueError when absent
    print(string_1.rindex(sub_string_1, 6))  # highest index; raises when absent
    print(string_1.replace(sub_string_1, 'пруд'))  # replace every occurrence of one substring with another
    print(string_2.split(sep_string_2,))  # split into a list by the separator (no match here)
    print(string_1.isdigit())  # True when the string consists only of digits
    print(string_2.isalpha())  # True when the string consists only of letters
    print(string_1.isalnum())  # True when the string consists only of letters and digits
    print(string_1.islower())  # True when all cased characters are lowercase
    print(string_2.isupper())  # True when all cased characters are uppercase
    print(string_2.isspace())  # True when the string consists only of whitespace
    print(string_1.istitle())  # True when each word starts with an uppercase letter
    print(string_2.upper())  # convert to uppercase
    print(string_1.lower())  # convert to lowercase
    print(string_1.startswith('терпение'))  # True when the string starts with the given prefix
    print(string_1.endswith('перетрут!'))  # True when the string ends with the given suffix
    print(separator.join(numlist))  # join all items of the iterable into one string
    print(unicode_char)  # integer Unicode code point of the character
    print(chr(97))  # character for the given code point
    print(string_1.capitalize())  # uppercase the first character of the string
    print("Centered String: ", new_string)  # string centered to the given width
    print(string_1.count('Т'))  # number of occurrences of the substring
    print(result)  # copy of the string with tabs expanded to spaces
    print(random_string.lstrip())  # strip leading whitespace
    print(string_4.rstrip())  # strip trailing whitespace
    print(string_5.strip())  # strip whitespace at both ends
    print(string_5.partition('of'))  # split into a 3-tuple around the first separator
    print(string_5.rpartition('of '))  # split into a 3-tuple around the last separator
    print(string_6.swapcase())  # swap the case of every character
    print(string_7.title())  # uppercase the first letter of each word, lowercase the rest
    print(text.zfill(15))  # pad on the left with '0' to the given width
    print(text.ljust(width))  # left-justify to the given minimum width
    print(text.rjust(width))  # right-justify to the given minimum width
    print('{name} написал {book}'.format(name = 'Swaroop', book = 'A byte of Python'))  # build a
    # string from data with the format() method
|
from setuptools import setup
def readme():
    """Return the contents of README.rst as a string."""
    with open("README.rst") as readme_file:
        contents = readme_file.read()
    return contents
# Package metadata for the k_index_calculator distribution.
setup(name='k_index_calculator',
      version='0.2.3',
      description='Python module which calculates the K-Index of a geomagnetic time series.',
      long_description='Calculates the K-Index of geomagnetic time series using the FMI method',
      keywords='geomagnetic k-index space weather',
      url='https://github.com/TerminusEst/k_index_calculator',
      author='Sean Blake',
      author_email='blakese@tcd.ie',
      license='MIT',
      packages=['k_index_calculator'],
      # BUG FIX: 'datetime' removed from install_requires — it is part of the
      # Python standard library; listing it makes pip pull an unrelated
      # third-party shim package from PyPI.
      install_requires=[
          'numpy', 'scipy', 'matplotlib'
      ],
      include_package_data=True,
      zip_safe=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.