text stringlengths 8 6.05M |
|---|
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2023] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is an implementation of eHive's Param module.
It defines ParamContainer which is an attribute of BaseRunnable
and not its base class as in eHive's class hierarchy.
All the specific warnings and exceptions inherit from ParamWarning
and ParamException.
"""
import collections
import numbers
import unittest
class ParamWarning(Warning):
    """Warning category used by process.BaseRunnable."""
class ParamException(Exception):
    """Root of the hierarchy of parameter-related exceptions."""
class ParamNameException(ParamException):
    """Raised when the parameter name is not a string."""

    def __str__(self):
        bad_name = self.args[0]
        return f'"{bad_name}" (type {type(bad_name).__name__}) is not a valid parameter name'
class ParamSubstitutionException(ParamException):
    """Raised when ParamContainer tried to substitute an unexpected structure
    (only dictionaries and lists are accepted)."""

    def __str__(self):
        # str() is applied implicitly by format()
        return 'Cannot substitute elements in objects of type "{0}"'.format(type(self.args[0]))
class ParamInfiniteLoopException(ParamException):
    """Raised when parameters reference each other in a cycle."""

    def __str__(self):
        looping_param = self.args[0]
        stack = list(self.args[1].keys())
        return "Substitution loop has been detected on {0}. Parameter-substitution stack: {1}".format(looping_param, stack)
class NullParamException(ParamException):
    """Raised when a parameter cannot be required because it is null (None)."""

    def __str__(self):
        return f"{self.args[0]} is None"
class ParamContainer:
"""Equivalent of eHive's Param module"""
def __init__(self, unsubstituted_params, debug=False):
"""Constructor. "unsubstituted_params" is a dictionary"""
self.unsubstituted_param_hash = unsubstituted_params.copy()
self.param_hash = {}
self.debug = debug
# Public methods
#################
def set_param(self, param_name, value):
"""Setter. Returns the new value"""
self.validate_parameter_name(param_name)
self.param_hash[param_name] = value
return value
def get_param(self, param_name):
"""Getter. Performs the parameter substitution"""
self.validate_parameter_name(param_name)
self.substitution_in_progress = collections.OrderedDict()
try:
return self.internal_get_param(param_name)
except (KeyError, SyntaxError, ParamException) as e:
# To hide the part of the stack that is in ParamContainer
raise e.with_traceback(None)
def has_param(self, param_name):
"""Returns a boolean. It checks both substituted and unsubstituted parameters"""
self.validate_parameter_name(param_name)
return (param_name in self.param_hash) or (param_name in self.unsubstituted_param_hash)
def substitute_string(self, string):
"""Apply the parameter substitution to the string"""
self.substitution_in_progress = collections.OrderedDict()
try:
return self.param_substitute(string)
except (KeyError, SyntaxError, ParamException) as e:
# To hide the part of the stack that is in ParamContainer
raise e.with_traceback(None)
# Private methods
##################
def validate_parameter_name(self, param_name):
"""Tells whether "param_name" is a non-empty string"""
if not isinstance(param_name, str) or (param_name == ''):
raise ParamNameException(param_name)
def debug_print(self, *args, **kwargs):
"""Print debug information if the debug flag is turned on (cf constructor)"""
if self.debug:
print(*args, **kwargs)
def internal_get_param(self, param_name):
"""Equivalent of get_param() that assumes "param_name" is a valid parameter name and hence, doesn't have to raise ParamNameException.
It is only used internally"""
self.debug_print("internal_get_param", param_name)
if param_name not in self.param_hash:
x = self.unsubstituted_param_hash[param_name]
self.param_hash[param_name] = self.param_substitute(x)
return self.param_hash[param_name]
def param_substitute(self, structure):
"""
Take any structure and replace the pairs of hashes with the values of the parameters / expression they represent
Compatible types: numbers, strings, lists, dictionaries (otherwise, ParamSubstitutionException is raised)
"""
self.debug_print("param_substitute", structure)
if structure is None:
return None
elif isinstance(structure, list):
return [self.param_substitute(_) for _ in structure]
elif isinstance(structure, dict):
# NB: In Python, not everything can be hashed and used as a dictionary key.
# Perhaps we should check for such errors ?
return {self.param_substitute(key): self.param_substitute(value) for (key,value) in structure.items()}
elif isinstance(structure, numbers.Number):
return structure
elif isinstance(structure, str):
# We handle the substitution differently if there is a single reference as we can avoid forcing the result to be a string
if structure[:6] == '#expr(' and structure[-6:] == ')expr#' and structure.count('#expr(', 6, -6) == 0 and structure.count(')expr#', 6, -6) == 0:
return self.subst_one_hashpair(structure[1:-1], True)
if structure[0] == '#' and structure[-1] == '#' and structure.count('#', 1, -1) == 0:
if len(structure) <= 2:
return structure
return self.subst_one_hashpair(structure[1:-1], False)
# Fallback to the default parser: all pairs of hashes are substituted
return self.subst_all_hashpairs(structure, lambda middle_param: self.subst_one_hashpair(middle_param, False) )
else:
raise ParamSubstitutionException(structure)
def subst_all_hashpairs(self, structure, callback):
"""
Parse "structure" and replace all the pairs of hashes by the result of calling callback() on the pair content
#expr()expr# are treated differently by calling subst_one_hashpair()
The result is a string (like structure)
"""
self.debug_print("subst_all_hashpairs", structure)
# Allow a single literal hash
if structure.count("#") == 1:
return structure
result = []
while True:
(head,_,tmp) = structure.partition('#')
result.append(head)
if _ != '#':
return ''.join(result)
if tmp.startswith('expr('):
i = tmp.find(')expr#')
if i == -1:
raise SyntaxError("Unmatched '#expr(' token")
val = self.subst_one_hashpair(tmp[:i+5], True)
tail = tmp[i+6:]
else:
(middle_param,_,tail) = tmp.partition('#')
if _ != '#':
raise SyntaxError("Unmatched '#' token")
if middle_param == '':
val = '##'
else:
val = callback(middle_param)
result.append(str(val))
structure = tail
def subst_one_hashpair(self, inside_hashes, is_expr):
"""
Run the parameter substitution for a single pair of hashes.
Here, we only need to handle #expr()expr#, #func:params# and #param_name#
as each condition has been parsed in the other methods
"""
self.debug_print("subst_one_hashpair", inside_hashes, is_expr)
# Keep track of the substitutions we've made to detect loops
if inside_hashes in self.substitution_in_progress:
raise ParamInfiniteLoopException(inside_hashes, self.substitution_in_progress)
self.substitution_in_progress[inside_hashes] = 1
# We ask the caller to provide the is_expr tag to avoid checking the string again for the presence of the "expr" tokens
if is_expr:
s = self.subst_all_hashpairs(inside_hashes[5:-5].strip(), 'self.internal_get_param("{0}")'.format)
val = eval(s)
elif ':' in inside_hashes:
(func_name,_,parameters) = inside_hashes.partition(':')
try:
f = eval(func_name)
except:
raise SyntaxError("Unknown method: " + func_name)
if callable(f):
if parameters:
val = f(self.internal_get_param(parameters))
else:
val = f()
else:
raise SyntaxError(func_name + " is not callable")
else:
val = self.internal_get_param(inside_hashes)
del self.substitution_in_progress[inside_hashes]
return val
class ParamContainerTestExceptions(unittest.TestCase):
    """Checks that ParamContainer raises the documented exceptions."""

    def test_infinite_loops(self):
        """Two mutually-referencing parameters must be detected as a loop."""
        looping = ParamContainer({'a': '#b#', 'b': '#a#'})
        self.assertRaises(ParamInfiniteLoopException, looping.get_param, 'a')

    def test_missing_param(self):
        """Requesting an unknown parameter raises KeyError."""
        self.assertRaises(KeyError, ParamContainer({'a': 3}).get_param, 'b')

    def test_param_must_be_string(self):
        """Parameter names must be strings."""
        self.assertRaises(ParamNameException, ParamContainer({'a': 3}).get_param, 0)
class ParamContainerTestSubstitutions(unittest.TestCase):
    """End-to-end tests of the parameter-substitution engine.

    The fixture strings below must stay byte-identical to the syntax the
    parser expects, so only comments are added here.
    """

    # Type to clarify seed_params
    TestParamEntry = collections.namedtuple('TestParamEntry', ['name', 'seed_value', 'eval_value'])

    # Test data: (name, raw seed value, expected value after substitution)
    seed_params_list = (
        TestParamEntry('alpha', 2, 2),
        TestParamEntry('beta', 5, 5),
        TestParamEntry('delta', '#expr( #alpha#*#beta# )expr#', 10),
        TestParamEntry('epsilon', 'alpha#beta', 'alpha#beta'),  # Single hash -> no substitution
        TestParamEntry('gamma', [10, 20, 33, 15], [10, 20, 33, 15]),
        TestParamEntry('gamma_prime', '#expr( #gamma# )expr#', [10, 20, 33, 15]),
        TestParamEntry('gamma_second', '#expr( list(#gamma#) )expr#', [10, 20, 33, 15]),
        TestParamEntry('age', {'Alice': 17, 'Bob': 20, 'Chloe': 21}, {'Alice': 17, 'Bob': 20, 'Chloe': 21}),
        TestParamEntry('age_prime', '#expr( #age# )expr#', {'Alice': 17, 'Bob': 20, 'Chloe': 21}),
        TestParamEntry('age_second', '#expr( dict(#age#) )expr#', {'Alice': 17, 'Bob': 20, 'Chloe': 21}),
        TestParamEntry('csv', '[123,456,789]', '[123,456,789]'),
        TestParamEntry('csv_prime', '#expr( #csv# )expr#', '[123,456,789]'),
        TestParamEntry('listref', '#expr( eval(#csv#) )expr#', [123, 456, 789]),
        TestParamEntry('null', None, None),
        TestParamEntry('ref_null', '#null#', None),
        TestParamEntry('ref2_null', '#expr( #null# )expr#', None),
        TestParamEntry('ref3_null', '#alpha##null##beta#', '2None5'),
    )
    seed_params_dict = {p.name: p.seed_value for p in seed_params_list}

    def setUp(self):
        # A fresh container per test, so the substitution cache never leaks
        # between test methods
        self.params = ParamContainer(self.seed_params_dict)

    def assertSubstitution(self, param_string, expected_value, msg):
        """Helper method to execute the substitution and check the result"""
        value = self.params.substitute_string(param_string)
        self.assertEqual(value, expected_value, msg)

    def test_values(self):
        """Every seeded parameter must resolve to its expected value."""
        for p in self.seed_params_list:
            self.assertEqual(self.params.get_param(p.name), p.eval_value, p.name + " can be retrieved")

    def test_numbers(self):
        """Scalar references are stringified inside a larger string."""
        self.assertSubstitution(
            '#alpha# and another: #beta# and again one: #alpha# and the other: #beta# . Their product: #delta#',
            '2 and another: 5 and again one: 2 and the other: 5 . Their product: 10',
            'Scalar substitutions'
        )

    def test_lists(self):
        """Single-reference lists keep their type; #expr# can transform them."""
        self.assertSubstitution(
            '#gamma#',
            [10, 20, 33, 15],
            'gamma not stringified'
        )
        self.assertSubstitution(
            '#expr( #gamma# )expr#',
            [10, 20, 33, 15],
            'expr-gamma not stringified'
        )
        self.assertSubstitution(
            '#expr( "~".join([str(_) for _ in sorted(#gamma#)]) )expr#',
            '10~15~20~33',
            'gamma stringification'
        )
        self.assertSubstitution(
            '#expr( "~".join([str(_) for _ in sorted(#gamma_prime#)]) )expr#',
            '10~15~20~33',
            'gamma_prime stringification'
        )

    def test_dictionaries(self):
        """Single-reference dicts keep their type; #expr# can transform them."""
        self.assertSubstitution(
            '#age#',
            {'Alice': 17, 'Bob': 20, 'Chloe': 21},
            'age not stringified'
        )
        self.assertSubstitution(
            '#expr( #age# )expr#',
            {'Alice': 17, 'Bob': 20, 'Chloe': 21},
            'age not stringified'
        )
        self.assertSubstitution(
            '#expr( " and ".join(["{0} is {1} years old".format(p,a) for (p,a) in sorted(#age#.items())]) )expr#',
            'Alice is 17 years old and Bob is 20 years old and Chloe is 21 years old',
            'complex fold of age'
        )
        self.assertSubstitution(
            '#expr( " and ".join(["{0} is {1} years old".format(p,a) for (p,a) in sorted(#age_prime#.items())]) )expr#',
            'Alice is 17 years old and Bob is 20 years old and Chloe is 21 years old',
            'complex fold of age_prime'
        )

    def test_maths_methods(self):
        """Builtin functions are usable inside #expr# blocks."""
        self.assertSubstitution(
            '#expr( sum(#gamma#) )expr#',
            78,
            'sum(gamma)'
        )
        self.assertSubstitution(
            '#expr( min(#gamma#) )expr#',
            10,
            'min(gamma)'
        )
        self.assertSubstitution(
            '#expr( max(#gamma#) )expr#',
            33,
            'max(gamma)'
        )

    def test_indexes(self):
        """Indexing and key access work on substituted containers."""
        self.assertSubstitution(
            '#expr( #age#["Alice"]+max(#gamma#)+#listref#[0] )expr#',
            173,
            'adding indexed and keyed values'
        )

    def test_param_modification(self):
        """Substituted values are cached by reference, not copied."""
        # Force the substitution of these parameters
        self.params.get_param('gamma')
        self.params.get_param('gamma_prime')
        self.params.get_param('gamma_second')
        # Modify gamma
        self.params.get_param('gamma').append("val0")
        # Only gamma and gamma_prime should be modified
        # because they are the same reference.
        # gamma_second is a copy made before the edition
        # so should still have the initial value.
        self.assertEqual(
            self.params.get_param('gamma'),
            [10, 20, 33, 15, 'val0'],
            'gamma'
        )
        self.assertEqual(
            self.params.get_param('gamma_prime'),
            [10, 20, 33, 15, 'val0'],
            'gamma_prime'
        )
        self.assertEqual(
            self.params.get_param('gamma_second'),
            [10, 20, 33, 15],
            'gamma_second'
        )
|
import time

# Scratch script: use this program to test sending processes to the background.
# It simply blocks for five seconds, then prints a completion marker.
time.sleep(5)
print("done!")
|
import os
import matplotlib.pyplot as plt
import numpy as np
from MiscPyUtilities.pipeMAT import LoadMatFile2NumpyArray
from datetime import datetime
def PlotSaveHollowTri(CroSecFolder):
"""
plot cross section in a CroSecXX folder
@param CroSecFolder: str, path to a CroSecXX folder
@return:
"""
if CroSecFolder.endswith('/'):
CroSecFolder = CroSecFolder[:-1] # remove trailing '/'
# load profile
shift2centroidFile = os.path.join(CroSecFolder, 'genTriCros', 'shift2centroid.mat')
if not os.path.isfile(shift2centroidFile):
raise FileNotFoundError(shift2centroidFile + 'not exist!')
extract = LoadMatFile2NumpyArray(shift2centroidFile, ['Po', 'Pi'])
Po = extract['Po'] # vertices on external bound
Pi = extract['Pi'] # vertices on internal bound
# load mesh
coords_ien_bgpFile = os.path.join(CroSecFolder, 'tri_mesh', 'coords_ien_bgp.mat')
extract = LoadMatFile2NumpyArray(coords_ien_bgpFile, ['ien', 'coords'])
ien = extract['ien'] # index starts from 1
coords = extract['coords'] # x y coordinates of mesn nodes
# start plotting
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect(aspect="equal")
# plot outer bound
x = Po[:, 0].tolist()
x.append(x[0])
y = Po[:, 1].tolist()
y.append(y[0])
ax.plot(x, y, 'g--', linewidth=1, label='outer profile')
for i in range(3):
ax.text(x[i], y[i], str(i), fontsize=12) # annotate node index
# plot inner bound
x = Pi[:, 0].tolist()
x.append(x[0])
y = Pi[:, 1].tolist()
y.append(y[0])
ax.plot(x, y, 'r--', linewidth=1, label='inner profile')
# plot triangular mesh
ax.triplot(coords[:, 0], coords[:, 1], ien - 1, linewidth=0.1)
ax.plot(0, 0, 'rs', markersize=6) # plot Origin of cross section
now = datetime.now()
current_time = now.strftime("%Y-%m-%d-%H-%M-%S") # year-month-day-hour-min-sec
pngFile = CroSecFolder.split('/')[-1] + '_' + current_time + '.png' # name of png file
pngFile = os.path.join(CroSecFolder, pngFile) # full path
fig.savefig(pngFile, dpi=300, transparent=True)
plt.close(fig)
return
if __name__=='__main__':
    import sys
    import os
    # First CLI argument: parent folder of all CroSecXX folders; default to cwd
    try:
        workdir = sys.argv[1]
    except IndexError:  # bugfix: was a bare except hiding real errors
        workdir = os.getcwd()
    if not os.path.isdir(workdir):
        raise ValueError(workdir + ' is not a valid folder')
    if workdir.endswith('/'):
        workdir = workdir[:-1]
    # Collect the immediate CroSecXX sub-folders of workdir
    CroSecFolders = []
    for (root, dirs, files) in os.walk(workdir):
        if root == workdir:
            for dirname in dirs:  # renamed: "dir" shadows the builtin
                if dirname.startswith('CroSec'):
                    # bugfix: join with root so the path is valid even when
                    # the current working directory is not workdir
                    CroSecFolders.append(os.path.join(root, dirname))
    if len(CroSecFolders) == 0:
        raise RuntimeError('No CroSec folders in ' + workdir)
    for CroSecFolder in CroSecFolders:
        print('Plotting ' + CroSecFolder + '...')
        PlotSaveHollowTri(CroSecFolder)
|
#import sys
#input = sys.stdin.readline
def main():
N = int( input())
ans = 0
d = list( map( int, input().split()))
for i in range(N):
for j in range(N):
if i == j:
continue
ans += d[i]*d[j]
print(ans//2)
if __name__ == '__main__':
main()
|
from new2 import Father

class Son(Father):
    """Inherits everything from Father unchanged; no overrides."""
    pass

# Demo: display() is inherited from Father (defined in module new2)
s1=Son()
s1.display()
|
import face_recognition
from cv2 import cv2
import numpy as np
import glob
import os
import sys
import time
# State for the on-screen click-feedback circle:
contador_circulo_click = 0   # frames left during which the circle is drawn
color_circulo_click = ''     # circle colour (BGR tuple once a click happens)
# Last mouse-click position inside the 'Video' window (set by save_face_click)
ix,iy = -1,-1
def save_face_click(event,x,y,flags,param):
    """Mouse callback for the 'Video' window.

    Left-clicking shows a red feedback circle; if the click lands inside the
    bounding box of an unknown face, the circle turns green, the user is
    prompted (on stdin) for a name, the face crop is saved under img/ and the
    known-faces database is reloaded.
    """
    global ix,iy,contador_circulo_click,color_circulo_click, known_face_encodings, known_face_names, listadoImagenes
    if event == cv2.EVENT_LBUTTONDOWN:
        ix,iy = x,y
        # show a red circle for the next 5 frames
        contador_circulo_click = 5
        color_circulo_click = (0,0,255)
        # did the click land inside one of the unknown-face boxes?
        for foto_desconocido in fotos_desconocidos:
            if ix >= foto_desconocido[1]['left'] and ix <= foto_desconocido[1]['right'] \
            and iy >= foto_desconocido[1]['top'] and iy <= foto_desconocido[1]['bottom']:
                # green circle = hit; save the crop and rebuild the database
                color_circulo_click = (0,255,0)
                (video_x, video_y, video_w, video_h) = cv2.getWindowImageRect('Video')
                nombreImagen = input("Ingrese el nombre de la imagen: ")
                cv2.imwrite("img/"+nombreImagen+".jpg" , foto_desconocido[0])
                known_face_encodings, known_face_names, listadoImagenes = cargaImagenes()
# Load pictures and learn how to recognize it. MEJORADO POR JUAN
def cargaImagenes():
    """Load every image under img/ and build the known-faces database.

    Returns a triple (face encodings, person names, image paths); the person
    name is the image file name without its extension. Images in which no
    face can be detected are reported and skipped.
    """
    listadoImagenes = glob.glob("img/*.*")  # paths of every image in img/
    known_face_encodings = []
    known_face_names = []
    for i in listadoImagenes:
        imagen = face_recognition.load_image_file(str(i))
        try:
            # face_encodings() returns a list; [0] raises IndexError when
            # no face was found in the image
            encoding = face_recognition.face_encodings(imagen)[0]
            known_face_encodings.append(encoding)
            known_face_names.append(os.path.splitext(os.path.basename(str(i)))[0])
        # bugfix: was a bare "except:" which also swallowed SystemExit /
        # KeyboardInterrupt; best-effort skipping of bad images is kept
        except Exception:
            print('La siguiente imagen no contiene un rostro reconocible:' + os.path.splitext(os.path.basename(str(i)))[0])
    return known_face_encodings, known_face_names, listadoImagenes
known_face_encodings, known_face_names, listadoImagenes = cargaImagenes()
print('Personas cargadas con imágenes:')
print(known_face_names)
# Webcam handle: 0 is the built-in camera (2 was the author's USB camera)
video_capture = cv2.VideoCapture(0)
# Black side panel ("grilla") used to display the list of known users
grilla = np.zeros((480,1366-640,3), np.uint8)
# Names currently detected by the camera
ids_encontrados = []
# Face crops (with names / boxes) found in the current frame
fotos_individuales = []
fotos_anteriores = []
nombres_fotos_individuales = []
# Crops of faces that did not match any known person
fotos_desconocidos = []
# Flag used to make the colours of the user list blink
bandera_color = True
# Region of interest of the last face drawn -- rev: check if still needed
roi=0
#flagCaptura = False
# Frame-skip counter: face detection only runs every 9th frame
busca = 0
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # Convert from BGR (OpenCV) to RGB (face_recognition)
    rgb_frame = frame[:, :, ::-1]
    if busca == 0:
        # Find all the faces and face encodings in this frame
        face_locations = face_recognition.face_locations(rgb_frame,0)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
    # Run detection only once every 9 frames to keep the loop responsive
    if busca == 8:
        busca = 0
    else:
        busca += 1
    font = cv2.FONT_HERSHEY_DUPLEX  # font for the box labels
    # Reset per-frame state
    roi = 0
    fotos_individuales = []
    nombres_fotos_individuales = []
    fotos_desconocidos = []
    # Loop through each face in this frame of video
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # top/right/bottom/left come from the face location of each face.
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Desconocida/o"
        # If a match was found in known_face_encodings, just use the first one
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]
            ids_encontrados.append(name)
            fotos_individuales.append([frame[top:bottom, left:right],name,{"top": top,"bottom": bottom,"left":left,"right":right}])
        else:
            # unknown face: keep the crop so it can be registered via click
            fotos_desconocidos.append([frame[top:bottom, left:right],{"top": top,"bottom": bottom,"left":left,"right":right}])
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        roi = frame[top:bottom, left:right]
        # Draw a label with the name below the face
        cv2.rectangle(frame, (left, top - 20), (right, top), (0, 0, 255), cv2.FILLED)
        cv2.putText(frame, name, (left + 6, top - 6), font, 0.4, (104, 197, 219), 1)
    for nombre in fotos_individuales:
        nombres_fotos_individuales.append(nombre[1])
    # Close the per-person windows of people no longer in the frame
    if fotos_anteriores:
        for face in fotos_anteriores:
            if face not in nombres_fotos_individuales:
                cv2.destroyWindow(face)
        fotos_anteriores = []
    # Display the resulting image
    cv2.putText(frame, 'CANTIDAD DE PERSONAS AUTORIZADAS: '+str(len(listadoImagenes)), (20, 20), font, 0.5, (255, 255, 255), 1)
    # If a click happened recently, show the feedback circle for a few frames
    if contador_circulo_click > 0:
        cv2.circle(frame, (ix,iy), 60, color_circulo_click, thickness=-1, lineType=8, shift=0)
        contador_circulo_click -= 1
    cv2.imshow('Video', frame)
    # Callback that saves an unknown face on click
    cv2.setMouseCallback('Video',save_face_click)
    cv2.imshow('Video', frame)
    # Remove the window title bar and pin the window position
    cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('Video', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.moveWindow('Video', -35, -20)
    datos_ventana_video = cv2.getWindowImageRect('Video')  # (x, y, w, h)
    # Black side panel with the user list
    cv2.imshow('grilla',grilla)
    cv2.namedWindow('grilla',cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('grilla', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    # Place the panel right next to the video window
    cv2.moveWindow('grilla', datos_ventana_video[0]+datos_ventana_video[2], -20)
    pos_y = 0          # start at the top edge
    color = (0,0,208)  # red for people not currently detected
    # For every known person draw a status square, their name and a divider
    for name in known_face_names:
        if ids_encontrados:
            if name == ids_encontrados[-1]:
                bandera_color = not bandera_color
        if name in ids_encontrados:
            # blink between two colours while the person is on camera
            if bandera_color:
                color = (37,212,107)
            else:
                color = (255,30,100)
        cv2.rectangle(grilla,(0,pos_y),(20,pos_y+20),color,-1)
        pos_y = pos_y +20
        pos_name = (30,pos_y)
        cv2.putText(grilla, name, pos_name, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (99,111,19), 0, cv2.LINE_AA)
        cv2.line(grilla,(0,pos_y+1),(800,pos_y+1),(97,76,8))
        pos_y = pos_y +2
        color = (0,0,255)
    ids_encontrados = []
    # One small window per recognized person, laid out under the video
    Pos_x_ventanas_individuales = -35
    for face in fotos_individuales:
        cv2.imshow(face[1], cv2.imread('img/'+face[1]+'.jpg'))
        cv2.namedWindow(face[1],cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(face[1], cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        alto_ventanas_individuales = 768-480-20
        Pos_x_ventanas_individuales += alto_ventanas_individuales
        # same width and height so the window is square
        cv2.resizeWindow(face[1],alto_ventanas_individuales,alto_ventanas_individuales)
        cv2.moveWindow(face[1], Pos_x_ventanas_individuales, datos_ventana_video[3])
        fotos_anteriores.append(face[1])
    k = cv2.waitKey(1)
    if k == ord('s'):
        # 's' saves the single unknown face currently on screen
        if len(fotos_desconocidos) == 1:
            (video_x, video_y, video_w, video_h) = cv2.getWindowImageRect('Video')
            print(video_x, video_y, video_w, video_h)
            print(roi)
            nombreImagen = input("Ingrese el nombre de la imagen: ")
            cv2.imwrite("img/"+nombreImagen+".jpg" , roi)
            known_face_encodings, known_face_names, listadoImagenes = cargaImagenes()
            # bugfix: removed "flagCaptura = not flagCaptura" -- the variable
            # is never defined (its initialisation is commented out above),
            # so reaching this line raised NameError
        else:
            cv2.putText(frame, 'Para realizar una carga solo debe aparecer una persona en el video', (50, 50), font, 0.8, (0, 0, 0), 2)
    elif k== ord('q'):
        break
    elif k == ord('a'):
        print(ix,iy)
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
#stab at connect four
from os.path import realpath, join, dirname
import logging
import random
import itertools

# Log to connect4.log next to this file, truncating it on each run
log_file = realpath(join(dirname(__file__),"connect4.log"))
logging.basicConfig(filename=log_file, filemode="w+", level=logging.DEBUG)
log = logging.getLogger(__name__)
class Board(object):
    """Connect-four board.

    Moves are stored per player as [row, col] pairs; row 0 is the TOP row.
    ``turn`` is 1 for player 0 ('x') and -1 for player 1 ('o').
    """

    def __init__(self, p0moves=[], p1moves=[], turn=1):
        # The mutable default lists are safe here because they are always
        # copied before being stored
        self.size = (6, 7)
        self.p0_moves = p0moves[:]
        self.p1_moves = p1moves[:]
        self.turn = turn

    @classmethod
    def for_testing(cls, board_string, turn=1):
        """Build a board from a comma-separated string of rows where 'x'/'o'
        mark the players' pieces and any other character is an empty cell."""
        p0 = []
        p1 = []
        for index, ele, in enumerate(board_string.split(',')):
            for i, e in enumerate(ele):
                if e == 'x':
                    p0.append([index, i])
                elif e == 'o':
                    p1.append([index, i])
        b = cls (p0, p1, turn)
        return b

    @property
    def gameboard (self):
        """6x7 grid of ' '/'x'/'o' rebuilt from the move lists on every access."""
        gameboard = [[' ']*self.size[1] for x in range(self.size[0])]
        for r,c in self.p0_moves:
            gameboard[r][c] = 'x'
        for r,c in self.p1_moves:
            gameboard[r][c] = 'o'
        return gameboard

    @property
    def rela_pos (self):
        """Relative coordinates of the 8 neighbours of a cell."""
        rela_postns = list( itertools.permutations(range(-1,2),2) )
        # permutations() never repeats an element, so add the two diagonals
        rela_postns.extend([(1,1),(-1,-1)])
        return rela_postns

    def check_boundary ( self, a_cell):
        """True when a_cell lies inside the board.

        Bugfix: valid indices are 0..size-1, so the lower bound must be
        ">= 0" -- the original "> 0" wrongly excluded the first row and the
        first column, undercounting connections along those edges."""
        return a_cell[0]>=0 and a_cell[0]<self.size[0] and a_cell[1]>=0 and a_cell[1]<self.size[1]

    def conxtn_one_dir (self, cell, dirtn, total):
        """Count how many consecutive cells with the same symbol as ``cell``
        lie in direction ``dirtn`` (e.g. (-1,1)), excluding ``cell`` itself."""
        new_cell = [ cell[0]+dirtn[0], cell[1]+dirtn[1] ]
        if self.check_boundary(new_cell) and self.gameboard[new_cell[0]][new_cell[1]] == self.gameboard[cell[0]][cell[1]]:
            return self.conxtn_one_dir(new_cell, dirtn, total + 1 )
        else:
            return total

    def cal_connections (self, cell):
        """Longest run extending from ``cell`` in any direction (0 for empty cells)."""
        if self.gameboard[cell[0]][cell[1]] == ' ':
            return 0
        else:
            temp = []
            # check every neighbouring direction
            for pos in self.rela_pos:
                temp.append (self.conxtn_one_dir ( cell, pos, 0))
            return max(temp)

    @property
    def connections (self):
        """Grid holding cal_connections() for every cell."""
        connections = [[0]*self.size[1] for x in range(self.size[0])]
        for r in range(self.size[0]):
            for c in range(self.size[1]):
                connections[r][c] = self.cal_connections( [r,c] )
        return connections

    def show (self):
        """Print the board, one row per line."""
        for element in self.gameboard:
            print '|'+'|'.join(element)+'|'

    def move (self, move, turn=None ):
        """Return a brand-new board with ``move`` ([row, col]) applied.

        NOTE(review): when ``turn`` is passed explicitly the move is NOT
        recorded (only the turn is set), and the copy's turn otherwise
        defaults to 1 regardless of self.turn -- confirm both are intended
        before relying on this method outside the current call sites."""
        b = Board( self.p0_moves,self.p1_moves)
        if turn != None:
            b.turn = turn
        elif b.turn == 1:
            b.p0_moves.append(move)
        elif b.turn == -1:
            b.p1_moves.append(move)
        return b

    def check_valid_move ( self, move):
        """A move is valid when the cell is empty and it is either on the
        bottom row or directly above an occupied cell."""
        if self.gameboard[move[0]][move[1]] ==' ':
            # generalized: bottom row is size[0]-1 instead of hard-coded 5
            if move[0]==self.size[0]-1 or self.gameboard[move[0]+1][move[1]] != ' ':
                return True
            #TODO: add occasion where input is not int type
            else:
                return False
        else:
            return False

    def get_avail_moves ( self):
        """List of all currently playable cells."""
        cells = []
        for r in range( self.size[0] ):
            for c in range( self.size[1] ):
                if self.check_valid_move ( [r, c] ):
                    cells.append ( [r,c] )
        return cells

    def get_possible_boards ( self ):
        """One successor board per available move, each with the turn flipped."""
        boards = []
        for cell in self.get_avail_moves ():
            print cell
            new_board = self.move ( cell )
            print new_board.gameboard
            new_board.turn = -1 * new_board.turn
            boards.append( new_board )
        log.info("===================================")
        for b in boards:
            log.info("possible board")
            for element in b.gameboard:
                log.info( "{element}\n".format(element=element))
            log.info( " ---------------")
        log.info("===================================")
        return boards

    def check_winning (self ):
        """True when some cell has 3 further connections, i.e. a run of four."""
        for element in self.connections:
            if 3 in element:
                return True
        return False

    def check_ending ( self ):
        """True when someone has won or the board is completely full."""
        if self.check_winning():
            print " YOU WON!! "
            return True
        else:
            for element in self.gameboard:
                for cell in element:
                    if cell == ' ':
                        return False
            return True

    def num_center ( self ):
        """Count the previous mover's pieces in the centre column (3).

        The turn has already been flipped to the NEXT player, so the pieces
        of the player who just moved are evaluated."""
        if self.turn == -1:
            #TODO: use list.count() to refactor this?
            return len( [ x for x in self.p0_moves if x[1] == 3 ] )
        elif self.turn == 1:
            return len( [ x for x in self.p1_moves if x[1] == 3 ] )

    def num_n_conxtn ( self, n ):
        """Number of the previous mover's pieces with exactly ``n`` connections.

        As with num_center(), the turn marks the NEXT player, so the move
        that was JUST MADE is the one being evaluated (-1 -> 'x', 1 -> 'o')."""
        if self.turn == -1:
            v = 0
            for x in self.p0_moves:
                if self.cal_connections (x) == n:
                    v += 1
            return v
        elif self.turn == 1:
            vl = 0
            for y in self.p1_moves:
                if self.cal_connections (y) == n:
                    vl += 1
            return vl

    def evaluate_board ( self ):
        """Heuristic tuple: (won?, #2-connection pieces, #1-connection pieces, #centre pieces)."""
        return ( self.check_winning(), self.num_n_conxtn(2), self.num_n_conxtn(1), self.num_center())
def play (board):
    """Advance the game by one move: the human plays as 1, the minimax
    computer as -1; the returned board has the turn handed to the opponent."""
    next_board = human_move(board) if board.turn == 1 else minimax(board, 0)
    next_board.turn = -board.turn
    return next_board
def human_move ( a_board ):
    """Prompt the human player for a move and return the resulting board.

    Rows are entered bottom-up (0 = bottom row) and converted to the
    internal top-down indexing; invalid moves trigger a recursive retry.
    """
    #let human make the move
    print "place your move."
    print "enter row number 0 to 5. Bottom row is 0"
    row = int(raw_input())
    print"enter column number 0 to 6"
    col = int(raw_input())
    # convert bottom-up input to the internal top-down row index
    the_move = ((5-row),col)
    if a_board.check_valid_move( the_move ):
        new_board = a_board.move ( the_move )
    else:
        # invalid input: ask again (recursion depth grows with each retry)
        new_board = human_move ( a_board )
    return new_board
def minimax ( a_board, depth ):
    """One-ply minimax: return the best successor board at depth 0.

    NOTE(review): this implementation looks unfinished and likely buggy:
    - at depth == 1 it returns an evaluation *tuple* while at depth == 0 it
      returns a *board*, so the recursive comparisons mix types;
    - ``b`` is rebuilt without copying a_board.turn, so b.turn is always the
      Board default (1) and the -1 branch appears unreachable;
    - ``the_right_move`` is only bound inside the comparison branches and
      may be undefined when no candidate beats the initial ``value``.
    Confirm the intended behaviour before refactoring.
    """
    b = Board ( a_board.p0_moves, a_board.p1_moves )
    if depth == 1:
        # leaf of the (single-ply) search: return the heuristic tuple
        return a_board.evaluate_board ()
    if b.turn == -1:
        value = (0,0,0) #TODO: modify according to the evaluation function
    elif b.turn == 1:
        value = (1,9,9) #TODO: modify
    for element in b.get_possible_boards ():
        v = minimax ( element, depth + 1 )
        # maximizing player
        if b.turn == -1 and v > value:
            the_right_move = element
            value = v
            log.info("max value is {value}".format( value= value ))
        # minimizing player
        elif b.turn == 1 and v < value:
            the_right_move = element
            value = v
            log.info("min value is {value}".format( value=value ))
    if b.turn == -1:
        log.info("max value is {value}".format( value=value ))
    elif b.turn == 1:
        log.info("min value is {value}".format( value=value ))
    if depth == 0:
        return the_right_move
    return value
def main():
    """Drive the game loop on a pre-seeded test board."""
    # 'x'/'o' mark existing pieces per row; -1 means the computer moves first
    seed = ',,,nnnxnnn,nnoxxon,onoxoxn'
    board = Board.for_testing(seed, -1)
    while True:
        board.show()
        if board.check_ending():
            return
        board = play(board)


if __name__=="__main__":
    main()
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Simple MLP policy: 3-dim input -> three hidden ReLU layers -> act_dim
# tanh-squashed outputs in [-1, 1]
hidden_dim = 32
act_dim = 3
model = keras.Sequential()
model.add(keras.Input(shape=(3,)))
model.add(layers.Dense(hidden_dim, activation="relu"))
model.add(layers.Dense(hidden_dim, activation="relu"))
model.add(layers.Dense(hidden_dim, activation="relu"))
model.add(layers.Dense(act_dim, activation="tanh"))
# Bugfix: model.inputs is a *list* of input tensors, so it has no .shape
# attribute itself; inspect the first (and only) input instead
print(model.inputs[0].shape)
# Machine Learning - Data Distribution
# Earlier in this tutorial we worked with very small amounts of data in our
# examples, just to understand the different concepts.
# In the real world, data sets are much larger, but it can be hard to gather
# real-world data, at least in the early stages of a project.
# How can we get big data sets?
# To create big data sets for testing we use the Python NumPy module, which
# comes with a number of methods for creating random data sets of any size.
# Example
# Create an array containing 250 random floats between 0 and 5:
import numpy

x = numpy.random.uniform(0.0, 5.0, 250)
print(x)
|
import csv
import sys
class Room:
    """
    One dungeon room parsed from a CSV row.

    Row layout: [name, description, item-or-lock, exit1, exit2, ...] where
    the item cell starts with 'key' for a pickup or 'lock<name>|<direction>'
    for a locked exit, and each exit cell is '<command>|<room name>'.
    """

    def __init__(self, row):
        self.data = self._trim(row)

    def name(self):
        return self.data[0]

    def description(self):
        return self.data[1]

    def has_lock(self):
        cell = self.data[2]
        return '|' in cell and cell.startswith('lock')

    def has_item(self):
        return self.data[2].startswith('key')

    def get_item(self):
        # Returns None implicitly when the room has no item.
        if self.has_item():
            return self.data[2]

    def is_locking(self, direction, player):
        """True when moving *direction* is blocked and the player lacks the key."""
        lock_name, locked_dir = self.data[2].split('|')
        if locked_dir != direction:
            return False
        if player.has_key_for_lock(lock_name):
            print("You unlocked the lock with a key")
            return False
        return True

    def parse_command(self, cmd, player):
        """
        Handle *cmd*: '' means handled in place, a room name means move,
        and None means the command was not understood.
        """
        verb = cmd.split(' ')[0]
        # Item pickup actions
        if verb in ('take', 'pickup', 'collect'):
            if not self.has_item():
                return None
            player.add_to_inventory(self.get_item())
            print("You picked up the key")
            return ''
        # Locked exits
        if self.has_lock() and self.is_locking(cmd, player):
            print("That direction is locked")
            return ''
        # Movement to a neighboring room
        for cell in self.data[3:]:
            exit_cmd, room_name = cell.split('|')
            if exit_cmd == cmd:
                return room_name
        return None

    def is_exit(self):
        return self.name() == 'E'

    @staticmethod
    def _trim(row):
        """Return a copy of *row* with trailing empty cells removed."""
        for idx in range(len(row) - 1, -1, -1):
            if row[idx] != "":
                return row[:idx + 1]
        # All cells empty: mirror the original behavior (full copy).
        return row[:]
class Player:
    """Tracks the items the player is carrying."""

    def __init__(self):
        # item name -> True; a dict is used purely for membership tests
        self.inventory = {}

    def add_to_inventory(self, item):
        self.inventory[item] = True

    def has_key_for_lock(self, lock_name):
        """A lock named 'lock_X' is opened by an inventory item 'key_X'."""
        return lock_name.replace('lock', 'key') in self.inventory
class Dungeon:
    """Game driver: loads rooms from a CSV file and runs the input loop."""

    def __init__(self, file):
        self.rooms = []
        self.player = Player()
        with open(file, 'r') as handle:
            for row in csv.reader(handle):
                if row[0] == 'Name':
                    continue  # skip the header row
                self.rooms.append(Room(row))
        self.current_room = self.rooms[0]

    def run(self):
        """Print/read/move loop; ends when the exit room is reached."""
        while True:
            print(self.current_room.description())
            if self.current_room.is_exit():
                return
            command = input('> ')
            target = self.current_room.parse_command(command, self.player)
            if target is None:
                print("Don't know what you talkin' about")
                continue
            # '' means the command was handled in place; locate_room('')
            # finds nothing, so we simply stay where we are.
            found = self.locate_room(target)
            if found is not None:
                self.current_room = found

    def locate_room(self, room_name):
        """Return the room with the given name, or None."""
        for candidate in self.rooms:
            if candidate.name() == room_name:
                return candidate
        return None
def main():
    """Entry point: build the dungeon named on the command line and run it."""
    Dungeon(sys.argv[1]).run()

if __name__ == "__main__":
    main()
|
from rest_framework.routers import DefaultRouter
from .views import UserRegistrationView ,OrganisationUserLoginView
from django.urls import path
# DRF router that emits URLs without trailing slashes; auth endpoints only.
router = DefaultRouter(trailing_slash=False)
router.register(r'register', UserRegistrationView, basename='user_register')
router.register(r'login', OrganisationUserLoginView, basename='org_user_login')

# Expose the generated routes directly as this app's urlpatterns.
urlpatterns = list(router.urls)
|
import datetime

# JavaScript timestamps are expressed in *milliseconds*; Python's
# datetime.fromtimestamp() expects *seconds*, so divide by 1e3 = 1000.
MILISECODS = 1e3  # NOTE: name is a typo for "MILLISECONDS"; kept for compatibility

timestamp = 1331856000000
dt = datetime.datetime.fromtimestamp(timestamp / MILISECODS)
# -> datetime.datetime(2012, 3, 16, 1, 0)  (local time; hour depends on timezone)

# Bug fix: the original passed the raw millisecond value straight to
# fromtimestamp(), which raises (or yields a nonsense far-future date)
# because the argument is 1000x too large.  Convert here as well.
timestamp = 1331856000000
dt_again = datetime.datetime.fromtimestamp(timestamp / MILISECODS)
|
import u12
from time import sleep
from time import time
import datetime
import csv
import numpy as np
import sys
#path has to be adjusted unique to each machine
#change to make look in current directory?
sys.path.insert(0, '/home/albert/Documents/Albert Work/Scripts')
from thermocouple import temperature_read
from pressure import pressure_read
import matplotlib.pyplot as plt
import pylab as pylab
def write_to_csv(row, name, read):
    '''Write one sample row to the CSV file *name*.

    row  -- list of values written as a single CSV line
            (data order=[times,amps,volts,power,temps, pressures])
    name -- path of the CSV file
    read -- mode handed straight to open(), e.g. 'a' to append, 'w' to rewrite
    '''
    # Bug fix: the original opened the file outside the `with` statement and
    # then called file_temp.close() after the `with` block had already closed
    # it.  Opening inside `with` makes the cleanup correct and automatic.
    with open(name, read) as temp_csv:
        temp_writer = csv.writer(temp_csv)
        temp_writer.writerow(row)
    return
# LabJack U12 data logger: samples temperature and pressure alongside
# manually-entered power-supply readings, writes each sample to a CSV file
# and optionally plots the series at the end.
#
# NOTE(review): the input() comparisons below (file_new==1, plot_choice==0,
# `if continue_check:` ...) rely on Python 2 semantics, where input() eval()s
# the typed text into an int; under Python 3 input() returns a string, these
# comparisons are always False, and amperage*voltage raises — confirm the
# intended interpreter before porting.
d=u12.U12()  # handle to the LabJack U12 DAQ device
now=datetime.datetime.now()
# file-name timestamp, e.g. "2023-1-31-14" (year-month-day-hour)
time_stamp=str(now.year) +'-'+ str(now.month) +'-'+ str(now.day) +'-'+ str(now.hour)
iter= 2  # seconds between samples (shadows the builtin iter())
time_length= 5000  # total logging time in seconds; sample count = time_length/iter
# Column headers; each list accumulates its full series in order.
temps=["Temps"]
times=["Times"]
amps=["Amps"]
volts=["Volts"]
power=["Power"]
pressures=["Pressures"]
data=[times,amps,volts,power,temps, pressures]
file_new=input("Append File or Create New: 2=New/Rewrite 1=New w/ Descriptor, 0=Append -> ")
if file_new==1:
    descriptor=input("File Descriptor: (text must have quotes) ")
    file_name='power_temp-' + time_stamp + "_" + str(descriptor)
    read_write='a'
    # NOTE(review): this handle is never written to or closed; write_to_csv
    # reopens the file by name on every call.
    power_temp = open(file_name,read_write)
elif file_new==0:
    file_name='power_temp-' + time_stamp
    read_write='a'
else:
    # Default: brand new file, overwriting any previous run.
    file_name='power_temp-' + time_stamp
    read_write='w'
    power_temp = open(file_name,read_write)
    # NOTE(review): read_write stays 'w' here, so every later write_to_csv()
    # call reopens the file in 'w' mode and truncates it — only the final
    # row survives.  Presumably it should switch to 'a' after the header
    # write below; confirm.
write_to_csv(data, file_name, read_write)
amperage=input("Initial amps: ")
voltage=input("Initial volts: ")
initial=time()
k=0
# Main sampling loop: one measurement every `iter` seconds.
for i in range(time_length/iter):
    meas_temp=temperature_read(d)
    temps.append(meas_temp)
    meas_pressure=pressure_read(d, time=0)
    pressures.append(meas_pressure)
    current_t=time()
    actual=current_t-initial  # elapsed seconds since logging started
    times.append(actual)
    amps.append(amperage)
    volts.append(voltage)
    calc_power=amperage*voltage  # instantaneous electrical power
    power.append(calc_power)
    data_row=[actual,amperage, voltage, calc_power, meas_temp, meas_pressure]
    write_to_csv(data_row,file_name, read_write)
    if(k%10==0):
        # progress heartbeat every 10th sample
        print("Running Temp: " + str(meas_temp) + " deg C")
    k+=1
    try:
        sleep(iter)
    except KeyboardInterrupt:
        # Ctrl-C during the sleep lets the operator update the power-supply
        # settings or stop the run.  NOTE(review): any non-empty answer —
        # including "0" — is truthy here under Python 3; confirm.
        continue_check = input("\n Continue? 1=YES, 0=NO -> ")
        if continue_check:
            amperage=input("New Amps: ")
            voltage=input("New Voltage: ")
        else:
            break
# Optional plotting of the collected series.
plot_choice=input("Plot Desired: 0= Temp-Power, 1=Temp-Time 2=Pressures-Time, 3=Skip Plot Generation-> ")
if plot_choice==1:
    plt.plot(times,temps)
    plt.xlabel('Times in Seconds')
    plt.ylabel('Temperature in deg. C')
    plt.tight_layout()
    pylab.show()
elif plot_choice==0:
    plt.plot(power,temps)
    plt.xlabel('Power in Watts')
    plt.ylabel('Temperature in deg. C')
    plt.tight_layout()
    pylab.show()
elif plot_choice==2:
    plt.plot(times,pressures)
    plt.xlabel('Time in Seconds')
    plt.ylabel('Pressures in Torr')
    plt.tight_layout()
    pylab.show()
else:
    print("No Plot")
#convert csv data into column mode
# data_rows=[]
# data=[times,amps,volts,power,temps, pressures]
# for i in range(len(times)):
#     temp=[]
#     for j in data:
#         temp.append(j[i])
#     data_rows.append(temp)
#!/usr/bin/env python
from __future__ import with_statement
import sys
from setuptools import setup, find_packages

# The release number is referenced twice below (version= and download_url=);
# keeping it in one place means a version bump cannot leave them out of sync.
VERSION = "0.2"

long_description = ""

setup(
    name='ifilter',
    version=VERSION,
    description='ifilter is a command line tool for interactive filtering of pipes.',
    long_description=long_description,
    author='Stefan Hudelmaier',
    author_email='hudelmaier@gmail.com',
    url='https://github.com/stefan-hudelmaier/ifilter',
    download_url='https://github.com/stefan-hudelmaier/ifilter/tarball/' + VERSION,
    keywords=["shell", "pipe", "filter", "interactive"],
    packages=find_packages(),
    install_requires=[],
    entry_points={
        'console_scripts': [
            'ifilter = ifilter:main',
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Systems Administration'
    ],
)
|
#!/usr/bin/env python3
"""
TODO:
+ Detectar que no existe en el path
+ Generar uno vacio
- Test default path on windows
- Test default path on mac
- Getting a missing profile
- Any attribute is null
"""
import os
from .namespace import namespace
from .consolemsg import error
# Kludge in order to use FileNotFoundError in Python2
try: FileNotFoundError
except NameError:
    # Python 2 has no FileNotFoundError; IOError is its closest equivalent.
    FileNotFoundError=IOError

class BadProfile(Exception) : pass    # requested profile missing from the config file
class MissingValue(Exception) : pass  # a mandatory key is absent or empty

# Keys every profile must define with a non-empty value.
_mandatoryKeys = 'dbname user pwd'.split()
def defaultConfigDbFile() :
    """Return the platform-specific default path of the configdb.yaml file."""
    import appdirs
    # appdirs resolves the per-user config directory on each OS
    config_dir = appdirs.user_config_dir(
        appname='configdb',
        appauthor='somenergia',
        version='1.0',
    )
    return os.path.join(config_dir, 'configdb.yaml')
def generateDefault(configfile, required=_mandatoryKeys) :
    """
    Create a skeleton config file at *configfile* with a single 'default'
    profile whose *required* keys are all set to None, and return the data.
    """
    container = os.path.dirname(configfile)
    # Bug fix: the original tested os.access(container, os.R_OK), which asks
    # "is it readable?" rather than "does it exist?" — an existing but
    # unreadable directory would make makedirs() raise.  Test existence.
    if container and not os.path.isdir(container) :
        os.makedirs(container)
    data=namespace(
        default=namespace(
            (key,None) for key in required
        ))
    data.dump(configfile)
    return data
def configdb(configfile=None, profile=None, required=_mandatoryKeys ):
    """
    Load the database configuration *profile* from *configfile*.

    The profile falls back to the CONFIGDB_PROFILE environment variable and
    then to 'default'; the file falls back to the per-user default path.
    A skeleton file is generated when none exists.

    Raises BadProfile when the profile is missing from the file, and
    MissingValue when a mandatory key is absent or empty.
    """
    profile = profile or os.environ.get('CONFIGDB_PROFILE', 'default')
    configfile = configfile or defaultConfigDbFile()
    try :
        data = namespace.load(configfile)
    except FileNotFoundError:
        error(
            "Database configuration file not available, "
            "generating a default one at '{}'"
            .format(configfile))
        data = generateDefault(configfile, required)
    try:
        result = data[profile]
    except KeyError:
        # Bug fix: corrected the user-facing typo "availabe" -> "available".
        message = ("Database profile '{}' not available in '{}'"
            .format(profile, configfile) + (", try with: "+
            (", ".join(sorted(data.keys()))) if data else ""))
        error(message)
        raise BadProfile(message)
    for key in required:
        if key not in result or not result[key] :
            raise MissingValue(key)
    return result
|
# Three student records (name + grade), printed one per line.
al1 = {'nome': 'Isabela', 'nota': 4}
al2 = {'nome': 'Ricardo', 'nota': 10}
al3 = {'nome': 'Fernanda', 'nota': 9.5}

lista = [al1, al2, al3]
for aluno in lista:
    print(aluno['nome'], aluno['nota'])
|
#!/usr/local/bin/python3
class Board:
    """A 3x3 tic-tac-toe grid of Space cells."""

    def __init__(self):
        # rows of fresh (empty) spaces
        self.board = [[Space() for _ in range(3)] for _ in range(3)]

    def get_space(self, x, y):
        """Return the cell at row *x*, column *y*."""
        return self.board[x][y]

    def fill_board(self, x, y, player):
        """Mark the given cell with *player*'s symbol."""
        return self.board[x][y].set_space(player)

    def print_board(self):
        """Render the grid to stdout, one row per line."""
        for row in self.board:
            print('\n')
            for cell in row:
                print(cell, end='')
        print('\n')
class Space:
    """A single board cell; '#' marks an empty cell."""

    def __init__(self, space='#'):
        self.space = space

    def set_space(self, space):
        """Overwrite the cell's symbol (returns None)."""
        self.space = space

    def __str__(self):
        return self.space
# Smoke test: render an empty board when the module is executed.
test = Board()
test.print_board()
|
#!/usr/bin/env python
"""
Pose server that uses a separate server to access journal data
that way journal data does not need to be reloaded every time server restarts
does make startup more complex:
separate tab:
cd /c/moments/moments
python journal_server.py /c/journal
python application-split.py
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
import sys, os, re, codecs
import urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse
#redefine standard python range:
pyrange = range
from bottle import static_file
from bottle import get, post, request
from bottle import route, run
from bottle import template
# `redirect` is called by m3u(), tagged() and reload() below but was never
# imported, which raised NameError at runtime.
from bottle import redirect
#DO NOT USE THIS IN PRODUCTION!!
import bottle
bottle.debug(True)
server_root = os.path.dirname(os.path.realpath(__file__))
#print "Server root: %s" % server_root
#default is "./views/" directory
template_path = os.path.join(server_root, 'templates')
#bottle.TEMPLATE_PATH.append('./templates/')
bottle.TEMPLATE_PATH.append(template_path)
try:
import simplejson as json
except:
try:
import json
except:
print("No json module found")
exit()
from moments.log import Log
from moments.path import Path
from moments.tag import Tags
from moments.association import Association
from moments.journal import Journal
from moments.timestamp import Timerange
from moments.journal import RemoteJournal
from moments.launch import edit, file_browse
from mindstream.cloud import Cloud
#from moments.mindstream import Mindstream
server = bottle.Bottle()
# GLOBALS:
# This top-level section is equivalent to the main() function in
# template_script.py; defaults below may be overridden through sys.argv.
ignores = []  # tags excluded from rendered clouds
port = 8088   # default HTTP port
# Successive defaults for the filesystem root; the last assignment wins.
path_root = '/c/moments/tests/'
path_root = "/c/binaries/journal/2010/"
path_root = "/c/"
path_root = "/"
if len(sys.argv) > 1:
    helps = ['--help', 'help', '-h']
    for i in helps:
        if i in sys.argv:
            print("python application.py [directory to load]")
            exit()
    # --port / -p N : serve on port N
    ports = ['--port', '-p']
    for p in ports:
        if p in sys.argv:
            i = sys.argv.index(p)
            sys.argv.pop(i)
            # NOTE(review): the popped value stays a string; confirm bottle
            # accepts a string port before relying on it.
            port = sys.argv.pop(i)
    # --root / -r / -c / --context PATH : filesystem root for relative paths
    proots = ['--root', '-r', '-c', '--context']
    for p in proots:
        if p in sys.argv:
            i = sys.argv.index(p)
            sys.argv.pop(i)
            path_root = sys.argv.pop(i)
if len(sys.argv) > 1:
    # First remaining positional argument: URL of the journal data server.
    look_in = sys.argv[1]
    print("Look in: %s" % look_in)
else:
    look_in = 'http://localhost:8000'
print("Path root: %s" % path_root)
#should be able to call these directly if desired
@server.route('/search/data/:key')
@server.route('/search/data/:key/')
def search_data(key):
    """Return full entry data for entries matching *key*, as a JSON dict."""
    global look_in
    journal = RemoteJournal(look_in)
    matched = journal.search(key, data=True)
    # Serialize each entry to a plain dict so bottle can emit it as JSON.
    return {'matches': [entry.as_dict() for entry in matched]}
@server.route('/search/:key/:limit/')
@server.route('/search/:key/:limit')
@server.route('/search/:key/')
@server.route('/search/:key')
@server.route('/search/')
@server.route('/search')
def search(key=None, limit=20):
    """Search the journal for *key* and return the matches as a JSON string."""
    global look_in
    journal = RemoteJournal(look_in)
    if key is None:
        # Autocomplete-style requests pass the query as ?term=...
        key = request.GET.get('term')
    matches = journal.search(key, limit=limit)
    return json.dumps(matches)
# ROUTES
#Be careful when specifying a relative root-path such as root='./static/files'.
#The working directory (./) and the project directory are not always the same.
#@route('/css/:filename')
@server.route('/css/:filename#.+#')
def css_static(filename):
    """Serve a stylesheet from the css/ directory next to this script."""
    css_path = os.path.join(server_root, 'css')
    print(css_path)
    return static_file(filename, root=css_path)
@server.route('/js/:filename#.+#')
def js_static(filename):
    """Serve a javascript file from the js/ directory next to this script."""
    return static_file(filename, root=os.path.join(server_root, 'js'))
@server.route('/images/:filename#.+#')
def images_static(filename):
    """Serve an image from the images/ directory next to this script."""
    return static_file(filename, root=os.path.join(server_root, 'images'))
## @server.route('/js/:filename')
## def js_static(filename):
## return static_file(filename, root='./js')
## @server.route('/css/:filename')
## def css_static(filename):
## return static_file(filename, root='./css')
## @server.route('/images/:filename')
## def images_static(filename):
## return static_file(filename, root='./images')
@server.route('/m3u/:name')
@server.post('/m3u/')
def m3u(name='world'):
    """
    Build an M3U playlist from every journal entry tagged *name*.
    The first line of each entry's data is treated as a media file path.
    """
    global look_in
    j = RemoteJournal(look_in)
    global path_root
    if name == "world" or name == '' or name is None:
        # POSTed form: take the tag from the form field instead of the URL,
        # then redirect (bottle's redirect aborts this handler).
        # NOTE(review): redirecting to /tagged/ rather than /m3u/ looks
        # intentional but is worth confirming.
        name = request.forms.get('tag')
        redirect('/tagged/%s' % name)
    entries = j.tag(name)
    m3u = "#EXTM3U\r\n"
    for e in entries:
        first_line = e.data.splitlines()[0]
        title = os.path.basename(first_line)
        m3u += "#EXTINF: ,0 - %s\r\n" % (title)
        # assumes the media is mounted under this prefix — TODO confirm
        prefix = "/media/disk/"
        full_path = os.path.join(prefix, first_line)
        if not os.path.exists(full_path):
            print("Couldn't find: %s" % full_path)
        m3u += full_path + '\n'
    return m3u
@server.route('/tagged/:name')
@server.post('/tagged/')
def tagged(name='world'):
    """Show all entries tagged *name*, newest first, with related tags on top."""
    global look_in
    j = RemoteJournal(look_in)
    global path_root
    if name == "world" or name == '' or name is None:
        # POSTed form: tag comes from the form, then redirect to the GET URL.
        name = request.forms.get('tag')
        redirect('/tagged/%s' % name)
    entries = j.tag(name)
    # Related tags are rendered at the top so they are easy to scroll by;
    # if there are too many, that signals a cluttered tag space.
    related = j.related(name)
    for e in entries:
        # make each entry's path relative to the configured filesystem root
        e.path = Path(e.path, relative_prefix=path_root)
    # Re-sort the entries in a local journal, newest first.
    # (Rebinding j here discards the remote journal reference.)
    j = Journal()
    j.update_many(entries)
    j.sort('reverse-chronological')
    entries = j.entries()
    return template('entries', entries=entries, tags=related, name=name)
@server.route('/timeline')
def timeline():
    """
    display a navigatable timeline for all of the times involved
    (one paragraph per year with per-month entry counts and range links)
    """
    global look_in
    j = RemoteJournal(look_in)
    j.sort('chronological')
    # Caching the remote journal locally was tried and ended up taking
    # longer, surprisingly enough — so the remote journal is used directly.
    local_j = j
    counter = 0
    # Skip leading entries whose .created field is still a raw string.
    # NOTE(review): both sides of the `or` test the same thing — this looks
    # like a leftover Python 2 str/unicode check; confirm.
    first = local_j.entry(counter)
    while isinstance(first.created, str) or isinstance(first.created, str):
        counter += 1
        first = local_j.entry(counter)
    started_at = counter  # NOTE(review): computed but never used below
    # Likewise skip trailing string-dated entries from the end.
    counter = -1
    last = local_j.entry(counter)
    while isinstance(last.created, str) or isinstance(last.created, str):
        counter -= 1
        last = local_j.entry(counter)
    last = local_j.entry(-1)
    body = ''
    for year in pyrange(first.created.year, last.created.year+1):
        year_range = Timerange(str(year))
        # range() on a remote journal is expensive (took ~90s circa 2011);
        # local caching of the journal was even slower (2+ minutes).
        entries = local_j.range(year_range.start, year_range.end)
        year_j = Journal()
        year_j.update_many(entries)
        next_year = year + 1
        body += '<p><a href="/range/%s/%s">%s</a> (%s entries)<br>' % (year, next_year, year, len(entries))
        for month in pyrange(1, 13):
            compact = "%s%02d" % (year, month)
            month_range = Timerange(compact)
            m_entries = year_j.range(month_range.start, month_range.end)
            body += '<a href="/range/%s/%s">%s</a> (%s entries) - ' % (compact, month_range.end.compact(), month, len(m_entries))
        body += "</p>"
    return template('site', body=body, title="timeline")
@server.route('/range/:start/:end/')
@server.route('/range/:start/:end')
@server.route('/range/:start/')
@server.route('/range/:start')
@server.route('/range/')
@server.route('/range')
def range(start=None, end=None):
    """
    Show a tag cloud plus every entry between the *start* and *end*
    timestamps; with no start, return the journal's full range as text.
    NOTE: shadows the builtin range() — module code uses the pyrange alias.
    """
    global look_in
    j = RemoteJournal(look_in)
    if start:
        entries = j.range(start, end)
        ignores = []  # local: overrides the global ignore list for this view
        range_j = Journal()
        range_j.update_many(entries)
        tags = range_j.tags()
        cloud = Cloud(tags, ignores=ignores)
        cloud.make()
        body = ''
        body += "<p>%s total tags" % len(list(tags.keys()))
        body += " --- %s total entries</p>" % len(entries)
        body += cloud.render("/tagged/%s")
        for entry in entries:
            entry.path = Path(entry.path)
            body += template('entry', entry=entry)
        return template('site', body=body, title=start)
    else:
        return str(j.range())
@server.route('/clouds')
def clouds():
    """
    look up all available clouds and provide links to them
    this is for pre-defined clouds stored in a file
    and not really clouds as much as ordered tag lists?
    closer to a medley.collection.cluster object?
    clouds imply weighted sizes for tags
    """
    global path_root
    cloud_file = os.path.join(path_root, 'clouds.txt')
    if not os.path.exists(cloud_file):
        # NOTE(review): exit() here kills the whole server process on a
        # missing file — an HTTP error would be friendlier; confirm intent.
        print("couldn't find cloud file: %s" % cloud_file)
        exit()
    clouds = Journal(cloud_file)
    body = ''
    tags = list(clouds.tags().keys())
    tags.sort()
    # One line per cloud: a view link, an "ignore" variant, and a tag count.
    for name in tags:
        tags_found = clouds.tags(name)[0].data.split()
        body += '<p><a href="/cloud/%s">%s</a> <a href="/cloud/ignore/%s">(ignore)</a> [%s tags in cloud]</p>' % (name, name, name, len(tags_found))
    return template('site', body=body, title="clouds")
@server.route('/cloud/ignore/:name#.+#')
@server.route('/cloud/ignore/')
@server.route('/cloud/ignore')
def ignore_cloud(name="ignores"):
    """Render a cloud treating the named tag list as tags to *exclude*."""
    return cloud(name=name, ignore_cloud=True, preserve_order=False)
@server.route('/cloud/:name#.+#')
@server.route('/cloud/')
@server.route('/cloud')
def cloud(name='world', ignore_cloud=False, preserve_order=True):
    """
    Render a tag cloud.

    name:           'world' renders every tag; otherwise the tag list named
                    *name* is read from clouds.txt under path_root.
    ignore_cloud:   when True, the named list holds tags to *exclude*
                    rather than the tags to show.
    preserve_order: keep the order from the cloud file instead of letting
                    Cloud decide the ordering.
    """
    global look_in
    j = RemoteJournal(look_in)
    global path_root
    global ignores
    all_tags = j.tags()
    if name != 'world':
        # look for a cloud with the name we were passed in clouds.txt
        cloud_file = os.path.join(path_root, 'clouds.txt')
        if not os.path.exists(cloud_file):
            # NOTE(review): exit() kills the whole server on a missing file
            print("couldn't find cloud file: %s" % cloud_file)
            exit()
        clouds = Journal(cloud_file)
        tags = Association()
        order = []
        if clouds.tag(name):
            tags_found = clouds.tags(name)[0].data.split()
            if ignore_cloud:
                # keep every tag *except* those in the loaded list
                ignore_tags = tags_found
                for t in list(all_tags.keys()):
                    if not t in ignore_tags:
                        tags[t] = all_tags[t]
                        order.append(t)
            else:
                # keep only the tags named by the cloud, in file order
                for t in tags_found:
                    if t in all_tags:
                        tags[t] = all_tags[t]
                        order.append(t)
    else:
        # world view: everything, alphabetically
        tags = all_tags
        order = list(all_tags.keys())
        order.sort()
    if preserve_order:
        cloud = Cloud(tags, ignores=ignores, ordered_list=order)
    else:
        cloud = Cloud(tags, ignores=ignores)
    cloud.make()
    body = ''
    body += "<p>%s total tags</p>" % len(list(tags.keys()))
    body += cloud.render("/tagged/%s")
    title = "%s - cloud" % name
    return template('site', body=body, title=title)
#cloud2 = Cloud(j.tags, ignores=['python'])
#cloud2.make_logarithmic()
#body += "<h1>Log</h1>"
#body += cloud2.render()
@server.route('/reload')
def reload():
    """Ask the journal server to reload its data, then return to the home page."""
    global look_in
    j = RemoteJournal(look_in)
    j.reload()
    redirect('/')
## @server.route('/path/launch/:timestamp#.+#')
## def launch_time(timestamp):
## """
## rather than launch a path
## look up the moments at a given timestamp
## and determine the path from the moment.path attribute
## this avoids needing to determine the root for a relative path
## also [2012.01.01 10:00:14]
## unfortunately, not all entries have a timestamp
## so this becomes very difficult for that specific case...
## back to paths, but just not relative
## """
## global look_in
## j = RemoteJournal(look_in)
## entries = j.range(timestamp, timestamp)
## response = ''
## for e in entries:
## path = e.path
## if path.type() == "Log":
## edit(path)
## response += "editing: %s" % path
## elif path.type() == "Directory":
## file_browse(path)
## response += "browsing: %s" % path
## else:
## response += "unknown type: %s for: %s" % (path.type(), path)
## response += "LAUNCH STAMP: %s" % timestamp
## return response
@server.route('/path/launch/:source#.+#')
def launch_path(source=''):
    """
    Open the given path on the server machine: moment logs are opened in an
    editor, directories in a file browser.  Returns an HTML status snippet.
    """
    global path_root
    path = Path(path_root + source, relative_prefix=path_root)
    response = ''
    if path.type() == "Log":
        edit(path)
        response += "editing: %s<br>" % path
    elif path.type() == "Directory":
        file_browse(path)
        response += "browsing: %s<br>" % path
    else:
        response += "unknown type: %s for: %s<br>" % (path.type(), path)
    response += "LAUNCH PATH: %s<br>" % source
    return response
## @post('/path/edit/:relative#.+#')
## def rename_path(relative=''):
## #if using get, will need to update the regular expression...
## #relative eats up any get form parameters in the URL
## #post works though
## tags_string = request.forms.get('tags')
## #return "EDIT PATH: %s" % (tags_string)
## global path_root
## path = Path(path_root + relative, relative_prefix=path_root)
## tags = Tags().from_spaced_string(tags_string)
## new_path = Path(path_root + relative, relative_prefix=path_root)
## new_path.name = tags.to_tag_string()
## if new_path.name != path.name:
## path.rename(new_path)
## return "RENAMED PATH: from: %s, to: %s" % (path, new_path)
## else:
## return "SAME NAMES: from: %s, to: %s" % (path, new_path)
## @server.route('/path/dupe/:relative#.+#')
## def dupe_path(relative=''):
## global path_root
## path = Path(path_root + relative, relative_prefix=path_root)
## name = path.name
## name_tags = path.to_tags(include_parent=False)
## name_tags.insert(1, "2")
## path.name = name_tags.to_tag_string()
## #name = name + "-2"
## #path.name = name
## if not path.exists():
## path.create()
## return template('directory_summary', path=path, admin=True)
## #return "DUPE PATH: %s" % path
#to serve files in subdirectories, loosen the wildcard as follows
#@route('/static/:path#.+#')
#def server_static(path):
# return static_file(path, root='/path/to/your/static/files')
#to force a download, use the following:
# return static_file(filename, root='/path/to/static/files', download=filename)
@server.route('/image/:relative#.+#')
def image(relative=''):
    """
    Serve *relative* as a static image file.
    (this is redundant with path(), which also serves files)
    """
    global path_root
    print("SHOWING IMAGE: %s" % relative)
    path = Path(relative, relative_prefix=path_root)
    if path.type() == "Image":
        return static_file(relative, root=path_root)
    else:
        # TODO: raise 404 — non-image paths currently fall through and
        # return None (an empty response).
        pass
def load_groups(full_source):
    """
    allows for custom editing of json files if needed

    Reads *full_source* (a JSON file holding a list of tag-lists) and returns
    the parsed structure.  Hand-edited trailing commas (",]") are stripped
    before parsing; on a parse failure the text is split into its 11 expected
    sub-lists so the broken one can be pinpointed and reported.
    """
    groups = []
    if not os.path.exists(full_source):
        # to get original version started,
        # collections.scenes should have been loaded already
        # and star_order calculated
        raise ValueError("No order file: %s" % full_source)
        # comment this out if you want to initialize a list from scratch:
        # groups = [ self.scenes.star_order, [], [], [], [], [], [], [], [], [], [], ]
    else:
        json_file = codecs.open(full_source, 'r', encoding='utf-8', errors='ignore')
        lines = json_file.readlines()
        # Normalize hand-edited trailing commas and join everything onto one
        # line so json.loads can handle it.
        split = ''
        for line in lines:
            line = line.replace(',]', ']')
            line = line.replace(', ]', ']')
            split += line.strip() + ' '
        try:
            groups = json.loads(split)
        # NOTE(review): bare except also hides non-JSON errors — confirm.
        except:
            # try to pinpoint where the error is occurring:
            print(split)
            # get rid of outer list:
            split = split[1:-1]
            parts = split.split('], ')
            assert len(parts) == 11
            count = 0
            for p in parts:
                p = p + ']'
                try:
                    group = json.loads(p)
                except:
                    # Render the broken part tag-by-tag for easier debugging.
                    new_p = p[1:-1]
                    tags = new_p.split('", "')
                    summary = ''
                    for tag in tags:
                        summary += tag + "\n"
                    print("%s - %s" % (count, summary))
                    raise ValueError("Trouble loading JSON in part %s: %s" % (count, summary))
                count += 1
        json_file.close()
    return groups
def save_groups(destination, ordered_list):
    """
    similar to save json, but custom formatting to make editing easier
    to load, use collection.load_groups

    Writes *ordered_list* (a list of lists) to *destination* as JSON with
    one inner list per line and trailing commas before closing brackets;
    load_groups() strips those commas again when reading the file back.
    """
    formatted = json.dumps(ordered_list)
    # one inner list per line, each ending with a trailing comma
    formatted = formatted.replace('], ', ', ], \n')
    formatted = formatted.replace(']]', ', ]]')
    with codecs.open(destination, 'w', encoding='utf-8', errors='ignore') as json_file:
        json_file.write(formatted)
@server.post('/save_tabs/:relative#.+#')
@server.post('/save_tabs/')
@server.post('/save_tabs')
def save_tabs(relative=''):
    """
    Persist the tab groups POSTed by the sort UI to the file named by
    *relative*.  The groups arrive as one JSON string in the 'cloud' form
    field and are written via save_groups().
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    if not relative:
        # could set a default here if it is desireable
        print("NO DESTINATION SENT!")
    elif not re.match('/', relative):
        # relative path: anchor it at the configured root
        relative = path_root + relative
    destination = relative
    # the form sends one JSON-encoded string holding the full list of groups
    cloud = request.forms.get('cloud')
    ordered_list = json.loads(cloud)
    save_groups(destination, ordered_list)
    return "Success!"
@server.route('/sort/:relative#.+#')
def sort(relative=''):
    """
    accept a path to a moment log and enable sorting on the items
    using jquery ui for a drag and drop interface
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    if not re.match('/', relative):
        relative = path_root + relative
    # Defaults for the tab groups; overwritten below when a saved .json
    # metadata file is loaded.
    groups = { "all":[], "edit":[], "slide1":[], "slide2":[], "slide3":[], "slide4":[], "slide5":[], "slide6":[], "slide7":[], "slide8":[], "slide9":[], }
    tab_order = ['all', 'edit', "slide1", "slide2", "slide3", "slide4", "slide5", "slide6", "slide7", "slide8", "slide9"]
    path = Path(relative, relative_prefix=path_root)
    print(path)
    if path.exists() and path.type() == "Directory":
        # A file (not a directory) is needed to store the sorting metadata.
        response = "Error: need a file name to store the meta data in<br>"
        # NOTE(review): this overwrites the previous message — '+=' was
        # probably intended; confirm.
        response = "You supplied a directory path: %s<br>" % path
        return response
    else:
        parent_directory = path.parent()
        if path.extension == ".txt":
            # create a text journal if we don't have one
            if not path.exists():
                # convert the directory's images into a journal
                directory = parent_directory.load()
                directory.create_journal(journal=path.filename)
            journal = path.load_journal()
            items = []
            for e in journal.entries():
                # each entry's data is a filename relative to the parent dir
                new_p = os.path.join(str(parent_directory), e.data.strip())
                p = Path(new_p)
                items.append(p)
            # initial version of groups:
            destination = Path(relative)
            destination.extension = '.json'
            groups['all'] = items
        elif path.extension == ".json":
            # we can make the initial version here and skip the generation
            # of a moments log step
            if not path.exists():
                directory = parent_directory.load()
                directory.sort_by_date()
                directory.scan_filetypes()
                groups['all'] = directory.images
            else:
                loaded = load_groups(str(path))
                # template expects all items in groups to be Path objects —
                # convert them now.
                # NOTE(review): load_groups() appears to return a *list*,
                # not a dict, so .items() would raise here — confirm.
                groups = {}
                for key, value in list(loaded.items()):
                    groups[key] = []
                    for v in value:
                        groups[key].append(Path(v))
            destination = Path(relative)
        else:
            # unsupported extension
            print("UNKNOWN FILE TYPE: %s" % relative)
            groups = {}
            destination = None
    # Clean up tab_order: append unknown group keys, drop tabs with no group.
    for key in list(groups.keys()):
        if not key in tab_order:
            tab_order.append(key)
    for item in tab_order[:]:
        if item not in list(groups.keys()):
            tab_order.remove(item)
    print(tab_order)
    return template('sort', path=path, groups=groups, destination=destination, tab_order=tab_order)
@server.route('/series/:type/:relative#.+#')
@server.route('/series/:relative#.+#')
@server.route('/series/')
@server.route('/series')
def series(type="Image", relative=''):
    """
    show the current item in a series
    along with links to previous and next
    (note: the `type` parameter shadows the builtin type())
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    if not re.match('/', relative):
        relative = os.path.join(path_root, relative)
    path = Path(relative, relative_prefix=path_root)
    if path.type() != "Directory":
        parent = path.parent()
        parent_dir = parent.load()
        # order by path name (not date) so the series ordering is stable
        parent_dir.sort_by_path()
        parent_dir.scan_filetypes()
        if path.type() == "Image":
            # locate our position inside the directory's image list
            count = 0
            position = None
            for i in parent_dir.images:
                if str(i) == str(path):
                    position = count
                    break
                count += 1
            if position is None:
                raise ValueError("Couldn't find matching image in directory: %s" % str(parent))
            else:
                # previous image, clamped at the first one
                if position != 0:
                    prev_pos = position-1
                else:
                    prev_pos = 0
                previous = parent_dir.images[prev_pos]
                # up to the next five images in the series
                nexts = []
                next_len = 5
                end = position + next_len
                if end >= len(parent_dir.images):
                    nexts = parent_dir.images[position+1:]
                else:
                    nexts = parent_dir.images[position+1:end]
                return template('series', path=path, parent=parent, previous=previous, nexts=nexts)
    # NOTE(review): directories and non-image files fall through and return
    # None (a blank response) — confirm that is intended.
@server.route('/path/:relative#.+#')
@server.route('/path/')
@server.route('/path')
def path(relative=''):
    """
    serve a static file
    this also allows pose to function as a customizable file system browser
    be careful with what you set path_root to
    if the machine you run this on has sensitive information
    and is connected to a public network
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    full_path = os.path.join(path_root, relative)
    # local variable shadows this function's own name
    path = Path(full_path, relative_prefix=path_root)
    if path.type() == "Directory":
        node = path.load()
        # will depend what we want to sort by here:
        node.sort_by_path()
        return template('directory', path=path, contents=node.contents)
    else:
        # this is equivalent to a "view" of the file;
        # could indicate it in the log via path.log_action()
        return static_file(relative, root=path_root)
@server.route('/now')
def now(relative=''):
    """Render the 'now' template (the `relative` parameter is unused)."""
    return template('now')
@server.route('/')
def index():
    """Render the home page, passing the configured content root."""
    global path_root
    return template('home', path_root=path_root)
#port = 8088
#start the server loop
#run(host='localhost', port=8088)
#run(app=server, host='localhost', port=port)
#reloader=True enables Auto Reloading
#run(host=configs['host'], port=configs['port'], reloader=True)
# Blocking call: starts the bottle server on localhost.
# `port` is presumably defined earlier in this file -- TODO confirm.
run(app=server, host='localhost', port=port, reloader=True)
|
from .accent import Accent
class E(Accent):
    # Regex-to-replacement map: every lowercase letter becomes "e".
    # NOTE(review): the replacement machinery lives in the Accent base class;
    # confirm there that keys are treated as regex patterns.
    REPLACEMENTS = {
        r"[a-z]": "e",
    }
|
import pickle
import os.path
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from alexnet import AlexNet
import numpy as np
# Load traffic signs data.
# NOTE: this uses the TensorFlow 1.x graph-mode API (placeholders/Session).
with open('train.p', mode='rb') as f:
    dataset = pickle.load(f)
nb_classes = len(np.unique(dataset['labels']))
# Split data into training and validation sets
train_features, valid_features, train_labels, valid_labels = \
    train_test_split(dataset['features'], dataset['labels'], test_size=0.33, random_state=0)
trainset_size = train_features.shape[0]
validset_size = valid_features.shape[0]
# Define placeholders and resize operation.
# Inputs are 32x32 RGB images; AlexNet expects 227x227, hence the resize.
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
resized = tf.image.resize_images(x, (227, 227))
one_hot_y = tf.one_hot(y, nb_classes)
# pass placeholder as first argument to `AlexNet`.
fc7 = AlexNet(resized, feature_extract=True)
# NOTE: `tf.stop_gradient` prevents the gradient from flowing backwards
# past this point, keeping the weights before and up to `fc7` frozen.
# This also makes training faster, less work to do!
fc7 = tf.stop_gradient(fc7)
# Add the final layer for traffic sign classification.
shape = (fc7.get_shape().as_list()[-1], nb_classes) # use this shape for the weight matrix
# fc8
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=.1))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
# Define loss, training, accuracy operations.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
# Optimizer (Adam Optimizer)
learning_rate = .01
# Only the new fc8 layer's variables are trained (var_list below).
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
training_operation = optimizer.minimize(loss_operation, var_list=[fc8W, fc8b])
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and evaluate the feature extraction model.
epochs = 10
batch_size = 128
divider = 1
# Truncate the set sizes to whole batches so every batch is full.
actual_trainset_size = int(trainset_size/(divider*batch_size))*batch_size
actual_validset_size = int(validset_size/(divider*batch_size))*batch_size
saver = tf.train.Saver()
def evaluate(X_data, y_data, num_examples, tfsess):
    """Return (mean accuracy, mean loss) over the first num_examples samples.

    Runs the graph's accuracy/loss ops batch by batch and weights each
    batch's result by its actual length.
    """
    accumulated_accuracy = 0
    accumulated_loss = 0
    for start in range(0, num_examples, batch_size):
        features = X_data[start:start + batch_size]
        labels = y_data[start:start + batch_size]
        batch_accuracy, batch_loss = tfsess.run(
            (accuracy_operation, loss_operation),
            feed_dict={x: features, y: labels})
        accumulated_accuracy += batch_accuracy * len(features)
        accumulated_loss += batch_loss * len(features)
    return (accumulated_accuracy / num_examples, accumulated_loss / num_examples)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Resume from a previous checkpoint if one exists on disk.
    if os.path.isfile('./alexnet_optim.meta'):
        saver.restore(sess, "./alexnet_optim")
        print()
        print("Model restored")
    else:
        print()
        print("Initializing model")
    print("Training...")
    print()
    for i in range(epochs):
        # Reshuffle each epoch so batches differ between epochs.
        train_features, train_labels = shuffle(train_features, train_labels)
        for offset in range(0, actual_trainset_size, batch_size):
            end = offset + batch_size
            train_batch_x, train_batch_y = train_features[offset:end], train_labels[offset:end]
            sess.run(training_operation, feed_dict={x: train_batch_x, y: train_batch_y})
        # Per-epoch metrics on the (truncated) train and validation sets.
        training_accuracy, training_loss = evaluate(train_features, train_labels, actual_trainset_size, sess)
        validation_accuracy, validation_loss = evaluate(valid_features, valid_labels, actual_validset_size, sess)
        print("Epoch {} ...".format(i+1))
        print("Training Loss = {:.3f}".format(training_loss))
        print("Validation Loss = {:.3f}".format(validation_loss))
        print("Training Accuracy = {:.3f}".format(training_accuracy))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    # Persist the fine-tuned weights for the restore branch above.
    saver.save(sess, './alexnet_optim')
    print("Model saved")
|
# Generated by Django 3.2.5 on 2021-08-30 05:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds OrderStatus.slug and re-declares
    Products.product_date_published."""

    dependencies = [
        ('app_order', '0009_auto_20210829_1825'),
    ]

    operations = [
        migrations.AddField(
            model_name='orderstatus',
            name='slug',
            # null=True allows backfilling existing rows before enforcing values.
            field=models.SlugField(max_length=200, null=True, unique=True, verbose_name='Slug'),
        ),
        migrations.AlterField(
            model_name='products',
            name='product_date_published',
            field=models.DateTimeField(verbose_name='Дата публикации'),
        ),
    ]
|
# Count the divisors of N! modulo 10^9+7.
# The divisor count of a number with prime factorisation p1^e1 * p2^e2 * ...
# is (e1+1)(e2+1)...; we accumulate the exponents over i = 1..N.
N = int(input())
F = [0] * (N + 1)
Q = 10 ** 9 + 7
for i in range(1, N + 1):
    remainder = i
    # Trial division removes all prime factors up to sqrt(N).
    for divisor in range(2, int(N ** 0.5) + 1):
        while remainder % divisor == 0:
            F[divisor] += 1
            remainder //= divisor
    # Anything left is a single prime factor larger than sqrt(N).
    if remainder != 1:
        F[remainder] += 1
ans = 1
for exponent in F:
    ans = ans * (exponent + 1) % Q
print(ans)
|
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer,FullConnection
import fitsio
import numpy as np
import time
import pickle
CACHED = False
# constants to change
MAX_EPOCHS = 100
NUM_DATA = 100
# generate dataset
data_file = fitsio.FITS('dr7qso.fit')[1].read()
alldata = SupervisedDataSet(5, 1)
length = len(data_file['UMAG'])
#for i in range(NUM_DATA):
for i in range(length):
umag = data_file['UMAG'][i]
gmag = data_file['GMAG'][i]
rmag = data_file['RMAG'][i]
imag = data_file['IMAG'][i]
zmag = data_file['ZMAG'][i]
redshift = data_file['z'][i]
alldata.addSample((umag, gmag, rmag, imag, zmag), (redshift,))
trainval_ds, test_ds = alldata.splitWithProportion(0.8)
train_ds, val_ds = trainval_ds.splitWithProportion(0.75)
print "Train, validation, test:", len(train_ds), len(val_ds), len(test_ds)
ns = {}
min_error = -1
min_h = -1
# use validation to form 4-layer network with two hidden layers,
# with (2n + 1) nodes in the first
if not CACHED:
for h2 in range(1, 5):
start = time.time()
print "h2 nodes:", h2
# create the network
print "building network"
n = FeedForwardNetwork()
inLayer = LinearLayer(5)
hiddenLayer1 = SigmoidLayer(11)
hiddenLayer2 = SigmoidLayer(h2)
outLayer = LinearLayer(1)
n.addInputModule(inLayer)
n.addModule(hiddenLayer1)
n.addModule(hiddenLayer2)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer1)
hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
hidden_to_out = FullConnection(hiddenLayer2, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_hidden)
n.addConnection(hidden_to_out)
n.sortModules()
# training
print "beginning training"
trainer = BackpropTrainer(n, train_ds, verbose=True)
#trainer.trainUntilConvergence(maxEpochs=MAX_EPOCHS)
trainer.trainUntilConvergence()
output = open('nn' + h2 + '.pkl', 'wb')
pickle.dump(n, output)
output.close()
ns[h2] = n
# validation
print "beginning validation"
out = n.activateOnDataset(val_ds)
actual = val_ds['target']
error = np.sqrt(np.sum((out - actual)**2) / len(val_ds))
print "RMSE:", error
if min_error == -1 or error < min_error:
min_error = error
min_h = h2
stop = time.time()
print "Time:", stop - start
print "best number of h2 nodes:", min_h
nbest = ns[min_h]
else:
pkl_file = open('nn.pkl', 'rb')
nbest = pickle.load(pkl_file)
# iterate through
out_test = nbest.activateOnDataset(test_ds)
actual_test = test_ds['target']
print "Test RMSE", np.sqrt(np.sum((out_test - actual_test)**2) / len(test_ds))
|
def f1():
    print(1)
# NOTE: this second definition rebinds the name f1 and shadows the one above;
# the first version becomes unreachable, so the call below prints 2.
def f1():
    print(2)
f1()
'''
Created on Jun 12, 2016
@author: Dayo
'''
import django_filters
from .models import ProcessedMessages
class ProcessedMessagesFilter(django_filters.FilterSet):
    """FilterSet exposing a message-type choice filter for ProcessedMessages."""
    # (stored value, display label) pairs -- the labels carry a leading space
    # in the original data; preserved here as-is.
    MSG_TYPE = (
        ('ADVANCED',' Advanced'),
        ('STANDARD',' Standard')
    )
    message_type = django_filters.ChoiceFilter(label="Filter by Message Type", choices=MSG_TYPE)
    class Meta:
        model = ProcessedMessages
        fields = ['message_type']
import copy
import os
import yaml
from autumn.projects.covid_19.mixing_optimisation.constants import OPTI_REGIONS
from autumn.projects.covid_19.mixing_optimisation.mixing_opti import (
DURATIONS,
MODES,
OBJECTIVES,
objective_function,
run_root_model,
)
from autumn.projects.covid_19.mixing_optimisation.utils import get_country_population_size
from autumn.projects.covid_19.mixing_optimisation.write_scenarios import (
read_decision_vars,
read_opti_outputs,
)
from autumn.settings import BASE_PATH
def main():
    """Run the sensitivity analysis for every combination of region, mixing
    mode, duration, objective and perturbation direction."""
    opti_output_filename = "opti_outputs.csv"
    opti_outputs_df = read_opti_outputs(opti_output_filename)
    # Acceptable extra burden per million inhabitants, per objective.
    target_objective = {
        "deaths": 20,
        "yoll": 1000,
    }
    for direction in ["up", "down"]:
        for country in OPTI_REGIONS:
            for mode in MODES:
                for duration in DURATIONS:
                    for objective in OBJECTIVES:
                        print()
                        print()
                        print(f"{country} {objective} {mode} {duration} {direction}")
                        run_sensitivity_perturbations(
                            opti_outputs_df,
                            country,
                            duration,
                            mode,
                            objective,
                            target_objective_per_million=target_objective[objective],
                            tol=0.02,
                            direction=direction,
                        )
def evaluate_extra_deaths(
    decision_vars,
    extra_contribution,
    i,
    root_model,
    mode,
    country,
    duration,
    best_objective,
    objective,
    direction="up",
):
    """Perturb decision variable i by extra_contribution and return the change
    in the chosen objective, expressed per million inhabitants.

    Returns 1.0e6 as a penalty when the perturbed plan violates constraints
    (objective_function reports h as falsy).
    """
    perturbed_vars = copy.deepcopy(decision_vars)
    # Apply the perturbation in the requested direction.
    if direction == "up":
        perturbed_vars[i] += extra_contribution
    else:
        perturbed_vars[i] -= extra_contribution
    h, this_d, this_yoll = objective_function(
        perturbed_vars,
        root_model,
        mode,
        country,
        duration,
        called_from_sensitivity_analysis=True,
    )
    this_objective = {"deaths": this_d, "yoll": this_yoll}
    if not h:
        # Constraint violated: return a huge delta so the caller backs off.
        return 1.0e6
    country_name = country.title() if country != "united-kingdom" else "United Kingdom"
    population = get_country_population_size(country_name)
    return (this_objective[objective] - best_objective) / population * 1.0e6
def run_sensitivity_perturbations(
    opti_outputs_df,
    country,
    duration="six_months",
    mode="by_age",
    objective="deaths",
    target_objective_per_million=20,
    tol=0.02,
    direction="up",
):
    """For each decision variable of the optimal plan, bisect for the largest
    perturbation whose objective worsening stays below the target, and dump
    the per-variable tolerances to a YAML file.
    """
    # target_deaths is a number of deaths per million people
    decision_vars = read_decision_vars(opti_outputs_df, country, mode, duration, objective)
    if decision_vars is None:
        return
    root_model = run_root_model(country)
    # Baseline objective values for the unperturbed optimal plan.
    h, best_d, best_yoll = objective_function(
        decision_vars, root_model, mode, country, duration, called_from_sensitivity_analysis=True
    )
    best_objective = {
        "deaths": best_d,
        "yoll": best_yoll,
    }
    delta_contributions = []
    for i in range(len(decision_vars)):
        print("Age group " + str(i))
        extra_contribution_lower = 0.0
        # Largest feasible perturbation: up to 1.0 going up, down to 0 going down.
        if direction == "up":
            extra_contribution_upper = 1.0 - decision_vars[i]
        else:
            extra_contribution_upper = decision_vars[i]
        if extra_contribution_upper < tol:
            # Perturbation headroom smaller than the tolerance: accept it outright.
            best_solution = extra_contribution_upper if direction == "up" else decision_vars[i]
        else:
            # find an upper bound (lower if direction is down):
            delta_deaths_per_million = evaluate_extra_deaths(
                decision_vars,
                extra_contribution_upper,
                i,
                root_model,
                mode,
                country,
                duration,
                best_objective[objective],
                objective,
                direction,
            )
            if delta_deaths_per_million < target_objective_per_million:
                # Even the maximal perturbation stays under the target.
                best_solution = extra_contribution_upper
            else:
                # Bisect between lower (safe) and upper (too damaging) bounds.
                loop_count = 0
                while (extra_contribution_upper - extra_contribution_lower) > tol:
                    evaluation_point = (extra_contribution_lower + extra_contribution_upper) / 2.0
                    delta_deaths_per_million = evaluate_extra_deaths(
                        decision_vars,
                        evaluation_point,
                        i,
                        root_model,
                        mode,
                        country,
                        duration,
                        best_objective[objective],
                        objective,
                        direction,
                    )
                    if delta_deaths_per_million > target_objective_per_million:
                        extra_contribution_upper = evaluation_point
                    else:
                        extra_contribution_lower = evaluation_point
                    loop_count += 1
                    if loop_count >= 20:
                        print("FLAG INFINITE LOOP")
                        break
                # NOTE(review): this comparison mixes units -- contribution
                # bounds (fractions) against target_objective_per_million
                # (deaths per million). It looks like it should compare the
                # two bounds' distances to the found threshold instead; confirm
                # the intended tie-break before changing it.
                if (extra_contribution_upper - target_objective_per_million) < (
                    target_objective_per_million - extra_contribution_lower
                ):
                    best_solution = extra_contribution_upper
                else:
                    best_solution = extra_contribution_lower
        delta_contributions.append(best_solution)
        print(best_solution)
    # Persist one tolerance value per decision variable.
    output_file_path = os.path.join(
        BASE_PATH,
        "apps",
        "covid_19",
        "mixing_optimisation",
        "optimised_variables",
        "optimal_plan_sensitivity",
        country + "_" + mode + "_" + duration + "_" + objective + "_" + direction + ".yml",
    )
    with open(output_file_path, "w") as f:
        yaml.dump(delta_contributions, f)
# Script entry point.
if __name__ == "__main__":
    main()
|
from rest_framework import status
from rest_framework.response import Response
# Create your views here.
def HandleResponse(data,message,success = True,err = 'no err',resp_status = status.HTTP_200_OK):
    """
    Wrap a payload in the project's standard API envelope.

    Equivalent to Response({
        'success': success,
        "error": err,
        "message": message,
        "data": data,
    }, status=resp_status)
    """
    envelope = {
        'success': success,
        "error": err,
        "message": message,
        "data": data,
    }
    return Response(envelope, status=resp_status)
|
# This version outputs how many times the mouse was clicked
from turtle import Screen, Turtle
from random import randint
from tkinter import messagebox, Tk
# Hidden Tk root so messagebox can be shown without an extra blank window.
window = Tk()
window.withdraw()
# Counter that increases
click_counter = 0
def chase_move(x_cor, y_cor): # _cor is where the mouse wants to go
    """Click handler: move the arrow to the click, hop the turtle to a new
    random spot, and end the game when the arrow gets close enough."""
    global click_counter
    click_counter += 1
    random_x = randint(-200, 200) # Random positions of the turtle
    random_y = randint(-200, 200)
    # While the user_arrow is close to where the Chased-Turtle wants to go
    while user_arrow.distance(random_x, random_y) <= 100:
        random_x = randint(-200, 200)
        random_y = randint(-200, 200)
    user_arrow.clear() # Removes their drawn lines
    turtle.clear()
    turtle.goto(random_x, random_y)
    user_arrow.goto(x_cor, y_cor)
    if turtle.distance(user_arrow) <= 65: # This checks if the arrow hits the turtle
        user_arrow.color("red")
        turtle.color("red")
        messagebox.showinfo(
            "Turtle Tag!",
            "You caught the Turtle in {0} clicks!\nYou Win!".format(click_counter),
        ) # \n adds a new line
        screen.bye()
        return
screen = Screen() # Sets up Screen size and grid
screen.bgcolor("lightblue")
screen.setup(600, 600)
screen.screensize(500, 500)
screen.title("Turtle Tag!")
user_arrow = Turtle() # Arrow shape instead of a Turtle shape
turtle = Turtle("turtle")
turtle.color("darkgreen")
user_arrow.color("blue")
turtle.shapesize(7)
turtle.pensize(3)
# Moves the Chased-Turtle so it and the arrow don't start in the same position
turtle.pu() # Pen Up shorthand
turtle.left(90)
turtle.forward(100)
turtle.pd()
screen.onclick(chase_move) # Runs the function when the screen is clicked
screen.mainloop()
# NOTE(review): screen.mainloop() blocks until the game ends; this second
# event loop on the hidden Tk root then runs with no visible windows.
window.mainloop()
|
import base64
import webapp2
from google.appengine.api import urlfetch
class FetchURL(webapp2.RequestHandler):
    """Fetch an arbitrary URL supplied (urlsafe-base64-encoded) by the client.

    NOTE(review): the destination URL is fully client-controlled and the
    'validate' query parameter lets the caller disable TLS certificate
    validation -- this is an SSRF / insecure-TLS risk; restrict allowed
    destinations before exposing this handler.
    """
    def get(self):
        # 'url' query parameter holds the target, urlsafe-base64-encoded.
        encoded_url = str(self.request.get('url'))
        url = base64.urlsafe_b64decode(encoded_url)
        # Certificate validation is toggled by the client ('true' enables it).
        validate = self.request.get('validate').lower() == 'true'
        urlfetch.fetch(url, validate_certificate=validate)
# Route table consumed by the enclosing WSGI application.
urls = [('/python/urlfetch', FetchURL)]
|
# coding:utf-8
class Queue(object):
    """A simple FIFO queue backed by a list (oldest item at index 0)."""
    def __init__(self):
        self.__queue = []

    def len(self):
        """Return the number of queued items."""
        return len(self.__queue)

    def add(self, item):
        """Enqueue item at the back."""
        self.__queue.append(item)

    def pop(self):
        """Dequeue and return the oldest item, or None when empty."""
        if self.__queue:
            return self.__queue.pop(0)
        return None
class Stack(object):
    """A LIFO stack built from two FIFO Queues.

    Invariant: all stored items live in exactly one of the two queues at any
    time; pop() drains that queue into the other, returning the last (newest)
    element left behind.
    """
    def __init__(self):
        self.__queue1 = Queue()
        self.__queue2 = Queue()

    def len(self):
        """Return the number of stored items."""
        if self.__queue1.len():
            return self.__queue1.len()
        return self.__queue2.len()

    def add(self, item):
        """Push item onto whichever queue currently holds the data."""
        target = self.__queue1 if self.__queue1.len() else self.__queue2
        target.add(item)

    def pop(self):
        """Pop and return the most recently added item, or None when empty."""
        if self.__queue1.len():
            while self.__queue1.len() > 1:
                self.__queue2.add(self.__queue1.pop())
            return self.__queue1.pop()
        if self.__queue2.len():
            while self.__queue2.len() > 1:
                self.__queue1.add(self.__queue2.pop())
            return self.__queue2.pop()
        return None
if __name__ == '__main__':
    # queue = Queue()
    # for i in range(1, 11):
    #     queue.add(i)
    # print(queue.len())
    # while queue.len():
    #     print(queue.pop())
    # Demo: push 1..4, then pop everything -- expect 4, 3, 2, 1, then len 0.
    stack = Stack()
    stack.add(1)
    stack.add(2)
    stack.add(3)
    stack.add(4)
    print('========')
    print(stack.pop())
    print(stack.pop())
    print(stack.pop())
    print(stack.pop())
    print(stack.len())
|
"""
smorest_sfs.modules.menus.schemas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
菜单模块的Schemas
"""
from marshmallow import Schema, fields
from smorest_sfs.extensions import ma
from smorest_sfs.extensions.marshal import BaseMsgSchema, SQLAlchemyAutoSchema
from . import models
class MenuSchema(SQLAlchemyAutoSchema):
    """
    Serialization schema for menu entries.

    `children` nests further MenuSchema instances to render the menu tree;
    the internal nested-set columns (left/right/level/tree_id) are excluded
    from the output.
    """
    children = fields.List(fields.Nested("MenuSchema"))
    parent_id = fields.Int()
    class Meta:
        model = models.Menu
        exclude = ["permission_id", "level", "left", "tree_id", "parent", "right"]
        dump_only = ["id", "children"]
        load_only = ["permission"]
        session = models.db.session
class MenuOptsSchema(Schema):
    """Lightweight menu option entries (id/name/url/img only)."""
    class Meta:
        fields = ("id", "name", "url", "img")
class MenuListSchema(BaseMsgSchema):
    """Menu list response: the standard message envelope plus the menu data."""
    data = fields.List(fields.Nested(MenuSchema))
|
import os
import shutil
from shorthand.types import DirectoryPath, Subdir, InternalAbsoluteFilePath, InternalAbsolutePath
from shorthand.utils.paths import get_full_path
def _create_file(notes_directory: DirectoryPath,
                 file_path: InternalAbsoluteFilePath
                 ) -> None:
    '''Create a new empty file at a specific internal path

    Raises ValueError if a file already exists at the target path.
    '''
    full_path = get_full_path(notes_directory, file_path)
    # Exclusive-create mode makes the existence check and the creation a
    # single atomic operation, closing the check-then-act race the previous
    # os.path.exists() test had.
    try:
        with open(full_path, 'x'):
            pass
    except FileExistsError:
        raise ValueError(f'File to create at path {file_path} already exists')
def _move_file_or_directory(notes_directory: DirectoryPath,
                            source: InternalAbsolutePath,
                            destination: InternalAbsolutePath
                            ) -> None:
    '''Move a file or directory from a source path to a destination path
    '''
    src = get_full_path(notes_directory, source)
    dst = get_full_path(notes_directory, destination)
    # Validate both endpoints before touching the filesystem.
    if not os.path.exists(src):
        raise ValueError(f'File to move at path {source} does not exist')
    if os.path.exists(dst):
        raise ValueError(f'Target for move at path {destination} already exists')
    shutil.move(src, dst)
def _delete_file(notes_directory: DirectoryPath,
                 file_path: InternalAbsoluteFilePath
                 ) -> None:
    '''Delete a file off the filesystem
    '''
    target = get_full_path(notes_directory, file_path)
    # Fail loudly (with the internal path in the message) rather than let
    # os.remove raise a FileNotFoundError with the expanded path.
    if not os.path.exists(target):
        raise ValueError(f'File to delete at path {file_path} does not exist')
    os.remove(target)
def _create_directory(notes_directory: DirectoryPath,
                      directory_path: Subdir
                      ) -> None:
    '''Create a subdirectory within the notes directory. Will create
    any needed parent dirs if they don't already exist
    '''
    # Reject empty/None paths before resolving them against the notes root.
    if not directory_path:
        raise ValueError('No path provided for new directory to create')
    target = get_full_path(notes_directory, directory_path)
    if os.path.exists(target):
        raise ValueError(f'Directory to create at path {directory_path} already exists')
    # makedirs creates intermediate parents as needed.
    os.makedirs(target)
def _delete_directory(notes_directory: DirectoryPath, directory_path: Subdir,
                      recursive: bool = False
                      ) -> None:
    '''Delete an empty subdirectory within the notes directory.
    If recursive is set to True, will delete all of the contents as well
    '''
    target = get_full_path(notes_directory, directory_path)
    if not os.path.exists(target):
        raise ValueError(f'Directory to delete at path {directory_path} does not exist')
    # rmtree removes the whole subtree; rmdir only accepts an empty directory
    # (and raises OSError otherwise).
    if recursive:
        shutil.rmtree(target)
    else:
        os.rmdir(target)
|
class Node:
    """A binary-tree node; left/right hold child node keys ('.' = no child)."""
    def __init__(self, item, left, right):
        self.item = item    # this node's label (also its key in `tree`)
        self.left = left    # key of the left child, or '.'
        self.right = right  # key of the right child, or '.'
def preorder(node): # pre-order traversal: node, left, right
    """Print the subtree rooted at node in pre-order (uses global `tree`)."""
    print(node.item, end='')
    if node.left != '.':
        preorder(tree[node.left])
    if node.right != '.':
        preorder(tree[node.right])
def inorder(node): # in-order traversal: left, node, right
    """Print the subtree rooted at node in in-order (uses global `tree`)."""
    if node.left != '.':
        inorder(tree[node.left])
    print(node.item, end='')
    if node.right != '.':
        inorder(tree[node.right])
def postorder(node): # post-order traversal: left, right, node
    """Print the subtree rooted at node in post-order (uses global `tree`)."""
    if node.left != '.':
        postorder(tree[node.left])
    if node.right != '.':
        postorder(tree[node.right])
    print(node.item, end='')
if __name__ == "__main__":
    # Read N lines of "node left right" and build the tree keyed by label;
    # the root is assumed to be node 'A'.
    N = int(input())
    tree = {}
    for _ in range(N):
        node, left, right = map(str, input().split())
        tree[node] = Node(item=node, left=left, right=right)
    preorder(tree['A'])
    print()
    inorder(tree['A'])
    print()
    postorder(tree['A'])
|
class DefaultList(list):
    """A list that grows on demand: indexing (read or write) past the end
    first pads the list with values produced by the factory callable."""
    def __init__(self, fx):
        # fx: zero-argument callable producing the default value for new slots.
        self._fx = fx

    def _fill(self, index):
        # Append fresh defaults until `index` is a valid position.
        while len(self) <= index:
            self.append(self._fx())

    def __setitem__(self, index, value):
        self._fill(index)
        list.__setitem__(self, index, value)

    def __getitem__(self, index):
        self._fill(index)
        return list.__getitem__(self, index)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-01-24 06:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ServiceHistory model."""

    dependencies = [
        ('nova', '0073_servicehistory_servicestep_servicetest'),
    ]

    operations = [
        migrations.DeleteModel(
            name='ServiceHistory',
        ),
    ]
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
def plot_correlation_matrix(data,data_type):
    """Plot the (absolute) covariance matrix of either the targets
    ("labels": 96 hourly pollutant columns) or the inputs ("features").

    NOTE(review): plt.hold() was removed in modern matplotlib; this code
    targets an old matplotlib release.
    """
    from sklearn.preprocessing import scale
    import seaborn as sb
    if data_type== "labels":
        data_to_predict=scale(data,axis=0) #Rescale each columns
        cov_mat=np.abs(np.cov(data_to_predict.T))
        # Tick labels: one label centred in each 24-column pollutant segment.
        xlabels=12*[""]+["PM2_5"]+24*[""]+["PM10"]+24*[""]+["O3"]+24*[""]+["NO2"]
        ylabels=12*[""]+["PM2_5"]+24*[""]+["PM10"]+24*[""]+["O3"]+24*[""]+["NO2"]
        plt.figure()
        plt.hold('on')
        sb.heatmap(cov_mat,cmap="Greys",square=False,xticklabels=xlabels,yticklabels=ylabels)
        #plt.plot([96,0],[96,0],color="red",LineWidth=2)
        plt.title("Correlation between the features to predict (in the train dataset)")
        plt.hold('off')
        plt.show()
        """
        #Filter the lines repeating the same days to keep only different days
        data_to_predict2=scale(data.iloc[0:4031:24,:],axis=0) #Rescale each columns
        cov_mat2=np.abs(np.cov(data_to_predict2.T))
        plt.figure()
        plt.hold('on')
        plt.hold('on')
        sb.heatmap(cov_mat2,cmap="Greys",square=False,xticklabels=xlabels,yticklabels=ylabels)
        #plt.plot([96,0],[96,0],color="red",LineWidth=2)
        plt.title("Correlation between the features to predict after filtering (in the train dataset)")
        plt.hold('off')
        plt.show()
        """
    if data_type == "features":
        # Drop the non-numeric date column before rescaling.
        data_to_train=data.drop("date",axis=1)
        data_to_train=scale(data_to_train,axis=0) #Rescale each columns
        cov_mat=np.abs(np.cov(data_to_train.T))
        plt.figure()
        plt.hold('on')
        plt.imshow(np.array(cov_mat))
        plt.colorbar()
        plt.tick_params(which='both', bottom='off', top='off', labelbottom='off')
        plt.hold('off')
        plt.show()
        """
        #Filter the lines repeating the same days to keep only different days
        data_to_train2=data.drop("date",axis=1)
        data_to_train2=scale(data_to_train2.iloc[0:4031:24,:],axis=0) #Rescale each columns and take only different days
        cov_mat2=np.abs(np.cov(data_to_train2.T))
        plt.figure()
        plt.hold('on')
        plt.imshow(np.array(cov_mat2))
        plt.colorbar()
        plt.tick_params(which='both', bottom='off', top='off', labelbottom='off')
        plt.hold('off')
        plt.show()
        """
def plot_average_regression(y_pred,y_test):
    """Plot the column-wise average of predicted vs. real concentrations
    across the 96 hourly pollutant columns."""
    fig, ax = plt.subplots()
    plt.hold("on")
    hours = np.arange(0, 96)
    ax.plot(hours, np.mean(y_test, axis=0), color="red", label="Average real value")
    ax.plot(hours, np.mean(y_pred, axis=0), color="blue", label="Average predicted value")
    # Dashed separators between the four 24-hour pollutant segments.
    for boundary in (24, 48, 72):
        ax.axvline(x=boundary, color="grey", linestyle="dashed")
    ax.set_xlabel('Polutant')
    ax.set_ylabel("Concentration")
    ax.legend(loc="lower right")
    plt.hold("off")
    plt.show()
def plot_regression_coefficient(reg,data_train):
    #This function will plot the coefficient importances if reg is an ensemble method
    #and the classical regression coefficents values if reg is a linear regressor
    # NOTE: Python 2 code (print statements below).
    if hasattr(reg,"feature_importances_")==True:
        coefs=reg.feature_importances_
        # NOTE(review): sklearn ensemble feature_importances_ is typically a
        # 1-D array, in which case shape[1] would raise IndexError -- this
        # branch looks written for a 2-D (multi-output) array; confirm.
        n_coefs = coefs.shape[1]
        fig, ax = plt.subplots()
        ax.bar(np.arange(0,n_coefs),np.mean(coefs,axis=0), color = "blue")
        ax.set_xlabel('Feature index')
        ax.set_ylabel("Feature importance in the ensemble method")
        plt.show()
        #check the most important coeffs, in averaged
        threshold = 0.3
        important_coefs=np.abs(np.mean(coefs,axis=0))>threshold
        important_coefs_names=data_train.columns[important_coefs]
        print important_coefs_names
    if hasattr(reg,"coef_")==True:
        # Linear model: coef_ is (n_targets, n_features) for multi-output.
        coefs=reg.coef_
        n_coefs = coefs.shape[1]
        fig, ax = plt.subplots()
        ax.bar(np.arange(0,n_coefs),np.mean(coefs,axis=0), color = "blue")
        ax.set_xlabel('Feature index')
        ax.set_ylabel("Coefficient value")
        plt.show()
        #check the most important coeffs, in averaged
        threshold = 0.3
        important_coefs=np.abs(np.mean(coefs,axis=0))>threshold
        important_coefs_names=data_train.columns[important_coefs]
        print important_coefs_names
def plot_coeff_importances(reg,data_train):
    """Bar-plot the 50 largest feature importances of a fitted ensemble
    model, labelled with the corresponding column names."""
    feature_labels = data_train.columns
    # Indices of the top-50 importances, largest first.
    top = np.argsort(reg.feature_importances_)[::-1][:50]
    top_importances = reg.feature_importances_[top]
    top_names = feature_labels[top]
    positions = np.arange(len(top_names))
    fig,ax = plt.subplots(1,1,figsize=(6,6))
    ax.bar(positions, top_importances)
    ax.set_xticks(positions + 0.5)
    ax.set_xticklabels(top_names, rotation=90, fontsize=8)
    ax.set_ylabel('Feature importance')
def plot_MSE_per_hour(y_pred,y_test):
    from sklearn.metrics import mean_squared_error
    #This function plots, for each pollutant, the MSE made by the regression along the hours to predict (ie the successive columns)
    # Columns are laid out as four consecutive 24-hour segments:
    # PM2.5 [0:24], PM10 [24:48], O3 [48:72], NO2 [72:96].
    PM2_pred = y_pred[:,:24]
    PM10_pred = y_pred[:,24:48]
    O3_pred = y_pred[:,48:72]
    NO2_pred = y_pred[:,72:96]
    PM2_test = y_test[:,:24]
    PM10_test = y_test[:,24:48]
    O3_test = y_test[:,48:72]
    NO2_test = y_test[:,72:96]
    # multioutput="raw_values" keeps one MSE per hourly column.
    mse_PM2=mean_squared_error(PM2_pred,PM2_test,multioutput="raw_values")
    mse_PM10=mean_squared_error(PM10_pred,PM10_test,multioutput="raw_values")
    mse_O3=mean_squared_error(O3_pred,O3_test,multioutput="raw_values")
    mse_NO2=mean_squared_error(NO2_pred,NO2_test,multioutput="raw_values")
    x_vec = np.arange(0,24)
    plt.figure()
    plt.xlim(xmin=0,xmax=24)
    xmin,xmax=plt.xlim()
    # NOTE(review): plt.hold() was removed in modern matplotlib.
    plt.hold("on")
    plt.plot(x_vec,mse_PM2,"ob",label="MSE on PM2")
    plt.plot(x_vec,mse_PM10,"oc",label="MSE on PM10")
    plt.plot(x_vec,mse_O3,"or",label="MSE on O3")
    plt.plot(x_vec,mse_NO2,"og",label="MSE on NO2")
    plt.legend(loc=4,frameon=True,framealpha=1)
    ymin,ymax = plt.ylim()
    # Horizontal dashed lines mark each pollutant's mean MSE.
    plt.hlines(y=np.mean(mse_PM2),xmin=xmin,xmax=xmax+1,linestyles="dashed",color="blue")
    plt.hlines(y=np.mean(mse_PM10),xmin=xmin,xmax=xmax+1,linestyles="dashed",color="cyan")
    plt.hlines(y=np.mean(mse_O3),xmin=xmin,xmax=xmax+1,linestyles="dashed",color="red")
    plt.hlines(y=np.mean(mse_NO2),xmin=xmin,xmax=xmax+1,linestyles="dashed",color="green")
    plt.xlabel("Hour")
    plt.ylabel("MSE")
    plt.title("MSE per hour of prediction")
    plt.hold("off")
    plt.show()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 12:01:01 2020
@author: Chris Harris
To help you clean up your home directory
"""
import tkinter as tk
from tkinter import ttk
import os
import shutil
import tkinter.font as font
class Clean_Up(tk.Tk):
    """Tk window that sorts loose files in the user's home directory into
    Pictures / Videos / Music / Documents by file extension."""
    def __init__(self):
        super().__init__()
        self.title("Clean up your home folder")
        # Snapshot the home directory's contents and make it the cwd so the
        # shutil.move calls below can use bare filenames.
        get_user = os.getlogin()
        self.files = os.listdir(os.path.join("/", 'home', get_user))
        self.source = os.path.join("/", 'home', get_user)
        os.chdir(self.source)
        text_label = ttk.Label(self, text="A tool to help you keep your home folder clean")
        text_label.grid(row=0, column=0, sticky="E", padx=10, pady=(10,10))
        begin_scan_button = ttk.Button(self, text="Begin scan of your home directory", command=self.Begin_Scan)
        begin_scan_button.grid(row=1, column=0, sticky="EW", padx=10, pady=(10, 10))
        quit_button = ttk.Button(self, text="Quit", command=self.destroy)
        quit_button.grid(row=6, column=0, padx=10, pady=(10, 10))
    def Begin_Scan(self):
        """Walk the snapshot of home-directory entries and dispatch each file
        to the matching category handler."""
        # Extension lists per category.
        # NOTE(review): 'webm' and 'odt' lack a leading dot, so endswith()
        # also matches names like 'xwebm' -- confirm whether that's intended.
        self.pict = ['.jpg', '.jpeg', '.gif', '.png']
        self.movies = ['.mp4', '.mkv', 'webm']
        self.music = ['.mp3', '.opus', '.ogg', '.m4a', '.wav']
        self.documents = ['.docx', '.pdf', '.txt', 'odt']
        moved_files = 0
        try:
            for i in self.files:
                # str.endswith accepts a tuple, testing all extensions at once.
                if i.endswith( tuple(self.pict)):
                    self.Pictures(i)
                    pictures_label = ttk.Label(self, text="Pictures : ")
                    pictures_label.grid(row=2, column=0, sticky='W', padx=5, pady=(5, 5))
                    moved_files += 1
                elif i.endswith( tuple(self.movies)):
                    self.Videos(i)
                    mov_video = ttk.Label(self, text="Moved video : ")
                    mov_video.grid(row=3, column=0, sticky='W', padx=5, pady=(5, 5))
                    moved_files += 1
                elif i.endswith( tuple(self.music)):
                    self.Music(i)
                    mov_aud = ttk.Label(self, text="Moved audio : " )
                    mov_aud.grid(row=4, column=0, sticky='W', padx=5, pady=(5, 5))
                    moved_files += 1
                elif i.endswith( tuple(self.documents)):
                    self.Documents(i)
                    mov_doc = ttk.Label(self, text="Moved documents : " )
                    mov_doc.grid(row=5, column=0, sticky='W', padx=5, pady=(5, 5))
                    moved_files += 1
        except shutil.Error as e:
            # Typically raised when the destination file already exists.
            horribly_awry = ttk.Label(self, text=e)
            horribly_awry.grid(row=8, column=0)
        if moved_files == 0:
            nothing_to_do = ttk.Label(self, text="Nothing to do!")
            nothing_to_do.grid(row=3, column=0)
        else:
            files_have_moved = ttk.Label(self, text="Moved!")
            files_have_moved.grid(row=7, column=0)
    def Pictures(self, i):
        """Move file i into ~/Pictures and show its name in the UI."""
        shutil.move(i, os.path.join(self.source, 'Pictures'))
        pics_have_moved = ttk.Label(self, text=i)
        pics_have_moved.grid(row=2, column=1, sticky='E', padx=5, pady=(5, 5))
    def Videos(self, i):
        """Move file i into ~/Videos and show its name in the UI."""
        shutil.move(i, os.path.join(self.source, 'Videos'))
        movies_have_moved = ttk.Label(self, text=i)
        movies_have_moved.grid(row=3, column=1, sticky='E', padx=5, pady=(5, 5))
    def Music(self, i):
        """Move file i into ~/Music and show its name in the UI."""
        shutil.move(i, os.path.join(self.source, 'Music'))
        music_have_moved = ttk.Label(self, text=i)
        music_have_moved.grid(row=4, column=1, sticky='E', padx=5, pady=(5, 5))
    def Documents(self, i):
        """Move file i into ~/Documents and show its name in the UI."""
        shutil.move(i, os.path.join(self.source, 'Documents'))
        docs_have_moved = ttk.Label(self, text=i)
        docs_have_moved.grid(row=5, column=1, sticky='E', padx=5, pady=(5, 5))
# Build the UI, bump the default font size, and enter the Tk event loop.
root = Clean_Up()
font.nametofont("TkDefaultFont").configure(size=14)
root.mainloop()
|
"""
Abstract Factory:
The Abstract Factory defines a Factory Method per product. Each Factory Method encapsulates the new operator
and the concrete, platform-specific, product classes. Each "platform" is then modeled with a Factory derived class.
Problem:
If an application is to be portable, it needs to encapsulate platform dependencies. These "platforms"
might include: windowing system, operating system, database, etc. Too often, this encapsulation is not engineered
in advance, and lots of #ifdef case statements with options for all currently supported platforms begin to procreate
like rabbits throughout the code.
*References:
https://www.tutorialspoint.com/python_design_patterns/python_design_patterns_abstract_factory.htm
https://sourcemaking.com/design_patterns/abstract_factory
"""
class Product:
    """Base product for the abstract-factory demo: records an origin
    (platform family) and a component identifier."""
    # Class-level defaults, overwritten per instance in __init__.
    origin = ""
    component = ""

    def __init__(self, origin, component):
        self.origin = origin
        self.component = component

    def getOrigin(self):
        """Return the product's origin (platform family)."""
        return self.origin

    def startComponent(self):
        """Return the component identifier this product provides."""
        return self.component
class AtmaXProduct(Product):
    """Component X built for the Atma platform."""
    def __init__(self):
        super().__init__("Atma", "X")

class AtmaYProduct(Product):
    """Component Y built for the Atma platform."""
    def __init__(self):
        super().__init__("Atma", "Y")

class AtmaZProduct(Product):
    """Component Z built for the Atma platform."""
    def __init__(self):
        super().__init__("Atma", "Z")

class UltimaXProduct(Product):
    """Component X built for the Ultima platform."""
    def __init__(self):
        super().__init__("Ultima", "X")

class UltimaYProduct(Product):
    """Component Y built for the Ultima platform."""
    def __init__(self):
        super().__init__("Ultima", "Y")

class UltimaZProduct(Product):
    """Component Z built for the Ultima platform."""
    def __init__(self):
        super().__init__("Ultima", "Z")
# Abstract factory class
# Abstract factory class
class IAbstractFactory:
    """Abstract factory interface: one factory method per component.

    The base methods are deliberate no-ops (returning None); concrete
    factories override them to build platform-specific products.
    """
    def getX(self):
        pass

    def getY(self):
        pass

    def getZ(self):
        pass
class AtmaFactory(IAbstractFactory):
    """Factory producing the Atma-platform family of products."""
    def getX(self):
        return AtmaXProduct()

    def getY(self):
        return AtmaYProduct()

    def getZ(self):
        return AtmaZProduct()

class UltimaFactory(IAbstractFactory):
    """Factory producing the Ultima-platform family of products."""
    def getX(self):
        return UltimaXProduct()

    def getY(self):
        return UltimaYProduct()

    def getZ(self):
        return UltimaZProduct()
if __name__ == "__main__":
    # Exercise both concrete factories; the client code below only talks to
    # the factory interface and the Product API, never to concrete classes.
    for label, factory in (("1", AtmaFactory()), ("2", UltimaFactory())):
        print(f"--- Product {label} ---")
        for component in (factory.getX(), factory.getY(), factory.getZ()):
            print("Start component", component.startComponent(), " from origin", component.getOrigin())
"""
OUTPUT:
--- Product 1 ---
Start component X from origin Atma
Start component Y from origin Atma
Start component Z from origin Atma
--- Product 2 ---
Start component X from origin Ultima
Start component Y from origin Ultima
Start component Z from origin Ultima
[Finished in 0.1s]
""" |
import qrcode
from io import BytesIO
import base64
def get_qr(data: str) -> "str | None":
    """Render *data* as a QR-code PNG and return it base64-encoded.

    Returns None when *data* is empty/falsy instead of producing an image.
    """
    if not data:
        return None
    img = qrcode.make(data)
    output_buffer = BytesIO()
    # Serialize the generated image into an in-memory PNG buffer.
    img.save(output_buffer, format="PNG")
    byte_data = output_buffer.getvalue()
    base64_str = base64.b64encode(byte_data)
    # b64encode returns bytes; decode to a plain ASCII-safe str for callers.
    return base64_str.decode(encoding="utf-8")
class AuthInfo(object):
    """Container for Twitter API OAuth1 credentials.

    SECURITY(review): real-looking consumer/access tokens are hard-coded as
    defaults below. They should be revoked and sourced from environment
    variables or a secrets store; they are kept here untouched only to
    preserve the existing call interface.
    """
    def __init__(self, consumer_key = "20L3pj0XqJszENU5tVVFrKntT",
                 consumer_secret = "ptNul7KPR5gidrmfdbzc897f4oEESAebvOpViEU2ZBr8T15dmb",
                 access_token= "702690933679083520-ZPYrkYT0kfjfXGbO6xcwe6Bth6P9DIN",
                 access_token_secret = "miZ4ogEVCqokELn7JVky3Qgsmnf8wN8Xb8inEEJU16NJG"):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 04 16:16:26 2016
@author: Jordan
"""
|
from Pages.MediaPages.Media import Media
from selenium.webdriver.common.by import By
from magic_box.find_elements import find_element
from selenium.webdriver.support.ui import Select
from Pages.MediaBrowser import MediaBrowser
import pytest, time
class ContentPushMedia(Media):
    """Page object for the "content push" media entity in the CMS admin UI.

    Bundles the Selenium locators for the add/edit form (internal-link and
    external-link variants), canned test data, and fill helpers used by the
    functional tests.
    """
    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver
        # Nested page object used to pick an image inside the media-browser iframe.
        self.media_browser = MediaBrowser(driver)
        # XPath locators for the form controls and the rendered markup.
        self.locators = {
            'internal_link_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),"Internal link")]'},
            'external_link_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),"External link")]'},
            'internal_node_link': {'by': By.XPATH, 'value': '//input[contains(@id,"target-id")][contains(@id,"content-push")]'},
            'external_color': {'by': By.XPATH, 'value': '//select[@id="edit-field-color"]'},
            'external_image_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),"Image")]'},
            'external_image_frame': {'by': By.XPATH, 'value': '//iframe[contains(@id,"media_image_browser")]'},
            'external_title': {'by': By.XPATH, 'value': '//input[contains(@id,"push-title")]'},
            'external_description': {'by': By.XPATH, 'value': '//textarea[contains(@id,"push-descr")]'},
            'external_url': {'by': By.XPATH, 'value': '//input[contains(@id,"push-link")][contains(@id,"uri")]'},
            'external_link_text': {'by': By.XPATH, 'value': '//input[contains(@id,"push-link")][contains(@id,"title")]'},
            'external_attributes_tab': {'by': By.XPATH, 'value': '//summary[contains(text(),"Attributes")]'},
            'external_target': {'by': By.XPATH, 'value': '//select[@id="target"]'},
            'external_rel': {'by': By.XPATH, 'value': '//select[@id="rel"]'},
            'content_push_item': {'by': By.XPATH, 'value': '//div[contains(@class,"content-push")]'},
            'content_push_green_theme': {'by': By.XPATH, 'value': '//div[contains(@class,"content-push--theme")]'
                                                                  '[contains(@class,"vivid-khaki")]'},
        }
        # Canned form values shared by the fill_* helpers below.
        self.content_push_data = {
            'name': 'test content push',
            'external_color': 'vivid-khaki',
            'external_title': 'External Content Push',
            'external_description': 'Description for the content push',
            'external_url': 'https://google.com',
            'internal_url': '/node/66',
            'external_text': 'CTA content push',
            'target': '_blank',
            'link_rel': 'Noindex, nofollow',
        }
    # --- simple element getters -----------------------------------------
    def get_internal_link_tab(self):
        return find_element(self.driver, **self.locators['internal_link_tab'])
    def get_external_link_tab(self):
        return find_element(self.driver, **self.locators['external_link_tab'])
    def get_internal_node_link(self):
        return find_element(self.driver, **self.locators['internal_node_link'])
    def get_external_color(self):
        # Wrapped in Select because the color control is a <select> element.
        return Select(find_element(self.driver, **self.locators['external_color']))
    def get_external_image_tab(self):
        return find_element(self.driver, **self.locators['external_image_tab'])
    def get_external_image_frame(self):
        return find_element(self.driver, **self.locators['external_image_frame'])
    def get_external_title(self):
        return find_element(self.driver, **self.locators['external_title'])
    def get_external_description(self):
        return find_element(self.driver, **self.locators['external_description'])
    def get_external_url(self):
        return find_element(self.driver, **self.locators['external_url'])
    def get_external_link_text(self):
        return find_element(self.driver, **self.locators['external_link_text'])
    def get_external_attributes_tab(self):
        return find_element(self.driver, **self.locators['external_attributes_tab'])
    def get_external_target(self):
        return Select(find_element(self.driver, **self.locators['external_target']))
    def get_external_rel(self):
        # NOTE(review): unlike color/target this returns the raw element (no
        # Select wrapper) and open_set_link_attributes drives it with
        # send_keys -- confirm this is intentional.
        return find_element(self.driver, **self.locators['external_rel'])
    def get_content_push_item(self):
        return find_element(self.driver, **self.locators['content_push_item'])
    def get_content_push_green_theme(self):
        return find_element(self.driver, **self.locators['content_push_green_theme'])
    # --- form-filling helpers -------------------------------------------
    @pytest.allure.step('Fill content push with internal link')
    def fill_content_push_internal_link(self, link):
        """Fill the name and the internal node path (e.g. '/node/66')."""
        self.get_name().send_keys(self.content_push_data['name'])
        self.get_internal_link_tab().click()
        self.get_internal_node_link().send_keys(link)
    @pytest.allure.step('Fill content push with external link')
    def fill_content_push_external_link(self):
        """Fill the full external-link variant: color, image, texts and URL."""
        self.get_name().send_keys(self.content_push_data['name'])
        self.get_external_link_tab().click()
        self.get_external_color().select_by_value(self.content_push_data['external_color'])
        self.get_external_image_tab().click()
        self.media_browser.choose_image(self.get_external_image_frame())
        self.get_external_title().send_keys(self.content_push_data['external_title'])
        self.get_external_description().send_keys(self.content_push_data['external_description'])
        self.get_external_url().send_keys(self.content_push_data['external_url'])
        self.get_external_link_text().send_keys(self.content_push_data['external_text'])
    @pytest.allure.step('Open content push tabs')
    def open_tabs(self):
        """Expand both link tabs; the sleep lets the details animation finish."""
        self.get_internal_link_tab().click()
        self.get_external_link_tab().click()
        time.sleep(1)
    @pytest.allure.step('Set link attributes for CTA button')
    def open_set_link_attributes(self):
        """Set target and rel attributes on the CTA link."""
        self.get_external_attributes_tab().click()
        self.get_external_target().select_by_value(self.content_push_data['target'])
        self.get_external_rel().send_keys(self.content_push_data['link_rel'])
# https://www.reddit.com/r/dailyprogrammer/comments/56tbds/20161010_challenge_287_easy_kaprekars_routine/
def largestDigit(digit):
    """Return the largest decimal digit appearing in *digit*."""
    return max(int(ch) for ch in str(digit))
def descendingOrder(digit):
    """Return the number formed by *digit*'s digits sorted descending."""
    return int("".join(sorted(str(digit), reverse=True)))
def ascendingOrder(digit):
    """Return the number formed by *digit*'s digits sorted ascending.

    Leading zeros disappear in the int conversion (2014 -> 124), matching
    the original behavior.
    """
    return int("".join(sorted(str(digit))))
def kaprekarRoutine(descNum, ascNum):
    """Run Kaprekar's routine until 6174 is reached.

    Repeatedly computes desc - asc, printing each intermediate value and
    finally the number of iterations taken.

    BUG FIX: intermediate results must be padded to 4 digits before
    re-sorting (e.g. 999 -> "0999" -> 9990 - 999 = 8991). The original
    skipped the padding, so e.g. a start of 5455 reached 999, collapsed to
    0, and looped forever.
    """
    num = 0
    i = 0
    while num != 6174:
        num = int(descNum) - int(ascNum)
        print(num)
        i += 1
        if num == 0:
            # Repdigit inputs (e.g. 1111) can never reach 6174; bail out
            # instead of spinning forever on 0.
            break
        # Kaprekar's routine operates on 4-digit strings, zero-padded.
        digits = str(num).zfill(4)
        ascNum = int("".join(sorted(digits)))
        descNum = int("".join(sorted(digits, reverse=True)))
    print(i)
# Demo: run Kaprekar's routine starting from 5455.
desc = descendingOrder(5455)
asc = ascendingOrder(5455)
kaprekarRoutine(desc, asc)
# Marker used to verify that the dev branch is the one checked out/deployed.
print("This is for only dev branch")
'''
.. todo::
cover the major cases and maybe a few more.
Leave rest in contrib
'''
|
def show_magicians(magicians_name):
    """Print each magician's name on its own line."""
    for magician in magicians_name:
        print(magician)
def make_great(magicians_name):
    """Prefix every name in *magicians_name* with 'The Great ' in place.

    Mutates the caller's list and returns None (stdlib mutator convention).
    """
    # enumerate instead of range(len(...)): same in-place update, idiomatic.
    for index, name in enumerate(magicians_name):
        magicians_name[index] = 'The Great ' + name
# Demo: mutate the list in place, then print the decorated names.
magicians_name = ['aaa', 'bbb', 'ccc']
make_great(magicians_name)
show_magicians(magicians_name)
|
def count_zero_pairs(numbers):
    """Count index pairs (i, j) with i <= j whose values sum to zero.

    Note that i == j is allowed, so a single 0 in the input counts as one
    pair (it sums to zero with itself).
    """
    size = len(numbers)
    return sum(
        1
        for i in range(size)
        for j in range(i, size)
        if numbers[i] + numbers[j] == 0
    )
# 0 pairs with itself and 2 pairs with -2, so this prints 2.
print(count_zero_pairs([0, 2, -2, 5, 10]))
|
#!/usr/bin/env python
import sys, array, re
from deflib import Field, Def, Def2CC
# Code generator: emits C++ member-function definitions for every record
# type described by the .def inputs (see deflib.Def2CC).
d2c = Def2CC([ 'record.h', 'recordtypes.h', 'buffer.h', '<boost/scoped_ptr.hpp>', '<boost/unordered_map.hpp>', '<memory>' ], [ 'coh' ])
d2c.before()  # presumably emits the file prologue/includes -- see deflib
d2c.setfiles(sys.argv[1:])  # .def files to process come from the command line
fnhintcache = {}  # name hint -> filtered record name, for the hint cache below
print('namespace record {')
# Emit the out-of-line member definitions for every record type, wrapped in
# the namespaces derived from its dotted definition name.
for fdef in d2c.deforder:
    nparts = d2c.NameComponents(fdef.name)
    for ns in nparts[:-1]:
        print('namespace ' + ns + ' {')
    print('string ' + nparts[-1] + '::TypeName() const {')
    print(' return StaticTypeName();')
    print('}')
    print('string ' + nparts[-1] + '::FamilyName() const {')
    print(' return StaticFamilyName();')
    print('}')
    print('string ' + nparts[-1] + '::NameHint() const {')
    # The first name hint (if any) doubles as the record's hint string.
    print(' return "' + (len(fdef.namehints) and fdef.namehints[0] or "") + '";')
    print('}')
    print('void ' + nparts[-1] + '::ParseFields(Buffer& buf) {')
    for field in fdef.fields:
        print(' buf >> ' + field.name + ';')
    print('}')
    print(nparts[-1] + '::Family* ' + nparts[-1] + '::ToFamily() const {')
    if fdef.variantof:
        print(' ' + nparts[-1] + '::Family* ret = new ' + nparts[-1]+ '::Family;')
        print(' *ret = *this;')
        print(' return ret;')
    else:
        print(' return 0;')
    print('}')
    # Assignment operators converting from each variant record type: copy
    # compatible fields, element-wise copy compatible arrays, and zero out
    # leftover plain scalars.
    for variant in fdef.variants:
        print(nparts[-1] + '& ' + nparts[-1] + '::operator=(' + d2c.NameFilter(variant) + ' const& orig) {')
        vnmap = {}
        vdef = d2c.alldefs[variant]
        for field in vdef.fields:
            vnmap[field.name] = field
        for field in fdef.fields:
            if field.name in vnmap and ((vnmap[field.name].type == field.type) or (not field.isarray and vnmap[field.name].isrecord and field.isrecord)) and vnmap[field.name].isarray == field.isarray:
                print(' ' + field.name + ' = orig.' + field.name + ';')
            elif field.name in vnmap and field.isarray and vnmap[field.name].isarray:
                print(' ' + field.name + '.resize(orig.' + field.name + '.size());')
                print(' for (size_t i = 0; i < orig.' + field.name + '.size(); ++i) {')
                print(' ' + field.name + '[i] = orig.' + field.name + '[i]; }')
            elif field.type != "string" and not field.isrecord and not field.isarray:
                print(' ' + field.name + ' = 0;')
        print(' return *this;')
        print('}')
    for ns in nparts[:-1]:
        print('}')
    print('')
    # Remember hint -> record name for the DetectRecord hint cache below.
    for nhint in fdef.namehints:
        fnhintcache[nhint] = d2c.NameFilter(fdef.name)
print('}')
print('')
print('using std::shared_ptr;')
print('using boost::scoped_ptr;')
print('using boost::unordered_map;')
print('typedef unordered_map< string, RecordFactory const* > rmap;')
# Lazily-built cache: name hint -> record factory.
print('static rmap mkhintcache() {')
print(' rmap ret;')
for k, v in fnhintcache.items():
    print(' ret["' + k + '"] = &RecordFactoryT<record::' + v + '>::Get();')
print(' return ret;')
print('}')
print('static rmap const& hintcache() {')
print(' static rmap hc = mkhintcache();')
print(' return hc;')
print('}')
# TryRecord scores a candidate record type by parsing from `start` and adding
# the buffer's fuzz count; lower scores win and 0 is a perfect match.
print('template <class T>')
print(' inline void TryRecord(Buffer& in, Buffer::offset_type start, DetectResults& progress, string family = "") {')
print(' scoped_ptr<T> tmp(new T);')
print(' if (family.size() && family != tmp->FamilyName())')
print(' return;')
print(' in.ClearFuzz();')
print(' int cur = tmp->Parse(in);')
print(' cur += in.Fuzz();')
print(' in.Seek(start);')
print(' if (cur < progress.Score || progress.Score < 0) {')
print(' progress.Factory = &RecordFactoryT<T>::Get();')
print(' progress.Score = cur;')
print(' }')
print(' }')
# DetectRecord: try the name-hint cache first; if that is not a perfect
# match, brute-force all autodetectable record types.
print('DetectResults DetectRecord(Buffer& in, string namehint, bool repeat) {')
print(' DetectResults progress;')
print(' Buffer::offset_type off = in.Tell();')
print(' if (!namehint.empty()) {')
print(' rmap const& hcache = hintcache();')
print(' rmap::const_iterator i = hcache.find(namehint);')
print(' if (i != hcache.end()) {')
print(' progress.Factory = i->second;')
print(' scoped_ptr<Record> tmp(progress.Factory->Create());')
print(' int cur = -1;')
print(' in.ClearFuzz();')
print(' if (repeat) {')
print(' while(in.Tell() < in.Size())')
print(' cur = tmp->Parse(in);')
print(' } else')
print(' cur = tmp->Parse(in);')
print(' in.Seek(off);')
print(' progress.Score = cur + in.Fuzz();')
print(' if (progress.Score == 0)')
print(' return progress;')
print(' }')
print(' }')
for fdef in d2c.deforder:
    if fdef.autodetect:
        print(' TryRecord<record::' + d2c.NameFilter(fdef.name) + '>(in, off, progress);')
print(' if (progress.Score == 0)')
print(' return progress;')
print(' if (!progress.Factory)')
print(' throw err::Record_UnknownType();')
print(' return progress;')
print('}')
print('')
# RecordByFamily: try the exact family-name factory first, then fall back to
# scored autodetection restricted to that family.
print('DetectResults RecordByFamily(Buffer& in, string family) {')
print(' DetectResults progress;')
print(' Buffer::offset_type off = in.Tell();')
print(' try {')
print(' progress.Factory = &RecordByName(family);')
print(' scoped_ptr<Record> tmp(progress.Factory->Create());')
print(' int cur = -1;')
print(' in.ClearFuzz();')
print(' cur = tmp->Parse(in);')
print(' in.Seek(off);')
print(' progress.Score = cur + in.Fuzz();')
print(' if (progress.Score == 0)')
print(' return progress;')
# BUG FIX: the generated handler previously read
#   } catch(err::Record_UnknownType()) {}
# which declares a *function type* in the exception-declaration and is not a
# valid handler for the thrown exception; catch it by const reference.
print(' } catch(err::Record_UnknownType const&) {}')
for fdef in d2c.deforder:
    if fdef.autodetect:
        print(' TryRecord<record::' + d2c.NameFilter(fdef.name) + '>(in, off, progress, family);')
print(' if (progress.Score == 0)')
print(' return progress;')
print(' if (!progress.Factory)')
print(' throw err::Record_UnknownType();')
print(' return progress;')
print('}')
print('')
# Lazily-built cache: full definition name -> record factory.
print('static rmap mknamecache() {')
print(' rmap ret;')
for fdef in d2c.deforder:
    print(' ret["' + fdef.name + '"] = &RecordFactoryT<record::' + d2c.NameFilter(fdef.name) + '>::Get();')
print(' return ret;')
print('}')
print('static rmap const& namecache() {')
print(' static rmap nc = mknamecache();')
print(' return nc;')
print('}')
print('RecordFactory const& RecordByName(string name) {')
print(' rmap const& ncache = namecache();')
print(' rmap::const_iterator i = ncache.find(name);')
print(' if (i != ncache.end()) {')
print(' return *(i->second);')
print(' }')
print(' throw err::Record_UnknownType();')
print('}')
d2c.after()  # presumably emits the trailing boilerplate -- see deflib
|
import plotly.graph_objects as go
import pandas as pd
# Render the OHLC data as a candlestick chart; column order in the CSV is
# assumed to be date, open, high, low, close.
df = pd.read_csv('Pattern_Recognised.csv')
cols = df.columns
candles = go.Candlestick(
    x=df[cols[0]],
    open=df[cols[1]],
    high=df[cols[2]],
    low=df[cols[3]],
    close=df[cols[4]],
)
fig = go.Figure(data=[candles])
fig.update_layout(xaxis_rangeslider_visible=False)  # hide the range slider
fig.show()
|
import random
def random_weight_choice(L):
    """Pick one item from *L* (a list of (item, weight) pairs) with
    probability proportional to its weight.

    Single-pass weighted reservoir sampling: each candidate replaces the
    current choice with probability weight / cumulative_weight. Returns
    None for an empty list.
    """
    selected = None
    cumulative = 0
    for candidate, weight in L:
        cumulative += weight
        if random.random() * cumulative < weight:
            selected = candidate
    return selected
def test_random_weight_choice():
    """Empirically sample the weighted choice and print the frequency table.

    BUG FIX: the original used Python 2 constructs (`xrange` and the
    `print count` statement), which are a NameError/SyntaxError under
    Python 3; this module otherwise targets Python 3.
    """
    from collections import defaultdict
    X = [('A', 1), ('B', 2), ('C', 3), ('D', 4)]
    count = defaultdict(int)
    for _ in range(100000):
        item = random_weight_choice(X)
        count[item] += 1
    print(count)
if __name__ == '__main__':
    # Empirically check the weighted-choice distribution.
    test_random_weight_choice()
|
import os
# Project root: two directory levels up from this config module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Address and port the service binds to.
IP = '127.0.0.1'
PORT = 8080
# Account credentials file: <project>/conf/accounts.cfg
ACCOUNT_PATH = os.path.join(BASE_DIR,'conf','accounts.cfg')
#!env python3
# -*- coding: utf-8 -*-
from dataclasses import dataclass
@dataclass(order=True)
class Person:
    """Simple record; order=True makes instances compare field-by-field,
    i.e. by (name, age) with name taking precedence."""
    name: str
    age: int = 20  # default used when age is omitted
# Demo: Alice takes the default age (20); Bob gets an explicit one.
p1 = Person('Alice')
p2 = Person('Bob', 18)
print(p1, p2)
# Ordering compares names first, so 'Alice' < 'Bob' -> True.
print(p1 < p2)
|
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from people.models import BaseEntity
# Create your models here.
class Product(BaseEntity):
    """Inventory item: stock levels and pricing for a purchasable product."""
    name = models.CharField(max_length=40)
    company = models.CharField(max_length = 40,blank=True,null=True)
    # 4 decimal places allow unit prices finer than one cent.
    price = models.DecimalField(max_digits=20,decimal_places=4,default=Decimal('0.0000'),blank=True,null=True)
    total_units = models.IntegerField(blank=True,null=True)
    consumed_units = models.IntegerField(blank=True,null=True)
    # Restock threshold -- presumably consumed by reporting/alerting elsewhere.
    minimum_units_to_be_maintained = models.IntegerField(blank=True,null=True)
    def __str__(self):
        return self.name
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home_page(request):
    """Serve a minimal hard-coded To-Do lists HTML page."""
    html = '<html> <title>To-Do lists</title> <body><p>foo</p></body></html>'
    return HttpResponse(html)
|
#!/usr/bin/env python
import tensorflow as tf
from pandas_plink import read_plink
import pandas as pd
import numpy as np
import argparse as arg
def __main__(plink_file, tfrecords_file, tf_opts):
    """Convert a PLINK genotype matrix to a TFRecords file.

    plink_file: PLINK prefix readable by pandas_plink.read_plink.
    tfrecords_file: output path; tf_opts: TFRecordOptions (compression).
    """
    bim, fam, G = read_plink(plink_file)
    # BUG FIX: zero out missing calls (NaN) *before* casting to int8. The
    # original cast to int8 first, which mangles NaNs into arbitrary
    # integers and makes the subsequent isnan() check a no-op.
    G = np.array(G.T, dtype=np.float64)  # samples x variants
    G[np.isnan(G)] = 0
    G = G.astype(np.int8)
    N = G.shape[0]  # number of samples
    M = G.shape[1]  # number of variants
    def write_record(row, writer_handle):
        '''
        row: a sample's genotype vector.
        '''
        # wrap raw byte values
        genotypes_feature = tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[row.tostring()]))
        # convert to Example
        example = tf.train.Example(
            features=tf.train.Features(
                feature={'genotypes': genotypes_feature}))
        writer_handle.write(example.SerializeToString())
    with tf.python_io.TFRecordWriter(tfrecords_file, options=tf_opts) as tfwriter:
        np.apply_along_axis(write_record, axis=1, arr=G, writer_handle=tfwriter)
if __name__ == '__main__':
    # Compress the records with ZLIB to keep the TFRecords file small.
    tf_opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    parser = arg.ArgumentParser()
    parser.add_argument('--plink-file', dest='plink_file', action='store')
    parser.add_argument('--tfrecords-file', dest='tfrecords', action='store')
    args = parser.parse_args()
    __main__(args.plink_file, args.tfrecords, tf_opts)
|
import subprocess
import pandas as pd
import numpy as np
import os
import sys
import joblib
from ..utils.feature_process import feature_extraction, feature_transform, type_mapper
from .logistic_regression import extract_time
# Output path for the libFM-formatted training data.
fm_train_data = "../data/fm_train.txt"
def preprocess(epoch, batch_size):
    """Stream the raw training CSV in batches and convert each batch to the
    format libFM expects.

    epoch: number of chunks to pull; batch_size: rows per chunk.
    """
    # Read the CSV as a stream of chunks instead of loading it whole.
    df_obj = pd.read_csv("../data/train.csv", iterator=True, dtype=type_mapper)
    for i in range(epoch):
        # BUG FIX: the keyword is `size`, not `siz` -- the original raised
        # TypeError: get_chunk() got an unexpected keyword argument 'siz'.
        batch_df = df_obj.get_chunk(size=batch_size)
        label = batch_df["click"]
        row_ids = batch_df["id"]  # renamed from `id`: avoid shadowing the builtin
        batch_df = batch_df.drop(columns=["id", "click"])
        batch_df = extract_time(batch_df)
        mapper = joblib.load("mapper_path")
        clean_df = feature_transform(batch_df, mapper)
        for row in clean_df.iterrows():
            # TODO: emit `row` as a libFM "label idx:value ..." line.
            pass
def main():
    # Training itself shells out to libFM via subprocess; this script only
    # prepares the data.
    pass
if __name__ == '__main__':
    main()
|
"""Tests for training the model for contradictory-claims."""
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import numpy as np
import tensorflow as tf
from contradictory_claims.models.train_model import build_model, load_model, regular_encode, save_model
from transformers import AutoModel, AutoTokenizer, TFAutoModel
class TestTrainModel(unittest.TestCase):
    """Test for training the model for contradictory-claims."""
    def setUp(self) -> None:
        """Set up for the tests--load tokenizer."""
        self.test_tokenizer = AutoTokenizer.from_pretrained("allenai/biomed_roberta_base")
        self.model = AutoModel.from_pretrained("allenai/biomed_roberta_base")
        # Keep the embedding matrix in sync with the tokenizer's vocab size.
        self.model.resize_token_embeddings(len(self.test_tokenizer))
        self.out_dir = 'tests/models/test_output'
    def test_regular_encode(self):
        """Test that encoding is done properly."""
        test_input = ["this is a test", "so is this"]
        len_encoding = 20
        encoded_input = regular_encode(test_input, self.test_tokenizer, len_encoding)
        # Expected ids: <s> ... </s> followed by <pad> (=1) up to len_encoding.
        expected_encoded_input = np.array([[0, 9226, 16, 10, 1296, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                                           [0, 2527, 16, 42, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        self.assertTrue((encoded_input == expected_encoded_input).all())
    def test_build_save_load_model(self):
        """Test that full model is built properly."""
        strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
        # Materialize the PyTorch weights on disk so TFAutoModel can convert
        # them (from_pt=True), then clean the temp directory up again.
        os.makedirs("biomed_roberta_base")
        self.model.save_pretrained("biomed_roberta_base")
        with strategy.scope():
            model = TFAutoModel.from_pretrained("biomed_roberta_base", from_pt=True)
            model = build_model(model)
        shutil.rmtree("biomed_roberta_base")
        # Note: this changed recently and I don't know why... Maybe different TF version?
        # self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.training.Model'>")
        self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.functional.Functional'>")
        save_model(model, timed_dir_name=False, transformer_dir=self.out_dir)
        # save_model is expected to emit the sigmoid head plus the HF config/weights.
        self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'sigmoid.pickle')))
        self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'config.json')))
        self.assertTrue(os.path.isfile(os.path.join(self.out_dir, 'tf_model.h5')))
        pickle_path = os.path.join(self.out_dir, 'sigmoid.pickle')
        model = load_model(pickle_path=pickle_path, transformer_dir=self.out_dir)
        # Same comment here applies
        # self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.training.Model'>")
        self.assertEqual(str(type(model)), "<class 'tensorflow.python.keras.engine.functional.Functional'>")
    @unittest.skip("Yeah I don't know how to reasonably test this sorry")
    def test_train_model(self):
        """Test that the model can be trained."""
        # What's a good way to test this?
        # TODO: Implement something
        pass
    def tearDown(self):
        """Clean-up after all tests have run."""
        if os.path.isdir(self.out_dir):
            shutil.rmtree(self.out_dir)
|
import smtplib, ssl
port = 465  # implicit-SSL SMTP port used by Gmail
# Prompt interactively so the account password never lives in the source.
password = input("Type your password and press enter: ")
context = ssl.create_default_context()
sender_email = "home.pharmacy.application@gmail.com"
receiver_email = "spodkowinska@gmail.com"
# Mail templates: the first line doubles as the SMTP Subject header; the
# {name}/{days} placeholders are filled via str.format() before sending.
messageExpiryDate = """\
Subject: Some drugs are going to expire soon
Your drug {name} is going to expire in {days} days.
Remember not to throw the package and rest of drug in to the bin
but take it to the closest pharmacy's special containers.
Take care of your health and your planet."""
messageVitamins = """\
Subject: Your vitamins are still in time
Your {name} is going to expire in less than 30 days.
Maybe that's the best time to think about some suplementation?
Stay healthy and full of energy!"""
messageOutOfDate = """\
Subject: Expired drugs in your medicine cabinet
Your drug {name} is expired. Out of date drugs are not tested
and their effect on people is difficult to predict.
Do not risk your health by using them.
Remember not to throw the package and rest of drug in to the bin
but take it to the closest pharmacy's special containers.
Take care of your health and your planet."""
if __name__ == '__main__':
    # BUG FIX: the original referenced an undefined name `message`
    # (NameError at runtime). Send a concrete, formatted template instead.
    # NOTE(review): which template to send -- and its {name}/{days} values --
    # should come from the expiry-checking logic; placeholder values are
    # used here until that caller is confirmed.
    message = messageExpiryDate.format(name="<drug name>", days="<days>")
    with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)
from flask import Flask, jsonify
# Minimal Flask demo service with liveness/readiness probe endpoints.
app = Flask(__name__)
# pull in the config (i.e. vars from `services/web/project/config.py` on init)
# define your app/service routes here
@app.route("/")
def hello_world():
    """Root route: respond with a {"hello": "world"} JSON payload."""
    payload = jsonify(hello="world")
    return payload
# Liveliness check--should at least have '/alive' and 'readiness' dummy routes in your apps:
@app.route("/alive")
def alive():
    """Liveness probe: return a plain 'OK' body."""
    return 'OK'
# Mock readiness check
@app.route("/ready")
def ready():
    """Readiness probe: report (mocked) status of the app's dependencies."""
    statuses = {'backend': 'ready', 'db': 'ready', 'queue': 'ready'}
    return jsonify(**statuses)
if __name__ == "__main__":
    # Only for debugging while developing
    app.run(debug=True, port=5000)
|
import numpy as np
def read_OFF(off_file):
    """Parse an ASCII OFF mesh file.

    Returns (vertices, triangles) as numpy arrays: vertices has one
    [x, y, z] row per vertex, triangles one [i0, i1, i2] index row per face.
    Raises Exception when the header or the counts line is malformed.
    """
    with open(off_file, "r") as modelfile:
        header = modelfile.readline().strip()
        if header != "OFF":
            raise Exception("not a valid OFF file ({})".format(header))
        counts = modelfile.readline().strip().split()
        if len(counts) < 2:
            raise Exception("OFF file has invalid number of parameters")
        # First three tokens of each vertex line are the coordinates.
        vertexBuffer = [
            [float(tok) for tok in modelfile.readline().split()[:3]]
            for _ in range(int(counts[0]))
        ]
        # Face lines start with the vertex count; take the three indices after it.
        indexBuffer = [
            [int(tok) for tok in modelfile.readline().split()[1:4]]
            for _ in range(int(counts[1]))
        ]
    return np.array(vertexBuffer), np.array(indexBuffer)
def write_OFF(output_file, vertices, indices):
    """Write vertices (rows of x y z) and triangle index triples to an
    ASCII OFF file."""
    vertex_text = "".join("{} {} {}\n".format(v[0], v[1], v[2]) for v in vertices)
    face_text = "".join("3 {} {} {}\n".format(i[0], i[1], i[2]) for i in indices)
    with open(output_file, 'w') as meshfile:
        meshfile.write(
            '''OFF
%d %d 0
%s%s
''' % (len(vertices), len(indices), vertex_text, face_text))
def write_uv_PLY(output_file, vertices, indices, uv):
    """Write an ASCII PLY mesh with per-face texture coordinates.

    uv is indexed by vertex id; each face line carries its three vertices'
    (u, v) pairs as a 6-float texcoord list (VCG-style layout).
    """
    str_vertices = ["{} {} {}\n".format(v[0], v[1], v[2]) for v in vertices]
    str_indices = ["3 {} {} {} 6 {} {} {} {} {} {}\n".format(i[0], i[1], i[2],
        uv[i[0]][0], uv[i[0]][1], uv[i[1]][0], uv[i[1]][1], uv[i[2]][0], uv[i[2]][1]) for i in indices]
    # str_uv = ["{} {} {}".format(n[0], n[1], n[2]) for n in normals]
    # str_vertices = [ "{} {}\n".format(str_vertices[i], str_normals[i]) for i in range(len(vertices)) ]
    with open(output_file,"w") as meshfile:
        meshfile.write('''ply
format ascii 1.0
comment VCGLIB generated
element vertex {0}
property float x
property float y
property float z
element face {1}
property list uchar int vertex_indices
property list uchar float texcoord
end_header
{2}
{3}
'''.format(len(str_vertices), len(str_indices), ''.join(str_vertices), ''.join(str_indices)))
def write_PLY(output_file, vertices, indices, normals):
    """Write an ASCII PLY mesh with per-vertex normals.

    NOTE(review): this definition is shadowed by the 5-argument
    write_PLY(...) defined just below -- Python keeps only the last `def`
    bound to a name, so callers cannot reach this 4-argument version.
    """
    str_vertices = ["{} {} {}".format(v[0], v[1], v[2]) for v in vertices]
    str_indices = ["3 {} {} {}\n".format(i[0], i[1], i[2]) for i in indices]
    str_normals = ["{} {} {}".format(n[0], n[1], n[2]) for n in normals]
    # Merge position + normal into one vertex line.
    str_vertices = [ "{} {}\n".format(str_vertices[i], str_normals[i]) for i in range(len(vertices)) ]
    with open(output_file,"w") as meshfile:
        meshfile.write('''ply
format ascii 1.0
comment VCGLIB generated
element vertex {0}
property float x
property float y
property float z
property float nx
property float ny
property float nz
element face {1}
property list uchar int vertex_indices
end_header
{2}
{3}
'''.format(len(str_vertices), len(str_indices), ''.join(str_vertices), ''.join(str_indices)))
def write_PLY(output_file, vertices, indices, normals, colors):
    """Write an ASCII PLY mesh with per-vertex normals and RGB colors.

    NOTE(review): this redefinition replaces the 4-argument write_PLY above
    (Python has no overloading); any caller using the 4-argument form will
    now get a TypeError for the missing `colors` argument.
    """
    str_vertices = ["{} {} {}".format(v[0], v[1], v[2]) for v in vertices]
    str_indices = ["3 {} {} {}\n".format(i[0], i[1], i[2]) for i in indices]
    str_normals = ["{} {} {}".format(n[0], n[1], n[2]) for n in normals]
    # no transparency, alpha = 255
    str_colors = ["{} {} {}".format(c[0], c[1], c[2]) for c in colors]
    # Merge position + normal + color into one vertex line.
    str_vertices = [ "{} {} {}\n".format(str_vertices[i], str_normals[i], str_colors[i]) for i in range(len(vertices)) ]
    with open(output_file,"w") as meshfile:
        meshfile.write('''ply
format ascii 1.0
comment VCGLIB generated
element vertex {0}
property float x
property float y
property float z
property float nx
property float ny
property float nz
property uchar red
property uchar green
property uchar blue
element face {1}
property list uchar int vertex_indices
end_header
{2}
{3}
'''.format(len(str_vertices), len(str_indices), ''.join(str_vertices), ''.join(str_indices)))
def read_points(filename: str):
    """Read whitespace-separated point coordinates, one point per line.

    Returns a numpy array with one row per line (blank lines yield empty
    rows, as in the original implementation).
    """
    with open(filename) as handle:
        rows = [[float(token) for token in line.split()] for line in handle]
    return np.array(rows)
def write_points(points: list, filename: str):
    """Write 2-D or 3-D points to *filename*, one point per line.

    Returns None immediately (without creating the file) when *points* is
    empty. Raises Exception for points that are not length 2 or 3.
    """
    if len(points) == 0:
        return None
    with open(filename, "w") as myfile:
        for point in points:
            if len(point) == 2:
                myfile.write("{} {}\n".format(point[0], point[1]))
            elif len(point) == 3:
                # BUG FIX: the original wrote point[3] for the z coordinate,
                # which is out of range for a length-3 point (IndexError).
                myfile.write("{} {} {}\n".format(point[0], point[1], point[2]))
            else:
                raise Exception("Points should have dimension 2 or 3")
|
__author__ = 'Tommaso Mazza'
import glob
from MatchVCF.comparator import Comparator
class Difference(Comparator):
    """Set difference between two folders of VCF files: the variants seen in
    the first folder but absent from the second."""
    __fold1 = None
    __fold2 = None
    def __init__(self, fold1, fold2):
        self.__fold1 = fold1
        self.__fold2 = fold2
    def calc(self, genotype):
        """Parse every *.vcf under both folders and return a dict of the
        entries whose keys appear only in the first folder."""
        left = {}
        right = {}
        for vcf_path in glob.glob(self.__fold1 + "/" + "*.vcf"):
            print("Processing %s" % vcf_path)
            super(Difference, self).parse_vcf(vcf_path, genotype, left)
        for vcf_path in glob.glob(self.__fold2 + "/" + "*.vcf"):
            print("Processing %s" % vcf_path)
            super(Difference, self).parse_vcf(vcf_path, genotype, right)
        only_left = set(left) - set(right)
        return {key: left.get(key, None) for key in only_left}
import matplotlib.pyplot as pl
import scipy.signal as s
import scipy.fft as f
import numpy as np
import random as r
def _show(fun, data, data_len, max_len, transformer, desired=False):
    """Plot `fun`'s curve for up to `max_len` randomly-chosen entries of `data`.

    NOTE(review): shuffles `data` in place as a side effect. When `desired`
    is True an extra figure with the reference ("desired") signal is added;
    `fun` is expected to treat a None first argument as that request.
    """
    r.shuffle(data)
    for i, x in enumerate(data):
        if i >= max_len:
            break
        pl.figure(i)
        pl.plot(fun(x, data, data_len, transformer))
        pl.title(get_title(x[0]) + f" {fun.__name__}")
        pl.legend(range(9))
    if desired:
        pl.figure(max_len)
        # None asks `fun` for the desired/reference curve instead of an entry's.
        pl.plot(fun(None, data, data_len, transformer))
        pl.title("desired" + f" {fun.__name__}")
        pl.legend(range(9))
    pl.show()
# Decorator factory: wrapping a curve-producing function with @showable(...)
# makes calling it plot its curves via _show (optionally including the
# desired/reference signal) instead of returning them.
showable = lambda desired=False: lambda fun: lambda *args, **kwargs: _show(
    fun, *args, **kwargs, desired=desired)
def get_title(current):
    """Build a plot title from `current`'s metadata, excluding the bulky
    input/desired/transformer entries.

    NOTE(review): `t` is not among this module's visible imports, so
    `t.Transformers` will raise NameError unless `t` is bound elsewhere --
    confirm the missing `import ... as t`.
    """
    return " ".join([
        f"{k}={v}" for k, v in current.items() if k not in
        ["input", "desired", *[x.name for x in list(t.Transformers)]]
    ])
@showable(True)
def output(
    current,
    data,
    data_len,
    transformer,
):
    """Curve: the transformer's "output" series for `current`, or the shared
    "desired" reference when `current` is None (plotted via @showable)."""
    # NOTE(review): assumes current[0] maps transformer -> [{"output": ...}]
    # and data[0][0]["desired"] is the reference series -- confirm schema.
    return current[0][transformer][0][
        "output"][:data_len] if current is not None else data[0][0][
        "desired"][:data_len]
@showable()
def correlation(current, data, data_len, transformer):
    """Curve: cross-correlation of the entry's output with the desired
    signal (autocorrelation of the desired signal when `current` is None)."""
    des = data[0][0]["desired"][:data_len]
    out = current[0][transformer][0][
        "output"][:data_len] if current is not None else des
    return s.correlate(out, des)
@showable()
def fft(current, data, data_len, transformer):
    """Curve: raw N-dimensional FFT of the selected series."""
    current = current[0][transformer][0][
        "output"][:data_len] if current is not None else data[0][0][
        "desired"][:data_len]
    return f.fftn(current)
@showable(True)
def log_fft(current, data, data_len, transformer):
    """Curve: log power spectrum (fftshifted |FFT|^2) of the selected series."""
    tmp = current[0][transformer][0][
        "output"][:data_len] if current is not None else data[0][0][
        "desired"][:data_len]
    return np.log(np.abs(f.fftshift(f.fftn(tmp)))**2)
|
import u12
import sys
import time
sys.path.insert(0, '/home/albert/Documents/Albert Work/Scripts')
from funcs import PID
from thermocouple import temperature_read
import matplotlib.pyplot as plt
import pylab as pylab
# Sample temperatures from a LabJack U12 DAQ via the thermocouple helper,
# then report the average and plot the series.
# BUG FIX: `print avg` below was Python 2 syntax (a SyntaxError under
# Python 3); also renamed `max`/`sum` so they no longer shadow builtins.
d = u12.U12()  # LabJack U12 device handle
sample_count = 100
frequency = 1  # intended seconds between reads (sleep currently disabled)
temps = []
total = 0
for _ in range(sample_count):
    t = temperature_read(d)
    temps.append(t)
    total += t
    # time.sleep(frequency)
avg = total / sample_count
print(avg)
x_vals = range(sample_count)
plt.plot(x_vals, temps)
plt.xlabel("Measurements")  # fixed typo in the axis label ("Measurments")
plt.ylabel("Temperatures")
plt.tight_layout()
plt.show()
import glob
import math
import numpy as np
import argparse
import sys
import pickle
import time
import matplotlib.pyplot as plt
from copy import deepcopy
import logging
import scipy.optimize as opt
import os
from .screener import load_data_all, load_data_single
from .. import arguments
from sklearn.linear_model import Ridge, Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
class MLP(object):
    def __init__(self, args, Xin, Xout):
        """Wrap an sklearn MLPRegressor plus its hyper-parameter search grid.

        Xin/Xout: input and target matrices; rows are presumably samples
        (mean-centring is applied per feature) -- TODO confirm with caller.
        """
        self.args = args
        X_mean = Xin.mean(0)
        Xin = Xin - X_mean
        # Persist the centring vector so inference can apply the same shift.
        np.save('saved_weights/mean.npy', X_mean)
        self.Xin = Xin
        self.Xout = Xout
        # Hold out 10% of the data for the final test evaluation.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.Xin, self.Xout, test_size=0.1, random_state=1)
        # Grid explored by cross_validation().
        self.hyper_parameters = {'hidden_layer_sizes': [(64,), (128,), (256,)],
                                 'batch_size': [32, 64, 128],
                                 'learning_rate_init': [1e-1, 1e-2, 1e-3]}
        self.model = MLPRegressor(activation='logistic', solver='adam', alpha=0,
                                  max_iter=1000, random_state=1,
                                  tol=1e-9, verbose=False, n_iter_no_change=1000)
def cross_validation(self):
self.Grid = GridSearchCV(
self.model, self.hyper_parameters, cv=5, scoring='neg_mean_squared_error', verbose=1)
self.Grid.fit(self.X_train, self.y_train)
print("Best hyper params", self.Grid.best_params_)
print("Best score = {}".format(self.Grid.best_score_))
print("Summary", self.Grid.cv_results_)
np.save('plots/new_data/numpy/hyper/best_hyper.npy',
self.Grid.best_params_)
np.save('plots/new_data/numpy/hyper/summary.npy', self.Grid.cv_results_)
def model_fit(self):
best_params = np.load(
'plots/new_data/numpy/hyper/best_hyper.npy', allow_pickle='TRUE').item()
cv_summary = np.load('plots/new_data/numpy/hyper/summary.npy').item()
print(best_params)
params = cv_summary['params']
scores = cv_summary['mean_test_score']
for i in range(len(params)):
print("param is {} and MSE_validation is {:.6f}".format(
params[i], -scores[i]))
self.model.set_params(**best_params)
self.model.set_params(verbose=True)
self.model.fit(self.X_train, self.y_train)
y_pred = self.model.predict(self.X_test)
MSE_test = mean_squared_error(self.y_test, y_pred)
print(MSE_test)
np.save('saved_weights/coefs.npy', self.model.coefs_)
np.save('saved_weights/intercepts.npy', self.model.intercepts_)
pickle.dump(self.model, open('saved_weights/model.sav', 'wb'))
print(self.model.coefs_[0].shape)
print(self.model.coefs_[1].shape)
print(self.model.intercepts_[0].shape)
print(self.model.intercepts_[1].shape)
def model_exp(self):
print("start")
start = time.time()
# regr = MLPRegressor(hidden_layer_sizes=(256,), activation='logistic', solver='adam', alpha=0,
# batch_size=32, learning_rate_init=1e-2, max_iter=2000, random_state=1,
# tol=1e-9, verbose=True, n_iter_no_change=1000).fit(self.X_train,
# self.y_train)
regr = MLPRegressor(hidden_layer_sizes=(128,), activation='logistic', solver='adam', alpha=0,
batch_size=64, learning_rate_init=1e-2, max_iter=1000, random_state=1,
tol=1e-9, verbose=False, n_iter_no_change=1000).fit(self.X_train, self.y_train)
y_pred = regr.predict(self.X_test)
MSE_test = mean_squared_error(self.y_test, y_pred)
print(MSE_test)
end = time.time()
print("time spent {}".format(end - start))
# np.save('saved_weights/coefs.npy', regr.coefs_)
# np.save('saved_weights/intercepts.npy', regr.intercepts_)
# pickle.dump(regr, open('saved_weights/model.sav', 'wb'))
# plt.plot(regr.loss_curve_)
# plt.show()
class PolyReg(object):
    """Polynomial least-squares regression baseline over centred inputs."""

    def __init__(self, args, Xin, Xout):
        """Centre the inputs and build a 90/10 train/test split.

        :param args: parsed command-line arguments (stored, not used here)
        :param Xin: input features; the last column is treated as categorical
        :param Xout: regression targets
        """
        self.args = args
        self.X_mean = Xin.mean(0)
        self.Xin = Xin - self.X_mean
        self.Xout = Xout
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.Xin, self.Xout, test_size=0.1, random_state=1)

    @classmethod
    def poly_features_flexible(cls, degree, X):
        """Polynomial-expand every column except the trailing categorical one.

        TODO: Bad implementation (original author's note, kept).
        """
        poly = PolynomialFeatures(degree)
        transform_X = X[:, :-1]
        category_X = X[:, -1:]
        transform_X = poly.fit_transform(transform_X)
        X_poly = np.concatenate((transform_X, category_X), axis=1)
        return X_poly

    def polynomial_features(self, degree):
        """Build self.X_training_poly / self.X_test_poly for the given degree."""
        category_training = self.X_train[:, -1:]
        category_test = self.X_test[:, -1:]
        transform_training = self.X_train[:, :-1]
        transform_test = self.X_test[:, :-1]
        poly = PolynomialFeatures(degree)
        transform_training = poly.fit_transform(transform_training)
        # Fit the expansion on the training split only and merely *transform*
        # the test split (the old fit_transform re-fitted on test data).
        transform_test = poly.transform(transform_test)
        self.X_training_poly = np.concatenate(
            (transform_training, category_training), axis=1)
        self.X_test_poly = np.concatenate(
            (transform_test, category_test), axis=1)
        self.degree = degree

    def linear_regression_sklearn(self):
        """Fit sklearn LinearRegression on the expanded features.

        :return: (training MSE, test MSE)
        """
        regr = LinearRegression(fit_intercept=False).fit(
            self.X_training_poly, self.y_train)
        y_pred_train = regr.predict(self.X_training_poly)
        y_pred_test = regr.predict(self.X_test_poly)
        MSE_train = mean_squared_error(self.y_train, y_pred_train)
        MSE_test = mean_squared_error(self.y_test, y_pred_test)
        print("Sklearn polynomial regression degree {} training MSE {}, test MSE {}\n".format(
            self.degree, MSE_train, MSE_test))
        # Ensure the target directory exists and close the handle (the bare
        # open() in pickle.dump leaked it).
        os.makedirs('saved_weights', exist_ok=True)
        with open('saved_weights/poly.sav', 'wb') as model_file:
            pickle.dump(regr, model_file)
        return MSE_train, MSE_test

    def linear_regression_custom(self):
        """Solve the normal equations directly.

        :return: (training MSE, test MSE)
        """
        print(self.X_training_poly.shape[1])
        print(np.linalg.matrix_rank(self.X_training_poly))
        gram = np.matmul(self.X_training_poly.transpose(), self.X_training_poly)
        print(np.linalg.matrix_rank(gram))
        # Analytical form: solve (X^T X) w = X^T y rather than explicitly
        # inverting the Gram matrix (better conditioned, same solution).
        w = np.linalg.solve(
            gram,
            np.matmul(self.X_training_poly.transpose(),
                      np.expand_dims(self.y_train, axis=1)))
        y_predicted_training = np.matmul(self.X_training_poly, w).flatten()
        y_predicted_test = np.matmul(self.X_test_poly, w).flatten()
        MSE_train = np.sum(
            (y_predicted_training - self.y_train)**2) / len(self.y_train)
        MSE_test = np.sum((y_predicted_test - self.y_test)
                          ** 2) / len(self.y_test)
        print("Custom polynomial regression degree {} training MSE {}, test MSE {}\n".format(
            self.degree, MSE_train, MSE_test))
        return MSE_train, MSE_test
def MLP_regression(args):
    """Load the Sobol dataset and run the MLP timing experiment."""
    inputs = np.load('saved_data_sobol/Xin.npy')
    targets = np.load('saved_data_sobol/Xout.npy')
    model = MLP(args, inputs, targets)
    # model.model_fit()  # alternative: refit with the saved best hyper-params
    model.model_exp()
def polynomial_regression(args):
    """Sweep polynomial degrees 1..13, saving train/test MSE curves, then
    re-fit the best degree (10) so its weights are the ones left on disk."""
    inputs = np.load('saved_data_sobol/Xin.npy')
    targets = np.load('saved_data_sobol/Xout.npy')
    poly_model = PolyReg(args, inputs, targets)
    train_errors = []
    test_errors = []
    degrees_used = []
    for degree in range(1, 14):
        poly_model.polynomial_features(degree=degree)
        mse_train, mse_test = poly_model.linear_regression_custom()
        poly_model.linear_regression_sklearn()
        train_errors.append(mse_train)
        test_errors.append(mse_test)
        degrees_used.append(degree)
    np.save('plots/new_data/numpy/polynomial/train_MSE.npy',
            np.asarray(train_errors))
    np.save('plots/new_data/numpy/polynomial/test_MSE.npy',
            np.asarray(test_errors))
    np.save('plots/new_data/numpy/polynomial/poly_degree.npy',
            np.asarray(degrees_used))
    # Run the best degree again
    poly_model.polynomial_features(degree=10)
    poly_model.linear_regression_sklearn()
if __name__ == '__main__':
    # Entry point: run the MLP experiment by default; the polynomial-degree
    # sweep is kept (commented) for comparison runs.
    args = arguments.args
    MLP_regression(args)
    # polynomial_regression(args)
|
# Read a whitespace-separated sequence and then a query token; print the
# 1-based position of the query within the sequence.
tokens = input().split()
query = input()
position = tokens.index(query) + 1
print(position)
|
# Generated by Django 2.1.4 on 2019-01-14 18:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `charge`, `desc_ang` and `desc_fr` fields to the Competence model."""

    dependencies = [
        ('psychologues', '0016_auto_20190114_0032'),
    ]

    operations = [
        migrations.AddField(
            model_name='competence',
            name='charge',
            # default=100 only back-fills existing rows; preserve_default=False
            # drops the default from the final field definition.
            field=models.FloatField(blank=True, default=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='competence',
            name='desc_ang',
            field=models.TextField(blank=True, max_length=1000),
        ),
        migrations.AddField(
            model_name='competence',
            name='desc_fr',
            field=models.TextField(blank=True, max_length=1000),
        ),
    ]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : login_token.py
@CopyRight : USTC SSE
@Modify Time : 2020/11/17 19:36
@Author : TJ
@Version : 1.0
@Description : 自定义登录验证类
"""
import jwt
import datetime
import logging as logger
from django.utils import timezone
from SharePlatform import settings
class Authenticated:  # standalone permission class, no base class needed
    """Permission class that only checks the request carries a valid JWT."""

    @staticmethod
    def has_permission(request, view):
        """Return True when the Authorization header holds a valid token.

        :param request: incoming HTTP request
        :param view: unused, but required by the permission-class protocol
        :return: whether the caller is logged in (True / False)
        """
        token = request.META.get('HTTP_AUTHORIZATION')
        if not token:
            return False
        # verify_token returns True for a valid token, False for an
        # expired or malformed one.
        return verify_token(token=token)
class AdminPermission:  # permission class for teacher / admin endpoints
    """Grant access only when the JWT is valid and carries the teacher role."""

    # User-facing denial message ("must be Admin to access"), kept verbatim.
    message = "必须是Admin才能访问"

    @staticmethod
    def has_permission(request, view):
        """Return True when the caller is authenticated AND has role 'teacher'.

        :param request: incoming HTTP request
        :param view: unused, required by the permission-class protocol
        """
        if not Authenticated.has_permission(request, view):
            return False
        # Debug `print(request.META)` removed: it dumped every header
        # (including the bearer token) to stdout on each request.
        token = request.META.get('HTTP_AUTHORIZATION')
        # The token was just validated above, so decoding succeeds here.
        decode = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
        return decode.get('role') == 'teacher'
def verify_token(token):
    """Check whether a JWT from the Authorization header is valid.

    :param token: encoded JWT string
    :return: True when the signature verifies and the token is unexpired,
             False otherwise
    """
    try:
        # Keep the try body minimal: only the decode can legitimately fail.
        decode = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
    except Exception as e:  # NOTE(review): ideally jwt.PyJWTError; kept broad to preserve behavior
        print(e)
        print('签名已过期')  # "signature expired"
        return False
    logger.info(decode.get('username'))
    logger.info(decode.get('uid'))
    return True
def resolve_token(func):
    """Decorator that parses the request JWT and passes the user info along.

    The wrapped view is called as func(self, request, args) where args is a
    dict with 'uid', 'username', 'name' and 'role' (empty when the token is
    missing or invalid).
    """
    import functools

    # functools.wraps preserves the view's __name__/__doc__ (the original
    # decorator silently replaced them with the wrapper's).
    @functools.wraps(func)
    def resolve_token1(self, request):
        token = request.META.get('HTTP_AUTHORIZATION')
        args = {}
        try:
            decode = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
            args['uid'] = decode.get('uid')
            args['username'] = decode.get('username')
            args['name'] = decode.get('name')
            args['role'] = decode.get('role')
        except Exception as e:
            print(e)
            print('签名已过期')  # "signature expired" -- view still runs, with empty args
        return func(self, request, args)
    return resolve_token1
def create_token(user, role):
    """Create a JWT embedding the user's identity, valid for one hour.

    :param user: user object providing uid / username / name
    :param role: 'student' or 'teacher'
    :return: encoded token as a str
    """
    payload = {"exp": timezone.now() + datetime.timedelta(hours=1), "uid": user.uid, "username": user.username,
               "name": user.name, 'role': role}
    token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
    # PyJWT < 2.0 returns bytes, >= 2.0 returns str; the unconditional
    # .decode('utf8') crashed on newer PyJWT.
    if isinstance(token, bytes):
        token = token.decode('utf8')
    return token
|
import numpy
from load_data_ex1 import *
from normalize_features import *
from gradient_descent import *
from plot_data_function import *
from plot_boundary import *
import matplotlib.pyplot as plt
import os
# Ensure the output folder exists (exist_ok=True makes a separate
# os.path.exists() pre-check redundant).
figures_folder = os.path.join(os.getcwd(), 'figures')
os.makedirs(figures_folder, exist_ok=True)
# This loads our data
X_initial, y = load_data_ex1()
# Expand the two raw features with cross and squared terms:
# [x1, x2, x1*x2, x1^2, x2^2]
X = numpy.empty([len(X_initial), 5])
for i in range(len(X_initial)):
    raw = X_initial[i]
    extra = numpy.array([raw[0] * raw[1], raw[0] ** 2, raw[1] ** 2])
    X[i] = numpy.append(raw, extra)
# Normalize
X_normalized, mean_vec, std_vec = normalize_features(X)
# After normalizing, append a column of ones to X_normalized as the bias term.
# (Use `numpy` consistently: the previous mix of `np`/`numpy` only worked if
# some star-import happened to provide `np`, otherwise NameError.)
column_of_ones = numpy.ones((X_normalized.shape[0], 1))
# append column to the dimension of columns (i.e., 1)
X_normalized = numpy.append(column_of_ones, X_normalized, axis=1)
theta = numpy.zeros(6)
# Set learning rate alpha and number of iterations
alpha = 1.0
iterations = 100
# Call the gradient descent function to obtain the trained parameters theta_final and the cost vector
theta_final, cost_vector = gradient_descent(X_normalized, y, theta, alpha, iterations)
# Plot the cost for all iterations
fig, ax1 = plt.subplots()
plot_cost(cost_vector, ax1)
plot_filename = os.path.join(figures_folder, 'ex3_cost.png')
plt.savefig(plot_filename)
min_cost = numpy.min(cost_vector)
argmin_cost = numpy.argmin(cost_vector)
print('Final cost: {:.5f}'.format(cost_vector[-1]))
print('Minimum cost: {:.5f}, on iteration #{}'.format(min_cost, argmin_cost + 1))
# enter non-interactive mode of matplotlib, to keep figures open
plt.ioff()
plt.show()
|
# Copyright Alexander Wood 2016.
# Solutions for Coursera's Bioinformatics 1 Course.
'''
Minimum Skew Problem: Find a position in a genome where the skew diagram attains a minimum.
Input: A DNA string Genome.
Output: All integer(s) i minimizing Skewi (Genome) among all values of i (from 0 to |Genome|).
'''
# Recall that skew(i, Genome) = (# of occurrences of G) - (# of occurrences of C)
# over the first i nucleotides of the genome.
def skew(i, Genome):
    """Return the prefix G-C skew values over the first i characters of Genome.

    skew_list[j] is (#G - #C) among the first j counted nucleotides. Newline
    characters are skipped entirely; any other unrecognised character is
    reported and not counted.
    """
    skew_list = [0]  # balance starts at zero before any nucleotide
    balance = 0      # running (#G - #C)
    for position in range(i):
        base = Genome[position]
        if base == '\n':
            continue
        if base == 'G':
            balance += 1
        elif base == 'C':
            balance -= 1
        elif base not in ('A', 'T'):
            print('An error occurred')
            continue
        skew_list.append(balance)
    return skew_list
# This algorithm returns a list of the indexes of minimum values in a list.
def MinListIndex(Some_List):
    """Return every index at which Some_List attains its minimum value."""
    smallest = min(Some_List)
    return [idx for idx, value in enumerate(Some_List) if value == smallest]
def MinimumSkew(Genome):
    """Print (space-separated) every position where the skew diagram is minimal."""
    skew_values = skew(len(Genome), Genome)
    for position in MinListIndex(skew_values):
        print(position, end=' ')
    # (a value-returning variant would simply return MinListIndex(skew_values))
|
import os
import time
from appium import webdriver
from features.steps import login
from features.action.actions import *
from features.action.unhandled_event import *
from features.page.BotBar import BotBar
from features.page.HomePage import HomePage
from features.page.ItemPage import ItemPage
from features.page.UserPage import UserPage
from features.common.userData import singleCompanyAcc2,PW2
from features.common.exception import *
from features.action.userPageActions import *
from features.action.screenShot import *
from features.action.testSupDocToolbar import *
from behave import given, when, then # pylint: disable=no-name-in-module
@when('successful login and in home page')
@screenshot_for_when_action
def Successful_login(context):
    """Placeholder 'when' step: login happened in an earlier step; the
    decorator still records a screenshot of the current state."""
    pass
@then('check date displayed')
@screenshot_for_then_action
def Check_date_displayed(context):
    """Verify the home page shows today's day, month and year."""
    wait_load_by_id(context.driver, 3, HomePage.dateId_id)
    today = time.strftime("%d %b %Y")  # e.g. "05 Jan 2021"
    index_today = today.split(" ")     # [day, month, year]
    # if date is correct
    dayElement = get_text_by_id(context.driver, HomePage.dateId_id)
    assert index_today[
        0] == dayElement, "Date display wrongly. Date displayed is " + dayElement
    # if month and year correct (one widget holds "Mon YYYY")
    m_y_Element = get_text_by_id(context.driver, HomePage.monthId_id)
    mAndY = m_y_Element.split(" ")
    judge1 = index_today[1] == mAndY[0]
    judge2 = index_today[2] == mAndY[1]
    assert judge1 == True, "Month display wrongly. Month displayed is " + mAndY[0]
    assert judge2 == True, "Year display wrongly. Year displayed is " + mAndY[1]
@then('check search bar function with searching "{string1}"')
@screenshot_for_then_action
def Check_searchbar(context, string1):
    """Exercise the home-page search bar: search `string1`, then the literal
    "test", and check the top result mentions it. Skips silently when there
    are no unhandled events to search through."""
    click_by_id(context.driver, HomePage.searchBarBtn_id)
    search_bar = check_displayed_by_id(context.driver, HomePage.searchTextBox_id)
    assert search_bar == True, "Search bar isn't selected properly."
    # presumably True while unhandled events are listed -- verify helper
    check_event = WithUnhandledEvent(context.driver)
    if check_event == True:
        # To-adjust: the search content string
        sendkeys_by_id(context.driver, HomePage.searchTextBox_id, string1)
        click_by_id(context.driver, HomePage.searchBtn_id)
        wait_load_by_id(context.driver, 10, HomePage.searchTextBox_id)
        blank_list = WithUnhandledEvent(context.driver)
        assert blank_list == False, "Search function doesn't work properly."
        clear_text_by_id(context.driver, HomePage.searchTextBox_id)
        # To-adjust: the search content string
        sendkeys_by_id(context.driver, HomePage.searchTextBox_id, "test")
        click_by_id(context.driver, HomePage.searchBtn_id)
        wait_load_by_xpath(context.driver, 10, HomePage.topEventText2_xp)
        event_text = get_text_by_xpath(context.driver, HomePage.topEventText2_xp)
        assert "test" in event_text.lower(), "Search function doesn't work properly."
        click_by_id(context.driver, HomePage.searchCloseBtn_id)
    else:
        # nothing to search through
        pass
    wait_load_by_id(context.driver, 3, HomePage.homePageLayout_id)
@given('display home page')
def Display_home_page(context):
    """Placeholder 'given' step: the app is assumed to already be on the home page."""
    pass
@when('click third button')
@screenshot_for_when_action
def Click_third_button(context):
    """Tap the third (user) tab in the bottom bar and wait for the user page."""
    click_by_id(context.driver, BotBar.userPage_id)
    wait_load_by_id(context.driver, 3, UserPage.userPic_id)
@then('display user page')
@screenshot_for_then_action
def Display_user_page(context):
    """Assert the user page loaded by checking the profile picture is visible."""
    picture_visible = check_displayed_by_id(context.driver, UserPage.userPic_id)
    assert picture_visible == True, "Failed to navigate to user page."
@when('click second button')
@screenshot_for_when_action
def Click_second_button(context):
    """Tap the second (item) tab in the bottom bar and wait for the item page."""
    click_by_id(context.driver, BotBar.itemPage_id)
    wait_load_by_id(context.driver, 3, ItemPage.itemPageTitle_id)
@then('display item page')
@screenshot_for_then_action
def Display_item_page(context):
    """Assert the item page loaded by checking its title widget is visible."""
    title_visible = check_displayed_by_id(context.driver, ItemPage.itemPageTitle_id)
    assert title_visible == True, "Failed to navigate to item page."
@then(
    'check search bar function by serching "{string1}" and search "{string2}", then test filter'
)
@screenshot_for_then_action
def Check_searchbar_filter(context, string1, string2):
    """Exercise the item-page search bar with two queries, then cycle through
    the three filter modes (resolution / schedule / meeting).

    (The step text's "serching" typo must stay -- it has to match the
    .feature files.)
    """
    click_by_id(context.driver, ItemPage.searchBarBtn_id)
    search_bar = check_displayed_by_id(context.driver,
                                       ItemPage.searchTextBox_id)
    assert search_bar == True, "Search bar isn't selected properly."
    # To-adjust: the search content string
    sendkeys_by_id(context.driver, ItemPage.searchTextBox_id, string1)
    click_by_id(context.driver, ItemPage.searchBtn_id)
    wait_load_by_id(context.driver, 5, ItemPage.searchTextBox_id)
    blank_list = WithUnhandledEvent(context.driver)
    assert blank_list == False, "Search function doesn't work properly."
    clear_text_by_id(context.driver, ItemPage.searchTextBox_id)
    # To-adjust: the search content string
    sendkeys_by_id(context.driver, ItemPage.searchTextBox_id, string2)
    click_by_id(context.driver, ItemPage.searchBtn_id)
    wait_load_by_xpath(context.driver, 7, ItemPage.topItemText_xp)
    event_text = get_text_by_xpath(context.driver, ItemPage.topItemText_xp)
    # BUG FIX: compare against the query actually sent (string2), not the
    # hard-coded literal "test".
    assert string2.lower() in event_text.lower(), "Search function doesn't work properly."
    click_by_id(context.driver, ItemPage.searchCloseBtn_id)
    wait_load_by_id(context.driver, 5, ItemPage.filterBtn_id)
    # Test filter for resolution
    click_by_id(context.driver, ItemPage.filterBtn_id)
    wait_load_by_id(context.driver, 2, ItemPage.meetingSwitch_id)
    click_by_id(context.driver, ItemPage.meetingSwitch_id)
    click_by_id(context.driver, ItemPage.scheduleSwitch_id)
    click_by_id(context.driver, ItemPage.showResultsBtn_id)
    wait_load_by_id(context.driver, 10, ItemPage.itemPageTitle_id)
    test_filter_resolution(context.driver)
    # Test filter for schedule
    click_by_id(context.driver, ItemPage.filterBtn_id)
    wait_load_by_id(context.driver, 2, ItemPage.resolutionSwitch_id)
    click_by_id(context.driver, ItemPage.resolutionSwitch_id)
    click_by_id(context.driver, ItemPage.scheduleSwitch_id)
    click_by_id(context.driver, ItemPage.showResultsBtn_id)
    wait_load_by_id(context.driver, 10, ItemPage.itemPageTitle_id)
    test_filter_schedule(context.driver)
    # Test filter for meeting
    click_by_id(context.driver, ItemPage.filterBtn_id)
    wait_load_by_id(context.driver, 2, ItemPage.scheduleSwitch_id)
    click_by_id(context.driver, ItemPage.scheduleSwitch_id)
    click_by_id(context.driver, ItemPage.meetingSwitch_id)
    click_by_id(context.driver, ItemPage.showResultsBtn_id)
    wait_load_by_id(context.driver, 10, ItemPage.itemPageTitle_id)
    test_filter_meeting(context.driver)
@when('click first button')
@screenshot_for_when_action
def Click_first_button(context):
    """Tap the first (home) tab in the bottom bar."""
    click_by_id(context.driver, BotBar.homePage_id)
@then('display home page again')
@screenshot_for_then_action
def Display_home_page_2(context):
    """Assert we are back on the home page (date widget visible)."""
    date_visible = check_displayed_by_id(context.driver, HomePage.dateId_id)
    assert date_visible == True, "Failed to navigate to home page."
@given('check any unhandled resolution exists')
def Check_resolution(context):
    """Filter the home page down to resolutions only and report whether any
    unhandled resolution is pending (result only printed, not stored)."""
    # select one type of events to run test, with filter
    # first select resolution for test
    wait_load_by_id(context.driver, 3, HomePage.filterBtn_id)
    clickFilter(context.driver)
    # select resolution to test: switch OFF meetings and schedules
    wait_load_by_id(context.driver, 2, HomePage.filterpopup_id)
    switchMeeting(context.driver)
    switchSchedule(context.driver)
    clickShowResults(context.driver)
    wait_load_by_id(context.driver, 3, HomePage.filterBtn_id)
    reso_todo = WithUnhandledEvent(context.driver)
    if reso_todo == True:
        print('Unhandled resolution exists and needs to test.')
    else:
        print('No unhandled resolution.')
@when('unhandled resolution exists')
@screenshot_for_when_action
def empty_act(context):
    """Placeholder 'when' step; the decorator captures a screenshot."""
    pass
@then('test basic function in resolution')
@screenshot_for_then_action
def handle_resolution_procedure(context):
    """End-to-end check of one unhandled resolution: open it, exercise the
    PDF / supporting-document toolbars, add remarks, approve with signature,
    then confirm it shows up in the item list. When nothing is pending, the
    filter switches are reset instead."""
    reso_todo = WithUnhandledEvent(context.driver)
    if reso_todo == True:
        # determine one or many to-dos
        time.sleep(2)
        if IsSingle(context.driver) == True:
            resolution_name = get_text_by_id(context.driver,
                                             HomePage.singleEventText_id)
            click_by_id(context.driver, HomePage.singleEventMark_id)
        else:
            resolution_name = get_text_by_xpath(context.driver,
                                                HomePage.topEventText_xp)
            # NOTE(review): other steps use HomePage.topEvent_xp here;
            # confirm HomePage.topEvent is not a typo.
            click_by_xpath(context.driver, HomePage.topEvent)
        wait_load_by_id(context.driver, 10, HomePage.resolutionMark_id)
        # Test pdf tool
        test_approval_doc_toolbar(context.driver)
        if check_displayed_by_id(context.driver, HomePage.supDocBtn_id):
            click_by_id(context.driver, HomePage.supDocBtn_id)
            time.sleep(4)
            if check_displayed_by_id(context.driver, HomePage.actionMenu_id):
                # a single supporting document opened directly
                test_sup_doc_toolbar(context.driver)
                press_back_button(context.driver)
            else:
                # several supporting documents: open the top one first
                click_by_xpath(context.driver, HomePage.topMultiSupDoc_xpath)
                test_sup_doc_toolbar(context.driver)
                press_back_button(context.driver)
                press_back_button(context.driver)
        # Test "Add remarks" function
        # which can be optimised by checking the result in email
        click_by_id(context.driver, HomePage.actionMenu_id)
        click_by_id(context.driver, HomePage.addRemarksBtn_id)
        wait_load_by_id(context.driver, 3, HomePage.remarksBox_id)
        sendkeys_by_id(context.driver, HomePage.remarksBox_id,
                       "Add some remarks here.")
        click_by_id(context.driver, HomePage.updateRemarksBtn_id)
        wait_load_by_id(context.driver, 5, HomePage.actionMenu_id)
        click_by_id(context.driver, HomePage.actionMenu_id)
        # after adding remarks the menu entry flips to "Edit Remarks"
        testRemarks = get_text_by_id(context.driver, HomePage.editRemarksBtn_id)
        assert testRemarks == "Edit Remarks", addRemarksException
        # To-adjust: so far it can only approve one resolution
        click_by_id(context.driver, HomePage.approveBtn_id)
        wait_load_by_id(context.driver, 2, HomePage.continueWithRemarks_id)
        click_by_id(context.driver, HomePage.continueWithRemarks_id)
        # To-do: I want to add a picture comparison here.
        # To distinguish whether signature is loaded or not
        time.sleep(6)
        # the OS may first ask for file-access permission
        popup = check_displayed_by_id(context.driver, HomePage.accessFilePop_id)
        if popup:
            wait_load_by_id(context.driver, 3, HomePage.allowAccessBtn_id)
            click_by_id(context.driver, HomePage.allowAccessBtn_id)
        wait_load_by_id(context.driver, 5, HomePage.loadSignatureBtn_id)
        click_by_id(context.driver, HomePage.loadSignatureBtn_id)
        wait_load_by_id(context.driver, 8, HomePage.continueToAction_id)
        loaded = check_displayed_by_id(context.driver, HomePage.clearSignatureBtn_id)
        assert loaded == True, loadSignatureException
        click_by_id(context.driver, HomePage.continueToAction_id)
        time.sleep(6)
        optmark = check_displayed_by_id(context.driver, HomePage.otpMark_id)
        if optmark:
            # To-do: OTP part
            # Now can only manually key in
            # After put in the OTP, it directs to another page
            wait_load_by_id(context.driver, 45, HomePage.resoBackBtn_id)
            click_by_id(context.driver, HomePage.resoBackBtn_id)
            wait_load_by_id(context.driver, 8, BotBar.itemPage_id)
        else:
            wait_load_by_id(context.driver, 8, BotBar.itemPage_id)
        # switch to item page, check if the approval is handled
        click_by_id(context.driver, BotBar.itemPage_id)
        top_item_name = get_text_by_xpath(context.driver, ItemPage.topItem_xp)
        assert resolution_name == top_item_name, handleApprovalException
        click_by_id(context.driver, BotBar.homePage_id)
    # testing case following
    else:
        # nothing pending: restore the filter switches for the next steps
        clickFilter(context.driver)
        wait_load_by_id(context.driver, 2, HomePage.filterpopup_id)
        switchSchedule(context.driver)
        switchMeeting(context.driver)
        clickShowResults(context.driver)
@given('check any unhandled schedule exists')
def check_unhandled_schedule(context):
    """Filter the home page to schedules only and remember on the context
    (context.sche_todo) whether any unhandled schedule is pending."""
    # after testing in resolution page, it should direct to home page for coming test.
    # test schedules
    wait_load_by_id(context.driver, 5, HomePage.filterBtn_id)
    clickFilter(context.driver)
    wait_load_by_id(context.driver, 2, HomePage.filterpopup_id)
    switchResolution(context.driver)
    switchMeeting(context.driver)
    clickShowResults(context.driver)
    # consumed later by test_schedule
    context.sche_todo = WithUnhandledEvent(context.driver)
    if context.sche_todo == True:
        print('Unhandled schedule exist.\n')
    else:
        print('No unhandled schedule.\n')
@when('unhandled shcedule exists')
@screenshot_for_when_action
def empty_act_1(context):
    """Placeholder 'when' step; the decorator captures a screenshot.
    (The "shcedule" typo must stay -- it matches the .feature files.)"""
    pass
@then('test basic function in shcedule')
@screenshot_for_then_action
def test_schedule(context):
    """Open the top unhandled schedule, vote, and verify it lands in the item
    list. Relies on context.sche_todo set by check_unhandled_schedule; when
    no schedule is pending, resets the filter switches instead.
    (The step text's "shcedule" typo must stay -- it matches the .feature files.)
    """
    if context.sche_todo == True:
        if IsSingle(context.driver) == True:
            schedule_name = get_text_by_id(context.driver, HomePage.singleEventText_id)
            click_by_id(context.driver, HomePage.singleEventMark_id)
        else:
            schedule_name = get_text_by_xpath(context.driver,
                                              HomePage.topEventText_xp)
            click_by_xpath(context.driver, HomePage.topEvent_xp)
        time.sleep(1)
        schedulePage = check_displayed_by_id(context.driver, HomePage.schedulePageTitle_id)
        assert schedulePage, "Failed to direct to view schedule page."
        click_by_id(context.driver, HomePage.scheVote_id)
        # backing out must raise a confirmation popup; decline it and submit
        click_by_id(context.driver, HomePage.scheBackBtn_id)
        time.sleep(2)
        checkExitPop = check_displayed_by_id(context.driver, HomePage.scheExitPopup_id)
        assert checkExitPop, "Back to home page confirmation popup didn't appear as expected."
        click_by_id(context.driver, HomePage.scheNoExitBtn_id)
        click_by_id(context.driver, HomePage.scheUpdateBtn_id)
        try:
            wait_load_by_id(context.driver, 10, HomePage.homePageLayout_id)
        except Exception as exc:  # was a bare `except:`, which also caught KeyboardInterrupt/SystemExit
            raise Exception("Failed to direct to home page after submitting vote.") from exc
        click_by_id(context.driver, BotBar.itemPage_id)
        top_event_name = get_text_by_xpath(context.driver, ItemPage.topItem_xp)
        assert top_event_name == schedule_name, handleScheduleException
        click_by_id(context.driver, BotBar.homePage_id)
    else:
        # nothing to vote on: restore the filter switches
        clickFilter(context.driver)
        wait_load_by_id(context.driver, 3, HomePage.filterpopup_id)
        switchMeeting(context.driver)
        switchResolution(context.driver)
        clickShowResults(context.driver)
@given('check any unhandled meeting exists')
def check_meeting(context):
    """Filter the home page to meetings only and report whether any unhandled
    meeting is pending.

    NOTE(review): the result stays in a local (meet_todo), unlike the schedule
    flow which stores it on the context; test_meeting re-queries instead.
    """
    wait_load_by_id(context.driver, 3, HomePage.filterBtn_id)
    clickFilter(context.driver)
    wait_load_by_id(context.driver, 2, HomePage.filterpopup_id)
    switchResolution(context.driver)
    switchSchedule(context.driver)
    clickShowResults(context.driver)
    time.sleep(2)
    meet_todo = WithUnhandledEvent(context.driver)
    if meet_todo == True:
        print('Unhandled meeting exists.')
    else:
        print('No unhandled meeting.')
@when('unhandled meeting exists')
@screenshot_for_when_action
def empty_act_2(context):
    """Placeholder 'when' step; the decorator captures a screenshot."""
    pass
@then('test basic function in meeting')
@screenshot_for_then_action
def test_meeting(context):
    """Open the top unhandled meeting and walk its tabs (other documents,
    participants, agenda), exercising the PDF toolbars along the way.

    NOTE(review): meeting_name is captured but never asserted against the
    item list, unlike the resolution/schedule flows -- confirm intent.
    """
    meet_todo = WithUnhandledEvent(context.driver)
    if meet_todo:
        if IsSingle(context.driver):
            meeting_name = get_text_by_id(context.driver,
                                          HomePage.singleEventText_id)
            click_by_id(context.driver, HomePage.singleEventMark_id)
        else:
            meeting_name = get_text_by_xpath(context.driver,
                                             HomePage.topEventText_xp)
            click_by_xpath(context.driver, HomePage.topEvent_xp)
        wait_load_by_id(context.driver, 5, HomePage.otherDocBtn_id)
        click_by_id(context.driver, HomePage.otherDocBtn_id)
        otherDoc = check_displayed_by_id(context.driver,
                                         HomePage.otherDocMark_id)
        assert otherDoc == True, "Couldn't switch to other Documents page."
        # Check if additional document exists.
        # To-do: Need to distinguish different types of agenda and whether recused or not
        if check_displayed_by_id(context.driver, HomePage.noMeetingDataMark_id):
            pass
        else:
            if check_displayed_by_id(context.driver, HomePage.singleDocMark_id):
                # single document opened directly
                click_by_id(context.driver, HomePage.singleDocMark_id)
                wait_load_by_id(context.driver, 7, HomePage.pdfPage_id)
                test_meeting_other_doc_toolbar(context.driver)
                press_back_button(context.driver)
            else:
                # several documents: open the top one
                click_by_xpath(context.driver, HomePage.topMultiDoc_xpath)
                wait_load_by_id(context.driver, 7, HomePage.pdfPage_id)
                test_meeting_other_doc_toolbar(context.driver)
                press_back_button(context.driver)
        wait_load_by_id(context.driver, 2, HomePage.participantBtn_id)
        click_by_id(context.driver, HomePage.participantBtn_id)
        participant = check_displayed_by_id(context.driver,
                                            HomePage.participantMark_id)
        assert participant == True, "Couldn't switch to participants page."
        wait_load_by_id(context.driver, 3, HomePage.agendaBtn_id)
        ### To-do: this testing scenario can be enriched
        click_by_id(context.driver, HomePage.agendaBtn_id)
        agenda = check_displayed_by_id(context.driver, HomePage.agendaMark_id)
        assert agenda == True, "Couldn't switch to agenda page."
        if check_displayed_by_id(context.driver, HomePage.noMeetingDataMark_id):
            press_back_button(context.driver)
        else:  # To-do: Need to distinguish different types of agenda and whether recused or not
            if check_displayed_by_id(context.driver, HomePage.singleAgendaMark_id):
                click_by_id(context.driver, HomePage.singleAgendaMark_id)
                wait_load_by_id(context.driver, 3, HomePage.pdfPage_id)
                test_meeting_agenda_toolbar(context.driver, context.width, context.height)
            else:
                click_by_xpath(context.driver, HomePage.topMultiAgenda_xpath)
                wait_load_by_id(context.driver, 3, HomePage.pdfPage_id)
                test_meeting_agenda_toolbar(context.driver, context.width, context.height)
        click_by_id(context.driver, HomePage.meetingBackBtn_id)
        wait_load_by_id(context.driver, 2, HomePage.filterBtn_id)
|
# doc2vec
import numpy as np
import pandas as pd
import src.features.doc2vec as doc2vec
# Shared fixture: two tiny documents and their index labels.
articles = ['A dog saw a cat. He was standing there', 'He says hi']
titles = ['A','B']
df = pd.DataFrame({'articles':articles,'titles':titles})
# Universal Sentence Encoder variant kept disabled (heavy model download):
# def test_getEmbeddingsMeanUse():
#     embDf = doc2vec.getEmbeddings(df,colName = 'articles',indexCol = 'titles', docMean = True, embedModel = 'use')
#     assert embDf.shape == (2,512)
def test_getEmbeddingsMeanSentTrans():
    """Mean-pooled sentence-transformer embeddings: one 768-dim row per document."""
    embDf = doc2vec.getEmbeddings(df,colName = 'articles',indexCol = 'titles', docMean = True, embedModel = 'transSent')
    assert embDf.shape == (2,768)
    # NOTE(review): a 'sent' column alongside shape (2,768) looks inconsistent;
    # confirm getEmbeddings' return layout.
    assert 'sent' in embDf.columns
def test_getEmbeddingsNoMeanSentTrans():
    """Without mean pooling: one row per sentence (3 sentences in the fixture)."""
    embDf = doc2vec.getEmbeddings(df,colName = 'articles',indexCol = 'titles', docMean = False, embedModel = 'transSent')
    assert embDf.shape == (3,768)
|
"""
Time/Space complexity = O(log N)
"""
# Recursion
class Solution:
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Insert val into the BST and return the root (first recursive approach)."""
        if not root:
            return TreeNode(val)

        def dfs(node):
            # Returns the node unchanged, or None when called on an empty slot.
            if not node:
                return node
            if val < node.val:
                # dfs(node.left) is falsy only when node.left is None, i.e.
                # we found the insertion point; otherwise the recursion has
                # already inserted deeper in the subtree.
                if not dfs(node.left):
                    node.left = TreeNode(val)
            else:
                if not dfs(node.right):
                    node.right = TreeNode(val)
            return node
        return dfs(root)
# Second Approach (Recursion)
class Solution:
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Insert val into the BST (clean recursive form) and return the root."""
        def insert(subtree):
            # An empty slot is exactly where the new value belongs.
            if subtree is None:
                return TreeNode(val)
            if val < subtree.val:
                subtree.left = insert(subtree.left)
            else:
                subtree.right = insert(subtree.right)
            return subtree
        return insert(root)
"""
Time Complexity = O(log N)
Space Complexity = O(1)
"""
#Iterative
class Solution:
    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Insert val into the BST iteratively and return the (unchanged) root.

        O(h) time, O(1) extra space. (Removed the unused `prev` local.)
        """
        if not root:
            return TreeNode(val)
        node = root
        while node:
            if val < node.val:
                if not node.left:
                    node.left = TreeNode(val)
                    break
                node = node.left
            else:
                if not node.right:
                    node.right = TreeNode(val)
                    break
                node = node.right
        return root
import matplotlib.pyplot as plt
import numpy as np
import cv2
import argparse
import os.path
from PIL import Image
import os
import struct
# Default frame size assumed by readPre() (readDmp reads its own dimensions
# from the file header).
width = 450
height = 350
def readDmp(file, depthShift):
    """Read a '_dmp_yee_' depth-movie file.

    :param file: path to the .dmp file
    :param depthShift: divisor applied to every raw depth sample
    :return: (header, data) where header = (n_frame, width, height, max_dep)
             and data is a list of per-frame (depth, gray-rgb) float32 arrays
             of shapes (height, width, 1) and (height, width, 3)
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(file, 'rb') as f:
        flag = f.read(10).decode('ascii')[:9]
        print(flag)
        n_frame = struct.unpack('I', f.read(4))[0]
        print(n_frame)
        width = struct.unpack('I', f.read(4))[0]
        print(width)
        height = struct.unpack('I', f.read(4))[0]
        print(height)
        if flag != '_dmp_yee_':
            print('flag error.')
            exit(1)
        mapLen = width * height
        data = []
        max_dep = 0
        for frame in range(n_frame):
            print('processing frame [{:03d}] ...'.format(frame), end="\r")
            depthMap = struct.unpack('f' * mapLen, f.read(4 * mapLen))
            colorMap = struct.unpack('B' * mapLen, f.read(mapLen))
            # Vectorised conversion: the original per-pixel double loop ran
            # O(width*height) Python-level operations per frame.
            dep_array = np.asarray(depthMap, dtype=np.float32).reshape(
                height, width, 1) / depthShift
            if dep_array.size:
                max_dep = max(max_dep, float(dep_array.max()))
            red = np.asarray(colorMap, dtype=np.float32).reshape(
                height, width, 1) / 255.0
            # Grayscale intensity replicated into the R, G and B channels.
            rgb_array = np.repeat(red, 3, axis=2)
            data.append((dep_array, rgb_array))
    print('\nmax_dep: {}'.format(max_dep))
    header = (n_frame, width, height, max_dep)
    return header, data
def readPre(file, w=None, h=None):
    """Read a single raw frame of 32-bit little-endian floats.

    Args:
        file: path to the raw float dump.
        w, h: frame dimensions; default to the module-level
            ``width``/``height`` globals for backward compatibility.

    Returns:
        float32 numpy array of shape (h, w, 1).
    """
    if w is None:
        w = width
    if h is None:
        h = height
    mapLen = w * h
    # `with` closes the file (the original leaked the handle), and the
    # pointless `for frame in range(1)` loop plus per-pixel copy loop are
    # replaced by a single vectorized reshape.
    with open(file, 'rb') as f:
        depthMap = struct.unpack('f' * mapLen, f.read(4 * mapLen))
    return np.asarray(depthMap, dtype=np.float32).reshape(h, w, 1)
# Create object for parsing command-line options
parser = argparse.ArgumentParser(description="")
parser.add_argument('dmp', metavar='DIR', help='path to dmp')
# NOTE(review): these two arguments were commented out while the code below
# still dereferences args.preSR / args.preS, which crashed at startup;
# re-enabled so the script can run.
parser.add_argument('preSR', metavar='DIR', help='path to SHARP-Net (S+R100K) prediction')
parser.add_argument('preS', metavar='DIR', help='path to SHARP-Net (S) prediction')
if __name__ == "__main__":
    args = parser.parse_args()
    pre_depth = readPre(args.preSR)
    preS_depth = readPre(args.preS)
    ######### READ DMP ##################################
    header, pmd_data = readDmp(args.dmp, 1)
    pmd_depth, pmd_color = pmd_data[0]
    # Sample one scanline (row 175, columns 135..410) from each depth source.
    y_1 = []
    y_3 = []
    y_4 = []
    for i in range(135, 411):
        y_1.append(pmd_depth[175][i][0])
        y_3.append(pre_depth[175][i])
        y_4.append(preS_depth[175][i])
    # Piecewise-linear ground-truth profile, in cm (converted to meters below).
    y_2 = []
    for i in range(135, 411):
        d = 0
        if i in range(135, 147):
            d = 44.5 + (i-135)*(43.8-44.5)/(147-135)
        elif i in range(147, 230):
            d = 43.8 + (i-147)*(62.5-43.8)/(230-147)
        elif i in range(230, 411):
            d = 62.5 + (i-230)*(55.5-62.5)/(411-230)
        y_2.append(d/100)
    ######### PLOT #######################################
    x = range(135, 411)
    Data_1, = plt.plot(x, y_1, 'r-', label='ToF depth')  # draw line
    # Fixed label typo: "Gronud-truth" -> "Ground-truth".
    Data_2, = plt.plot(x, y_2, 'g-', label='Ground-truth depth')
    Data_3, = plt.plot(x, y_3, 'b--', label='SHARP-Net (S+R100K) depth')
    Data_4, = plt.plot(x, y_4, 'm--', label='SHARP-Net (S) depth')
    plt.title("Depth Profile", fontsize=18)  # chart title
    plt.tick_params(axis='both', labelsize=12, color='green')
    plt.legend(handles=[Data_1, Data_2, Data_3, Data_4])
    plt.ylabel("depth(m)", fontsize=16)  # y-axis label
    plt.savefig('testChart.png', bbox_inches='tight')
# 1326. Minimum Number of Taps to Open to Water a Garden
'''
There is a one-dimensional garden on the x-axis. The garden starts at the point 0 and ends at the point n. (i.e The length of the garden is n).
There are n + 1 taps located at points [0, 1, ..., n] in the garden.
Given an integer n and an integer array ranges of length n + 1 where ranges[i] (0-indexed) means the i-th tap can water the area [i - ranges[i], i + ranges[i]] if it was open.
Return the minimum number of taps that should be open to water the whole garden, If the garden cannot be watered return -1.
'''
Basic idea: Sweep Line
Greedy approach, save furthest location a start point can jump to
sweep all the positions
Solution 1:
class Solution:
    def minTaps(self, n: int, ranges: List[int]) -> int:
        """Minimum taps needed to water [0, n]; -1 if impossible (sweep line)."""
        # furthest[p] = rightmost point reachable by a tap starting at p.
        furthest = [0] * (n + 1)
        events = set()
        for pos, r in enumerate(ranges):
            if not r:
                continue
            lo, hi = max(0, pos - r), min(n, pos + r)
            furthest[lo] = max(furthest[lo], hi)
            events.add(lo)
            events.add(hi)
        taps = 0
        reach = best = 0
        # Sweep only the interesting positions, greedily extending coverage.
        for point in sorted(events):
            best = max(best, furthest[point])
            if point == reach:
                reach = best
                taps += 1
                if reach == n:
                    return taps
        return -1
Solution 2: same idea, not sweeping, but jump
class Solution:
    def minTaps(self, n: int, ranges: List[int]) -> int:
        """Minimum taps needed to water [0, n]; -1 if impossible (greedy jumps)."""
        # reach[p] = rightmost point coverable by a tap whose interval starts at p.
        reach = [0] * (n + 1)
        for pos, r in enumerate(ranges):
            if not r:
                continue
            lo = max(0, pos - r)
            reach[lo] = max(reach[lo], min(n, pos + r))
        taps, lo, hi = 0, 0, 0
        while hi < n:
            taps += 1
            # Advance the window [lo, hi] to the best reach inside it,
            # updating both bounds simultaneously.
            lo, hi = hi, max(reach[lo:hi + 1])
            if hi == lo:  # no progress: an uncoverable gap
                return -1
        return taps
|
#!/user/bin/python
# Beginner tutorial script: basic arithmetic and for-loops over strings.
print("Welcome to Python Programmaing language")
print("Welcome to the python tutorials")
x=200
y=100
x+y
x-y
x/y
#print the Hello
print("Hello World Programmaing")
print(x+y)
print(x-y)
print(x/y)
print("\n"*20)
# NOTE(review): both for-loops below were missing the trailing colon,
# which is a SyntaxError; fixed.
for x in 'AshishKumar':
    print("Welcome to: ",x)
for x in "AshishKumar":
    print("Welcome to: ", x,end=" ")
#Final Code is above.
|
from dataviva import db
from dataviva.utils.auto_serialize import AutoSerialize
from dataviva.attrs.models import Bra, Isic, Cbo
from sqlalchemy import and_
############################################################
# ----------------------------------------------------------
# 2 variable tables
#
############################################################
class Yi(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per ISIC industry (2-variable table rais_yi)."""
    __tablename__ = 'rais_yi'
    # Composite primary key: year + industry.
    year = db.Column(db.Integer(4), primary_key=True)
    isic_id = db.Column(db.String(5), db.ForeignKey(Isic.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # Diversity counts and their "effective" float variants.
    cbo_diversity = db.Column(db.Integer(11))
    cbo_diversity_eff = db.Column(db.Float())
    bra_diversity = db.Column(db.Integer(11))
    bra_diversity_eff = db.Column(db.Float())
    # Growth columns: percent vs absolute; the _5 suffix presumably denotes
    # a 5-year window — TODO confirm against the data loader.
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Yi %d.%s>' % (self.year, self.isic_id)
class Yb_rais(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per Brazilian location (2-variable table rais_yb)."""
    __tablename__ = 'rais_yb'
    # Composite primary key: year + location.
    year = db.Column(db.Integer(4), primary_key=True)
    bra_id = db.Column(db.String(8), db.ForeignKey(Bra.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # Diversity counts and their "effective" float variants.
    isic_diversity = db.Column(db.Integer(11))
    isic_diversity_eff = db.Column(db.Float())
    cbo_diversity = db.Column(db.Integer(11))
    cbo_diversity_eff = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Yb_rais %d.%s>' % (self.year, self.bra_id)
class Yo(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per CBO occupation (2-variable table rais_yo)."""
    __tablename__ = 'rais_yo'
    # Composite primary key: year + occupation.
    year = db.Column(db.Integer(4), primary_key=True)
    cbo_id = db.Column(db.String(6), db.ForeignKey(Cbo.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # Diversity counts and their "effective" float variants.
    isic_diversity = db.Column(db.Integer(11))
    isic_diversity_eff = db.Column(db.Float())
    bra_diversity = db.Column(db.Integer(11))
    bra_diversity_eff = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Yo %d.%s>' % (self.year, self.cbo_id)
############################################################
# ----------------------------------------------------------
# 3 variable tables
#
############################################################
class Ybi(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per location x industry (3-variable table rais_ybi)."""
    __tablename__ = 'rais_ybi'
    # Composite primary key: year + location + industry.
    year = db.Column(db.Integer(4), primary_key=True)
    bra_id = db.Column(db.String(8), db.ForeignKey(Bra.id), primary_key=True)
    isic_id = db.Column(db.String(5), db.ForeignKey(Isic.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # rca / distance / opp_gain: comparative-advantage style measures —
    # exact semantics are defined by the loader; confirm before relying on them.
    rca = db.Column(db.Float())
    distance = db.Column(db.Float())
    opp_gain = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Ybi %d.%s.%s>' % (self.year, self.bra_id, self.isic_id)
class Ybo(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per location x occupation (3-variable table rais_ybo)."""
    __tablename__ = 'rais_ybo'
    # Composite primary key: year + location + occupation.
    year = db.Column(db.Integer(4), primary_key=True)
    bra_id = db.Column(db.String(8), db.ForeignKey(Bra.id), primary_key=True)
    cbo_id = db.Column(db.String(6), db.ForeignKey(Cbo.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Ybo %d.%s.%s>' % (self.year, self.bra_id, self.cbo_id)
class Yio(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per industry x occupation (3-variable table rais_yio)."""
    __tablename__ = 'rais_yio'
    # Composite primary key: year + industry + occupation.
    year = db.Column(db.Integer(4), primary_key=True)
    isic_id = db.Column(db.String(5), db.ForeignKey(Isic.id), primary_key=True)
    cbo_id = db.Column(db.String(6), db.ForeignKey(Cbo.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # `importance` semantics are defined by the loader — confirm.
    importance = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Yio %d.%s.%s>' % (self.year, self.isic_id, self.cbo_id)
class Ybio(db.Model, AutoSerialize):
    """Yearly RAIS aggregate per location x industry x occupation (rais_ybio)."""
    __tablename__ = 'rais_ybio'
    # Composite primary key: year + location + industry + occupation.
    year = db.Column(db.Integer(4), primary_key=True)
    bra_id = db.Column(db.String(8), db.ForeignKey(Bra.id), primary_key=True)
    isic_id = db.Column(db.String(5), db.ForeignKey(Isic.id), primary_key=True)
    cbo_id = db.Column(db.String(6), db.ForeignKey(Cbo.id), primary_key=True)
    # Core aggregates.
    wage = db.Column(db.Numeric(16,2))
    num_emp = db.Column(db.Integer(11))
    num_est = db.Column(db.Integer(11))
    wage_avg = db.Column(db.Numeric(16,2))
    num_emp_est = db.Column(db.Float())
    # `required` semantics are defined by the loader — confirm.
    required = db.Column(db.Float())
    # Growth columns (percent / absolute; _5 presumably 5-year — confirm).
    wage_growth_pct = db.Column(db.Float())
    wage_growth_pct_5 = db.Column(db.Float())
    wage_growth_val = db.Column(db.Numeric(16,2))
    wage_growth_val_5 = db.Column(db.Numeric(16,2))
    num_emp_growth_pct = db.Column(db.Float())
    num_emp_growth_pct_5 = db.Column(db.Float())
    num_emp_growth_val = db.Column(db.Integer(11))
    num_emp_growth_val_5 = db.Column(db.Integer(11))
    def __repr__(self):
        return '<Ybio %d.%s.%s.%s>' % (self.year, self.bra_id, self.isic_id, self.cbo_id)
|
###########
## Notes ##
###########
'''
The replace() method returns a copy of the string in which all occurrences of a substring are replaced with another substring.
The syntax of replace() is:
str.replace(old, new[, count])
replace() parameters
* old - old substring you want to replace
* new - the new substring that will replace the old substring
* count (optional) - the number of times you want to replace the old substring with the new substring
If count is not specified, replace() method replaces all occurrences of the old substring with the new substring
'''
'''
Problem 5
Remember to use python 2.7, not python 3 for this problem
'''
# NOTE(review): the expected results assume Python 2 division (the note
# above says to use Python 2.7): int/int truncates there, so 10/4 == 2,
# while under Python 3 the `/` operator always returns a float.
print (10/4.0)
print (10/4)
print (10.0/5)
print (10 * 1.0 / 4)
print (10 / 5)
print (10 / 50)
'''
Problem 6
You can read a string backwards with the following syntax:
string[::-1] - where the "-1" means one step back.
'''
word = 'madman'
reverse = word[::-1]
# str.find returns an index (or -1), not a boolean, so the original
# `word.find(reverse)` was never a palindrome test; compare the string
# with its reverse instead.
is_palindrome = (word == reverse)
import pandas as pd
import numpy as np
import pickle
# Load the dataset and drop the surrogate-key 'Id' column.
df = pd.read_csv('Dataset.csv')
df = df.drop(columns = ['Id'])
# First four columns are features; the remaining column(s) are the label.
X = np.array(df.iloc[:, 0:4])
y = np.array(df.iloc[:, 4:])
from sklearn.preprocessing import LabelEncoder
# Encode the class labels as integers (flattened to 1-D first).
le = LabelEncoder()
y = le.fit_transform(y.reshape(-1))
from sklearn.model_selection import train_test_split
# NOTE(review): no random_state, so the split (and the reported accuracy)
# is not reproducible between runs — confirm whether that is intended.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
from sklearn.linear_model import LogisticRegression
sv = LogisticRegression().fit(X_train,y_train)
# print metric to get performance
print("Accuracy: ",sv.score(X_test, y_test) * 100)
# Persist the fitted model for later inference.
pickle.dump(sv, open('iri.pkl', 'wb'))
#! /usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import cv2
import numpy as np
import dlib
import sys
import datetime
import os
import imutils
import math
from imutils import face_utils
# from scipy.spatial import distance as dist # for desktop
class FaceDLib:
    """Wrapper around a dlib 68-point landmark predictor for mouth/nose metrics."""
    def __init__(self, predictor_path):
        # Load the 68-point facial landmark predictor model from disk.
        self.predictor = dlib.shape_predictor(predictor_path)
    def face_shape_detector_display(self, img, img_gray, rects, mar, MAR_THRESH):
        """Overlay the MAR value and mouth contour on img and return it.

        rects: face rectangles from an external detector; mar: precomputed
        mouth aspect ratio; MAR_THRESH: value above which the mouth is
        treated as open.
        """
        for rect in rects:
            # Extract the facial landmark points for this face.
            shape = self.predictor(img_gray, rect)
            shape = face_utils.shape_to_np(shape)
            # Display the MAR value on screen.
            cv2.putText(img, "MAR: {:.2f}".format(mar), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            # If the mouth is open, show a message on screen.
            if mar > MAR_THRESH:
                cv2.putText(img, "Mouth is Open!", (30,60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)
            # cv2.rectangle(img, (rect.left(), rect.top()), (rect.right(), rect.bottom()), (255, 0, 0), 1)
            # Draw the mouth landmarks on the image (points 48:68 are the mouth).
            mouth_hull = cv2.convexHull(shape[48:68])
            cv2.drawContours(img, [mouth_hull], -1, (0, 0, 255), 2)
            # for (x, y) in shape[48:68]:
            #     cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
        return img
    """
    # 画像に映る口の座標を取得(口が動いているかを判別するために作成)→現在は使っていない
    def get_mouth_xy(self, img_gray):
        x_upper = x_lower = y_upper = y_lower = None # 初期化
        rects = self.detector(img_gray, 0)
        for rect in rects:
            shape = self.predictor(img_gray, rect)
            mouth_point_upper = shape.part(51) # 唇の上の点を取得
            x_upper = mouth_point_upper.x
            y_upper = mouth_point_upper.y
            mouth_point_lower = shape.part(57) # 唇の下の点を取得
            x_lower = mouth_point_lower.x
            y_lower = mouth_point_lower.y
        return x_upper, y_upper, x_lower, y_lower # x, yはそれぞれlong型で、それらをタプルとして返す。
    """
    # Compute the mouth aspect ratio (indicator of how open the mouth is).
    def mouth_aspect_ratio(self, img_gray, rects):
        mar = 0 # initialize the mouth aspect ratio
        for rect in rects:
            shape = self.predictor(img_gray, rect)
            shape = face_utils.shape_to_np(shape)
            # Landmark distances (desktop variant using scipy):
            # A = dist.euclidean(shape[51] , shape[59]) # 51, 59
            # B = dist.euclidean(shape[53] , shape[57]) # 53, 57
            # C = dist.euclidean(shape[49] , shape[55]) # 49, 55
            # Landmark distances (xavier variant using numpy):
            A = np.linalg.norm(shape[51] - shape[59])
            B = np.linalg.norm(shape[53] - shape[57])
            C = np.linalg.norm(shape[49] - shape[55])
            # Mouth aspect ratio: larger when the mouth is open wider.
            mar = (A + B) / (2.0 * C)
        return mar
    # Get the nose coordinates in the image (used to identify a person when assigning IDs).
    def get_nose_xy(self, img_gray, rects):
        x = y = None # initialize
        for rect in rects:
            shape = self.predictor(img_gray, rect)
            nose_point = shape.part(30) # take the nose landmark (point 30)
            x = nose_point.x
            y = nose_point.y
        return x, y # x and y are long values, returned together as a tuple
if __name__ == '__main__':
    # Webcam demo: show MAR overlay per frame until ESC is pressed.
    predictor_path = "./shape_predictor_68_face_landmarks.dat"
    gets = FaceDLib(predictor_path)
    cap = cv2.VideoCapture(0)
    print("frame per second:", cap.get(cv2.CAP_PROP_FPS))
    count = 0
    while True:
        ret, frame = cap.read()
        frame = imutils.resize(frame, width=500)
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # NOTE(review): face_shape_detector_display takes (img, img_gray,
        # rects, mar, MAR_THRESH) and returns only img — this call raises a
        # TypeError and the 2-tuple unpacking would also fail; confirm intent.
        frame, img_gray = gets.face_shape_detector_display(frame, img_gray)
        cv2.imshow('img', frame)
        c = cv2.waitKey(1)
        if c == 27:# press ESC to close the window
            break
        if c == 32:# press space to save a snapshot
            count += 1
            cv2.imwrite('./filename%03.f'%(count)+'.jpg', frame) # saved with sequential numbering from 001
            print('save done')
        # NOTE(review): get_mouth_xy is commented out inside the class, and
        # get_nose_xy requires a rects argument — all four calls below fail.
        gets.get_mouth_xy(img_gray)
        gets.get_nose_xy(img_gray)
        print("get_nose_xy: " + str(gets.get_nose_xy(img_gray)))
        print("get_mouth_xy: " + str(gets.get_mouth_xy(img_gray)))
    cap.release()
    cv2.destroyAllWindows()
|
# Map mutation names read from stdin to the corresponding set method
# (dispatch table instead of an if/elif chain).
funcdict = {
    "update": set.update,
    "intersection_update": set.intersection_update,
    "difference_update": set.difference_update,
    "symmetric_difference_update": set.symmetric_difference_update,
}
_ = int(input())
# Removed the redundant list() wrappers: set() consumes the map directly.
set_a = set(map(int, input().strip().split()))
n = int(input())
for i in range(n):
    # Each operation is two input lines: "<name> <size>" then the other set's elements.
    funcdict[input().split()[0]](set_a, set(map(int, input().split())))
# sum() accepts the set directly; no intermediate list needed.
print(sum(set_a))
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.views import generic
from .forms import StoreInfoForm, StoreMenuForm
from .models import StoreInfo, StoreMenu
from django.utils.safestring import mark_safe
import json
# Create your views here.
class Home(generic.TemplateView):
    """Static landing page rendered from myapp/home.html."""
    template_name = 'myapp/home.html'
@login_required
def store_setting(request):
    """Create (or overwrite) the logged-in user's StoreInfo via StoreInfoForm."""
    params = {'message': '', 'form': None}
    if request.method == 'POST':
        form_instance = StoreInfo(STORE_EMAIL=request.user) # build a model instance keyed to the current user
        form = StoreInfoForm(request.POST, instance=form_instance)
        if form.is_valid():
            StoreInfo.objects.filter(STORE_EMAIL=request.user).delete() # delete any existing row first so no garbage record (EMAIL=NULL) is left behind by an update
            form.save()
            return redirect('myapp:store_mypage')
        else:
            params['message'] = '再入力して下さい'
            params['form'] = form
    else:
        # GET: show an empty form.
        params['form'] = StoreInfoForm()
    return render(request, 'myapp/store_setting.html', params)
@login_required
def store_show_menu(request):
    """Register a menu item for the logged-in store via StoreMenuForm."""
    params = {'message': '', 'form': None}
    if request.method == 'POST':
        form = StoreMenuForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # Attach the menu to the current user's store before saving.
            post.STORE_INFO = StoreInfo.objects.get(STORE_EMAIL=request.user)
            post.save()
            print("登録しました")
            return redirect('myapp:store_show_menu')
        else:
            params['message'] = '再入力して下さい'
            params['form'] = form
    else:
        # Has the store's basic information been entered yet?
        if StoreInfo.objects.filter(STORE_EMAIL=request.user).exists():
            params['form'] = StoreMenuForm()
        else:
            # No store profile yet — send the user to the setup page first.
            return redirect('myapp:store_setting')
    return render(request, 'myapp/debug_store_show_menu.html', params)
@login_required
def store_mypage(request):
    """Show the logged-in store's profile, or redirect to setup if absent."""
    if StoreInfo.objects.filter(STORE_EMAIL=request.user).exists():
        '''============================memo============================
        datas = StoreInfo.objects.get(STORE_EMAIL=request.user)の場合
        datas.STORE_EMAIL 等でアクセスできる。
        datas = StoreInfo.objects.filter(STORE_EMAIL=request.user)の場合
        for data in datas:
        data.STORE_EMAIL 等でアクセスできる。
        ※filterの場合は、htmlへ投げる辞書を作成する際はこのまま投げることができる。
        params = {'message': '店舗情報', 'data': datas}
        ============================memo============================'''
        #if 1
        # <T.B.D> consider this approach too: .first() should stay fast once the data set grows
        data_buf = StoreInfo.objects.filter(STORE_EMAIL=request.user).first()
        # Dict keys below are user-facing Japanese display labels.
        data = {
            'E_Mail': data_buf.STORE_EMAIL,
            '店舗名': data_buf.STORE_NAME,
            '郵便番号': data_buf.STORE_POSTAL_CODE,
            '住所': data_buf.STORE_ADDRESS,
            '電話番号': data_buf.STORE_TEL
        }
        #else
        # data = StoreInfo.objects.filter(STORE_EMAIL=request.user)
        #endif
        params = {'message': '店舗情報', 'data': data}
        return render(request, 'myapp/debug_mypage.html', params)
    else:
        # No store profile yet — send the user to the setup page first.
        return redirect('myapp:store_setting')
@login_required
def store_mypage_edit(request):
    """Edit the logged-in store's profile via a pre-filled StoreInfoForm."""
    context = {'message': '', 'form': None, 'data': None}
    if request.method == 'POST':
        form_instance = StoreInfo(STORE_EMAIL=request.user) # build a model instance keyed to the current user
        form = StoreInfoForm(request.POST, instance=form_instance)
        if form.is_valid():
            # StoreInfo.objects.filter(STORE_EMAIL=request.user).update(STORE_EMAIL=None) # tricky because the email is the unique key: rewrite to NULL first, then update from the form
            StoreInfo.objects.filter(STORE_EMAIL=request.user).delete() # delete the old row instead — the update above would leave a garbage record (EMAIL=NULL)
            form.save()
            return redirect('myapp:store_mypage')
        else:
            context['message'] = '再入力して下さい'
            context['form'] = form
    else:
        # Pre-populate the form with the current stored values.
        data = StoreInfo.objects.filter(STORE_EMAIL=request.user).first()
        initial_data = {
            'STORE_EMAIL': data.STORE_EMAIL,
            'STORE_NAME': data.STORE_NAME,
            'STORE_POSTAL_CODE': data.STORE_POSTAL_CODE,
            'STORE_ADDRESS': data.STORE_ADDRESS,
            'STORE_TEL': data.STORE_TEL
        }
        form = StoreInfoForm(
            initial=initial_data
        )
        # NOTE(review): this re-assignment drops the 'message'/'data' keys
        # initialised above — harmless today but inconsistent; confirm.
        context = {
            'form': form
        }
    return render(request, 'myapp/debug_mypage_edit.html', context)
def debug_websocket(request):
    """Render the websocket debug landing page."""
    # return render(request, 'myapp/debug_websocket.html', {})
    return render(request, 'myapp/debug_websocket.html')
def debug_websocket_room(request, room_name):
    """Render a websocket debug room, passing the room name as escaped JSON."""
    return render(request, 'myapp/debug_websocket_room.html', {
        'room_name_json': mark_safe(json.dumps(room_name))
    })
#!env python3
# -*- coding: utf-8 -*-
import argparse
# python3 argparse_module.py
# python3 argparse_module.py -h
# python3 argparse_module.py -a 1 -b 2 -c 3
def mul(a, b, c):
    """Return the product of the three values a, b and c."""
    product = a * b
    return product * c
if __name__ == "__main__":
parser = argparse.ArgumentParser("multiply three values")
parser.add_argument("-a", help="value1", type=float,
required=True)
parser.add_argument("-b", help="value2", type=float,
required=True)
parser.add_argument("-c", help="value3", type=float,
default=1)
args = parser.parse_args()
print(mul(args.a, args.b, args.c))
|
import logging
import contextlib
logging.warning('Watch out!') # will print a message to the console
logging.info('I told you so') # will not print anything
@contextlib.contextmanager
def log_level(level, name):
    """Temporarily set logger `name` to `level`, restoring it on exit."""
    target = logging.getLogger(name)
    previous = target.getEffectiveLevel()
    target.setLevel(level)
    try:
        yield target
    finally:
        # Restore the previous level even if the body raised.
        target.setLevel(previous)
with log_level(logging.DEBUG, 'my-log') as logger:
logger.debug(f'This is a message for {logger.name}!')
logging.debug('This will not print') |
class Solution(object):
    def generatePossibleNextMoves(self, s):
        """Return every state reachable by flipping one '++' pair to '--'."""
        moves = []
        for idx in range(len(s) - 1):
            # A move exists wherever two consecutive pluses appear.
            if s[idx] == '+' and s[idx + 1] == '+':
                moves.append(s[:idx] + '--' + s[idx + 2:])
        return moves
#Created by WilliamOtieno
# Demo: open lbry.tv in Chrome and search for "Arch Linux".
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Requires chromedriver to be available on PATH.
driver = webdriver.Chrome()
driver.get('https://lbry.tv/')
# Locate the search box by absolute XPath (brittle: breaks if the page layout changes).
searchbox = driver.find_element_by_xpath('//*[@id="app"]/div/header/div/div[1]/div/div/input')
searchbox.send_keys('Arch Linux')
# Press Enter to submit the search.
searchbox.send_keys(Keys.RETURN)
import unittest
def reverse(char_list):
    """Reverse char_list in place and return the same list object."""
    left, right = 0, len(char_list) - 1
    # Swap symmetric pairs moving inward from both ends.
    while left < right:
        char_list[left], char_list[right] = char_list[right], char_list[left]
        left += 1
        right -= 1
    return char_list
# Tests
class Test(unittest.TestCase):
    """Unit tests for the in-place `reverse` helper."""
    def test_empty_string(self):
        # Reversing an empty list is a no-op.
        list_of_chars = []
        reverse(list_of_chars)
        expected = []
        self.assertEqual(list_of_chars, expected)
    def test_single_character_string(self):
        # A single element is its own reverse.
        list_of_chars = ['A']
        reverse(list_of_chars)
        expected = ['A']
        self.assertEqual(list_of_chars, expected)
    def test_longer_string(self):
        # General case: odd-length list is fully reversed in place.
        list_of_chars = ['A', 'B', 'C', 'D', 'E']
        reverse(list_of_chars)
        expected = ['E', 'D', 'C', 'B', 'A']
        self.assertEqual(list_of_chars, expected)
unittest.main(verbosity=2) |
"""
this script demonstrates ggplot in python.
todo run script at clinux.
todo see if data must be long for ggplot.
"""
from ggplot import *
## graphs from website
# -----------------------------------------------------------------------------
## fun
# Scatter of price vs carat, coloured by cut, with a brewer palette.
ggplot(diamonds, aes(x='carat', y='price', color='cut')) +\
    geom_point() +\
    scale_color_brewer(type='diverging', palette=4) +\
    xlab("Carats") + ylab("Price") + ggtitle("Diamonds")
## powerful
# Price density per cut, one facet per clarity level.
ggplot(diamonds, aes(x='price', fill='cut')) +\
    geom_density(alpha=0.25) +\
    facet_wrap("clarity")
## qplot
# NOTE(review): `df` is never defined in this script — this call fails as-is.
qplot(x=df.time, y=df.lifeexp, color=df.region, fill=df.gdp,
      xlab="Year", ylab="Life expectancy",
      main="Health improves across the world")
# can add xlim=(0, 100) ylim=(-1, 200) log="y"
## themes
# http://ggplot.yhathq.com/docs/theme_seaborn.html
ggplot(aes('carat', 'price'), data=diamonds) +\
    geom_point() + theme_seaborn(context='poster')
# black white
ggplot(aes('carat', 'price'), data=diamonds) + geom_point() + theme_bw()
## graphs with my own data
# -----------------------------------------------------------------------------
# todo go to plot.py and do them with ggplot syntax
# how does it work when one variable is in the index?
# should i do .reset_index() so that i can access date variable?
# i must change from wide to long format to use ggplot? i mean, what is y?
# NOTE(review): `pd` (pandas), `filepath1long` and `ret2aum` are never
# imported or defined here — this section fails as written.
dfl_vcc = pd.read_csv(filepath1long, parse_dates=True)
ggplot(ret2aum(dfl_vcc.loc[dfl_vcc.symbol.isin(['BTC', 'ETH'])]),
       aes('date', 'return')) + geom_point()
|
from selectable.base import ModelLookup
from selectable.registry import registry
from cities.models import City
class CityLookup(ModelLookup):
    """django-selectable lookup over City, matched by case-insensitive name."""
    model = City
    search_fields = ('name__icontains', )
    # def get_item_label(self, item):
    #     # Display for choice listings
    #     return u"%s, %s" % (item.name, item.region.name)
    def get_item_value(self, item):
        # Value placed in the form field when an item is chosen.
        return item.name
    def get_item_id(self, item):
        # Pack id plus two coordinates into one comma-separated string.
        # NOTE(review): assumes item.location is a point with .x/.y — confirm
        # against the cities model.
        return "%d,%s,%s" % (item.id, item.location.x, item.location.y)
if not 'event-citylookup' in registry._registry:
registry.register(CityLookup)
|
from datetime import datetime, timedelta
import os
import shutil
from zipfile import ZipFile, BadZipfile
class Archive:
def __init__(self, sourceDir, outputDir, stagingDir):
self.sourceDir = sourceDir
self.outputDir = outputDir
self.stagingDir = stagingDir
self.extension = ".zip"
def main(self):
eligible = self.collectEligibleFileList()
archived = self.compressFile(eligible)
self.removeArchived(archived)
self._sendToArchive()
return
"""
Find files and return list of fullpaths of files.
"""
def collectEligibleFileList(self):
toArchive = []
for p, d, f in os.walk(self.sourceDir):
for job in f:
mtime = datetime.fromtimestamp(
os.path.getmtime(os.path.join(p, job)))
if datetime.now() - mtime > timedelta(days=2):
toArchive.append(os.path.join(os.path.join(p, job)))
return toArchive
"""
Compress files in list. Return list of successful files archived.
"""
def compressFile(self, to_archive_list):
archived = []
for f in to_archive_list:
name = os.path.basename(f)
try:
archive = ZipFile(os.path.join(self.stagingDir,
os.path.basename(f)) + self.extension,
"w", allowZip64=True)
archive.write(f, os.path.basename(f))
archive.close()
archived.append(os.path.basename(f))
except BadZipfile:
print BadZipfile
except:
print "An error occurred."
return archived
"""
Remove all local printfiles that have been archived.
"""
def removeArchived(self, archived_list):
for f in archived_list:
if f in os.listdir(self.sourceDir):
os.remove(os.path.join(self.sourceDir, f))
"""
Move all files in staging directory (.zip) to output directory.
"""
def _sendToArchive(self):
for f in os.listdir(self.stagingDir):
if os.path.isfile(os.path.join(self.outputDir, f)):
os.remove(os.path.join(self.stagingDir, f))
else:
shutil.move(os.path.join(self.stagingDir, f), self.outputDir)
return
if __name__ == "__main__":
test = Archive(r"C:\test\named",
r"C:\test\output",
r"C:\test\staging")
test.main()
|
from behave import Given, When, Then, Step
# Behave step implementations for product-list sorting and filtering.
@Given("I'm on the products list page")
def step_impl(context):
    """Navigate from the home page to the products list."""
    context.home_page.go_to_products_list()
@When("I choose price min to max from sort list")
def step_impl(context):
    """Apply the low-to-high price sort option."""
    context.products_list_page.set_sort_products("low_to_high")
@Then("Products should be sorted by price from low to high")
def step_impl(context):
    assert context.products_list_page.check_price_sorting("low_to_high")
@When("I click on Filter")
def step_impl(context):
    """Open the filter options panel."""
    context.products_list_page.show_filter_options()
@Step("I choose M size")
def step_impl(context):
    """Select the "M" size filter."""
    context.products_list_page.set_filter_size("M")
@Then("Only products with that size should be displayed")
def step_impl(context):
    # NOTE(review): unlike the price-sorting Then-step, this result is not
    # asserted — presumably check_filtered_products should be wrapped in an
    # assert; confirm.
    context.products_list_page.check_filtered_products("M")
|
#coding=utf-8
import requests
import json
# Legacy GET endpoints, keyed by a human-readable data-source label.
url1={'mysql旧':'http://192.168.13.141:4042/sw/serviceApi/09f4fef9249c457ca67b4a7a45823730/interface/51aa655ec2734aa4a5f38bae78a7fc3a/customWrapper',
      'sqlserver旧':'http://192.168.13.141:4042/sw/serviceApi/09f4fef9249c457ca67b4a7a45823730/interface/bd3f6e5debb34d94b63688d40bbbb244/customWrapper',
      'oracle旧':'http://192.168.13.141:4042/sw/serviceApi/09f4fef9249c457ca67b4a7a45823730/interface/c2e5e94645dc4e99bc34bbcbc18bfb1a/customWrapper'}
# GET-style interfaces: iterate (name, url) pairs directly instead of
# indexing parallel lists rebuilt from .keys()/.values() every iteration.
for name, url in url1.items():
    result1 = requests.get(url).json()
    if result1['state'] == True:
        print("接口测试通过,", name)
    else:
        print("接口测试不通过,", name)
# POST-style interfaces
url2="http://192.168.13.141:4042/sw/123/wrapper/custom/getData/v1.0"
para={'mysql新2':'e17a2ff38bf54971a79614641740e890',
      'sqlserver新':'bdda55e32a8c4c748769637427829c0d',
      'oracle新2':'ac42d681bdde489a82dc23869af80eb1'}
for name, service_no in para.items():
    result2 = requests.post(url2, json={'token': '123', 'serviceNo': service_no}).json()
    if result2['code'] == '0':
        print("接口测试通过,", name)
    else:
        print("接口测试不通过,", name)
|
from __future__ import annotations
from pathlib import Path
from typing import Union
import tomlkit
class PathableConcept(object):
    """A named concept backed by a directory on disk."""
    def __init__(self, path: Path):
        self.path = path
        # Assume the Workspace's name is the directory name
        # TODO Enforce any name requirements (would they be project-specific
        # rules)? Allow overriding via config?
        self.name = self.path.name
    def read_toml(
        self, toml_path: Union[str, Path]
    ) -> tomlkit.toml_document.TOMLDocument:
        """Parse a TOML file located under this concept's directory."""
        # TODO Consider caching the result
        return tomlkit.parse(self.read_path(toml_path))
    def read_path(self, path: Union[str, Path]) -> str:
        """Return the text content of a file under this directory."""
        full_path = self.path.joinpath(path)
        with open(full_path) as handle:
            return handle.read()
    def path_exists(self, path: Union[str, Path]) -> bool:
        """True if the given relative path exists under this directory."""
        return self.path.joinpath(path).exists()
    def __str__(self) -> str:
        return self.name
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {str(self)}>"
    def __eq__(self, other: object) -> bool:
        # Equal only to same-class instances rooted at the same path.
        return isinstance(other, self.__class__) and self.path == other.path
    def __hash__(self) -> int:
        return hash(self.path)
|
from adapters.contact_adapter import ContactAdapter
from adapters.generic.motion_sensor import MotionSensorAdapter
from adapters.generic.temp_hum_sensor import TemperatureHumiditySensorAdapter
from adapters.generic.water_leak_sensor import WaterLeakSensorAdapter
konke_adapters = {
'2AJZ4KPFT': TemperatureHumiditySensorAdapter, # Konke Temperature and humidity sensor
'2AJZ4KPBS': MotionSensorAdapter, # Konke Motion sensor
'2AJZ4KPDR': ContactAdapter, # Konke Contact sensor
'LH07321': WaterLeakSensorAdapter # Konke Water detector
} |
#!/usr/bin/env python3
import sys
import Standardize
import estimation_n_grams
import n_gram
from collections import Counter
OUTPUT_FILE = "output.txt"
OUTPUT_FILE_PROPER = "output2.txt"
def main(args):
    """Train Laplace- and linearly-interpolated n-gram models; print perplexities.

    args: argv-style list; args[1] is the raw input corpus path.
    NOTE(review): the Standardize calls are commented out, so the
    intermediate files output2.txt / output3.txt must already exist on disk.
    """
    input = args[1]  # NOTE(review): shadows the builtin input()
    #Standardize.standardize(input, "output.txt")
    #
    # lexicon = build_lexicon(OUTPUT_FILE)
    # removed_words = lexicon.discard_unfrequent_n_grams(2)
    #
    # replace_removed_words(removed_words)
    # lexicon = build_lexicon(OUTPUT_FILE_PROPER)
    # print(lexicon.get_size())
    # print(lexicon.get_top_n_by_counts(20))
    # unigram, bigram, trigram = build_gram(OUTPUT_FILE_PROPER)
    # save_ngram_count(unigram, "./counts/unigram_counts.txt")
    # save_ngram_count(bigram, "./counts/bigram_counts.txt")
    # save_ngram_count(trigram, "./counts/trigram_counts.txt")
    #Standardize.standardize(input, "output3.txt")
    # Laplace (add-one) smoothing baseline.
    laplace = estimation_n_grams.LaplaceSmoothing()
    build_trainig(laplace, "output2.txt")
    build_test(laplace, "output3.txt")
    print(laplace.get_perplexity())
    # Linear interpolation model.
    linear = estimation_n_grams.LinearInterpolation(n=3, k=3)
    build_trainig(linear, "output2.txt")
    build_test(linear, "output3.txt")
    print(linear.get_perplexity())
def build_trainig(obj_estimation, filepath):
    """Feed every line of the corpus file to the estimator as training data."""
    with open(filepath, "r") as corpus:
        for sentence in corpus:
            obj_estimation.add_training_corpus(sentence)
def build_test(obj_estimation, filepath):
    """Feed every line of the corpus file to the estimator as test data."""
    with open(filepath, "r") as corpus:
        for sentence in corpus:
            obj_estimation.add_test_corpus(sentence)
def build_lexicon(filepath):
    """Build an NGram lexicon from every line of the given corpus file."""
    lexicon = n_gram.NGram()
    with open(filepath, "r") as corpus:
        for sentence in corpus:
            lexicon.add_corpus(sentence)
    return lexicon
def build_gram(filepath):
    """Build unigram, bigram and trigram models in one pass over the file."""
    models = tuple(n_gram.NGram(order) for order in (1, 2, 3))
    with open(filepath, "r") as corpus:
        for sentence in corpus:
            # Each line updates all three models.
            for model in models:
                model.add_corpus(sentence)
    return models
def save_ngram_count(ngram_obj, filepath):
    """Write one 'count<TAB>frequency' line per distinct n-gram count.

    The n-gram counts are themselves histogrammed with Counter, i.e. the
    output says how many n-grams occurred exactly `count` times.
    """
    frequency_of_counts = Counter(ngram_obj.get_counts().values())
    with open(filepath, "w") as out:
        for value, freq in frequency_of_counts.items():
            out.write("{0}\t{1}".format(value, freq))
            out.write("\n")
def replace_removed_words(removed_words):
    """Rewrite OUTPUT_FILE with each removed word replaced by ' <UNK> ',
    writing the result to OUTPUT_FILE_PROPER."""
    with open(OUTPUT_FILE, "r") as source:
        text = source.read()
    for word in removed_words:
        text = text.replace(word, " <UNK> ")
    with open(OUTPUT_FILE_PROPER, "w") as target:
        target.write(text)
if __name__ == "__main__":
    # Guard so importing this module does not immediately run the pipeline.
    main(sys.argv)
|
# -*- coding: utf-8 -*-
import tkinter as tk  # Tkinter must be imported before use
# Step 1: instantiate the main window object
window = tk.Tk()
# Step 2: set the window's title
window.title('My Window')
# Step 3: set the window size (width x height)
window.geometry('500x300')  # the separator here is a lowercase 'x'
# Step 4: create a label on the window to display the current selection
var1 = tk.StringVar()  # variable that receives the text of the clicked item
l = tk.Label(window, bg='green', fg='yellow',font=('Arial', 12), width=10, textvariable=var1)
l.pack()
# Step 6: callback for the button's click event
def print_selection():
    value = lb.get(lb.curselection())  # fetch the currently selected text
    var1.set(value)  # display it in the label
# Step 5: create the button; clicking it calls print_selection
b1 = tk.Button(window, text='print selection', width=15, height=2, command=print_selection)
b1.pack()
# Step 7: create the Listbox and populate it
var2 = tk.StringVar()
var2.set((1,2,3,4))  # initial contents bound to the Listbox
# Create the Listbox
lb = tk.Listbox(window, listvariable=var2)  # bind var2's value to the Listbox
# Append the values of a plain list to the Listbox one by one
list_items = [11,22,33,44]
for item in list_items:
    lb.insert('end', item)  # insert at the end
lb.insert(1, 'first')  # insert the string 'first' at index 1
lb.insert(2, 'second')  # insert the string 'second' at index 2
lb.delete(2)  # delete the entry at index 2
lb.pack()
# Step 8: enter the main event loop so the window stays responsive
window.mainloop()
|
import csv
import matplotlib.pyplot as plt
import math
import pandas as pd
from statistics import mean
import numpy as np
def fix():
    """Load the nine fixed-threshold CSV result files.

    Reads 'data t 0.1 and r0.csv' ... 'data t 0.9 and r0.csv', converting
    each row to floats, and appends a ['', '', ''] sentinel row to each
    list so the downstream group scans terminate cleanly.

    Returns nine lists of parsed rows, one per threshold 0.1 .. 0.9.
    """
    buckets = [[] for _ in range(9)]
    for i in range(1, 10):
        f = i / 10
        with open('data t ' + str(f) + ' and r0.csv', 'r') as csvnew:
            for line in csv.reader(csvnew):
                buckets[i - 1].append(list(map(float, line)))
    for bucket in buckets:
        bucket.append(['', '', ''])
    return tuple(buckets)
def dfix_1(fix_1):
    """Average column 2 of fix_1 rows per group (rows grouped by equal
    consecutive values in column 1; only rows with column 0 == 1.0 count).
    The row that closes a group is not accumulated (original scheme kept).
    Returns the list of per-group averages."""
    count, r, avg_rfix1 = 0, 0, []
    for index in range(len(fix_1)-1):
        if fix_1[index][0] == 1.0:
            if fix_1[index][1] == fix_1[index+1][1]:
                count += 1
                r += fix_1[index][2]
            elif count:  # guard: a 1-row group used to raise ZeroDivisionError
                avg_rfix1.append(r/count)
                count, r = 0, 0
    return avg_rfix1
def dfix_2(fix_2):
    """Per-group average of column 2 for the t=0.2 data (see dfix_1)."""
    count, r, avg_rfix2 = 0, 0, []
    for index in range(len(fix_2)-1):
        if fix_2[index][0] == 1.0:
            if fix_2[index][1] == fix_2[index+1][1]:
                count += 1
                r += fix_2[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix2.append(r/count)
                count, r = 0, 0
    return avg_rfix2
def dfix_3(fix_3):
    """Per-group average of column 2 for the t=0.3 data (see dfix_1)."""
    count, r, avg_rfix3 = 0, 0, []
    for index in range(len(fix_3)-1):
        if fix_3[index][0] == 1.0:
            if fix_3[index][1] == fix_3[index+1][1]:
                count += 1
                r += fix_3[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix3.append(r/count)
                count, r = 0, 0
    return avg_rfix3
def dfix_4(fix_4):
    """Per-group average of column 2 for the t=0.4 data (see dfix_1)."""
    count, r, avg_rfix4 = 0, 0, []
    for index in range(len(fix_4)-1):
        if fix_4[index][0] == 1.0:
            if fix_4[index][1] == fix_4[index+1][1]:
                count += 1
                r += fix_4[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix4.append(r/count)
                count, r = 0, 0
    return avg_rfix4
def dfix_5(fix_5):
    """Per-group average of column 2 for the t=0.5 data (see dfix_1)."""
    count, r, avg_rfix5 = 0, 0, []
    for index in range(len(fix_5)-1):
        if fix_5[index][0] == 1.0:
            if fix_5[index][1] == fix_5[index+1][1]:
                count += 1
                r += fix_5[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix5.append(r/count)
                count, r = 0, 0
    return avg_rfix5
def dfix_6(fix_6):
    """Per-group average of column 2 for the t=0.6 data (see dfix_1)."""
    count, r, avg_rfix6 = 0, 0, []
    for index in range(len(fix_6)-1):
        if fix_6[index][0] == 1.0:
            if fix_6[index][1] == fix_6[index+1][1]:
                count += 1
                r += fix_6[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix6.append(r/count)
                count, r = 0, 0
    return avg_rfix6
def dfix_7(fix_7):
    """Per-group average of column 2 for the t=0.7 data (see dfix_1)."""
    count, r, avg_rfix7 = 0, 0, []
    for index in range(len(fix_7)-1):
        if fix_7[index][0] == 1.0:
            if fix_7[index][1] == fix_7[index+1][1]:
                count += 1
                r += fix_7[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix7.append(r/count)
                count, r = 0, 0
    return avg_rfix7
def dfix_8(fix_8):
    """Per-group average of column 2 for the t=0.8 data (see dfix_1)."""
    count, r, avg_rfix8 = 0, 0, []
    for index in range(len(fix_8)-1):
        if fix_8[index][0] == 1.0:
            if fix_8[index][1] == fix_8[index+1][1]:
                count += 1
                r += fix_8[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix8.append(r/count)
                count, r = 0, 0
    return avg_rfix8
def dfix_9(fix_9):
    """Per-group average of column 2 for the t=0.9 data (see dfix_1)."""
    count, r, avg_rfix9 = 0, 0, []
    for index in range(len(fix_9)-1):
        if fix_9[index][0] == 1.0:
            if fix_9[index][1] == fix_9[index+1][1]:
                count += 1
                r += fix_9[index][2]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_rfix9.append(r/count)
                count, r = 0, 0
    return avg_rfix9
def dynamic():
    """Load the nine dynamic-threshold CSV result files.

    Reads 'data t dynamic 0.1 and r0.csv' ... '... 0.9 ...', converting each
    row to floats, and appends a ['', '', ''] sentinel row to each list.

    Returns (t_dynamic, dyn_1, ..., dyn_9); t_dynamic is always an empty
    list but is kept for interface compatibility with the callers.
    """
    t_dynamic = []
    buckets = [[] for _ in range(9)]
    for i in range(1, 10):
        f = i / 10
        with open('data t dynamic ' + str(f) + ' and r0.csv', 'r') as csvnew:
            for line in csv.reader(csvnew):
                buckets[i - 1].append(list(map(float, line)))
    for bucket in buckets:
        bucket.append(['', '', ''])
    return (t_dynamic,) + tuple(buckets)
def dynamic_1(dyn_1):
    """Per-group averages of columns 2 (t) and 3 (r) for rows whose column
    0 equals 61.0; groups are runs of equal consecutive column-1 values.
    Returns (avg_t_list, avg_r_list)."""
    count, t, r, avg_t1, avg_r01 = 0, 0, 0, [], []
    for index in range(len(dyn_1)-1):
        if dyn_1[index][0] == 61.0:
            if dyn_1[index][1] == dyn_1[index+1][1]:
                count += 1
                t += dyn_1[index][2]
                r += dyn_1[index][3]
            elif count:  # guard: a 1-row group used to raise ZeroDivisionError
                avg_t1.append(t/count)
                avg_r01.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t1, avg_r01
def dynamic_2(dyn_2):
    """Per-group (t, r) averages for rows with column 0 == 61.0 (see dynamic_1)."""
    count, t, r, avg_t2, avg_r02 = 0, 0, 0, [], []
    for index in range(len(dyn_2)-1):
        if dyn_2[index][0] == 61.0:
            if dyn_2[index][1] == dyn_2[index+1][1]:
                count += 1
                t += dyn_2[index][2]
                r += dyn_2[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t2.append(t/count)
                avg_r02.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t2, avg_r02
def dynamic_3(dyn_3):
    """Per-group (t, r) averages for rows with column 0 == 92.0 (see dynamic_1)."""
    count, t, r, avg_t3, avg_r03 = 0, 0, 0, [], []
    for index in range(len(dyn_3)-1):
        if dyn_3[index][0] == 92.0:
            if dyn_3[index][1] == dyn_3[index+1][1]:
                count += 1
                t += dyn_3[index][2]
                r += dyn_3[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t3.append(t/count)
                avg_r03.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t3, avg_r03
def dynamic_4(dyn_4):
    """Per-group (t, r) averages for rows with column 0 == 35.0 (see dynamic_1)."""
    count, t, r, avg_t4, avg_r04 = 0, 0, 0, [], []
    for index in range(len(dyn_4)-1):
        if dyn_4[index][0] == 35.0:
            if dyn_4[index][1] == dyn_4[index+1][1]:
                count += 1
                t += dyn_4[index][2]
                r += dyn_4[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t4.append(t/count)
                avg_r04.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t4, avg_r04
def dynamic_5(dyn_5):
    """Per-group (t, r) averages for rows with column 0 == 94.0 (see dynamic_1)."""
    count, t, r, avg_t5, avg_r05 = 0, 0, 0, [], []
    for index in range(len(dyn_5)-1):
        if dyn_5[index][0] == 94.0:
            if dyn_5[index][1] == dyn_5[index+1][1]:
                count += 1
                t += dyn_5[index][2]
                r += dyn_5[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t5.append(t/count)
                avg_r05.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t5, avg_r05
def dynamic_6(dyn_6):
    """Per-group (t, r) averages for rows with column 0 == 88.0 (see dynamic_1)."""
    count, t, r, avg_t6, avg_r06 = 0, 0, 0, [], []
    for index in range(len(dyn_6)-1):
        if dyn_6[index][0] == 88.0:
            if dyn_6[index][1] == dyn_6[index+1][1]:
                count += 1
                t += dyn_6[index][2]
                r += dyn_6[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t6.append(t/count)
                avg_r06.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t6, avg_r06
def dynamic_7(dyn_7):
    """Per-group (t, r) averages for rows with column 0 == 4.0 (see dynamic_1)."""
    count, t, r, avg_t7, avg_r07 = 0, 0, 0, [], []
    for index in range(len(dyn_7)-1):
        if dyn_7[index][0] == 4.0:
            if dyn_7[index][1] == dyn_7[index+1][1]:
                count += 1
                t += dyn_7[index][2]
                r += dyn_7[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t7.append(t/count)
                avg_r07.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t7, avg_r07
def dynamic_8(dyn_8):
    """Per-group (t, r) averages for rows with column 0 == 44.0 (see dynamic_1)."""
    count, t, r, avg_t8, avg_r08 = 0, 0, 0, [], []
    for index in range(len(dyn_8)-1):
        if dyn_8[index][0] == 44.0:
            if dyn_8[index][1] == dyn_8[index+1][1]:
                count += 1
                t += dyn_8[index][2]
                r += dyn_8[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t8.append(t/count)
                avg_r08.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t8, avg_r08
def dynamic_9(dyn_9):
    """Per-group (t, r) averages for rows with column 0 == 35.0 (see dynamic_1)."""
    count, t, r, avg_t9, avg_r09 = 0, 0, 0, [], []
    for index in range(len(dyn_9)-1):
        if dyn_9[index][0] == 35.0:
            if dyn_9[index][1] == dyn_9[index+1][1]:
                count += 1
                t += dyn_9[index][2]
                r += dyn_9[index][3]
            elif count:  # guard against ZeroDivisionError on 1-row groups
                avg_t9.append(t/count)
                avg_r09.append(r/count)
                count, t, r = 0, 0, 0
    return avg_t9, avg_r09
def plot(avg_t1, avg_r01, avg_t2, avg_r02, avg_t3, avg_r03, avg_t4, avg_r04,
         avg_t5, avg_r05, avg_t6, avg_r06, avg_t7, avg_r07, avg_t8, avg_r08,
         avg_t9, avg_r09,
         avg_rfix1, avg_rfix2, avg_rfix3, avg_rfix4, avg_rfix5, avg_rfix6,
         avg_rfix7, avg_rfix8, avg_rfix9):
    """Show two figures: per-round dynamic t_predefine curves, then a bar
    chart of max/mean/min distances for the fixed and dynamic runs.

    Fixes over the original: avg_r02 is now included in all three bar lists
    (it was mistakenly replaced by a second avg_r03), and mean(avg_rfix3)
    is restored so every list has the 18 entries the x-axis labels expect
    (17 entries for 18 bars raised a shape error in plt.bar).
    """
    dynamic_ts = (avg_t1, avg_t2, avg_t3, avg_t4, avg_t5,
                  avg_t6, avg_t7, avg_t8, avg_t9)
    for idx, series in enumerate(dynamic_ts, start=1):
        plt.plot(series, label='t predefine 0.%d average is ' % idx)
    plt.xlabel('round')
    plt.ylabel('t_predefine')
    plt.title("average dynamic predefine")
    plt.legend()
    plt.ylim(0, 1.5)
    plt.show()  # blocks until the first window is closed
    fig = 18
    width = 0.6
    ind = np.arange(fig)
    fixed = (avg_rfix1, avg_rfix2, avg_rfix3, avg_rfix4, avg_rfix5,
             avg_rfix6, avg_rfix7, avg_rfix8, avg_rfix9)
    # BUG FIX: the original listed avg_r03 twice and never used avg_r02.
    dynamic_rs = (avg_r01, avg_r02, avg_r03, avg_r04, avg_r05,
                  avg_r06, avg_r07, avg_r08, avg_r09)
    all_series = fixed + dynamic_rs
    lap_min = [min(s) for s in all_series]
    lap_mean = [mean(s) for s in all_series]
    lap_max = [max(s) for s in all_series]
    plt.xticks(ind + width / 2, ('fix 0.1', 'fix 0.2', 'fix 0.3', 'fix 0.4', 'fix 0.5',
                                 'fix 0.6', 'fix 0.7', 'fix 0.8', 'fix 0.9',
                                 'dnm 0.1', 'dnm 0.2', 'dnm 0.3', 'dnm 0.4', 'dnm 0.5',
                                 'dnm 0.6', 'dnm 0.7', 'dnm 0.8', 'dnm 0.9'))
    # NOTE(review): the three bar series share the same x positions, so they
    # overlap (max behind mean behind min) — this mirrors the original layout.
    bar_1 = plt.bar(ind, lap_max, width, label=' maximum distance')
    bar_2 = plt.bar(ind, lap_mean, width, label=' average distance')
    bar_3 = plt.bar(ind, lap_min, width, label=' minimum distance')
    plt.xlabel('t_predefine')
    plt.ylabel('distance')
    plt.title("distance in difference t_predefine value")
    # Annotate each bar with its integer-truncated height.
    for rect in bar_1 + bar_2 + bar_3:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width()/2.0, height, '%d' % int(height), ha='center', va='bottom')
    plt.legend()
    plt.tight_layout()
    plt.show()
def start():
    """Load all CSV result sets, aggregate each one, and hand off to plot()."""
    fixed_sets = fix()
    dynamic_sets = dynamic()[1:]  # drop the (always empty) t_dynamic list
    dyn_args = []
    for reducer, data in zip((dynamic_1, dynamic_2, dynamic_3, dynamic_4,
                              dynamic_5, dynamic_6, dynamic_7, dynamic_8,
                              dynamic_9), dynamic_sets):
        avg_t, avg_r = reducer(data)
        dyn_args.append(avg_t)
        dyn_args.append(avg_r)
    fix_args = [reducer(data) for reducer, data in
                zip((dfix_1, dfix_2, dfix_3, dfix_4, dfix_5,
                     dfix_6, dfix_7, dfix_8, dfix_9), fixed_sets)]
    plot(*(dyn_args + fix_args))
# Entry point: run the whole load -> aggregate -> plot pipeline at import time.
start()
|
import asyncio
import cozmo
import cv2
import getopt
import logging
import numpy as np
import sys
import threading
import time
import PIL.Image
import PIL.ImageFont
import PIL.ImageTk
from scipy.interpolate import UnivariateSpline
import tkinter as tk
from cozmo.util import degrees, distance_mm, speed_mmps
from NH_Image import Image as Image2
from NH_Utils import *
# Logger
log = logging.getLogger('ok.linefollow')
def cozmo_cli(robot: cozmo.robot.Robot):
    """ Main loop implementing simplistic CLI
    ('s' starts the vision loop, 'e' stops motors and exits).
    """
    # Prepare the robot: motors off, headlight on, camera streaming,
    # lift raised out of the camera's view, head looking down at the line.
    robot.stop_all_motors()
    robot.set_head_light(True)
    robot.camera.image_stream_enabled = True
    robot.set_lift_height(1.0).wait_for_completed()
    robot.set_head_angle(cozmo.robot.MIN_HEAD_ANGLE).wait_for_completed()
    while True:
        command = input('C>')
        if command == 's':
            go_Cozmo(robot)
        elif command == 'e':
            robot.stop_all_motors()
            cv2.destroyAllWindows()
            print('Bye.')
            break
def go_Cozmo(robot: cozmo.robot.Robot):
    """ Capture an image of the path and show bounding rect of the
    contour of the path. Runs until 'q' is pressed in the cv2 window.
    """
    log.info('start')
    font = cv2.FONT_HERSHEY_SIMPLEX
    direction = 0
    Images = []
    N_SLICES = 4
    for q in range(N_SLICES):
        # NOTE(review): `Image` presumably comes from the `NH_Utils` star
        # import — confirm; PIL.Image is imported as a module and is not
        # callable like this.
        Images.append(Image())
    log.info('before true')
    while True:
        img = np.array(robot.world.latest_image.raw_image)
        array = np.frombuffer(img, dtype='uint8')
        img = cv2.imdecode(array, 1)
        direction = 0
        img = RemoveBackground(img, False)
        if img is not None:
            log.info('after image')
            # BUG FIX: time.clock() was removed in Python 3.8;
            # time.perf_counter() is the documented replacement for timing.
            t1 = time.perf_counter()
            SlicePart(img, Images, N_SLICES)
            for i in range(N_SLICES):
                direction += Images[i].dir
            log.info(direction)
            fm = RepackImages(Images)
            t2 = time.perf_counter()
            cv2.putText(fm, "Time: " + str((t2 - t1) * 1000) + " ms", (10, 470), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.imshow("Vision Race", fm)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def main(argv):
    """Parse command-line options and dispatch the requested use case."""
    # Configure the module logger with an INFO-level stream handler.
    fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s %(message)s')
    stream = logging.StreamHandler()
    stream.setLevel(logging.INFO)
    stream.setFormatter(fmt)
    log.setLevel(logging.INFO)
    log.addHandler(stream)
    # Evaluate the command line; the default use case is the CLI.
    usecase = 'cli'
    try:
        opts, args = getopt.getopt(argv, 'hu:', ['usecase='])
    except getopt.GetoptError:
        print('line_follow.py -u <usecase>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('line_follow.py -u <usecase>')
            sys.exit()
        elif opt in ("-u", "--usecase"):
            usecase = arg
    if usecase == 'cli':
        cozmo.run_program(cozmo_cli)
#if __name__ == '__main__':
# main(sys.argv[1:])
class Thresh:
    """Interactive HSV threshold tuner for Cozmo's color camera stream.

    Connecting in __init__ drives run(), which builds a Tk window with two
    image labels (thresholded mask, gamma-corrected frame) and seven sliders
    (gamma 'ga', HSV lower bounds 'hl'/'sl'/'vl', HSV upper bounds
    'hh'/'sh'/'vh'). on_img() re-renders the UI for every camera frame.
    """
    def __init__(self):
        self._robot = None
        # Tk widgets are created later in run(); 0 is just a placeholder.
        self._tk_root = 0
        self._tk_label_input = 0
        self._tk_label_output = 0
        # Blocks for the lifetime of the connection; run() is the entry point.
        cozmo.connect(self.run)
    def on_img(self, event, *, image: cozmo.world.CameraImage, **kw):
        """Per-frame handler: gamma-correct, HSV-threshold, refresh the UI."""
        raw_img = image.raw_image
        raw_rgb = np.array(raw_img)
        r, g, b = cv2.split(raw_rgb)
        hsv_img = cv2.cvtColor(np.array(raw_img), cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(hsv_img)
        # Split/merge round-trip leaves hsv_img unchanged (kept from original).
        mer_img = cv2.merge((h, s, v))
        hsv_img = mer_img
        rgb_img2 = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)
        # Inverse gamma from the 'ga' slider; +1 avoids division by zero at 0.
        invGamma = 1.0 / ((self._tk_ga_scale.get() + 1) / 50)
        # Build a 256-entry gamma curve (new) over the identity ramp (ori).
        new = np.zeros(256)
        ori = np.zeros(256)
        for i in range(256):
            new[i] = ((i / 255.0) ** invGamma) * 255
            ori[i] = i
        try:
            incr_ch_lut = self.create_LUT_8UC1(ori, new)
            low = np.array([self._tk_hl_scale.get(),self._tk_sl_scale.get(),self._tk_vl_scale.get()], dtype="uint8")
        except:
            # NOTE(review): bare except — intended to fire when the Tk window
            # (and its scales) has been destroyed, but it also swallows any
            # other error here; a narrower exception type would be safer.
            sys.exit('Window Closed - Exiting')
        high = np.array([self._tk_hh_scale.get(),self._tk_sh_scale.get(),self._tk_vh_scale.get()], dtype="uint8")
        # Apply the gamma LUT to both the raw frame and the HSV round-trip.
        rgb_img = cv2.LUT(raw_rgb, incr_ch_lut).astype(np.uint8)
        rgb_img2 = cv2.LUT(rgb_img2, incr_ch_lut).astype(np.uint8)
        rgb_img = cv2.blur(rgb_img, (3, 3))
        # Binary mask of pixels falling inside the slider-selected range.
        thresh_img = cv2.inRange(rgb_img2, low, high)
        raw_rgb_conv = cv2.cvtColor(np.array(raw_img), cv2.COLOR_BGR2RGB)
        rgb_img_conv = cv2.cvtColor(np.array(rgb_img), cv2.COLOR_BGR2RGB)
        rgb_img2_conv = cv2.cvtColor(np.array(rgb_img2), cv2.COLOR_BGR2RGB)
        # Persist the current mask each frame for offline inspection.
        cv2.imwrite('thresh.png', thresh_img)
        pil_thresh = PIL.Image.fromarray(cv2.cvtColor(thresh_img, cv2.COLOR_GRAY2RGB))
        rgb_img = PIL.Image.fromarray(rgb_img)
        rgb_img2 = PIL.Image.fromarray(rgb_img2)
        display_image_input = PIL.ImageTk.PhotoImage(image=pil_thresh)
        display_image_output = PIL.ImageTk.PhotoImage(image=rgb_img2)
        # Keep references on the labels so Tk doesn't garbage-collect them.
        self._tk_label_input.imgtk = display_image_input
        self._tk_label_input.configure(image=display_image_input)
        self._tk_label_output.imgtk = display_image_output
        self._tk_label_output.configure(image=display_image_output)
        self._tk_root.update()
        log.info('start')
        font = cv2.FONT_HERSHEY_SIMPLEX
        direction = 0
        Images = []
        N_SLICES = 4
        # Commented-out remnants of the line-following slicing experiment:
        #for q in range(N_SLICES):
        #    Images.append(Image2.Image())
        #log.info('before true')
        #while True:
        #array = np.array(rgb_img2, dtype='uint8')
        #img = np.array(rgb_img2)
        #img = img[:, :, ::-1].copy()
        #img = cv2.imdecode(open_cv_image, 1)
        #direction = 0
        #img = RemoveBackground(img, False)
        #if rgb_img_conv is not None:
        #t1 = time.clock()
        #SlicePart(rgb_img_conv, Images, N_SLICES)
        #for i in range(N_SLICES):
        #direction += Images[i].dir
        #log.info(direction)
    def create_LUT_8UC1(self, x, y):
        """Build a 256-entry lookup table by spline-interpolating (x, y)."""
        spl = UnivariateSpline(x, y)
        return spl(range(256))
    async def set_up_cozmo(self, coz_conn):
        """Enable the color camera stream and register the image handler."""
        asyncio.set_event_loop(coz_conn._loop)
        self._robot = await coz_conn.wait_for_robot()
        self._robot.camera.image_stream_enabled = True
        self._robot.camera.color_image_enabled = True
        self._robot.set_head_angle(cozmo.robot.MIN_HEAD_ANGLE).wait_for_completed()
        self._robot.add_event_handler(cozmo.world.EvtNewCameraImage, self.on_img)
    async def run(self, coz_conn):
        """Connection entry point: build the Tk UI, then idle forever while
        on_img refreshes it for each camera frame."""
        await self.set_up_cozmo(coz_conn)
        self._tk_root = tk.Tk()
        self._tk_label_input = tk.Label(self._tk_root)
        self._tk_label_output = tk.Label(self._tk_root)
        # One slider per tunable: gamma, then HSV low and high bounds.
        self._tk_ga_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='ga')
        self._tk_hl_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='hl')
        self._tk_sl_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='sl')
        self._tk_vl_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='vl')
        self._tk_hh_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='hh')
        self._tk_sh_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='sh')
        self._tk_vh_scale = tk.Scale(self._tk_root, from_=0, to=255, orient=tk.HORIZONTAL, length=300, label='vh')
        self._tk_label_input.pack()
        self._tk_label_output.pack()
        self._tk_ga_scale.pack()
        self._tk_hl_scale.pack()
        self._tk_sl_scale.pack()
        self._tk_vl_scale.pack()
        self._tk_hh_scale.pack()
        self._tk_sh_scale.pack()
        self._tk_vh_scale.pack()
        # Yield to the cozmo event loop forever; on_img drives the UI updates.
        while True:
            await asyncio.sleep(0)
# Script entry point: constructing Thresh connects to Cozmo and blocks.
if __name__ == '__main__':
    Thresh()
|
from configuration import *
from bench_runner import *
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import json
import os
import socket
import math
# Find the newest subdirectory in a path (i.e. the one most recently modified)
def newest_subdir(path):
    """Return the most recently modified immediate subdirectory of *path*,
    with a trailing '/' appended.

    Uses os.path.join so *path* works with or without a trailing slash
    (the original 'path + d' concatenation required one). Raises
    ValueError if *path* has no subdirectories.
    """
    directories = [os.path.join(path, d) for d in os.listdir(path)
                   if os.path.isdir(os.path.join(path, d))]
    most_recent_subdir = max(directories, key=os.path.getmtime)
    return most_recent_subdir + "/"
# Given a data file generated by one of the benchmarks, parse some usable times
# This doesn't _quite_ handle all the errors that might be included in the result
# files.
def parse_result_file(fname):
    """Parse (size, time) string pairs from a benchmark result file.

    Lines containing 'ComputeCpp' and blank lines are skipped. A line with
    'terminate' or 'what()' marks a crashed run and ends the parse; any
    other unparseable line re-raises. Exits the process if no data at all
    could be parsed. (Syntax modernised from Python 2: print statements
    and 'except Exception, e' are not valid Python 3.)
    """
    print("Parsing " + fname)
    dat = []
    with open(fname) as f:
        for l in f:
            if "ComputeCpp" not in l and l.rstrip() != "":
                try:
                    bench, duff, size, time = l.rstrip().split(" ")
                    dat.append((size, time))
                except Exception as e:
                    if "terminate" in l or "what()" in l:
                        print("Found error, finishing parse.")
                        break
                    else:
                        print("Failed to parse " + l)
                        raise e
    if dat == []:
        print("Failed to parse any data from file: " + fname)
        exit(1)
    return dat
# Wrapper object for a dataset configuration, benchmark, name, and raw data.
class DataSet(object):
    """Wraps one benchmark result: its configuration, benchmark name,
    display name, and the parsed (size, time) data as a float DataFrame."""

    def __init__(self, tup):
        super(DataSet, self).__init__()
        # Unpack the (configuration, benchmark, display-name) triple.
        self.config = tup[0]
        self.benchmark = tup[1]
        self.name = tup[2]
        # The result file lives in the most recently modified results
        # subdirectory for this configuration.
        result_file = newest_subdir(self.config.results_folder) + self.benchmark
        # Parse the raw (size, time) string pairs and convert them into a
        # numeric DataFrame; the raw records are not kept.
        records = parse_result_file(result_file)
        self.data = pd.DataFrame.from_records(
            records, columns=["size", "time"]).astype(float)
# Given a filename, and extension, get a new unique filename based on it.
# for example, given "name.ext", this will search "name_1.ext", "name_2.ext" etc
# until it finds a filename that does not yet exist.
def get_unique_filename(filename):
    """Return *filename* if unused, else the first free 'name_N.ext' variant.

    Given "name.ext", tries "name_1.ext", "name_2.ext", ... until a path
    that does not yet exist is found.
    """
    # split the file into name + extension (ext keeps its leading dot)
    name, ext = os.path.splitext(filename)
    if not os.path.exists(filename):
        # the file doesn't exist yet, just write to it
        return filename
    # start with name_1.ext
    n = 1
    # BUG FIX: splitext's ext already contains the '.', so the original
    # template name + "_%s." + ext produced "name_1..ext".
    nname = name + "_%s" + ext
    # keep trying higher numbers while the candidate exists
    while os.path.exists(nname % n):
        n = n + 1
    # return the first one that doesn't exist
    return (nname % n)
# dummy check method
def check(self, extrargs=[]):
    """Dummy check hook attached to PlotConfiguration below.
    (Print converted to a function call for Python 3 compatibility.)"""
    print("Checking plot configuration")
# Run a given plot configuration, generating a plot and saving it to a file
def run(self, extrargs=[]):
    """Load each configured dataset, plot time vs size, and save the figure.

    Fixes over the original: print statements and dict iteration are now
    Python-3 compatible, and the x-axis scale reads the 'xscale' option
    (it previously reused the 'yscale' key).
    """
    print("running plotrunner")
    datasets = [DataSet(c) for c in self.bench_configs]
    for c in datasets:
        print(c)
    print(self.fname)
    print(self.options)
    print("Starting plotting")
    # set up seaborn, and get a nice style
    sns.set_style("darkgrid")
    plt.figure()
    # iterate over the datasets, plotting them
    for d in datasets:
        # if we have a transform specified in the plot configuration, run it
        if "transforms" in self.options:
            # keys name the columns to rewrite; values are lambda sources
            for key, value in self.options["transforms"].items():
                # NOTE(review): eval of a config-supplied string — fine for
                # trusted local configs, unsafe on untrusted input.
                fn = eval(value)
                d.data[key] = d.data[key].apply(fn)
        # plot the data
        d.data.plot(x="size", y="time", label=d.name)
    # give the plot a legend and title
    plt.legend(loc="upper left")
    plt.title(getelse(self.options, "title", "default title"))
    # change scale options according to the configuration
    plt.yscale(getelse(self.options, "yscale", "linear"))
    plt.xscale(getelse(self.options, "xscale", "linear"))  # BUG FIX: was "yscale"
    plt.ylabel(getelse(self.options, "ylabel", "time"))
    plt.xlabel(getelse(self.options, "xlabel", "size"))
    # get a unique name, and save the plot to that name
    ofname = get_unique_filename(self.fname)
    plt.savefig(ofname, bbox_inches="tight")
# check if the plot configuration has run or not
def hasrun(self, extrargs=[]):
    """Dummy has-run hook attached to PlotConfiguration below.
    (Print converted to a function call for Python 3 compatibility.)"""
    print("Has the plot configuration run?")
# Monkey-patch the hooks onto PlotConfiguration (imported from configuration).
PlotConfiguration.check = check
PlotConfiguration.run = run
PlotConfiguration.hasrun = hasrun
from computeCost import computeCost
import numpy as np
def gradientDescent(X, y, theta, alpha, num_iters):
    """
    Performs gradient descent to learn theta.

    theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)
    updates theta by taking num_iters gradient steps with learning rate
    alpha, and also returns the cost after each step.
    """
    m = y.size
    J_history = np.zeros(num_iters)
    # 'i' instead of the original 'iter', which shadowed the builtin.
    for i in np.arange(num_iters):
        h = X.dot(theta)
        # Batch gradient step: theta -= alpha/m * X^T (X theta - y)
        theta = theta - alpha*(1.0/m)*(X.T.dot(h-y))
        J_history[i] = computeCost(X, y, theta)
    return theta, J_history
|
print('test')
# NOTE(review): a bare name `hi` stood here and raised NameError at import
# time; it looks like an accidental paste, so it is disabled.
# hi
print('feature')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.