text stringlengths 8 6.05M |
|---|
import os
import random
import numpy as np
import time
import neat6 as neat
class CuatroEnRaya():
    """Connect Four game engine wired to a NEAT neuroevolution trainer.

    Board cells hold 0 (empty), 1 (player one, 'O') or -1 (player two, 'X').
    ``self.turno`` is the side to move and alternates between 1 and -1.
    Each side's move provider lives in ``self.funciones``: None means a human
    typing column numbers, 'random'/'d1'/'negamax' select built-in players,
    and any other callable is invoked with the board.

    NOTE(review): indentation reconstructed from flattened source — the
    nesting chosen below is the logically consistent reading; confirm against
    the original file if available.
    """
    def __init__(self,alto=6,ancho=7,cantidad=100,f1=None,f2=None,imp=True,profundidad=2):
        # alto/ancho: board height/width; cantidad: NEAT population size;
        # f1/f2: move providers for players 1 and -1 (None = human input);
        # imp: print the board while playing; profundidad: negamax search depth.
        self.alto = alto
        self.ancho = ancho
        '''self.tablero=[
        [ 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0, 0, 0, 0],
        [ 0, 0, 0, 0,-1, 0, 0],
        [ 0, 0, 0,-1, 1, 0, 0],
        [ 0, 0, 0, 1, 1, 1, 0],
        [ 0,-1,-1, 1, 1,-1, 0]
        ]'''
        # Board as a list of rows; row 0 is the top, row alto-1 the bottom.
        self.tablero = []
        for k in range(self.alto):
            fila = [0 for i in range(self.ancho)]
            self.tablero.append(fila)
        self.turno = 1                # side to move: 1 or -1
        self.columna = ''             # last column played (read by fin_de_partida)
        self.contador_de_turnos = 0   # number of moves played so far
        self.ganador = 2              # 2 = game open; later 1 / -1 / 0 (draw)
        self.fichas = [' .', ' O', ' X']  # glyphs indexed by cell value 0 / 1 / -1
        # "funciones" has length 3 because self.turno is used as the index and
        # it alternates between 1 and -1 == 2 (mod 3).
        self.funciones = [None, f1, f2]
        self.imp = imp
        self.depth = profundidad
        # NEAT population: inputs = one per board cell, outputs = one per column.
        self.n=neat.Neat(cantidad,self.ancho*self.alto,self.ancho)
    def imprimir_tablero(self):
        """Print the board to stdout using the glyphs in self.fichas."""
        print('')
        for k in range(self.alto):
            for i in range(self.ancho):
                print(self.fichas[self.tablero[k][i]], end='')
                pass
            print('')
        print('')
    def tirar_ficha(self, columna):
        """Drop the current player's piece into ``columna``.

        On success flips the turn, bumps the move counter and returns 1;
        returns 0 for an invalid move (full or out-of-range column).
        NOTE(review): the board access precedes the range check in the
        ``and`` — an out-of-range column raises IndexError before the
        range test can reject it; verify callers always pass valid columns.
        """
        self.columna = columna
        if self.tablero[0][columna] == 0 and columna in range(self.ancho):
            # Scan from the bottom row up and fill the first empty cell.
            for k in reversed(range(self.alto)):
                if self.tablero[k][columna] == 0:
                    self.tablero[k][columna] = self.turno
                    self.turno = -self.turno
                    self.contador_de_turnos += 1
                    self.columna = columna
                    return 1
        return 0
    def quitar_ficha(self, columna):
        """Undo the most recent drop in ``columna`` (used by negamax search)."""
        # The topmost non-empty cell in the column is the newest piece.
        for k in range(self.alto):
            if self.tablero[k][columna] != 0:
                self.tablero[k][columna] = 0
                break
        self.turno = -self.turno
        self.contador_de_turnos -= 1
    def fin_de_partida(self):
        """Return 1 if the game just ended (setting self.ganador), else 0.

        Only lines through the last played cell (self.columna) are checked,
        so this must be called right after each move. The player who just
        moved is ``-self.turno`` because tirar_ficha already flipped the turn.
        """
        if self.contador_de_turnos == 0:
            return 0
        # Row: locate the row of the piece just played in self.columna.
        fila = ''
        if self.tablero[0][self.columna] != 0:
            # Column is full, so the newest piece sits in the top row.
            fila = 0
        for k in reversed(range(self.alto)):
            #print('f: en la casilla ',k,self.columna,'hay un ',self.tablero[k][self.columna])
            if self.tablero[k][self.columna] == 0:
                # First empty cell from the bottom is directly above the piece.
                fila = k + 1
                break
        if fila == '':
            print('ERROR: fin_de_partida(): no se que en que fila esta la ficha que has tirado')
        # Horizontal: count matching pieces on both sides of the new one.
        cont = 0
        d = 1
        for s in [1, -1]:
            while (self.columna + s * d in range(self.ancho) and self.tablero[fila][self.columna + s * d] == -self.turno):
                d += 1
                cont += 1
            d = 1
        if cont >= 3:
            self.ganador = -self.turno
            return 1
        # Vertical: same scan along the column.
        d = 1
        cont = 0
        for s in [1, -1]:
            while (fila + s * d in range(self.alto) and self.tablero[fila + s * d][self.columna] == -self.turno):
                d += 1
                cont += 1
            d = 1
        if cont >= 3:
            self.ganador = -self.turno
            return 1
        # Diagonals: / and \ — DS[direction][sense] = (column step, row step).
        DS = [[[1, -1], [-1, 1]], [[1, 1], [-1, -1]]]
        cont = 0
        for dir in [0, 1]:
            for s in [0, 1]:
                for d in [1, 2, 3]:
                    if fila + d * DS[dir][s][1] in range(self.alto) and self.columna + d * DS[dir][s][0] in range(self.ancho) and self.tablero[fila + d * DS[dir][s][1]][self.columna + d * DS[dir][s][0]] == -self.turno:
                        cont += 1
                    else:
                        break
            if cont >= 3:
                self.ganador = -self.turno
                return 1
            cont = 0
        # Full board with no winner: draw.
        if self.contador_de_turnos == self.ancho * self.alto:
            self.ganador = 0
            return 1
        return 0
    def pedir_columna(self):
        """Ask the current side's move provider for a column index."""
        if self.funciones[self.turno] == None:
            # Human player: keep prompting until an integer is typed.
            while True:
                try:
                    i=int(input())
                    return i
                except: pass
        elif self.funciones[self.turno] == 'random':
            return self.columna_random()
        elif self.funciones[self.turno] == 'd1':
            return self.d1()
        elif self.funciones[self.turno] == 'negamax':
            n = self.negamx(self.depth)
            if self.imp == True:
                print("negamax: prof=", self.depth, "mejorcol=", n[0],"valor=", n[1])
            return n[0]
        else:
            # Custom callable: gets the raw board, returns a column.
            f = self.funciones[self.turno]
            col = f(self.tablero)
            #print(col)
            return col
    def columna_random(self):
        """Return a uniformly random non-full column."""
        columnas = []
        for k in range(self.ancho):
            if self.tablero[0][k] == 0:
                columnas.append(k)
        if len(columnas) == 0:
            # Should be unreachable: the game loop stops on a full board.
            print("No hay columnas disponibles para jugar")
            print("numero de movimientos:", self.contador_de_turnos)
            self.imprimir_tablero()
        return random.choice(columnas)
    def d1(self):
        """Depth-1 player: take an immediate win if one exists, else random."""
        columna_buena = ''
        for k in range(self.ancho):
            if self.tablero[0][k] == 0:
                self.tirar_ficha(k)
                if self.fin_de_partida() == 1:
                    # quitar_ficha flips the turn back, so the winning side
                    # of the probe is -self.turno after the undo.
                    if self.ganador == -self.turno:
                        columna_buena = k
                self.quitar_ficha(k)
                self.ganador = 2
        if columna_buena == '':
            return self.columna_random()
        else:
            return columna_buena
    def negamx(self, depth):
        """Negamax search to ``depth`` plies.

        Returns [best_column, score] from the point of view of the side to
        move. A finished position scores -100 (the opponent just won) or 0
        (draw); leaf positions at depth 0 get a random score in [0, 99].
        """
        if self.fin_de_partida() == 1:
            # Game over: the side that just moved won (or it's a draw), so
            # from the current side's perspective the value is 0 or -100.
            if self.ganador == 0: return ['', 0]
            elif self.ganador == 1: return ['', -100]
            elif self.ganador == -1: return ['', -100]
            else:
                print("negamax(): ERROR: es fin de partida pero ganador=",self.ganador)
        if depth == 0:
            # TODO: changed to a random leaf evaluation; it used to be [0,0].
            #return [self.columna_random(), random.randint(0,99)]
            return ['',random.randint(0,99)]
        max_val = -101
        val = 0
        mejor_columna = ''
        cols = [3, 2, 4, 1, 5, 0, 6]  # NOTE(review): intended move ordering, currently unused
        for k in range(self.ancho):
            if self.tablero[0][k] == 0:
                '''if depth==1: print("\t",end='')
                print("negamax: tiro en la columna",k)'''
                self.tirar_ficha(k)
                # Negate the child's score: what is good for the opponent is bad for us.
                val = -self.negamx(depth - 1)[1]
                self.quitar_ficha(k)
                '''if depth==2:
                print("negamax: vuelvo con una puntuacion de ",val)'''
                if (val > max_val):
                    max_val = val
                    mejor_columna = k
        return [mejor_columna, max_val]
    def nueva_partida(self):
        """Play a full game using self.funciones for both sides; return the winner."""
        # Reset state
        self.tablero = []
        for k in range(self.alto):
            fila = [0 for i in range(self.ancho)]
            self.tablero.append(fila)
        self.turno = 1
        self.columna = ''
        self.contador_de_turnos = 0
        self.ganador = ''
        # Game loop
        while (not self.fin_de_partida()):
            if self.imp:
                self.imprimir_tablero()
            columna = self.pedir_columna()
            self.tirar_ficha(columna)
            #self.tablero_a_input()
            #os.system('clear')
        if self.imp:
            self.imprimir_tablero()
        #print('Resultado: ',self.ganador)
        return self.ganador
    '''def tablero_a_input(self):
    En una columna de altura 7 hay 2^7-1=127 posibles combinaciones
    Lo que quiero es que a cada una de esas 127 combinaciones se le asigne un numero uniformemente distribuido entre el 0 y el 1
    Si consigo que a cada columna se le asigne un numero uniformemente distribuido entre el 0 y el 127 entonces dividieno entre 127 ya estaria
    Si pienso las O y las X como 1 y 0 de un numero en base 2 respectivamente entonces:
    | |
    | | A esta columna le podriamos asignar el 1*(2**5)+1*(2**4)+2*(2**3)+1*(2**2)+0+0
    |O| Pero con esta numeracion tienen muy poco peso las fichas de la fila de arriba
    |X|
    |O|
    |O|
    Lo ideal seria entonces tenera un hash para cada columna donde columnas similares tuvieran hash muy distintos (zurbist hashing)
    Segundo la asignacion binario anterior al numero 100=(1100100)_2=1*(2**6)+1*(2**5)+1*(2**2) le corresponderia la columna:
    El 1*(2**6) corresponde necesariamente a una X abajo de todo
    El 1*(2**5) podria corresponder a una X en la siguiente posicion:
    En este caso el 2**2=4 no puede correspnder a ninguna nueva fiucha
    El 1*(2**5) Tiene que coresponder necesariamente (en parte, solo 1*(2**4)) a una O
    Ahora queda el numero 1*(2**5)+1*(2**2)-1*(2**4)=20=10100=1*(2**4)+1*(2**2) etc
    | |
    |X|
    |X|
    |O|
    |O|
    |X|
    t=[]
    for k in range(self.ancho):
    col=0
    for i in reversed(range(self.alto)):
    if self.tablero[i][k]==0:
    break
    elif self.tablero[i][k]==1:
    col+=1*(2**i)
    else:
    col+=2*(2**i)
    t.append(col/128)
    return t'''
    def tablero_a_input(self):
        """Flatten the board into a single list for the network's input layer."""
        L=[]
        for l in self.tablero: L+=l
        return L
    def partida_vs_red(self,red,t=1):
        """Play one game: the network moves for side ``t``, self.funciones the other."""
        # Reset state
        self.tablero = []
        for k in range(self.alto):
            fila = [0 for i in range(self.ancho)]
            self.tablero.append(fila)
        self.turno = 1
        self.columna = ''
        self.contador_de_turnos = 0
        self.ganador = ''
        #self.funciones[-1] = 'negamax'
        # Game loop
        while (not self.fin_de_partida()):
            if self.turno==t:
                p=red.prealimentacion(self.tablero_a_input())
                for k in range(self.ancho):
                    if self.tablero[0][k]!=0:
                        p[k]=-1# argmax is taken next, so -1 discards the full column
                self.tirar_ficha(p.index(max(p)))
            else: self.tirar_ficha(self.pedir_columna())
            if self.imp==True:
                self.imprimir_tablero()
        return
    def juego(self,num=50,meta=50):
        """Evaluate every network over ``num`` games and assign fitness.

        Fitness per game starts at 50, rewarding fast wins (+50 - moves) and
        penalising slow losses (-50 + moves). Reaching ``meta`` average
        fitness sets the trainer's stop flag.
        """
        for red in self.n.poblacion:
            fitness=0
            for k in range(num):
                self.partida_vs_red(red)
                fit=50
                if self.ganador==1:
                    #print("juego(): La red",self.n.poblacion.index(red),"ha ganado una partida")
                    fit=fit+50-self.contador_de_turnos
                elif self.ganador==-1:
                    fit=fit-50+self.contador_de_turnos
                #fitness=max(fit,fitness)
                fitness+=fit
                #print("resultado:",self.ganador,"fit:",fit,"fitness",fitness)
            red.fitness=fitness/num
            print("La red",self.n.poblacion.index(red),"tiene un fitness de",red.fitness)
            if red.fitness>meta:
                self.n.stop=True
                print("La red",self.n.poblacion.index(red),"ha conseguido un fitness de",red.fitness)
    def entrenar(self,g=40):
        """Run up to ``g`` generations of evaluate-then-evolve, with timing output."""
        T0=time.time()
        for k in range(g):
            t0=time.time()
            self.juego()
            t1=time.time()
            self.n.info_generacion()
            self.n.nueva_generacion_neat(p=False,d=False,t=True)
            t2=time.time()
            #self.n.debug_definitvo()
            #self.n.debug_rutinario()
            #self.n.debug()
            print("La generacion",self.n.generacion," ha tardado",round(t1-t0,2),"s de juego +",round(t2-t1,2),"s de generacion +",round(time.time()-t2,2),"s de debug")
            if self.n.stop==True: break
        print("En total el entreno ha tardado:",time.time()-T0,"segundos")
    def ver(self,red=None,manual=False):
        """Replay one printed game with ``red`` (default: fittest network).

        With manual=True, a human plays the opposing side instead of the
        configured provider.
        """
        if red==None:
            self.n.poblacion.sort(key=lambda red: red.fitness)
            red=self.n.poblacion[-1]
        self.imp=True
        if manual==True: self.funciones[-1]=None
        self.partida_vs_red(red)
        if manual==True: self.funciones[-1]='negamax'
        print("La red",self.n.poblacion.index(red),"tiene un fitness de",red.fitness)
        print("ganador:",self.ganador,"movimientos:",self.contador_de_turnos)
        self.imp=False
    def performance(self,red=None,num=50):
        """Measure a network's average fitness over ``num`` fresh games."""
        if red==None: red=self.n.mejor()
        resultados=[]
        fitness=0
        for k in range(num):
            self.partida_vs_red(red)
            resultados.append(self.ganador)
            fit=50
            if self.ganador==1:
                fit=fit+50-self.contador_de_turnos
            elif self.ganador==-1:
                fit=fit-50+self.contador_de_turnos
            fitness+=fit
        print("Resultados:",resultados,"=",resultados.count(1),resultados.count(-1))
        print("La red",self.n.poblacion.index(red),"ha hecho un performance de",fitness/num)
if __name__=='__main__':
    # Train a NEAT population against a depth-2 negamax opponent.
    c=CuatroEnRaya(alto=6,ancho=7,cantidad=50,imp=False,profundidad=2,f2='negamax')
    # Hyper-parameters
    # Distance between networks (speciation coefficients)
    c.n.c1=1 # Excess genes
    c.n.c2=1 # Disjoint genes
    c.n.c3=4 # Weight difference
    c.n.CP=3 # Compatibility distance between species
    c.n.MUERTES=0.5 # fraction of each species killed after every generation
    # Mutation probabilities
    c.n.PROBABILIDAD_MUTAR_NUEVA_CONEXION=0.9
    c.n.PROBABILIDAD_MUTAR_NUEVO_NODO=0.5
    c.n.PROBABILIDAD_MUTAR_AJUSTAR_PESO=0.9
    c.n.PROBABILIDAD_MUTAR_RANDOM_PESO=0.9
    c.n.PROBABILIDAD_MUTAR_ACTIVAR_CONEXION=0.1
    # Magnitude of random perturbations
    c.n.VALOR_PESO_RANDOM=1
    c.n.VALOR_PESO_CAMBIO=0.3
    guardar=True
    if guardar:
        # Resume from the newest pickled checkpoint in ./checkpoint.
        # NOTE(review): loading replaces c.n, so the hyper-parameters set
        # above are discarded in this branch — confirm that is intended.
        text_files = [f for f in os.listdir(os.getcwd()+'/checkpoint') if f.endswith('.pickle')]
        l=[int(f.replace('.pickle','').replace('gen','')) for f in text_files]
        nombre='checkpoint/gen'+str(max(l))
        c.n=neat.cargar_neat(nombre)
        print("Archivo:",nombre+'.pickle',"cargado")
    else:
        # Fresh start: densely connect every network before training.
        for red in c.n.poblacion:
            print("Mutando la red",c.n.poblacion.index(red),"...",end='\r')
            while(len(red.conexiones)<red.numero_inputs*red.numero_outputs):
                c.n.mutar_nueva_conexion(red)
    print("Calculando... \r")
    c.entrenar(20)
    #c.partida_vs_red(c.n.poblacion[0])
    if guardar:
        neat.guardar_neat(c.n,'checkpoint/gen'+str(c.n.generacion))
        print("Archivo:",'checkpoint/gen'+str(c.n.generacion)+'.pickle',"guardado")
'''
import cuatroenraya
c=cuatroenraya.CuatroEnRaya(alto=4,ancho=5,cantidad=100,imp=False,profundidad=2,f2='negamax')
c.entrenar()
'''
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Forest Crossman
# Copyright 2014 funoverip.net.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
import struct
from gnuradio import gru
from gnuradio.digital import packet_utils
# Framing constants for the radio packet (Python 2 byte-strings).
ACCESS_CODE = '\xFF\x00' * 2   # sync word preceding the payload
PREAMBLE = '\xAA' * 16         # alternating-bit preamble for receiver lock
PADDING = '\x00' * 10          # trailing zero padding after the payload
# 4b/6b encoder: maps each data nibble to a 6-bit DC-balanced symbol.
ENCODER_TABLE = {
    0x0: 0x15,
    0x1: 0x31,
    0x2: 0x32,
    0x3: 0x23,
    0x4: 0x34,
    0x5: 0x25,
    0x6: 0x26,
    0x7: 0x16,
    0x8: 0x1a,
    0x9: 0x19,
    0xa: 0x2a,
    0xb: 0x0b,
    0xc: 0x2c,
    0xd: 0x0d,
    0xe: 0x0e,
    0xf: 0x1c,
}
# CRC8 lookup table and function adapted from here:
# https://github.com/bewest/comlink2-uart/blob/master/lib.js
CRC8_TABLE = [
    0x00, 0x9b, 0xad, 0x36, 0xc1, 0x5a, 0x6c, 0xf7,
    0x19, 0x82, 0xb4, 0x2f, 0xd8, 0x43, 0x75, 0xee,
    0x32, 0xa9, 0x9f, 0x04, 0xf3, 0x68, 0x5e, 0xc5,
    0x2b, 0xb0, 0x86, 0x1d, 0xea, 0x71, 0x47, 0xdc,
    0x64, 0xff, 0xc9, 0x52, 0xa5, 0x3e, 0x08, 0x93,
    0x7d, 0xe6, 0xd0, 0x4b, 0xbc, 0x27, 0x11, 0x8a,
    0x56, 0xcd, 0xfb, 0x60, 0x97, 0x0c, 0x3a, 0xa1,
    0x4f, 0xd4, 0xe2, 0x79, 0x8e, 0x15, 0x23, 0xb8,
    0xc8, 0x53, 0x65, 0xfe, 0x09, 0x92, 0xa4, 0x3f,
    0xd1, 0x4a, 0x7c, 0xe7, 0x10, 0x8b, 0xbd, 0x26,
    0xfa, 0x61, 0x57, 0xcc, 0x3b, 0xa0, 0x96, 0x0d,
    0xe3, 0x78, 0x4e, 0xd5, 0x22, 0xb9, 0x8f, 0x14,
    0xac, 0x37, 0x01, 0x9a, 0x6d, 0xf6, 0xc0, 0x5b,
    0xb5, 0x2e, 0x18, 0x83, 0x74, 0xef, 0xd9, 0x42,
    0x9e, 0x05, 0x33, 0xa8, 0x5f, 0xc4, 0xf2, 0x69,
    0x87, 0x1c, 0x2a, 0xb1, 0x46, 0xdd, 0xeb, 0x70,
    0x0b, 0x90, 0xa6, 0x3d, 0xca, 0x51, 0x67, 0xfc,
    0x12, 0x89, 0xbf, 0x24, 0xd3, 0x48, 0x7e, 0xe5,
    0x39, 0xa2, 0x94, 0x0f, 0xf8, 0x63, 0x55, 0xce,
    0x20, 0xbb, 0x8d, 0x16, 0xe1, 0x7a, 0x4c, 0xd7,
    0x6f, 0xf4, 0xc2, 0x59, 0xae, 0x35, 0x03, 0x98,
    0x76, 0xed, 0xdb, 0x40, 0xb7, 0x2c, 0x1a, 0x81,
    0x5d, 0xc6, 0xf0, 0x6b, 0x9c, 0x07, 0x31, 0xaa,
    0x44, 0xdf, 0xe9, 0x72, 0x85, 0x1e, 0x28, 0xb3,
    0xc3, 0x58, 0x6e, 0xf5, 0x02, 0x99, 0xaf, 0x34,
    0xda, 0x41, 0x77, 0xec, 0x1b, 0x80, 0xb6, 0x2d,
    0xf1, 0x6a, 0x5c, 0xc7, 0x30, 0xab, 0x9d, 0x06,
    0xe8, 0x73, 0x45, 0xde, 0x29, 0xb2, 0x84, 0x1f,
    0xa7, 0x3c, 0x0a, 0x91, 0x66, 0xfd, 0xcb, 0x50,
    0xbe, 0x25, 0x13, 0x88, 0x7f, 0xe4, 0xd2, 0x49,
    0x95, 0x0e, 0x38, 0xa3, 0x54, 0xcf, 0xf9, 0x62,
    0x8c, 0x17, 0x21, 0xba, 0x4d, 0xd6, 0xe0, 0x7b
]
def crc8(data):
    """Compute the table-driven CRC-8 of *data* and return it as a 1-char string."""
    checksum = 0
    for ch in data:
        checksum = CRC8_TABLE[checksum ^ ord(ch)]
    return str(bytearray([checksum]))
def bits_to_bytes(bits):
    """Pack a sequence of 0/1 bits (MSB first) into a bytearray.

    The final byte is zero-padded on the right when len(bits) is not a
    multiple of 8.
    """
    encoded_bytes = bytearray(int(math.ceil(len(bits) / 8.0)))
    for i, bit in enumerate(bits):
        # BUG FIX: the original indexed with `i / 8`, which is a float under
        # Python 3 and raises TypeError; integer division works in both 2 and 3.
        encoded_bytes[i // 8] ^= bit << (7 - (i % 8))
    return encoded_bytes
def make_packet(payload, samples_per_symbol, bits_per_symbol, pad_for_usrp=True):
    """
    Build a packet
    Args:
        payload: packet payload, len [0, 4096]
        samples_per_symbol: samples per symbol (needed for padding calculation) (int)
        bits_per_symbol: (needed for padding calculation) (int)
        pad_for_usrp:
    Packet will have the preamble and access code at the beginning, followed by
    the encoded payload and an 8-bit CRC.

    NOTE(review): Python-2-only — relies on str being a byte string and on
    integer `/` division (the `(len(bits) % 8) / 2` below is a float in
    Python 3, which breaks list multiplication).
    """
    # CRC over the raw payload, appended before encoding.
    crc = crc8(payload)
    raw_message = ''.join((payload, crc))
    # 4b/6b encoding: each byte becomes two 6-bit symbols (high nibble first).
    encoded_nibbles = []
    for element in raw_message:
        for shift in [4, 0]:
            encoded_nibble = ENCODER_TABLE[(ord(element) >> shift) & 0xf]
            encoded_nibbles.append(encoded_nibble)
    # Nibble to bit conversion, MSB first within each 6-bit symbol.
    bits = []
    for element in encoded_nibbles:
        for bit_index in range(0, 6)[::-1]:
            bit = (element >> bit_index) & 0x01
            bits.append(bit)
    # Bit padding to a byte boundary: every byte yields 12 bits, so the
    # remainder is 0 or 4 and the [0,1] pattern fills exactly 4 bits.
    if len(bits) % 8:
        padding = [0, 1] * ((len(bits) % 8) / 2)
        bits.extend(padding)
    # Convert the bits to bytes
    encoded_message = str(bits_to_bytes(bits))
    # Prepend the preamble and sync words/access code to the message
    packet = ''.join((PREAMBLE, ACCESS_CODE, encoded_message, PADDING))
    # Padding (optional): round the packet up to the USRP's block size.
    if pad_for_usrp:
        usrp_packing = packet_utils._npadding_bytes(len(packet), samples_per_symbol, bits_per_symbol) * '\x00'
        packet = packet + usrp_packing
    return packet
|
def func1():
    """Print this module's first demo message."""
    banner = "Module 1"
    print(banner)

def func2():
    """Print this module's second demo message."""
    message = "Module 1 function 2"
    print(message)

# Module-level demo constant.
number1 = 21
# Copyright (c) 2011, James Hanlon, All rights reserved
# This software is freely distributable under a derivative of the
# University of Illinois/NCSA Open Source License posted in
# LICENSE.txt and at <http://github.xcore.com/>
from math import floor
import error
import ast
from walker import NodeWalker
class EvalExpr(NodeWalker):
    """
    Evaluate a constant-valued expression. Return None if this is not possible.
    NOTE: this doesn't observe any precidence rules.
    """
    def __init__(self, debug=False):
        # debug: when True, trace identifier resolution to stdout.
        self.debug = debug

    # Expressions =========================================
    def expr_single(self, node):
        # Single-element expression: value of the element itself.
        return self.elem(node.elem)

    def expr_unary(self, node):
        # Unary minus / bitwise-not; a non-constant operand propagates None.
        a = self.elem(node.elem)
        if a == None:
            return None
        if node.op == '-': return -a
        elif node.op == '~': return ~a
        else:
            assert 0

    def expr_binop(self, node):
        # Left operand is the node's element; right is the rest of the
        # expression (hence the "no precedence" caveat above).
        a = self.elem(node.elem)
        b = self.expr(node.right)
        if a == None or b == None:
            return None
        # Comparison ops yield 1/0 integers, matching the target language's
        # boolean representation.
        if node.op == '+': return a + b
        elif node.op == '-': return a - b
        elif node.op == '*': return a * b
        elif node.op == '/': return floor(a / b)
        elif node.op == 'rem': return floor(a % b)
        elif node.op == 'or': return a | b
        elif node.op == 'and': return a & b
        elif node.op == 'xor': return a ^ b
        elif node.op == '<<': return a << b
        elif node.op == '>>': return a >> b
        elif node.op == '<': return 1 if a < b else 0
        elif node.op == '>': return 1 if a > b else 0
        elif node.op == '<=': return 1 if a <= b else 0
        elif node.op == '>=': return 1 if a >= b else 0
        elif node.op == '=': return 1 if a == b else 0
        elif node.op == '~=': return 1 if a != b else 0
        else:
            assert 0

    # Elements= ===========================================
    def elem_id(self, node):
        # Resolve an identifier through its symbol's constant value, if known.
        if self.debug:
            print('eval: id: '+node.name+', {}'.format(node.symbol))
        s = node.symbol
        if self.debug and s.value != None:
            print('Evaluating elem: '+node.name+', {} = {}'.format(s, s.value))
        return s.value if (s != None and s.value != None) else None

    def elem_group(self, node):
        # Parenthesised subexpression.
        return self.expr(node.expr)

    def elem_number(self, node):
        return node.value

    def elem_boolean(self, node):
        return node.value

    def elem_char(self, node):
        return node.value

    # Disallowed: these can never be constant-folded, so they return None.
    def elem_fcall(self, node):
        return None

    def elem_sub(self, node):
        return None

    def elem_slice(self, node):
        return None

    def elem_index(self, node):
        return None

    def elem_string(self, node):
        return None
|
from decimal import Decimal
from django.shortcuts import get_object_or_404
from apps.bitcoin_crypto.utils import create_connection
from .models import TransactionFee
def set_mining_fees(mining_fees):
    """Set the wallet's transaction fee via the bitcoin RPC connection.

    Returns True on success, False if the RPC call fails for any reason.
    """
    access = create_connection()
    try:
        access.settxfee(mining_fees)
        return True
    except Exception:
        # Deliberate best-effort: callers only need a success flag.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return False
def get_transaction_fee(amount, currency, fee_type):
    """Return the transaction fee for (amount, currency, fee_type).

    Falls back to Decimal(0) when no fee configuration exists for the
    currency/fee_type pair, or when no fee range covers the amount.
    """
    # get_object_or_404 raises Http404 when missing; narrowed from a bare
    # `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        transaction_fee_obj = get_object_or_404(TransactionFee, currency=currency, fee_type=fee_type)
    except Exception:
        return Decimal(0)
    fee_limits = transaction_fee_obj.transactionfeerange_set.all()
    # Pick the range with the highest threshold the amount still reaches.
    fee_obj = None
    for fee_limit in fee_limits:
        if Decimal(fee_limit.value) <= Decimal(amount):
            if fee_obj is None or Decimal(fee_obj.value) < Decimal(fee_limit.value):
                fee_obj = fee_limit
    if not fee_obj:
        return Decimal(0)
    # Apply the configured rate type: percentage of the amount, or flat fee.
    if transaction_fee_obj.rate_type == 'percentage':
        return Decimal(amount) * Decimal(fee_obj.fees) / 100
    return Decimal(fee_obj.fees)
|
# @Title: 替换空格 (替换空格 LCOF)
# @Author: 2464512446@qq.com
# @Date: 2020-07-21 14:50:20
# @Runtime: 40 ms
# @Memory: 13.3 MB
class Solution:
    def replaceSpace(self, s: str) -> str:
        """Return *s* with every space replaced by '%20' (URL-style escape)."""
        # Collect the pieces and join once, avoiding quadratic concatenation.
        pieces = ["%20" if ch == ' ' else ch for ch in s]
        return "".join(pieces)
|
# import urllib
import hashlib
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_auth.serializers import UserDetailsSerializer
User = get_user_model()
class UserSerializer(UserDetailsSerializer):
    """
    The user serializer is child of :model: `rest_auth.UserDetailsSerializer`
    which is an extenstion. To generate gravatar url based on the given
    email.
    """
    # Allowed values for the extra `gender` field.
    GENDER_CHOICES = (
        ("M", "Male"),
        ("F", "Female"),
        ("O", "Other"),
        ("N", "Not Willing to say"),
    )
    # Computed field: resolved by get_profile_url() below.
    profile_url = serializers.SerializerMethodField()
    gender = serializers.ChoiceField(GENDER_CHOICES)

    class Meta(UserDetailsSerializer.Meta):
        fields = (
            "username",
            "email",
            "first_name",
            "last_name",
            "profile_url",
            "gender",
        )
        # Username and email are immutable through this serializer.
        read_only_fields = UserDetailsSerializer.Meta.read_only_fields + (
            "username",
            "email",
        )
        # exclude = ('pk', )

    def get_profile_url(self, object):
        # Gravatar identifies accounts by the MD5 of the lowercased email;
        # `d=identicon` gives a generated avatar when none is registered.
        const_url = "https://www.gravatar.com/avatar/"
        url = object.email.lower().encode()
        url = hashlib.md5(url).hexdigest()
        return "{}{}?d=identicon".format(const_url, url)
|
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pickle as p
import os
from Games import utils_draws as ud
from Games import UnimodalGame as ug
from Environments import Rank1Env as r1e
from Policies import OSUB
from Policies import UTS
if __name__ == "__main__":
## Parameters
draws_dir = "./results/draws"
horizon = 10000
nb_row = 3
nb_col = 3
results_dir = f"./results/pair_row-{nb_row}_col-{nb_col}_horizon-{horizon}"
if not os.path.exists(results_dir):
os.mkdir(results_dir)
## Load draws in advance
pair_draw_pickle = f'pair_row-{nb_row}_col-{nb_col}_horizon-{horizon}.p'
with open(os.path.join(draws_dir, pair_draw_pickle), 'rb') as f:
draws_dict = p.load(f)
draws_in_advance = draws_dict["draws_in_advance"]
list_draw_leader_every = [3,5]
regrets_OSUB = []
regrets_UTS = []
for draw_leader_every in list_draw_leader_every:
output_dir = os.path.join(results_dir, f'draw_leader_every_{draw_leader_every}')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
## OSUB
OSUB_rank1_env = r1e.create_rank1env(draws_dict)
OSUB_policy = OSUB.OSUB(draw_leader_every=draw_leader_every)
osub_game = ug.UnimodalGame(environment=OSUB_rank1_env,
policy=OSUB_policy,
horizon=horizon)
osub_game.playGame()
osub_game.plot_and_save(output_dir=output_dir,
show_regret=True,
show_arm=True,
show_mu_hat=True,
show_leader=True,
save_game=True)
regrets_OSUB.append(osub_game.regret_history)
## UTS
UTS_rank1_env = r1e.create_rank1env(draws_dict)
UTS_policy = UTS.UTS(draw_leader_every=draw_leader_every)
UTS_game = ug.UnimodalGame(environment=UTS_rank1_env,
policy=UTS_policy,
horizon=horizon)
UTS_game.playGame()
UTS_game.plot_and_save(output_dir=output_dir,
show_regret=True,
show_arm=True,
show_mu_hat=True,
show_leader=True,
save_game=True)
regrets_UTS.append(UTS_game.regret_history)
plt.figure(figsize=(10,10))
|
import re
# Read the input string and find all start indices of "AB" and "BA".
a = input()
x = [m.start() for m in re.finditer('AB', a)]
y = [m.start() for m in re.finditer('BA', a)]
res = 0
# Walk the "AB" list forward (i) / backward (k) and the "BA" list backward (j)
# / forward (l), looking for a non-overlapping "AB"/"BA" pair: occurrences
# whose start indices differ by more than 1 cannot share a character.
i = 0
j = len(y) - 1
k = len(x) - 1
l = 0
while i < len(x) and j > -1 and k > -1 and l < len(y):
    if abs(x[i] - y[j]) > 1 or abs(x[k] - y[l]) > 1:
        res = 1
    i += 1
    j -= 1
    k -= 1
    l += 1
# YES if some non-overlapping AB/BA pair was found.
if res:
    print('YES')
else:
    print('NO')
# NOTE(review): Python 2 syntax (print statements) — will not run under Python 3.
import pywapi
import string
# Fetch current conditions for Yahoo! Weather location code JAXX0030.
result = pywapi.get_weather_from_yahoo('JAXX0030')
# Dump the interesting fields of the response dict.
print '---'
print result['title']
print 'city: ' + result['location']['city']
print 'date_time: ' + result['condition']['date']
print 'condition: ' + result['condition']['text']
print 'temp: ' + result['condition']['temp'] + '('+ result['units']['temperature'] + ')'
print 'geo(lat,long): ' + result['geo']['lat'] + ',' + result['geo']['long']
print '---'
|
from datetime import datetime

# Capture the current timestamp once so the banner is consistent.
now = datetime.now()
# Simple banner printed on each cron invocation (useful for log verification).
print("=============================")
print("Now :", now)
print("hello world!")
print("Welcome to python cron job")
print("=============================")
|
"""
#------------------------------------------------------------------------------
# generate_pulse_fit.py
#
# This script iterates through a normalized acceleration period and
# performs a curve fit to the calculated response versus the response
# of a pure impulse. The specific application is to a boom crane.
#
#
# Created: 6/20/17 - Daniel Newman -- dmn3669@louisiana.edu
#
# Modified:
# * 6/20/17 - DMN -- dmn3669@louisiana.edu
# - Added documentation for this script
#------------------------------------------------------------------------------
"""
# Ignore user warnings to keep the terminal clean
import warnings
warnings.simplefilter("ignore", UserWarning)
# Import the necessary python library modules
import numpy as np
from control import matlab
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import os
import sys
import pdb
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
import InputShaping as shaping
import Generate_Plots as genplt
import Boom_Crane as bc
folder = 'Figures/{}/'.format(
    sys.argv[0],
)
# Constants
DEG_TO_RAD = np.pi / 180
G = 9.81   # gravitational acceleration, m/s^2
############################################################
#
# Initialize the boom crane
#
############################################################
scaling = 10
Boom = 0.89 * scaling    # boom length, m
Cable = 0.35 * scaling   # cable length, m
Vmax=5.0                 # maximum slew velocity
Amax = 50.0              # maximum acceleration
Luff_vals = np.array([30.,60.])  # luff angles, degrees
Tmax=15.0                # simulation duration, s
Tstep=0.01               # simulation time step, s
normalized_amp=0.0
normphase= 90
Startt=np.array([0.00])
p = bc.init_crane( Boom,
                   Cable,
                   Amax,
                   Vmax,
                   Luff_vals,
                   Tmax,
                   Tstep,
                   normalized_amp,
                   normphase,
                   Startt=Startt
                   )
# Unpack the crane model parameters produced by init_crane.
[Amax,Vmax], l, r, StartTime, t_step,t,X0,Distance = p
############################################################
#
# Compute the approximate impulse response of the system
#
############################################################
# Approximate natural frequency
omega = np.sqrt((G - r * Vmax**2 * np.sin(X0[2]))/ l)
# Acceleration characteristics of the impulse response.
tau = 2 * np.pi / omega
# Normalized acceleration and damping ratio values over which we will iterate
Tacc_array=np.arange(0.01,1.0,0.01)
gamma0_array = np.deg2rad(np.arange(0,90,1))
# Assume that the crane begins at rest
X0_impulse = np.array(X0)
X0_impulse[0] = 0.
X0_impulse[1] = 0.
zeta = 0.0
# Ensure that the folder we want to save to exists
if not os.path.exists('Data'):
    os.makedirs('Data')
# We will write our values to a plain text file so that we can manipulate them later
data = open('Data/bc_imp_amp_low.txt','w')
data.write('Amplitude, Phi, Offset, Normalized Tacc, Gamma0, Fit Correlation \n')
# Arbitrarily chosen frequency
modeled_freq = 1.
# Sweep every (normalized acceleration time, initial angle) combination and
# fit the simulated boom-crane response to a shifted pure-impulse model.
for i in range(0,len(Tacc_array)):
    for j in range(0,len(gamma0_array)):
        X0_impulse[2] = gamma0_array[j]
        tau = 2 * np.pi / omega
        t_acc = tau * Tacc_array[i]
        norm_tacc = t_acc / tau
        #Tconst=Dist/Vmax
        Amax = Vmax / t_acc
        acc=1/t_acc
        # Compute the pulse response
        p_0 = [[Amax,Vmax], l, r, StartTime, t_step,t,X0_impulse,np.array([100.])]
        impulse_response = bc.response(p_0,['Unshaped'])
        # These shift values are for a simple undamped linear system
        phase_shift = np.pi * norm_tacc
        amplitude_shift = (0.64781685 * norm_tacc**3 -1.30590826 * norm_tacc**2 -0.34197296*norm_tacc + 1) \
            / (0.35026643 * norm_tacc**2 -0.34197296*norm_tacc + 1)
        # The fit of a boom crane response to a simple linear system
        def fit_response(t,phi,amplitude,offset):
            return (np.sin(omega * t - phi - phase_shift) * amplitude_shift * amplitude * Vmax * r * np.sin(X0[2]) * omega + offset)
        # Guess values for the fit
        guess_phi = 0.
        amp_guess = np.amax(impulse_response[:,0])
        guess_offset = 0.
        p0 = [guess_phi,amp_guess,guess_offset]
        # Bounds for the curve fit
        bounds = np.array([tuple((-2*np.pi,2*np.pi)),tuple((0,np.inf)),tuple((0,np.inf))])
        bounds = bounds.T
        # Let's only consider the response after it has accelerated to maximum velocity
        tacc_step = np.round(t_acc / t_step).astype(int)
        tau_step = int(np.round(tau/t_step))
        # now do the fit
        fit = curve_fit(
            fit_response,
            t[tacc_step:2*tau_step],
            l * np.sin(impulse_response[tacc_step:2*tau_step,0]),
            p0=p0,method='trf',
            bounds=bounds
        )
        fit = fit[0]
        #print(fit)
        # Phase shift values should all be positive
        if fit[0] < 0:
            fit[0] += 2 * np.pi
        # The fit values are given in terms of shifts of the boom crane response
        ic_phaseshift = fit[0]
        ic_ampshift = fit[1]
        ic_offset = fit[2]
        tacc_step = np.round(t_acc / Tstep).astype(int)
        # Score the fit over one period with an R^2 correlation.
        response_fit = fit_response(t[tacc_step:tau_step],*fit)
        corr_coeff = r2_score(l * np.sin(impulse_response[tacc_step:tau_step,0]), response_fit)
        print(corr_coeff)
        data.write('{}, {}, {}, {}, {}, {} \n'.format(
            np.round(float(ic_ampshift),4),
            np.round(float(ic_phaseshift),4),
            np.round(float(ic_offset),4),
            np.round(Tacc_array[i],4),
            np.round(gamma0_array[j],4),
            np.round(corr_coeff,3)))
data.close()
# Generated by Django 2.2.11 on 2020-06-02 10:19
from django.db import migrations, models
import django.db.models.deletion

class Migration(migrations.Migration):
    # Adds the Size model and links detskapostel to it via a ForeignKey
    # (keyed on Size.name rather than the primary key).

    dependencies = [
        ('detskoePostelnoe', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Size',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, default=None, max_length=120, null=True, unique=True)),
                ('slug', models.SlugField(blank=True, default=None, null=True, verbose_name='Транслит')),
            ],
            options={
                'verbose_name': 'Размер',
                'verbose_name_plural': 'Размер',
            },
        ),
        migrations.AddField(
            model_name='detskapostel',
            name='size',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='detskoePostelnoe.Size', to_field='name', verbose_name='Размер'),
        ),
    ]
|
#!/usr/bin/python3
# -*- coding=utf-8 -*-
import email
import imaplib
import os
import re
import subprocess
import sys
from datetime import datetime
from script.process_html_table import ProcessHtmlTable
# 测试模式
debug = False
# 定义目录
local_path = sys.path[0]
config_path = local_path + '/config.json'
# 检查配置文件
if os.path.exists(config_path):
r = open(config_path, 'r')
r_read = r.readlines()
r.close()
config_json = ''
for i in r_read:
config_json = config_json + i
config_json = eval(config_json)
else:
print("config file is not exists!")
sys.exit(0)
if debug:
wechat_mod = 'app_test'
else:
wechat_mod = 'app_main'
# 邮件配置
imap_host = config_json['email']['host']
imap_port = config_json['email']['port']
imap_username = config_json['email']['username']
imap_passwd = config_json['email']['passwd']
# 微信配置
corpid = config_json['wechat'][wechat_mod]['corpid']
corpsecret = config_json['wechat'][wechat_mod]['corpsecret']
app_id = config_json['wechat'][wechat_mod]['app_id']
group_id = config_json['wechat'][wechat_mod]['group_id']
# 脚本的相对路径
script_path = '/usr/local/shell/wechat_msg/main.py'
# script_path = '/Users/terence/Documents/git_home/wechat_msg/main.py'
comm = imaplib.IMAP4_SSL(imap_host, imap_port)
comm.login(imap_username, imap_passwd)
# print(comm.list())
mail_folder = ['jiankongyi', 'dce']
# 获取邮件uid
def get_uid_list(folder_name):
comm.select(folder_name)
response, uid_list = comm.uid('search', None, 'ALL')
uid_list = uid_list[0].decode().split(' ')
return uid_list
# 获取邮件
def get_mail_data(folder_name, mail_uid):
comm.select(folder_name)
response, mail_data = comm.uid('fetch', mail_uid, '(RFC822)')
return mail_data
# 获取日期
today_date = datetime.today().date()
today_date = str(today_date).replace('-', '_')
# 定义目录
local_path = sys.path[0]
data_store_dir = local_path + '/datastore/' + today_date + '/'
for i in mail_folder:
mail_content_dir = data_store_dir + i
if os.path.exists(mail_content_dir):
if not os.path.exists(mail_content_dir):
os.mkdir(mail_content_dir)
else:
os.makedirs(mail_content_dir)
# 保存邮件
def write_content_to_file(folder, mail_id, file_content):
file_path = data_store_dir + folder + '/' + str(mail_id) + '.eml'
file_content = file_content[0][1].decode()
file_content = email.message_from_string(file_content)
j_file = open(file_path, 'w')
j_file.write(str(file_content))
j_file.close()
# 写入日志
def write_log_to_file(folder, mail_uid):
file_path = local_path + '/' + '.temp.log'
if os.path.exists(file_path):
if os.path.getsize(file_path):
f = open(file_path, 'r')
f_dict = f.readline()
f_dict = eval(f_dict)
f_dict[folder] = mail_uid
f = open(file_path, 'w')
f.write(str(f_dict))
f.close()
else:
f_dict = dict()
f = open(file_path, 'w')
f_dict[folder] = mail_uid
f.write(str(f_dict))
f.close()
else:
f_dict = dict()
f = open(file_path, 'w')
f_dict[folder] = mail_uid
f.write(str(f_dict))
f.close()
def check_last_mail_uid(folder, mail_uid):
    """Compare `mail_uid` with the last UID logged for `folder`.

    Returns a (status, value) pair with the exact legacy semantics:
      (None,  None)  - log file does not exist yet
      (False, None)  - log file exists but is empty
      (False, False) - log has no entry for this folder
      (True,  last)  - mail_uid is newer than the logged UID `last`
      (False, True)  - mail_uid is not newer (already processed)
    """
    file_path = local_path + '/' + '.temp.log'
    if not os.path.exists(file_path):
        return None, None
    if not os.path.getsize(file_path):
        return False, None
    # The original leaked this file handle; close it deterministically.
    with open(file_path, 'r') as log_file:
        log_dict = eval(log_file.readline())
    if folder not in log_dict:
        return False, False
    last_uid = log_dict[folder]
    if int(mail_uid) > int(last_uid):
        return True, last_uid
    return False, True
def get_charset(content):
    """Extract the lower-cased charset value from a raw message header string."""
    first_match = re.compile(r'charset\=.*').findall(content)[0]
    return first_match.split('=')[1].lower()
def check_table(email_content):
    """Heuristic: the mail body contains an HTML table when at least four
    <html>/<tr>/<td> tags are present."""
    tag_pattern = re.compile(r'<html>|<tr>|<td>')
    return len(tag_pattern.findall(email_content)) >= 4
def process_mail_content(content):
    """Decode a fetched message and return its text payload (table-processed
    when it looks like an HTML table).

    `content[0][1]` is the raw RFC822 bytes from the IMAP fetch result.
    """
    j = content[0][1].decode()
    j = email.message_from_string(j)
    mail_charset = get_charset(str(j))
    f = ''
    # NOTE(review): get_content_type() always returns a non-empty string, so
    # this condition is always true; and `f` is rebound every iteration, so
    # only the LAST walked part survives the loop — confirm this is intended
    # for multipart messages.
    for f in j.walk():
        if f.get_content_type():
            f = f.get_payload(decode=True)
            f = f.decode(mail_charset)
    if check_table(f):
        f = ProcessHtmlTable.process_table(f)
    return f
def sent_msg_to_wechat(wechat_msg):
    """Forward a message to WeChat by invoking the sender script via subprocess."""
    # Parameters consumed by the sender script (key order preserved).
    script_parameter = {
        'corpid': corpid,
        'corpsecret': corpsecret,
        'msg': wechat_msg,
        'app_id': app_id,
        'group_id': group_id,
    }
    # Build and run the shell command; the script receives the dict repr.
    cmd = ['python3', script_path, str(script_parameter)]
    sender = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    # Wait for completion; stdout is discarded.
    sender.communicate()
# Main loop: for each watched folder, forward any mail newer than the last
# logged UID to WeChat, saving each message and updating the UID log.
for i in mail_folder:
    mail_uid_list = get_uid_list(i)
    mail_uid_new = mail_uid_list[-1]
    check_mail = check_last_mail_uid(i, mail_uid_new)
    check_mail_data_check = check_mail[0]
    check_mail_value = check_mail[1]
    if check_mail_data_check:
        # Log entry exists and newer mail arrived: process every UID after it.
        mail_uid_old = int(check_mail_value)
        while mail_uid_old < int(mail_uid_new):
            mail_uid_old += 1
            mail_content = get_mail_data(i, str(mail_uid_old))
            if mail_content[0] is None:
                pass  # UID gap (e.g. deleted message): nothing to fetch
            else:
                write_content_to_file(i, mail_uid_old, mail_content)
                mail_content = process_mail_content(mail_content)
                write_log_to_file(i, mail_uid_old)
                sent_msg_to_wechat(mail_content)
    elif check_mail_data_check is None:
        # No log file yet: process the whole folder from scratch.
        for k in mail_uid_list:
            mail_content = get_mail_data(i, k)
            if mail_content[0] is None:
                pass
            else:
                write_content_to_file(i, k, mail_content)
                mail_content = process_mail_content(mail_content)
                write_log_to_file(i, k)
                sent_msg_to_wechat(mail_content)
    else:
        if check_mail_value:
            pass  # newest UID already processed — nothing to do
        elif check_mail_value is None:
            # Log file exists but is empty: process the whole folder.
            for k in mail_uid_list:
                mail_content = get_mail_data(i, k)
                if mail_content[0] is None:
                    pass
                else:
                    write_content_to_file(i, k, mail_content)
                    mail_content = process_mail_content(mail_content)
                    write_log_to_file(i, k)
                    sent_msg_to_wechat(mail_content)
        else:
            # Log exists but has no entry for this folder: process everything,
            # then record the last UID seen.
            file_path = local_path + '/' + '.temp.log'
            for k in mail_uid_list:
                mail_content = get_mail_data(i, k)
                if mail_content[0] is None:
                    pass
                else:
                    write_content_to_file(i, k, mail_content)
                    mail_content = process_mail_content(mail_content)
                    sent_msg_to_wechat(mail_content)
            # NOTE(review): `k` is whatever UID the loop ended on, and the
            # read handle below is never closed before re-opening for write —
            # consider write_log_to_file(i, k) here instead.
            j = open(file_path, 'r')
            log_dict = j.readline()
            log_dict = eval(log_dict)
            log_dict[i] = k
            j = open(file_path, 'w')
            j.write(str(log_dict))
            j.close()
comm.logout()
|
from test_plus import TestCase
from zhihu.articles.models import Article
class ArticleModelsTest(TestCase):
    """Model-level tests for Article: custom managers and slug generation."""

    def setUp(self) -> None:
        # Fixture: one draft ('D') and one published ('P') article by one user.
        self.user = self.make_user()
        self.draft = Article.objects.create(
            user=self.user,
            title='第一篇文章',
            content='测试',
            image='articles_pictures/2019/07/06/1.png',
            status='D',
            tags=['a', 'b']
        )
        self.published = Article.objects.create(
            user=self.user,
            title='第二篇文章',
            content='测试',
            image='articles_pictures/2019/07/06/1.png',
            status='P',
            tags=['a', 'b']
        )

    def test_object_instance(self):
        '''Instances are Article objects; managers filter drafts vs published;
        the slug is pinyin-transliterated from the Chinese title.'''
        assert isinstance(self.draft, Article)
        assert isinstance(self.published, Article)
        # NOTE(review): 'get_publushed' looks misspelled, but it must match
        # the manager method name declared on Article — verify before renaming.
        assert Article.objects.get_publushed().count() == 1
        assert Article.objects.get_publushed().first() == self.published
        assert Article.objects.get_drafts().count() == 1
        assert Article.objects.get_drafts().first() == self.draft
        assert self.draft.slug == 'di-yi-pian-wen-zhang'
    # def test_return_value(self):
    #     '''测试返回值'''
    #
    #
|
"""
LeetCode - Medium
"""
"""
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cells, where "adjacent" cells are horizontally or vertically neighboring. The same letter cell may not be used more than once.
Example 1:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED"
Output: true
Example 2:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE"
Output: true
Example 3:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
Output: false
Constraints:
board and word consists only of lowercase and uppercase English letters.
1 <= board.length <= 200
1 <= board[i].length <= 200
1 <= word.length <= 10^3
"""
class Solution:
    """Word Search (LeetCode 79): DFS over the grid with in-place visited marks."""

    def exist(self, board, word: str) -> bool:
        """Return True if `word` can be traced through horizontally/vertically
        adjacent cells of `board`, using each cell at most once.

        Accepts either a 2-D grid (list of lists of 1-char strings) or — for
        backward compatibility with the demo below, which builds a 1-tuple via
        a trailing comma — a single-element tuple/list wrapping the grid.
        """
        # Unwrap the legacy single-element wrapper: (grid,) -> grid.
        # (The original did `board = board[0]` unconditionally, which broke
        # any caller passing a plain 2-D board.)
        if len(board) == 1 and board[0] and isinstance(board[0][0], (list, tuple)):
            board = board[0]
        if not word:
            return True  # the empty word is trivially present
        for row in range(len(board)):
            for col in range(len(board[row])):
                if board[row][col] == word[0] and self.dfs(row, col, word, 0, board):
                    return True
        return False

    def dfs(self, row, col, word, index, board):
        """Return True if word[index:] can be traced starting at (row, col).

        Marks the current cell with the sentinel '#' while exploring so it
        cannot be reused. The original flipped the letter's case instead,
        which gives false positives when `word` mixes cases (a visited 'A'
        became 'a' and could wrongly match a later 'a' in the word).
        """
        if not (0 <= row < len(board) and 0 <= col < len(board[0])):
            return False
        if board[row][col] != word[index]:
            return False
        if index + 1 == len(word):
            return True
        saved = board[row][col]
        board[row][col] = '#'  # sentinel: never equals a board letter
        found = (self.dfs(row - 1, col, word, index + 1, board)
                 or self.dfs(row + 1, col, word, index + 1, board)
                 or self.dfs(row, col - 1, word, index + 1, board)
                 or self.dfs(row, col + 1, word, index + 1, board))
        board[row][col] = saved  # backtrack: restore the cell
        return found
if __name__ == '__main__':
    # NOTE: the trailing comma after the closing bracket wraps the grid in a
    # 1-tuple; Solution.exist unwraps it before searching.
    board = [["A", "B", "C", "E"],
             ["S", "F", "C", "S"],
             ["A", "D", "E", "E"]],
    word = "ABCCED"
    word = "SEE"   # overwrites the previous value
    word = "ABCB"  # final value actually searched (expected output: False)
    print(Solution().exist(board, word))
|
# Compute the volume of a rectangular block (balok) from fixed dimensions.
print("Menghitung Volume Balok Dengan Python")

# Dimensions: p = length, l = width, t = height.
p, l, t = 5, 7, 4

# Volume of a block = length * width * height.
volume = p * l * t
print ("Hasilnya adalah = ", volume)
# Strings (character sequences): a tour of common str methods.
frase = str(' Curso em Video Python ')
print(len(frase)) # length of the string
print(frase.count('o')) # how many times the letter appears
print(frase.count('o',0, 13)) # occurrences within a range (end index excluded)
print(frase.find('deo')) # index where the substring starts
print(frase.find('android')) # returns -1: substring not found
print('Curso' in frase) # membership test
# Transformation
print(frase.replace('Python','Android')) # replace the first str with the second
print(frase.upper()) # every letter upper-case
print(frase.lower()) # every letter lower-case
print(frase.capitalize()) # only the first letter of the sentence upper-case
print(frase.title()) # first letter of every word upper-case
print(frase.strip()) # trim extra whitespace from both ends
print(frase.rstrip()) # trim only trailing (right) whitespace
print(frase.lstrip()) # trim only leading (left) whitespace
# Splitting
print(frase.split()) # split the sentence into a list of words
frase = frase.split() # rebind to the list so it can be indexed below
print(frase[1]) # one word from the list
print(frase[0][1:3]) # a slice of letters inside a word
print(' '.join(frase)) # join the words back; the quoted text is the separator
|
# Generated by Django 2.1.1 on 2018-10-05 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.1): default ordering for Category and a new
    optional `summary` field on Post."""

    dependencies = [
        ('posts', '0006_auto_20180926_0034'),
    ]

    operations = [
        # Order categories by title, then by creation time.
        migrations.AlterModelOptions(
            name='category',
            options={'ordering': ['title', 'timestamp']},
        ),
        # Short optional teaser text for a post.
        migrations.AddField(
            model_name='post',
            name='summary',
            field=models.CharField(blank=True, max_length=240, null=True),
        ),
    ]
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
import lightgbm
from workalendar.asia import SouthKorea # 한국의 공휴일, version : 1.1.1
from utils import WindowGenerator, save_results
#%%
class Config():
    """Hyper-parameter bundle for the maximum-power forecasting experiments.

    Window sizes are in days; model parameters cover both the CNN/LSTM
    variants and this linear-regression baseline.
    """
    def __init__(
        self,
        input_width = 14,    # days of history fed to the model
        label_width = 7,     # days to predict
        shift = 7,           # offset between end of input and start of labels
        label_columns = ["Maximum_Power_This_Year"],
        batch_size = 32,
        features = ["meteo", "covid", "gas", "exchange"],  # auxiliary feature groups (subsets were tried)
        filters = 64,
        kernel_size = 3,
        activation = 'relu',
        lstm_units = 100,
        attn_units = 100,
        learning_rate = 0.001,
        epochs = 1000,
        verbose = 0,
        aux1 = False,
        aux2 = False,
        is_x_aux1 = False,
        is_x_aux2 = False,
        # NOTE(review): 'linaer_regresson' looks misspelled, but it is used as
        # a run identifier — renaming may break existing output paths.
        trial = "linaer_regresson"
    ):
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.label_columns = label_columns
        self.batch_size = batch_size
        self.features = features
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.lstm_units = lstm_units
        self.attn_units = attn_units
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.verbose = verbose
        self.aux1 = aux1
        self.aux2 = aux2
        self.is_x_aux1 = is_x_aux1
        self.is_x_aux2 = is_x_aux2
        self.trial = trial
# Build the windowed dataset (14-day input, 7-day horizon) with the default
# experiment configuration.
config = Config()
data = WindowGenerator(
    input_width = config.input_width,
    label_width = config.label_width,
    shift = config.shift,
    label_columns = config.label_columns,
    batch_size = config.batch_size,
    features = config.features
)
#%%
X_train, y_train = data.train
X_test, y_test = data.test
# Flatten each (time, feature) window into one vector per sample for sklearn.
X_train = X_train.reshape((-1,X_train.shape[1]*X_train.shape[2]))
y_train = np.squeeze(y_train, axis=-1)
X_test = X_test.reshape((-1,X_test.shape[1]*X_test.shape[2]))
y_test = np.squeeze(y_test, axis=-1)
#%%
lr = LinearRegression()
lr.fit(X_train, y_train)
# Training-set R^2 (value displayed interactively in the notebook cell).
lr.score(X_train, y_train)
#%%
# Undo the scaling so predictions are compared in original units.
y_pred = data.inverse_transform(lr.predict(X_test))
y_true = data.inverse_transform(y_test)
# %%
import math
def root_mean_squared_error(y_true, y_pred):
    """Root-mean-squared error between two numpy arrays."""
    squared_error = (y_true - y_pred) ** 2
    return math.sqrt(squared_error.mean())
root_mean_squared_error(y_true[:,-1], y_pred[:,-1]) / (24*7)
#%%
def mean_absolute_percentage_error(y_true, y_pred):
    """Mean absolute percentage error (in %); y_true must be nonzero."""
    relative_error = np.abs((y_true - y_pred) / y_true)
    return np.mean(relative_error) * 100
# MAPE on the final forecast step, with the same questionable 24*7 scaling.
mean_absolute_percentage_error(y_true[:,-1], y_pred[:,-1]) / (24*7)
# %%
# Visual check: predicted vs. actual final-step values.
plt.plot(y_pred[:,-1])
plt.plot(y_true[:,-1])
save_results(config, "./outputs/ablation/lr_result.csv")
# %%
import os
# NOTE(review): this second call passes the prediction array where the call
# above passed an output path — verify save_results' expected arguments.
save_results(config, y_pred)
# %%
|
#!/usr/bin/env python
"""Example factory function for DES Y1 3x2pt likelihood."""
import os
from typing import Dict, Union, Tuple
import sacc
import pyccl as ccl
import pyccl.nl_pt
import firecrown.likelihood.gauss_family.statistic.source.weak_lensing as wl
import firecrown.likelihood.gauss_family.statistic.source.number_counts as nc
from firecrown.likelihood.gauss_family.statistic.two_point import TwoPoint
from firecrown.likelihood.gauss_family.gaussian import ConstGaussian
from firecrown.parameters import ParamsMap
from firecrown.modeling_tools import ModelingTools
from firecrown.likelihood.likelihood import Likelihood
# Location of the DES Y1 3x2pt sacc data file; requires the FIRECROWN_DIR
# environment variable to point at the firecrown checkout.
saccfile = os.path.expanduser(
    os.path.expandvars(
        "${FIRECROWN_DIR}/examples/des_y1_3x2pt/des_y1_3x2pt_sacc_data.fits"
    )
)
def build_likelihood(_) -> Tuple[Likelihood, ModelingTools]:
    """Likelihood factory function for DES Y1 3x2pt analysis.

    The single (ignored) argument is the standard factory configuration
    parameter. Returns the ConstGaussian likelihood plus the ModelingTools
    carrying the Eulerian perturbation-theory calculator.
    """
    # Load sacc file
    sacc_data = sacc.Sacc.load_fits(saccfile)

    # Define sources
    sources: Dict[str, Union[wl.WeakLensing, nc.NumberCounts]] = {}

    # Define the intrinsic alignment systematic. This will be added to the
    # lensing sources later
    ia_systematic = wl.TattAlignmentSystematic()

    # Define the photo-z shift systematic.
    src_pzshift = wl.PhotoZShift(sacc_tracer="src0")

    # Create the weak lensing source, specifying the name of the tracer in the
    # sacc file and a list of systematics
    sources["src0"] = wl.WeakLensing(
        sacc_tracer="src0", systematics=[src_pzshift, ia_systematic]
    )

    # Number-counts source with photo-z shift, magnification and nonlinear
    # PT bias systematics.
    lens_pzshift = nc.PhotoZShift(sacc_tracer="lens0")
    magnification = nc.ConstantMagnificationBiasSystematic(sacc_tracer="lens0")
    nl_bias = nc.PTNonLinearBiasSystematic(sacc_tracer="lens0")
    sources["lens0"] = nc.NumberCounts(
        sacc_tracer="lens0",
        has_rsd=True,
        systematics=[lens_pzshift, magnification, nl_bias],
    )

    # Define the statistics we like to include in the likelihood
    # The only place the dict 'stats' gets used, other than setting values in
    # it, is to call 'values' on it. Thus we don't need a dict, we need a list
    # of the values. The keys assigned to the dict are never used.
    stats = {}
    for stat, sacc_stat in [
        ("xip", "galaxy_shear_xi_plus"),
        ("xim", "galaxy_shear_xi_minus"),
    ]:
        # Define two-point statistics, given two sources (from above) and
        # the type of statistic.
        stats[f"{stat}_src0_src0"] = TwoPoint(
            source0=sources["src0"],
            source1=sources["src0"],
            sacc_data_type=sacc_stat,
        )
    # Galaxy-galaxy lensing (tangential shear) and galaxy clustering.
    stats["gammat_lens0_src0"] = TwoPoint(
        source0=sources["lens0"],
        source1=sources["src0"],
        sacc_data_type="galaxy_shearDensity_xi_t",
    )
    stats["wtheta_lens0_lens0"] = TwoPoint(
        source0=sources["lens0"],
        source1=sources["lens0"],
        sacc_data_type="galaxy_density_xi",
    )

    # Create the likelihood from the statistics
    pt_calculator = pyccl.nl_pt.EulerianPTCalculator(
        with_NC=True,
        with_IA=True,
        log10k_min=-4,
        log10k_max=2,
        nk_per_decade=20,
    )
    modeling_tools = ModelingTools(pt_calculator=pt_calculator)
    likelihood = ConstGaussian(statistics=list(stats.values()))

    # Read the two-point data from the sacc file
    likelihood.read(sacc_data)

    # an object called "likelihood" must be defined
    print(
        "Using parameters:", list(likelihood.required_parameters().get_params_names())
    )

    # To allow this likelihood to be used in cobaya or cosmosis,
    # return the likelihood object
    return likelihood, modeling_tools
# We can also run the likelihood directly
def run_likelihood() -> None:
    """Produce some plots using the likelihood function built by
    :python:`build_likelihood`.

    Evaluates the likelihood at fiducial IA/bias/magnification parameters and
    compares the firecrown angular power spectra against a bare pyccl
    computation of the same terms.
    """
    # We do imports here to save a bit of time when importing this module but
    # not using the run_likelihood function.
    # pylint: disable=import-outside-toplevel
    import numpy as np
    import matplotlib.pyplot as plt

    # pylint: enable=import-outside-toplevel
    likelihood, tools = build_likelihood(None)
    # Load sacc file
    sacc_data = sacc.Sacc.load_fits(saccfile)
    src0_tracer = sacc_data.get_tracer("src0")
    lens0_tracer = sacc_data.get_tracer("lens0")
    z, nz = src0_tracer.z, src0_tracer.nz
    lens_z, lens_nz = lens0_tracer.z, lens0_tracer.nz
    # Define a ccl.Cosmology object using default parameters
    ccl_cosmo = ccl.CosmologyVanillaLCDM()
    ccl_cosmo.compute_nonlin_power()
    # Bare CCL setup: fiducial IA amplitudes, galaxy bias and magnification.
    a_1 = 1.0
    a_2 = 0.5
    a_d = 0.5
    b_1 = 2.0
    b_2 = 1.0
    b_s = 1.0
    mag_bias = 1.0
    c_1, c_d, c_2 = pyccl.nl_pt.translate_IA_norm(
        ccl_cosmo, z=z, a1=a_1, a1delta=a_d, a2=a_2, Om_m2_for_c2=False
    )
    # Code that creates a Pk2D object:
    ptc = pyccl.nl_pt.EulerianPTCalculator(
        with_NC=True,
        with_IA=True,
        log10k_min=-4,
        log10k_max=2,
        nk_per_decade=20,
        cosmo=ccl_cosmo,
    )
    ptt_i = pyccl.nl_pt.PTIntrinsicAlignmentTracer(
        c1=(z, c_1), c2=(z, c_2), cdelta=(z, c_d)
    )
    ptt_m = pyccl.nl_pt.PTMatterTracer()
    ptt_g = pyccl.nl_pt.PTNumberCountsTracer(b1=b_1, b2=b_2, bs=b_s)
    # IA
    pk_im = ptc.get_biased_pk2d(ccl_cosmo, ptt_i, tracer2=ptt_m)
    pk_ii = ptc.get_biased_pk2d(ccl_cosmo, tracer1=ptt_i, tracer2=ptt_i)
    pk_gi = ptc.get_biased_pk2d(ccl_cosmo, tracer1=ptt_g, tracer2=ptt_i)
    # Galaxies
    pk_gm = ptc.get_biased_pk2d(ccl_cosmo, tracer1=ptt_g, tracer2=ptt_m)
    pk_gg = ptc.get_biased_pk2d(ccl_cosmo, tracer1=ptt_g, tracer2=ptt_g)
    # Magnification: just a matter-matter P(k)
    pk_mm = ptc.get_biased_pk2d(ccl_cosmo, tracer1=ptt_m, tracer2=ptt_m)
    # Set the parameters for our systematics
    systematics_params = ParamsMap(
        {
            "ia_a_1": a_1,
            "ia_a_2": a_2,
            "ia_a_d": a_d,
            "lens0_bias": b_1,
            "lens0_b_2": b_2,
            "lens0_b_s": b_s,
            "lens0_mag_bias": mag_bias,
            "src0_delta_z": 0.000,
            "lens0_delta_z": 0.000,
        }
    )
    # Apply the systematics parameters
    likelihood.update(systematics_params)
    # Prepare the cosmology object
    tools.prepare(ccl_cosmo)
    # Compute the log-likelihood, using the ccl.Cosmology object as the input
    log_like = likelihood.compute_loglike(tools)
    print(f"Log-like = {log_like:.1f}")
    # Plot the predicted and measured statistic
    # x = likelihood.statistics[0].ell_or_theta_
    # y_data = likelihood.statistics[0].measured_statistic_
    assert isinstance(likelihood, ConstGaussian)
    assert likelihood.cov is not None
    # y_err = np.sqrt(np.diag(likelihood.cov))[: len(x)]
    # y_theory = likelihood.statistics[0].predicted_statistic_
    # statistics[0] is shear-shear (xip), [2] is gammat, [3] is wtheta
    # (ordering follows the stats dict built in build_likelihood).
    print(list(likelihood.statistics[0].cells.keys()))
    ells = likelihood.statistics[0].ells
    cells_GG = likelihood.statistics[0].cells[("shear", "shear")]
    cells_GI = likelihood.statistics[0].cells[("shear", "intrinsic_pt")]
    cells_II = likelihood.statistics[0].cells[("intrinsic_pt", "intrinsic_pt")]
    cells_cs_total = likelihood.statistics[0].cells["total"]
    print(list(likelihood.statistics[2].cells.keys()))
    cells_gG = likelihood.statistics[2].cells[("galaxies", "shear")]
    cells_gI = likelihood.statistics[2].cells[("galaxies", "intrinsic_pt")]
    cells_mI = likelihood.statistics[2].cells[("magnification+rsd", "intrinsic_pt")]
    print(list(likelihood.statistics[3].cells.keys()))
    cells_gg = likelihood.statistics[3].cells[("galaxies", "galaxies")]
    cells_gm = likelihood.statistics[3].cells[("galaxies", "magnification+rsd")]
    cells_gg_total = likelihood.statistics[3].cells["total"]
    # Code that computes effect from IA using that Pk2D object
    t_lens = ccl.WeakLensingTracer(ccl_cosmo, dndz=(z, nz))
    t_ia = ccl.WeakLensingTracer(
        ccl_cosmo,
        dndz=(z, nz),
        has_shear=False,
        ia_bias=(z, np.ones_like(z)),
        use_A_ia=False,
    )
    t_g = ccl.NumberCountsTracer(
        ccl_cosmo,
        has_rsd=False,
        dndz=(lens_z, lens_nz),
        bias=(lens_z, np.ones_like(lens_z)),
    )
    t_m = ccl.NumberCountsTracer(
        ccl_cosmo,
        has_rsd=True,
        dndz=(lens_z, lens_nz),
        bias=None,
        mag_bias=(lens_z, mag_bias * np.ones_like(lens_z)),
    )
    cl_GI = ccl.angular_cl(ccl_cosmo, t_lens, t_ia, ells, p_of_k_a=pk_im)
    cl_II = ccl.angular_cl(ccl_cosmo, t_ia, t_ia, ells, p_of_k_a=pk_ii)
    # The weak gravitational lensing power spectrum
    cl_GG = ccl.angular_cl(ccl_cosmo, t_lens, t_lens, ells)
    # Galaxies
    cl_gG = ccl.angular_cl(ccl_cosmo, t_g, t_lens, ells, p_of_k_a=pk_gm)
    cl_gI = ccl.angular_cl(ccl_cosmo, t_g, t_ia, ells, p_of_k_a=pk_gi)
    cl_gg = ccl.angular_cl(ccl_cosmo, t_g, t_g, ells, p_of_k_a=pk_gg)
    # Magnification
    cl_mI = ccl.angular_cl(ccl_cosmo, t_m, t_ia, ells, p_of_k_a=pk_im)
    cl_gm = ccl.angular_cl(ccl_cosmo, t_g, t_m, ells, p_of_k_a=pk_gm)
    cl_mm = ccl.angular_cl(ccl_cosmo, t_m, t_m, ells, p_of_k_a=pk_mm)
    # The observed angular power spectrum is the sum of the two.
    cl_cs_theory = cl_GG + 2 * cl_GI + cl_II
    cl_gg_theory = cl_gg + 2 * cl_gm + cl_mm
    # Two stacked panels: cosmic shear terms (top), clustering terms (bottom).
    fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 6))
    fig.subplots_adjust(hspace=0)
    # ax[0].plot(x, y_theory, label="Total")
    ax[0].plot(ells, cells_GG, label="GG firecrown")
    ax[0].plot(ells, cl_GG, ls="--", label="GG CCL")
    ax[0].plot(ells, -cells_GI, label="-GI firecrown")
    ax[0].plot(ells, -cl_GI, ls="--", label="-GI CCL")
    ax[0].plot(ells, cells_II, label="II firecrown")
    ax[0].plot(ells, cl_II, ls="--", label="II CCL")
    ax[0].plot(ells, -cells_gI, label="-Ig firecrown")
    ax[0].plot(ells, -cl_gI, ls="--", label="-Ig CCL")
    ax[0].plot(ells, cells_cs_total, label="total CS firecrown")
    ax[0].plot(ells, cl_cs_theory, ls="--", label="total CS CCL")
    ax[1].plot(ells, cells_gG, label="Gg firecrown")
    ax[1].plot(ells, cl_gG, ls="--", label="Gg CCL")
    ax[1].plot(ells, cells_gg, label="gg firecrown")
    ax[1].plot(ells, cl_gg, ls="--", label="gg CCL")
    ax[1].plot(ells, -cells_mI, label="-mI firecrown")
    ax[1].plot(ells, -cl_mI, ls="--", label="-mI CCL")
    ax[1].plot(ells, cells_gm, label="gm firecrown")
    ax[1].plot(ells, cl_gm, ls="--", label="gm CCL")
    ax[1].plot(ells, cells_gg_total, label="total gg firecrown")
    ax[1].plot(ells, cl_gg_theory, ls="--", label="total gg CCL")
    # ax[0].errorbar(x, y_data, y_err, ls="none", marker="o")
    ax[0].set_xscale("log")
    ax[1].set_xlabel(r"$\ell$")
    ax[1].set_ylabel(r"$C_\ell$")
    for a in ax:
        a.set_yscale("log")
        a.set_ylabel(r"$C_\ell$")
        a.legend(fontsize="small")
    fig.suptitle("PT Cls, including IA, galaxy bias, magnification")
    fig.savefig("pt_cls.png", facecolor="white", dpi=300)
    plt.show()


# Allow running this example directly as a script.
if __name__ == "__main__":
    run_likelihood()
|
import cpu
# Known iNES mapper numbers -> common mapper names.
mappers = {0: 'NROM', 1: 'MMC1', 2: 'UNROM', 3: 'CNROM', 4: 'MMC3', 5: 'MMC5'}


def romCheck(path):
    """Validate an iNES ROM header, print its layout, and return the PRG ROM
    size in 16-KB blocks.

    The file is read in BINARY mode: the original opened it as text with
    errors='replace', which silently turned every byte above 0x7F into
    U+FFFD and corrupted the sizes, flags and mapper number.
    """
    with open(path, 'rb') as rom_file:
        ROM = rom_file.read()
    if ROM[:4] != b'NES\x1a':
        print('The cartridge is not of the iNES format.')
    else:
        print('The cartridge is of the correct iNES format.')
    print(repr(ROM[:16]))
    # Indexing a bytes object yields an int directly (no ord() needed).
    prgROM_length = ROM[4]
    print('The PRG ROM size is', prgROM_length, '16-KB blocks.')
    print('The CHR ROM size is', ROM[5], '8-KB blocks.')
    f6 = ROM[6]
    print('The flag 6 is', bin(f6)[2:])
    # Flag 6 bits: 0 = mirroring, 1 = battery PRG RAM, 2 = trainer,
    # 3 = four-screen VRAM, 4-7 = low nibble of the mapper number.
    mirroring = 'vertical' if f6 % 2 else 'horizontal'
    prgRAM = f6 % 4 > 1
    trainer = f6 % 8 > 3
    fourScreen = f6 % 16 > 7
    print(mirroring, prgRAM, trainer, fourScreen)
    print('The flag 7 is', bin(ROM[7])[2:])
    # A stored size of 0 means one 8-KB PRG RAM block (iNES convention).
    print('The PRG RAM size is', ROM[8] if ROM[8] > 0 else 1, '8-KB blocks.')
    # Mapper number: low nibble from flag 6, high nibble from flag 7.
    mapper = f6 >> 4 | ROM[7] >> 4 << 4
    print('The mapper number is', mapper)
    print('This mapper is known as', mappers.get(mapper, '(not implemented)'))
    # TODO: Implement proper mapper support.
    #for x in ROM[int('fffc',16)-32768:int('fffe',16)-32768]: print(hex(ord(x)))
    #print(repr(ROM[int('fffc',16)-32768:int('fffe',16)-32768]))
    return prgROM_length
def loadROM(path):
    """Copy PRG ROM data (everything after the 16-byte header) into cpu.prgROM.

    NOTE(review): the file is opened in TEXT mode with errors='replace', so
    any byte above 0x7F is replaced with U+FFFD and the loaded program is
    corrupted — this should read in binary ('rb'), like romCheck does after
    its fix. Confirm what element type cpu.prgROM expects (chars vs ints)
    before changing.
    """
    with open(path, encoding='ascii', errors='replace') as ROM:
        ROM = ROM.read()
        for x in range(len(cpu.prgROM)):
            cpu.prgROM[x] = ROM[x + 16]
#>>> bytearray.fromhex('4e45531a')
# bytearray(b'NES\x1a')
|
import numpy as np
import os, sys, time
from chainer import cuda
from chainer import functions as F
sys.path.append(os.path.split(os.getcwd())[0])
from progress import Progress
from mnist_tools import load_train_images, load_test_images
from model import params_energy_model, params_generative_model, ddgm
from args import args
from dataset import binarize_data
from plot import plot
class Object(object):
    """Empty attribute container used by to_object() to wrap dict entries."""
    pass
def to_object(dict):
    """Convert a dict into an Object with one attribute per key.

    Python 2 code (dict.iteritems). The parameter name shadows the builtin
    'dict' but is kept for interface stability.
    """
    obj = Object()
    for key, value in dict.iteritems():
        setattr(obj, key, value)
    return obj
def sample_from_data(images, batchsize):
    """Draw `batchsize` distinct images, scale to [0, 1], flatten, and binarize."""
    ndim_x = images[0].size
    x_batch = np.zeros((batchsize, ndim_x), dtype=np.float32)
    chosen = np.random.choice(np.arange(len(images), dtype=np.int32), size=batchsize, replace=False)
    for row, data_index in enumerate(chosen):
        img = images[data_index].astype(np.float32) / 255.0
        x_batch[row] = img.reshape((ndim_x,))
    return binarize_data(x_batch)
def main():
    """Train the deep directed generative model (DDGM) on binarized MNIST.

    Python 2 code (print statements, xrange). Alternates between updating the
    energy model (contrastive loss) and the generative model (KLD).
    """
    # load MNIST images
    images, labels = load_train_images()
    # config
    config_energy_model = to_object(params_energy_model["config"])
    config_generative_model = to_object(params_generative_model["config"])
    # settings
    max_epoch = 1000
    n_trains_per_epoch = 1000
    batchsize_positive = 128
    batchsize_negative = 128
    plot_interval = 30
    # seed
    np.random.seed(args.seed)
    if args.gpu_enabled:
        cuda.cupy.random.seed(args.seed)
    # init weightnorm layers: run one large batch through each network so the
    # weight-normalization layers can calibrate their scales.
    if config_energy_model.use_weightnorm:
        print "initializing weight normalization layers of the energy model ..."
        x_positive = sample_from_data(images, len(images) // 10)
        ddgm.compute_energy(x_positive)
    if config_generative_model.use_weightnorm:
        print "initializing weight normalization layers of the generative model ..."
        x_negative = ddgm.generate_x(len(images) // 10)
    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_energy_positive = 0
        sum_energy_negative = 0
        sum_loss = 0
        sum_kld = 0
        for t in xrange(n_trains_per_epoch):
            # sample from data distribution
            x_positive = sample_from_data(images, batchsize_positive)
            x_negative = ddgm.generate_x(batchsize_negative)
            # train energy model: lower energy on data, raise it on samples
            energy_positive = ddgm.compute_energy_sum(x_positive)
            energy_negative = ddgm.compute_energy_sum(x_negative)
            loss = energy_positive - energy_negative
            ddgm.backprop_energy_model(loss)
            # train generative model
            # TODO: KLD must be greater than or equal to 0
            x_negative = ddgm.generate_x(batchsize_negative)
            kld = ddgm.compute_kld_between_generator_and_energy_model(x_negative)
            ddgm.backprop_generative_model(kld)
            sum_energy_positive += float(energy_positive.data)
            sum_energy_negative += float(energy_negative.data)
            sum_loss += float(loss.data)
            sum_kld += float(kld.data)
            if t % 10 == 0:
                progress.show(t, n_trains_per_epoch, {})
        # Per-epoch averages, then checkpoint and (periodically) plot samples.
        progress.show(n_trains_per_epoch, n_trains_per_epoch, {
            "x+": sum_energy_positive / n_trains_per_epoch,
            "x-": sum_energy_negative / n_trains_per_epoch,
            "loss": sum_loss / n_trains_per_epoch,
            "kld": sum_kld / n_trains_per_epoch
        })
        ddgm.save(args.model_dir)
        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))

if __name__ == "__main__":
    main()
|
import sys
import socket
from Crypto import Random
from Crypto.Cipher import AES
import base64
import serial
import threading
import time
import queue
import pandas as pd
import numpy as np
from sklearn.externals import joblib
import pickle
from scipy import stats
# Pre-trained MLP classifier for the 10-move dance set.
mlp_model = joblib.load('ML_Models/mlp_10move_updated.pkl')
#mlp_model = pickle.load(open('knn_10move', 'rb'))
# Number of samples per rolling feature window.
windowSize = 54
#rf_model = joblib.load('clf.pkl')
#neigh_model = joblib.load('neigh.pkl')
# Raw sensor channels: accelerometer and gyroscope for left hand (LH) and
# right hand (RH), plus accelerometer for right leg (RL), three axes each.
X_columns = [
    "accLH_x", "accLH_y", "accLH_z", "gyrLH_x", "gyrLH_y", "gyrLH_z",
    "accRH_x", "accRH_y", "accRH_z", "gyrRH_x", "gyrRH_y", "gyrRH_z",
    "accRL_x", "accRL_y", "accRL_z"
]
# Rolling-mean feature per raw channel. (Earlier experiments also tried
# std/min/max/var/median variants and an accelerometer-only subset; the
# large commented-out alternatives were removed as dead code.)
feature_columns = [
    "accLH_x_mean", "accLH_y_mean", "accLH_z_mean", "gyrLH_x_mean", "gyrLH_y_mean", "gyrLH_z_mean",
    "accRH_x_mean", "accRH_y_mean", "accRH_z_mean", "gyrRH_x_mean", "gyrRH_y_mean", "gyrRH_z_mean",
    "accRL_x_mean", "accRL_y_mean", "accRL_z_mean"
]
# Buffer of raw samples collected for the current prediction window.
data_packet_list = []
#data_df = pd.DataFrame(columns=X_columns)
class Client(threading.Thread):
    def __init__(self, serverName, serverPort, dataQueue, volCurPowQueue):
        """Connect to the evaluation server and set up prediction state.

        dataQueue feeds incoming sensor samples; volCurPowQueue supplies the
        latest voltage/current/power/cumulative-power readings.
        """
        threading.Thread.__init__(self)
        self.shutdown = threading.Event()
        # NOTE: connects in the constructor — creating a Client requires the
        # server to be reachable.
        self.clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.clientSocket.connect((serverName, serverPort))
        self.predictionReady = False
        self.predictedAction = ''
        self.dataQueue = dataQueue
        self.volCurPowQueue = volCurPowQueue
        # Number of moves recognized so far; past 38 an 'endmove' -> 'logout'.
        self.moveCounter = 0
    def run(self):
        """Main loop: wait for a full window of samples, predict, and send.

        Structure reconstructed from a whitespace-mangled source — verify the
        nesting against the original before relying on it.
        """
        prediction = []
        while not self.shutdown.is_set():
            #to remove; timing check
            if(self.dataQueue.empty()):
                queue_start_time = time.time()
            if(self.dataQueue.full()):
                print("---Queue took %s seconds to fill ---" % (time.time() - queue_start_time))
                print("------------------")
                prediction.append(self.predictAction())
                # NOTE(review): setPredictionReady is defined outside this
                # chunk; presumably it sets predictionReady/predictedAction.
                self.setPredictionReady(prediction)
                if(self.predictionReady):
                    prediction.clear()
                    if(self.predictedAction != 'neutral'):
                        if(self.predictedAction == 'endmove'):
                            # After enough moves an 'endmove' becomes 'logout';
                            # otherwise ignore it and keep collecting.
                            if(self.moveCounter > 38):
                                self.predictedAction = 'logout'
                            else:
                                continue
                        else:
                            self.moveCounter += 1
                        volCurPow = self.volCurPowQueue.get()
                        data = [self.predictedAction, volCurPow[0], volCurPow[1], volCurPow[2], volCurPow[3]]
                        self.sendData(data)
                        print("Dance Move is: " + str(self.predictedAction) + "\n")
def stop(self):
self.clientSocket.close()
self.shutdown.set()
def sendData(self, data):
iv = Random.get_random_bytes(16)
message = "#"
for i in range(4):
message += str(data[i]) + "|"
message += str(data[4])
secret_key = bytes("1234567890abcdef", 'utf-8')
aesCipher = AES.new(secret_key,AES.MODE_CBC,iv)
padding = AES.block_size - (len(message) % AES.block_size)
if padding == AES.block_size:
padding = 0
for i in range(padding):
message += " "
encryptedMsg = iv + aesCipher.encrypt(message)
b64CipherMsg = base64.b64encode(encryptedMsg)
try:
self.clientSocket.sendall(b64CipherMsg)
except:
print("Request timed out")
if(self.predictedAction == 'logout'):
self.stop()
def predictAction(self):
    """Consume 100 sensor packets from the data queue, compute rolling-mean
    features, and return the majority-vote label from the MLP model.

    Relies on module globals: data_packet_list, X_columns, feature_columns,
    windowSize, mlp_model, pd.
    """
    predict_start_time = time.time()
    data_packet_list.clear()
    # Blocking reads: one prediction window is exactly 100 packets.
    for i in range(100):
        data_points = self.dataQueue.get()
        data_packet_list.append(data_points)
    data_df = pd.DataFrame(data_packet_list, columns = X_columns)
    feature_df = pd.DataFrame(columns = feature_columns)
    # The 15 raw sensor channels; one rolling-mean feature per channel.
    # (The original wrote each assignment out by hand and carried a large
    # commented-out block of std/max/min/var features; both replaced by
    # this loop.)
    sensor_channels = [
        'accLH_x', 'accLH_y', 'accLH_z',
        'gyrLH_x', 'gyrLH_y', 'gyrLH_z',
        'accRH_x', 'accRH_y', 'accRH_z',
        'gyrRH_x', 'gyrRH_y', 'gyrRH_z',
        'accRL_x', 'accRL_y', 'accRL_z',
    ]
    for channel in sensor_channels:
        feature_df[channel + '_mean'] = data_df[channel].rolling(window=windowSize).mean()
    # Drop the leading rows where the rolling window is not yet full.
    feature_df.dropna(inplace=True)
    prediction = mlp_model.predict(feature_df)
    # Majority vote across the window's per-row predictions.
    prediction_list = list(prediction)
    final_prediction = max(set(prediction_list), key=prediction_list.count)
    print("Initial prediction is: " + str(final_prediction))
    print("---Prediction took %s seconds ---" % (time.time() - predict_start_time))
    print("------------------")
    return final_prediction
def setPredictionReady(self, prediction):
    """Decide whether enough candidate predictions have accumulated and, if
    so, publish their majority vote via self.predictedAction /
    self.predictionReady."""
    set_pre_start_time = time.time()
    predictionReady = False
    mode = 'neutral'
    count = 0  # only used by the commented-out long-window variant below
    if(len(prediction) >= 3):
        # Majority vote over the accumulated candidate labels.
        mode = max(set(prediction), key=prediction.count)
        print('Prediction Ready!')
        print('Prediction List is: ' + str(prediction))
        predictionReady = True
        #for prediction after many counts
        #combined_arr = np.concatenate(prediction, axis =0)
        #mode_res = stats.mode(combined_arr)
        #count = mode_res[1]
        #if (count > 50):
        #print('Prediction Ready!')
        #mode = mode_res[0]
        #predictionReady = True
    else:
        print('Prediction Not Ready.')
    self.predictedAction = mode
    self.predictionReady = predictionReady
    print("---Setting prediction ready took %s seconds ---" % (time.time() - set_pre_start_time))
    print("------------------")
class Serial(threading.Thread):
    """Reads sensor/power packets from a UART-connected microcontroller and
    feeds them to the Client thread via the shared queues.

    Protocol (reconstructed from the code — confirm against the firmware):
    handshake 'H' -> 'A' -> 'A'; then 3-digit-length-prefixed,
    comma-separated, XOR-checksummed records, ACKed 'A' / NAKed 'N'.
    """
    def __init__(self, clientThread, dataQueue, volCurPowQueue):
        threading.Thread.__init__(self)
        self.shutdown = threading.Event()
        self.isHandShakeDone = False
        # Raspberry Pi primary UART.
        self.port = serial.Serial("/dev/ttyAMA0", baudrate=115200, timeout=1.0)
        self.dataQueue = dataQueue
        self.volCurPowQueue = volCurPowQueue
        self.clientThread = clientThread
        # Running energy total (power integrated over fixed steps; see
        # setVolCurPowQueue).
        self.CumPower = 0
    def run(self):
        # Handshake: send 'H' until the device answers 'A', then confirm.
        while not self.isHandShakeDone:
            self.port.write(bytes("H", 'utf-8'))
            ack = self.port.read().decode()
            if ack == 'A':
                self.isHandShakeDone = True
                self.port.write(bytes("A", 'utf-8'))
                print("Handshake Done!\n")
        while not self.shutdown.is_set():
            self.getData()
            # Once the client has consumed a prediction, drop stale packets
            # so the next prediction window starts from fresh data.
            if(self.clientThread.predictionReady):
                with self.dataQueue.mutex:
                    self.dataQueue.queue.clear()
                self.clientThread.predictionReady = False
            if(self.clientThread.shutdown.is_set()):
                self.stop()
    def stop(self):
        self.shutdown.set()
    def getData(self):
        """Read one length-prefixed record, verify its XOR checksum, ACK/NAK.

        NOTE(review): a checksum failure recurses into getData(); a
        persistently bad link could exhaust the stack.
        """
        # Length field: skip empty timeout reads, then take 3 digits total.
        length = ''
        while length == '':
            length = self.port.read().decode('utf-8')
        length += self.port.read().decode('utf-8')
        length += self.port.read().decode('utf-8')
        data = ''
        checksum = ord('\0')
        for i in range(int(length)):
            dataByte = self.port.read().decode('utf-8')
            # Commas are field separators and excluded from the checksum.
            if (dataByte != ','):
                checksum ^= ord(dataByte)
            data += dataByte
        if checksum == ord(self.port.read().decode('utf-8')):
            self.port.write(bytes("A", 'utf-8'))
            self.deserialize(data)
        else:
            self.port.write(bytes("N", 'utf-8'))
            self.getData()
    def deserialize(self, data):
        """Route a record: 'D'-records are sensor packets; anything else is a
        voltage/current reading, followed by another blocking read."""
        if(data.split(',')[0] == 'D'):
            deserializedData = []
            for i in range(len(data.split(',')) - 1):
                deserializedData.append(float(data.split(',')[i+1]))
            # Drop the oldest packet when full so the serial reader never
            # blocks on the queue.
            if(self.dataQueue.full()):
                self.dataQueue.get()
            self.dataQueue.put(deserializedData)
        else:
            self.setVolCurPowQueue(round(float(data.split(',')[1]), 1), round(float(data.split(',')[2]), 1))
            self.getData()
    def setVolCurPowQueue(self, sensorValue, vdValue):
        """Convert raw ADC readings to voltage/current/power and publish the
        latest snapshot (the queue keeps only the newest value)."""
        # Presumably a 10-bit ADC with a 5V reference — confirm hardware.
        sensorValue = (sensorValue * 5) / 1023
        vdValue = (vdValue * 5) / 1023
        # Voltage divider appears to halve the measured voltage.
        Voltage = round(vdValue*2, 4)
        current = round(((sensorValue / (10 * 0.1)) / 10), 4)
        Power = round(Voltage * current, 4)
        # NOTE(review): assumes one reading every 5 seconds — confirm.
        self.CumPower += Power * 5
        self.CumPower = round(self.CumPower, 4)
        volCurPow = [Voltage, current, Power, self.CumPower]
        if(not self.volCurPowQueue.empty()):
            with self.volCurPowQueue.mutex:
                self.volCurPowQueue.queue.clear()
        self.volCurPowQueue.put(volCurPow)
if __name__ == '__main__':
    # Usage: python client.py [IP address] [Port]
    if len(sys.argv) != 3:
        print('Invalid number of arguments')
        print('python client.py [IP address] [Port]')
        sys.exit()
    serverName = sys.argv[1]
    serverPort = int(sys.argv[2])
    # dataQueue buffers sensor packets (bounded so the serial reader can
    # drop the oldest); volCurPowQueue holds only the latest power snapshot.
    dataQueue = queue.Queue(maxsize=100)
    volCurPowQueue = queue.Queue()
    myClient = Client(serverName, serverPort, dataQueue, volCurPowQueue)
    mySerial = Serial(myClient, dataQueue, volCurPowQueue)
    myClient.start()
    mySerial.start()
|
# Módulo destinado a correr el programa
from chaucraft import ChauCraft
from galaxia import Galaxia
from planeta import Planeta
from clases import Aprendiz, Asesino, Maestro, Edificio
from datetime import datetime
# Module that loads the saved game state (galaxies, then planets) from CSV
# and starts the game.  Missing save files start a fresh game instead.
game = ChauCraft()

try:
    # --- Load galaxies ---
    with open("galaxias.csv", "r", encoding="utf-8") as galaxias_file:
        primera = galaxias_file.readline()
        # Header cells look like "name: type"; keep only the name part.
        orden = (dato.strip("\n").split(": ") for dato in primera.split(","))
        orden = tuple(dato.strip() for [dato, tipo] in orden)
        nueva_galaxia = {}
        for line in galaxias_file:
            datos_galaxia = (dato.strip() for dato in
                             line.strip("\n").split(","))
            for dato in orden:
                # Idiom fix: next(gen) instead of gen.__next__().
                nueva_galaxia[dato] = next(datos_galaxia)
            galaxia = Galaxia(nueva_galaxia["nombre"])
            galaxia.reserva_mineral = int(nueva_galaxia["minerales"])
            galaxia.reserva_deuterio = int(nueva_galaxia["deuterio"])
            game.galaxias[nueva_galaxia["nombre"]] = galaxia
    # --- Load planets (requires their galaxies to exist already) ---
    with open("planetas.csv", "r", encoding="utf-8") as planetas_file:
        primera = planetas_file.readline()
        orden = (dato.split(": ") for dato in primera.strip("\n").split(","))
        orden = tuple(dato.strip() for [dato, tipo_dato] in orden)
        nuevo_planeta = {}
        for line in planetas_file:
            datos_planeta = (dato.strip() for dato in
                             line.strip("\n").split(","))
            for dato in orden:
                nuevo_planeta[dato] = next(datos_planeta)
            # Race decides the planet's stats class; anything else is corrupt data.
            if nuevo_planeta["raza"] == "Aprendiz":
                raza = Aprendiz()
            elif nuevo_planeta["raza"] == "Maestro":
                raza = Maestro()
            elif nuevo_planeta["raza"] == "Asesino":
                raza = Asesino()
            else:
                raise ValueError
            galaxia = game.galaxias[nuevo_planeta["galaxia"]]
            galaxia.planetas[nuevo_planeta["nombre"]] = Planeta(raza)
            planeta = galaxia.planetas[nuevo_planeta["nombre"]]
            planeta.magos = int(nuevo_planeta["magos"])
            planeta.soldados = int(nuevo_planeta["soldados"])
            planeta.tasa_minerales = int(nuevo_planeta["tasa_minerales"])
            planeta.tasa_deuterio = int(nuevo_planeta["tasa_deuterio"])
            planeta.nivel_ataque = int(nuevo_planeta["nivel_ataque"])
            planeta.nivel_economia = int(nuevo_planeta["nivel_economia"])
            if nuevo_planeta["conquistado"] == "True":
                planeta.conquistado = True
                galaxia.planetas_conquistados.add(nuevo_planeta["nombre"])
            # Buildings are stored as booleans; recreate with fixed costs.
            if nuevo_planeta["torre"] == "True":
                planeta.torre = Edificio(150, 300, 1000, 2000)
            if nuevo_planeta["cuartel"] == "True":
                planeta.cuartel = Edificio(200, 500, 0, 5000)
            recoleccion = nuevo_planeta["ultima_recoleccion"]
            # Parse as seen at https://stackoverflow.com/questions/466345
            # /converting-string-into-datetime
            modo_datetime = datetime.strptime(recoleccion, '%Y-%m-%d %H:%M:%S')
            planeta.ultima_recoleccion = modo_datetime
            nombre_planeta = nuevo_planeta["nombre"]
            nombre_galaxia = nuevo_planeta["galaxia"]
            # Class-level registry: planet name -> (planet, galaxy name).
            Galaxia.planetas[nombre_planeta] = (planeta, nombre_galaxia)
except FileNotFoundError:
    print("No hay archivos para cargar o estos no tienen el header")
    print("Se comenzará el juego desde cero")

game.run()
|
from __future__ import print_function
import pyautogui
import speech_recognition as sr
import os
from time import gmtime
import time
import random
from flask import Flask ,render_template ,request
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
# Build the chatbot on a SQL storage adapter and train it with the bundled
# English corpus (absolute path is machine-specific).
english_bot=ChatBot("Chatterbot",storage_adapter="chatterbot.storage.SQLStorageAdapter")
trainer= ChatterBotCorpusTrainer(english_bot)
trainer.train("C:\\Users\\USER\\AppData\\Local\\Programs\\Python\\Python37-32\\Lib\\site-packages\\chatterbot_corpus\\data\\english")
def recognition(time=2):
    """Listen on the default microphone and return the recognized text.

    Fix: `text` was only assigned inside the try block, so a failed
    recognition raised NameError on `return text`; it now returns '' on
    failure instead.

    NOTE(review): the parameter name `time` shadows the `time` module inside
    this function; kept for backward compatibility with keyword callers.
    """
    r = sr.Recognizer()
    text = ''
    with sr.Microphone() as source:
        print('say something ')
        audio = r.listen(source, timeout=time)
        try:
            text = r.recognize_google(audio)
            print(text)
        except Exception as e:
            # Best-effort: report the recognition error and fall through.
            print(e)
    return text
def speaker( speak_word , test_word , action=' ' ): #creates file and outputs for voice
    """Write a VBScript that speaks `speak_word` via SAPI and launch it;
    optionally launch `action` (a file/program path) afterwards.

    Fixes: the script file is now closed deterministically via `with`
    (the original relied on a later explicit close), and the unused local
    `word` was removed.
    """
    print(test_word)
    script_path = "C:\\Users\\USER\\Desktop\\filet.vbs"
    with open(script_path, "w+") as f:
        f.write('set sapi=createobject("sapi.spvoice")\n')
        f.write('sapi.speak "')
        f.write(speak_word)
        f.write('"')
    # Windows-only: runs the .vbs with the default handler (wscript).
    os.startfile(script_path)
    if action!=' ':
        os.startfile(action)
def typer( speak_word , test_word , typer ):
    """Write a VBScript that presses ENTER, sends the keystrokes in `typer`
    to the focused window, speaks `speak_word`, and launch it.

    Fixes: file closed via `with`; unused local `word` removed.
    NOTE(review): the third parameter shadows the function's own name —
    kept because renaming would break keyword callers.
    """
    print(test_word)
    script_path = "C:\\Users\\USER\\Desktop\\typerfil.vbs"
    with open(script_path, "w+") as f:
        f.write('set sapi=createobject("sapi.spvoice")\n')
        f.write('set wshshell=wscript.createobject("wscript.shell")\n')
        f.write('wshshell.sendkeys "')
        f.write('{ENTER}')
        f.write('"\n')
        f.write('wshshell.sendkeys "')
        f.write(typer)
        f.write('"\n')
        f.write('sapi.speak "')
        f.write(speak_word)
        f.write('"')
    # Windows-only: runs the .vbs with the default handler (wscript).
    os.startfile(script_path)
# Scripted "Jarvis" demo: the sleeps stand in for live speech recognition
# (the real recognition() call is commented out), so the sequence is
# pre-timed against a rehearsed set of spoken commands.
daytime=time.strftime("%a, %d %b ", gmtime())
#rec=recognition()
print("listening :: >")
time.sleep(3.6)
speaker("yes sir","Jarvis")
print("listening :: >")
time.sleep(8.5)
speaker("well sir,.we have python, java , c and prolog languages on the system.","jarvis create a new project")
print("listening :: >")
time.sleep(10.0)
# Opens Notepad as the "new file" for the python project.
speaker("entering python mode sir, a new file will be created","jarvis use python","C:\\WINDOWS\\system32\\notepad.exe")
print("listening :: >")
time.sleep(8.0)
speaker("visual library, open cv found sir, ","jarvis import visual library")
typer("imported successfully"," ","import cv2")
print("listening :: >")
time.sleep(10.6)
typer("importing num py","import numpy","import numpy")
print("listening :: >")
time.sleep(8.0)
# Launches a batch file that opens a shell for the pip install below.
speaker("keras module not found sir, should we consider installing it via pip","import keras","C:\\Users\\USER\\AppData\\Local\\Programs\\Python\\Python37-32\\Scripts\\installer.bat")
typer("","","cd scripts")
time.sleep(0.5)
typer("","","{ENTER}")
print("listening :: >")
time.sleep(9)
typer("","","pip install keras")
time.sleep(6)
typer("ok sir ","yes","{ENTER}")
time.sleep(3)
|
# Python 2 demo of strings, nested lists and nested dicts (note the
# statement-form `print`).  The literal text is Portuguese dialogue data
# and is left untouched.
aminhastring = 'ola a todos bem vindos ao meu canal'
omeuarraydenumeros = [1, 2, 3, 4]
# Flat representation: [id, statement] pairs.
familia = [
    ['1', 'olha a vaca ah ima gossto em vela'],
    ['2', 'tiago, leva a roupa a lavandaria'],
    ['3', 'estas sempre a implicar comigo'],
    ['4', 'silencio sepulcral seguido de um lindo gesto -> .|.']
]
# Nested representation: id -> {statement label -> statement}.
familiav2 = {
    '1': {
        'statement 1': 'olha a vaca',
        'statement 2': 'a ima gossto em vela',
        'statement 3': 'ah tiago essess sitioss que tu fequentass tem muito mau asspecto'
    },
    '2': {
        'statement1': 'tiago leva a roupa a lavandaria',
        'statement2': 'tiago levanta a toua louca',
        'statement3': 'tiago filho tanto sacrificio e investimento em ti para tu agora estares desempregado'
    },
    '3': {
        'statement1': 'estas sempre a implicar comigo por tudo e por nada'
    },
    '4': {
        'statement1': 'silencio sepulcrarl',
        'statement2': '.|.'
    }
}
print aminhastring
print omeuarraydenumeros
# Trailing comma keeps the elements of one row on a single line (Python 2).
for row in familia:
    for el in row:
        print el,
    print
for id in familiav2.keys():
    print id
    for key in familiav2[id].keys():
        print "  " + key + "  " + familiav2[id][key]
print |
import matplotlib.pyplot as plt
import math
import random
# NOTE(review): N appears unused in this script; kept in case another
# module imports it.
N = 20

incomes, callRatios = [], []
# Each line of the input is "<income>|<call ratio>".
# Fixes: file handle is now closed via `with` (the original never closed
# it), and the newline is removed with strip() instead of [:-1], which
# would have chopped a digit off a final line lacking a newline.
with open("finalDataCoords.txt") as data:
    for line in data:
        fields = line.split("|")
        incomes.append(float(fields[0]))
        callRatios.append(float(fields[1].strip()))

print(incomes)
print(callRatios)

f = plt.figure(figsize=(10, 5))
# Random colours and marker areas purely for visual variety.
colors = [random.random() for i in range(len(incomes))]
areas = [math.pi * random.random() * 50 for i in range(len(incomes))]
plt.scatter(incomes, callRatios, c=colors, s=areas, alpha=0.5)
plt.ylim(0.0, 1.7)
plt.xlim(0.0, 400000)
plt.xlabel("Average Incomes")
plt.ylabel("Ratio EMS calls 2020:2019")
# Fixed typo in the chart title ("Non-Criical" -> "Non-Critical").
plt.title("Ratio of Non-Critical EMS calls in 2020 vs 2019 According to Average Income")
f.savefig("RatioEMSCallsVSIncome.pdf")
|
import email.parser
import re
from datetime import datetime
import dateutil.parser
import sb.util
from xml.sax.saxutils import escape, quoteattr
from jk.islojban import is_lojban
# Python 2 script: extracts Lojban paragraphs from an mbox file and emits
# them as <msg>/<chunk> XML on stdout.
from_re = re.compile("^From ")
# Dates are of lots of forms:
#    Fri,  2 Apr 1993 16:40:37 BST
#    Wed, 06 Jun 90 11:44:24 -0700
#    6 Nov 90 00:02:21 EST (Tue)
# Month-name -> month-number map (NOTE(review): appears unused here —
# dateutil does the parsing; possibly left over from an earlier version).
months = dict(zip(["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],range(1,13)))

def parse(fname):
    """Split the mbox at "From " separator lines and print every Lojban
    paragraph of each message wrapped in <msg>/<chunk> XML.

    NOTE(review): the final message of the file is never flushed — a mail
    is only processed when the *next* "From " line is seen.
    """
    content = open(fname).read()
    chunk = []
    for l in content.split("\n"):
        if from_re.match(l) and len(chunk) > 0:
            # the content in chunk is now a mail
            mail = email.message_from_string("\n".join(chunk))
            dt = mail.get('Date','')
            if dt == "":
                # No Date header: fall back to a date embedded in the
                # Message-Id (either "19yymmdd" or a 6-char form).
                mid = mail.get('Message-Id','')
                if mid[1:3] == "19":
                    dt = mid[1:9]
                else:
                    dt = mid[1:7]
            date = None
            try:
                date = dateutil.parser.parse(dt)
            except ValueError:
                # Retry without a trailing "(TZ)"-style suffix.
                try:
                    date = dateutil.parser.parse(dt[:-8])
                except ValueError:
                    pass
            if date is not None and dt != "":
                # Years >= 2014 are treated as parse garbage and dropped.
                if date.year < 2014:
                    date = date.strftime("%Y-%m-%d")
                else:
                    date = ""
            else:
                date = ""
            header_printed = False
            try:
                paragraphs = mail.get_payload().split("\n\n")
            except:
                # Multipart payloads return a list; skip those messages.
                paragraphs = []
            for x in paragraphs:
                if is_lojban(x):
                    if not header_printed:
                        # Emit the <msg> header once per message, lazily.
                        header_printed = True
                        def lk(x):
                            return quoteattr(mail.get(x,''))
                        print "<msg date=%s author=%s subject=%s>" % (quoteattr(date),lk('From'),lk('Subject'))
                    print "<chunk>"
                    print escape(x)
                    print "</chunk>"
            if header_printed:
                print "</msg>"
            chunk = []
        else:
            chunk.append(l)

if __name__ == "__main__":
    sb.util.run.main(parse)
|
# Generated by Django 2.1.4 on 2019-01-11 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (do not edit by hand): creates the Besoin
    model, adds ordering/plural options on Clientele, renames its name
    fields to nom_en/nom_fr, and links Besoin to Clientele and
    Problematique via many-to-many fields."""

    dependencies = [
        ('speciality', '0036_auto_20190111_1651'),
        ('clientele', '0005_clientele_services'),
    ]

    operations = [
        migrations.CreateModel(
            name='Besoin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AlterModelOptions(
            name='clientele',
            options={'ordering': ['nom_fr'], 'verbose_name_plural': 'clienteles'},
        ),
        migrations.RenameField(
            model_name='clientele',
            old_name='name',
            new_name='nom_en',
        ),
        migrations.RenameField(
            model_name='clientele',
            old_name='nom',
            new_name='nom_fr',
        ),
        migrations.AddField(
            model_name='besoin',
            name='clientele',
            field=models.ManyToManyField(to='clientele.Clientele'),
        ),
        migrations.AddField(
            model_name='besoin',
            name='probleme',
            field=models.ManyToManyField(to='speciality.Problematique'),
        ),
    ]
|
from django.contrib import admin

from .models import List, Item

# Expose both to-do models through the default Django admin site.
for model in (List, Item):
    admin.site.register(model)
"""
run the script
press c to capture the detected face
press q to quit
or continue adding faces
"""
import numpy as np
import cv2
# Haar-cascade face capture: shows the mirrored webcam feed, draws a box
# around each detected face; 'c' saves crops of the detected faces, 'q'
# quits.
faceCascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
face_num=1
while True:
    ret, img = cap.read()
    img = cv2.flip(img, 1)  # mirror for a natural selfie view
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20)
    )
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    cv2.imshow('frames',img)
    # Fix: read the keyboard ONCE per frame.  The original called
    # cv2.waitKey(1) twice, so each call swallowed the other's keystroke
    # and 'c'/'q' were frequently missed; it also referenced x,y,w,h
    # outside the face loop, raising NameError when no face was detected.
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    if key == ord('c'):
        for (x,y,w,h) in faces:
            crop_img = img[y: y + h, x: x + w]
            cv2.imwrite('face'+str(face_num)+'.jpg', crop_img)
            face_num+=1
cap.release()
cv2.destroyAllWindows()
|
# if-else: classify x by sign.
x = 5
if x > 0:
    print("The number is positive")
elif x < 0:
    print("The number is negative")
else:
    # Fixed typo in the output message: "nuber" -> "number".
    print("The number is Zero")
#-*- coding:utf8 -*-
from django.conf import settings
from django.core import exceptions
from shopback import paramconfig as pcfg
from .handler import (BaseHandler,
InitHandler,
ConfirmHandler,
FinalHandler,
StockOutHandler,
DefectHandler,
RuleMatchHandler)
from .split import SplitHandler
from .memo import MemoHandler
from .merge import MergeHandler
from .refund import RefundHandler
from .logistic import LogisticsHandler
from .intercept import InterceptHandler
from .regular import RegularSaleHandler
from .flashsale import FlashSaleHandler
import logging
logger = logging.getLogger('celery.handler')
class NotBaseHandlerError(Exception):
    """Raised when a registered handler class is not a BaseHandler subclass."""
class AlreadyRegistered(Exception):
    """Raised when the same handler type is registered twice."""
class TradeHandler(object):
    """Registry that runs a configured chain of trade handlers over a merge
    trade.  (Python 2 code — note the `except Exception,exc` syntax.)"""
    def __init__(self, name='handlers', app_name='trades'):
        # NOTE(review): name/app_name are accepted but never stored.
        self._handlers = [] #collect all need effect handlers
    def register(self,handler_class):
        # Only BaseHandler subclasses may be registered, and only once per
        # concrete type.
        if not handler_class or not issubclass(handler_class,BaseHandler):
            raise NotBaseHandlerError('Need Trade BaseHandler Subclass.')
        handler = handler_class()
        for registed_handler in self._handlers:
            if type(handler) == type(registed_handler):
                raise AlreadyRegistered(u'%s is already regiest.'%unicode(type(handler)))
        self._handlers.append(handler)
    def proccess(self,merge_trade,*args,**kwargs):
        # Run every applicable handler in registration order; any failure
        # flags the trade with a system error code instead of propagating.
        try:
            for registed_handler in self._handlers:
                if registed_handler.handleable(merge_trade,*args,**kwargs):
                    registed_handler.process(merge_trade,*args,**kwargs)
        except Exception,exc:
            merge_trade.append_reason_code(pcfg.SYSTEM_ERROR_CODE)
            logger.error(u'订单处理错误:%s'%exc.message,exc_info=True)
def getTradeHandler(config_handlers_path=[]):
    """Build a TradeHandler from dotted class paths (defaults to
    settings.TRADE_HANDLERS_PATH).

    NOTE(review): mutable default argument — harmless here because the
    default is only read, never mutated.
    """
    from django.utils.importlib import import_module
    trade_handler = TradeHandler()
    config_handlers_path = config_handlers_path or getattr(settings,'TRADE_HANDLERS_PATH',[])
    for handler_path in config_handlers_path:
        # Split "package.module.ClassName" into module path and class name.
        try:
            hl_module, hl_classname = handler_path.rsplit('.', 1)
        except ValueError:
            raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % handler_path)
        try:
            mod = import_module(hl_module)
        except ImportError, e:
            raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (hl_module, e))
        try:
            hl_class = getattr(mod, hl_classname)
        except AttributeError:
            raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class'
                % (hl_module, hl_classname))
        trade_handler.register(hl_class)
    return trade_handler

# Module-level singleton, built from settings at import time.
trade_handler = getTradeHandler()
|
import abc
import boto3
from botocore.exceptions import ClientError
from backend.settings import DEBUG
class _AbstractS3(metaclass=abc.ABCMeta):
    """Interface for S3 operations, so a mock can stand in for the real
    client in DEBUG (see the module-level `S3` singleton)."""
    @abc.abstractmethod
    def get_upload_dict(self, bucket, key):
        """Return a presigned-POST dict ('url' + 'fields') for uploading."""
        pass
    @abc.abstractmethod
    def get_download_url(self, bucket, key):
        """Return a presigned GET URL, or None if the object does not exist."""
        pass
    @abc.abstractmethod
    def delete(self, bucket, key):
        """Delete the object at bucket/key."""
        pass
class _MockS3(_AbstractS3):
    """DEBUG stand-in: returns fixed values and performs no AWS calls."""
    def get_upload_dict(self, bucket, key):
        # Shape mirrors boto3's generate_presigned_post response.
        return {
            'url': f'https://{bucket}.s3.amazonaws.com/',
            'fields': {
                'key': key,
                'AWSAccessKeyId': 'AWSAccessKeyId',
                'policy': 'policy',
                'signature': 'signature',
            },
        }
    def get_download_url(self, bucket, key):
        # Static placeholder image; never None, unlike the real client.
        return 'https://dongyuzheng.com/static/img/paper.png'
    def delete(self, bucket, key):
        pass
class _S3(_AbstractS3):
    """George: boto3.readthedocs.io/en/latest/reference/services/s3.html
    Look for `presigned`
    """
    def __init__(self):
        # Credentials/region come from the default boto3 credential chain.
        self.client = boto3.client('s3')
    def get_upload_dict(self, bucket, key):
        """Return a presigned-POST dict ('url' + 'fields') for uploading."""
        resp = self.client.generate_presigned_post(
            Bucket=bucket,
            Key=key,
        )
        return resp
    def get_download_url(self, bucket, key):
        """Please return None if does not exist.
        """
        # Existence check first: a presigned URL is generated regardless of
        # whether the object exists, so HEAD the object ourselves.
        try:
            self.client.head_object(Bucket=bucket, Key=key)
        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                return None
            # Any other error (403, throttling, ...) is propagated.
            raise
        resp = self.client.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': bucket,
                'Key': key,
            },
            HttpMethod='GET',
        )
        return resp
    def delete(self, bucket, key):
        self.client.delete_object(
            Bucket=bucket,
            Key=key,
        )
# Module-level singleton: use the mock in DEBUG so local development never
# touches real AWS.
S3 = _MockS3() if DEBUG else _S3()
|
from torch import tensor
from sentanalyzer_api.utils.vocabulary import Vocabulary
from sentanalyzer_api.utils.preprocess import tweet_cleaner
# vocab_file = r'sentanalyzer_api/utils/vocab_V4_.json'
import os
from dotenv import load_dotenv
load_dotenv()
# Vocabulary file path differs between the deployed layout and DEV
# (where the working directory is inside the package).
vocab_file = r'sentanalyzer_api/utils/vocab_V4_.json'
if os.environ.get('MODE')== 'DEV':
    vocab_file = r'utils/vocab_V4_.json'
# Shared vocabulary instance; freq_threshold semantics are defined by
# Vocabulary — presumably rare words are excluded (confirm in utils).
vocab = Vocabulary(freq_threshold=5,file=vocab_file)
def text_to_vector(tweet, num_words=100):
    """Convert a raw tweet into a fixed-length tensor of vocabulary indices.

    The tweet is cleaned, numericalized, padded with <PAD> up to
    `num_words`, and truncated to exactly `num_words` entries.
    """
    cleaned = tweet_cleaner(tweet)
    indices = vocab.numericalize(cleaned)
    pad_index = vocab.str2idx["<PAD>"]
    # Pad up to the target length...
    while len(indices) < num_words:
        indices.append(pad_index)
    # ...then clip anything beyond it.
    if len(indices) > num_words:
        indices = indices[:num_words]
    return tensor(indices)
"""
The controller file...
Following operations can be performed:
1. `cap`, `pcap` or `pcapng` (or similar formats) can be converted to csv format -- frame csv file
Headers extracted:
'frame.number'
'frame.time_epoch'
'frame.len'
'wlan.duration'
'-e wlan.bssid'
'-e wlan.ra'
'-e wlan.ta'
'-e wlan.sa'
'-e wlan.da'
'-e wlan.seq'
'-e wlan_mgt.ssid'
'-e wlan_mgt.ds.current_channel'
'-e wlan_mgt.qbss.scount'
'-e wlan_mgt.fixed.reason_code'
'-e wlan_mgt.fixed.status_code'
'-e wlan.fc.type'
'-e wlan.fc.type_subtype'
'-e wlan.fc.retry'
'-e wlan.fc.pwrmgt'
'-e wlan.fc.moredata'
'-e wlan.fc.frag'
'-e wlan.fc.ds'
'-e wlan.qos.priority'
'-e wlan.qos.amsdupresent'
'-e radiotap.channel.freq'
'-e radiotap.mactime'
'-e radiotap.datarate'
'-e radiotap.dbm_antsignal'
2. convert frame csv files to episode characteristics csv files
calculates various characteristics by analyzing a frames csv
slices the frames into episodes
3. tag with active scanning causes according to the old rule based system
4. merge given csv files into one... (maintenance feature)
"""
import argparse
def define_command_line_parser():
    """Build the script's command-line argument parser.

    Arguments:
      -a/--action      (required) path to the `pcap` file
                       (historically stored under dest='pcap')
      -c/--clients     (required) comma-separated client MAC addresses
      --save_plots     flag; save plots when present
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a', '--action',
        action='store', dest='pcap', required=True,
        help='Path to `pcap` file.')
    parser.add_argument(
        '-c', '--clients',
        action='store', dest='clients', required=True,
        help='MAC addresses of clients to filter out. aa:bb:cc:dd:ee:ff [, gg:hh:ii:jj:kk:ll ...]')
    parser.add_argument(
        '--save_plots',
        action='store_true', dest='should_save_plots',
        help='include to save plots')
    return parser
def get_command_line_arguments():
    # TODO: parse args via define_command_line_parser(); not yet implemented.
    pass

def process_and_execute_commands():
    # TODO: dispatch the requested action; not yet implemented.
    pass

if __name__ == '__main__':
    # Entry point is still a stub.
    pass
|
# send_crest_request
import http.client
import csv
import json
import os.path
import threading
import datetime
#import redis
def send_crest_requset(url, flag, option):
    """Fetch live game telemetry from a Project CARS CREST endpoint.

    When the game is running (mGameState > 1) and flag == 'standalone',
    the reading is appended to ./standalone.csv with a timestamp.
    Returns the decoded JSON payload in all cases.

    Fixes: `datetime.now()` was called on the *module* (`import datetime`),
    which raises AttributeError — now `datetime.datetime.now()`; the CSV
    file is opened with `with`; the unused `global standaloneWriter` was
    removed.  NOTE(review): `option` is accepted but unused — confirm with
    callers before removing it.
    """
    conn = http.client.HTTPConnection(url)
    conn.request("GET", "/crest/v1/api")
    res = conn.getresponse()
    data = json.loads(res.read().decode('utf8', "ignore"))
    if data["gameStates"]["mGameState"] > 1:
        if flag == 'standalone':
            file_path = './standalone.csv'
            with open(file_path, 'a') as f:
                writer = csv.writer(f)
                writer.writerow([str(datetime.datetime.now()), data])
        elif flag == 'crest-monitor':
            return data
    return data
|
import json
from rest_framework.views import status
from api.tests.base import AuthBaseTest
from django.urls import reverse
class AuthRegisterUserTest(AuthBaseTest):
    """
    Tests for /auth/register/ endpoint

    Fixture data (self.valid_data / self.invalid_data) comes from
    AuthBaseTest; URLs are versioned via the 'version' kwarg.
    """
    def test_create_a_user_profile_with_valid_data(self):
        # test creating a user with valid data
        url = reverse(
            'shop_list_api:shop-list-api-register-user',
            kwargs={
                'version': 'v1'
            }
        )
        response = self.client.post(
            url,
            data=json.dumps(self.valid_data),
            content_type='application/json'
        )
        # assert status code is 201 CREATED
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_a_user_profile_with_invalid_data(self):
        # test creating a user with invalid data
        url = reverse(
            'shop_list_api:shop-list-api-register-user',
            kwargs={
                'version': 'v1'
            }
        )
        response = self.client.post(
            url,
            data=json.dumps(self.invalid_data),
            content_type='application/json'
        )
        # assert status code is 400 BAD REQUEST
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class AuthResetUserPasswordTest(AuthBaseTest):
    """
    Tests for the /auth/reset-password/ endpoint

    Both tests authenticate first via AuthBaseTest.login_client; the
    invalid case sends the wrong field name ('pass' instead of 'password').
    """
    def test_reset_user_password_with_valid_data(self):
        # test reset password with valid data
        url = reverse(
            'shop_list_api:shop-list-api-reset-password',
            kwargs={
                'version': 'v1'
            }
        )
        self.login_client('test_user', 'testing')
        response = self.client.put(
            url,
            data=json.dumps({
                'password': 'some-long-password'
            }),
            content_type='application/json'
        )
        # assert status code is 200 OK
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_reset_user_password_with_invalid_data(self):
        # test reset password with invalid data (wrong field name)
        url = reverse(
            'shop_list_api:shop-list-api-reset-password',
            kwargs={
                'version': 'v1'
            }
        )
        self.login_client('test_user', 'testing')
        response = self.client.put(
            url,
            data=json.dumps({
                'pass': 'some-long-password'
            }),
            content_type='application/json'
        )
        # assert status code is 400 BAD REQUEST
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class AuthLoginUserTest(AuthBaseTest):
    """
    Tests for the /auth/login/ endpoint

    The valid-credentials case also checks that a 'token' key is present
    in the response body.
    """
    def test_login_user_with_valid_credentials(self):
        # test logging in with valid credentials
        url = reverse(
            'shop_list_api:shop-list-api-login-user',
            kwargs={
                'version': 'v1'
            }
        )
        response = self.client.post(
            url,
            data=json.dumps({
                'username': 'test_user',
                'password': 'testing'
            }),
            content_type='application/json'
        )
        # assert token key exists
        self.assertIn('token', response.data)
        # assert status code is 200 OK
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_login_user_with_invalid_credentials(self):
        # test logging in with invalid credentials
        url = reverse(
            'shop_list_api:shop-list-api-login-user',
            kwargs={
                'version': 'v1'
            }
        )
        response = self.client.post(
            url,
            data=json.dumps({
                'username': 'user',
                'password': 'tester'
            }),
            content_type='application/json'
        )
        # assert status code is 401 UNAUTHORIZED
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class AuthLogoutUserTest(AuthBaseTest):
    """
    Tests for the /auth/logout/
    """

    def _logout_url(self):
        # Resolve the versioned logout endpoint once for both tests.
        return reverse(
            'shop_list_api:shop-list-api-logout-user',
            kwargs={'version': 'v1'}
        )

    def test_logout_user(self):
        """A logged-in client can log out and receives 200 OK."""
        self.login_client('test_user', 'testing')
        response = self.client.get(self._logout_url())
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_logout_user_with_out_login(self):
        """Logging out without authenticating yields 401 UNAUTHORIZED."""
        response = self.client.get(self._logout_url())
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
|
import numpy as np

# numpy.frombuffer interprets an existing buffer as a 1-D array without copying.
# BUG FIX: frombuffer requires a bytes-like object; a Python 3 str has no
# buffer interface, so the original str literal raised TypeError.
s = b'Hello World'
a = np.frombuffer(s, dtype='S1')  # one single-byte string per character
print(a)
# plot the energies
# Created by Martin Gren 2014-10-25.

# imports
# NOTE: matplotlib.pylab is a deprecated compatibility shim; pyplot is the
# supported interface and provides everything this script uses.
import matplotlib.pyplot as plt
import numpy as np

# input file
filename = 'energy_data_eq.dat'

# import data (column 0: iteration index, column 1: energy)
data = np.loadtxt(filename)

# initial size of plot window
plt.figure(figsize=(8,6))

# plot
plt.plot(data[:,0], data[:,1],'-', label='energy')

# labels
plt.xlabel('Iterations / []', fontsize=20)
plt.ylabel('Energy / []', fontsize=20)

# legend
plt.legend(loc='upper right')
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=12)

# tick fontsize
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)

plt.title('Energy during iterations')

# display the plot
plt.show()
|
# Allowed nucleotide letters, including U and IUPAC ambiguity codes (N, K, S, ...).
VALID_NUCLEIC_ACIDS = 'ACGTNUKSYMWRBDHV'
# Allowed amino-acid letters (A-N interleaved with P-Z, including ambiguity
# codes B, Z, X) — presumably machine-generated ordering; TODO confirm alphabet.
VALID_AMINO_ACIDS = 'APBQCRDSETFUGVHWIYKZLXMN'
# Translation tables that DELETE every valid character, so translating a
# sequence leaves only its invalid characters behind.
NUCLEIC_DELCHARS = str.maketrans({ord(c): None for c in VALID_NUCLEIC_ACIDS})
AA_DELCHARS = str.maketrans({ord(c): None for c in VALID_AMINO_ACIDS})
def validate_sequence(sequence, valid_chars):
    """Check whether sequence is valid nucleotide or protein sequence.

    *valid_chars* is a str.maketrans table that deletes every valid letter;
    a sequence is valid when deleting them leaves nothing behind.
    """
    leftover = sequence.upper().translate(valid_chars)
    return len(leftover) == 0
def validate_protein(sequence):
    """Return True when *sequence* contains only valid amino-acid letters."""
    return validate_sequence(sequence, AA_DELCHARS)
def validate_search_params(search_params):
    """Normalize search parameters and drop falsy entries.

    Validates 'diff_mods' and 'options' in place on *search_params*,
    then returns a new dict containing only the truthy entries.
    """
    search_params['diff_mods'] = _validate_diff_mods(search_params.get('diff_mods'))
    search_params['options'] = _validate_options(search_params.get('options'))
    filtered = {}
    for key, value in search_params.items():
        if value:
            filtered[key] = value
    return filtered
def _validate_diff_mods(diff_mods):
    """Filter and normalize differential modifications.

    A mod dict is kept only when its 'mass' is positive and its 'aa' string
    contains valid amino-acid letters. Kept mods are normalized IN PLACE.
    Returns None when no mods were supplied.
    """
    if not diff_mods:
        return None
    valid_mods = []
    for mod in diff_mods:
        if float(mod['mass']) > 0 and validate_protein(mod['aa']):
            # make sure we aren't repeating amino acids
            # (note: set() discards the original letter order)
            mod['aa'] = ''.join(set(mod['aa']))
            # make sure amino acid is uppercase else IP2/Prolucid will not find anything
            mod['aa'] = mod['aa'].upper()
            # keep composition keyed by uppercase amino acids for consistency
            mod['comp'] = dict(zip(
                map(str.upper, mod['comp'].keys()),
                mod['comp'].values()
            ))
            # make sure we have light and heavy options set
            mod['light'] = mod.get('light') or False
            mod['heavy'] = mod.get('heavy') or False
            valid_mods.append(mod)
    return valid_mods
def _validate_options(options):
if not options:
return None
accepted_options = {
'minPeptidesPerProtein': int,
'maxNumDiffmod': int
}
valid_options = {}
for k, v in options.items():
if k in accepted_options.keys():
try:
valid_options[k] = accepted_options[k](v)
except:
pass
return valid_options
|
def voto(ano):
    """Return a voting-obligation message based on the year of birth.

    Voting is optional for ages 16-17 and over 65, mandatory for 18-65,
    and not allowed under 16 (Brazilian rules).

    :param int ano: year of birth
    :return str: status message (user-facing text in Portuguese)
    """
    from datetime import date
    idade = date.today().year - ano
    if idade <= 0:
        return 'Essa pessoa nem nasceu!'
    if idade < 16:
        return f'Com {idade} ano(s): Não Vota.'
    # BUG FIX: the original condition `18 > idade > 65` can never be true,
    # so the optional-vote branch was unreachable. Optional voting applies
    # to 16-17 year olds and to people older than 65.
    if idade < 18 or idade > 65:
        return f'Com {idade} ano(s): Voto Opcional'
    return f'Com {idade} ano(s): Voto Obrigatório'
# Interactive entry point: ask for the birth year and print the verdict.
ano = int(input('Em que ano você nasceu? '))
print(f'{voto(ano)}')
|
import pandas as pd
import numpy as np

########## Specify the name of excel with the data ##########
fileData = pd.read_excel('Sample DataSet.xlsx')

########## Removes last 2 characters ##########
# Vectorized replacement for the old row-by-row loop; also avoids the
# chained-assignment pitfall of writing through .iloc on a column slice.
# NOTE(review): assumes the 'Section' column has no missing values — the
# original loop over .count() rows made the same assumption implicitly.
fileData['Section'] = fileData['Section'].astype(str).str[:-2]

########## Specify the name of the column based on which the split needs to be performed ##########
for section in fileData['Section'].unique():
    temp_DataFrame = fileData[fileData['Section'] == section]
    ########## Specify the name of the column based on which the sort needs to be performed ##########
    # BUG FIX: DataFrame.sort() was removed in pandas 0.20; sort_values is
    # the replacement (ascending=0 is spelled False for clarity).
    result = temp_DataFrame.sort_values(['Total'], ascending=False)
    Filename = str(temp_DataFrame['Section'].iloc[0]) + '.xlsx'
    # The context manager saves and closes the workbook on exit
    # (ExcelWriter.save() was removed in modern pandas).
    with pd.ExcelWriter(Filename, engine='xlsxwriter') as writer:
        result.to_excel(writer, 'Sheet1')
    print('File ' + Filename + ' created succesfully')
|
from splinter import Browser
from bs4 import BeautifulSoup as bs
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
def init_browser():
    """Create a visible (non-headless) Chrome splinter Browser.

    webdriver_manager downloads/locates a chromedriver matching the
    installed Chrome version.
    """
    executable_path = {'executable_path': ChromeDriverManager().install()}
    return Browser('chrome', **executable_path, headless=False)
def scrape():
    """Scrape several Mars-related sites and return the results in one dict.

    Returns:
        dict with the latest news title/paragraph, the featured JPL image
        URL, a Mars facts table rendered as HTML, and one {title, img_url}
        dict per hemisphere.
    """
    # NASA Mars News #
    browser = init_browser()
    url = 'https://redplanetscience.com/'
    browser.visit(url)
    time.sleep(1)  # give the JS-rendered page a moment to populate
    html = browser.html
    soup = bs(html, 'html.parser')
    news = soup.find_all('div', class_='list_text')
    newst = []
    newsp = []
    for new in news:
        n_t = new.find('div', class_='content_title').text
        n_p = new.find('div', class_='article_teaser_body').text
        newst.append(n_t)
        newsp.append(n_p)
    # Keep only the most recent article.
    newst = newst[0]
    newsp = newsp[0]
    browser.quit()
    # JPL Mars Space Images - Featured Image
    browser = init_browser()
    img_url = 'https://spaceimages-mars.com/'
    browser.visit(img_url)
    time.sleep(1)
    img_html = browser.html
    soup = bs(img_html, 'html.parser')
    img = soup.find('a', class_='showimg')
    featured_image_url = f'{img_url}{img["href"]}'
    browser.quit()
    # Facts table #
    facts_url = 'https://galaxyfacts-mars.com/'
    tables = pd.read_html(facts_url)
    df_facts = tables[0]
    # The first row of the scraped table holds the column headers.
    df_facts.columns = df_facts.iloc[0, :]
    df_facts.drop(index=0, inplace=True)
    df_facts.reset_index(drop=True, inplace=True)
    df_facts_html = df_facts.to_html(index=False)
    # Hemispheres images #
    browser = init_browser()
    hem_url = 'https://marshemispheres.com/'
    browser.visit(hem_url)
    time.sleep(1)
    hi_url = []
    names = []
    hurl = []
    hhtml = browser.html
    soup = bs(hhtml, 'html.parser')
    hemis = soup.find_all('div', class_='description')
    for hem in hemis:
        hemisphere = hem.find('a').find('h3').text
        names.append(hemisphere)
    for name in names:
        # BUG FIX: the original clicked names[0] on every pass, so all four
        # entries ended up with the first hemisphere's image. Click the
        # link for the CURRENT hemisphere instead.
        browser.links.find_by_partial_text(name).click()
        hhtml = browser.html
        soup = bs(hhtml, 'html.parser')
        wrapper = soup.find('div', class_='wide-image-wrapper')
        image_src = wrapper.find('img', class_='wide-image')['src']
        hurl.append(f'{hem_url}{image_src}')
        browser.back()
    for title, image_url in zip(names, hurl):
        hi_url.append({'title': title, 'img_url': image_url})
    browser.quit()
    # Create dictionary for all this #
    dict1 = {
        # Mars title
        'news_title': newst,
        'news_p': newsp,
        # Featured Image
        'featured_image_url': featured_image_url,
        # Mars facts
        'mars_facts_html': df_facts_html,
        'hemisphere_image_url': hi_url
    }
    return dict1
|
from flask import Flask, render_template, request, redirect, url_for
import csv, random, json

app = Flask(__name__)

# Load the mock-profile CSV once at startup.
# BUG FIX: the original split lines on ',' by hand (breaking on any quoted
# field) and stripped the file's last character to drop a trailing newline;
# the csv module handles quoting and line endings correctly.
FIELD_NAMES = ["ID", "first", "last", "email", "country", "drugs", "gender", "color"]
profiles = []
with open("./static/MOCK_DATA.csv", newline="") as csv_file:
    reader = csv.reader(csv_file)
    next(reader)  # skip the header row (the original dropped it via profiles[1:])
    for row in reader:
        profiles.append(dict(zip(FIELD_NAMES, row)))
@app.route("/", methods=["GET","POST"])
def index():
    """Serve the landing page (index.html) for both GET and POST."""
    return render_template("index.html")
@app.route("/profile")
def profile():
    """Return one profile (by 0-based index) or all profiles as JSON.

    Query parameter 'data': a valid non-negative index selects a single
    profile; a negative value returns the full list. A missing,
    non-numeric, or out-of-range value now falls back to the full list
    instead of raising an unhandled exception (HTTP 500).
    """
    raw = request.args.get("data")
    try:
        index = int(raw)
    except (TypeError, ValueError):
        # Missing or non-numeric 'data' parameter.
        return json.dumps(profiles)
    if index < 0 or index >= len(profiles):
        return json.dumps(profiles)
    return json.dumps(profiles[index])
# Run the development server on all interfaces when executed directly.
if __name__=="__main__":
    app.debug=True
    app.run(host='0.0.0.0',port=8000)
|
def gd5consect_numbers(number):
    """ (int) -> int
    Return the greatest product of five consecutive digits in number.
    >>> gd5consect_numbers(12345678935)
    15120
    >>> gd5consect_numbers(9998543521)
    29160
    """
    digits = [int(ch) for ch in str(number)]
    window_products = []
    # Slide a 5-digit window across the number and record each product.
    for start in range(len(digits) - 4):
        product = 1
        for digit in digits[start:start + 5]:
            product *= digit
        window_products.append(product)
    return max(window_products)
|
import re
import os
import sys
def banner():
    """Print the ASCII-art welcome banner for the ALPHA CODE tool."""
    print(' ****************************************** ')
    print(' ******************************************************************* ')
    print('******************************* **********************************')
    print('************************** ALPHA CODE *****************************')
    print('******************************* **********************************')
    print(' ******************************************************************* ')
    print(' ******************WELCOME***************** \n\n')
def clearScreen():
    """Clear the terminal screen and redraw the banner (non-Windows)."""
    if sys.platform == 'win32':
        os.system('cls')
    else:
        os.system('clear')
        # BUG FIX: `banner` was referenced without parentheses, making it a
        # no-op expression; call it so the banner is actually reprinted.
        # NOTE(review): presumably the Windows branch should redraw it too —
        # confirm intent before moving the call out of the else.
        banner()
def lasted(elem):
    """Sort key: the record's 8th field (index 7), coerced to int."""
    field = elem[7]
    return int(field)
# find related areas
def relatedAreas(word, datain):
    """Interactively pick an industry/area from the given records.

    Collects the distinct values of field 5 (area) from records whose area
    does NOT contain *word*, prints them as a numbered menu, and returns the
    area the user selects (or None if the entered number matches nothing).

    NOTE(review): the `not in` filter keeps areas that do NOT match *word*,
    which looks inverted for "related" areas — confirm intent.
    """
    areas = []
    a = []
    for line in datain:
        if word.lower() not in line[5].lower():
            areas.append(line[5])
    # Deduplicate; set() discards the original ordering.
    a = set(areas)
    areas = list(a)
    i = 0
    for area in areas:
        print('[' + str(i) + ']' + area)
        i += 1
    area_index = input('\nEnter the number of an industry in which these types of clients are located:')
    # Return the area whose list position matches the user's choice.
    for area in areas:
        if int(areas.index(area)) == int(area_index):
            return area
# --- Interactive entry point ---------------------------------------------
banner()
Input = input('Describe, in one word, the type of person to whom marketing is directed: \nExample: developer\n>')
# Open files to read
# people.in: one '|'-separated record per line.
data = []
with open('people.in', 'r') as file:
    data1 = file.read().split('\n')
    for i in data1:
        data.append(i.split('|'))
# Learn.txt: '|'-separated rows of <id>|<yes-words>|<no-words>.
with open('Learn.txt', 'r') as learnfile:
    learn = learnfile.read()
rows = learn.split('\n')
# converting str to list
set_learn = []
for row in rows:
    if row != '':
        fields = row.split('|')
        yWords = fields[1].split(' ')
        nWords = fields[2].split(' ')
        seld = []
        seld.append(int(fields[0]))
        seld.append(yWords)
        seld.append(nWords)
        set_learn.append(seld)
# Learned search: expand the query via each matching rule's yes-words.
# NOTE(review): the inner `for row in data` shadows the outer `row` loop
# variable — it happens to work but is fragile; flagged for cleanup.
pre_out = []
for row in set_learn:
    if Input in row[1]:
        for word1 in row[1]:
            for row in data:
                if word1.lower() in row[3] or word1.lower() in row[5]:
                    pre_out.append(tuple(row))
# traditional search
for row in data:
    if row[3].lower().find(Input.lower()) > -1 or row[5].lower().find(Input.lower()) > -1:
        pre_out.append(tuple(row))
# Expand results with records from a user-chosen related area.
search = []
search.append(relatedAreas(Input, pre_out))
for row in data:
    if row[5].lower().find(search[0].lower()) > -1:
        pre_out.append(tuple(row))
out = list(set(pre_out))
for i in out:
    print(i)
print(len(out))
# Keep asking for more areas until at least 100 unique prospects are found.
# NOTE(review): this can loop forever if the chosen areas add no new rows.
while len(out) < 100:
    search = []
    search.append(relatedAreas(Input, pre_out))
    for row in data:
        if row[5].lower().find(search[0].lower()) > -1:
            pre_out.append(tuple(row))
    out = list(set(pre_out))
    for i in out:
        print(i)
    print(len(out))
# Sort by the 8th field (see lasted()) and write the top-100 ids.
out.sort(key=lasted, reverse=True)
with open('people.out', 'w') as out_file:
    i = 0
    while i < 100:
        out_file.write(str(out[i][0]) + '\n')
        i += 1
print('Ids of 100 prospects saved in ./people.out')
import logging
import torch
from typing import Dict
from torch.utils.tensorboard import SummaryWriter
from ..abstract_callback import AbstractCallback, ModelTrainer
from sonosco.serialization import serializable
LOGGER = logging.getLogger(__name__)
@serializable
class LasTextComparisonCallback(AbstractCallback):
    """
    Perform inference on an las model and compare the generated text with groundtruth and add it to tensorboard.
    Args:
        log_dir: tensorboard output directory
        labels: string with characters that the model supports
        args: dictionary of arguments for the model decoding step such as beam size
        samples: number of samples to compare and visualize at a time
    """
    # Fields populated by the @serializable decorator (dataclass-style).
    log_dir: str
    labels: str
    args: Dict[str, str]
    samples: int = 4
    def __post_init__(self) -> None:
        """
        Post initialization.
        """
        # samples should be less than batch size
        self.writer = SummaryWriter(log_dir=self.log_dir)
    def __call__(self,
                 epoch: int,
                 step: int,
                 performance_measures: Dict,
                 context: ModelTrainer,
                 validation: bool = False) -> None:
        """
        Execute las text comparison during inference callback.
        Args:
            epoch: epoch step
            step: step inside of the epoch
            performance_measures: performance measures dictionary
            context: model trainer
            validation: should validation dataloader be used for comparison
        """
        # Run only every context.test_step steps, never at step 0.
        # NOTE(review): the `validation` flag is accepted but unused — the
        # test data loader is always used below; confirm intent.
        if step == 0 or step % context.test_step > 0:
            return
        model = context.model
        decoder = context.decoder
        # Take a single batch for qualitative comparison.
        batch = next(iter(context.test_data_loader))
        batch = context._recursive_to_cuda(batch)
        batch_x, batch_y, input_lens, target_lens = batch
        transcriptions = []
        split_targets = []
        offset = 0
        # batch_y holds all targets concatenated; slice it back into
        # per-sample targets using target_lens.
        for size in target_lens:
            split_targets.append(batch_y[offset:offset + size])
            offset += size
        groundtruths = decoder.convert_to_strings(split_targets)
        # Run recognition sample by sample and log each pairing.
        for i, el in enumerate(batch_x):
            transcriptions.append(model.recognize(el[0].transpose(0, 1), input_lens[i:i+1], self.labels, self.args)[0])
        for transcription, groundtruth in zip(transcriptions, groundtruths):
            trans = decoder.convert_to_strings(torch.tensor([transcription['yseq']]))
            comparison = f"Transcription: {trans}. Groundtruth: {groundtruth}"
            LOGGER.info(comparison)
            self.writer.add_text("inference_text_comparison", comparison, step)
|
from flask import Flask, request
from flask_restful import Resource, Api
from deta import Deta
from dotenv import load_dotenv
import os

app = Flask(__name__)
api = Api(app)

# BUG FIX: load_dotenv was imported but never invoked, so a DETA_KEY stored
# in a local .env file was never placed into os.environ and the lookup
# below raised KeyError. (Harmless when the variable is set externally.)
load_dotenv()
deta = Deta(os.environ["DETA_KEY"])
|
from .stubs import *
# Rolling state for the rock-paper-scissors bot.
output = ""   # move chosen for the current turn ("R", "P" or "S")
history = ""  # concatenation of every opponent move seen so far
# Maps a move to the move that beats it.
win = {"R": "P", "P": "S", "S": "R"}
def rchoice(s):
    """Pseudo-randomly pick one character of *s* ("P" when *s* is empty).

    Derives a deterministic index from the current turn number (get_turn
    from the stubs module) via an arbitrary arithmetic scramble, so the
    choice is reproducible for a given turn.
    """
    if not s:
        return "P"
    return s[
        int(get_turn() * 24.542243858834 - 4832.584377 * 7325285848277677.3485 % 3285787478 % (get_turn() + 1))
        % len(s)]
# Lookup for the opposing team of each side.
opp_side = {RobotTeam.FIRST: RobotTeam.SECOND, RobotTeam.SECOND: RobotTeam.FIRST}
def turn():
    """Play one rock-paper-scissors turn.

    Appends the opponent's previous move to *history*, then sets *output*
    to the move that beats a pseudo-random sample from that history.
    """
    global history, output
    turn = get_turn()  # NOTE: shadows this function's name inside the body
    if turn == 0:
        # No history yet: open with a pseudo-random move.
        output = rchoice("RPS")
        return
    input = get_action(turn - 1, opp_side[get_side()])  # NOTE: shadows builtin input
    history += input
    output = win[rchoice(history)]
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
import time
class CrawlpageSpider(scrapy.Spider):
    """Spider that appends the raw body of every crawled nytimes.com page
    to a single local file, output.html."""
    name = 'crawlpage'
    allowed_domains = ['nytimes.com']

    def parse(self, response):
        """Append the raw response bytes to output.html.

        The file is opened in append-binary ('ab') mode so successive
        pages accumulate in one file across requests.
        """
        # Removed dead code: `page = response.text` was never used.
        with open('output.html', 'ab') as f:
            f.write(response.body)
|
"""updated columns in power gen table
Revision ID: af833ec69790
Revises: b3d6c710aa26
Create Date: 2021-09-15 16:56:44.335825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'af833ec69790'
down_revision = 'b3d6c710aa26'
branch_labels = None
depends_on = None
def upgrade():
    """Add device/temperature columns, replace the (date_time, location)
    unique index with (date_time, device), and drop the location column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding a NOT NULL column without a server_default fails
    # on a non-empty table — confirm power_generation is empty at migration time.
    op.add_column('power_generation', sa.Column('device', sa.String(length=255), nullable=False))
    op.add_column('power_generation', sa.Column('temperature', sa.Float(), nullable=True))
    op.drop_index('date_time', table_name='power_generation')
    op.create_unique_constraint(None, 'power_generation', ['date_time', 'device'])
    op.drop_column('power_generation', 'location')
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore location, re-create the old unique index,
    and drop the device/temperature columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint by name=None relies on the backend
    # resolving the auto-generated constraint name — verify against MySQL.
    op.add_column('power_generation', sa.Column('location', mysql.VARCHAR(length=255), nullable=False))
    op.drop_constraint(None, 'power_generation', type_='unique')
    op.create_index('date_time', 'power_generation', ['date_time', 'location'], unique=False)
    op.drop_column('power_generation', 'temperature')
    op.drop_column('power_generation', 'device')
    # ### end Alembic commands ###
|
def findstr(instr):
    """Print a fixed demo sentence and whether *instr* occurs in it.

    Converted from Python 2 print statements to Python 3 print() calls
    (the rest of this codebase is Python 3); the local previously named
    `str` shadowed the builtin and was renamed.
    """
    text = 'This is such a long string that I would like to find a substring of'
    print(text)
    if text.find(instr) >= 0:
        print("Found %s" % instr)
    else:
        print("Did not find %s" % instr)
|
../../../py_ros/arm7dp_key.py |
#https://leetcode-cn.com/contest/weekly-contest-215/problems/minimum-operations-to-reduce-x-to-zero/
#https://leetcode-cn.com/problems/minimum-operations-to-reduce-x-to-zero/
#思路1 最直观的思路,不断迭代 从 最左 和 最右 计算子迭代是否能成功,但时间复杂度随着LIST变大,呈现指数扩张
#思路2 求最大的1个内部子串,使得 SUM内部 = SUM ALL - X,因为如果不存在这样的内层子串,那根本不存在这样的计算方法使得X=0;且要计算得到这个中间剩余部分LENG最长的结果,这样取0的LENG就是最短的
#问题转换为 求 sum = total - x 的最长连续子串
class Solution(object):
    def minOperations(self, nums, x):
        """
        Minimum number of elements removed from the two ends of nums so the
        removed values sum exactly to x; -1 when impossible.

        Removing x from the ends is equivalent to KEEPING a middle subarray
        whose sum is sum(nums) - x, so the answer is len(nums) minus the
        longest such subarray (sliding window; nums are positive per the
        problem constraints).

        :type nums: List[int]
        :type x: int
        :rtype: int
        """
        if sum(nums) == x:
            return len(nums)
        if x > sum(nums):
            return -1
        midVal = sum(nums) - x
        totalVal = 0
        indexL = 0
        indexR = 0
        maxMidLen = -1
        # Extend the right edge one element per outer iteration; shrink from
        # the left whenever the window sum exceeds midVal, recording the
        # longest window whose sum equals midVal.
        while indexL < len(nums):
            if indexR < len(nums):
                totalVal += nums[indexR]
                indexR += 1
            while totalVal > midVal and indexL < len(nums):
                totalVal -= nums[indexL]
                indexL += 1
            if totalVal == midVal:
                if maxMidLen == -1:
                    maxMidLen = indexR - indexL
                else:
                    if indexR - indexL > maxMidLen:
                        maxMidLen = indexR - indexL
            if indexR == len(nums):
                # Right edge exhausted: advance the left edge so the outer
                # loop terminates. NOTE(review): totalVal is not decremented
                # here, so the window-sum invariant is stale afterwards; any
                # later equality hits have strictly shorter windows, so the
                # recorded maximum is presumably unaffected — confirm.
                indexL += 1
        shortLen = -1
        if maxMidLen > 0:
            shortLen = len(nums) - maxMidLen
        return shortLen
# Smoke test: each commented fixture below is preceded by its expected
# answer; the active case at the bottom should print 6.
solution = Solution()
#2
#nums = [1,1,4,2,3]
#num = 5
#-1
#nums = [5,6,7,8,9]
#num = 4
#-1
#nums = [1241,8769,9151,3211,2314,8007,3713,5835,2176,8227,5251,9229,904,1899,5513,7878,8663,3804,2685,3501,1204,9742,2578,8849,1120,4687,5902,9929,6769,8171,5150,1343,9619,3973,3273,6427,47,8701,2741,7402,1412,2223,8152,805,6726,9128,2794,7137,6725,4279,7200,5582,9583,7443,6573,7221,1423,4859,2608,3772,7437,2581,975,3893,9172,3,3113,2978,9300,6029,4958,229,4630,653,1421,5512,5392,7287,8643,4495,2640,8047,7268,3878,6010,8070,7560,8931,76,6502,5952,4871,5986,4935,3015,8263,7497,8153,384,1136]
#num = 894887480
#-1
#nums = [5207,5594,477,6938,8010,7606,2356,6349,3970,751,5997,6114,9903,3859,6900,7722,2378,1996,8902,228,4461,90,7321,7893,4879,9987,1146,8177,1073,7254,5088,402,4266,6443,3084,1403,5357,2565,3470,3639,9468,8932,3119,5839,8008,2712,2735,825,4236,3703,2711,530,9630,1521,2174,5027,4833,3483,445,8300,3194,8784,279,3097,1491,9864,4992,6164,2043,5364,9192,9649,9944,7230,7224,585,3722,5628,4833,8379,3967,5649,2554,5828,4331,3547,7847,5433,3394,4968,9983,3540,9224,6216,9665,8070,31,3555,4198,2626,9553,9724,4503,1951,9980,3975,6025,8928,2952,911,3674,6620,3745,6548,4985,5206,5777,1908,6029,2322,2626,2188,5639]
#num = 565610
#5
#nums = [3,2,20,1,1,3]
#num = 10
#5
#nums = [1,5,10,3,2,4]
#num = 15
#1
#nums = [1,1,3,2,5]
#num = 5
#6
nums = [6016,5483,541,4325,8149,3515,7865,2209,9623,9763,4052,6540,2123,2074,765,7520,4941,5290,5868,6150,6006,6077,2856,7826,9119]
num = 31841
print(solution.minOperations(nums, num))
import unittest
import numpy as np
from ized import qr
class QRchecker(unittest.TestCase):
    """Shared fixture and matrix-shape assertions for the QR test cases."""
    def setUp(self):
        # 5x4 test matrix: a tridiagonal 4x4 block with a row of ones appended.
        self.test_matrix = np.r_[
            np.eye(4) - np.eye(4, k=1) - np.eye(4, k=-1),
            np.ones((1, 4))]
    def assertUpperTriangular(self, mat):
        """Assert mat is square and zero strictly below the diagonal."""
        nrows = mat.shape[0]
        self.assertSquareMatrix(mat)
        # Element-wise asserts keep failure messages pinpointed to (j, i).
        for i in range(nrows):
            for j in range(i+1, nrows):
                self.assertEqual(mat[j, i], 0)
    def assertSquareMatrix(self, mat):
        """Assert mat has as many columns as rows."""
        nrows = mat.shape[0]
        self.assertEqual(mat.shape, (nrows, nrows))
class TestQRmapped(QRchecker):
    """Tests for qr.qr_mapped (the map step of the map-reduce QR)."""
    def test_qr_mapped_returns_upper_triangular_of_large_matrix(self):
        # With n=4 the 5x4 fixture exceeds the threshold and is reduced to R.
        upper_triangular = qr.qr_mapped(self.test_matrix, n=4)
        self.assertUpperTriangular(upper_triangular)
    def test_qr_mapped_just_returns_the_same_for_small_matrix(self):
        # With n=6 the fixture counts as "small" and must pass through unchanged.
        not_upper_triangular = qr.qr_mapped(self.test_matrix, n=6)
        self.assertEqual(not_upper_triangular.shape, self.test_matrix.shape)
        self.assertTrue(np.all((not_upper_triangular - self.test_matrix) == 0))
class TestQRreduce(QRchecker):
    """Tests for qr.qr_reduce (the reduce step combining two matrices)."""
    def test_qr_reduce_reduces_to_upper_triangular_if_matrices_are_big(self):
        # Two 5x4 inputs stacked exceed n=4, so the result is reduced to R.
        upper_triangular = qr.qr_reduce(self.test_matrix,
                                        self.test_matrix,
                                        n=4)
        self.assertUpperTriangular(upper_triangular)
    def test_qr_reduce_does_nothing_for_small_matricies(self):
        # With n=11 the stacked 10x4 matrix stays below the threshold and is
        # returned as the plain concatenation.
        not_upper_triangular = qr.qr_reduce(self.test_matrix,
                                            self.test_matrix,
                                            n=11)
        self.assertEqual(not_upper_triangular.shape, (10, 4))
class TestMapReduceQr(QRchecker):
    """End-to-end tests for the map-reduce QR pipeline and the solver."""
    def test_on_iterator(self):
        # The pipeline must accept a lazy iterator of chunks, not just a list.
        x_chunks = (self.test_matrix for _ in range(5))
        upper_triangular = qr.mapreduce_qr(x_chunks)
        self.assertUpperTriangular(upper_triangular)
    def test_lm_solve_qr(self):
        x_chunks = (self.test_matrix for _ in range(5))
        # w: solution vector, r2: residual sum of squares (must be positive).
        w, r2, _ = qr.lm_solve_qr(x_chunks)
        self.assertGreater(r2, 0)
        self.assertEqual(len(w.shape), 1)
        self.assertEqual(w.shape, (3,))
|
"""
A module that preprocesses adverse events data to be ready for xgboost training.
"""
import json
import os
import time
import numpy as np
import pandas as pd
#import modin.pandas as pd
import torch
from collections import OrderedDict
def get_frequent_features(vocab, num_features, codes_only=True, exclusion_list=None):
    """
    Get the most frequent codes/features.
    Args:
        vocab(Object): Vocab object that contains all the vocabs
        num_features(int): Number of features to be selected
        codes_only(bool): Whether to select ICD10 codes only
        exclusion_list(list): List of codes to be excluded (eg. labels events)
    Returns:
        List of most frequent features
    """
    # BUG FIX: the original used a mutable default argument ([]), which is
    # shared across calls; use None as the sentinel instead.
    if exclusion_list is None:
        exclusion_list = []
    # Over-fetch candidates so enough survive the exclusion filters below.
    num_exc = len(exclusion_list) + 100
    features = vocab.freqs.most_common(num_features + num_exc)
    if codes_only:
        # Codes are identified by an underscore in the token.
        features = [word[0] for word in features if word[0] not in exclusion_list and ('_' in word[0])]
    else:
        features = [word[0] for word in features if word[0] not in exclusion_list]
    features = [word for word in features if 'day' not in word]  # Exclude day features
    features = features[:num_features]
    return features
def get_feature_ids(vocab, frequent_features):
    """
    Get the corresponding dictionary ids of the for the selected frequent features.
    Args:
        vocab(Object): Vocab Object
        frequent_features(list): List of most frequent features(events)
    Returns:
        Corresponding ids of the features in the vocab dict
    """
    # Hoist the string-to-index mapping lookup out of the comprehension.
    lookup = vocab.stoi
    return [lookup[feature] for feature in frequent_features]
def get_one_hot_frequent_features(row, frequent_features):
    """
    Gets one-hot encoding of the most frequent features of a given patient data
    Args:
        row(pd.Series): row to specify patient's specific adverse event
        frequent_features(list): List of frequent features (events)
    Returns:
        List with a 1 for each frequent feature present in the row, else 0
    """
    # Membership testing against a set is O(1) per feature.
    present = set(row.tolist())
    encoding = []
    for feature in frequent_features:
        encoding.append(1 if feature in present else 0)
    return encoding
def read_numpy(numpy_path, columns=None):
    """
    Read numpy file and return dataframe
    Args:
        numpy_path(str): Numpy file path
        columns(list): List of columns; defaults to 1-based integer labels
    Returns:
        Dataframe of the loaded numpy file
    """
    array = np.load(numpy_path)
    if columns is None:
        # Default column labels are 1-based integers.
        columns = range(1, array.shape[1] + 1)
    return pd.DataFrame(array, columns=columns)
def read_labels(labels_path):
    """
    Read list of labels from path
    Args:
        labels_path(str): Labels/Classes file path
    Returns:
        List of classes/labels, one per line, whitespace-stripped
    """
    with open(labels_path, 'r') as fp:
        return [line.strip() for line in fp.readlines()]
def get_class_imbalance(df_y):
    """
    Get class imbalance for all the target variables.
    Args:
        df_y(DataFrame): Dataframe that contains # of positive and negative examples for each class
    Returns:
        Dictionary of class imbalances for each class
    """
    # One value_counts per column; transpose so each row is one class.
    counts = df_y.apply(lambda col: col.value_counts()).transpose()
    return dict(zip(df_y.columns.tolist(), counts.values.tolist()))
def preprocess(numpy_x_path, numpy_y_path, features, features_ids, labels, split, output_dir, class_imbalance_path=None):
    """
    Transform the predictor data to one-hot encoding and aggregate with target data.
    Args:
        numpy_x_path(str): Numpy file path of X data
        numpy_y_path(str): Numpy file path of y data
        features(list): List of features/events
        features_ids(list): Corresponding list of feature ids
        labels(list): List of classes
        split(str): Dataset split
        output_dir(str): Output directory
        class_imbalance_path(str): Classes imbalances path (required for the
            'train' split, where class imbalances are computed and saved)
    Returns:
        Dataframe of the preprocessed data
    """
    print('Preprocessing and saving {} data...'.format(split))
    df_x = read_numpy(numpy_x_path, columns=None)
    df_y = read_numpy(numpy_y_path, columns=labels)
    # Row-wise one-hot encoding against the selected frequent feature ids.
    df_x = df_x.apply(get_one_hot_frequent_features, axis=1, args=(features_ids,))
    df_x = pd.DataFrame(df_x.tolist(), columns=features)
    # Predictors first, then the target columns.
    df = pd.concat([df_x, df_y], axis=1)
    # Class imbalances are computed on the training split only.
    if split=='train':
        imb = get_class_imbalance(df_y)
        with open(class_imbalance_path, 'w') as fp:
            json.dump(imb, fp)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_path = os.path.join(output_dir, split+'.csv')
    df.to_csv(output_path, index=False)
    print('{} data successfully preprocessed!'.format(split))
    return df
def prepare(df, num_features_list, labels, output_dir, split='train'):
    """
    Prepares data for model training.

    Writes one headerless CSV per (num_features, label) combination under
    output_dir/<num_features>/<label>/<split>.csv with the label as the
    first column (the layout xgboost's CSV loader expects).
    Args:
        df(Dataframe): Preprocessed data
        num_features_list(list): List of the number of features to be selected
        labels(list): List of classes
        output_dir(str): Output directory
        split(str): Dataset split
    Returns:
        None
    """
    # Feature columns come first in df; the last len(labels) are targets.
    num_targets = len(labels)
    features = df.columns.tolist()[:-num_targets]
    for num_features in num_features_list:
        print('Preparing data with {} features...'.format(num_features))
        for label in labels:
            # Label first, then the top-N feature columns.
            columns = [label] + features[:num_features]
            my_output_dir = os.path.join(output_dir, str(num_features), label)
            if not os.path.exists(my_output_dir):
                os.makedirs(my_output_dir)
            output_path = os.path.join(my_output_dir, split+'.csv')
            df[columns].to_csv(output_path, index=False, header=None)
    print('Successfully prepared data for training!')
if __name__ == "__main__":
    # --- Configuration ----------------------------------------------------
    ROOT_DIR = '/home/ec2-user/SageMaker/CMSAI/modeling/tes/data/final-global/ae/1000/'
    RAW_DATA_DIR = os.path.join(ROOT_DIR, 'raw')
    # Per-split (x, y) numpy file names.
    SPLITS_FNAMES = OrderedDict({'train': ['final_allvocab_x_train.npy', 'final_allvocab_y_train.npy'],
                                 'val': ['final_allvocab_x_val.npy', 'final_allvocab_y_val.npy'],
                                 'test': ['cms_test_x.npy', 'cms_test_y.npy']
                                })
    VOCAB_PATH = os.path.join(RAW_DATA_DIR, 'ae_all_vocab_last180_whole')
    LABELS_PATH = os.path.join(RAW_DATA_DIR, 'labels.txt')
    CLASS_IMBALANCE_PATH = os.path.join(RAW_DATA_DIR, 'class_imbalances.json')
    PREPROCESSED_DATA_DIR = os.path.join(ROOT_DIR, 'preprocessed')
    TRAIN_DATA_DIR = os.path.join(ROOT_DIR, 'training')
    S3_OUTPUT_DIR = 's3://cmsai-mrk-amzn/CSVModelInputs/Tes/models/ae/final-global/data/'
    NUM_FREQUENT_FEATURES = 300
    NUM_FEATURES_LIST = [100, 200, 300]
    MEDICAL_CODES_ONLY = True
    # --- Feature selection -------------------------------------------------
    labels = read_labels(LABELS_PATH)
    # Exclude special tokens and the label events themselves from features.
    EXCLUSION_LIST = ['nan', 'pad', 'unk'] + labels
    vocab = torch.load(VOCAB_PATH)
    features = get_frequent_features(vocab,
                                     NUM_FREQUENT_FEATURES,
                                     MEDICAL_CODES_ONLY,
                                     EXCLUSION_LIST)
    features_ids = get_feature_ids(vocab, features)
    # --- Preprocess and prepare every split --------------------------------
    for split, fnames in SPLITS_FNAMES.items():
        data_path_x = os.path.join(RAW_DATA_DIR, fnames[0])
        data_path_y = os.path.join(RAW_DATA_DIR, fnames[1])
        df = preprocess(data_path_x, data_path_y, features, features_ids, labels, split, PREPROCESSED_DATA_DIR, CLASS_IMBALANCE_PATH)
        prepare(df, NUM_FEATURES_LIST, labels, TRAIN_DATA_DIR, split)
        # Free the large frame before loading the next split.
        del df
    # Upload the training CSVs to S3 via the AWS CLI.
    command = 'aws s3 cp --recursive --quiet {} {}'.format(TRAIN_DATA_DIR, S3_OUTPUT_DIR)
    os.system(command)
    print('All data successfully preprocessed and copied to {}!'.format(S3_OUTPUT_DIR))
|
import time
import pandas as pd
import numpy as np
# Map of supported city names to their CSV data files.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
# Valid month / weekday filter values; 'all' disables the filter.
months = ['january', 'february', 'march', 'april', 'may', 'june','all']
week = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday','all']
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.
    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    # NOTE(review): `city` is a module-level global because user_stats()
    # reads it later; refactoring would require changing both functions.
    global city
    city= input('please choose the city : chicago or new york city or washington ? ').lower()
    while city not in CITY_DATA.keys():
        city= input('please type a valid name (chicago or new york city or washington) ').lower()
    # TO DO: get user input for month (all, january, february, ... , june)
    month = input('which month ? january, february, march , april , may , june or all ? ').lower()
    while month not in months:
        month = input('please type a valid month (january, february, march , april , may , june or all) ').lower()
    # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    day = input('which day in the week ? monday tuesday wednesday thursday friday saturday sunday or all ?').lower()
    while day not in week:
        day = input('please type a valid day (sunday ,monday,..., or all) ').lower()
    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.
    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])
    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    # extract month and day of week from Start Time to create new columns
    df['month'] = df['Start Time'].dt.month
    # BUG FIX: Series.dt.weekday_name was removed in pandas 1.0;
    # dt.day_name() is the replacement and returns e.g. 'Monday', matching
    # the day.title() comparison below.
    df['day_of_week'] = df['Start Time'].dt.day_name()
    # filter by month if applicable
    if month != 'all':
        # use the index of the months list to get the corresponding int
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        # filter by month to create the new dataframe
        df = df[df['month'] == month]
    # filter by day of week if applicable
    if day != 'all':
        # filter by day of week to create the new dataframe
        df = df[df['day_of_week'] == day.title()]
    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()
    # Most common month: 'month' holds 1-based month numbers.
    month_names = ['january', 'february', 'march', 'april', 'may', 'june']
    print(month_names[df['month'].mode()[0] - 1])
    # Most common day of the week.
    print(' the most common day of week ', df['day_of_week'].value_counts().idxmax())
    # Most common start hour (adds an 'hour' helper column to df).
    df['hour'] = df['Start Time'].dt.hour
    print('the most start hour ', df['hour'].mode()[0])
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # Most commonly used start station.
    start_station = df['Start Station'].value_counts().idxmax()
    print('the most common used start station: ',start_station)
    # Most commonly used end station.
    end_station = df['End Station'].value_counts().idxmax()
    print('the most common used end station: ',end_station)
    # BUG FIX: the original printed only the most common END station here;
    # compute the actual most frequent (start, end) pair instead.
    combination = df.groupby(['Start Station', 'End Station']).size().idxmax()
    print('the most common combination of start station and end station trip: ', combination)
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()
    # 'Trip Duration' is in seconds; both figures are reported in hours.
    seconds_per_hour = 60 * 60
    total_seconds = df['Trip Duration'].sum()
    print('the total travel time = '+str(total_seconds/seconds_per_hour)+ ' Hour\n')
    mean_seconds = df['Trip Duration'].mean()
    print('the average_time = '+str(mean_seconds/seconds_per_hour)+ ' Hour\n')
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df):
    """Displays statistics on bikeshare users.

    Gender and birth-year columns only exist for some cities (Washington's
    data lacks them). BUG FIX: the original tested an undefined global
    ``city`` (NameError when called from ``main``, where ``city`` is a
    local); the DataFrame's own columns are checked instead.
    """
    if 'Gender' not in df.columns or 'Birth Year' not in df.columns:
        print(' no user stats for washington city ')
    else:
        print('\nCalculating User Stats...\n')
        start_time = time.time()

        # Counts of user types.
        users = df['User Type'].value_counts()
        print('the counts of users :\n\n', users)

        # Counts of gender.
        print('\n')
        gender = df['Gender'].value_counts()
        print('the counts of gender :\n\n', gender)

        # Earliest, most recent, and most common year of birth.
        print('\n')
        birth_year_min = df['Birth Year'].min()
        print('the earliest year : ', birth_year_min)
        most_recent = df['Birth Year'].max()
        print('the most recent year of birth : ', most_recent)
        most_common = df['Birth Year'].mode()[0]
        print('the most common year of birth : ', most_common)

        print("\nThis took %s seconds." % (time.time() - start_time))
        print('-'*40)
def show_row_data(df):
    """Interactively print the first N raw rows of the DataFrame.

    Args:
        df: pandas DataFrame of raw trip records.
    """
    answer = input('do you want to see the raw data ? please choose yes or no ').lower()
    if answer == 'yes':
        no_of_rows = int(input('how many row you want to see? '))
        # BUG FIX: ``iloc[:no_of_rows - 1]`` printed one row fewer than the
        # user asked for; slice up to no_of_rows instead.
        print(df.iloc[:no_of_rows])
    else:
        print("Exiting.....")
def main():
    """Top-level loop: ask for filters, load data, run every report, restart on demand."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)

        # Run each report section on the filtered data, in order.
        for report in (time_stats, station_stats, trip_duration_stats,
                       user_stats, show_row_data):
            report(df)

        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break


if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Demo (Python 2, OpenRAVE): load the HiroNX robot, plan a reach to a fixed
# end-effector pose, grasp 'mug4', then retract the arm and open the hand.
import openravepy
import numpy
env= openravepy.Environment()
env.SetViewer('qtcoin')
env.Load('data2/hiro_test.env.xml')
raw_input("Press Enter to start...")
#robot= env.GetRobots()[0]
robot= env.GetRobot('HiroNX')
robot.SetActiveManipulator('rightarm')
# rightarm, rightarm_torso, leftarm, leftarm_torso, head, head_torso
manip= robot.GetActiveManipulator()
#manip= robot.GetManipulator('leftarm')
# rightarm, rightarm_torso, leftarm, leftarm_torso, head, head_torso
# Load (or generate on first run) the 6D transform IK database for this arm.
ikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(robot,iktype=openravepy.IkParameterization.Type.Transform6D)
if not ikmodel.load():
    ikmodel.autogenerate()
#with env: # lock environment
#Tgoal = numpy.array([[0,-1,0,-0.3],[-1,0,0,0.1],[0,0,-1,0.9],[0,0,0,1]])
##Tgoal = numpy.array([[0,-1,0,-0.3],[-1,0,0,-0.05],[0,0,-1,0.9],[0,0,0,1]])
##Tgoal = numpy.array([[0,-1,0,-0.23],[-1,0,0,-0.1446],[0,0,-1,0.85],[0,0,0,1]])
#sol = manip.FindIKSolution(Tgoal, openravepy.IkFilterOptions.CheckEnvCollisions) # get collision-free solution
#with robot: # save robot state
#robot.SetDOFValues(sol,manip.GetArmIndices()) # set the current solution
#Tee = manip.GetEndEffectorTransform()
#env.UpdatePublishedBodies() # allow viewer to update new robot
#raw_input('press any key')
#print Tee
#robot.SetDOFValues([1.5,0,-1.5,0],[9,10,11,12])
manipprob = openravepy.interfaces.BaseManipulation(robot) # create the interface for basic manipulation programs
##Tgoal = numpy.array([[-1,0,0,-0.3],[0,-1,0,-0.08],[0,0,-1,0.9],[0,0,0,1]])
# Target end-effector pose: a 4x4 homogeneous transform in the world frame.
Tgoal = numpy.array([[0,-1,0,-0.27],[-1,0,0,-0.14],[0,0,-1,0.85],[0,0,0,1]])
#Tgoal = numpy.array([[0,-1,0,-0.23],[-1,0,0,-0.1446],[0,0,-1,0.85],[0,0,0,1]])
res = manipprob.MoveToHandPosition(matrices=[Tgoal],seedik=10) # call motion planner with goal joint angles
robot.WaitForController(0) # wait
taskprob = openravepy.interfaces.TaskManipulation(robot) # create the interface for task manipulation programs
taskprob.CloseFingers() # close fingers until collision
robot.WaitForController(0) # wait
with env:
    robot.Grab(env.GetKinBody('mug4'))
# Retract: move two arm joints to a neutral pose, then reset DOFs 9-12.
goal= numpy.zeros(len(manip.GetArmIndices()))
goal[1]= -1.5
goal[2]= -0.8
manipprob.MoveManipulator(goal)
robot.SetDOFValues([0,0,0,0],[9,10,11,12])  # presumably the gripper DOFs — TODO confirm indices
raw_input("Press Enter to exit...")
env.Destroy()
|
from flask import Flask, render_template

# BUG FIX: the original did ``from flask import app`` (no such export) and
# ``app = flask(__name__)`` — the ``flask`` module object is not callable.
# The application instance is created with the ``Flask`` class.
app = Flask(__name__)


@app.route('/log')
def log():
    """Render the log page."""
    return render_template('log/log.html')


def ini_app(config):
    """Apply *config* to the application and return the app instance."""
    app.config.from_object(config)
    return app
|
# -*- coding: utf-8 -*-
from common import *
def visualize(board):
    """Draw a puzzle board (list of equal-length strings) using the plotting
    helpers imported from ``common`` (box/line/hideticks/show).

    Cell conventions as used below: '#' fills the cell mid-gray, a digit d
    fills it with gray level "0.d", a letter fills it light gray ("0.8").
    Edges between differing cells are drawn thick black; edges inside a
    region are thin gray.
    """
    # Boards are written top row first; reverse so row 0 is drawn at the bottom.
    board = list(reversed(board))
    h, w = len(board), len(board[0])
    # Outer frame around the whole board.
    box(0, 0, h, w, edgecolor='k', color='white')
    for y in range(h):
        for x in range(w):
            if board[y][x] == '#':
                box(y, x, y+1, x+1, color="0.5", linewidth=0)
            if board[y][x].isdigit():
                # The digit itself selects the gray shade ("0.3", "0.7", ...).
                box(y, x, y+1, x+1, color="0." + board[y][x], linewidth=0)
            if board[y][x].isalpha():
                box(y, x, y+1, x+1, color="0.8", linewidth=0)
            # Thick horizontal edge wherever the cell above holds a different symbol.
            hthick = y + 1 < h and board[y+1][x] != board[y][x]
            if board[y][x].isalpha():
                thin = "0.9"
            else:
                thin = "0.8"
            line(y+1, x, y+1, x+1, "k" if hthick else thin,
                 lw=2 if hthick else 1)
            # Thick vertical edge wherever the cell to the right differs.
            vthick = x + 1 < w and board[y][x] != board[y][x+1]
            line(y, x+1, y+1, x+1, "k" if vthick else thin,
                 lw=2 if vthick else 1)
    hideticks()
    show()
def example1():
    """Draw the single five-piece example board."""
    clf()
    example_board = [
        "e....",
        "eabb.",
        ".aac.",
        "..cc.",
        "...dd",
    ]
    visualize(example_board)
    subplots_adjust(left=0.05, right=0.95)
    show()
def example2():
    """Draw the three-panel (a)/(b)/(c) comparison figure."""
    clf()
    panels = [
        ("(a)", [".....", ".abb.", ".aab.", ".....", "....."]),
        ("(b)", [".....", ".abb.", ".acc.", ".....", "....."]),
        ("(c)", [".....", ".abc.", ".abc.", ".....", "....."]),
    ]
    # Panels go into subplots 131, 132, 133 in order.
    for offset, (panel_title, panel_board) in enumerate(panels):
        subplot(131 + offset)
        title(panel_title)
        visualize(panel_board)
    subplots_adjust(left=0.05, right=0.95)
    show()


example2()
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument to report system (CPU, memory, network) and
process (CPU, memory, garbage collection) metrics. By default, the
following metrics are configured:
"system_memory": ["total", "available", "used", "free"],
"system_cpu": ["user", "system", "idle"],
"network_bytes": ["bytes_recv", "bytes_sent"],
"runtime_memory": ["rss", "vms"],
"runtime_cpu": ["user", "system"],
Usage
-----
.. code:: python
from opentelemetry import metrics
from opentelemetry.ext.system_metrics import SystemMetrics
    from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
metrics.set_meter_provider(MeterProvider())
exporter = ConsoleMetricsExporter()
SystemMetrics(exporter)
# metrics are collected asynchronously
input("...")
# to configure custom metrics
configuration = {
"system_memory": ["total", "available", "used", "free", "active", "inactive", "wired"],
"system_cpu": ["user", "nice", "system", "idle"],
"network_bytes": ["bytes_recv", "bytes_sent"],
"runtime_memory": ["rss", "vms"],
"runtime_cpu": ["user", "system"],
}
SystemMetrics(exporter, config=configuration)
API
---
"""
import gc
import os
import typing
import psutil
from opentelemetry import metrics
from opentelemetry.sdk.metrics import ValueObserver
from opentelemetry.sdk.metrics.export import MetricsExporter
from opentelemetry.sdk.metrics.export.controller import PushController
class SystemMetrics:
    """Registers observers that report system (memory, CPU, network) and
    process runtime (memory, CPU, GC) metrics through a push controller.

    Args:
        exporter: metrics exporter the push controller flushes to.
        interval: export interval in seconds.
        labels: base labels attached to every observation.
        config: mapping of metric group -> list of attribute names to read
            from the corresponding psutil result; defaults cover memory,
            CPU, and network basics.
    """

    def __init__(
        self,
        exporter: MetricsExporter,
        interval: int = 30,
        labels: typing.Optional[typing.Dict[str, str]] = None,
        config: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    ):
        self._labels = {} if labels is None else labels
        self.meter = metrics.get_meter(__name__)
        self.controller = PushController(
            meter=self.meter, exporter=exporter, interval=interval
        )
        if config is None:
            self._config = {
                "system_memory": ["total", "available", "used", "free"],
                "system_cpu": ["user", "system", "idle"],
                "network_bytes": ["bytes_recv", "bytes_sent"],
                "runtime_memory": ["rss", "vms"],
                "runtime_cpu": ["user", "system"],
            }
        else:
            self._config = config
        self._proc = psutil.Process(os.getpid())

        # Per-observer label dicts; a "type"/"count" key is rewritten on
        # each observation, so each observer needs its own copy.
        self._system_memory_labels = {}
        self._system_cpu_labels = {}
        self._network_bytes_labels = {}
        self._runtime_memory_labels = {}
        self._runtime_cpu_labels = {}
        self._runtime_gc_labels = {}
        # create the label set for each observer once
        for key, value in self._labels.items():
            self._system_memory_labels[key] = value
            self._system_cpu_labels[key] = value
            self._network_bytes_labels[key] = value
            self._runtime_memory_labels[key] = value
            # BUG FIX: the original loop never populated the runtime-CPU
            # labels, so runtime.python.cpu observations lost the user labels.
            self._runtime_cpu_labels[key] = value
            self._runtime_gc_labels[key] = value

        self.meter.register_observer(
            callback=self._get_system_memory,
            name="system.mem",
            description="System memory",
            unit="bytes",
            value_type=int,
            observer_type=ValueObserver,
        )
        self.meter.register_observer(
            callback=self._get_system_cpu,
            name="system.cpu",
            description="System CPU",
            unit="seconds",
            value_type=float,
            observer_type=ValueObserver,
        )
        self.meter.register_observer(
            callback=self._get_network_bytes,
            name="system.net.bytes",
            description="System network bytes",
            unit="bytes",
            value_type=int,
            observer_type=ValueObserver,
        )
        self.meter.register_observer(
            callback=self._get_runtime_memory,
            name="runtime.python.mem",
            description="Runtime memory",
            unit="bytes",
            value_type=int,
            observer_type=ValueObserver,
        )
        self.meter.register_observer(
            callback=self._get_runtime_cpu,
            name="runtime.python.cpu",
            description="Runtime CPU",
            unit="seconds",
            value_type=float,
            observer_type=ValueObserver,
        )
        self.meter.register_observer(
            callback=self._get_runtime_gc_count,
            name="runtime.python.gc.count",
            description="Runtime: gc objects",
            unit="objects",
            value_type=int,
            observer_type=ValueObserver,
        )

    def _get_system_memory(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for memory available

        Args:
            observer: the observer to update
        """
        system_memory = psutil.virtual_memory()
        for metric in self._config["system_memory"]:
            self._system_memory_labels["type"] = metric
            observer.observe(
                getattr(system_memory, metric), self._system_memory_labels
            )

    def _get_system_cpu(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for system cpu

        Args:
            observer: the observer to update
        """
        cpu_times = psutil.cpu_times()
        for _type in self._config["system_cpu"]:
            self._system_cpu_labels["type"] = _type
            observer.observe(
                getattr(cpu_times, _type), self._system_cpu_labels
            )

    def _get_network_bytes(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for network bytes

        Args:
            observer: the observer to update
        """
        net_io = psutil.net_io_counters()
        for _type in self._config["network_bytes"]:
            self._network_bytes_labels["type"] = _type
            observer.observe(
                getattr(net_io, _type), self._network_bytes_labels
            )

    def _get_runtime_memory(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for runtime memory

        Args:
            observer: the observer to update
        """
        proc_memory = self._proc.memory_info()
        for _type in self._config["runtime_memory"]:
            self._runtime_memory_labels["type"] = _type
            observer.observe(
                getattr(proc_memory, _type), self._runtime_memory_labels
            )

    def _get_runtime_cpu(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for runtime CPU

        Args:
            observer: the observer to update
        """
        proc_cpu = self._proc.cpu_times()
        for _type in self._config["runtime_cpu"]:
            self._runtime_cpu_labels["type"] = _type
            observer.observe(
                getattr(proc_cpu, _type), self._runtime_cpu_labels
            )

    def _get_runtime_gc_count(self, observer: metrics.ValueObserver) -> None:
        """Observer callback for garbage collection

        Args:
            observer: the observer to update
        """
        gc_count = gc.get_count()
        for index, count in enumerate(gc_count):
            self._runtime_gc_labels["count"] = str(index)
            observer.observe(count, self._runtime_gc_labels)
|
# NOTE(review): ``i`` is not defined anywhere in this fragment, so this line
# raises NameError at import time — it looks like a leftover snippet; confirm
# intent before keeping it.
new_list = [ i ]
import MySQLdb
def connection():
    """Open a MySQL connection to the local ``uniplan`` database.

    Returns:
        tuple: ``(cursor, connection)`` — the caller is responsible for
        committing and closing both.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # environment variables or a config file before deploying.
    conn = MySQLdb.connect(host="localhost",
                           user="root",
                           passwd='cross1994',
                           db="uniplan")
    c = conn.cursor()
    return c, conn
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from made_list import ListNode
class Solution:
    """LeetCode 83: remove duplicates from a sorted singly-linked list."""

    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        """Collapse runs of equal values in a sorted list, in place.

        Args:
            head: head node of a sorted list, or None.

        Returns:
            The (unchanged) head; duplicate nodes are unlinked in place.
        """
        # Idiom fix: compare against None with ``is``/``is not``, not ==/!=.
        if head is None:
            return head
        cur = head
        while cur.next is not None:
            if cur.val == cur.next.val:
                # Skip over the duplicate node.
                cur.next = cur.next.next
            else:
                cur = cur.next
        return head
# Quick manual check (Python 2 print statement).
a = Solution()
# NOTE(review): ListNode comes from ``made_list``; passing '1->1->2' presumably
# builds the list from that string — confirm the constructor's semantics.
l = ListNode('1->1->2')
print a.deleteDuplicates(l)
def check_if_alnum(s):
    """Return True if any character of *s* is alphanumeric."""
    # Idiom: any() replaces the manual loop-and-return.
    return any(char.isalnum() for char in s)
def check_if_alpha(s):
    """Return True if any character of *s* is alphabetic."""
    return any(char.isalpha() for char in s)
def check_if_digit(s):
    """Return True if any character of *s* is a digit."""
    return any(char.isdigit() for char in s)
def check_if_lower(s):
    """Return True if any character of *s* is lowercase."""
    return any(char.islower() for char in s)
def check_if_upper(s):
    """Return True if any character of *s* is uppercase."""
    return any(char.isupper() for char in s)
if __name__ == '__main__':
    # Read one string and print each predicate's result, one per line.
    s = input()
    for predicate in (check_if_alnum, check_if_alpha, check_if_digit,
                      check_if_lower, check_if_upper):
        print(predicate(s))
|
from django.db import models
class Alumno(models.Model):
    """A student, identified by name."""
    nombre = models.CharField(max_length=200)

    def __str__(self):
        return self.nombre
class Clase(models.Model):
    """A class session, identified by a unique session number."""
    numero = models.IntegerField(default=0,unique=True)

    def __str__(self):
        return str(self.numero)
class Asistencia(models.Model):
    """Attendance record linking one student to one class session.

    ``asistio`` is an integer attendance flag (semantics not shown here —
    presumably nonzero means attended; confirm against the views using it).
    Each (alumno, clase) pair may appear at most once.
    """
    alumno = models.ForeignKey(Alumno, on_delete=models.CASCADE)
    clase = models.ForeignKey(Clase, on_delete=models.CASCADE)
    asistio = models.IntegerField(default=0)

    class Meta:
        unique_together = (('alumno', 'clase'),)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: yuanzi
def main():
    """Demo of list iteration (Python 2): print each vegetable 'mom saw'."""
    lst = ['大白菜','花菜','空心菜','生姜','小龙虾']
    for lst_item in lst:
        print '老妈看到了 %s ' % (lst_item)
    print '~~~~~~~~~~~~~~~'
def main2():
    """Second iteration demo: print the snack count, then each snack."""
    my_food_list = ['牛奶','饼干','薯片','海苔','虾条','可乐']
    print '我要买', len(my_food_list), '种零食,它们是'
    for item in my_food_list:
        print '%s' % (item)
if __name__ == '__main__':
    main()
    main2()
# A list is a data structure that keeps all its items in order, like a checklist.
# ``for ... in`` walks through the list, printing the items one by one.
# NOTE(review): the original note described this as "recursion" (a program
# calling itself) — what the loops above do is iteration, not recursion.
#!/usr/bin/env python3
from zlib import adler32
def deterministic_choice(sequence, choice_seed):
    """Pick one element of *sequence*, determined entirely by *choice_seed*.

    The seed string is hashed with adler32 and reduced modulo the sequence
    length, so the same seed always yields the same element.
    """
    digest = adler32(bytes(choice_seed, 'utf8'))
    return sequence[digest % len(sequence)]
def deterministic_triple(sequence, choice_seed):
    """Deterministically pick up to three distinct elements of *sequence*.

    Each draw perturbs the seed with a one-letter nonce ('a'..'h') so
    successive draws can differ; duplicate picks are skipped until three
    distinct elements (or as many as the sequence holds) are collected.
    """
    nonces = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    wanted = min(len(sequence), 3)
    picks = []
    attempt = 0
    while len(picks) < wanted:
        candidate = deterministic_choice(sequence, choice_seed + nonces[attempt])
        if candidate not in picks:
            picks.append(candidate)
        attempt += 1
    return picks
# Jinja filter hooks exposed to the theme templates.
JINJA_FILTERS = {
    'pick_reccomendations': lambda array, article: deterministic_triple(
        [i for i in array if i.title != article], article.title),
    'pick_subtitle': deterministic_choice,
}

AUTHOR = 'capu'
SITENAME = "capu's blog"
SITEURL = 'http://127.0.0.1:8000'

# One of these is picked deterministically per page as the site subtitle.
SITESUBTITLE = [
    "I'm probably over-engineering this",
    "You can't downvote me here",
    'También en castellano, wachin',
    "No me 'aggiorno' un carajo",
    'Tope (inferior) de gama',
    'Brazing fast',
    'El ingerdiente secreto es el desempleo',
    'Secret ingredient: unemployment',
    'Me gussssstan las herramientas',
    "Come surf Dunning-Kruger's crest with me",
    "World's Okayest Programmer",
    "World's Okayest Bike Mechanic",
    'We have nothing to lose but our OSDE 210',
    'Alta paja agregar comentarios, mandame un mail',
    'Cookie free! NGINX logs your IP, tho',
    'Looks just as good in w3m',
    'Software is evil unless you can fork it',
    'Content Warning: unsufferable hipsters',
    "Hosted on the Other People's Computers ☁",
    "didn't pump yet! you're still early!",
    'haschash maximalist',
    'non-giver of ether',
    "Hack the planet! (it's a reference)",
    "No backups. Can't restore. Don't want to either.",
]

PATH = 'content'
TIMEZONE = 'America/Argentina/Buenos_Aires'
DEFAULT_LANG = 'en'

# Feed generation: only the combined RSS feed is produced.
FEED_ALL_RSS = 'feeds/all.rss.xml'
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

# Blogroll
SOCIAL = (('github', 'http://github.com/juanpcapurro'),
          ('hire me', 'http://hire.capu.tech/'))

# Social widget
LINKS = (('website home', 'http://capu.tech'), ('hire me', 'http://hire.capu.tech/'))

DEFAULT_PAGINATION = False

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

THEME = 'themes/capu'
PLUGIN_PATHS = ['plugins', 'plugins/pelican-plugins']
PLUGINS = ['pelican-global-rst-include', 'video', 'plantuml', 'readtime']

# idk, this seems to be relative to the context directory
RST_GLOBAL_INCLUDES = ['../globals/globals.rst']
STATIC_PATHS = ['static']
EXTRA_PATH_METADATA = {'static/favicon.ico': {'path': 'favicon.ico'}}
|
# coding=utf-8
'''
@author: 黄鑫晨
'''
import json
import tornado
from sqlalchemy import desc
from tornado import gen
from tornado.concurrent import Future
from tornado.web import asynchronous
from Appointment.APgroupHandler import APgroupHandler
from BaseHandlerh import BaseHandler
from Database.tables import Appointment, User, UserCollection, UserLike
from Userinfo import Usermodel
from Userinfo.Ufuncs import Ufuncs
from Userinfo.UserImgHandler import UserImgHandler
from Userinfo.Usermodel import Model_daohanglan
class LoginHandler(BaseHandler):
    """Tornado handler (Python 2) for manual (askCode 10106) and automatic
    (askCode 10105) login; responds with a JSON envelope in ``retjson``."""

    # Shared response skeleton: 'code' is a status code, 'contents' the payload.
    retjson = {'code': '', 'contents': u'未处理 '}

    @asynchronous
    @gen.coroutine
    def post(self):
        askcode = self.get_argument('askCode')  # request code
        future =Future()
        if askcode == '10106':  # manual login
            m_phone = self.get_argument('phone')
            m_password = self.get_argument('password')
            if not m_phone or not m_password:
                self.retjson['code'] = 400
                self.retjson['contents'] = 10105  # 'username/password must not be empty'
                # TODO: the login response wraps retdata in an extra [],
                # costing the client ~0.5s of processing time
                # guard against duplicate registration
            else:
                try:
                    user = self.db.query(User).filter(User.Utel == m_phone).one()
                    if user:  # user exists
                        print '111'
                        password = user.Upassword
                        if m_password == password:  # password correct
                            print '222'
                            self.get_new_login_model(user)
                            print '333'
                        else:
                            self.retjson['contents'] = u'密码错误'
                            self.retjson['code'] = '10114'  # wrong password
                    else:  # user does not exist
                        print '444'
                        self.retjson['contents'] = u'该用户不存在'
                        self.retjson['code'] = '10113'
                except Exception, e:  # not registered yet
                    print "异常:"
                    print e
                    print '555'
                    self.retjson['contents'] = u'该用户名不存在'
                    self.retjson['code'] = '10113'  # 'the username does not exist'
        elif askcode == '10105':  # automatic login
            auth_key = self.get_argument("authkey")  # authorization key
            uid = self.get_argument('uid')
            try:
                user = self.db.query(User).filter(User.Uid == uid).one()
                u_auth_key = user.Uauthkey
                if auth_key == u_auth_key:
                    self.retjson['code'] = '10111'
                    self.get_new_login_model(user)
                else:
                    self.retjson['code'] = '10116'
                    self.retjson['contents'] = u'授权码不正确或已过期'
            except Exception, e:
                print e
                self.retjson['code'] = '10113'
                self.retjson['contents'] = u'该用户名不存在'
        else:
            self.retjson['contents'] = u"登录类型不满足要求,请重新登录!"
            self.retjson['data'] = u"登录类型不满足要求,请重新登录!"
        self.write(json.dumps(self.retjson, ensure_ascii=False, indent=2))
        self.finish()

    #@asynchronous
    #@gen.coroutine
    def bannerinit(self):
        """Builds the four rotating banner entries (image URL + target link)."""
        from FileHandler.Upload import AuthKeyHandler
        bannertokens = []
        authkeyhandler = AuthKeyHandler()
        banner1 = authkeyhandler.download_url("banner/banner1.jpg")
        banner2 = authkeyhandler.download_url("banner/banner2.jpg")
        banner3 = authkeyhandler.download_url("banner/banner3.jpg")
        banner4 = authkeyhandler.download_url("banner/banner4.jpg")
        banner_json1 = {'imgurl': banner1, 'weburl': "http://www.shacus.cn/"}
        banner_json2 = {'imgurl': banner2, 'weburl': "http://www.shacus.cn/"}
        banner_json3 = {'imgurl': banner3, 'weburl': "http://www.shacus.cn/"}
        banner_json4 = {'imgurl': banner4, 'weburl': "http://www.shacus.cn/"}
        bannertokens.append(banner_json1)
        bannertokens.append(banner_json2)
        bannertokens.append(banner_json3)
        bannertokens.append(banner_json4)
        return bannertokens

    @asynchronous
    @gen.coroutine
    def get_login_model(self, user):
        """Old-style login payload: user model plus recent photographer/model
        appointment lists (kept for reference; ``get_new_login_model`` is used)."""
        retdata = []
        user_model = Usermodel.get_user_detail_from_user(user)  # user model
        photo_list = []  # appointments published by photographers
        model_list = []
        try:
            photo_list_all = self.db.query(Appointment).filter(Appointment.APtype == 1,
                                                               Appointment.APvalid == 1). \
                order_by(desc(Appointment.APcreateT)).limit(6).all()
            model_list_all = self.db.query(Appointment).filter(Appointment.APtype == 0,
                                                               Appointment.APvalid == 1). \
                order_by(desc(Appointment.APcreateT)).limit(6).all()
            from Appointment.APmodel import APmodelHandler
            ap_model_handler = APmodelHandler()  # create handler
            ap_model_handler.ap_Model_simply(photo_list_all, photo_list, user.Uid)
            ap_model_handler.ap_Model_simply(model_list_all, model_list, user.Uid)
            # appointment type and id
            data = dict(
                userModel=user_model,
                daohanglan=self.bannerinit(),
                photoList=photo_list,
                modelList=model_list,
                groupList=APgroupHandler.Group(),
            )
            retdata.append(data)
            self.retjson['code'] = '10111'
            self.retjson['contents'] = retdata
        except Exception, e:
            print e
            self.retjson['contents'] = r"摄影师约拍列表导入失败!"

    def get_new_login_model(self, user):
        """Current login payload: user model, banners, and collections of the
        users this user likes."""
        models = []
        retdata = []
        imghandler = UserImgHandler()
        user_model = Usermodel.get_user_detail_from_user(user)  # user model
        try:
            my_likes = self.db.query(UserLike).filter(UserLike.ULlikeid == user.Uid, UserLike.ULvalid == 1).all()
            for like in my_likes:
                pic = self.db.query(UserCollection).filter(UserCollection.UCuser == like.ULlikedid,
                                                           UserCollection.UCvalid == 1).all()
                for item in pic:
                    retdata.append(imghandler.UC_login_model(item, item.UCuser, user.Uid))
            # recommended collections
            # appointment type and id
            data = dict(
                userModel=user_model,
                daohanglan=self.bannerinit(),
                CollectionList=retdata,  # collections of liked users
                RecList=[],  # recommended collections
                groupList=APgroupHandler.Group(),
            )
            models.append(data)
            self.retjson['code'] = '10111'
            self.retjson['contents'] = models
        except Exception, e:
            print e
            self.retjson['contents'] = r"摄影师约拍列表导入失败!"
# Read arena size, starting position, and elapsed time.
w, h = map(int, input().split())
x, y = map(int, input().split())
t = int(input())

# Motion along each axis is periodic with period 2*size; reduce t modulo
# that period (equivalent to t - (t // p) * p for integers), then fold the
# reflected position back into the arena.
dis_x = t % (w * 2)
dis_y = t % (h * 2)

if w - x - dis_x >= 0:
    x += dis_x
else:
    x = 2 * w - x - dis_x

if h - y - dis_y >= 0:
    y += dis_y
else:
    y = 2 * h - y - dis_y

print("%d %d" % (abs(x), abs(y)))
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# author:罗徐 time:2019/5/17
import cv2 as cv
import numpy as np
#实现控制图像的对比度与亮度
def contrast_brightness_demo(image,c,b):
    """Display *image* with contrast scaled by ``c`` and brightness offset ``b``.

    NOTE(review): the original comment claimed c = brightness and b =
    contrast, but in ``cv.addWeighted(image, c, blank, 1-c, b)`` the factor
    c multiplies the source pixels (contrast gain) while b is the scalar
    added to the blend (brightness offset).
    """
    h,w,ch=image.shape
    # All-black image of the same shape, blended in with weight (1 - c).
    blank=np.zeros([h,w,ch],image.dtype)
    dst=cv.addWeighted(image,c,blank,1-c,b)
    cv.imshow("con-bri-demo",dst)
# Load the test image (absolute path — adjust for your machine) and show the
# original next to the adjusted version.
src=cv.imread("C:/Users/17913/python+opencv/tangsan.jpg")
cv.namedWindow("input image",cv.WINDOW_AUTOSIZE)
cv.imshow("input image",src)
contrast_brightness_demo(src,1,20)
# The two numeric arguments are the contrast factor and the brightness offset.
cv.waitKey(0)
cv.destroyAllWindows()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-24 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine ``Player.answers_given`` with a 36-character
    all-zero default (max_length=100)."""

    dependencies = [
        ('chemhunt', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='player',
            name='answers_given',
            field=models.CharField(default='000000000000000000000000000000000000', max_length=100),
        ),
    ]
|
from myhdl import *
def toTwosComplement(binarySequence):
    """Return the two's complement of a bit string ('0'/'1' characters).

    Inverts every bit, then adds one with ripple carry from the right;
    a carry out of the leftmost bit is dropped (wrap-around), matching
    fixed-width two's-complement arithmetic.

    Args:
        binarySequence: string of '0'/'1' characters (any non-'0' character
            is treated as a set bit, as in the original implementation).

    Returns:
        str: bit string of the same length (empty input yields '').
    """
    # Invert the bits.
    inverted = ['1' if bit == '0' else '0' for bit in binarySequence]
    # Add one: flip trailing '1's to '0' until the first '0' absorbs the carry.
    # (This subsumes the original's special case for a trailing '0', and no
    # longer raises IndexError on an empty string.)
    for i in reversed(range(len(inverted))):
        if inverted[i] == '0':
            inverted[i] = '1'
            break
        inverted[i] = '0'
    return ''.join(inverted)
# Demo (Python 2): inspect myhdl fixbv fixed-point values and their
# two's-complement bit patterns.
x4 = fixbv(-351.750000)
print repr(x4)
z = bin(x4)
print len(z)
# NOTE(review): shifting by len(z) (the full printed width) presumably
# isolates the sign — confirm against the fixbv representation.
sign_bit_x4 = x4 >> len(z)
print sign_bit_x4
print ' origbinary ',z
y = toTwosComplement( z)
print 'TwosComplement ',y
print x4._W._fwl  # private attr: fractional word length — TODO confirm
print x4._W._iwl  # private attr: integer word length — TODO confirm
x5 = fixbv(-356.375000)
print repr(x5)
z = bin(x5)
print len(z)
sign_bit_x5 = x5 >> len(z)
print sign_bit_x5
print ' origbinary ',z
y = toTwosComplement( z)
print 'TwosComplement ',y
print x5._W._fwl
print x5._W._iwl
#print xx + x5
|
from django.apps import AppConfig
class DterpAppConfig(AppConfig):
    """Django application configuration for the ``dterp_app`` package."""
    name = 'dterp_app'
|
__author__ = "Pruthvi Kumar, pruthvikumar.123@gmail.com"
__copyright__ = "Copyright (C) 2018 Pruthvi Kumar | http://www.apricity.co.in"
__license__ = "Public Domain"
__version__ = "1.0"
import redis
from configuration import ProtonConfig
from nucleus.generics.logUtilities import LogUtilities
class CacheManager(ProtonConfig, LogUtilities):
    """Closure-based facade over a local Redis instance.

    ``cacheProcessor()`` returns a dict of cache operations (initCache,
    setToCache, getFromCache, pingCache, deleteFromCache); every operation
    logs its outcome.
    """

    def __init__(self):
        super(CacheManager, self).__init__()
        # Connection settings for the local Redis server.
        self.__redisConfig = {
            'host': 'localhost',
            'port': 6379,
            'db': 0
        }
        self.logger = self.getLogger(logFileName='cacheManager_logs',
                                     logFilePath='{}/trace/cacheManager_logs.log'.format(self.ROOT_DIR))
        self.cacheProcessor = self.__processor

    def __processor(self):
        """
        Closure for CacheManager
        :return: A dictionary of all methods processed by CacheManager.
        """
        def instantiateCache():
            # Create a Redis client (connection is lazy; see pingCache).
            try:
                redisInstance = redis.StrictRedis(host=self.__redisConfig['host'], port=self.__redisConfig['port'],
                                                  db=self.__redisConfig['db'])
                self.logger.info('Successfully instantiated cache!')
                return redisInstance
            except Exception as e:
                self.logger.exception('Exception while instantiating cache. Details: {}'.format(str(e)))
                # BUG FIX: the original did ``raise str(e)`` — raising a str
                # is itself a TypeError in Python 3. Re-raise the original
                # exception instead.
                raise

        def setToCache(redisInstance, key, value):
            try:
                redisInstance.set(key, value)
                self.logger.info('Cache set for key: {}'.format(key))
            except Exception as e:
                self.logger.exception('Exception while setting value to cache. Details: {}'.format(str(e)))
                raise  # BUG FIX: was ``raise str(e)``

        def getFromCache(redisInstance, key):
            try:
                dataFromCache = redisInstance.get(key)
                self.logger.info('Data from cache successful for key: {}'.format(key))
                return dataFromCache
            except Exception as e:
                self.logger.exception('Data from cache for key: {} is unsuccessful. Details: {}'.format(key, str(e)))
                raise  # BUG FIX: was ``raise str(e)``

        def pingCache(redisInstance):
            # Best-effort availability probe: returns False instead of raising.
            try:
                redisInstance.ping()
                self.logger.info('Redis instance is available!')
                return True
            except Exception as e:
                self.logger.exception('Redis instance is unavailable on ping!. Details : {}'.format(str(e)))
                return False

        def deleteFromCache(redisInstance, key):
            # Best-effort delete: returns False instead of raising.
            try:
                redisInstance.delete(key)
                self.logger.info('{} deleted from Redis cache!'.format(key))
                return True
            except Exception as e:
                self.logger.exception(('Redis instance is unavailable to delete key: {}. '
                                       'Details: {}'.format(key, str(e))))
                return False

        return {
            'initCache': instantiateCache,
            'setToCache': setToCache,
            'getFromCache': getFromCache,
            'pingCache': pingCache,
            'deleteFromCache': deleteFromCache
        }
|
# 2.2 changelog
# 1. Renamed the strategy functions to make them more intuitive
# 2. Translated the comments from English to Chinese
# 3. (unfinished entry)
# 加载所需模块
import pandas as pd # 数据处理
import datetime as dt # 日期
from WindPy import * # 万德数据API
from WindAlgo import * # 回测
from WindCharts import * # 绘图
w.start(show_welcome = False) # 启动万德
# Reversal & small-cap trading strategy.
today = dt.datetime.now().strftime("%Y-%m-%d")  # today's date

# Today's full A-share stock list, pulled from the Wind sector-constituent API.
securities = w.wset(
    "sectorconstituent",
    "date="+today+";sectorId=a001010100000000;field=wind_code,sec_name"
).Data[0]
def initialize(context):
    """Backtest configuration: capital, universe, date range, frequency, benchmark."""
    context.capital = 1e9  # 1 billion CNY initial capital
    context.securities = securities  # all A-share stocks as the universe
    context.start_date = "20090101"  # backtest starts 2009-01-01
    context.end_date = "20171231"  # backtest ends 2017-12-31
    context.period = "d"  # daily bars
    context.benchmark = "000300.SH"  # CSI 300 index as benchmark
    context.fields = "sec_name,windcode,pct_chg_per,mkt_cap_ashare2"
    '''
    获取指标列表:
    1. 证券名称
    2. 万德代码
    3. 涨跌幅
    4. 市值(A股市值,含限售股)
    '''
def handle_data(bar_datetime, context, bar_data):
    """Per-bar callback required by the backtest engine; intentionally a
    no-op — all trading happens in the scheduled ``strategy`` function."""
    pass
def strategy(bar_datetime, context, bar_data):
    """Monthly rebalance: hold the intersection of the 10% smallest-cap and
    10% worst-performing stocks over the trailing three-month window."""
    trade_date = bar_datetime.strftime("%Y-%m-%d")  # trade on the backtest's "today"
    # Ranking window boundaries.
    start_rank_date = w.tdaysoffset(
        -3,  # start the ranking window three months before the trade date
        trade_date,
        "Period=M;Days=Alldays"
    ).Data[0][0].strftime("%Y-%m-%d")
    end_rank_date = w.tdaysoffset(
        -1,  # end rank 1 day ago
        trade_date,
        "Period=D;Days=Alldays"
    ).Data[0][0].strftime("%Y-%m-%d")
    # Data contains percentage change and market capital.
    data = w.wss(
        context.securities,
        context.fields,
        "startDate="+start_rank_date+";endDate="+end_rank_date+";trade_date="+trade_date+"",
        usedf = True  # generate pandas dataframe directly
    )[1]
    data.dropna(inplace = True)  # drop data not available
    data.sort_values("MKT_CAP_ASHARE2", inplace = True)  # sort by market capital to select small stocks
    small_stock_list = list(data[ : round(len(data)/10)].index)  # select the 10% smallest stocks in the past 3 months
    data.sort_values("PCT_CHG_PER", inplace = True)  # sort by percentage change to select loser stocks
    loser_stock_list = list(data[ : round(len(data)/10)].index)  # select the 10% worst performing stocks in the past 3 months
    target_list = [x for x in small_stock_list if x in loser_stock_list]  # select the intersection of 10% smallest and 10% worst as our target.
    # NOTE(review): ``wa`` is the module-level BackTest instance created after
    # this definition; the engine only calls strategy once it exists.
    current_stock_list = wa.query_position().get_field("code")  # get the stocks list I currently hold
    buy_list = [x for x in target_list if x not in current_stock_list]  # buy stocks I previously don't have but is my target
    wa.change_securities(buy_list)  # change context.securities, but why won't it affect next loop?
    sell_list = [x for x in current_stock_list if x not in target_list]  # sell stocks i previously have but is not my target
    continue_holding_list = [x for x in target_list if x in current_stock_list]
    for code in sell_list:
        volume = wa.query_position()[code]['volume']  # query how much position is in my portfolio
        res = wa.order(code, volume, "sell", price = "close", volume_check = False)  # sell stocks
    for code in buy_list:
        res = wa.order_percent(code, 1/len(buy_list), 'buy', price = "close", volume_check = False)  # buy stocks equally weighted
    for code in continue_holding_list:
        res = wa.order_target_percent(code, 1/len(continue_holding_list), 'buy', price = "close", volume_check = False)  # incremental switching position
# Build the backtest engine and run it; the scheduled routine fires on the
# first trading day of each month.
wa = BackTest(init_func = initialize, handle_data_func = handle_data)
wa.schedule(CTR_SCPT, "m", 0)  # execute strategy on the first trading day each month
res = wa.run(show_progress = True)
def windframe_to_dataframe(windframe):
    """Convert a WindFrame result object into a pandas DataFrame.

    Args:
        windframe: object exposing ``fields`` (column names) and
            ``get_field(name)`` (the column's values).

    Returns:
        pd.DataFrame with one column per field, in ``fields`` order.
    """
    df = pd.DataFrame()
    # BUG FIX: the original read from ``wind_frame`` (undefined name —
    # NameError at runtime); the parameter is spelled ``windframe``.
    for column in windframe.fields:
        df[column] = windframe.get_field(column)
    return df
# Backtest summary: pull each report table from the engine and convert it to
# a pandas DataFrame for inspection.
result = windframe_to_dataframe(wa.summary("result"))
nav = windframe_to_dataframe(wa.summary("nav"))
trade = windframe_to_dataframe(wa.summary("trade"))
position = windframe_to_dataframe(wa.summary("position"))
monthly_profit = windframe_to_dataframe(wa.summary("monthly_profit"))
position_rate = windframe_to_dataframe(wa.summary("position_rate"))
stat_month = windframe_to_dataframe(wa.summary("stat_month"))
stat_quarter = windframe_to_dataframe(wa.summary("stat_quarter"))
stat_year = windframe_to_dataframe(wa.summary("stat_year"))
import os
import discord
import dotenv
dotenv.load_dotenv()
class Client(discord.Client):
    """Discord client that announces voice-channel joins/leaves to the text
    channel identified by the CHANNEL_ID environment variable."""

    def __init__(self):
        # NOTE(review): discord.py 2.x requires an ``intents=`` argument
        # here — confirm the installed library version.
        super().__init__()

    async def on_ready(self):
        # Log identity once connected.
        print(self.user.name)
        print(self.user.id, "\n")

    async def on_voice_state_update(self, member, before, after):
        if before.channel is None:
            event_name = "参加"  # joined a voice channel
        elif after.channel is None:
            event_name = "退出"  # left a voice channel
        else:
            # BUG FIX: moves/mutes within voice channels arrive with both
            # ``before.channel`` and ``after.channel`` set; the original then
            # crashed with UnboundLocalError on ``event_name``. Ignore them.
            return
        channel = await self.fetch_channel(os.getenv("CHANNEL_ID"))
        member_name = member.nick if member.nick is not None else member.name
        message = f"{event_name}: {member_name}"
        await channel.send(message)
def main():
    """Create the client and run it with the token from the environment."""
    Client().run(os.getenv("BOT_TOKEN"))


if __name__ == "__main__":
    main()
|
"""Classes for creating basic modal dialogs
Such as YES-NO, Question, Information dialogs etc.
"""
from julesTk import view, controller
from julesTk.view.window import Window
__author__ = "Joeri Jongbloets <joeri@jongbloets.net>"
class ModalWindow(Window):
    """A window taking all focus and blocking interaction with other windows"""

    # Extra view state: the window is modal and waiting on the event loop.
    STATE_BLOCKED = 4

    def __init__(self, parent, ctrl):
        super(ModalWindow, self).__init__(parent, ctrl)
        # Hide (rather than destroy) this window when the application closes.
        self.application.register_hook("APP_CLOSE", self.hide)

    def _prepare(self):
        # Concrete modal windows must build their widgets here.
        raise NotImplementedError

    def _show(self):
        super(ModalWindow, self)._show()
        # Tie the window to its parent and grab all input events, then block.
        self.transient(self.parent)
        self.grab_set()
        self._block()

    def block(self):
        """Mark the window blocked and wait until it is destroyed."""
        self._view_state = self.STATE_BLOCKED
        return self._block()

    def _block(self):
        # wait_window blocks the caller until this window is destroyed.
        self.update()
        self.root.wait_window(self)

    def _hide(self):
        # A modal window cannot be merely hidden once shown.
        return False

    def _close(self):
        self.application.remove_hook("APP_CLOSE", self.hide)
        super(ModalWindow, self)._close()

    def is_blocked(self):
        """Whether the window is currently in the blocked (modal) state."""
        return self._view_state == self.STATE_BLOCKED
class Dialog(ModalWindow):
    """Basic Dialog Window"""

    def __init__(self, parent, ctrl):
        super(Dialog, self).__init__(parent, ctrl)
        # Stores the user's answer; read through the ``response`` property.
        self._response = None
        # self._prepare()

    @property
    def response(self):
        """Returns the input of the user given in the ModalWindow

        Developers can use this communicate the input of the window to the controller
        """
        return self._response

    def _prepare(self):
        # Layout: three stacked frames — header (row 0), body (row 1),
        # footer (row 2) — each filled by an overridable hook method.
        self.grid()
        self.configure_column(self, 0)
        self.configure_row(self, [0, 1, 2])
        # header
        fmh = self.add_widget(
            "header", view.ttk.Frame(self)
        )
        self.header(fmh)
        self.configure_grid(fmh, row=0, column=0)
        # body
        fmb = self.add_widget(
            "body", view.ttk.Frame(self)
        )
        self.body(fmb)
        self.configure_grid(fmb, row=1, column=0)
        # footer
        fmf = self.add_widget(
            "footer", view.ttk.Frame(self)
        )
        self.footer(fmf)
        self.configure_grid(fmf, row=2, column=0)

    def header(self, parent):
        """Header of the dialog"""
        return True  # override

    def body(self, parent):
        """Build the body of the dialog, parent refers to parent frame"""
        return True  # override

    def footer(self, parent):
        """Build the buttons of the dialog, parent refers to parent frame"""
        return True  # override

    def validate(self):
        # Hook for subclasses to verify user input before accepting it.
        return True  # override

    def start(self):
        return self.show()

    def stop(self):
        return self.close()
class SimpleDialog(Dialog):
    """Dialog showing a message with a configurable row of buttons."""

    def __init__(self, parent, ctrl, buttons=None):
        super(SimpleDialog, self).__init__(parent, ctrl)
        # BUG FIX: tk.StringVar's first positional argument is the *master*
        # widget, not the initial value -- StringVar("") raised an
        # AttributeError at runtime.  Use this window as master (matching
        # QuestionBox); the default value is already "".
        self._message = view.tk.StringVar(self)
        if buttons is None:
            buttons = []
        if len(buttons) == 0:
            # Fall back to a single "Ok" button that returns True.
            buttons = [{"id": "ok", "caption": "Ok", "value": True}]
        self._buttons = buttons

    @property
    def message(self):
        """Text shown in the dialog body."""
        return self._message.get()

    @message.setter
    def message(self, v):
        self._message.set(v)

    def body(self, parent):
        # Single label bound to the message variable, filling the body frame.
        lbm = view.ttk.Label(parent, textvariable=self._message)
        lbm.pack(side=view.tk.TOP, fill=view.tk.BOTH, expand=1)

    def footer(self, parent):
        """Create one button per definition in ``self._buttons``."""
        idx = 0
        for button in self._buttons:
            # get button id
            name = button.get("id", None)
            if name is None:
                name = idx
                idx += 1
            # get caption
            caption = button.get("caption", name)
            # get return value
            value = button.get("value", name)
            # check if set to default
            is_default = button.get("default", False)
            if is_default:
                # Pre-seed the response so closing without clicking still
                # yields the default button's value.
                self._response = value
            # add button
            btn = self.make_button(parent, name, caption, value, is_default)
            btn.pack(side=view.tk.LEFT, padx=5)

    def make_button(self, parent, name, caption, value, is_default=False):
        """Creates a button"""
        default = view.tk.ACTIVE if is_default else view.tk.NORMAL
        btn = view.ttk.Button(
            parent, text=caption, default=default,
            command=lambda i=value: self.process_click(i)
        )
        # register button in registry
        self.add_widget(name, btn)
        return btn

    def process_click(self, value):
        # Subclasses decide what a click means (e.g. close with a response).
        pass  # overload
class MessageBox(SimpleDialog):
    def __init__(self, parent, ctrl, buttons=None):
        """ Initialize a MessageBox

        :param parent: Reference to parent view
        :type parent: julesTk.view.BaseView
        :param ctrl: Reference to controller class
        :type ctrl: julesTk.controller.BaseController
        :param buttons: List of button definitions.
            A button definition is dictionary with the keys: id, caption, value
        :type buttons: list[dict[str, str | int | float]]
        """
        super(MessageBox, self).__init__(parent, ctrl, buttons=buttons)

    @classmethod
    def alert(cls, parent, title, message, buttons=None):
        """Show an alert and block until dismissed; returns the clicked value."""
        if not isinstance(parent, (view.tk.Tk, view.tk.Frame, view.BaseView)):
            # BUG FIX: the message claimed a *controller* was expected, but the
            # check (and QuestionBox.ask) require a view/Tk parent.
            raise ValueError("Expected a view not a {}".format(type(parent)))
        mb = cls(parent, None, buttons=buttons)
        mb.title = title
        mb.message = message
        # show() blocks (modal) until the user clicks a button.
        mb.show()
        return mb.response

    def process_click(self, value):
        # Record the clicked button's value and dismiss the dialog.
        self._response = value
        self.close()
class QuestionBox(Dialog):
    """Modal dialog asking the user a free-text question with Ok/Cancel."""

    def __init__(self, parent, ctrl):
        super(QuestionBox, self).__init__(parent, ctrl)
        # Question text, current answer, and validation-error message.
        self._question = view.tk.StringVar(self)
        self._answer = view.tk.StringVar(self)
        self._error = view.tk.StringVar(self)

    @classmethod
    def ask(cls, parent, question, default=None):
        """Show the question modally; return the answer, or ``default`` on cancel."""
        if not isinstance(parent, (view.tk.Tk, view.tk.Frame, view.BaseView)):
            raise ValueError("Expected a view not a {}".format(type(parent)))
        qb = cls(parent, None)
        qb.question = question
        # Pre-seed the response so cancelling yields the default.
        qb._response = default
        qb.answer = default
        qb.show()
        return qb.response

    @property
    def question(self):
        return self._question.get()

    @question.setter
    def question(self, value):
        self._question.set(value)

    @property
    def answer(self):
        return self._answer.get()

    @answer.setter
    def answer(self, value):
        # Normalize None to "" since StringVar cannot hold None.
        value = "" if value is None else value
        self._answer.set(value)

    @property
    def error(self):
        # NOTE: returns the StringVar itself, while the setter takes text.
        return self._error

    @error.setter
    def error(self, text):
        self._error.set(text)

    def show_validation_msg(self):
        """Reveal the (previously hidden) validation-error label."""
        lbv = self.get_widget("validate")
        lbv.grid()

    def hide_validation_msg(self):
        """Hide the validation-error label without forgetting its grid slot."""
        lbv = self.get_widget("validate")
        lbv.grid_remove()

    def header(self, parent):
        # add question
        lbq = view.ttk.Label(parent, textvariable=self._question)
        self.configure_grid(
            lbq, padx=10, pady=5
        )

    def body(self, parent):
        # add answer
        ena = view.ttk.Entry(
            parent, textvariable=self._answer
        )
        self.configure_grid(
            ena, padx=15, pady=5
        )
        # add validation
        view.ttk.Style().configure(
            "Error.TLabel", foreground="red"
        )
        lbv = view.ttk.Label(
            parent, textvariable=self._error, style="Error.TLabel"
        )
        self.add_widget("validate", lbv)
        self.configure_grid(
            lbv, row=1, padx=20
        )
        # Start hidden; shown only when validation fails.
        self.hide_validation_msg()

    def footer(self, parent):
        self.configure_column(parent, [0, 1])
        # add cancel (also bound to the Escape key)
        view.ttk.Button(
            parent, text="Cancel", command=self.cancel
        ).pack(side=view.tk.LEFT)
        self.bind("<Escape>", lambda x: self.cancel())
        # add ok (also bound to the Return key)
        view.ttk.Button(
            parent, text="Ok", command=self.ok
        ).pack(side=view.tk.LEFT)
        self.bind("<Return>", lambda x: self.ok())

    def validate(self):
        """Require a non-empty answer; show an error message otherwise."""
        response = self._answer.get()
        result = response not in (None, "")
        if not result:
            self.error = "Please provide an answer"
            self.show_validation_msg()
        return result

    def cancel(self):
        # Close without touching ``_response`` (keeps the default).
        self.close()

    def ok(self):
        # Accept only a valid answer; otherwise the dialog stays open.
        if self.validate():
            self._response = self._answer.get()
            self.close()
|
# Interactive arithmetic demo: evaluates (a + b) * (c / d) where the operands
# are typed as Indonesian number words (satu..sembilan) defined below.
# BUG FIX: the original mixed Python 2 ``print`` statements with ``print()``
# calls and was a SyntaxError on Python 3; converted fully to Python 3.
satu = 1
dua = 2
tiga = 3
empat = 4
lima = 5
enam = 6
tujuh = 7
delapan = 8
sembilan = 9
loop = 1
nama = "Nama : M Firman Kahfi"
npm = "NPM : 1144015"
kelas = "Kelas : D4TI3B"
import time
start_time = time.time()
while loop == 1:
    print("PROGRAM OPERASI ARITMATIKA LEBIH DARI 1 OPERATOR ")
    print(nama)
    print(npm)
    print(kelas)
    print()
    print("Example - (satu + lima) x (Dua / Satu)")
    print("Masukan operan dalam format kata (angka) seperti inputan di atas")
    pilih = 1
    if pilih == 1:
        # Python 2's input() *evaluated* the typed expression, which is how the
        # word operands ("satu", "lima", ...) resolved to numbers.  eval()
        # preserves that behaviour on Python 3.
        # SECURITY: eval on user input executes arbitrary code; acceptable only
        # for this interactive teaching script, never for untrusted callers.
        operan_a = eval(input("Input :"))
        operan_b = eval(input("+ "))
        operan_c = eval(input("x "))
        operan_d = eval(input("/ "))
        hasil = (operan_a + operan_b) * (operan_c / operan_d)
        # Elapsed time is computed but (as in the original) never printed.
        totalTime = format((time.time() - start_time), '.5f')
        print("hasil dari", "(", operan_a, "+", operan_b, ")", "*", "(", operan_c, "/", operan_d, ")", "adalah", hasil)
        loop = 0
|
from io import BytesIO
import base64
from PIL import Image
from python_helper import Constant as c
from python_framework import Service, ServiceMethod
from dto import QRCodeDto
@Service()
class ImageService :
    """Service that persists images received as base64-encoded strings."""

    @ServiceMethod(requestClass=[str, str])
    def save(self, imageAsBase64, pathWithNameAndExtension) :
        """Decode a base64 image, write it to the given path, return the PIL image."""
        # BUG FIX: ``self.helper`` is never defined on this service and raised
        # AttributeError; use the module-level ``base64`` import instead.
        image = Image.open(BytesIO(base64.b64decode(imageAsBase64)))
        image.save(pathWithNameAndExtension)
        return image
|
from flask import Blueprint, render_template, url_for, redirect
from flask_login import login_user, logout_user, login_required, current_user
from app.form import SearchForm, LoginForm, RegisterForm, WatchForm, UnWatchForm, DelPackageForm
from app.model import Express, Package, User
from config import INTERNAL_CODE
route = Blueprint("view", __name__)
@route.route("/", methods=["GET", "POST"])
def index():
    """Landing page: look up a package by express company and tracking number."""
    form = SearchForm()
    # Populate the express-company dropdown from the database.
    form.express_code.choices = [(e.code, e.name) for e in Express.query.all()]
    if form.validate_on_submit():
        if current_user and current_user.is_authenticated:
            pkg = Package.get_package(current_user.user_id, form.express_code.data, form.package_number.data)
        else:
            pkg = None
            # Not logged in: surface the error on the tracking-number field.
            form.errors['package_number'] = ['查询前请先登录!']
        if pkg:
            return redirect(url_for("view.package_info", package_id=pkg.package_id))
        else:
            if not form.errors:
                form.errors['package_number'] = ['未找到相关信息, 请核实单号和物流公司']
    return render_template("index.html", form=form)
@route.route("/package/<package_id>", methods=['GET', 'POST'])
@login_required
def package_info(package_id):
    """Show a single package and handle its watch/unwatch form submissions."""
    pkg = Package.get_package_by_id(package_id)
    if not pkg:
        return redirect(url_for("view.index"))
    # Users may only view their own packages.
    if pkg.user_id != current_user.user_id:
        return redirect(url_for("view.user_package"))
    express = Express.query.filter_by(express_id=pkg.express_id).first()
    watch_form = WatchForm()
    unwatch_form = UnWatchForm()
    # Each form carries the package id so a submission cannot act on a
    # different package than the one displayed.
    if watch_form.validate_on_submit() and pkg.package_id == watch_form.watch_package_id.data:
        pkg.watching(watch_form.watch_nicename.data)
        return redirect(url_for("view.package_info", package_id=pkg.package_id))
    if unwatch_form.validate_on_submit() and pkg.package_id == unwatch_form.unwatch_package_id.data:
        pkg.unwatching()
        return redirect(url_for("view.package_info", package_id=pkg.package_id))
    return render_template("package.html", package=pkg, express=express, watch_form=watch_form,
                           unwatch_form=unwatch_form)
@route.route("/user/package", methods=['GET', 'POST'])
@login_required
def user_package():
    """List the current user's packages; handle the delete-package form."""
    packages = Package.query.filter_by(user_id=current_user.user_id).all()
    delete_form = DelPackageForm()
    if delete_form.validate_on_submit():
        pkg = Package.query.filter_by(package_id=delete_form.delete_package_id.data).first()
        # Only allow deleting a package the current user owns.
        if pkg and pkg.user_id == current_user.user_id:
            pkg.delete()
            return redirect(url_for("view.user_package"))
    return render_template("user/packages.html", packages=packages, delete_form=delete_form)
@route.route("/user/watching", methods=['GET', 'POST'])
@login_required
def user_watching():
    """List the user's watched packages; handle the unwatch form."""
    packages = Package.get_watching_package_by_user_id(current_user.user_id)
    unwatch_form = UnWatchForm()
    if unwatch_form.validate_on_submit():
        pkg = Package.query.filter_by(package_id=unwatch_form.unwatch_package_id.data).first()
        # Only allow un-watching a package the current user owns.
        if pkg and pkg.user_id == current_user.user_id:
            pkg.unwatching()
            return redirect(url_for("view.user_watching"))
    return render_template("user/watching.html", packages=packages, unwatch_form=unwatch_form)
@route.route("/user/open-api")
@login_required
def user_token():
    """Show the current user's API token for the open API."""
    token = current_user.get_token()
    return render_template("user/token.html", token=token)
@route.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate a user and start a login session."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user and user.check_password(form.password.data):
            login_user(user, remember=form.remember.data)
            return redirect(url_for("view.user_package"))
        # Deliberately vague error: does not reveal whether the username exists.
        form.errors['username'] = ['用户名或密码错误']
    return render_template("login.html", form=form)
@route.route("/logout")
@login_required
def logout():
    """End the login session and return to the landing page."""
    logout_user()
    return redirect(url_for("view.index"))
@route.route("/register", methods=['GET', 'POST'])
def register():
    """Register a new account, gated by an invitation ("internal") code."""
    form = RegisterForm()
    if form.validate_on_submit():
        if form.internal_code.data != INTERNAL_CODE:
            form.errors['internal_code'] = ["内测码错误, 请联系作者。"]
        if not form.errors:
            # Username and e-mail must both be unused.
            user = User.query.filter_by(username=form.username.data).first()
            if user:
                form.errors['username'] = ['用户名 {} 已被占用'.format(form.username.data)]
            else:
                user = User.query.filter_by(email=form.email.data).first()
                if user:
                    form.errors['email'] = ['邮箱 {} 已被占用'.format(form.email.data)]
        if not form.errors:
            # NOTE(review): assumes User(...) persists the new account itself;
            # nothing here adds it to a session -- confirm against the model.
            user = User(form.username.data, form.email.data, form.password.data)
            if user:
                return redirect(url_for("view.login"))
    return render_template("register.html", form=form)
@route.route("/about")
def about():
    """Serve the static about page."""
    return render_template("about.html")
|
import random
hand_list = {1:"グー",2:"チョキ",3:"パー"}
def janken():
    """Play one round of rock-paper-scissors against the computer.

    Returns a list ``[player_hand, cpu_rock, cpu_scissors, cpu_paper]`` where
    the last three entries are 0/1 flags for the computer's choice.
    """
    rock, scissors, paper = 0, 0, 0
    cpu_roll = random.randint(0, 90)
    # Keep prompting until the player enters a valid hand (1, 2 or 3).
    while True:
        hand = int(input("1:グー 2:チョキ 3:パー : "))
        if hand in (1, 2, 3):
            break
        print()
        print("1,2,3のどれかを入力してください")
    print("ポン!")
    print()
    print("あなた:" + hand_list[hand])
    # Map the roll onto the computer's hand (roughly a third each).
    if cpu_roll <= 30:
        rock = 1
        print("相手:グー")
    elif 30 < cpu_roll <= 60:
        scissors = 1
        print("相手:チョキ")
    else:
        paper = 1
        print("相手:パー")
    return [hand, rock, scissors, paper]
print("じゃんけんスタート!")
print()
print("じゃんけん!")
# Keep playing until someone wins.  janken() returns [player_hand, rock,
# scissors, paper]; player_hand is 1=rock, 2=scissors, 3=paper and the last
# three entries flag the computer's hand.
while(1):
    hand = janken()
    print("-----------------------------------------------")
    if (hand[0] == 1 and hand[2] == 1) or (hand[0] == 2 and hand[3] == 1) or (hand[0] == 3 and hand[1] ==1):
        print("あなたの勝ち")
        break
    elif (hand[0] == 1 and hand[3] == 1) or (hand[0] == 2 and hand[1] == 1) or (hand[0] == 3 and hand[2] ==1):
        print("あなたの負け")
        break
    else:
        # A draw: play another round.
        print("あいこで!")
import random

# Generate ten random values in [1, 5], print them, then report how many
# of them equal 3.
values = [random.randrange(1, 6) for _ in range(10)]
print("Массив:")
print(' '.join(map(str, values)))
print(f"Элементов, которые равны 3: {values.count(3)}")
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 10:26:16 2017
@author: Peter Wilson
"""
# Symbolic derivation of a DSG ("discrete shear gap") triangle plate element's
# shear B-matrix using sympy; prints C++-ready expressions at the end.
import numpy as np
import sympy as sp
print("Sympy version: ",sp.__version__)
x = sp.Symbol('x')
y = sp.Symbol('y')
xi = sp.Symbol('xi')
eta = sp.Symbol('eta')
loc1 = sp.Symbol('loc1')
loc2 = sp.Symbol('loc2')
alpha = sp.Symbol('alpha')
beta = sp.Symbol('beta')
# a1..a9: Ansatz coefficients solved for later from the nodal conditions.
a1 = sp.Symbol('a1')
a2 = sp.Symbol('a2')
a3 = sp.Symbol('a3')
a4 = sp.Symbol('a4')
a5 = sp.Symbol('a5')
a6 = sp.Symbol('a6')
a7 = sp.Symbol('a7')
a8 = sp.Symbol('a8')
a9 = sp.Symbol('a9')
Bub = sp.Symbol('Bub')
# Nodal transverse displacements and plate rotations (px/py convention).
W1 = sp.Symbol('W1')
W2 = sp.Symbol('W2')
W3 = sp.Symbol('W3')
PX1 = sp.Symbol('PX1')
PX2 = sp.Symbol('PX2')
PX3 = sp.Symbol('PX3')
PY1 = sp.Symbol('PY1')
PY2 = sp.Symbol('PY2')
PY3 = sp.Symbol('PY3')
# THESE ARE KRATOS NODAL ROTATIONS - DEFINED
RX1 = sp.Symbol('RX1')
RX2 = sp.Symbol('RX2')
RX3 = sp.Symbol('RX3')
RY1 = sp.Symbol('RY1')
RY2 = sp.Symbol('RY2')
RY3 = sp.Symbol('RY3')
a = sp.Symbol('a') #x2 - x1
b = sp.Symbol('b') #y2 - y1
c = sp.Symbol('c') #y3 - y1
d = sp.Symbol('d') #x3 - x1
detJ = sp.Symbol('detJ')
#Nodal coords
x1 = sp.Symbol('x1')
y1 = sp.Symbol('y1')
x2 = sp.Symbol('x2')
y2 = sp.Symbol('y2')
x3 = sp.Symbol('x3')
y3 = sp.Symbol('y3')
#Node coordinates set to example values (unit right triangle)
x1 = 0.0
y1 = 0.0
x2 = 1.0
y2 = 0.0
x3 = 0.0
y3 = 1.0
#Shape functions (linear triangle in parametric coordinates)
N1 = (1.0-xi-eta)
N2 = xi
N3 = eta
# Section 1 ---------------------------------------------------
# Quadratic Ansatz for the rotation field PHI and bubble-enriched shear GAM.
PHI = a1 + a2*x +a3*y + a4*x**2 + 0.5*(a5+a6)*x*y + a7*y**2
#GAM = a8*x + a9*y - Bub*x*y #generic bubble mode
GAM = a8*x + a9*y - (a8+a9)*x*y #generic bubble mode
#GAM = a8*x + a9*y #no bubble
PD = 0.5*(a5-a6)*x*y
C = a5 - a6
# Section 2 ---------------------------------------------------
# Toggle between the basic DSG formulation and the "super" DSG variant.
is_normal_DSG = False
PX = PHI + PD
PY = PHI - PD
if(is_normal_DSG):
    print("\nUsing basic DSG formulation!")
    GX = a8*x + a9*y #normal DSG
    GY = a8*x + a9*y #normal DSG
else:
    GX = GAM + PD
    GY = GAM - PD
# Section 3 ---------------------------------------------------
# Shear strains: derivatives of the rotation/shear fields.
px = sp.diff(PX,x)
py = sp.diff(PY,y)
gx = sp.diff(GX,x)
gy = sp.diff(GY,y)
print("\nSymbolic gx:",gx)
print("\nSymbolic gy:",gy)
# Section 4 ---------------------------------------------------
# Skip internal energy derivation, just write bubble mode result
Bub = a8 + a9
# Alternative representation of the displacement field as the difference between thrust and Kirchhoff
W = GAM - PHI
WX = GX - PX
WY = GY - PY
#Identification of the Ansatz coefficients with the node values for shifts Wi and rotations
# (converted to zero value eqns)
eq1 = W1 - (W.subs(x,x1)).subs(y,y1)
eq2 = W2 - (W.subs(x,x2)).subs(y,y2)
eq3 = W3 - (W.subs(x,x3)).subs(y,y3)
eq4 = PX1 - (px.subs(x,x1)).subs(y,y1)
eq5 = PX2 - (px.subs(x,x2)).subs(y,y2)
eq6 = PX3 - (px.subs(x,x3)).subs(y,y3)
eq7 = PY1 - (py.subs(x,x1)).subs(y,y1)
eq8 = PY2 - (py.subs(x,x2)).subs(y,y2)
eq9 = PY3 - (py.subs(x,x3)).subs(y,y3)
# Setup system to solve [A][a] = [W] -------------------------
A = sp.zeros(9) #system matrix
UVector = sp.zeros(9,1) #vector of displacements
UVector[0] = W1
UVector[1] = W2
UVector[2] = W3
UVector[3] = PX1
UVector[4] = PX2
UVector[5] = PX3
UVector[6] = PY1
UVector[7] = PY2
UVector[8] = PY3
results = list(sp.linsolve([eq1,eq2,eq3,eq4,eq5,eq6,eq7,eq8,eq9],[a1,a2,a3,a4,a5,a6,a7,a8,a9]))
ansatzCoefficients = results[0]
print("\nAnsatz coefficients solved")
for i in range(9):
    print("a",i,"=\t",ansatzCoefficients[i])
#Go through and update everything "_u" (fields with solved coefficients)
PHI_u = PHI.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
GAM_u = GAM.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
PD_u = PD.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
C_u = C.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
print("\nUpdated C",C_u)
#PX_u = PHI_u + PD_u
#PY_u = PHI_u - PD_u
if (is_normal_DSG):
    #normal DSG
    GX_u = GX.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
    GY_u = GY.subs([(a1,ansatzCoefficients[0]),(a2,ansatzCoefficients[1]),(a3,ansatzCoefficients[2]),(a4,ansatzCoefficients[3]),(a5,ansatzCoefficients[4]),(a6,ansatzCoefficients[5]),(a7,ansatzCoefficients[6]),(a8,ansatzCoefficients[7]),(a9,ansatzCoefficients[8])])
else:
    # super DSG
    GX_u = GAM_u + PD_u
    GY_u = GAM_u - PD_u
    print("\nGAM_u_n1:",sp.simplify(GAM_u.subs([(x,0),(y,0)])))
    print("\nGAM_u_n2:",sp.simplify(GAM_u.subs([(x,1),(y,0)])))
    print("\nGAM_u_n3:",sp.simplify(GAM_u.subs([(x,0),(y,1)])))
# Reconstruction of above field using nodal shear gaps and SFs ###########################
# 'Shear gaps' evaluated at nodes
print("\n\nShear gaps evaluated at nodes:")
print("\nGX_u_n1:",sp.simplify(GX_u.subs([(x,0),(y,0)])))
print("\nGX_u_n2:",sp.simplify(GX_u.subs([(x,1),(y,0)])))
print("\nGX_u_n3:",sp.simplify(GX_u.subs([(x,0),(y,1)])))
print("\n\nGY_u_n1:",sp.simplify(GY_u.subs([(x,0),(y,0)])))
print("\n\nGY_u_n2:",sp.simplify(GY_u.subs([(x,1),(y,0)])))
print("\n\nGY_u_n3:",sp.simplify(GY_u.subs([(x,0),(y,1)])))
sg_xi_n1 = GX_u.subs([(x,0),(y,0)])
sg_xi_n2 = GX_u.subs([(x,1),(y,0)])
sg_xi_n3 = GX_u.subs([(x,0),(y,1)])
sg_eta_n1 = GY_u.subs([(x,0),(y,0)])
sg_eta_n2 = GY_u.subs([(x,1),(y,0)])
sg_eta_n3 = GY_u.subs([(x,0),(y,1)])
# Manually override shear gaps, cartesian differences a thru d introduced
print("\n\nMANUALLY OVERRIDING SHEAR GAPS!!!!!!!!!!")
sg_xi_n1 = 0.0
sg_xi_n2 = 0.5*a*PX1 + 0.5*a*PX2 - 1.0*W1 + 1.0*W2
sg_xi_n3 = 0.5*b*PY1 + 0.5*b*PY3 - 1.0*W1 + 1.0*W3
sg_eta_n1 = 0.0
sg_eta_n2 = 0.5*d*PX1 + 0.5*d*PX2 - 1.0*W1 + 1.0*W2
sg_eta_n3 = 0.5*c*PY1 + 0.5*c*PY3 - 1.0*W1 + 1.0*W3
# Interpolate the nodal shear gaps over the element with the linear SFs.
sg_xi_sf = (1.0-x-y)*sg_xi_n1 + x*sg_xi_n2 + y*sg_xi_n3
sg_eta_sf = (1.0-x-y)*sg_eta_n1 + x*sg_eta_n2 + y*sg_eta_n3
GX_u = sg_xi_sf
GY_u = sg_eta_sf
# Reconstruction of above field using nodal shear gaps and SFs ###########################
# DOF transformation from plate theory to FEM rotational dofs
# vector of displacements------------------------------------------------------
UVector = sp.zeros(9,1)
UVector[0] = W1
UVector[1] = W2
UVector[2] = W3
UVector[3] = RX1
UVector[4] = RX2
UVector[5] = RX3
UVector[6] = RY1
UVector[7] = RY2
UVector[8] = RY3
print("Vector of displacements (rotations are per FEM):\n",UVector)
# Map plate-theory rotations (PX, PY) to FEM nodal rotations (RY, -RX).
GX_u = GX_u.subs([(PY1,-RX1),(PY2,-RX2),(PY3,-RX3),(PX1,RY1),(PX2,RY2),(PX3,RY3)])
GY_u = GY_u.subs([(PY1,-RX1),(PY2,-RX2),(PY3,-RX3),(PX1,RY1),(PX2,RY2),(PX3,RY3)])
# Section 3 ---------------------------------------------------
#px_u = sp.diff(PX_u,x)
#py_u = sp.diff(PY_u,y)
#gx_u = sp.diff(GX_u,x)
#gy_u = sp.diff(GY_u,y)
# Cartesian transformation (Jacobian entries a,b,c,d; 1/detJ factored out)
gx_u = sp.diff(GX_u,x)*c/detJ + sp.diff(GY_u,y)*-b/detJ
gy_u = sp.diff(GY_u,y)*a/detJ + sp.diff(GX_u,x)*-d/detJ
print("\n\n\nSymbolic B Matrix ( transformed to cartesian ) (1/detJ taken out) = ")
# Assemble B Matrix ------------------------------------------
# Each column is the derivative of the shear strain w.r.t. one nodal DOF.
B = sp.zeros(2,9)
for col in range(9):
    B[0,col] = sp.diff(gx_u,UVector[col])
    B[1,col] = sp.diff(gy_u,UVector[col])
sp.pprint(sp.simplify(B*detJ),wrap_line=False)
print("\n\n\n\n\nPrinting individual entries of original Bmat (1/detJ taken out), just for easy copying into C++:")
#Bsimp = sp.factor(B)*detJ #detJ taken out for clarity
#testing below
B = B.subs([(x,loc1),(y,loc2)])
Bsimp = sp.factor(B)*detJ
#testing end
for col in range(9):
    print("BSuper(0,",col,")=",Bsimp[0,col],";")
for col in range(9):
    print("BSuper(1,",col,")=",Bsimp[1,col],";")
#Rearraging B-matrix to original DSG dofs for easier comparison ---------------
# Here -----------------> Original DSG
# [w1,w2,w3,phix1,...]' --> [w1,phix1,phiy1,w2,...]'
B_original_DSG_ordering = sp.zeros(2,9)
for gamma in range(2):
    for node in range(3):
        B_original_DSG_ordering[gamma,node*3] = B[gamma,node]
        B_original_DSG_ordering[gamma,node*3+1] = B[gamma,3+node]
        B_original_DSG_ordering[gamma,node*3+2] = B[gamma,6+node]
print("\n\n\nB Matrix (cartesian space, factor of 1/detJ taken out, ordered as per original DSG formulation) = \n")
sp.pprint(sp.factor(B_original_DSG_ordering)*detJ,wrap_line=False) #detJ taken out for clarity
import numpy as np
import copy
from td import TD
from alg_plugin import AlgPlugin
import common
class TDLearning(AlgPlugin):
    """Tabular TD-learning plugin (on-/off-policy selected by ``next_action_considered``)."""

    def __init__(self, alpha, gamma, eligibility, epsilon, next_action_considered):
        super().__init__()
        # store the hyper parameters
        self.alpha = alpha
        self.gamma = gamma
        self.eligibility = eligibility
        self.epsilon = epsilon
        # True -> the next action's value is used in the update (SARSA-style);
        # False -> the max over actions is used (Q-learning-style).
        self.next_action_considered = next_action_considered
        # use 'epsilon greedy' to chose action while agent is in a state
        #self.action_selection = common.epsilon_greedy
        # __experiment
        self.action_selection = common.explore
        self.steps = 0
        # use TD class to do the actual algorithm
        self.td = TD(alpha, gamma, eligibility, self.value_callback, self.update_callback)
        # filled in when we know environment layout
        self.n_features = None
        self.action_space = None
        # underlying storage for store value per action per state
        # it's called 'q-table' just for convention, actually it can be used for any TD learning
        # as long as the environment has finite number of states
        self.qtable = {}
        # delayed learning: updates go to qtable_future until switched off
        self.qtable_future = None
        self._delayed_learning = False

    @property
    def delayed_learning(self):
        return self._delayed_learning

    @delayed_learning.setter
    def delayed_learning(self, onoff):
        # Accept only boolean-ish values (same semantics as the original check).
        assert onoff in (True, False)
        if onoff != self._delayed_learning:
            if onoff:
                # Start accumulating updates in a snapshot of the table.
                self.qtable_future = copy.deepcopy(self.qtable)
            else:
                # Switch-off promotes the accumulated table to the live one.
                assert self.qtable_future is not None
                self.qtable = self.qtable_future
                self.qtable_future = None
            self._delayed_learning = onoff

    def delayed_learning_catchup(self):
        """Sync the live table with the accumulated future table."""
        if self._delayed_learning:
            self.qtable = copy.deepcopy(self.qtable_future)

    def value_callback(self, state, action):
        """TD algorithm call this function to query action-value of a state"""
        #print("value_callback(): state: {}, action: {}".format(state, action))
        if action is None:
            # NOTE(review): reads the *live* table even in delayed mode --
            # confirm that is intended for the max-value query.
            return np.max(self.qtable[state])
        else:
            if self.delayed_learning:
                if self.qtable_future.get(state) is None:
                    self.qtable_future[state] = [np.float32(0)] * self.action_space.n_actions
                return self.qtable_future[state][action]
            else:
                return self.qtable[state][action]

    def update_callback(self, state, action, delta):
        """TD algorithm call this function to update action-value of a state"""
        # wk_debug
        if delta != 0:
            #if delta < 0.0000000000000001 and delta > -0.000000000000001:
            #print("update_callback(): state: {}, action: {}, delta: {:.24f}".format(state, action, delta))
            if delta > 10000000000 or delta < -100000000000:
                print("update_callback(): state: {}, action: {}, delta: {:.24f}".format(state, action, delta))
        if self.delayed_learning:
            if self.qtable_future.get(state) is None:
                self.qtable_future[state] = [np.float32(0)] * self.action_space.n_actions
            self.qtable_future[state][action] += np.float32(delta)
        else:
            self.qtable[state][action] += np.float32(delta)

    ##############################################################
    #                                                            #
    #    Below is the implementation of 'AlgPlugin' interface    #
    #                                                            #
    ##############################################################
    def layout(self, n_features, action_space, preset_states_list):
        """Reset learner state for a new environment layout."""
        # __experiment
        self.steps = 0
        self.n_features = n_features
        self.action_space = action_space
        self.qtable = {}
        self.qtable_future = None
        self._delayed_learning = False
        for (state, value, is_terminal) in preset_states_list:
            self.qtable[state] = [np.float32(value)] * self.action_space.n_actions

    def episode_start(self, episode, state):
        #super().episode_start(episode, state)
        if self.qtable.get(state) is None:
            self.qtable[state] = [np.float32(0)] * self.action_space.n_actions
        self.td.episode_start(state)
        return self.next_action(state)

    def one_step(self, state, action, reward, state_next):
        """Feed one transition to the TD core; return the next action to take."""
        if self.qtable.get(state) is None:
            self.qtable[state] = [np.float32(0)] * self.action_space.n_actions
        if self.qtable.get(state_next) is None:
            self.qtable[state_next] = [np.float32(0)] * self.action_space.n_actions
        next_action_index = self._next_action_index(state_next)
        if self.next_action_considered:
            use_this_action = next_action_index
        else:
            use_this_action = None
        # need to translate from action to action_index, underlying TD algorithm
        # assume that actions are non-negative integer
        action_index = self.action_space.action_index(action)
        self.td.step(state, action_index, reward, state_next, use_this_action)
        return self.action_space.action_at(next_action_index)

    def episode_end(self):
        # __experiment
        self.steps += 1
        self.td.episode_end()

    def _next_action_index(self, state):
        # __experiment
        action_index = self.action_selection(self.steps, self.qtable[state])
        #print("next action index:", action_index)
        return action_index
        #return self.action_selection(self.epsilon, self.qtable[state])

    def next_action(self, state):
        """Given the current state, based on selection algorithm select next action for agent"""
        action_index = self._next_action_index(state)
        return self.action_space.action_at(action_index)

    def best_action(self, state):
        """Select the action that has max value in a given state"""
        action_index = np.argmax(self.qtable[state])
        return self.action_space.action_at(action_index)

    def get_action_values(self, state):
        return self.qtable.get(state)

    def get_action_values_dict(self, state):
        """Return {action: value} for a state, or None if the state is unknown."""
        action_values = self.qtable.get(state)
        if action_values is None:
            return None
        else:
            action_values_dict = {self.action_space.action_at(i): v for i, v in enumerate(action_values)}
            return action_values_dict

    def whole_episode(self, one_episode):
        """Replay a full recorded episode (list of (s, a, r, s') tuples)."""
        # BUG FIX: episode_start() takes (episode, state) and the per-step
        # method is one_step(); the original called episode_start(state) and a
        # non-existent self.step(), raising TypeError/AttributeError.
        self.episode_start(0, one_episode[0][0])
        for state, action, reward, state_next in one_episode:
            self.one_step(state, action, reward, state_next)
        self.episode_end()
if __name__ == '__main__':
    # Smoke-test the two action-selection strategies on a toy value list.
    sample_values = [2, 4, 6, 8]
    print(common.epsilon_greedy(0.8, sample_values))
    print(common.explore(10000, sample_values))
|
from enum import Enum


class EthereumChain(Enum):
    """Ethereum network chain ids (values are the canonical chain ids)."""
    MAIN_NET = 1
    ROPSTEN = 3
    RINKEBY = 4
    UBIQ = 8
    KOVAN = 42
    SOKOL = 77
    # Project-specific test networks.
    DOLOMITE_TEST = 1001
    ZEROEX_TEST = 1337
|
from onegov.agency.models import ExtendedAgency
from onegov.agency.utils import filter_modified_or_created
from onegov.core.collection import GenericCollection, Pagination
from onegov.people import AgencyCollection
from sqlalchemy import or_, func
from sqlalchemy.orm import joinedload
class ExtendedAgencyCollection(AgencyCollection):
    """Agency collection that yields ExtendedAgency instances."""

    __listclass__ = ExtendedAgency

    # Used to create link for root pdf based on timestamp
    def __init__(self, session, root_pdf_modified=None, browse=None):
        super(ExtendedAgencyCollection, self).__init__(session)
        # Timestamp of the root PDF; used to build cache-busting links.
        self.root_pdf_modified = root_pdf_modified
        self.browse = browse
class PaginatedAgencyCollection(GenericCollection, Pagination):
    """Paginated, filterable collection of ExtendedAgency records."""

    def __init__(self, session, page=0, parent=None, exclude_hidden=True,
                 joinedload=None, title=None, updated_gt=None,
                 updated_ge=None, updated_eq=None, updated_le=None,
                 updated_lt=None):
        super().__init__(session)
        self.page = page
        # filter keywords
        # parent: None = no filter, False = roots only, else a parent id
        self.parent = parent
        self.title = title
        self.updated_gt = updated_gt
        self.updated_ge = updated_ge
        self.updated_eq = updated_eq
        self.updated_le = updated_le
        self.updated_lt = updated_lt
        # end filter keywords
        self.exclude_hidden = exclude_hidden
        # relationship names to eager-load via sqlalchemy joinedload
        self.joinedload = joinedload or []

    @property
    def model_class(self):
        return ExtendedAgency

    def __eq__(self, other):
        # NOTE(review): equality deliberately only compares page and parent,
        # not the other filters -- confirm this matches Pagination's needs.
        return (
            other.page == self.page
            and other.parent == self.parent
        )

    def subset(self):
        return self.query()

    @property
    def page_index(self):
        return self.page

    def page_by_index(self, index):
        """Return the collection for another page with the same filters."""
        # BUG FIX: parent, exclude_hidden and joinedload were dropped when
        # paginating, so page 2+ silently lost those filters.
        return self.__class__(
            self.session,
            page=index,
            parent=self.parent,
            exclude_hidden=self.exclude_hidden,
            joinedload=self.joinedload,
            title=self.title,
            updated_gt=self.updated_gt,
            updated_ge=self.updated_ge,
            updated_eq=self.updated_eq,
            updated_le=self.updated_le,
            updated_lt=self.updated_lt,
        )

    def for_filter(self, **kwargs):
        """Return a new collection (page 0) with the given filters replaced."""
        # BUG FIX: carry over parent/exclude_hidden/joinedload unless
        # explicitly overridden, instead of silently resetting them.
        return self.__class__(
            session=self.session,
            parent=kwargs.get('parent', self.parent),
            exclude_hidden=kwargs.get('exclude_hidden', self.exclude_hidden),
            joinedload=kwargs.get('joinedload', self.joinedload),
            title=kwargs.get('title', self.title),
            updated_gt=kwargs.get('updated_gt', self.updated_gt),
            updated_ge=kwargs.get('updated_ge', self.updated_ge),
            updated_eq=kwargs.get('updated_eq', self.updated_eq),
            updated_le=kwargs.get('updated_le', self.updated_le),
            updated_lt=kwargs.get('updated_lt', self.updated_lt),
        )

    def query(self):
        query = super().query()
        for attribute in self.joinedload:
            query = query.options(
                joinedload(getattr(ExtendedAgency, attribute))
            )
        if self.exclude_hidden:
            # Only published agencies with public (or unset) access.
            query = query.filter(
                or_(
                    ExtendedAgency.meta['access'] == 'public',
                    ExtendedAgency.meta['access'] == None,  # noqa: E711 (SQLAlchemy IS NULL)
                ),
                ExtendedAgency.published.is_(True)
            )
        if self.parent is False:
            # Explicit False means "top-level agencies only".
            query = query.filter(ExtendedAgency.parent_id == None)  # noqa: E711
        elif self.parent:
            query = query.filter(ExtendedAgency.parent_id == self.parent)
        if self.title:
            # if multiple words in search filter for title we 'or' link
            # them using ilike
            query = query.filter(or_(
                func.lower(
                    func.unaccent(ExtendedAgency.title)
                ).ilike(f'%{element}%') for element in self.title.split()
            ))
        if self.updated_gt:
            query = filter_modified_or_created(query, '>', self.updated_gt,
                                               ExtendedAgency)
        if self.updated_ge:
            query = filter_modified_or_created(query, '>=', self.updated_ge,
                                               ExtendedAgency)
        if self.updated_eq:
            query = filter_modified_or_created(query, '==', self.updated_eq,
                                               ExtendedAgency)
        if self.updated_le:
            query = filter_modified_or_created(query, '<=', self.updated_le,
                                               ExtendedAgency)
        if self.updated_lt:
            query = filter_modified_or_created(query, '<', self.updated_lt,
                                               ExtendedAgency)
        return query
|
#figure out how to link login to usersession and pass the info onto usersession
from flask import Flask, render_template, request, session, redirect,url_for
import utils
app = Flask(__name__)
@app.route("/usersession")
def user():
    """Show the logged-in user's page; bounce to /login without a session."""
    # BUG FIX: session['username'] raised KeyError when this page was visited
    # without logging in first; redirect to the login page instead.
    uname = session.get('username')
    if uname is None:
        return redirect("/login")
    return render_template("user.html", uname=uname)
@app.route("/about")
def about():
    """Serve the static about page."""
    return render_template("about.html")
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the posted credentials (POST)."""
    if request.method == "GET":
        return render_template("login.html")
    else:
        # NOTE(review): 'button' is read but never used -- confirm the form
        # actually needs it.
        button = request.form['button']
        uname = request.form['Username']
        pword = request.form['Password']
        if utils.authenticate(uname, pword):
            # BUG FIX: the session was populated *before* authentication, so a
            # failed login still established a logged-in session.  Store the
            # username only after the credentials check succeeds.
            session['username'] = uname
            return redirect("/usersession")
        else:
            return render_template("login.html", error="Invalid username or password")
if __name__ == "__main__":
    # Development server settings; debug mode and a hard-coded secret key
    # are only acceptable for local testing, never in production.
    app.debug = True
    app.secret_key = "hello"
    app.run(host='0.0.0.0', port=8000)
|
from MyException import MyException
from Date import Date

# Build a sample date and exercise findDate(), which signals problems by
# raising the project's MyException.
date = Date(20, 12, 2002)
print(date)
try:
    date.findDate()
except MyException as e:
    # MyException carries its message in the ``value`` attribute.
    print(e.value)
|
from rest_framework import serializers
from app.users.serializers.user import UserSerializer
from app.pools.models.pool_user import PoolUser
class PoolUserListSerializer(serializers.ModelSerializer):
    """Serializer for listing PoolUser rows with the related user nested."""
    # Nested read-only representation of the related user.
    user = UserSerializer()

    class Meta:
        model = PoolUser
        fields = '__all__'
from django.shortcuts import render
from rest_framework import serializers, viewsets
from . import models
class AlarmSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Alarm model."""
    class Meta:
        model = models.Alarm
        fields = '__all__'
class Alarms(viewsets.ModelViewSet):
    """CRUD endpoints for Alarm objects."""
    serializer_class = AlarmSerializer
    queryset = models.Alarm.objects.all()
class ColorSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Color model."""
    class Meta:
        model = models.Color
        fields = '__all__'
class Colors(viewsets.ModelViewSet):
    """CRUD endpoints for Color objects."""
    serializer_class = ColorSerializer
    queryset = models.Color.objects.all()
class ContentObjectRelatedField(serializers.RelatedField):
    """
    A custom field to use for the `content_object` generic relationship.
    """
    def to_representation(self, value):
        """
        Serialize tagged objects to a simple textual representation.

        Dispatches on the concrete model behind the generic relation;
        raises for any model this field does not know about.
        """
        # Removed a leftover debug print(value) from here.
        if isinstance(value, models.Alarm):
            return value.name
        if isinstance(value, models.Color):
            return value.color
        raise Exception('Unexpected type of tagged object')
class RunningSerializer(serializers.ModelSerializer):
    """Serialize Running rows, exposing the generic-relation bookkeeping."""
    # running_content_object = ContentObjectRelatedField()
    class Meta:
        model = models.Running
        fields = ('content_type', 'object_id', 'running_content_object')
class Running(viewsets.ModelViewSet):
    """Full CRUD API over Running objects."""
    serializer_class = RunningSerializer
    queryset = models.Running.objects.all()
"""
Author: Sidhin S Thomas (sidhin@trymake.com)
Copyright (c) 2017 Sibibia Technologies Pvt Ltd
All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
"""
from django import forms
from django.core.exceptions import ValidationError
from trymake import settings
from trymake.apps.commons.models import Image
from trymake.apps.product.models import Product, AdditionalImages
class ProductAddForm(forms.ModelForm):
    """Form for creating/editing a Product; validates the image upload size."""
    class Meta:
        model = Product
        exclude = ['additional_images']

    # Checking if the upload size is within Max Upload size
    def clean_product_image(self):
        """Validate and return the uploaded product image.

        Raises ValidationError when the file exceeds settings.MAX_UPLOAD_SIZE.
        Django requires clean_<field> methods to return the cleaned value;
        the original omitted the return, silently discarding the image.
        """
        image = self.cleaned_data['product_image']
        if image.size > settings.MAX_UPLOAD_SIZE:
            # Error code kept as-is ('upload_limit_exeeded' [sic]) so any
            # caller matching on it keeps working; message typo fixed.
            raise ValidationError("File size limit exceeded.", 'upload_limit_exeeded')
        return image
class AdditionalImagesForm(forms.ModelForm):
    """Form for attaching an extra image to a product."""
    class Meta:
        model = AdditionalImages
        exclude = ['date_added','product']

    # Link the image to its product by slug, then persist it.
    def save_image(self,product_slug):
        # NOTE(review): assumes AdditionalImages exposes a `product_slug`
        # attribute/field — confirm against the model definition.
        self.instance.product_slug = product_slug
        return self.save()
#!env python3
# -*- coding: utf-8 -*-
"""Plot Nobel-winner counts grouped by category and gender."""
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')  # headless backend; must be selected before importing pyplot
import matplotlib.pyplot as plt
import json
import seaborn as sb

plt.rcParams['figure.figsize'] = 8, 4

winners = pd.read_json('../data/nobel_winners_biopic_cleaned.json')
gender_groups = winners.groupby('gender')
cat_gender_groups = winners.groupby(['category', 'gender'])

# Show the female physics laureates and the per-(category, gender) counts.
print(cat_gender_groups.get_group(('Physics', 'female'))[['name','year']])
print(cat_gender_groups.size())

# Bar chart of group sizes, written to disk.
figure = cat_gender_groups.size().plot(kind='bar').get_figure()
figure.savefig("11_3.png")
# !encoding=utf8
# name = raw_input()
# print name
#
# Python 2 demo: dict iteration, membership tests, and list comprehensions.
t = {"name":"wang", "agent":2}
# Iterate key/value pairs of the dict.
for (key,value) in t.items():
    print key, value
d = {}
d["key1"] = "value1"
# Membership test (True) vs .get on a missing key (None).
l = "key1" in d
l2 = d.get("key12")
print l
print l2
print [x*x for x in range(1, 11)] #list
print [x*x for x in range(1, 11) if x%2==0] # keep only even x
print [m+n for m in 'abc' for n in 'xyz'] # all letter pairings (cross product)
|
import numpy as np
from sklearn import svm
from Classification.ClassifierBaseClass import ClassifierBaseClass
from config import *
class SVM (ClassifierBaseClass):
    """Thin wrapper around sklearn's SVC.

    All hyperparameters are taken from **kwargs (falling back to sklearn-style
    defaults) and stored on the instance; setClassifer() then builds the
    underlying svm.SVC from them.
    """
    def __init__(self, **kwargs):
        # Forward shared setup to the project base class.
        ClassifierBaseClass.__init__(self, **kwargs)
        # SVC hyperparameters, each overridable via a keyword argument.
        self.C = kwargs.get("C", 1.0)
        self.kernel = kwargs.get("kernel", "rbf")
        self.degree = kwargs.get("degree",3)
        self.gamma = kwargs.get("gamma", "auto")
        self.coef0 = kwargs.get("coef0", 0.0)
        self.probability = kwargs.get("probability", False)
        self.shrinking = kwargs.get("shrinking", True)
        self.tol = kwargs.get("tol", 1e-3)
        self.class_weight = kwargs.get("class_weight", None)
        self.verbose = kwargs.get("verbose", False)
        self.max_iter = kwargs.get("max_iter", -1)
        self.decision_function_shape = kwargs.get("decision_function_shape", None)
        self.random_state = kwargs.get("random_state", None)

    def setClassifer(self):
        """Instantiate the sklearn SVC from the stored hyperparameters.

        (Method name kept as-is — 'setClassifer' [sic] — callers depend on it.)
        """
        self.classifier = svm.SVC(kernel=self.kernel,
                                  C=self.C,
                                  degree=self.degree,
                                  gamma=self.gamma,
                                  coef0=self.coef0,
                                  probability=self.probability,
                                  shrinking=self.shrinking,
                                  tol=self.tol,
                                  class_weight=self.class_weight,
                                  verbose=self.verbose,
                                  max_iter=self.max_iter,
                                  decision_function_shape=self.decision_function_shape,
                                  random_state=self.random_state)
def main():
    # Train and evaluate a linear SVM: first with k-fold cross validation,
    # then once on the full train/test split.
    clf = SVM(kernel="linear")
    # Quick-switch between data sources: the later assignments override the
    # earlier ones, so only `dataDir` (raw features) is actually used here.
    xdataDir = ldaReducedDataDir
    xdataDir = pcaRedcuedDataDir
    xdataDir = dataDir
    ydataDir = dataDir
    trainingTimes = []
    testingTimes = []
    accuracies = []
    # Cross-validation folds: train on xTrain<i>, evaluate on xDev<i>.
    for i in range(crossValidationFold):
        xTrain = np.load(xdataDir + "xTrain" + str(i) + ".npy")
        yTrain = np.load(ydataDir + "yTrain" + str(i) + ".npy")
        trainingTimes.append(clf.train(xTrain, yTrain))
        xTest = np.load(xdataDir + "xDev" + str(i) + ".npy")
        yTest = np.load(ydataDir + "yDev" + str(i) + ".npy")
        [testingTime, accuracy] = clf.test(xTest, yTest)
        testingTimes.append(testingTime)
        accuracies.append(accuracy)
        print "i = %d, training time: %.2f ms, testing time: %.2f ms, accuracy: %.1f %%" \
            % (i, trainingTimes[i] * 1000, testingTime * 1000, accuracy * 100)
    # Final model: train on the full training set, report held-out accuracy.
    xTrain = np.load(xdataDir + "xTrain.npy")
    yTrain = np.load(ydataDir + "yTrain.npy")
    trainingTime = clf.train(xTrain, yTrain)
    xTest = np.load(xdataDir + "xTest.npy")
    yTest = np.load(ydataDir + "yTest.npy")
    [testingTime, accuracy] = clf.test(xTest, yTest)
    print "training time: %.2f ms, testing time: %.2f ms, accuracy: %.1f %%" \
        % ( trainingTime * 1000, testingTime * 1000, accuracy * 100)

if __name__ == "__main__":
    main()
from django.contrib.auth.models import User
from project.api import models
from rest_framework import viewsets, response, permissions
from project.api import serializers
import logging
logger = logging.getLogger(__name__)
class PartCategoryViewSet(viewsets.ModelViewSet):
    """CRUD API over part categories, listed alphabetically by name."""
    queryset = models.PartCategory.objects.all().order_by('name')
    serializer_class = serializers.PartCategorySerializer
class ColorViewSet(viewsets.ModelViewSet):
    """CRUD API over colors, ordered by primary key."""
    queryset = models.Color.objects.all().order_by('id')
    serializer_class = serializers.ColorSerializer
class CategoryFilterMixin(object):
    """Mixin that narrows a queryset by the optional `category_id` query param."""

    def filter_by_category_id(self, queryset):
        """Return `queryset`, filtered to the requested category if one exists.

        A missing or unknown `category_id` leaves the queryset unchanged.
        """
        category = None
        category_id = self.request.query_params.get('category_id', None)
        if category_id is not None:
            try:
                category = models.PartCategory.objects.get(pk=category_id)
            except models.PartCategory.DoesNotExist:
                category = None
            # Use the module logger instead of the stray debug print().
            logger.debug('category: %s', category)
        if category is not None:
            queryset = queryset.filter(category=category)
        return queryset
class PartViewSet(viewsets.ModelViewSet, CategoryFilterMixin):
    """CRUD API over parts; listing excludes meta parts and supports
    filtering via the ``category_id`` query param (CategoryFilterMixin)."""
    queryset = models.Part.objects.all()
    serializer_class = serializers.PartSerializer

    def get_queryset(self):
        """
        Override default functionality to allow filtering of parts.

        :return: non-meta parts ordered by part number, optionally
            narrowed by the ``category_id`` query parameter.
        """
        # meta=0 keeps only real (non-meta) parts.
        queryset = models.Part.objects.filter(meta=0).order_by('part_num')
        queryset = self.filter_by_category_id(queryset)
        return queryset
class ElementViewSet(viewsets.ModelViewSet):
    """CRUD API over elements, ordered by part then color; supports
    filtering by the ``part`` query parameter."""
    # Each .order_by() call REPLACES any previous ordering, so the original
    # chained .order_by('part').order_by('color') sorted by color only.
    # Passing both fields to one call gives the intended part-then-color sort.
    queryset = models.Element.objects.all().order_by('part', 'color')
    serializer_class = serializers.ElementSerializer

    def get_queryset(self):
        """Narrow to a single part when ?part=<id> is supplied."""
        part_id = self.request.query_params.get('part', None)
        if part_id is not None:
            return self.queryset.filter(part=part_id)
        return self.queryset
class UserViewSet(viewsets.ModelViewSet):
    """CRUD API over users; authentication required."""
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def retrieve(self, request, pk=None):
        # Special pk 'me' returns the requesting user instead of a lookup.
        if pk == 'me':
            return response.Response(serializers.UserSerializer(request.user, context={'request': request}).data)
        return super(UserViewSet, self).retrieve(request, pk)
class UserElementViewSet(viewsets.ModelViewSet):
    """CRUD API over user-owned elements, newest first; auth required."""
    queryset = models.UserElement.objects.all().order_by('-created')
    serializer_class = serializers.UserElementSerializer
    permission_classes = (permissions.IsAuthenticated,)
class UserPartsViewSet(viewsets.GenericViewSet, viewsets.mixins.ListModelMixin, CategoryFilterMixin):
    """Read-only list of the distinct parts owned by the requesting user."""
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = serializers.UserPartSerializer

    def get_queryset(self):
        # Parts reachable through the user's elements, de-duplicated,
        # ordered by part number, optionally narrowed by category_id.
        queryset = models.Part.objects.all() \
            .distinct() \
            .filter(element__userelement__user=self.request.user) \
            .order_by('part_num')
        queryset = self.filter_by_category_id(queryset)
        return queryset
class SetViewSet(viewsets.ModelViewSet):
    """CRUD API over sets, ordered by set number."""
    queryset = models.Set.objects.all().order_by('set_num')
    serializer_class = serializers.SetSerializer
class SetThemeViewSet(viewsets.ModelViewSet):
    """CRUD API over set themes."""
    queryset = models.SetTheme.objects.all()
    serializer_class = serializers.SetThemeSerializer
|
def find(val, curr, n):
    """Search for `n` by alternating two moves from `val`.

    Each iteration either doubles `val` (counting one step in `curr`) or,
    once `val` has overshot `n`, probes whether repeated floor-division by 3
    lands exactly on `n`, counting each division as a step. Returns the step
    count of the first successful path. Iterative rewrite of the original
    recursion; behavior is identical.
    """
    while val != n:
        if val > n:
            # Probe the divide-by-3 path without disturbing val/curr.
            probe, steps = val, curr
            while probe >= n:
                if probe == n:
                    return steps
                steps += 1
                probe = int(probe / 3)
        # No hit on the division path yet: double and keep searching.
        val *= 2
        curr += 1
    return curr

print(find(1, 0, 10))
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2018/9/27 21:47
# @Author : dapengchiji!!
# @FileName: code8.py
'''
函数传入的参数为不可变的,对外部的变量就没有影响 如 数字、字典
按值传--传入的不是变量对应的内存地址
函数传入的参数为可变的,对外部的变量就有影响
按引用传--传入的变量对应的内存地址
'''
a=111
# f rebinds the module-level `a` via `global`, prints a+1, returns None.
def f():
    global a
    a = 11
    b = a+1
    print(b)
print (f())  # prints 12 (from inside f) and then None (its return value)
# *args: extra positional arguments, collected into a tuple.
def f(a,b,*args):
    # Print each extra positional argument on its own line.
    for i in args:
        print(i)
print(f(1,2,3,4,5,6))  # prints 3..6, then None (f returns nothing)
'''
写一个函数,使用可变参数,计算函数所有参数之和
'''
# Sum of all positional arguments; *args packs them into a tuple.
def f(*args):
    total = 0
    for value in args:
        total += value
    return total
print(f(1,2,3,4,10))
# **kw packs the variable keyword arguments into a dictionary.
# Renamed from `dict`: the original definition shadowed the builtin `dict`,
# breaking any later use of dict() in this module. Its only caller is the
# print below, which is updated to match.
def print_named_args(a,b,**kw):
    """Print each keyword argument as 'key value'; returns None."""
    for k,v in kw.items():
        print (k,v)
print(print_named_args(1,2,m=1,n=1,w=1))
'''
使用**kw,把可变的所有参数 算一个乘积
同时使用*arg 和**kw,算一下字母的长度之和,注意所有参数均使用字符串
字符串都是字母
'''
# Multiply together all keyword-argument values, as the exercise above asks
# ("compute the product of all the variable parameters with **kw").
# The original body squared only the LAST value into a global and returned
# None; fixed to accumulate the product and return it.
def fun(**kw):
    product = 1
    for value in kw.values():
        product *= value
    return product
print(fun(a=1,b=2,c=3))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.