text stringlengths 8 6.05M |
|---|
"""
MVC 패턴
Model: DTO (data transfer object) + DAO (data access object)
Service: Business Logic (Algorithm)
Controller: RESTful 방식으로 React Axios 로 통신
""" |
from sys import argv
title = raw_input (" what's your recipe title?" )
ingredient = raw_input (" tell the ingredients:" )
step_by_step = raw_input (" tell the step by step guide:" )
first = title
second = ingredient
third = step_by_step
# script, first, second, third = argv
print "The script is called:" #, script
print "Title:", first
print "Ingredenients you need:", second
print "This is how easy is to make it:", third
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-09 23:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the ``ClubId`` model (auto-generated Django 1.11 migration)."""

    dependencies = [
        ('th', '0003_auto_20180309_2318'),
    ]

    operations = [
        migrations.CreateModel(
            name='ClubId',
            fields=[
                # Surrogate primary key for the club record.
                ('club_id', models.AutoField(primary_key=True, serialize=False)),
                ('game_club_id', models.IntegerField()),
                ('club_name', models.CharField(max_length=20)),
                # Defaults to the row-creation time.
                ('active_time', models.DateTimeField(default=django.utils.timezone.now)),
                # Far-future sentinel date meaning "still active".
                ('inactive_time', models.DateTimeField(default='2037-01-01')),
            ],
        ),
    ]
|
#import sys
#input = sys.stdin.readline

# BUG FIX: the heapq module exposes heappush/heappop — there is no name
# "heapq" inside it, so `from heapq import heapq, heappop` raised ImportError.
from heapq import heappush, heappop


def main():
    """Entry point — solution not implemented yet."""
    pass


if __name__ == '__main__':
    main()
|
from django.conf.urls import patterns, include, url

# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

# NOTE(review): `patterns()` with a dotted-string view is the Django <=1.7
# style and was removed in Django 1.10 — confirm the project's Django version.
urlpatterns = patterns('',
    url(r'^$', 'tools.views.get_form', name='get_form'),
)
|
# Read the three side lengths.
a = int(input("a="))
b = int(input("b="))
c = int(input("c="))

# A side at least as long as the sum of the other two means the three
# lengths cannot form a non-degenerate triangle.
degenerate = any(
    side >= other_1 + other_2
    for side, other_1, other_2 in ((c, a, b), (b, a, c), (a, b, c))
)
print("YES" if degenerate else "NO")
|
import scipy.linalg as splinalg
import numpy as np
import math
#test_Givens_out_of_for_loop = False
# Module-level switch read by GMRES_API.run(): when True, the least-squares
# solve happens after the Arnoldi loop; when False, Givens rotations are
# applied inside the loop.
test_Givens_out_of_for_loop = True
class GMRES_API(object):
    """Restarted GMRES solver for the linear system A x = b.

    Builds a Krylov basis with the Arnoldi process, then solves the small
    least-squares problem with one of three selectable strategies
    (Givens rotations, numpy QR, or numpy lstsq).
    """

    def __init__( self,
                  A_coefficient_matrix: np.array([], dtype = float ),
                  b_boundary_condition_vector: np.array([], dtype = float ),
                  maximum_number_of_basis_used: int,
                  threshold = 1.0e-16 ):
        # NOTE(review): `np.array([], dtype=float)` used as an annotation is
        # an array *instance*, not a type — it is evaluated once at class
        # definition time and serves only as documentation here.
        self.A = A_coefficient_matrix
        self.b = b_boundary_condition_vector
        self.maximum_number_of_basis_used = maximum_number_of_basis_used
        self.threshold = threshold
        # Default methods_used_to_solve_leastSqare
        #self.methods_used_to_solve_leastSqare = "Givens_rotation"
        #self.methods_used_to_solve_leastSqare = "QR_decomposition_numpy"
        self.methods_used_to_solve_leastSqare = "leastSquare_solver_numpy"

    def methods_used_to_solve_leastSqare_register(self, methods_used_to_solve_leastSqare):
        # Select which least-squares strategy run() uses.
        self.methods_used_to_solve_leastSqare = methods_used_to_solve_leastSqare

    def initial_guess_input( self, x_input_vector_initial_guess: np.array([], dtype = float ) ):
        """Set the initial guess; falls back to a zero vector on size mismatch."""
        self.x = x_input_vector_initial_guess
        try:
            assert len( self.x ) == len( self.b )
        except Exception:
            print(" The input guess vector's size must equal to the system's size !\n")
            print(" The matrix system's size == ", len( self.b ))
            print(" Your input vector's size == ", len( self.x ))
            self.x = np.zeros( len( self.b ) )
            print(" Use default input guess vector = ", self.x, " instead of the incorrect vector you given !\n")

    def run( self ):
        """Run one GMRES cycle (up to m Arnoldi steps) and return the updated x."""
        n = len( self.A )
        m = self.maximum_number_of_basis_used

        # Initial residual and relative error.
        r = self.b - np.dot(self.A , self.x)
        r_norm = np.linalg.norm( r )
        b_norm = np.linalg.norm( self.b )
        self.error = np.linalg.norm( r ) / b_norm
        self.e = [self.error]

        # initialize the 1D vectors
        sn = np.zeros( m )
        cs = np.zeros( m )
        e1 = np.zeros( m + 1 )
        e1[0] = 1.0
        # beta is the beta vector instead of the beta scalar
        beta = r_norm * e1
        beta_test = r_norm * e1

        H = np.zeros(( m+1, m+1 ))
        H_test = np.zeros(( m+1, m+1 ))
        # Q holds the orthonormal Krylov basis, one column per Arnoldi step.
        Q = np.zeros(( n, m+1 ))
        Q[:,0] = r / r_norm
        #-----------------------------------------------------------------------------------------------
        for k in range(m):
            ( H[0:k+2, k], Q[:, k+1] ) = __class__.arnoldi( self.A, Q, k)
            #H_test[:,k] = H[:,k]
            H_test = H
            #print("H_test =\n",H_test)
            if test_Givens_out_of_for_loop is not True:
                ( H[0:k+2, k], cs[k], sn[k] ) = __class__.apply_givens_rotation( H[0:k+2, k], cs, sn, k)
                # update the residual vector
                beta[ k+1 ] = -sn[k] * beta[k]
                beta[ k ] = cs[k] * beta[k]
                # calculate and save the errors
                self.error = abs(beta[k+1]) / b_norm
                self.e = np.append(self.e, self.error)
                if( self.error <= self.threshold):
                    break
        #-----------------------------------------------------------------------------------------------
        if test_Givens_out_of_for_loop is True:
            if self.methods_used_to_solve_leastSqare == "Givens_rotation":
                # 1. My first GMRES written using Givens rotation to solve lstsq
                #---------------------------------------------------------------------------------------------------------------------
                H_Givens_test = np.copy(H_test)
                for k in range(m):
                    ( H_Givens_test[0:k+2, k], cs[k], sn[k] ) = __class__.apply_givens_rotation( H_Givens_test[0:k+2, k], cs, sn, k)
                    # update the residual vector
                    beta[ k+1 ] = -sn[k] * beta[k]
                    beta[ k ] = cs[k] * beta[k]
                #print("H_Givens_test =\n", H_Givens_test)
                #print("beta =\n", beta)
                #y = __class__.__back_substitution( H_Givens_test[0:m+1, 0:m+1], beta[0:m+1] )
                #y = np.matmul( np.linalg.inv( H_Givens_test[0:m+1, 0:m+1]), beta[0:m+1] )
                #y = splinalg.solve_triangular(H_Givens_test[0:m, 0:m],beta[0:m] )
                y = np.linalg.lstsq(H_Givens_test[0:m, 0:m], beta[0:m])[0]
                #---------------------------------------------------------------------------------------------------------------------
            elif self.methods_used_to_solve_leastSqare == "QR_decomposition_numpy":
                # 2. GMRES using QR decomposition to solve lstsq
                #---------------------------------------------------------------
                H_QR_test = np.copy(H_test)
                QR_q, QR_r = np.linalg.qr(H_QR_test, mode='reduced')
                #print(QR_q)
                #print("QR_r =\n", QR_r)
                #print(beta_test)
                new_beta = np.matmul( QR_q.T, beta_test )
                #print(new_beta[0:m])
                #print("new_beta =",new_beta)
                #y = splinalg.solve_triangular(QR_r[0:m, 0:m],new_beta[0:m] )
                #y = np.linalg.lstsq(QR_r[:,0:m],new_beta )[0]
                y = np.linalg.lstsq(QR_r[0:m, 0:m],new_beta[0:m], rcond=-1 )[0]
                #---------------------------------------------------------------
            elif self.methods_used_to_solve_leastSqare == "leastSquare_solver_numpy":
                # 3. GMRES directly using numpy.linalg.lstsq to solve lstsq (the most success one until now !)
                #---------------------------------------------------------------
                #print(H_test[0:m+1, 0:m])
                #print(beta_test)
                #y = np.linalg.solve(H_test[0:m, 0:m], beta_test[0:m])
                y = np.linalg.lstsq(H_test[0:m+1, 0:m], beta_test)[0]
                #---------------------------------------------------------------
            else:
                print("please set methods_used_to_solve_leastSqare.")
        else:
            # 1. My first GMRES written using Givens rotation to solve lstsq(but put the Givens with arnoldi)
            #-----------------------------------------------------------------------------------
            # calculate the result
            #y = np.matmul( np.linalg.inv( H[0:k+1, 0:k+1]), beta[0:k+1] )
            #TODO Due to H[0:k+1, 0:k+1] being a upper tri-matrix, we can exploit this fact.
            y = __class__.__back_substitution( H[0:m+1, 0:m+1], beta[0:m+1] )
            #-----------------------------------------------------------------------------------
        #print("y =", y)
        # NOTE(review): if the Arnoldi loop broke early, Q[:,0:k+1] has fewer
        # columns than len(y) — confirm the early-exit path is exercised.
        self.x = self.x + np.matmul( Q[:,0:k+1], y )
        self.final_residual_norm = np.linalg.norm( self.b - np.matmul( self.A, self.x ) )
        return self.x

    #-----------------------------------#
    #          Arnoldi Function         #
    #-----------------------------------#
    @staticmethod
    def arnoldi( A, Q, k ):
        """One Arnoldi step: return the new Hessenberg column and basis vector."""
        h = np.zeros( k+2 )
        q = np.dot( A, Q[:,k] )
        # Modified Gram-Schmidt against the existing basis columns.
        for i in range ( k+1 ):
            h[i] = np.dot( q, Q[:,i])
            q = q - h[i] * Q[:, i]
        h[ k+1 ] = np.linalg.norm(q)
        q = q / h[ k+1 ]
        return h, q

    #-------------------------------------------------------#
    #          Applying Givens Rotation to H col            #
    #-------------------------------------------------------#
    @staticmethod
    def apply_givens_rotation( h, cs, sn, k ):
        """Rotate the new column h by the stored rotations, then zero h[k+1].

        NOTE(review): the loop bound `range(k-1)` and the rotation of
        (h[k-1], h[k]) are each one index off the textbook formulation
        (`range(k)` rotating (h[k], h[k+1])) — confirm against the caller's
        slice indexing before relying on this path.
        """
        for i in range( k-1 ):
            temp = cs[i] * h[i] + sn[i] * h[i+1]
            h[i+1] = -sn[i] * h[i] + cs[i] * h[i+1]
            h[i] = temp
        # update the next sin cos values for rotation
        cs_k, sn_k, h[k] = __class__.givens_rotation( h[k-1], h[k] )
        # eliminate H[ k+1, i ]
        h[k + 1] = 0.0
        return h, cs_k, sn_k

    ##----Calculate the Given rotation matrix----##
    # From "http://www.netlib.org/lapack/lawnspdf/lawn150.pdf"
    # The algorithm used by "Edward Anderson"
    @staticmethod
    def givens_rotation( v1, v2 ):
        """Return (cos, sin, r) of the rotation zeroing v2 against v1."""
        if( v2 == 0.0 ):
            cs = np.sign(v1)
            sn = 0.0
            r = abs(v1)
        elif( v1 == 0.0 ):
            cs = 0.0
            sn = np.sign(v2)
            r = abs(v2)
        elif( abs(v1) > abs(v2) ):
            t = v2 / v1
            u = np.sign(v1) * math.hypot( 1.0, t )
            cs = 1.0 / u
            sn = t * cs
            r = v1 * u
        else:
            t = v1 / v2
            u = np.sign(v2) * math.hypot( 1.0, t )
            sn = 1.0 / u
            cs = t * sn
            r = v2 * u
        return cs, sn, r

    # From https://stackoverflow.com/questions/47551069/back-substitution-in-python
    @staticmethod
    def __back_substitution( A: np.ndarray, b: np.ndarray) -> np.ndarray:
        """Solve A x = b for upper-triangular A; raises ValueError on a zero pivot."""
        n = b.size
        if A[n-1, n-1] == 0.0:
            raise ValueError
        x = np.zeros_like(b)
        x[n-1] = b[n-1] / A[n-1, n-1]
        for i in range( n-2, -1, -1 ):
            bb = 0
            for j in range ( i+1, n ):
                bb += A[i, j] * x[j]
            x[i] = (b[i] - bb) / A[i, i]
        return x

    def final_residual_info_show( self ):
        """Print the current solution and its final residual norm."""
        print( "x =", self.x, "residual_norm = ", self.final_residual_norm )
def main():
    """Demo: restarted GMRES(2) on a 3x3 system, then the exact answer for comparison."""
    A_mat = np.array( [[1.00, 1.00, 1.00],
                       [1.00, 2.00, 1.00],
                       [0.00, 0.00, 3.00]] )
    b_mat = np.array( [3.0, 2.0, 1.0] )

    # Two Krylov vectors per cycle, loose per-cycle tolerance.
    solver = GMRES_API( A_mat, b_mat, 2, 0.01)

    x_mat = np.array( [1.0, 1.0, 1.0] )
    print("x =", x_mat)

    # GMRES with restart, 2 iterations in each restart ( GMRES(2) )
    max_restart_counts = 100
    for cycle in range(1, max_restart_counts + 1):
        solver.initial_guess_input( x_mat )
        x_mat = solver.run()
        print(cycle," : x =", x_mat)

    # Reference answer via the explicit inverse.
    xx = np.matmul( np.linalg.inv(A_mat), b_mat )
    print("ANS : xx =", xx)


if __name__ == '__main__':
    main()
|
import pickle
import sys

# Load a pickled vocabulary and report how many entries it holds.
vocab_file = 'word_vocab.pkl'
with open(vocab_file, 'rb') as f:
    vocabs = pickle.load(f)
print(len(vocabs))
|
import pygame
from settings import Settings
class Menu:
    """Class to create the game menu start/pause menu."""

    def __init__(self, ai_game):
        """Initialize main menu.

        :param ai_game: running game instance; supplies the screen surface.
        """
        # NOTE(review): Menu declares no base class, so this super() call is
        # a no-op (object.__init__) — possibly leftover from a Sprite base.
        super().__init__()
        self.screen = ai_game.screen
        self.screen_rect = self.screen.get_rect()
        self.settings = Settings()

        # Set the dimensions and properties of each button
        self.width, self.height = 384, 100
        self.button_color = self.settings.button_color
        self.text_color = self.settings.text_color
        self.button_font = self.settings.button_font
        self.menu_font = self.settings.menu_font

        # Creating the header for the menu.
        self.menu_text = self.menu_font.render('Alien Invasion', True,
            self.settings.white_color, self.settings.bg_color)
        self.menu_text_rect = self.menu_text.get_rect()
        self.menu_text_rect.midtop = (self.settings.screen_width // 2,
            50)

        # Store placement of buttons (hard-coded x/y for a fixed window size).
        self.play_rect = pygame.Rect(768, 500,
            self.width, self.height)
        self.restart_rect = pygame.Rect(768, 650,
            self.width, self.height)
        self.quit_rect = pygame.Rect(768, 800,
            self.width, self.height)

        # Store text values and center text on the button
        self.text_play = self.button_font.render(
            'Play', True, self.text_color, self.button_color)
        self.text_play_rect = self.text_play.get_rect()
        self.text_play_rect.center = self.play_rect.center
        self.text_restart = self.button_font.render(
            'Restart', True, self.text_color, self.button_color)
        self.text_restart_rect = self.text_restart.get_rect()
        self.text_restart_rect.center = self.restart_rect.center
        self.text_quit = self.button_font.render(
            'Quit', True, self.text_color, self.button_color)
        self.text_quit_rect = self.text_quit.get_rect()
        self.text_quit_rect.center = self.quit_rect.center

        # (text surface, rect) pairs for callers that iterate the buttons.
        self.buttons = [
            [self.text_play, self.text_play_rect],
            [self.text_restart, self.text_restart_rect],
            [self.text_quit, self.text_quit_rect]
        ]

    def draw_menu(self):
        """Blit the header and the three filled buttons to the screen."""
        self.screen.blit(self.menu_text, self.menu_text_rect)
        self.screen.fill(self.button_color, self.play_rect)
        self.screen.blit(self.text_play, self.text_play_rect)
        self.screen.fill(self.button_color, self.restart_rect)
        self.screen.blit(self.text_restart, self.text_restart_rect)
        self.screen.fill(self.button_color, self.quit_rect)
        self.screen.blit(self.text_quit, self.text_quit_rect)
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2013-2019, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
"""Models Digital Ocean Droplets."""
import digitalocean
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredList
from Products.DataCollector.plugins.CollectorPlugin import PythonPlugin
from logging import getLogger
log = getLogger('zen.DigitalOcean.Droplets')
class Droplets(PythonPlugin):
    """Digital Ocean Droplet modeler plugin.
    | Attributes returned by API:
    | id (int): droplet id
    | memory (str): memory size
    | vcpus (int): number of vcpus
    | disk (int): disk size in GB
    | status (str): status
    | locked (bool): True if locked
    | created_at (str): creation date in format u'2014-11-06T10:42:09Z'
    | status (str): status, e.g. 'new', 'active', etc
    | networks (dict): details of connected networks
    | kernel (dict): details of kernel
    | backup_ids (:obj:`int`, optional): list of ids of backups of this droplet
    | snapshot_ids (:obj:`int`, optional): list of ids of snapshots of this droplet
    | action_ids (:obj:`int`, optional): list of ids of actions
    | features (:obj:`str`, optional): list of enabled features. e.g.
    | [u'private_networking', u'virtio']
    | image (dict): details of image used to create this droplet
    | ip_address (str): public ip addresses
    | private_ip_address (str): private ip address
    | ip_v6_address (:obj:`str`, optional): list of ipv6 addresses assigned
    | end_point (str): url of api endpoint used
    | volume_ids (:obj:`str`, optional): list of blockstorage volumes"""

    relname = 'droplets'
    modname = 'ZenPacks.zenoss.DigitalOcean.Droplet'

    # zProperties that must exist on the device for modeling to run.
    requiredProperties = (
        'zDigitalOceanToken',
        'zDigitalOceanApiEndpoint'
    )

    deviceProperties = PythonPlugin.deviceProperties + requiredProperties

    def buildObjectMap(self, droplet):
        """Translate one python-digitalocean Droplet into the dict fed to objectMap().

        Returns None (implicitly) when an attribute access fails; the
        caller wraps the subsequent objectMap() call in try/except.
        """
        try:
            name = self.prepId(droplet.name)
            region = self.prepId(droplet.region.get('name'))
            image = self.prepId(droplet.image.get('name'))
            data = {
                'id': name,
                'created_at': droplet.created_at,
                'backups': droplet.backups,
                'backup_ids': droplet.backup_ids,
                'next_backup': droplet.next_backup_window.get('start'),
                'snapshot_ids': droplet.snapshot_ids,
                'features': droplet.features,
                'networks': droplet.networks,
                'vcpus': droplet.vcpus,
                'disk': droplet.disk,
                'volume_ids': droplet.volume_ids,
                'droplet_id': droplet.id,
                'image': image,
                'public_ip': droplet.ip_address,
                'private_ip': droplet.private_ip_address,
                'memory': droplet.memory,
                'region': region,
                'droplet_status': droplet.status,
                'droplet_locked': droplet.locked,
                'tags': droplet.tags,
                'price_hourly': droplet.size.get('price_hourly'),
                'price_monthly': droplet.size.get('price_monthly'),
            }
            return(data)
        except Exception, e:
            # NOTE: Python-2 except syntax and `e.message` — this module
            # targets the Python 2 Zenoss runtime.
            log.error("Problem encountered: %s", e.message)
            log.error("Droplet data: %s" % droplet)

    @inlineCallbacks
    def collect(self, device, log):
        """Model the Digital Ocean Droplets."""
        log.info("%s: collecting data", device.id)

        token = getattr(device, 'zDigitalOceanToken', None)
        if not token:
            log.error("zDigitalOceanToken not set.")
            returnValue(None)

        #Setup the Connection to the Digital Ocean API endpoint.
        try:
            manager = digitalocean.Manager(token=token)
            droplets = yield manager.get_all_droplets()
        except Exception, e:
            log.error(
                "Unable to retreive droplets for %s due to: %s" % (
                    device.id,
                    e.message
                ))
            returnValue(None)
        returnValue(droplets)

    def process(self, device, droplets, log):
        """Process droplets returned from api endpoint.
        Attributes and values that we model:
            id
            created_at
            backups
            backup_ids
            next_backup
            snapshot_ids
            features
            networks
            vcpus
            disk
            volume_ids
            droplet_id
            image
            public_ip
            private_ip
            memory
            region
            droplet_status
            tags
            price_hourly
            price_monthly
        """
        log.info("Processing %d results for device %s." % (
            len(droplets),
            device.id)
        )
        rm = self.relMap()
        for droplet in droplets:
            if droplet:
                try:
                    # buildObjectMap may return None on failure; the
                    # except below absorbs the resulting error.
                    rm.append(
                        self.objectMap(self.buildObjectMap(droplet))
                    )
                except Exception, e:
                    log.error("Problem creating relMap: %s", e)
        return rm
|
# -*- coding: utf-8 -*-
import arcpy
from arcpy import env
# Tool parameters supplied by the ArcGIS script-tool dialog (all arrive as text).
iWorkspace=arcpy.GetParameterAsText(0)
oriShp=arcpy.GetParameterAsText(1)
newShp=arcpy.GetParameterAsText(2)
dist=arcpy.GetParameterAsText(3)
def LEI(iWorkspace, oriShp, newShp, dist):
    """Compute a Landscape Expansion Index for each feature of newShp.

    For every new polygon, the ring between the polygon and its `dist`
    buffer is intersected with the old polygons (oriShp); the covered
    fraction (x100) is written to the "LEI" field of newShp.

    :param iWorkspace: workspace path (file geodatabase or folder).
    :param oriShp: old/original polygon feature class.
    :param newShp: new polygon feature class; receives the LEI field.
    :param dist: buffer distance (text; cast to float below).
    """
    try:
        dist=float(dist)
        env.workspace=iWorkspace
        arcpy.AddMessage('Step 1/4. Open data')
        print('Step 1/4. Open data')
        #Get newLayer and check new layer has field "MLEI"&"LEI", if not then creat
        newLayer="new_layer"
        arcpy.MakeFeatureLayer_management(newShp, newLayer)
        desc=arcpy.Describe(newLayer)
        ifieldInfo=desc.fieldInfo
        index=ifieldInfo.findfieldbyname("LEI")
        if index==-1:
            arcpy.AddField_management(newLayer,"LEI","DOUBLE")
        #Get oriLayer/oldLayer
        oriLayer="oriLayer"
        arcpy.MakeFeatureLayer_management(oriShp, oriLayer)
        descOri=arcpy.Describe(oriLayer)
        #Create symmetrical difference layer to store the temple data
        if ".gdb" in iWorkspace:
            temple = "LEITemple"
        else:
            temple = "LEITemple.shp"
        templeLayer = "templeLayer"
        spatial_reference = arcpy.Describe(oriShp).spatialReference
        try:
            arcpy.CreateFeatureclass_management(iWorkspace, temple, "POLYGON", "", "DISABLED", "DISABLED", spatial_reference)
        except:
            # Feature class presumably already exists — empty it instead.
            arcpy.DeleteFeatures_management(temple)
        arcpy.MakeFeatureLayer_management(temple,templeLayer)
        if ".gdb" in iWorkspace:
            arcpy.AddField_management(templeLayer, "ID", "SHORT", "", "", "", "", "NULLABLE")
        arcpy.AddMessage('Step 2/4. Create difference between new polygon and its buffer')
        print('Step 2/4. Create difference between new polygon and its buffer')
        #Create the difference polygon between polygon and its buffer
        #Insert the polygon into temple layer
        tCursor = arcpy.InsertCursor(templeLayer)
        newCursor = arcpy.SearchCursor(newLayer)
        for newFeature in newCursor:
            # Ring = buffer minus the polygon itself.
            iFDifference=newFeature.shape.buffer(dist).difference(newFeature.shape)
            row = tCursor.newRow()
            row.setValue("Shape",iFDifference)
            if ".gdb" in iWorkspace:
                row.setValue("ID",newFeature.OBJECTID)
            else:
                row.setValue("ID",newFeature.FID)
            tCursor.insertRow(row)
        del tCursor
        del newCursor
        del newFeature
        del row
        arcpy.AddMessage('Step 3/4. Caculate LEI')
        print('Step 3/4. Caculate LEI')
        #Compute the LEI index and creat a dictionary
        leiDict = dict()
        tCursor = arcpy.SearchCursor(templeLayer)
        for templeFeature in tCursor:
            arcpy.SelectLayerByLocation_management(oriLayer,"INTERSECT",templeFeature.shape)
            oldCursor = arcpy.SearchCursor(oriLayer)
            inAreaLEI=0
            error = 0
            for oldFeature in oldCursor:
                try:
                    insideArea=templeFeature.shape.intersect(oldFeature.shape,4)
                    inAreaLEI+=insideArea.area
                except Exception as e:
                    error = 1
                    # NOTE(review): e.message is Python-2-only; under Python 3
                    # / ArcGIS Pro this line itself raises AttributeError.
                    print(e.message)
            if error == 0:
                leiDict[templeFeature.ID]=inAreaLEI/templeFeature.shape.area
            else:
                # Sentinel marking features whose intersection failed.
                leiDict[templeFeature.ID]=999
        del tCursor
        del templeFeature
        del oldCursor
        del oldFeature
        arcpy.AddMessage('Step 4/4. Set LEI to new layer')
        print('Step 4/4. Set LEI to new layer')
        #Set LEI to the new layer
        newCursor = arcpy.UpdateCursor(newLayer)
        for newFeature in newCursor:
            if ".gdb" in iWorkspace:
                lei = leiDict[newFeature.OBJECTID]
            else:
                lei = leiDict[newFeature.FID]
            newFeature.LEI=lei*100
            newCursor.updateRow(newFeature)
        del newCursor
        del newFeature
        arcpy.AddMessage('Finished!')
        print('Finished!')
    except Exception as e:
        print(e.message)
        arcpy.AddMessage("Error: " + str(e.message))
if __name__=="__main__":
    # Hard-coded example inputs kept (as a string literal) for reference only.
    """
    iWorkspace="F:\Projects\Liu\LEI\LEI\Data\Exercise1"
    oriShp="Old.shp"
    newShp="New.shp"
    dist=200
    """
    LEI(iWorkspace, oriShp, newShp, dist)
|
def main():
    """Smoke-test the day-2 helpers; expected values are in the trailing comments."""
    l,w,h = parse("2x3x4")
    print(surface_area(l,w,h)) # 52
    l,w,h = parse("1x1x10")
    print(surface_area(l,w,h)) # 42
    # NOTE(review): the 1586300 total is usually produced by
    # calculate_wrapping (surface + slack) — confirm surface_area is the
    # intended function here.
    print(iterateFile("02_data.txt", surface_area)) # 1586300
    l,w,h = parse("2x3x4")
    print(calculate_ribbon(l,w,h)) # 34
    l,w,h = parse("1x1x10")
    print(calculate_ribbon(l,w,h)) # 14
    print(iterateFile("02_data.txt", calculate_ribbon)) # 3737498
def parse(string):
    """Split an "LxWxH" spec like "2x3x4" into a list of three ints."""
    return list(map(int, string.split("x")))
def surface_area(l, w, h):
    """Total surface area of an l x w x h box."""
    faces = (l * w, w * h, h * l)
    return 2 * sum(faces)
def calculate_wrapping(l, w, h):
    """Wrapping paper needed: full surface area plus slack equal to the smallest face."""
    smallest, small, _ = sorted([l, w, h])
    # surface_area inlined: 2*(lw + wh + hl).
    area = (2 * l * w) + (2 * w * h) + (2 * h * l)
    return area + smallest * small
def calculate_ribbon(l, w, h):
    """Ribbon needed for an l x w x h box.

    Smallest face perimeter (the two shortest sides) plus a bow equal to
    the volume. Fixes the unused `biggest` binding in the original.
    """
    smallest, small, _ = sorted([l, w, h])  # largest side is not needed
    perimeter = 2 * (smallest + small)
    bow = l * w * h
    return perimeter + bow
def iterateFile(filename, func):
    """Apply func(l, w, h) to every parsed line of *filename* and sum the results."""
    with open(filename) as fh:
        # total += calculate_wrapping(l,w,h)  (older variant, kept for reference)
        return sum(func(*parse(line.strip())) for line in fh)
# Run the demo when executed as a script.
if __name__ == "__main__":
    main()
|
from flask import Flask, request, redirect, render_template
import sqlite3

# Flask application object shared by all routes below.
app = Flask(__name__)
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/query", methods = ["POST", "GET"])
def query():
    """List every flower, ordered by common name.

    FIX: the sqlite connection was previously never closed; it is now
    released in a finally block.
    """
    con = sqlite3.connect("flowers2019.db")
    try:
        con.row_factory = sqlite3.Row
        cur = con.cursor()
        cur.execute("SELECT * FROM flowers ORDER BY comname")
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("viewflowers.html",rows = rows)
@app.route("/viewsightings", methods = ["POST", "GET"])
def viewsightings():
    """Show up to ten of the most recent sightings for the flower named in the form.

    FIX: the sqlite connection was previously never closed; it is now
    released in a finally block.
    """
    iname = request.form["name"]
    con = sqlite3.connect("flowers2019.db")
    try:
        con.row_factory = sqlite3.Row
        cur = con.cursor()
        cur.execute("SELECT person, location, sighted FROM sightings WHERE name = (?) ORDER BY sighted DESC", (iname, ))
        rows = cur.fetchmany(10)
    finally:
        con.close()
    return render_template("viewsightings.html", rows = rows)
@app.route("/updateflower")
def update():
    """Render the flower-update form."""
    return render_template("update.html")
@app.route("/udetails", methods = ["POST", "GET"])
def udetails():
    """Apply the flower-update form and render success.html with a status message.

    Fixes over the original:
    - `con.close()` was unreachable (placed after a `return` inside `finally`);
      the `return` in `finally` would also have suppressed in-flight exceptions.
    - the bare `except` could itself raise NameError when connect() failed
      before `con` was bound.
    - a plain GET previously returned None (a 500); it now renders the page.
    """
    m = "."
    if request.method == "POST":
        con = None
        try:
            newg = request.form["newgenus"]
            news = request.form["newspecies"]
            newn = request.form["newname"]
            oldn = request.form["oldname"]
            con = sqlite3.connect("flowers2019.db")
            cur = con.cursor()
            cur.execute("UPDATE flowers SET genus = ?, species = ?, comname = ? WHERE comname = ?", (newg, news, newn, oldn))
            con.commit()
            m = "Flower information updated successfully"
        except Exception:
            if con is not None:
                con.rollback()
            m = "We could not update the flower information"
        finally:
            if con is not None:
                con.close()
    return render_template("success.html", m=m)
@app.route("/insert", methods = ["POST", "GET"])
def insert():
    """Render the new-sighting form."""
    return render_template("insert.html")
@app.route("/details", methods = ["GET", "POST"])
def details():
    """Insert a sighting from the form and render success.html with a status message.

    Fixes over the original:
    - `con.close()` was unreachable (after a `return` inside `finally`);
      the `return` in `finally` would also have suppressed in-flight exceptions.
    - the bare `except` could itself raise NameError when connect() failed
      before `con` was bound.
    - a plain GET previously returned None (a 500); it now renders the page.
    """
    m = "."
    if request.method == "POST":
        con = None
        try:
            name = request.form["name"]
            person = request.form["person"]
            location = request.form["location"]
            sighted = request.form["sighted"]
            con = sqlite3.connect("flowers2019.db")
            cur = con.cursor()
            cur.execute("INSERT into Sightings (name, person, location, sighted) values (?,?,?,?)",(name,person,location,sighted))
            con.commit()
            m = "Sighting successfully added to table"
        except Exception:
            if con is not None:
                con.rollback()
            m = "We can not add the sighting to the list"
        finally:
            if con is not None:
                con.close()
    return render_template("success.html",m = m)
if __name__ == "__main__":
    # Development server only — disable debug for production.
    app.run(port=5000, debug = True)
|
from bge import logic
from . import multitouchProcessor
import ctypes
# CoreFoundation handles are treated as opaque pointers.
CFArrayRef = ctypes.c_void_p
CFMutableArrayRef = ctypes.c_void_p
CFIndex = ctypes.c_long

# Private Apple framework that streams raw trackpad touch data.
MultitouchSupport = ctypes.CDLL("/System/Library/PrivateFrameworks/MultitouchSupport.framework/MultitouchSupport")

# CFArray accessors, looked up through the MultitouchSupport library handle.
CFArrayGetCount = MultitouchSupport.CFArrayGetCount
CFArrayGetCount.argtypes = [CFArrayRef]
CFArrayGetCount.restype = CFIndex

CFArrayGetValueAtIndex = MultitouchSupport.CFArrayGetValueAtIndex
CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
CFArrayGetValueAtIndex.restype = ctypes.c_void_p

# Returns a CFMutableArray of multitouch device handles.
MTDeviceCreateList = MultitouchSupport.MTDeviceCreateList
MTDeviceCreateList.argtypes = []
MTDeviceCreateList.restype = CFMutableArrayRef
class MTPoint(ctypes.Structure):
    """2D point as laid out by the MultitouchSupport framework."""
    _fields_ = [("x", ctypes.c_float),
                ("y", ctypes.c_float)]
class MTVector(ctypes.Structure):
    """Position plus velocity pair for one touch."""
    _fields_ = [("position", MTPoint),
                ("velocity", MTPoint)]
class MTData(ctypes.Structure):
    """Per-touch record delivered by the framework callback (layout reverse-engineered)."""
    _fields_ = [
        ("frame", ctypes.c_int),
        ("timestamp", ctypes.c_double),
        ("identifier", ctypes.c_int),
        ("state", ctypes.c_int),  # Current state (of unknown meaning).
        ("unknown1", ctypes.c_int),
        ("unknown2", ctypes.c_int),
        ("normalized", MTVector),  # Normalized position and vector of
                                   # the touch (0 to 1).
        ("size", ctypes.c_float),  # The area of the touch.
        ("unknown3", ctypes.c_int),
        # The following three define the ellipsoid of a finger.
        ("angle", ctypes.c_float),
        ("major_axis", ctypes.c_float),
        ("minor_axis", ctypes.c_float),
        ("unknown4", MTVector),
        ("unknown5_1", ctypes.c_int),
        ("unknown5_2", ctypes.c_int),
        ("unknown6", ctypes.c_float),
    ]
MTDataRef = ctypes.POINTER(MTData)

# Signature of the per-frame contact callback:
# (device, data_ptr, n_fingers, timestamp, frame) -> int
MTContactCallbackFunction = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, MTDataRef,
    ctypes.c_int, ctypes.c_double, ctypes.c_int)

MTDeviceRef = ctypes.c_void_p

# Register / unregister the frame callback on a device.
MTRegisterContactFrameCallback = MultitouchSupport.MTRegisterContactFrameCallback
MTRegisterContactFrameCallback.argtypes = [MTDeviceRef, MTContactCallbackFunction]
MTUnregisterContactFrameCallback = MultitouchSupport.MTUnregisterContactFrameCallback
MTUnregisterContactFrameCallback.argtypes = [MTDeviceRef, MTContactCallbackFunction]

# Device lifecycle: start streaming, stop, release the handle.
MTDeviceStart = MultitouchSupport.MTDeviceStart
MTDeviceStart.argtypes = [MTDeviceRef, ctypes.c_int]
MTDeviceStop = MultitouchSupport.MTDeviceStop
MTDeviceStop.argtypes = [MTDeviceRef]
MTDeviceRelease = MultitouchSupport.MTDeviceRelease
MTDeviceRelease.argtypes = [MTDeviceRef]
###
@MTContactCallbackFunction
def callback(device, data_ptr, n_fingers, timestamp, frame):
    """Per-frame touch callback: repackage raw MTData records into plain dicts."""
    touches = []
    for i in range(n_fingers):
        data = data_ptr[i]
        touch = {}
        touch["pos"] = [data.normalized.position.x, data.normalized.position.y]
        touch["vel"] = [data.normalized.velocity.x, data.normalized.velocity.y]
        # NOTE(review): size is doubled here — presumably an empirical scale
        # expected by the consumer; confirm against multitouchProcessor.
        touch["size"] = data.size*2.0
        touches.append(touch)
    multitouchProcessor.touchHandler(touches) # send touch data to multitouchProcessor for processing
    return 0
devices = MultitouchSupport.MTDeviceCreateList()
num_devices = CFArrayGetCount(devices)
print ("Number of Multitouch Devices:", num_devices)

# Register the callback and start streaming on every multitouch device.
logic.MTdevices = []
for i in range(num_devices):
    device = CFArrayGetValueAtIndex(devices, i)
    MTRegisterContactFrameCallback(device, callback)
    MTDeviceStart(device, 0)
    # BUG FIX: the original appended the whole CFArray (`devices`) on every
    # iteration; stopDevices() needs the individual device handles.
    logic.MTdevices.append(device)
# stop device and deregister callback function
def stopDevices():
    """Unregister the callback from every tracked device, stop it, and release it."""
    devices = logic.MTdevices
    for device in devices:
        MTUnregisterContactFrameCallback(device, callback)
        MTDeviceStop(device)
        MTDeviceRelease(device)
|
#! usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
from flask import render_template
from markdown import markdown
def display_post_box(ticket=None, post=None, replies=None, loop=None, page=None):
    """Render flicket_post.html for either a ticket or one of its reply posts.

    :param ticket: object containing ticket information
    :param post: reply post; when given it becomes the displayed content
    :param replies: replies forwarded to the template
    :param loop: loop context forwarded to the template
    :param page: pagination value forwarded to the template
    :return: rendered template
    """
    # The post takes precedence over the ticket as the displayed content.
    content = ticket if post is None else post
    return render_template(
        "flicket_post.html",
        ticket=ticket,
        post=post,
        content=content,
        replies=replies,
        loop=loop,
        page=page,
    )
def show_markdown(text):
    """
    Function to convert text to markdown.
    :param text: raw text to render
    :return: HTML string
    """
    # NOTE(review): the keyword is spelled `safemode` here, while the
    # python-markdown option was historically `safe_mode` (removed in
    # Markdown 3.x) — confirm the pinned library version and whether this
    # argument has any effect at all.
    html = markdown(text, safemode="escape")
    return html
def now_year():
    """Return the current four-digit year as a string."""
    return str(datetime.datetime.now().year)
|
import bpy
from mathutils import Vector

# Keyframe the active object's location: current position at frame 1,
# (0, 5, 0) at frame 20, then start and cancel viewport playback.
obj = bpy.context.active_object
print(obj)
pos = obj.location
# get current frame
cf = bpy.context.scene.frame_current
# set current frame
bpy.context.scene.frame_current = 1
#insert key, e.g. on location
obj.keyframe_insert(data_path='location', frame=1)
obj.location = Vector((0,5,0))
obj.keyframe_insert(data_path='location', frame=20)
# animation start
bpy.ops.screen.animation_play()
# animation stop
bpy.ops.screen.animation_cancel()
import time
from app import app
from app.Models.RunModel import Run
from app.Models.PlanModel import Plan
from app.Models.ProductModel import Product
from app.Models.ProductModel import ProductVersion
from flask_login import login_required, current_user
from flask import Blueprint, request, redirect, url_for, render_template, flash
from app.Common.Utils import get_query_url, common_response, get_mongo_index, list_mongo_res
# Blueprint grouping all /plan views.
plan = Blueprint('plan', __name__)
@plan.route('/', methods=['GET'])
@login_required
def product():
    """Product chooser: paged, optionally name-filtered product list.

    CONSISTENCY FIX: paging args are cast to int (as lists() already does);
    the former `if page is None` check was dead code because r.get()
    always supplied a default.
    """
    r = request.args
    pd = Product()
    page = int(r.get('page', 1))
    size = int(r.get('size', 16))
    search = r.get('search', None)
    if search is None:
        search = {}
    else:
        # Substring match on the product name.
        search = {
            "name": {
                "$regex": search
            }
        }
    products = pd.get_list(search, page, size)
    params = get_query_url(request)
    return render_template('plan/choose_pd.html', products=products, url=request.path, params=params)
@plan.route('/list', methods=['GET'])
@login_required
def lists():
    """Paged plan list for one product; redirects to the chooser on a bad product_id."""
    r = request.args
    pl = Plan()
    pd = Product()
    # r.get() already supplies defaults, so the None checks below only
    # matter if an explicit empty value is ever passed in.
    page = r.get('page', 1)
    if page is None:
        page = 1
    size = r.get('size', 10)
    if size is None:
        size = 10
    product_id = r.get('product_id', '')
    if product_id == "":
        flash("产品 product_id 不能为空!")
        return redirect(url_for('plan.product'))
    prod = pd.table.find_one({"_id": int(product_id)})
    if prod is None:
        flash("产品不存在,请重新选择!")
        return redirect(url_for('plan.product'))
    search = r.get('search', None)
    if search is None:
        search = {
            "pd": product_id
        }
    else:
        # Name substring filter scoped to the product.
        search = {
            "pd": product_id,
            "name": {
                "$regex": search
            }
        }
    plans = pl.get_list(search, int(page), int(size))
    params = get_query_url(request)
    return render_template('plan/plan.list.html', product=prod, plans=plans, url=request.path, params=params)
@plan.route('/add', methods=['GET', 'POST'])
def add():
    """Plan creation: GET renders the form, POST validates and inserts.

    NOTE(review): unlike the sibling views, add() has no @login_required,
    yet it reads current_user.username — confirm whether the missing
    decorator is intentional.
    """
    pl = Plan()
    pv = ProductVersion()
    if request.method == "GET":
        r = request.args
        pd = Product()
        product_id = r.get('product_id', '')
        # Latest 20 versions of the product, newest first.
        pd_ver = pv.table.find({"pd": product_id}, {"ver": 1}).sort([("_id", -1)]).limit(20)
        prod = pd.table.find_one({"_id": int(product_id)})
        return render_template('plan/plan.add.html', product=prod, versions=pd_ver)
    if request.method == "POST":
        r = request.form
        name = r.get('plan_name', '')
        product_id = r.get('product_id', '')
        pd_ver = r.get('pd_ver', '')
        plan_type = r.get('plan_type', '')
        plan_detail = r.get('plan_detail', '')
        # Collect every validation failure before responding.
        err = {}
        if name.strip() == "":
            err['name'] = "计划名称不能为空!"
        if product_id.strip() == "":
            err['product_id'] = "产品ID不能为空!"
        if pd_ver.strip() == "":
            err['pd_ver'] = "产品版本不能为空!"
        if plan_type.strip() == "":
            err['plan_type'] = "计划类型不能为空!"
        if plan_detail.strip() == "":
            err['plan_detail'] = "计划详情不能为空!"
        if len(err) > 0:
            return common_response(data=err, err=500, msg="参数错误,请查看接口返回!")
        try:
            d = {
                "_id": get_mongo_index('plans'),
                "name": name,
                "pd": product_id,
                "pd_ver": pd_ver,
                "type": plan_type,
                "detail": plan_detail,
                "author": current_user.username,
                "create_time": time.time(),
                "update_time": time.time()
            }
            data = pl.table.insert_one(d)
            return common_response(data={"_id": data.inserted_id}, err=0, msg="添加成功!")
        except Exception as e:
            app.logger.error(str(e))
            return common_response(data='', err=500, msg='添加失败')
@plan.route('/detail', methods=['GET'])
@login_required
def detail():
    """Show one plan; plan_id is required, product_id supplies the parent product."""
    r = request.args
    plan_id = r.get('plan_id', '')
    product_id = r.get('product_id', '')
    if plan_id.strip() == "":
        flash("plan_id 不能为空!")
        return redirect(request.referrer)
    pd = Product()
    pl = Plan()
    prod = pd.table.find_one({"_id": int(product_id)})
    data = pl.table.find_one({"_id": int(plan_id)})
    return render_template('plan/plan.detail.html', plan=data, product=prod)
@plan.route('/del', methods=['DELETE'])
@login_required
def delete():
    """Delete a plan, refusing while any run still references it."""
    r = request.form
    pl = Plan()
    rc = Run()
    _id = r.get("_id", '')
    if _id.strip() == "":
        return common_response(data='', err=500, msg='_id 参数不能为空!')
    # NOTE(review): the run lookup uses the *string* _id while the delete
    # below casts to int — confirm how Run stores its "pl" field.
    check = rc.table.find_one({"pl": _id})
    if check is not None:
        return common_response(data='', err=500, msg="计划下有执行,请先删除执行!")
    try:
        data = pl.table.delete_one({"_id": int(_id)})
        return common_response(data=data.raw_result, err=0, msg='删除成功!')
    except Exception as e:
        app.logger.error(str(e))
        return common_response(data='', err=500, msg="删除失败!")
@plan.route('/update', methods=['GET', 'PUT'])
@login_required
def update():
    """Plan editing: GET renders the edit form, PUT validates and applies the change."""
    pl = Plan()
    pv = ProductVersion()
    if request.method == "GET":
        r = request.args
        pd = Product()
        product_id = r.get('product_id', '')
        plan_id = r.get('plan_id', '')
        # Latest 20 versions of the product, newest first.
        pd_ver = pv.table.find({"pd": product_id}, {"ver": 1}).sort([("_id", -1)]).limit(20)
        data = pl.table.find_one({"_id": int(plan_id)})
        prod = pd.table.find_one({"_id": int(product_id)})
        return render_template('plan/plan.edit.html', product=prod, versions=pd_ver, plan=data)
    if request.method == "PUT":
        r = request.form
        plan_id = r.get('plan_id', '')
        name = r.get('plan_name', '')
        product_id = r.get('product_id', '')
        pd_ver = r.get('pd_ver', '')
        plan_type = r.get('plan_type', '')
        plan_detail = r.get('plan_detail', '')
        # Collect every validation failure before responding.
        err = {}
        if plan_id.strip() == "":
            err['plan_id'] = "plan_id 不能为空!"
        if name.strip() == "":
            err['name'] = "计划名称不能为空!"
        if product_id.strip() == "":
            err['product_id'] = "产品ID不能为空!"
        if pd_ver.strip() == "":
            err['pd_ver'] = "产品版本不能为空!"
        if plan_type.strip() == "":
            err['plan_type'] = "计划类型不能为空!"
        if plan_detail.strip() == "":
            err['plan_detail'] = "计划详情不能为空!"
        if len(err) > 0:
            return common_response(data=err, err=500, msg="参数错误,请查看接口返回!")
        try:
            q = {"_id": int(plan_id)}
            d = {
                "$set": {
                    "name": name,
                    "pd_ver": pd_ver,
                    "type": plan_type,
                    "detail": plan_detail,
                    "update_time": time.time()
                }
            }
            data = pl.table.update_one(q, d)
            return common_response(data={"result": data.raw_result}, err=0, msg="更新成功!!")
        except Exception as e:
            app.logger.error(str(e))
            return common_response(data='', err=500, msg='更新失败!')
@plan.route('/query', methods=['GET'])
def query():
    """Return, as JSON, every plan matching a product id and version."""
    pl = Plan()
    args = request.args
    prod = args.get('pd', '').strip()
    version = args.get('pd_ver', '').strip()
    try:
        cursor = pl.table.find({"pd": prod, "pd_ver": version})
        return common_response(data=list_mongo_res(cursor), err=0, msg="请求成功!")
    except Exception as e:
        app.logger.error(str(e))
        return common_response(data='', err=500, msg="请求失败!")
|
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import optimize
def func(t, A, tau, T, phi, C):
    """Damped cosine used to fit the data: A*exp(-t/tau)*cos(2*pi*t/T + phi) + C."""
    decay = np.exp(-t / tau)
    oscillation = np.cos(2.0 * math.pi * t / T + phi)
    return A * decay * oscillation + C
# Given constants and measured values
m_1 = 1.5 # [kg] - Mass of small masses on rigid rod
d = 50e-3 # [m] - Radius of rigid rod
b = 47e-3 # [m] - Distance between m1 and m2
L_0 = 2.13 # [m] - Distance for torsion balance to glass ruler
L_1 = 5.2e-2 # [m] - Distance from null position on ruler to equlibrium point
K = 1.083 # Antitorque moment correction constant
G = 6.67408e-11 # [m^3 kg^-1 s^-2] - Actual value gravitational constant G
# Measurement uncertainties
delta_L_0 = 0.012 # [m]
delta_L_1 = 0.1e-2 # [m]
# Estimates from fit (placeholders; overwritten by the fit below)
S_1 = 0.0 # [m] - Equilibrium of torsion balance in position 1
S_2 = 0.0 # [m] - Equilibrium of torsion balance in position 2
T = 0.0 # [s] - Period of torsion balance oscillations
# Uncertainties from fit
delta_S_1 = 0.0 # [m]
delta_S_2 = 0.0 # [m]
delta_T = 0.0 # [s]
# Read data
nrows = 116
# Column names
num, pos = "Datapoint", "Position"
column_names = [num, pos]
pos1 = pd.read_csv("data/pos1_datapoints.csv", header=None,
                   names=column_names, nrows=nrows)
pos2 = pd.read_csv("data/pos2_datapoints.csv", header=None,
                   names=column_names, nrows=nrows)
# Fit data from position 1 reading with fit function
# Initial parameter guess for position 1
S_1 = 9.6
T = 600.0
A = 15.9
tau = 30.0
phi = 1.0
guess_params1 = [A, tau, T, phi, S_1]
# Samples are taken every 30 s, so time = 30 * datapoint index.
# NOTE(review): sigma is passed as a scalar — confirm this scipy version
# broadcasts it as intended.
param_fit1, pcov1 = optimize.curve_fit(func, 30*pos1[num],
                                       pos1[pos], p0=guess_params1,
                                       sigma=0.1)
# Extract estimates from position 1 fit function
A_1 = param_fit1[0]
tau_1 = param_fit1[1]
T_1 = param_fit1[2]
phi_1 = param_fit1[3]
S_1 = param_fit1[4]
# Extract errors (1-sigma uncertainties from the covariance diagonal)
perr1 = np.sqrt(np.diag(pcov1))
delta_A_1 = perr1[0]
delta_tau_1 = perr1[1]
delta_S_1 = perr1[4]
delta_T_1 = perr1[2]
delta_phi_1 = perr1[3]
y_fit1 = func(30*pos1[num], A_1, tau_1, T_1, phi_1, S_1)
print "Optiimized fit parameters in position I"
print "A = {0} +/- {1}".format(A_1, delta_A_1)
print "tau = {0} +/- {1}".format(tau_1, delta_tau_1)
print "T = {0} +/- {1}".format(T_1, delta_T_1)
print "phi = {0} +/- {1}".format(phi_1, delta_phi_1)
print "S_1 = {0} +/- {1}".format(S_1, delta_S_1)
print ""
# Fit data from position 2 reading with fit function
# Initial parameter guess for position 2
S_2 = 9.6
T = 600.0
A = 6.0
tau = 500.0
phi = 1.0
guess_params2 = [A, tau, T, phi, S_2]
# Samples are taken every 30 s, so time = 30 * datapoint index.
param_fit2, pcov2 = optimize.curve_fit(func, 30*pos2[num],
                                       pos2[pos], p0=guess_params2,
                                       sigma=0.1)
# Extract estimates from position 2 fit function
A_2 = param_fit2[0]
tau_2 = param_fit2[1]
T_2 = param_fit2[2]
phi_2 = param_fit2[3]
S_2 = param_fit2[4]
# Extract errors (1-sigma uncertainties from the covariance diagonal)
perr2 = np.sqrt(np.diag(pcov2))
delta_A_2 = perr2[0]
delta_tau_2 = perr2[1]
delta_S_2 = perr2[4]
delta_T_2 = perr2[2]
delta_phi_2 = perr2[3]
y_fit2 = func(30*pos2[num], A_2, tau_2, T_2, phi_2, S_2)
print "Optiimized fit parameters in position I"
print "A = {0} +/- {1}".format(A_2, delta_A_2)
print "tau = {0} +/- {1}".format(tau_2, delta_tau_2)
print "T = {0} +/- {1}".format(T_2, delta_T_2)
print "phi = {0} +/- {1}".format(phi_2, delta_phi_2)
print "S_2 = {0} +/- {1}".format(S_2, delta_S_2)
print ""
# Prepare computed fit values for computations
# (Removed a stray no-op `S_1` expression statement that had no effect.)
# Average Period
T = (T_1 + T_2)/2.0
# Convert equilibrium positions and their errors from cm (ruler units) to m.
S_1 = S_1*1e-2
S_2 = S_2*1e-2
delta_S_1 = delta_S_1*1e-2
delta_S_2 = delta_S_2*1e-2
# NOTE(review): summing both period uncertainties is conservative for an
# average (half the sum would be standard) — confirm intent.
delta_T = delta_T_1 + delta_T_2
# Compute estimate of graviational constant G
G_computed = (math.pi**2)*(b**2)*d*(S_1 - S_2)*L_0 / \
             (m_1*(T**2)*((L_0**2) + (L_1**2)))
computed_discrepancy = G_computed - G
computed_error = ((G_computed - G)/G)*100
# Compute the uncertainty of G estimate
# (relative uncertainties of each factor combined in quadrature)
delta_G_computed = G_computed * \
    math.sqrt(((delta_S_1**2 + delta_S_2**2)/(S_1 - S_2)**2) +
              (delta_L_0/L_0)**2 + (2*delta_T/T)**2 +
              (((2*L_0*delta_L_0)**2 +
                (2*L_1*delta_L_1)**2)/(L_0**2 + L_1**2)))
# Compute G correction due to antitorque moment
G_corrected = G_computed * K
corrected_discrepancy = G_corrected - G
corrected_error = ((G_corrected - G)/G)*100
# Corrected uncertainty in G
delta_G_corrected = delta_G_computed * K
# Print results
print "Results of graviational constant G"
print "G_actual = {0} m^3 kg^-1 s^-2".format(G)
print ""
print "G_computed = {0} +/- {1} m^3 kg^-1 s^-2".format(G_computed,
                                                       delta_G_computed)
print "Discrepancy = {0}".format(computed_discrepancy)
print "Percentage error = {0}%".format(computed_error)
print ""
print "G_corrected = {0} +/- {1} m^3 kg^-1 s^-2".format(G_corrected,
                                                        delta_G_corrected)
print "Discrepancy = {0}".format(corrected_discrepancy)
print "Percentage error = {0}%".format(corrected_error)
# Plot data
pos1_plot, ax1 = plt.subplots()
pos2_plot, ax2 = plt.subplots()
# Position 1 plot: raw datapoints plus the fitted damped cosine
ax1.plot(30*pos1[num], pos1[pos], label="Observed", linestyle="", marker=".")
ax1.plot(30*pos1[num], y_fit1, label="Fit", linestyle="--",
         color='orange', linewidth=2)
ax1.set(title="Displacement S_I in position I",
        xlabel="Time (s)", ylabel="Displacement S (cm)",
        ylim=[0, 20])
ax1.legend(loc="upper right")
ax1.grid(True)
# Position 2 plot: raw datapoints plus the fitted damped cosine
ax2.plot(30*pos2[num], pos2[pos], label="Observed", linestyle="", marker=".")
ax2.plot(30*pos2[num], y_fit2, label="Fit", linestyle="--",
         color='orange', linewidth=2)
ax2.set(title="Displacement S_II in position II", xlabel="Time (s)",
        ylabel="Displacement S (cm)",
        ylim=[-2, 12])
ax2.legend(loc="upper right")
ax2.grid(True)
# Save plots
pos1_plot.savefig("plots/pos1_datafit.png", dpi=200)
pos2_plot.savefig("plots/pos2_datafit.png", dpi=200)
|
import random
# your code here
def generate_random():
    """Return a pseudo-random integer in [0, 8] (randrange's stop is exclusive)."""
    return random.randrange(0, 9)

print(generate_random())
import base64
import urllib
from hawkeye_test_runner import DeprecatedHawkeyeTestCase
from hawkeye_test_runner import HawkeyeTestSuite
class CertificateValidation(DeprecatedHawkeyeTestCase):
  """Exercises the urlfetch handler's certificate-validation flag.

  NOTE(review): Python 2 idioms (urllib.urlencode, b64-encoding a str)
  — this suite predates a Python 3 port.
  """
  def run_hawkeye_test(self):
    good_cert = 'https://redmine.appscale.com/'
    bad_cert = 'https://ocd.appscale.net:8081/'
    # A URL with a good certificate must fetch with validation off...
    vars = {'url': base64.urlsafe_b64encode(good_cert), 'validate': 'false'}
    response = self.http_get('/urlfetch?{}'.format(urllib.urlencode(vars)))
    self.assertEqual(response.status, 200)
    # ...and with validation on.
    vars = {'url': base64.urlsafe_b64encode(good_cert), 'validate': 'true'}
    response = self.http_get('/urlfetch?{}'.format(urllib.urlencode(vars)))
    self.assertEqual(response.status, 200)
    # Remove bad cert tests since we don't expose them to the outside world anymore
    # vars = {'url': base64.urlsafe_b64encode(bad_cert), 'validate': 'false'}
    # response = self.http_get('/urlfetch?{}'.format(urllib.urlencode(vars)))
    # self.assertEqual(response.status, 200)
    # vars = {'url': base64.urlsafe_b64encode(bad_cert), 'validate': 'true'}
    # response = self.http_get('/urlfetch?{}'.format(urllib.urlencode(vars)))
    # self.assertEqual(response.status, 500)
def suite(lang, app):
  """Build the URLFetch suite; only the Python app registers test cases."""
  test_suite = HawkeyeTestSuite('URLFetch Suite', 'urlfetch')
  if lang == 'python':
    test_suite.addTests(CertificateValidation.all_cases(app))
  return test_suite
|
#import sys
#input = sys.stdin.readline
def main():
    """Read Z answer strings and tally pairwise position agreements.

    NOTE(review): each A[k] is the character list of one input line and is
    indexed up to K-1 = 2**N - 1, so every line is assumed to be at least
    2**N characters long — confirm the input format.
    """
    Z = int( input())
    A = [ list(input()) for _ in range(Z)]
    N = int(input("N"))
    K = 2**N
    # B[i][j] counts, over all Z strings, how often positions i and j
    # hold the same character (symmetric matrix).
    B = [[0]*K for _ in range(K)]
    for k in range(Z):
        for i in range(K-1):
            for j in range(i+1,K):
                if A[k][i] == A[k][j]:
                    B[i][j] += 1
                    B[j][i] += 1
    print(B)
if __name__ == '__main__':
    main()
|
"""Compute the average money made by rolling the dice."""
import random
import sys
import argparse
import my_plot
def main(argv):
    """Run the experiments 'm' times consisting of 'n' trials."""
    opt = parse_cmd_line(argv)
    results = []
    # Map each strategy name to a callable that takes the list of rolls
    # and returns the chosen winning roll.
    dispatcher = {'max': max, 'min': min,
                  'anamaya': anamaya,
                  # BUG FIX: the old lambda returned a random *index*
                  # (0..len-1), not a random roll value; pick an element.
                  'rand': random.choice}
    strategy = dispatcher[opt.strategy]
    msg = "Running exp {0} times with {1} trials and max {2} dice rolls and {3} strategy".format(
        opt.num_experiments, opt.num_trials, opt.max_rolls, opt.strategy)
    print(msg)
    for i in range(opt.num_experiments):
        # Repeat the experiment with max_rolls 'num_trials' times.
        results.append(experiment(opt.num_trials, opt.max_rolls, strategy))
    my_plot.draw_bar(msg, results)
def experiment(num_trials, max_rolls, strategy):
    """Return the average profit over num_trials trials, rounded to 1 dp."""
    total_profit = sum(
        trial_best_of_n_rolls(max_rolls, strategy) for _ in range(num_trials))
    return round(total_profit / num_trials, 1)

def trial_best_of_n_rolls(max_rolls, strategy):
    """Roll the die up to max_rolls times and return the strategy's pick.

    max_rolls: Allow upto these many rolls of the dice.
    """
    winnings = [random.randint(1, 6) for _ in range(max_rolls)]
    return strategy(winnings)

def anamaya(lst):
    """Keep playing if the value is more than the expected value of n-1."""
    first_roll, second_roll, third_roll = lst[0], lst[1], lst[2]
    if first_roll > 4:
        return first_roll
    if second_roll > 3:
        return second_roll
    return third_roll
def parse_cmd_line(argv):
    """Parse the command line options."""
    parser = argparse.ArgumentParser(
        description="Run the roll dice experiment to figure out the average return." )
    # Integer options share the same shape; declare them as data.
    int_options = [
        ('--num_trials', "num_trials", 1000, "Repeat so many times for each trial."),
        ('--max_rolls', "max_rolls", 3, "Allow max rolls of dice per trial."),
        ('--num_experiments', "num_experiments", 10000, "Repeat experiment so many times."),
    ]
    for flag, dest, default, help_text in int_options:
        parser.add_argument(flag, action="store", dest=dest, type=int,
                            default=default, help=help_text)
    parser.add_argument('--strategy', action="store", dest="strategy", type=str,
                        default="max", help="Strategy to use in a trial.")
    return parser.parse_args(argv)

if __name__ == "__main__":
    main(sys.argv[1:])
|
from pysubparser import parser
import jieba
import sys
# word -> occurrence count across every subtitle file given on argv.
word_count = {}
# Words the user already knows; these are skipped when counting.
learned = [
    '好',
    '你',
    '我',
    '什么',
    '了',
    '不',
    '好',
    '说',
    '的',
    '啊',
    '吧',
    '是',
    '吗',
    '就',
    '那',
    '去',
    '都', '我们', '呢',
    '顾未易',  # gu wei yi
    '这', '吃', '走', '他', '给', '怎么', '在', '想', '也', '还',
    '要', '没有', '一下', '来', '有', '你们', '跟', '不是', '快', '你在干吗', '干吗'
]
for filename in sys.argv[1:]:
    subtitles = parser.parse(filename)
    for subtitle in subtitles:
        # Segment the Chinese subtitle text into words (jieba precise mode).
        seg_list = jieba.cut(subtitle.text, cut_all=False)
        for word in seg_list:
            if word not in learned:
                word_count[word] = word_count.get(word, 0) + 1
# Print the most frequent unknown words, least frequent first.
# NOTE(review): the slice [-101:-1] drops the single most frequent word —
# confirm that is intentional.
sorted_words = sorted(word_count.items(), key=lambda kv: kv[1])
for w, count in sorted_words[-101:-1]:
    print(w + " -> " + str(count))
|
from django.db import models
from django.contrib.auth.models import User
# Creating a Temporary Model for User Details
class UserDetails(models.Model):
    """Temporary profile model mirroring selected ``auth.User`` fields."""
    # One-to-one link to the built-in Django user; deleted with it (CASCADE).
    details = models.OneToOneField(User,on_delete=models.CASCADE)
    first_name = models.CharField(null=True,blank=False,max_length = 50)
    last_name = models.CharField(null=True,blank=False,max_length = 50)
    # Duplicated from auth.User; kept unique at the DB level.
    username = models.CharField(max_length = 40,blank = False,null = True,unique = True)
    email = models.EmailField(max_length = 40)
    def __str__(self):
        # Display the linked auth username, not the local copy.
        return self.details.username
    class Meta:
        verbose_name = 'User Detail'
        verbose_name_plural = 'User Details'
|
import pytest
from domain import activity, vo2
from measurement.measures import Speed
# Rows: (o2_cost_horiz, o2_cost_vert, speed, grade, expected VO2 value).
vo2_inst_testdata = [
    (0, 0, Speed(mph=0), 0, 3.5),  # Should get resting constant value of 3.5
    (1, 1, Speed(kph=6), 0, 103.5),
    (vo2.O2_COST_HORIZ_RUN, vo2.O2_COST_VERT_RUN, Speed(kph=6), 0, 23.5),
    (vo2.O2_COST_HORIZ_WALK, vo2.O2_COST_VERT_WALK, Speed(mph=2), 0.15, 23.348576),
]
@pytest.mark.parametrize("o2_cost_horiz,o2_cost_vert,speed,grade,expected", vo2_inst_testdata)
def test_vo2_instantiation(o2_cost_horiz, o2_cost_vert, speed, grade, expected):
    """VO2 built directly from cost coefficients yields the expected value."""
    vo2_est = vo2.VO2(
        o2_cost_horiz=o2_cost_horiz,
        o2_cost_vert=o2_cost_vert,
        speed=speed,
        grade=grade
    )
    assert expected == vo2_est.get_value()
# Rows: (activity, speed, grade, expected VO2 value); values match the
# Run/Walk coefficient rows in vo2_inst_testdata above.
vo2_fact_testdata = [
    (activity.Run(), Speed(kph=6), 0, 23.5),
    (activity.Walk(), Speed(mph=2), 0.15, 23.348576),
]
@pytest.mark.parametrize("activity,speed,grade,expected_vo2_value", vo2_fact_testdata)
def test_vo2_factory_method(activity, speed, grade, expected_vo2_value):
    """get_VO2 picks coefficients from the activity and matches direct construction."""
    vo2_est = vo2.get_VO2(activity=activity, speed=speed, grade=grade)
    assert expected_vo2_value == vo2_est.get_value()
|
# Calling a function that lives inside a package
# (original comment in Korean: "패키지 안의 함수 실행하기")
import game.sound.echo
game.sound.echo.echo_test()
# Same function, importing the module from the package.
from game.sound import echo
echo.echo_test()
# Same function, imported directly by name.
from game.sound.echo import echo_test
echo_test()
from game.graphic.render import render_test
render_test()
"""
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000
digits?
"""
# with every order of magnitude, the number of digits increases by one
# therefore the digit length of a number is equal to the power to which we must
# raise 10 to reduce that number to 0 plus one.
def fibo():
    """Yield the Fibonacci numbers 0, 1, 1, 2, 3, 5, ... indefinitely."""
    prev, curr = 0, 1
    yield prev
    while True:
        yield curr
        prev, curr = curr, prev + curr

def find_term():
    """
    >>> find_term()
    4782
    """
    # threshold = 10**ndigits; a value has `ndigits` digits while it stays
    # at or below the current threshold (no Fibonacci number is an exact
    # power of ten, so the strict comparison is safe here).
    threshold = 10
    ndigits = 1
    for index, value in enumerate(fibo()):
        while value > threshold:
            threshold *= 10
            ndigits += 1
        if ndigits == 1000:
            return index
|
import mysql.connector
# NOTE(review): database_ip, database_user, database_password and
# database_name are not defined anywhere in this file — as written this
# raises NameError. Presumably they should be imported from a config
# module; confirm the missing import.
mydb = mysql.connector.connect(
    host=database_ip,
    user=database_user,
    passwd=database_password,
    database=database_name
)
print(mydb)  # repr of the live connection confirms success
mycursor = mydb.cursor()
|
# Print, for each distinct character, how many times it occurs in the
# input (first occurrence only, preserving order).
# FIX: the original bound the input to the name `str`, shadowing the
# builtin; renamed and switched to enumerate for clarity.
text = input("enter string:")
for i, ch in enumerate(text):
    if ch not in text[:i]:
        print("%c occurs %d times" % (ch, text.count(ch)))
|
#!/usr/bin/env python3
"""
test for the Sinumber module.
"""
import unittest
from base_test import PschedTestBase
from pscheduler.sinumber import number_as_si, si_as_number, si_range
class TestSinumber(PschedTestBase):
    """
    Sinumber tests.
    """

    def test_si_as_number(self):
        """SI as number test"""
        # Decimal suffixes (k/K, g/G, y, ...) scale by powers of 1000;
        # the "i" forms (ki, Gi) scale by powers of 1024; letter case
        # does not change the decimal value.
        conversion_map = {
            '1234': 1234,
            '1234K': 1234000,
            '-1234ki': -1263616,
            '5g': 5000000000,
            '5G': 5000000000,
            '-5Gi': -5368709120,
            '2y': 2000000000000000000000000,
            '12.34': 12.34,
            '123.4K': 123400.0,
            '106.9m': 106900000.0,
            '3.1415P': 3.1415e+15,
        }
        for i in conversion_map:
            self.assertEqual(conversion_map.get(i), si_as_number(i))
        # Bare suffixes, words, exponent notation and non-strings are rejected.
        for i in ["ki", "Steak", "123e1", 3.1415]:
            self.assertRaises(ValueError, si_as_number, i)

    def test_number_to_si(self):
        """Number to SI test"""
        # Value -> (default base-10, base=2, places=3 renderings).
        conversion_map = {
            1000: ('1.00K', '1000.00', '1.000K'),
            1000000000: ('1.00G', '953.67Mi', '1.000G'),
            1234567890: ('1.23G', '1.15Gi', '1.235G'),
            '9.8': ('9.80', '9.80', '9.800'),
            0: ('0.00', '0.00', '0.000'),
        }
        for k, v in list(conversion_map.items()):
            self.assertEqual(number_as_si(k), v[0])
            self.assertEqual(number_as_si(k, base=2), v[1])
            self.assertEqual(number_as_si(k, places=3), v[2])

    def test_si_range(self):
        """SI range test"""
        # Scalars collapse to {upper == lower}; dict bounds accept SI
        # strings; a missing lower bound falls back to default_lower.
        self.assertEqual(
            si_range(15, default_lower=0),
            {'upper': 15, 'lower': 15})
        self.assertEqual(
            si_range('16ki', default_lower=0),
            {'upper': 16384, 'lower': 16384})
        self.assertEqual(
            si_range({'upper': 1000}, default_lower=0),
            {'upper': 1000, 'lower': 0})
        self.assertEqual(
            si_range({'upper': 2000, 'lower': 1000}, default_lower=0),
            {'upper': 2000, 'lower': 1000})
        self.assertEqual(
            si_range({'upper': '2k', 'lower': 1000}, default_lower=0),
            {'upper': 2000, 'lower': 1000})
        self.assertEqual(
            si_range({'upper': 2000, 'lower': '1k'}, default_lower=0),
            {'upper': 2000, 'lower': 1000})
        self.assertEqual(
            si_range({'upper': '2k', 'lower': '1k'}, default_lower=0),
            {'upper': 2000, 'lower': 1000})
        # Inverted bounds must be rejected.
        self.assertRaises(ValueError, si_range, {"lower": "2k", "upper": "1k"}, default_lower=0)

if __name__ == '__main__':
    unittest.main()
|
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# Create the figure and a single Axes; plt.subplots() is shorthand for
# plt.figure() followed by fig.add_subplot().
fig, ax = plt.subplots()
xdata, ydata = [], []  # buffers holding every point drawn so far
# 'r-' is a matplotlib format string: red, solid line.
ln, = ax.plot([], [], 'r-',
              animated=False)
def init():
    """Set fixed axis limits; return the artists FuncAnimation should redraw."""
    ax.set_xlim(0, 2 * np.pi)  # x spans one full period, [0, 2*pi]
    ax.set_ylim(-1, 1)  # y range of sin
    return ln,  # FuncAnimation expects an iterable of artists
def update(n):
    """Append the next sample (n, sin(n)) and refresh the line data."""
    xdata.append(n)  # n is the current frame value supplied by FuncAnimation
    ydata.append(np.sin(n))
    ln.set_data(xdata, ydata)  # redraw the line with all accumulated points
    return ln,
# Each value produced by `frames` is passed to update() as its argument.
ani = FuncAnimation(fig, update, frames=np.linspace(0, 2 * np.pi, 10),
                    init_func=init, blit=True)
plt.show()
|
from django.shortcuts import render
import telepot
import urllib3, json
from django.http import HttpResponse
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton
from bot.models import Worker, LoggingStep, TimeSheet
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
import logging
# Route all Telegram API traffic through the PythonAnywhere proxy.
proxy_url = "http://proxy.server:3128"
telepot.api._pools = {
    'default': urllib3.ProxyManager(proxy_url=proxy_url, num_pools=3, maxsize=10, retries=False, timeout=30),
}
telepot.api._onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=proxy_url, num_pools=1, maxsize=1, retries=False, timeout=30))
logger = logging.getLogger(__name__)
# logger.error('Something went wrong!')
print('Bot started')
secret = "f893df61-ee35-4a84-87fd-f0698b7438f7"
# NOTE(review): the bot token is hard-coded — it grants full control of the
# bot; move it to configuration and rotate it.
bot = telepot.Bot('408802721:AAHmVxkphGXoPWhQruHOLMDubWCkLk-vfCE')
bot.setWebhook("https://timeismoney.pythonanywhere.com/webhook", max_connections=1)
@csrf_exempt
def telegram_webhook(request):
    """Webhook endpoint: decode the Telegram update and dispatch messages."""
    # print (request.body)
    body_unicode = request.body.decode('utf-8')
    update = json.loads(body_unicode)
    # Only message updates are handled; edits, callbacks etc. are ignored.
    if "message" in update:
        logic(update)
    return HttpResponse("OK")
def logic(update):
    """Dispatch an incoming message to the handler for the worker's
    current logging step.

    Raises nothing to the caller: unknown Telegram users get a reply via
    unknown_user(); unknown commands get dont_understand().
    """
    text = update["message"]["text"]
    chat_id = update["message"]["chat"]["id"]
    username = update["message"]["chat"]["username"]
    try:
        worker = Worker.objects.get(telegram_username=username)
        if text == "/start":
            # set current step to "Start" and start logging from the beginning
            output_start(chat_id)
            LoggingStep.objects.update_or_create(worker=worker, defaults={"step": "Start"})
        # check if we've already logged something for this worker
        elif LoggingStep.objects.filter(worker=worker).exists():
            logging_step = LoggingStep.objects.get(worker=worker)
            # "Start". Just started logging process (Расскажешь, что делал сегодня?)
            if logging_step.step == "Start":
                logic_start(chat_id=chat_id, logging_step=logging_step, text=text)
            # "Choose shoes type" (Выбери тип обуви)
            elif logging_step.step == "Choose shoes type":
                logic_choose_shoes_type(chat_id=chat_id, logging_step=logging_step, text=text)
            # Choose shoes size
            elif logging_step.step == "Choose shoes size":
                logic_choose_shoes_size(chat_id=chat_id, logging_step=logging_step, text=text)
            # Choose shoes width
            elif logging_step.step == "Choose shoes width":
                # BUG FIX: this step previously dispatched to
                # logic_choose_shoes_type, so the width answer was parsed
                # as a shoe type and the flow could never advance.
                logic_choose_shoes_width(chat_id=chat_id, logging_step=logging_step, text=text)
            # Choose sole color
            elif logging_step.step == "Choose sole color":
                logic_choose_sole_color(chat_id=chat_id, logging_step=logging_step, text=text)
            # Choose top material
            elif logging_step.step == "Choose top material":
                logic_choose_top_material(chat_id=chat_id, logging_step=logging_step, text=text)
            # Enter client info
            elif logging_step.step == "Enter client info":
                logic_enter_client_info(chat_id=chat_id, logging_step=logging_step, text=text)
            # Output order NR and PDF
            elif logging_step.step == "Output order NR and PDF":
                output_order_nr_and_pdf(chat_id=chat_id, logging_step=logging_step)
            # Unknown/finished steps fall through silently, as before.
        else:
            dont_understand(chat_id)
    except Worker.DoesNotExist:
        unknown_user(chat_id)
# start
def output_start(chat_id):
    """Send the main menu with the three top-level actions."""
    bot.sendMessage(chat_id, "Привет! Что надо сделать?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Создать заказ"),
                             KeyboardButton(text="Изменить статус заказа"),
                             KeyboardButton(text="Текущие активные заказы")]
                        ]
                        , resize_keyboard=True, one_time_keyboard=True))
def logic_start(text, chat_id, logging_step):
    """Handle the main-menu choice made right after /start.

    Only order creation is implemented; every other choice (and any free
    text) answers "Not ready".
    """
    if text == "Создать заказ":
        output_choose_shoes_type(chat_id)
        logging_step.step = "Choose shoes type"
        logging_step.save()
    else:
        # FIX: the original had two identical `elif text == "Изменить
        # статус заказа"` branches (the second unreachable); all
        # non-create paths ended in not_ready(), so collapse them.
        not_ready(chat_id)
# choose shoes type
def output_choose_shoes_type(chat_id):
    """Ask which type of shoe to make (sneakers or dress shoes)."""
    bot.sendMessage(chat_id, "Какой тип обуви нужно сделать?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Кеды"), KeyboardButton(text="Туфли")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def logic_choose_shoes_type(text, chat_id, logging_step):
    """Advance to size selection once a valid shoe type is chosen."""
    # FIX: both original branches were byte-identical; any known type
    # moves the flow forward, so they are merged into one membership test.
    if text in ("Кеды", "Туфли"):
        output_choose_shoes_size(chat_id)
        logging_step.step = "Choose shoes size"
        logging_step.save()
    else:
        not_ready(chat_id)
# choose shoes size
def output_choose_shoes_size(chat_id):
    """Ask for the shoe size with a keyboard of the supported sizes."""
    # BUG FIX: the prompt was copy-pasted from the shoe-type question
    # ("Какой тип обуви нужно сделать?") although this step asks for a size.
    bot.sendMessage(chat_id, "Какой размер обуви?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="39"),
                             KeyboardButton(text="40"),
                             KeyboardButton(text="41"),
                             KeyboardButton(text="42"),
                             KeyboardButton(text="43"),
                             KeyboardButton(text="44"),
                             KeyboardButton(text="45")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def logic_choose_shoes_size(text, chat_id, logging_step):
    """Advance to width selection once a valid size is chosen."""
    sizes = ['39', '40', '41', '42', '43', '44', '45']
    # BUG FIX: `any(text in s for s in sizes)` was a *substring* test, so
    # inputs like "4" or "" were accepted; require an exact match.
    if text in sizes:
        output_choose_shoes_width(chat_id)
        logging_step.step = "Choose shoes width"
        logging_step.save()
    else:
        not_ready(chat_id)
# choose shoes width
def output_choose_shoes_width(chat_id):
    """Ask for the shoe width (standard or +1)."""
    bot.sendMessage(chat_id, "Какая полнота обуви?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Стандарт"),
                             KeyboardButton(text="+1")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def logic_choose_shoes_width(text, chat_id, logging_step):
    """Advance to sole-colour selection once a valid width is chosen."""
    width = ['Стандарт', '+1']
    # BUG FIX: `any(text in s for s in width)` was a *substring* test, so
    # inputs like "1" or "" were accepted; require an exact match.
    if text in width:
        output_choose_sole_color(chat_id)
        logging_step.step = "Choose sole color"
        logging_step.save()
    else:
        not_ready(chat_id)
# choose sole color
def output_choose_sole_color(chat_id):
    """Ask for the sole colour (white or black)."""
    bot.sendMessage(chat_id, "Какой цвет подошвы?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Белый"),
                             KeyboardButton(text="Черный")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def logic_choose_sole_color(text, chat_id, logging_step):
    """Advance to top-material entry once a valid sole colour is chosen."""
    sole_color = ['Белый', 'Черный']
    # BUG FIX: `any(text in s for s in sole_color)` was a *substring*
    # test, so e.g. "" was accepted; require an exact match.
    if text in sole_color:
        output_choose_top_material(chat_id)
        logging_step.step = "Choose top material"
        logging_step.save()
    else:
        not_ready(chat_id)
# choose top material
def output_choose_top_material(chat_id):
    """Ask for the upper material as free text."""
    bot.sendMessage(chat_id, "Напиши материал для верха")
def logic_choose_top_material(text, chat_id, logging_step):
    """Accept any text as the material and move on to client info."""
    output_enter_client_info(chat_id)
    logging_step.step = "Enter client info"
    logging_step.save()
# enter client info
def output_enter_client_info(chat_id):
    """Ask for the client's details as free text."""
    bot.sendMessage(chat_id, "Напиши данные клиента")
def logic_enter_client_info(text, chat_id, logging_step):
    """Accept any text as client info and move to order output.

    NOTE(review): this sends the project-selection keyboard
    (choose_commercial_project), which looks like a leftover from the
    time-sheet flow — confirm the intended next prompt.
    """
    choose_commercial_project(chat_id)
    logging_step.step = "Output order NR and PDF"
    logging_step.save()
# orderNR, generate pdf, finish
def output_order_nr_and_pdf(chat_id, logging_step):
    """Send the (currently hard-coded) order number and PDF link, then
    mark the flow finished."""
    bot.sendMessage(chat_id, "Номер заказа: 4242")
    bot.sendMessage(chat_id, "PDF можете скачать по ссылке: https://tinyurl.com/4poyc6x")
    bot.sendMessage(chat_id, "До новых встреч.")
    logging_step.step = "Finish"
    logging_step.save()
def dont_understand(chat_id):
    """Reply that the command is unknown."""
    bot.sendMessage(chat_id, "Я не знаю такой команды")
def define_project_type(chat_id):
    """Ask whether the project was commercial or internal (time-sheet flow)."""
    bot.sendMessage(chat_id, "Над каким проектом ты работал?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Коммерческий"), KeyboardButton(text="Внутренний")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def choose_commercial_project(chat_id):
    """Offer the list of commercial projects (time-sheet flow)."""
    bot.sendMessage(chat_id, "Над каким проектом ты работал?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Делал бота"), KeyboardButton(text="Делал работу")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def choose_internal_project(chat_id):
    """Offer the list of internal activities (time-sheet flow)."""
    bot.sendMessage(chat_id, "Над каким проектом ты работал?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Обучение"), KeyboardButton(text="Собеседование"),
                             KeyboardButton(text="Болезнь")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def what_did_you_do(chat_id):
    """Ask what exactly was done."""
    bot.sendMessage(chat_id, "Что именно делал?")
def who_is_the_interviewee(chat_id):
    """Ask who was interviewed."""
    bot.sendMessage(chat_id, "Кого собеседовал?")
def how_much_time_did_you_spend(chat_id):
    """Ask how many hours were spent, with a 1-5 keyboard."""
    bot.sendMessage(chat_id, "Сколько времени потратил? (в часах)",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="1"), KeyboardButton(text="2"),
                             KeyboardButton(text="3"), KeyboardButton(text="4"), KeyboardButton(text="5")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def confirm_result(chat_id, logging_step):
    """Echo the recorded project/details/hours and ask for confirmation."""
    bot.sendMessage(chat_id, "Ты работал над проектом '" + logging_step.project +
                    "', сделал '" + logging_step.details +
                    "', за " + str(logging_step.time_spent) + " ч. Верно?",
                    reply_markup=ReplyKeyboardMarkup(
                        keyboard=[
                            [KeyboardButton(text="Да"), KeyboardButton(text="Нет")]
                        ], resize_keyboard=True, one_time_keyboard=True))
def thank_you(chat_id):
    """Thank the user."""
    bot.sendMessage(chat_id, "Спасибо!")
def unknown_user(chat_id):
    """Reply to a Telegram user with no Worker record."""
    bot.sendMessage(chat_id, "Я вас не знаю!")
def not_ready(chat_id):
    """Fallback for unimplemented branches / unexpected input."""
    bot.sendMessage(chat_id, "Not ready")
|
#!/usr/bin/python
from bs4 import BeautifulSoup
import sqlite3
class DB:
    """
    Abstraction for the profile database
    """

    def __init__(self, filename):
        """
        Creates a new connection to the database

        filename - The name of the database file to use
        """
        self.Filename = filename
        connection = sqlite3.connect(filename)
        self.Connection = connection
        self.Cursor = connection.cursor()

    def SaveProfile(self, data):
        """
        Saves the profile to the database

        data - A dictionary of profile information
        """
        # Column order must match the INSERT statement below.
        columns = ['url'] + ['e%d' % i for i in range(9)] + [
            'gender', 'age', 'orientation', 'status', 'location']
        values = tuple(data[c] for c in columns)
        self.Cursor.execute("INSERT INTO profiles (url, e0, e1, e2, e3, e4, e5, e6, e7, e8, gender, age, orientation, status, location) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", values)
        self.Connection.commit()

    def HasVisited(self, url):
        """
        Returns true if the given URL is in the database, false otherwise

        url - The URL to check
        """
        self.Cursor.execute("SELECT 1 FROM profiles WHERE url = ? LIMIT 1", (url,))
        row = self.Cursor.fetchone()
        return row is not None
|
def is_sorted(string):
    """Return True when the characters of *string* are in non-decreasing
    order (by code point), False otherwise."""
    return all(string[k] <= string[k + 1] for k in range(len(string) - 1))

print(is_sorted("ABC"))
print(is_sorted("aBc"))
print(is_sorted("dog"))
from flask import render_template, redirect, url_for, session
from . import main
from . forms import MatchForm
from .. import db
from ..models import Fenxi
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: show the match form; on valid submit, list the matches
    for the chosen competition."""
    form = MatchForm()
    match_list = None
    if form.validate_on_submit():
        match_list = Fenxi.query.filter_by(competition=form.competition.data).all()
        print(match_list)  # NOTE(review): debug print left in — consider logging or removing
    return render_template('index.html', form=form, match_list=match_list)
|
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Log in to 126.com webmail and quit (login-form smoke test).
driver = webdriver.Chrome()
driver.implicitly_wait(10)
driver.get("https://www.126.com")
# The login form lives inside the "x-URS-iframe" iframe.
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "x-URS-iframe")))
# FIX: switch_to_frame() and find_element_by_* were deprecated and removed
# in Selenium 4; use switch_to.frame() and find_element(By..., ...).
driver.switch_to.frame("x-URS-iframe")
email = driver.find_element(By.NAME, "email")
email.clear()
email.send_keys("a15708420051")
password = driver.find_element(By.NAME, "password")
password.clear()
password.send_keys("123456aa")
driver.find_element(By.ID, "un-login").click()
driver.find_element(By.ID, "dologin").click()
sleep(1)
driver.quit()
|
from abc import abstractmethod, ABC
class AnswersDAO(ABC):
    """Interface for answer persistence; implementations supply the storage."""
    @staticmethod
    @abstractmethod
    def get_all_answers_for_question(question_id):
        """Return every answer belonging to the given question id."""
        pass
    @staticmethod
    @abstractmethod
    def create_answer(answer, question_id):
        """Persist a new answer attached to the given question id."""
        pass
|
from django.urls import include, path
from story import views
# Namespace used when reversing, e.g. {% url 'story:home' %}.
app_name = 'story'
urlpatterns = [
    path('home/', views.home, name='home'),
    path('about_game/', views.about_game, name='about_game'),
    path('start/', views.start, name='start'),
    # next_page receives the chosen option's id from the URL.
    path('next/<int:option_id>/', views.next_page, name='next'),
]
|
# Author : Xiang Xu
# -*- coding: utf-8 -*-
def purity(clusters, points):
    """Return the purity of a clustering.

    clusters: list of dicts whose 'cluster' key holds the member points.
    points: all points; each point exposes a `classId` attribute.

    purity = sum_i(m_i * p_i) / m, where m_i is the size of cluster i and
    p_i = max_j(m_ij / m_i) is the fraction of its dominant class.
    """
    total = 0
    for cluster in clusters:
        members = cluster['cluster']
        mi = len(members)  # mi is the count of cluster i
        mij = {}  # mij is the count of class j in cluster i
        for point in members:
            # BUG FIX: the first occurrence of a class was counted as 0
            # ("if unseen: = 0 else: += 1"), undercounting every class by
            # one. (Also: range replaces the Python-2-only xrange.)
            mij[point.classId] = mij.get(point.classId, 0) + 1
        # pi = max over classes of mij / mi
        pi = max(float(v) / mi for v in mij.values())
        total += mi * pi
    return total / len(points)
def fscore(clusters, points):
    """Return the F-score of a clustering.

    For each cluster, its majority class determines precision (against
    that class's total count) and recall (within the cluster); then
    fscore = sum_i(f_i * |cluster_i|) / |points|.
    """
    # Count of each class over all points.
    class_num = {}
    for point in points:
        # BUG FIX: the first occurrence of a class was counted as 0
        # ("if unseen: = 0 else: += 1"), undercounting every class by one.
        class_num[point.classId] = class_num.get(point.classId, 0) + 1
    total = 0
    for cluster in clusters:
        members = cluster['cluster']
        points_num = len(members)
        # Count of each class within this cluster (same fix as above).
        class_in_cluster = {}
        for point in members:
            class_in_cluster[point.classId] = class_in_cluster.get(point.classId, 0) + 1
        # Majority class of the cluster (first one on ties, as before).
        max_class_id = max(class_in_cluster, key=class_in_cluster.get)
        max_class_num = class_in_cluster[max_class_id]
        precision = float(max_class_num) / class_num[max_class_id]
        recall = float(max_class_num) / points_num
        f = (2 * precision * recall) / (precision + recall)
        total += f * points_num
    return total / len(points)
|
import unidecode
import shutil
DAYS = 4  # number of commuting days considered per week
DAY_NAMES = ("MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN")
people = {}  # unique short name -> Person; filled when the CSV is read below
class Person():
    """One commuter parsed from a semicolon-separated row of the sign-up CSV.

    NOTE(review): the column layout (hour pairs from offset 4, drive flags
    from 14, passenger count at 19) is inferred from the parsing below —
    confirm against the actual form export.
    """
    def __init__(self, info):
        info = info.split(";")
        self.name = self.set_name(info)
        self.times = {}  # day index -> {"in": hour|False, "out": hour|False}
        self.can_drive = [False, False, False, False]
        for i in range(DAYS):
            self.times[i] = {"in": False, "out": False}
            # Alternating go/return hour columns per day; "No voy" /
            # "No vuelvo" mean not travelling in that direction.
            if info[4 + 2 * i] != "No voy":
                self.times[i]["in"] = int(info[4 + 2*i])
            if info[4 + 2 * i + 1] != "No vuelvo":
                self.times[i]["out"] = int(info[4 + 2 * i + 1])
            # "Si" marks a day this person can drive.
            if info[14 + i] == "Si":
                self.can_drive[i] = True
        self.max_passengers = int(info[19])
    @staticmethod
    def set_name(info):
        # Build a unique short name: grow a prefix of the first field,
        # appended with the first word of the second field, until it is
        # not already used in `people`.
        name = ""
        i = 0
        while name == "" or name in people:
            i += 1
            name = info[0][:i] + info[1].split(" ")[0]
        return name
    @property
    def safename(self):
        # ASCII-only lowercase form of the name, safe as a clingo atom.
        unaccented_string = unidecode.unidecode(self.name)
        return unaccented_string.lower()
    def __str__(self):
        s = self.name + "\n"
        for d in range(DAYS):
            s += "\t" + DAY_NAMES[d] + "\n"
            if self.can_drive[d]:
                s += "\t\tCAN DRIVE\n"
            if self.times[d]["in"]:
                s += "\t\tin:" + str(self.times[d]["in"]) + "\n"
            if self.times[d]["out"]:
                s += "\t\tout" + str(self.times[d]["out"]) + "\n"
        # NOTE(review): no-op — str.strip returns a new string and the
        # result is discarded; presumably s = s.strip("\n") was intended.
        s.strip("\n")
        return s
    def clingo_str(self):
        # Emit clingo facts: person/1, can_drive/2, enter/3.
        s = f"person({self.safename}).\n"
        for d in range(DAYS):
            if self.can_drive[d]:
                s += f"can_drive({self.safename}, {d}).\n"
            if self.times[d]["in"]:
                s += f"enter({self.safename}, {d}, {self.times[d]['in']}).\n"
            # if self.times[d]["out"]:
            #     s += "\t\tout" + str(self.times[d]["out"]) + "\n"
        return s
# Load every survey row (skipping the CSV header) and register each person.
with open("data.csv") as csv_file:
    next(csv_file)
    for row in csv_file:
        person = Person(row)
        people[person.name] = person

# Start from the base encoding and append one block of facts per person.
shutil.copyfile("turno.lp", "test.lp")
with open("test.lp", "a") as lp_file:
    for name in people:
        lp_file.write(people[name].clingo_str())
|
from rv.api import m
def test_gpio(read_write_read_synth):
    """Round-trip a gpio module through read/write/read and verify that every
    persisted field survives unchanged."""
    mod: m.Gpio = read_write_read_synth("gpio").module
    expected = {
        "flags": 81,
        "name": "GPIO",
        "out_pin": 213,
        "out_threshold": 46,
        "in_pin": 210,
        "in_note": 0,
        "in_amplitude": 93,
    }
    for attr, value in expected.items():
        assert getattr(mod, attr) == value
    assert not mod.out
    assert mod.in_
|
# Substring search: check whether one string occurs inside another.
nome = input('Digite o seu nome completo: ').strip().lower()
print('Seu nome contem Silva? {}'.format('silva' in nome))
|
import gym
from gym import envs
import numpy as np
# import sys
nstates = 8*8  # number of states (8x8 FrozenLake grid)
nactions = 4  # number of actions
eM = 100000  # number of episodes to run for evaluation
alpha = 0.7  # learning rate
gamma = 0.9  # discount factor
policySelect = 2  # 1: epsilon-greedy policy  2: softmax policy
tau = 0.0016  # softmax temperature
goal = 0  # number of times the goal state was reached
class Qlearning:
    """ class for Q Learning """

    @classmethod
    def choose_action(self, Q, new_observation, E_GREEDY_RATIO):
        """Select an action for the given state using the policy chosen by
        the module-level ``policySelect`` flag (1: epsilon-greedy, 2: softmax).

        Relies on module globals: policySelect, tau, nactions and env.
        """
        if policySelect == 1: # epsilon-greedy
            if E_GREEDY_RATIO < np.random.uniform():
                # exploit: greedy (highest-Q) action for this state
                return np.argmax(Q[new_observation,:])
            else:
                return np.random.randint(env.action_space.n)
        elif policySelect == 2: # softmax
            policy = np.exp(Q[new_observation,:]/tau) / np.sum(np.exp(Q[new_observation,:]/tau))
            # sample an action by inverse-transform sampling over the
            # softmax distribution
            random = np.random.uniform()
            cprob = 0
            for a in range(nactions):
                cprob = cprob + policy[a]
                action = a
                if random < cprob:
                    break
            return action

    @classmethod
    def Qupdate(self, Q, new_observation, observation, action, reward):
        """Return the updated Q-value for (observation, action) using the
        standard one-step Q-learning rule with globals alpha and gamma."""
        return (Q[observation][action] + alpha*(reward-Q[observation][action]+gamma*np.max(Q[new_observation,:])))
if __name__=='__main__':
    Ql = Qlearning
    # Q-table: one row per state, one column per action.
    Q = np.zeros([nstates,nactions])
    E_GREEDY_RATIO = 0.3  # initial exploration rate (used only by policySelect == 1)
    env = gym.make('FrozenLake8x8-v0')
    for i_episode in range(eM):
        observation = env.reset()
        # Decay exploration each episode; clamp to 0 once negligible.
        E_GREEDY_RATIO *= 0.999
        if E_GREEDY_RATIO < 1e-4:
            E_GREEDY_RATIO = 0
        print("Episode {}".format(i_episode+1))
        for t in range(10000):
            env.render()
            # choose action a
            # action = env.action_space.sample()
            action = Ql.choose_action(Q, observation, E_GREEDY_RATIO)
            # step the environment
            new_observation, reward, done, info = env.step(action)
            # Q-learning update
            Q[observation][action] = Ql.Qupdate(Q, new_observation, observation, action, reward)
            #if np.max(Q) == 0 and t > 100:
            #    print("ERROR: Q=0")
            #    sys.exit()
            # advance to the next state
            observation = new_observation
            # bottom-right cell is the goal state
            if new_observation == nstates-1:
                goal += 1
            if done:
                print("Episode finished after {} timesteps".format(t+1))
                break
    print(Q)
    print(goal)
    print(E_GREEDY_RATIO)
    env.close()
|
from ScrapeWebsite import *
from GetTweets import *
from TweetParser import *
from JSONHelper import *
from GeoLocationData import *
# NOTE(review): bare name expression — presumably `map_1` is re-exported by
# one of the wildcard imports above (GeoLocationData?). If it is not, this
# line raises NameError at import time; confirm it is intentional or remove.
map_1
###Parse Twitter Data
##Get the number of tweets to retrieve.
#TopN = 10000
#consumerKey = 'ipTb7DZ0LbJ18p9ATjdrSQ23p'
#consumerSecret = 'DT9OroScZI4HoGbx7PkRL6ojir5T4GWvmlqCkPTd9MgH72fHE9'
#acceesToken = '421348331-vDfvREGRolF3dA4mvvMrihZRtTohQ4yQmoXhRQjX'
#accessSecret = 'w8zsnlGfm6G54wmlKS1Glz9sk0Ei595y6zmPuikQKn5da'
#filepath = "C:\\Users\\barry\\Documents\\GitHub\\eNRG\\eNRG\\"
#fileEnding = "_tweets.json"
#thinkgeoenergy = 'thinkgeoenergy'
#GeothermEneRRgy = 'GeothermEneRRgy'
#GRC2001 = 'GRC2001'
#thinkgeoenergy_file = filepath + thinkgeoenergy + fileEnding
#GeothermEneRRgy_file = filepath + 'GeothermEneRRgy' + fileEnding
#GRC2001_file = filepath + 'GRC2001' + fileEnding
#TweetArray = []
##Get Tweets for user = "thinkgeoenergy"
#TweetArray.extend(GetTopNTweetsForUser(thinkgeoenergy, TopN, consumerKey, consumerSecret, acceesToken, accessSecret))
###Get Tweets for user = "GeothermEneRRgy"
#TweetArray.extend(GetTopNTweetsForUser(GeothermEneRRgy, TopN, consumerKey, consumerSecret, acceesToken, accessSecret))
###Get Tweets for user = "GRC2001"
#TweetArray.extend(GetTopNTweetsForUser(GRC2001, TopN, consumerKey, consumerSecret, acceesToken, accessSecret))
##parse the tweet array
##pull out all of the hash tags
#HashTags = []
#HashTags = ParseHashTags(TweetArray)
##Mentions
#Mentions = []
#Mentions = ParseMentions(TweetArray)
|
#!/usr/bin/env python
# Funtion: compute the CRC-32 checksum of a whole file
# Filename:
import binascii
import zlib


def file_crc32(path, chunk_size=65536):
    """Return the CRC-32 of the file at *path* as an unsigned 32-bit int.

    Reads the file in binary chunks and feeds each chunk to zlib.crc32
    together with the running value, so the result equals the CRC of the
    entire file.  (The original loop discarded the running value on every
    iteration, yielding only the CRC of the last line read.)
    """
    crc = 0
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            crc = zlib.crc32(chunk, crc)
    return crc & 0xFFFFFFFF


if __name__ == '__main__':
    # Guarded so importing this module does not touch the hard-coded path.
    print('%X' % file_crc32(
        r'E:\vscode_pragram\mine\Python3\Python_learning\wxpython_leanning\tools\md5_tools\MD5_Hash.py'))
# 5290D266C4
from gtnlplib.constants import OFFSET
import numpy as np
# hint! use this.
def argmax(scores):
    """Return the key of *scores* with the highest value.

    Ties break toward the lexicographically smallest key, so the result is
    deterministic across runs (unlike a plain max over dict items).
    """
    ordered = sorted(scores.items())
    return max(ordered, key=lambda pair: pair[1])[0]
# This will no longer work for our purposes since python3's max does not guarantee deterministic ordering
# argmax = lambda x : max(x.items(),key=lambda y : y[1])[0]
# deliverable 2.1
def make_feature_vector(base_features,label):
    '''
    take a counter of base features and a label; return a dict of features, corresponding to f(x,y)

    :param base_features: counter of base features
    :param label: label string
    :returns: dict of features, f(x,y)
    :rtype: dict
    '''
    # Pair every base feature with the candidate label, and add the offset
    # (bias) feature for that label with value 1.
    fv = {(label, feat): count for feat, count in base_features.items()}
    fv[(label, OFFSET)] = 1
    return fv
# deliverable 2.2
def predict(base_features,weights,labels):
    '''
    prediction function

    :param base_features: a dictionary of base features and counts
    :param weights: a defaultdict of features and weights. features are tuples (label,base_feature).
    :param labels: a list of candidate labels
    :returns: top scoring label, scores of all labels
    :rtype: string, dict
    '''
    # Score each candidate label as the dot product of the weight vector with
    # f(x,y): sum of weight * count over base features, plus the offset weight.
    # .get(...) is used instead of indexing so a defaultdict is not mutated.
    scores = {}
    for label in labels:
        score = weights.get((label, OFFSET), 0)
        for feat, count in base_features.items():
            score += weights.get((label, feat), 0) * count
        scores[label] = score
    return argmax(scores),scores
def predict_all(x,weights,labels):
    '''
    Predict the label for all instances in a dataset

    :param x: base instances
    :param weights: defaultdict of weights
    :returns: predictions for each instance
    :rtype: numpy array
    '''
    predictions = []
    for instance in x:
        top_label, _ = predict(instance, weights, labels)
        predictions.append(top_label)
    return np.array(predictions)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import re
import sys
# Module logger; basicConfig installs a root handler at import time so DEBUG
# output is visible when the script is run directly.
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
LOGGER.setLevel(logging.DEBUG)
# '#' followed by a full 40-hex-digit SHA-1.
HASH_PATTERN = re.compile(r'#([0-9a-fA-F]{40})\b')
# An already-linked hash: [#<sha>](<url>).
ESCAPE_PATTERN = re.compile(r'\[#([0-9a-fA-F]{40})\]\(([^)]+)\)')
RECOVERY_PATTERN = re.compile(
    r'\[__ESCAPED_COMMIT_HASH_LINK_([0-9a-fA-F]{40})\]\(([^)]+)\)'
)


def process_markdown(markdown_path, owner, project):
    """Processes a given markdown file.

    Replaces every occurrence of '#' + full SHA-1 hash (40 characters) with
    a link to the corresponding GitHub commit page, leaving hashes that are
    already links untouched (escape -> substitute -> recover three-pass).

    Replaced markdown is written to the standard output.
    """
    substitution = f'[#\\1](https://github.com/{owner}/{project}/commit/\\1)'
    # Plain strings: no interpolation needed (the original carried pointless
    # f-prefixes, which lint as F541 and suggest formatting that never happens).
    escape_substitution = '[__ESCAPED_COMMIT_HASH_LINK_\\1](\\2)'
    recovery_substitution = '[#\\1](\\2)'
    with open(markdown_path, mode='r', encoding='utf-8') as markdown_in:
        for line in markdown_in:
            # prevents already replaced links from being replaced
            line = re.sub(ESCAPE_PATTERN, escape_substitution, line)
            # replaces commit hashes with links
            line = re.sub(HASH_PATTERN, substitution, line)
            # recovers the escaped links
            line = re.sub(RECOVERY_PATTERN, recovery_substitution, line)
            sys.stdout.write(line)
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        description='Replaces Git commit hashes with markdown links',
    )
    # Both repository identifiers share the same option shape.
    for flag, meta, text in (
        ('--owner', 'OWNER', 'owner of the repository (required)'),
        ('--project', 'PROJECT', 'project name of the repository (required)'),
    ):
        arg_parser.add_argument(
            flag,
            dest=flag.lstrip('-'),
            metavar=meta,
            type=str,
            required=True,
            help=text,
        )
    arg_parser.add_argument(
        'markdown_path',
        metavar='MARKDOWN',
        type=str,
        help='path to a markdown file to process',
    )
    args = arg_parser.parse_args()
    LOGGER.debug(f'owner={args.owner}, project={args.project}')
    LOGGER.debug(f'processing: {args.markdown_path}')
    process_markdown(args.markdown_path, owner=args.owner, project=args.project)
|
from linkedlist import LinkedList
def remove_duplicates(llist):
    """Remove nodes whose value already appeared earlier in the list.

    O(n) time, O(n) extra space (a set of seen values). Mutates *llist*
    in place via its delete() method.
    """
    seen = set()
    node = llist.head
    while node is not None:
        successor = node.next
        if node.data in seen:
            llist.delete(node)
        else:
            seen.add(node.data)
        node = successor
def remove_duplicates__save_space(llist):
    """Remove duplicates without auxiliary storage: for each node, delete
    every later node carrying the same value.

    O(n^2) time, O(1) extra space. Mutates *llist* in place.

    BUG FIX: the original only advanced probe_node when it was a duplicate,
    so the inner loop hung forever on the first non-matching node.
    """
    test_node = llist.head
    while test_node is not None:
        probe_node = test_node.next
        while probe_node is not None:
            # Capture the successor before a potential delete.
            next_probe = probe_node.next
            if probe_node.data == test_node.data:
                llist.delete(probe_node)
            probe_node = next_probe
        test_node = test_node.next
def remove_and_print(llist):
    # Print the list, deduplicate it with the O(n)-space variant, print the result.
    print(llist, end=' => ')
    remove_duplicates(llist)
    print(llist)
def remove_and_print__save_space(llist):
    """Print the list, deduplicate with the O(1)-space variant, print the result.

    BUG FIX: the original called remove_duplicates(), so the space-saving
    implementation was never actually exercised by this helper.
    """
    print(llist, end=' => ')
    remove_duplicates__save_space(llist)
    print(llist)
if __name__ == '__main__':
    # Exercise both variants on lists with interleaved and adjacent duplicates.
    remove_and_print(LinkedList(1, 2, 3, 2, 3, 4, 5, 5))
    remove_and_print(LinkedList(1, 1, 2, 3, 2, 3, 4, 5, 5, 6))
    remove_and_print__save_space(LinkedList(1, 2, 3, 2, 3, 4, 5, 5))
    remove_and_print__save_space(LinkedList(1, 1, 2, 3, 2, 3, 4, 5, 5, 6))
|
# Model this with RandomForest!!!
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
datasets = load_iris()
x = datasets.data
y = datasets.target
print(x.shape, y.shape) # (150, 4) (150,)
# Fit a full PCA to inspect the cumulative explained-variance curve.
pca = PCA()
pca.fit(x)
cumsum = np.cumsum(pca.explained_variance_ratio_)
print('cumsum :', cumsum)
# Smallest number of components that keeps at least 95% of the variance.
d = np.argmax(cumsum >= 0.95) + 1
print('cumsum >= 0.95 :', cumsum >= 0.95)
print('d :', d)
'''
plt.plot(cumsum)
plt.grid()
plt.show()
'''
# Re-project the data onto the selected number of components.
pca = PCA(n_components=d)
x = pca.fit_transform(x)
print(x.shape) # (150, 2)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=45)
kfold = KFold(n_splits=5, shuffle=True)
'''
parameters = [{'n_estimators':[100,200,300],
'max_depth':[6,8,10],
'min_samples_split':[2,4,6,8],
'min_samples_leaf':[1,3,5,7],
'n_jobs':[-1]}]
parameters = [
{"n_estimators":[100,200,300], "learning_rate":[0.001,0.01,0.1,0.3], "max_depth":[4,5,6]},
{"n_estimators":[90,100,110], "learning_rate":[0.001,0.01,0.1], "max_depth":[4,5,6], "colsample_bytree":[0.6,0.9,1]},
{"n_estimators":[90,110], "learning_rate":[0.001,0.1,0.5], "max_depth":[4,5,6], "colsample_bytree":[0.6,0.9,1], "colsample_bylevel":[0.6,0.7,0.9]},
]
'''
# parameters[0]: RandomForest search space; parameters[1]: XGBoost search spaces.
parameters = [[{'n_estimators':[100,200,300],
'max_depth':[6,8,10],
'min_samples_split':[2,4,6,8],
'min_samples_leaf':[1,3,5,7],
'n_jobs':[-1]}],
[
{"n_estimators":[100,200,300], "learning_rate":[0.001,0.01,0.1,0.3], "max_depth":[4,5,6]},
{"n_estimators":[90,100,110], "learning_rate":[0.001,0.01,0.1], "max_depth":[4,5,6], "colsample_bytree":[0.6,0.9,1]},
{"n_estimators":[90,110], "learning_rate":[0.001,0.1,0.5], "max_depth":[4,5,6], "colsample_bytree":[0.6,0.9,1], "colsample_bylevel":[0.6,0.7,0.9]},
]
]
# Fit both models with randomized hyper-parameter search and report accuracy.
for i, j in enumerate([RandomForestClassifier, XGBClassifier]):
    if i == 0:
        model = RandomizedSearchCV(j(), parameters[i], cv=kfold)
        model.fit(x_train, y_train)
    else:
        model = RandomizedSearchCV(j(n_jobs=-1, use_label_encoder=False), parameters[i], cv=kfold)
        model.fit(x_train, y_train, eval_metric='logloss')
    y_pred = model.predict(x_test)
    print(j.__name__ + '의 최종 정답률 :', accuracy_score(y_test, y_pred))
    print(j.__name__ + '의 최종 정답률 :', model.score(x_test, y_test))
# RandomForestClassifier의 최종 정답률 : 1.0
# XGBClassifier의 최종 정답률 : 1.0
|
import numpy as np
from mesh import *
from basis_func import *
from assemble import *
from viewer import *
def clear_rows(A, b_nodes):
    """Zero every off-diagonal entry of the given boundary rows of A, in place.

    Keeping the diagonal entry makes the Dirichlet rows of the linear
    system read `A[i,i] * u[i] = F[i]`.
    """
    for idx in b_nodes:
        diagonal = A[idx, idx]
        A[idx, :] = 0
        A[idx, idx] = diagonal
if __name__ == "__main__":
    # Load the triangulation: connectivity, coordinates, node ids, boundary-node ids.
    (topo , x , y , nodes , b_nodes) = read_msh("mesh/square.msh")
    # compute A  (stiffness matrix: grad u . grad v)
    A = gradu_gradv(topo,x,y)
    clear_rows(A,b_nodes)
    # compute rhs
    F = f_v(topo,x,y)
    # homogeneous Dirichlet boundary conditions
    F[b_nodes] = 0
    # solve linear system Au=F
    u = np.linalg.solve(A, F)
    plot_sol_p1(x,y,u,topo)
|
#!/usr/bin/python
#-*-coding:utf-8-*-
from wx import WxAPI
import requests
import os
import time
from runonce import start_spider
from dytt import start_grasp
from xyy import run_xyy
from mmjpg import start_mmjpg
from apscheduler.schedulers.background import BackgroundScheduler
def get_oneday_text():
    """Fetch iciba's daily sentence and return its English text, Chinese
    note, and picture URL as a dict.

    The JSON payload is parsed once instead of three times (the original
    called resp.json() per field).
    """
    url = 'http://open.iciba.com/dsapi'
    resp = requests.get(url)
    payload = resp.json()
    return {'en': payload['content'], 'ch': payload['note'], 'img': payload['picture']}
def post_iciba_template_msg():
    """Push today's iciba sentence to every follower via the WeChat
    template-message API.

    The payload is built as a dict and serialized with json.dumps: the
    original interpolated raw text into a JSON string template with %,
    which produced invalid JSON whenever the sentence contained a double
    quote or backslash.
    """
    import json  # local import keeps this fix self-contained

    client = WxAPI()
    token = client.get_access_token()
    users = client.get_user_list(token)
    r = get_oneday_text()
    for u in users:
        payload = {
            "touser": u,
            "template_id": "vmk8rT72FHcPeMDK3Zbo6LSXzm3qAuw4CNXxFsxokeg",
            "url": r['img'],
            "topcolor": "#FF0000",
            "data": {
                "en": {"value": r['en'], "color": "#173177"},
                "ch": {"value": r['ch'], "color": "#173177"},
            },
        }
        client.post_template_msg(token, json.dumps(payload, ensure_ascii=False).encode('utf-8'))
if __name__ == '__main__':
    # NOTE: Python 2 syntax (print statement) — this module targets py2.
    print 'Background task is running now...'
    scheduler = BackgroundScheduler()
    # Push the daily sentence at 12:00 every day.
    scheduler.add_job(post_iciba_template_msg, 'cron', hour=12)
    #scheduler.add_job(start_spider, 'cron', minute='*/30')
    #scheduler.add_job(start_mmjpg, 'cron', minute='*/30')
    #scheduler.add_job(start_grasp, 'cron', minute='*/20')
    #scheduler.add_job(run_xyy, 'cron', minute='*/15')
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
|
# Default payload — presumably a sample nested-list fixture; confirm against callers.
DEFAULT_ARRAY = [[1,2,[3]],4]
# Default logging verbosity.
LOG_LEVEL = 'INFO'
|
"""
------------------------------------
@Time :
@Auth :
@File : HomePage.py
@IDE : PyCharm
@Motto:
------------------------------------
"""
from time import sleep
from Page.BasePage import BasePage
from util.parseConFile import ParseConFile
class HomePage(BasePage):
    """Page object for the home page: menu-bar and picture-entry click actions.

    All element locators are read once, at class-definition time, from the
    [HomePageElements] section of the locator config file.
    """

    # Config-file reader for element locators
    do_conf = ParseConFile()
    # Login icon in the top-left corner
    loginIcon = do_conf.get_locators_or_account('HomePageElements', 'loginIcon')
    # Menu bar
    # Home
    top = do_conf.get_locators_or_account('HomePageElements', 'top')
    # Recommend
    recommend = do_conf.get_locators_or_account('HomePageElements', 'recommend')
    # Collaboration
    collaboration = do_conf.get_locators_or_account('HomePageElements', 'collaboration')
    # Community
    community = do_conf.get_locators_or_account('HomePageElements', 'community')
    # Personal page
    myPage = do_conf.get_locators_or_account('HomePageElements', 'myPage')
    # Picture entries
    # Recommend picture
    pic_recommend = do_conf.get_locators_or_account('HomePageElements', 'pic_recommend')
    # Collaboration picture - needs
    pic_collaboration_needs = do_conf.get_locators_or_account('HomePageElements', 'pic_collaboration_needs')
    # Community picture
    pic_community = do_conf.get_locators_or_account('HomePageElements', 'pic_community')
    # Collaboration picture - help
    pic_collaboration_help = do_conf.get_locators_or_account('HomePageElements', 'pic_collaboration_help')

    def select_menu(self, menu=''):
        """Dispatch *menu* to its click handler; raise ValueError for unknown names.

        A dispatch table replaces the original nine-way if/elif chain.
        """
        handlers = {
            'top': self.click_top_menu,
            'recommend': self.click_recommend_menu,
            'collaboration': self.click_collaboration_menu,
            'community': self.click_community_menu,
            'myPage': self.click_myPage_menu,
            'pic_recommend': self.click_pic_recommend,
            'pic_collaboration_needs': self.click_pic_collaboration_needs,
            'pic_community': self.click_pic_community,
            'pic_collaboration_help': self.click_pic_collaboration_help,
        }
        if menu not in handlers:
            raise ValueError(
                '''菜单选择错误!
                top->首页
                recommend->推荐
                collaboration->援助
                community->社区
                myPage->个人主页'''
            )
        handlers[menu]()

    def click_top_menu(self):
        print("top的位置地址是:", *HomePage.top)
        return self.click(*HomePage.top)

    def click_recommend_menu(self):
        print("recommend的位置地址是", *HomePage.recommend)
        return self.click(*HomePage.recommend)

    def click_collaboration_menu(self):
        print("collaboration的位置地址是", *HomePage.collaboration)
        return self.click(*HomePage.collaboration)

    def click_community_menu(self):
        print("community的位置地址是", *HomePage.community)
        return self.click(*HomePage.community)

    def click_myPage_menu(self):
        print("myPage的位置地址是", *HomePage.myPage)
        return self.click(*HomePage.myPage)

    def click_pic_recommend(self):
        print("pic_recommend的位置地址是:", *HomePage.pic_recommend)
        return self.click(*HomePage.pic_recommend)

    def click_pic_collaboration_needs(self):
        # BUG FIX: log label said "recommend" (copy-paste).
        print("pic_collaboration_needs的位置地址是", *HomePage.pic_collaboration_needs)
        return self.click(*HomePage.pic_collaboration_needs)

    def click_pic_community(self):
        # BUG FIX: log label said "collaboration" (copy-paste).
        print("pic_community的位置地址是", *HomePage.pic_community)
        return self.click(*HomePage.pic_community)

    def click_pic_collaboration_help(self):
        # BUG FIX: log label said "community" (copy-paste).
        print("pic_collaboration_help的位置地址是", *HomePage.pic_collaboration_help)
        return self.click(*HomePage.pic_collaboration_help)
if __name__ == '__main__':
    # print(*HomePage.top)
    # Smoke check: unpack the locator tuple, skipping its first element.
    print(*HomePage.top[1:])
|
# 105. Construct Binary Tree from Preorder and Inorder Traversal
#
# Given preorder and inorder traversal of a tree, construct the binary tree.
#
# Note:
# You may assume that duplicates do not exist in the tree.
#
# For example, given
#
# preorder = [3,9,20,15,7]
# inorder = [9,3,15,20,7]
# Return the following binary tree:
#
# 3
# / \
# 9 20
# / \
# 15 7
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node; str() renders the inorder traversal as a
    space-separated string of values."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
        self.seqlist = []  # scratch buffer filled by inorder()

    def __str__(self):
        # BUG FIX: reset the buffer on every call — the original kept
        # appending, so a second str() on the same node repeated the
        # whole sequence ("2 1" then "2 1 2 1").
        self.seqlist = []
        self.inorder(self)
        return ' '.join(str(x) for x in self.seqlist)

    def inorder(self, curr):
        """Append the inorder traversal of *curr* to self.seqlist."""
        if not curr: return
        self.inorder(curr.left)
        self.seqlist.append(curr.val)
        self.inorder(curr.right)
class Solution(object):
    def buildTree(self, preorder, inorder):
        """
        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode

        O(n) reconstruction: a value->index map over the inorder sequence
        replaces the original's repeated list.index() calls and slice
        copies (which made it O(n^2)).  Assumes no duplicate values, per
        the problem statement.
        """
        if not preorder: return
        inorder_index = {val: i for i, val in enumerate(inorder)}
        next_value = iter(preorder)

        def build(lo, hi):
            # Build the subtree covering inorder[lo..hi]; preorder order
            # guarantees next(next_value) is always the subtree root.
            if lo > hi:
                return None
            node = TreeNode(next(next_value))
            mid = inorder_index[node.val]
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(inorder) - 1)
if __name__ == '__main__':
    # For each case, the inorder rendering of the rebuilt tree must equal
    # the original inorder sequence.
    cases = [
        ([3, 9, 20, 15, 7], [9, 3, 15, 20, 7]),
        ([1, 2], [1, 2]),
        ([1, 2], [2, 1]),
        ([1, 2, 3], [3, 2, 1]),
    ]
    for preorder, inorder in cases:
        sol = Solution()
        expected = ' '.join(str(x) for x in inorder)
        assert str(sol.buildTree(preorder, inorder)) == expected
|
# Generated by Django 3.2 on 2021-04-21 18:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relax ShowChart.customer_id to a
    # nullable CharField(max_length=5).

    dependencies = [
        ('pie', '0004_auto_20210421_2343'),
    ]

    operations = [
        migrations.AlterField(
            model_name='showchart',
            name='customer_id',
            field=models.CharField(max_length=5, null=True),
        ),
    ]
|
import yfinance as yf
import pandas as pd
#import tensorflow as tf
import math
from datetime import datetime
from matplotlib import pyplot as plt
def createList(r1, r2, t, k):
    """Return [round(i*t, k) for every integer i in r1..r2 inclusive].

    (Also avoids shadowing the builtin `list`, which the original did.)
    """
    return [round(step * t, k) for step in range(r1, r2 + 1)]
def mean(lst):
    """Arithmetic mean of a non-empty collection of numbers."""
    total = sum(lst)
    count = len(lst)
    return total / count
def listToString(s):
    """Return the elements of *s* rendered one per line (each element is
    str()-converted and followed by a newline).

    Built with str.join instead of repeated += concatenation, which is
    quadratic in the number of elements.
    """
    return "".join(str(ele) + "\n" for ele in s)
class players:
    """Container that broadcasts market events to a collection of player
    instances and ranks them by net worth (cash + stock at the latest
    price from the module-global `history`)."""

    def __init__(self):
        self.players = []

    def print(self):
        """Print every player's summary."""
        for p in self.players:
            p.print()

    def make_move(self):
        """Let every player make its daily trading decision."""
        for p in self.players:
            p.make_move()

    def dividend(self, div):
        """Credit a per-share dividend of *div* to every player."""
        for p in self.players:
            p.dividend(div)

    def split(self, spl):
        """Apply a stock split with ratio *spl* to every player."""
        for p in self.players:
            p.split(spl)

    def best(self):
        """Return the player with the highest net worth.

        BUG FIX: the original compared against a hard-coded threshold of 0
        and left `answer` unbound (NameError) when no player's net worth
        exceeded it; ranking now starts from the first player.
        """
        price = float(history[len(history) - 1])
        answer = None
        best_total = None
        for p in self.players:
            total = p.cash + p.stock * price
            if best_total is None or total > best_total:
                best_total = total
                answer = p
        return answer

    def worst(self):
        """Return the player with the lowest net worth (same fix as best())."""
        price = float(history[len(history) - 1])
        answer = None
        worst_total = None
        for p in self.players:
            total = p.cash + p.stock * price
            if worst_total is None or total < worst_total:
                worst_total = total
                answer = p
        return answer

    def total(self):
        """Return each player's net worth, in player order."""
        price = float(history[len(history) - 1])
        return [p.cash + p.stock * price for p in self.players]
class player:
    """One simulated trader.

    Tracks cash, held shares (with their purchase prices in stockArray),
    and rolling day/week/year price changes.  Prices come from the
    module-global `history` list, which the driving script appends to
    once per simulated day before calling make_move().
    """

    def __init__(self, player, cash, strat):
        self.player = player        # numeric id of this player
        self.strat = strat          # buy/sell threshold ratio for the strategy
        self.stock = 0              # number of shares currently held
        self.cash = cash
        self.spent = 0
        self.stockArray = []        # purchase price of each held share
        self.arg_cost =0            # mean purchase price of held shares
        self.today = 0              # today's price
        self.yesterday = 0          # previous day's price
        self.change = 0             # 1-day price change
        self.week_change = 0        # 1-week (5 trading days... NOTE(review): uses 7/260 offsets; confirm)
        self.year_change = 0        # ~1-year price change
        self.div_prof = 0           # cumulative dividend income

    def buy(self):
        # Buy one share at today's price, if affordable.
        if(self.today < self.cash):
            # print("1 Bought:"+ str(self.player))
            self.cash -= self.today
            self.stock += 1
            self.stockArray.append(self.today)

    def sell(self):
        # Sell one share at today's price (most recent purchase is dropped).
        # print("1 Sold Player:"+ str(self.player))
        if(self.stock > 0):
            self.cash += self.today
            self.stock -= 1
            self.stockArray.pop()

    def next_day(self):
        # Refresh today's price and the rolling change indicators from `history`.
        self.yesterday = self.today
        self.today = float(history[len(history) - 1])
        if(len(history) > 2):
            self.change = self.today - float(history[len(history) - 2])
        if(len(history) > 6):
            self.week_change = self.today - float(history[len(history) - 7])
        if(len(history) > 259):
            self.year_change = self.today - float(history[len(history) - 260])
        # print("~~~Player" + str(self.player))

    def dividend(self, div):
        # Credit a per-share dividend.
        self.cash += div*self.stock
        self.div_prof += div*self.stock

    def split(self, split):
        # Adjust holdings for a stock split; extra shares are granted cash-neutrally
        # (cash is credited then immediately spent via buy()).
        stock_change = math.floor(self.stock*split) - self.stock
        print(stock_change)
        self.stock += stock_change
        for i in range(0, stock_change):
            self.cash += self.today
            self.buy()

    def print(self):
        # Dump a human-readable summary of this player's state.
        print("~~~Player : " + str(self.player) + "~~~")
        print("Strat : " + str(self.strat))
        print("Mean : " + str(self.arg_cost))
        print("Buy : " + str(self.arg_cost*(1-self.strat)))
        print("Sell : " + str(self.arg_cost*(1+self.strat)))
        print("Cash : " + str(self.cash))
        print("Div-Profit : " + str(self.div_prof))
        print("Stock# : " + str(self.stock))
        print("Stocks : " + str(self.stock*float(history[len(history) - 1])))
        print("Total : " + str(self.cash + self.stock*float(history[len(history) - 1])))
class playerA(player):
    """Trades one share at a time around the mean of its own purchase
    prices, widened by the strategy ratio."""

    def make_move(self):
        """Apply playerA's daily buy/sell rules for the current price."""
        self.next_day()
        # Bootstrap: enter the market when flat and the price is at or
        # above the historical mean.
        if(self.stock == 0 and mean(history) <= self.today):
            self.buy()
        # FIX: `len(...) is not 0` compared identity, not equality — an
        # implementation detail for ints and a SyntaxWarning on modern
        # Python.  Truthiness expresses the same check.
        if self.stockArray:
            self.arg_cost = mean(self.stockArray)
        else:
            self.arg_cost = 0
        if(self.today > self.arg_cost*(1+self.strat) and self.arg_cost != 0):
            self.sell()
        if(self.today < self.arg_cost*(1-self.strat) and self.change < 0 and self.arg_cost != 0):
            self.buy()
class playerB(player):
    """Week-momentum trader: trades `aggression` shares at a time, gated
    on the one-week price change."""

    def make_move(self):
        """Apply playerB's daily buy/sell rules for the current price."""
        self.next_day()
        # Trade size grows with the strategy threshold.
        aggression = int(math.sqrt(self.strat)*10)
        # Enter when flat and both daily and weekly momentum are negative.
        if(self.stock == 0 and 0 > self.week_change and 0 > self.change):
            for i in range(0, aggression):
                self.buy()
        # FIX: replaced `len(...) is not 0` identity comparison (see playerA).
        if self.stockArray:
            self.arg_cost = mean(self.stockArray)
        else:
            self.arg_cost = 0
        if(self.today > self.arg_cost*(1+self.strat) and self.week_change > 0 and self.arg_cost != 0):
            for i in range(0, aggression):
                self.sell()
        if(self.today < self.arg_cost*(1-self.strat) and self.week_change < 0 and self.arg_cost != 0):
            for i in range(0, aggression):
                self.buy()
class playerC(player):
    """Year-momentum trader: trades `aggression` shares at a time, gated
    on the one-year price change."""

    def make_move(self):
        """Apply playerC's daily buy/sell rules for the current price."""
        self.next_day()
        # Trade size grows with the strategy threshold.
        aggression = int(math.sqrt(self.strat)*10)
        # Enter when flat and both yearly and weekly momentum are negative.
        if(self.stock == 0 and 0 > self.year_change and 0 > self.week_change):
            for i in range(0, aggression):
                self.buy()
        # FIX: replaced `len(...) is not 0` identity comparison (see playerA).
        if self.stockArray:
            self.arg_cost = mean(self.stockArray)
        else:
            self.arg_cost = 0
        if(self.today > self.arg_cost*(1+self.strat) and self.year_change > 0 and self.arg_cost != 0):
            for i in range(0, aggression):
                self.sell()
        if(self.today < self.arg_cost*(1-self.strat) and self.year_change < 0 and self.arg_cost != 0):
            for i in range(0, aggression):
                self.buy()
# Driver: download price history, simulate 2000 playerC strategies with
# increasing buy/sell thresholds, then report and plot the results.
stock = input("Please enter a stock ticker: ")
data = yf.download(stock, start="1970-01-01", end="2020-08-30")
actions = yf.Ticker(stock)
#writer = pd.ExcelWriter(stock + ".xlsx")
print(data)
#data.to_excel(writer,'Sheet1')
print(actions.actions)
#actions.actions.to_excel(writer,'Sheet2')
print(actions.actions.empty)
#print(actions.actions.index[0])
#print(actions.actions.iat[0, 0])
#print(actions.actions.iat[0, 1])
#print(data.size)
#print(data['High'])
#print(data['High'].size)
#print(len(data.index))
#print(data.columns[0])
print(actions.actions.index.size)
#writer.save()
file1 = open(stock + ".txt","w")
# Per-day series built up during the simulation loop.
history = []
div = []
split = []
change = []
change_week = []
change_year = []
action_num = 0
#print(actions.actions.index[action_num])
# NOTE(review): rebinding `players` to an instance shadows the class name.
players = players()
stat = 0
beg_cash = 1000
# One playerC per strategy ratio, stepping 0.0005 per player.
for t in range(0, 2000):
    # if t is 0:
    #     stat = 0.05
    #     player0 = player(t, beg_cash, stat)
    # else:
    stat = round(t*0.0005, 4)
    player0 = playerC(t, beg_cash, stat)
    players.players.append(player0)
# Replay every trading day: record indicators, append the closing price
# (column 3) to `history`, then let all players trade.
for i in range(len(data.index)):
    print("Day " + str(i) + ": " + str(data.index[i]))
    print(data.iat[i, 3])
    change.append(players.players[0].change)
    change_week.append(players.players[0].week_change)
    change_year.append(players.players[0].year_change)
    history.append(data.iat[i, 3])
    # print(actions.actions.index[action_num])
    # if(actions.actions.index.size != action_num and actions.actions.empty == False):
    #     if(actions.actions.index[action_num] == data.index[i]):
    #         if(actions.actions.iat[action_num, 0] != 0):
    #             print(type(actions.actions.index[action_num].asm8))
    #             players.dividend(actions.actions.iat[action_num, 0])
    #             print("There is a dividend")
    #             print("The dividend is : " + str(actions.actions.iat[action_num, 0]))
    #             div.append(actions.actions.iat[action_num, 0])
    #         if(actions.actions.iat[action_num, 1] != 0):
    #             players.split(actions.actions.iat[action_num, 1])
    #             print("There is a split")
    #             print("The split is : " + str(actions.actions.iat[action_num, 1]))
    #             split.append(actions.actions.iat[action_num, 1])
    #         print("ACTION HAPPENS HERERERER\n" + str(action_num) + "\n")
    #         action_num = action_num + 1
    players.make_move()
# Summary statistics of the recorded series.
print("\nHistory : ")
print(history)
print("~average: " + str(mean(history)))
print("~min : " + str(min(history)))
print("~max : " + str(max(history)))
print("\nChange : ")
print(change)
print("~average: " + str(mean(change)))
print("~min : " + str(min(change)))
print("~max : " + str(max(change)))
if div:
    print("\ndiv : ")
    print(div)
    print("~average: " + str(mean(div)))
    print("~min : " + str(min(div)))
    print("~max : " + str(max(div)))
else:
    print("\nNo Dividends")
if split:
    print("\nsplit : ")
    print(split)
    print("~average: " + str(mean(split)))
    print("~min : " + str(min(split)))
    print("~max : " + str(max(split)))
else:
    print("\nNo Splits")
file1.write(listToString(history))
print(actions.info)
print("Best Player:")
print(players.best().print())
print("Worst Player:")
print(players.worst().print())
# Plots: daily/weekly/yearly change, raw price, and final net worth per strategy.
plt.plot(data.index, change, label='Day-Change')
plt.xlabel('Day', fontsize=18)
plt.ylabel('Day-Change', fontsize=16)
plt.show()
plt.plot(data.index, change_week, label='Week-Change')
plt.xlabel('Day', fontsize=18)
plt.ylabel('Week-Change', fontsize=16)
plt.show()
plt.plot(data.index, change_year, label='Year-Change')
plt.xlabel('Day', fontsize=18)
plt.ylabel('Year-Change', fontsize=16)
plt.show()
#print(players.print())
plt.plot(data.index, history, label='Price')
plt.xlabel('Day', fontsize=18)
plt.ylabel('Open-Price', fontsize=16)
plt.show()
scores = players.total()
x = createList(0, len(scores)-1, 0.0005, 4)
plt.plot(x, scores, label='Cash')
plt.xlabel('Sell/Buy-Ratio', fontsize=18)
plt.ylabel('Ending-Cash-1000Start', fontsize=16)
plt.show()
|
# Text and categorical data problems!!
# Categorical and text data can often be some of the messiest parts of a dataset due to their unstructured nature. In this chapter, you’ll learn how to fix whitespace and capitalization inconsistencies in category labels, collapse multiple categories into one, and reformat strings for consistency.
# Finding consistency
# In this exercise and throughout this chapter, you'll be working with the airlines DataFrame which contains survey responses on the San Francisco Airport from airline customers.
# The DataFrame contains flight metadata such as the airline, the destination, waiting times as well as answers to key questions regarding cleanliness, safety, and satisfaction. Another DataFrame named categories was created, containing all correct possible values for the survey columns.
# In this exercise, you will use both of these DataFrames to find survey answers with inconsistent values, and drop them, effectively performing an outer and inner join on both these DataFrames as seen in the video exercise. The pandas package has been imported as pd, and the airlines and categories DataFrames are in your environment.
# Print categories DataFrame
# NOTE(review): `categories` and `airlines` come from the DataCamp exercise
# environment; they are not defined in this file.
print(categories)
# Print unique values of survey columns in airlines
print('Cleanliness: ', airlines['cleanliness'].unique(), "\n")
print('Safety: ', airlines['safety'].unique(), "\n")
print('Satisfaction: ', airlines['satisfaction'].unique(), "\n")
# Inconsistent categories
# In this exercise, you'll be revisiting the airlines DataFrame from the previous lesson.
# As a reminder, the DataFrame contains flight metadata such as the airline, the destination, waiting times as well as answers to key questions regarding cleanliness, safety, and satisfaction on the San Francisco Airport.
# In this exercise, you will examine two categorical columns from this DataFrame, dest_region and dest_size respectively, assess how to address them and make sure that they are cleaned and ready for analysis. The pandas package has been imported as pd, and the airlines DataFrame is in your environment.
# Print unique values of both columns
# (inspect dest_region / dest_size for whitespace and capitalization issues)
print(airlines['dest_region'].unique())
print(airlines['dest_size'].unique())
# Remapping categories
# To better understand survey respondents from airlines, you want to find out if there is a relationship between certain responses and the day of the week and wait time at the gate.
# The airlines DataFrame contains the day and wait_min columns, which are categorical and numerical respectively. The day column contains the exact day a flight took place, and wait_min contains the amount of minutes it took travelers to wait at the gate. To make your analysis easier, you want to create two new categorical variables:
# wait_type: 'short' for 0-60 min, 'medium' for 60-180 and long for 180+
# day_week: 'weekday' if day is in the weekday, 'weekend' if day is in the weekend.
# The pandas and numpy packages have been imported as pd and np. Let's create some new categorical data!
# Create ranges for categories
label_ranges = [0, 60, 180, np.inf]
label_names = ['short', 'medium', 'long']
# Create wait_type column
# pd.cut buckets wait_min into the three labelled intervals defined above.
airlines['wait_type'] = pd.cut(airlines['wait_min'], bins = label_ranges,
labels = label_names)
# Create mappings and replace
# Collapse the seven day names into a weekday/weekend indicator.
mappings = {'Monday':'weekday', 'Tuesday':'weekday', 'Wednesday': 'weekday',
'Thursday': 'weekday', 'Friday': 'weekday',
'Saturday': 'weekend', 'Sunday': 'weekend'}
airlines['day_week'] = airlines['day'].replace(mappings)
# Removing titles and taking names
# While collecting survey respondent metadata in the airlines DataFrame, the full name of respondents was saved in the full_name column. However upon closer inspection, you found that a lot of the different names are prefixed by honorifics such as "Dr.", "Mr.", "Ms." and "Miss".
# Your ultimate objective is to create two new columns named first_name and last_name, containing the first and last names of respondents respectively. Before doing so however, you need to remove honorifics.
# The airlines DataFrame is in your environment, alongside pandas as pd.
# Replace "Dr." with empty string ""
# regex=False: Series.str.replace historically treated the pattern as a
# regex, where "." matches ANY character (so "Dr." would also strip "Drs",
# "Drx", ...).  Match the literal text instead.
airlines['full_name'] = airlines['full_name'].str.replace("Dr.", "", regex=False)
# Replace "Mr." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Mr.", "", regex=False)
# Replace "Miss" with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Miss", "", regex=False)
# Replace "Ms." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Ms.", "", regex=False)
# Assert that full_name has no honorifics.  str.contains IS a regex match,
# so the dots must be escaped or the check produces false positives.
assert airlines['full_name'].str.contains(r'Ms\.|Mr\.|Miss|Dr\.').any() == False
# Keeping it descriptive
# To further understand travelers' experiences in the San Francisco Airport, the quality assurance department sent out a qualitative questionnaire to all travelers who gave the airport the worst score on all possible categories. The objective behind this questionnaire is to identify common patterns in what travelers are saying about the airport.
# Their response is stored in the survey_response column. Upon a closer look, you realized a few of the answers gave the shortest possible character amount without much substance. In this exercise, you will isolate the responses with a character count higher than 40 , and make sure your new DataFrame contains responses with 40 characters or more using an assert statement.
# The airlines DataFrame is in your environment, and pandas is imported as pd.
# Store length of each row in survey_response column
resp_length = airlines['survey_response'].str.len()
# Find rows in airlines where resp_length > 40
# NOTE(review): the prose asks for "40 characters or more" but the filter
# keeps strictly-greater-than 40; confirm which boundary is intended.
airlines_survey = airlines[resp_length > 40]
# Assert minimum survey_response length is > 40
assert airlines_survey['survey_response'].str.len().min() > 40
# Print new survey_response column
print(airlines_survey['survey_response'])
|
from DBHelper import DBHelper
from helper_functions import *
from Product import *
from Customer import *
class Receipt:
    """CRUD operations over the receipt and receipt_line_item tables.

    NOTE(review): every statement is built with str.format and therefore
    open to SQL injection; DBHelper should grow parameterized queries.
    """
    def __init__(self):
        self.db = DBHelper()
    def __updateReceiptTotal(self, receiptNo):
        # Recompute receipt.total_receipt as the sum of its line items.
        sql = ("UPDATE receipt SET "
            "total_receipt = new_total_receipt"
            " FROM (SELECT rli.receipt_no , SUM(rli.amount_paid_here) As new_total_receipt From receipt_line_item rli GROUP BY rli.receipt_no) rli "
            " Where receipt.receipt_no = rli.receipt_no "
            "AND receipt.receipt_no = '{}' ".format(receiptNo))
        self.db.execute (sql)
    def __updateReceiptAmountUnpaid(self, receiptNo):
        # Bug fix: the SET column (amount_unpaid) did not match the subquery
        # alias (new_total_receipt), and the WHERE clause referenced the
        # receipt table although receipt_line_item is the update target.
        # TODO(review): confirm the intended semantics of amount_unpaid.
        sql = ("UPDATE receipt_line_item SET "
            " amount_unpaid = new_amount_unpaid"
            " FROM (SELECT rli.receipt_no , SUM(rli.amount_paid_here) As new_amount_unpaid From receipt_line_item rli GROUP BY rli.receipt_no) rli "
            " Where receipt_line_item.receipt_no = rli.receipt_no "
            "AND receipt_line_item.receipt_no = '{}' ".format(receiptNo))
        self.db.execute (sql)
    def __updateLineItem(self, receiptNo, receiptLineItemList):
        # Replace all line items of the receipt, then refresh its total.
        self.db.execute ("DELETE FROM receipt_line_item WHERE receipt_no = '{}' ".format(receiptNo))
        for lineItem in receiptLineItemList:
            self.db.execute ("INSERT INTO receipt_line_item (receipt_no, invoice_no, amount_paid_here) VALUES ('{}' ,'{}','{}')".format(receiptNo,lineItem["Invoice No"],lineItem["Amount Paid Here"]))
        self.__updateReceiptTotal(receiptNo)
    def create(self, receiptNo, receiptDate, customerCode, paymenMethod, paymenReference, remark, receiptLineItemList):
        """Insert a new receipt with its line items.

        Returns {'Is Error': bool, 'Error Message': str}.
        NOTE(review): receiptDate is interpolated unquoted -- callers appear
        to pass a pre-quoted SQL literal; confirm.
        """
        data, columns = self.db.fetch ("SELECT * FROM receipt WHERE receipt_no = '{}' ".format(receiptNo))
        if len(data) > 0:
            return {'Is Error': True, 'Error Message': "Receipt No '{}' already exists. Cannot Create. ".format(receiptNo)}
        else:
            self.db.execute ("INSERT INTO receipt (receipt_no, receipt_date, customer_code, payment_method, payment_reference, remark) VALUES ('{}' ,{} ,'{}' ,'{}' ,'{}' ,'{}')".format(receiptNo,receiptDate,customerCode,paymenMethod,paymenReference,remark))
            self.__updateLineItem(receiptNo, receiptLineItemList)
        return {'Is Error': False, 'Error Message': ""}
    def read(self, receiptNo):
        """Return (status_dict, receipt_dict) for one receipt header."""
        data, columns = self.db.fetch ("SELECT receipt_no, receipt_date, customer_code, payment_method, payment_reference, remark FROM receipt WHERE receipt_no = '{}' ".format(receiptNo))
        if len(data) > 0:
            retReceipt = row_as_dict(data, columns)
        else:
            return ({'Is Error': True, 'Error Message': "Receipt No '{}' not found. Cannot Read.".format(receiptNo)},{})
        return ({'Is Error': False, 'Error Message': ""},retReceipt)
    def update(self, receiptNo, newReceiptDate, newCustomerCode, newPaymenMethod, newPaymenReference, newRemark ,newReceiptLineItemList):
        """Update a receipt header and replace its line items.

        Returns {'Is Error': bool, 'Error Message': str}.
        """
        data, columns = self.db.fetch ("SELECT * FROM receipt WHERE receipt_no = '{}' ".format(receiptNo))
        if len(data) > 0:
            self.db.execute ("UPDATE receipt SET receipt_date = {}, customer_code = '{}', payment_method = '{}', payment_reference = '{}', remark= '{}' WHERE receipt_no = '{}' ".format(newReceiptDate, newCustomerCode, newPaymenMethod, newPaymenReference, newRemark,receiptNo))
            self.__updateLineItem(receiptNo, newReceiptLineItemList)
        else:
            return {'Is Error': True, 'Error Message': "Receipt No '{}' not found. Cannot Update.".format(receiptNo)}
        return {'Is Error': False, 'Error Message': ""}
    def delete(self, receiptNo):
        """Delete a receipt and all of its line items.

        Returns {'Is Error': bool, 'Error Message': str}.
        """
        data, columns = self.db.fetch ("SELECT * FROM receipt WHERE receipt_no = '{}' ".format(receiptNo))
        if len(data) > 0:
            self.db.execute ("DELETE FROM receipt WHERE receipt_no = '{}' ".format(receiptNo))
            self.db.execute ("DELETE FROM receipt_line_item WHERE receipt_no = '{}' ".format(receiptNo))
        else:
            return {'Is Error': True, 'Error Message': "Receipt No '{}' not found. Cannot Delete".format(receiptNo)}
        return {'Is Error': False, 'Error Message': ""}
    def dump(self):
        """Return all receipt headers (joined to customer) as a dict.

        Bug fix: previously called the bare name `db.fetch`, which raised
        NameError -- the helper lives on self.db.
        """
        data, columns = self.db.fetch ('SELECT r.receipt_no as "Receipt No", r.receipt_date as "Receipt Date", r.customer_code as "Customer Code", r.payment_method as "Payment Method", r.payment_reference as "Payment Reference", r.remark as "Remark" FROM receipt r JOIN customer c ON r.customer_code = c.customer_code')
        return row_as_dict(data, columns)
    def update_receipt_line(self, receiptNo, invoiceNo, newAmountPaid):
        """Change the amount paid on one line item, then refresh the total."""
        data, columns = self.db.fetch ("SELECT * FROM receipt_line_item WHERE receipt_no = '{}' AND invoice_no = '{}' ".format(receiptNo, invoiceNo))
        if len(data) > 0:
            self.db.execute ("UPDATE receipt_line_item SET amount_paid_here = {} WHERE receipt_no = '{}' AND invoice_no = '{}' ".format(newAmountPaid, receiptNo, invoiceNo))
            self.__updateReceiptTotal(receiptNo)
        else:
            return {'Is Error': True, 'Error Message': "Invoice Code '{}' not found in Receipt No '{}'. Cannot Update.".format(invoiceNo, receiptNo)}
        return {'Is Error': False, 'Error Message': ""}
    def delete_receipt_line(self, receiptNo, invoiceNo):
        """Remove one line item, then refresh the receipt total."""
        data, columns = self.db.fetch ("SELECT * FROM receipt_line_item WHERE receipt_no = '{}' AND invoice_no = '{}' ".format(receiptNo, invoiceNo))
        if len(data) > 0:
            self.db.execute ("DELETE FROM receipt_line_item WHERE receipt_no = '{}' AND invoice_no = '{}' ".format(receiptNo, invoiceNo))
            self.__updateReceiptTotal(receiptNo)
        else:
            return {'Is Error': True, 'Error Message': "Invoice Code '{}' not found in Receipt No '{}'. Cannot Delete.".format(invoiceNo, receiptNo)}
        return {'Is Error': False, 'Error Message': ""}
|
import numpy

# Read a whitespace-separated line of numbers and print the element-wise
# floor, ceil, and round-to-nearest-even of the resulting array.
values = numpy.array(input().split(), dtype=float)
numpy.set_printoptions(sign=' ')
for rounding in (numpy.floor, numpy.ceil, numpy.rint):
    print(rounding(values))
|
import logging
import os
from autumn.core.db import Database
from autumn.core.plots.plotter import FilePlotter
from . import plots
logger = logging.getLogger(__name__)
def plot_uncertainty(targets: dict, powerbi_db_path: str, output_dir: str):
    """
    works on powerbi version
    Assumes a COVID model.

    Plots every uncertainty output for every scenario found in the PowerBI
    database, one sub-directory of plots per output type.
    """
    os.makedirs(output_dir, exist_ok=True)
    db = Database(powerbi_db_path)
    uncertainty_df = db.query("uncertainty")
    outputs = uncertainty_df["type"].unique().tolist()
    for output_name in outputs:
        this_output_dir = os.path.join(output_dir, output_name)
        os.makedirs(this_output_dir, exist_ok=True)
        plotter = FilePlotter(this_output_dir, targets)
        scenario_idxs = uncertainty_df["scenario"].unique().tolist()
        for scenario_idx in scenario_idxs:
            logger.info(
                "Plotting uncertainty for output %s, scenario %s", output_name, scenario_idx
            )
            # Bug fix: the original rebound `scenario_idxs` -- the very list
            # being iterated -- inside the loop body. Iteration happened to
            # survive (the for-loop holds the original object), but the
            # shadowing was fragile; use a distinct name for the plot list.
            if scenario_idx == 0:
                # Just plot the baseline scenario for the full time period.
                plot_scenario_idxs = [0]
                x_low = 0
            else:
                # Plot the baseline compared to the scenario, but only for the
                # time period where the scenario is active.
                plot_scenario_idxs = [0, scenario_idx]
                mask = uncertainty_df["scenario"] == scenario_idx
                x_low = uncertainty_df[mask]["time"].min()
            plots.plot_timeseries_with_uncertainty(
                plotter,
                uncertainty_df,
                output_name,
                plot_scenario_idxs,
                targets,
                x_low=x_low,
            )
|
#!/usr/bin/env python
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
import utils
import create_map
# Publishes the visibility-graph edges (thin red lines) and the shortest
# path (thick yellow line) as RViz Marker messages on /vgraph_markers.
rospy.init_node('line')
pub_line_min_dist = rospy.Publisher('/vgraph_markers', Marker, queue_size=1)
rospy.loginfo('Publishing line')
# Waypoints come back in image coordinates; convert every edge to map frame.
edges, shortest_path = utils.get_waypoints()
map_edges = []
for img_edge in edges:
    map_edge = create_map.img2map(img_edge)
    map_edges.append(map_edge.tolist())
edges = map_edges
shortest_path = create_map.img2map(shortest_path).tolist()
# Map coordinates are divided by this factor to obtain metres.
resolution = 100.
id_count = 0
while not rospy.is_shutdown():
    # ================== Initialize marker ==================
    marker = Marker()
    marker.header.frame_id = "/map"
    marker.type = marker.LINE_STRIP
    marker.action = marker.ADD
    # Identity pose: points are expressed directly in the map frame.
    marker.pose.orientation.x = 0.0
    marker.pose.orientation.y = 0.0
    marker.pose.orientation.z = 0.0
    marker.pose.orientation.w = 1.0
    marker.pose.position.x = 0.0
    marker.pose.position.y = 0.0
    marker.pose.position.z = 0.0
    # ================== Draw graph ==================
    # Thin red line strips, one marker (unique id) per graph edge.
    marker.scale.x = 0.02
    marker.scale.y = 0.02
    marker.scale.z = 0.02
    marker.color.a = 1.0
    marker.color.r = 1.0
    marker.color.g = 0.0
    marker.color.b = 0.0
    for edge in edges:
        id_count += 1
        marker.id = id_count
        marker.points = []
        start = Point()
        start.x = float(edge[0][0])/resolution
        start.y = float(edge[0][1])/resolution
        marker.points.append(start)
        end = Point()
        end.x = float(edge[1][0])/resolution
        end.y = float(edge[1][1])/resolution
        marker.points.append(end)
        # Duplicate points in the marker.points list will cause some anomalies,
        # must publish the marker and empty the list at each iteration
        pub_line_min_dist.publish(marker)
        rospy.sleep(0.01)
    # ================== Draw shortest path ==================
    # One thicker yellow line strip through every path waypoint.
    marker.scale.x = 0.05
    marker.scale.y = 0.05
    marker.scale.z = 0.05
    marker.color.a = 1.0
    marker.color.r = 1.0
    marker.color.g = 1.0
    marker.color.b = 0.0
    id_count += 1
    marker.id = id_count
    marker.points = []
    for point in shortest_path:
        pt = Point()
        # NOTE(review): "(float)(x)" is just float(x) -- a C-style cast habit.
        pt.x = (float)(point[0])/resolution
        pt.y = (float)(point[1])/resolution
        marker.points.append(pt)
    pub_line_min_dist.publish(marker)
    rospy.sleep(0.01)
    # Everything published once; exit the loop (and the node).
    break
|
import pytest
import pdb
test_id = f"{'2.7.3':<10} - Can Be Term Server"
test_weight = 2
def test_test_act_as_term_server(host):
assert 0 == 1, "TODO - Write Test"
|
class OperaException(Exception):
    """Base class for all opera-service errors."""
class MethodNotFound(OperaException):
    """Raised when the requested RPC method does not exist."""
class ServiceNotFound(OperaException):
    """Raised when the requested service does not exist."""
|
def attackQueen(board , qr , qc):
    """Count the squares a queen at (qr, qc) can attack on an n x n board.

    ``board`` is a 1-indexed list of length n+1 where board[r] == c marks an
    obstacle at row r, column c (0 means no obstacle recorded for row r).
    NOTE(review): this representation holds at most one obstacle per row --
    confirm it matches the problem's input guarantees.

    Bug fix: the original body was syntactically invalid (``> =``, a bare
    ``if``) and only handled vertical movement; this walks all 8 queen
    directions, stopping at the board edge or the first obstacle.
    """
    n = len(board) - 1  # board is 1-indexed; index 0 is unused
    count = 0
    directions = ((1, 0), (-1, 0), (0, 1), (0, -1),
                  (1, 1), (1, -1), (-1, 1), (-1, -1))
    for dr, dc in directions:
        r, c = qr + dr, qc + dc
        while 1 <= r <= n and 1 <= c <= n:
            if board[r] == c:  # obstacle blocks the rest of this direction
                break
            count += 1
            r += dr
            c += dc
    return count
# Read board size and obstacle count, then the queen and obstacle positions,
# and print how many squares the queen attacks.
n, k = map(int, input().strip().split(' '))
board = [0] * (n + 1)
qr, qc = map(int, input().strip().split(' '))
board[qr] = qc
for _ in range(k):
    r, c = map(int, input().split(' '))
    board[r] = c
print(attackQueen(board, qr, qc))
|
def InitCharts(algorithm):
    """Register the custom charts: performance, exposure, country exposure.

    Bug fix: the 'Country Exposure' chart was created and its series added,
    but it was never registered with algorithm.AddChart, so it never
    appeared in the results.
    """
    performance_plot = Chart('Performance Breakdown')
    performance_plot.AddSeries(Series('Total Fees', SeriesType.Line, 0))
    performance_plot.AddSeries(Series('Total Gross Profit', SeriesType.Line, 0))
    algorithm.AddChart(performance_plot)
    exposure_plot = Chart('Exposure/Leverage')
    exposure_plot.AddSeries(Series('Gross', SeriesType.Line, 0))
    exposure_plot.AddSeries(Series('Net', SeriesType.Line, 0))
    algorithm.AddChart(exposure_plot)
    country_exposure_plot = Chart('Country Exposure')
    # One series per country covered by the algorithm's ETF universe.
    for etf, country in algorithm.etf_country.items():
        country_exposure_plot.AddSeries(Series(country, SeriesType.Line, 0))
    algorithm.AddChart(country_exposure_plot)
def PlotPerformanceChart(algorithm):
    """Plot cumulative fees and gross profit on the performance chart."""
    portfolio = algorithm.Portfolio
    for series_name, value in (('Total Fees', portfolio.TotalFees),
                               ('Total Gross Profit', portfolio.TotalProfit)):
        algorithm.Plot('Performance Breakdown', series_name, value)
def PlotExposureChart(algorithm):
    """Plot gross and net exposure as fractions of total portfolio value."""
    long_val = 0
    short_val = 0
    for _, holding in algorithm.Portfolio.items():
        if not holding.Invested:
            continue
        if holding.IsLong:
            long_val += holding.AbsoluteHoldingsValue
        elif holding.IsShort:
            short_val += holding.AbsoluteHoldingsValue
    total_equity = algorithm.Portfolio.TotalPortfolioValue
    algorithm.Plot('Exposure/Leverage', 'Gross', (long_val + short_val) / total_equity)
    algorithm.Plot('Exposure/Leverage', 'Net', (long_val - short_val) / total_equity)
def PlotCountryExposureChart(algorithm):
    """Plot each ETF's holdings share of total holdings, keyed by country."""
    total_holdings = algorithm.Portfolio.TotalHoldingsValue
    for etf, country in algorithm.etf_country.items():
        weight = algorithm.Securities[etf].Holdings.HoldingsValue / total_holdings
        algorithm.Plot('Country Exposure', country, weight)
from svmutil import *
import subprocess
# Load the shuffled spambase data: each line is comma-separated features with
# the class label as the last field.
# Bug fix: the file was previously opened a first time without ever being
# closed (a leaked handle made redundant by the `with` block below).
X_ORIGINAL = []
Y_ORIGINAL = []
with open("spambase.data.shuffled", "r") as shuffled_file:
    for line in shuffled_file:
        values = line.split(',')
        Y_ORIGINAL.append(int(values[-1]))
        X_ORIGINAL.append([float(v) for v in values[:-1]])
# 3450-row train/test split (75% of the 4601-sample spambase set).
x_train = X_ORIGINAL[: 3450]
y_train = Y_ORIGINAL[: 3450]
x_test = X_ORIGINAL[3450:]
y_test = Y_ORIGINAL[3450:]
def cross_validation_split(x, y, n_folds):
    """ Split data set for n folds. Need for k-fold validation

    Returns a list of ((x_train, y_train), (x_test, y_test)) tuples, one per
    fold. Samples beyond n_folds * fold_size are dropped.

    Bug fixes: the fold count was hard-coded to 10 regardless of n_folds,
    and '/' produced a float fold size under Python 3, breaking slicing;
    use '//' and n_folds throughout.
    """
    dataset_split = []
    fold_size = len(x) // n_folds
    for i in range(n_folds):
        x_test = x[i * fold_size:(i + 1) * fold_size]
        y_test = y[i * fold_size:(i + 1) * fold_size]
        # Training data is everything before and after the test fold
        # (slices copy, so the caller's lists are never mutated).
        x_train = x[0:i * fold_size]
        x_train.extend(x[(i + 1) * fold_size: n_folds * fold_size])
        y_train = y[0:i * fold_size]
        y_train.extend(y[(i + 1) * fold_size: n_folds * fold_size])
        print(len(x_test))
        print(len(y_test))
        print(len(x_train))
        print(len(y_train))
        dataset_split.append(((x_train, y_train), (x_test, y_test)))
    return dataset_split
def k_fold(x_train, y_train):
    # 10-fold cross-validated grid search over polynomial-kernel SVM
    # hyper-parameters: degree d in 1..4 and cost c = 2**k for k in -10..10.
    # Prints the mean accuracy for every (d, c) combination.
    dataset_split = cross_validation_split(x_train, y_train, 10)
    for d in [1, 2, 3, 4]:
        for k in range(-10, 11, 1):
            c = 2 ** k
            scores = []
            for dataset in dataset_split:
                # dataset is ((x_train, y_train), (x_test, y_test));
                # libsvm's svm_train takes labels first, then features.
                print dataset[0][1].__len__()
                print dataset[0][0].__len__()
                model = svm_train(dataset[0][1], dataset[0][0], "-s 0 -t 1 -d %d -c %f" % (d, c))
                p_label, p_acc, p_val = svm_predict(dataset[1][1], dataset[1][0], model)
                scores.append(p_acc[0])  # p_acc[0] is the accuracy percentage
            score = sum(scores) / scores.__len__()
            print "d: %d, c: %f, score: %f" % (d, c, score)
# Run the grid search on the pre-scaled libsvm-format training file.
y_train_scaled, x_train_scaled = svm_read_problem('data.libsvmformat.train.scaled')
k_fold(x_train_scaled, y_train_scaled)
|
import datetime
import webapp2
from google.appengine.api import datastore
from google.appengine.ext import db
class CronObj(db.Model):
    # Singleton-style entity (key_name='cron_key') recording the last time
    # the cron job pinged /python/cron.
    last_update = db.DateTimeProperty()
class CronHandler(webapp2.RequestHandler):
    """Records cron pings and answers freshness queries about them.

    GET with no 'query' param: store the current time (ping from crontab).
    GET with 'query': 200 if the stored stamp is fresh, else 404.
    """
    def get(self):
        self.response.headers['Content-Type'] = "application/json"
        query = self.request.get('query')
        if query is None or query == '':
            # This request is coming from crontab, update timestamp
            cron_obj = CronObj(key_name='cron_key')
            cron_obj.last_update = datetime.datetime.now()
            cron_obj.put()
        else:
            # Hawkeye is querying value
            cron_key = db.Key.from_path('CronObj', 'cron_key')
            cron_obj = db.get(cron_key)
            if cron_obj is None:
                self.response.set_status(404)
            else:
                if self.is_valid_timestamp(cron_obj.last_update):
                    self.response.set_status(200)
                else:
                    self.response.set_status(404)

    def is_valid_timestamp(self, last_update):
        """Return True iff last_update is at most 61 seconds old.

        Bug fix: timedelta.seconds ignores the .days component, so a stamp
        a whole day (plus under a minute) old looked fresh; total_seconds()
        measures the full elapsed interval.
        """
        time_now = datetime.datetime.now()
        time_delta = time_now - last_update
        if time_delta.total_seconds() > 61:
            return False
        else:
            return True
class CronTargetHandler(webapp2.RequestHandler):
    """Verifies that the cron-created CronTarget entity holds 'success'."""
    def get(self):
        # Fetch the fixed-key entity and fail loudly on unexpected content.
        key = datastore.Key.from_path('CronTarget', 'cron-target-entity')
        entity = datastore.Get(key)
        if entity['content'] != 'success':
            raise Exception('Unexpected CronTarget content')
# Route table consumed by the webapp2 WSGI application (built elsewhere).
urls = [
    ('/python/cron', CronHandler),
    ('/python/cron-target', CronTargetHandler)
]
|
import configparser
import os
import sys
import gamepedia_client
class GamepediaPagesRW:
    """Moves Gamepedia wiki pages between the wiki and the local setup/ tree.

    download/backup/archivate pull wikitext into local files; upload/restore
    push local files back to the wiki via GamepediaClient.
    """
    gc = None  # lazily-created GamepediaClient instance

    def create_gamepedia_client(self, username=None, password=None):
        """Create new instance of GamepediaClient (required for name attribution).

        Credentials default to the [account] section of the global cfg_file.
        """
        global cfg_file
        if username is None:
            username = cfg_file['account']['username']
        if password is None:
            password = cfg_file['account']['password']
        self.gc = gamepedia_client.GamepediaClient(username=username, password=password)

    def download(self, path, page):
        """Download wiki `page` and save its wikitext to local file `path`."""
        if self.gc is None:
            self.create_gamepedia_client()
        res = self.gc.read(page)
        with open(path, 'w') as f:
            f.write(res)

    def upload(self, path, page):
        """Write the text of local file `path` to wiki `page`."""
        if self.gc is None:
            self.create_gamepedia_client()
        with open(path, 'r') as f:
            res = f.read()
        self.gc.write(page, res)

    def backup(self):
        """Backup selection of pages."""
        self.backup_galleries_cards()

    def archivate(self):
        """Archive (download) templates, lore, lists, card and gallery pages."""
        self.download('setup/Template/Card_stats', 'Template:Card_stats')
        self.download('setup/Template/Cardlist', 'Template:Cardlist')
        self.download('setup/Template/Card_nav', 'Template:Card_nav')
        self.download('setup/Template/Codexcontentlist', 'Template:Codexcontentlist')
        self.download('setup/Lore/The_world', 'The_world')
        self.download('setup/Lore/Factions', 'Factions')
        self.download('setup/Lore/The_player,_the_orbs,_the_memoria', 'The_player,_the_orbs,_the_memoria')
        self.download('setup/Lore/The_Faëria', 'The_Faëria')
        self.download('setup/Template/Lake', 'Template:Lake')
        self.download('setup/Template/Mountain', 'Template:Mountain')
        self.download('setup/Template/Forest', 'Template:Forest')
        self.download('setup/Template/Desert', 'Template:Desert')
        self.download('setup/Template/Dpl_lake', 'Template:dpl_lake')
        self.download('setup/Template/Dpl_mountain', 'Template:dpl_mountain')
        self.download('setup/Template/Dpl_forest', 'Template:dpl_forest')
        self.download('setup/Template/Dpl_desert', 'Template:dpl_desert')
        self.download('setup/Template/Dpl_life', 'Template:Lif')
        self.download('setup/Template/Dpl_power', 'Template:Pow')
        self.download('setup/Template/Dpl_name', 'Template:dpl_name')
        self.download('setup/Template/Dpl_display', 'Template:dpl_display')
        self.download('setup/Template/Rarity', 'Template:Rarity')
        self.download('setup/Template/Common', 'Template:Common')
        self.download('setup/Template/Rare', 'Template:Rare')
        self.download('setup/Template/Epic', 'Template:Epic')
        self.download('setup/Template/Legendary', 'Template:Legendary')
        self.download('setup/List/List_of_Cards', 'List_of_Cards')
        self.download('setup/List/List_of_Blue_cards', 'List_of_Blue_cards')
        self.download('setup/List/List_of_Green_cards', 'List_of_Green_cards')
        self.download('setup/List/List_of_Red_cards', 'List_of_Red_cards')
        self.download('setup/List/List_of_Yellow_cards', 'List_of_Yellow_cards')
        self.download('setup/List/List_of_Human_cards', 'List_of_Human_cards')
        self.download('setup/List/List_of_Common_cards', 'List_of_Common_cards')
        self.download('setup/List/List_of_Rare_cards', 'List_of_Rare_cards')
        self.download('setup/List/List_of_Epic_cards', 'List_of_Epic_cards')
        self.download('setup/List/List_of_Legendary_cards', 'List_of_Legendary_cards')
        self.download('setup/List/List_of_Creature_cards', 'List_of_Creature_cards')
        self.download('setup/List/List_of_Structure_cards', 'List_of_Structure_cards')
        self.download('setup/List/List_of_Event_cards', 'List_of_Event_Cards')
        self.download('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
        self.download('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
        self.download('setup/List/List_of_Options_cards', 'List_of_Options_cards')
        self.download('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
        self.download('setup/List/List_of_Production_cards', 'List_of_Production_cards')
        self.download('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
        self.download('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
        # NOTE(review): 'List_of_Taund_cards' looks like a typo for "Taunt",
        # but it may be the page's actual name on the wiki -- confirm.
        self.download('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
        self.download('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
        self.download('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
        self.download('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
        self.download('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
        self.download('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
        self.download('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
        self.download('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
        self.download('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
        self.download('setup/Cards/By Color/Human', 'Human')
        self.download('setup/Cards/By Color/Blue', 'Blue')
        self.download('setup/Cards/By Color/Green', 'Green')
        self.download('setup/Cards/By Color/Red', 'Red')
        self.download('setup/Cards/By Color/Yellow', 'Yellow')
        self.download('setup/Cards/By Type/Creature', 'Creature')
        self.download('setup/Cards/By Type/Event', 'Event')
        self.download('setup/Cards/By Type/Structure', 'Structure')
        self.download('setup/Cards/By Rarity/Common', 'Common')
        self.download('setup/Cards/By Rarity/Rare', 'Rare')
        self.download('setup/Cards/By Rarity/Epic', 'Epic')
        self.download('setup/Cards/By Rarity/Legendary', 'Legendary')
        self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
        self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
        self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
        self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
        self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')

    def restore(self):
        """Restore selection of default pages."""
        self.restore_cards_by()
        self.restore_galleries_cards()

    def restore_cards_by(self):
        """Restore the Cards By-Color/Type/Rarity pages."""
        self.upload('setup/Cards/By Color/Human', 'Human')
        self.upload('setup/Cards/By Color/Blue', 'Blue')
        self.upload('setup/Cards/By Color/Green', 'Green')
        self.upload('setup/Cards/By Color/Red', 'Red')
        self.upload('setup/Cards/By Color/Yellow', 'Yellow')
        self.upload('setup/Cards/By Type/Creature', 'Creature')
        self.upload('setup/Cards/By Type/Event', 'Event')
        self.upload('setup/Cards/By Type/Structure', 'Structure')
        self.upload('setup/Cards/By Rarity/Common', 'Common')
        self.upload('setup/Cards/By Rarity/Rare', 'Rare')
        self.upload('setup/Cards/By Rarity/Epic', 'Epic')
        self.upload('setup/Cards/By Rarity/Legendary', 'Legendary')

    def restore_templates_changelog(self):
        """Restore the Changelog templates."""
        self.upload('setup/Template/Changelog/Cl_codexcode1', 'Template:Cl_codexcode1')
        self.upload('setup/Template/Changelog/Cl_codexcode2', 'Template:Cl_codexcode2')
        self.upload('setup/Template/Changelog/Cl_codexcode3', 'Template:Cl_codexcode3')
        self.upload('setup/Template/Changelog/Cl_color', 'Template:Cl_color')
        self.upload('setup/Template/Changelog/Cl_desc', 'Template:Cl_desc')
        self.upload('setup/Template/Changelog/Cl_desert', 'Template:Cl_desert')
        self.upload('setup/Template/Changelog/Cl_faeria', 'Template:Cl_faeria')
        self.upload('setup/Template/Changelog/Cl_forest', 'Template:Cl_forest')
        self.upload('setup/Template/Changelog/Cl_lake', 'Template:Cl_lake')
        self.upload('setup/Template/Changelog/Cl_life', 'Template:Cl_life')
        self.upload('setup/Template/Changelog/Cl_mountain', 'Template:Cl_mountain')
        self.upload('setup/Template/Changelog/Cl_name', 'Template:Cl_name')
        self.upload('setup/Template/Changelog/Cl_power', 'Template:Cl_power')
        self.upload('setup/Template/Changelog/Cl_rarity', 'Template:Cl_rarity')
        self.upload('setup/Template/Changelog/Cl_type', 'Template:Cl_type')
        self.upload('setup/Template/Changelog/Cl_unknown', 'Template:Cl_unknown')
        self.upload('setup/Template/Changelog/Cl_info', 'Template:Cl_info')

    def restore_galleries_cards(self):
        """Restore the Card Galleries."""
        self.upload('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
        self.upload('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
        self.upload('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
        self.upload('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
        self.upload('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
        self.upload('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
        self.upload('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
        self.upload('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
        self.upload('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
        self.upload('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
        self.upload('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
        self.upload('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')

    def restore_lists_effects(self):
        """Restore the Lists of (effect) cards.

        Bug fix: this restore_* method previously called self.download for
        every page, overwriting the local backups instead of uploading them.
        """
        self.upload('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
        self.upload('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
        self.upload('setup/List/List_of_Options_cards', 'List_of_Options_cards')
        self.upload('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
        self.upload('setup/List/List_of_Production_cards', 'List_of_Production_cards')
        self.upload('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
        self.upload('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
        self.upload('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
        self.upload('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
        self.upload('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
        self.upload('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
        self.upload('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
        self.upload('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
        self.upload('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
        self.upload('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
        self.upload('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
        self.upload('setup/List/List_of_Random_cards', 'List_of_Random_cards')

    def backup_galleries_cards(self):
        """Backup the Card Galleries."""
        self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
        self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
        self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
        self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
        self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
        self.download('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
        self.download('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
        self.download('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
        self.download('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
        self.download('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
        self.download('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
        self.download('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')
if __name__ == '__main__':
    gr = GamepediaPagesRW()
    # NOTE(review): `global` at module level is a no-op; kept for clarity
    # that cfg_file is the module-wide config read by the class methods.
    global cfg_file
    cfg_file = configparser.ConfigParser()
    # Load faeriawikibot.conf from the directory containing this script.
    path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
    path_to_cfg = os.path.join(path_to_cfg, 'faeriawikibot.conf')
    cfg_file.read(path_to_cfg)
    gr.restore()
|
#!/usr/bin/python
import urllib2
import json
# Query the where-the-iss-at REST API for the ISS (NORAD id 25544) and
# print its current visibility field (e.g. daylight vs eclipse).
req = urllib2.Request("https://api.wheretheiss.at/v1/satellites/25544")
response = urllib2.urlopen(req)
obj = json.loads(response.read())
print obj['visibility']
|
from django.db import models
from datetime import datetime
class CounterName(models.Model):
    """A named counter with a description and a favourite flag."""
    title = models.CharField(max_length=250)
    description = models.CharField(max_length=500)
    is_favorite = models.BooleanField(default=False)

    def __str__(self):
        return '{} - {}'.format(self.title, self.description)
# class Chart(models.Model):
# CounterName = models.ForeignKey(CounterName, on_delete=models.CASCADE)
# file_type = models.CharField(max_length=10)
class NetCount(models.Model):
    """A timestamped count sample belonging to a CounterName."""
    CounterName = models.ForeignKey(CounterName, on_delete=models.CASCADE)
    numCount = models.IntegerField(default=0)
    # Bug fix: default=datetime.now() was evaluated once at import time, so
    # every row shared the process-start timestamp. Passing the callable
    # lets Django evaluate it at each insert.
    time = models.DateTimeField(default=datetime.now)
    def __str__(self):
        return str(self.CounterName.title) + ' = ' + str(self.numCount)
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
class MLPAutoEncoder(nn.Module):
    """Fully-connected autoencoder for flattened 32*32*32*12 inputs.

    The encoder compresses the 393216-dim input to a 128-dim code; the
    decoder mirrors it back, ending in Tanh.

    Bug fix: __init__ previously called super(AutoEncoder, self) -- a class
    name that does not exist -- raising NameError on instantiation.
    """
    def __init__(self):
        super(MLPAutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(32*32*32*12, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 256),
            nn.ReLU(True),
            nn.BatchNorm1d(256),
            nn.Linear(256, 128))
        self.decoder = nn.Sequential(
            nn.Linear(128, 256),
            nn.ReLU(True),
            nn.BatchNorm1d(256),
            nn.Linear(256, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 512),
            nn.ReLU(True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 32*32*32*12),
            nn.Tanh())
    def forward(self, x):
        """Encode then decode x, returning the reconstruction."""
        u = self.encoder(x)
        x_prime = self.decoder(u)
        return x_prime
class ConvolutionalAE(nn.Module):
    """3D convolutional autoencoder.

    Encoder: five strided Conv3d layers, each halving the spatial size;
    decoder: four ConvTranspose3d layers growing it back.
    NOTE(review): the per-layer size comments assume a 32^3 input (encoder
    output 1^3, decoder output 32^3) -- confirm against the data pipeline.
    """
    def __init__(self, z_dimension, num_features):
        super(ConvolutionalAE, self).__init__()
        self.z_dim = z_dimension          # channel count of the latent code
        self.num_features = num_features  # input/output channel count
        self.encoder = nn.Sequential(
            nn.Conv3d(self.num_features, self.z_dim // 8, 4,2,1), # 32 -> 16
            nn.BatchNorm3d(self.z_dim // 8),
            nn.LeakyReLU(0.2),
            nn.Conv3d(self.z_dim // 8, self.z_dim // 4, 4,2,1), # 16 -> 8
            nn.BatchNorm3d(self.z_dim // 4),
            nn.LeakyReLU(0.2),
            nn.Conv3d(self.z_dim // 4, self.z_dim // 2, 4,2,1), # 8 -> 4
            nn.BatchNorm3d(self.z_dim // 2),
            nn.LeakyReLU(0.2),
            nn.Conv3d(self.z_dim // 2, self.z_dim, 4,2,1), # 4 -> 2
            nn.BatchNorm3d(self.z_dim),
            nn.LeakyReLU(0.2),
            nn.Conv3d(self.z_dim, self.z_dim, 2,2,0), # 2 -> 1
            )
        self.decoder = nn.Sequential(
            nn.ConvTranspose3d(self.z_dim, self.num_features*16, 4,2,0), # self.num_features*16 x 4 x 4 x 4
            nn.BatchNorm3d(self.num_features*16),
            nn.Tanh(),
            nn.ConvTranspose3d(self.num_features*16, self.num_features*4, 4,2,1), # self.num_features*4 x 8 x 8 x 8
            nn.BatchNorm3d(self.num_features*4),
            nn.Tanh(),
            nn.ConvTranspose3d(self.num_features*4, self.num_features*2, 4,2,1), # self.num_features*2 x 16 x 16 x 16
            nn.BatchNorm3d(self.num_features*2),
            nn.Tanh(),
            nn.ConvTranspose3d(self.num_features*2, self.num_features, 4,2,1), # self.num_features x 32 x 32 x 32
            nn.Sigmoid(),
            )
    def forward(self, x):
        # Encode to the latent code, then decode back to input space.
        u = self.encoder(x)
        x_prime = self.decoder(u)
        return x_prime
# def main():
# if __name__ == '__main__':
# main()
# |
from nmigen import *
from .lib import stream
from .protocol import Transfer
__all__ = ["USBInputArbiter", "USBOutputArbiter"]
class RoundRobin(Elaboratable):
    """Round-robin arbiter: advances `grant` to the next asserted request.

    When `ce` is high, `grant` moves to the nearest requesting index after
    the current one (wrapping around); with no requests it stays put.
    """
    def __init__(self, width):
        self.width = width
        self.request = Signal(width)       # one request line per client
        self.ce = Signal()                 # clock enable: update grant this cycle
        self.grant = Signal(range(width))  # index of the granted client
    def elaborate(self, platform):
        m = Module()
        with m.If(self.ce):
            with m.Switch(self.grant):
                for i in range(self.width):
                    with m.Case(i):
                        # Scan indices i+1 .. i+width-1 (mod width). Later
                        # statements take priority in nMigen, and iteration
                        # is reversed, so the nearest requester after i wins.
                        for j in reversed(range(i+1, i+self.width)):
                            t = j % self.width
                            with m.If(self.request[t]):
                                m.d.sync += self.grant.eq(t)
        return m
class USBInputArbiter(Elaboratable):
    """Multiplexes several per-endpoint IN streams onto one write port.

    port_map maps endpoint number -> (port, max_size, xfer_type); ports with
    valid data are granted in round-robin order.
    """
    def __init__(self, port_map):
        self.port_map = port_map
        self.source_write = stream.Endpoint([("ep", 4)])  # endpoint being written
        self.source_data = stream.Endpoint([("empty", 1), ("data", 8)])  # payload stream
    def elaborate(self, platform):
        m = Module()
        rr = m.submodules.rr = RoundRobin(len(self.port_map))
        # A port requests the arbiter while it has valid data to send.
        for i, (port, max_size, xfer_type) in enumerate(self.port_map.values()):
            m.d.comb += rr.request[i].eq(port.valid)
        with m.If(~self.source_write.valid):
            with m.If(rr.request.bool()):
                # Latch a new grant and start a write transaction.
                m.d.comb += rr.ce.eq(1)
                m.d.sync += self.source_write.valid.eq(1)
        with m.Elif(~self.source_write.ready |
                ~self.source_data.valid | self.source_data.last & self.source_data.ready):
            # End the transaction when the write is not accepted, the data
            # dries up, or the last byte has been consumed.
            m.d.sync += self.source_write.valid.eq(0)
        # Translate the granted index back to its endpoint number.
        ep_map = Array(self.port_map.keys())
        m.d.comb += self.source_write.ep.eq(ep_map[rr.grant])
        with m.Switch(rr.grant):
            for i, (port, max_size, xfer_type) in enumerate(self.port_map.values()):
                with m.Case(i):
                    if xfer_type is Transfer.CONTROL:
                        # Only control transfers carry an 'empty' flag.
                        m.d.comb += self.source_data.empty.eq(port.empty)
                    else:
                        m.d.comb += self.source_data.empty.eq(Const(0))
                    # Pass the granted port's stream straight through.
                    m.d.comb += [
                        self.source_data.valid.eq(port.valid),
                        self.source_data.last.eq(port.last),
                        self.source_data.data.eq(port.data),
                        port.ready.eq(self.source_data.ready)
                    ]
        return m
class USBOutputArbiter(Elaboratable):
    """Demultiplexes the shared OUT data stream to per-endpoint ports.

    port_map maps endpoint number -> (port, max_size, xfer_type); ports that
    are ready to accept data are granted in round-robin order.
    """
    def __init__(self, port_map):
        self.port_map = port_map
        self.sink_read = stream.Endpoint([("ep", 4)])  # endpoint being read
        self.sink_data = stream.Endpoint([("setup", 1), ("data", 8)])  # incoming bytes
    def elaborate(self, platform):
        m = Module()
        rr = m.submodules.rr = RoundRobin(len(self.port_map))
        # A port requests the arbiter while it can accept data.
        for i, (port, max_size, xfer_type) in enumerate(self.port_map.values()):
            m.d.comb += rr.request[i].eq(port.ready)
        with m.If(~self.sink_read.valid):
            with m.If(rr.request.bool()):
                # Latch a new grant and start a read transaction.
                m.d.comb += rr.ce.eq(1)
                m.d.sync += self.sink_read.valid.eq(1)
        with m.Elif(~self.sink_read.ready |
                self.sink_data.valid & self.sink_data.last & self.sink_data.ready):
            # Drop the read request when it is not accepted or after the
            # last byte of the packet has been transferred.
            m.d.sync += self.sink_read.valid.eq(0)
        # Translate the granted index back to its endpoint number.
        ep_map = Array(self.port_map.keys())
        m.d.comb += self.sink_read.ep.eq(ep_map[rr.grant])
        with m.Switch(rr.grant):
            for i, (port, max_size, xfer_type) in enumerate(self.port_map.values()):
                with m.Case(i):
                    if xfer_type is Transfer.CONTROL:
                        # Only control endpoints receive the SETUP flag.
                        m.d.comb += port.setup.eq(self.sink_data.setup)
                    # Pass the shared stream through to the granted port.
                    m.d.comb += [
                        port.valid.eq(self.sink_data.valid),
                        port.last.eq(self.sink_data.last),
                        port.data.eq(self.sink_data.data),
                        self.sink_data.ready.eq(port.ready)
                    ]
        return m
|
from payment_gateway.models import *
from LandingPage.models import *
import json
import razorpay
from django.http import JsonResponse
# SECURITY(review): Razorpay credentials are hard-coded (test keys); move the
# key id/secret into Django settings or environment variables before release.
client = razorpay.Client(auth=("rzp_test_0G5HtLCg0WpC26", "y8iPiSBFRf8w2Y1W0L6Q7F55"))
def CreateOrder(request,productId,action=None):
    """Add or remove a course on the current user's open (status=False) order.

    Args:
        request: Django request; request.user must be authenticated.
        productId: Course primary key (Cid).
        action: 'add', 'remove', or None. None just ensures the order item
            exists (used when merging an anonymous cookie cart).

    Returns:
        dict describing the action taken, or None when action is None.
    """
    customer = request.user
    product = Course.objects.get(Cid=productId)
    # One open (unpaid) order per customer.
    order, created = Order.objects.get_or_create(customer=customer, status=False)
    # get_or_create already persists the item; 'add' merely refreshes it.
    orderItem, created = OrderCourses.objects.get_or_create(order=order, course=product)
    if action == 'add':
        orderItem.save()
        request.session['order_id'] = order.id
        return {'action': True}
    if action == 'remove':
        # Forget the cached order id; it may not be in the session at all.
        # (Narrowed from a bare except so unrelated errors still surface.)
        try:
            del request.session['order_id']
        except KeyError:
            pass
        orderItem.delete()
        return {'action': False, 'product_id': product.Cid}
def CreateOrderWithAnonymousCart(request, cart):
    """Merge every product id from an anonymous cookie cart into the
    authenticated user's open order."""
    for product_id in cart:
        CreateOrder(request, product_id)
def cookieCart(request):
    """Build cart data for a non-logged-in user from the 'cart' cookie.

    Args:
        request: Django request; reads request.COOKIES['cart'] (JSON of
            {course_id: {'quantity': n}}).

    Returns:
        dict with 'cartItems', 'order' (totals), and 'items' entries.
    """
    try:
        cart = json.loads(request.COOKIES['cart'])
    except (KeyError, ValueError):
        # Missing or malformed cookie -> empty cart.
        # (json.JSONDecodeError is a ValueError subclass.)
        cart = {}
    items = []
    order = {'get_cart_total':0, 'get_cart_items':0}
    cartItems = order['get_cart_items']
    for i in cart:
        # Best-effort: items removed from the catalogue since the cookie was
        # written must not break the page. Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            cartItems += cart[i]['quantity']
            product = Course.objects.get(Cid=i)
            total = (product.price * cart[i]['quantity'])
            order['get_cart_total'] += total
            order['get_cart_items'] += cart[i]['quantity']
            item = {
                'id':product.Cid,
                'course':{'Cid':product.Cid,'title':product.title, 'price':product.price,
                'thumbnail':product.thumbnail},
                'get_total':total
            }
            items.append(item)
        except Exception:
            pass
    return {'cartItems':cartItems ,'order':order, 'items':items}
def cartData(request):
    """Return cart context for templates: cartItems, order, items and — when
    '?checkout' is present — the Razorpay order details.

    Authenticated users get a DB-backed order (any anonymous cookie cart is
    merged in first); anonymous users get the cookie-derived cart.
    """
    context={}
    cart={}
    if request.user.is_authenticated:
        try:
            cart = json.loads(request.COOKIES['cart'])
        except:
            cart = {}
        # Merge the pre-login cookie cart into the user's DB order.
        if bool(cart):
            CreateOrderWithAnonymousCart(request,cart)
        customer = request.user
        order, created = Order.objects.get_or_create(customer=customer, status=False)
        items = order.order_course.all()
        cartItems = order.get_cart_items
        name = request.user.first_name+" "+request.user.last_name
        email = request.user.email
        amount =order.get_cart_total # after calculating all items there is sum of total order
        order.price=amount
        order.save()
        # Razorpay expects the amount in the smallest currency unit (paise).
        order_amount = amount*100
        checkout=request.GET.get('checkout',None)
        print(order_amount)
        if checkout is not None and order.get_cart_items > 0:
            order_currency = 'INR'
            order_receipt = str(order.id)
            notes = {
                'Shipping address': ''}
            # CREAING ORDER
            response = client.order.create(dict(amount=order_amount, currency=order_currency, receipt=order_receipt, notes=notes, payment_capture='0'))
            order_id = response['id']
            order_status = response['status']
            print(order_status)
            if order_status=='created':
                # Server data for user convinience
                context['total'] = order_amount
                context['name'] = name
                context['email'] = email
                # data that'll be send to the razorpay for
                context['order_id'] = order_id
    else:
        cookieData = cookieCart(request)
        cartItems = cookieData['cartItems']
        order = cookieData['order']
        items = cookieData['items']
    context['cartItems']=cartItems
    context['order']=order
    context['items']=items
    return context
|
import numpy as np
class EdgeDetector:
    """Simple 3x3 convolution-style filters over an RGB image array."""
    def __init__(self):
        pass

    # Given a 3x3 mask, apply that mask on the given pixel as center
    def apply_mask(self, image, mask, row, col):
        """Return the mask-weighted RGB sum centred at (row, col).

        Args:
            image: ndarray of shape (H, W, 3).
            mask: 3x3 weight matrix; mask[x + 1][y + 1] pairs with offset (x, y).
            row, col: centre pixel coordinates.

        Returns:
            np.array([sum_r, sum_g, sum_b]) — out-of-image neighbours skipped.
        """
        sum_r, sum_g, sum_b = 0, 0, 0
        # Bug fix: range(-1, 1, 1) only visited offsets -1 and 0, so the last
        # row/column of the 3x3 mask was never applied; range(-1, 2) covers
        # the full -1..1 neighbourhood that mask[x + 1][y + 1] expects.
        for x in range(-1, 2):
            for y in range(-1, 2):
                row_offset, col_offset = row + x, col + y
                # Skip pixels that lie outside of image
                if (row_offset < 0 or row_offset >= image.shape[0] or
                        col_offset < 0 or col_offset >= image.shape[1]):
                    continue
                sum_r += image[row_offset][col_offset][0] * mask[x + 1][y + 1]
                sum_g += image[row_offset][col_offset][1] * mask[x + 1][y + 1]
                sum_b += image[row_offset][col_offset][2] * mask[x + 1][y + 1]
        # NOTE(review): division by 9 was deliberately commented out in the
        # original, so this is a weighted *sum*, not an average — confirm.
        return np.array([sum_r, sum_g, sum_b])

    # Get the average of the pixel values in the neighborhood
    def avg_filter(self, image):
        """Apply the all-ones 3x3 mask to every pixel (in place) and return image.

        NOTE(review): filtering is in place, so later pixels see already
        filtered neighbours — confirm this is the intended behaviour.
        """
        mask = np.array([[1, 1, 1],
                         [1, 1, 1],
                         [1, 1, 1]])
        for row in range(image.shape[0]):
            # Bug fix: the inner loop was range(row), which only visited the
            # lower-triangular pixels; iterate every column instead.
            for col in range(image.shape[1]):
                tmp = self.apply_mask(image, mask, row, col)
                image[row][col] = tmp
        return image

    # Call the corresponding filter function
    def apply_filter(self, filter_name, image):
        """Dispatch by filter name ('avg'); returns None on unknown names."""
        filter_name = filter_name.lower()
        if filter_name == 'avg':
            return self.avg_filter(image)
        print('The filter did not match any. Please try again.')
        return None
|
import pygame, sys, glob, ntpath
from random import shuffle
from pygame.locals import *
from image import *

# Python 2 slideshow script: cycles through every background image that
# matches ./images/bkg-*.png on an 800x600 window at 10 FPS.
pygame.init()
clock = pygame.time.Clock()
LENGTH = 800
HEIGHT = 600
screen = pygame.display.set_mode((LENGTH,HEIGHT))
BKG= []
# Pre-load every background, scaled to the window size.
for bkg in glob.glob("./images/bkg-*.png"):
    back= load_image(bkg,"",LENGTH,HEIGHT)
    BKG.append(back)
y = 0
curr = 0
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    screen.blit(BKG[curr],(0,0))
    # y counts frames in units of HEIGHT; when it reaches (len(BKG)-1)*HEIGHT
    # the next background is selected. NOTE(review): this means each image is
    # shown len(BKG)-1 frames — looks like an off-by-one, confirm intent.
    y = y + HEIGHT
    print y
    if y == (len(BKG)-1)*HEIGHT:
        y = 0
        curr+=1
    curr = curr%len(BKG)
    msElapsed = clock.tick(10)  # cap the loop at 10 FPS
    pygame.display.flip()
|
import socket
import json
import select
from util_m import *
class Server(BaseServer):
    """Relay server: accepts a single websocket-facing server connection and
    multiplexes danmu (chat) streams from multiple room servers to it."""
    def __init__(self, addr, port):
        super(Server, self).__init__(addr, port)
        self.log = Log('Server')
        # Outgoing message queue; presumably 400/50 are capacity parameters —
        # confirm against MessageListBuffer.
        self.messageListBuffer = MessageListBuffer(400, 50)
        # Reassembly buffer for partially received binary frames.
        self.bytesListBuffer = BytesBuffer()
def run(self):
#timeout还没有处理
print('start ...')
#每1秒select都会强制返回一次,用于处理可能的超时事件
time_out_used_in_select = 1
while True:
rs, ws, es = select.select(self.inSockets, self.outSockets, [], time_out_used_in_select)
#一次循环里必须使用同一个时间戳
current_time = int(time.time())
for r in rs:
#像这个监听端口建立连接只能是ws server
#像我这里只能有一个ws server 暂时不考虑扩展
if r == self.Host:
ws_sock, address = self.Host.accept()
#没有采用非阻塞,疏忽了,是一个败笔...
if not ws_sock:
self.log.write('Socket accept failed.')
continue
self.ws_sock = ws_sock
self.inSockets.append(ws_sock)
self.log.write(str(address) + ' ws server connected !')
#ws server发过来请求某个room的dm或者不再要某个room了
#数据格式是: type + 人数(douyu room 人数,不是我这个网页上的人数,单位是万, 整型) + url
#在请求room时人数为0, 在取消room时人数用于决定该房间没人需要时多久后关闭
#type分为0和1,0为取消,1为需要
#传输方式为json
elif r == self.ws_sock:
ws_data_str = self.ws_sock.recv(1024)
if len(ws_data_str) == 0:
#断开连接
for i,s in enumerate(self.inSockets):
if s == self.ws_sock:
self.ws_sock.close()
del self.inSockets[i]
break
self.log.write('ws server disconencted')
continue
ws_data_str =ws_data_str.decode()
ws_data = json.loads(ws_data_str)
self.log.write('from ws server : ' + str(ws_data))
url_aim = ws_data['url']
if ws_data['type'] == 1:
#建立房间dm server的连接
self.room_dm_connect(url_aim, current_time)
else:
#如果是取消的话,room的接收状态可能是关闭或者即将关闭
self.room_dm_disconnect(url_aim, ws_data['peopleMax'], current_time)
#dm server发过来的dm
else:
#处理二进制流,给了个二进制buffer,因为可能接收的数据不完整等,整理出完整的数据再进行处理
roomDm = self.Danmus[r.fileno()]
roomUrl = roomDm.url
dm_data_str = r.recv(1024)
self.bytesListBuffer.appendData(roomDm, dm_data_str)
#如果room dm server的dm不被需要,那么接收到后直接丢弃
# if roomDm.status == DMSStatus.connecting:
# roomUrl = roomDm.url
# msgList = roomDm.operation.deal_with_recved_bytes(dm_data_str)
# for msg in msgList:
# u,c = roomDm.operation.extractMainContent(msg)
# self.log.write(u + ':' + c)
# self.messageListBuffer.appendItem(roomUrl, 1, u, c)
dm_data_str_list = self.bytesListBuffer.getFullMsgList(roomDm)
for dm_data in dm_data_str_list:
msg = roomDm.operation.deal_with_recved_bytes_main(dm_data)
if msg is None:
continue
ret = roomDm.operation.extractMainContent(msg)
if ret is not None:
u = ret[0]
c = ret[1]
self.log.write('-------------' + u + ':' + c)
#之所以要len(u)!=1,是为了去掉礼物信息,具体礼物返回的消息我没仔细去看
#输出会有句 火箭:1, 这种是因为有人送了火箭礼物
if roomDm.status == DMSStatus.connecting and len(u) != 1:
self.messageListBuffer.appendItem(roomUrl, 1, u, c)
else:
self.log.write('extractMainContent not return username and content.......', 1)
#就是说没有数据进来,之前的也全部取出来(不代表发完)了,那么就不需要触发ws server的写事件了
#但是要注意的是,可能上一次取出了所有数据,但没有发完,就会在下面的for循环里继续发送,如果仍然没有发送完全,那么也会覆盖这里的writeEvent值,继续监听写事件
if self.messageListBuffer.lengthItem() == 0:
self.ws_sock_writeEvent = False
else:
self.ws_sock_writeEvent = True
for w in ws:
#要知道,这里发送给ws server可能发送不完全,就要考虑接着发送
#还要考虑到message buffer中有还有数据要发送的问题
#所以这里就涉及到粘包,包体不完全问题了
#设计一个简单的协议判断包体的完整性
#借用douyu 的包体协议
#协议格式: 长度 + 未知(重复长度)+ 未知(固定) + content + end
#这样也好把之前处理的一套拿过来直接用
#这么一来就要去修缮message类
#content是json字符串
if w == self.ws_sock:
#如果上一次没有写完,这一次继续写,如果能写完,就继续写新内容
#如果不能写完,就不再继续写新内容
if self.ws_sock_isWriteLeft == True:
# self.log.write('send to ws server old data')
toSentLen = len(self.ws_sock_writeLeft)
sentDataLen = self.ws_sock.send(self.ws_sock_writeLeft)
if sentDataLen >= toSentLen:
self.ws_sock_isWriteLeft = False
self.ws_sock_writeLeft = ''
else:
self.ws_sock_writeLeft = self.ws_sock_writeLeft[sentDataLen:]
#如果之前发送的仍有遗留,说明还没发送完,这里更不能发送了
#如果之前没有遗留或者就算有也已经发送完了,就可以开始新内容的发送
if self.ws_sock_isWriteLeft == False:
#也有可能是因为遗留数据导致的对写事件感兴趣
if self.messageListBuffer.lengthItem() > 0:
# self.log.write('send to ws server new data')
#self.log.write('message buffer has------------- %d -------------datas' % self.messageListBuffer.lengthItem())
toSendMessage_dic = self.messageListBuffer.getPopItem()
toSendMessage_str = json.dumps(toSendMessage_dic)
tSM = message(toSendMessage_str)
#得到的字符串是经过编码的,可直接发送
tSM_final = tSM.getMsg()
tSM_final_len = len(tSM_final)
sentDataLen = self.ws_sock.send(tSM_final)
if sentDataLen < tSM_final_len:
self.log.write('send to ws server not end')
self.ws_sock_isWriteLeft = True
self.ws_sock_writeLeft = tSM_final[sentDataLen:]
#对写事件感兴趣主要是因为两个原因,一个是因为数据发送有遗留,一个是有新数据需要发送
if self.ws_sock_writeLeft == True or self.messageListBuffer.lengthItem() != 0:
self.ws_sock_writeEvent = True
if self.ws_sock_writeEvent == False:
if len(self.outSockets) != 0:
self.outSockets.clear()
else:
if len(self.outSockets) == 0:
self.outSockets.append(self.ws_sock)
self.timeout_process(current_time)
    # Look up the DanmuSocket for a room url.
def url2roomDm(self, url):
roomDm = None
for mk, rD in self.Danmus.items():
if rD.url == url:
roomDm = rD
break
return roomDm
    def room_dm_connect(self, url, current_time):
        """Ensure a danmu-server connection for *url* is active.

        New rooms get a socket plus a heartbeat timer. Rooms already in the
        'closing' state are either restarted (deadline already passed) or
        recovered back to 'connecting'.
        """
        roomDm = self.url2roomDm(url)
        # Room not connected yet: open a socket and start heartbeats.
        if roomDm is None:
            self.log.write('new room is connecting...')
            # Connect to the room's danmu server.
            roomDm = DanmuSocket(url)
            roomDm.getDanmuServerSock()
            # Track the room so repeat requests can be detected.
            if roomDm.Mark not in self.Danmus:
                self.Danmus[roomDm.Mark] = roomDm
            # Register the socket with the select() read set.
            if roomDm.sock not in self.inSockets:
                self.inSockets.append(roomDm.sock)
            # Arm the heartbeat timeout event.
            roomDm.timeoutEventType = DMTEventType.keepAlive
            roomDm.timeoutEvent = self.keep_alive_event
            self.mainTimer.addTimer(roomDm, roomDm.keepAliveIntern, current_time)
        # Room already known: it may be connecting or scheduled to close.
        else:
            if roomDm.status == DMSStatus.connecting:
                self.log.write('ws server is wrong ? repeat request...', 1)
            elif roomDm.status == DMSStatus.closing:
                # Close deadline already passed: close now, then reconnect
                # from scratch (status etc. are reinitialised by the call).
                if roomDm.closingTimeout < current_time:
                    self.log.write('the closing room will restart ...')
                    self.room_dm_close(roomDm, current_time)
                    self.room_dm_connect(url, current_time)
                else:
                    self.log.write('the closing room will recover ...')
                    # Not closing yet. If the pending timeout is a heartbeat,
                    # flipping the status back is enough — nothing notices.
                    if roomDm.timeoutEventType == DMTEventType.keepAlive:
                        roomDm.status = DMSStatus.connecting
                        roomDm.closingTimeout = -1
                        roomDm.delayTimeout = -1
                    # If the pending timeout is the close event, re-derive
                    # when the next heartbeat is due: close deadlines are set
                    # a whole number of heartbeat intervals past the last
                    # heartbeat, so the arithmetic below recovers it.
                    else:
                        # Time of the last heartbeat.
                        lastKeepAlive = roomDm.closingTimeout - roomDm.delayTimeout
                        # Time elapsed since that heartbeat.
                        passedTime = current_time - lastKeepAlive
                        # Interval until the next heartbeat.
                        nextKeepAlive = roomDm.keepAliveIntern - passedTime
                        # Re-arm the timer as a heartbeat and restore state.
                        self.mainTimer.addTimer(roomDm, nextKeepAlive, current_time)
                        roomDm.timeoutEventType = DMTEventType.keepAlive
                        roomDm.status = DMSStatus.connecting
                        roomDm.timeoutEvent = self.keep_alive_event
                        roomDm.closingTimeout = -1
                        roomDm.delayTimeout = -1
    # Delayed close.
    def room_dm_disconnect(self, url, peopleMax, current_time):
        """Schedule a delayed close for *url*'s danmu connection.

        The delay is measured in heartbeat intervals (40s each, per the
        original design notes): busier rooms (peopleMax, in units of 10k
        viewers) are kept longer, clamped to 1..15 heartbeats (~10 min max).
        """
        roomDm = self.url2roomDm(url)
        if roomDm is None:
            self.log.write('cannot find aim url in Dm pool', 2)
            exit(-1)
        # 10 or fewer (x10k) viewers -> one heartbeat; grows with viewers,
        # capped at 15 heartbeats.
        nkeepAlive = int(peopleMax / 10)
        if nkeepAlive < 1: nkeepAlive = 1
        if nkeepAlive > 15: nkeepAlive = 15
        roomDm.delayTimeout = nkeepAlive * roomDm.keepAliveIntern
        roomDm.closingTimeout = current_time + roomDm.delayTimeout
        roomDm.status = DMSStatus.closing
        self.log.write('%s will close in %ds' % (url, roomDm.delayTimeout))
        # A re-request arriving while 'closing' is handled in room_dm_connect.
    # Timeout event: actually close the dm server connection.
    def room_dm_close(self, roomDm, current_time):
        """Timeout event: tear down a room's danmu connection for real."""
        if roomDm is None:
            return
        if roomDm.status != DMSStatus.closing:
            return
        self.log.write(roomDm.url + ' : room close...')
        # Remove from the readable-socket pool (heartbeats are sent directly,
        # so the socket was never in the write pool).
        for index, so in enumerate(self.inSockets):
            if roomDm.sock == so:
                del self.inSockets[index]
                self.log.write('del from in Sockets pool')
                break
        # Remove from the danmu-socket pool.
        if roomDm.Mark in self.Danmus:
            del self.Danmus[roomDm.Mark]
            self.log.write('del from Danmu Sockets pool')
        else:
            # NOTE(review): log text looks copy-pasted — this branch concerns
            # the Danmus pool, not the inSockets pool.
            self.log.write('cannot del from in Sockets pool', 1)
        # Drop any messages buffered for this room.
        if self.messageListBuffer.deleteItem(roomDm.url):
            self.log.write('del from Danmu Sockets pool')
        else:
            self.log.write('cannot del from in Danmu pool', 1)
        # Remove from the timer. A failed delete is expected in the restart
        # path of room_dm_connect (the entry was already popped), so the
        # return value is deliberately ignored here.
        self.mainTimer.delTimer(roomDm, current_time)
        roomDm.sock.close()
        self.log.write(roomDm.url + ' : room close successfully...')
    def keep_alive_event(self, current_time, roomDm):
        """Timeout event: send a heartbeat and schedule the next timeout.

        Each connection carries exactly one pending timeout, so this must
        decide whether the next event is another heartbeat or the delayed
        close (whichever comes first).
        """
        # Send the heartbeat packet.
        self.log.write(roomDm.url + ' : room keep alive...')
        roomDm.operation.keep_alive_package()
        nextKeepAlive = current_time + roomDm.keepAliveIntern
        timeout_intern = -1
        if roomDm.status == DMSStatus.closed:
            self.log.write('room is closed ???', 2)
            exit(-1)
        # Closing soon: if the close deadline lands before the next
        # heartbeat, arm the close event instead.
        elif roomDm.status == DMSStatus.closing:
            if roomDm.closingTimeout == -1:
                self.log.write('next closing timeout is -1 ???')
                exit(-1)
            if nextKeepAlive >= roomDm.closingTimeout:
                roomDm.timeoutEventType = DMTEventType.closing
                roomDm.timeoutEvent = self.room_dm_close
                timeout_intern = roomDm.closingTimeout - current_time
        # Default: the next event is another heartbeat.
        if timeout_intern == -1:
            self.log.write(roomDm.url + ' : next timeout event : keep alive in ' + str(roomDm.keepAliveIntern) + 's')
            timeout_intern = roomDm.keepAliveIntern
        else:
            self.log.write(roomDm.url + ' : next timeout event : room close in ' + str(timeout_intern) + 's')
        self.mainTimer.addTimer(roomDm, timeout_intern, current_time)
    def timeout_process(self, current_time):
        """Fire every expired timer (only room sockets carry timeouts).

        Note the two callback signatures differ deliberately:
        keep_alive_event(current_time, roomDm) vs room_dm_close(roomDm,
        current_time) — each call below matches its handler.
        """
        while self.mainTimer.isTopTimeOut(current_time):
            (t, roomDm) = self.mainTimer.getPopTopTimer()
            if roomDm.timeoutEventType == DMTEventType.keepAlive:
                roomDm.timeoutEvent(current_time ,roomDm)
            elif roomDm.timeoutEventType == DMTEventType.closing:
                roomDm.timeoutEvent(roomDm, current_time)
            else:
                self.log.write('room timeoout event nothing ? ', 1)
    # The ws server requests a room's danmu stream or cancels it.
    # Payload: type + peak viewer count (douyu room viewers, unit: 10k, int) + url.
    # The count is 0 when requesting a room; when cancelling it decides how
    # long the room stays open while nobody needs it.
    # type is 0 (cancel) or 1 (wanted); transported as JSON.
# Listen on localhost; the ws server is expected to connect to this port.
s = Server('127.0.0.1', 8666)
s.run()
import argparse
import os
from collections import OrderedDict
from os.path import isdir

import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
# Function get_input_args() parses keyword arguments from the command line
def get_input_args():
    """Parse the command-line keyword arguments for training.

    Returns:
        argparse.Namespace with attributes arch, save_dir, learning_rate,
        hidden_units, epochs (None when unsupplied) and gpu (bool). Callers
        fill in the defaults for None values.
    """
    parser = argparse.ArgumentParser(description="Neural Network Settings")
    # Fix: the original help strings used backslash line continuations, which
    # embedded long runs of spaces into the displayed help text; implicit
    # string concatenation avoids that.
    parser.add_argument('--arch',
                        type=str,
                        help='Choose architecture '
                             '(https://pytorch.org/docs/stable/torchvision/models.html)')
    # Checkpoint directory.
    parser.add_argument('--save_dir',
                        type=str,
                        help='Define save directory for checkpoints')
    # Hyperparameter tuning.
    parser.add_argument('--learning_rate',
                        type=float,
                        help='Define gradient descent learning rate')
    parser.add_argument('--hidden_units',
                        type=int,
                        help='Number of hidden units for DNN classifier')
    parser.add_argument('--epochs',
                        type=int,
                        help='Number of epochs')
    # GPU option for training.
    parser.add_argument('--gpu',
                        action="store_true",
                        help='Use GPU for calculations')
    args = parser.parse_args()
    return args
def train_transformer(train_dir):
    """Performs training transformations on a dataset
    Args:
        train_dir (str): [Dataset training folder]
    Returns:
        [trainset]: [Torchvision dataset for the images]
    """
    # ImageNet normalisation statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Random augmentation for training images.
    augmentation = transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    return datasets.ImageFolder(train_dir, transform=augmentation)
def test_transformer(test_dir):
    """Performs test transformations on a dataset
    Args:
        test_dir (str): [Dataset testing folder]
    Returns:
        [testset]: [Torchvision dataset for the images]
    """
    # ImageNet normalisation statistics; deterministic resize/crop only.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    pipeline = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])
    return datasets.ImageFolder(test_dir, transform=pipeline)
def data_loader(data, batch_size, train=True):
    """Creates a dataloader from dataset
    Args:
        data (torchvision.dataset): [dataset to be loaded]
        batch_size (int): [batch size for dataloader]
        train (bool, optional): [Shuffle or not the dataset.
                                 True for training purposes].
                                 Defaults to True.
    Returns:
        [DataLoader]: [dataloader]
    """
    # Shuffling only matters for training; a single call covers both cases
    # (shuffle defaults to False, matching the original else-branch).
    return DataLoader(data, batch_size=batch_size, shuffle=train)
def check_gpu(gpu_arg=True):
    """Select the torch device to compute on.

    Args:
        gpu_arg (bool, optional): request the GPU. Defaults to True.

    Returns:
        torch.device: cuda:0 when requested and available, otherwise cpu.
    """
    # If the GPU was not requested, simply return the cpu device.
    if not gpu_arg:
        return torch.device("cpu")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Bug fix: the device object was compared to the *string* "cpu", so the
    # fallback warning could never reliably print; compare device.type.
    if device.type == "cpu":
        print("CUDA not found on device. For more details please visit: "
              "https://developer.nvidia.com/cuda-downloads.\nUsing CPU.")
    return device
def model_loader(architecture="vgg16"):
    """Download a pretrained torchvision model.

    Args:
        architecture (str, optional): torchvision model name (e.g. "vgg16").
            None falls back to "vgg16".

    Returns:
        torchvision model with frozen parameters and a .name attribute
        recording the architecture.
    """
    # Load defaults if none specified.
    if architecture is None:
        architecture = "vgg16"
        print("Network architecture specified as vgg16.")
    # Bug fix: the original used exec("model = models.{}(...)"), which does
    # not bind a local variable in Python 3 (model.name then raises
    # NameError) and would execute arbitrary command-line input; look the
    # constructor up by name instead.
    model = getattr(models, architecture)(pretrained=True)
    model.name = architecture
    # Freeze parameters so we don't backprop through them
    for param in model.parameters():
        param.requires_grad = False
    return model
def initial_classifier(model, hidden_units):
    """Build a fresh feed-forward classifier head for *model*.

    Args:
        model: network whose classifier[0].in_features defines input width.
        hidden_units (int or None): hidden-layer width; None selects 4096.

    Returns:
        nn.Sequential: fc -> ReLU -> Dropout -> fc -> LogSoftmax head
        (also assigned to model.classifier).
    """
    # Fall back to the default width when none was supplied.
    if hidden_units is None:
        print("Number of Hidden Layers specificed as 4096.")
        hidden_units = 4096
    # Input width is dictated by the existing first classifier layer.
    input_features = model.classifier[0].in_features
    layers = OrderedDict([
        ('fc1', nn.Linear(input_features, hidden_units, bias=True)),
        ('relu1', nn.ReLU()),
        ('dropout1', nn.Dropout(p=0.5)),
        ('fc2', nn.Linear(hidden_units, 102, bias=True)),
        ('output', nn.LogSoftmax(dim=1)),
    ])
    model.classifier = nn.Sequential(layers)
    return model.classifier
def validation(model, testloader, criterion, device):
    """Evaluate *model* on *testloader*.

    Args:
        model: network producing log-probabilities.
        testloader: iterable of (inputs, labels) batches.
        criterion: loss function.
        device: torch.device the tensors are moved to.

    Returns:
        (test_loss, accuracy): summed batch loss and summed per-batch
        accuracy — divide by len(testloader) for averages.
    """
    total_loss = 0
    total_accuracy = 0
    for inputs, labels in testloader:
        inputs, labels = inputs.to(device), labels.to(device)
        log_probs = model.forward(inputs)
        total_loss += criterion(log_probs, labels).item()
        # Predicted class = argmax of the probabilities.
        predictions = torch.exp(log_probs).max(dim=1)[1]
        hits = (labels.data == predictions)
        total_accuracy += hits.type(torch.FloatTensor).mean()
    return total_loss, total_accuracy
def net_train(model, trainloader, validloader, device,
              criterion, optimizer, print_steps, steps, num_epochs):
    """Train *model*, printing a validation report every *print_steps* batches.

    Args:
        model: network to train (modified in place).
        trainloader / validloader: training / validation DataLoaders.
        device: torch.device tensors are moved to.
        criterion: loss function.
        optimizer: optimizer stepping the trainable parameters.
        print_steps (int): batches between validation reports.
        steps (int): starting global step counter (incremented locally).
        num_epochs (int or None): epoch count; None selects 5.

    Returns:
        the trained model.
    """
    if num_epochs is None:
        print("Number of epochs specificed as 5.")
        num_epochs = 5
    else:
        num_epochs = num_epochs
    print("Training process initializing .....\n")
    # Train Model
    for e in range(num_epochs):
        batch_loss = 0
        for batch, (inputs, labels) in enumerate(trainloader):
            steps += 1
            inputs = inputs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            # Forward and backward passes
            outputs = model.forward(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            batch_loss += loss.item()
            # Periodic validation: switch to eval mode (and back) so dropout
            # layers behave correctly, and skip gradient bookkeeping.
            if batch % print_steps == 0:
                model.eval()
                with torch.no_grad():
                    valid_loss, accuracy = validation(model,
                                                      validloader,
                                                      criterion,
                                                      device)
                print("Epoch: {0}/{1} | ".format(e+1, num_epochs),
                      "Training Loss: {:.4f} | ".format(
                          batch_loss/print_steps),
                      "Validation Loss: {:.4f} | ".format(
                          valid_loss/len(validloader)),
                      "Validation Accuracy: {:.4f}".format(
                          accuracy/len(validloader)))
                # Reset the running loss after each report.
                batch_loss = 0
                model.train()
    return model
def validate_model(model, testloader, device):
    """Validate the above model on test data images
    Args:
        model ([torchvision.models]): [model of neural network]
        testloader ([DataLoader]): [testloader]
        device ([torch.device]): [Device use for validation (GPU or CPU)]
    """
    # Run the test set without gradient bookkeeping.
    correct, seen = 0, 0
    model.eval()
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            predictions = torch.argmax(model(images), 1)
            seen += labels.size(0)
            correct += (predictions == labels).sum().item()
    print('Accuracy achieved by the network on test \
    images is: {} %'.format((100 * correct / seen)))
def initial_checkpoint(model, savedir, trainset):
    """Save a training checkpoint into *savedir*.

    Args:
        model: trained model exposing .name, .classifier and .state_dict().
        savedir (str or None): target directory; None skips saving.
        trainset: dataset providing class_to_idx.
    """
    if savedir is None:
        print("Model checkpoint directory is empty, model will not be saved.")
        return
    if not isdir(savedir):
        print("Directory not found, model will not be saved.")
        return
    # Record the class-label -> index mapping for inference-time decoding.
    model.class_to_idx = trainset.class_to_idx
    checkpoint = {'architecture': model.name,
                  'class_to_idx': model.class_to_idx,
                  'classifier': model.classifier,
                  'state_dict': model.state_dict()}
    # Bug fix: the checkpoint was previously written to the current working
    # directory, ignoring savedir even though it had just been validated.
    torch.save(checkpoint, os.path.join(savedir, 'my_checkpoint.pth'))
# Main program function defined below
def main():
    """Entry point: parse CLI args, build loaders, train, validate, save."""
    # Get Keyword Args for Training
    in_arg = get_input_args()
    # Set directory for training
    data_dir = 'flowers'
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # Pass transforms in, then create trainloader
    train_data = train_transformer(train_dir)
    valid_data = test_transformer(valid_dir)
    test_data = test_transformer(test_dir)
    trainloader = data_loader(train_data, batch_size=20)
    validloader = data_loader(valid_data, batch_size=20, train=False)
    testloader = data_loader(test_data, batch_size=20, train=False)
    # Load Model
    model = model_loader(architecture=in_arg.arch)
    # Build Classifier
    model.classifier = initial_classifier(model,
                                          hidden_units=in_arg.hidden_units)
    # Check for GPU
    device = check_gpu(gpu_arg=in_arg.gpu)
    # Send model to device
    model.to(device)
    print("Using device: {}".format(device))
    # Check for learnrate args
    if in_arg.learning_rate is None:
        learning_rate = 0.001
        print("Learning rate specificed as 0.001")
    else:
        learning_rate = in_arg.learning_rate
    # Define loss and optimizer; NLLLoss pairs with the LogSoftmax output,
    # and only the (unfrozen) classifier parameters are optimised.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    # Define deep learning method
    print_steps = 40
    steps = 0
    # Train the classifier layers using backpropogation
    trained_model = net_train(model=model,
                              trainloader=trainloader,
                              validloader=validloader,
                              device=device,
                              criterion=criterion,
                              optimizer=optimizer,
                              print_steps=print_steps,
                              steps=steps,
                              num_epochs=in_arg.epochs)
    print("\nTraining process is now complete.")
    # Quickly Validate the model
    validate_model(trained_model, testloader, device)
    # Save the model
    initial_checkpoint(trained_model, in_arg.save_dir, train_data)

# Call to main function to run the program
if __name__ == '__main__':
    main()
|
import csv
import time
from lxml import html
import requests
import unidecode
#The first scraper takes each website link (listing conferences) from the CSV file,
#goes through the website, collects the author names, and writes them to a new CSV.
#The second scraper takes the titles of the publications and writes them to a new CSV.
def scraper(): #authors of conferences scraper
    """For each conference URL in the CSV, scrape the author names and write
    them to '<page title>.csv', one name per line."""
    # Read all links up front: csv.reader has no len(), so the original
    # progress message raised TypeError on the first iteration. The file is
    # also closed deterministically now.
    with open(r"C:\Users\Allmightyme\Desktop\website-link.csv") as link:
        rows = [row for row in csv.reader(link) if row]  # drop empty rows
    for number, row in enumerate(rows, start=1):
        website = row[0]
        page = requests.get(website)
        tree = html.fromstring(page.content)
        # Always check that this XPath matches the site's current markup.
        names = tree.xpath('//span[@itemprop="author"]//span[@itemprop="name"]/text()')
        title = tree.xpath('//title/text()')[0]
        unames = [unidecode.unidecode(x) for x in names]  # strip accents etc.
        print(f"we are at {number}/{len(rows)} !")  # progress tracking
        # NOTE(review): the page title may contain characters that are not
        # valid in filenames — confirm before relying on this.
        with open(title + '.csv', 'w') as f:
            for name in unames:
                f.write(name + '\n')
        # No API available: slow down so we don't overload their server.
        time.sleep(2)
def scraper2(): #title of papers scraper
    """For each year 2015-2018, scrape publication titles from every
    conference page whose URL contains that year and write them to
    publication_for_<year>.csv."""
    # NOTE(review): `link` is never closed; a with-block would be safer.
    link = open(r"C:\Users\Allmightyme\Desktop\website-link.csv") ## Make sure it's the right localisation
    csv_link = csv.reader(link)
    number = 0
    for year in range(2015, 2019): #we want to create a csv file for each year with only the publication from that specific year
        publications = []
        link.seek(0) #reset the csv file to the first row for each year loop
        for row in csv_link: #grab website link from csv
            if not row:
                continue
            if str(year) in row[0]: #check if the website is for publications in the correct year
                website = row[0]
                page = requests.get(website)
                tree = html.fromstring(page.content)
                titles = tree.xpath('//span[@class="title"]/text()') # Check that it is the correct pattern for the website
                publications.append([unidecode.unidecode(x) for x in titles]) ## Unidecode normalizes the words (no accent, etc)
                number += 1
                print(f"we are at {year}! alternatively we are at conference number {number}") ## Keeps track of how website we went through
                # Throttle: no API, so be gentle with their server.
                time.sleep(2)
        # publications is a list of per-conference title lists, hence the
        # nested loop below.
        with open(f"publication_for_{year}.csv", 'w') as f:
            for publication in publications:
                for single in publication: ## use a list of a list so we need two for loop
                    f.write(f"{single} \n")
|
# @Title: 反转链表 (Reverse Linked List)
# @Author: 2464512446@qq.com
# @Date: 2020-11-08 00:00:23
# @Runtime: 36 ms
# @Memory: 14.4 MB
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Iteratively re-point each node at its predecessor; O(n) time,
        O(1) extra space. Returns None for an empty list.
        """
        reversed_head = None
        node = head
        while node:
            # RHS is evaluated first, so node.next is read before any
            # of the three targets are reassigned.
            node.next, reversed_head, node = reversed_head, node, node.next
        return reversed_head
|
# Default AppConfig path for this Django app.
# NOTE(review): default_app_config is deprecated since Django 3.2 — confirm
# the project's Django version before removing it.
default_app_config = 'values.apps.ValuesConfig'
|
#!/usr/bin/env python
import random
nums = [x for x in range(3, 100)]
print nums
key = 14
checked = []
for i, n in enumerate(nums):
checked.append([i, n])
while True:
midInt = len(checked)/2 - 1
mid = checked[midInt][1]
if mid == key:
print 'Found it at index %d!' % checked[midInt][0]
break
elif key > mid:
checked = checked[mid:]
elif key < mid:
checked = checked[:mid]
|
import requests
class Weather():
    """
    Creates Weather object containing information about Current weather
    """

    def __init__(self, area, appid):
        self.area = area
        self.weather_url = "http://api.openweathermap.org/"
        self.appid = appid

    def weather_condition(self):
        """
        returns current weather condition
        """
        # Build the OpenWeatherMap request for this area (metric units).
        endpoint = "{}data/2.5/weather?q={}&units=metric&APPID={}".format(
            self.weather_url, self.area, self.appid)
        payload = requests.get(endpoint).json()
        description = payload['weather'][0]['description']
        temperature = round(payload['main']['temp'])
        return {
            "area": self.area,
            "conditions": description,
            "temperature": temperature,
        }
|
from mlmodel import MLModel
from sklearn.neighbors import KNeighborsRegressor
class KNeighbors(MLModel):
    """k-nearest-neighbours regressor built on the MLModel pipeline."""
    def train(self, n_neighbors=5):
        '''
        Shuffle the train/test split, then fit a KNeighborsRegressor with
        *n_neighbors* neighbours on the training inputs/outputs.
        '''
        # Shuffle the training/test data:
        self.shuffle()
        # Create & Train Model:
        self.model = KNeighborsRegressor(n_neighbors=n_neighbors)
        self.model.fit(self.datasets.train.inputs, self.datasets.train.outputs)
if __name__ == '__main__':
    # Example: fit a 3-NN regressor on SPY data with a 75% training split.
    model = KNeighbors('SPY', train=0.75)
    model.train(n_neighbors=3)
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.conf import settings
import stripe
stripe.api_key=settings.STRIPE_SECRET_KEY

@login_required
def checkout(request):
    """Render the checkout page; on POST, charge the submitted Stripe token
    a fixed amount (Stripe amounts are in the smallest currency unit, so
    1000 = $10.00)."""
    publishKey = settings.STRIPE_PUBLISHABLE_KEY
    if request.method =='POST':
        token = request.POST['stripeToken']
        #token = request.POST('stripeToken')
        # Get the credit card details submitted by the form
        # Charge the user's card:
        try:
            #charge(
            charge = stripe.Charge.create(
                amount = 1000,
                currency = "usd",
                description = "Example charge",
                source = token
            )
        except stripe.error.CardError as e:
            # NOTE(review): card declines are silently swallowed — the user
            # is never told the charge failed. Confirm this is intentional.
            pass
    context = {'publishKey': publishKey}
    template = 'checkout/checkout.html'
    return render(request, template, context)
|
def setrate(roll, rate, fslist):
    """Append every entry of fslist to roll `rate` times, in place.

    Copies are added consecutively: all copies of fslist[0] first, then all
    copies of fslist[1], and so on.  The mutated list is also returned so
    calls can be chained.
    """
    for dish in fslist:
        roll.extend([dish] * rate)
    return roll
# ---------------------------------------------------------------------------
# Gacha reward pools, one list per game version (roll0 .. roll11).  An item's
# drop rate is its number of copies divided by the pool size, so the pools
# are presumably sampled with something like random.choice(rollN) — confirm
# against the caller.
#
# Each multiplied list below reproduces the original loops' append order
# exactly: ["A", "B"] * 23 yields the same sequence as a 23-iteration loop
# that appends "A" then "B" on every pass.
# ---------------------------------------------------------------------------

# Item groups shared by several pool versions (underscore-prefixed so they
# stay out of a star-import).
_RARE_A = ["Long Bao", "Coffee", "Sashimi", "Macaron", "Zongzi",
           "Sakuramochi", "Tom Yum", "Taiyaki", "Milk", "Dorayaki", "Sake",
           "Tempura", "Spicy Gluten"]
_RARE_B = ["Jiuniang", "Omurice", "Orange Juice", "Ume Ochazuke",
           "Miso Soup", "Yellow Wine"]
_RARE_C = ["Long Bao", "Coffee", "Sashimi", "Macaron", "Zongzi",
           "Sakuramochi", "Cold Rice Shrimp"]
_RARE_D = ["Tom Yum", "Taiyaki", "Milk", "Dorayaki", "Sake", "Tempura",
           "Spicy Gluten"] + _RARE_B
_RARE_E = _RARE_C + _RARE_D
_SUPER = ["Tiramisu", "Escargot", "Hotdog", "Mango Pudding", "Hamburger",
          "Steak", "Tangyuan", "Sanma", "Napoleon Cake", "Salad",
          "Pastel de nata", "Yuxiang", "Sukiyaki", "Brownie", "Red Wine",
          "Gyoza", "Chocolate"]
_MISSION = ["Skewer", "Jello", "Pancake"]

roll0 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Bamboo Rice", "Peking Duck", "B-52"] * 61
         + ["Foie Gras"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + _SUPER[:3] * 103                # Tiramisu, Escargot, Hotdog
         + _SUPER[3:16] * 104              # Mango Pudding .. Gyoza
         + _RARE_A * 413
         + _RARE_B * 414
         + ["Pancake", "Jello"] * 62
         + ["Skewer"] * 61)

roll1 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + _SUPER * 83
         + _RARE_A * 413
         + _RARE_B * 414
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll2 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + _SUPER * 80
         + ["Eggette"] * 150
         + ["Pineapple Cake"] * 151
         + _RARE_A * 413
         + _RARE_B * 414
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll3 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon"]) * 76
         + ["Eggette"] * 146
         + ["Pineapple Cake"] * 147
         + _RARE_A * 413
         + _RARE_B * 414
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll4 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon", "Sweet Tofu"]) * 72
         + ["Eggette"] * 146
         + ["Pineapple Cake"] * 147
         + _RARE_A * 413
         + _RARE_B * 414
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll5 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon", "Sweet Tofu"]) * 72
         + ["Eggette"] * 146
         + ["Pineapple Cake"] * 147
         + _RARE_C * 392
         + _RARE_D * 393
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll6 = (["Crab Long Bao", "Gingerbread"] * 13
         + ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice"] * 36
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon", "Sweet Tofu", "Milk Tea",
                      "Yunnan Noodles"]) * 46
         + ["Eggette", "Pineapple Cake"] * 98
         + _RARE_C * 392
         + _RARE_D * 393
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll7 = (["Crab Long Bao", "Gingerbread"] * 23
         + ["Foie Gras", "Peking Duck", "B-52"] * 61
         + ["Bamboo Rice"] * 62
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon"]) * 62
         + ["Sweet Tofu"] * 61
         + ["Milk Tea", "Yunnan Noodles"] * 65
         + ["Eggette", "Pineapple Cake", "Laba Congee"] * 118
         + _RARE_C * 392
         + _RARE_D * 393
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll8 = (["Crab Long Bao", "Gingerbread"] * 21
         + ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice"] * 60
         + ["Bibimbap"] * 9
         + ["Boston Lobster", "Double Scoop"] * 5
         + (_SUPER + ["Udon"]) * 62
         + ["Sweet Tofu"] * 61
         + ["Milk Tea", "Yunnan Noodles"] * 65
         + ["Eggette", "Pineapple Cake", "Laba Congee"] * 118
         + _RARE_C * 392
         + _RARE_D * 393
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll9 = (["Crab Long Bao", "Gingerbread"] * 21
         + ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice"] * 60
         + ["Bibimbap"] * 9
         + ["Boston Lobster", "Double Scoop"] * 5
         + (["Milk Tea", "Yunnan Noodles"] + _SUPER + ["Udon"]) * 58
         + ["Sweet Tofu", "Kimchi", "Ddeokbokki"] * 57
         + ["Eggette", "Pineapple Cake", "Laba Congee"] * 110
         + _RARE_C * 392
         + _RARE_D * 393
         + _MISSION * 46
         + ["Popcorn"] * 47)

roll10 = (["Crab Long Bao", "Gingerbread"] * 21
          + ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice"] * 60
          + ["Bibimbap"] * 9
          + ["Boston Lobster", "Double Scoop"] * 5
          + (["Fried Chicken", "Milk Tea", "Yunnan Noodles"] + _SUPER
             + ["Udon", "Sweet Tofu", "Kimchi"]) * 56
          + ["Ddeokbokki"] * 55
          + ["Eggette", "Pineapple Cake", "Laba Congee"] * 106
          + _RARE_E * 374
          + ["Eclair"] * 373
          + _MISSION * 46
          + ["Popcorn"] * 47)

roll11 = (["Crab Long Bao", "Gingerbread"] * 18
          + ["Bibimbap"] * 9
          + ["Boston Lobster", "Double Scoop"] * 5
          + ["Rum", "Dragon's Beard Candy"] * 17
          + ["Eggette", "Pineapple Cake", "Laba Congee"] * 106
          + (["Fried Chicken", "Milk Tea", "Yunnan Noodles"]
             + _SUPER[:15]) * 54          # Tiramisu .. Red Wine
          + ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice", "Gyoza",
             "Chocolate", "Udon", "Sweet Tofu", "Kimchi", "Ddeokbokki",
             "Pineapple Bun"] * 53
          + _RARE_E * 374
          + ["Eclair"] * 373
          + _MISSION * 46
          + ["Popcorn"] * 47)
# roll12: the current pool, expressed as (rate, items) pairs fed through
# setrate so every item's copy count is explicit in one table.
roll12 = []
for _rate, _items in [
    (5, ["Double Scoop", "Boston Lobster"]),
    (9, ["Bibimbap"]),
    (17, ["Rum", "Dragon's Beard Candy"]),
    (18, ["Crab Long Bao", "Gingerbread"]),
    (46, ["Jello", "Pancake", "Skewer"]),
    (47, ["Popcorn"]),
    (51, ["Escargot", "Fried Chicken", "Hamburger", "Hotdog", "Milk Tea",
          "Sanma", "Steak", "Tangyuan", "Yunnan Noodles"]),
    (52, ["Brownie", "Chocolate", "Ddeokbokki", "Gyoza", "Kimchi",
          "Mango Pudding", "Napoleon Cake", "Pastel de nata",
          "Pineapple Bun", "Red Wine", "Salad", "Sukiyaki",
          "Soft Serve Cone", "Sweet Tofu", "Tiramisu", "Udon", "Yuxiang"]),
    (53, ["Foie Gras", "Peking Duck", "B-52", "Bamboo Rice"]),
    (106, ["Eggette", "Pineapple Cake", "Laba Congee"]),
    (373, ["Eclair"]),
    (374, ["Coffee", "Cold Rice Shrimp", "Dorayaki", "Jiuniang", "Long Bao",
           "Macaron", "Milk", "Miso Soup", "Omurice", "Orange Juice",
           "Sake", "Sakuramochi", "Sashimi", "Spicy Gluten", "Taiyaki",
           "Tempura", "Tom Yum", "Ume Ochazuke", "Yellow Wine", "Zongzi"]),
]:
    roll12 = setrate(roll12, _rate, _items)
|
# Read a rectangle's dimensions and report its area (Portuguese prompts).
altura = float(input("Altura:"))
base = float(input("Largura:"))  # bug fix: prompt read "Lagura"
area = base * altura
print("O valor da area é de", area)
|
import networkx as nx
import numpy as np
import datetime
import pickle
import matplotlib.pyplot as plt
# Load a pre-generated power-law graph and its precomputed neighbourhood
# table (filenames embed the generation timestamp).
G = nx.read_graphml('G_10_power_3_2017-03-29-21:24.graphml')
# graphml stores node ids as strings; relabel them back to ints in place.
mapping = {str(x):x for x in range(len(G.nodes()))}
nx.relabel_nodes(G, mapping, copy=False)
# every_ngbd[v]: v's neighbours ordered by proximity — precomputed offline.
every_ngbd = np.load('every_ngbd_G_10_power_3_2017-03-29-21:24.npy')
# One slot per experiment repetition; filled by the loop below.
exp_estimate_list = [None]*10
N = nx.number_of_nodes(G)
N_big = pow(10,6)    # size of the big node group (presumably N = N_big + N_small — confirm)
N_small = pow(10,3)  # size of the small node group
def fast_graph_scan(G, every_ngbd, k, weights, print_time):
    """Return (min, argmin) of the average weight over each node's
    k+1 closest precomputed neighbours.

    G: graph whose node ids index both `weights` and `every_ngbd`.
    every_ngbd: per-node neighbour lists ordered by proximity.
    k: neighbourhood size (the node itself plus its k nearest).
    weights: numpy array of per-node weights.
    print_time: unused; kept for interface compatibility.

    Removed dead code from the original: an unused `my_set = []`
    pre-initialisation and an unused `start = datetime.datetime.now()`.
    """
    N = nx.number_of_nodes(G)
    avg_ngbd = np.zeros(N)
    for v in G.nodes():
        # Average the weights of v's (k+1)-node neighbourhood.
        ngbd = every_ngbd[v][0:(k + 1)]
        avg_ngbd[v] = np.mean(weights[ngbd])
    return np.min(avg_ngbd), np.argmin(avg_ngbd)
# Run 10 repetitions of the multi-step adversary experiment and persist
# the per-step scan estimates.
for exp_no in range(10):
    print('exp_no = ',exp_no)
    exp_estimate_list[exp_no] = [None]*10
    # Indices of inactive nodes inside the big (million-node) group.
    L_ic = np.random.choice(range(0,N_big), int(N_big/2), replace=False)
    # Base weights ~ N(10, 1); inactive nodes are shifted down by 8.
    weights = 10*np.ones(N) + np.random.normal(0,1,N)
    weights[N_big:N] -= 8
    weights[L_ic] -= 8
    k = 1000
    [a,a_ind] = fast_graph_scan(G,every_ngbd,k,weights,False)  # without the adversary
    print('without adversary done')
    all_inactive = set(L_ic).union((set(np.arange(N_big,N))))
    all_active = set(np.arange(0,N)) - all_inactive  # NOTE(review): unused below
    active_in_best_ngbd = set(every_ngbd[a_ind][0:k+1]) - all_inactive
    a_new = 9999
    a_new_ind = 9999
    print(a)
    for step_no in range(10):
        if active_in_best_ngbd:
            # NOTE(review): this aliases `weights`, so the 10**6 penalties
            # accumulate across steps; presumably intended for the
            # multi-step adversary — confirm (use weights.copy() otherwise).
            altered_weights = weights
            # Set weights for the active guys in the best ngbd to 1mn.
            altered_weights[np.array(list(active_in_best_ngbd))] = pow(10,6)
            [a_new,a_new_ind] = fast_graph_scan(G,every_ngbd,k,altered_weights,False)
            print('with weak but multi-step adversary done')
            print(a_new)
        else:
            print('Game Over! We won!')
            [a_new,a_new_ind] = [9999,9999]
        active_in_best_ngbd = set(every_ngbd[a_new_ind][0:k+1]) - all_inactive
        exp_estimate_list[exp_no][step_no] = a_new
with open('adversary_results/a_k_1000_estimate_data_multisteps', 'wb') as fp:
    # Bug fix: the original dumped an undefined name `store` (NameError).
    pickle.dump(exp_estimate_list, fp)
|
# Read the integer whose divisors will be listed below.
num = int(input('Input a num: '))
def getfactors(num):
    """Return all positive divisors of num, in ascending order.

    For num <= 0 the result is an empty list (range(1, num + 1) is empty).

    >>> getfactors(12)
    [1, 2, 3, 4, 6, 12]
    """
    # Trial division; replaces the original append loop with its
    # redundant `else: continue` branch.
    return [i for i in range(1, num + 1) if num % i == 0]
# Print every divisor of the number entered above.
print(getfactors(num))
|
# Warm-up exercise: escape sequences, a multi-line poem, simple arithmetic,
# and a function returning several values at once.
print("Let's practice everything.")
print("You'd need to know \' about escapes with \\ that do\n newlines",
      "and \t tabs.")

poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \nthe needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""

print("_______________")
print(poem)
print("_______________")

# Left-to-right: ((10 - 2) + 3) - 6 == 5.
five = 10 - 2 + 3 - 6
print("This should be five: %s" % five)


def secret_formula(started):
    """Return (jelly_beans, jars, crates) derived from the starting count."""
    jelly_beans = 500 * started
    jars = jelly_beans / 1000
    crates = jars / 100
    return jelly_beans, jars, crates


start_point = 10000
beans, jars, crates = secret_formula(start_point)
print("With a starting point of:%d" % (start_point))
print("We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates))

start_point = start_point / 10
print("We can also do that this way:")
# secret_formula's 3-tuple feeds the three %d slots directly.
print("We'd have %d beans, %d jars, and %d crates." % (secret_formula(start_point)))
|
from django import forms
from .models import *
from django.conf import settings
class ProduitForm(forms.ModelForm):
    """Form for creating/editing a product, with Bootstrap-styled widgets."""
    qte_stocke = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Quantite'}), required = True)
    categorie = forms.ModelChoiceField(widget=forms.Select(attrs={ 'class':'form-control'}),queryset = Categorie.objects.all())
    nom = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control','placeholder':'Nom'}), required = True)
    option = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control','placeholder':'Option'}), required = True)
    marque = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control','placeholder':'Marque'}), required = True)
    class Meta:
        # Expose every Produit model field on the form.
        model = Produit
        fields = '__all__'
class AchatForm(forms.ModelForm):
    """Form used to register a purchase (achat) of a product."""
    date_achat = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS ,widget=forms.DateInput(format='%d/%m/%Y',attrs={'class': 'form-control ','placeholder':'JJ/MM/AAAA'} ), required = True)
    qte = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Quantite'}), required = True)
    prix_u = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Prix Unitaire'}), required = True)
    produit = forms.ModelChoiceField(widget=forms.Select(attrs={ 'class':'form-control'}),queryset = Produit.objects.all())
    class Meta:
        model = Achat
        fields = (
            'date_achat',
            'qte',
            'prix_u',
            'produit',
        )
    def clean_qte(self , *args,**kwargs):
        """Reject non-positive quantities with a French validation message."""
        qte = self.cleaned_data.get('qte')
        if qte < 1 :
            raise forms.ValidationError('Verifier votre quantite')
        else:
            return qte
class VenteForm(forms.ModelForm):
    """Form used to register a sale (vente) of a product."""
    date_vente = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS ,widget=forms.DateInput(format='%d/%m/%Y',attrs={'class': 'form-control ','placeholder':'JJ/MM/AAAA'} ), required = True)
    qte = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Quantite'}), required = True)
    prix_u = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Prix Unitaire'}), required = True)
    produit = forms.ModelChoiceField(widget=forms.Select(attrs={ 'class':'form-control'}),queryset = Produit.objects.all())
    class Meta:
        model = Vente
        fields = (
            'date_vente',
            'qte',
            'prix_u',
            'produit',
        )
    def clean_qte(self , *args,**kwargs):
        """Reject non-positive quantities with a French validation message."""
        qte = self.cleaned_data.get('qte')
        if qte < 1 :
            raise forms.ValidationError('Verifier votre quantite')
        else:
            return qte
class SortieForm(forms.ModelForm):
    """Form recording a miscellaneous cash outflow (sortie).

    NOTE(review): Meta.model is Achat and the date field is 'date_achat' —
    presumably outflows are persisted as purchases linked to a FraisDivers
    entry; confirm against the views using this form.
    """
    date_achat = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS ,widget=forms.DateInput(format='%d/%m/%Y',attrs={'class': 'form-control ','placeholder':'JJ/MM/AAAA'} ), required = True)
    prix_u = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Prix'}), required = True)
    fraisdivers = forms.ModelChoiceField(widget=forms.Select(attrs={ 'class':'form-control'}),queryset = FraisDivers.objects.all())
    class Meta:
        model = Achat
        fields = (
            'date_achat',
            'prix_u',
            'fraisdivers',
        )
class EntreeForm(forms.ModelForm):
    """Form recording a miscellaneous cash inflow (entree).

    NOTE(review): Meta.model is Vente and the date field is 'date_vente' —
    presumably inflows are persisted as sales linked to a FraisDivers
    entry; confirm against the views using this form.
    """
    date_vente = forms.DateField(input_formats=settings.DATE_INPUT_FORMATS ,widget=forms.DateInput(format='%d/%m/%Y',attrs={'class': 'form-control ','placeholder':'JJ/MM/AAAA'} ), required = True)
    prix_u = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'form-control','placeholder':'Prix'}), required = True)
    fraisdivers = forms.ModelChoiceField(widget=forms.Select(attrs={ 'class':'form-control'}),queryset = FraisDivers.objects.all())
    class Meta:
        model = Vente
        fields = (
            'date_vente',
            'prix_u',
            'fraisdivers',
        )
#
# Copyright 2015-2016 Bleemeo
#
# bleemeo.com an infrastructure monitoring solution in the Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import imaplib
import logging
import select
import shlex
import smtplib
import socket
import struct
import threading
import time
import requests
# pylint: disable=wrong-import-order
from six.moves.urllib import parse as urllib_parse
import bleemeo_agent.type
import bleemeo_agent.util
# Special value, means that check could not be run, e.g. due to missing port
# information
STATUS_CHECK_NOT_RUN = -1

# Static per-service check configuration.  'check_type' selects the probe
# used by Check.run_check ('tcp', 'http', 'imap', 'smtp', 'ntp', ...); TCP
# checks may define a payload to send and the reply prefix expected back.
# Service discovery info (address/port/overrides) is merged over these
# defaults when a Check instance is created.
CHECKS_INFO = {
    'mysql': {
        'check_type': 'tcp',
    },
    'apache': {
        'check_type': 'http',
    },
    'dovecot': {
        'check_type': 'imap',
    },
    'elasticsearch': {
        'check_type': 'http',
    },
    'influxdb': {
        'check_type': 'http',
        'http_path': '/ping'
    },
    'ntp': {
        'check_type': 'ntp',
    },
    'openvpn': {
        'disable_persistent_socket': True,
    },
    'openldap': {
        'check_type': 'tcp',
    },
    'postgresql': {
        'check_type': 'tcp',
    },
    'rabbitmq': {
        'check_type': 'tcp',
        'check_tcp_send': 'PINGAMQP',
        'check_tcp_expect': 'AMQP',
    },
    'redis': {
        'check_type': 'tcp',
        'check_tcp_send': 'PING\n',
        'check_tcp_expect': '+PONG',
    },
    'memcached': {
        'check_type': 'tcp',
        'check_tcp_send': 'version\r\n',
        'check_tcp_expect': 'VERSION',
    },
    'mongodb': {
        'check_type': 'tcp',
    },
    'nginx': {
        'check_type': 'http',
    },
    'postfix': {
        'check_type': 'smtp',
    },
    'exim': {
        'check_type': 'smtp',
    },
    'squid': {
        'check_type': 'http',
        # Agent does a normal HTTP request, but squid expect a proxy. It expect
        # squid to reply with a 400 - Bad request.
        'http_status_code': 400,
    },
    'varnish': {
        'check_type': 'tcp',
        'check_tcp_send': 'ping\n',
        'check_tcp_expect': 'PONG'
    },
    'zookeeper': {
        'check_type': 'tcp',
        'check_tcp_send': 'ruok\n',
        'check_tcp_expect': 'imok',
    },
}

# global variable with all checks created
# Maps (service_name, instance) -> Check; all access is guarded by
# _CHECKS_LOCK since checks are created/stopped from multiple threads.
CHECKS = {}
_CHECKS_LOCK = threading.Lock()
def update_checks(core):
    """Synchronise the global CHECKS registry with core.services.

    Creates a Check for every active, non-ignored service; replaces checks
    whose service_info changed; stops and removes checks whose service
    disappeared from core.services.
    """
    global CHECKS  # pylint: disable=global-statement
    checks_seen = set()
    for key, service_info in core.services.items():
        (service_name, instance) = key
        checks_seen.add(key)
        with _CHECKS_LOCK:
            if key in CHECKS and CHECKS[key].service_info == service_info:
                # check unchanged
                continue
            elif key in CHECKS:
                # Config changed: drop the stale check before recreating it.
                CHECKS[key].stop()
                del CHECKS[key]
        if service_info.get('ignore_check', False):
            continue
        if not service_info.get('active', True):
            # If the service is inactive, no check should be performed
            continue
        try:
            new_check = Check(
                core,
                service_name,
                instance,
                service_info,
            )
            with _CHECKS_LOCK:
                CHECKS[key] = new_check
        except NotImplementedError:
            # Check.__init__ raises this when no check type applies.
            logging.debug(
                'No check exists for service %s', service_name,
            )
        except Exception:  # pylint: disable=broad-except
            # Best-effort: a broken service definition must not stop the
            # remaining services from being (re)checked.
            logging.debug(
                'Failed to initialize check for service %s',
                service_name,
                exc_info=True
            )
    with _CHECKS_LOCK:
        # Retire checks for services that no longer exist.
        deleted_checks = set(CHECKS.keys()) - checks_seen
        for key in deleted_checks:
            CHECKS[key].stop()
            del CHECKS[key]
def periodic_check():
    """Periodic housekeeping: verify every check's TCP sockets are still
    open, reopening (and re-running checks) as needed.
    """
    with _CHECKS_LOCK:
        for current_check in CHECKS.values():
            current_check.check_sockets()
class Check:
# pylint: disable=too-many-instance-attributes
def __init__(self, core, service_name, instance, service_info):
self.address = service_info.get('address')
self.port = service_info.get('port')
self.protocol = service_info.get('protocol')
self.check_info = CHECKS_INFO.get(service_name, {})
if self.port is not None and self.protocol == socket.IPPROTO_TCP:
self.check_info.setdefault('check_type', 'tcp')
self.service_info = service_info
self.check_info.update(service_info)
if (self.check_info.get('password') is None
and service_name in ('mysql', 'postgresql')):
# For those check, if password is not set the dedicated check
# will fail.
self.check_info['check_type'] = 'tcp'
self.service = service_name
self.instance = instance
self.core = core
self.extra_ports = self.check_info.get('netstat_ports', {})
if self.instance:
self.display_name = '%s (on %s)' % (self.service, self.instance)
else:
self.display_name = '%s' % self.service
if not self.check_info.get('check_type') and not self.extra_ports:
raise NotImplementedError("No check for this service")
self.open_sockets_job = None
self._fast_check_job = None
self._last_status = None
self._lock = threading.Lock()
self._closed = False
logging.debug(
'Created new check for service %s',
self.display_name
)
self.tcp_sockets = self._initialize_tcp_sockets()
self.current_job = self.core.add_scheduled_job(
self.run_check,
seconds=60,
next_run_in=0,
)
def _initialize_tcp_sockets(self):
tcp_sockets = {}
if (self.port is not None and self.address is not None
and self.protocol == socket.IPPROTO_TCP):
tcp_sockets[(self.address, self.port)] = None
for port_protocol, address in self.extra_ports.items():
if not port_protocol.endswith('/tcp'):
continue
port = int(port_protocol.split('/')[0])
if port == self.port:
continue
if self.check_info.get('ignore_high_port') and port > 32000:
continue
if address is None:
continue
tcp_sockets[(address, port)] = None
return tcp_sockets
    def open_sockets(self):
        """ Try to open all closed sockets

        Connects every tcp_sockets slot that is currently None.  If any
        connection fails, the main check job is triggered immediately so
        the failure is reported without waiting for the next cycle.
        """
        with self._lock:
            # This job has now run; clear the handle so check_sockets()
            # knows no reopen is pending.
            self.open_sockets_job = None
        if self.check_info.get('disable_persistent_socket'):
            # Some services (e.g. openvpn) can't tolerate idle connections.
            return
        run_check = False
        for (key, tcp_socket) in self.tcp_sockets.items():
            (address, port) = key
            if tcp_socket is not None:
                continue
            tcp_socket = socket.socket()
            tcp_socket.settimeout(2)
            try:
                tcp_socket.connect((address, port))
                with self._lock:
                    if self._closed:
                        # Check was stopped while we connected; discard.
                        tcp_socket.close()
                        return
                    self.tcp_sockets[(address, port)] = tcp_socket
            except socket.error:
                tcp_socket.close()
                logging.debug(
                    'check %s: failed to open socket to %s:%s',
                    self.display_name, address, port
                )
                run_check = True
        if run_check:
            # open_socket failed, run check now
            # reschedule job to be run immediately
            with self._lock:
                if not self._closed:
                    self.current_job = self.core.trigger_job(self.current_job)
def check_sockets(self):
    """ Check if some socket are closed

    Uses a non-blocking select() on all open persistent sockets: a
    readable socket that returns empty data (or errors) means the peer
    closed the connection.  Such sockets are closed locally and an
    open_sockets job is scheduled to reopen them.
    """
    try_reopen = False
    if self.open_sockets_job is not None:
        # open_sockets is pending, wait for it before checking sockets
        return
    # Reverse map socket-object -> (address, port) for the select call.
    sockets = {}
    for key, sock in self.tcp_sockets.items():
        if sock is not None:
            sockets[sock] = key
    if sockets:
        # timeout=0: pure poll, never blocks.
        (rlist, _, _) = select.select(sockets.keys(), [], [], 0)
    else:
        rlist = []
    for sock in rlist:
        try:
            buffer = sock.recv(65536)
        except socket.error:
            # Treat a recv error the same as a remote close.
            buffer = b''
        if buffer == b'':
            (address, port) = sockets[sock]
            logging.debug(
                'check %s: connection to %s:%s closed',
                self.display_name, address, port
            )
            sock.close()
            self.tcp_sockets[(address, port)] = None
            try_reopen = True
    if try_reopen:
        with self._lock:
            if self.open_sockets_job is not None:
                self.core.unschedule_job(self.open_sockets_job)
            if self._closed:
                return
            self.open_sockets_job = self.core.add_scheduled_job(
                self.open_sockets,
                seconds=0,
                next_run_in=0,
            )
def run_check(self):
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-locals
    """Run the configured check, emit the status metric and (re)schedule
    follow-up jobs.

    Flow: pick the check implementation from check_type, probe any
    extra TCP ports, then handle the container-just-(re)started grace
    period, emit the `<service>_status` metric and adjust the fast
    re-check / socket-reopen jobs based on the new status.
    """
    now = time.time()
    key = (self.service, self.instance)
    if (key not in self.core.services
            or not self.core.services[key].get('active', True)):
        # Service was removed or deactivated: silently skip.
        return
    if not self.service_info.get('container_running', True):
        (return_code, output) = (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Container stopped: connection refused'
        )
    elif self.check_info.get('check_type') == 'nagios':
        (return_code, output) = self.check_nagios()
    elif self.check_info.get('check_type') == 'tcp':
        (return_code, output) = self.check_tcp()
    elif self.check_info.get('check_type') == 'http':
        (return_code, output) = self.check_http()
    elif self.check_info.get('check_type') == 'https':
        (return_code, output) = self.check_http(tls=True)
    elif self.check_info.get('check_type') == 'imap':
        (return_code, output) = self.check_imap()
    elif self.check_info.get('check_type') == 'smtp':
        (return_code, output) = self.check_smtp()
    elif self.check_info.get('check_type') == 'ntp':
        (return_code, output) = self.check_ntp()
    else:
        (return_code, output) = (STATUS_CHECK_NOT_RUN, '')
    if (return_code != bleemeo_agent.type.STATUS_CRITICAL
            and return_code != bleemeo_agent.type.STATUS_UNKNOWN
            and self.extra_ports):
        if (return_code == STATUS_CHECK_NOT_RUN
                and set(self.extra_ports.keys()) == {'unix'}):
            # Only a unix socket is exposed: nothing to probe, assume OK.
            return_code = bleemeo_agent.type.STATUS_OK
        for (address, port) in self.tcp_sockets:
            if port == self.port:
                # self.port is already checked with above check
                continue
            (extra_port_rc, extra_port_output) = self.check_tcp(
                address, port)
            if extra_port_rc == bleemeo_agent.type.STATUS_CRITICAL:
                # A single critical extra port makes the whole check
                # critical.
                (return_code, output) = (extra_port_rc, extra_port_output)
                break
            if return_code == STATUS_CHECK_NOT_RUN:
                return_code = extra_port_rc
                output = extra_port_output
    if return_code == STATUS_CHECK_NOT_RUN:
        return_code = bleemeo_agent.type.STATUS_OK
    with self._lock:
        if self._closed:
            return
    # Re-check if the container stopped during the check
    current_service_info = self.core.services.get(key, {})
    if (return_code != bleemeo_agent.type.STATUS_OK and
            not current_service_info.get('container_running', True)):
        (return_code, output) = (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Container stopped: connection refused'
        )
    # If the container has just started few seconds ago (and check failed)
    # ignore and retry soon
    if return_code != bleemeo_agent.type.STATUS_OK:
        container_id = current_service_info.get('container_id')
        container = self.core.docker_containers.get(container_id)
        try:
            # Drop the fractional-seconds part; Docker timestamps are UTC.
            started_at = datetime.datetime.strptime(
                container['State'].get('StartedAt', '').split('.')[0],
                '%Y-%m-%dT%H:%M:%S',
            ).replace(tzinfo=datetime.timezone.utc)
        except (ValueError, AttributeError, TypeError):
            # container may be None or the timestamp malformed.
            started_at = None
        cutoff = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc,
        ) - datetime.timedelta(seconds=10)
        if started_at is not None and started_at > cutoff:
            logging.debug(
                'check %s: return code is %s (output=%s). '
                'Ignore since container just started',
                self.display_name,
                return_code,
                output,
            )
            with self._lock:
                if self._fast_check_job is not None:
                    self.core.unschedule_job(self._fast_check_job)
                if self._closed:
                    return
                # Retry in 10s instead of reporting a transient failure.
                self._fast_check_job = self.core.add_scheduled_job(
                    self.run_check,
                    seconds=0,
                    next_run_in=10,
                )
            return
    if self.instance:
        logging.debug(
            'check %s: return code is %s (output=%s)',
            self.display_name, return_code, output,
        )
    else:
        logging.debug(
            'check %s: return code is %s (output=%s)',
            self.service, return_code, output,
        )
    if self.instance:
        instance = self.instance
        labels = {'item': self.instance}
    else:
        instance = ''
        labels = {}
    metric_point = bleemeo_agent.type.DEFAULT_METRICPOINT._replace(
        label='%s_status' % self.service,
        labels=labels,
        time=now,
        value=float(return_code),
        service_label=self.service,
        service_instance=instance,
        status_code=return_code,
        problem_origin=output,
    )
    self.core.emit_metric(metric_point)
    if return_code != bleemeo_agent.type.STATUS_OK:
        # close all TCP sockets
        for key, sock in self.tcp_sockets.items():
            if sock is not None:
                sock.close()
                self.tcp_sockets[key] = None
        if (self._last_status is None
                or self._last_status == bleemeo_agent.type.STATUS_OK):
            # Status just degraded: re-check faster than the normal
            # 60s period.
            with self._lock:
                if self._fast_check_job is not None:
                    self.core.unschedule_job(self._fast_check_job)
                if self._closed:
                    return
                self._fast_check_job = self.core.add_scheduled_job(
                    self.run_check,
                    seconds=0,
                    next_run_in=30,
                )
    if return_code == bleemeo_agent.type.STATUS_OK and self.tcp_sockets:
        # Make sure all socket are openned
        with self._lock:
            if self.open_sockets_job is not None:
                self.core.unschedule_job(self.open_sockets_job)
            if self._closed:
                return
            self.open_sockets_job = self.core.add_scheduled_job(
                self.open_sockets,
                seconds=0,
                next_run_in=5,
            )
    self._last_status = return_code
def stop(self):
    """ Unschedule this check and release its resources.

    Marks the check as closed under the lock (so concurrently running
    callbacks observe the flag and bail out), unschedules every pending
    job and closes any persistent TCP socket still open.
    """
    # Fixed typo in the log message ("Stoping" -> "Stopping").
    logging.debug('Stopping check %s', self.display_name)
    with self._lock:
        self._closed = True
    self.core.unschedule_job(self.open_sockets_job)
    self.core.unschedule_job(self.current_job)
    self.core.unschedule_job(self._fast_check_job)
    for tcp_socket in self.tcp_sockets.values():
        if tcp_socket is not None:
            tcp_socket.close()
def check_nagios(self):
    """Run the configured Nagios-style check command.

    Returns a (status_code, output) tuple; any exit code outside the
    valid Nagios range [0, STATUS_UNKNOWN] is normalized to
    STATUS_UNKNOWN.
    """
    command = shlex.split(self.check_info['check_command'])
    (status, raw_output) = bleemeo_agent.util.run_command_timeout(
        command,
    )
    cleaned_output = raw_output.decode('utf-8', 'ignore').strip()
    if not 0 <= status <= bleemeo_agent.type.STATUS_UNKNOWN:
        status = bleemeo_agent.type.STATUS_UNKNOWN
    return (status, cleaned_output)
def check_tcp_recv(self, sock, start):
    """Wait for the expected banner on an already-connected socket.

    Reads from *sock* until the configured ``check_tcp_expect`` string
    is seen, the peer closes the connection, or the socket times out.
    The socket is closed on every exit path (previously it was leaked
    on the timeout/error/unexpected-response returns).

    :param sock: connected TCP socket (timeout already set by caller)
    :param start: clock value taken before the connection was opened
    :returns: (status_code, human readable message) tuple
    """
    received = ''
    expected = self.check_info['check_tcp_expect']
    while expected not in received:
        try:
            chunk = sock.recv(4096)
        except socket.timeout:
            sock.close()
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'Connection timed out after 10 seconds'
            )
        except socket.error:
            sock.close()
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'Connection closed'
            )
        if chunk == b'':
            # Peer closed the connection before sending the banner.
            break
        received += chunk.decode('utf8', 'ignore')
    sock.close()
    if expected not in received:
        if received == '':
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'No data received from host'
            )
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Unexpected response: %s' % received
        )
    end = bleemeo_agent.util.get_clock()
    return (
        bleemeo_agent.type.STATUS_OK,
        'TCP OK - %.3f second response time' % (end-start)
    )
def check_tcp(self, address=None, port=None):
    # pylint: disable=too-many-return-statements
    """Open a TCP connection to the service and optionally talk to it.

    With no arguments, checks the primary address/port and honours the
    optional ``check_tcp_send`` / ``check_tcp_expect`` configuration;
    with an explicit address/port (extra ports) only the connection
    itself is tested.  The socket is now closed on every failure path
    (it was previously leaked on connect/send errors).

    :returns: (status_code, message) tuple; STATUS_CHECK_NOT_RUN when
        no address/port is known.
    """
    if address is not None or port is not None:
        use_default = False
    else:
        address = self.address
        port = self.port
        use_default = True
    if port is None or address is None:
        return (STATUS_CHECK_NOT_RUN, '')
    start = bleemeo_agent.util.get_clock()
    sock = socket.socket()
    sock.settimeout(10)
    try:
        sock.connect((address, port))
    except socket.timeout:
        sock.close()
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'TCP port %d, connection timed out after 10 seconds' % port
        )
    except socket.error:
        sock.close()
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'TCP port %d, Connection refused' % port
        )
    if (self.check_info.get('check_tcp_send')
            and use_default):
        try:
            sock.send(self.check_info['check_tcp_send'].encode('utf8'))
        except socket.timeout:
            sock.close()
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'TCP port %d, connection timed out after 10 seconds' % port
            )
        except socket.error:
            sock.close()
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'TCP port %d, connection closed too early' % port
            )
    if (self.check_info.get('check_tcp_expect')
            and use_default):
        # check_tcp_recv continues the conversation on this socket.
        return self.check_tcp_recv(sock, start)
    sock.close()
    end = bleemeo_agent.util.get_clock()
    return (
        bleemeo_agent.type.STATUS_OK,
        'TCP OK - %.3f second response time' % (end-start)
    )
def check_http(self, tls=False):
    """Check the service with a single HTTP(S) GET request.

    Status mapping: when ``http_status_code`` is configured, any other
    code is critical.  Otherwise 5xx is critical, 4xx is a warning and
    everything else is OK.

    :param tls: use https:// instead of http://
    :returns: (status_code, message) tuple
    """
    if self.port is None or self.address is None:
        return (STATUS_CHECK_NOT_RUN, '')
    if tls:
        base_url = 'https://%s:%s' % (self.address, self.port)
    else:
        base_url = 'http://%s:%s' % (self.address, self.port)
    url = urllib_parse.urljoin(
        base_url,
        self.check_info.get('http_path', '/')
    )
    try:
        # verify=False: local health check, certificate validity is
        # deliberately not part of this check.
        response = requests.get(
            url,
            timeout=10,
            allow_redirects=False,
            verify=False,
            headers={'User-Agent': self.core.http_user_agent},
        )
    except requests.exceptions.Timeout:
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Connection timed out after 10 seconds'
        )
    except requests.exceptions.RequestException:
        return (bleemeo_agent.type.STATUS_CRITICAL, 'Connection refused')
    expected_code = None
    if 'http_status_code' in self.check_info:
        expected_code = int(self.check_info['http_status_code'])
    code = response.status_code
    if expected_code is not None:
        if code != expected_code:
            return (
                bleemeo_agent.type.STATUS_CRITICAL,
                'HTTP CRITICAL - http_code=%s' % (
                    code,
                )
            )
    elif code >= 500:
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'HTTP CRITICAL - http_code=%s' % (
                code,
            )
        )
    elif code >= 400:
        return (
            bleemeo_agent.type.STATUS_WARNING,
            'HTTP WARN - status_code=%s' % (
                code,
            )
        )
    return (
        bleemeo_agent.type.STATUS_OK,
        'HTTP OK - status_code=%s' % (
            code,
        )
    )
def check_imap(self):
    """Connect to the IMAP server, send a NOOP and log out.

    :returns: (status_code, message) tuple
    """
    if self.port is None or self.address is None:
        return (STATUS_CHECK_NOT_RUN, '')
    start = bleemeo_agent.util.get_clock()
    try:
        client = IMAP4Timeout(self.address, self.port)
        client.noop()
        client.logout()
    except socket.timeout:
        # BUGFIX: socket.timeout is a subclass of socket.error, so it
        # must be caught first - the previous ordering made this
        # branch unreachable.
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Connection timed out after 10 seconds',
        )
    except (imaplib.IMAP4.error, socket.error):
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Unable to connect to IMAP server',
        )
    end = bleemeo_agent.util.get_clock()
    return (
        bleemeo_agent.type.STATUS_OK,
        'IMAP OK - %.3f second response time' % (end-start)
    )
def check_smtp(self):
    """Connect to the SMTP server, send a NOOP and quit.

    :returns: (status_code, message) tuple
    """
    if self.port is None or self.address is None:
        return (STATUS_CHECK_NOT_RUN, '')
    start = bleemeo_agent.util.get_clock()
    try:
        client = smtplib.SMTP(self.address, self.port, timeout=10)
        client.noop()
        client.quit()
    except socket.timeout:
        # BUGFIX: socket.timeout is a subclass of socket.error, so it
        # must be caught first - the previous ordering made this
        # branch unreachable.
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Connection timed out after 10 seconds',
        )
    except (smtplib.SMTPException, socket.error):
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Unable to connect to SMTP server',
        )
    end = bleemeo_agent.util.get_clock()
    return (
        bleemeo_agent.type.STATUS_OK,
        'SMTP OK - %.3f second response time' % (end-start)
    )
def check_ntp(self):
    """Query the NTP server and verify it is synchronized.

    Sends a minimal SNTP client packet, then checks the stratum
    (0 and 16 mean "not synchronized") and the offset between the
    server clock and the local clock.

    :returns: (status_code, message) tuple
    """
    if self.port is None or self.address is None:
        return (STATUS_CHECK_NOT_RUN, '')
    # Ntp use 1900-01-01 00:00:00 as epoc.
    # Since Unix use 1970-01-01 as epoc, we have this delta
    ntp_delta = 2208988800
    start = bleemeo_agent.util.get_clock()
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.settimeout(10)
    # 0x1b = LI 0, version 3, mode 3 (client); rest of packet zeroed.
    msg = b'\x1b' + 47 * b'\0'
    try:
        client.sendto(msg, (self.address, self.port))
        msg, _address = client.recvfrom(1024)
    except socket.timeout:
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Connection timed out after 10 seconds'
        )
    finally:
        # BUGFIX: the UDP socket was previously never closed (leaked
        # one fd per run).  finally runs before each return.
        client.close()
    unpacked = struct.unpack("!BBBB11I", msg)
    stratum = unpacked[1]
    # Transmit timestamp (seconds field), converted to the Unix epoch.
    server_time = unpacked[11] - ntp_delta
    end = bleemeo_agent.util.get_clock()
    if stratum in (0, 16):
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'NTP server not (yet) synchronized'
        )
    if abs(server_time - time.time()) > 10:
        return (
            bleemeo_agent.type.STATUS_CRITICAL,
            'Local time and NTP time does not match'
        )
    return (
        bleemeo_agent.type.STATUS_OK,
        'NTP OK - %.3f second response time' % (end-start)
    )
class IMAP4Timeout(imaplib.IMAP4):
    """ IMAP4 client whose TCP connection uses a 10 second timeout.
    """
    def open(self, host='', port=imaplib.IMAP4_PORT):
        # Same as imaplib.IMAP4.open, but passes timeout=10 to
        # socket.create_connection.
        connection = socket.create_connection((host, port), timeout=10)
        self.host = host
        self.port = port
        self.sock = connection
        self.file = connection.makefile('rb')
|
import player
import numpy as np
import sys
import random
import matplotlib.pyplot as plt
sys.path.append("../")
# Each Ludo player controls this many pieces.
number_of_pieces = 4
# states = ["home", "goal_zone", "goal", "danger", "glob","safe"]
# A piece's state is simply its board position, hence 60 states.
total_number_of_states = 60
# actions = ["move_out", "normal", "goal_zone", "goal", "star", "globe", "protect", "kill", "die", "nothing"]
total_number_of_actions = 11
# Action identifiers; these index the columns of the Q-table.
move_out_action = 0
normal_action = 1
in_goal_zone_action = 2
enter_goal_zone_action = 3
enter_goal_action = 4
use_star_action = 5
move_to_safety_action = 6
move_away_from_safe_action = 7
kill_enemy_action = 8
suicide_action = 9
no_action = 10
def plot_heat_map(q):
    """Display the agent's Q-table as a labelled heat map.

    :param q: a QLearning instance (its Q_table attribute is plotted)
    """
    action_labels = ["Move_out", "Normal", "In_goal_zone",
                     "Enter_goal_zone", "enter_goal_action", "Use_star", "Move_to_safety", "Move_away_from_safe",
                     "Kill_enemy", "Suicide_action", "no_action"]
    figure, axis = plt.subplots()
    plt.imshow(q.Q_table)
    # One tick per action (columns) and per state (rows), with labels.
    axis.set_xticks(np.arange(total_number_of_actions))
    axis.set_yticks(np.arange(total_number_of_states))
    axis.set_xticklabels(action_labels)
    axis.set_yticklabels(range(total_number_of_states))
    # Rotate the action labels so they stay readable.
    plt.setp(axis.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    axis.set_title("Q_table")
    figure.tight_layout()
    plt.show()
def count(test_list, value):
    """Return how many elements of *test_list* equal *value*.

    Works for any iterable (including numpy arrays), which is why this
    does not delegate to list.count.
    """
    return sum(1 for element in test_list if element == value)
class QLearning:
    """Tabular Q-learning agent for a Ludo player.

    A piece's state is its board position (0-59) and the action space
    is the 11 action types defined at module level, so the Q-table has
    ``total_number_of_states`` rows and ``total_number_of_actions``
    columns.
    """

    def __init__(self, index):
        """:param index: index of the player this agent controls"""
        # Parameters for the algorithm
        self.learning_rate = 0.7
        self.discount_factor = 0.34
        self.explore_rate = 0.2
        self.sum_of_rewards = 0.0
        # When 1, the Q-table is updated and epsilon-greedy exploration
        # is active; when 0 the agent only exploits the learned table.
        self.training = 1
        self.Q_table = np.zeros((total_number_of_states, total_number_of_actions))
        # Parameters for the interpretation of the game
        self.player_index = index
        self.current_state = [0, 0, 0, 0]
        self.last_action = no_action
        self.last_player_pieces = [0, 0, 0, 0]
        self.number_of_wins = 0
        self.number_of_games = 0
        self.last_state = 0
        self.number_of_steps = 0

    def reset_game(self):
        """Reset the per-game bookkeeping and count the finished game."""
        self.current_state = [0, 0, 0, 0]
        self.last_action = no_action
        self.last_state = 0
        self.last_player_pieces = [0, 0, 0, 0]
        self.sum_of_rewards = 0.0
        self.number_of_games = self.number_of_games + 1
        self.number_of_steps = 0

    def determined_state(self, player_pieces):
        """Return the state of each piece (its board position)."""
        return player_pieces

    def determined_possible_actions(self, player_pieces, enemy_pieces, dice):
        """Classify, for each piece, the action a move by *dice* would be.

        :param player_pieces: current positions of the agent's pieces
        :param enemy_pieces: positions of the enemies' pieces
        :param dice: the rolled dice value
        :returns: list with one action id per piece (no_action when the
            piece cannot move)
        """
        possible_actions = [normal_action, normal_action, normal_action, normal_action]
        for piece_index in range(number_of_pieces):
            old_piece_pos = player_pieces[piece_index]
            new_piece_pos = old_piece_pos + dice
            enemy_at_pos, enemy_pieces_at_pos = player.get_enemy_at_pos(new_piece_pos, enemy_pieces)
            if old_piece_pos == player.GOAL_INDEX or (old_piece_pos == player.HOME_INDEX and dice < 6):  # piece at goal
                possible_actions[piece_index] = no_action
            elif old_piece_pos == player.HOME_INDEX and dice == 6:  # move out of home
                possible_actions[piece_index] = move_out_action
            elif new_piece_pos in player.STAR_INDEXS:  # use a star to jump
                possible_actions[piece_index] = use_star_action
            elif new_piece_pos == player.GOAL_INDEX or new_piece_pos == player.STAR_AT_GOAL_AREAL_INDX:  # enter goal
                possible_actions[piece_index] = enter_goal_action
            elif new_piece_pos in player.GLOB_INDEXS:  # globe not owned
                if enemy_at_pos != player.NO_ENEMY:
                    # Landing on a globe occupied by an enemy kills us.
                    possible_actions[piece_index] = suicide_action
                else:
                    possible_actions[piece_index] = move_to_safety_action
            elif new_piece_pos in player.LIST_ENEMY_GLOB_INDEX:
                # Get the enemy that owns the glob
                globs_enemy = player.LIST_TAILE_ENEMY_GLOBS.index(player.BORD_TILES[new_piece_pos])
                # Check if there is an enemy at the glob
                if enemy_at_pos != player.NO_ENEMY:
                    # If there is another enemy then send them home and move there
                    if enemy_at_pos != globs_enemy:
                        possible_actions[piece_index] = kill_enemy_action
                    # If it is the same enemy that is there then move there
                    else:
                        possible_actions[piece_index] = suicide_action
                # If there are not any enemy's at the glob then move there
                else:
                    possible_actions[piece_index] = move_to_safety_action
            elif enemy_at_pos != player.NO_ENEMY:  # kill or suicide
                if len(enemy_pieces_at_pos) == 1:
                    possible_actions[piece_index] = kill_enemy_action
                else:
                    # Two or more enemy pieces on one tile kill us.
                    possible_actions[piece_index] = suicide_action
            elif old_piece_pos < 53 and new_piece_pos > 52:  # goal zone
                possible_actions[piece_index] = enter_goal_zone_action
            elif old_piece_pos in player.GLOB_INDEXS or count(player_pieces, old_piece_pos) > 1:
                # Leaving a globe or a stacked (protected) position.
                possible_actions[piece_index] = move_away_from_safe_action
            elif new_piece_pos > 52 and not (new_piece_pos == player.GOAL_INDEX):
                possible_actions[piece_index] = in_goal_zone_action
            else:
                possible_actions[piece_index] = normal_action
        return possible_actions

    def get_reward(self, player_pieces, there_is_a_winner):
        """Compute the reward for the previously taken action.

        Combines a per-action shaping reward, a penalty for having a
        piece sent home, a small bonus for advancing the piece closest
        to home, and a terminal win/lose reward.

        :param player_pieces: the agent's piece positions after the move
        :param there_is_a_winner: whether the game just ended
        :returns: scalar reward
        """
        reward = 0.0
        if self.last_action == move_out_action:
            reward += 0.3
        elif self.last_action == kill_enemy_action:
            reward += 0.2
        elif self.last_action == suicide_action:
            reward += -0.8
        elif self.last_action == normal_action:
            reward += 0.05
        elif self.last_action == in_goal_zone_action:
            reward += 0.05
        elif self.last_action == enter_goal_zone_action:
            reward += 0.2
        elif self.last_action == use_star_action:
            reward += 0.15
        elif self.last_action == enter_goal_action:
            reward += 0.25
        elif self.last_action == move_away_from_safe_action:
            reward += -0.1
        elif self.last_action == move_to_safety_action:
            reward += 0.1
        elif self.last_action == no_action:
            reward += -0.1
        for i in range(number_of_pieces):
            if self.last_player_pieces[i] > 0 and player_pieces[i] == 0:  # Means that the pieces have been moved home
                reward += -0.25
                break
        if not(self.last_action == move_out_action or self.last_action == in_goal_zone_action or
               self.last_action == enter_goal_action or self.last_action == enter_goal_zone_action or
               self.last_action == suicide_action or self.last_action == no_action):
            lowest_index = -1
            lowest_value = 100
            for i in range(number_of_pieces):  # Used to check if the piece closest
                if not(self.last_player_pieces[i] == 0):  # to home has been moved
                    if self.last_player_pieces[i] < lowest_value:
                        lowest_value = self.last_player_pieces[i]
                        lowest_index = i
            if not(lowest_index == -1):
                # BUGFIX: compare the same piece's old and new position.
                # The original used the leaked loop variable `i`
                # (always the last piece) instead of `lowest_index`.
                if self.last_player_pieces[lowest_index] < player_pieces[lowest_index]:
                    reward += 0.1
        player_won = True
        if there_is_a_winner:  # check if the agent won the game.
            for i in range(number_of_pieces):
                if not player_pieces[i] == player.GOAL_INDEX:
                    player_won = False
                    break
            if player_won:
                reward += 1.0
            else:
                reward += -1.0
        return reward

    def pick_action(self, piece_states, piece_actions):
        """Epsilon-greedy selection of the piece to move.

        :returns: index of the chosen piece, or -1 when no piece can move
        """
        best_action_player = -1
        if not (piece_actions.count(no_action) == len(piece_actions)):
            if self.explore_rate == 0 or np.random.random() > self.explore_rate:
                # Exploit: pick the movable piece with the highest Q-value.
                max_q = -1000.
                for i in range(4):
                    if not(piece_actions[i] == no_action):
                        if max_q < self.Q_table[piece_states[i]][piece_actions[i]]:
                            max_q = self.Q_table[piece_states[i]][piece_actions[i]]
                            best_action_player = i
            else:
                # Explore: pick a random movable piece.
                while True:
                    best_action_player = random.randint(0, 3)
                    if not(piece_actions[best_action_player] == no_action):
                        break
        return best_action_player

    def pick_action_max(self, piece_states, piece_actions):
        """Greedy selection (no exploration) of the piece to move.

        :returns: index of the best movable piece, or -1 when none can move
        """
        best_action_player = -1
        if not (piece_actions.count(no_action) == len(piece_actions)):
            max_q = -1000.
            for i in range(4):
                if not(piece_actions[i] == no_action):
                    if max_q < self.Q_table[piece_states[i]][piece_actions[i]]:
                        max_q = self.Q_table[piece_states[i]][piece_actions[i]]
                        best_action_player = i
        return best_action_player

    def update_q_table(self, player_pieces, enemy_pieces, dice, game, there_is_a_winner):
        """Pick a move and apply one Q-learning update for the last move.

        :returns: index of the piece to move (-1 when no move is possible)
        """
        current_actions = self.determined_possible_actions(player_pieces, enemy_pieces, dice)
        current_states = self.determined_state(player_pieces)
        piece_index = self.pick_action(current_states, current_actions)
        if self.training == 1 and not(piece_index == -1):
            reward = self.get_reward(player_pieces, there_is_a_winner)
            self.sum_of_rewards += reward
            # Standard Q-learning target uses the greedy (max) action
            # value of the current state.
            max_piece_index = self.pick_action_max(current_states, current_actions)
            max_q_value_next_state = self.Q_table[current_states[max_piece_index]][current_actions[max_piece_index]]
            last_q_value = self.Q_table[self.last_state][self.last_action]
            self.Q_table[self.last_state][self.last_action] += \
                self.learning_rate*(reward+self.discount_factor*max_q_value_next_state-last_q_value)
        self.last_player_pieces = player_pieces
        self.last_state = current_states[piece_index]
        self.last_action = current_actions[piece_index]
        self.number_of_steps = self.number_of_steps + 1
        return piece_index

    def save_Q_table(self, file_name):
        """Save the Q-table to a ``.npy`` file."""
        file_ext = file_name.split(".")[-1]
        assert file_ext == "npy", "The file extension has to be npy (numpy file)"
        np.save(file_name, self.Q_table)

    def load_Q_table(self, file_name):
        """Load the Q-table from a ``.npy`` file."""
        file_ext = file_name.split(".")[-1]
        assert file_ext == "npy", "The file extension has to be npy (numpy file)"
        self.Q_table = np.load(file_name)
|
import os, sys
from pprint import pprint
# print(sys.path)
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import parallel_reader
# import config as cfg
# Optimizer hyper-parameters (used by configure_optimizer below).
adadelta_rho = 0.95
adagrad_initial_accumulator_value = 0.1
adam_beta1 = 0.9
# BUGFIX: a trailing comma previously made this the tuple (0.999,)
# instead of the float 0.999.
adam_beta2 = 0.999
opt_epsilon = 1.0
ftrl_learning_rate_power = -0.5
ftrl_initial_accumulator_value = 0.1
ftrl_l1 = 0.0
ftrl_l2 = 0.0
momentum = 0.9
rmsprop_momentum = 0.9
rmsprop_decay = 0.9
end_learning_rate = 0.000001
# learning_rate_decay_factor = 0.7
# Shorthand alias for TF-Slim used throughout this module.
slim = tf.contrib.slim
def configure_optimizer(optimizer_name, learning_rate):
    """Create a tf.train optimizer by name.

    Args:
      optimizer_name: one of 'adadelta', 'adagrad', 'adam', 'ftrl',
        'momentum', 'rmsprop' or 'sgd'.
      learning_rate: scalar or `Tensor` learning rate.
    Returns:
      A configured `tf.train.Optimizer` instance.
    Raises:
      ValueError: if `optimizer_name` is not recognized.
    """
    if optimizer_name == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate, rho=adadelta_rho, epsilon=opt_epsilon)
    elif optimizer_name == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=adagrad_initial_accumulator_value)
    elif optimizer_name == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate,
            epsilon=opt_epsilon)
    elif optimizer_name == 'ftrl':
        optimizer = tf.train.FtrlOptimizer(
            learning_rate,
            learning_rate_power=ftrl_learning_rate_power,
            initial_accumulator_value=ftrl_initial_accumulator_value,
            l1_regularization_strength=ftrl_l1,
            l2_regularization_strength=ftrl_l2)
    elif optimizer_name == 'momentum':
        optimizer = tf.train.MomentumOptimizer(
            learning_rate, momentum=momentum, name='Momentum')
    elif optimizer_name == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate,
            decay=rmsprop_decay,
            momentum=rmsprop_momentum,
            epsilon=opt_epsilon)
    elif optimizer_name == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    else:
        # BUGFIX: %-format the message; exceptions do not do
        # logging-style lazy interpolation, so the previous code raised
        # with an unformatted message plus a stray second argument.
        raise ValueError('Optimizer [%s] was not recognized' % optimizer_name)
    return optimizer
def configure_learning_rate(learning_rate_decay_type, learning_rate,
                            decay_steps, learning_rate_decay_rate,
                            global_step):
    """Configures the learning rate.

    Args:
      learning_rate_decay_type: 'exponential', 'fixed' or 'polynomial'.
      learning_rate: initial learning rate.
      decay_steps: number of steps between decays.
      learning_rate_decay_rate: decay rate for exponential decay.
      global_step: The global_step tensor.
    Returns:
      A `Tensor` representing the learning rate.
    Raises:
      ValueError: if `learning_rate_decay_type` is not recognized.
    """
    # decay_steps = int(
    #     num_samples_per_epoch / flags.batch_size * flags.num_epochs_per_decay)
    if learning_rate_decay_type == 'exponential':
        return tf.train.exponential_decay(
            learning_rate,
            global_step,
            decay_steps,
            learning_rate_decay_rate,
            staircase=True,
            name='exponential_decay_learning_rate')
    elif learning_rate_decay_type == 'fixed':
        return tf.constant(learning_rate, name='fixed_learning_rate')
    elif learning_rate_decay_type == 'polynomial':
        # Decays linearly (power=1.0) down to end_learning_rate.
        return tf.train.polynomial_decay(
            learning_rate,
            global_step,
            decay_steps,
            end_learning_rate,
            power=1.0,
            cycle=False,
            name='polynomial_decay_learning_rate')
    else:
        # BUGFIX: %-format the message instead of passing the value as
        # a second exception argument.
        raise ValueError('learning_rate_decay_type [%s] was not recognized'
                         % learning_rate_decay_type)
import torch.nn as nn
import torchvision.models as backbone_
import torch.nn.functional as F
import torch
from torchvision.ops import MultiScaleRoIAlign
from collections import OrderedDict
import torch
import torchvision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from torch.nn.utils.rnn import pack_padded_sequence, pad_sequence
class Resnet_Network(nn.Module):
    """ResNet-50 feature extractor with global max pooling.

    In the fully supervised setting a linear classification head is
    applied on top of the pooled 2048-d features; otherwise the pooled
    features themselves are the output.
    """
    def __init__(self, hp, num_class = 81):
        """
        :param hp: hyper-parameter namespace; uses hp.fullysupervised
            and hp.dataset_name
        :param num_class: default number of classes (overridden per
            dataset in the fully supervised case)
        """
        super(Resnet_Network, self).__init__()
        self.hp = hp
        backbone = backbone_.resnet50(pretrained=False) #resnet50, resnet18, resnet34
        # Copy every backbone stage except the classification tail.
        self.features = nn.Sequential()
        for name, module in backbone.named_children():
            if name not in ['avgpool', 'fc']:
                self.features.add_module(name, module)
        self.pool_method = nn.AdaptiveMaxPool2d(1) # as default
        if hp.fullysupervised:
            if hp.dataset_name == 'TUBerlin':
                num_class = 250
            elif hp.dataset_name == 'QuickDraw':
                num_class = 345
        self.classifier = nn.Linear(2048, num_class)

    def forward(self, x):
        """Return class logits (fully supervised) or pooled features."""
        x = self.features(x)
        x = self.pool_method(x).view(-1, 2048)
        if self.hp.fullysupervised:
            x = self.classifier(x)
        return x

    def extract_features(self, x, every_layer=True):
        """Collect pooled feature vectors from the backbone.

        :param x: input image batch
        :param every_layer: when True, return pooled features of
            layer1..layer4; when False, only the final 'pre_logits'
            features
        :returns: dict mapping layer name -> (batch, C) tensor
        """
        feature_list = {}
        batch_size = x.shape[0]
        # https://stackoverflow.com/questions/47260715/
        # how-to-get-the-value-of-a-feature-in-a-layer-that-match-a-the-state-dict-in-pyto
        for name, module in self.features._modules.items():
            x = module(x)
            if every_layer and name in ['layer1', 'layer2', 'layer3', 'layer4']:
                feature_list[name] = self.pool_method(x).view(batch_size, -1)
        if not feature_list:
            # every_layer was False: expose only the final features.
            feature_list['pre_logits'] = self.pool_method(x).view(batch_size, -1)
        return feature_list
class UNet_Decoder(nn.Module):
    """Decoder mapping a 512-d latent vector to an image.

    Seven stride-2 up-convolution stages grow the 1x1 latent map to
    128x128, then a final transposed convolution with Tanh produces a
    256x256 output in [-1, 1].
    """

    def __init__(self, out_channels=3):
        super(UNet_Decoder, self).__init__()
        self.deconv_1 = Unet_UpBlock(512, 512)
        self.deconv_2 = Unet_UpBlock(512, 512)
        self.deconv_3 = Unet_UpBlock(512, 512)
        self.deconv_4 = Unet_UpBlock(512, 256)
        self.deconv_5 = Unet_UpBlock(256, 128)
        self.deconv_6 = Unet_UpBlock(128, 64)
        self.deconv_7 = Unet_UpBlock(64, 32)
        self.final_image = nn.Sequential(*[nn.ConvTranspose2d(32, out_channels,
                                                              kernel_size=4, stride=2,
                                                              padding=1), nn.Tanh()])

    def forward(self, x):
        # Treat the latent vector as a 1x1 spatial feature map.
        out = x.view(-1, 512, 1, 1)
        # Each stage doubles the spatial resolution: 2, 4, ..., 128.
        for stage in (self.deconv_1, self.deconv_2, self.deconv_3,
                      self.deconv_4, self.deconv_5, self.deconv_6,
                      self.deconv_7):
            out = stage(out)
        return self.final_image(out)  # 256x256
class Unet_UpBlock(nn.Module):
    """One decoder stage: 4x4 stride-2 transposed convolution (doubles
    the spatial size), InstanceNorm, then ReLU.
    """

    def __init__(self, inner_nc, outer_nc):
        super(Unet_UpBlock, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(inner_nc, outer_nc, 4, 2, 1, bias=True),
            nn.InstanceNorm2d(outer_nc),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.model(x)
class Residual_UpBlock(nn.Module):
    """Pre-activation residual upsampling block.

    Main path: norm -> LeakyReLU -> 3x3 conv, then norm -> LeakyReLU ->
    3x3 transposed conv (which performs the upsampling).  A single
    transposed convolution forms the shortcut so both paths share the
    same output shape, and the two are summed.
    """

    def __init__(self, c_in, c_out, stride, output_padding, norm='InstanceNorm2d', c_hidden=None):
        super(Residual_UpBlock, self).__init__()
        if c_hidden is None:
            c_hidden = c_out
        norm_layer = nn.BatchNorm2d if norm == 'BatchNorm2d' else nn.InstanceNorm2d
        self.conv1 = nn.Sequential(
            norm_layer(c_in, affine=True),
            nn.LeakyReLU(),
            nn.Conv2d(c_in, c_hidden, kernel_size=3, stride=1, padding=1))
        self.conv2 = nn.Sequential(
            norm_layer(c_hidden, affine=True),
            nn.LeakyReLU(),
            nn.ConvTranspose2d(c_hidden, c_out, kernel_size=3,
                               stride=stride, padding=1, output_padding=output_padding))
        self.residual = nn.ConvTranspose2d(c_in, c_out, kernel_size=3,
                                           stride=stride, padding=1, output_padding=output_padding)

    def forward(self, x):
        shortcut = self.residual(x)
        main_path = self.conv2(self.conv1(x))
        return shortcut + main_path
class ResNet_Decoder(nn.Module):
    """Stack of Residual_UpBlock stages producing a 1-channel map.

    The first three stages upsample height only (stride (2, 1)), the
    next two upsample both dimensions, and the final stage changes the
    channel count to 1; the output is squashed to [-1, 1] with tanh.
    """

    def __init__(self):
        super(ResNet_Decoder, self).__init__()
        self.upblock5 = Residual_UpBlock(512, 256, (2, 1), (1, 0))
        self.upblock4 = Residual_UpBlock(256, 128, (2, 1), (1, 0))
        self.upblock3 = Residual_UpBlock(128, 64, (2, 1), (1, 0))
        self.upblock2 = Residual_UpBlock(64, 32, (2, 2), (1, 1))
        self.upblock1 = Residual_UpBlock(32, 32, (2, 2), (1, 1))
        self.upblock0 = Residual_UpBlock(32, 1, (1, 1), (0, 0))

    def forward(self, x):
        out = x
        for stage in (self.upblock5, self.upblock4, self.upblock3,
                      self.upblock2, self.upblock1, self.upblock0):
            out = stage(out)
        return torch.tanh(out)
class Sketch_LSTM(nn.Module):
    """Bidirectional 2-layer LSTM encoder for variable-length stroke
    sequences; returns one fixed-size vector per sequence.
    """
    def __init__(self, inp_dim=5, hidden_size=512, LSTM_num_layers=2, dropout=0.5):
        """
        :param inp_dim: features per timestep (5-element stroke format)
        :param hidden_size: LSTM hidden size per direction
        :param LSTM_num_layers: number of stacked LSTM layers
        :param dropout: dropout between LSTM layers
        """
        super(Sketch_LSTM, self).__init__()
        # self.bidirectional stores the number of directions (2).
        self.inp_dim, self.hidden_size, self.LSTM_num_layers, self.bidirectional = inp_dim, hidden_size, LSTM_num_layers, 2
        self.LSTM_encoder = nn.LSTM(inp_dim, hidden_size,
                                    num_layers=LSTM_num_layers,
                                    dropout=dropout,
                                    batch_first=True, bidirectional=True)

    def forward(self, x, seq_len):
        """Encode a padded batch of sequences.

        :param x: (batch, max_len, inp_dim) padded input
        :param seq_len: per-sequence lengths
        :returns: (batch, 2 * hidden_size) final hidden states
        """
        # batch['stroke_wise_split'][:,:,:2] /= 800
        # NOTE(review): recent torch versions expect the lengths of
        # pack_padded_sequence on CPU; moving seq_len to a CUDA device
        # may warn or fail there - confirm against the torch version used.
        x = pack_padded_sequence(x.to(device), seq_len.to(device), batch_first=True, enforce_sorted=False)
        _ , (x_hidden, _) = self.LSTM_encoder(x.float())
        # Reshape hidden state to (layers, directions, batch, hidden),
        # take the last layer, and concatenate both directions per
        # sample -> (batch, 2 * hidden_size).
        x_hidden = x_hidden.view(self.LSTM_num_layers, self.bidirectional, seq_len.shape[0], self.hidden_size)[-1].permute(1,0,2).reshape(seq_len.shape[0], -1)
        return x_hidden
return x_hidden |
"""
@author : Anish Lakkapragada
@date : 3 - 4 - 2021
Gaussian Mixtures are one of the best models today in the field of anomaly detection and unsupervised clustering.
But that is not the only reason this is one of the best modules SeaLion has to offer.
"""
import numpy as np
class GaussianMixture() :
"""
Gaussian Mixture Models are really just a fancier extension of KMeans. Make sure you know KMeans before you read this.
If you are unfamiliar with KMeans, feel free to look at the examples on GitHub for unsupervised clustering or
look at the unsupervised clustering documentation (which contains the KMeans docs.) You may also want to know
what a gaussian (normal) distribution is.
In KMeans, you make the assumption that your data is in spherical clusters, all of which are the same shape. What
if your data is instead in such circles, but also maybe ovals that are thin, tall, etc. Basically what if your
clusters are spherical-esque but of different shapes and sizes.
This difference can be measured by the standard deviation, or variance, of each of the clusters. You can think of each
cluster as a gaussian distribution, each with a different mean (similar to the centroids in KMeans) and standard
deviation (which affects how skinny/wide it is.) This model is called a mixture, because it essentially is just a
mixture of gaussian functions. Some of the clusters maybe bigger than others (aside from shape), and this is
also taken into account with a mixture weight - a coefficient that is multiplied to each gaussian distribution.
With this gaussian distribution, you can then take any points and assign them to the cluster they have the
highest chance of being in. Because these are probabilities, you can say that a given data point has a 70% chance
of being in this class, 20% this class, 5% this class, and 5% this other class - which is something you
cannot do with KMeans. The parameters of the gaussian distribution are learnt using an algorithm known
as Expectation Maximization, which you may want to learn about (super cool)!
You can also do anomaly detection with Gaussian Mixture Models! You do this by looking at the probability each data
point belonged to the cluster it had the highest chance of being in. We can call this list of a probability
that a data point belonged to the cluster it was chosen to for all data points "confidences". If a given data points
confidence is in the lowest _blank_ (you set this) percent of confidences, it's marked as an anomaly. This _blank_ is
from 1 to 100, and is essentially what percent of your data you think are outliers.
To find the best value for n_clusters, we also have a visualize_elbow_curve method to do that for you. Check the docs
below if you're interested!
That's a lot, so let's take a look at its methods!
**NOTE: X SHOULD BE 2D FOR ALL METHODS**
"""
def __init__(self, n_clusters = 5, retries = 3, max_iters = 200, kmeans_init = True):
"""
:param n_clusters: number of clusters assumed in the data
:param retries: number of times the algorithm will be tried, and then the best solution picked
:max_iters: maximum number of iterations that the algorithm will be ran for each retry
:kmeans_init: whether the centroids found by using KMeans should be used to initialize the means in this Gaussian Mixture.
This will only work if your data is in more than one dimension. If you are using a lot of retries, this may
not be a good option as it may lead to the same solution over and over again.
"""
from .cython_mixtures import cy_GaussianMixture
self.cython_gmm_class = cy_GaussianMixture
self.cython_gaussian_mixture = cy_GaussianMixture(n_clusters, retries, max_iters, kmeans_init)
self.k = n_clusters
self.retries = retries
self.max_iters = max_iters
self.kmeans_init = kmeans_init
def fit(self, X):
"""
this method finds the parameters needed for Gaussian Mixtures to function
:param X : training data
"""
self.X = X
self.cython_gaussian_mixture.fit(X)
def predict(self, X):
"""
this method returns the cluster each data point in X belongs to
:param X : prediction data
"""
return self.cython_gaussian_mixture.predict(X)
def soft_predict(self, X):
"""
this method returns the probability each data point belonged to each cluster. This is stored in a matrix
with the length of X rows and the width of the amount of clusters.
:param X : prediction data
"""
return self.cython_gaussian_mixture.soft_predict(X)
def confidence_samples(self, X):
"""
This method essentially gives the highest probability in the rows of probabilities in the matrix that is the output
of the ``soft_predict()`` method.
Translation:
It's telling you the probability each data point had in the cluster it had
the largest probability in (and ultimately got assigned to), which is really telling you how confident it is
that a given data point belongs to the data.
:param X: prediction data
"""
return self.cython_gaussian_mixture.confidence_samples(X)
def aic(self):
"""
Returns the AIC metric (lower is better.)
"""
return self.cython_gaussian_mixture.aic()
def bic(self):
"""
Returns the BIC metric (lower is better.)
"""
return self.cython_gaussian_mixture.bic()
def anomaly_detect(self, X, threshold):
"""
:param X: prediction data
:param threshold: what percent of the data you believe is an outlier
:return: whether each data point in X is an anomaly based on whether its confidence in the cluster it was assigned to
is in the lowest **threshold** percent of all of the confidences.
"""
return self.cython_gaussian_mixture.anomaly_detect(X, threshold)
def visualize_clustering(self, color_dict):
"""
This method will not work for data that has only 1 dimension (univariate.) It will plot the data you just
gave in the fit() method.
:param color_dict: parameter of the label a cluster was assigned to to its color (must be matplotlib compatible)
The color dict could be ``{0 : "green", 1 : "blue", 2 : "red"}`` for example.
"""
is_multivariate = self.cython_gaussian_mixture._is_multivariate()
if is_multivariate :
# then visualize
y_pred = self.cython_gaussian_mixture.predict(self.X)
import matplotlib.pyplot as plt
fig = plt.figure()
for index, prediction in enumerate(y_pred):
plt.scatter(self.X[index][0], self.X[index][1], color=color_dict[prediction])
plt.xlabel("x-axis")
plt.ylabel("y-axis")
plt.title("Visualized Clustering with Gaussian Mixtures")
plt.show()
else :
raise ValueError("MODEL HAS DATA WITH ONLY 1 DIMENSION. THIS METHOD CANNOT BE USED THEN (VISUALIZATION DIFFICULTIES.)")
def visualize_elbow_curve(self, min_n_clusters = 2, max_n_clusters = 5):
"""
This method tries different values for n_cluster, from min_n_cluster to max_n_cluster, and then plots
their AIC and BIC metrics. Finding the n_cluster that leads to the "elbow" is probably the optimal n_cluster
value.
:param min_n_clusters: the minimum value of n_clusters to be tried
:param max_n_clusters: the max value of n_clusters to be tried
"""
n_clusters_to_BIC, n_clusters_to_AIC = [], []
max_n_clusters += 1
original_k = self.k
for k in range(min_n_clusters, max_n_clusters) :
self.k = k
self.cython_gaussian_mixture = self.cython_gmm_class(k, self.retries, self.max_iters, self.kmeans_init)
self.cython_gaussian_mixture.fit(self.X)
n_clusters_to_BIC.append(self.cython_gaussian_mixture.bic())
n_clusters_to_AIC.append(self.cython_gaussian_mixture.aic())
self.k = original_k
self.cython_gaussian_mixture = self.cython_gmm_class(original_k, self.retries, self.max_iters, self.kmeans_init)
self.cython_gaussian_mixture.fit(self.X)
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(np.array(list(range(min_n_clusters, max_n_clusters))), np.array(n_clusters_to_BIC), color = 'green', label = "BIC")
plt.plot(np.array(list(range(min_n_clusters, max_n_clusters))), np.array(n_clusters_to_AIC), color = "blue", label = "AIC")
plt.scatter(list(range(min_n_clusters, max_n_clusters)), n_clusters_to_BIC, color = "green")
plt.scatter(list(range(min_n_clusters, max_n_clusters)), n_clusters_to_AIC, color = "blue")
plt.xticks([k for k in range(min_n_clusters, max_n_clusters)])
plt.legend()
plt.xlabel("Number of Clusters")
plt.ylabel("Information Criteria")
plt.title("Elbow Curve in Gaussian Mixture Models")
plt.show()
|
# aiohttp application entry point: assembles the route table and serves.
from aiohttp import web
from serv.config import web_routes, home_path
# NOTE(review): importing the view modules presumably registers their
# handlers on `web_routes` as a side effect — confirm each module
# decorates/extends `web_routes`; the import order may matter.
import serv.main_views
import serv.grade_views
import serv.student_views
import serv.student_rest
import serv.course_view
import serv.course_rest
app = web.Application()
app.add_routes(web_routes)
# Serve static assets from <home_path>/static at the site root.
app.add_routes([web.static("/", home_path / "static")])
if __name__ == "__main__":
    web.run_app(app, port=8080)
|
"""A small pygame click game: click falling stars (+3) and bouncing balls (+1)."""
import pygame
from pygame.draw import *
from random import randint

pygame.init()

FPS = 20
SCREEN_W, SCREEN_H = 1200, 900
screen = pygame.display.set_mode((SCREEN_W, SCREEN_H))

RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
MAGENTA = (255, 0, 255)
CYAN = (0, 255, 255)
BLACK = (0, 0, 0)
COLORS = [RED, BLUE, YELLOW, GREEN, MAGENTA, CYAN]


def star_points(x, y, r):
    """Vertices of the ten-point star polygon centred near (x, y), size r."""
    return [(x, y - r), (x + r/5, y - r/5), (x + 7*r/8, y - r/4),
            (x + r/4, y + r/6), (x + r/2, y + 7*r/8), (x, y + r/3),
            (x - r/2, y + 7*r/8), (x - r/4, y + r/6),
            (x - 7*r/8, y - r/4), (x - r/5, y - r/5)]


def new_star():
    """Spawn a star at the top of the screen, draw it, return its state dict."""
    x = randint(100, 1100)
    y = 1
    vx = randint(5, 10)
    vy = randint(15, 25)
    color = COLORS[randint(0, 5)]
    r = randint(10, 50)
    polygon(screen, color, star_points(x, y, r))
    return {'x0': x, 'y0': y, 'r0': r, 'vx0': vx, 'vy0': vy, 'color': color}


def move_star(star):
    """Advance a star one tick and redraw it.

    BUG FIX: the original defined both movers under the same name `move`,
    so the ball version shadowed this one and star dicts crashed on the
    ball's 'x1'/'y1' keys.
    """
    star['x0'] += star['vx0']
    star['y0'] += star['vy0']
    polygon(screen, star['color'], star_points(star['x0'], star['y0'], star['r0']))


def print_screen(word, pos):
    """Render `word` in white just above-left of `pos` (a click position)."""
    font = pygame.font.Font(None, 25)
    text = font.render(word, True, [255, 255, 255])
    a, b = pos
    screen.blit(text, (a - 25, b - 20))


def new_ball():
    """Spawn a ball at a random position, draw it, return its state dict."""
    x = randint(100, 1100)
    y = randint(100, 900)
    vx = randint(-10, 10)
    vy = randint(-10, 10)
    color = COLORS[randint(0, 5)]
    r = randint(20, 100)
    circle(screen, color, (x, y), r)
    # BUG FIX: the original built this dict but never returned it,
    # leaving the balls list full of None.
    return {'x1': x, 'y1': y, 'r1': r, 'vx1': vx, 'vy1': vy, 'color': color}


def move_ball(ball):
    """Advance a ball one tick, bouncing off the screen edges, and redraw."""
    if 0 <= ball['x1'] - ball['r1'] and ball['x1'] + ball['r1'] <= SCREEN_W:
        ball['x1'] += ball['vx1']
    else:
        ball['vx1'] *= -1
        ball['x1'] += ball['vx1']
    if 0 <= ball['y1'] - ball['r1'] and ball['y1'] + ball['r1'] <= SCREEN_H:
        ball['y1'] += ball['vy1']
    else:
        ball['vy1'] *= -1
        ball['y1'] += ball['vy1']
    circle(screen, ball['color'], (ball['x1'], ball['y1']), ball['r1'])
    pygame.display.update()


clock = pygame.time.Clock()
finished = False
score = 0
stars = [new_star() for _ in range(5)]
balls = [new_ball() for _ in range(10)]

while not finished:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            finished = True
        elif event.type == pygame.MOUSEBUTTONDOWN:
            xp, yp = event.pos
            # BUG FIX: the original iterated range(1, 5), so stars[0]
            # could never be clicked (and was never moved either).
            for k in range(len(stars)):
                if ((stars[k]['x0'] - xp)**2 + (stars[k]['y0'] - yp)**2)**0.5 <= stars[k]['r0']:
                    stars[k] = new_star()
                    print_screen("yay!", event.pos)
                    score += 3
            for i in range(len(balls)):
                # BUG FIX: the original looked up a nonexistent 'r' key.
                if ((balls[i]['x1'] - xp)**2 + (balls[i]['y1'] - yp)**2)**0.5 <= balls[i]['r1']:
                    balls[i] = new_ball()
                    print_screen("yay!", event.pos)
                    score += 1
    pygame.display.update()
    screen.fill(BLACK)
    for k in range(len(stars)):
        # Respawn stars that left the screen. Moved out of the click handler
        # (where the original had it) so it runs every frame, not only on clicks.
        if stars[k]['x0'] >= SCREEN_W or stars[k]['y0'] >= SCREEN_H:
            stars[k] = new_star()
        move_star(stars[k])
    for i in range(len(balls)):
        move_ball(balls[i])
    font = pygame.font.Font(None, 25)
    text = font.render("Score:" + str(score), True, [255, 255, 255])
    screen.blit(text, (10, 20))

pygame.quit()
print('your score', score)
|
# Flask + GraphQL demo server: loads JSON fixtures into a SQLAlchemy-backed
# database and exposes a GraphiQL-enabled endpoint.
from ast import literal_eval
from flask import Flask
from flask_graphql import GraphQLView
from schema import schema
from database import connector
from database.people_table import People
from database.planet_table import Planet
import os
server = Flask(__name__)
def load_database():
    """Create the schema and seed planets/people from the JSON fixtures.

    Each fixture file holds a Python-literal list of dicts; every dict maps
    straight onto the model's constructor keywords.
    """
    connector.Base.metadata.create_all(connector.engine)
    # NOTE(review): seeding only runs when the DB file already exists, and
    # create_all above has just ensured it does — so this re-seeds on every
    # start, which looks inverted (duplicated rows). Confirm DB_PATH semantics.
    if os.path.exists(connector.DB_PATH):
        for fixture_path, model in (('./data/planets.json', Planet),
                                    ('./data/people.json', People)):
            with open(fixture_path, 'r') as file:
                records = literal_eval(file.read())
            for record in records:
                connector.db_session.add(model(**record))
            connector.db_session.commit()
@server.teardown_appcontext
def shutdown_session(exception=None):
    """Release the scoped DB session when the app context tears down."""
    connector.db_session.remove()
if __name__ == '__main__':
    # Seed the database, register the GraphQL endpoint, then serve.
    load_database()
    graphql_view = GraphQLView.as_view('graphql', schema=schema, graphiql=True)
    server.add_url_rule('/fictizia/1.0/graphql', view_func=graphql_view)
    server.run(port=5005, threaded=True)
|
"""
A script to back up files that have changed on the local store
to the specified cloud service.
Change is determined by modify time.
"""
import argparse
import os
import logging
import sync_drives.sync as sync
import providers.provider_list as provider_list
from common.basic_utils import check_for_user_quit
def main(args):
    """Run (or only analyse) a sync of the local store against a cloud provider.

    :param args: parsed argparse namespace (provider, user, paths, flags)
    """
    logging.basicConfig(level=logging.INFO)

    # Load provider metadata before anything references provider names.
    provider_list.init_providers()

    # Bail out early if the user hasn't authenticated with this provider yet.
    if sync.required_config_is_present(args.provider, args.cpath, args.user) is False:
        print('It doesn\'t appear you have completed the required authentication '
              'step for {}'.format(args.provider))
        return

    print('==============================================================')
    print("Preparing to sync - press \'q\' then enter to stop the sync.")
    print('')

    provider_settings = {'provider_name': args.provider,
                         'user_id': args.user,
                         'server_root_path': args.remote_store_path}
    # sync_drives yields progress steps; poll for a 'q' keypress between them.
    for res in sync.sync_drives(args.local_store_path, args.cpath,
                                provider_settings, '',
                                analyse_only=args.analyse_only):
        if check_for_user_quit() is True:
            break

    print('==============================================================')
    print('')
if __name__ == '__main__':
    # Build the CLI: two provider identifiers, two store paths, two options.
    parser = argparse.ArgumentParser(
        description='Compares the local store directory with the contents '
                    'of the remote store and uploads/deletes files/directories '
                    'as required.')
    parser.add_argument('provider', type=str,
                        choices=provider_list.get_supported_provider_names(),
                        help='The name of the cloud drive provider.')
    parser.add_argument('user', type=str,
                        help='The account name that identifies you to the drive provider.')
    parser.add_argument('local_store_path', type=str,
                        help='The full path to the local store root directory.')
    parser.add_argument('remote_store_path', type=str,
                        help='The full path to the remote store root directory (relative to the drive root).')
    parser.add_argument('--cpath', type=str, default=os.getcwd(),
                        help='The full path to the directory that stores cloud-backup authentication'
                             'config files.')
    parser.add_argument('-a', action='store_true', dest='analyse_only',
                        help='If specified, only sync analysis is done, the actual sync isn\'t carried out.')
    main(parser.parse_args())
# import the necessary packages
from lib.rgbhistogram import RGBHistogram
import argparse
import cPickle as pickle
import glob
import cv2
import os
def parse_args():
    """Build and parse the command-line arguments for the indexer."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dataset", required = True,
        help = "Path to the directory that contains the images to be indexed")
    parser.add_argument("-i", "--index", required = True,
        help = "Path to where the computed index will be stored")
    return parser.parse_args()
def index(dataset, index_path):
    """Index every *.png under `dataset` and pickle the result to `index_path`.

    The index maps each image's filename to its 3D RGB histogram features.

    :param dataset: directory containing the images to be indexed
    :param index_path: file the pickled index is written to
    :raises ValueError: if `dataset` does not exist
    """
    if not os.path.exists(dataset):
        raise ValueError("Path {} that is meant to be dataset does not exist.".format(dataset))

    # Quantified images: 'key' is the image filename, 'value' its features.
    # (Renamed from `index`, which shadowed this function's own name.)
    features_by_file = {}

    # Image descriptor: a 3D RGB histogram with 8 bins per channel.
    desc = RGBHistogram([8, 8, 8])

    for imagePath in glob.glob(os.path.join(dataset, "*.png")):
        # The unique image ID is simply the filename.
        k = os.path.basename(imagePath)
        image = cv2.imread(imagePath)
        features_by_file[k] = desc.describe(image)

    # BUG FIX: the original wrote to the global `args.index` instead of the
    # `index_path` parameter (and also called f.close() inside the `with`).
    # Binary mode is required for pickle data to round-trip portably.
    with open(index_path, "wb") as f:
        pickle.dump(features_by_file, f)
if __name__ == '__main__':
    # Parse the CLI options, then build and persist the image index.
    # NOTE(review): `args` must stay a module-level name — index() above
    # reads the global `args.index` when writing its output file.
    args = parse_args()
    index(args.dataset, args.index)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.