print "importing matplotlib"
import matplotlib.pyplot as plt
print "done importing matplotlib"
import autograd.numpy.linalg as npla
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.misc as scpm
from autograd import grad
import tractor.sdss as sdss
import astrometry.sdss as asdss
import astrometry.util.fits as aufits
from scipy.stats.distributions import gamma
import CelestePy.celeste as celeste
import CelestePy.celeste_galaxy_conditionals as galaxies
from CelestePy.util.data.get_data import tractor_src_to_celestepy_src
from CelestePy.celeste_src import SrcParams
from scipy.optimize import minimize
############################################################################
# Likelihoods of varying shapes/dimensionality for testing samplers
############################################################################
BANDS = ['u', 'g', 'r', 'i', 'z']
def make_fits_images(run, camcol, field):
"""gets field files from local cache (or sdss), returns UGRIZ dict of
fits images"""
print """==================================================\n\n
Grabbing image files from the cache.
TODO: turn off the tractor printing... """
imgs = {}
for band in BANDS:
print "reading in band %s" % band
imgs[band] = sdss.get_tractor_image_dr9(run, camcol, field, band)
fn = asdss.DR9().retrieve('photoField', run, camcol, field)
F = aufits.fits_table(fn)
# convert to FitsImage's
imgfits = {}
for iband,band in enumerate(BANDS):
print "converting images %s" % band
frame = asdss.DR9().readFrame(run, camcol, field, band)
calib = np.median(frame.getCalibVec())
gain = F[0].gain[iband]
darkvar = F[0].dark_variance[iband]
sky = np.median(frame.getSky())
imgfits[band] = celeste.FitsImage(iband,
timg=imgs[band],
calib=calib,
gain=gain,
darkvar=darkvar,
sky=sky)
return imgfits
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
def add_colorbar_to_axis(ax, cim):
""" pretty generic helper function to throw a colorbar onto an axis """
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.05)
cbar = plt.colorbar(cim, cax=cax)
# Manually set ticklabels (not ticklocations, they remain unchanged)
#ax4.set_yticklabels([0, 50, 30, 'foo', 'bar', 'baz'])
if __name__=="__main__":
# galaxy parameters: loc, shape, etc in arg here
# TODO: actually make this reflect how
# galaxy location, shape parameters actually create gaussian
# mixture parameters
def create_pixel_grid(image, loc):
v_s = image.equa2pixel(loc)
bound = image.R
minx_b, maxx_b = max(0, int(v_s[0] - bound)), min(int(v_s[0] + bound + 1), image.nelec.shape[1])
miny_b, maxy_b = max(0, int(v_s[1] - bound)), min(int(v_s[1] + bound + 1), image.nelec.shape[0])
y_grid = np.arange(miny_b, maxy_b, dtype=np.float)
x_grid = np.arange(minx_b, maxx_b, dtype=np.float)
xx, yy = np.meshgrid(x_grid, y_grid, indexing='xy')
pixel_grid = np.column_stack((xx.ravel(order='C'), yy.ravel(order='C')))
return xx.astype(int), yy.astype(int),pixel_grid
def gen_galaxy_image(pixel_info, images, fluxes, loc,
gal_theta, gal_sig, gal_rho, gal_phi):
xx = pixel_info[0]
pixel_grid = pixel_info[2]
bandims = np.zeros((xx.shape[0], xx.shape[1], len(images)))
for idx,image in enumerate(images):
im = gen_galaxy_psf_image(pixel_info[2], image, loc,
gal_theta, gal_sig, gal_rho, gal_phi,
image.weights, image.means, image.covars)
bandims[:,:,idx] = fluxes[idx] * im.reshape(xx.shape, order='C')
return bandims
def gen_point_source_image(pixel_info, images, fluxes, loc):
xx = pixel_info[0]
pixel_grid = pixel_info[2]
bandims = np.zeros((xx.shape[0], xx.shape[1], len(images)))
for idx,image in enumerate(images):
im = gen_point_source_psf_image(pixel_grid, image, loc,
image.weights, image.means, image.covars)
bandims[:,:,idx] = fluxes[idx] * im.reshape(xx.shape, order='C')
return bandims
def calc_galaxy_prior():
return 0
def calc_point_source_prior():
return 0
def calc_total_prob_galaxy(images, fluxes, loc, shape):
xx,yy,pixel_grid = create_pixel_grid(images[0], loc)
pixel_info = [xx, yy, pixel_grid]
prior = calc_galaxy_prior()
lams = gen_galaxy_image(pixel_info, images, fluxes, loc,
shape[0], shape[1], shape[2], shape[3])
curr_sum = prior
for idx,image in enumerate(images):
curr_sum += np.sum(image.nelec[yy,xx] * np.log(lams[:,:,idx]) - lams[:,:,idx])
# verify galaxy
fig, axarr = plt.subplots(1, 2)
axarr[0].contourf(xx, yy, lams[:,:,0])
axarr[1].contourf(xx, yy, image.nelec[yy,xx])
plt.show()
return curr_sum, pixel_info
def calc_total_prob_point_source(pixel_info, images, fluxes, loc):
xx = pixel_info[0]
yy = pixel_info[1]
prior = calc_point_source_prior()
lams = gen_point_source_image(pixel_info, images, fluxes, loc)
curr_sum = prior
for idx,image in enumerate(images):
curr_sum += np.sum(image.nelec[yy,xx] * np.log(lams[:,:,idx]) - lams[:,:,idx])
return curr_sum
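# Note (added): both functions above accumulate an (unnormalized) Poisson log-likelihood,
# log p(n | lam) = sum_xy [ n_xy * log(lam_xy) - lam_xy ] + const,
# where n_xy are the observed photon counts (image.nelec) and lam_xy the model
# intensities; the constant log(n_xy!) term is dropped because it does not depend
# on the source parameters.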
NUM_BANDS = 5
NUM_LOC = 2
NUM_SHAPE = 4
def squared_loss_single_im(galaxy_src, point_src, image):
galaxy_im = celeste.gen_src_image(galaxy_src, image, return_patch=False)
point_src_im = celeste.gen_src_image(point_src, image, return_patch=False)
loss = np.sum((galaxy_im - point_src_im)**2)
return loss
def squared_loss(galaxy_src, point_src, images):
loss = 0
for image in images:
loss += squared_loss_single_im(galaxy_src, point_src, image)
return loss
# read in image and corresponding source
print "read in images and sources"
run = 125
camcol = 1
field = 17
tsrcs = sdss.get_tractor_sources_dr9(run, camcol, field)
imgfits = make_fits_images(run, camcol, field)
# list of images, list of celeste sources
imgs = [imgfits[b] for b in BANDS]
srcs = [tractor_src_to_celestepy_src(s) for s in tsrcs]
src_types = np.array([src.a for src in srcs])
rfluxes = np.array([src.fluxes[2] for src in srcs])
rfluxes[src_types == 0] = -1
brightest_i = np.argmax(rfluxes)
# dim 10
# bright 46
# medium 1
def star_arg_squared_loss_single_im(fluxes, galaxy_src, image):
star = SrcParams(galaxy_src.u, a=0, fluxes=np.exp(fluxes))
return squared_loss_single_im(galaxy_src, star, image)
def star_arg_squared_loss(fluxes, galaxy_src, images):
star = SrcParams(galaxy_src.u, a=0, fluxes=np.exp(fluxes))
return squared_loss(galaxy_src, star, images)
# do gradient descent
for src in [srcs[10]]:
if src.a == 0:
continue
star = SrcParams(src.u, a=0, fluxes=src.fluxes)
print "loss, galaxy with itself:", squared_loss(src, src, imgs)
print "loss, galaxy with star:", squared_loss(src, star, imgs)
final_fluxes = np.zeros(len(BANDS))
for bi,b in enumerate(BANDS):
res = minimize(star_arg_squared_loss_single_im,
np.log(src.fluxes),
args=(src, imgs[bi]),
method='Nelder-Mead',
options={'maxiter':10})
print "original result:", squared_loss_single_im(src, star, imgs[bi])
print "opt result for band", bi, ":", res
final_fluxes[bi] = np.exp(res.x[bi])
print "fluxes:", src.fluxes, final_fluxes
final_star = SrcParams(src.u, a=0, fluxes=final_fluxes)
# add noise and calculate per-band likelihoods
ZERO_CONST = 0.1
gal_likelihoods = np.zeros(len(BANDS))
orig_star_likelihoods = np.zeros(len(BANDS))
final_star_likelihoods = np.zeros(len(BANDS))
for bi, b in enumerate(BANDS):
galaxy_im = celeste.gen_src_image(src, imgs[bi], return_patch=False)
galaxy_im_noise = np.zeros(galaxy_im.shape)
for (i,j),value in np.ndenumerate(galaxy_im):
galaxy_im_noise[i,j] = np.random.poisson(galaxy_im[i,j])
# calculate galaxy likelihood
gal_likelihoods[bi] = np.sum(galaxy_im_noise * np.log(galaxy_im + ZERO_CONST) - galaxy_im)
# calculate star likelihood
orig_star_im = celeste.gen_src_image(star, imgs[bi], return_patch=False)
final_star_im = celeste.gen_src_image(final_star, imgs[bi], return_patch=False)
orig_star_likelihoods[bi] = np.sum(galaxy_im_noise * np.log(orig_star_im + ZERO_CONST) - orig_star_im)
final_star_likelihoods[bi] = np.sum(galaxy_im_noise * np.log(final_star_im + ZERO_CONST) - final_star_im)
print "galaxy likelihoods:", gal_likelihoods
print "orig star likelihoods:", orig_star_likelihoods
print "final star likelihoods:", final_star_likelihoods
# show the new star
fig, axarr = plt.subplots(len(BANDS), 2)
for bi, b in enumerate(BANDS):
final_galaxy_im = celeste.gen_src_image(src, imgs[bi])
final_star_im = celeste.gen_src_image(final_star, imgs[bi])
gim = axarr[bi,0].imshow(final_galaxy_im)
sim = axarr[bi,1].imshow(final_star_im)
add_colorbar_to_axis(axarr[bi,0], gim)
add_colorbar_to_axis(axarr[bi,1], sim)
for c in range(2):
axarr[bi,c].get_xaxis().set_visible(False)
axarr[bi,c].get_yaxis().set_visible(False)
axarr[0,0].set_title('galaxy patch')
axarr[0,1].set_title('star patch')
plt.savefig('dim_galaxy.png')
|
from pymongo import MongoClient
symbol_list = ['ethusdt', 'btcusdt', 'bchusdt', 'ltcusdt', 'eosusdt', 'ethbtc', 'eosbtc', 'xrpusdt']
period = ['1min', '5min', '15min', '30min', '60min', '4hour', '1day', '1week', '1mon']
orders_list = ['submitted', 'partial-filled', 'partial-canceled', 'filled', 'canceled']
mdb = {
"host": '172.24.132.208',
# "host": '51facai.51vip.biz',
"user": 'data',
"password": 'data123',
"db": 'invest',
"port": '27017',
# "port": '16538',
"marketP1": 'dw_market',
"M5": 'dw_M5',
"M15": 'dw_M15',
"M30": 'dw_M30',
"D1": 'dw_D1',
"H1": 'dw_H1',
"H4": 'dw_H4',
"W1": 'dw_W1',
"M1": 'dw_M1',
"Y1": 'dw_Y1',
"MON1": 'dw_MON1',
"depth": 'dw_depth',
"acc": 'accounts',
"bal": 'balance',
"orders": 'orders',
"market_detail": 'dw_ticker_detail',
"future": 'ok_future'
}
mongo_url = 'mongodb://' + mdb["user"] + \
':' + mdb["password"] + '@' + mdb["host"] + ':' + \
mdb["port"] + '/' + mdb["db"]
conn = MongoClient(mongo_url)
sdb = conn[mdb["db"]]
ticker_coll = sdb[mdb["marketP1"]]
dwM1_coll = sdb[mdb["M1"]]
dwM5_coll = sdb[mdb["M5"]]
dwD1_coll = sdb[mdb["D1"]]
dwH1_coll = sdb[mdb["H1"]]
dwW1_coll = sdb[mdb["W1"]]
future_kline_coll = sdb[mdb["future"]]
depth_coll = sdb[mdb["depth"]]
detail_coll = sdb[mdb["market_detail"]]
acc_coll = sdb[mdb["acc"]]
balance_coll = sdb[mdb["bal"]]
orders_coll = sdb[mdb["orders"]]
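# Hedged usage sketch (not part of the original): fetch the most recent 1-minute
# candle for a symbol from the dw_M1 collection. The field names ('symbol', 'ts')
# are assumptions about the stored document schema.
# latest = dwM1_coll.find_one({'symbol': 'btcusdt'}, sort=[('ts', -1)])
# print(latest)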
|
import pytest
from pages import LoginPage,MenuPage,Role_management
from utils.db import AddRoleDB
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@pytest.fixture(scope='session')
def selenium(chrome_options):
docker = 0
container_url = 'http://192.168.1.200:4444/wd/hub' # remote address of the Selenium container in Docker
if docker==0:
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.implicitly_wait(10) # global implicit wait
yield driver
driver.quit()
else:
driver = webdriver.Remote(command_executor=container_url,
desired_capabilities=DesiredCapabilities.CHROME)
driver.implicitly_wait(10) # global implicit wait
yield driver
driver.quit()
@pytest.fixture(scope='session')
def chrome_options():
chrome_options = Options()
chrome_options.add_argument('--start-maximized') # launch the browser maximized
# chrome_options.add_argument('--headless')
return chrome_options
@pytest.fixture
def Login_page(selenium):
selenium.get('http://192.168.1.22:20004/#/passport/login')
return LoginPage(selenium)
@pytest.fixture
def menu_page(Login_page,selenium):
Login_page.login('root','123456')
return MenuPage(selenium)
@pytest.fixture
def add_role_page(menu_page,selenium):
menu_page.main_menu_sub_menu('系统管理','角色管理')
return Role_management(selenium)
@pytest.fixture(scope='session')
def db():
try:
db = AddRoleDB()
except Exception as ex:
pytest.skip(str(ex))
else:
yield db
db.close()
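# Hedged example (not part of the original): a test built on the fixture chain above.
# The page-object method add_role() and the db helper role_exists() are assumptions
# about pages.Role_management and utils.db.AddRoleDB.
# def test_add_role(add_role_page, db):
#     add_role_page.add_role('tester')
#     assert db.role_exists('tester')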
|
sentence = input()
s, t = sentence.split()
j = 0
word = ''
for i in range(len(t)):
    if t[i] == s[j]:
        word += t[i]
        j += 1
        if j == len(s):
            break
if s in word:
    print("Yes")
else:
    print("No")
|
import requests
import psycopg2
conn = psycopg2.connect(host="localhost", database="cartola_fc",
user="postgres", password="postgres")
print("Conectado ao banco")
cur = conn.cursor()
rowcount = cur.rowcount
url = "https://api.cartolafc.globo.com/atletas/mercado"
try:
data = requests.get(url).json()
print("Carregando os dados dos status dos atletas na rodada - - - - - Aguarde")
for atleta in data['atletas']:
result = []
rodada_id = atleta['rodada_id']
atleta_id = atleta['atleta_id']
id_status = atleta['status_id']
result = [rodada_id, atleta_id, id_status]
cur.execute("""INSERT into cartola_fc.tb_status_rodada
VALUES
( %s,
%s,
%s
)""",(result))
conn.commit()
cur.close()
print("Sucesso! Inicializando próxima carga....")
except Exception as io:
print("Erro")
|
#import sys
#input = sys.stdin.readline
def main():
K = int( input())
now = 7
now %= K
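# `now` can only take K distinct values modulo K, so by the pigeonhole principle
# any answer appears within at most K iterations; the 10**7 cap assumes the
# problem's K never exceeds that bound, otherwise "-1" is printed.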
for i in range(10**7):
if now == 0:
print(i+1)
return
now = (now*10+7)%K
print("-1")
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Title :test4.py
# Description :I am test script
# Author :Devon
# Date :2018-01-04
# Version :0.1
# Usage :python test4.py
# Notes :
# python_version :2.7.14
# ==============================================================================
'''
Classes and instances:
1. Class: a class is an abstract template, e.g. the Student class; its type is <class '__main__.Student'>.
2. Instance: an instance is a concrete object created from the class. Instances share the same methods,
   but the data they hold may differ. Its type is object, e.g. <__main__.Student object at 0x7f346359b390>.
3. Difference:
   Student() is an instance object
   Student is the class itself
4. The __init__ method is called right after an instance is created; it initializes the instance and returns nothing.
Functions:
   Difference between a plain function and a class method:
   The first parameter of a class method is always the instance variable self, which is not passed
   explicitly when the method is called. Apart from that, methods behave just like ordinary functions.
   Difference:
   test_fun() is of type str and can be treated as an object (it is the return value)
   test_fun is the function itself
'''
class Student(object):
def __init__(self):
pass
student = Student()
student2 = Student()
student.name = 'devon'
print student.name
print Student
print student2
print Student()
def test_fun():
print "test function"
return "yes"
print test_fun()
print type(test_fun())
# print test_fun
print type(test_fun)
import os
# Define a class that initializes id and name, then add a method that asks the user for a name and prints it
class Job(object):
def __init__(self, id, name):
self.id = id
self.name = name
def get_user_name(self):
name_input = raw_input("Please enter your name:")  # raw_input, since this script targets Python 2.7
print name_input
job = Job(1, 'devon') # arguments must be supplied when instantiating
print job.id, job.name
print job.get_user_name() # call the class method
|
# 144. Binary Tree Preorder Traversal
#
# Given a binary tree, return the preorder traversal of its nodes' values.
#
# For example:
# Given binary tree [1,null,2,3],
# 1
# \
# 2
# /
# 3
# return [1,2,3].
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
Solution.res = []
def preorder(node):
if not node: return
Solution.res.append(node.val)
preorder(node.left)
preorder(node.right)
preorder(root)
return Solution.res
if __name__ == '__main__':
sol = Solution()
root = TreeNode(1)
node1 = TreeNode(2)
node2 = TreeNode(3)
root.right = node1
node1.left = node2
assert sol.preorderTraversal(root) == [1, 2, 3]
|
# Lint143. Sort Colors II
'''
Given an array of n objects with k different colors (numbered from 1 to k), sort them so that objects of the same color are adjacent, with the colors in the order 1, 2, ... k.
'''
# Solution 1: Counting Sort (naive)
#   Time O(n), Space O(k)
# Solution 2: Quick Sort idea (preferred)
#   Time O(n log k), Space O(1)
# Solution 2+: Quick Sort idea, partition into three parts each time
#   Time O(n log k), Space O(1)
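# Hedged sketch of Solution 1 (not in the original notes' code): plain counting sort,
# O(n) time with O(k) extra space for the per-color counts.
class CountingSortSolution:
    def sortColors2(self, colors, k):
        counts = [0] * (k + 1)            # colors are numbered 1..k
        for c in colors:
            counts[c] += 1
        idx = 0
        for color in range(1, k + 1):     # write each color back in order
            for _ in range(counts[color]):
                colors[idx] = color
                idx += 1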
# Solution 2
class Solution:
"""
@param colors: A list of integer
@param k: An integer
@return: nothing
"""
def sortColors2(self, colors, k):
# write your code here
self.sort(colors, 1, k, 0, len(colors)-1)
def sort(self, colors, lower, upper, left, right):
if lower == upper:
return
mid = (lower+upper)//2
# ....ssssssgggggsgs....gsgss...
# l j i r
j = left # ! made mistake mutiple times to assign j=0 !
for i in range(left, right + 1):  # right is an inclusive index
if colors[i] <= mid:
colors[i], colors[j] = colors[j], colors[i]
j += 1
# ....ssssssggggggggggggggggg
# l j r
self.sort(colors, lower, mid, left, j - 1)
self.sort(colors, mid + 1, upper, j, right)
# Solution 2+: lt, eq, gt 3 parts version
class Solution:
def sortColors2(self, colors, k):
self.sort(colors, 1, k, 0, len(colors)-1)
def sort(self, colors, lower, upper, left, right):
if lower >= upper:
return
pivot = (lower + upper) // 2
# ....sssssseeeeesge....ggggg
# l j i k r
i, j, k = left, left, right
while i <= k: # check right bound is k not right
if colors[i] == pivot:
i += 1
elif colors[i] < pivot:
self.swap(colors, i, j)
j += 1
i += 1
else:
self.swap(colors, i, k)
k -= 1
# ....sssssseeeeeeeeggggggggg
# l j k r
self.sort(colors, lower, pivot-1, left, j-1)
self.sort(colors, pivot+1, upper, k+1, right)
def swap(self, arr, i, j):
arr[i], arr[j] = arr[j], arr[i]
|
#Program: intercambio.py
#Purpose: swap the values of two numeric variables
#Author: Jose Manuel Serrano Palomo.
#Date: 13/10/2019
#
#Variables used:
# A, B: the two numbers whose values will be swapped
# suma: helper used to carry out the swap
#Algorithm:
# READ A, B
# suma <-- A + B
# A <-- suma - A
# B <-- suma - B
#WRITE A AND B
print("Invertir el valor de dos variables A y B")
print("-----------------------------------------\n")
#Read the input values
A = int(input("Introduce el número A: "))
B = int(input("Introduce el número B: "))
#Compute the swap
suma = A + B
A = suma - A
B = suma - B
# Alternative form: A, B = B, A
#Print the results
print("El valor de A es ",A , " y el valor de B es ", B)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2017/12/18
@author: ChaoZhong
@email: 16796679@qq.com
'''
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField
from wtforms.validators import DataRequired, Length, IPAddress
from .models import Category
class UsersForm(FlaskForm):
"""登录表单"""
username = StringField(u'用户名', validators=[DataRequired()])
password = PasswordField(u'密码', validators=[DataRequired(), Length(min=6, message=u'密码长度不对')])
submit = SubmitField(u'登 录')
class HostAddForm(FlaskForm):
hostname = StringField(u'主机名', validators=[DataRequired(), Length(min=-1, max=100, message=u'主机名最大长度100')])
wip = StringField(u'外网', validators=[DataRequired(), IPAddress()])
iip = StringField(u'内网', validators=[DataRequired(), IPAddress()])
idc = StringField(u'IDC', validators=[DataRequired(), Length(min=-1, max=50)])
category = SelectField(u'项目', choices=[(cat.id, cat.cat_name)
for cat in Category.query.order_by(Category.cat_name).all()])
submit = SubmitField(u'添加')
class CatAddForm(FlaskForm):
cat_name = StringField('项目名称', validators=[DataRequired(), Length(max=100)])
submit = SubmitField(u'添加')
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 14:49:57 2019
@author: saib
"""
urls = ['a.in','b.hk','c.in','h.in','k.uk','us.com']
print(list(filter(lambda x : x.endswith('in'),urls)))
|
import pygame
import random
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((640, 640))
# player
playerX_change = 20
playerY_change = 20
pos_X = random.randint(0, 32) * 20
pos_Y = random.randint(0, 32) * 20
player_surface = pygame.transform.scale(pygame.image.load("player.png"), (20, 20)).convert()
player_surface_rect = player_surface.get_rect(center=(pos_X, pos_Y))
position_list = []
# food
pos_foodX = random.randint(0, 32) * 20
pos_foodY = random.randint(0, 32) * 20
food_surface = pygame.transform.scale(pygame.image.load("apple.png"), (20, 20)).convert()
food_surface_rect = food_surface.get_rect(center=(pos_foodX, pos_foodY))
key = ""
black_surface = pygame.transform.scale(pygame.image.load("black-square.png"), (20, 20)).convert()
for i in range(0, 32):
for j in range(0, 32):
screen.blit(black_surface, (i * 20, j * 20))
def create_snake():
global pos_Y, pos_X, player_surface, player_surface_rect
screen.blit(player_surface, player_surface_rect)
create_snake()
def create_food():
global pos_foodY, pos_foodX, food_surface
screen.blit(food_surface, food_surface_rect)
create_food()
def blit_black():
pass
def blit_player(rect):
global position_list
position_list.append(rect)
print(position_list)
screen.blit(player_surface, rect)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN:
key = "down"
if event.key == pygame.K_UP:
key = "up"
if event.key == pygame.K_LEFT:
key = "left"
if event.key == pygame.K_RIGHT:
key = "right"
if key == "down":
player_surface_rect.centery += playerY_change
blit_player(player_surface_rect)
if key == "up":
player_surface_rect.centery -= playerY_change
blit_player(player_surface_rect)
if key == "left":
player_surface_rect.centerx -= playerX_change
blit_player(player_surface_rect)
if key == "right":
player_surface_rect.centerx += playerX_change
blit_player(player_surface_rect)
pygame.display.update()
clock.tick(10)
|
from app.controllers.auth import auth
from app.libraries.response_message import error_message, data_message
from app.database import session
from app.models.category import Category
from app.models.course import Course
from flask import (
Blueprint,
request,
)
page = Blueprint('course', __name__, static_folder='static', template_folder='templates')
@page.route('/categories/<int:category_id>/courses', methods=['GET'])
def get_courses_by_category_id(category_id):
try:
category = session.query(Category).filter_by(id=category_id).one()
except:
return error_message(404, "Category not found.")
courses = session.query(Course).filter_by(category_id=category_id).all()
return data_message(200, {"Category": category.serialize, "Courses": [c.serialize for c in courses]},
"Successfully returned all courses by given category.")
@page.route('/categories/<int:category_id>/courses', methods=['POST'])
@auth.login_required
def add_course(category_id):
try:
category = session.query(Category).filter_by(id=category_id).one()
except:
return error_message(404, "Cannot add new course to this category: Category not found.")
name = request.form.get('name')
if name:
course = Course(name=name,
description=request.form.get('description'),
img_url=request.form.get('img-url'),
intro_video_url=request.form.get('intro-video-url'),
category_id=category.id)
session.add(course)
session.commit()
else:
return error_message(400, "Course name is required.")
return data_message(200, {"Course": course.serialize}, "Successfully added a course.")
@page.route('/categories/<int:category_id>/courses/<int:course_id>', methods=['GET'])
def get_course_by_id(category_id, course_id):
try:
course = session.query(Course).filter_by(id=course_id, category_id=category_id).one()
except:
return error_message(404, "Course not found.")
return data_message(200, {"Course": course.serialize},
"Successfully returned the selected course.")
@page.route('/categories/<int:category_id>/courses/<int:course_id>', methods=['PUT'])
@auth.login_required
def edit_course(category_id, course_id):
try:
course = session.query(Course).filter_by(id=course_id, category_id=category_id).one()
except:
return error_message(404, "Cannot update: Course not found.")
if request.form.get('name'): # if 'name' is a non-empty value then update else keep current value
course.name = request.form.get('name')
course.description = request.form.get('description')
course.img_url = request.form.get('img-url')
course.intro_video_url = request.form.get('intro-video-url')
session.add(course)
session.commit()
return data_message(200, {"Course": course.serialize}, "Successfully updated the course.")
@page.route('/categories/<int:category_id>/courses/<int:course_id>', methods=['DELETE'])
@auth.login_required
def delete_course(category_id, course_id):
try:
course = session.query(Course).filter_by(id=course_id, category_id=category_id).one()
except:
return error_message(404, "Cannot delete: Course not found.")
session.delete(course)
session.commit()
return data_message(200, None, "Course was successfully deleted.")
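# Hedged usage sketch (not part of the original): exercising the endpoints with the
# `requests` library. Host, port, category id and credentials are placeholders.
# import requests
# requests.get('http://localhost:5000/categories/1/courses')
# requests.post('http://localhost:5000/categories/1/courses',
#               data={'name': 'Intro to Flask'}, auth=('user', 'password'))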
|
import platform
import re
from collections import namedtuple
from copy import deepcopy
from elasticsearch.helpers import streaming_bulk
from elasticsearch.exceptions import NotFoundError
from langdetect.lang_detect_exception import LangDetectException
from onegov.core.utils import is_non_string_iterable
from onegov.search import log, Searchable, utils
from onegov.search.errors import SearchOfflineError
from queue import Queue, Empty, Full
ES_ANALYZER_MAP = {
'en': 'english',
'de': 'german',
'fr': 'french',
'en_html': 'english_html',
'de_html': 'german_html',
'fr_html': 'french_html',
}
ANALYSIS_CONFIG = {
"filter": {
"english_stop": {
"type": "stop",
"stopwords": "_english_"
},
"english_stemmer": {
"type": "stemmer",
"language": "english"
},
"english_possessive_stemmer": {
"type": "stemmer",
"language": "possessive_english"
},
"german_stop": {
"type": "stop",
"stopwords": "_german_"
},
"german_stemmer": {
"type": "stemmer",
"language": "light_german"
},
"french_elision": {
"type": "elision",
"articles_case": True,
"articles": [
"l", "m", "t", "qu", "n", "s",
"j", "d", "c", "jusqu", "quoiqu",
"lorsqu", "puisqu"
]
},
"french_stop": {
"type": "stop",
"stopwords": "_french_"
},
"french_keywords": {
"type": "keyword_marker",
"keywords": ["Exemple"]
},
"french_stemmer": {
"type": "stemmer",
"language": "light_french"
}
},
"analyzer": {
"english_html": {
"tokenizer": "standard",
"char_filter": [
"html_strip"
],
"filter": [
"english_possessive_stemmer",
"lowercase",
"english_stop",
"english_stemmer"
]
},
"german_html": {
"tokenizer": "standard",
"char_filter": [
"html_strip"
],
"filter": [
"lowercase",
"german_stop",
"german_normalization",
"german_stemmer"
]
},
"french_html": {
"tokenizer": "standard",
"char_filter": [
"html_strip"
],
"filter": [
"french_elision",
"lowercase",
"french_stop",
"french_keywords",
"french_stemmer"
]
},
"autocomplete": {
"type": "custom",
"char_filter": ["html_strip"],
"tokenizer": "standard",
"filter": ["lowercase"]
},
"tags": {
"type": "custom",
"tokenizer": "keyword",
"filter": ["lowercase"]
},
}
}
IndexParts = namedtuple('IndexParts', (
'hostname',
'schema',
'language',
'type_name',
'version'
))
def parse_index_name(index_name):
""" Takes the given index name and returns the hostname, schema,
language and type_name in a dictionary.
* If the index_name doesn't match the pattern, all values are None.
* If the index_name has no version, the version is None.
"""
if index_name.count('-') == 3:
hostname, schema, language, type_name = index_name.split('-')
version = None
elif index_name.count('-') == 4:
hostname, schema, language, type_name, version = index_name.split('-')
else:
hostname = None
schema = None
language = None
type_name = None
version = None
return IndexParts(
hostname=hostname,
schema=schema,
language=language,
type_name=type_name,
version=version
)
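# Illustration (added): with the 'hostname-schema-language-type[-version]' pattern
# handled above,
# parse_index_name('myhost-myschema-de-documents')
#   -> IndexParts(hostname='myhost', schema='myschema', language='de',
#                 type_name='documents', version=None)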
class Indexer:
""" Takes actions from a queue and executes them on the elasticsearch
cluster. Depends on :class:`IndexManager` for index management and expects
to have the same :class:`TypeRegistry` as :class:`ORMEventTranslator`.
The idea is that this class does the indexing/deindexing, the index manager
sets up the indices and the orm event translator listens for changes in
the ORM.
A queue is used so the indexer can be run in a separate thread.
"""
def __init__(self, mappings, queue, es_client, hostname=None):
self.es_client = es_client
self.queue = queue
self.hostname = hostname or platform.node()
self.ixmgr = IndexManager(self.hostname, es_client=self.es_client)
self.mappings = mappings
self.failed_task = None
def process(self, block=False, timeout=None):
""" Processes the queue until it is empty or until there's an error.
If there's an error, the next call to this function will try to
execute the failed task again. This is mainly meant for elasticsearch
outages.
:block:
If True, the process waits for the queue to be available. Useful
if you run this in a separate thread.
:timeout:
How long the blocking call should block. Has no effect if
``block`` is False.
:return: The number of successfully processed items
"""
try:
processed = 0
while True:
# get the previously failed task or a new one
task = self.failed_task or self.queue.get(block, timeout)
self.failed_task = None
if self.process_task(task):
processed += 1
else:
# if the task failed, keep it for the next run and give up
self.failed_task = task
return processed
except Empty:
pass
return processed
def bulk_process(self):
""" Processes the queue in bulk. This offers better performance but it
is less safe at the moment and should only be used as part of
reindexing.
"""
def actions():
try:
task = self.queue.get(block=False, timeout=None)
if task['action'] == 'index':
yield {
'_op_type': 'index',
'_index': self.ensure_index(task),
'_id': task['id'],
'doc': task['properties']
}
elif task['action'] == 'delete':
yield {
'_op_type': 'delete',
'_index': self.ensure_index(task),
'_id': task['id'],
'doc': task['properties']
}
else:
raise NotImplementedError
except Empty:
pass
for success, info in streaming_bulk(self.es_client, actions()):
if success:
self.queue.task_done()
def process_task(self, task):
try:
getattr(self, task['action'])(task)
except SearchOfflineError:
return False
self.queue.task_done()
return True
def ensure_index(self, task):
return self.ixmgr.ensure_index(
task['schema'],
task['language'],
self.mappings[task['type_name']],
return_index='internal'
)
def index(self, task):
index = self.ensure_index(task)
self.es_client.index(
index=index,
id=task['id'],
document=task['properties']
)
def delete(self, task):
# get all the types this model could be stored in (with polymorphic)
# identites, this could be many
mapping = self.mappings[task['type_name']]
if mapping.model:
types = utils.related_types(mapping.model)
else:
types = (mapping.name, )
# delete the document from all languages (because we don't know
# which one anymore) - and delete from all related types (polymorphic)
for type in types:
ix = self.ixmgr.get_external_index_name(
schema=task['schema'],
language='*',
type_name=type
)
# for the delete operation we need the internal index names
for internal in self.es_client.indices.get_alias(index=ix).keys():
try:
self.es_client.delete(
index=internal,
id=task['id']
)
except NotFoundError:
pass
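# Hedged usage sketch (not part of the module): running the indexer in a background
# thread. `mappings` and `es_client` are assumed to be created elsewhere by the
# application.
# import threading
# from queue import Queue
# queue = Queue(maxsize=5000)
# indexer = Indexer(mappings, queue, es_client)
# def _worker():
#     while True:
#         indexer.process(block=True, timeout=5)
# threading.Thread(target=_worker, daemon=True).start()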
class TypeMapping:
__slots__ = ['name', 'mapping', 'version', 'model']
def __init__(self, name, mapping, model=None):
self.name = name
self.mapping = self.add_defaults(mapping)
self.version = utils.hash_mapping(mapping)
self.model = model
def add_defaults(self, mapping):
mapping['es_public'] = {
'type': 'boolean'
}
mapping['es_last_change'] = {
'type': 'date'
}
mapping['es_suggestion'] = {
'analyzer': 'autocomplete',
'type': 'completion',
'contexts': [
{
'name': 'es_suggestion_context',
'type': 'category'
}
]
}
mapping['es_tags'] = {
'analyzer': 'tags',
'type': 'text',
}
return mapping
def for_language(self, language):
""" Returns the mapping for the given language. Mappings can
be slightly different for each language. That is, the analyzer
changes.
Because the :class:`IndexManager` puts each language into its own
index we do not have to worry about creating different versions
of the same mapping here.
"""
return self.supplement_analyzer(deepcopy(self.mapping), language)
def supplement_analyzer(self, dictionary, language):
""" Iterate through the dictionary found in the type mapping and
replace the 'localized' type with a 'text' type that includes a
language specific analyzer.
"""
supplement = False
for key, value in dictionary.items():
if hasattr(value, 'items'):
dictionary[key] = self.supplement_analyzer(value, language)
elif key == 'type' and value.startswith('localized'):
supplement = value.replace('localized', language)
break
if supplement:
assert 'analyzer' not in dictionary
dictionary[key] = 'text'
dictionary['analyzer'] = ES_ANALYZER_MAP[supplement]
return dictionary
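# Example of the substitution above (illustrative): a property declared as
# {'type': 'localized'} becomes {'type': 'text', 'analyzer': 'german'} when the
# index language is 'de', and {'type': 'localized_html'} becomes
# {'type': 'text', 'analyzer': 'german_html'}.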
class TypeMappingRegistry:
def __init__(self):
self.mappings = {}
def __getitem__(self, key):
return self.mappings[key]
def __iter__(self):
yield from self.mappings.values()
def register_orm_base(self, base):
""" Takes the given SQLAlchemy base and registers all
:class:`~onegov.search.mixins.Searchable` objects.
"""
for model in utils.searchable_sqlalchemy_models(base):
self.register_type(model.es_type_name, model.es_properties, model)
def register_type(self, type_name, mapping, model=None):
""" Registers the given type with the given mapping. The mapping is
as dictionary representing the part below the ``mappings/type_name``.
See:
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/\
indices-create-index.html#mappings>`_
When the mapping changes, a new index is created internally and the
alias to this index (the external name of the index) is pointed to
this new index.
As a consequence, a change in the mapping requires a reindex.
"""
assert type_name not in self.mappings
self.mappings[type_name] = TypeMapping(type_name, mapping, model)
@property
def registered_fields(self):
""" Goes through all the registered types and returns the a set with
all fields used by the mappings.
"""
return {key for mapping in self for key in mapping.mapping.keys()}
class IndexManager:
""" Manages the creation/destruction of indices. The indices it creates
have an internal name and an external alias. To facilitate that, versions
are used.
"""
def __init__(self, hostname, es_client):
assert hostname and es_client
self.hostname = hostname
self.es_client = es_client
self.created_indices = set()
@property
def normalized_hostname(self):
return utils.normalize_index_segment(
self.hostname, allow_wildcards=False)
def query_indices(self):
""" Queryies the elasticsearch cluster for indices belonging to this
hostname. """
return set(
ix for ix in self.es_client.cat.indices(
index=f'{self.normalized_hostname}-*', h='index'
).splitlines()
)
def query_aliases(self):
""" Queryies the elasticsearch cluster for aliases belonging to this
hostname. """
result = set()
infos = self.es_client.indices.get_alias(
index='{}-*'.format(self.normalized_hostname)
)
for info in infos.values():
for alias in info['aliases']:
result.add(alias)
return result
def ensure_index(self, schema, language, mapping, return_index='external'):
""" Takes the given database schema, language and type name and
creates an internal index with a version number and an external
alias without the version number.
:schema:
The database schema this index is based on.
:language:
The language in ISO 639-1 format.
:mapping:
The :class:`TypeMapping` mapping used in this index.
:return_index:
The index name to return. Either 'external' or 'internal'.
:return:
The (external/aliased) name of the created index.
"""
assert schema and language and mapping
assert len(language) == 2
assert return_index == 'external' or return_index == 'internal'
external = self.get_external_index_name(schema, language, mapping.name)
internal = self.get_internal_index_name(
schema, language, mapping.name, mapping.version)
return_value = return_index == 'external' and external or internal
if internal in self.created_indices:
return return_value
if self.es_client.indices.exists(index=internal):
self.created_indices.add(internal)
return return_value
# create the index
self.es_client.indices.create(
index=internal,
mappings={
'properties': mapping.for_language(language)
},
settings={
'analysis': ANALYSIS_CONFIG,
'index': {
'number_of_shards': 1,
'number_of_replicas': 0,
'refresh_interval': '5s'
}
}
)
# point the alias to the new index
self.es_client.indices.put_alias(name=external, index=internal)
# cache the result
self.created_indices.add(internal)
return return_value
def remove_expired_indices(self, current_mappings):
""" Removes all expired indices. An index is expired if it's version
number is no longer known in the current mappings.
:return: The number of indices that were deleted.
"""
active_versions = set(m.version for m in current_mappings)
count = 0
for index in self.query_indices():
info = parse_index_name(index)
if info.version and info.version not in active_versions:
self.es_client.indices.delete(index=index)
self.created_indices.remove(index)
count += 1
return count
def get_managed_indices_wildcard(self, schema):
""" Returns a wildcard index name for all indices managed. """
return '-'.join((
utils.normalize_index_segment(
self.hostname, allow_wildcards=False),
utils.normalize_index_segment(
schema, allow_wildcards=False),
'*'
))
def get_external_index_names(self, schema, languages='*', types='*'):
""" Returns a comma separated string of external index names that
match the given arguments. Useful to pass on to elasticsearch when
targeting multiple indices.
"""
indices = []
for language in languages:
for type_name in types:
indices.append(
self.get_external_index_name(schema, language, type_name))
return ','.join(indices)
def get_external_index_name(self, schema, language, type_name):
""" Generates the external index name from the given parameters. """
segments = (self.hostname, schema, language, type_name)
segments = (
utils.normalize_index_segment(s, allow_wildcards=True)
for s in segments
)
return '-'.join(segments)
def get_internal_index_name(self, schema, language, type_name, version):
""" Generates the internal index name from the given parameters. """
return '-'.join((
self.get_external_index_name(schema, language, type_name),
utils.normalize_index_segment(version, allow_wildcards=False)
))
class ORMLanguageDetector(utils.LanguageDetector):
html_strip_expression = re.compile(r'<[^<]+?>')
def localized_properties(self, obj):
for key, definition in obj.es_properties.items():
if definition.get('type', '').startswith('localized'):
yield key
def localized_texts(self, obj, max_chars=None):
chars = 0
for p in self.localized_properties(obj):
text = getattr(obj, p, '')
if not isinstance(text, str):
continue
yield text.strip()
chars += len(text)
if max_chars is not None and max_chars <= chars:
break
def detect_object_language(self, obj):
properties = list(self.localized_properties(obj))  # materialize the generator so the emptiness check below works
if not properties:
# here, the mapping will be the same for all languages
return self.supported_languages[0]
text = ' '.join(self.localized_texts(obj, max_chars=1024))
text = self.html_strip_expression.sub('', text).strip()
if not text:
return self.supported_languages[0]
try:
return self.detect(text)
except LangDetectException:
return self.supported_languages[0]
class ORMEventTranslator:
""" Handles the onegov.core orm events, translates them into indexing
actions and puts the result into a queue for the indexer to consume.
The queue may be limited. Once the limit is reached, new events are no
longer processed and an error is logged.
"""
converters = {
'date': lambda dt: dt and dt.isoformat(),
}
def __init__(self, mappings, max_queue_size=0, languages=(
'de', 'fr', 'en'
)):
self.mappings = mappings
self.queue = Queue(maxsize=max_queue_size)
self.detector = ORMLanguageDetector(languages)
self.stopped = False
def on_insert(self, schema, obj):
if not self.stopped:
if isinstance(obj, Searchable):
self.index(schema, obj)
def on_update(self, schema, obj):
if not self.stopped:
if isinstance(obj, Searchable):
self.delete(schema, obj)
self.index(schema, obj)
def on_delete(self, schema, obj):
if not self.stopped:
if isinstance(obj, Searchable):
self.delete(schema, obj)
def put(self, translation):
try:
self.queue.put_nowait(translation)
except Full:
log.error("The orm event translator queue is full!")
def index(self, schema, obj):
if obj.es_skip:
return
if obj.es_language == 'auto':
language = self.detector.detect_object_language(obj)
else:
language = obj.es_language
translation = {
'action': 'index',
'id': getattr(obj, obj.es_id),
'schema': schema,
'type_name': obj.es_type_name,
'language': language,
'properties': {}
}
mapping_ = self.mappings[obj.es_type_name].for_language(language)
for prop, mapping in mapping_.items():
if prop == 'es_suggestion':
continue
convert = self.converters.get(mapping['type'], lambda v: v)
raw = getattr(obj, prop)
if is_non_string_iterable(raw):
translation['properties'][prop] = [convert(v) for v in raw]
else:
translation['properties'][prop] = convert(raw)
if obj.es_public:
contexts = {'es_suggestion_context': ['public']}
else:
contexts = {'es_suggestion_context': ['private']}
suggestion = obj.es_suggestion
if is_non_string_iterable(suggestion):
translation['properties']['es_suggestion'] = {
'input': suggestion,
'contexts': contexts
}
else:
translation['properties']['es_suggestion'] = {
'input': [suggestion],
'contexts': contexts
}
self.put(translation)
def delete(self, schema, obj):
translation = {
'action': 'delete',
'schema': schema,
'type_name': obj.es_type_name,
'id': getattr(obj, obj.es_id)
}
self.put(translation)
|
#!/usr/bin/python
#!/usr/bin/env python
#simple script to look for and download videos from YT, parameters are provided via arguments, you can select the video acording to size,format
#resolution etc. I have to add an option to download audio only. And have to polish up the code a little bit, its 5:52 am and the holiday was well
#used.
import mechanize
import pafy
from bs4 import BeautifulSoup as bs
from sys import argv
i=0
q=len(argv)
query=""
for i in range(1,len(argv)):
query=query+str(argv[i])+" "
print("Searching for \""+query+"\" :")
search_url="https://www.youtube.com/results?search_query="+query.replace(" ","+")
list=[]
res_list=[]
size_list=[]
ext_list=[]
i=0
br=mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_referer(False)
br.set_handle_refresh(False)
br.addheaders=[('User-agent','Firefox')]
br.open(search_url)
soup=bs(br.response().read())
for link in soup.find_all('a'):
if "watch" in str(link.get('href')):
list.append(str(link.get('href')))
dl="https://www.youtube.com"+str(list[0])
video=pafy.new(dl)
print("\n\nDownloading : "+video.title)
streams = video.streams
for s in streams:
i=i+1
ext_list.append(s.extension)
res_list.append(s.resolution)
size_list.append(s.get_filesize())
l=0
print("Select File ->")
for l in range(0,i):
print(str(l+1)+"> "+str(ext_list[l])+" | "+str(res_list[l])+" | "+str("{0:.2f}".format(size_list[l]/(1024.0*1024.0)))+"Mb"+" |")
print("\n----------")
choice=int(input())
if(choice==0 or choice>i):
exit()
choice=choice-1
best = video.getbest(preftype=str(ext_list[choice]))
filename = best.download(filepath="/home/txjoe/Downloads/"+video.title +"."+best.extension)
|
from django.db import models
from datetime import datetime as dt
# Create your models here.
class ContactUs(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField()
comment = models.TextField()
comment_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name + " " + self.email + " " + str(self.comment_date)
|
from astropy.table import Table, Column
import numpy as np
import astropy.table
import sys
import glob
KEEP = "w ns omegam sigma8 H0 omegabh2 omeganuh2".split()
def add_extra_parameters(cat):
"""
Add additional DES systematics to the Planck chain. Since the data
doesn't constrain these at all just draw from a flat distribution
over some allowed range.
"""
extraparam_file = args.extra
extra = np.loadtxt(extraparam_file, dtype=[("param", "S50"), ("centre", "float64"), ("range", "float64")])
print "Drawing random values for extra parameters ", extra["param"]
nsamples = len(cat)
wt = cat["weight"]
like = cat["like"]
cat.remove_column("like")
cat.remove_column("weight")
for line in extra:
param = line["param"]
central_value = line["centre"]
value_range = line["range"]
samp = draw_from_range(nsamples, value_range, central_value)
cat[param] = samp
cat["weight"] = wt
cat["like"] = like
return cat
def draw_from_range(nsamples, value_range, central_value):
vlower = central_value - value_range/2.0
vupper = central_value + value_range/2.0
return np.random.uniform(low=vlower, high=vupper, size=nsamples)
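# e.g. draw_from_range(1000, 0.2, 0.7) returns 1000 samples drawn uniformly
# from the interval [0.6, 0.8]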
def write_cat(cat, filename, random=False, deweight=False):
"""
Write a catalog in the CosmoSIS format, optionally
converting a weight column into a repeated multiplicity
and reducing the weights to unity and also optionally
randomizing the catalog
"""
c = open(filename, 'w')
c.write('#')
c.write(' '.join(cat.colnames))
c.write('\n')
n = len(cat)
order = np.arange(n, dtype=int)
if random:
np.random.shuffle(order)
if deweight:
weight = cat['weight']
weight_col = cat.colnames.index("weight")
for i in xrange(len(cat)):
j = order[i]
row = cat[j]
if deweight:
repeats = int(weight[j])
row[weight_col] = 1.0
for k in xrange(repeats):
c.write(' '.join(str(s) for s in row))
c.write('\n')
else:
c.write(' '.join(str(s) for s in row))
c.write('\n')
c.close()
def transform(cat):
pairs = [
('cosmological_parameters--w', 'w'),
('cosmological_parameters--n_s', 'ns'),
('cosmological_parameters--omega_m', 'omegam'),
('cosmological_parameters--omega_b', 'omegabh2'),
('cosmological_parameters--sigma8_input', 'sigma8'),
('cosmological_parameters--s8', 'sigma8'),
('cosmological_parameters--h0', 'H0'),
('cosmological_parameters--omnuh2', 'omeganuh2'),
]
for new,old in pairs:
if old in cat.colnames:
cat.rename_column(old, new)
KEEP = args.keep.split()
if "H0" in KEEP:
cat['cosmological_parameters--h0']/=100.0
if "omegabh2" in KEEP:
cat['cosmological_parameters--omega_b']/=cat['cosmological_parameters--h0']**2
if "s8" in KEEP:
cat['cosmological_parameters--s8'] = cat["cosmological_parameters--sigma8_input"] * np.sqrt(cat["cosmological_parameters--omega_m"]/0.3)
#move weight and like to the end
names = cat.colnames[2:] + cat.colnames[:2]
cat = cat[names]
return cat
def write_header(outfile, params):
#put the like cols at the end
line = " ".join(params)
outfile.write('#'+line+"\n")
def find_params(base):
filename = base+".paramnames"
names = ["weight", "like"]
indices = [0,1]
KEEP = args.keep.split()
print "Keeping parameters %s"%KEEP
for i,line in enumerate(open(filename)):
name = line.split()[0].rstrip('*')
if name not in KEEP: continue
if name=="s8": continue
names.append(name)
indices.append(i+2)
return names,indices
def process_files(base, outfilename, randomize, deweight):
filenames = glob.glob(base+"_[0-9].txt")
params,indices = find_params(base)
chains = []
for filename in filenames:
print filename
chains.append(np.loadtxt(filename, usecols=indices).T)
chains = np.hstack(chains)
cat=Table(rows=chains.T, names=params)
cat = transform(cat)
if args.extra:
cat = add_extra_parameters(cat)
write_cat(cat, outfilename, random=randomize, deweight=deweight)
def main():
import argparse
global args
parser=argparse.ArgumentParser(description="Planck Format Chains -> Cosmosis Format")
parser.add_argument("base", help="Root chain name including path")
parser.add_argument("output", help="Output file name")
parser.add_argument("--randomize", action='store_true', help="Randomize the ordering")
parser.add_argument("--keep", default="w ns omegam sigma8 H0 omegabh2 omeganuh2 s8", help="Parameters to keep")
parser.add_argument("--extra", default="", help="File with additional parameters to add.")
parser.add_argument("--deweight", action='store_true', help="Convert to weight=1 chains by repeating lines (only for integer Planck weights i.e. not the _post_ ones)")
    args = parser.parse_args()
    KEEP = args.keep.split()
    process_files(args.base, args.output, args.randomize, args.deweight)

if __name__ == '__main__':
    main()
|
from flask import Flask, render_template, Response
import cv2
import time
# import camera
app = Flask(__name__)
# cam = camera.camera('rtsp://admin:QPPZFE@192.168.100.57:554/H.264_stream')
# cam = cv2.VideoCapture('rtsp://admin:QPPZFE@192.168.100.57:554/H.264_stream') # use 0 for web camera
# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
# for local webcam use cv2.VideoCapture(0)
def skipFrames(timegap, FPS, cap, CALIBRATION):
latest = None
while True :
for i in range(int(timegap*FPS/CALIBRATION)) :
_,latest = cap.read()
if(not _):
time.sleep(0.5)#refreshing time
break
else:
break
return latest
def skipFrames(timegap, FPS, cap, CALIBRATION):
latest = None
ret = None
while True :
for i in range(int(timegap*FPS/CALIBRATION)) :
ret,latest = cap.read()
if(not ret):
time.sleep(0.5)#refreshing time
break
else:
break
return latest, ret
def gen_frames(): # generate frame by frame from camera
FPS = 60
CALIBRATION = 1.5
gap = 0.1
frame = None
while True:
# time.sleep(0.05)
# Capture frame-by-frame
# success, frame = camera.read() # read the camera frame
# frame, success = skipFrames(gap, FPS, cam, CALIBRATION)
try:
new_frame = cv2.imread('../jetson-inference/build/aarch64/bin/cap.jpg')
print(new_frame.shape[:2])
new_frame = cv2.resize(new_frame, (640, 480))
frame = new_frame
# frame = cv2.imread('tes.PNG')
except:
# frame = None
pass
# s = time.time()
if frame is None:
continue
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
# gap = time.time()-s
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') # concat frame one by one and show result
@app.route('/video_feed')
def video_feed():
#Video streaming route. Put this in the src attribute of an img tag
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
if __name__ == '__main__':
app.run(host='192.168.100.104', port=5001, debug=True)
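# Hedged illustration (not part of the original): templates/index.html is expected
# to embed the stream roughly like
# <img src="{{ url_for('video_feed') }}" alt="live stream">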
|
from keras.callbacks import Callback
class ClassAccuracy(Callback):
def __init__(self, data_x, data_y, class_label, label="class accuracy"):
super(ClassAccuracy, self).__init__()
self.label = "%s for %s" % (label, class_label)
self.class_label = class_label
self.data_x = data_x
self.data_y = data_y
self.scores = []
def on_epoch_end(self, epoch, logs={}):
pred_y = self.model.predict_on_batch(self.data_x)
pred_y_labels = []
for row in pred_y:
pred_y_labels.append( 1 if row[1] > row[0] else 0)
(acc, tp, fp) = self.score_class(self.data_y, pred_y_labels)
logs[self.label] = acc
print(' - {:04.2f} (tp: {:3.0f}, fp: {:3.0f} -- {:15s})'.format(acc * 100, tp, fp, self.label))
self.scores.append(acc)
def score_class(self, data_y, pred_y):
tp = 0.0
fp = 0.0
for i in range(0, len(data_y)):
label = data_y[i]
pred = pred_y[i]
if pred == self.class_label:
if label == pred:
tp += 1.0
else:
fp += 1.0
acc = 0.0 if tp+fp == 0.0 else tp/(tp+fp)
return acc, tp, fp
def avg_score(self, last_n_epochs):
last = self.scores[-last_n_epochs:]
return sum(last)/len(last)
class Accuracy(Callback):
def __init__(self, data_x, data_y, label="accuracy"):
super(Accuracy, self).__init__()
self.label = label
self.data_x = data_x
self.data_y = data_y
self.scores = []
def on_epoch_end(self, epoch, logs={}):
pred_y = self.model.predict_on_batch(self.data_x)
pred_y_labels = []
for row in pred_y:
pred_y_labels.append( 1 if row[1] > row[0] else 0)
(acc, tp, fp) = self.score_class(self.data_y, pred_y_labels)
logs[self.label] = acc
print(' - {:04.2f} (tp: {:3.0f}, fp: {:3.0f} -- {:15s})'.format(acc * 100, tp, fp, self.label))
self.scores.append(acc)
def score_class(self, data_y, pred_y):
tp = 0.0
fp = 0.0
for i in range(0, len(data_y)):
label = data_y[i]
pred = pred_y[i]
if label == pred:
tp += 1.0
else:
fp += 1.0
acc = 0.0 if tp+fp == 0.0 else tp/(tp+fp)
return acc, tp, fp
def avg_score(self, last_n_epochs):
last = self.scores[-last_n_epochs:]
return sum(last)/len(last)
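# Hedged usage sketch (not part of the original): attaching the callbacks to a model.
# `model`, `x_train`, `y_train`, `x_val` and `y_val` are assumed to exist elsewhere.
# per_class_acc = ClassAccuracy(x_val, y_val, class_label=1)
# overall_acc = Accuracy(x_val, y_val)
# model.fit(x_train, y_train, epochs=10, callbacks=[per_class_acc, overall_acc])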
|
from rec2 import factorial as fact
import rec2
print(fact(4))
print(rec2.factorial(2))
|
from slack_webhook import Slack
URL=''
def send_slack(msg):
slack = Slack(url=URL)
slack.post(text=msg)
|
from models import Session, FoodieUser
s = Session()
def create_users():
s.add(FoodieUser(name='John Doe', age=44))
s.add(FoodieUser(name='San Martin', age=999))
s.commit()
def main():
create_users()
if __name__ == '__main__':
main()
|
from __future__ import absolute_import, unicode_literals
import errno
import logging
import os
import signal
import subprocess
from mopidy import backend, exceptions
import pykka
from . import Extension
from .client import dLeynaClient
from .library import dLeynaLibraryProvider
from .playback import dLeynaPlaybackProvider
DBUS_SESSION_BUS_ADDRESS = 'DBUS_SESSION_BUS_ADDRESS'
DBUS_SESSION_BUS_PID = 'DBUS_SESSION_BUS_PID'
logger = logging.getLogger(__name__)
class dLeynaBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = [Extension.ext_name]
__dbus_pid = None
def __init__(self, config, audio):
super(dLeynaBackend, self).__init__()
try:
if DBUS_SESSION_BUS_ADDRESS in os.environ:
self.client = dLeynaClient()
else:
env = self.__start_dbus()
self.__dbus_pid = int(env[DBUS_SESSION_BUS_PID])
self.client = dLeynaClient(str(env[DBUS_SESSION_BUS_ADDRESS]))
except Exception as e:
logger.error('Error starting %s: %s', Extension.dist_name, e)
# TODO: clean way to bail out late?
raise exceptions.ExtensionError('Error starting dLeyna client')
self.library = dLeynaLibraryProvider(self, config)
self.playback = dLeynaPlaybackProvider(audio, self)
def on_stop(self):
if self.__dbus_pid is not None:
self.__stop_dbus(self.__dbus_pid)
def __start_dbus(self):
logger.info('Starting %s D-Bus daemon', Extension.dist_name)
out = subprocess.check_output(['dbus-launch'], universal_newlines=True)
logger.debug('%s D-Bus environment:\n%s', Extension.dist_name, out)
return dict(line.split('=', 1) for line in out.splitlines())
def __stop_dbus(self, pid):
logger.info('Stopping %s D-Bus daemon', Extension.dist_name)
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
logger.debug('Stopped %s D-Bus daemon', Extension.dist_name)
|
#from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
import env
class Command(BaseCommand):
help = 'Delete environment in %s' % env.path
def handle(self, **options):
env.delete()
|
import ev3dev.ev3 as ev3
import ev3dev.core as core
import time as time
def main():
LEFT =ev3.Leds.LEFT
RIGHT =ev3.Leds.RIGHT
GREEN =ev3.Leds.GREEN
RED =ev3.Leds.RED
buttons =ev3.Button
ev3.Leds.all_off()
while(True):
'''core.Screen.clear()
core.Screen.draw.text([5,20,200,100],String(ev3.Button.LEFT))
core.Screen.draw.text([5,50,200,100],String(ev3.Button.RIGHT))
core.Screen.update()'''
time.sleep(1)
left_flag=bool(buttons.left.value())
right_flag=bool(buttons.right.value())
print(str(left_flag)+" "+str(right_flag))
if(left_flag==True):
ev3.Leds.set_color(LEFT, GREEN)
else:
ev3.Leds.set_color(LEFT, RED)
if(right_flag==True):
ev3.Leds.set_color(RIGHT, GREEN)
else:
ev3.Leds.set_color(RIGHT, RED)
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# vim: set fileencoding=UTF-8
a = int(input("? "))
b = int(input("? "))
c = int(input("? "))
if a < 1 or b < 1 or c < 1 :
print("As dimensões dos lados do triângulo devem ser todas positivas")
else:
if a + b <= c or a + c <= b or c + b <= a :
print("Não é triângulo")
else:
if a == b and b == c :
print("O triângulo é equilátero")
else:
if a == b or b == c or c == a :
print("O triângulo é isósceles")
else:
print("O triângulo é escaleno")
|
from django.urls import path
from django.contrib import admin
from . import views
urlpatterns = [
path('', views.HomePage),
path('subject/', views.SubjectPage),
path('results/', views.result1),
]
|
from setuptools import setup, find_packages
setup(
name='ships',
version='1.0',
author='akerlay',
packages=find_packages(),
python_requires='>=3.7',
classifiers=[
'Environment :: Console',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 3.7',
],
include_package_data=True,
entry_points={
'console_scripts': [
'app = app.app:main',
]
},
)
|
# # # # Processing Pipeline for the Full Domain (Artic-wide)
import subprocess, os, warnings
import xarray as xr
os.chdir('/workspace/UA/malindgren/repos/seaice_noaa_indicators/pipeline')
base_path = '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051'
ncpus = str(64)
# interpolate and smooth daily timeseries
print('interp/smooth')
# _ = subprocess.call(['ipython','make_daily_timeseries_interp_smooth.py','--','-b', base_path, '-n', ncpus])
begin = '1979'
for end in ['2007','2013','2017']:
# make clim from smoothed timeseries
print('clim')
begin_full, end_full = '1978', '2017'
fn = os.path.join(base_path,'smoothed','NetCDF','nsidc_0051_sic_nasateam_{}-{}_north_smoothed.nc'.format(begin_full, end_full))
clim_fn = os.path.join(base_path,'smoothed','NetCDF','nsidc_0051_sic_nasateam_{}-{}_north_smoothed_climatology.nc'.format(begin, end))
# _ = subprocess.call(['ipython','make_daily_timeseries_climatology.py','--','-f', fn, '-o', clim_fn, '-b', begin, '-e', end])
# calc FUBU
print('fubu')
outpath = os.path.join(base_path, 'outputs','NetCDF')
if not os.path.exists(outpath):
_ = os.makedirs(outpath)
# _ = subprocess.call(['ipython','compute_fubu_dates.py','--','-b', base_path, '-f', fn, '-begin', begin, '-end', end])
# # calc FUBU clim
print('fubu clim')
fubu_fn = os.path.join( base_path,'outputs','NetCDF','nsidc_0051_sic_nasateam_{}-{}_north_smoothed_fubu_dates.nc'.format(begin, end))
fubu_clim_fn = fubu_fn.replace('.nc', '_climatology.nc')
# with xr.open_dataset(fubu_fn) as ds:
# ds_clim = ds.sel(year=slice(1979,2007)).mean('year').round(0)
# ds_clim.to_netcdf(fubu_clim_fn)
# plots mimicking the paper figs
points_fn = '/workspace/Shared/Tech_Projects/SeaIce_NOAA_Indicators/project_data/nsidc_0051/selection_points/chuckchi-beaufort_points.shp'
# fig3
out_fn = os.path.join(base_path,'outputs/png/chuckchi-beaufort_avg_fig3.png')
_ = subprocess.call(['ipython','make_figure_3_paper.py','--','-n',fn,'-c',clim_fn,'-p',points_fn,'-o',out_fn])
# fig4
out_fn = os.path.join(base_path,'outputs','png','barrow_avg_fig4.png')
_ = subprocess.call(['ipython','make_figure_4_paper.py','--','-n',fn,'-c',clim_fn,'-f',fubu_fn,'-fc',fubu_clim_fn,'-p',points_fn,'-o',out_fn])
# fig5/6
_ = subprocess.call(['ipython','make_figure_5-6_paper.py','--','-b',base_path])
_ = subprocess.call(['ipython','make_figure_7_paper.py','--','-b',base_path])
|
import multiprocessing
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from sklearn.linear_model import SGDClassifier
lr = SGDClassifier(loss='log', penalty = 'l1')
# X_data = []
# Y_data = []
#
# for line in open(r"C:\Users\10651\Desktop\评论数据\200000条总数据\好样本\a.txt","r",encoding='UTF-8'): # open the file and read it line by line
#     X_data.append(line) # append each line to the list
#     Y_data.append(1)
# for line in open(r"C:\Users\10651\Desktop\评论数据\200000条总数据\坏样本\b.txt","r",encoding='UTF-8'): # open the file and read it line by line
#     X_data.append(line) # append each line to the list
#     Y_data.append(0)
corpus = r'C:\Users\10651\Desktop\评论数据\200000条分词数据\坏样本\b.txt'
sentences = LineSentence(corpus)  # load the corpus; LineSentence handles one tokenized sentence per line
#sentences1 = word2vec.Text8Corpus(corpus)  # handles a corpus tokenized as one continuous text
#print('=--=-=-=-=-=',sentences)
model = Word2Vec(sentences, size=12, window=25, min_count=2, workers=5, sg=1, hs=1)  # training is this single call; words occurring fewer than 2 times are dropped
model.save('word2vec.model')  # Word2Vec.save() requires an output path; this filename is a placeholder
# model = Word2Vec(LineSentence(X_data), size=100, window=10, min_count=3,
# workers=multiprocessing.cpu_count(), sg=1, iter=10, negative=20)
from keras.optimizers import SGD,Adam
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
import statsmodels.api as sm
model_nn = Sequential()
model_nn.add(Dense(2000, input_dim=1000, init = 'uniform'))
model_nn.add(Dropout(0.15)) # use Dropout to reduce overfitting
model_nn.add(Dense(1550, activation='tanh', init = 'uniform'))
model_nn.add(Dropout(0.15)) # use Dropout to reduce overfitting
model_nn.add(Dense(550, activation='tanh', init = 'uniform'))
model_nn.add(Dropout(0.15)) # use Dropout to reduce overfitting
model_nn.add(Dense(1, activation='sigmoid'))
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 11:50:29 2019
@author: flau
"""
import numpy as np
from scipy.io import wavfile
from scipy.signal import deconvolve
import matplotlib.pyplot as plt
fs, measure = wavfile.read('Messung.wav')
fs, test = wavfile.read('Testsignal.wav')
difference=len(measure)-len(test)
correlation=np.correlate(test,measure,"full")
maxsample=len(measure)-(np.ceil(np.argmax(correlation)))
zeros=np.zeros(int(maxsample))
measure=np.concatenate([zeros,measure])
test=test[:144000]
measure=measure[:144000]
spectrum_m = np.fft.fft(measure)
magnitude_m = np.abs(spectrum_m)
phase_m = np.angle(spectrum_m,deg=True)
spectrum_t = np.fft.fft(test)
magnitude_t = np.abs(spectrum_t)
phase_t = np.angle(spectrum_t,deg=True)
spectrum_f=spectrum_m/spectrum_t
len2=np.ceil(len(spectrum_f)/2)
spectrum_f=spectrum_f[:int(len2)]
magnitude_f = np.abs(spectrum_f)
phase_f = np.angle(spectrum_f,deg=True)
impulsantwort=np.fft.ifft(spectrum_f)
plt.plot(phase_f)
plt.xscale('log')
#plt.show
|
class Dog:
count = []
def __init__(self, name, type):
self.name = name
self.type = type
self.nomer = len(self.count)
self.count.append(1)
# def add_trick(self, trick):
# self.tricks.append(trick)
dog1 = Dog("Sharik", "Alabai")
dog1.temperature = 10
# print(dog1.temperature)
dog2 = Dog("Simba", "Alabai")
dog3 = Dog("Laika", "Alabai")
print(dog1.nomer)
print(dog2.nomer)
print(dog3.nomer)
# dog1.add_trick("roll over")
# dog2.add_trick("play dead")
#
# print(dog2.tricks)
# print(dog1.tricks)
|
import scipy.io as sio
import h5py
def load_deep_features(data_name):
import numpy as np
valid_data, req_rec, b_wv_matrix = True, True, True
unlabels, zero_shot, doc2vec, split = False, False, False, False
if data_name.find('_doc2vec') > -1:
doc2vec = True
req_rec, b_wv_matrix = False, False
if data_name == 'wiki_doc2vec':
path = './datasets/wiki_data/wiki_deep_doc2vec_data_corr_ae.h5py' # wiki_deep_doc2vec_data
valid_len = 231
MAP = -1
elif data_name == 'nus_wide_doc2vec':
path = './datasets/NUS-WIDE/nus_wide_deep_doc2vec_data_42941.h5py' # pascal_sentence_deep_doc2vec_data
valid_len = 5000
MAP = -1
elif data_name == 'MSCOCO_doc2vec':
path = './datasets/MSCOCO/MSCOCO_deep_doc2vec_data.h5py' #
valid_len = 10000
MAP = -1
elif data_name == 'xmedia':
path = './datasets/XMedia&Code/XMediaFeatures.mat'
MAP = -1
req_rec, b_wv_matrix = False, False
all_data = sio.loadmat(path)
A_te = all_data['A_te'].astype('float32') # Features of test set for audio data, MFCC feature
A_tr = all_data['A_tr'].astype('float32') # Features of training set for audio data, MFCC feature
d3_te = all_data['d3_te'].astype('float32') # Features of test set for 3D data, LightField feature
d3_tr = all_data['d3_tr'].astype('float32') # Features of training set for 3D data, LightField feature
I_te_CNN = all_data['I_te_CNN'].astype('float32') # Features of test set for image data, CNN feature
I_tr_CNN = all_data['I_tr_CNN'].astype('float32') # Features of training set for image data, CNN feature
T_te_BOW = all_data['T_te_BOW'].astype('float32') # Features of test set for text data, BOW feature
T_tr_BOW = all_data['T_tr_BOW'].astype('float32') # Features of training set for text data, BOW feature
V_te_CNN = all_data['V_te_CNN'].astype('float32') # Features of test set for video(frame) data, CNN feature
V_tr_CNN = all_data['V_tr_CNN'].astype('float32') # Features of training set for video(frame) data, CNN feature
te3dCat = all_data['te3dCat'].reshape([-1]).astype('int64') # category label of test set for 3D data
tr3dCat = all_data['tr3dCat'].reshape([-1]).astype('int64') # category label of training set for 3D data
teAudCat = all_data['teAudCat'].reshape([-1]).astype('int64') # category label of test set for audio data
trAudCat = all_data['trAudCat'].reshape([-1]).astype('int64') # category label of training set for audio data
teImgCat = all_data['teImgCat'].reshape([-1]).astype('int64') # category label of test set for image data
trImgCat = all_data['trImgCat'].reshape([-1]).astype('int64') # category label of training set for image data
teVidCat = all_data['teVidCat'].reshape([-1]).astype('int64') # category label of test set for video(frame) data
trVidCat = all_data['trVidCat'].reshape([-1]).astype('int64') # category label of training set for video(frame) data
teTxtCat = all_data['teTxtCat'].reshape([-1]).astype('int64') # category label of test set for text data
trTxtCat = all_data['trTxtCat'].reshape([-1]).astype('int64') # category label of training set for text data
train_data = [I_tr_CNN, T_tr_BOW, A_tr, d3_tr, V_tr_CNN]
test_data = [I_te_CNN[0: 500], T_te_BOW[0: 500], A_te[0: 100], d3_te[0: 50], V_te_CNN[0: 87]]
valid_data = [I_te_CNN[500::], T_te_BOW[500::], A_te[100::], d3_te[50::], V_te_CNN[87::]]
train_labels = [trImgCat, trTxtCat, trAudCat, tr3dCat, trVidCat]
test_labels = [teImgCat[0: 500], teTxtCat[0: 500], teAudCat[0: 100], te3dCat[0: 50], teVidCat[0: 87]]
valid_labels = [teImgCat[500::], teTxtCat[500::], teAudCat[100::], te3dCat[50::], teVidCat[87::]]
if doc2vec:
        h = h5py.File(path, 'r')
train_imgs_deep = h['train_imgs_deep'][()].astype('float32')
train_imgs_labels = h['train_imgs_labels'][()]
train_imgs_labels -= np.min(train_imgs_labels)
train_texts_idx = h['train_text'][()].astype('float32')
train_texts_labels = h['train_texts_labels'][()]
train_texts_labels -= np.min(train_texts_labels)
train_data = [train_imgs_deep, train_texts_idx]
train_labels = [train_imgs_labels, train_texts_labels]
# valid_data = False
test_imgs_deep = h['test_imgs_deep'][()].astype('float32')
test_imgs_labels = h['test_imgs_labels'][()]
test_imgs_labels -= np.min(test_imgs_labels)
test_texts_idx = h['test_text'][()].astype('float32')
test_texts_labels = h['test_texts_labels'][()]
test_texts_labels -= np.min(test_texts_labels)
test_data = [test_imgs_deep, test_texts_idx]
test_labels = [test_imgs_labels, test_texts_labels]
valid_data = [test_data[0][0: valid_len], test_data[1][0: valid_len]]
valid_labels = [test_labels[0][0: valid_len], test_labels[1][0: valid_len]]
test_data = [test_data[0][valid_len::], test_data[1][valid_len::]]
test_labels = [test_labels[0][valid_len::], test_labels[1][valid_len::]]
if valid_data:
if b_wv_matrix:
return train_data, train_labels, valid_data, valid_labels, test_data, test_labels, wv_matrix, MAP
else:
return train_data, train_labels, valid_data, valid_labels, test_data, test_labels, MAP
else:
if b_wv_matrix:
return train_data, train_labels, test_data, test_labels, wv_matrix, MAP
else:
return train_data, train_labels, test_data, test_labels, MAP
|
import pathlib
import numpy as np
class ConstantRegressor:
def fit(self, X, y, eval_data=None, mlflow_log=True):
self.mean = y.mean()
def predict(self, X):
return np.ones(X.shape[0]) * self.mean
def save(self, path: pathlib.Path):
pass
@staticmethod
def load(path: pathlib.Path):
pass
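# Hedged usage sketch (not part of the original module): fit the baseline on toy
# data and check that every prediction equals the training mean.
if __name__ == "__main__":
    X_demo = np.arange(10, dtype=float).reshape(5, 2)
    y_demo = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    reg = ConstantRegressor()
    reg.fit(X_demo, y_demo)
    print(reg.predict(X_demo))  # -> [3. 3. 3. 3. 3.]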
|
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
image = mpimg.imread("1.tif")
plt.imshow(image)
plt.show()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
def easy(url_str):
import re
pattern = re.compile(r':\w+/?')
matches = pattern.findall(url_str)
for match in matches:
        var = match[1:].rstrip('/')
var_re = r'(?P<%s>.*)/'%var
url_str = url_str.replace(match, var_re)
url_str += '$'
return url_str
urlpatterns = patterns('',
url(r'^$', 'config.views.index', name='index'),
# url(easy('^project/:id/'), 'project.foo.view_name'),
url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/env python3
import socket
import sys
HOST = sys.argv[1]
PORT = int(sys.argv[2])
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
while True:
txt=input()
s.sendall(txt.encode())
data = s.recv(1024)
#print("Received:", repr(data), "\n")
print(data.decode())
|
# compatibility with python 2/3
try:
basestring
except NameError:
basestring = str
class ExpressionNotFoundError(Exception):
"""Expression not found error."""
class ExpressionEvaluator(object):
"""
    Runs expressions used by templates.
"""
__registered = {}
@staticmethod
def register(name, expressionCallable):
"""
Register a expressionCallable as expression.
"""
assert hasattr(expressionCallable, '__call__'), \
"Invalid callable!"
ExpressionEvaluator.__registered[name] = expressionCallable
@staticmethod
def registeredNames():
"""
Return a list of registered expressions.
"""
return ExpressionEvaluator.__registered.keys()
@staticmethod
def run(expressionName, *args):
"""
        Run the expression and return a value based on the args.
"""
if expressionName not in ExpressionEvaluator.__registered:
raise ExpressionNotFoundError(
'Could not find expression name: "{0}"'.format(
expressionName
)
)
# executing expression
return str(ExpressionEvaluator.__registered[expressionName](*args))
@staticmethod
def parseRun(expression):
"""
Parse and run an expression.
        An expression must be a string describing the expression name
        as its first token and the arguments that should be passed to it
        separated by spaces (as in bash), for instance:
            "myexpression arg1 arg2"
            "sum 1 2"
        The arguments are always passed as strings, and each expression
        callable should handle them as needed.
"""
assert isinstance(expression, basestring), \
"Invalid expression type!"
cleanedExpressionEvaluator = list(filter(
lambda x: x != '', expression.strip(" ").split(" ")
))
expressionName = cleanedExpressionEvaluator[0]
expressionArgs = cleanedExpressionEvaluator[1:]
return ExpressionEvaluator.run(expressionName, *expressionArgs)
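# Hedged usage sketch (illustrative only): register a simple "sum" expression and
# evaluate it through parseRun; per the contract above, arguments arrive as strings.
def _sum_expression(a, b):
    return int(a) + int(b)

ExpressionEvaluator.register('sum', _sum_expression)
print(ExpressionEvaluator.parseRun('sum 1 2'))  # -> "3"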
|
import csv
import tweepy
from tweepy import OAuthHandler
consumer_key = ' '
consumer_secret = ' '
access_token = ' '
access_secret = ' '
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
|
from topology import *
from util import *
def make_ini(T, network_name, sim_time, out_f):
with open(out_f, 'w') as of:
print("[General]", file=of)
print("network = {}".format(network_name), file=of)
print("record-eventlog = false", file=of)
print("result-dir = results_strict_priority", file=of)
print("sim-time-limit = {}s".format(sim_time), file=of)
print('', file=of)
print("**.displayAddresses = true", file=of)
print("**.verbose = true", file=of)
print('', file=of)
# mac address
for i in range(T.node_n):
print('**.host{}.eth.address = \"{}\"'.format(i, T.host_list[i].rout_addr), file=of)
print('', file=of)
# parameter of switch
print("**.switch*.processingDelay.delay = 1us", file=of)
print("**.filteringDatabase.database = xmldoc(\"XML/demo_rout.xml\", \"/filteringDatabases/\")", file=of)
print('', file=of)
for i in range(8):
print("**.switch*.eth[*].queue.tsAlgorithms[{}].typename = \"StrictPriority\"".format(i), file=of)
print("**.queues[*].bufferCapacity = 363360b", file=of)
print('', file=of)
# specify flow creation file
for i in range(T.node_n):
print("**.host{}.trafGenSchedApp.initialSchedule = \
xmldoc(\"XML/demo_sched.xml\")".format(i), file=of)
if __name__ == "__main__":
    T = parse_topology_file("./5.in")
    # the network name and simulation time below are placeholders; make_ini() needs all four arguments
    make_ini(T, "demo_network", 10, "./test_ini")
|
import os
import pymongo
import logging
from flask import Flask
from flask_cors import CORS
from .jinjia_filters import JJFilters
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from logging.handlers import WatchedFileHandler
app = Flask(__name__)
CORS(app)
csrf = CSRFProtect(app)
app.config['SECRET_KEY'] = os.urandom(24)
app.jinja_env.filters['is_odd'] = JJFilters.is_odd
app.jinja_env.filters['unix_2_time'] = JJFilters.unix_2_time
env = os.environ.get('ENV', 'dev')
if env == 'dev':
app.config.from_object('config.DevConfig')
else:
app.config.from_object('config.ProductConfig')
db = pymongo.MongoClient(app.config.get("MONGO_URI"), app.config.get("MONGO_PORT"))[app.config.get("MONGO_DB")]
if not os.path.exists(app.config['LOG_ROOT_DIR']):
os.makedirs(app.config['LOG_ROOT_DIR'])
log_path = app.config['LOG_ROOT_DIR'] + '/wz.log'
handler = WatchedFileHandler(log_path)
handler.setFormatter(logging.Formatter('%(asctime)s|%(levelname)s|%(process)d|%(filename)s|%(lineno)s|%(message)s', datefmt='%d/%b/%Y:%H:%M:%S'))
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "/user/login"
from app import blueprints
from app.Models import UserModel
|
from bs4 import BeautifulSoup
import requests
response = requests.get("https://www.empireonline.com/movies/features/best-movies-2/")
movie_webpage = response.text
soup = BeautifulSoup(movie_webpage, "html.parser")
# print(soup)
title_tags = soup.find_all(name="h3", class_="title")
all_titles = [title.text for title in title_tags]
print(all_titles)
with open("movies.txt", 'a', encoding="utf8") as file:
for movie in all_titles[::-1]:
file.write(movie+"\n")
|
# coding=utf-8
import requests;
from login.Login import *;
if __name__ == "__main__":
login = Login()
login.login(email='', password='')
|
#This will take the training data in libsvm format and predict ham or spam on the basis of symbols
#using optimized Adaboost Classifier.
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from collections import Counter
from sklearn.datasets import load_svmlight_file
X_train, y_train = load_svmlight_file('Trainlabels.txt')
X_test, y_test = load_svmlight_file('Testlabels.txt',n_features=X_train.shape[1])
bdt = AdaBoostClassifier(DecisionTreeClassifier(),
n_estimators=200,
algorithm="SAMME")
bdt.fit(X_train, y_train)
y_ada = bdt.predict(X_test)
ada_results_compare = Counter(zip(y_test,y_ada))
print ada_results_compare
|
import main_module
main_module.main()
print("Second modules name: {}".format(__name__))
|
#makes input cfi files for all beamspot scenarios!
import os, sys
eospath = "/eos/uscms/store/user/skaplan/noreplica/"
shortpath = "/store/user/skaplan/noreplica/"
for phi in (0,225):
for r in range(11):
if (phi == 225 and r == 0):
continue
folder="MinBiasBeamSpotPhi%iR%i_HISTATS/"%(phi,r)
fullpath = eospath+folder
outfiles = os.listdir(fullpath)
outfilesnew=[]
for f in outfiles:
outfilesnew.append(" '"+shortpath + folder + f + "',")
out = open("MinBiasBeamSpotPhi%iR%i"%(phi,r)+"_cfi.py",'w')
out.write("import FWCore.ParameterSet.Config as cms\n")
out.write("\n")
out.write('source = cms.Source("PoolSource",\n')
out.write(" fileNames = cms.untracked.vstring(\n")
for f in outfilesnew:
out.write(f+"\n")
out.write(" )\n")
out.write("\n")
out.write(")\n")
out.close()
|
from bs4.dammit import EntitySubstitution
esub = EntitySubstitution()
def sanitize_html(title):
return esub.substitute_html(title)
def sanitize_irc(title):
badchars = "\r\n\x01"
return "".join(c for c in title if c not in badchars)
escapers = {
"html": sanitize_html,
"irc": sanitize_irc
}
def escape(title, mode):
if not mode:
mode = "irc"
if mode == "all":
for func in list(escapers.values()):
title = func(title)
return title
return escapers.get(mode, lambda title: title)(title)
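# Hedged usage sketch (illustrative only): strip IRC control characters, HTML-escape,
# or apply every registered escaper with mode "all".
if __name__ == "__main__":
    title = "Tom & Jerry\r\n"
    print(escape(title, "irc"))   # drops \r, \n and \x01
    print(escape(title, "html"))  # substitutes HTML entities, e.g. &amp;
    print(escape(title, "all"))   # runs both escapers in sequence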
|
'''
This code is intended to serve as a basic example for a pendulum disturbed by a trolley
'''
import warnings
warnings.simplefilter("ignore", UserWarning)
# import all appropriate modules
import numpy as np
from scipy.integrate import odeint
import Generate_Plots as genplt
import InputShaping as shaping
import pdb
# Define Constants
G = 9.81
DEG_TO_RAD = np.pi / 180
# ODE Solver characteristics
abserr = 1.0e-9
relerr = 1.0e-9
max_step = 0.01
def response(p, Shaper):
'''
Generate the response of a pendulum disturbed by a trolley
'''
# Unpack the constraints and initial conditions
[Amax,Vmax], m, c, k, StartTime, t_step, t, X0, Distance = p
omega = np.sqrt(k/m)
omega_hz = omega / (2 * np.pi)
zeta = c / (2 * m * omega)
#print(roots)
    #m_1 *= m1_error  # m_1/m1_error are not defined in this single-mass model
    #m_2 *= m2_error
#Determine the step at which the command is initiated
Start_step = np.round((StartTime / t_step)).astype(int)
response = np.zeros([len(t),len(X0)])
# Create an unshaped input
if isinstance(Shaper,np.ndarray):
#pdb.set_trace()
in_shape = Shaper
elif Shaper == 'ZV':
in_shape = shaping.ZV(omega_hz, zeta).shaper
elif Shaper == 'ZVD':
in_shape = shaping.ZVD(omega_hz,zeta).shaper
elif Shaper == 'ZVDD':
in_shape = shaping.ZVDD(omega_hz,zeta).shaper
elif Shaper == 'UMZV':
in_shape = shaping.UMZV(omega_hz,zeta).shaper
else:
in_shape = np.array([[0.,1]])
# Generate the response
response = odeint(
eq_motion, X0, t, args=(in_shape,t,k,c,Amax,Vmax,m,Distance),
atol=abserr, rtol=relerr, hmax=max_step
)
return response,in_shape
def eq_motion(X,t,in_shape,t_sys,k,c,Amax,Vmax,m,Distance):
'''
Returns the state-space equations of motion for the system.
Inputs:
X - State variables
t - Current system time
sys_in - Acceleration input commanded to the system
t_sys - total time array for the system. Used to interpolate the current time
length - length of the pendulum
'''
# Grab the state Variables
x,x_dot = X
xd_dot = 0.
xd = shaping.shaped_input(shaping.step_input,t,in_shape,Distance)
# Evaluate the differential equations of motion
ODE = [ x_dot,
            k / m * (xd - x) + c / m * (xd_dot - x_dot) - G]
return ODE
|
#import sys
#input = sys.stdin.readline
def solve():
L, R = map(int,input().split())
if L == 0:
return (R+1)*(R+2)//2
elif R < L*2:
return 0
else:
return (R-L*2+1)*(R-L*2+2)//2
def main():
T = int( input())
ANS = [ solve() for _ in range(T)]
print("\n".join(map(str, ANS)))
if __name__ == '__main__':
main()
|
# Write a python script that will do the following. Rename all files in this folder to abide by a naming convention of data_##
# where ## is an arbitrary number used to define ordering.
# You can use the following methods from the os module.
# `os.getcwd()`
# this will return the path to the current directory python is being executed in
# `os.listdir()`
# returns a list of files in the current directory
# `os.rename(src, dst)`
# where src is the source path and dst is the destination path; both are inclusive of filenames
# Python code to rename multiple files in a directory or folder
import os
# Function to rename multiple files
def main():
    folder = "file"
    for i, filename in enumerate(os.listdir(folder)):
        src = os.path.join(folder, filename)
        dst = os.path.join(folder, "g" + str(i) + ".jpg")
        # rename() renames one file per loop iteration
        os.rename(src, dst)
# calls main for the file
if __name__ == '__main__':
    main()
|
"""
===----Config------------------------------------------------------------------------===
Airbnb Clone Project, config file.
Isolating environments in Python: Development/Production/Test.
===----------------------------------------------------------------------------------===
"""
from os import environ
ENV = environ
if 'AIRBNB_ENV' not in ENV:
raise Exception("Environment variable AIRBNB_ENV not set\n"
"Expected to be 'development', 'production', or 'test'")
def setenv(env):
switch = {
'production' : {
'debug' : False,
'host' : '0.0.0.0',
'port' : 3000,
'user' : 'airbnb_user_prod',
'db' : 'airbnb_prod',
'password' : ENV.get('AIRBNB_DATABASE_PWD_PROD')
},
'development' : {
'debug' : True,
'host' : 'localhost',
'port' : 3333,
'user' : 'airbnb_user_dev',
'db' : 'airbnb_dev',
'password' : ENV.get('AIRBNB_DATABASE_PWD_DEV')
},
'test' : {
'debug' : False,
'host' : 'localhost',
'port' : 5555,
'user' : 'airbnb_user_test',
'db' : 'airbnb_test',
'password' : ENV.get('AIRBNB_DATABASE_PWD_TEST')
}
}
return switch.get(env)
options = setenv(ENV['AIRBNB_ENV'])
DEBUG = options['debug']
HOST = options['host']
PORT = options['port']
DATABASE = {
'host': '158.69.78.253',
'user': options['user'],
'database': options['db'],
'port': 3306,
'charset': 'utf8',
'password': options['password']
}
# Testing the environment:
# print(DEBUG, HOST, PORT)
# print(DATABASE)
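# Hedged usage sketch (illustrative only; the entry-point name is a placeholder):
# this module reads AIRBNB_ENV at import time, so export it before starting the app:
#   AIRBNB_ENV=development AIRBNB_DATABASE_PWD_DEV=secret python3 app.py
# An unrecognized AIRBNB_ENV makes setenv() return None, and the option lookups above will fail.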
|
# coding: utf-8
from django.contrib import admin
from models import ApiToken, SpotifyUser
class ApiTokenAdmin(admin.ModelAdmin):
list_display = ("token", "date_added", "is_active")
search_fields = ["token", ]
list_filter = ("is_active", )
class SpotifyUserAdmin(admin.ModelAdmin):
list_display = ("user_id", "name", "followers")
search_fields = ["name", "user_id", ]
admin.site.register(ApiToken, ApiTokenAdmin)
admin.site.register(SpotifyUser, SpotifyUserAdmin)
|
from pipeline import Pipeline
import argparse
import os
if __name__== "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s","--sourcePath", help = "Destination for the input folder")
parser.add_argument("-d","--destinationPath", help = "Destination for the output folder")
    # We can add more arguments as needed, including the sampling rate and hop length, and eventually build a customizable menu
args = parser.parse_args()
## when the user fails to give the destination folder
if(args.destinationPath == None):
defaultPath = os.path.join(os.path.abspath(os.getcwd()),"extractedFeatures/")
if (not(os.path.exists(defaultPath))):
os.makedirs(defaultPath)
args.destinationPath = defaultPath
## when the destination folder does not exist
if(not(os.path.exists(args.destinationPath))):
os.makedirs(args.destinationPath)
    pipeline = Pipeline(args.sourcePath, args.destinationPath)  # we can pass the other arguments here; for now it assumes the default values
    pipeline.extractMFCCFeatures()
    pipeline.extractMELFeatures()
|
# coding: utf-8
"""Module that contains all user repository Data Scraping logic."""
import requests
from .utils import BaseRequest
from .exceptions import (InvalidTokenError, RepositoryNameNotFoundError,
ApiError, RepositoryIdNotFoundError, ApiRateLimitError)
class RepositoryApi(BaseRequest):
"""Class that has Repository Data Scraping actions."""
def __init__(self, default_access_token=None):
"""Constructor.
Args:
default_access_token: The default GitHub access_token
            If you don't provide an access_token, your number of requests will be limited to 60 requests per hour, according to the GitHub REST API v3.
"""
self.default_access_token = default_access_token
def repository_by_id(self, repository_id, access_token=None):
"""Return a repository with given repository ID."""
url = "{0}/repositories/{1}{2}"
access_token = self.get_token(access_token)
token_arg = ''
if access_token != '':
token_arg = "?access_token={}".format(access_token)
response = requests.get(
url.format(self.ROOT_API_URL, repository_id, token_arg))
self._check_common_status_code(response, access_token)
if response.status_code == requests.codes.not_found:
raise RepositoryIdNotFoundError({'repository_id': repository_id})
return response.json()
def repository_by_name(self, username, repository_name, access_token=None):
"""Return a repository with given repository_name and username."""
url = "{0}/repos/{1}/{2}{3}"
access_token = self.get_token(access_token)
token_arg = ''
if access_token != '':
token_arg = "?access_token={}".format(access_token)
response = requests.get(
url.format(
self.ROOT_API_URL, username, repository_name, token_arg)
)
self._check_common_status_code(response, access_token)
if response.status_code == requests.codes.not_found:
raise RepositoryNameNotFoundError(
{'repository_name': repository_name, 'username': username})
return response.json()
def get_all_data(self, repository_id=None, repository_name=None, access_token=None):
"""Request all repository data from a given repository ID or name."""
data = {}
if repository_id:
root_data = self.repository_by_id(repository_id, access_token)
data['id'] = root_data['id']
data['name'] = root_data['name']
data['private'] = root_data['private']
data['description'] = root_data['description']
data['fork'] = root_data['fork']
data['created_at'] = root_data['created_at']
data['updated_at'] = root_data['updated_at']
data['pushed_at'] = root_data['pushed_at']
data['homepage'] = root_data['homepage']
data['size'] = root_data['size']
data['stargazers_count'] = root_data['stargazers_count']
data['watchers_count'] = root_data['watchers_count']
data['language'] = root_data['language']
data['has_issues'] = root_data['has_issues']
data['has_projects'] = root_data['has_projects']
data['has_downloads'] = root_data['has_downloads']
data['has_wiki'] = root_data['has_wiki']
data['has_pages'] = root_data['has_pages']
data['forks_count'] = root_data['forks_count']
data['mirror_url'] = root_data['mirror_url']
data['archived'] = root_data['archived']
data['open_issues_count'] = root_data['open_issues_count']
data['forks'] = root_data['forks']
data['open_issues'] = root_data['open_issues']
data['watchers'] = root_data['watchers']
data['default_branch'] = root_data['default_branch']
data['network_count'] = root_data['network_count']
data['subscribers_count'] = root_data['subscribers_count']
            data['branches'] = self.branches_by_id(
                repository_id, access_token)
            data['comments'] = self.comments_by_id(
                repository_id, access_token)
            data['commits'] = self.commits_by_id(
                repository_id, access_token)
            data['contents'] = self.contents_by_id(
                repository_id, access_token)
            data['contributors'] = self.contributors_by_id(
                repository_id, access_token)
            data['events'] = self.events_by_id(
                repository_id, access_token)
            data['issues'] = self.issues_by_id(
                repository_id, access_token)
            data['labels'] = self.labels_by_id(
                repository_id, access_token)
            data['languages'] = self.languages_by_id(
                repository_id, access_token)
            data['pulls'] = self.pulls_by_id(
                repository_id, access_token)
            data['subscribers'] = self.subscribers_by_id(
                repository_id, access_token)
            data['tags'] = self.tags_by_id(
                repository_id, access_token)
return data
def commits_by_name(self, username, repository_name, access_token=None):
"""Return repository commits from a given username.
Arguments:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "commits", access_token)
def contributors_by_name(self, username, repository_name, access_token=None):
"""Return repository contributors from a given username.
Arguments:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "contributors", access_token)
def issues_by_name(self, username, repository_name, access_token=None):
"""Return repository issues from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "issues", access_token)
def events_by_name(self, username, repository_name, access_token=None):
"""Return repository events from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "events", access_token)
def branches_by_name(self, username, repository_name, access_token=None):
"""Return repository branches from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "branches", access_token)
def tags_by_name(self, username, repository_name, access_token=None):
"""Return repository tags from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "tags", access_token)
def languages_by_name(self, username, repository_name, access_token=None):
"""Return repository languages from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "languages", access_token)
def subscribers_by_name(self, username, repository_name, access_token=None):
"""Return repository subscribers from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "subscribers", access_token)
def comments_by_name(self, username, repository_name, access_token=None):
"""Return repository comments from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "comments", access_token)
def contents_by_name(self, username, repository_name, access_token=None):
"""Return repository contents from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "contents", access_token)
def pulls_by_name(self, username, repository_name, access_token=None):
"""Return repository pulls from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "pulls", access_token)
def labels_by_name(self, username, repository_name, access_token=None):
"""Return repository labels from a given username.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_name(
username, repository_name, "labels", access_token)
def commits_by_id(self, repository_id, access_token=None):
"""Return repository commits from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "commits", access_token)
def contributors_by_id(self, repository_id, access_token=None):
"""Return repository contributors from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "contributors", access_token)
def issues_by_id(self, repository_id, access_token=None):
"""Return repository issues from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "issues", access_token)
def events_by_id(self, repository_id, access_token=None):
"""Return repository events from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "events", access_token)
def branches_by_id(self, repository_id, access_token=None):
"""Return repository branches from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "branches", access_token)
def tags_by_id(self, repository_id, access_token=None):
"""Return repository tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(repository_id, "tags", access_token)
def languages_by_id(self, repository_id, access_token=None):
"""Return languages tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "languages", access_token)
def subscribers_by_id(self, repository_id, access_token=None):
"""Return subscribers tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "subscribers", access_token)
def comments_by_id(self, repository_id, access_token=None):
"""Return comments tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "comments", access_token)
def contents_by_id(self, repository_id, access_token=None):
"""Return contents tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "contents", access_token)
def pulls_by_id(self, repository_id, access_token=None):
"""Return pulls tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "pulls", access_token)
def labels_by_id(self, repository_id, access_token=None):
"""Return labels tags from a given username and repository ID.
Args:
repository_id: An existent user's repository ID.
access_token: GitHub OAuth2 access token.
"""
return self._complete_request_by_id(
repository_id, "labels", access_token)
def _complete_request_by_name(self, username, repository_name, complement, access_token):
"""Complements a repository data request by name.
Args:
username: Github username.
repository_name: An existent user's repository name.
access_token: GitHub OAuth2 access token.
"""
url = "{0}/repos/{1}/{2}/{3}{4}"
access_token = self.get_token(access_token)
token_arg = ''
if access_token != '':
token_arg = "?access_token={}".format(access_token)
response = requests.get(
url.format(
self.ROOT_API_URL, username, repository_name, complement,
token_arg
)
)
self._check_common_status_code(response, access_token)
if response.status_code == requests.codes.not_found:
raise RepositoryNameNotFoundError(
{'repository_name': repository_name, 'username': username})
return response.json()
def _complete_request_by_id(self, repository_id, complement, access_token):
"""Complements a repository data request by ID.
Args:
repository_id: An existent user's repository ID.
complement: A resource to be requested.
access_token: GitHub OAuth2 access token.
"""
url = "{0}/repositories/{1}/{2}{3}"
access_token = self.get_token(access_token)
token_arg = ''
if access_token != '':
token_arg = "?access_token={}".format(access_token)
response = requests.get(
url.format(self.ROOT_API_URL, repository_id, complement, token_arg))
self._check_common_status_code(response, access_token)
if response.status_code == requests.codes.not_found:
raise RepositoryIdNotFoundError({'repository_id': repository_id})
return response.json()
def _check_common_status_code(self, response, access_token):
remaining = int(response.headers['X-RateLimit-Remaining'])
if response.status_code == requests.codes.forbidden and remaining == 0:
raise ApiRateLimitError(
{'X-RateLimit-Remaining': remaining,
'X-RateLimit-Limit': response.headers['X-RateLimit-Limit']})
elif response.status_code == requests.codes.unauthorized:
raise InvalidTokenError({'access_token': access_token})
elif response.status_code >= 500 and response.status_code <= 509:
raise ApiError()
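# Hedged usage sketch (illustrative only; the token and repository names are placeholders):
# instantiate the wrapper, fetch one repository by name, then its commits by ID,
# using the *_by_name/*_by_id helpers defined above.
if __name__ == "__main__":
    api = RepositoryApi(default_access_token='YOUR_GITHUB_TOKEN')
    repo = api.repository_by_name('octocat', 'Hello-World')
    print(repo['id'], repo['full_name'])
    print(len(api.commits_by_id(repo['id'])))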
|
import os
import json
from shapely.geometry import Polygon
from OSMPythonTools.api import Api
import pandas as pd
import copy
import cv2
import math
import shapely.geometry as geom
'''
Creates COCO annotations from VIA annotations.
Example VIA annotation format:
{
"FLAT.213253160.jpg43089":{
"fileref":"",
"size":43089,
"filename":"FLAT.213253160.jpg",
"base64_img_data":"",
"file_attributes":{
},
"regions":{
"0":{
"shape_attributes":{
"name":"polygon",
"all_points_x":[
205,
304,
415,
324,
205
],
"all_points_y":[
40,
394,
362,
39,
40
]
},
"region_attributes":{
"building":"flat"
}
}
}
}
},
Example COCO annotation format:
{
"info":{
"contributor":"",
"about":"",
"date_created":"",
"description":"",
"url":"",
"version":"",
"year":2019
},
"categories":[
{
"id":100,
"name":"building",
"supercategory":"building"
}
],
"images":[
{
"id":20289,
"file_name":"000000020289.jpg",
"width":300,
"height":300,
"lat":0,
"lon":0,
         "zoom":20
}
],
"annotations":[
{
"id":377545,
"image_id":44153,
"segmentation":[
[
152.0,
180.0,
156.0,
176.0,
160.0,
181.0,
156.0,
186.0,
152.0,
180.0
]
],
"area":42.0,
"bbox":[
152.0,
152.0,
28.0,
8.0
],
"category_id":100,
"iscrowd":0
}
]
}
'''
api = Api()
sunroof_data = "../data/sunroof_cities.csv"
df = pd.read_csv(sunroof_data)
# DO NOT CHANGE - USED To train MaskRCNN model "id:0 is background == BG"
CATEGORIES_LIST = ["flat","dome", "N", "NNE", "NE", "ENE", "E", "ESE", "SE","SSE", "S", "SSW","SW","WSW", "W", "WNW", "NW", "NNW","tree"]
CATEGORIES =[ {"id": i+1, "name": cat, "supercategory": "building" } for i, cat in enumerate(CATEGORIES_LIST) ]
CATEGORIES_MAP = dict( (cat["name"], cat["id"]) for cat in CATEGORIES )
print(CATEGORIES_MAP)
def get_latlon(wayid):
df_sel = df[df["wayid"]==wayid]
if len(df_sel) > 0:
try:
lat = float(df_sel["lat"])
lon = float(df_sel["lon"])
except:
lat = float(df_sel.iloc[0]["lat"])
lon = float(df_sel.iloc[0]["lon"])
else:
raise Exception("Way ID not found")
return lat, lon
def get_wayid(filename):
try:
wayid = int(filename.split(".")[0])
except:
wayid = int(filename.split(".")[1])
return wayid
def generate_annotation(inputfolder, outfolder):
'''
crop the images
'''
input_file = os.path.join(inputfolder, "via_region_data.json")
output_file = os.path.join(outfolder, "annotation.json")
coco = {"info": {"contributor": "stepenlee@umass", "about": "Building Dataset",
"date_created": "01/04/2018", "description": "Roof Orientation",
"url": "", "version": "1.0", "year": 2018},
"categories":[],
"images":[],
"annotations":[]
}
# import the via_region_data.json
with open(input_file, 'r') as fp:
via_data = json.load(fp)
categories = [];
images = [];
annotations = [];
annotation_id = 300000
image_id = 100
width = 512; height = 512; zoom = 20;
for i, key in enumerate(via_data.keys()):
filename = via_data[key]["filename"]
wayid = get_wayid(filename)
lat, lon = get_latlon(wayid)
# create image obj
image_id += 1
        image_obj = {"id": image_id, "file_name": filename, "width": width, "height": height, "lat": lat, "lon": lon, "zoom": zoom}
        images.append(image_obj)
        # create annotations
for k, region in enumerate(via_data[key]["regions"].keys()):
if (len(via_data[key]["regions"][region]["shape_attributes"]["all_points_x"]) <=2):
continue
annotation_id += 1
            # eliminate the segments or regions not in the bbox
            xys = list(zip(via_data[key]["regions"][region]["shape_attributes"]["all_points_x"], via_data[key]["regions"][region]["shape_attributes"]["all_points_y"]))
            segmentation = []
            for x, y in xys:
                segmentation.append(x)
                segmentation.append(y)
            area = geom.Polygon(xys).area
annotation_obj = {"id":annotation_id, "area":area, "image_id":image_id, "segmentation":[copy.deepcopy(segmentation)], "iscrowd":0}
class_name = via_data[key]["regions"][region]["region_attributes"]["building"]
# add annotations
annotation_obj["category_id"] = CATEGORIES_MAP[class_name] ## categories_map[class_name]
annotations.append(annotation_obj)
coco["categories"] = CATEGORIES
coco["annotations"] = annotations
coco["images"] = images
with open(output_file, 'w') as fp:
json.dump(coco, fp)
print("Saving.", output_file)
if __name__ == "__main__":
inputfolder = "../data/deeproof-aug/"
outfolder = "../data/deeproof-aug/"
print("generating annotation for ", inputfolder)
generate_annotation(inputfolder + "/test", outfolder + "/test")
|
import numpy as np
import math
from scipy.spatial import ConvexHull
# "Cross" product as used in UBC-ACM
def cross(p1, p2):
return p1[0] * p2[1] - p1[1] * p2[0]
# Euclidean distance between two points
def point_distance(p1, p2):
return np.linalg.norm(p1-p2)
# Return the rotation of point A around P (default [0, 0]) by an
# angle of theta (COUNTER-CLOCKWISE!)
def point_rotation(theta, A, P = np.array([0, 0])):
sin_t = np.sin(theta)
cos_t = np.cos(theta)
NP = np.array(A) - np.array(P)
NP = np.array([NP[0] * cos_t - NP[1] * sin_t, NP[0] * sin_t + NP[1] * cos_t])
NP = NP + P
return NP
# Scale the vector PA with P FIXED by factor K
# Returns a point A' such that the vector PA' is in the same direction of PA,
# with its length multiplied by k
def scale_vector(P, A, k):
return (A-P)*k + P
# Reflect around the X-axis (negate its y coordinate)
def refX(point):
return [point[0], -point[1]]
# Reflect around the Y-axis (negate its x coordinate)
def refY(point):
return [-point[0], point[1]]
# Scale point's coordinates by k
def scale(point, k):
return [point[0]* k, point[1]*k]
# Shift/translate point by x and y coordinate
def shift(point, x=0, y=0):
return [point[0]+x, point[1]+y]
# Value (in radians) of angle APB
def angle_between(A, P, B):
ap = np.sqrt(np.sum((A-P)**2))
pb = np.sqrt(np.sum((B-P)**2))
return math.acos(np.dot((A-P), (B-P))/(ap*pb))
# Returns true iff there are three collinear points in the set
def check_collinearity(points):
for a in range(len(points)):
for b in range(len(points)):
for c in range(len(points)):
if (a == b or b == c or a == c):
continue
                if (abs(angle_between(points[a], points[b], points[c]) - math.pi) < 0.000001):
return True
return False
# Returns true iff pointset is convex.
# NOTE: If there are three collinear points, it will be considered NOT convex.
def are_convex(points):
return len(points) == len(ConvexHull(points).vertices)
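# Hedged usage sketch (illustrative only): rotate a point a quarter turn about the
# origin, measure a right angle, and confirm a unit square is reported as convex.
if __name__ == "__main__":
    A = np.array([1.0, 0.0])
    P = np.array([0.0, 0.0])
    B = np.array([0.0, 1.0])
    print(point_rotation(np.pi / 2, A))  # ~[0, 1]
    print(angle_between(A, P, B))        # ~pi/2
    print(are_convex(np.array([[0, 0], [1, 0], [1, 1], [0, 1]])))  # True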
|
from flask import Flask, redirect, url_for
app = Flask(__name__)
from app.controllers import auth
app.register_blueprint(blueprint=auth.page, url_prefix='/auth')
from app.controllers import category
app.register_blueprint(blueprint=category.page, url_prefix='')
from app.controllers import course
app.register_blueprint(blueprint=course.page, url_prefix='')
|
# Unicode CSV (Python 2: relies on unicode() and generator .next())
import csv
import json
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
# CSV to JSON for Django
def csv2json(data, model, id_column=False, delim=','):
data = unicode_csv_reader(data.splitlines(), delimiter=delim)
# Get fields from header
fields = data.next()[1:] if id_column else data.next()
# Create entries dictionary
pk = 0
entries = []
# Create entries
for row in data:
if id_column:
pk = row.pop(0)
else:
pk += 1
entry = {}
entry['pk'] = int(pk)
entry['model'] = model
entry['fields'] = dict(zip(fields, row))
# Convert to correct data types
for key, value in entry['fields'].items():
entry['fields'][key] = int(value) if value.isdigit() else value.strip()
if value == 'NULL':
entry['fields'][key] = None
# Append entry to entries list
entries.append(entry)
# Convert to JSON
return json.dumps(entries, indent=4)
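# Hedged usage sketch (illustrative only; the model label and CSV content are made up).
# These helpers target Python 2, so run the example under a Python 2 interpreter.
if __name__ == "__main__":
    demo_csv = u"name,age\nAlice,30\nBob,NULL\n"
    print csv2json(demo_csv, 'app.person')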
|
from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from rango.models import Category, Page, UserProfile
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from rango.bing_search import run_query
from datetime import datetime
def urlswap(title):
# changes spaces to underscores
if ' ' in title:
return title.replace(' ','_')
return title.replace('_',' ')
def get_category_list():
# get all categories
cat_list = Category.objects.all()
# decode category URLs
for cat in cat_list:
cat.url = urlswap(cat.name)
# return list of category
return cat_list
def index(request):
context = RequestContext(request)
# get category list from helper function
category_list = get_category_list()
# get top viewed pages
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'cat_list':category_list,
'pages':page_list}
if request.session.get('last_visit'):
# the session has value for the last visit
last_visit_time = request.session.get('last_visit')
visits = request.session.get('visits',0)
if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
request.session['visits'] = visits + 1
request.session['last_visit'] = str(datetime.now())
else:
# get returns None, and session does not have value for last visit
request.session['last_visit'] = str(datetime.now())
request.session['visits'] = 1
# render and return rendered response back to user
return render_to_response('rango/index.html', context_dict, context)
def about(request):
import random
context = RequestContext(request)
what = ['happy','clever','shy']
context_dict = {'what': random.choice(what)}
# get category list
context_dict['cat_list'] = get_category_list()
# get number of visits from session
context_dict['visits'] = request.session.get('visits')
return render_to_response('rango/about.html', context_dict, context)
def category(request, category_name_url):
# obtain context from HTTP request
context = RequestContext(request)
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
            # run our Bing search function to get the results list!
result_list = run_query(query)
# change underscores in the category to spaces
category_name = urlswap(category_name_url)
# create context dictionary we can pass to template rendering
# we start by containing the name of category passed by user
context_dict = {'category_name': category_name,
'category_name_url': category_name_url,
'result_list': result_list }
# get category list
context_dict['cat_list'] = get_category_list()
try:
# can we find category with given name?
# if we can't, .get() method raises DoesNotExist exception
# so .get() method returns one model instance or raise exception
category = Category.objects.get(name=category_name)
# retrieve all of associated pages.
# note that filter returns >= 1 model instance
pages = Page.objects.order_by('-views').filter(category=category)
# adds our results list to the template context under name pages.
context_dict['pages'] = pages
# we also add category object from database to context dictionary
# we'll use this in the template to verify that category exists.
context_dict['category'] = category
except Category.DoesNotExist:
# we get here if we didn't find specified category.
# don't do anything - template displays "no category" message
pass
# go render the response and return it to the client.
return render_to_response('rango/category.html',
context_dict, context)
@login_required
def add_category(request):
# get context from the request
context = RequestContext(request)
# getting the cat list populated
context_dict = {'cat_list': get_category_list()}
# HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# have we been provided with valid form?
if form.is_valid():
# save new category to the database
form.save(commit=True)
# now call index() view
# the user will be shown the homepage
return index(request)
else:
# supplied form contained errors - just print them to terminal
print form.errors
else:
# if request was not POST, display the form to enter details
form = CategoryForm()
context_dict['form'] = form
# bad form (or form details), no form supplied..
# render the form with error messages (if any)
return render_to_response('rango/add_category.html',
context_dict, context)
@login_required
def add_page(request, category_name_url):
context = RequestContext(request)
category_name = urlswap(category_name_url)
if request.method == 'POST':
form = PageForm(request.POST)
# check for valid form fields
if form.is_valid():
# this time we can't commit straight away
# not all fields are automatically populated
page = form.save(commit=False)
# retrieve the associated Category object so we can add it
# wrap code in try block - check category actually exist
try:
cat = Category.objects.get(name=category_name)
page.category = cat
except Category.DoesNotExist:
# if we get here, category does not exist
# go back and render add category form as way of saying
# category does not exist
return render_to_response('rango/add_category.html', {}, context)
# also create a default value for number of views
page.views = 0
# with this we can save our new model instance
page.save()
# now that page is saved, display category instead
return category(request, category_name_url)
else:
print form.errors
else:
form = PageForm()
context_dict = {'category_name_url': category_name_url,
'category_name': category_name,
'form': form}
# getting the cat list populated
context_dict['cat_list'] = get_category_list()
return render_to_response('rango/add_page.html',
context_dict, context)
@login_required
def profile(request):
context = RequestContext(request)
context_dict = {'cat_list': get_category_list()}
u = User.objects.get(username=request.user)
try:
up = UserProfile.objects.get(user=u)
except:
up = None
context_dict['user'] = u
context_dict['userprofile'] = up
return render_to_response('rango/profile.html', context_dict, context)
def track_url(request):
context = RequestContext(request)
url = '/rango/'
# check for page_id
if request.method == 'GET':
if 'page_id' in request.GET:
# get page_id
page_id = request.GET['page_id']
try:
# try to get page using page_id
page = Page.objects.get(id=page_id)
# increment page view
page.views = page.views + 1
# get page url for redirection
url = page.url
# save new changes
page.save()
except Page.DoesNotExist:
pass
# redirect to url
return redirect(url)
def register(request):
# like before, get request context
context = RequestContext(request)
# a boolean value for telling template whether registration was successful
    # set false initially; code changes the value to true when registration succeeds
registered = False
# if it's a HTTP POST, we're interested in processing form data
if request.method == 'POST':
# attempt to grab information from raw form information
# note we make use of both UserForm and UserProfileForm
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# if two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# save user's form data to the database
user = user_form.save()
# now we hash the password with set_password method
# once hashed, we can update the user object
user.set_password(user.password)
user.save()
# now sort out the UserProfile instance
            # since we need to set the user attribute ourselves, we set commit=False
# this delays saving model until ready to avoid integrity problems
profile = profile_form.save(commit=False)
profile.user = user
# did the user provide a profile picture?
# if so, we need to get it from input form, put it in UserProfile model
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# now we save the UserProfile instance
profile.save()
# update our variable to tell template registration successful
registered = True
# invalid form or forms - mistakes or something else?
# print problems to terminal
# they'll be shown to user
else:
print user_form.errors, profile_form.errors
# not a HTTP POST so we render our form using two ModelForm instances
    # these forms will be blank, ready for user input
else:
user_form = UserForm()
profile_form = UserProfileForm()
# render the template depending on the context
return render_to_response(
'rango/register.html',
{'user_form': user_form, 'profile_form': profile_form,
'registered': registered},
context)
def user_login(request):
# like before get context for the user's request
context = RequestContext(request)
# context dictionary to be filled
context_dict = {}
# if request is HTTP POST, try pull out relevant information
if request.method == 'POST':
# gather username and password provided by user
# this information is obtained from login form
username = request.POST['username']
password = request.POST['password']
# use Django machinery to attempt to see the username/password
# combination is valid - a User object is returned if it is
user = authenticate(username=username, password=password)
# if we have User object, details are correct
# if none (Python's way of representing absence of value), no user
# with matching credentials found
if user:
            # is account active? it could have been disabled
if user.is_active:
# if account is valid and active, we can log user in
# we'll send user back to homepage
login(request, user)
url = '/rango/'
if 'next' in request.POST:
url = request.POST['next']
return redirect(url)
else:
# an inactive account was used - no logging in!
context_dict['error'] = 'Your Rango account is disabled.'
else:
# bad login details were provided. so we can't log user in
print "Invalid login details: {0}, {1}".format(username, password)
context_dict['error'] = 'Invalid login details supplied.'
# request is not a HTTP POST, so display login form
# this scenario would most likely be HTTP GET
#else: # this removed after exercises
# no context variables to pass to template system, hence
# blank dictionary object..
if 'next' in request.GET:
context_dict['next'] = request.GET['next']
return render_to_response('rango/login.html', context_dict, context)
@login_required
def restricted(request):
context = RequestContext(request)
context_dict = {'message':"Since you're logged in, you can see this text!"}
# getting the cat list populated
context_dict['cat_list'] = get_category_list()
return render_to_response('rango/restricted.html', context_dict, context)
@login_required
def user_logout(request):
# since we know user is logged in, we can just log them out.
logout(request)
# take user back to homepage
return redirect('/rango/')
def search(request):
context = RequestContext(request)
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
            # run our Bing search function to get the results list!
result_list = run_query(query)
# getting the cat list populated
context_dict = {'cat_list': get_category_list(),
'result_list': result_list }
return render_to_response('rango/search.html',
context_dict, context)
|
from datetime import datetime
from django.http import HttpResponse
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
from django.template.defaultfilters import safe
from pytz import timezone
import pandas as pd
# Create your views here.
# Grabs the text
def get_text(company):
page = requests.get('https://finance.yahoo.com/quote/' + company + '?p=' + company).text
return page
def home(request):
result = ''
# if we submit form then all attributes will be stored here
if 'company' in request.GET:
# Scraper for yahoo finance
company = request.GET.get('company')
page = get_text(company)
soup = BeautifulSoup(page, 'html.parser')
table = soup.find(
class_='D(ib) W(1/2) Bxz(bb) Pend(12px) Va(t) ie-7_D(i) smartphone_D(b) smartphone_W(100%) smartphone_Pend(0px) '
'smartphone_BdY smartphone_Bdc($seperatorColor)')
table2 = soup.find(class_='D(ib) W(1/2) Bxz(bb) Pstart(12px) Va(t) ie-7_D(i) ie-7_Pos(a) smartphone_D(b) '
'smartphone_W(100%) smartphone_Pstart(0px) smartphone_BdB smartphone_Bdc($seperatorColor)')
# Current stock price (constantly changing) and EST date/time of retrieval
current_stock_price = soup.find(class_='Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)').get_text()
tz = timezone('EST')
date_time = datetime.now(tz)
# names is a list containing the names of the metric;
# values is a list containing the values of the associated metric
names = table.find_all(class_='C($primaryColor) W(51%)')
values = table.find_all(class_='Ta(end) Fw(600) Lh(14px)')
names2 = table2.find_all(class_='C($primaryColor) W(51%)')
values2 = table2.find_all(class_='Ta(end) Fw(600) Lh(14px)')
metric_names1 = [name.get_text() for name in names]
metric_values1 = [value.get_text() for value in values]
metric_names2 = [name2.get_text() for name2 in names2]
metric_values2 = [value2.get_text() for value2 in values2]
metric_names = metric_names1 + metric_names2
metric_values = metric_values1 + metric_values2
metric_names = ["Date and Time", "Current Stock Price"] + metric_names
metric_values = [str(date_time), current_stock_price] + metric_values
table_dict = {}
for i in range(len(metric_names)):
table_dict[metric_names[i]] = metric_values[i]
# print(table_dict)
stock_data = pd.DataFrame({
'name': metric_names,
'value': metric_values
})
result = stock_data.to_html(header=False, index=False)
return render(request, 'main/results.html', {'result': result})
else:
return render(request, 'main/home.html')
def recApp(request):
# We can likely put our algorithm for the recommendation system here
# or in a helper method
return render(request, 'main/recApp.html')
def about(request):
# Just render the html if we want an about page
return render(request, 'main/about.html')
def results(request):
return render(request, 'main/results.html')
def tickers(request):
alphaResult = ''
# Scraper for tickers
page = requests.get('https://stockanalysis.com/stocks/')
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find(class_='no-spacing')
tickers_companies = table.find_all("a")
my_data =[tick.get_text().lower() for tick in tickers_companies]
my_tickers = [data.partition('-')[0].strip() for data in my_data]
my_companies = [data.partition('-')[2].strip() for data in my_data]
ticker_table = pd.DataFrame({
'ticker_symbol': my_tickers,
'company_name': my_companies
})
if 'ticker' in request.GET:
ticker = request.GET.get('ticker')
result1 = ticker_table[ticker_table['company_name'] == ticker.lower()]['ticker_symbol'].to_string()
# Only get the alpha characters
for char in result1:
if char.isalpha():
alphaResult += char
return render(request, 'main/ticker.html', {'result': alphaResult})
|
def main():
name = 'Lijun'
print('The name is', name)
name = name + ' Red'
print('New name is', name)
main()
def main():
count = 0
my_string = input('Enter a sentence: ')
for ch in my_string:
if ch=='T' or ch=='t':
count +=1
print(f'Letter T appears {count} times.')
main()
|
import socket
s = socket.socket()
s.connect(("localhost", 3500))
# keep prompting and sending until the user types "exit";
# the original re-sent the first message on every pass because the loop variable was never updated
msg = input("Say Something : ")
while msg != "exit":
    s.send(msg.encode())
    data = s.recv(1024).decode()
    print("Server : ", data)
    msg = input("Enter response : ")
s.close()
|
# Generated by Django 3.2.4 on 2021-07-06 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pay', '0007_forms_made_by'),
]
operations = [
migrations.AlterField(
model_name='forms',
name='made_on',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='forms',
name='order_id',
field=models.CharField(default=1, max_length=100, unique=True),
preserve_default=False,
),
]
|
# Generated by Django 3.0.2 on 2020-04-02 09:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('obsapp', '0021_auto_20200401_1723'),
]
operations = [
migrations.AddField(
model_name='product',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='notificationz',
name='notificationtime',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 2, 14, 44, 15, 666344)),
),
migrations.AlterField(
model_name='requestexchange',
name='datetimeofrequest',
field=models.DateTimeField(default=datetime.datetime(2020, 4, 2, 14, 44, 15, 665343)),
),
]
|
import random
import time
palavras = ('casa', 'abelha', 'cachoeira', 'porta', 'mesa', 'lixeira', 'cadeado', 'guarda')
sorteio_palavras = random.choice(palavras)  # pick a random word
letras = []
tentativa = 5
acertou = False
print('='*20 + ' HANGMAN ' + '='*20 + '\n')
print('Drawing a word...')
time.sleep(1.5)  # small pause for effect
print(f'The drawn word has {len(sorteio_palavras)} letters\n')
# Fill the display with one dash per letter of the drawn word
for i in range(0, len(sorteio_palavras)):
    letras.append('-')
# Main loop: runs until every letter has been revealed
while not acertou:
    enter_letras = str(input('Enter a letter: '))
    acertou_letra = False
    # Walk the drawn word and reveal every position that matches the guessed letter
    for i in range(0, len(sorteio_palavras)):
        if enter_letras == sorteio_palavras[i]:
            letras[i] = enter_letras
            print(letras[i])
            acertou = True
            acertou_letra = True
    # Walk the drawn word again; if any dash remains, the word is not complete yet
    for x in range(0, len(sorteio_palavras)):
        if letras[x] == '-':
            acertou = False
    # Only a wrong guess costs an attempt. (The original also decremented the counter
    # on correct guesses, which the author's note flagged as a bug.)
    if not acertou_letra:
        print(f'You have {tentativa} attempts.')
        tentativa -= 1
    # Out of attempts: hanged
    if tentativa <= 0:
        print('Hanged!')
        print(letras)
        break
|
import json
import pytest
from share.transform.chain import ctx
from share.transformers.v1_push import V1Transformer
class TestV1Transformer:
@pytest.mark.parametrize('input, expected', [
({
"contributors": [{
"name": "Roger Movies Ebert",
"sameAs": ["https://osf.io/thing"],
"familyName": "Ebert",
"givenName": "Roger",
"additionalName": "Danger",
"email": "rogerebert@example.com"
}, {
"name": "Roger Madness Ebert"
}],
"languages": ["eng"],
"description": "This is a thing",
"providerUpdatedDateTime": "2014-12-12T00:00:00Z",
"freeToRead": {
"startDate": "2014-09-12",
"endDate": "2014-10-12"
},
"licenses": [{
"uri": "http://www.mitlicense.com",
"startDate": "2014-10-12T00:00:00Z",
"endDate": "2014-11-12T00:00:00Z"
}],
"publisher": {
"name": "Roger Ebert Inc",
"email": "roger@example.com"
},
"sponsorships": [{
"award": {
"awardName": "Participation",
"awardIdentifier": "http://example.com"
},
"sponsor": {
"sponsorName": "Orange",
"sponsorIdentifier": "http://example.com/orange"
}
}],
"title": "Interesting research",
"version": {"versionId": "someID"},
"uris": {
"canonicalUri": "http://example.com/document1",
"providerUris": [
"http://example.com/document1uri1",
"http://example.com/document1uri2"
]
}
}, {
'@type': 'creativework',
'date_updated': '2014-12-12T00:00:00+00:00',
'description': 'This is a thing',
'language': 'eng',
'identifiers': [
{'@type': 'workidentifier', 'uri': 'http://example.com/document1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri2'},
],
'related_agents': [{
'@type': 'creator',
'cited_as': 'Roger Movies Ebert',
'order_cited': 0,
'agent': {
'@type': 'person',
'name': 'Roger Movies Ebert',
'related_agents': [],
'identifiers': [
{'@type': 'agentidentifier', 'uri': 'http://osf.io/thing'},
{'@type': 'agentidentifier', 'uri': 'mailto:rogerebert@example.com'}
],
},
}, {
'@type': 'creator',
'cited_as': 'Roger Madness Ebert',
'order_cited': 1,
'agent': {
'@type': 'person',
'name': 'Roger Madness Ebert',
'related_agents': [],
'identifiers': []
}
}, {
'@type': 'publisher',
'cited_as': 'Roger Ebert Inc',
'agent': {
'@type': 'organization',
'name': 'Roger Ebert Inc',
'related_agents': [],
'identifiers': [
{'@type': 'agentidentifier', 'uri': 'mailto:roger@example.com'},
]
}
}, {
'@type': 'funder',
'awards': [
{
'@type': 'throughawards',
'award': {'@type': 'award', 'name': 'Participation', 'uri': 'http://example.com'}
}
],
'cited_as': 'Orange',
'agent': {
'@type': 'organization',
'name': 'Orange',
'identifiers': [
{'@type': 'agentidentifier', 'uri': 'http://example.com/orange'},
]
}
}],
'subjects': [],
'tags': [],
'title': 'Interesting research',
}), ({
"contributors": [],
"languages": ["eng"],
"description": "This is a thing",
"providerUpdatedDateTime": "2014-12-12T00:00:00Z",
"title": "Interesting research",
"uris": {
"canonicalUri": "http://example.com/document1",
"providerUris": [
"http://example.com/document1uri1",
"http://example.com/document1uri2",
"http://example.com/document1uri2",
'http://example.com/document1',
]
}
}, {
'@type': 'creativework',
'date_updated': '2014-12-12T00:00:00+00:00',
'description': 'This is a thing',
'language': 'eng',
'identifiers': [
{'@type': 'workidentifier', 'uri': 'http://example.com/document1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri2'},
],
'related_agents': [],
'subjects': [],
'tags': [],
'title': 'Interesting research',
}), ({
"contributors": [],
"languages": ["eng"],
"description": "This is a thing",
"providerUpdatedDateTime": "2014-12-12T00:00:00Z",
"title": "Interesting research",
"otherProperties": [{"name": "status", "properties": {"status": "deleted"}}],
"uris": {
"canonicalUri": "http://example.com/document1",
"providerUris": [
'http://example.com/document1',
"http://example.com/document1uri1",
"http://example.com/document1uri2",
"http://example.com/document1uri2",
]
}
}, {
'@type': 'creativework',
'date_updated': '2014-12-12T00:00:00+00:00',
'description': 'This is a thing',
'is_deleted': True,
'language': 'eng',
'identifiers': [
{'@type': 'workidentifier', 'uri': 'http://example.com/document1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri1'},
{'@type': 'workidentifier', 'uri': 'http://example.com/document1uri2'},
],
'related_agents': [],
'subjects': [],
'tags': [],
'title': 'Interesting research',
})
])
def test_normalize(self, input, expected):
ctx.clear()
_, root_ref = V1Transformer({}).do_transform(json.dumps(input), clean_up=False)
actual = self.reconstruct(ctx.pool.pop(root_ref))
assert expected == actual
def reconstruct(self, document, extra=False):
for key, val in tuple(document.items()):
if isinstance(val, dict) and key != 'extra':
related = ctx.pool.pop(val, None)
if related:
document[key] = self.reconstruct(related, extra=extra)
else:
document.pop(key)
if isinstance(val, list):
document[key] = [self.reconstruct(ctx.pool.pop(v), extra=extra) for v in val]
del document['@id']
if not extra:
document.pop('extra', None)
return document
|
import multiprocessing
import platform
import subprocess
import sys
import os
from conans.model.version import Version
from conans.util.log import logger
from conans.client.tools import which
_global_output = None
def args_to_string(args):
if not args:
return ""
if sys.platform == 'win32':
return subprocess.list2cmdline(args)
else:
return " ".join("'" + arg.replace("'", r"'\''") + "'" for arg in args)
def cpu_count():
try:
env_cpu_count = os.getenv("CONAN_CPU_COUNT", None)
return int(env_cpu_count) if env_cpu_count else multiprocessing.cpu_count()
except NotImplementedError:
_global_output.warn("multiprocessing.cpu_count() not implemented. Defaulting to 1 cpu")
return 1 # Safe guess
def detected_architecture():
# FIXME: Very weak check but not very common to run conan in other architectures
if "64" in platform.machine():
return "x86_64"
elif "86" in platform.machine():
return "x86"
return None
# DETECT OS, VERSION AND DISTRIBUTIONS
class OSInfo(object):
""" Usage:
(os_info.is_linux) # True/False
(os_info.is_windows) # True/False
(os_info.is_macos) # True/False
(os_info.is_freebsd) # True/False
(os_info.is_solaris) # True/False
(os_info.linux_distro) # debian, ubuntu, fedora, centos...
(os_info.os_version) # 5.1
(os_info.os_version_name) # Windows 7, El Capitan
if os_info.os_version > "10.1":
pass
if os_info.os_version == "10.1.0":
pass
"""
def __init__(self):
self.os_version = None
self.os_version_name = None
self.is_linux = platform.system() == "Linux"
self.linux_distro = None
self.is_windows = platform.system() == "Windows"
self.is_macos = platform.system() == "Darwin"
self.is_freebsd = platform.system() == "FreeBSD"
self.is_solaris = platform.system() == "SunOS"
if self.is_linux:
import distro
self.linux_distro = distro.id()
self.os_version = Version(distro.version())
version_name = distro.codename()
self.os_version_name = version_name if version_name != "n/a" else ""
if not self.os_version_name and self.linux_distro == "debian":
self.os_version_name = self.get_debian_version_name(self.os_version)
elif self.is_windows:
self.os_version = self.get_win_os_version()
self.os_version_name = self.get_win_version_name(self.os_version)
elif self.is_macos:
self.os_version = Version(platform.mac_ver()[0])
self.os_version_name = self.get_osx_version_name(self.os_version)
elif self.is_freebsd:
self.os_version = self.get_freebsd_version()
self.os_version_name = "FreeBSD %s" % self.os_version
elif self.is_solaris:
self.os_version = Version(platform.release())
self.os_version_name = self.get_solaris_version_name(self.os_version)
@property
def with_apt(self):
return self.is_linux and self.linux_distro in \
("debian", "ubuntu", "knoppix", "linuxmint", "raspbian")
@property
def with_yum(self):
return self.is_linux and self.linux_distro in \
("centos", "redhat", "fedora", "pidora", "scientific",
"xenserver", "amazon", "oracle", "rhel")
@property
def with_pacman(self):
if self.is_linux:
return self.linux_distro == "arch"
elif self.is_windows and which('uname.exe'):
uname = subprocess.check_output(['uname.exe', '-s']).decode()
return uname.startswith('MSYS_NT') and which('pacman.exe')
return False
@staticmethod
def get_win_os_version():
"""
Get's the OS major and minor versions. Returns a tuple of
(OS_MAJOR, OS_MINOR).
"""
import ctypes
class _OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
('szCSDVersion', ctypes.c_wchar * 128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
os_version = _OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
return None
return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
@staticmethod
def get_debian_version_name(version):
if not version:
return None
elif version.major() == "8.Y.Z":
return "jessie"
elif version.major() == "7.Y.Z":
return "wheezy"
elif version.major() == "6.Y.Z":
return "squeeze"
elif version.major() == "5.Y.Z":
return "lenny"
elif version.major() == "4.Y.Z":
return "etch"
elif version.minor() == "3.1.Z":
return "sarge"
elif version.minor() == "3.0.Z":
return "woody"
@staticmethod
def get_win_version_name(version):
if not version:
return None
elif version.major() == "5.Y.Z":
return "Windows XP"
elif version.minor() == "6.0.Z":
return "Windows Vista"
elif version.minor() == "6.1.Z":
return "Windows 7"
elif version.minor() == "6.2.Z":
return "Windows 8"
elif version.minor() == "6.3.Z":
return "Windows 8.1"
elif version.minor() == "10.0.Z":
return "Windows 10"
@staticmethod
def get_osx_version_name(version):
if not version:
return None
elif version.minor() == "10.13.Z":
return "High Sierra"
elif version.minor() == "10.12.Z":
return "Sierra"
elif version.minor() == "10.11.Z":
return "El Capitan"
elif version.minor() == "10.10.Z":
return "Yosemite"
elif version.minor() == "10.9.Z":
return "Mavericks"
elif version.minor() == "10.8.Z":
return "Mountain Lion"
elif version.minor() == "10.7.Z":
return "Lion"
elif version.minor() == "10.6.Z":
return "Snow Leopard"
elif version.minor() == "10.5.Z":
return "Leopard"
elif version.minor() == "10.4.Z":
return "Tiger"
elif version.minor() == "10.3.Z":
return "Panther"
elif version.minor() == "10.2.Z":
return "Jaguar"
elif version.minor() == "10.1.Z":
return "Puma"
elif version.minor() == "10.0.Z":
return "Cheetha"
@staticmethod
def get_freebsd_version():
return platform.release().split("-")[0]
@staticmethod
def get_solaris_version_name(version):
if not version:
return None
elif version.minor() == "5.10":
return "Solaris 10"
elif version.minor() == "5.11":
return "Solaris 11"
def cross_building(settings, self_os=None, self_arch=None):
self_os = self_os or platform.system()
self_arch = self_arch or detected_architecture()
os_setting = settings.get_safe("os")
arch_setting = settings.get_safe("arch")
platform_os = {"Darwin": "Macos"}.get(self_os, self_os)
if os_setting and platform_os != os_setting:
return True
if arch_setting and self_arch != arch_setting:
return True
return False
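# --- illustration (not part of the original module) ----------------------------
# A minimal sketch of exercising cross_building() above with a stub settings
# object exposing get_safe(); real callers pass conanfile.settings. The stub
# class and the example values below are assumptions for demonstration only.
if __name__ == "__main__":
    class _StubSettings(object):
        def __init__(self, values):
            self._values = values
        def get_safe(self, name):
            return self._values.get(name)
    # Targeting Linux/armv7 from an x86_64 Linux host is reported as cross-building.
    print(cross_building(_StubSettings({"os": "Linux", "arch": "armv7"}),
                         self_os="Linux", self_arch="x86_64"))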
try:
os_info = OSInfo()
except Exception as exc:
logger.error(exc)
_global_output.error("Error detecting os_info")
|
import os
import numpy
from pydub import AudioSegment
if __name__ == '__main__':
audioPath = 'D:/PythonProjects_Data/CMU_MOSEI/WAV_16000/'
labelPath = 'D:/PythonProjects_Data/CMU_MOSEI/Step1_StartEndCut/'
savePath = 'D:/PythonProjects_Data/CMU_MOSEI/Step2_AudioCut/'
if not os.path.exists(savePath): os.makedirs(savePath)
for fileName in os.listdir(labelPath):
print('Treating', fileName)
startEndTicket = numpy.reshape(
numpy.genfromtxt(fname=os.path.join(labelPath, fileName), dtype=float, delimiter=','), [-1, 3])
wavFile = AudioSegment.from_file(file=os.path.join(audioPath, fileName.replace('csv', 'wav')))
for counter in range(numpy.shape(startEndTicket)[0]):
wavFile[int(startEndTicket[counter][-2] * 1000):int(startEndTicket[counter][-1] * 1000)].export(
os.path.join(savePath, fileName.replace('.csv', '_%02d.wav' % counter)), format='wav')
# exit()
|
"""
Crie um programa que leia vários números inteiros pelo teclado. O programa só
vai parar quando o usuário digitar o valor 999, que é a condição de parada.
No final, mostre quantos números foram digitados e qual foi a soma entre eles.
"""
n = c = s = 0
while True:
    n = int(input('Type a number: '))
if n == 999:
break
c += 1
s += n
print(f'The sum of the {c} numbers is {s}')
|
# This is the API for the object-oriented interface
import numpy as np
from math import pi
from scipy import interpolate
# The function below assumes a uniform latitude-longitude grid
def curl_2D(ufield, vfield, clat, dlambda, dphi, planet_radius=6.378e+6):
"""
Assuming regular latitude and longitude [in degree] grid, compute the curl
of velocity on a pressure level in spherical coordinates.
"""
ans = np.zeros_like((ufield))
ans[1:-1, 1:-1] = (vfield[1:-1, 2:] - vfield[1:-1, :-2])/(2.*dlambda) - \
(ufield[2:, 1:-1] * clat[2:, np.newaxis] -
ufield[:-2, 1:-1] * clat[:-2, np.newaxis])/(2.*dphi)
ans[0, :] = 0.0
ans[-1, :] = 0.0
ans[1:-1, 0] = ((vfield[1:-1, 1] - vfield[1:-1, -1]) / (2. * dlambda) -
(ufield[2:, 0] * clat[2:] -
ufield[:-2, 0] * clat[:-2]) / (2. * dphi))
ans[1:-1, -1] = ((vfield[1:-1, 0] - vfield[1:-1, -2]) / (2. * dlambda) -
(ufield[2:, -1] * clat[2:] -
ufield[:-2, -1] * clat[:-2]) / (2. * dphi))
ans[1:-1, :] = ans[1:-1, :] / planet_radius / clat[1:-1, np.newaxis]
return ans
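# --- illustration (not part of the original module) ----------------------------
# A minimal sketch of calling curl_2D above on a toy 1-degree global grid; the
# wind fields and grid sizes are invented for demonstration, and the helper is
# defined but never called by the module itself.
def _curl_2d_example():
    nlat, nlon = 181, 360
    ylat = np.linspace(-90., 90., nlat)
    clat = np.cos(np.deg2rad(ylat))   # cosine of latitude
    dphi = np.deg2rad(1.)             # 1-degree meridional spacing in radians
    dlambda = np.deg2rad(1.)          # 1-degree zonal spacing in radians
    ufield = np.ones((nlat, nlon))    # uniform zonal wind
    vfield = np.zeros((nlat, nlon))   # no meridional wind
    return curl_2D(ufield, vfield, clat, dlambda, dphi)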
class BarotropicField(object):
"""
An object that deals with barotropic (2D) wind and/or PV fields
:param xlon: Longitude array in degree with dimension *nlon*.
:type xlon: sequence of array_like
    :param ylat: Latitude array in degree, monotonically increasing with dimension *nlat*
:type ylat: sequence of array_like
:param area: Differential area at each lon-lat grid points with dimension (nlat,nlon). If 'area=None': it will be initiated as area of uniform grid (in degree) on a spherical surface.
:type area: sequence of array_like
:param dphi: Differential length element along the lat grid with dimension nlat.
:type dphi: sequence of array_like
:param pv_field: Absolute vorticity field with dimension [nlat x nlon]. If 'pv_field=None': pv_field is expected to be computed with u,v,t field.
:type pv_field: sequence of array_like
:returns: an instance of the object BarotropicField
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
"""
def __init__(self, xlon, ylat, pv_field, area=None, dphi=None,
n_partitions=None, planet_radius=6.378e+6):
"""Create a windtempfield object.
**Arguments:**
*xlon*
Longitude array in degree with dimension [nlon].
*ylat*
Latitutde array in degree, monotonically increasing with dimension
[nlat].
*area*
Differential area at each lon-lat grid points with dimension
[nlat x nlon].
If None, it will be initiated as:
2.*pi*Earth_radius**2 *(np.cos(ylat[:,np.newaxis]*pi/180.)*dphi)/float(nlon) * np.ones((nlat,nlon)).
This would be problematic if the grids are not uniformly distributed in degree.
*dphi*
Differential length element along the lat grid with dimension nlat.
*pv_field*
Absolute vorticity field with dimension [nlat x nlon].
If none, pv_field is expected to be computed with u,v,t field.
"""
self.xlon = xlon
self.ylat = ylat
self.clat = np.abs(np.cos(np.deg2rad(ylat)))
self.nlon = xlon.size
self.nlat = ylat.size
self.planet_radius = planet_radius
if dphi is None:
self.dphi = pi/(self.nlat-1) * np.ones((self.nlat))
else:
self.dphi = dphi
if area is None:
self.area = 2.*pi*planet_radius**2*(np.cos(ylat[:, np.newaxis]*pi/180.)*self.dphi[:, np.newaxis])/float(self.nlon)*np.ones((self.nlat, self.nlon))
else:
self.area = area
self.pv_field = pv_field
if n_partitions is None:
self.n_partitions = self.nlat
else:
self.n_partitions = n_partitions
def equivalent_latitudes(self):
"""
Compute equivalent latitude with the *pv_field* stored in the object.
:returns: an numpy array with dimension (nlat) of equivalent latitude array.
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
>>> eqv_lat = barofield1.equivalent_latitudes()
"""
from hn2016_falwa import basis
pv_field = self.pv_field
area = self.area
ylat = self.ylat
planet_radius = self.planet_radius
self.eqvlat, dummy = basis.eqvlat(ylat, pv_field, area, self.n_partitions,
planet_radius=planet_radius)
return self.eqvlat
def lwa(self):
"""
Compute the finite-amplitude local wave activity based on the *equivalent_latitudes* and the *pv_field* stored in the object.
:returns: an 2-D numpy array with dimension (nlat,nlon) of local wave activity values.
:example:
>>> barofield1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity)
>>> eqv_lat = barofield1.equivalent_latitudes() # This line is optional
>>> lwa = barofield1.lwa()
"""
from hn2016_falwa import basis
        if getattr(self, 'eqvlat', None) is None:
            self.eqvlat = self.equivalent_latitudes()
lwa_ans, dummy = basis.lwa(self.nlon, self.nlat, self.pv_field, self.eqvlat,
self.planet_radius * self.clat * self.dphi)
return lwa_ans
# === Next is a class of 3D objects ===
class QGField(object):
"""
    An object that deals with quasi-geostrophic (3D) wind, temperature and/or PV fields
:param xlon: Longitude array in degree with dimension (*nlon*).
:type xlon: sequence of array_like
    :param ylat: Latitude array in degree, monotonically increasing with dimension (*nlat*)
:type ylat: sequence of array_like
:param zlev: Pseudoheight array in meters, monotonically increasing with dimension (*nlev*)
:type zlev: sequence of array_like
    :param u_field: Zonal wind field in m/s, with dimension (*nlev*,*nlat*,*nlon*).
    :type u_field: sequence of array_like
    :param v_field: Meridional wind field in m/s, with dimension (*nlev*,*nlat*,*nlon*).
    :type v_field: sequence of array_like
    :param t_field: Temperature field in Kelvin, with dimension (*nlev*,*nlat*,*nlon*).
    :type t_field: sequence of array_like
    :param qgpv_field: Quasi-geostrophic potential vorticity field in 1/second, with dimension (*nlev*,*nlat*,*nlon*). If u_field, v_field and t_field are input, qgpv_field can be computed using the method compute_qgpv.
:type qgpv_field: sequence of array_like
:param area: Differential area at each lon-lat grid points with dimension (*nlat*,*nlon*). If 'area=None': it will be initiated as area of uniform grid (in degree) on a spherical surface.
:type area: sequence of array_like
:param dphi: Differential length element along the lat grid with dimension (*nlat*).
:type dphi: sequence of array_like
:param pv_field: Absolute vorticity field with dimension [nlat x nlon]. If 'pv_field=None': pv_field is expected to be computed with u,v,t field.
:type pv_field: sequence of array_like
    :returns: an instance of the object QGField
:example:
>>> qgfield1 = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
"""
def __init__(self, xlon, ylat, zlev, u_field, v_field=None, t_field=None,
qgpv_field=None, area=None, dphi=None,
n_partitions=None, rkappa=287./1004., planet_radius=6.378e+6,
scale_height=7000.):
"""Create a windtempfield object.
**Arguments:**
*xlon*
Longitude array in degree with dimension [nlon].
*ylat*
            Latitude array in degree, monotonically increasing with dimension
[nlat].
*zlev*
Pseudoheight array in meters, monotonically increasing with dimension
[nlev].
*u_field*
            Zonal wind field in m/s, with dimension [nlev x nlat x nlon].
        *v_field*
            Meridional wind field in m/s, with dimension [nlev x nlat x nlon].
*t_field*
Temperature field in Kelvin, with dimension [nlev x nlat x nlon].
*qgpv_field*
Quasi-geostrophic potential vorticity field in 1/second, with dimension
[nlev x nlat x nlon]. If u_field, v_field and t_field are input,
            qgpv_field can be computed using the method compute_qgpv.
*area*
Differential area at each lon-lat grid points with dimension
[nlat x nlon].
If None, it will be initiated as:
2.*pi*Earth_radius**2 *(np.cos(ylat[:,np.newaxis]*pi/180.)*dphi)/float(nlon) * np.ones((nlat,nlon)).
This would be problematic if the grids are not uniformly distributed in degree.
*dphi*
Differential length element along the lat grid with dimension nlat.
*n_partitions*
Number of partitions used to compute equivalent latitude. If not
given, it will be assigned nlat.
"""
self.xlon = xlon
self.ylat = ylat
self.zlev = zlev
self.clat = np.abs(np.cos(np.deg2rad(ylat)))
self.nlon = xlon.size
self.nlat = ylat.size
self.nlev = zlev.size
self.planet_radius = planet_radius
if dphi is None:
self.dphi = pi/(self.nlat-1) * np.ones((self.nlat))
else:
self.dphi = dphi
if area is None:
self.area = 2.*pi*planet_radius**2*(np.cos(ylat[:, np.newaxis]*pi/180.)*self.dphi[:, np.newaxis])/float(self.nlon)*np.ones((self.nlat, self.nlon))
else:
self.area = area
self.qgpv_field = qgpv_field
if n_partitions is None:
self.n_partitions = self.nlat
else:
self.n_partitions = n_partitions
# First, check if the qgpv_field is present
print('check self.qgpv_field')
# print self.qgpv_field
if (qgpv_field is None) & (v_field is None):
raise ValueError('qgpv_field is missing.')
elif (qgpv_field is None):
print('Compute QGPV field from u and v field.')
# === Obtain potential temperature field ===
            if t_field is not None:
self.pt_field = t_field[:, :, :] * \
np.exp(rkappa * zlev[:, np.newaxis, np.newaxis]/scale_height)
# Interpolation
f_Thalf = interpolate.interp1d(zlev, self.pt_field.mean(axis=-1),
axis=0)
                zlev_half = np.array([zlev[0] + 0.5 * (zlev[1] - zlev[0]) * i
                                      for i in range(zlev.size * 2 + 1)])
self.pt_field_half = f_Thalf(zlev_half) # dim = [2*nlev+1,nlat]
print('self.pt_field_half.shape')
print(self.pt_field_half.shape)
def equivalent_latitudes(self, domain_size='half_globe'): # Has to be changed since it is qgpv.
# Use half-globe?
"""
Compute equivalent latitude with the *pv_field* stored in the object.
        :param domain_size: domain of grids to be used to compute equivalent latitude. It can be 'half_globe' or 'full_globe'.
:type domain_size: string
:returns: an numpy array with dimension (*nlev*,*nlat*) of equivalent latitude array.
:example:
>>> qgfield1 = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
>>> qgfield_eqvlat = qgfield1.equivalent_latitudes(domain_size='half_globe')
"""
def eqv_lat_core(ylat, vort, area, n_points):
            vort_min = vort.min()
            vort_max = vort.max()
q_part_u = np.linspace(vort_min, vort_max, n_points,
endpoint=True)
aa = np.zeros(q_part_u.size) # to sum up area
vort_flat = vort.flatten() # Flatten the 2D arrays to 1D
area_flat = area.flatten()
# Find equivalent latitude:
inds = np.digitize(vort_flat, q_part_u)
for i in np.arange(0, aa.size): # Sum up area in each bin
aa[i] = np.sum(area_flat[np.where(inds == i)])
aq = np.cumsum(aa)
y_part = aq/(2*pi*planet_radius**2) - 1.0
lat_part = np.arcsin(y_part)*180/pi
q_part = np.interp(ylat, lat_part, q_part_u)
return q_part
area = self.area
ylat = self.ylat
planet_radius = self.planet_radius
self.eqvlat = np.zeros((self.nlev, self.nlat))
for k in range(self.nlev):
pv_field = self.qgpv_field[k, ...]
if domain_size == 'half_globe':
nlat_s = int(self.nlat/2)
qref = np.zeros(self.nlat)
# --- Southern Hemisphere ---
# qref1 = eqv_lat_core(ylat[:nlat_s],vort[:nlat_s,:],area[:nlat_s,:],nlat_s,planet_radius=planet_radius)
qref[:nlat_s] = eqv_lat_core(ylat[:nlat_s], pv_field[:nlat_s,:],
area[:nlat_s, :], nlat_s)
# --- Northern Hemisphere ---
pv_field_inverted = -pv_field[::-1, :] # Added the minus sign, but gotta see if NL_North is affected
qref2 = eqv_lat_core(ylat[:nlat_s], pv_field_inverted[:nlat_s,:],
area[:nlat_s, :], nlat_s)
#qref2 = eqvlat(ylat[:nlat_s],vort2[:nlat_s,:],area[:nlat_s,:],nlat_s,planet_radius=planet_radius)
qref[-nlat_s:] = -qref2[::-1]
elif domain_size == 'full_globe':
                qref = eqv_lat_core(ylat, pv_field, area, self.nlat)
else:
raise ValueError('Domain size is not properly specified.')
self.eqvlat[k, :] = qref
return self.eqvlat
def lwa(self):
"""
Compute the finite-amplitude local wave activity on each pseudoheight layer based on the *equivalent_latitudes* and the *qgpv_field* stored in the object.
:returns: an 3-D numpy array with dimension (*nlev*,*nlat*,*nlon*) of local wave activity values.
:example:
>>> qgfield = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV)
>>> qgfield_lwa = qgfield.lwa()
"""
try:
self.eqvlat
        except AttributeError:
self.eqvlat = self.equivalent_latitudes(domain_size='half_globe')
lwact = np.zeros((self.nlev, self.nlat, self.nlon))
for k in range(self.nlev):
pv_field = self.qgpv_field[k, :, :]
for j in np.arange(0, self.nlat-1):
vort_e = pv_field[:, :]-self.eqvlat[k, j]
vort_boo = np.zeros((self.nlat, self.nlon))
vort_boo[np.where(vort_e[:, :] < 0)] = -1
vort_boo[:j+1, :] = 0
vort_boo[np.where(vort_e[:j+1, :] > 0)] = 1
lwact[k, j, :] = np.sum(vort_e*vort_boo * self.planet_radius *
self.clat[:, np.newaxis] *
self.dphi[:, np.newaxis], axis=0)
return lwact
def main():
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
# === List of tests ===
test_2D = False
test_3D = True
# === Testing the 2D object ===
if test_2D:
data_path = '../examples/barotropic_vorticity.nc'
readFile = Dataset(data_path, mode='r')
abs_vorticity = readFile.variables['absolute_vorticity'][:]
xlon = np.linspace(0, 360., 512, endpoint=False)
ylat = np.linspace(-90, 90., 256, endpoint=True)
nlon = xlon.size
nlat = ylat.size
Earth_radius = 6.378e+6
dphi = (ylat[2]-ylat[1])*pi/180.
area = 2.*pi*Earth_radius**2 * (np.cos(ylat[:, np.newaxis]*pi/180.)
* dphi)/float(nlon) * np.ones((nlat, nlon))
cc1 = BarotropicField(xlon, ylat, pv_field=abs_vorticity) # area computed in the class assumed uniform grid
# Compute equivalent latitudes
cc1_eqvlat = cc1.equivalent_latitudes()
# Compute equivalent latitudes
cc1_lwa = cc1.lwa()
# --- Color axis for plotting LWA --- #
LWA_caxis = np.linspace(0, cc1_lwa.max(), 31, endpoint=True)
# --- Plot the abs. vorticity field, LWA and equivalent-latitude relationship and LWA --- #
fig = plt.subplots(figsize=(14, 4))
plt.subplot(1, 3, 1) # Absolute vorticity map
c = plt.contourf(xlon, ylat, cc1.pv_field, 31)
cb = plt.colorbar(c)
cb.formatter.set_powerlimits((0, 0))
cb.ax.yaxis.set_offset_position('right')
cb.update_ticks()
plt.title('Absolute vorticity [1/s]')
plt.xlabel('Longitude (degree)')
plt.ylabel('Latitude (degree)')
plt.subplot(1, 3, 2) # LWA (full domain)
plt.contourf(xlon, ylat, cc1_lwa, LWA_caxis)
plt.colorbar()
plt.title('Local Wave Activity [m/s]')
plt.xlabel('Longitude (degree)')
plt.ylabel('Latitude (degree)')
plt.subplot(1, 3, 3) # Equivalent-latitude relationship Q(y)
plt.plot(cc1_eqvlat, ylat, 'b', label='Equivalent-latitude relationship')
plt.plot(np.mean(cc1.pv_field, axis=1), ylat, 'g', label='zonal mean abs. vorticity')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.ylim(-90, 90)
plt.legend(loc=4, fontsize=10)
plt.title('Equivalent-latitude profile')
plt.ylabel('Latitude (degree)')
plt.xlabel('Q(y) [1/s] | y = latitude')
plt.tight_layout()
plt.show()
# === Testing the 2D object ===
if test_3D:
print('Test QGField')
u_QGPV_File = Dataset('../examples/u_QGPV_240hPa_2012Oct28to31.nc', mode='r')
# --- Read in longitude and latitude arrays --- #
xlon = u_QGPV_File.variables['longitude'][:]
ylat = u_QGPV_File.variables['latitude'][:]
clat = np.abs(np.cos(ylat*pi/180.)) # cosine latitude
nlon = xlon.size
nlat = ylat.size
u = u_QGPV_File.variables['U'][0, ...]
QGPV = u_QGPV_File.variables['QGPV'][0, ...]
u_QGPV_File.close()
print(u.shape)
print(QGPV.shape)
cc2 = QGField(xlon, ylat, np.array([240.]), u, qgpv_field=QGPV) # area computed in the class assumed uniform grid
cc3 = cc2.lwa()
print('cc3 shape')
print(cc3.shape)
# print 'test empty qgpv fields'
# cc4 = QGField(xlon, ylat, np.array([240.]), u)
plt.figure(figsize=(8, 3))
c = plt.contourf(xlon, ylat[80:], cc3[0, 80:, :], 31)
cb = plt.colorbar(c)
cb.formatter.set_powerlimits((0, 0))
cb.ax.yaxis.set_offset_position('right')
cb.update_ticks()
plt.title('Local Wave Activity at 240hPa [m/s]')
plt.xlabel('Longitude (degree)')
plt.ylabel('Latitude (degree)')
plt.show()
if __name__ == "__main__":
main()
|
# This is an attempt to analyze Newton's method via Python programming
# Define a function
import math
def newton(f, flin, x0, epsilon, maxIter=50):
if math.fabs(f(x0))<= epsilon:
return x0
print("k \t x0 \t\t f(x0)")
k=1
while k<=maxIter:
x1=x0-f(x0)/flin(x0)
print(" %d \t %e \t %e"%(k,x1,f(x1)))
if math.fabs(f(x1))<= epsilon:
return x1
x0 = x1
k = k+1
print("ERRO:Número máximo de interações atingido")
return x1
if __name__ == "__main__":
def f(x):
return 5*x**5 - 2*x**4 + 3*x**2 - 8*x - 10
def flin(x):
return 25*x**4 - 8*x**3 + 6*x - 8
raiz = newton(f, flin, 1.5, 0.001)
print(raiz)
|
# Simple extended BCubed implementation in Python for clustering evaluation
# Copyright 2020 Hugo Hromic, Chris Bowdon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Extended BCubed algorithm taken from:
# Amigo, Enrique, et al. "A comparison of extrinsic clustering evaluation metrics
# based on formal constraints." Information retrieval 12.4 (2009): 461-486.
"""Generate extended BCubed evaluation for clustering."""
"""Parallelized versions of functions in bcubed.extended."""
import numpy
from multiprocessing import Pool, cpu_count
from itertools import repeat
from .extended import mult_precision, mult_recall
def _p(el1, cdict, ldict):
return numpy.mean([mult_precision(el1, el2, cdict, ldict)
for el2 in cdict if cdict[el1] & cdict[el2]])
def _r(el1, cdict, ldict):
return numpy.mean([mult_recall(el1, el2, cdict, ldict)
for el2 in cdict if ldict[el1] & ldict[el2]])
def parallel(function, cdict, ldict, n_processes=None):
if n_processes is None:
n_processes = max(1, cpu_count() - 2)
with Pool(n_processes) as pool:
return pool.starmap(function, zip(cdict.keys(), repeat(cdict), repeat(ldict)))
def precision(cdict, ldict, n_processes=None):
"""Computes overall extended BCubed precision for the C and L dicts
using multiple processes for parallelism.
Parameters
==========
cdict: dict(item: set(cluster-ids))
The cluster assignments to be evaluated
ldict: dict(item: set(cluster-ids))
The ground truth clustering
n_processes: optional integer
        Number of processes to use (defaults to the number of CPU cores minus 2, with a minimum of 1)
"""
p_per_el = parallel(_p, cdict, ldict, n_processes)
return numpy.mean(p_per_el)
def recall(cdict, ldict, n_processes=None):
"""Computes overall extended BCubed recall for the C and L dicts
using multiple processes for parallelism.
Parameters
==========
cdict: dict(item: set(cluster-ids))
The cluster assignments to be evaluated
ldict: dict(item: set(cluster-ids))
The ground truth clustering
n_processes: optional integer
        Number of processes to use (defaults to the number of CPU cores minus 2, with a minimum of 1)
"""
r_per_el = parallel(_r, cdict, ldict, n_processes)
return numpy.mean(r_per_el)
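# --- illustration (not part of the original module) ----------------------------
# A minimal sketch of the API above on a toy multi-label clustering; the item ids
# and cluster labels are invented. cdict holds the clustering under evaluation and
# ldict the gold-standard labels, both as {item: set(labels)}. Run as a module
# (python -m ...) so the relative import above resolves.
if __name__ == "__main__":
    cdict = {"item1": {1}, "item2": {1}, "item3": {2}, "item4": {2}}
    ldict = {"item1": {"a"}, "item2": {"a"}, "item3": {"b"}, "item4": {"a"}}
    print("extended BCubed precision:", precision(cdict, ldict, n_processes=1))
    print("extended BCubed recall:", recall(cdict, ldict, n_processes=1))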
|
import andrimne.config as config
import andrimne.logger as logger
from andrimne.timer import Timer
import logging
import sys
def main():
timer = Timer()
config.read_main_configuration()
logger.configure()
steps = map(step_import, read_modules())
successful = True
for step in steps:
print_progress()
logger.log_step(step.__name__)
if not execute_step(step):
successful = False
break
log_run_completed(successful, timer)
sys.exit(0 if successful else 1)
def print_progress():
if config.read_or_default('verbosity', 'verbose') == 'verbose':
sys.stdout.write('.')
def execute_step(step):
# noinspection PyBroadException
try:
result = step.run()
if result is None or result is True or result == 0:
return True
except Exception as e:
logging.error(e)
return False
def log_run_completed(successful, timer):
if successful:
logging.info('DONE! elapsed time was {}'.format(timer.elapsed()))
else:
logging.warning('ABORTED! elapsed time was {}'.format(timer.elapsed()))
def read_modules():
logging.debug('reading step modules')
return config.read('step_modules')
def step_import(module_name):
logging.debug('importing step \'{}\''.format(module_name))
return __import__('andrimne.steps.{}'.format(module_name), fromlist='andrimne.steps')
if __name__ == '__main__':
main()
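# --- illustration (not part of andrimne itself) ---------------------------------
# Inferred from step_import()/execute_step() above: each name listed under the
# 'step_modules' config key is imported from andrimne.steps and must expose a
# run() callable; returning None, True or 0 counts as success, anything else (or
# an exception) aborts the run. A hypothetical step module could look like this
# (the module name "example_step" is an assumption):
#
#     # andrimne/steps/example_step.py
#     import logging
#     def run():
#         logging.info('doing the example work')
#         return True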
|
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if len(nums) == 0:
return []
arr = []
for idx in range(0, len(nums)-1):
        for val in range(idx+1, len(nums)):
if nums[idx]+nums[val] == target:
arr.append(idx)
arr.append(val)
return arr
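# --- illustration (not part of the original snippet) ----------------------------
# twoSum above is written as an instance method (LeetCode "Solution" style), so a
# quick standalone check can pass None for self; the sample input is made up.
if __name__ == '__main__':
    print(twoSum(None, [2, 7, 11, 15], 9))  # expected: [0, 1]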
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: yan
def homework1():
    dictionary = {
        'good': 'of a favorable character or tendency',
        'none': 'not any such thing or person',
        'nice': 'very beautiful'
    }
    # length
    print len(dictionary)
    # keys
    print dictionary.keys()
    # values
    print dictionary.values()
    # membership test
    print dictionary.has_key('good')
    print dictionary.has_key('bad')
    # add
    dictionary['bad'] = 'not very good'
    # modify
    dictionary['bad'] = 'failing to reach an acceptable standard'
    # delete
    del dictionary['bad']
    print dictionary
    print len(dictionary)
    # get a value
    print '---------'
    print dictionary['good']
    if dictionary.has_key('bad'):
        print dictionary['bad']
    else:
        print 'the word "bad" is not in the dictionary'
    print '----------'
    # iterate
    for key in dictionary.keys():
        print key
        print dictionary[key]
    print '----------------'
def homework2():
    print 'Dad is reading an English book'
    dictionary = {
        'abandon': 'to give up to the control or influence of another person or agent',
        'abase': 'to lower in rank, office, prestige, or esteem',
        'abash': 'to destroy the self-possession or self-confidence of'
    }
    print 'Dad first looks up the word "etiquette"'
    if dictionary.has_key('etiquette'):
        print 'etiquette means %s' % (dictionary['etiquette'])
    else:
        print 'Dad could not find the meaning of etiquette'
    del dictionary['abandon']
    print 'Dad gets angry and tears out the page containing "abandon"'
    print '-----------'
    print 'Dad then looks up the word "abase"'
    if dictionary.has_key('abase'):
        print 'abase means %s' % (dictionary['abase'])
        dictionary['abandon'] = 'to give up to the control or influence of another person or agent'
        print 'Dad is happy and adds "abandon" back into the dictionary'
    else:
        print 'Dad could not find the meaning of abase'
if __name__ == '__main__':
    homework1()
    homework2()
|
from extensions import registry
from maltego_trx.entities import Phrase
from maltego_trx.maltego import MaltegoMsg, MaltegoTransform
from maltego_trx.overlays import OverlayPosition, OverlayType
from maltego_trx.transform import DiscoverableTransform
@registry.register_transform(display_name="Overlay Example", input_entity="maltego.Person",
description='Returns a phrase with overlays on the graph.',
output_entities=["maltego.Phrase"])
class OverlayExample(DiscoverableTransform):
@classmethod
def create_entities(cls, request: MaltegoMsg, response: MaltegoTransform):
person_name = request.Value
entity = response.addEntity(Phrase, f"Hi {person_name}, nice to meet you!")
# Normally, when we create an overlay, we would reference a property name so that Maltego can then use the
# value of that property to create the overlay. Sometimes that means creating a dynamic property, but usually
# it's better to either use an existing property, or, if you created the Entity yourself, and only need the
# property for the overlay, to use a hidden property. Here's an example of using a dynamic property:
entity.addProperty('dynamic_overlay_icon_name', displayName="Name for overlay image", value="Champion")
entity.addOverlay('dynamic_overlay_icon_name', OverlayPosition.WEST, OverlayType.IMAGE)
# You *can* also directly supply the string value of the property, however this is not recommended. Why? If
# the entity already has a property of the same ID (in this case, "DE"), then you would in fact be assigning the
# value of that property, not the string "DE", which is not the intention. Nevertheless, here's an example:
entity.addOverlay('DE', OverlayPosition.SOUTH_WEST, OverlayType.IMAGE)
# Overlays can also be an additional field of text displayed on the entity:
entity.addProperty("exampleDynamicPropertyName", "Example Dynamic Property", "loose", "Maltego Overlay Testing")
entity.addOverlay('exampleDynamicPropertyName', OverlayPosition.NORTH, OverlayType.TEXT)
# Or a small color indicator
entity.addOverlay('#45e06f', OverlayPosition.NORTH_WEST, OverlayType.COLOUR)
|
from selenium import webdriver
import os
import shutil
import time
import pandas as pd
def getWoocommerceOrder(fromdate, todate):
options = webdriver.ChromeOptions()
prefs = {}
prefs['download.default_directory'] = 'C:\\Users\\OEM\\Downloads\\woocommerceOrder'
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=options, executable_path='chromedriver.exe')
driver.get('https://nzkw.com/wp-admin/admin.php?page=wc-order-export')
driver.find_element_by_name('username').send_keys('admin')
driver.find_element_by_name('password').send_keys('nzkw0800')
driver.find_element_by_name('submit_login').click()
driver.find_element_by_name('settings[from_date]').clear()
driver.find_element_by_name('settings[to_date]').clear()
driver.find_element_by_name('settings[from_date]').send_keys(fromdate)
driver.find_element_by_name('settings[to_date]').send_keys(todate)
filePath = 'C:\\Users\\OEM\\Downloads\\woocommerceOrder\\woocommerceOrders.csv'
try:
os.remove(filePath)
except:
print("Error while deleting file ", filePath)
time.sleep(2)
driver.find_element_by_id('export-btn').click()
time.sleep(20)
return prefs['download.default_directory'] + '\\woocommerceOrders.csv'
def modifyPaymentMethod(children):
methods = children['Payment Method'].tolist()
for i in range(len(methods)):
if methods[i] == 'Direct bank transfer':
methods[i] = 'Bank Transfer'
elif methods[i] == 'Payment Express':
methods[i] = 'Credit Card'
children['Payment Method'] = methods
def modifyShippingMethod(children):
methods = children['Shipping Method Title'].tolist()
for i in range(len(methods)):
if methods[i] == 'Small Parcel via Courier Post' or methods[i] == 'Box parcels sent via Fastway Courier':
methods[i] = 'Post'
elif methods[i] == 'Local pickup':
methods[i] = 'Collection'
children['Shipping Method Title'] = methods
def addCompanyName(children):
companies = children['Company (Billing)'].tolist()
firstnames = children['Customer First Name'].tolist()
lastnames = children['Customer Last Name'].tolist()
for i in range(len(companies)):
        if pd.notna(companies[i]):
firstnames[i] = firstnames[i] + ' ' + lastnames[i]
lastnames[i] = companies[i]
children['Customer First Name'] = firstnames
children['Customer Last Name'] = lastnames
def ProcessOrder(path):
children = pd.read_csv(path)
children['Order Reference'] = ['nzkw-' + str(ordernumber) for ordernumber in children['Order Reference']]
modifyPaymentMethod(children)
modifyShippingMethod(children)
addCompanyName(children)
children.to_csv(path)
def uploadToTradevine(path):
options = webdriver.ChromeOptions()
prefs = {}
prefs['download.default_directory'] = 'C:\\Users\\OEM\\Downloads\\woocommerceOrder'
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(chrome_options=options, executable_path='chromedriver.exe')
driver.get('https://nz.tradevine.com/KW-Auto-Parts-Ltd/SalesOrderImport/ImportStep1')
driver.find_element_by_id('Item_Email').send_keys('info@nzkw.com')
driver.find_element_by_id('Item_SuppliedPassword').send_keys('Nzkw0800466959@')
driver.find_element_by_id('signInButton').click()
driver.find_element_by_id('ImportFile').send_keys(path)
driver.find_element_by_css_selector('.button.with-icon.next span').click()
driver.find_element_by_id('Mappings_40__AlternateControlValue').send_keys('GST')
driver.find_element_by_css_selector('.button.with-icon.next span').click()
driver.find_element_by_css_selector('.button.with-icon.next span').click()
time.sleep(20000)
def uploadOrder(fromdate, todate):
orderPath = 'C:\\Users\\OEM\\Downloads\\woocommerceOrder'
if not os.path.exists(orderPath):
os.makedirs(orderPath)
path = getWoocommerceOrder(fromdate, todate)
ProcessOrder(path)
uploadToTradevine(path)
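# --- illustration (not part of the original script) -----------------------------
# A minimal sketch of driving the export/transform/upload pipeline above end to
# end; the date strings are placeholders and their expected format is an assumption.
if __name__ == '__main__':
    uploadOrder('2021-01-01', '2021-01-31')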
|
import os
for root, dirs, files in os.walk(r"E:\Dom\programs\Extractors\Prey\star-citizen-texture-converter-v1-3\Extracted", topdown=False):
    print(dirs)
|
import numpy as np
import sys
vocab = {}
D = 5
with open("81.jl.out") as f:
for line in f.readlines()[:1]:
words = line[:-1].strip().split(" ")
words = list(filter(lambda x: x != "", words))
for idx in range(len(words)):
randd = np.random.randint(1, D + 1)
d = min(randd, idx + 1, len(words) - randd - 1)
print(d)
sys.stdout.write("{}\t".format(words[idx]))
for i in range(-d, d + 1):
if i != 0:
if idx + i < 0:
sys.stdout.write("N/A\t")
elif idx + i >= len(words):
sys.stdout.write("N/A\t")
else:
sys.stdout.write("{}\t".format(words[idx + i]))
sys.stdout.write("\n")
|
from .xboxcontroller import XboxController
from .buzzer import IllegalBuzzer
from .differentialdrive import DifferentialDrive
from .pca_motor import PCA9685
from .speedcontroller import SpeedController
from .speedcontrollergroup import SpeedControllerGroup
from .timer import Timer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# diagnostic_node
# Info: ROS node for testing the CAN data transfer
# Input: all signals available on the ROS bus
# Output: - (only the external output of the dummy message)
# Author: Christof Kary (christof.kary@evomotiv.de)
# Version: 1.0, 04.06.2018
import rospy
import numpy as np
import time
import can
import struct
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import UInt16
from std_msgs.msg import Float64
from autonomous.msg import lane
from autonomous.msg import stopsign
from autonomous.msg import carcontrol
from sensor_msgs.msg import Range
NODE_NAME = "diagnostic_node"
SUB_TOPIC1 = "/lane_info" # Publisher-Skript: image_lanedetection_node.py
SUB_TOPIC2 = "/stop_info" # Publisher-Skript: image_stopsign_node.py
SUB_TOPIC3 = "/car_control" # Publisher-Skript: car_control_node.py
SUB_TOPIC4 = "/ultrasoundf1" # Publisher-Skript: ROS_1_3.ino
SUB_TOPIC5 = "/autonomous/image_raw" # Publisher-Skript: image_lanedetection_node.py
SUB_TOPIC6 = "/control_esc" # Publisher-Skript: esc_control_node.py
SUB_TOPIC7 = "/control_steering" # Publisher-Skript: PID-Controller (siehe launch-File)
QUEUE_SIZE = 1
spur = lane()
stopper = stopsign()
controller = carcontrol()
#lane_right = np.int64(0)
#lane_left = np.int64(0)
#lane_abw = np.int64(0)
#lane_status= np.int64(0)
#lane_radius= np.float64(0)
#eng_rpm = np.uint16(0)
#steer_angle= np.uint16(0)
straightee = []
cha = 0
stra = 0
bus = can.interface.Bus(bustype='socketcan', channel='can0', bitrate=500000) # initialize the CAN bus at 500 kbit/s
# Define the messages and clear the payload on every pass; otherwise payload bytes would leak into other messages
reset_msg = can.Message(arbitration_id = 0x00, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
lane_msg_1 = can.Message(arbitration_id = 0x10, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
lane_msg_2 = can.Message(arbitration_id = 0x11, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
stop_msg = can.Message(arbitration_id = 0x20, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
eng_msg = can.Message(arbitration_id = 0x30, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
us_msg = can.Message(arbitration_id = 0x40, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
stat_img_info_msg = can.Message(arbitration_id = 0x50, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
stat_pid_info_msg_1 = can.Message(arbitration_id = 0x51, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
stat_pid_info_msg_2 = can.Message(arbitration_id = 0x52, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
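# --- illustration (not part of the original node) -------------------------------
# The callbacks below all follow the same pattern: struct.pack(">...") yields a
# big-endian (Motorola) byte string whose bytes are copied into fixed positions of
# an 8-byte CAN payload. This helper is only a demonstration with a made-up value
# and is never called by the node.
def _pack_example(value=300):
    payload = bytearray(8)
    raw = struct.pack(">H", value)  # b'\x01\x2c' for 300
    # raw[i] is an int on Python 3 and a 1-char str on Python 2
    payload[6] = raw[0] if isinstance(raw[0], int) else ord(raw[0])  # high byte
    payload[7] = raw[1] if isinstance(raw[1], int) else ord(raw[1])  # low byte
    return payload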
#-----------------------------------------------------------------------------------------------------------------------
# Initialization of the node and the subscribed topics
def diagnosticnode(NODE_NAME, SUB_TOPIC1, SUB_TOPIC2, SUB_TOPIC3, SUB_TOPIC4, SUB_TOPIC5, SUB_TOPIC6):
rospy.init_node(NODE_NAME, anonymous=True)
    #rate = rospy.Rate(25)  # publish rate set to 25 Hz, since the camera delivers at most 25 frames/s
    rospy.Subscriber(SUB_TOPIC1, lane, callback1)        # subscribe to lane detection
    rospy.Subscriber(SUB_TOPIC2, stopsign, callback2)    # subscribe to stop-sign detection
    rospy.Subscriber(SUB_TOPIC3, carcontrol, callback3)  # subscribe to engine speed + steering angle
    rospy.Subscriber(SUB_TOPIC4, Range, callback4)       # subscribe to ultrasonic sensor
    rospy.Subscriber(SUB_TOPIC5, Image, callback5)       # subscribe to raw image
    rospy.Subscriber(SUB_TOPIC6, UInt16, callback6)      # subscribe to engine control (longitudinal control)
    #while not rospy.is_shutdown():
    #rate.sleep()  # loop according to the publish rate to keep the repetition frequency
rospy.spin()
#-----------------------------------------------------------------------------------------------------------------------
# Handler for the lane-detection signal data
def callback1(lane_data):
    # writes the signal data to a log file under /home/nvidia/.ros/log/...
#rospy.loginfo("right: %s", lane_data.right)
#rospy.loginfo("left: %s", lane_data.left)
#rospy.loginfo("Abweichung: %s", lane_data.abw)
#rospy.loginfo("Erkennung: %s", lane_data.erkennung)
#rospy.loginfo("Radius: %s", lane_data.radius)
    # the signal data first has to be converted into byte arrays
    lane_right = struct.pack(">H", lane_data.right)      # big-endian (Motorola) format, unsigned short (2 bytes)
    lane_left = struct.pack(">H", lane_data.left)        # big-endian (Motorola) format, unsigned short (2 bytes)
    lane_abw = struct.pack(">h", lane_data.abw)          # big-endian (Motorola) format, signed short (2 bytes) (actually signed long, but changes fit into 2 bytes)
    lane_status = struct.pack(">?", lane_data.erkennung) # big-endian (Motorola) format, bool (1 byte)
    lane_radius = struct.pack(">f", lane_data.radius)    # big-endian (Motorola) format, float (4 bytes)
    # define the messages and clear the payload on every pass; otherwise payload bytes would leak into other messages
    # assign the byte arrays to the CAN frame in Motorola (big-endian) byte order
    for i in reversed(range(0, len(lane_right))):        # copy the signal values into the CAN payload
        lane_msg_1.data[i+4] = lane_left[i]              # left lane in bytes 2...3 (left to right)
        lane_msg_1.data[i+6] = lane_right[i]             # right lane in bytes 0...1 (left to right)
    for i in reversed(range(0, len(lane_radius))):       # copy the signal values into the CAN payload
        lane_msg_1.data[i] = lane_radius[i]              # radius in bytes 4...7
    for i in reversed(range(0, len(lane_abw))):          # copy the signal values into the CAN payload
        lane_msg_2.data[i+6] = lane_abw[i]               # deviation from the computed lane centre (left to right)
    lane_msg_2.data[5] = lane_status
    # send the CAN frame on the bus
#send_on_can(bus, lane_msg)
bus.send(lane_msg_1)
bus.send(lane_msg_2)
#-----------------------------------------------------------------------------------------------------------------------
# Handler for the stop-sign detection signal data
def callback2(stop_data):
    # writes the signal data to a log file under /home/nvidia/.ros/log/...
#rospy.loginfo("stop_data: %s", stop_data)
    # the signal data first has to be converted into byte arrays
    stop_status = struct.pack(">?", stop_data.erkennung)  # big-endian (Motorola) format, bool (1 byte)
    stop_breite = struct.pack(">B", stop_data.breite_px)  # big-endian (Motorola) format, unsigned char (1 byte)
    stop_hoehe = struct.pack(">B", stop_data.hoehe_px)    # big-endian (Motorola) format, unsigned char (1 byte)
    stop_posx = struct.pack(">H", stop_data.posx_px)      # big-endian (Motorola) format, unsigned short (2 bytes)
    stop_posy = struct.pack(">H", stop_data.posy_px)      # big-endian (Motorola) format, unsigned short (2 bytes)
    # define the message and clear the payload on every pass; otherwise payload bytes would leak into other messages
#stop_msg = can.Message(arbitration_id = 0x40, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
    # assign the byte arrays to the CAN frame in Motorola (big-endian) byte order
    stop_msg.data[1] = stop_status                        # status: stop sign detected / not detected
    stop_msg.data[2] = stop_hoehe                         # height in px of the detected stop sign
    stop_msg.data[3] = stop_breite                        # width in px of the detected stop sign
    for i in reversed(range(0, len(stop_posx))):          # copy the signal values into the CAN payload
        stop_msg.data[i+6] = stop_posx[i]                 # horizontal position in the image in px
        stop_msg.data[i+4] = stop_posy[i]                 # vertical position in the image in px
    # send the CAN frame on the bus
#send_on_can(bus, stop_msg)
bus.send(stop_msg)
#-----------------------------------------------------------------------------------------------------------------------
# Handler for the engine and servo control signal data
def callback3(eng_data):
    # writes the signal data to a log file under /home/nvidia/.ros/log/...
#rospy.loginfo("eng_data: %s", eng_data)
    # the signal data first has to be converted into byte arrays
    eng_rpm = struct.pack(">H", eng_data.esc)        # big-endian (Motorola) format, unsigned short (2 bytes)
    steer_angle = struct.pack(">H", eng_data.servo)  # big-endian (Motorola) format, unsigned short (2 bytes)
    # define the message and clear the payload on every pass; otherwise payload bytes would leak into other messages
#eng_msg = can.Message(arbitration_id = 0x50, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
    # assign the byte arrays to the CAN frame in Motorola (big-endian) byte order
    for i in reversed(range(0, len(eng_rpm))):       # copy the signal values into the CAN payload
        eng_msg.data[i+4] = steer_angle[i]           # steering angle in the payload
        eng_msg.data[i+6] = eng_rpm[i]               # engine speed in the payload
    # send the CAN frame on the bus
#send_on_can(bus, eng_msg)
bus.send(eng_msg)
#-----------------------------------------------------------------------------------------------------------------------
# Handler for the ultrasonic sensor signal data
def callback4(ultraschall_front):
    # writes the signal data to a log file under /home/nvidia/.ros/log/...
#rospy.loginfo("US_data: %s", ultraschall_front.max_range)
    # the signal data first has to be converted into byte arrays
    us_front_entfernung = struct.pack(">f", ultraschall_front.range)        # big-endian (Motorola) format, float (4 bytes) (created in "ROS_1_3.ino")
    us_front_min_range = struct.pack(">H", ultraschall_front.min_range+50)  # big-endian (Motorola) format, unsigned short (2 bytes)
    us_front_max_range = struct.pack(">H", ultraschall_front.max_range)     # big-endian (Motorola) format, unsigned short (2 bytes)
    # define the message and clear the payload on every pass; otherwise payload bytes would leak into other messages
#us_msg = can.Message(arbitration_id = 0x60, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
    # assign the byte arrays to the CAN frame in Motorola (big-endian) byte order
    for i in reversed(range(0, len(us_front_min_range))):   # copy the signal values into the CAN payload
        us_msg.data[i] = us_front_max_range[i]              # maximum range in bytes 6...7
        us_msg.data[i+2] = us_front_min_range[i]            # minimum range in bytes 4...5
    for i in reversed(range(0, len(us_front_entfernung))):  # copy the signal values into the CAN payload
        us_msg.data[i+4] = us_front_entfernung[i]           # distance to the object in cm in bytes 0...3
    # send the CAN frame on the bus
bus.send(us_msg)
#-----------------------------------------------------------------------------------------------------------------------
# Currently contains only the steering angle, which is already transmitted via /car_control as well
# Reserved for future extensions
def callback5(image_data):
#rospy.loginfo("img_data: %s", image_data)
test = True
#-----------------------------------------------------------------------------------------------------------------------
# Currently contains only the engine speed, which is already transmitted via /car_control as well
# Reserved for future extensions
def callback6(esc_data):
#rospy.loginfo("esc_data: %s", esc_data)
test = True
#-----------------------------------------------------------------------------------------------------------------------
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#-----------------------------------------------------------------------------------------------------------------------
def reset_send(bus):
#reset_msg = can.Message(arbitration_id = 0x00, data = [0, 0, 0, 0, 0, 0, 0, 0], extended_id = False)
rospy.sleep(0.05)
bus.send(reset_msg)
#-----------------------------------------------------------------------------------------------------------------------
# Static info is read at startup and sent once; it does not change over the program runtime
def static_img_infos():
rospy.sleep(0.05)
img_device = rospy.get_param("/autonomous/uvc_camera/device")
img_fps = rospy.get_param("/autonomous/uvc_camera/fps")
img_width = rospy.get_param("/autonomous/uvc_camera/width")
img_height = rospy.get_param("/autonomous/uvc_camera/height")
# Signal data must first be converted to byte arrays
camera_fps = struct.pack(">H", img_fps) # big-endian (Motorola) format, unsigned short (2 bytes)
camera_width = struct.pack(">H", img_width) # big-endian (Motorola) format, unsigned short (2 bytes)
camera_height = struct.pack(">H", img_height) # big-endian (Motorola) format, unsigned short (2 bytes)
# Assign the byte arrays to the CAN frame in Motorola (big-endian) format
for i in reversed(range(0, len(camera_width))): # copy the signal values into the CAN payload
stat_img_info_msg.data[i+6] = camera_width[i] # image width in pixels (x direction), bytes 0...1
stat_img_info_msg.data[i+4] = camera_height[i] # image height in pixels (y direction), bytes 2...3
stat_img_info_msg.data[i+2] = camera_fps[i] # frame rate per second, bytes 4...5
# Send the CAN frame on the bus
bus.send(stat_img_info_msg)
#-----------------------------------------------------------------------------------------------------------------------
# Static info is read at startup and sent once; it does not change over the program runtime
def static_pid_infos():
rospy.sleep(0.05)
pid_Kd = rospy.get_param("/steering_pid/Kd")
pid_Ki = rospy.get_param("/steering_pid/Ki")
pid_Kp = rospy.get_param("/steering_pid/Kp")
pid_lower_limit = rospy.get_param("/steering_pid/lower_limit")
pid_upper_limit = rospy.get_param("/steering_pid/upper_limit")
pid_windup_limit = rospy.get_param("/steering_pid/windup_limit")
pid_max_freq = rospy.get_param("/steering_pid/max_loop_frequency")
# Signal data must first be converted to byte arrays
pid_Kd = struct.pack(">f", pid_Kd) # big-endian (Motorola) format, float (4 bytes)
pid_Ki = struct.pack(">f", pid_Ki) # big-endian (Motorola) format, float (4 bytes)
pid_Kp = struct.pack(">f", pid_Kp) # big-endian (Motorola) format, float (4 bytes)
pid_lower_limit = struct.pack(">b", pid_lower_limit) # signed char (1 byte)
pid_upper_limit = struct.pack(">b", pid_upper_limit) # signed char (1 byte)
pid_windup_limit = struct.pack(">b", pid_windup_limit) # signed char (1 byte)
pid_max_freq = struct.pack(">b", pid_max_freq) # signed char (1 byte)
# Assign the byte arrays to the CAN frame in Motorola (big-endian) format
for i in reversed(range(0, len(pid_Kd))): # copy the signal values into the CAN payload
stat_pid_info_msg_1.data[i+4] = pid_Kd[i] # D term (derivative) of the PID controller, bytes 0...3
stat_pid_info_msg_1.data[i] = pid_Ki[i] # I term (integral) of the PID controller, bytes 4...7
stat_pid_info_msg_2.data[i+4] = pid_Kp[i] # P term (proportional) of the PID controller, bytes 0...3
stat_pid_info_msg_2.data[3] = pid_lower_limit[i] # lower limit of the PID controller, byte 4
stat_pid_info_msg_2.data[2] = pid_upper_limit[i] # upper limit of the PID controller, byte 5
stat_pid_info_msg_2.data[1] = pid_windup_limit[i] # windup limit of the PID controller, byte 6
stat_pid_info_msg_2.data[0] = pid_max_freq[i] # maximum controller frequency, byte 7
# Send the CAN frame on the bus
bus.send(stat_pid_info_msg_1)
rospy.sleep(0.05)
bus.send(stat_pid_info_msg_2)
#-----------------------------------------------------------------------------------------------------------------------
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#-----------------------------------------------------------------------------------------------------------------------
def shutdown_routine():
reset_send(bus)
time.sleep(1)
bus.shutdown()
rospy.loginfo("Shutting down node %s", NODE_NAME)
# Main routine, executed after the preamble has been initialised
def main():
global bla
# Wait for a message received on the bus
for msg in bus:
# Check whether this is the diagnostics start instruction; if not, ignore the message, otherwise start the diagnostics node
if msg.arbitration_id == 0x7FF and msg.data[0] == 0x01: # 0x7FF is the highest possible ID, since 2^11 IDs are available
try:
reset_send(bus)
static_img_infos()
static_pid_infos()
diagnosticnode(NODE_NAME, SUB_TOPIC1, SUB_TOPIC2, SUB_TOPIC3, SUB_TOPIC4, SUB_TOPIC5, SUB_TOPIC6)
rospy.loginfo("Diagnose-Modus gestartet!")
for msg in bus:
if msg.arbitration_id == 0x7FF and msg.data[0] == 0x00:
print "Test3"
rospy.loginfo("Diagnose-Modus beendet!")
#rospy.signal_shutdown("Diagnose shutdown")
shutdown_routine()
except KeyboardInterrupt:
rospy.loginfo("Shutting down node %s", NODE_NAME)
#reset_send(bus)
#time.sleep(1)
#bus.shutdown()
#bla.unregister()
#shutdown_routine()
#rospy.spin()
#while not rospy.is_shutdown():
# if msg.arbitration_id == 0x7FF and msg.data[0] == 0x00: # 0x7FF is the highest possible ID, since 2^11 IDs are available
# data = 0x00 stops the diagnostics node again if it is active; otherwise 0x00 is ignored
# print "Test3"
# rospy.loginfo("Diagnostics mode stopped!")
# rospy.signal_shutdown("Diagnose shutdown")
#rospy.unregister()
#shutdown_routine()
#rospy.signal_shutdown("Diagnostics mode stopped!")
if __name__ == '__main__':
main()
|
def init_api(api):
base_url = '/supply'
from . import resources
api.add_resource(resources.SupplyResource, f'{base_url}')
api.add_resource(resources.SpecificSupplyResource, f'{base_url}/<int:item_id>')
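# A minimal usage sketch (hypothetical app factory; assumes Flask-RESTful's Api object --
# the project's actual wiring is not shown here):
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   init_api(api)   # registers /supply and /supply/<int:item_id>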
|
#!/usr/bin/env python
'''Execution script for DDR3 controller simulation.
PSU ECE 585 Winter'16 final project.
'''
__author__ = "Daniel Collins"
__credits__ = ["Daniel Collins", "Alex Varvel", "Jonathan Waldrip"]
__version__ = "0.3.5"
__email__ = "daniel.e.collins1@gmail.com"
import platform
import os
import sys
import subprocess
import itertools
import time
import shlex
# system information
system = platform.system()
cwd = os.path.abspath(os.path.join(os.path.abspath(__file__),os.pardir))
top = os.path.abspath(os.path.join(cwd,os.pardir))
sv_dir = os.path.abspath(os.path.join(top, "test/SVdesign"))
# workaround - redhat systems on PSU don't have python2.7 installed
# -catch breath here-
# so I cloned the argparse repo (which is in the standard library in 2.7)
argp_dir = os.path.abspath(os.path.join(top, '../argparse'))
sys.path.insert(0, argp_dir)
import argparse
# status indicator
bar = itertools.cycle(['-','/','|','\\'])
def build_models(log, compile_cpp, compile_sv):
'''Compiles the C++ and System Verilog DDR3 controller models.'''
if system == 'Linux':
if compile_cpp == True:
write(log, "\n----------------------\n")
write(log, "Compiling C++ model...\n")
write(log, "----------------------\n")
cmd = "make clean"
write(log, cmd + '\n')
clean = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log, cwd=top)
while clean.poll() is None:
status = status_spin(bar)
if clean.returncode != 0:
write(log, " ERROR: C++ make clean failed! See logs.\n")
sys.exit(1)
cmd = "make"
write(log, cmd + '\n')
build = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log, cwd=top)
while build.poll() is None:
status = status_spin(bar)
if build.returncode != 0:
write(log, " ERROR: C++ make failed! See logs.\n")
sys.exit(1)
write(log, "\n")
if compile_sv == True:
write(log, "\n---------------------\n")
write(log, "Compiling SV model...\n")
write(log, "---------------------\n")
cmd = "vlib work"
write(log, cmd + '\n')
build = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log, cwd=sv_dir)
while build.poll() is None:
status = status_spin(bar)
if build.returncode != 0:
write(log, " ERROR: vlib work failed! See logs.\n")
sys.exit(1)
cmd = "vlog -sv -mfcu Definitions.pkg InputDecoder.sv Queue.sv OutputEncoder.sv Top.sv ../SVtestbench/Testbench.sv +define+POLICY=OPEN"
write(log, cmd + '\n')
build = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log, cwd=sv_dir)
while build.poll() is None:
status = status_spin(bar)
if build.returncode != 0:
write(log, " ERROR: vlog compilation failed! See logs.\n")
sys.exit(1)
write(log, "\n")
else:
write(log, "Unsupported operating system\n")
sys.exit(1)
def generate_expected_results(log, inFile, outFile):
'''Generates expected output from System Verilog golden model.'''
if system == 'Linux':
write(log, "\n-----------------------\n")
write(log, "Runing SV simulation \n")
write(log, " for expected results \n")
write(log, "-----------------------\n")
input_str = "-gINPUT_FILE=" + inFile
output_str = "-gOUTPUT_FILE=" + outFile + ".sv"
cmd = "vsim -do \"run -all\" -c " + input_str + " " + output_str + " Testbench"
write(log, cmd +'\n')
build = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log, cwd=sv_dir)
while build.poll() is None:
status = status_spin(bar)
if build.returncode != 0:
write(log, " ERROR: SV sim failed! See logs.\n")
sys.exit(1)
else:
write(log, "Unsupported operating system\n")
sys.exit(1)
def run_ddr3Sim(log, inFile, outFile):
'''Executes ddr3Sim C++ model with specified input file.'''
if system == 'Linux':
write(log, "\n-------------------------\n")
write(log, "Running C++ simulation...\n")
write(log, "-------------------------\n")
prog = os.path.join(top, "bin/ddr3Sim")
cmd = prog + " -i " + inFile + " -o " + outFile
write(log, cmd +'\n')
sim = subprocess.Popen(shlex.split(cmd), stdout=log, stderr=log)
while sim.poll() is None:
status = status_spin(bar)
if sim.returncode != 0:
write(log, " ERROR: C++ sim failed! See logs.\n")
sys.exit(1)
else:
write(log, "Unsupported operating system\n")
sys.exit(1)
def check_input_completed(log, inFile, outFile):
'''Checks that all CPU requests in the input file have corresponding \
commands in the output file.'''
# Check that every input request line
# has a corresponding 'output' from each model.
requests = open(inFile, 'r')
cppFile = open(outFile, 'r')
reqLines = requests.readlines()
addrFound = [None for _ in range(len(reqLines))]
cppLines = cppFile.readlines()
# get indices of all RD/WR commands
cmdIdx = [i for i, cmd in enumerate(cppLines) if cmd.split()[1] == 'RD' or cmd.split()[1] == 'WR']
write(log, "\n-------------------------\n")
write(log, "Verifying output contains\n")
write(log, " all input addresses...\n")
write(log, "-------------------------\n")
# for every input cpu request
for i in range(len(reqLines)):
# decode address
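# (bit layout implied by the shifts below: column = address bits [13:3],
#  bank = bits [16:14], row = the bits from 17 upwards)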
reqLine = reqLines[i]
reqFields = reqLine.split()
row = int(reqFields[0],16) >> 17
bank = (int(reqFields[0],16) >> 14) & 0x7
col = (int(reqFields[0],16) >> 3) & 0x7ff
# search outputs for corresponding RD/WR command
for j in cmdIdx:
cppFields = cppLines[j].split()
# if we find a match that hasn't been hit before
if int(cppFields[2],16) == bank and int(cppFields[3],16) == col and addrFound[i] == None:
# mark this line as already searched
addrFound[i] = True
continue
# if None is in the list, we didn't find all input addresses :(
if None in addrFound:
write(log, " ERROR: at least one address from input not found in output!\n")
for i, req in enumerate(addrFound):
if req == None:
write(log, ' Missing request from line ' + str(i) + ' -- '+ reqLines[i])
return 1
else:
write(log, " PASS: all input addresses accounted for in output file\n")
return 0
def diff_cmds(log, cppFile, svFile):
'''Compares the output from the C++ and SV models.'''
write(log, "\n-----------------------\n")
write(log, "Diffing output files...\n")
write(log, "-----------------------\n")
clean_pass = True
cppFile = open(cppFile, 'r')
svFile = open(svFile, 'r')
cppLines = cppFile.readlines()
svLines = svFile.readlines()
if len(cppLines) != len(svLines):
write(log, "WARNING: unequal command count between expected and actual results\n")
write(log, str(len(cppLines)) + " != " + str(len(svLines)) + '\n')
clean_pass = False
for cppLine, svLine in zip(cppLines, svLines):
cppFields = cppLine.split()
svFields = svLine.split()
if int(cppFields[0]) != int(svFields[0]):
write(log, " ERROR: critical mismatch in timing output\n")
write(log, cppFields[0] + " != " + svFields[0] + '\n')
clean_pass = False
if cppFields[1] != svFields[1]:
write(log, " ERROR: mismatch in command output!\n")
write(log, cppFields[1] + " != " + svFields[1] + '\n')
clean_pass = False
if ''.join(cppFields[2:]) != ''.join(svFields[2:]):
write(log, " ERROR: mismatch in address output!\n")
write(log, ''.join(cppFields[2:]) + " != " + ''.join(svFields[2:]) + '\n')
clean_pass = False
if clean_pass == True:
write(log, " PASS: no diff detected in simlulation vs model\n")
return 0
else:
return 1
def write(filename, data):
'''Writes to stdout and file, much like the unix Tee program.'''
filename.write(data)
filename.flush()
sys.stdout.write(data)
sys.stdout.flush()
def status_spin(bar):
'''Spinning command line bar indicating background work.'''
sys.stdout.write(bar.next())
sys.stdout.flush()
sys.stdout.write('\b')
time.sleep(.20)
def main():
'''ddr3Sim execution entry point.'''
# options
parser = argparse.ArgumentParser(description='Execution script for DDR3 controller simulation.')
req = parser.add_argument_group("required arguments")
req.add_argument("-i", "--inFile", dest="inFile", \
required=True, help="Input file containing CPU requests.")
req.add_argument("-o", "--outFile", dest="outFile", \
required=True, help="Output file containing memory commands.")
parser.add_argument("-l", "--logFile", dest="log_name", \
default="ddr3Sim_test.log", help="Log file name.")
parser.add_argument("-cc", "--compile-cpp", action='store_true', \
dest="compile_cpp", help="Compile C++ model before execution.")
parser.add_argument("-cv", "--compile-sv", action='store_true', \
dest="compile_sv", help="Compile SV model before execution.")
parser.add_argument("-nc", "--no-exec-cpp", action='store_true', \
dest="no_exec_cpp", help="Do not execute cpp model.")
parser.add_argument("-nv", "--no-exec-sv", action='store_true', \
dest="no_exec_sv", help="Do not execute sv model.")
parser.add_argument("-d", "--do-not-check", action='store_true', \
dest="do_not_check_output", help="Do not compare output of models.")
args = parser.parse_args()
logFile = open(args.log_name, 'w')
inFile = os.path.abspath(args.inFile)
outFile = os.path.abspath(args.outFile)
write(logFile, "Input file: " + inFile + '\n')
write(logFile, "Output file: " + outFile + '\n')
# build models if need be
build_models(logFile, args.compile_cpp, args.compile_sv)
# execution
if not args.no_exec_sv:
generate_expected_results(logFile, inFile, outFile)
if not args.no_exec_cpp:
run_ddr3Sim(logFile, inFile, outFile)
# spot check if simulation has any obvious errors
svFile = outFile + ".sv"
rc1 = check_input_completed(logFile, inFile, outFile)
rc2 = 0
if not args.do_not_check_output:
rc2 = diff_cmds(logFile, outFile, svFile)
if rc1 != 0 or rc2 != 0:
write(logFile, "\n ERROR: something failed. See logs.\n")
else:
write(logFile, "\n PASS: no errors detected.\n")
sys.exit(0)
if __name__ == "__main__":
main()
|
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
INT_MAX = 2 ** 31 - 1
INT_MIN = -2 ** 31
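# div(a, b): for non-negative a and b, computes floor(a / b) without using '/' or '*':
# tb is doubled (and the count doubled with it) while tb + tb is still below a,
# the accumulated tb is subtracted from a, and the remainder is handled recursively.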
def div(a, b):
a = -a if a < 0 else a
b = -b if b < 0 else b
if a < b:
return 0
count = 1
tb = b
while tb + tb < a:
count += count
tb += tb
return count + div(a - tb, b)
if dividend == 0:
return 0
# if divisor == 1:
# return dividend
# if divisor == -1:
# return -dividend if dividend > INT_MIN else INT_MAX
sign = False if (dividend < 0 and divisor > 0) or (dividend > 0 and divisor < 0) else True
res = div(dividend, divisor)
if sign:
return res if res <= INT_MAX else INT_MAX
else:
return -res
if __name__ == '__main__':
s = Solution()
res = s.divide(-2147483648, -1)
print(res)
|
from django.contrib import admin
from django import forms
from userena.utils import get_user_model
from userena.admin import UserenaAdmin
from userena import settings as userena_settings
from accounts.models import (Account,
AccountReminding,
InTheLoopSchedule,
VenueAccount,
VenueType,
AccountTax,
AccountTaxCost)
from home.admin import FusionExportableAdmin
class AccountAdmin(admin.ModelAdmin):
fields = ('user', 'tax_origin_confirmed', 'not_from_canada', 'website')
class VenueAccountAdmin(FusionExportableAdmin):
list_display = ('venue_name', 'venue_address', 'city_name', 'venue_phone', 'venue_email', 'venue_fax', 'venue_site')
export_formats = (
(u'CSV', u','),
)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == 'types':
kwargs['widget'] = forms.CheckboxSelectMultiple
kwargs['help_text'] = ''
return db_field.formfield(**kwargs)
def venue_name(self, object):
return object.venue.name
def venue_address(self, object):
return object.venue.address
def city_name(self, object):
return object.venue.city.name
def venue_phone(self, object):
return object.phone
def venue_email(self, object):
return object.email
def venue_fax(self, object):
return object.fax
def venue_site(self, object):
return object.site
venue_name.short_description = 'Venue'
venue_address.short_description = 'Address'
city_name.short_description = 'City'
venue_phone.short_description = 'Phone'
venue_email.short_description = 'Email'
venue_fax.short_description = 'Fax'
venue_site.short_description = 'Web site'
city_name.admin_order_field = 'venue__city'
def queryset(self, request):
# Prefetch related objects
return super(VenueAccountAdmin, self).queryset(request).select_related('venue')
class CityFusionUserAdmin(UserenaAdmin):
list_display = ('username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'last_login')
admin.site.unregister(Account)
admin.site.register(Account, AccountAdmin)
admin.site.register(AccountReminding)
admin.site.register(AccountTax)
admin.site.register(AccountTaxCost)
admin.site.register(InTheLoopSchedule)
admin.site.register(VenueType)
admin.site.register(VenueAccount, VenueAccountAdmin)
if userena_settings.USERENA_REGISTER_USER:
try:
admin.site.unregister(get_user_model())
except admin.sites.NotRegistered:
pass
admin.site.register(get_user_model(), CityFusionUserAdmin)
|
def GetPDFName(lhapdf_id, add_extension=True):
if lhapdf_id == 11000:
pdfname = "CT10nlo"
elif lhapdf_id == 10550:
pdfname = "cteq66"
elif lhapdf_id == 10042:
pdfname = "cteq6l1"
else:
print("LHAPDF ID {} not known.".format(lhapdf_id))
exit(1)
if add_extension:
pdfname += ".LHgrid"
return pdfname
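# Example, derived from the mapping above: GetPDFName(10042) returns "cteq6l1.LHgrid",
# GetPDFName(10042, add_extension=False) returns "cteq6l1"; unknown IDs exit with status 1.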
|
""" RPG-lite Discord Bot """
import os
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix='/')
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Game('Test Active'))
@bot.command()
@commands.bot_has_permissions(manage_messages=True)
async def clear(ctx, amount=999):
await ctx.channel.purge(limit=amount+1)
bot.load_extension('cogs.character_ops')
bot.run(os.getenv('TOKEN'))
|
import uuid
from sqlalchemy.dialects.postgresql import UUID
from .base import db
from .mixins.base import BaseMixin
class Card(BaseMixin, db.Model):
__tablename__ = 'card'
title = db.Column(db.Text, nullable=False)
description = db.Column(db.Text, nullable=False)
comments = db.relationship('Comment', backref='card', lazy=True)
list_id = db.Column(UUID(as_uuid=True), db.ForeignKey('list.id'), nullable=False)
owner_id = db.Column(UUID(as_uuid=True), db.ForeignKey('user.id'), nullable=False)
def __init__(self, title, description, position):
self.title = title
self.description = description
def __repr__(self):
return '<Card %r>' % self.title
|
import pyodbc
from validator import Validator as validator
validator = validator()
class Customer:
def __init__(self):
self.__customerId=""
self.__customerName=""
self.__customerAddress=""
self.__customerPhoneNumber=""
def searchAllCustomers(self,cursor):
try:
cursor.execute('SELECT * FROM dbo.Customer')
dash = '-' * 75
print(dash)
print('{:>7s}{:>30s}{:>35s}'.format("Id", "Name", "Phone Number"))
print(dash)
for row in cursor:
print('{:>7s}{:>30s}{:>30s}'.format(str(row[0]), row[1], row[3]))
except:
print ("Something went wrong.!! Contact the administrator.!")
def searchCustomerByName(self,cursor):
try:
name = input("Please enter the Customer Name")
args = ['%'+name+'%']
sql = 'SELECT c.customer_id, c.customer_name, c.customer_phone FROM Customer c WHERE c.customer_name LIKE ?'
cursor.execute(sql,args)
resultSet = cursor.fetchall()
if(len(resultSet) != 0):
dash = '-'*75
print(dash)
print('{:>7s}{:>30s}{:>35s}'.format("Id", "Name","Phone Number"))
print(dash)
for row in resultSet:
print('{:>7s}{:>30s}{:>30s}'.format(row[0], row[1], row[2]))
else:
print("No Customer found with that name.!")
except:
print("Something went wrong.!! Contact the administrator.!")
def addCustomer(self, databse, cursor):
try:
self.__customerName = input("Enter name of Customer.")
mname = self.__customerName
while not validator.nameValidate(mname):
mname = input("Enter name of Customer.")
self.__customerName = mname
self.__customerPhoneNumber = input("Enter Customer Phone Number.")
addr = self.__customerPhoneNumber
while not validator.numberValidate(addr):
addr = input("Enter Customer Phone Number.")
self.__customerPhoneNumber = addr
print("Enter customer address.")
street = input("Enter street")
bldng = input("Enter building/house name")
room = input("Enter room no/house no")
county = input("Enter county name")
areacode = input("Enter area code")
self.__customerAddress = "<Address><Street>" + street + "</Street><Building>" + bldng + "</Building><RoomNo>" + room + "</RoomNo><County>" + county + "</County><AreaCode>" + areacode + "</AreaCode></Address>"
databse.insertCustomerRecord(self.__customerName,self.__customerAddress,self.__customerPhoneNumber)
print("Customer Record added successfully..!")
except:
print ("Something went wrong.!! Contact the administrator.!")
def updateCustomer(self, database, cursor):
try:
name = input("Enter name of the Customer.")
args = ['%'+name+'%']
sql = 'SELECT * FROM dbo.Customer WHERE customer_name LIKE ?'
cursor.execute(sql,args)
dash = '-' * 75
result = cursor.fetchall()
if len(result) != 0:
print("Customer found with name entered.! ")
print(dash)
print('{:>7s}{:>30s}{:>35s}'.format("Id", " Customer Name", " Customer Phone Number"))
print(dash)
for row in result:
print('{:>7s}{:>30s}{:>30s}'.format(row[0], row[1], row[3]))
self.__customerId = input("Enter the Customer Id which needs to be updated")
self.__customerName = input("Enter Same or New Name of the Customer.")
mname = self.__customerName
while not validator.nameValidate(mname):
mname = input("Enter Same or New Name of the Customer.")
self.__customerName = mname
self.__customerPhoneNumber = input("Enter Customer New or Same Phone Number.")
addr = self.__customerPhoneNumber
while not validator.numberValidate(addr):
addr = input("Enter Customer Phone Number.")
self.__customerPhoneNumber = addr
print("Enter Customer address.")
street = input("Enter street")
bldng = input("Enter building/house name")
room = input("Enter room no/house no")
county = input("Enter county name")
areacode = input("Enter area code")
self.__customerAddress = "<Address><Street>" + street + "</Street><Building>" + bldng + "</Building><RoomNo>" + room + "</RoomNo><County>" + county + "</County><AreaCode>" + areacode + "</AreaCode></Address>"
database.updateCustomerRecord(self.__customerId, self.__customerName, self.__customerAddress, self.__customerPhoneNumber)
print("Customer Record Updated Successfully.!")
else:
print("No Customer found with that name.!")
except:
print("Something went wrong.!! Contact the administrator.!")
|
#!/usr/bin/env python
import numpy as np
import corr, time, struct, sys, logging, socket
import h5py
import matplotlib.pyplot as plt
import hittite
roach = '192.168.42.65'
print('Connecting to server %s... '%(roach)),
fpga = corr.katcp_wrapper.FpgaClient(roach)
time.sleep(0.2)
if fpga.is_connected():
print 'ok\n'
else:
print 'ERROR connecting to server %s.\n'%(roach)
exit()
# setup initial parameters
#numTones=2048
#combCoeff = np.zeros((1024,numTones)) + 1j*np.zeros((1024,numTones))
#combCoeffSingle = np.zeros(numTones) + 1j*np.zeros(numTones)
def defCoeffs14():
real = '0100000000000000'
imag = '0000000000000000'
coeff0 = (real + imag)
coeff0 = int(coeff0,2)
coeffArray = np.ones(1024,'I')*coeff0
coeffStr = struct.pack('>1024I',*coeffArray)
fpga.write('c1_0',coeffStr)
time.sleep(0.5)
fpga.write('c1_1',coeffStr)
time.sleep(0.5)
fpga.write('c1_2',coeffStr)
time.sleep(0.5)
fpga.write('c1_3',coeffStr)
time.sleep(0.5)
fpga.write('c1_4',coeffStr)
time.sleep(0.5)
fpga.write('c1_5',coeffStr)
time.sleep(0.5)
fpga.write('c1_6',coeffStr)
time.sleep(0.5)
fpga.write('c1_7',coeffStr)
time.sleep(0.5)
fpga.write('c4_0',coeffStr)
time.sleep(0.5)
fpga.write('c4_1',coeffStr)
time.sleep(0.5)
fpga.write('c4_2',coeffStr)
time.sleep(0.5)
fpga.write('c4_3',coeffStr)
time.sleep(0.5)
fpga.write('c4_4',coeffStr)
time.sleep(0.5)
fpga.write('c4_5',coeffStr)
time.sleep(0.5)
fpga.write('c4_6',coeffStr)
time.sleep(0.5)
fpga.write('c4_7',coeffStr)
def defCoeffs23(combCoeffSingle):
coeff2 = -1*(combCoeffSingle[0:1024])
coeff3 = -1*(np.power(combCoeffSingle[1024:2048],-1))
coeff3 = np.nan_to_num(coeff3)
coeffs2 = ["" for i in range(1024)]
coeffs2r = ["" for i in range(1024)]
coeffs2i = ["" for i in range(1024)]
coeffs3 = ["" for i in range(1024)]
coeffs3r = ["" for i in range(1024)]
coeffs3i = ["" for i in range(1024)]
#print coeff2.real
for i in range (0,1024):
coeffs2r[i] = np.binary_repr(np.int16(coeff2[i].real*2**14),16)
coeffs2i[i] = np.binary_repr(np.int16(coeff2[i].imag*2**14),16)
coeffs3r[i] = np.binary_repr(np.int16(coeff3[i].real*2**14),16)
coeffs3i[i] = np.binary_repr(np.int16(coeff3[i].imag*2**14),16)
coeffs2[i] = (coeffs2r[i] + coeffs2i[i])
coeffs3[i] = (coeffs3r[i] + coeffs3i[i])
coeffs2[i] = int(coeffs2[i],2)
coeffs3[i] = int(coeffs3[i],2)
coeffArray2 = np.ones(1024,'L')*coeffs2
coeffArray3 = np.ones(1024,'L')*coeffs3
coeffStr2 = struct.pack('>1024L',*coeffArray2)
coeffStr3 = struct.pack('>1024L',*coeffArray3)
tmp = np.roll(coeffArray2[0:128],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_0',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128:128*2],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_1',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*2:128*3],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_2',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*3:128*4],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_3',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*4:128*5],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_4',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*5:128*6],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_5',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*6:128*7],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_6',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray2[128*7:128*8],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c2_7',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[0:128],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_0',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128:128*2],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_1',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*2:128*3],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_2',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*3:128*4],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_3',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*4:128*5],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_4',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*5:128*6],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_5',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*6:128*7],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_6',tmp_write)
time.sleep(0.5)
tmp = np.roll(coeffArray3[128*7:128*8],-2)
tmp_write = struct.pack('>128L',*tmp)
fpga.write('c3_7',tmp_write)
time.sleep(0.5)
#fpga.write('c2_7',coeffStr2[127::-1])
#time.sleep(0.5)
#fpga.write('c2_6',coeffStr2[(127+128):127:-1])
#time.sleep(0.5)
#fpga.write('c2_5',coeffStr2[(127+128*2):(127+128):-1])
#time.sleep(0.5)
#fpga.write('c2_4',coeffStr2[(127+128*3):(127+128*2):-1])
#time.sleep(0.5)
#fpga.write('c2_3',coeffStr2[(127+128*4):(127+128*3):-1])
#time.sleep(0.5)
#fpga.write('c2_2',coeffStr2[(127+128*5):(127+128*4):-1])
#time.sleep(0.5)
#fpga.write('c2_1',coeffStr2[(127+128*6):(127+128*5):-1])
#time.sleep(0.5)
#fpga.write('c2_0',coeffStr2[(127+128*7):(127+128*6):-1])
#time.sleep(0.5)
# tmp = coeffArray2[0:128]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_0',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128:128*2]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_1',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*2:128*3]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_2',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*3:128*4]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_3',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*4:128*5]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_4',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*5:128*6]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_5',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*6:128*7]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_6',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray2[128*7:128*8]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c2_7',tmp_write)
# time.sleep(0.5)
#
# tmp = coeffArray3[0:128]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_0',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128:128*2]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_1',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*2:128*3]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_2',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*3:128*4]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_3',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*4:128*5]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_4',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*5:128*6]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_5',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*6:128*7]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_6',tmp_write)
# time.sleep(0.5)
# tmp = coeffArray3[128*7:128*8]
# tmp_write = struct.pack('>128L',*tmp)
# fpga.write('c3_7',tmp_write)
# time.sleep(0.5)
# fpga.write('c3_0',coeffStr3[0:128])
# time.sleep(0.5)
# fpga.write('c3_1',coeffStr3[128:128*2])
# time.sleep(0.5)
# fpga.write('c3_2',coeffStr3[128*2:128*3])
# time.sleep(0.5)
# fpga.write('c3_3',coeffStr3[128*3:128*4])
# time.sleep(0.5)
# fpga.write('c3_4',coeffStr3[128*4:128*5])
# time.sleep(0.5)
# fpga.write('c3_5',coeffStr3[128*5:128*6])
# time.sleep(0.5)
# fpga.write('c3_6',coeffStr3[128*6:128*7])
# time.sleep(0.5)
# fpga.write('c3_7',coeffStr3[128*7:128*8])
# time.sleep(0.5)
#
def populateSingle(combCoeff):
numTones = 2048
combCoeffSingle = np.zeros(numTones) + 1j*np.zeros(numTones)
for toneIter in range(0,numTones-2):
if toneIter < 1024:
#combCoeffSingle[toneIter] = combCoeff[1023-toneIter,toneIter+1]
#combCoeffSingle[1023-toneIter] = combCoeff[toneIter,1024-toneIter]
combCoeffSingle[1023-toneIter] = combCoeff[1023-toneIter,toneIter+1]
#print "index:" + str(1023-toneIter) + " read1: " + str(1023 - toneIter) + " read2: " + str(toneIter+1)
else:
combCoeffSingle[toneIter] = combCoeff[toneIter-1023,toneIter+1]
#print combCoeffSingle[toneIter]
return combCoeffSingle
def readFiles():
filename = 'combCoeff.h5'
f_data = h5py.File(filename,'r')
f_key = f_data.keys()[0]
#print f_data.keys()
#combCoeff = list(f_data[f_key])
combCoeff = np.array(f_data[f_key])
return combCoeff
combCoeff = readFiles()
#plt.imshow(combCoeff.real)
#plt.plot(combCoeff[1,:].real)
#plt.show()
#print combCoeff[1025,100]
combCoeffSingle = populateSingle(combCoeff)
#plt.plot(combCoeffSingle.real)
#plt.plot(combCoeffSingle.imag)
#plt.show()
defCoeffs14()
defCoeffs23(combCoeffSingle)
|
# Generated by Django 2.2.4 on 2019-08-17 20:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0025_auto_20190817_2017'),
]
operations = [
migrations.AlterField(
model_name='pypartner',
name='created_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pypartner_created_by', to=settings.AUTH_USER_MODEL),
),
]
|
price = float(input('Actual price: '))
promotionPrice = price - price * 0.05
print('Promotion price: ',promotionPrice)
|
import pytest
from multiplication_tables import multiplication_tables
def test_two_by_two():
assert multiplication_tables(2, 2) == [[1, 2], [2, 4]]
def test_three_by_four():
assert multiplication_tables(3, 4) == [[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]
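# A minimal sketch of an implementation that would satisfy these tests
# (an assumption -- the real multiplication_tables module is not shown here):
#
#   def multiplication_tables(rows, cols):
#       # row i (1-based) holds i*1 ... i*cols
#       return [[i * j for j in range(1, cols + 1)] for i in range(1, rows + 1)]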
|
"""
The purpose of this script is to train the model with k-fold cross validation.
The evaluation metrics are MSE and Pearson correlation.
Input: CSV file, train_list
Output: sav file (saved model), CSV file (PCC and MSE)
"""
import time
#time.sleep(15/60*60*60)
import pandas as pd
import pickle
import os
import datetime
from random import Random
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LeakyReLU
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
# Variables
num_fold = 5
seed = 50
range_value = 0.15
## Hidden layers parameter
#hidden_layer = 3
#hidden_unit = "75%" #512 # No of units in each hidden units
hidden_activation = "sigmoid" # Activation function for each hidden layer
output_activation = LeakyReLU(alpha = 1.0)
output_activation_1 = "Leaky ReLU"
dropout_rate = 0.2
hidden_layer_list = [2,3,4]
epoch_list = [10, 15, 20]
## Learning rate parameters
optimizer = "Adam"
loss_function = "mean_squared_error"
#epoch = 10
batch_size = None # Choose a value such that len(train) % batch_size == 0; otherwise leave as None
save_model = False
letter_list = ["C1-P"]
for letter in letter_list:
#main_folder = r'C:\Users\User\Desktop\B factor'
main_folder = r'D:\PHML B factor estimation\02 Project'
subfolder_input = r'02 Topology Application\02 Topological features representation\ML inputs - Counts of points at interval\%s' %letter
subfolder_list = r'00 Basic information'
subfolder_output_model = r'03 ML models\05 ANN\Trained models - Counts'
subfolder_output_results = r'03 ML models\05 ANN\Results - Counts'
writer = pd.ExcelWriter(os.path.join(main_folder, subfolder_output_results,
str(datetime.datetime.today().date()) + " ANN results - Counts %s 3 - %s.xlsx" %(range_value, letter)))
# List of input files
path_input = os.path.join(main_folder, subfolder_input)
input_files = [f for f in os.listdir(path_input) if f.endswith(".csv")]
input_files = [f for f in input_files if "_0.5_" in f or "_1.0_" in f or "_1.5_" in f ]
input_files = [f for f in input_files if f.startswith("%s_35_35" %letter) or f.startswith("%s_40_40" %letter) or f.startswith("%s_45_45" %letter)]
# Initial lists
atoms, bin_sizes, bin_nums, euclidean_distances, filtration_distances = [], [], [], [], []
num_folds, seeds = [], []
hidden_layers, hidden_units, hidden_activations, output_activations = [], [], [], []
optimizers, epochs, batch_sizes, dropout_rates = [], [], [], []
mse_train, mse_cv, pcc_train, pcc_cv = [], [], [], []
for file in input_files:
_, euclidean_distance, filtration_distance, _, _ = file.split("_")
euclidean_distance, filtration_distance = int(euclidean_distance), int(float(filtration_distance))
if filtration_distance <= euclidean_distance:
# Read train list
path_list = os.path.join(main_folder, subfolder_list, r'Train_list')
train_list = open(path_list, "r").read().split("\n")[:-1]
# Read the input data
path_files = os.path.join(path_input, file)
data = pd.read_csv(path_files)
# Extract the train set
criteria = data.iloc[:, -1].str.strip(" ").isin(train_list)
data_set = data[criteria]
data_set.reset_index(inplace = True, drop = True)
# Dataset details
file = file.strip(".csv")
atom, euclidean_distance, filtration_distance, bin_size, bin_num = file.split("_")
euclidean_distance, filtration_distance = int(euclidean_distance), int(float(filtration_distance))
bin_size, bin_num = float(bin_size), int(bin_num)
# kFold cross validation
limit = round(len(train_list)/num_fold)
train_index, cv_index = [], []
for i in range(num_fold):
Random(seed).shuffle(train_list)
train_list_split = train_list[:limit]
train_index_temp = data_set.iloc[:, -1].str.strip(" ").isin(train_list_split)
cv_index_temp = ~data_set.iloc[:, -1].str.strip(" ").isin(train_list_split)
train_index.append(train_index_temp)
cv_index.append(cv_index_temp)
# Split the dataset into x and y data
data_set = data_set.iloc[:, :-1]
data_x, data_y = data_set.iloc[:,:-1], data_set.iloc[:, -1]
for hidden_layer in hidden_layer_list:
for epoch in epoch_list:
# Train model
for i in range(num_fold):
print(datetime.datetime.now(), "\tANN - Training %s \tkFold = %d" %(file, i+1))
# Extract train and cv set
train_x, train_y = data_x.loc[train_index[i]], data_y[train_index[i]]
cv_x, cv_y = data_x.loc[cv_index[i]], data_y[cv_index[i]]
# Normalization
## Normalization parameters
mean_train = train_x.mean(axis = 0)
std_train = train_x.std(axis = 0).replace(0,1) # Replace 0 with 1 to prevent nan values
## Normalizing data
train_x = (train_x - mean_train)/std_train
cv_x = (cv_x - mean_train)/std_train
# No of hidden units == 0.75 of input nodes
input_nodes = train_x.shape[1]
hidden_unit = round(0.75*input_nodes)
print("Layers: %s \tUnits: %s \tBatch: %s \tEpoch: %s \tDropout: %s" %(hidden_layer, hidden_unit, batch_size, epoch, dropout_rate))
# Create ANN model
clf = Sequential()
clf.add(Dense(hidden_unit, input_dim = input_nodes, batch_size = batch_size))
clf.add(Activation(hidden_activation))
clf.add(Dropout(rate = dropout_rate))
## Add in the 2nd hidden layers onwards
if hidden_layer >= 2:
for j in range(2, hidden_layer + 1):
clf.add(Dense(hidden_unit))
clf.add(Activation(hidden_activation))
clf.add(Dropout(dropout_rate))
## Output layer
clf.add(Dense(1))
clf.add(Activation(output_activation))
## Compile the learning rate
clf.compile(optimizer = optimizer, loss = loss_function)
# Training
clf.fit(train_x, train_y, batch_size = batch_size, epochs = epoch, verbose = 0)
y_pred = clf.predict(train_x)
y_pred = y_pred.reshape(len(y_pred))
mse_train.append(mean_squared_error(train_y, y_pred))
pcc_train.append(pearsonr(y_pred, train_y)[0])
# Predict values
y_pred = clf.predict(cv_x)
y_pred = y_pred.reshape(len(y_pred))
# Calculate MSE and PCC
mse_cv.append(mean_squared_error(cv_y, y_pred))
pcc_cv.append(pearsonr(y_pred, cv_y)[0])
# Save the trained model
if save_model:
model_filename = os.path.join(main_folder, subfolder_output_model, file + "_model_%s" %(i+1) + ".sav")
pickle.dump(clf, open(model_filename, "wb"))
# Print the latest results
print("MSE train: %.5f \tPCC train: %.3f \nMSE CV: %.5f \tPCC CV: %.3f\n" %(mse_train[-1], pcc_train[-1], mse_cv[-1], pcc_cv[-1]))
# Create lists for dataframe
print("Creating dataframe details")
## Dataset details
atoms.extend([atom]*num_fold)
bin_sizes.extend([bin_size]*num_fold)
bin_nums.extend([bin_num]*num_fold)
euclidean_distances.extend([euclidean_distance]*num_fold)
filtration_distances.extend([filtration_distance]*num_fold)
## Model details
num_folds.extend([num_fold]*num_fold)
seeds.extend([seed]*num_fold)
hidden_layers.extend([hidden_layer]*num_fold)
hidden_units.extend([hidden_unit]*num_fold)
hidden_activations.extend([hidden_activation]*num_fold)
output_activations.extend([output_activation_1]*num_fold)
optimizers.extend([optimizer]*num_fold)
epochs.extend([epoch]*num_fold)
batch_sizes.extend([batch_size]*num_fold)
dropout_rates.extend([dropout_rate]*num_fold)
# Create dataframe
output_results = pd.DataFrame({
"Atoms":atoms,
"Euclidean distance":euclidean_distances,
"Filtration distance":filtration_distances,
"Bin size":bin_sizes,
"Bin num":bin_nums,
"Seed":seeds,
"Hidden layers":hidden_layers,
"Hidden units":hidden_units,
"Hidden activation":hidden_activations,
"Output activation":output_activations,
"Optimizer":optimizers,
"Epoch":epochs,
"Batch size":batch_sizes,
"Drouput rate":dropout_rates,
"MSE_train":mse_train,
"MSE_CV":mse_cv,
"PCC_train": pcc_train,
"PCC_CV":pcc_cv
})
# Save dataframe to excel
print("Saving results")
output_results.to_excel(writer, index = False)
writer.save()
print("Completed at", datetime.datetime.now())
|
# Fare calculation with differentiated rates
limiteKm = 200
km = float(input('Enter the distance of your trip in km: '))
# Rate for short trips
taxMin = .5 * km
# Rate for long trips
taxMax = .45 * km
print('Your fare will be R$ {:.2f}'.format(taxMin if km <= limiteKm else taxMax))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 22 11:35:04 2021
@author: chulke
"""
import numpy as np
import matplotlib.pyplot as plt
def randomwalk(largo):
pasos=np.random.randint (-1,2,largo)
return pasos.cumsum()
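# randomwalk(largo) draws `largo` steps uniformly from {-1, 0, 1} and returns their
# cumulative sum, i.e. the walker's position at every time step.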
def graficar():
N = 100000
i=0
fig = plt.figure()
lista = []
while i<12:
caminata = randomwalk(N)
lista.append(caminata)
plt.subplot(2, 1, 1) # defines the upper subplot
plt.plot(caminata)
plt.title('12 random walks')
plt.xlabel('Time')
plt.ylabel('Distance from origin')
i += 1
alejado = []
for i in lista:
alejado.append(max(abs(i)))
index_alejado = alejado.index(max(alejado))
index_menos_alejado = alejado.index(min(alejado))
plt.subplot(2, 2, 3)
plt.plot(lista[index_alejado])
plt.title('Walk that strays farthest from the origin')
ax = plt.gca() # gca is 'get current axis'
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.subplot(2, 2, 4)
plt.plot(lista[index_menos_alejado])
plt.title('Walk that strays least from the origin')
plt.show()
if __name__ == '__main__':
graficar()
|
from api import app
from api.user_list_api import user_list_api
app.register_blueprint(user_list_api)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
|
import pathlib
import numpy as np
import pandas as pd
import mlflow
from tqdm.notebook import tqdm
from sklearn.model_selection import KFold
def regression_cv(features,
labels,
model_factory,
metric,
folds,
mlflow_tags,
artifacts_dir,
random_state):
train_results = {}
test_results = {}
train_folds_errors = []
test_folds_errors = []
with mlflow.start_run(tags=({'stage': 'evaluation', **mlflow_tags})) as evaluation_run:
mlflow.log_params({
'n_splits': folds,
'cv_random_state': random_state
})
# create splitter
if random_state is not None:
folder = KFold(n_splits=folds, shuffle=True, random_state=random_state)
else:
folder = KFold(n_splits=folds, shuffle=False)
for fold_idx, indices in tqdm(enumerate(folder.split(features, labels)), total=folder.n_splits):
with mlflow.start_run(nested=True, tags={'fold': fold_idx}) as fold_run:
train_idx, test_idx = indices
train_fold_features = features.iloc[train_idx]
train_fold_target = labels.iloc[train_idx]
test_fold_features = features.iloc[test_idx]
test_fold_target = labels.iloc[test_idx]
# train
model = model_factory()
model.fit(train_fold_features, train_fold_target,
eval_data=(test_fold_features, test_fold_target),
mlflow_log=True)
fold_model_path = pathlib.Path(artifacts_dir) / fold_run.info.run_id / 'evaluation_models' / f'fold_{fold_idx}'
fold_model_path.mkdir(parents=True, exist_ok=True)
model.save(fold_model_path)
# predict
train_pred = model.predict(train_fold_features)
test_pred = model.predict(test_fold_features)
# fold error
fold_train_error = metric(train_fold_target, train_pred)
fold_test_error = metric(test_fold_target, test_pred)
train_folds_errors.append(fold_train_error)
test_folds_errors.append(fold_test_error)
# store prediction
append_array(train_results, 'fold', [fold_idx] * len(train_idx))
append_array(train_results, 'sample_idx', train_idx)
append_array(train_results, 'pred', train_pred)
append_array(train_results, 'true', train_fold_target)
append_array(train_results, 'fold_error', [fold_train_error] * len(train_idx))
append_array(test_results, 'fold', [fold_idx] * len(test_idx))
append_array(test_results, 'sample_idx', test_idx)
append_array(test_results, 'pred', test_pred)
append_array(test_results, 'true', test_fold_target)
append_array(test_results, 'fold_error', [fold_test_error] * len(test_idx))
print(f'Fold #{fold_idx}: test = {fold_test_error:0.5f}, train = {fold_train_error:0.5f}')
# mlflow log
mlflow.log_metrics({
'test_err': fold_test_error,
'train_err': fold_train_error
})
folds_stat_df = pd.DataFrame({'fold': range(len(train_folds_errors)),
'train': train_folds_errors,
'test': test_folds_errors})
train_results_df = pd.DataFrame(train_results)
test_results_df = pd.DataFrame(test_results)
print(f'Test error: {folds_stat_df["test"].mean():0.5f} ({folds_stat_df["test"].std():0.5f}),' +
f'Train error: {folds_stat_df["train"].mean():0.5f} ({folds_stat_df["train"].std():0.5f})')
# mlflow log
mlflow.log_metrics({
'test_err': folds_stat_df["test"].mean(),
'test_err_std': folds_stat_df["test"].std(),
'train_err': folds_stat_df["train"].mean(),
'train_err_std': folds_stat_df["train"].std()
})
artifacts_path = pathlib.Path(artifacts_dir) / evaluation_run.info.run_id
artifacts_path.mkdir(parents=True, exist_ok=True)
train_results_df.to_csv(artifacts_path / 'evaluation_train.csv.zip', index=False)
test_results_df.to_csv(artifacts_path / 'evaluation_test.csv.zip', index=False)
mlflow.log_artifacts(artifacts_dir)
return train_results_df, test_results_df, folds_stat_df
def append_array(dictionary, key, array):
if key not in dictionary:
dictionary[key] = array
else:
dictionary[key] = np.concatenate((dictionary[key], array))
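# A minimal usage sketch (hypothetical names; the concrete model wrapper is not shown here).
# regression_cv expects model_factory() to return an object exposing
# fit(X, y, eval_data=(X_val, y_val), mlflow_log=...), predict(X) and save(path):
#
#   from sklearn.metrics import mean_squared_error
#
#   train_df, test_df, folds_df = regression_cv(
#       features=features_df,          # pandas DataFrame
#       labels=labels_series,          # pandas Series
#       model_factory=make_model,      # hypothetical factory returning the wrapper above
#       metric=mean_squared_error,
#       folds=5,
#       mlflow_tags={'experiment': 'baseline'},
#       artifacts_dir='artifacts',
#       random_state=42)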
|
# Generated by Django 2.0.7 on 2019-01-14 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basedata', '0058_auto_20190114_1708'),
]
operations = [
migrations.AlterField(
model_name='device',
name='total_price',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=9, max_length=8, null=True, verbose_name='金额'),
),
]
|