text stringlengths 38 1.54M |
|---|
class Person:
    """Simple person record with name, sex and age attributes."""

    def __init__(self, name="Vasya", sex="M", age=13):
        self.name = name
        self.sex = sex
        self.age = age


class Citizen(Person):
    """Person with a nationality; extra keyword arguments become attributes."""

    def __init__(self, nation="Ukrainian", **kwargs):
        self.nation = nation
        # Initialize Person defaults through the MRO instead of the original
        # hard-coded Person.__init__(self) call; keyword overrides (name/sex/
        # age or arbitrary extras such as "some") are then applied on top.
        super().__init__()
        self.__dict__.update(kwargs)
# Demonstrate Citizen: explicit kwargs override the Person defaults, and the
# unknown key "some" is attached as an extra instance attribute.
a = Citizen(nation="foo", name="bar", sex = "buzz", some="123")
print(a.name)
print(a.sex)
print(a.age)
print(a.nation)
#print(a.some)
def my_funct(a=2, b=3):
    """Print the local namespace, then return the sum of the two arguments."""
    print(locals())
    return a + b
print("_____________________")
# Functions are first-class objects: they carry a __dict__ and a class,
# and can be bound to other names and called through them.
print(my_funct.__dict__)
print(my_funct.__class__)
#print(my_funct.__bases__)
my_funct(1,2)
x= my_funct
x(1,3)
print(x(4,5))
count = 0
def counter(funct):
def wrapper(*args, **kwargs):
global count
count +=1
print(count)
return funct(*args, **kwargs)
return wrapper
@counter
def myname(x = "some"):
print("Masha", x)
myname("hjkj")
myname("lkju890")
myname("1234")
myname()
# Introspection: Citizen's bases and namespace, plus the metaclass chain
# (type's class is type itself; `a` is the Citizen instance created above).
print(Citizen.__bases__)
print(Citizen.__dict__)
print(type.__class__)
print(type(a))
# Metaclass built on top of type.
class MetaPerson(type):
    """Metaclass whose instance creation forwards positional arguments to the
    normal constructor and then attaches every keyword argument directly as
    an instance attribute."""

    def __call__(cls, *args, **kwargs):
        instance = type.__call__(cls, *args)
        instance.__dict__.update(kwargs)
        return instance
# Class built on the metaclass: instances accept arbitrary keyword attributes.
class Man(metaclass=MetaPerson):
    pass
# Instance of the class: kwargs are attached by MetaPerson.__call__.
m = Man(height=180, weight=80)
print(m.__dict__)
print(m.height)
# Demonstrates overriding the metaclass hook sequence, including __prepare__.
class Meta(type):
    """Metaclass that logs class initialisation and demonstrates the
    __prepare__/__new__/__init__/__call__ hooks."""

    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        # NOTE(review): super().__prepare__ is called with *metacls* in the
        # "name" slot; type.__prepare__ ignores its arguments and returns a
        # plain dict, so this works, but the argument looks unintended.
        return super().__prepare__(metacls)

    def __new__(metacls, name, bases, namespace):
        return super().__new__(metacls, name, bases, namespace)

    def __init__(cls, name, bases, namespace):
        print("Init in Metaclsass")

    def __call__(cls, *args, **kwargs):
        # NOTE(review): args/kwargs are dropped here, so instances are always
        # constructed argument-less — confirm this is intentional.
        return super().__call__()
# Class using Meta; print what ends up in the instance.
class Class(metaclass=Meta):
    """Stores positional args in self.args and keywords as attributes.

    Note: Meta.__call__ discards all arguments, so in practice instances are
    always created with no arguments.
    """

    def __new__(cls, *args, **kwargs):
        return super().__new__(cls)

    def __init__(self, *args, **kwargs):
        self.args = args
        self.__dict__.update(kwargs)


a = Class()
|
from dictdb.GenericDictDB import GenericDictDB
from dictdb.SqliteThreadWork import SqliteThreadWork
from dictdb.StorageDict import SharedStorage, ThreadedSharedStorage
# end-of-file |
#!/usr/bin/env python3
import re
import sys
import ipaddress
from mysql import connector
from itertools import islice
from collections import defaultdict
# MySQL connection settings used at the bottom of the script.
db = { 'host' : 'localhost',
       'database' : 'bgp',
       'user' : 'bgp',
       'passwd' : 'bgp'
     }
#bgp_file = './test5000000.bgp'
#bgp_file = './test1000000.bgp'
# Full BGP table snapshot (oix format) to parse.
bgp_file = '/home/user/oix-full-snapshot-2018-12-01-0200'
# Companies of interest mapped to their origin AS numbers (as strings).
asn_list = { 'GOOGLE': '15169',
             'YANDEX': '13238'
           }
# dict of lists to store networks list for each company
networks = { i: list() for i in asn_list.values() }
# Last network string seen in the input; used to collapse duplicate lines.
skip_network = ''
def print_networks():
    """Pretty-print the collected networks grouped per company/ASN.

    Reads the module-level ``asn_list`` and ``networks`` dicts.  Both are
    only read (never rebound), so the original ``global`` declaration was
    unnecessary and has been dropped; the manual counter is replaced by
    ``enumerate``.  Output is byte-identical to the original.
    """
    for company, asn in asn_list.items():
        print(' {0} ({1})'.format(company, asn))
        for i, n in enumerate(sorted(networks[asn]), start=1):
            print(' {0} {1} '.format(i, n))
            # print(' {1} '.format(i, n))
def match_network(network, asn):
    """Record *network* (a prefix string) under *asn* unless already covered.

    Only ASNs present in the module-level ``asn_list`` are tracked.  The
    network is skipped when it is a strict subnet of a network already stored
    in ``networks[asn]``.  Returns False when skipped, None otherwise
    (unchanged from the original contract).
    """
    if asn in asn_list.values():
        network = ipaddress.ip_network(network)
        prefix = network.prefixlen
        for n in networks[asn]:
            n_prefix = n.prefixlen
            if prefix <= n_prefix:
                continue
            # Equivalent to the Python 3.7 ``network.subnet_of(n)`` test but
            # O(1): with prefix > n_prefix, both blocks are power-of-two
            # aligned, so containment of the network address implies the
            # whole network lies inside n.  The original enumerated
            # n.subnets(new_prefix=prefix), which is exponential in the
            # prefix-length difference.
            if network.network_address in n:
                return False
        networks[asn].append(network)
f = open(bgp_file, 'r')
# filter yandex and google networks first
# skip first 5 lines of the input file
for line in islice(f, 5, None):
    _, network, _, _, _, _, *asns = line.split()
    # skip same network lines (consecutive duplicates differ only in AS path)
    if skip_network == network:
        continue
    else:
        skip_network = network
    # NOTE(review): the origin AS is taken as the second-to-last path token —
    # presumably the final token is the origin-code flag; confirm against the
    # oix snapshot format.  Non-digits are stripped to handle AS sets like
    # "{15169}".
    asn = str(asns[-2])
    asn = re.sub('[^0-9]', '', asn)
    # pick networks belonging to yandex or google
    match_network(network, asn)
f.close()
# aggregate networks
print('\nNetworks (before aggregation):')
print_networks()
for asn in networks.keys():
print('\nAggregating asn {0}...'.format(asn))
prev_network = None
# go throughout networks list,
# from the longest network prefix to the shortest one
for prefix in reversed(range(1,32)):
# create temporary list for this prefix
temp_list = list()
for network in networks[asn]:
if network.prefixlen == prefix:
temp_list.append(network)
# try to aggregate each 2 adjacent networks...
for network in sorted(temp_list):
if prev_network == None:
prev_network = network
continue
supernet1 = prev_network.supernet()
supernet2 = network.supernet()
if not supernet1 == supernet2:
pass
else:
# ...and if they both belong to one common supernet,
# delete them from list, and add their supernet instead
# print('aggregating ' + str(prev_network) + ' and ' +
# str(network) + ' into ' + str(supernet1))
networks[asn].remove(prev_network)
networks[asn].remove(network)
networks[asn].append(supernet1)
prev_network = network
print('\nNetworks (aggregated):')
print_networks()
print('\nWriting data into database...')
# Connect with the settings from the `db` dict; abort the script on failure.
try:
    mydb = connector.connect(
        host = db['host'],
        database = db['database'],
        user = db['user'],
        passwd = db['passwd']
    )
except Exception as err:
    print("Database connection error: {0}".format(err))
    exit(1)
mycursor = mydb.cursor()
# Schema: asn(asn, company) and network(network, asn) with a FK to asn.
tables = [ """
CREATE TABLE IF NOT EXISTS `asn` (
`asn` int NOT NULL,
`company` varchar(100) NOT NULL,
PRIMARY KEY (`asn`)
);
""" , """
CREATE TABLE IF NOT EXISTS `network` (
`network` varchar(20) NOT NULL,
`asn` int NOT NULL,
PRIMARY KEY (`network`),
FOREIGN KEY (`asn`) REFERENCES asn(`asn`)
);
"""
]
try:
    for t in tables:
        mycursor.execute(t)
except Exception as err:
    print("Tables creation error: {0}".format(err))
    exit(1)
# REPLACE keeps the script idempotent across repeated runs.
try:
    sql = """REPLACE INTO asn(asn, company) VALUES (%s, %s);"""
    values = [ ( asn, company ) for company, asn in asn_list.items() ]
    mycursor.executemany(sql, values)
    mydb.commit()
    sql = """REPLACE INTO network(network, asn) VALUES (%s, %s);"""
    values = [ (str(network), asn) for asn in networks.keys() for network in networks[asn] ]
    mycursor.executemany(sql, values)
    mydb.commit()
except Exception as err:
    print("Inserting data error: {0}".format(err))
    exit(1)
mydb.close()
exit(0)
|
import os
import csv
from constant_ble_read import ReadBLE

# Directory that receives one CSV file per test run.
files_path = os.path.join(os.getcwd(), 'files')

# Interactive loop: ask for a test name and duration, read BLE RSSI samples
# and write them as a single CSV row.  Any failure is reported and the loop
# continues with the next test.
while True:
    try:
        start = input("Iniciar teste s/n: ")
        if start.upper() != "S":
            break
        name = input("Número do teste: ")
        read_time_sec = int(input("Tempo de leitura (segundos - mínimo 10): "))
        # Enforce the 10-second minimum advertised in the prompt.
        if read_time_sec < 10:
            read_time_sec = 10
        path_file = os.path.join(files_path, "{}.csv".format(name))
        # Read first, then open the output file.  The original opened the CSV
        # before the BLE read, which truncated/created an empty file (and
        # leaked the handle) whenever the read failed or returned no data.
        rssi_list = ReadBLE(read_time_sec, "edd1ebeac04e5defa017").read_ble()
        if not rssi_list:
            raise Exception("Lista de rssi vazia.")
        with open(path_file, "w") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(rssi_list)
    except Exception as e:
        print("Erro: ", e)
|
import hashlib
import json
import os
import random
from datetime import *
from django.db.models import Q
from django.http import Http404, FileResponse
from django.http import JsonResponse
from django.shortcuts import render,redirect
from app02.models import User, File, File_Users, Share
from app02.py import zip
from admins.commom import common
# Create your views here.
@common.is_login
def home(request):
    """Entry point: redirect to the "all files" view."""
    return redirect('/home/all')
# All files
@common.is_login
def all(request):
    """Render the main file-listing page for the logged-in user."""
    if request.session.get("login"):
        # user_info is consumed by the template through locals(), so its name
        # (and the names of all locals here) must not change.
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'home.html',locals())
    else:
        return render(request, "login.html")
# Image files
@common.is_login
def pic(request):
    """Render the image-file listing page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key (copy/paste
    from the avatar field); it now reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'pic.html',locals())
    else:
        return render(request, "login.html")
# Document files
@common.is_login
def doc(request):
    """Render the document-file listing page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key; it now
    reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'doc.html',locals())
    else:
        return render(request, "login.html")
# Video files
@common.is_login
def video(request):
    """Render the video-file listing page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key; it now
    reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'video.html',locals())
    else:
        return render(request, "login.html")
# Music files
@common.is_login
def music(request):
    """Render the music-file listing page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key; it now
    reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'music.html',locals())
    else:
        return render(request, "login.html")
# Other files
@common.is_login
def rests(request):
    """Render the "other files" listing page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key; it now
    reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'rests.html',locals())
    else:
        return render(request, "login.html")
# Share center
@common.is_login
def share(request):
    """Render the share-center page for the logged-in user.

    Bug fix: the "id" entry previously read the "img" session key; it now
    reads the user's id, matching all().
    """
    if request.session.get("login"):
        user_info = {"id": request.session.get("id"),'img':request.session.get("img"),'name':request.session.get("name")}
        print(user_info)
        return render(request, 'share.html',locals())
    else:
        return render(request, "login.html")
def aaa(request):
    """Render the placeholder aaa page."""
    return render(request, 'aaa.html')
# Build an obfuscated server-side file name.
def file_md5_name(file, time=None):
    """Return an MD5 hex digest derived from *file*, the current time and,
    when given, an explicit *time* value.

    Bug fix: the original assigned ``res = gain_time`` (the function object,
    not its result), so the digest mixed in a repr containing a memory
    address — non-deterministic across processes.  It now calls the function.
    """
    res = gain_time()
    cipher = hashlib.md5()
    if time:
        cipher.update(str(time).encode('utf-8'))
    cipher.update(str(res).encode('utf-8'))
    cipher.update(file.encode('utf-8'))
    return cipher.hexdigest()
# Upload a file
@common.is_login
def upload(request):
    """Handle a file upload for the logged-in user.

    Response codes: 1 = duplicate name for this user, 2 = instant upload
    (content already stored, only a link row created), 0 = new file stored,
    3 = failed to create the user/file link row.
    """
    user = request.session.get("name")
    # Fixed virtual folder assigned to every upload.
    file_path = 'c:'
    file_obj = request.FILES.get('file')
    file_name = file_obj.name  # original file name
    # Id of the acting user; reject when they already have this file name.
    name_id = request.session.get("id")
    # NOTE(review): the filter uses ~Q(file_path=file_path) — presumably a
    # same-folder match was intended; verify the semantics.
    get_file_name = File_Users.objects.filter(
        Q(user_id=name_id) , ~Q(file_path=file_path) , Q(file_name=file_name)).first()
    if get_file_name:
        return JsonResponse({'code': 1, 'file_name': file_name})
    # ---------------------- no name clash, continue -------------------------
    # The client sends a JSON map {file_name: md5}; a matching File row means
    # the content already exists on the server (dedup / instant upload).
    res = request.POST.get('md5')
    res = json.loads(res)
    res = res.get(file_name)
    file_data = File.objects.filter(data=res).first()
    if file_data:
        # Content exists: only link user (name_id) to the existing File row.
        file_id = file_data.id
        tag, stamp = save(file_id, name_id, file_name, file_path)
        if tag:
            return JsonResponse({'code': 2, 'file_name': file_name}, )
        else:
            return JsonResponse({'code': 3, 'file_name': file_name})
    else:
        # Store the real bytes under ./data with an obfuscated name.
        path = os.path.join(os.getcwd(), 'data')
        if not os.path.exists(path):
            os.makedirs(path)
        file_data_name = os.path.join(path, file_md5_name(file_name))  #
        # Write the uploaded content chunk by chunk.
        with open(file_data_name, 'wb') as f:
            for i in file_obj:
                f.write(i)
        size = bytes2human(os.path.getsize(file_data_name))  # human-readable size
        get_file_type = file_type(file_obj.name)  # category label
        # Register the stored content, then link the user to it.
        File.objects.create(path=file_data_name, data=res, size=size, type=get_file_type)
        file_id = File.objects.filter(data=res).first().id
        tag, stamp = save(file_id, name_id, file_name, file_path)
        if tag:
            return JsonResponse({'code': 0, 'file_name': file_name})
        else:
            return JsonResponse({'code': 3, 'file_name': file_name})
# Current-time helper.
def gain_time():
    """Return the current time shifted +8 hours, formatted
    'YYYY-MM-DD HH:MM:SS'."""
    shifted = datetime.now() + timedelta(hours=8)
    return shifted.strftime('%Y-%m-%d %H:%M:%S')
# Human-readable file sizes.
def bytes2human(n):
    """Convert a byte count to a short human-readable string.

    >>> bytes2human(10000)
    '9K'
    >>> bytes2human(100001221)
    '95M'
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Thresholds: K = 2**10, M = 2**20, ... built once up front.
    thresholds = {s: 1 << ((i + 1) * 10) for i, s in enumerate(symbols)}
    for symbol in reversed(symbols):
        factor = thresholds[symbol]
        if n >= factor:
            return '%s%s' % (int(float(n) / factor), symbol)
    # Below 1K: plain byte count.
    return '%sB' % n
# Classify a file by its extension.
def file_type(file_name: str) -> str:
    """Map a file name to a category label via its case-insensitive extension.

    Returns one of '图片' (image), '文档' (document), '视频' (video),
    '音乐' (music) or '其他' (other).

    Bug fix: the image list contained 'Webp' in mixed case while the
    extension is upper-cased before comparison, so .webp files were never
    recognised; the entry is now 'WEBP'.  Duplicate entries were removed and
    lists became sets for O(1) membership.
    """
    name = file_name.split('.')[-1].upper()
    # NOTE: 'TAR.GZ' can never match because only the last dot-segment is
    # tested ('GZ' catches those files); kept for fidelity with the original.
    image_exts = {'JPG', 'WEBP', 'BMP', 'PCX', 'TIF', 'GIF', 'JPEG', 'TGA',
                  'EXIF', 'FPX', 'SVG', 'PSD', 'CDR', 'PCD', 'DXF', 'UFO', 'EPS',
                  'AI', 'PNG', 'HDRI', 'RAW', 'WMF', 'FLIC', 'EMF', 'ICO'}
    doc_exts = {'JAVA', 'XML', 'JSON', 'CONF', 'JSP', 'PHPS', 'ASP', 'PROJECT',
                'CLASSPATH', 'SVN', 'GITIGNORE', 'TXT', 'LOG', 'SYS', 'INI', 'PDF',
                'XLS', 'XLSX', 'DOC', 'PPT', 'EXE', 'MSI', 'BAT', 'SH', 'RPM', 'DEB',
                'BIN', 'DMG', 'PKG', 'CLASS', 'DLL', 'SO', 'A', 'KO', 'RAR', 'ZIP',
                'ARJ', 'GZ', 'TAR', 'TAR.GZ', '7Z', 'HTM', 'HTML', 'JS', 'CSS', 'MD'}
    video_exts = {'MP4', 'M4V', 'MOV', 'QT', 'AVI', 'FLV', 'WMV', 'ASF',
                  'MPEG', 'MPG', 'VOB', 'MKV', 'RM', 'DAT'}
    music_exts = {'MP3', 'WMA', 'APE', 'FLAC', 'AAC', 'AC3', 'MMF', 'AMR',
                  'M4A', 'M4R', 'WAV', 'MP2'}
    if name in image_exts:
        return '图片'
    if name in doc_exts:
        return '文档'
    if name in video_exts:
        return '视频'
    if name in music_exts:
        return '音乐'
    return '其他'
# Create the user<->file link row.
def save(file_id, user_id, file_name, file_path):
    """Create a File_Users row linking *user_id* to File *file_id*.

    Returns (True, success message) on success, or (False, exception) when
    the ORM create raises.
    """
    try:
        time = gain_time()
        File_Users.objects.create(time=time, user_id=user_id, File_id=file_id, file_name=file_name, file_path=file_path)
        return True, '秒传成功!'
    except Exception as a:
        return False, a
# List the user's files for the table widget.
@common.is_login
def select(request):
    """Return the user's file list as table JSON.

    POST filters by type and a file-name substring; GET filters by type only.
    Pagination is done in Python over the full queryset.
    """
    user = request.session.get("name")
    # Acting user's id.
    user_id = request.session.get("id")
    # Table response skeleton.
    info = {"code": 200, "msg": "", "count": 100, "data": []}
    # View name -> stored file-type label ('' matches everything via icontains).
    func = {'all': '', 'pic': '图片', 'doc': '文档', 'video': '视频', 'music': '音乐', 'rests': '其他'}
    if request.method == 'POST':
        type = request.POST.get('type')
        if type in func:
            page = int(request.POST.get('page'))  # page number
            limit = int(request.POST.get('limit'))  # rows per page
            filename = request.POST.get('filename')
            data = File_Users.objects.filter(Q(file_name__icontains=filename)
                , Q(File__type__icontains=func[type]), user_id=user_id)
        else:
            return JsonResponse(info)
    else:
        type = request.GET.get('type')
        if type in func:
            # Fetch everything for this type.
            page = int(request.GET.get('page'))  # page number
            limit = int(request.GET.get('limit'))  # rows per page
            data = File_Users.objects.filter(Q(File__type__icontains=func[type]), user_id=user_id)
        else:
            return JsonResponse(info)
    x = 0
    # Icon class per file-type label.
    file_font = {"文件夹": "fa-folder", "图片": "fa-file-image-o", "文档": "fa-file-text", "视频": "fa-file-movie-o",
                 "音乐": "fa-file-sound-o", "其他": "fa-file"}
    # Build one table row per file on the requested page; x ends as the total.
    for i in data:
        # obj = File.objects.filter(id=file_id).first()
        x += 1
        if x <= (page * limit) and x > ((page - 1) * limit):
            a = {
                "id": x,
                "t_id": i.id,
                "filename": i.file_name,
                "ope": i.File.data,
                "size": i.File.size,
                "datetime": i.time,
                "experience": i.File.type,
                "type": file_font[i.File.type]
            }
            info["data"].append(a)
    info["count"] = x
    return JsonResponse(info)
# Resolve the server-side path for a download.
def download_file(file_id):
    """Return the stored path of the File linked by File_Users row *file_id*.

    (The unreachable ``pass`` after the return and the redundant temporary
    were removed; behavior is unchanged.)
    """
    obj = File_Users.objects.filter(id=file_id).first()
    return obj.File.path
# File download.
def download(request, data):
    """Stream a stored file; *data* is "<md5>/<download-name>"."""
    # Split checksum and user-facing file name.
    dow_data, dow_name = data.split('/')
    # Look up the on-disk path by checksum.
    file_path = File.objects.filter(data=dow_data).first().path
    ext = os.path.basename(file_path).split('.')[-1].lower()
    # Refuse to serve server code / database files.
    if ext not in ['py', 'db', 'sqlite3']:
        response = FileResponse(open(file_path, 'rb'))
        response['content_type'] = "application/octet-stream"
        response['Content-Disposition'] = 'attachment; filename=' + dow_name
        return response
    else:
        raise Http404
# Download several files as one zip archive.
def download_pack(request):
    """Zip the requested files into ./data and return the download path."""
    if request.method == 'POST':
        info = {}
        data = json.loads(request.POST.get('data')).get('data')
        # Map download name -> server path; sum counts the files.
        sum = 0
        for i in data:
            sum += 1
            res = File_Users.objects.filter(id=i.get('t_id')).first()
            if res:
                info[i.get('filename')] = (res.File.path)
        # Build the archive under a hashed name, then move it into ./data.
        dow_name = file_md5_name(str(info), gain_time())
        res_path = zip.ZIP(info, dow_name)
        new_path = os.path.join(os.getcwd(), 'data', dow_name)
        with open(res_path, 'rb') as f1, \
                open(new_path, 'wb') as f2:
            f2.write(f1.read())
        os.remove(res_path)
        # Public download path handed back to the client.
        file_path = os.path.join('/home/', 'download', dow_name, '%s-number-file' % sum + '.zip')
        # Register the archive in the File table.
        File.objects.create(path=new_path, data=dow_name, size=0, type='zip')
        return JsonResponse({'start': 1, 'msg': '正在下载。。', 'file_path': file_path, 'type': request.POST.get('type')})
    return JsonResponse({'start': 0, 'msg': '请求不合法'})
# File deletion.
@common.is_login
def delete(request):  # two submission styles: a JSON batch, or a single id
    """Delete File_Users rows: batch via 'data' JSON, single via 'file_id'."""
    # Batch form: {'data': [{'t_id': ...}, ...]}.
    data = request.POST.get('data')
    if data:
        data = json.loads(request.POST.get('data')).get('data')
        for i in data:
            File_Users.objects.filter(id=i.get('t_id')).delete()
        return JsonResponse({'start': 1, 'msg': '删除成功!'})
    if request.method == 'POST':
        file_id = request.POST.get('file_id')
        if file_id:
            File_Users.objects.filter(id=file_id).delete()
            return JsonResponse({'start': 1, 'msg': '删除成功!'})
    return JsonResponse({'start': 0, 'msg': '非法访问!'})
# Rename a file.
@common.is_login
def update(request):
    """Rename the File_Users row 'file_id' to the posted 'file_name'."""
    if request.method == 'POST':
        file_id = request.POST.get('file_id')
        if file_id:
            file_name = request.POST.get('file_name')
            File_Users.objects.filter(id=file_id).update(file_name=file_name)
            return JsonResponse({'start': 1, 'msg': '修改成功!'})
    return JsonResponse({'start': 0, 'msg': '非法访问!'})
def random_link(sum=10):
    """Return a random code of *sum* characters, each either a digit (0-9)
    or an uppercase ASCII letter (A-Z).

    NOTE: uses the non-cryptographic ``random`` module; the parameter name
    ``sum`` (which shadows the builtin) is kept for caller compatibility.
    """
    chars = []
    for _ in range(sum):
        picked = random.choice([random.randrange(10), chr(random.randrange(65, 91))])
        chars.append(str(picked))
    return ''.join(chars)
# Create a share for the selected files.
@common.is_login
def share_page(request, data):
    """Create a Share (random 10-char link code + 4-char password) for the
    posted files and return the share link and password."""
    # Name of the sharing user.
    name = request.session.get("name")
    if request.method == 'POST':
        info = {}
        data = json.loads(request.POST.get('data')).get('data')
        sum = 0
        # Owner of the share.
        user_id = User.objects.filter(name=name).first().id
        sum = len(data)
        # Display name, e.g. "<user>分享<first file>的等<n>个文件".
        share_name = '%s分享%s的等%s个文件' % (name, data[0].get('filename'), sum)
        # Random link code and password.
        code = random_link(10)
        password = random_link(4)
        # Persist the share.
        Share_obj = Share.objects.create(share_name=share_name, share_password=password, share_path=code
            , user_id=user_id, share_time=gain_time())
        # Attach each shared File_Users row to the share (many-to-many).
        for i in data:
            File_Users_obj = File_Users.objects.filter(id=i.get('t_id')).first()
            Share_obj.File_Users.add(File_Users_obj)
        return JsonResponse({'start': 1, 'msg': share_name, 'file_path':request.POST.get('link') + code, 'password': password})
    return JsonResponse({'start': 0, 'msg': '请求不合法'})
# List the user's shares for the table widget.
@common.is_login
def share_list(request):
    """Return the logged-in user's shares as table JSON (type must be 'share')."""
    user = request.session.get("name")
    # Acting user's id.
    user_id = request.session.get("id")
    # Table response skeleton.
    info = {"code": 200, "msg": "", "count": 100, "data": []}
    post_type = request.POST.get('type')
    get_type = request.GET.get('type')
    if post_type == 'share' or get_type == 'share':
        if request.method == 'POST':
            link = request.POST.get('link')
            page = int(request.POST.get('page'))  # page number
            limit = int(request.POST.get('limit'))  # rows per page
            filename = request.POST.get('filename')
            data = Share.objects.filter(Q(share_name__icontains=filename), user_id=user_id)
        else:
            link = request.GET.get('link')
            # Fetch everything.
            page = int(request.GET.get('page'))  # page number
            limit = int(request.GET.get('limit'))  # rows per page
            data = Share.objects.filter(user_id=user_id)
    else:
        return JsonResponse(info)
    x = 0
    # Build one table row per share on the requested page; x ends as the total.
    for i in data:
        x += 1
        if x <= (page * limit) and x > ((page - 1) * limit):
            a = {
                "id": x,
                "t_id": i.id,
                "share_name": i.share_name,
                "ope": "",
                "share_path": link + i.share_path,
                "share_password": i.share_password,
                "share_time": i.share_time,
            }
            info["data"].append(a)
    info["count"] = x
    return JsonResponse(info)
# Cancel shares.
@common.is_login
def share_cancel(request):  # two submission styles: a JSON batch, or a single id
    """Delete Share rows: batch via 'data' JSON, single via 'file_id'."""
    data = request.POST.get('data')
    if data:
        data = json.loads(request.POST.get('data')).get('data')
        for i in data:
            Share.objects.filter(id=i.get('t_id')).delete()
        return JsonResponse({'start': 1, 'msg': '取消分享成功!'})
    if request.method == 'POST':
        file_id = request.POST.get('file_id')
        if file_id:
            Share.objects.filter(id=file_id).delete()
            return JsonResponse({'start': 1, 'msg': '取消分享成功!'})
    return JsonResponse({'start': 0, 'msg': '非法访问!'})
# Guest-facing listing of a shared file set.
def select_share_link(request, data):
    """Validate the share password and return the shared files as table JSON."""
    # Table response skeleton; 'start' stays 0 until the password checks out.
    info = {"code": 200, "msg": "", 'start': 0, "count": 100, "data": [], }
    # The password may arrive via GET or POST.
    share_password = request.GET.get('password')
    if not share_password:
        share_password = request.POST.get('password')
    # The share code is the last path segment.
    share_path = data.split('/')[-1]
    # Prompt for a password when none was supplied.
    if share_password == '' or share_password == None:
        info['msg'] = '请输入分享密码!'
        return JsonResponse(info)
    # Check code + password (stored passwords are uppercase).
    share_obj = Share.objects.filter(share_path=share_path, share_password=share_password.upper()).first()
    if not share_obj:
        info['msg'] = '分享密码错误!'
        return JsonResponse(info)
    if request.method == 'POST':
        filename = request.POST.get('filename')
        page = int(request.POST.get('page'))  # page number
        limit = int(request.POST.get('limit'))  # rows per page
        # NOTE(review): this lookup uses the raw (non-uppercased) password,
        # unlike the check above — confirm which is intended.
        data_obj = Share.objects.filter(Q(share_name__icontains=filename), share_path=share_path
            , share_password=share_password).first()
        if data_obj == None:
            data = []
        else:
            data = data_obj.File_Users.all()
    else:
        # Fetch everything.
        page = int(request.GET.get('page'))  # page number
        limit = int(request.GET.get('limit'))  # rows per page
        data = share_obj.File_Users.all()
    x = 0
    # Icon class per file-type label.
    file_font = {"文件夹": "fa-folder", "图片": "fa-file-image-o", "文档": "fa-file-text", "视频": "fa-file-movie-o",
                 "音乐": "fa-file-sound-o", "其他": "fa-file"}
    # Build one table row per shared file on the requested page.
    for i in data:
        x += 1
        if x <= (page * limit) and x > ((page - 1) * limit):
            a = {
                "id": x,
                "t_id": i.id,
                "filename": i.file_name,
                "ope": i.File.data,
                "size": i.File.size,
                "datetime": i.time,
                "experience": i.File.type,
                "type": file_font[i.File.type]
            }
            info["data"].append(a)
    info["count"] = x
    info["start"] = 1
    info["msg"] = '校验成功!'
    return JsonResponse(info)
def share_link(request, urls):
    """Render the share landing page, or an error page when the share code
    does not exist."""
    # Check whether the share code exists first.
    share_obj = Share.objects.filter(share_path=urls)
    if not share_obj:
        return render(request, 'error.html')
    return render(request, 'sharelink.html')
def title(request):
    """Render the pop-up page."""
    return render(request,'pop-up.html')
#!/usr/bin/python3
from commands.modules.create_connection import create_connection
from commands.mysql_server.integrate_mysql import integrate_mysql
import typer

app = typer.Typer()


@app.command()
def mysql_storage(
    ip: str = typer.Option(...),
    key_ssh: str = typer.Option(...),
    user_ssh: str = typer.Option(...),
    name_project: str = typer.Option(...)
):
    """Open an SSH connection to *ip* (as *user_ssh*, using *key_ssh*) and
    run the MySQL integration for *name_project*.  All options are required
    (typer.Option(...) has no default)."""
    cn = create_connection(user_ssh, ip, key_ssh)
    integrate_mysql(cn, name_project)


if __name__ == "__main__":
    app()
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# author: Imron Alston <imron@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "imron@scalyr.com"
import datetime
import docker
import fnmatch
import traceback
import logging
import os
import re
import random
import socket
import stat
import time
import threading
from io import open
# Work around with a striptime race we see every now and then with docker monitor run() method.
# That race would occur very rarely, since it depends on the order threads are started and when
# strptime is first called.
# See:
# 1. https://github.com/scalyr/scalyr-agent-2/pull/700#issuecomment-761676613
# 2. https://bugs.python.org/issue7980
import _strptime # NOQA
import six
from urllib3.exceptions import ( # pylint: disable=import-error
ProtocolError,
)
from docker.types.daemon import CancellableStream
from scalyr_agent import ScalyrMonitor, define_config_option, define_metric
import scalyr_agent.util as scalyr_util
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.json_lib import JsonObject, ArrayOfStrings
import scalyr_agent.monitor_utils.annotation_config as annotation_config
from scalyr_agent.scalyr_monitor import BadMonitorConfiguration
from scalyr_agent.date_parsing_utils import rfc3339_to_datetime
from scalyr_agent.util import StoppableThread
global_log = scalyr_logging.getLogger(__name__)
# Monitor module name used when registering the config options and metrics below.
__monitor__ = __name__
# Matches container labels of the form "com.scalyr.config.log.<key>";
# group 2 captures the key part after the prefix.
DOCKER_LABEL_CONFIG_RE = re.compile(r"^(com\.scalyr\.config\.log\.)(.+)")
# Error log message which is logged when Docker inspect API endpoint returns empty value for LogPath
# attribute for a particular container.
# Empty attribute usually indicates that the Docker daemon is not configured correctly and that some
# other, non json-log log-driver is used.
# If docker_raw_logs is False and LogPath is empty, it means no logs will be monitored / ingested
# for that container.
NO_LOG_PATH_ATTR_MSG = (
    'LogPath attribute for container with id "%s" and name "%s" is empty. '
    "Logs for this container will not be monitored / ingested. This likely "
    "represents a misconfiguration on the Docker daemon side (e.g. docker "
    "daemon is configured to use some other and not json-file log-driver)."
)
define_config_option(
__monitor__,
"module",
"Always ``scalyr_agent.builtin_monitors.docker_monitor``",
convert_to=six.text_type,
required_option=True,
)
define_config_option(
__monitor__,
"container_name",
"Optional (defaults to None). Defines a regular expression that matches the name given to the "
"container running the scalyr-agent."
"If this is None, the scalyr agent will look for a container running /usr/sbin/scalyr-agent-2 as the main process.",
convert_to=six.text_type,
default=None,
)
define_config_option(
__monitor__,
"container_check_interval",
"Optional (defaults to 5). How often (in seconds) to check if containers have been started or stopped.",
convert_to=float,
default=5,
env_aware=True,
)
define_config_option(
__monitor__,
"api_socket",
"Optional (defaults to /var/scalyr/docker.sock). Defines the unix socket used to communicate with "
"the docker API. WARNING, if you have `mode` set to `syslog`, you must also set the "
"`docker_api_socket` configuration option in the syslog monitor to this same value."
"Note: You need to map the host's /run/docker.sock to the same value as specified here, using the -v parameter, e.g."
"\tdocker run -v /run/docker.sock:/var/scalyr/docker.sock ...",
convert_to=six.text_type,
default="/var/scalyr/docker.sock",
)
define_config_option(
__monitor__,
"docker_api_version",
"Optional (defaults to 'auto'). The version of the Docker API to use. WARNING, if you have "
"`mode` set to `syslog`, you must also set the `docker_api_version` configuration option in the "
"syslog monitor to this same value.",
convert_to=six.text_type,
default="auto",
env_aware=True,
)
define_config_option(
__monitor__,
"docker_log_prefix",
"Optional (defaults to docker). Prefix added to the start of all docker logs.",
convert_to=six.text_type,
default="docker",
env_aware=True,
)
define_config_option(
__monitor__,
"docker_percpu_metrics",
"Optional (defaults to False). When `True`, emits cpu usage stats per core. Note: This is disabled by "
"default because it can result in an excessive amount of metric data on cpus with a large number of cores.",
convert_to=bool,
default=False,
env_aware=True,
)
define_config_option(
__monitor__,
"max_previous_lines",
"Optional (defaults to 5000). The maximum number of lines to read backwards from the end of the stdout/stderr logs."
"when starting to log a containers stdout/stderr to find the last line that was sent to Scalyr.",
convert_to=int,
default=5000,
)
define_config_option(
__monitor__,
"readback_buffer_size",
"Optional (defaults to 5k). The maximum number of bytes to read backwards from the end of any log files on disk."
"when starting to log a containers stdout/stderr. This is used to find the most recent timestamp logged to file "
"was sent to Scalyr.",
convert_to=int,
default=5 * 1024,
)
define_config_option(
__monitor__,
"log_mode",
'Optional (defaults to "docker_api"). Determine which method is used to gather logs from the '
'local containers. If "docker_api", then this agent will use the docker API to contact the local '
'containers and pull logs from them. If "syslog", then this agent expects the other containers '
'to push logs to this one using the Docker syslog logging driver. Currently, "syslog" is the '
"preferred method due to bugs/issues found with the docker API (To protect legacy behavior, "
'the default method is "docker_api").',
convert_to=six.text_type,
default="docker_api",
env_aware=True,
env_name="SCALYR_DOCKER_LOG_MODE",
)
define_config_option(
__monitor__,
"docker_raw_logs",
"Optional (defaults to True). If True, the docker monitor will use the raw log files on disk to read logs."
"The location of the raw log file is obtained by querying the path from the Docker API. "
"If false, the logs will be streamed over the Docker API.",
convert_to=bool,
default=True,
env_aware=True,
)
define_config_option(
__monitor__,
"metrics_only",
"Optional (defaults to False). If true, the docker monitor will only log docker metrics and not any other information "
"about running containers. If set to true, this value overrides the config item 'report_container_metrics'.",
convert_to=bool,
default=False,
env_aware=True,
env_name="SCALYR_DOCKER_METRICS_ONLY",
)
define_config_option(
__monitor__,
"container_globs",
"Optional (defaults to None). A whitelist of container name glob patterns to monitor. Only containers whose name "
"matches one of the glob patterns will be monitored. If `None`, all container names are matched. This value "
"is applied *before* `container_globs_exclude`",
convert_to=ArrayOfStrings,
default=None,
env_aware=True,
)
define_config_option(
__monitor__,
"container_globs_exclude",
"Optional (defaults to None). A blacklist of container name glob patterns to exclude from monitoring. Any container whose name "
"matches one of the glob patterns will not be monitored. If `None`, all container names matched by `container_globs` are monitored. This value "
"is applied *after* `container_globs`",
convert_to=ArrayOfStrings,
default=None,
env_aware=True,
)
# ---------------------------------------------------------------------------
# Configuration options controlling metric reporting, container-label
# handling and log configuration for the docker monitor.
# ---------------------------------------------------------------------------
define_config_option(
    __monitor__,
    "report_container_metrics",
    "Optional (defaults to True). If true, metrics will be collected from the container and reported "
    "to Scalyr.",
    convert_to=bool,
    default=True,
    env_aware=True,
)
# Label filtering: include globs are applied first, then exclude globs.
define_config_option(
    __monitor__,
    "label_include_globs",
    "Optional (defaults to ['*']). If `labels_as_attributes` is True then this option is a list of glob strings used to "
    "include labels that should be uploaded as log attributes. The docker monitor first gets all container labels that "
    "match any glob in this list and then filters out any labels that match any glob in `label_exclude_globs`, and the final list is then "
    "uploaded as log attributes.",
    convert_to=ArrayOfStrings,
    default=["*"],
    env_aware=True,
)
define_config_option(
    __monitor__,
    "label_exclude_globs",
    "Optional (defaults to ['com.scalyr.config.*']). If `labels_as_attributes` is True, then this is a list of glob strings used to "
    "exclude labels from being uploaded as log attributes. Any label whose key matches any glob on this list will not be added as a "
    "log attribute. Note: the globs in this list are applied *after* `label_include_globs`",
    convert_to=ArrayOfStrings,
    default=["com.scalyr.config.*"],
    env_aware=True,
)
define_config_option(
    __monitor__,
    "labels_as_attributes",
    "Optional (defaults to False). If true, the docker monitor will add any labels found on the container as log attributes, after "
    "applying `label_include_globs` and `label_exclude_globs`.",
    convert_to=bool,
    default=False,
    env_aware=True,
)
define_config_option(
    __monitor__,
    "label_prefix",
    'Optional (defaults to ""). If `labels_as_attributes` is true, then append this prefix to the start of each label before '
    "adding it to the log attributes",
    convert_to=six.text_type,
    default="",
    env_aware=True,
)
define_config_option(
    __monitor__,
    "use_labels_for_log_config",
    "Optional (defaults to True). If true, the docker monitor will check each container for any labels that begin with "
    "`com.scalyr.config.log.` and use those labels (minus the prefix) as fields in the containers log_config. Keys that "
    "contain hyphens will automatically be converted to underscores.",
    convert_to=bool,
    default=True,
    env_aware=True,
)
# for now, always log timestamps by default to help prevent a race condition
define_config_option(
    __monitor__,
    "log_timestamps",
    "Optional (defaults to True). If true, stdout/stderr logs for logs consumed via Docker API (docker_raw_logs: false) will contain docker timestamps at the beginning of the line.",
    convert_to=bool,
    default=True,
)
# ---------------------------------------------------------------------------
# Metric definitions reported by this monitor, grouped by category.
# ---------------------------------------------------------------------------

# Network interface counters (all cumulative totals).
define_metric(
    __monitor__,
    "docker.net.rx_bytes",
    "Total received bytes on the network interface",
    cumulative=True,
    unit="bytes",
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.rx_dropped",
    "Total receive packets dropped on the network interface",
    cumulative=True,
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.rx_errors",
    "Total receive errors on the network interface",
    cumulative=True,
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.rx_packets",
    "Total received packets on the network interface",
    cumulative=True,
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.tx_bytes",
    "Total transmitted bytes on the network interface",
    cumulative=True,
    unit="bytes",
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.tx_dropped",
    "Total transmitted packets dropped on the network interface",
    cumulative=True,
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.tx_errors",
    "Total transmission errors on the network interface",
    cumulative=True,
    category="Network",
)
define_metric(
    __monitor__,
    "docker.net.tx_packets",
    # Fixed typo: "intervace" -> "interface".
    "Total packets transmitted on the network interface",
    cumulative=True,
    category="Network",
)

# Memory statistics scoped to this cgroup only (excluding sub-cgroups).
define_metric(
    __monitor__,
    "docker.mem.stat.active_anon",
    "The number of bytes of active memory backed by anonymous pages, excluding sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.active_file",
    "The number of bytes of active memory backed by files, excluding sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.cache",
    "The number of bytes used for the cache, excluding sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.hierarchical_memory_limit",
    "The memory limit in bytes for the container.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.inactive_anon",
    "The number of bytes of inactive memory in anonymous pages, excluding sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.inactive_file",
    "The number of bytes of inactive memory in file pages, excluding sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.mapped_file",
    # Fixed inconsistency: "sub-groups" -> "sub-cgroups".
    "The number of bytes of mapped files, excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.pgfault",
    "The total number of page faults, excluding sub-cgroups.",
    cumulative=True,
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.pgmajfault",
    "The number of major page faults, excluding sub-cgroups",
    cumulative=True,
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.pgpgin",
    "The number of charging events, excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.pgpgout",
    # Fixed inconsistency: "sub-groups" -> "sub-cgroups".
    "The number of uncharging events, excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.rss",
    "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.rss_huge",
    "The number of bytes of anonymous transparent hugepages, excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.unevictable",
    "The number of bytes of memory that cannot be reclaimed (mlocked etc), excluding sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.writeback",
    "The number of bytes being written back to disk, excluding sub-cgroups",
    category="Memory",
)

# Memory statistics aggregated over this cgroup and all sub-cgroups
# (the "total_" counterparts of the metrics above).
define_metric(
    __monitor__,
    "docker.mem.stat.total_active_anon",
    "The number of bytes of active memory backed by anonymous pages, including sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_active_file",
    "The number of bytes of active memory backed by files, including sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_cache",
    "The number of bytes used for the cache, including sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_inactive_anon",
    "The number of bytes of inactive memory in anonymous pages, including sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_inactive_file",
    "The number of bytes of inactive memory in file pages, including sub-cgroups.",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_mapped_file",
    # Fixed inconsistency: "sub-groups" -> "sub-cgroups".
    "The number of bytes of mapped files, including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_pgfault",
    "The total number of page faults, including sub-cgroups.",
    cumulative=True,
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_pgmajfault",
    "The number of major page faults, including sub-cgroups",
    cumulative=True,
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_pgpgin",
    "The number of charging events, including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_pgpgout",
    # Fixed inconsistency: "sub-groups" -> "sub-cgroups".
    "The number of uncharging events, including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_rss",
    "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_rss_huge",
    "The number of bytes of anonymous transparent hugepages, including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_unevictable",
    "The number of bytes of memory that cannot be reclaimed (mlocked etc), including sub-cgroups",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.stat.total_writeback",
    "The number of bytes being written back to disk, including sub-cgroups",
    category="Memory",
)

# Overall container memory accounting.
define_metric(
    __monitor__,
    "docker.mem.max_usage",
    "The max amount of memory used by container in bytes.",
    unit="bytes",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.usage",
    "The current number of bytes used for memory including cache.",
    unit="bytes",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.fail_cnt",
    "The number of times the container hit its memory limit",
    category="Memory",
)
define_metric(
    __monitor__,
    "docker.mem.limit",
    "The memory limit for the container in bytes.",
    unit="bytes",
    category="Memory",
)

# CPU usage counters (all cumulative, reported in nanoseconds).
define_metric(
    __monitor__,
    "docker.cpu.usage",
    "Total CPU consumed by container in nanoseconds",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.system_cpu_usage",
    "Total CPU consumed by container in kernel mode in nanoseconds",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.usage_in_usermode",
    "Total CPU consumed by tasks of the cgroup in user mode in nanoseconds",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.total_usage",
    "Total CPU consumed by tasks of the cgroup in nanoseconds",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.usage_in_kernelmode",
    "Total CPU consumed by tasks of the cgroup in kernel mode in nanoseconds",
    cumulative=True,
    category="CPU",
)

# CPU throttling counters.
define_metric(
    __monitor__,
    "docker.cpu.throttling.periods",
    # Fixed typo: "number of of" -> "number of".
    "The number of periods with throttling active.",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.throttling.throttled_periods",
    "The number of periods where the container hit its throttling limit",
    cumulative=True,
    category="CPU",
)
define_metric(
    __monitor__,
    "docker.cpu.throttling.throttled_time",
    "The aggregate amount of time the container was throttled in nanoseconds",
    cumulative=True,
    category="CPU",
)
class WrappedStreamResponse(CancellableStream):
    """Wraps the generator produced by docker.Client._stream_helper.

    Holding a reference to the underlying response (and therefore its
    socket) lets another thread shut the socket down if needed, e.g.
    during agent shutdown.
    """

    def __init__(self, client, response, decode):
        self.client = client
        self.response = response
        self.decode = decode
        # Invoke the stock docker-py implementation, deliberately skipping
        # our DockerClient override on the wrapped client.
        # pylint: disable=bad-super-call
        raw_stream = super(DockerClient, self.client)._stream_helper(
            response=self.response, decode=self.decode
        )
        super(WrappedStreamResponse, self).__init__(
            stream=raw_stream, response=self.response
        )
class WrappedRawResponse(CancellableStream):
    """Wraps the generator produced by docker.Client._stream_raw_result.

    Holding a reference to the underlying response (and therefore its
    socket) lets another thread shut the socket down if needed, e.g.
    during agent shutdown.
    """

    def __init__(self, client, response, chunk_size=8096):
        self.client = client
        self.response = response
        self.chunk_size = chunk_size
        # Invoke the stock docker-py implementation, deliberately skipping
        # our DockerClient override on the wrapped client.
        # pylint: disable=bad-super-call
        raw_stream = super(DockerClient, self.client)._stream_raw_result(
            response=self.response, chunk_size=self.chunk_size
        )
        super(WrappedRawResponse, self).__init__(
            stream=raw_stream, response=self.response
        )
class WrappedMultiplexedStreamResponse(CancellableStream):
    """Wraps the generator produced by
    docker.Client._multiplexed_response_stream_helper.

    Holding a reference to the underlying response (and therefore its
    socket) lets another thread shut the socket down if needed, e.g.
    during agent shutdown.
    """

    def __init__(self, client, response):
        self.client = client
        self.response = response
        # Invoke the stock docker-py implementation, deliberately skipping
        # our DockerClient override on the wrapped client.
        # pylint: disable=bad-super-call
        raw_stream = super(
            DockerClient, self.client
        )._multiplexed_response_stream_helper(response=self.response)
        super(WrappedMultiplexedStreamResponse, self).__init__(
            stream=raw_stream, response=self.response
        )
class DockerClient(docker.APIClient):  # pylint: disable=no-member
    """docker.Client subclass whose streaming helpers return 'wrapped'
    responses.  The wrappers retain the response object, which gives us
    access to the socket in use so a blocked socket can be shut down from
    another thread (e.g. upon shutdown).
    """

    def _stream_helper(self, response, decode=False):
        # Return a cancellable wrapper instead of a bare generator.
        return WrappedStreamResponse(self, response, decode)

    def _stream_raw_result(self, response):
        return WrappedRawResponse(self, response)

    def _multiplexed_response_stream_helper(self, response):
        return WrappedMultiplexedStreamResponse(self, response)

    def _get_raw_response_socket(self, response):
        # Guard: if the underlying file object is already gone there is no
        # socket to hand back.
        if not response.raw._fp.fp:
            return None
        return super(DockerClient, self)._get_raw_response_socket(response)
def _split_datetime_from_line(line):
    """Split a Docker log line into its timestamp and message parts.

    Docker timestamps are in RFC3339 format: 2015-08-03T09:12:43.143757463Z,
    with everything up to the first space being the timestamp.

    Returns a (datetime, message) tuple; when no timestamp prefix is found
    the current UTC time is used and the line is returned unchanged.
    """
    pos = line.find(" ")
    if pos <= 0:
        # No space separator - treat the whole line as the message.
        return (datetime.datetime.utcnow(), line)
    return (rfc3339_to_datetime(line[0:pos]), line[pos + 1 :])
def _get_containers(
    client,
    ignore_container=None,
    restrict_to_container=None,
    logger=None,
    only_running_containers=True,
    glob_list=None,
    include_log_path=False,
    get_labels=False,
):
    """Gets a dict of running containers that maps container id to container name

    @param client: docker client used to query the Docker API
    @param ignore_container: optional container id to omit from the result
    @param restrict_to_container: optional container id; if set, only that
        container is queried
    @param logger: logger for error reporting; defaults to `global_log`
    @param only_running_containers: if True, query only running containers
    @param glob_list: dict with optional 'include' and 'exclude' lists of glob
        strings used to filter containers by name
    @param include_log_path: if True, inspect each container for its LogPath
    @param get_labels: if True, inspect each container for its labels
    @return: dict mapping container id to a dict with 'name', 'log_path' and
        'labels' keys, or None if querying the Docker API failed
    """
    if logger is None:
        logger = global_log
    if glob_list is None:
        glob_list = {}
    include_globs = glob_list.get("include", None)
    exclude_globs = glob_list.get("exclude", None)
    result = {}
    try:
        filters = (
            {"id": restrict_to_container} if restrict_to_container is not None else None
        )
        response = client.containers(filters=filters, all=not only_running_containers)
        for container in response:
            cid = container["Id"]
            # skip the agent's own container if requested
            if ignore_container is not None and cid == ignore_container:
                continue
            if len(container["Names"]) > 0:
                name = container["Names"][0].lstrip("/")
                add_container = True
                # see if this container name should be included
                if include_globs:
                    add_container = False
                    for glob in include_globs:
                        if fnmatch.fnmatch(name, glob):
                            add_container = True
                            break
                if not add_container:
                    global_log.log(
                        scalyr_logging.DEBUG_LEVEL_2,
                        "Excluding container '%s', because it does not match any glob in `container_globs`"
                        % name,
                    )
                # see if this container name should be excluded
                if add_container and exclude_globs is not None:
                    for glob in exclude_globs:
                        if fnmatch.fnmatch(name, glob):
                            add_container = False
                            global_log.log(
                                scalyr_logging.DEBUG_LEVEL_2,
                                "Excluding container '%s', because it matches '%s' in `container_globs_exclude`"
                                % (name, glob),
                            )
                            break
                if add_container:
                    log_path = None
                    labels = None
                    # inspecting the container is only needed for the log
                    # path and/or the labels
                    if include_log_path or get_labels:
                        try:
                            info = client.inspect_container(cid)
                            if include_log_path:
                                log_path = (
                                    info["LogPath"] if "LogPath" in info else None
                                )
                                if not log_path:
                                    # NOTE: If docker_raw_logs is True and we hit this code path it
                                    # really means we won't be ingesting any logs so this should
                                    # really be treated as a fatal error.
                                    logger.error(
                                        NO_LOG_PATH_ATTR_MSG % (cid, name),
                                        limit_once_per_x_secs=300,
                                        limit_key="docker-api-inspect",
                                    )
                            if get_labels:
                                config = info.get("Config", {})
                                labels = config.get("Labels", None)
                        except Exception:
                            logger.error(
                                "Error inspecting container '%s'" % cid,
                                limit_once_per_x_secs=300,
                                limit_key="docker-api-inspect",
                            )
                    result[cid] = {"name": name, "log_path": log_path, "labels": labels}
            else:
                # no names available - fall back to using the id as the name
                result[cid] = {"name": cid, "log_path": None, "labels": None}
    except Exception as e:  # container querying failed
        logger.exception(
            "Error querying running containers: %s, filters=%s, only_running_containers=%s"
            % (six.text_type(e), filters, only_running_containers),
            limit_once_per_x_secs=300,
            limit_key="docker-api-running-containers",
        )
        # None (rather than {}) signals to callers that the query failed
        result = None
    return result
def get_attributes_and_config_from_labels(labels, docker_options):
    """
    Splits a dict of Docker labels into two pieces: a dict of log attributes
    and a JsonObject of log config items.
    @param labels: All the Docker labels for a container
    @param docker_options: Options for determining which labels to use for the attributes and config items
    @type labels: dict
    @type docker_options: DockerOptions
    @return: A tuple with the first element containing a dict of attributes and the second element containing a JsonObject of config items
    @rtype: (dict, JsonObject)
    """
    attributes = {}
    config = JsonObject({})
    if labels:
        if docker_options.labels_as_attributes:
            # First pass: keep only labels whose key matches at least one
            # include glob.
            included = {}
            for key, value in six.iteritems(labels):
                if any(
                    fnmatch.fnmatch(key, glob)
                    for glob in docker_options.label_include_globs
                ):
                    included[key] = value
            # Second pass: drop anything matching an exclude glob, then apply
            # the configured prefix to the surviving keys.
            for key, value in six.iteritems(included):
                if any(
                    fnmatch.fnmatch(key, glob)
                    for glob in docker_options.label_exclude_globs
                ):
                    continue
                attributes[docker_options.label_prefix + key] = value
        # Build the log config from any `com.scalyr.config.log.*` labels.
        if docker_options.use_labels_for_log_config:
            config = annotation_config.process_annotations(
                labels,
                annotation_prefix_re=DOCKER_LABEL_CONFIG_RE,
                hyphens_as_underscores=True,
            )
    return (attributes, config)
def get_parser_from_config(base_config, attributes, default_parser):
    """
    Checks the various places that the parser option could be set and returns
    the value with the highest precedence, or `default_parser` if no parser was found
    @param base_config: a set of log config options for a logfile
    @param attributes: a set of attributes to apply to a logfile
    @param default_parser: the default parser if no parser setting is found in base_config or attributes
    """
    # Highest precedence: base_config['attributes']['parser'] - this is the
    # `com.scalyr.config.log.attributes.parser` label.
    if "attributes" in base_config:
        base_attributes = base_config["attributes"]
        if "parser" in base_attributes:
            return base_attributes["parser"]
    # Next: base_config['parser'] - the `com.scalyr.config.log.parser` label.
    if "parser" in base_config:
        return base_config["parser"]
    # Lowest: a `parser` label that was uploaded as an attribute (i.e. labels
    # are uploaded as attributes and `parser` passed the attribute filters).
    if "parser" in attributes:
        return attributes["parser"]
    # Nothing found anywhere - fall back to the default.
    return default_parser
class ContainerChecker(StoppableThread):
    """
    Monitors containers to check when they start and stop running.
    """

    def __init__(
        self,
        config,
        logger,
        socket_file,
        docker_api_version,
        host_hostname,
        data_path,
        log_path,
    ):
        self._config = config
        self._logger = logger
        self._use_raw_logs = config.get("docker_raw_logs")
        self.__delay = self._config.get("container_check_interval")
        self.__log_prefix = self._config.get("docker_log_prefix")
        name = self._config.get("container_name")
        self.__docker_options = DockerOptions(
            labels_as_attributes=self._config.get("labels_as_attributes"),
            label_prefix=self._config.get("label_prefix"),
            label_include_globs=self._config.get("label_include_globs"),
            label_exclude_globs=self._config.get("label_exclude_globs"),
            use_labels_for_log_config=self._config.get("use_labels_for_log_config"),
        )
        # Only inspect containers for labels if the labels are actually used
        # (for log config or as uploaded attributes).
        self.__get_labels = (
            self.__docker_options.use_labels_for_log_config
            or self.__docker_options.labels_as_attributes
        )
        self.__socket_file = socket_file
        self.__docker_api_version = docker_api_version
        self.__client = DockerClient(
            base_url=("unix:/%s" % self.__socket_file),
            version=self.__docker_api_version,
        )
        self.container_id = self.__get_scalyr_container_id(self.__client, name)
        self.__checkpoint_file = os.path.join(data_path, "docker-checkpoints.json")
        self.__log_path = log_path
        self.__host_hostname = host_hostname
        self.__readback_buffer_size = self._config.get("readback_buffer_size")
        self.__glob_list = {
            "include": self._config.get("container_globs"),
            "exclude": self._config.get("container_globs_exclude"),
        }
        self.containers = {}
        self.__checkpoints = {}
        self.__log_watcher = None
        self.__module = None
        self.__start_time = time.time()
        self.__thread = StoppableThread(
            target=self.check_containers, name="Container Checker"
        )
        self.docker_loggers = []
        self.raw_logs = []

    def start(self):
        """Query the current set of containers, start loggers for them, and
        kick off the background container-checking thread.
        """
        self.__load_checkpoints()
        self.containers = _get_containers(
            self.__client,
            ignore_container=self.container_id,
            glob_list=self.__glob_list,
            include_log_path=self._use_raw_logs,
            get_labels=self.__get_labels,
        )
        # if querying the docker api fails, set the container list to empty
        if self.containers is None:
            self.containers = {}
        self.docker_logs = self.__get_docker_logs(self.containers)
        self.docker_loggers = []
        self.raw_logs = []
        global_log.log(
            scalyr_logging.DEBUG_LEVEL_2,
            "container_globs: %s" % self.__glob_list["include"],
        )
        global_log.log(
            scalyr_logging.DEBUG_LEVEL_2,
            "container_globs_exclude: %s" % self.__glob_list["exclude"],
        )
        # create and start the DockerLoggers
        self.__start_docker_logs(self.docker_logs)
        self._logger.log(
            scalyr_logging.DEBUG_LEVEL_0,
            "Starting docker monitor (raw_logs=%s)" % self._use_raw_logs,
        )
        self.__thread.start()

    def stop(self, wait_on_join=True, join_timeout=5):
        """Stop the container-checking thread and all active loggers,
        persisting checkpoints before returning.
        """
        self.__thread.stop(wait_on_join=wait_on_join, join_timeout=join_timeout)
        # stop the DockerLoggers
        if self._use_raw_logs:
            for logger in self.raw_logs:
                path = logger["log_config"]["path"]
                if self.__log_watcher:
                    self.__log_watcher.remove_log_path(self.__module.module_name, path)
                self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Stopping %s" % (path))
        else:
            for logger in self.docker_loggers:
                if self.__log_watcher:
                    self.__log_watcher.remove_log_path(
                        self.__module.module_name, logger.log_path
                    )
                logger.stop(wait_on_join, join_timeout)
                self._logger.log(
                    scalyr_logging.DEBUG_LEVEL_1,
                    "Stopping %s - %s" % (logger.name, logger.stream),
                )
        self.__update_checkpoints()
        self.docker_loggers = []
        self.raw_logs = []

    def check_containers(self, run_state):
        """Main loop of the checker thread: repeatedly diff the currently
        running containers against the known set and start/stop loggers
        accordingly until `run_state` indicates shutdown.
        """
        while run_state.is_running():
            try:
                self.__update_checkpoints()
                self._logger.log(
                    scalyr_logging.DEBUG_LEVEL_2,
                    "Attempting to retrieve list of containers:",
                )
                running_containers = _get_containers(
                    self.__client,
                    ignore_container=self.container_id,
                    glob_list=self.__glob_list,
                    include_log_path=self._use_raw_logs,
                    get_labels=self.__get_labels,
                )
                # if running_containers is None, that means querying the docker api failed.
                # rather than resetting the list of running containers to empty
                # continue using the previous list of containers
                if running_containers is None:
                    self._logger.log(
                        scalyr_logging.DEBUG_LEVEL_2, "Failed to get list of containers"
                    )
                    running_containers = self.containers
                self._logger.log(
                    scalyr_logging.DEBUG_LEVEL_2,
                    "Found %d containers" % len(running_containers),
                )
                # get the containers that have started since the last sample
                starting = {}
                for cid, info in six.iteritems(running_containers):
                    if cid not in self.containers:
                        self._logger.log(
                            scalyr_logging.DEBUG_LEVEL_1,
                            "Starting loggers for container '%s'" % info["name"],
                        )
                        starting[cid] = info
                # get the containers that have stopped
                stopping = {}
                for cid, info in six.iteritems(self.containers):
                    if cid not in running_containers:
                        self._logger.log(
                            scalyr_logging.DEBUG_LEVEL_1,
                            "Stopping logger for container '%s' (%s)"
                            % (info["name"], cid[:6]),
                        )
                        stopping[cid] = info
                # stop the old loggers
                self.__stop_loggers(stopping)
                # update the list of running containers
                # do this before starting new ones, as starting up new ones
                # will access self.containers
                self.containers = running_containers
                # start the new ones
                self.__start_loggers(starting)
            except Exception as e:
                # Keep the checker thread alive across transient API errors.
                self._logger.warn(
                    "Exception occurred when checking containers %s\n%s"
                    % (six.text_type(e), traceback.format_exc())
                )
            run_state.sleep_but_awaken_if_stopped(self.__delay)

    def set_log_watcher(self, log_watcher, module):
        """Record the log watcher and owning module used to add and remove
        log paths on behalf of this checker.
        """
        self.__log_watcher = log_watcher
        self.__module = module

    def __get_scalyr_container_id(self, client, name):
        """Gets the container id of the scalyr-agent container
        If the config option container_name is empty, then it is assumed that the scalyr agent is running
        on the host and not in a container and None is returned.
        """
        result = None
        regex = None
        if name is not None:
            regex = re.compile(name)
            # get all the containers
            containers = client.containers()
            for container in containers:
                # see if we are checking on names
                if name is not None:
                    # if so, loop over all container names for this container
                    # Note: containers should only have one name, but the 'Names' field
                    # is a list, so iterate over it just in case
                    for cname in container["Names"]:
                        cname = cname.lstrip("/")
                        # check if the name regex matches
                        m = regex.match(cname)
                        if m:
                            result = container["Id"]
                            break
                # not checking container name, so check the Command instead to see if it's the agent
                else:
                    if container["Command"].startswith("/usr/sbin/scalyr-agent-2"):
                        result = container["Id"]
                if result:
                    break
        if not result:
            # only raise an exception if we were looking for a specific name but couldn't find it
            if name is not None:
                raise Exception(
                    "Unable to find a matching container id for container '%s'. Please make sure that a "
                    "container matching the regular expression '%s' is running."
                    % (name, name)
                )
        return result

    def __update_checkpoints(self):
        """Update the checkpoints for when each docker logger logged a request, and save the checkpoints
        to file.
        """
        # checkpoints are only used for querying from the API, so ignore
        # them if we are using raw logs
        if not self._use_raw_logs:
            for logger in self.docker_loggers:
                last_request = logger.last_request()
                self.__checkpoints[logger.stream_name] = last_request
        # save to disk
        if self.__checkpoints:
            tmp_file = self.__checkpoint_file + "~"
            scalyr_util.atomic_write_dict_as_json_file(
                self.__checkpoint_file, tmp_file, self.__checkpoints
            )

    def __load_checkpoints(self):
        """Load previously-saved checkpoints from disk, if any exist."""
        try:
            checkpoints = scalyr_util.read_file_as_json(
                self.__checkpoint_file, strict_utf8=True
            )
        except Exception:
            self._logger.info(
                "No checkpoint file '%s' exists.\n\tAll logs will be read starting from their current end.",
                self.__checkpoint_file,
            )
            checkpoints = {}
        if checkpoints:
            for name, last_request in six.iteritems(checkpoints):
                self.__checkpoints[name] = last_request

    def __stop_loggers(self, stopping):
        """
        Stops any DockerLoggers in the 'stopping' dict
        @param stopping: a dict of container ids => container names. Any running containers that have
            the same container-id as a key in the dict will be stopped.
        @type stopping: dict
        """
        if stopping:
            self._logger.log(
                scalyr_logging.DEBUG_LEVEL_2, "Stopping all docker loggers"
            )
            if self._use_raw_logs:
                for logger in self.raw_logs:
                    if logger["cid"] in stopping:
                        path = logger["log_config"]["path"]
                        if self.__log_watcher:
                            self.__log_watcher.schedule_log_path_for_removal(
                                self.__module.module_name, path
                            )
                self.raw_logs[:] = [
                    line for line in self.raw_logs if line["cid"] not in stopping
                ]
            else:
                for logger in self.docker_loggers:
                    if logger.cid in stopping:
                        logger.stop(wait_on_join=True, join_timeout=1)
                        if self.__log_watcher:
                            self.__log_watcher.schedule_log_path_for_removal(
                                self.__module.module_name, logger.log_path
                            )
                self.docker_loggers[:] = [
                    line for line in self.docker_loggers if line.cid not in stopping
                ]
            self.docker_logs[:] = [
                line for line in self.docker_logs if line["cid"] not in stopping
            ]

    def __start_loggers(self, starting):
        """
        Starts a list of DockerLoggers
        @param starting: a list of DockerLoggers to start
        @type starting: list
        """
        if starting:
            self._logger.log(
                scalyr_logging.DEBUG_LEVEL_2, "Starting all docker loggers"
            )
            docker_logs = self.__get_docker_logs(starting)
            self.__start_docker_logs(docker_logs)
            self.docker_logs.extend(docker_logs)

    def __start_docker_logs(self, docker_logs):
        """Register each log config with the log watcher and start either a
        raw-log entry or a DockerLogger thread for it, depending on whether
        `docker_raw_logs` is enabled.
        """
        for log in docker_logs:
            if self.__log_watcher:
                try:
                    log["log_config"] = self.__log_watcher.add_log_config(
                        self.__module.module_name, log["log_config"], force_add=True
                    )
                except Exception as e:
                    global_log.info(
                        "Error adding log '%s' to log watcher - %s"
                        % (log["log_config"]["path"], e)
                    )
            if self._use_raw_logs:
                log_path = log["log_config"]["path"]
                if not os.path.exists(log_path):
                    global_log.warn(
                        "Missing file detected for container log path '%s'. Please ensure that the host's "
                        "Docker container directory (by default /var/lib/docker/containers) has been "
                        "mounted in the Scalyr Agent container." % log_path
                    )
                self.raw_logs.append(log)
            else:
                last_request = self.__get_last_request_for_log(
                    log["log_config"]["path"]
                )
                self.docker_loggers.append(
                    self.__create_docker_logger(log, last_request)
                )

    def __get_last_request_for_log(self, path):
        """Return, in seconds since the epoch, the timestamp of the last log
        line previously written to `path`, falling back to the monitor start
        time when the file is missing or unreadable.
        """
        result = datetime.datetime.utcfromtimestamp(self.__start_time)
        fp = None
        try:
            full_path = os.path.join(self.__log_path, path)
            fp = open(full_path, "r", self.__readback_buffer_size)
            # seek readback buffer bytes from the end of the file
            fp.seek(0, os.SEEK_END)
            size = fp.tell()
            if size < self.__readback_buffer_size:
                fp.seek(0, os.SEEK_SET)
            else:
                fp.seek(size - self.__readback_buffer_size, os.SEEK_SET)
            first = True
            for line in fp:
                # ignore the first line because it likely started somewhere randomly
                # in the line
                if first:
                    first = False
                    continue
                dt, _ = _split_datetime_from_line(line)
                if dt:
                    result = dt
        except IOError as e:
            # If file doesn't exist, this simple means that the new container has been started and
            # the log file doesn't exist on disk yet.
            if e.errno == 2:
                global_log.info(
                    "File %s doesn't exist on disk. This likely means a new container "
                    "has been started and no existing logs are available for it on "
                    "disk. Original error: %s" % (full_path, six.text_type(e))
                )
            else:
                global_log.info("%s", six.text_type(e))
        except Exception as e:
            global_log.info("%s", six.text_type(e))
        finally:
            if fp:
                fp.close()
        return scalyr_util.seconds_since_epoch(result)

    def __create_log_config(
        self, default_parser, path, attributes, base_config=None, parse_as_json=False
    ):
        """Convenience function to create a log_config dict
        @param default_parser: a parser to use if no parser is found in the attributes or base_config
        @param path: the path of the log file being configured
        @param attributes: Any attributes to include as part of the log_config['attributes']
        @param base_config: A base set of configuration options to build the log_config from
        @param parse_as_json: whether lines of this log should be parsed as JSON
        @type default_parser: six.text_type
        @type path: six.text_type
        @type attributes: dict of JsonObject
        @type base_config: dict or JsonObject
        @type parse_as_json: bool
        """
        # Use a None sentinel instead of a mutable default argument; treat
        # None as "no base config".
        if base_config is None:
            base_config = {}
        result = base_config.copy()
        # Set the parser the log_config['parser'] level
        # otherwise it will be overwritten by a default value due to the way
        # log_config verification works
        result["parser"] = get_parser_from_config(result, attributes, default_parser)
        result["path"] = path
        result["parse_lines_as_json"] = parse_as_json
        if "attributes" in result:
            # if 'attributes' exists in `result`, then it must have come from
            # base config, which should already be a JsonObject, so no need to
            # explicitly convert the `attributes` dict, just update the existing object.
            result["attributes"].update(attributes)
        else:
            # make sure the log_config attributes are a JsonObject
            # because the code for verifying log configs explicitly checks for JsonObjects
            # and throws an error if other types are found
            result["attributes"] = JsonObject(attributes)
        return result

    def __get_docker_logs(self, containers):
        """Returns a list of dicts containing the container id, stream, and a log_config
        for each container in the 'containers' param.
        """
        result = []
        attributes = None
        try:
            attributes = JsonObject({"monitor": "agentDocker"})
            if self.__host_hostname:
                attributes["serverHost"] = self.__host_hostname
        except Exception:
            self._logger.error("Error setting monitor attribute in DockerMonitor")
            raise
        prefix = self.__log_prefix + "-"
        for cid, info in six.iteritems(containers):
            container_attributes = attributes.copy()
            container_attributes["containerName"] = info["name"]
            container_attributes["containerId"] = cid
            # get the attributes and config items from the labels
            attrs, base_config = get_attributes_and_config_from_labels(
                info.get("labels", None), self.__docker_options
            )
            attrs.update(container_attributes)
            labels = info.get("labels", []) or []
            self._logger.log(
                scalyr_logging.DEBUG_LEVEL_1,
                'Found labels "%s" for container %s. Using attributes: %s.'
                % (", ".join(labels), info["name"], str(attrs)),
            )
            if self._use_raw_logs and "log_path" in info and info["log_path"]:
                # raw logs: a single entry pointing at the container's json log file
                stream_count = 1
                log_config = self.__create_log_config(
                    default_parser="docker",
                    path=info["log_path"],
                    attributes=attrs,
                    base_config=base_config,
                    parse_as_json=True,
                )
                if "rename_logfile" not in log_config:
                    log_config["rename_logfile"] = "/docker/%s.log" % info["name"]
                result.append({"cid": cid, "stream": "raw", "log_config": log_config})
            else:
                # API logs: separate entries for stdout and stderr
                stream_count = 2
                path = prefix + info["name"] + "-stdout.log"
                log_config = self.__create_log_config(
                    default_parser="dockerStdout",
                    path=path,
                    attributes=attrs,
                    base_config=base_config,
                )
                result.append(
                    {"cid": cid, "stream": "stdout", "log_config": log_config}
                )
                path = prefix + info["name"] + "-stderr.log"
                log_config = self.__create_log_config(
                    default_parser="dockerStderr",
                    path=path,
                    attributes=attrs,
                    base_config=base_config,
                )
                result.append(
                    {"cid": cid, "stream": "stderr", "log_config": log_config}
                )
            self._logger.log(
                scalyr_logging.DEBUG_LEVEL_1,
                "Using log config %s for container %s"
                % (str(result[-1]), info["name"]),
            )
            if stream_count == 2:
                self._logger.log(
                    scalyr_logging.DEBUG_LEVEL_1,
                    "Using log config %s for container %s"
                    % (str(result[-2]), info["name"]),
                )
        return result

    def __create_docker_logger(self, log, last_request):
        """Creates a new DockerLogger object, based on the parameters passed in in the 'log' param.
        @param log: a dict consisting of:
            cid - the container id
            stream - whether this is the stdout or stderr stream
            log_config - the log config used by the scalyr-agent for this log file
        @type log: dict
        """
        cid = log["cid"]
        name = self.containers[cid]["name"]
        stream = log["stream"]
        stream_name = name + "-" + stream
        # never go backwards past a previously-saved checkpoint
        if stream_name in self.__checkpoints:
            checkpoint = self.__checkpoints[stream_name]
            if last_request < checkpoint:
                last_request = checkpoint
        logger = DockerLogger(
            self.__socket_file,
            cid,
            name,
            stream,
            log["log_config"]["path"],
            self._config,
            last_request,
        )
        logger.start()
        return logger
class DockerLogger(object):
    """Abstraction for logging either stdout or stderr from a given container

    Logging is performed on a separate thread because each log is read from a continuous stream
    over the docker socket.
    """

    def __init__(
        self,
        socket_file,
        cid,
        name,
        stream,
        log_path,
        config,
        last_request=None,
        max_log_size=20 * 1024 * 1024,
        max_log_rotations=2,
    ):
        """
        @param socket_file: Path of the UNIX socket exporting the Docker API.
        @param cid: The container id.
        @param name: The container name.
        @param stream: Either "stdout" or "stderr".
        @param log_path: Path of the log file this logger writes to.
        @param config: The monitor configuration object (values are read via `get`).
        @param last_request: Optional timestamp (seconds past epoch) of the last log line
            already processed; lines with older timestamps are skipped.  Defaults to now.
        @param max_log_size: Maximum size in bytes of the log file before it is rotated.
        @param max_log_rotations: Number of rotated log files to keep.
        """
        self.__socket_file = socket_file
        self.cid = cid
        self.name = name
        # stderr or stdout
        self.stream = stream
        self.log_path = log_path
        self.stream_name = name + "-" + stream
        self.__max_previous_lines = config.get("max_previous_lines")
        self.__log_timestamps = config.get("log_timestamps")
        self.__docker_api_version = config.get("docker_api_version")
        # __last_request is read by the streaming thread and written under this lock.
        self.__last_request_lock = threading.Lock()
        self.__last_request = last_request or time.time()
        last_request_dt = datetime.datetime.utcfromtimestamp(self.__last_request)
        global_log.debug(
            'Using last_request value of "%s" for log_path "%s" and cid "%s", name "%s"'
            % (last_request_dt, self.log_path, self.cid, self.name)
        )
        self.__logger = logging.Logger(cid + "." + stream)
        self.__log_handler = logging.handlers.RotatingFileHandler(
            filename=log_path, maxBytes=max_log_size, backupCount=max_log_rotations
        )
        formatter = logging.Formatter()
        self.__log_handler.setFormatter(formatter)
        self.__logger.addHandler(self.__log_handler)
        self.__logger.setLevel(logging.INFO)
        self.__client = None
        self.__logs = None
        self.__thread = StoppableThread(
            target=self.process_request,
            name="Docker monitor logging thread for %s" % (name + "." + stream),
        )

    def start(self):
        """Starts the background thread that streams this container's log."""
        self.__thread.start()

    def __get_response(self):
        """Returns the raw HTTP response object of the log stream, or None if not streaming.

        NOTE: Depending on the docker-py version used, the attribute name may be either
        "response" or "_response".
        """
        if self.__logs is None:
            return None
        # Fix: the previous code used getattr(x, "response", getattr(x, "_response")) which
        # evaluates the fallback eagerly and raised AttributeError whenever "_response" was
        # absent, even if "response" existed.  Use None defaults instead.
        return getattr(self.__logs, "response", None) or getattr(
            self.__logs, "_response", None
        )

    def stop(self, wait_on_join=True, join_timeout=5):
        """Stops the streaming thread, closing the underlying socket to unblock it.

        @param wait_on_join: Whether to wait for the thread to finish.
        @param join_timeout: Maximum number of seconds to wait for the thread to finish.
        """
        response = self.__get_response()
        if self.__client and response:
            sock = self.__client._get_raw_response_socket(response)
            if sock:
                # Under Python 3, SocketIO is used which has a close() attribute and not shutdown
                if hasattr(sock, "shutdown"):
                    sock.shutdown(socket.SHUT_RDWR)
                else:
                    sock.close()
        self.__thread.stop(wait_on_join=wait_on_join, join_timeout=join_timeout)

    def last_request(self):
        """Returns the timestamp (seconds past epoch) of the most recently processed log line."""
        self.__last_request_lock.acquire()
        result = self.__last_request
        self.__last_request_lock.release()
        return result

    def process_request(self, run_state):
        """This function makes a log request on the docker socket for a given container and continues
        to read from the socket until the connection is closed

        @param run_state: The thread's run state, used for interruptible sleeps and to
            detect shutdown.
        """
        try:
            # random delay to prevent all requests from starting at the same time
            delay = random.randint(500, 5000) / 1000
            run_state.sleep_but_awaken_if_stopped(delay)
            self.__logger.log(
                scalyr_logging.DEBUG_LEVEL_3,
                "Starting to retrieve logs for cid=%s" % six.text_type(self.cid),
            )
            self.__client = DockerClient(
                base_url=("unix:/%s" % self.__socket_file),
                version=self.__docker_api_version,
            )
            epoch = datetime.datetime.utcfromtimestamp(0)
            while run_state.is_running():
                self.__logger.log(
                    scalyr_logging.DEBUG_LEVEL_3,
                    "Attempting to retrieve logs for cid=%s" % six.text_type(self.cid),
                )
                # Request only the stream (stdout or stderr) this logger is responsible for.
                sout = self.stream == "stdout"
                serr = not sout
                self.__logs = self.__client.logs(
                    container=self.cid,
                    stdout=sout,
                    stderr=serr,
                    stream=True,
                    timestamps=True,
                    tail=self.__max_previous_lines,
                    follow=True,
                )
                # self.__logs is a generator so don't call len( self.__logs )
                self.__logger.log(
                    scalyr_logging.DEBUG_LEVEL_3,
                    "Found log lines for cid=%s" % (six.text_type(self.cid)),
                )
                try:
                    for line in self.__logs:
                        line = six.ensure_text(line)
                        # split the docker timestamp from the rest of the line
                        dt, log_line = _split_datetime_from_line(line)
                        if not dt:
                            # Under some edge cases this message can be logged a lot (which can
                            # exhaust the CPU) so we need to make sure rate limit is in place.
                            global_log.error(
                                "No timestamp found on line: '%s'",
                                line,
                                limit_once_per_x_secs=300,
                                limit_key="docker-monitor-line-missing-ts",
                            )
                        else:
                            timestamp = scalyr_util.seconds_since_epoch(dt, epoch)
                            # see if we log the entire line including timestamps
                            if self.__log_timestamps:
                                log_line = line
                            # check to make sure timestamp is >= to the last request
                            # Note: we can safely read last_request here because we are the only writer
                            if timestamp >= self.__last_request:
                                self.__logger.info(log_line.strip())
                                # but we need to lock for writing
                                self.__last_request_lock.acquire()
                                self.__last_request = timestamp
                                self.__last_request_lock.release()
                        if not run_state.is_running():
                            self.__logger.log(
                                scalyr_logging.DEBUG_LEVEL_3,
                                "Exiting out of container log for cid=%s"
                                % six.text_type(self.cid),
                            )
                            break
                except ProtocolError as e:
                    if run_state.is_running():
                        global_log.warning(
                            "Stream closed due to protocol error: %s" % six.text_type(e)
                        )
                # The stream ended; if we are still supposed to be running, reconnect
                # after a short randomized delay.
                if run_state.is_running():
                    global_log.warning(
                        "Log stream has been closed for '%s'. Check docker.log on the host for possible errors. Attempting to reconnect, some logs may be lost"
                        % (self.name),
                        limit_once_per_x_secs=300,
                        limit_key="stream-closed-%s" % self.name,
                    )
                    delay = random.randint(500, 3000) / 1000
                    run_state.sleep_but_awaken_if_stopped(delay)
            # we are shutting down, so update our last request to be slightly later than its current
            # value to prevent duplicate logs when starting up again.
            self.__last_request_lock.acquire()
            # can't be any smaller than 0.01 because the time value is only saved to 2 decimal places
            # on disk
            self.__last_request += 0.01
            self.__last_request_lock.release()
        except docker.errors.NotFound as e:
            # This simply represents the container has been stopped / killed before the client has
            # been able to cleanly close the connection. This error is non-fatal and simply means we
            # will clean up / remove the log on next iteration.
            global_log.info(
                'Container with id "%s" and name "%s" has been removed or deleted. Log file '
                "will be removed on next loop iteration. Original error: %s."
                % (self.cid, self.name, str(e))
            )
        except Exception as e:
            # Those errors are not fatal so we simply ignore them and do not log them under warning.
            # They usually appear on agent restart when using log consumption via API since
            # long running streaming API connection will be closed.
            if "readinto of closed file" in str(e) or "operation on closed file" in str(
                e
            ):
                global_log.log(
                    scalyr_logging.DEBUG_LEVEL_1,
                    "Unhandled non-fatal exception in DockerLogger.process_request for %s:\n\t%s.\n\n%s"
                    % (self.name, six.text_type(e), traceback.format_exc()),
                )
                return
            global_log.warn(
                "Unhandled exception in DockerLogger.process_request for %s:\n\t%s.\n\n%s"
                % (self.name, six.text_type(e), traceback.format_exc())
            )
class ContainerIdResolver:
    """Abstraction that can be used to look up Docker container names based on their id.

    This has a caching layer built in to minimize lookups to actual Docker and make this as efficient as possible.

    This abstraction is thread-safe.
    """

    def __init__(
        self,
        docker_api_socket,
        docker_api_version,
        logger,
        cache_expiration_secs=300,
        cache_clean_secs=5,
    ):
        """
        Initializes one instance.

        @param docker_api_socket: The path to the UNIX socket exporting the Docker API by the daemon.
        @param docker_api_version: The API version to use, typically 'auto'.
        @param cache_expiration_secs: The number of seconds to cache a mapping from container id to container name. If
            the mapping is not used for this number of seconds, the mapping will be evicted. (The actual eviction
            is performed lazily).
        @param cache_clean_secs: The number of seconds between sweeps to clean the cache.
        @param logger: The logger to use. This MUST be supplied.
        @type docker_api_socket: six.text_type
        @type docker_api_version: six.text_type
        @type cache_expiration_secs: double
        @type cache_clean_secs: double
        @type logger: Logger
        """
        # Guards all variables except for __logger and __docker_client.
        self.__lock = threading.Lock()
        # Maps container id -> ContainerIdResolver.Entry.
        self.__cache = dict()
        # The walltime of when the cache was last cleaned.
        self.__last_cache_clean = time.time()
        self.__cache_expiration_secs = cache_expiration_secs
        self.__cache_clean_secs = cache_clean_secs
        self.__docker_client = docker.APIClient(  # pylint: disable=no-member
            base_url=("unix:/%s" % docker_api_socket), version=docker_api_version
        )
        # The set of container ids that have not been used since the last cleaning. These are eviction candidates.
        # (Previously a dict abused as a set; a real set expresses the intent directly.)
        self.__untouched_ids = set()
        self.__logger = logger

    def lookup(self, container_id):
        """Looks up the container name for the specified container id.

        This does check the local cache first.

        @param container_id: The container id
        @type container_id: str
        @return: A tuple with the first element containing the container name or None if the container id could not be resolved, or if there was an error
            accessing Docker, and the second element containing a dict of labels associated with the container, or None if the container id could not be
            resolved, or if there was an error accessing Docker.
        @rtype: (str, dict) or (None, None)
        """
        try:
            # self.__logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Looking up cid="%s"', container_id)
            current_time = time.time()
            self.__lock.acquire()
            try:
                self._clean_cache_if_necessary(current_time)
                # Check cache first and mark it as recently used if found.
                if container_id in self.__cache:
                    entry = self.__cache[container_id]
                    self._touch(entry, current_time)
                    # self.__logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Cache hit for cid="%s" -> "%s"', container_id,
                    #                  entry.container_name)
                    return (entry.container_name, entry.labels)
            finally:
                self.__lock.release()
            # Cache miss: ask Docker directly (without holding the lock).
            (container_name, labels) = self._fetch_id_from_docker(
                container_id, get_labels=True
            )
            if container_name is not None:
                # self.__logger.log(scalyr_logging.DEBUG_LEVEL_1, 'Docker resolved id for cid="%s" -> "%s"', container_id,
                #                  container_name)
                self._insert_entry(container_id, container_name, labels, current_time)
                return (container_name, labels)
            # self.__logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Docker could not resolve id="%s"', container_id)
        except Exception:
            self.__logger.error(
                'Error seen while attempting resolving docker cid="%s"', container_id
            )
        return (None, None)

    def _clean_cache_if_necessary(self, current_time):
        """Cleans the cache if it has been too long since the last cleaning.

        You must be holding self.__lock.

        @param current_time: The current walltime.
        @type current_time: double
        """
        if self.__last_cache_clean + self.__cache_clean_secs > current_time:
            return
        # self.__logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Cleaning cid cache. Before clean=%d:%d', len(self.__cache),
        #                  len(self.__untouched_ids))
        self.__last_cache_clean = current_time
        # The last access time that will trigger expiration.
        expire_threshold = current_time - self.__cache_expiration_secs
        # For efficiency, just examine the ids that haven't been used since the last cleaning.
        for key in self.__untouched_ids:
            if self.__cache[key].last_access_time < expire_threshold:
                del self.__cache[key]
        # Reset the untouched_ids to contain all of the ids.
        self.__untouched_ids = set(self.__cache)
        # self.__logger.log(scalyr_logging.DEBUG_LEVEL_2, 'After clean=%d:%d', len(self.__cache),
        #                  len(self.__untouched_ids))

    def _touch(self, cache_entry, last_access_time):
        """Mark the specified cache entry as being recently used.

        You must be holding self.__lock.

        @param cache_entry: The entry
        @param last_access_time: The time it was accessed.
        @type cache_entry: ContainerIdResolver.Entry
        @type last_access_time: double
        """
        # A touched id is no longer an eviction candidate for this sweep.
        self.__untouched_ids.discard(cache_entry.container_id)
        cache_entry.touch(last_access_time)

    def _fetch_id_from_docker(self, container_id, get_labels):
        """Fetch the container name for the specified container using the Docker API.

        @param container_id: The id of the container.
        @param get_labels: Whether to gather label information for the container
        @type container_id: str
        @type get_labels: bool
        @return: A tuple containing the container name and labels, or (None, None) if it was either not found or if there was an error.
            if `get_labels` is False then the returned labels will always be an empty dict
        @rtype: (str, dict) or (None, None)
        """
        matches = _get_containers(
            self.__docker_client,
            restrict_to_container=container_id,
            logger=self.__logger,
            only_running_containers=False,
            get_labels=get_labels,
        )
        if len(matches) == 0:
            # self.__logger.log(scalyr_logging.DEBUG_LEVEL_3, 'No matches found in docker for cid="%s"', container_id)
            return (None, None)
        if len(matches) > 1:
            self.__logger.warning(
                "Container id matches %d containers for id='%s'."
                % (len(matches), container_id),
                limit_once_per_x_secs=300,
                limit_key="docker_container_id_more_than_one",
            )
            return (None, None)
        # Note, the cid used as the key for the returned matches is the long container id, not the short one that
        # we were passed in as `container_id`.
        match = matches[list(matches.keys())[0]]
        labels = match.get("labels", {})
        if labels is None:
            labels = {}
        return (match["name"], labels)

    def _insert_entry(self, container_id, container_name, labels, last_access_time):
        """Inserts a new cache entry mapping the specified id to the container name.

        @param container_id: The id of the container.
        @param container_name: The name of the container.
        @param labels: The labels associated with this container.
        @param last_access_time: The time it this entry was last used.
        @type container_id: str
        @type container_name: str
        @type last_access_time: double
        """
        self.__lock.acquire()
        try:
            # ensure that labels is never None
            if labels is None:
                labels = {}
            self.__cache[container_id] = ContainerIdResolver.Entry(
                container_id, container_name, labels, last_access_time
            )
        finally:
            self.__lock.release()

    class Entry:
        """Helper abstraction representing a single cache entry mapping a container id to its name."""

        def __init__(self, container_id, container_name, labels, last_access_time):
            """
            @param container_id: The id of the container.
            @param container_name: The name of the container.
            @param labels: The labels associated with this container.
            @param last_access_time: The time the entry was last used.
            @type container_id: str
            @type container_name: str
            @type labels: dict
            @type last_access_time: double
            """
            self.__container_id = container_id
            self.__container_name = container_name
            self.__labels = labels
            self.__last_access_time = last_access_time

        @property
        def container_id(self):
            """
            @return: The id of the container.
            @rtype: str
            """
            return self.__container_id

        @property
        def container_name(self):
            """
            @return: The name of the container.
            @rtype: str
            """
            return self.__container_name

        @property
        def labels(self):
            """
            @return: The labels associated with this container.
            @rtype: dict
            """
            return self.__labels

        @property
        def last_access_time(self):
            """
            @return: The last time this entry was used, in seconds past epoch.
            @rtype: double
            """
            return self.__last_access_time

        def touch(self, access_time):
            """Updates the last access time for this entry.

            @param access_time: The time of the access.
            @type access_time: double
            """
            self.__last_access_time = access_time
class DockerOptions(object):
    """
    A class representing label configuration options from the docker monitor
    """

    def __init__(
        self,
        labels_as_attributes=False,
        label_prefix="",
        label_include_globs=None,
        label_exclude_globs=None,
        use_labels_for_log_config=True,
    ):
        """
        @param labels_as_attributes: If True any labels that are not excluded will be added to the attributes result dict
        @param label_prefix: A prefix to add to the key of any labels added to the attributes result dict
        @param label_include_globs: Any label that matches any glob in this list will be included
            in the attributes result dict as long as it isn't filtered out by `label_exclude_globs`.
        @param label_exclude_globs: Any label that matches any glob in this list will be excluded
            from the attributes result dict.  This is applied to the labels *after* `label_include_globs`
        @param use_labels_for_log_config: If True any label that begins with com.scalyr.config.log will be converted
            to a dict, based on the rules for processing k8s annotations
        @type labels_as_attributes: bool
        @type label_prefix: str
        @type label_include_globs: list[str]
        @type label_exclude_globs: list[str]
        @type use_labels_for_log_config: bool
        """
        self.labels_as_attributes = labels_as_attributes
        self.label_prefix = label_prefix
        self.use_labels_for_log_config = use_labels_for_log_config
        # Default the glob lists when the caller did not provide them.
        self.label_include_globs = (
            ["*"] if label_include_globs is None else label_include_globs
        )
        self.label_exclude_globs = (
            ["com.scalyr.config.*"]
            if label_exclude_globs is None
            else label_exclude_globs
        )

    def __str__(self):
        """
        String representation of a DockerOption
        """
        return (
            "\n\tLabels as Attributes:%s\n\tLabel Prefix: '%s'\n\tLabel Include Globs: %s\n\tLabel Exclude Globs: %s\n\tUse Labels for Log Config: %s"
            % (
                six.text_type(self.labels_as_attributes),
                self.label_prefix,
                six.text_type(self.label_include_globs),
                six.text_type(self.label_exclude_globs),
                six.text_type(self.use_labels_for_log_config),
            )
        )

    def configure_from_monitor(self, monitor):
        """
        Configures the options based on the values from the docker monitor

        @param monitor: a docker monitor that can be used to configure the options
        @type monitor: DockerMonitor
        """
        option_names = (
            "label_exclude_globs",
            "label_include_globs",
            "use_labels_for_log_config",
            "label_prefix",
            "labels_as_attributes",
        )
        # remember the current (default) values so we can roll back on failure
        saved = dict((name, getattr(self, name)) for name in option_names)
        try:
            # copy each option's value from the docker monitor
            for name in option_names:
                setattr(self, name, getattr(monitor, name))
        except Exception as e:
            # Configuration from monitor failed (values not available on the monitor),
            # fall back to the default configuration
            global_log.warning(
                "Error getting docker config from docker monitor - %s. Using defaults"
                % six.text_type(e)
            )
            for name in option_names:
                setattr(self, name, saved[name])
class DockerMonitor(ScalyrMonitor): # pylint: disable=monitor-not-included-for-win32
# fmt: off
"""
# Docker Monitor
This plugin uses the Docker API to detect all containers running on the local host, retrieves metrics for each of
them, and logs them to the Scalyr servers.
It can also collect all log messages written to stdout and stderr by those containers, in conjunction with syslog.
See the online documentation for more details.
## Docker Labels
You can configure the Scalyr Agent to upload customer attributes for your containers based on the labels you set on the container itself. This can be used to easily set the parser that should be used to parse the container's log, as well as adding in arbitrary labels on the container's log.
To use this functionality, you must properly configure the agent by setting the `labels_as_attributes` configuration option to `true`. All of the docker monitor configuration options related to this feature are as follows:`
* **labels\\_as\\_attributes** - When `true` upload labels that pass the include/exclude filters (see below) as log attributes for the logs generated by the container. Defaults to `false`
* **label\\_include\\_globs** - A list of [glob strings](https://docs.python.org/2/library/fnmatch.html) used to include labels to be uploaded as log attributes. Any label that matches any glob in this list will be included as an attribute, as long as it not excluded by `label_exclude_globs`. Defaults to `[ '*' ]` (everything)
* **label\\_exclude\\_globs** - A list of glob strings used to exclude labels from being uploaded as log attributes. Any label that matches any glob on this list will be excluded as an attribute. Exclusion rules are applied *after* inclusion rules. Defaults to `[ 'com.scalyr.config.*' ]`
* **label_prefix** - A string to add to the beginning of any label key before uploading it as a log attribute. e.g. if the value for `label_prefix` is `docker_` then the container labels `app` and `tier` will be uploaded to Scalyr with attribute keys `docker_app` and `docker_tier`. Defaults to ''
You can change these config options by editing the `agent.d/docker.json` file. Please [follow the instructions here](https://www.scalyr.com/help/install-agent-docker#modify-config) to export the configuration of your running Scalyr Agent.
A sample configuration that uploaded the attributes `tier`, `app` and `region`, with the prefix `dl_` would look like this:
```
monitors: [
{
"module": "scalyr_agent.builtin_monitors.docker_monitor",
...
labels_as_attributes: true,
label_include_globs: ['tier', 'app', 'region' ],
label_prefix: 'dl_'
}
]
```
Note: Log attributes contribute towards your total log volume, so it is wise to limit the labels to a small set of approved values, to avoid paying for attributes that you don't need.
## Using Docker Labels for Configuration
You can also use docker labels to configure the log settings for a specific container, such as setting the parser or setting redaction rules.
The agent takes any label on a container that begins with `com.scalyr.config.log.` and maps it to the corresponding option in the `log_config` stanza for that container's logs (minus the prefix).
For example, if you add the following label to your container:
```
com.scalyr.config.log.parser=accessLog
```
The Scalyr Agent will automatically use the following for that containers's `log_config` stanza:
```
{ "parser": "accessLog" }
```
This feature is enabled by default, and by default any configuration labels are ignored by the `labels_as_attributes` option. To turn off this feature entirely, you can set the `use_labels_for_log_config` option to `false` in the docker monitor configuration, and the agent will not process container labels for configuration options.
The following fields can be configured via container labels and behave as described in the [Scalyr help docs](https://www.scalyr.com/help/scalyr-agent#logUpload):
* parser
* attributes
* sampling_rules
* rename_logfile
* redaction_rules
Note: keys for docker labels cannot include underscores, so for all options that have an underscore in their name, replace it with a hyphen, and the Scalyr agent will map this to the appropriate option name. e.g. the labels:
```
com.scalyr.config.log.rename-logfile
```
Would be mapped to `rename_logfile`
### Mapping Configuration Options
The rules for mapping labels to objects and arrays are as follows:
Values separated by a period are mapped to object keys e.g. if a label on a given container was specified as:
```
com.scalyr.config.log.attributes.tier=prod
```
Then this would be mapped to the following object, which would then be applied to the log config for that container:
```
{ "attributes": { "tier": "prod" } }
```
Arrays can be specified by using one or more digits as the key, e.g. if the labels were
```
com.scalyr.config.log.sampling-rules.0.match-expression=INFO
com.scalyr.config.log.sampling-rules.0.sampling-rate=0.1
com.scalyr.config.log.sampling-rules.1.match-expression=FINE
com.scalyr.config.log.sampling-rules.1.sampling-rate=0
```
This will be mapped to the following structure:
```
{ "sampling_rules":
[
{ "match_expression": "INFO", "sampling_rate": 0.1 },
{ "match_expression": "FINE", "sampling_rate": 0 }
]
}
```
Note: The Scalyr agent will automatically convert hyphens in the docker label keys to underscores.
Array keys are sorted by numeric order before processing and unique objects need to have different digits as the array key. If a sub-key has an identical array key as a previously seen sub-key, then the previous value of the sub-key is overwritten
There is no guarantee about the order of processing for items with the same numeric array key, so if the config was specified as:
```
com.scalyr.config.log.sampling_rules.0.match_expression=INFO
com.scalyr.config.log.sampling_rules.0.match_expression=FINE
```
It is not defined or guaranteed what the actual value will be (INFO or FINE).
## Syslog Monitor
If you wish to use labels and label configuration when using the syslog monitor to upload docker logs, you must still specify a docker monitor in your agent config, and set the docker label options in the docker monitor configuration. These will then be used by the syslog monitor.
TODO: Back fill the instructions here.
"""
# fmt: on
def __get_socket_file(self):
"""Gets the Docker API socket file and validates that it is a UNIX socket"""
# make sure the API socket exists and is a valid socket
api_socket = self._config.get("api_socket")
try:
st = os.stat(api_socket)
if not stat.S_ISSOCK(st.st_mode):
raise Exception()
except Exception:
raise Exception(
"The file '%s' specified by the 'api_socket' configuration option does not exist or is not a socket.\n\tPlease make sure you have mapped the docker socket from the host to this container using the -v parameter.\n\tNote: Due to problems Docker has mapping symbolic links, you should specify the final file and not a path that contains a symbolic link, e.g. map /run/docker.sock rather than /var/run/docker.sock as on many unices /var/run is a symbolic link to the /run directory."
% api_socket
)
return api_socket
    def _initialize(self):
        """Initializes the monitor from its configuration: validates the docker API socket,
        creates the docker client, reads label/metric options and builds the metric name tables.
        """
        data_path = ""
        log_path = ""
        host_hostname = ""
        # Pull agent paths and the server host name from the global config when available.
        if self._global_config:
            data_path = self._global_config.agent_data_path
            log_path = self._global_config.agent_log_path
            if self._global_config.server_attributes:
                if "serverHost" in self._global_config.server_attributes:
                    host_hostname = self._global_config.server_attributes["serverHost"]
                else:
                    self._logger.info("no server host in server attributes")
            else:
                self._logger.info("no server attributes in global config")
        # Validate the socket and create the docker API client.
        self.__socket_file = self.__get_socket_file()
        self.__docker_api_version = self._config.get("docker_api_version")
        self.__client = DockerClient(
            base_url=("unix:/%s" % self.__socket_file),
            version=self.__docker_api_version,
        )
        # Globs controlling which containers are monitored.
        self.__glob_list = {
            "include": self._config.get("container_globs"),
            "exclude": self._config.get("container_globs_exclude"),
        }
        self.__report_container_metrics = self._config.get("report_container_metrics")
        self.__percpu_metrics = self._config.get("docker_percpu_metrics")
        metrics_only = self._config.get("metrics_only")
        # Label handling options; the include/exclude globs must be lists of strings.
        self.label_exclude_globs = self._config.get("label_exclude_globs")
        self.label_include_globs = self._config.get("label_include_globs")
        if not scalyr_util.is_list_of_strings(self.label_include_globs):
            raise BadMonitorConfiguration(
                "label_include_globs contains a non-string value: %s"
                % six.text_type(self.label_include_globs),
                "label_include_globs",
            )
        if not scalyr_util.is_list_of_strings(self.label_exclude_globs):
            raise BadMonitorConfiguration(
                "label_exclude_globs contains a non-string value: %s"
                % six.text_type(self.label_exclude_globs),
                "label_exclude_globs",
            )
        self.use_labels_for_log_config = self._config.get("use_labels_for_log_config")
        self.label_prefix = self._config.get("label_prefix")
        self.labels_as_attributes = self._config.get("labels_as_attributes")
        # always force reporting of container metrics if metrics_only is True
        if metrics_only:
            self.__report_container_metrics = True
        # The ContainerChecker handles log collection; it is not created when only
        # metrics are collected or when logs are shipped via syslog.
        self.__container_checker = None
        if not metrics_only and self._config.get("log_mode") != "syslog":
            self.__container_checker = ContainerChecker(
                self._config,
                self._logger,
                self.__socket_file,
                self.__docker_api_version,
                host_hostname,
                data_path,
                log_path,
            )
        # Tables mapping the emitted metric name -> the corresponding key in docker's
        # stats output (see __build_metric_dict / __log_metrics).
        self.__network_metrics = self.__build_metric_dict(
            "docker.net.",
            [
                "rx_bytes",
                "rx_dropped",
                "rx_errors",
                "rx_packets",
                "tx_bytes",
                "tx_dropped",
                "tx_errors",
                "tx_packets",
            ],
        )
        self.__mem_stat_metrics = self.__build_metric_dict(
            "docker.mem.stat.",
            [
                "total_pgmajfault",
                "cache",
                "mapped_file",
                "total_inactive_file",
                "pgpgout",
                "rss",
                "total_mapped_file",
                "writeback",
                "unevictable",
                "pgpgin",
                "total_unevictable",
                "pgmajfault",
                "total_rss",
                "total_rss_huge",
                "total_writeback",
                "total_inactive_anon",
                "rss_huge",
                "hierarchical_memory_limit",
                "total_pgfault",
                "total_active_file",
                "active_anon",
                "total_active_anon",
                "total_pgpgout",
                "total_cache",
                "inactive_anon",
                "active_file",
                "pgfault",
                "inactive_file",
                "total_pgpgin",
            ],
        )
        self.__mem_metrics = self.__build_metric_dict(
            "docker.mem.", ["max_usage", "usage", "fail_cnt", "limit"]
        )
        self.__cpu_usage_metrics = self.__build_metric_dict(
            "docker.cpu.", ["usage_in_usermode", "total_usage", "usage_in_kernelmode"]
        )
        self.__cpu_throttling_metrics = self.__build_metric_dict(
            "docker.cpu.throttling.", ["periods", "throttled_periods", "throttled_time"]
        )
        # Docker system version, fetched lazily in gather_sample().
        self.__version = None
        self.__version_lock = threading.RLock()
def set_log_watcher(self, log_watcher):
"""Provides a log_watcher object that monitors can use to add/remove log files"""
if self.__container_checker:
self.__container_checker.set_log_watcher(log_watcher, self)
def __build_metric_dict(self, prefix, names):
result = {}
for name in names:
result["%s%s" % (prefix, name)] = name
return result
def __log_metrics(self, container, metrics_to_emit, metrics, extra=None):
if metrics is None:
return
for key, value in six.iteritems(metrics_to_emit):
if value in metrics:
# Note, we do a bit of a hack to pretend the monitor's name include the container's name. We take this
# approach because the Scalyr servers already have some special logic to collect monitor names and ids
# to help auto generate dashboards. So, we want a monitor name like `docker_monitor(foo_container)`
# for each running container.
self._logger.emit_value(
key, metrics[value], extra, monitor_id_override=container
)
def __log_network_interface_metrics(self, container, metrics, interface=None):
extra = {}
if interface:
extra["interface"] = interface
self.__log_metrics(container, self.__network_metrics, metrics, extra)
def __log_memory_stats_metrics(self, container, metrics):
if "stats" in metrics:
self.__log_metrics(container, self.__mem_stat_metrics, metrics["stats"])
self.__log_metrics(container, self.__mem_metrics, metrics)
def __log_cpu_stats_metrics(self, container, metrics):
if "cpu_usage" in metrics:
cpu_usage = metrics["cpu_usage"]
if self.__percpu_metrics and "percpu_usage" in cpu_usage:
percpu = cpu_usage["percpu_usage"]
count = 1
if percpu:
for usage in percpu:
extra = {"cpu": count}
self._logger.emit_value(
"docker.cpu.usage",
usage,
extra,
monitor_id_override=container,
)
count += 1
self.__log_metrics(container, self.__cpu_usage_metrics, cpu_usage)
if "system_cpu_usage" in metrics:
self._logger.emit_value(
"docker.cpu.system_cpu_usage",
metrics["system_cpu_usage"],
monitor_id_override=container,
)
if "throttling_data" in metrics:
self.__log_metrics(
container, self.__cpu_throttling_metrics, metrics["throttling_data"]
)
def __log_json_metrics(self, container, metrics):
for key, value in six.iteritems(metrics):
if value is None:
continue
if key == "networks":
for interface, network_metrics in six.iteritems(value):
self.__log_network_interface_metrics(
container, network_metrics, interface
)
elif key == "network":
self.__log_network_interface_metrics(container, value)
elif key == "memory_stats":
self.__log_memory_stats_metrics(container, value)
elif key == "cpu_stats":
self.__log_cpu_stats_metrics(container, value)
def __gather_metrics_from_api_for_container(self, container):
try:
self._logger.log(
scalyr_logging.DEBUG_LEVEL_3,
"Attempting to retrieve metrics for cid=%s" % container,
)
result = self.__client.stats(container=container, stream=False)
if result is not None:
self.__log_json_metrics(container, result)
except Exception as e:
self._logger.warning(
"Error readings stats for '%s': %s\n%s"
% (container, six.text_type(e), traceback.format_exc()),
limit_once_per_x_secs=300,
limit_key="api-stats-%s" % container,
)
def __gather_metrics_from_api(self, containers):
for cid, info in six.iteritems(containers):
self.__gather_metrics_from_api_for_container(info["name"])
def get_user_agent_fragment(self):
    """Return a user-agent fragment describing the docker environment.

    This method is periodically invoked by a separate (MonitorsManager)
    thread and must be thread safe, hence the lock around reading
    self.__version.
    """

    def _version_to_fragment(ver):
        # Helper function to transform docker version to user-agent fragment
        if not ver:
            # version not yet set: return 'docker=true' to signify docker
            return "docker=true"
        log_mode = self._config.get("log_mode")
        if log_mode == "syslog":
            extra = ""
        else:
            extra = "|raw" if self._config.get("docker_raw_logs") else "|api"
        return "docker=%s|%s%s" % (ver, log_mode, extra)

    # Use the lock as a context manager instead of manual acquire/try/finally.
    with self.__version_lock:
        return _version_to_fragment(self.__version)
def _fetch_and_set_version(self):
    """Fetch and record the docker system version via the docker API.

    Leaves self.__version untouched when the query fails or returns an
    empty version string; gather_sample() will retry on its next pass.
    """
    ver = None
    try:
        ver = self.__client.version().get("Version")
    except Exception:
        self._logger.exception("Could not determine Docker system version")
    if not ver:
        return
    # Use the lock as a context manager instead of manual acquire/try/finally.
    with self.__version_lock:
        self.__version = ver
def gather_sample(self):
    """Besides gathering data, this main-loop method also queries the docker API for version number.

    Due to potential race condition between the MonitorsManager and this manager at start up, we set the version
    in lazy fashion as follows: On each gather_sample() the DockerMonitor thread queries the docker API for
    the version until the first success (In most cases, this means a single query shortly after start up). Once
    set, it will never be queried again as the docker system version cannot change without necessitating an agent
    restart.
    """
    if not self.__version:
        self._fetch_and_set_version()

    # gather metrics for all containers matching the configured glob list
    if self.__report_container_metrics:
        containers = _get_containers(
            self.__client, ignore_container=None, glob_list=self.__glob_list
        )
        self._logger.log(
            scalyr_logging.DEBUG_LEVEL_3,
            "Attempting to retrieve metrics for %d containers" % len(containers),
        )
        self.__gather_metrics_from_api(containers)
def run(self):
    """Start the container checker (if configured) and enter the monitor loop."""
    # workaround a multithread initialization problem with time.strptime
    # see: http://code-trick.com/python-bug-attribute-error-_strptime/
    # we can ignore the result
    time.strptime("2016-08-29", "%Y-%m-%d")
    if self.__container_checker:
        self.__container_checker.start()
    ScalyrMonitor.run(self)
def stop(self, wait_on_join=True, join_timeout=5):
    """Stop the monitor and, if present, the container checker.

    :param wait_on_join: whether to block until the worker thread joins
    :param join_timeout: maximum seconds to wait for the join
    """
    # stop the main server first, then the auxiliary checker thread
    ScalyrMonitor.stop(self, wait_on_join=wait_on_join, join_timeout=join_timeout)
    if self.__container_checker:
        self.__container_checker.stop(wait_on_join, join_timeout)
|
# Generated by Django 2.2 on 2019-05-04 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2: adds the Ingredient and Menu models and
    # extends the existing Shop model with contact/opening-hour fields.

    dependencies = [
        ('ordering', '0003_auto_20190504_1351'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Ingre_name', models.TextField(max_length=50)),
                ('is_empty', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('menu_name', models.TextField(max_length=100)),
                ('is_daily_menu', models.BooleanField(default=False)),
                ('description', models.TextField(max_length=200, null=True)),
                ('normal_price', models.FloatField()),
                ('special_price', models.FloatField()),
                ('menu_type', models.CharField(choices=[('01', 'อาหาร'), ('02', 'เครื่องดื่ม')], default='01', max_length=2)),
                ('menu_image', models.FileField(blank=True, null=True, upload_to='')),
            ],
        ),
        migrations.AddField(
            model_name='shop',
            name='close_time',
            field=models.TimeField(null=True),
        ),
        migrations.AddField(
            model_name='shop',
            name='contact1',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='shop',
            name='contact2',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='shop',
            name='open_time',
            field=models.TimeField(null=True),
        ),
        migrations.AddField(
            model_name='shop',
            name='phone_number',
            field=models.CharField(max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='shop',
            name='shop_name',
            field=models.TextField(max_length=100, null=True),
        ),
    ]
|
#!/usr/bin/env python
#coding: utf-8
import json
import logging_debug
from models import execute_sql
from models import select_all_result
from models import select_one_result
import logging
import hashlib
# Path of the (currently unused) "remember me" persistence file.
rememberme='./rememberme'

# Log everything (DEBUG and up) to ./logs, appending across runs.
logging.basicConfig(
    filename = './logs',
    filemode = 'a',
    format = '[%(asctime)s] - [%(threadName)5s] -[%(filename)s-line:%(lineno)d] [%(levelname)s] %(message)s',
    level = logging.DEBUG,
    datefmt='%m/%d/%y %I:%M:%S %p'
)
#def rememberMe(saveuser):
# users = []
# users.append(saveuser)
# with open(rememberme, 'w') as fd:
# fd.write(json.dumps(users))
#
#
#def getRememberMe():
# try:
# with open(rememberme,'r') as fd:
# users = json.loads(fd.read())
# except Exception as e:
# users= []
# return users
def register(data):
    """Insert a new user row from *data* (keys: username, email, password).

    The password is stored as an MD5 hex digest.
    NOTE(security): the SQL is built via string interpolation and is
    vulnerable to SQL injection; switch to parameterized queries if
    execute_sql supports them.  The generated SQL (and thus user data) is
    also written to the debug log.
    """
    md5passwd = encrption(data['password'])
    sql = '''INSERT INTO users(username, email, password) values('%s', '%s', '%s')''' % (data['username'], data['email'], md5passwd)
    logging.debug(sql)
    return execute_sql(sql)
def get_users():
    """Return every row of the users table."""
    return select_all_result('''SELECT * FROM users ''')
def login_check(name,password):
    """Return the user row matching *name* and (already hashed) *password*.

    NOTE(security): string-interpolated SQL — vulnerable to injection; the
    query, including the password hash, is also written to the debug log.
    """
    sql = '''SELECT * FROM users WHERE username = '%s' and password = '%s';''' % (name,password)
    logging.debug(sql)
    return select_one_result(sql)
def get_user(name):
    """Return the user row with the given username, or None.

    NOTE(security): string-interpolated SQL — vulnerable to injection.
    """
    sql = '''SELECT * FROM users WHERE username = '%s';''' % name
    return select_one_result(sql)
def get_email(email):
    """Return the user row with the given email, or None.

    NOTE(security): string-interpolated SQL — vulnerable to injection.
    """
    sql = '''SELECT * FROM users WHERE email = '%s';''' % email
    return select_one_result(sql)
def get_user_id(uid):
    """Return the user row with the given numeric id, or None."""
    sql = '''SELECT * FROM users WHERE id = '%d';''' % uid
    return select_one_result(sql)
def userDel(uid):
    """Delete the user with the given id.

    NOTE(security): *uid* is interpolated directly into the SQL — make sure
    callers pass a validated integer.
    """
    sql = '''DELETE FROM users WHERE id = %s; ''' % uid
    logging.debug(sql)
    return execute_sql(sql)
def userUpdate(data):
    """Update username/password/email of the user identified by data['id'].

    NOTE(review): the password is written as given — callers must hash it
    first (register() hashes, this function does not); confirm intent.
    NOTE(security): string-interpolated SQL — vulnerable to injection.
    """
    sql = '''UPDATE users set username='%s',password='%s',email='%s' WHERE id = %d;''' % (data['username'],data['password'],data['email'],data['id'])
    logging.debug(sql)
    return execute_sql(sql)
def search(name):
    """Return a user whose username or email starts with *name*.

    Fixes the original format string '%s\\%' which raised
    "ValueError: unsupported format character" — a literal SQL wildcard
    after %-formatting must be written as '%%'.  Also logs the query via
    logging.debug for consistency with the sibling functions instead of
    printing it.
    NOTE(security): still built via string interpolation — vulnerable to
    SQL injection.
    """
    sql = '''SELECT * FROM users WHERE username like '%s%%' or email like '%s%%';''' % (name, name)
    logging.debug(sql)
    return select_one_result(sql)
def encrption(dstr):
    """Return the hex MD5 digest of *dstr*.

    Accepts text or bytes; text is UTF-8 encoded first, which also makes
    this work on Python 3 where hashlib requires bytes (the original
    raised TypeError there).
    NOTE(security): MD5 is unsuitable for password storage; prefer a
    salted KDF (bcrypt/scrypt/PBKDF2).
    """
    if isinstance(dstr, str):
        dstr = dstr.encode('utf-8')
    return hashlib.md5(dstr).hexdigest()
def authentication(username, password):
    """Check *username*/*password* against the users table.

    Hashes the cleartext password and delegates to login_check; returns
    whatever login_check returns (the matching row or None).
    The original printed the password hash to stdout — that credential
    leak is replaced with a non-sensitive debug log line.
    """
    md5passwd = encrption(password)
    logging.debug('authentication attempt for user %s', username)
    return login_check(username, md5passwd)
def cleanup():
    # NOTE(review): close_db is neither defined nor imported in this module,
    # so calling cleanup() raises NameError — confirm where close_db should
    # come from (likely the models module).
    close_db()
|
# -*- coding: utf-8 -*-
# Sample data: magician names and an (initially empty) results list.
focus = ['yura', 'kolia', 'vasia']
r = []
def show_magicians(focus):
    """Print every magician name in *focus*, one per line."""
    for magician in focus:
        print(magician)
def make_great(focus, r):
    """Append a 'Great'-suffixed copy of each name in *focus* to *r* (in
    place), then print the accumulated list."""
    r.extend(name + 'Great' for name in focus)
    print(r)
f2 = ['sdf', 'sdfsdsdf']
gg = []
make_great(f2, gg)
show_magicians(f2)  # the source list is unchanged
show_magicians(gg[:])  # the list that received the extra suffix (printed from a copy)
# function that appends items to a list
ab = []
def kk(tt, ab):
    """Append every element of *tt* to the list *ab* (in place)."""
    for element in tt:
        ab.append(element)
ff = ['abra', 'sdf',]
kk(ff, ab)  # created a function that appends to a list
print(ab)

ddd = {}
ddd['1'] = 'yura'  # quick check of populating a dictionary
print(ddd)
|
# Generated by Django 2.2.4 on 2019-11-04 08:08
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Replaces the project's "members" relation with four explicit
    # one-to-one member slots.
    # NOTE(review): member1 is non-nullable yet has default=None — applying
    # this against a table with existing Project rows will fail; confirm
    # whether null=True (as implied by blank=True on the others) was intended.

    dependencies = [
        ('accounts', '0012_student_lock'),
        ('application', '0012_auto_20191103_0407'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='project',
            name='members',
        ),
        migrations.AddField(
            model_name='project',
            name='member1',
            field=models.OneToOneField(default=None, on_delete=django.db.models.deletion.DO_NOTHING, related_name='member1', to='accounts.Student'),
        ),
        migrations.AddField(
            model_name='project',
            name='member2',
            field=models.OneToOneField(blank=True, default=None, on_delete=django.db.models.deletion.DO_NOTHING, related_name='member2', to='accounts.Student'),
        ),
        migrations.AddField(
            model_name='project',
            name='member3',
            field=models.OneToOneField(blank=True, default=None, on_delete=django.db.models.deletion.DO_NOTHING, related_name='member3', to='accounts.Student'),
        ),
        migrations.AddField(
            model_name='project',
            name='member4',
            field=models.OneToOneField(blank=True, default=None, on_delete=django.db.models.deletion.DO_NOTHING, related_name='member4', to='accounts.Student'),
        ),
    ]
|
from collections import namedtuple
import json
class Config(object):
    """Value object holding connection settings for an InfluxDB instance."""

    def __init__(self, influx_host, influx_port, influx_username, influx_password, influx_database):
        # Store each connection parameter verbatim on the instance.
        settings = dict(
            influx_host=influx_host,
            influx_port=influx_port,
            influx_username=influx_username,
            influx_password=influx_password,
            influx_database=influx_database,
        )
        for attr, value in settings.items():
            setattr(self, attr, value)
from flask import Flask, escape, request, jsonify
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
api = Api(app)
# NOTE(security): database credentials are hard-coded in source; move them to
# configuration/environment variables before sharing or deploying this code.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://tmp_hello:ceshiren.com@182.92.129.158/tmp123?charset=utf8mb4'
db = SQLAlchemy(app)
@app.route('/users')
def hello():
    """
    Flask return-type handling:
    Response object: returned as-is.
    string: a Response object is created automatically; status is 200 OK, type text/html.
    dict: converted with jsonify and returned.
    tuple: (response, status), (response, headers) or (response, status, headers).
        status overrides the status code; headers is a list or dict used to fill
        the response headers.
    A WSGI-compliant callable is converted into a response object automatically.
    :return: greeting string built from the ?name= query parameter
    """
    name = request.args.get("name", "World3344")
    return f'Hello, {escape(name)}!'
    # return ("tmp123", 404)
    # return "OK"
    # return jsonify([1, 2, 3])
    # return [1, 2, 3]
@app.route('/post/<int:post_id>')
def show_post(post_id):
    """Show the post with the given id; the id is an integer."""
    body = 'Post %d' % post_id
    return body
@app.route('/projects/')
def projects():
    """Placeholder page for the projects listing."""
    page_text = 'The projects page'
    return page_text
@app.route('/about')
def about():
    """Placeholder page for the about section."""
    page_text = 'The about page'
    return page_text
@app.route('/abc/123/<int:tmp>', methods=['get', 'post'])
def way(tmp):
    """Echo the captured integer path segment to stdout; return a placeholder."""
    print(tmp)
    body = 'The about page33'
    return body
if __name__ == "__main__":
    # Run the Flask development server (debug mode: auto-reload + debugger).
    app.run(debug=True)
|
# Day 16 code 1
# Convert a list of tuples into a dictionary, grouping scores by student.
list_1 = [("Vijay kumar", 93), ("Vikram kumar", 94), ("Vivek dutta", 95),
          ("Vivek kumar", 96), ("Vivek choudhary", 97)]
dict_1 = {}
for student, score in list_1:
    # Create the score list on first sight of a student, then append.
    if student not in dict_1:
        dict_1[student] = []
    dict_1[student].append(score)
print(dict_1)
def gcd(a, b):
    """Return the greatest common divisor of *a* and *b*.

    Uses Euclid's algorithm (O(log min(a, b))) instead of the original
    O(min(a, b)) trial-division scan.  Also well-defined when an argument
    is 0 (the original left its result variable unbound and raised
    UnboundLocalError) and for negative inputs (absolute values are used).
    """
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    return a
from scrapy.spider import BaseSpider
class CulinaryFruitsSpider(BaseSpider):
    """Spider that visits Wikipedia food-list pages.

    NOTE(review): BaseSpider is long deprecated; modern Scrapy code should
    subclass scrapy.Spider instead — confirm the installed Scrapy version
    before migrating.
    """

    name = 'culinary_fruits'
    domain_name = 'wikipedia.org'
    # De-duplicated: the original listed List_of_breads three times, which
    # only wastes requests (Scrapy filters duplicates, but the intent is one
    # entry per page).
    start_urls = [
        'http://en.wikipedia.org/wiki/List_of_culinary_fruits',
        'http://en.wikipedia.org/wiki/List_of_edible_seeds',
        'http://en.wikipedia.org/wiki/List_of_pasta',
        'http://en.wikipedia.org/wiki/List_of_breads',
        'http://en.wikipedia.org/wiki/List_of_meat_animals',
    ]

    def parse(self, response):
        # Placeholder callback: real parsing is not implemented yet.
        print("test1")
# Old-style Scrapy expects a module-level SPIDER instance.
SPIDER=CulinaryFruitsSpider()
|
"""
.. module: lemur
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from lemur import factory
from lemur.users.views import mod as users_bp
from lemur.roles.views import mod as roles_bp
from lemur.auth.views import mod as auth_bp
from lemur.domains.views import mod as domains_bp
from lemur.destinations.views import mod as destinations_bp
from lemur.authorities.views import mod as authorities_bp
from lemur.certificates.views import mod as certificates_bp
from lemur.defaults.views import mod as defaults_bp
from lemur.plugins.views import mod as plugins_bp
from lemur.notifications.views import mod as notifications_bp
from lemur.sources.views import mod as sources_bp
# All blueprints composing the lemur application; registered by the
# application factory in create_app().
LEMUR_BLUEPRINTS = (
    users_bp,
    roles_bp,
    auth_bp,
    domains_bp,
    destinations_bp,
    authorities_bp,
    certificates_bp,
    defaults_bp,
    plugins_bp,
    notifications_bp,
    sources_bp
)
def create_app(config=None):
    """Application factory: build and configure the lemur Flask app.

    :param config: optional configuration forwarded to the shared factory
    :return: the configured Flask application
    """
    app = factory.create_app(app_name=__name__, blueprints=LEMUR_BLUEPRINTS, config=config)
    configure_hook(app)
    return app
def configure_hook(app):
    """Attach CORS and error-handling hooks to the application.

    :param app: the Flask application being configured
    :return: None
    """
    # flask.ext.* namespace imports were removed in Flask 1.0; import the
    # extension package directly.
    from flask_principal import PermissionDenied
    from flask import jsonify

    from lemur.decorators import crossdomain

    if app.config.get('CORS'):
        @app.after_request
        @crossdomain(origin=u"http://localhost:3000", methods=['PUT', 'HEAD', 'GET', 'POST', 'OPTIONS', 'DELETE'])
        def after(response):
            return response

    @app.errorhandler(PermissionDenied)
    def handle_invalid_usage(error):
        # The original assigned status_code on a plain dict, which raises
        # AttributeError at request time; build a real response object.
        response = jsonify(message='You are not allow to access this resource')
        response.status_code = 403
        return response
|
'''
Created on 29.10.2018
@author: Henry Fock, Lia Kirsch
'''
import csv
import os
import sys
import sqlite3
import tempfile
import getBoundingBox
# add local modules folder
# file_path = os.path.join('..', 'Python_Modules')
# sys.path.append(file_path)
from osgeo import ogr, osr
import click
import pandas as pd
import pygeoj
import shapefile
import ogr2ogr
# asking for parameters in command line
# asking for parameters in command line
@click.command()
@click.option('--path', prompt="File path", help='Path to file')
@click.option('--name', prompt="File name", help="File name with extension")
@click.option('--clear', '-c', default=False, is_flag=True, help='Clear screen before showing results')
def main(path, name, clear):
    """CLI entry point: print the convex hull, or the error message on failure."""
    res = getPolygon(name, path)
    if clear:
        click.clear()
    # res is (hull, None) on success or (None, error_message) on failure.
    if res[0] is not None:
        click.echo(res[0])
    else:
        click.echo(res[1])
def getPolygon(name, path):
    """returns the Convex Hull of supported Datatypes and standards in WGS84.

    supported data: Shapefile (.shp), GeoJson (.json/.geojson), GeoTIFF (.tif), netCDF (.nc), GeoPackage (.gpkg), all ISO19xxx standardised formats and CSV on the web
    @param path Path to the file
    @param name name of the file with extension
    @returns a tuple (hull, error): on success hull is the convex hull as a list of WGS84 points ([(long, lat), ...], None); on failure (None, error_message)
    """
    # connect name and path to file
    filepath = os.path.join(path, name)
    # get file extension
    filename, file_extension = os.path.splitext(filepath)

    #################################################################
    def shapefileCase(filepath):
        """Method for extracting the convex hull of a shapefile
        @param filepath Full path to shapefile
        @returns a tuple where in first place is the convex hull as an array of point tuples
        """
        try:
            dataset = ogr.Open(filepath)
            layer = dataset.GetLayer()
            crs = layer.GetSpatialRef()
            # Resolve the EPSG code from the projected or geographic CRS.
            if crs.IsProjected() == 1:
                crs = int(crs.GetAttrValue("PROJCS|AUTHORITY", 1))
            elif crs.IsGeographic() == 1:
                crs = int(crs.GetAttrValue("GEOGCS|AUTHORITY", 1))
            else:
                return (None, "CRS is missing!")
            myshp = open(filepath, "rb")
            sf = shapefile.Reader(shp=myshp)
            shapes = sf.shapes()
            pointList = []
            for shape in shapes:
                for points in shape.points:
                    pointList.append(
                        tuple(CRSTransform(points[1], points[0], crs)))
        # any failure (unreadable file, bad geometry) is reported generically
        except:
            return (None, "File Error!")
        else:  # runs only when no error occurred
            return (convex_hull(pointList), None)

    def jsonCase(filepath):
        """Method for extracting the convex hull of a GeoJSON file
        @param filepath Full path to GeoJSON file
        @returns a tuple where in first place is the convex hull as an array of point tuples
        """
        try:
            myGeojson = pygeoj.load(filepath=filepath)
            pointList = []
            for features in myGeojson:
                pointList.extend(
                    list(map(tuple, getCoordinatesFromGeoJson(features.geometry.coordinates))))
            return (convex_hull(pointList), None)
        # errors
        except:
            return (None, "File Error!")

    def ncTiffCase(name, path):
        """Method for extracting the convex hull of a netCDF/GeoTIFF file
        @param name file name with extension
        @param path path to the file
        @returns a tuple where in first place is the convex hull as an array of point tuples
        """
        bbox = getBoundingBox.getBoundingBox(name, path)
        if bbox[1] is None:
            # Expand the bounding box [minX, minY, maxX, maxY] to its corners.
            return ([(bbox[0][0], bbox[0][1]), (bbox[0][0], bbox[0][3]), (bbox[0][2], bbox[0][3]), (bbox[0][2], bbox[0][1])], None)
        else:
            return bbox

    def geoPackageCase(filepath):
        """Method for extracting the convex hull of a GeoPackage
        @param filepath Full path to GeoPackage
        @returns a tuple where in first place is the convex hull as an array of point tuples
        @see https://stackoverflow.com/questions/35945437/python-gdal-projection-conversion-from-wgs84-to-nztm2000-is-not-correct
        """
        try:
            conn = sqlite3.connect(filepath)
            c = conn.cursor()
            c.execute(""" SELECT min_x, min_y, max_x, max_y, srs_id
                        FROM gpkg_contents
                        WHERE NOT srs_id = 4327 OR srs_id = 4328
                    """)
            row = c.fetchall()
            bboxes = []
            if row is None:
                raise LookupError(
                    "No valid data detected (check if CRS maybe depracted)")
            # keep only rows where every bbox value and the srs id are set
            for line in row:
                if not any(x is None for x in line):
                    bboxes.append(
                        [line[0], line[1], line[2], line[3], line[4]])
            if bboxes == []:
                raise LookupError(
                    "No valid data detected! Coordinates in gpkg_contents are invalid")
            # transform all four corners of every bbox to WGS84
            wgs84points = []
            for bbox in bboxes:
                wgs84points.append(CRSTransform(bbox[0], bbox[1], bbox[4]))
                wgs84points.append(CRSTransform(bbox[0], bbox[3], bbox[4]))
                wgs84points.append(CRSTransform(bbox[2], bbox[1], bbox[4]))
                wgs84points.append(CRSTransform(bbox[2], bbox[3], bbox[4]))
            return(convex_hull(wgs84points), None)
        except LookupError as e:
            return(None, e)
        except:
            return (None, "File Error!")
        finally:
            # close the connection even if connect() itself failed
            try:
                conn.close()
            except:
                pass

    def csvCase(filepath):
        """Method for extracting the convex hull of a CSV file
        @param filepath Full path to CSV file
        @returns a tuple where in first place is the convex hull as an array of point tuples
        @see https://stackoverflow.com/questions/16503560/read-specific-columns-from-a-csv-file-with-csv-module
        """
        try:  # finding the correct columns for latitude and longitude
            csvfile = open(filepath)
            head = csv.reader(csvfile, delimiter=' ', quotechar='|')
            # get the headline and convert, if possible, ';' to ','
            # and separate each word divided by a ',' into an array
            header = next(head)[0].replace(";", ",").split(",")

            # searching for valid names for latitude and longitude
            def getLatLonCrs(header):
                """get the correct names of the columns holding the coordinates
                @param header Header of the CSV
                @returns (lon, lat, crs) where lon, lat, crs are the column names
                """
                lng = None
                lat = None
                crs = None
                for t in header:
                    if t.lower() in ("longitude", "lon", "long", "lng"):
                        lng = t
                    if t.lower() in ("latitude", "lat"):
                        lat = t
                    if t.lower() in ("crs", "srs", "coordinate reference systems", "reference systems", "spatial reference system"):
                        crs = t
                return (lng, lat, crs)

            lng, lat, crs = getLatLonCrs(header)
            # if a required column name is missing, raise and report it below
            if(lat is None or lng is None):
                raise ValueError(
                    "pleas rename latitude an longitude: latitude/lat, longitude/lon/lng")
            if(crs is None):
                raise ValueError(
                    "please provide the coordinate reference systems. Name: crs/srs/coordinate reference systems/reference systems")
        # errors
        except ValueError as e:
            return (None, e)
        except:
            return (None, "File Error!")
        # if no error occurred
        else:
            try:
                df = pd.read_csv(filepath, header=0)
                # get all coordinates from found columns
                latitudes = df[lng].tolist()
                longitudes = df[lat].tolist()
                srs = df[crs].tolist()
            # in case the words are separated by a ';' instead of a comma
            except KeyError:
                try:
                    # tell the reader that the separator is a ';'
                    df = pd.read_csv(filepath, header=0, sep=';')
                    # get all coordinates from found columns
                    latitudes = df[lng].tolist()
                    longitudes = df[lat].tolist()
                    srs = df[crs].tolist()
                # the csv is not valid
                except KeyError:
                    return (None, "Pleas seperate your data with either ',' or ';'!")
            # errors
            except:
                return (None, "File Error: File not found or check if your csv file is valid to 'csv on the web'")
            pointList = []
            # NOTE(review): crs[0] is the first CHARACTER of the crs column
            # name, not a CRS value; srs[i] (the per-row CRS read above, which
            # is otherwise unused) looks intended — confirm.
            for i in range(len((latitudes))):
                pointList.append(
                    tuple(CRSTransform(longitudes[i], latitudes[i], crs[0])))
            return (convex_hull(pointList), None)

    def ISOCase(filepath):
        """Method for extracting the convex hull of an ISO19xxx standardized file
        @param filepath Full path to ISO19xxx standardized file
        @returns a tuple where in first place is the convex hull as an array of point tuples
        """
        try:
            # @see https://gis.stackexchange.com/questions/39080/using-ogr2ogr-to-convert-gml-to-shapefile-in-python
            # convert the gml file to a GeoJSON file in a temp directory
            with tempfile.TemporaryDirectory() as tmpdirname:
                curDir = os.getcwd()
                os.chdir(tmpdirname)
                ogr2ogr.main(["", "-f", "GeoJSON", "output.json", filepath])
                res = ogr2ogrCase("output.json")
                os.chdir(curDir)
                return res
        # errors
        except:
            return (None, "file not found or your gml/xml/kml data is not valid")

    def ogr2ogrCase(filepath):
        """Method for extracting the crs of a valid GeoJSON file\n
        @param filepath Full path to GeoJSON
        @returns the convex hull (WGS84 point tuples) in first place of a tuple, or (None, error)
        """
        try:
            myGeojson = pygeoj.load(filepath=filepath)
            crs = myGeojson.crs['properties']['name']
            # only EPSG-coded reference systems are supported
            if crs.find('EPSG') != -1:
                crs = int(crs.split(':')[-1])
            else:
                return (None, "No reference system found or not as EPSG Code")
            pointList = []
            for features in myGeojson:
                # the coordinates are extracted from the GeoJSON and transformed into wgs84 coordinates
                pointList.extend(list(map(lambda point: CRSTransform(point[1], point[0], crs), list(
                    map(tuple, getCoordinatesFromGeoJson(features.geometry.coordinates))))))
            return (convex_hull(pointList), None)
        except:
            return (None, "File Error!")

    #################################################################
    # dispatch on the file extension
    # shapefile handling
    if file_extension == ".shp":
        return shapefileCase(filepath)
    # geojson handling
    elif file_extension in (".json", ".geojson"):
        return jsonCase(filepath)
    # netCDF and GeoTiff handling
    elif file_extension in (".tif", ".tiff", ".nc"):
        return ncTiffCase(name, path)
    # GeoPackage handling
    elif file_extension == ".gpkg":
        return geoPackageCase(filepath)
    # csv or csv formatted textfile handling (csv on the web)
    elif file_extension in (".csv", ".txt"):
        return csvCase(filepath)
    # gml handling
    elif file_extension in (".gml", ".xml", ".kml"):
        return ISOCase(filepath)
    # if the extension has not been implemented yet or won't be supported
    else:
        return (None, "type %s not yet supported" % file_extension)
#################################################################
def CRSTransform(Long, Lat, refsys):
    """Transform one (Long, Lat) point from EPSG:*refsys* to WGS84 (EPSG:4326).

    NOTE(review): GDAL 3+ changed the default axis order of geographic CRS
    (lat/long vs long/lat); confirm point ordering against the installed
    GDAL version.
    """
    # Coordinate Reference System (CRS)
    SourceEPSG = refsys
    TargetEPSG = 4326
    source = osr.SpatialReference()
    source.ImportFromEPSG(SourceEPSG)
    target = osr.SpatialReference()
    target.ImportFromEPSG(TargetEPSG)
    transform = osr.CoordinateTransformation(source, target)
    point = ogr.CreateGeometryFromWkt("POINT (%s %s)" % (Long, Lat))
    point.Transform(transform)
    return (point.GetX(), point.GetY())
def convex_hull(points):
    """Computes the convex hull of a set of 2D points.

    Input: an iterable sequence of (x, y) pairs representing the points.
    Output: a list of vertices of the convex hull in counter-clockwise order,
    starting from the vertex with the lexicographically smallest coordinates.
    Implements Andrew's monotone chain algorithm. O(n log n) complexity.
    @see https://en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
    """
    # Deduplicate and sort lexicographically (tuple comparison does this).
    pts = sorted(set(points))

    # Trivial case: zero or one distinct point.
    if len(pts) <= 1:
        return pts

    def turn(o, a, b):
        # z-component of the 3D cross product of OA and OB: positive for a
        # counter-clockwise turn, negative for clockwise, zero if collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def half_hull(ordered):
        # Build one monotone chain, dropping clockwise/collinear turns.
        chain = []
        for p in ordered:
            while len(chain) > 1 and turn(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        # Drop the last point: it is the first point of the other chain.
        chain.pop()
        return chain

    # Lower hull (left to right) followed by upper hull (right to left).
    return half_hull(pts) + half_hull(reversed(pts))
def getCoordinatesFromGeoJson(listInList):
    """Flatten an arbitrarily nested GeoJSON coordinate array into a flat
    list of coordinate pairs.

    A pair is recognised as a list whose first element is not a list; when
    the first entry examined is a bare scalar, *listInList* itself is a
    single coordinate pair.
    """
    flat = []
    for entry in listInList:
        if type(entry) is not list:
            # listInList itself is one coordinate pair such as [x, y].
            flat.append(listInList)
            break
        if type(entry[0]) is list:
            # Nested one level deeper: recurse into this sub-array.
            flat.extend(getCoordinatesFromGeoJson(entry))
        else:
            # entry is a pair, so listInList is already a list of pairs.
            flat.extend(listInList)
            break
    return flat
# Script entry point; click handles argument parsing and prompting.
if __name__ == '__main__':
    main()
|
import os
import motor
from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.web import Application, RequestHandler
class ExampleHandler(RequestHandler):
    """Minimal handler that reads one document from MongoDB via Motor."""

    async def get(self):
        # "database" is injected through Application settings in get_app().
        document = await self.settings["database"].things.find_one({"thing": "THING"})
        self.finish(document["value"])
class BaseHandlerTestCase(AsyncHTTPTestCase):
    """AsyncHTTPTestCase wiring a Motor MongoDB client into the app settings."""

    def get_app(self):
        # MONGODB_URI overrides the default local test database.
        mongodb_uri = os.environ.get("MONGODB_URI", "mongodb://localhost/motor-test-example")
        self.motor_client = motor.MotorClient(mongodb_uri)
        self.database = self.motor_client.get_database()
        return Application(
            [
                ("/handler", ExampleHandler)
            ],
            database=self.database)

    async def async_teardown(self):
        # Drop the test database so every test run starts from a clean slate.
        await self.motor_client.drop_database(self.database)

    def tearDown(self):
        # Run the async cleanup on the test IOLoop before it is torn down.
        self.io_loop.run_sync(self.async_teardown)
        self.motor_client.close()
        del self.database
        del self.motor_client
        super().tearDown()

    async def async_fetch(self, path):
        # Convenience helper: fetch *path* without raising on HTTP errors.
        return await AsyncHTTPClient().fetch(self.get_url(path), raise_error=False)
|
from django.db import models
from datetime import datetime
from django.utils.safestring import mark_safe
from django.urls import reverse
from django.contrib.auth.models import User
from apps.data.models import Cliente, Moneda
from django.core.validators import MaxValueValidator, MinValueValidator
from decimal import Decimal
class FamiliaRepuesto(models.Model):
    """Spare-part family (category); nombre must be unique."""
    nombre = models.CharField(max_length=50, unique=True, null=False)
    descripcion = models.CharField(max_length=100, null=True)

    def __str__(self):
        return "{}".format(self.nombre)

    class Meta:
        verbose_name = "Familia de repuesto"
        verbose_name_plural = "Familias de repuestos"
class Repuesto(models.Model):
    """Spare part: unique code/name, a positive cost, and an optional family."""
    codigo = models.CharField(max_length=20, unique=True)
    nombre = models.CharField(max_length=50, unique=True)
    costo = models.DecimalField(default=0, max_digits=10, decimal_places=2, validators=[MinValueValidator(Decimal('0.01'))], help_text='')
    # SET_NULL so deleting a family keeps its parts (family becomes NULL).
    familia = models.ForeignKey(FamiliaRepuesto, null=True, blank=False, on_delete=models.SET_NULL)

    def __str__(self):
        return "{} | {}".format(self.familia, self.nombre)

    class Meta:
        verbose_name = "Repuesto"
        verbose_name_plural = "Repuestos"
class Presupuesto(models.Model):
    """Customer quote, with its currency, exchange rate and approval file."""
    usuario = models.ForeignKey(User, default=1, null=True, on_delete=models.SET_DEFAULT)
    asunto = models.CharField(max_length=100)
    fecha = models.DateField(auto_now_add=True)
    cliente = models.ForeignKey(Cliente, null=False, blank=False, on_delete=models.CASCADE)
    moneda = models.ForeignKey(Moneda, null=False, blank=False, default=1, on_delete=models.CASCADE)
    tasa_cambio = models.DecimalField(default=1.00, max_digits=10, decimal_places=2)
    oc_autorizacion = models.FileField("OC - Aprobacion", null=True, blank=True, upload_to='oc_autoriz/presupuestos/', help_text="Se agrega al obtener la OC o aprobación del cliente.")
    facturado = models.BooleanField(default=False)
    #filesPresup = FilerFileField(null=True, blank=True, related_name="files_presup", on_delete=models.SET_NULL)

    def costo_total(self):
        # Sum of all line items; a truthy costo_custom overrides the part cost.
        # NOTE(review): costo_custom is NOT multiplied by tasa_cambio while the
        # catalogue cost is — confirm custom costs are already in the quote's
        # currency.  Returns the string "0.00"-style total (0 when no lines).
        lineas = LineaPresupuesto.objects.filter(presupuesto=self.id)
        if (len(lineas) == 0):
            return 0
        valorFinal = 0
        for val in lineas:
            valorFinal += val.costo_custom * val.cantidad if val.costo_custom else val.repuesto.costo * val.cantidad * self.tasa_cambio
        return "%.02f"%(valorFinal)

    @mark_safe
    def fileLink(self):
        # Admin list-display link to the uploaded OC/approval file;
        # renders an empty anchor when no file is attached.
        if self.oc_autorizacion:
            return mark_safe('<a href="{}" target="_blank">Enlace</a>'.format(self.oc_autorizacion.url))
        else:
            return mark_safe('<a href="''"></a>')

    @property
    def presup_aprobado(self):
        # A quote counts as approved once the OC/approval file is uploaded.
        aprobado = True if self.oc_autorizacion else False
        return aprobado

    fileLink.allow_tags = True
    fileLink.short_description = "Link OC-Aprob"

    def user_names(self):
        # Full name of the owning user, for admin display.
        return '{} {}'.format(self.usuario.first_name, self.usuario.last_name)

    user_names.short_description = "Usuario"

    def __str__(self):
        return "%s"%(self.asunto)

    class Meta:
        verbose_name = "Presupuesto"
        verbose_name_plural = "Presupuestos"
class LineaPresupuesto(models.Model):
    """One line item of a quote: a part, a quantity and an optional custom cost."""
    presupuesto = models.ForeignKey(Presupuesto, null=True, blank=False, on_delete=models.CASCADE)
    repuesto = models.ForeignKey(Repuesto, null=False, blank=False, on_delete=models.CASCADE)
    cantidad = models.PositiveIntegerField(default=1)
    # 0 means "no custom cost": costo_total() falls back to the part cost.
    costo_custom = models.DecimalField(default=0, max_digits=10, decimal_places=2, validators=[MinValueValidator(Decimal('0.00'))], help_text='')

    @mark_safe
    def presup_link(self):
        # Admin link to the parent quote's change page.
        link = reverse("admin:reparaciones_presupuesto_change", args=[self.presupuesto.id])  # model name has to be lowercase
        return mark_safe('<a href="{}">{}</a>'.format(link, self.presupuesto.asunto))

    presup_link.allow_tags = True
    presup_link.short_description = "Presupuesto"

    def __str__(self):
        return "%s x %s"%(self.cantidad, self.repuesto)

    class Meta:
        verbose_name = 'Items en presupuesto'
        verbose_name_plural = 'Items en presupuestos'
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-05-27 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: renames the bandcamp album field and adds a URL field.

    dependencies = [
        ('journal', '0014_auto_20170527_1932'),
    ]

    operations = [
        migrations.RenameField(
            model_name='issue',
            old_name='bandcamp_album',
            new_name='bandcamp_album_number',
        ),
        migrations.AddField(
            model_name='issue',
            name='bandcamp_album_url',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
import os
import time
import unittest
# 创建测试套件
import HTMLTestRunner_PY3
from Autoapitest import app
from Autoapitest.script.test_tpshop_login import TpshopLogin
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
suite = unittest.TestSuite()
# Add the test cases to the suite.
suite.addTest(unittest.makeSuite(TpshopLogin))
# Define the directory and file name of the HTML test report.
report_path = BASE_DIR + "/report/tpshop_login.html"
with open(report_path, mode='wb') as f:
    runner = HTMLTestRunner_PY3.HTMLTestRunner(f, verbosity=1, title='tpshop登陆接口功能测试', description='需要联网')
    runner.run(suite)
# created by Ryan Spies
# 3/5/2015
# Python 2.7
# Description: parse through UHG .xml parameter files and create a new file with
# ordinates rounded to whole numbers (CHPS version currently leaves multiple decimals)
import os
import glob
# Work relative to the repository root (two levels up from this script).
os.chdir("../..")
maindir = os.getcwd()
###############################################################################
RFC = 'APRFC_FY2015'
working_dir = maindir + os.sep + 'Calibration_NWS' + os.sep + RFC + os.sep + 'Working_Calib_Files' + os.sep + 'UH' + os.sep
original_files = maindir + os.sep + 'Calibration_NWS' + os.sep + RFC + os.sep + 'Working_Calib_Files' + os.sep + 'UH_original_decimals' + os.sep
###############################################################################
# Rewrite each UHG .xml: round the ordinate value in every '<row A' line.
for each in glob.glob(original_files+'/*.xml'):
    print os.path.basename(each)
    file_name = os.path.basename(each)
    read_orig = open(each,'r')
    new_file =open(working_dir + file_name,'w')
    for line in read_orig:
        if '<row A' not in line:
            new_file.write(line)
        else:
            # The ordinate is the first quoted value on the line.
            sep = line.split('"')
            # NOTE(review): round(x, -1) rounds to the nearest TEN, but the
            # header comment says ordinates should be whole numbers — confirm
            # whether ndigits should be 0 here.
            rounded = round(float(sep[1]),-1)
            new_file.write(sep[0] + '"' + "%.1f" % rounded + '"' + sep[2])
    read_orig.close()
    new_file.close()
print 'Completed!'
|
"""
a -> b -> b
b -> a
a -> a
(b -> b) -> b
(a, b -> b)
fail
a allocated by side1
b allocated by side2
(b, a) -> normalized -> (a, b)
once type is temporary nominal type!
a = (b -> b)
a -> a = b
how fail?
subst other side's once vars with its own side's once vars,
if cannot(not principal), fail
otherwise, resume type mapping from once types to (fresh types and slot types),
use infer to prune.
Note that slot types can reach external scope.
!!!so fresh should fail, when unifying!!!
!!!checking forall, always making once types!!!
Another side?
arg : forall a . a -> a
f: ( int -> int ) -> int
f arg
once types become nominal types, so just ignore the substitution.
but we should return a type for representation the instantiated type:
arg_inst : int -> int, {a : int}
so do not make once vars only alive during one `unify`!!
we make a "onced" type from each HS before unifying, which returns 2 maps.
each map is from (fresh + var) to once.
after unifying, the maps are updated.
var -> once, and once = fresh => don't update
once != fresh, substitute
fresh -> once, and once = fresh => update
unify end, once vars get out of dated, if found use again, failed.
f : forall a. ?var
g : forall b. b
Kf : {var: v1}
Kg : {b: v2}
f' = g' is v1 = v2
[v1 = v2]
for f: [var = v2], okay
for g: [v1 = b], okay
??
arg : forall a . a -> a
f: ( int -> int ) -> int
f arg
Karg : {a : v}
Kf : {}
[a = int -> int]
a = int => int -> int = int failed
arg : forall a . a -> a
f: ( forall b. b -> b ) -> var
Karg : {a : v1}
Kf : {b : v2, var: v3}
[v1 = v2, v1 = v3]
for f: [v1 = b, v1 = var], okay, but var assign to b, we should then check the scope of (forall b)!
for arg: [a = v2], okay
how to get inst of arg?, arg_inst [a -> a][a = v2] = v2 -> v2, however, no one will use v2,
so after tc, v2 is a top level forall vars!
forall v2.
program
?? program allows outer resolution?
program allows only current module resolution.
by what keyword?
module Mine with forall
module Main
great design!
(module A with forall
let x = fun x -> x) : forall a. {x : a}
auto: forall a. a -> a
choose: forall a. a -> a -> a
f: (forall a. a -> a) -> int
arg : int -> int
?? f arg should fail
!! don't inst automatically!
(arg of f) >= arg
f: (forall a. a -> a) -> int
arg : (forall b. [b] -> [b])
?? f arg? should fail
f : {a: v1}
arg: {b: v2}
v1 = [v2]
so, fresh var `a` in f is like nominal type, shouldn't expand.
fresh var `v` in arg is like var type, can expand.
tcs.rigid_inst(f) unify (tcs.flexible_inst(arg) -> var), nice
besides, when unifying, forall can only do rigid instance.
user can manually flexibly expand types, such as argument type!
nice!
So, now, the problem gets raised: how should we implement rigid once type and flexible once type?
1. checking scope exceeding problem after unifying
2. assign a kind to once to indicate it is rigid or flexible?
if rigid, okay for assigned to a type var
okay for making once equality to another once type
fail for other cases
if flexible,
okay to perform like a normal type var
So flexible once type is only a normal type var?
Why once type? To guarantee any checking with once type fail after leaving the given forall scope.
f : forall a. a -> var
arg : a' -> a', (a' is a type var)
Kf: {a: v1, var: v3}, v1 is rigid
Karg: {a': v2}, v2 is flexible
v1, v2, v3 is closed in this scope
[v1 = v2, v3 = v2]
for arg: {a' : v1'=infer(v1)},
a' is tvar, unify a' v1'
for f:
a is fresh, {a: v2, var: v2}
f: forall a. a -> var
g : a' -> b'
f unify g
Kf : {a: v1, var: v2}
Kg : {a': v3, b': v4}
!!! v1, v2, v3, v4 is closed!
[v1 = v3, v2 = v4] (Efg -------------------------------------------------------------------
|
g unify h |
h : z' -> z' |
Kg : {a': v5, b': v6} |
Kh: {z': v7}
[v5 = v7, v6 = v7]
for g: {a': v7, b' = v7}
infer(f) trigger Kf pruning |
{v7: v3, v7: v4} |
[v1 = v3, v2 = v4] -> [v7 = v3 = v4 = v1 = v2] |
{a: v1, var: v2} -> {a: v7, var: v7}, var = a |
|
Hence, we have to keep the Klhs and Krhs and (unification or K values) |
|
any change to one of {var, 'a, 'b} will trigger the update of ------------------------------
Kf, Kg and Efg
Map from a type variable to its related relations:
key = var
value = infer(var), [(Kf, Kg, Efg)]
So, when a var changed, mark the relation map as "dirty",
we will update all "dirty" ones.
class LocalTypeTopology:
Klhs : [(T, T)] # the left is from outer universe, the right is from LocalTypeTopology's universe
Krhs : [(T, T)]
maintainer : TCState # the LocalTypeTopology's universe
# if every type var is `maintainer` has a reference, we called this topo "solved"
if solved, we should delete this local type topology:
we delete reference of current topo from [each[0].topo_maintainers for each in Klhs]
and [each[0].topo_maintainers for each in Krhs]
can outer type var get used by inner universe directly?
f1 : forall a. a -> var
f2 : a' -> 'b
blablabla,
when auto unifying:
forall a. a -> a
forall b. b -> b
both sides turn out to be rigid.
but, to make this fail:
forall a. a -> a
forall b. b -> int
only rigid is not enough,
we try to assign one rigid initially.
f: forall a. a -> a
g: forall b. b -> var
Kf: {a: v1}
Kg: {b: v2, var: var'}
[v1 = v2, v1 = var']
Kf: {a: v2}
Kg: {b: v2}
so, finally(after the inner universe ended),
for K {a: int, b: int}, we judge it as type check failure according to
"fresh variable can and can only bidirectionally equal to another fresh variable in another side."
unfresh type:
forall a. a -> var
Forall([a], Arrow([a], var))
when freshing,
Arrow([v], Unfresh([a], [v], var))
unify Unfresh(reals, paths, var1), var2
paths, _ = [path_infer(path)[0] for path in paths]
var_path1, var1 = path_infer(var1)
var_path2, var2 = path_infer(var2_path)
""" |
"""Helper for file handling."""
import os
def cleanup(config_dir):
    """Remove temporary stdout/stderr files as well as the daemon socket.

    Args:
        config_dir: Directory holding pueue's runtime files
            (``pueue.stdout``, ``pueue.stderr``, ``pueue.sock``).
    """
    # BUG FIX: this previously called the private helper ``os._exists``,
    # which tests whether a *name* exists in the ``os`` module globals --
    # not whether a file exists on disk -- so the stdout/stderr files were
    # never removed.  ``os.path.exists`` (already used for the socket below)
    # is the correct check.
    for name in ('pueue.stdout', 'pueue.stderr', 'pueue.sock'):
        path = os.path.join(config_dir, name)
        if os.path.exists(path):
            os.remove(path)
def get_descriptor_output(descriptor, key, handler=None):
    """Read everything from *descriptor*, tolerating invalid UTF-8 lines.

    Lines that decode cleanly are kept as-is.  A line that raises a
    UnicodeDecodeError is replaced by an error message (and logged through
    *handler*'s logger when one is given).  The collected text is returned
    with every newline followed by four spaces, ready for indented display.
    """
    collected = ''
    current = 'stub'
    # readline() returns '' exactly at end-of-file.
    while current != '':
        try:
            current = descriptor.readline()
        except UnicodeDecodeError:
            # Note: *current* keeps its previous value, so the loop continues.
            message = "Error while decoding output of process {}".format(key)
            if handler:
                handler.logger.error("{} with command {}".format(
                    message, handler.queue[key]['command']))
            collected += message + '\n'
        else:
            collected += current
    return collected.replace('\n', '\n    ')
|
from __future__ import print_function
import ttfw_idf
EXPECT_TIMEOUT = 20
@ttfw_idf.idf_example_test(env_tag='Example_RMT_IR_PROTOCOLS')
def test_examples_rmt_ir_protocols(env, extra_data):
    """Run the RMT IR-protocols example once per protocol app config.

    The NEC and RC5 configurations expect identical console output, so the
    previously duplicated start/expect/close sequence is factored into a
    single loop over the config names.
    """
    for app_config_name in ('nec', 'rc5'):
        dut = env.get_dut('ir_protocols_example', 'examples/peripherals/rmt/ir_protocols',
                          app_config_name=app_config_name)
        print("Using binary path: {}".format(dut.app.binary_path))
        dut.start_app()
        dut.expect("example: Send command 0x20 to address 0x10", timeout=EXPECT_TIMEOUT)
        dut.expect("Scan Code --- addr: 0x0010 cmd: 0x0020", timeout=EXPECT_TIMEOUT)
        dut.expect("Scan Code (repeat) --- addr: 0x0010 cmd: 0x0020", timeout=EXPECT_TIMEOUT)
        env.close_dut(dut.name)
if __name__ == '__main__':
    test_examples_rmt_ir_protocols()
|
"""
ANTECEDENTES
-------------
La primera version de numeros a letras se construyó en 1993 en clipper para ayudar a mi amigo Paulino para transferir las notas de los alumnos de la Escuela de Enseñanza de Automoción del EA a la Direccion de Enseñanza que pedia las mismas, aparte de en su valor numerico, como cadena de caracteres. Se desarrollo en Clipper (summer 87) en una función que gestionaba enteros del 1 al 10 y decimales de .01 al .99
La siguiente versión fue como diversión en 2006. Se desarrollo en Alaska-SW, una adaptacion de Clipper con Orientación a Objetos. La función solo gestionaba enteros hasta Quintillones (36 cifras).
En 2019 estudiando Python (3) la he adaptado para este lenguaje, que es la que presento.
@ JuanPi
1993-2019
IDEAS PARA SU CONSTRUCCIÓN
-------------------------
Dentro del leguaje (español de España) cuando expresamos numeros, lo primero que he intentado es agrupar las expresiones comunes y encontrar excepciones lo que revierte complejidad a las funciones.
Algunas excepciones:
- El cero, como caracter numerico unico no lo he querido tratar, saldrá un blanco.Es facil tratarlo si se desea
- El uno, puede expresarse como (un / uno) ej.: un millon..., trescientos treinta y uno
- Hay otras mas ...
He agrupado las expresiones comunes en FUNCIONES:
Vereis que todas tienen un diccionario local para devolver expresiones.
1º d0a9(...) representa los valores de 0 a 9 (ojo: el cero no como unico valor, 1001)
2º d10a20(...) representa los valores de 10 a 20 . Porqué el 20 y no hasta el 19, otra excepción: Hay una diferencia en el lenguaje con las decenas que en el caso del 20 es veinte y 2? venti..., que no pasa con el resto de decenas, 30:treinta, 32: treinta y dos. Observad también que se contruyen de manera diferente hay una "y" por medio.
22 -> ventidos
42 -> cuarenta y dos
3º d21a99(...) representa las decenas que son constantes excepto el caso de las veintenas como que comentó anteriormente
4º d100a999(...) representan a las centenas, que presenta una excepción 100:cien y 10?:ciento.. (similar a lo comentado con el 20), la excepción 100 se trata en la propia función.
5º miles(...) Representa a los miles y a los ...illones (millones, billones,...), fijaros que en lenguaje hasta que no se alcanza 7 cifras, 13 cifras, 19 cifras ... no pasamos a los ...illones, esta función es algo mas compleja de entender que las anteriores. Solo llego hasta los Quintillones, pero a partir de esta función podemos llegar hasta donde queramos.
ESTRATEGIA DE RESOLUCIÓN:
Recibida cualquier cadena, hay al principio algunas expresiones que validan la cadena para convertirla en una cadena numerica. Quitar comas, puntos, caracteres alfabeticos ...
Validada la cadena numerica se trata de pasarla a una lista de la siguiente forma:
1235698733 -> ["1","235","698","733"]
Entraremos en un FOR analizando cada uno de los elementos de la lista, discriminaremos si nos llego una cadena de 3,2 ó 1 caracteres, pues tienen analisis diferentes.
Caso 3 caracteres: Se va analizando (> 100, mayor de 20, entre 10 y 20 ...) definido donde entra, dentro del mismo se va partiendo y analizando. Construyendo su expresión con las funciones.
"""
#*****************************
# FUNCIONES BASICAS DE APOYO
#*****************************
###############
def d0a9(pipo, lfin):
    """Return the Spanish word for a single digit character.

    Arguments:
        pipo -- a one-character numeric string ('0'..'9')
        lfin -- True when the digit is NOT in the final group of the
                number; selects the apocopated 'un ' for '1'
                (e.g. "un mil...") instead of the final form 'uno'.

    Most words carry a trailing space; '0' deliberately maps to '' (the
    word "cero" is never produced).  Returns None for non-digit input.
    """
    if pipo == '1':
        # '1' is irregular: 'un ' mid-number, bare 'uno' at the end.
        return 'un ' if lfin else 'uno'
    words = {
        '0': '',
        '2': 'dos ',
        '3': 'tres ',
        '4': 'cuatro ',
        '5': 'cinco ',
        '6': 'seis ',
        '7': 'siete ',
        '8': 'ocho ',
        '9': 'nueve ',
    }
    return words.get(pipo)
#################
def d10a20(pipo):
    """Return the Spanish word for the values '10' through '20'.

    Arguments:
        pipo -- a two-character numeric string between '10' and '20'.

    Returns the word with a trailing space, or None for any other input.
    """
    teens = {
        '10': 'diez ',
        '11': 'once ',
        '12': 'doce ',
        '13': 'trece ',
        '14': 'catorce ',
        '15': 'quince ',
        '16': 'dieciseis ',
        '17': 'diecisiete ',
        '18': 'dieciocho ',
        '19': 'diecinueve ',
        '20': 'veinte ',
    }
    return teens.get(pipo)
##################
def d21a99(pipo):
    """Return the Spanish word for a tens digit ('2'..'9').

    Arguments:
        pipo -- a one-character string holding the tens digit.

    The twenties are irregular: '2' yields the prefix 'venti' with no
    trailing space, so the unit word is glued on ("ventidos"), while
    every other tens word carries a trailing space ("cuarenta ").
    Returns None for any other input.
    """
    tens = {
        '2': 'venti',
        '3': 'treinta ',
        '4': 'cuarenta ',
        '5': 'cincuenta ',
        '6': 'sesenta ',
        '7': 'setenta ',
        '8': 'ochenta ',
        '9': 'noventa ',
    }
    return tens.get(pipo)
###################
def d100a999(pipo):
    """Return the Spanish hundreds word for a 3-digit group.

    Arguments:
        pipo -- a numeric string; only its first character (the hundreds
                digit) matters, except for the exact string '100'.

    '100' is the irregular standalone 'cien ', while 101..199 use
    'ciento ...'.  Returns None when the first character is not a
    hundreds digit ('1'..'9').
    """
    # Exception: exactly one hundred is "cien", not "ciento".
    if pipo == '100':
        return 'cien '
    hundreds = {
        '1': 'ciento ',
        '2': 'doscientos ',
        '3': 'trescientos ',
        '4': 'cuatrocientos ',
        '5': 'quinientos ',
        '6': 'seiscientos ',
        '7': 'setecientos ',
        '8': 'ochocientos ',
        '9': 'novecientos ',
    }
    return hundreds.get(pipo[0])
###################
def miles(lista, n, clt):
    """Return the wording plus scale word (mil / ...illones) for one group.

    Arguments:
        lista -- the number split into 3-digit string groups, most
                 significant first (e.g. ["1", "234", "568"]).
        n     -- 1-based index of the group currently being rendered.
        clt   -- the already-built words for that group.

    The group's offset from the right end of the list decides the scale
    word.  "mil" groups drop the word entirely when the group is zero and
    drop *clt* when the group is exactly one ("mil", never "un mil").
    The ...illones groups inspect this group together with the previous
    one to choose singular vs plural.  Supports up to quintillions;
    beyond that a warning string is returned.
    """
    # NOTE: list indices start at zero; *n* is 1-based.
    pipo = len(lista) - n  # number of groups to the right of this one
    # This group's digits; for ...illones also prepend the previous group.
    nbl1 = lista[n - 1]
    nbl2 = (lista[n - 2] + lista[n - 1]) if n > 1 else lista[n - 1]
    aval = {0: '',
            1: 'mil ',
            2: ['millones, ', 'millón, '],
            3: 'mil ',
            4: ['billones, ', 'billón, '],
            5: 'mil ',
            6: ['trillones, ', 'trillón, '],
            7: 'mil ',
            8: ['cuatrillones, ', 'cuatrillón, '],
            9: 'mil ',
            10: ['quintillones, ', 'quintillón, '],
            11: 'mil '}
    if pipo not in aval:
        return "SUPERADO MAXIMO PERMITIDO ..."
    if pipo == 0:
        # Units group: no scale word at all.
        return clt + aval[pipo]
    if pipo % 2 == 1:
        # Odd offsets are the "mil" groups.
        if int(nbl1) == 0:
            return aval[0]  # empty string: skip "mil" for an all-zero group
        if int(nbl1) == 1:
            return aval[pipo]  # "mil", not "un mil"
        return clt + aval[pipo]
    # Even offsets >= 2 are the ...illones groups.
    if int(nbl2) == 0:
        return aval[0]
    if int(nbl2) == 1:
        return clt + aval[pipo][1]  # singular: "... millón, "
    return clt + aval[pipo][0]      # plural:  "... millones, "
#*******************
# FUNCION PRINCIPAL
#*******************
def dnumachar(pipo):
    """Convert a numeric string into its Spanish wording.

    Arguments:
        pipo -- a string of digits; it may contain blanks, thousands
                commas and one decimal point.

    Returns the capitalized Spanish expression for the INTEGER part of
    the string (the decimal part is split off but not rendered), or
    None when the integer part contains non-numeric characters.
    A value of zero deliberately produces an empty string.
    """
    # VALIDATION
    # drop any blanks
    pipo = pipo.replace(" ", "")
    # split integer and decimal part, if there is one
    npos = pipo.find(".")
    if npos > 0: # there is an integer and a decimal part
        int_pipo = pipo[:npos]
        dec_pipo = pipo[npos+1:]
    else: # integer part only
        int_pipo = pipo
        dec_pipo = ""
    # strip the thousands-separator commas, if any
    int_pipo = int_pipo.replace(",", "")
    dec_pipo = dec_pipo.replace(",", "")
    # make sure no character other than digits slipped through
    if not int_pipo.isdigit():
        print("no valen ni letras, ni caracteres especiales")
        return None
    # strip leading zeros
    for i in int_pipo:
        if i == "0":
            int_pipo = int_pipo[1:]
        else:
            break
    # ONLY the integer part is processed.
    # build a list of 3-digit groups, e.g.: 1234568 -> ["1","234","568"]
    npos = len(int_pipo)
    ctmp = ""
    l_int_pipo = []
    for i in range(1,npos+1) :
        # accumulate the trailing characters while removing them from int_pipo
        ctmp = int_pipo[-1] + ctmp
        int_pipo = int_pipo[:-1]
        # after walking 3 positions, prepend the group to the list
        if i%3 == 0:
            l_int_pipo = [ctmp] + l_int_pipo
            ctmp = ""
    # fewer than 3 positions left over: prepend them as the leading group
    if len(ctmp) > 0:
        l_int_pipo = [ctmp] + l_int_pipo
    # print("Lista de trabajo:",l_int_pipo)
    clet = "" # will hold the wording of the numeric string
    npos = 0 # current position within the list
    # WALK THE LIST (each group is rendered independently, e.g. "12", "234", ...)
    for i in l_int_pipo:
        clettmp = ""
        npos += 1
        # detect the last group of the list (drives the "1" -> un/uno choice)
        if (len(l_int_pipo) - npos) == 0 :
            lkey = False
        else:
            lkey = True
        # the group holds 3 numeric characters
        if len(i) == 3:
            """ grupo de 3 elementos completo
            se repasa la casuistica de grupo completa:
            (a,b,c)>=100 -> d100a999
            (b,c)>20 -> d21a99+d9a0
            (b,c)>=10 y (b,c)>=10 -> d10a20
            (c) resto -> d1a9
            """
            if int(i) >= 100:
                clettmp += d100a999(i)
            if int(i[1:]) > 20:
                clettmp += d21a99(i[1:2]) # pass the 2nd position of the numeric string
                # no 'y' within 21..29, but yes for the other tens (30,40,...)
                if int(i[1:]) < 30 or int(i[1:])%10 == 0 :
                    clettmp += ""
                else:
                    clettmp += "y "
                clettmp += d0a9(i[-1], lkey) # pass the last position of the numeric string
            elif int(i[1:]) >= 10 and int(i[1:]) <= 20:
                clettmp += d10a20(i[1:]) # pass the last two positions of the numeric string
            else:
                clettmp += d0a9(i[-1], lkey)
        # the group holds 2 numeric characters
        elif len(i) == 2:
            # analogous to the 3-character analysis, but with 2 characters
            if int(i) > 20:
                clettmp += d21a99(i[0]) # pass the first position
                # skip the 'y' within 21..29 and for multiples of 10
                if int(i) < 30 or int(i)%10 == 0 :
                    clettmp += ""
                else:
                    clettmp += "y "
                clettmp += d0a9(i[-1], lkey)
            elif int(i) >= 10 and int(i) <= 20:
                clettmp += d10a20(i)
            else:
                clettmp += d0a9(i, lkey)
        # the group holds a single numeric character
        elif len(i) == 1:
            clettmp += d0a9(i, lkey)
        # add the thousands / ...illones scale word for this group
        clet += miles(l_int_pipo,npos,clettmp)
    # rebuild the number with thousands commas for visual verification
    csal = ""
    npos = 0
    for i in l_int_pipo:
        npos += 1
        if npos > 1:
            csal += (","+i)
        else:
            csal += i
    # what you will see on screen
    print(csal)
    return clet.capitalize()
|
import collections
import random
import re
from collections import Counter
from itertools import islice
import nltk
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', -1)
from time import time
import re
import string
import os
import emoji
from pprint import pprint
import collections
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
sns.set(font_scale=1.3)
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
import gensim
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
import warnings
warnings.filterwarnings('ignore')
np.random.seed(37)
arabic_diacritics = re.compile(""" ّ | # Tashdid
َ | # Fatha
ً | # Tanwin Fath
ُ | # Damma
ٌ | # Tanwin Damm
ِ | # Kasra
ٍ | # Tanwin Kasr
ْ | # Sukun
ـ # Tatwil/Kashida
""", re.VERBOSE)
def remove_diacritics(text):
    """Strip Arabic diacritical marks from *text*.

    Uses the module-level ``arabic_diacritics`` pattern; non-string input
    is coerced with ``str()`` first.
    """
    return arabic_diacritics.sub('', str(text))
def remove_repeating_char(text):
    """Collapse every run of a repeated character to exactly two occurrences.

    Example: "heeeey" -> "heey"; doubled characters are left unchanged.
    (A variant keeping only a single occurrence would substitute r'\1'.)
    """
    run_pattern = r'(.)\1+'  # one character followed by at least one repeat
    return re.sub(run_pattern, r'\1\1', text)
def process_text(text, grams=False):
    """Normalize *text* and tokenize it.

    Diacritics are removed and character runs collapsed, then the text is
    split on whitespace.  With grams=True the result additionally contains
    the joined 2-grams produced by window(), followed by the tokens.
    """
    cleaned = remove_repeating_char(remove_diacritics(text))
    tokens = cleaned.split()
    if grams is False:
        return tokens
    joined_grams = [' '.join(pair) for pair in window(tokens)]
    return joined_grams + tokens
def window(words_seq, n=2):
    """Yield a sliding window of width *n* over *words_seq*.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...

    Sequences shorter than *n* yield nothing.  The second summary line of
    the original was a stray bare-string statement (a no-op expression);
    it now lives inside the docstring.
    """
    it = iter(words_seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        yield result
def document_features(document, corpus_features):
    """Build a bag-of-words presence feature dict for *document*.

    For every word in *corpus_features*, the key 'contains(<word>)' maps
    to True when the word occurs in *document*.
    """
    present = set(document)
    return {'contains({})'.format(word): word in present
            for word in corpus_features}
# ---------------------------------------------------------------------------
# Load the labelled tweet corpus.  Each line of the positive/negative files
# is normalized, stop-word filtered and accumulated into the feature, text
# and label lists used by the models below.
# ---------------------------------------------------------------------------
all_features = list()
texts = list()
data_labels = list()
n_grams_flag = False
min_freq = 13
# Hoisted out of the per-line loops: the stop-word set never changes, and
# stopwords.words() was previously re-read for every single tweet.
stop_words = set(stopwords.words('arabic'))
print('read data ...')

def _load_tweets(path, label):
    """Append the processed lines of *path* to the corpus with *label*."""
    # BUG FIX: the files were opened without ever being closed; a context
    # manager guarantees the handle is released.
    with open(path, encoding="utf8") as tweet_file:
        for line in tweet_file:
            text_features = process_text(line, grams=n_grams_flag)
            text_features = [w for w in text_features if w not in stop_words]
            all_features.extend(text_features)
            texts.append(text_features)
            data_labels.append(label)

# read positive data, then negative data (same order as before)
_load_tweets("C:/Users/121/.spyder-py3/text_mining/positive_tweets.txt", 'pos')
_load_tweets("C:/Users/121/.spyder-py3/text_mining/negative_tweets.txt", 'neg')
|
import json
import requests
import sys
sys.path.insert(1, '../')
URI_OBTAIN_DOCUMENT_INFORMATION = 'http://librairy.linkeddata.es/solr/tbfy/select?q=id:'
URI_INFERENCES = 'https://librairy.linkeddata.es/jrc-en-model/inferences'
URI_LIST_TOPICS = 'http://librairy.linkeddata.es/jrc-en-model/topics'
URI_DOCUMENT_RANK = 'http://localhost:8081/ranks'
URI_SEND_NEW_DOCUMENT_TOPICS = 'http://localhost:8983/solr/documents/update?commit=true'
results_rank_feedback = "results_rank_test_27052021.json"
rank_body_template = "rank_template.json"
def readJsonFile(jsonFileName):
    """Load and return the parsed contents of the JSON file *jsonFileName*."""
    with open(jsonFileName) as handle:
        return json.load(handle)
def loadJsonRankTemplate(id):
    """Fill the rank request template with *id* and return the ranked ids.

    Reads the JSON request body from ``rank_body_template``, injects the
    reference document id, and queries the rank service.
    """
    body = readJsonFile(rank_body_template)
    body['reference']['document']['id'] = id
    return getDocumentRank(body)
# sends a POST request
def postRequestApi(uri, body):
    """POST *body* (JSON-serialized) to *uri* and return the raw response.

    Uses the demo credentials and a JSON content-type header.
    """
    json_headers = {'content-type': 'application/json'}
    payload = json.dumps(body)
    return requests.post(uri, data=payload, auth=('demo', '2019'), headers=json_headers)
# fetches the document information from the database
def getDocumentInformationApi(id):
    """Query Solr for the document(s) matching *id* and return the doc list."""
    query_uri = URI_OBTAIN_DOCUMENT_INFORMATION + id
    response = requests.get(query_uri).json()
    return response['response']['docs']
def getDocumentRank(body):
    """POST *body* to the rank service and return the ranked document ids."""
    raw = postRequestApi(URI_DOCUMENT_RANK, body).content
    ranked_docs = json.loads(raw.decode('ISO-8859-1'))['response']['docs']
    return [doc['id'] for doc in ranked_docs]
def getDocumentListInformation(rank_list):
    """Return the stored information for every document id in *rank_list*.

    Joins the ids into a single Solr OR-query so only one request is made.
    """
    query = ' OR id:'.join(str(doc_id) for doc_id in rank_list)
    return list(getDocumentInformationApi(query))
def obtainArrayDocTopics(document):
    """Return the three topic levels of *document* as a list.

    A missing 'topics2_t' entry is first defaulted to "0" via use_response_2.
    """
    normalized = use_response_2(document)
    return [normalized[key] for key in ('topics0_t', 'topics1_t', 'topics2_t')]
# model: position or relevance -> which feedback model was executed
# topic_main_doc: the main-document topic that was modified
# action: put -> a topic was added next to the main document's topic,
#         move -> the main document's topic was moved next to a topic of
#         one of the other hierarchies
def createRulesCSV(model, topic_main_doc, action, topic_rank_doc, index_move=-1 ):
    """Append one feedback rule line to the rules CSV log.

    The file is opened with a context manager so the handle is closed even
    when the write raises (the previous explicit close leaked on error).
    """
    rule = "rule: "+model+","+str(index_move)+","+topic_main_doc+","+action+","+topic_rank_doc
    with open('rules_tender_27052021.csv', 'a') as file:
        file.write('\n'+rule)
def createRulesCSVAllRelatedTopics(topic_main_doc, related_topics ):
    """Append a rule linking *topic_main_doc* to all of its related topics.

    Context-managed open so the handle is closed even if the write raises.
    """
    rule = "rule: "+topic_main_doc+","+str(related_topics)
    with open('rules_tender_related_topics_27052021.csv', 'a') as file:
        file.write('\n'+rule)
def rollbackNewTopicsIndex():
    # NOTE(review): posts the integer 0 as the request body to the Solr
    # update endpoint (commit=true) -- presumably intended as a no-op
    # commit that discards pending topic updates; confirm the body value
    # is intentional.
    postRequestApi(URI_SEND_NEW_DOCUMENT_TOPICS, 0)
# if topic 2 does not exist in the document, add it with a value of "0"
def use_response_2(doc):
    """Return *doc*, ensuring the 'topics2_t' key exists (default "0").

    The original used a bare ``except`` (which would also swallow
    unrelated errors); only a missing key can occur here, and
    ``dict.setdefault`` expresses that directly.
    """
    doc.setdefault('topics2_t', "0")
    return doc
# if topic 1 does not exist in the document, add it with a value of "0"
def use_response_1(doc):
    """Return *doc*, ensuring the 'topics1_t' key exists (default "0").

    Replaces the original bare ``except`` (which swallowed every error)
    with ``dict.setdefault`` -- only a missing key can occur here.
    """
    doc.setdefault('topics1_t', "0")
    return doc
if __name__ == '__main__':
    # BUG FIX: readJsonFile() requires a file name; calling it with no
    # arguments raised a TypeError.  Print the rank request template.
    print(readJsonFile(rank_body_template))
|
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from scipy.misc import toimage
def make_generator(images, z_vectors):
def _generator():
for image, z_vec in zip(images, z_vectors):
yield image, z_vec
return _generator
def parse_fn(images, z_vec):
# handle image
# reshape to 28x28x1
batch_x = tf.reshape(images, shape=[28, 28, 1])
# rescale images to -1 ~ 1
batch_x = batch_x * 2.0 - 1.0
# create z vector
batch_z = z_vec
return batch_x, batch_z
def input_fn(mnist_images, train_z_vectors, batch_size):
dataset = tf.data.Dataset.from_generator(make_generator(mnist_images, train_z_vectors), (tf.float32, tf.float32))
dataset = dataset.map(parse_fn)
dataset = dataset.prefetch(batch_size)
dataset = dataset.shuffle(buffer_size=10000) # randomize
dataset = dataset.batch(batch_size)
return dataset
def save_result(val_out, val_block_size, image_fn, color_mode):
def preprocess(img):
img = ((img + 1.0) * 127.5).astype(np.uint8)
return img
preprocesed = preprocess(val_out)
final_image = np.array([])
single_row = np.array([])
for b in range(val_out.shape[0]):
# concat image into a row
if single_row.size == 0:
single_row = preprocesed[b, :, :, :]
else:
single_row = np.concatenate((single_row, preprocesed[b, :, :, :]), axis=1)
# concat image row to final_image
if (b+1) % val_block_size == 0:
if final_image.size == 0:
final_image = single_row
else:
final_image = np.concatenate((final_image, single_row), axis=0)
# reset single row
single_row = np.array([])
if final_image.shape[2] == 1:
final_image = np.squeeze(final_image, axis=2)
toimage(final_image, mode=color_mode).save(image_fn)
class Generator(tfe.Network):
def __init__(self):
super(Generator, self).__init__(name='generator')
self.n_f = 512
self.n_k = 4
# input z vector is [None, 100]
self.dense1 = self.track_layer(tf.layers.Dense(3 * 3 * self.n_f))
self.conv2 = self.track_layer(tf.layers.Conv2DTranspose(self.n_f // 2, 3, 2, 'valid'))
self.bn2 = self.track_layer(tf.layers.BatchNormalization())
self.conv3 = self.track_layer(tf.layers.Conv2DTranspose(self.n_f // 4, self.n_k, 2, 'same'))
self.bn3 = self.track_layer(tf.layers.BatchNormalization())
self.conv4 = self.track_layer(tf.layers.Conv2DTranspose(1, self.n_k, 2, 'same'))
return
def call(self, inputs, is_training):
with tf.variable_scope('generator'):
x = tf.nn.leaky_relu(tf.reshape(self.dense1(inputs), shape=[-1, 3, 3, self.n_f]))
x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=is_training))
x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=is_training))
x = tf.tanh(self.conv4(x))
return x
class Discriminator(tfe.Network):
def __init__(self):
super(Discriminator, self).__init__(name='discriminator')
self.n_f = 64
self.n_k = 4
# input image is [-1, 28, 28, 1]
self.conv1 = self.track_layer(tf.layers.Conv2D(self.n_f, self.n_k, 2, 'same'))
self.conv2 = self.track_layer(tf.layers.Conv2D(self.n_f * 2, self.n_k, 2, 'same'))
self.bn2 = self.track_layer(tf.layers.BatchNormalization())
self.conv3 = self.track_layer(tf.layers.Conv2D(self.n_f * 4, self.n_k, 2, 'same'))
self.bn3 = self.track_layer(tf.layers.BatchNormalization())
self.flatten4 = self.track_layer(tf.layers.Flatten())
self.dense4 = self.track_layer(tf.layers.Dense(1))
return
def call(self, inputs, is_training):
with tf.variable_scope('discriminator'):
x = tf.nn.leaky_relu(self.conv1(inputs))
x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=is_training))
x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=is_training))
x = self.dense4(self.flatten4(x))
return x
# shorten sigmoid cross entropy loss calculation
def celoss_ones(logits, smooth=0.0):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
labels=tf.ones_like(logits)*(1.0 - smooth)))
def celoss_zeros(logits, smooth=0.0):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
labels=tf.zeros_like(logits)*(1.0 - smooth)))
def d_loss_fn(d_real_logits, d_fake_logits):
d_loss_real = celoss_ones(d_real_logits, smooth=0.1)
d_loss_fake = celoss_zeros(d_fake_logits, smooth=0.0)
loss = d_loss_real + d_loss_fake
return loss
def g_loss_fn(d_fake_logits):
return celoss_ones(d_fake_logits, smooth=0.1)
def grad_fn(generator, discriminator, input_images, z_vectors, is_training):
with tfe.GradientTape(persistent=True) as g:
# run generator first
fake_image = generator(z_vectors, is_training)
# run discriminator
real_image = input_images
d_real_logits = discriminator(real_image, is_training)
d_fake_logits = discriminator(fake_image, is_training)
# compute losses
d_loss = d_loss_fn(d_real_logits, d_fake_logits)
g_loss = g_loss_fn(d_fake_logits)
# compute gradients
d_grad = g.gradient(d_loss, discriminator.variables)
g_grad = g.gradient(g_loss, generator.variables)
return d_loss, g_loss, d_grad, g_grad
def run_generator(generator, z_dim, val_block_size):
val_size = val_block_size * val_block_size
# validation results at every epoch
val_z = np.random.uniform(-1, 1, size=(val_size, z_dim))
fake_image = generator(val_z, is_training=False)
return fake_image
def train(generator, discriminator):
# configure directories
assets_dir = './assets'
models_dir = './models'
if not os.path.isdir(assets_dir):
os.makedirs(assets_dir)
if not os.path.isdir(models_dir):
os.makedirs(models_dir)
# hyper parameters
params = {
'z_dim': 100,
'epochs': 30,
'batch_size': 128,
'learning_rate': 0.0002,
'beta1': 0.5,
'val_block_size': 10,
'assets_dir': assets_dir,
'models_dir': models_dir,
}
# load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset('mnist')
train_images = mnist.train.images # Returns np.array
train_z_vectors = np.random.uniform(-1.0, 1.0, size=(train_images.shape[0], params['z_dim']))
# prepare saver
checkpoint_directory = params['models_dir']
checkpoint_prefix = os.path.join(checkpoint_directory, 'v3-ckpt')
# prepare train data
train_dataset = input_fn(train_images, train_z_vectors, params['batch_size'])
# prepare optimizer
d_optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'], beta1=params['beta1'])
g_optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate'], beta1=params['beta1'])
# for loss savings
d_losses = []
g_losses = []
# initiate saver (only need generator) - run dummy process first
_ = generator(tf.zeros([1, params['z_dim']], dtype=tf.float32), is_training=False)
generator_saver = tfe.Saver(var_list=generator.variables)
# is_training flag
is_training = True
for e in range(params['epochs']):
epoch_d_loss_avg = tfe.metrics.Mean()
epoch_g_loss_avg = tfe.metrics.Mean()
for mnist_images, z_vector in train_dataset:
# Optimize the model
d_loss, g_loss, d_grad, g_grad = grad_fn(generator, discriminator, mnist_images, z_vector, is_training)
# apply gradient via pre-defined optimizer
d_optimizer.apply_gradients(zip(d_grad, discriminator.variables))
g_optimizer.apply_gradients(zip(g_grad, generator.variables),
global_step=tf.train.get_or_create_global_step())
# save loss
epoch_d_loss_avg(d_loss)
epoch_g_loss_avg(g_loss)
d_losses.append(epoch_d_loss_avg.result())
g_losses.append(epoch_g_loss_avg.result())
print('Epoch {:03d}: d_loss: {:.3f}, g_loss: {:.3f}'.format(e + 1,
epoch_d_loss_avg.result(),
epoch_g_loss_avg.result()))
# save every epoch's generator images
fake_image = run_generator(generator, params['z_dim'], params['val_block_size'])
image_fn = os.path.join(params['assets_dir'], 'gan-val-e{:03d}.png'.format(e + 1))
save_result(fake_image.numpy(), params['val_block_size'], image_fn, color_mode='L')
# save model
generator_saver.save(checkpoint_prefix, global_step=tf.train.get_or_create_global_step())
# visualize losses
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle('Training Losses')
axes[0].set_ylabel('d_oss', fontsize=14)
axes[0].plot(d_losses)
axes[1].set_ylabel('g_loss', fontsize=14)
axes[1].plot(g_losses)
plt.show()
return
def predict(generator):
    """Restore the newest generator checkpoint and write a sample grid to gen_out.png."""
    latent_dim = 100
    # One dummy forward pass so the generator's variables exist before restoring.
    _ = generator(tf.zeros([1, latent_dim], dtype=tf.float32), is_training=False)
    # Random latent vectors for a grid of grid_size x grid_size samples.
    grid_size = 10
    sample_count = grid_size * grid_size
    test_z = np.random.uniform(-1, 1, size=(sample_count, latent_dim))
    # Restore generator weights from the latest checkpoint in ./models.
    models_dir = './models'
    saver = tfe.Saver(var_list=generator.variables)
    saver.restore(tf.train.latest_checkpoint(models_dir))
    generated = generator(test_z, is_training=False)
    save_result(generated.numpy(), grid_size, 'gen_out.png', color_mode='L')
    return
def main():
    """Entry point: train a GAN, then sample images from the trained generator."""
    # Eager mode must be switched on before any tensors are built.
    tfe.enable_eager_execution()
    generator = Generator()
    discriminator = Discriminator()
    # Stage 1: adversarial training.
    train(generator, discriminator)
    # Stage 2: generate a sample grid with the trained generator.
    predict(generator)
    return


if __name__ == '__main__':
    main()
|
from multiprocessing import Process
import time
import redis
def redis_process_no_tran(key):
    """Run a plain (non-transactional) APPEND against the test redis server."""
    host = "192.168.64.4"
    port = 6379
    pool = redis.ConnectionPool(host=host,
                                port=port,
                                db=0,
                                decode_responses=True,
                                encoding='utf-8',
                                socket_connect_timeout=2)
    client = redis.StrictRedis(connection_pool=pool)
    # Server-side debug knobs used by the experiment.
    client.config_set(name="bunnydeny", value="no")
    client.client_setname(name="_debug_")
    client.set(name=key, value="val")
    print("redis client process(no tran): prepare to append command")
    client.append(key=key, value="123")
    print("redis client process(no tran):: append command return")
    time.sleep(1)
def redis_process_with_tran(key):
    """Run the APPEND inside a MULTI/EXEC transaction via a pipeline."""
    host = "192.168.64.4"
    port = 6379
    pool = redis.ConnectionPool(host=host,
                                port=port,
                                db=0,
                                decode_responses=True,
                                encoding='utf-8',
                                socket_connect_timeout=2)
    client = redis.StrictRedis(connection_pool=pool)
    # Server-side debug knobs used by the experiment.
    client.config_set(name="bunnydeny", value="no")
    client.client_setname(name="_debug_")
    client.set(name=key, value="val")
    print("redis client process(tran): prepare to append command")
    # transaction=True queues the commands and sends them as MULTI/EXEC.
    pipe = client.pipeline(transaction=True)
    pipe.append(key=key, value="123")
    pipe.get(name=key+"_other")
    pipe.execute()
    print("redis client process(tran):: tran(with append command) command return")
    time.sleep(1)
def _main():
    """Spawn a redis client process, let it run briefly, then kill it mid-flight."""
    key = "abc"
    # Switch the target to exercise the non-transactional variant instead:
    #p = Process(target=redis_process_no_tran, args=(key,))
    worker = Process(target=redis_process_with_tran, args=(key,))
    worker.start()
    pid = worker.pid
    print(f"main process, redis process pid = {pid}")
    # Give the child a moment to reach the append, then terminate it hard.
    time.sleep(0.3)
    worker.kill()
    worker.join()


if __name__ == '__main__':
    _main()
import re
"""
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
- os.sep is the (or a most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
"""
# True division always yields a float; floor division keeps an int.
x = 5 / 5    # 1.0
y = 5 // 5   # 1
print(x, y)
from tkinter import *
# "SELECT * FROM users WHERE personal_code = {}".format(personalcode)
def personal_info_start():
    """
    Create a GUI for Personal informatie
    :return: Returns a GUI Screen
    """
    # Create screen for personal logon
    personal_info_window = Tk()
    # Screen Title
    personal_info_window.title("Persoonlijke informatie")
    # BUG FIX: `Widget(...).grid(...)` returns None, so the original stored
    # None in every variable and the Entry fields could never be read later.
    # Create each widget first, then grid it, keeping the real reference.
    name_label = Label(master=personal_info_window, text='Persoonlijke ID: ')
    name_label.grid(row=0)
    # Entry field for personal Code
    personal_id = Entry(master=personal_info_window)
    personal_id.grid(row=1)
    # Submit button to submit personal code
    # ToDo : Add command to button
    submit_button = Button(master=personal_info_window, text='submit')
    submit_button.grid(row=2)
    # Name Label for recieved code
    name_label2 = Label(master=personal_info_window, text='Ontvangen code: ')
    name_label2.grid(row=3)
    # Entry field for recieved code
    personal_code = Entry(master=personal_info_window)
    personal_code.grid(row=4)
    # Submit button to submit recieved code (was shadowing submit_button above)
    # ToDo : Add command to button
    login_button = Button(master=personal_info_window, text='login')
    login_button.grid(row=5)
    # Initialise screen
    personal_info_window.mainloop()


# Start function
personal_info_start()
|
#!/usr/bin/env /home/shbae/anaconda3/envs/work/bin/python
import glob
import sys
import os
import subprocess
# modify below three lines
# according to your directory structures
vina_path = "/home/shbae/bin/vina-1.1.2/vina"  # AutoDock Vina executable
receptors = glob.glob("./rec/xyz.pdbqt")  # receptor .pdbqt files; dock() uses the first 4 chars as the receptor id
ligands = glob.glob("./lig/com-1234.pdbqt")  # ligand .pdbqt files; basename (minus extension) names the output dir
def dock(repeat=1, use_Rg_boxsize=True):
    """Dock every ligand against every receptor with AutoDock Vina.

    For each (receptor, ligand) pair a Vina config file is written (box centre
    from the receptor's .txt; box size either from the ligand's
    "REMARK optimal box size" line or from the receptor's .txt), then Vina is
    run `repeat` times.

    :param repeat: number of independent Vina runs per pair
    :param use_Rg_boxsize: prefer the ligand's optimal box size when present
    """
    for lig_pdbqt in ligands:
        lig = os.path.basename(lig_pdbqt).split(".")[0]  # COM-XXX.pdbqt -> COM-XXX
        ligdir = os.path.dirname(lig_pdbqt)
        output_dir = os.path.join(ligdir, lig)
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        # read ligand pdbqt and get optimal box size (kept as a string)
        boxsize = None
        with open(lig_pdbqt, "r") as f:
            for line in f:
                if line.startswith("REMARK optimal box size"):
                    boxsize = line.strip().split()[4]
        for rec_pdbqt in receptors:
            rec = os.path.basename(rec_pdbqt)[:4]
            recdir = os.path.dirname(rec_pdbqt)
            config = os.path.join(recdir, rec + ".txt")
            with open(config, "r") as f:
                box_setting = {}
                for line in f:
                    c = line.strip().split()
                    box_setting[c[0]] = c[2]  # "key = value" lines; value kept as string
            docked = "%s/%s-%s-docked" % (output_dir, rec, lig)
            with open(docked + ".txt", "w") as f:
                for k in ["size_x", "size_y", "size_z"]:
                    if use_Rg_boxsize and boxsize:
                        f.write(k + " = " + boxsize + "\n")
                    else:
                        f.write(k + " = " + box_setting[k] + "\n")
                for k in ["center_x", "center_y", "center_z"]:
                    f.write(k + " = " + box_setting[k] + "\n")
            for n in range(0, repeat):
                cmd = [vina_path,
                       "--cpu", "4",
                       "--receptor", rec_pdbqt,
                       "--ligand", lig_pdbqt,
                       "--config", docked + ".txt",
                       "--out", docked + "-" + str(n + 1) + ".pdbqt",
                       "--log", docked + "-" + str(n + 1) + ".log",
                       ]
                # BUG FIX: reading stdout.readlines() and then stderr.readlines()
                # sequentially can deadlock when the unread pipe's OS buffer
                # fills; communicate() drains both concurrently. This also stops
                # the loop variable from shadowing the Popen object ("x").
                proc = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
                stdout_data, stderr_data = proc.communicate()
                out = stdout_data.decode("utf-8").splitlines(True)
                err = stderr_data.decode("utf-8").splitlines(True)
                if len(out) > 1 or len(err) > 0:
                    print("".join(out))
                    print("".join(err))


if __name__ == '__main__':
    dock(repeat=10, use_Rg_boxsize=True)
|
import unittest
import os
import sys
import shutil
from common import HTMLTestReportCN
from common.log_utils import logger
from common.email_utils import EmailUtils
# Locate test cases and the report directory relative to this file.
current_path = os.path.dirname(__file__)
case_path = os.path.join(current_path,'testcases')
# print(case_path)
html_report_path = os.path.join(current_path,'html_reports/')
logger.info('* 接口测试开始执行 *')
discover_cases = None
try:
    discover_cases = unittest.defaultTestLoader.discover(start_dir=case_path,
                                                         pattern='tests*.py')
except ImportError as e:
    logger.error('测试用例路径配置出错,不能加载测试用例')
except Exception as e:
    logger.error('系统错误,错误原因:%s'%e.__str__())
api_case_suite = unittest.TestSuite()
if discover_cases:
    api_case_suite.addTest(discover_cases)
    logger.info('加载测试用例到测试套件成功')
else:
    logger.error('加载测试用例到测试套件失败')
# unittest.main(verbosity=2,defaultTest='api_case_suite')
# Create the report directory and resolve the report file path.
html_report_path_obj = HTMLTestReportCN.ReportDirectory(html_report_path)
html_report_path_obj.create_dir('WX_API_TEST_')
html_report_file_path = HTMLTestReportCN.GlobalMsg.get_value('report_path')
# print(html_report_file_path)
logger.info('创建测试报告路径:%s'%html_report_file_path)
# BUG FIX: the report stream was opened but never closed, so the HTML file
# could still be unflushed when copied below; `with` guarantees flush+close.
with open(html_report_file_path,'wb') as html_report_file_oj:
    runner = HTMLTestReportCN.HTMLTestRunner(stream=html_report_file_oj,
                                             tester='P5P6工程师们',
                                             title='微信公众平台接口测试项目',
                                             description='实战使用')
    runner.run(api_case_suite)
email_body = '''
<h1 align="center"> 接口自动化测试报告 </h1>
<p align="center"> 详情见附件 </p>
'''
# pycharm发送邮件
# EmailUtils(email_body,html_report_file_path).send_email()
# jenkins发送邮件 (expects the target directory as argv[1])
shutil.copyfile(html_report_file_path,'%s/WX_API_TEST.html'%sys.argv[1])
# copyfile overwrites the destination without prompting
import json
import pathlib
from .models import *
from django.core.exceptions import ObjectDoesNotExist
class DbLoadError(Exception):
    """Raised when launch/tournament data cannot be loaded into the database."""

    def __init__(self, message):
        # Keep the text on the instance so str() can echo it back.
        self.message = message

    def __str__(self):
        return self.message
class PopulationConfig:
    """Attribute-style view over a population config dict.

    Reads and writes map onto the wrapped dict; the key set is closed, so
    any name that is not already a key raises AttributeError.
    """

    def __init__(self, cfg):
        # Write straight into __dict__ to bypass our own __setattr__,
        # which would reject 'cfg' as an unknown key.
        self.__dict__["cfg"] = cfg

    def __getattr__(self, name):
        cfg = self.__dict__["cfg"]
        if name in cfg:
            return cfg[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if name not in self.cfg:
            raise AttributeError(name)
        self.__dict__["cfg"][name] = value
class LaunchConfig:
    """Top-level launch configuration parsed from a JSON file."""

    def __init__(self, filename):
        with open(filename, 'r') as f:
            cfg = json.load(f)
        try:
            self.workdir = cfg["workdir"]
            self.ntournaments = cfg["ntournaments"]
            self.tournament_size = cfg["tournament_size"]
            self.start_elo = cfg["start_elo"]
            self.populations = [PopulationConfig(p) for p in cfg["populations"]]
            # Index the populations by name for direct lookup.
            self.populations_dict = {p.name: p for p in self.populations}
        except KeyError as err:
            # Surface missing keys as a domain error.
            raise DbLoadError("KeyError: {}".format(err))
class GenerationFiles:
    """Path helper for one generation's directory layout under cfg.workdir."""

    def __init__(self, cfg, generation):
        self.generation = generation
        self.workdir = pathlib.Path(cfg.workdir)
        self.generation_dir = self.workdir / "generation{0:04d}".format(generation)

    def ready_file(self):
        # Marker file: contains '1' once the generation is fully dumped.
        return self.generation_dir / 'ready'

    def populations_old_json(self):
        return self.generation_dir / 'populations_old.json'

    def populations_new_json(self):
        return self.generation_dir / 'populations_new.json'

    def strategies_dir(self):
        return self.generation_dir / 'strategies'

    def strategy_rsf(self, strategy):
        return self.strategies_dir() / '{}.rsf'.format(strategy)

    def tournaments_dir(self):
        return self.generation_dir / 'tournaments'

    def tournament_name(self, ts, tn):
        # ts = tournament series, tn = tournament number within the series.
        return 'TN{0}T{1}'.format(ts, tn)

    def tournament_json(self, ts, tn):
        return self.tournaments_dir() / '{}.json'.format(self.tournament_name(ts, tn))

    def tournament_rgf(self, ts, tn):
        # BUG FIX: was `self.tournaments_name(...)` — no such attribute, so
        # this method always raised AttributeError.
        return self.tournaments_dir() / '{}.rgf'.format(self.tournament_name(ts, tn))
def is_generation_dumped(cfg, generation):
    """Return True when the generation directory exists and its 'ready' marker reads '1'."""
    gf = GenerationFiles(cfg, generation)
    gen_dir = gf.generation_dir
    marker = gf.ready_file()
    if not (gen_dir.exists() and gen_dir.is_dir() and marker.exists()):
        return False
    with open(str(marker), 'r') as f:
        return f.read() == '1'
def get_or_create_population(name):
    """Fetch the Population row by name, creating it on first sight."""
    print("population", name)
    try:
        return Population.objects.get(name=name)
    except ObjectDoesNotExist:
        population = Population()
        population.name = name
        population.save()
        return population
def get_or_create_player(population, plcfg, gex):
    """Fetch the Player row named in plcfg, creating it at generation gex if new."""
    print("player", plcfg["name"])
    try:
        return Player.objects.get(name=plcfg["name"])
    except ObjectDoesNotExist:
        player = Player()
        player.population = population
        player.name = plcfg["name"]
        # A brand-new player spans exactly the generation it first appears in.
        player.g_start = gex
        player.g_end = gex
        # Latest entry of the ratings history is the current elo.
        player.elo = plcfg["ratings_history"][-1]
        player.save()
        return player
def get_or_create_tournament(generation, name):
    """Fetch the Tournament row for (name, generation), creating it if absent."""
    print("tournament", "generation{:04d}/{}".format(generation, name))
    try:
        return Tournament.objects.get(name=name, generation=generation)
    except ObjectDoesNotExist:
        tournament = Tournament()
        tournament.name = name
        tournament.generation = generation
        tournament.save()
        return tournament
def load_generation(cfg, generation):
    """Import one dumped generation (players + tournament results) into the db."""
    gf = GenerationFiles(cfg, generation)
    # load players
    with open(str(gf.populations_old_json()), 'r') as f:
        pn = json.load(f)
    for pcfg in pn["populations"]:
        p = get_or_create_population(pcfg["name"])
        for plcfg in pcfg["individuals"]:
            pl = get_or_create_player(p, plcfg, generation)
            # Extend the player's lifetime to cover this generation.
            pl.g_end = generation
            pl.save()
    total_players = 0
    for p in cfg.populations:
        total_players += p.size
    # load tournaments
    # Generation 0 is the initial population; it has no tournaments.
    if generation == 0:
        return
    for nt in range(1, cfg.ntournaments + 1):
        ctn = 1
        # One tournament per tournament_size-sized slice of all players.
        for i in range(0, total_players, cfg.tournament_size):
            t = get_or_create_tournament(generation, gf.tournament_name(nt, ctn))
            with open(str(gf.tournament_json(nt, ctn)), 'r') as f:
                tj = json.load(f)
            for player in tj["tournament"]:
                p = Player.objects.get(name=player["player"])
                tr = TournamentResult()
                tr.player = p
                tr.tournament = t
                tr.place = player["place"]
                # Net elo movement over the tournament.
                tr.elo_change = player["end_elo"] - player["start_elo"]
                tr.save()
            ctn += 1
def load():
    """Load every newly-dumped generation after the launch's last processed one."""
    if Launch.objects.count() != 1:
        raise DbLoadError("Wrong amount of Launch objects in the db")
    launch = list(Launch.objects.all())[0]
    cfg = LaunchConfig(launch.launch_cfg)
    # Keep consuming consecutive generations while their dump is complete.
    while is_generation_dumped(cfg, launch.last_generation + 1):
        nxt = launch.last_generation + 1
        print("generation", nxt)
        load_generation(cfg, nxt)
        launch.last_generation = nxt
        launch.save()
|
def install(vm):
    """Install the hortonworks hive-testbench (TPC-H) tooling on `vm` as hduser."""
    vm.install('unzip')
    vm.install('build-essential')
    vm.script('sudo su - hduser -c "git clone https://github.com/hortonworks/hive-testbench.git"')
    vm.script('sudo su - hduser -c "cd hive-testbench && ./tpch-build.sh"')


def uninstall(vm):
    # No-op: nothing is removed for this component.
    pass


def installed(vm):
    # No-op placeholder: detection of an existing install is not implemented.
    pass
|
from __future__ import absolute_import
from django.core import mail
from sentry.models import (OrganizationAccessRequest, OrganizationMember, OrganizationMemberTeam)
from sentry.testutils import TestCase
class SendRequestEmailTest(TestCase):
    """Tests for OrganizationAccessRequest.send_request_email."""

    def test_sends_email_to_everyone(self):
        """The request email goes only to the org owner and admins of the
        requested team — not to non-team admins, plain members, or the
        requester."""
        owner = self.create_user('owner@example.com')
        team_admin = self.create_user('team-admin@example.com')
        non_team_admin = self.create_user('non-team-admin@example.com')
        random_member = self.create_user('member@example.com')
        requesting_user = self.create_user('requesting@example.com')
        org = self.create_organization(owner=owner)
        team = self.create_team(organization=org)
        # Attach the owner's existing org membership to the team.
        OrganizationMemberTeam.objects.create(
            organizationmember=OrganizationMember.objects.get(
                organization=org,
                user=owner,
            ),
            team=team,
        )
        # Admin on the team: expected to be notified.
        self.create_member(
            organization=org,
            user=team_admin,
            role='admin',
            teams=[team],
        )
        # Admin NOT on the team: expected not to be notified.
        self.create_member(
            organization=org,
            user=non_team_admin,
            role='admin',
            teams=[],
        )
        # Plain member on the team: expected not to be notified.
        self.create_member(
            organization=org,
            user=random_member,
            role='member',
            teams=[team],
        )
        # The member asking for access to the team.
        requesting_member = self.create_member(
            organization=org,
            user=requesting_user,
            role='member',
            teams=[],
        )
        request = OrganizationAccessRequest.objects.create(
            member=requesting_member,
            team=team,
        )
        with self.tasks():
            request.send_request_email()
        # Exactly two mails: the owner and the team admin.
        assert len(mail.outbox) == 2, [m.subject for m in mail.outbox]
        assert sorted([m.to[0] for m in mail.outbox]) == \
            sorted([owner.email, team_admin.email])
|
#!/usr/bin/python
import os
import sys
import math
class AES(object):
    """Pure-Python AES-128 block cipher.

    Blocks and keys are lists of ints in range(256); encrypt/decrypt operate
    on exactly one 16-byte block. Only 128-bit keys are supported.
    """
    # Supported key sizes in bytes.
    keySize = dict(SIZE_128=16)

    # Rijndael forward S-box.
    sbox = [
        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
        0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
        0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
        0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
        0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
        0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
        0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
        0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
        0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
        0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
        0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
        0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
        0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]

    # Rijndael round constants: the original 255-entry table is exactly this
    # 51-value cycle repeated five times.
    Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
            0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97,
            0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72,
            0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66,
            0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb] * 5

    def getSBoxValue(self, num):
        """Forward S-box substitution."""
        return self.sbox[num]

    def getSBoxInvert(self, num):
        """Inverse S-box substitution.

        BUG FIX: this method did not exist although subBytes(state, True)
        calls it, so every decryption raised AttributeError. The S-box is a
        permutation of range(256), so its inverse is the value's index.
        """
        return self.sbox.index(num)

    def rotate(self, word):
        """Rotate a 4-byte word one byte to the left."""
        return word[1:] + word[:1]

    def getRconValue(self, num):
        """Round-constant lookup."""
        return self.Rcon[num]

    def core(self, word, iteration):
        """Key-schedule core: rotate, substitute each byte, XOR in the round constant."""
        word = self.rotate(word)
        for i in range(4):
            word[i] = self.getSBoxValue(word[i])
        word[0] = word[0] ^ self.getRconValue(iteration)
        return word

    def expandKey(self, key, size, expandedKeySize):
        """Rijndael key expansion: stretch `key` to `expandedKeySize` bytes."""
        currentSize = 0
        rconIteration = 1
        expandedKey = [0] * expandedKeySize
        # The first `size` bytes are the key itself.
        for j in range(size):
            expandedKey[j] = key[j]
        currentSize += size
        while currentSize < expandedKeySize:
            # t = previous 4 bytes of the expanded key.
            t = expandedKey[currentSize - 4:currentSize]
            # Every `size` bytes, pass t through the schedule core.
            if currentSize % size == 0:
                t = self.core(t, rconIteration)
                rconIteration += 1
            for m in range(4):
                expandedKey[currentSize] = expandedKey[currentSize - size] ^ t[m]
                currentSize += 1
        return expandedKey

    def addRoundKey(self, state, roundKey):
        """XOR the round key into the state (in place) and return it."""
        for i in range(16):
            state[i] ^= roundKey[i]
        return state

    def createRoundKey(self, expandedKey, roundKeyPointer):
        """Extract one 16-byte round key (column-major) from the expanded key."""
        roundKey = [0] * 16
        for i in range(4):
            for j in range(4):
                roundKey[j * 4 + i] = expandedKey[roundKeyPointer + i * 4 + j]
        return roundKey

    def galois_multiplication(self, a, b):
        """Multiply a and b in Rijndael's GF(2^8) (reduction polynomial 0x11b)."""
        p = 0
        for counter in range(8):
            if b & 1:
                p ^= a
            hi_bit_set = a & 0x80
            a <<= 1
            a &= 0xFF  # keep a 8 bit
            if hi_bit_set:
                a ^= 0x1b
            b >>= 1
        return p

    def subBytes(self, state, isInv):
        """Substitute every state byte through the (inverse) S-box."""
        getter = self.getSBoxInvert if isInv else self.getSBoxValue
        for i in range(16):
            state[i] = getter(state[i])
        return state

    def shiftRows(self, state, isInv):
        """Shift row r of the state by r bytes (left on encrypt, right on decrypt)."""
        for i in range(4):
            state = self.shiftRow(state, i * 4, i, isInv)
        return state

    def shiftRow(self, state, statePointer, nbr, isInv):
        # Each iteration rotates the 4-byte row one position.
        for i in range(nbr):
            if isInv:
                state[statePointer:statePointer + 4] = state[statePointer + 3:statePointer + 4] + state[statePointer:statePointer + 3]
            else:
                state[statePointer:statePointer + 4] = state[statePointer + 1:statePointer + 4] + state[statePointer:statePointer + 1]
        return state

    def mixColumns(self, state, isInv):
        """Apply the (inverse) MixColumns step to each of the 4 state columns."""
        for i in range(4):
            # Slice out column i (every 4th byte), mix it, write it back.
            column = state[i:i + 16:4]
            column = self.mixColumn(column, isInv)
            state[i:i + 16:4] = column
        return state

    def mixColumn(self, column, isInv):
        """Galois-multiply one column by the (inverse) MixColumns matrix."""
        mult = [14, 9, 13, 11] if isInv else [2, 1, 1, 3]
        cpy = list(column)
        g = self.galois_multiplication
        column[0] = g(cpy[0], mult[0]) ^ g(cpy[3], mult[1]) ^ g(cpy[2], mult[2]) ^ g(cpy[1], mult[3])
        column[1] = g(cpy[1], mult[0]) ^ g(cpy[0], mult[1]) ^ g(cpy[3], mult[2]) ^ g(cpy[2], mult[3])
        column[2] = g(cpy[2], mult[0]) ^ g(cpy[1], mult[1]) ^ g(cpy[0], mult[2]) ^ g(cpy[3], mult[3])
        column[3] = g(cpy[3], mult[0]) ^ g(cpy[2], mult[1]) ^ g(cpy[1], mult[2]) ^ g(cpy[0], mult[3])
        return column

    def aes_round(self, state, roundKey):
        """One encryption round: SubBytes, ShiftRows, MixColumns, AddRoundKey."""
        state = self.subBytes(state, False)
        state = self.shiftRows(state, False)
        state = self.mixColumns(state, False)
        state = self.addRoundKey(state, roundKey)
        return state

    # applies the 4 operations of the inverse round in sequence
    def aes_invRound(self, state, roundKey):
        state = self.shiftRows(state, True)
        state = self.subBytes(state, True)
        state = self.addRoundKey(state, roundKey)
        state = self.mixColumns(state, True)
        return state

    def aes_main(self, state, expandedKey, nbrRounds):
        """Encrypt one state: initial key add, nbrRounds-1 full rounds, final round without MixColumns."""
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
        i = 1
        while i < nbrRounds:
            state = self.aes_round(state, self.createRoundKey(expandedKey, 16 * i))
            i += 1
        state = self.subBytes(state, False)
        state = self.shiftRows(state, False)
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 16 * nbrRounds))
        return state

    # Perform the initial operations, the standard round, and the final
    # operations of the inverse aes, creating a round key for each round
    def aes_invMain(self, state, expandedKey, nbrRounds):
        state = self.addRoundKey(state,
                                 self.createRoundKey(expandedKey, 16 * nbrRounds))
        i = nbrRounds - 1
        while i > 0:
            state = self.aes_invRound(state,
                                      self.createRoundKey(expandedKey, 16 * i))
            i -= 1
        state = self.shiftRows(state, True)
        state = self.subBytes(state, True)
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
        return state

    def encrypt(self, iput, key, size):
        """Encrypt the 16-int block `iput` with `key`; returns a new 16-int list.

        Returns None for unsupported key sizes.
        """
        output = [0] * 16
        block = [0] * 16
        if size == self.keySize["SIZE_128"]:
            nbrRounds = 10  # AES-128 uses 10 rounds
        else:
            return None
        expandedKeySize = 16 * (nbrRounds + 1)  # 176 bytes for AES-128
        # Map the input into the column-major state layout.
        for i in range(4):
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]
        expandedKey = self.expandKey(key, size, expandedKeySize)
        block = self.aes_main(block, expandedKey, nbrRounds)
        # Map the state back into row-major output order.
        for k in range(4):
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        return output

    def decrypt(self, iput, key, size):
        """Decrypt the 16-int block `iput` with `key`; returns a new 16-int list.

        Returns None for unsupported key sizes.
        """
        output = [0] * 16
        block = [0] * 16
        if size == self.keySize["SIZE_128"]:
            nbrRounds = 10
        else:
            return None
        expandedKeySize = 16 * (nbrRounds + 1)
        for i in range(4):
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]
        expandedKey = self.expandKey(key, size, expandedKeySize)
        block = self.aes_invMain(block, expandedKey, nbrRounds)
        for k in range(4):
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        return output
class AESModeOfOperation(object):
    """CFB-mode wrapper that encrypts/decrypts arbitrary-length strings one
    16-byte block at a time with the AES block cipher."""
    aes = AES()
    # Supported modes of operation (only CFB is implemented).
    modeOfOperation = dict(CFB=1)

    # converts a 16-character string slice into a number array via ord()
    def convertString(self, string, start, end, mode):
        # Clamp the slice to at most one 16-byte block.
        if end - start > 16:
            end = start + 16
        ar = []
        i = start
        j = 0
        # Pre-fill the block with zeros...
        while len(ar) < end - start:
            ar.append(0)
        # ...then overwrite each slot with the character's code point.
        while i < end:
            ar[j] = ord(string[i])
            j += 1
            i += 1
        return ar

    def encrypt(self, stringIn, mode, key, size, IV):
        """CFB-encrypt stringIn; returns (mode, original length, list of cipher byte values)."""
        plaintext = []
        iput = [0] * 16
        output = []
        ciphertext = [0] * 16
        cipherOut = []
        firstRound = True
        if stringIn != None:
            # Process ceil(len/16) blocks of the input string.
            for j in range(int(math.ceil(float(len(stringIn))/16))):
                start = j*16  # start of the 16-byte block
                end = j*16+16  # end of the 16-byte block
                if end > len(stringIn):
                    # Last block may be shorter than 16 bytes.
                    end = len(stringIn)
                plaintext = self.convertString(stringIn, start, end, mode)
                if mode == self.modeOfOperation["CFB"]:
                    if firstRound:
                        # First block: the keystream is the encrypted IV.
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        # Later blocks: encrypt the previous ciphertext block.
                        output = self.aes.encrypt(iput, key, size)
                    for i in range(16):
                        # XOR keystream with plaintext, treating missing
                        # bytes of a short block as zero.
                        if len(plaintext)-1 < i:
                            ciphertext[i] = 0 ^ output[i]
                        elif len(output)-1 < i:
                            ciphertext[i] = plaintext[i] ^ 0
                        elif len(plaintext)-1 < i and len(output) < i:
                            ciphertext[i] = 0 ^ 0
                        else:
                            ciphertext[i] = plaintext[i] ^ output[i]
                    # Only emit as many bytes as the block actually held.
                    for k in range(end-start):
                        cipherOut.append(ciphertext[k])
                    # Feed this ciphertext block back in for the next round.
                    iput = ciphertext
        return mode, len(stringIn), cipherOut

    def decrypt(self, cipherIn, originalsize, mode, key, size, IV):
        """CFB-decrypt the list of cipher byte values `cipherIn`; returns the plaintext string.

        NOTE: CFB decryption also uses aes.encrypt — the keystream is
        regenerated and XORed; the block-decrypt path is never used here.
        """
        ciphertext = []
        iput = []
        output = []
        plaintext = [0] * 16
        chrOut = []
        firstRound = True
        if cipherIn != None:
            # Process ceil(len/16) blocks of the cipher byte list.
            for j in range(int(math.ceil(float(len(cipherIn))/16))):
                start = j*16  # start of the 16-byte block
                end = j*16+16  # end of the 16-byte block
                if j*16+16 > len(cipherIn):
                    # Last block may be shorter than 16 bytes.
                    end = len(cipherIn)
                ciphertext = cipherIn[start:end]
                if mode == self.modeOfOperation["CFB"]:
                    if firstRound:
                        # First block: the keystream is the encrypted IV.
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        # Later blocks: encrypt the previous ciphertext block.
                        output = self.aes.encrypt(iput, key, size)
                    for i in range(16):
                        # XOR keystream with ciphertext, padding short blocks with 0.
                        if len(output)-1 < i:
                            plaintext[i] = 0 ^ ciphertext[i]
                        elif len(ciphertext)-1 < i:
                            plaintext[i] = output[i] ^ 0
                        elif len(output)-1 < i and len(ciphertext) < i:
                            plaintext[i] = 0 ^ 0
                        else:
                            plaintext[i] = output[i] ^ ciphertext[i]
                    for k in range(end-start):
                        chrOut.append(chr(plaintext[k]))
                    iput = ciphertext
        return "".join(chrOut)
def encryptData(key, text, mode=AESModeOfOperation.modeOfOperation["CFB"]):
    """Encrypt `text` with `key` in CFB mode; returns IV + ciphertext as one string.

    BUG FIX / py3 compatibility: `map(ord, ...)` is a lazy iterator on
    Python 3, so `len(key)` crashed; materialize a list instead. bytearray()
    yields ints from os.urandom() output on both Python 2 (str) and 3 (bytes).
    """
    key = [ord(c) for c in key]  # key string -> list of byte values
    keysize = len(key)
    iv = list(bytearray(os.urandom(16)))  # fresh random per-message IV
    moo = AESModeOfOperation()
    (mode, length, ciph) = moo.encrypt(text, mode, key, keysize, iv)
    # Prepend the IV so decryptData can recover it from the first 16 chars.
    return ''.join(map(chr, iv)) + ''.join(map(chr, ciph))
def decryptData(key, data, mode=AESModeOfOperation.modeOfOperation["CFB"]):
    """Decrypt `data` (16-char IV prefix + ciphertext) with `key` in CFB mode.

    py3 compatibility fix: materialize the `map(ord, ...)` iterators so that
    `len(key)` works and the byte lists can be indexed repeatedly.
    """
    key = [ord(c) for c in key]          # key string -> list of byte values
    keysize = len(key)
    iv = [ord(c) for c in data[:16]]     # first 16 chars carry the IV
    data = [ord(c) for c in data[16:]]   # remainder is the ciphertext
    moo = AESModeOfOperation()
    decr = moo.decrypt(data, None, mode, key, keysize, iv)
    return decr
def generateRandomKey(keysize=16):
    """Generate `keysize` random bytes, persist them to key.txt, and return them.

    BUG FIX: the original did `key.close` (missing parentheses), so the file
    handle was never closed; it also wrote raw bytes to a text-mode file,
    which fails on Python 3. Use a binary-mode `with` block instead.
    """
    senha = os.urandom(keysize)
    # Persist the key so the decryption run can read it back.
    with open('key.txt', 'wb') as key_file:
        key_file.write(senha)
    return senha
if __name__ == "__main__":
    # Interactive driver: encrypt entrada1.txt or decrypt cypher_text.txt.
    moo = AESModeOfOperation()
    print('----------Advanced Encryption Standard--------------')
    escolha = int(input('deseja criptograr[1] ou descriptografar[2]: '))
    if escolha == 1:
        # Remove a stale cipher file so the new run starts clean.
        path = os.path.abspath(os.path.dirname(__file__))
        for file in os.listdir(path):  # (original shadowed builtin `dir` here)
            if file == "cypher_text.txt":
                os.remove(file)
        datafile = "entrada1.txt"
        # BUG FIX: files were "closed" via `arq.close` (missing parentheses,
        # a no-op attribute access); `with` actually closes/flushes them.
        with open(datafile, "r+") as arq:
            text = arq.read()
        print('TEXTO: {}'.format(text))
        key = generateRandomKey(16)
        # BUG FIX: `print ''` / `print 'chave =', key` were Python-2-only
        # print statements in a file that otherwise uses print() calls.
        print('')
        print('chave = {}'.format(key))
        print('')
        mode = AESModeOfOperation.modeOfOperation["CFB"]
        cipher = encryptData(key, text, mode)  # CFB encryption
        with open("cypher_text.txt", "w") as arq:
            arq.write(str(cipher))  # persist the ciphertext
        print("TEXTO CIFRADO: {}".format(cipher))
    elif escolha == 2:
        if os.path.exists('cypher_text.txt'):
            with open("cypher_text.txt", "r+") as arq:
                text = arq.read()
            print("TEXTO CIFRADO: {}".format(text))
            mode = AESModeOfOperation.modeOfOperation["CFB"]
            with open('key.txt', 'r+') as arquivo:
                key = arquivo.read()
            print('')
            print('chave = {}'.format(key))
            print('')
            decr = decryptData(key, text, mode)  # CFB decryption
            with open("plain_text.txt", "w") as arq:
                arq.write(decr)  # persist the recovered plaintext
            print("TEXTO DESCRIPTOGRAFADO: {}".format(decr))
        else:
            # Nothing has been encrypted yet.
            print('Nao existe nenhum dado criptografado')
    else:
        print("Escolha invalida")
import os, sys, json, time, datetime
import numpy as np
from utils import read_file, write_file, sigmoid, sigmoid_derivative
def train(X, y, alpha=1, epochs=10000, classes=None):
    """Train a single-layer sigmoid network on X -> y with full-batch gradient steps.

    The learned weight matrix is written to synapses.json.

    :param X: 2-d input matrix (samples x features)
    :param y: 2-d one-hot target matrix (samples x classes)
    :param alpha: learning rate
    :param epochs: maximum number of iterations
    :param classes: list of class labels (sizes the output layer)

    BUG FIX: `classes=[]` was a mutable default argument shared across calls;
    use None and normalize inside.
    """
    if classes is None:
        classes = []
    print("Training with alpha:%s" % (str(alpha)))
    print("Input matrix: %sx%s Output matrix: %sx%s" % (len(X), len(X[0]), len(X[0]), len(classes)))
    np.random.seed(1)  # reproducible weight initialization
    last_mean_error = 1
    # Weights uniform in [-1, 1), shape (features, classes).
    synapse_0 = 2 * np.random.random((len(X[0]), len(classes))) - 1
    layer_0 = X
    for j in range(epochs + 1):  # iter(range(...)) was redundant
        layer_1 = sigmoid(np.dot(layer_0, synapse_0))
        layer_1_error = y - layer_1
        if (j % 1000) == 0:
            error = np.mean(np.abs(layer_1_error))
            # Stop when the error plateaus or is already small enough.
            if error >= last_mean_error or error < 1e-2:
                print('break:', error, ', ', last_mean_error)
                break
            print('delta after ', j, ' iters:', error)
            last_mean_error = error
        layer_1_delta = layer_1_error * sigmoid_derivative(layer_1)
        synapse_0_weight_update = layer_0.T.dot(layer_1_delta)
        synapse_0 += alpha * synapse_0_weight_update
    synapse = {'synapse0': synapse_0.tolist()}
    with open('synapses.json', 'w') as outfile:
        json.dump(synapse, outfile, indent=4)
    print('Train done.')
def main():
    """Build bag-of-words training matrices from resource files and train the network."""
    train_path = 'resources/data/train'
    training = []
    output = []
    # load resources
    classes = read_file('resources/classes').split(', ')
    dictionary = read_file('resources/dictionary')[1:-1].replace("'", "").split(', ')
    train_folders = os.listdir(train_path)
    for folder in train_folders:
        train_files = os.listdir(f'{train_path}/{folder}')
        for file in train_files:
            print("Processing...", file)
            train_case = [0] * len(dictionary)
            file_path = f'{train_path}/{folder}/{file}'
            text = read_file(file_path)[1:-1].replace("'", "").split(', ')
            for item in text:
                if item == '':
                    continue
                # each item looks like "word: count"
                try:
                    (word, value) = item.split(': ')
                    train_case[dictionary.index(word)] = int(value)
                except ValueError as exc:
                    # Fix: was a bare `except:` printing possibly-unbound names and
                    # exiting 0; report the offending item and exit non-zero.
                    print("item: ", item)
                    print("error: ", exc)
                    sys.exit(1)
            training.append(train_case)
            # one output row per training file, one-hot on the file's folder
            output_case = [0] * len(train_folders)
            output_case[train_folders.index(folder)] = 1
            output.append(output_case)
    print("training: ", len(training), ' x ', len(training[0]))
    print("ouput: ", len(output), ' x ', len(output[0]))
    # write_file('resources/training', str(training))
    # write_file('resources/output', str(output))
    X = np.array(training)
    y = np.array(output)
    start_time = time.time()
    train(X, y, alpha=0.1, epochs=10000, classes=classes)
    elapsed_time = time.time() - start_time
    print("processing time:", elapsed_time, "seconds")

if __name__ == '__main__':
    main()
# ******************************************
# Author : Ali Azhari
# Created On : Fri Jul 19 2019
# File : app.py
# *******************************************/
class Solution(object):
    """Length of the longest substring without repeating characters."""

    # Brute force that takes O(n**2)
    def lengthOfLongestSubstring1(self, s):
        """
        :type s: str
        :rtype: int
        """
        length = len(s)
        answer = 0
        for i in range(0, length):
            seen = []  # fix: was named `str`, shadowing the builtin
            j = i
            while j < length and s[j] not in seen:
                seen.append(s[j])
                j += 1
            # window is s[i:j], so its length is j - i
            answer = max(answer, j - i)
        return answer

    def lengthOfLongestSubstring2(self, s):
        """O(n) sliding-window version.

        :type s: str
        :rtype: int
        """
        # Fix: the original body did `dicts['hello'] + 1` on an empty dict,
        # an unconditional KeyError. Implement the intended last-seen-index
        # sliding window instead.
        last_seen = {}
        best = 0
        start = 0
        for idx, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                # repeat inside the current window: move the window start
                start = last_seen[ch] + 1
            last_seen[ch] = idx
            best = max(best, idx - start + 1)
        return best
# Demo: exercise the second implementation on a sample string.
solution = Solution()
solution.lengthOfLongestSubstring2('hello')
|
#!/usr/bin/python3
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
from bs4 import BeautifulSoup
# Fetch the page, then look up a tag in its body; report errors in Portuguese.
try:
    html = urlopen(r'http://pythonscraping.com/pages/page1.html')
    # html = urlopen(r'http://lilacaromas.com.br/inicio.html')
except HTTPError as e:
    # HTTPError does provide .msg
    print(f'Erro HTTP: {e.msg}')
    # devolve null, executa um break ou algum outro "plano B"
except URLError as e:
    # Fix: URLError has .reason, not .msg — the old code raised AttributeError here
    print(f'Erro URL: {e.reason}')
else:
    try:
        bs = BeautifulSoup(html.read(), 'html.parser')
        title = bs.body.h134
    except AttributeError as e:
        # Fix: AttributeError has no .msg attribute; print the exception itself
        print(f'Erro: {e}')
    else:
        if title is None:  # fix: identity comparison for None
            print('Title could not be found')
        else:
            print(title)
|
def test_prompt(netmiko_conn):
    """The device prompt must be the expected Arista hostname."""
    assert netmiko_conn.find_prompt() == "arista1#"
def test_show_ver(netmiko_conn):
    """'show version' output must contain the expected EOS version string."""
    assert "4.20.10M" in netmiko_conn.send_command("show version")
|
# -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
import re
from flask import flash, request
def flash_errors(form, category='warning'):
    """Flash every validation error attached to *form*, one message per error."""
    for field_name, messages in form.errors.items():
        label = getattr(form, field_name).label.text
        for message in messages:
            flash('{0} - {1}'.format(label, message), category)
def bokeh_app_url(app_name=""):
    """Return the current request's root URL rewritten to the bokeh port, plus *app_name*."""
    # replace port :XXXX with :5006 (bokeh port)
    # Fix: use a raw string — "\d" in a plain string is an invalid escape
    # (DeprecationWarning, and a SyntaxError in future Python versions).
    url = re.sub(r":\d{4}", ":5006", request.url_root)
    return url + app_name
|
import linecache #lines are 1 indexed
import sys
import random
import re
import collections
#only call this on lines in the matrix. will err otherwise.
def convert(file_name, node_index):
    """turn line into list

    Reads line node_index+1 (linecache is 1-indexed), strips the 2-char
    prefix and trailing newline, and returns the whitespace-separated
    integers on it.
    """
    raw = linecache.getline(file_name, node_index + 1)
    trimmed = raw[2:raw.index('\n')]
    return [int(token) for token in trimmed.split()]
#current issue: if more than one node with same cost, only chooses first one
def cheapest(file_name, node, repeat, visited):
    """Pick the cheapest reachable next node from *node*.

    Returns [cost, node_number] of the cheapest unvisited candidate, or False
    when none remain.  When repeat == 3 (same colour seen 3 times in a row),
    candidates with the current colour are excluded.
    Only call this on lines in the matrix; it will err otherwise.
    """
    for_index = convert(file_name, node)
    possibilities = []
    # colour string lives two lines past the matrix
    colors = linecache.getline(file_name, len(for_index) + 2)
    # NOTE(review): this indexes the node's *cost row*, not the colour line —
    # confirm `for_index[node-1]` is really the node's colour.
    color = for_index[node - 1]
    if repeat == 3:
        # Fix: was `for i in len(for_index)` — a TypeError; iterate the indices.
        for i in range(len(for_index)):
            if (color != colors[i]) and (i + 1 not in visited) and (i != node - 1):
                possibilities.append((for_index[i], i))  # tuple of cost and index
    else:
        for i in range(len(for_index)):
            if (i + 1 not in visited) and (i != node - 1):
                possibilities.append((for_index[i], i))
    if possibilities == []:
        return False
    # NOTE: if several nodes share the min cost, only the first is chosen.
    return [min(possibilities)[0], min(possibilities)[1] + 1]
def greedy(file_name, N):
    """cheapest(file_name, node, repeat, visited)
    returns the cost and index of where to travel next

    Greedy tour construction: start at a random node, repeatedly take the
    cheapest allowed edge, backtracking two steps when stuck, until the path
    visits every node 1..N exactly once.
    """
    curr_node = random.randrange(1, N+1) #initialize next as random variable
    return_path = [curr_node]
    visited = [curr_node]
    # colour string lives two lines past the N-line matrix
    color_list = linecache.getline(file_name, N+2)
    repeat_colors = 1 #can't be greater than 3
    # NOTE(review): indexes with curr_node, but everywhere else uses choice-1 —
    # possible off-by-one; confirm.
    curr_color = color_list[curr_node]
    permute = range(1, N+1)
    # done when the path is a permutation of 1..N
    while collections.Counter(return_path) != collections.Counter(permute):
        costs = convert(file_name, curr_node) #outgoing edge weights for current node
        if cheapest(file_name, curr_node, 1, visited):
            (choice_cost, choice) = cheapest(file_name, curr_node, 1, visited)
            choice_color = color_list[choice-1]
            # visited.append(choice)
        #need to backtrack
        else:
            curr_node = return_path[len(return_path)-3] #backtrack 2
            repeat_colors = 1
            (choice_cost, choice) = cheapest(file_name, curr_node, 1, visited)
            choice_color = curr_color
            # NOTE(review): `len(visited-2)` is a TypeError (list minus int);
            # presumably `len(visited)-2` / `visited[:-2]` was intended — confirm.
            visited = [i for i in visited[:len(visited-2)]]
            return_path = [i for i in return_path[:len(return_path-2)]]
            continue
        if choice_color == curr_color:
            repeat_colors += 1
        else:
            repeat_colors = 0
        return_path.append(choice)
        visited.append(choice)
        curr_node = choice
        curr_color = color_list[choice-1]
    a = str(return_path)
    # print the path as space-separated node numbers
    print re.sub('[^0-9]', ' ', str(a))
    for i in return_path:
        print color_list[i-1]
    return return_path
# T = 1 # number of test cases
# fout = open ("answer.out", "w")
# for t in xrange(1, T+1):
# fin = open(str(t) + ".in", "r")
# N = int(fin.readline()) #size
# d = [[] for i in range(N)] #turn each row into array of integers
# for i in xrange(N):
# d[i] = [int(x) for x in fin.readline().split()]
# c = fin.readline() #print out row
# # find an answer, and put into assign
# # assign = [0] * N
# assign = greedy(fin, str(t) + ".in", N) #call greedy strategy
# for i in xrange(N):
# assign[i] = i+1
# fout.write("%s\n" % " ".join(map(str, assign)))
# fout.close()
|
"""
Test PySlipQt GotoPosition() function.
The idea is to have a set of buttons selecting various geo positions on the OSM
tile map. When selected, the view would be moved with GotoPosition() and a
map-relative marker would be drawn at that position. At the same time, a
view-relative marker would be drawn at the centre of the view. The difference
between the two markers shows errors in the Geo2Tile() & Tile2Geo() functions.
"""
import os
import sys
import traceback
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget,
QHBoxLayout, QGridLayout,
QPushButton)
import pySlipQt.pySlipQt as pySlipQt
import pySlipQt.open_street_map as tiles
from display_text import DisplayText
from layer_control import LayerControl
# set up logging
import pySlipQt.log as log
log = log.Log('pyslipqt.log')
######
# Various demo constants
######
# demo name/version
DemoVersion = '1.0'
DemoName = "pySlip %s - GotoPosition() test %s" % (pySlipQt.__version__, DemoVersion)
DemoWidth = 800
DemoHeight = 665
# initial level and position
InitViewLevel = 3
InitViewPosition = (0, 0)
# the number of decimal places in a lon/lat display
LonLatPrecision = 2
# a selection of cities, position from WikiPedia, etc
# format is ((<lon>,<lat>),<name>)
# lat+lon from Google Maps
Cities = [((0.0, 51.4778), 'Greenwich, United Kingdom'),
((5.33, 60.389444), 'Bergen, Norway'),
((151.209444, -33.865), 'Sydney, Australia'),
((-77.036667, 38.895111), 'Washington DC, USA'),
((132.472638, 34.395359), 'Hiroshima (広島市), Japan'),
((-8.008273, 31.632488), 'Marrakech (مراكش), Morocco'),
((18.955321, 69.649208), 'Tromsø, Norway'),
((-70.917058, -53.163863), 'Punta Arenas, Chile'),
((168.347217, -46.413020), 'Invercargill, New Zealand'),
((-147.8094268, 64.8282982), 'Fairbanks AK, USA'),
((103.8508548, 1.2848402), "Singapore (One Raffles Place)"),
((-3.2056135, 55.9552474), "Maxwell's Birthplace"),
((7.6059011, 50.3644454), "Deutsches Eck, Koblenz, Germany"),
((116.391667, 39.903333), "Beijing (北京市)"),
]
################################################################################
# The main application frame
################################################################################
class AppFrame(QMainWindow):
    """Main demo window: a slippy-map view plus one button per preset city.

    Clicking a button recentres the map on that city, draws a map-relative
    point there and a view-relative cross at the view centre; any offset
    between the two exposes Geo2Tile()/Tile2Geo() errors.
    """

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, DemoWidth, DemoHeight)
        self.setWindowTitle(DemoName)
        self.show()
        self.tile_source = tiles.Tiles()
        self.tile_directory = self.tile_source.tiles_dir
        # the data objects for map and view layers
        self.map_layer = None
        self.view_layer = None
        # build the GUI
        self.make_gui()
        self.show()
        # bind events to handlers
        self.pyslipqt.events.EVT_PYSLIPQT_POSITION.connect(self.handle_position_event)
        self.pyslipqt.events.EVT_PYSLIPQT_LEVEL.connect(self.handle_level_change)
        # finally, goto desired level and position
        self.pyslipqt.GotoLevelAndPosition(InitViewLevel, InitViewPosition)

    #####
    # Build the GUI
    #####

    def make_gui(self):
        """Create application GUI."""
        # build the GUI
        grid = QGridLayout()
        qwidget = QWidget(self)
        qwidget.setLayout(grid)
        self.setCentralWidget(qwidget)
        # add controls to right of spacer
        rows = self.make_gui_controls(grid)
        # grid.addLayout(controls)
        # put map view in left of horizontal box
        self.pyslipqt = pySlipQt.PySlipQt(self, start_level=InitViewLevel, tile_src=self.tile_source)
        grid.addWidget(self.pyslipqt, 0, 0, rows+1, 1)

    def make_gui_controls(self, grid):
        """Build the 'controls' part of the GUI.

        grid  reference to the grid layout to fill

        Returns the number of rows used (callers size the map widget with it).
        """
        # row to put controls into
        row = 0
        # add the map level in use widget
        level_mouse = self.make_gui_level_mouse()
        grid.addLayout(level_mouse, row, 1)
        row += 1
        # buttons for each point of interest; remember button -> city for the handler
        self.buttons = {}
        for (num, city) in enumerate(Cities):
            (lonlat, name) = city
            btn = QPushButton(name)
            grid.addWidget(btn, row, 1)
            btn.clicked.connect(self.handle_button)
            self.buttons[btn] = city
            row += 1
        return row

    def make_gui_level_mouse(self):
        """Build the control that shows the level and mouse position.

        Returns reference to containing layout.
        """
        hbox = QHBoxLayout()
        self.map_level = DisplayText(title='', label='Level:', tooltip=None)
        self.mouse_position = DisplayText(title='', label='Lon/Lat:',
                                          text_width=100, tooltip=None)
        hbox.addWidget(self.map_level)
        hbox.addWidget(self.mouse_position)
        return hbox

    ######
    # Event handlers
    ######

    def handle_button(self, event):
        """Handle button event: jump to the city and draw both markers."""
        # get the button that was pressed
        sender_btn = self.sender()
        (posn, name) = self.buttons[sender_btn]
        log(f"Got button event, posn={posn}, name='{name}'")
        self.pyslipqt.GotoPosition(posn)
        if self.map_layer:
            # if there was a previous layer, delete it
            self.pyslipqt.DeleteLayer(self.map_layer)
        map_data = [posn]
        point_colour = '#0000ff40'
        self.map_layer = self.pyslipqt.AddPointLayer(map_data, map_rel=True,
                                                     placement='cc',
                                                     color=point_colour,
                                                     radius=11,
                                                     visible=True,
                                                     name='map_layer')
        if self.view_layer:
            self.pyslipqt.DeleteLayer(self.view_layer)
        # view-relative cross centred in the view, to compare against the map marker
        view_data = [(((0,0),(0,-10),(0,0),(0,10),
                       (0,0),(-10,0),(0,0),(10,0)),{'colour':'#ff0000ff'},)]
        # poly_colour = '#ff0000ff'
        self.view_layer = self.pyslipqt.AddPolygonLayer(view_data, map_rel=False,
                                                        placement='cc',
                                                        # colour=poly_colour,
                                                        closed=False,
                                                        visible=True,
                                                        width=2,
                                                        name='view_layer')

    def handle_position_event(self, event):
        """Handle a pySlip POSITION event: show mouse lon/lat in the status widget."""
        posn_str = ''
        if event.mposn:
            (lon, lat) = event.mposn
            posn_str = ('%.*f / %.*f'
                        % (LonLatPrecision, lon, LonLatPrecision, lat))
        self.mouse_position.set_text(posn_str)

    def handle_level_change(self, event):
        """Handle a pySlip LEVEL event: show the new zoom level."""
        self.map_level.set_text('%d' % event.level)
################################################################################
# our own handler for uncaught exceptions
def excepthook(exc_type, value, tb):
    """Top-level uncaught-exception handler: print a framed traceback, exit 1.

    Fix: parameters renamed so the builtin `type` is no longer shadowed
    (sys.excepthook always calls positionally, so callers are unaffected).
    """
    msg = '\n' + '=' * 80
    msg += '\nUncaught exception:\n'
    msg += ''.join(traceback.format_exception(exc_type, value, tb))
    msg += '=' * 80 + '\n'
    print(msg)
    sys.exit(1)
# plug our handler into the python system
sys.excepthook = excepthook

# use user tile directory, if supplied
# NOTE(review): tile_dir is collected but never passed to the tile source — confirm.
tile_dir = None
if len(sys.argv) > 1:
    tile_dir = sys.argv[1]

# start the Qt application and run the event loop
app = QApplication(sys.argv)
ex = AppFrame()
sys.exit(app.exec_())
|
import requests
def login():
    """POST fixed credentials to the auth endpoint.

    Returns the response cookies as a plain dict, or None when the request fails.
    """
    session = requests.session()
    url = 'https://authapi.xincheng.com:8090/mobilelogin/index'
    params = {
        'userid': 'lvrihui',
        'password':
        'Taco53TfQ2iDfvIUrKHroIRFNBt2/SP5TYYxCzgJ4wE+dElEIvQzGn4nVO9hQOl2qGDhC5T6D3/jEZvyJ/wQKq6uBnbTECGoB\
mvTpwAWGHmLe1HMvlyQmEK2WlgJBSCrKlZyJXZAhlDcxnOvcZmqdQsToiVdzRCRaNUwFEg4/zK1ljgxHPDFq3eIOOmbU+KrpPQURrYrqtRc\
ddpTGCh50EfuWDREDHP9dC0j2J8VcOHRYnP04ysNuK5WpIfWAhhqXWGc0gJJBhSQCpo7ZFB3/Z7NS90/PXfpCYbj6Qd8Vaf0zju2U5Riuj1A2nwEJDWLrSTvHMHHf4arkOWBSKnxSw==',
        'systemCode': 'A09'
    }
    try:
        r = session.post(url=url, data=params)
        return requests.utils.dict_from_cookiejar(r.cookies)
    except requests.exceptions.RequestException as err:
        # Fix: was `except expression as err` — `expression` is undefined, so any
        # failure raised a NameError instead of being handled.
        print('获取cookie失败:\n{0}'.format(err))
def get_data():
    """Fetch the CRM task-edit page using cookies obtained from login()."""
    cookie = login()
    print(cookie)
    # NOTE(review): the 'oid' value embeds extra query params (funcid, ReceiveGUID);
    # requests will URL-encode the '&' so they travel as part of oid — confirm intended.
    res = requests.get(
        url="http://crm.xincheng.com/Kfxt/rcfw/RcRwcl_Edit.aspx",
        params={
            'mode':
            '2',
            'oid':
            'd93aef5b-08fe-ea11-80c3-90e2babd62d1&funcid=01020302&ReceiveGUID=504788b7-07fe-ea11-80c3-90e2babd62d1'
        },
        cookies= cookie)
    print(res.text)
get_data()
'''
if r.status_code == 200:
r = session.get(
url="http://crm.xincheng.com/Kfxt/rcfw/RcRwcl_Edit.aspx",
params={
'mode':
'2',
'oid':
'd93aef5b-08fe-ea11-80c3-90e2babd62d1&funcid=01020302&ReceiveGUID=504788b7-07fe-ea11-80c3-90e2babd62d1'
},
cookies= session.cookies)
print(r.status_code)
print(r.text)
''' |
from app import db
class Users(db.Model):
    """User account record (login, numeric ids, home directory, shell)."""
    # login name — primary key
    userid = db.Column(db.String(30), primary_key=True, nullable=False)
    # NOTE(review): stored as a plain 30-char string — confirm it is hashed upstream
    passwd = db.Column(db.String(30), nullable=False)
    # numeric user id, unique per account
    uid = db.Column(db.Integer, nullable=False, unique=True)
    # primary group id
    gid = db.Column(db.Integer, nullable=False)
    homedir = db.Column(db.String(255))
    shell = db.Column(db.String(255))
class Groups(db.Model):
    """Group record: name, numeric id and its member list."""
    # group name — primary key
    groupname = db.Column(db.String(30), primary_key=True, nullable=False)
    # numeric group id, unique per group
    gid = db.Column(db.Integer, nullable=False, unique=True)
    # member names in one string — presumably comma-separated; confirm with callers
    members = db.Column(db.String(255))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
class NotMemoFileError(Exception):
    """Raised when a file's first header byte is not Memo.MEMO_FLAG."""
    pass
class MemoItemType:
    """ MemoItem's type """
    QA = 0             # free-text question/answer
    SINGLE_SELECT = 1  # multiple choice, one correct letter
    JUDGE = 2          # true/false judgement
class MemoItem(object):
    """ parent of MenuItem.

    Base class for one study item: a subject (question), an answer, and a
    type tag from MemoItemType.
    """

    # binary layout: type byte, subject length, answer length, then both strings
    FMT = "<BII%ss%ss"

    def __init__(self, _subject, _answer, _mitype):
        super(MemoItem, self).__init__()
        self.subject = _subject
        self.answer = _answer
        self.mitype = _mitype

    def vaild(self, _answer):
        # NOTE(review): returns True on a match but implicitly returns None
        # (falsy) otherwise; name looks like a typo for "valid".
        if _answer.upper() == self.answer:
            return True

    @staticmethod
    def _generate_fmt(_q, _a):
        """Fill FMT's %s length placeholders from the subject/answer lengths."""
        fmt = MemoItem.FMT
        q_len = len(_q)
        a_len = len(_a)
        fmt = fmt % (q_len, a_len)
        return fmt

    def get_type(self):
        """ return MemoMemoItemType """
        return self.mitype

    def to_bin_format_data(self):
        """Serialize this item to bytes with struct.pack using the generated format."""
        fmt = self._generate_fmt(self.subject, self.answer)
        data = struct.pack(fmt,
                           self.mitype,
                           len(self.subject),
                           len(self.answer),
                           self.subject,
                           self.answer)
        return data
class SingleSelectMemoItem(MemoItem):
    """docstring for MemoItemSingleSelect

    Multiple-choice item: the raw answer string packs the choices
    ('##'-separated) with the correct letter embedded near its end.
    """

    # choice index -> displayed letter
    CHOOSE_CAST = {0: "A", 1: "B", 2: "C", 3: "D"}

    def __init__(self, _subject, _answer):
        super(SingleSelectMemoItem, self).__init__(_subject, _answer,
                                                   MemoItemType.SINGLE_SELECT)
        self.subject = _subject
        s = _answer
        #print _answer
        # choices are packed into the answer string, separated by "##"
        self.chooses = s.split("##")
        #print self.chooses
        # import sys
        # print self.chooses[-1][-3:]
        # sys.exit(-1)
        # the correct letter sits in the last 3 chars of the final choice
        answ = self.chooses[-1][-3:][-2]
        print answ
        #sys.exit(-1)
        self.answer = answ
        # strip the embedded answer suffix and any remaining '#' markers
        self.chooses[-1] = self.chooses[-1][:-3]
        for i in xrange(0,len(self.chooses)):
            self.chooses[i] = self.chooses[i].replace("#", "")

    def __str__(self):
        s = self.subject
        arr = []
        i = 0
        # render as "\tA choice\n\tB choice..." under the subject line
        for ch in self.chooses:
            arr.append("\t")
            arr.append(SingleSelectMemoItem.CHOOSE_CAST[i])
            arr.append(ch)
            arr.append("\n")
            i += 1
        cs = "".join(arr[:-1])  # drop the trailing newline
        tostr = "".join((s, "\n", cs))
        return tostr

    def vaild(self, _answer):
        # choose_dic = dict([(v, k) for k , v in CHOOSE_CAST.iteritems() ])
        if _answer.upper() == self.answer:
            print "right"
            return True
        print self.answer
        return False
class QAMemoItem(MemoItem):
    """Free-text question-and-answer memo item."""

    def __init__(self, _subject, _answer):
        super(QAMemoItem, self).__init__(_subject, _answer,
                                         MemoItemType.QA)

    def __str__(self):
        # subject followed by the answer prompt on its own line
        return "".join([self.subject, "\n", "请回答"])
class JudgeMemoItem(MemoItem):
    """True/false judgement memo item."""

    def __init__(self, _subject, _answer):
        super(JudgeMemoItem, self).__init__(_subject, _answer,
                                            MemoItemType.JUDGE)

    def __str__(self):
        # subject followed by the T/F prompt on its own line
        return "".join([self.subject, "\n", "请输入T或F"])
memo_class_list = { MemoItemType.JUDGE : JudgeMemoItem,
MemoItemType.QA : QAMemoItem,
MemoItemType.SINGLE_SELECT : SingleSelectMemoItem}
class Memo(object):
""" Memo object """
LOADFILE = 0
CREATE_FILE = 1
MEMO_FLAG = 0x5d
MEMO_HEAD_FMT = "BB"
def __init__(self, _init_type, _fn):
super(Memo, self).__init__()
self.ss_list = []
self.judge_list = []
self.qa_list = []
self.mi_dict = {MemoItemType.JUDGE : self.judge_list,
MemoItemType.QA : self.qa_list,
MemoItemType.SINGLE_SELECT : self.ss_list}
self.filename = _fn
if _init_type == Memo.LOADFILE:
self._load_memo()
print len(self.judge_list)
print len(self.qa_list)
print len(self.ss_list)
def _load_memo(self):
_read_memo_head = lambda _f: \
struct.unpack(Memo.MEMO_HEAD_FMT, _f.read(2))
def _read_and_create_memo_item(_f):
""" read a memo item, if success return (item_type, subject, answ) \
else return (False, None, None) """
typ = f.read(1)
typ = struct.unpack("B", typ)[0]
k_len = f.read(4)
v_len = f.read(4)
k_len = struct.unpack("I", k_len)[0]
v_len = struct.unpack("I", v_len)[0]
k = struct.unpack((str(k_len)+"s"), f.read(k_len))[0]
v = struct.unpack((str(v_len)+"s"), f.read(v_len))[0]
# print "k", k
# print "v", v
# print "type", typ
return memo_class_list[typ](k,v)
f = open(self.filename)
print self.filename
mh = _read_memo_head(f)
if mh[0] != Memo.MEMO_FLAG:
raise NotMemoFileError()
i = mh[1]
for x in xrange(0, i):
item = _read_and_create_memo_item(f)
print item.get_type()
self.mi_dict[item.get_type()].append(item)
f.close()
def save():
pass
def start_study_judge(self):
""" start study judge """
fail_item_id = [x for x in xrange(0, len(self.judge_list))]
#failure = self.ss_list
print "共有 %d 道题,开始吧" % len(fail_item_id)
failure = self.judge_list
right = []
while fail_item_id != []:
fail_item_id = []
# while failure != []:
i = 0
while i < len(failure):
print failure[i]
an = raw_input("请输入选项:")
if failure[i].vaild(an.strip().upper()) == True:
print "right"
print "这是 %d 题" % i
i += 1
else:
fail_item_id.append(i)
print fail_item_id
print "有 %d 道题做错了" % len(fail_item_id)
failure = [failure[x] for x in fail_item_id]
def add_memo_item(self, _type, _s, _a):
""" add memo item to array """
pass
def start_study_single_select(self):
#failure = self.ss_list
fail_item_id = [x for x in xrange(0, len(self.ss_list))]
#failure = self.ss_list
print "共有 %d 道题,开始吧" % len(fail_item_id)
failure = self.ss_list
right = []
while fail_item_id != []:
fail_item_id = []
# while failure != []:
i = 0
while i < len(failure):
print failure[i]
an = raw_input("请输入选项:")
if failure[i].vaild(an.strip().upper()) == True:
print "right"
print "这是 %d 题" % i
i += 1
else:
fail_item_id.append(i)
print fail_item_id
print "有 %d 道题做错了" % len(fail_item_id)
failure = [failure[x] for x in fail_item_id]
def main():
    """Load the single-select memo file and start the drill loop."""
    import sys
    # memo file to study; swap the commented line to use a different file
    memo_file = "single_select.memo"
    #memo_file = "test.memo"
    # if len(sys.argv) == 2:
    #     memo_file = sys.argv[1]
    # print len(sys.argv)
    # print memo_file
    #sys.exit(-1)
    # print memo_file
    memo = Memo(Memo.LOADFILE, memo_file)
    # memo.start_study_judge()
    memo.start_study_single_select()

if __name__ == '__main__':
    main()
from App.Model.Local import Local
from App.Model.Pessoa import Pessoa
from App.Model.Tarefa import Tarefa
from App.Model.UserAuth import UserAuth
def routes(api):
    """Register every model resource on the given API object."""
    for resource in (Local, Pessoa, Tarefa, UserAuth):
        api.register(resource)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 01:03:39 2018
@author: ikc15
Train an Image Classifier with TensorFlow Learn to classify handwritten digits
from the mnist dataset.
Since we are using a neural network. We do not need to manually select features.
The neural network takes each raw pixel as a feature (ie. 784 features in this
example)
"""
# In[]:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
learn = tf.contrib.learn
tf.logging.set_verbosity(tf.logging.ERROR)
# In[]:
'Import the dataset'
# MNIST images arrive flattened to length-784 float vectors; labels cast to int32
mnist = learn.datasets.load_dataset('mnist')
data = mnist.train.images
labels = np.asarray(mnist.train.labels, dtype=np.int32)
test_data = mnist.test.images
test_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# In[]:
'Display digits'
def display(i):
    """Show test image *i* with its label in the plot title."""
    plt.title('Example %d, Label: %d' %(i, test_labels[i]))
    # images are stored flattened (length 784); restore the 28x28 grid to draw
    plt.imshow(test_data[i].reshape((28,28)), cmap=plt.cm.gray_r)
# In[]:
'''
Fit a linear classifier (neural netowrk with input layer (794 nodes) -> output layer (10 nodes))-
Our goal here is to get about 90% accuracy with this simple classifier.
'''
feature_columns = learn.infer_real_valued_columns_from_input(data)
# Choose classifier
#1st arg: 10 classes- one for each digit 0-9, 2nd arg: informs the classifier about the input data
clf = learn.LinearClassifier(n_classes=10, feature_columns=feature_columns)
# train classifier (using gradient decent) to adjust weights of each input-output connection for all training data
clf.fit(data,labels, batch_size=100, steps=1000)
# In[]:
'Evaulate accuracy of classifier'
accuracy = clf.evaluate(test_data, test_labels)
print (accuracy['accuracy'])
# In[]:
'Classify a few examples'
pred = clf.predict(np.array([test_data[0]]), as_iterable=False)
print("Predicted %d, Label: %d" % (pred, test_labels[0]))
display(0)
#print ('Predicted: %d, label: %d'%(clf.predict(test_data[0]), test_labels[0]))
# In[]:
'Visualize learned weights'
f, ax = plt.subplots(2,5, figsize=(10,4))
ax = ax.reshape(-1)
#for var in clf.get_variable_names():
#    print("var:", var, "=", clf.get_variable_value(var))
# presumably the weight matrix variable — confirm against get_variable_names() output
weights = clf.get_variable_value(clf.get_variable_names()[1])
for i in range(len(ax)):
    a = ax[i]
    # each output unit's incoming weights reshape back to a 28x28 image
    a.imshow(weights.T[i].reshape(28,28), cmap=plt.cm.seismic)
    a.set_title(i)
    a.set_xticks(())  # ticks be gone
    a.set_yticks(())
plt.show()
f.savefig('weights.png', bbox_inches='tight')
#!/usr/bin/env python
# Usage:
# ./QA.py path/to/transcripts/ path/to/output/directory
import os
import subprocess
import sys
from Bio import SeqIO
threads = 16
mgy_db = '/Nancy/data/input/RNA/ENA_gut/db/mgy.dmnd'
def get_fasta_ids(fasta):
    """Return the set of cluster ids in *fasta*.

    Each record id has its final '_'-suffix removed (everything after the
    last underscore is dropped).
    """
    ids = set()
    for seq_record in SeqIO.parse(fasta, "fasta"):
        # Fix: local was named `str`, shadowing the builtin
        rec_id = seq_record.id
        ind = rec_id.rfind('_')
        ids.add(rec_id[:ind])
    return ids
def get_annotated_proteins(align_path):
    """Return the set of protein ids in a hit table at *align_path*.

    Takes the first whitespace-separated column of each line and strips its
    final '_'-suffix (everything after the last underscore).
    """
    proteins = set()
    with open(align_path, 'r') as fin:
        for line in fin:
            # Fix: local was named `str`, shadowing the builtin
            hit_id = line.strip().split()[0]
            ind = hit_id.rfind('_')
            proteins.add(hit_id[:ind])
    return proteins
def run_prodigal(assembly_path, outdir):
    """Run prodigal gene prediction on *assembly_path*; return the proteins FASTA path."""
    initial_dir = os.path.abspath(os.getcwd())
    # prodigal output paths are relative, so run it from inside outdir
    os.chdir(outdir)
    name = os.path.basename(assembly_path).split('.')[0]
    proteins = '{}.proteins.faa'.format(name)
    command = 'prodigal -i {assembly} -o {genes} -a {proteins} -p meta'.\
        format(assembly=assembly_path, genes='{}.genes.faa'.format(name), proteins=proteins)
    print(command)
    subprocess.call(command, shell=True)
    os.chdir(initial_dir)
    return os.path.join(outdir, proteins)
def run_mmseqs(proteins_path, outdir, min_seq_id=0.9):
    """Cluster proteins with `mmseqs easy-linclust`; return the representative-sequence FASTA path."""
    name = os.path.basename(proteins_path).split('.')[0]
    # mmseqs writes <clusterRes>_rep_seq.fasta for the representatives
    rep_seq_path = os.path.join(outdir, '{}_rep_seq.fasta'.format(name))
    command = 'mmseqs easy-linclust {proteins} {clusterRes} {tmp} ' \
              '--min-seq-id {min_seq_id} --cov-mode 1 --cluster-mode 2 --kmer-per-seq 80'. \
        format(proteins=proteins_path, clusterRes=os.path.join(outdir, name), min_seq_id=min_seq_id, tmp=os.path.join(outdir, 'tmp'))
    print(command)
    subprocess.call(command, shell=True)
    return rep_seq_path
def run_interproscan(rep_seq_path, outdir):
    """Annotate representative sequences with InterProScan; return the merged tsv path.

    Cleans '*' from the FASTA, splits it into 50 chunks, runs interproscan.sh
    per chunk (Hamap + Pfam only), then concatenates the per-chunk tsv files.
    """
    global threads
    name = os.path.basename(rep_seq_path).split('_')[0]
    ipr_dir = os.path.join(outdir, '{}_rep_seq.clear'.format(name))
    if not os.path.exists(ipr_dir):
        os.makedirs(ipr_dir)
    # Remove the * at the end of the sequences
    clear_rep_seq = os.path.join(ipr_dir, '{}_rep_seq.clear.fasta'.format(name))
    command = 'sed \'s/*//\' ' + rep_seq_path + ' > ' + clear_rep_seq
    print(command)
    subprocess.call(command, shell=True)
    # Split fasta into smaller chunks to speed up
    command = 'pyfasta split -n 50 {}'.format(clear_rep_seq)
    print(command)
    subprocess.call(command, shell=True)
    cat_cmd = 'cat'
    # chunk suffixes are zero-padded: 00..09 then 10..49
    for num in ['0' + str(n) for n in range(0, 10)] + list(range(10, 50)):
        # print(num)
        filename = '{name}_rep_seq.clear.{num}'.format(name=name, num=num)
        proteins_path = '{}.fasta'.format(filename)
        # Run IPR for each chunk separately
        command = 'interproscan.sh -i {proteins} -b {base} -cpu {threads} -dp -dra -appl Hamap,Pfam'\
            .format(proteins=os.path.join(ipr_dir, proteins_path),
                    base=os.path.join(ipr_dir, filename), threads=threads)
        print(command)
        subprocess.call(command, shell=True)
        cat_cmd += ' {}.tsv'.format(os.path.join(ipr_dir, filename))
    ipr_path = os.path.join(ipr_dir, '{}_rep_seq.clear.tsv'.format(name))
    # Concatenate all IPR tsv-s
    cat_cmd += ' > ' + ipr_path
    print(cat_cmd)
    subprocess.call(cat_cmd, shell=True)
    return ipr_path
def run_diamond(rep_seq_path, mgy_db, outdir):
    """BLASTp the representatives against the MGnify diamond DB; return the matches path."""
    global threads
    name = os.path.basename(rep_seq_path).split('_')[0]
    diamond_path = os.path.join(outdir, '{}.matches.m8'.format(name))
    # NOTE(review): name= below is not referenced by the format string — dead argument.
    command = 'diamond blastp -d {mgy} -q {rep_seq} -o {diamond} --query-cover 50 --id 95 --subject-cover 90 ' \
              '--threads {threads}'.\
        format(mgy=mgy_db, rep_seq=rep_seq_path, diamond=diamond_path, name=name, threads=threads)
    print(command)
    subprocess.call(command, shell=True)
    return diamond_path
def get_counts(rep_seq_path, diamond_path, ipr_path, outdir):
    """Write annotation-overlap statistics (MGnify vs InterPro) to results.txt in *outdir*."""
    results_path = os.path.join(outdir, 'results.txt')
    annotated_mgy = get_annotated_proteins(diamond_path)
    annotated_ipr = get_annotated_proteins(ipr_path)
    all_clusters = get_fasta_ids(rep_seq_path)
    # print(list(annotated_ipr)[:5], list(annotated_mgy)[:5], list(all_clusters)[:5])
    # partition the clusters by which annotation source covered them
    none_proteins = all_clusters - annotated_mgy - annotated_ipr
    both_proteins = annotated_mgy.intersection(annotated_ipr)
    mgy_only = annotated_mgy - both_proteins
    ipr_only = annotated_ipr - both_proteins
    # Fix: was assigned to `sum`, shadowing the builtin
    total = len(none_proteins) + len(mgy_only) + len(ipr_only) + len(both_proteins)
    with open(results_path, 'w') as fout:
        fout.write('MGnify: {}\nIPR: {}\n'.format(len(annotated_mgy), len(annotated_ipr)))
        fout.write('None: {}\nMGnify only: {}\nIPR only: {}\nboth: {}\nsummary: {}\n'
                   .format(len(none_proteins), len(mgy_only), len(ipr_only), len(both_proteins), total))
        fout.write('All clusters: {}\n'.format(len(all_clusters)))
def main():
    """CLI entry: assembly -> prodigal -> mmseqs clustering -> IPR + DIAMOND -> overlap counts."""
    global mgy_db
    # argv[1]: assembly fasta; argv[2]: output directory
    assembly_path = os.path.abspath(sys.argv[1])
    outdir = sys.argv[2]
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    proteins_path = run_prodigal(assembly_path, outdir)
    rep_seq_path = run_mmseqs(proteins_path, outdir)
    ipr_path = run_interproscan(rep_seq_path, outdir)
    diamond_path = run_diamond(rep_seq_path, mgy_db, outdir)
    get_counts(rep_seq_path, diamond_path, ipr_path, outdir)

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import os

# Select the platform-specific session-service backend at import time.
if os.name == "posix":
    from ._DBusSessionService import DBusService as SessionService
elif os.name == "nt":
    from ._NtSessionService import NtService as SessionService
else:
    # No backend for this platform; importers must handle SessionService is None.
    SessionService = None
|
import cPickle
import gzip
import os
import sys
import time
import numpy
import collections
import random
# Balance a dataset: downsample every class to the size of the rarest class.
# argv[1]: data .npy, argv[2]: labels .npy; outputs get a "_b" suffix.
random.seed(1)  # reproducible subsampling
labels=numpy.load(sys.argv[2])
# map each class label to the list of sample indices carrying it
counter=collections.defaultdict(list)
cnt=0
for l in labels.tolist():
    counter[l].append(cnt)
    cnt+=1
print counter.keys()
vals = map(len,counter.values())
print vals
# mval: size of the smallest class — the per-class quota
mval = min(vals)
print mval
cnt_dict={}
for k,v in counter.items():
    #d=zip(xrange(len(v)),v)
    d=v
    random.shuffle(d)
    #print d
    # keep a random subset of mval indices per class, sorted ascending
    selected_d=d[0:mval]
    #selected_d.sort(key=lambda tup: tup[0])
    selected_d.sort()
    cnt_dict[k]=selected_d
data=numpy.load(sys.argv[1])
out_data=[]
out_label=[]
cnt=0
# walk samples in order, emitting those whose index was selected for their class
for l,v in zip(labels.tolist(),data.tolist()):
    if len(cnt_dict[l])>0:
        x=cnt_dict[l][0]
        if cnt==x:
            cnt_dict[l].pop(0)
            out_data.append(data[x])
            out_label.append(labels[x])
    cnt+=1
#print len(out_label)
#print len(out_data)
# write balanced arrays next to the originals with a "_b" suffix
out_file, ext1=os.path.splitext(os.path.basename(sys.argv[1]))
out_file_label,ext2=os.path.splitext(os.path.basename(sys.argv[2]))
numpy.save(out_file+"_b.npy",out_data)
numpy.save(out_file_label+"_b.npy",out_label)
|
"""
Using this code you will be able to reduce the number of qubits by finding underlying Z2 symmetries of the Hamiltonian.
The paper expaining the qubit reduction technique is:
by S. Bravyi et al. "Tapering off qubits to simulate fermionic Hamiltonians"
arXiv:1701.08213
This will drastically speed up all the simulations.
"""
import sys
import logging
from qiskit.aqua import set_qiskit_aqua_logging
# provides all information (can be too much text)
# set_qiskit_aqua_logging(logging.DEBUG)
# e.g. logging.INFO provides less information than DEBUG mode (note: the call below currently still passes DEBUG)
set_qiskit_aqua_logging(logging.DEBUG)
from qiskit import Aer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.operators import Z2Symmetries
from qiskit.aqua.algorithms.adaptive import VQE
from qiskit.aqua.algorithms import ExactEigensolver
from qiskit.aqua.components.optimizers import SLSQP, L_BFGS_B
from qiskit.chemistry.core import Hamiltonian, TransformationType, QubitMappingType
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.components.variational_forms import UCCSD
from qiskit.chemistry.components.initial_states import HartreeFock
from qiskit.chemistry.drivers import HFMethodType
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit import transpile
def generate_hydrogen_chain(n, spacing):
    """Build a PySCF atom string: n hydrogens along x, centred on the origin."""
    offset = - (n - 1) * spacing / 2  # make symmetric
    positions = (i * spacing + offset for i in range(n))
    return ''.join(f'H {pos} 0.0 0.0; ' for pos in positions)
# Build the molecule: chain length from argv[1], standard H2 bond length spacing.
n = int(sys.argv[1])
molecule = generate_hydrogen_chain(n, 0.735)
charge = 0
# odd chains have one unpaired electron
spin = n % 2
driver = PySCFDriver(atom=molecule,
                     unit=UnitsType.ANGSTROM,
                     charge=charge,
                     spin=spin,
                     hf_method=HFMethodType.ROHF,
                     basis='sto3g')
                     # basis='sto6g')
                     # basis='631g')
qmolecule = driver.run()
core = Hamiltonian(transformation=TransformationType.FULL,
                   qubit_mapping=QubitMappingType.PARITY,
                   two_qubit_reduction=True,
                   freeze_core=False,
                   orbital_reduction=[])
qubit_op, _ = core.run(qmolecule)
# find the symmetries of the Hamiltonian
z2_symmetries = Z2Symmetries.find_Z2_symmetries(qubit_op)
tapered_ops = z2_symmetries.taper(qubit_op)
smallest_idx = 0
# Prior knowledge of which tapered_op has ground state
# or you can find the operator that has the ground state by diagonalising each operator
#smallest_eig_value = 99999999999999
#smallest_idx = -1
#for idx in range(len(tapered_ops)):
#    print('operator number: ', idx)
#    ee = ExactEigensolver(tapered_ops[idx], k=1)
#    curr_value = ee.run()['energy']
#    if curr_value < smallest_eig_value:
#        smallest_eig_value = curr_value
#        smallest_idx = idx
#print('Operator number: ', smallest_idx, ' contains the ground state.')
# the tapered Hamiltonian operator
the_tapered_op = tapered_ops[smallest_idx]
# optimizers
optimizer = SLSQP(maxiter=50000)
# optimizer = L_BFGS_B(maxiter=1000)
# initial state
init_state = HartreeFock(num_qubits=the_tapered_op.num_qubits,
                         num_orbitals=core._molecule_info['num_orbitals'],
                         qubit_mapping=core._qubit_mapping,
                         two_qubit_reduction=core._two_qubit_reduction,
                         num_particles=core._molecule_info['num_particles'],
                         sq_list=the_tapered_op.z2_symmetries.sq_list)
# UCCSD Ansatz
var_form = UCCSD(num_qubits=the_tapered_op.num_qubits,
                 depth=1,
                 num_orbitals=core._molecule_info['num_orbitals'],
                 num_particles=core._molecule_info['num_particles'],
                 active_occupied=None,
                 active_unoccupied=None,
                 initial_state=init_state,
                 qubit_mapping=core._qubit_mapping,
                 two_qubit_reduction=core._two_qubit_reduction,
                 num_time_slices=1,
                 z2_symmetries=the_tapered_op.z2_symmetries,
                 shallow_circuit_concat=False)
                 # force_no_tap_excitation=True,
                 # method_doubles='succ',
                 # excitation_type='d',
                 # same_spin_doubles=False)
# set up VQE
algo = VQE(the_tapered_op, var_form, optimizer)
# Choose the backend (use Aer instead of BasicAer)
backend = Aer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend=backend, optimization_level=1)
# run the algorithm
algo_result = algo.run(quantum_instance)
# get the results
_, result = core.process_algorithm_result(algo_result)
print(result)
|
import FWCore.ParameterSet.Config as cms
# Electron collection merger: combines the standard and HGCal slimmed
# electron collections into one collection for tau ID.
mergedSlimmedElectronsForTauId = cms.EDProducer('PATElectronCollectionMerger',
    src = cms.VInputTag('slimmedElectrons', 'slimmedElectronsHGC')
)
|
# Python 2 script: scrapes fighter listings and fighter pages from
# fightmetric.com into local HTML files for later parsing.
# Download zip file from EC2: scp -i "winter2017.pem" ec2-user@ec2-52-90-235-168.compute-1.amazonaws.com:pz.tar.gz .
# flags to get updated data
scrape_fighters = False
scrape_birthdays = True
###################################
import time
import requests

session = requests.Session()
#session.headers = {}
#session.headers['User-Agent'] = 'Mozilla/5.0'
print session.headers
from bs4 import BeautifulSoup
# store the 26 characters of the English alphabet
import string
chars = string.ascii_lowercase
# scrape new fighter data if needed
base_path = 'fightmetric_fighters_'
if scrape_fighters:
    for char in chars:
        # one listing page per first letter of last name
        url = 'http://fightmetric.com/statistics/fighters?char=' + char + '&page=all'
        r = session.get(url)
        with open(base_path + char + '.html', 'w') as f:
            f.write(r.content)
# get list of all previously downloaded files
import os
import glob
header = ['First', 'Last', 'Nickname', 'Height', 'Weight', 'Reach',
          'Stance', 'Win', 'Loss', 'Draw', 'Belt']
# NOTE(review): 'chars' is immediately overwritten — the first assignment is
# dead. Presumably the alphabet was processed in two manual batches; confirm
# before deleting either line.
chars = 'abcdefghijklmno'
chars = 'pqrstuvwxyz'
for char in chars:
    # read tables from html into a list of dataframes
    with open(base_path + char + '.html', 'r') as f:
        html = f.read()
    # we find the table rows and if an <img> tag is present (image of UFC belt) in the
    # row then we extract the name via the <a> tags (the find_all method is implied)
    soup = BeautifulSoup(html, 'lxml')
    for row in soup.findAll('tr', {'class':'b-statistics__table-row'}):
        # get link to individual fighter if exists
        if row.find('td', {'class':'b-statistics__table-col'}):
            if row.find('a'):
                # sleep(0) is a no-op; left as a throttling hook
                if scrape_birthdays: time.sleep(0)
                url = row.find('a').get('href')
                # get page by scraping or from file
                iofile = url.split('/')[-1] + '.html'
                if scrape_birthdays:
                    print char, url, iofile
                    r = session.get(url, headers=session.headers)
                    with open(iofile, 'w') as f:
                        f.write(r.content)
|
#!/usr/bin/python
#
# Copyright 2015 Gerard kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autopkglib import Processor, ProcessorError
import hashlib
__all__ = ["Sha1sumVerifier"]
class Sha1sumVerifier(Processor):
    """AutoPkg processor that verifies a package's SHA1 checksum."""
    description = "Verifies sha1sum of package against a given SHA1 checksum. Throws a ProcessorError on mismatch."
    input_variables = {
        "pkgpath": {
            "required": True,
            "description": "Package to checksum.",
        },
        "expected_sha1sum": {
            "required": True,
            "description": "Expected SHA1 checksum.",
        },
    }
    output_variables = {
    }
    __doc__ = description

    def sha1sum_from_pkg(self, pkgpath):
        """Return the hex SHA1 digest of the file at pkgpath.

        Reads in 64 KiB chunks so large packages are not loaded into memory
        at once; the context manager guarantees the handle is closed even
        if reading raises (the original leaked nothing only on success).
        """
        sha1 = hashlib.sha1()
        with open(pkgpath, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                sha1.update(chunk)
        return sha1.hexdigest()

    def main(self):
        """Compare the package digest with env['expected_sha1sum'].

        Raises:
            ProcessorError: if the computed SHA1 does not match.
        """
        pkgpath = self.env['pkgpath']
        self.output('Using pkgpath %s' % pkgpath)
        expected = self.env['expected_sha1sum']
        self.output('Using expected SHA1 checksum %s' % expected)
        sha1sum_from_pkg = self.sha1sum_from_pkg(pkgpath)
        self.output('SHA1 for %s: %s' % (pkgpath, sha1sum_from_pkg))
        if sha1sum_from_pkg == expected:
            self.output("SHA1 checksum matches")
        else:
            raise ProcessorError("SHA1 checksum mismatch")
# Allow running the processor standalone from the command line.
if __name__ == '__main__':
    processor = Sha1sumVerifier()
    processor.execute_shell()
|
# Pixel dimensions of the achievement popup board.
BOARD_WIDTH = 240
BOARD_HEIGHT = 95
# Window layout definition consumed by the game's UI loader.
# NOTE(review): SCREEN_WIDTH / SCREEN_HEIGHT are not defined in this file —
# presumably injected into the script's namespace by the engine; confirm.
window = {
    "name" : "FinishedAchievementDialog",
    # anchored to the bottom-right corner of the screen
    "x" : SCREEN_WIDTH - (BOARD_WIDTH + 4),
    "y" : SCREEN_HEIGHT - (BOARD_HEIGHT + 18),
    "width" : BOARD_WIDTH,
    "height" : BOARD_HEIGHT,
    "children" :
    (
        {
            "name" : "board",
            "type" : "board",
            "x" : 0,
            "y" : 0,
            "width" : BOARD_WIDTH,
            "height" : BOARD_HEIGHT,
            "children" :
            (
                {
                    "name" : "AchievementIcon",
                    "type" : "image",
                    "vertical_align" : "center",
                    "x" : 20,
                    "y" : 0,
                    "image" : "d:/ymir work/ui/achievementsystem/achievement_icon.tga",
                },
                # three stacked text lines to the right of the icon
                {
                    "name" : "FirstLine",
                    "type" : "text",
                    "text_vertical_align" : "center",
                    "vertical_align" : "center",
                    "x" : 20+65+12,
                    "y" : -21,
                    "outline" : 1,
                    "text" : "Has alcanzado un logro:",
                },
                {
                    "name" : "SecondLine",
                    "type" : "text",
                    "text_vertical_align" : "center",
                    "vertical_align" : "center",
                    "x" : 20+65+12,
                    "y" : 0,
                    "outline" : 1,
                    "text" : "Subir al nivel 100",
                },
                {
                    "name" : "ThirdLine",
                    "type" : "text",
                    "text_vertical_align" : "center",
                    "vertical_align" : "center",
                    "x" : 20+65+12,
                    "y" : 21,
                    "outline" : 1,
                    "text" : "Puntos: 1000",
                },
            ),
        },
    ),
}
from flask import Blueprint, render_template, redirect,request,jsonify
from app import app
from app import sched, trigger
from app.etlMongo.pubFunction import set_log
from app.etlMongo.tabStudent import get_stu_info
from app.etlMongo.tabHomework import get_homework
from app.etlMongo.tabGame import get_game_info
from . import shellPy
import datetime
import time
# Blueprint grouping all ETL trigger endpoints; registered by the app factory.
getEtl = Blueprint('getEtl', __name__)
def getTraceId():
    """Return a 32-character hex trace id (a uuid1 with the dashes removed)."""
    import uuid
    return uuid.uuid1().hex
@getEtl.route('/etlTest/gettest', methods=['GET'])
def etl_test():
    """Liveness probe for the ETL service; always reports success."""
    return jsonify({'msg': 'Successful!'})
@getEtl.route('/etlStudent', methods=['GET'])
def etl_student():
    """Trigger the student-table import when the correct password is given.

    Logs the HTTP-style status code and the elapsed time, then returns a
    JSON payload with 'msg' and 'code'.
    """
    password = request.values.get('password')
    started = time.time()
    # NOTE(review): hard-coded plain-text password in source — consider
    # moving this secret into configuration.
    if password == 'bigdata123':
        get_stu_info()
        code = 200
        payload = {'msg': 'Succeed to import table of student!', 'code': code}
    else:
        code = 400
        payload = {'msg': 'Error, Password was wrong!', 'code': code}
    elapsed = time.time() - started
    set_log('etlStudent-', str(code) + ':' + str(elapsed))
    return jsonify(payload)
@getEtl.route('/etlHomework', methods=['GET'])
def etl_homework():
    """Trigger the homework-table import when the correct password is given.

    Logs the status code and elapsed time, then returns a JSON payload.
    """
    password = request.values.get('password')
    started = time.time()
    # NOTE(review): hard-coded plain-text password — move to configuration.
    if password == 'bigdata123':
        get_homework()
        code = 200
        payload = {'msg': 'Succeed to import table of homework!', 'code': code}
    else:
        code = 400
        payload = {'msg': 'Error, Password was wrong!', 'code': code}
    elapsed = time.time() - started
    set_log('etlHomework-', str(code) + ':' + str(elapsed))
    return jsonify(payload)
@getEtl.route('/etlGame', methods=['GET'])
def etl_game():
    """Trigger the game-table import when the correct password is given.

    Logs the status code and elapsed time, then returns a JSON payload.
    """
    password = request.values.get('password')
    started = time.time()
    # NOTE(review): hard-coded plain-text password — move to configuration.
    if password == 'bigdata123':
        get_game_info()
        code = 200
        payload = {'msg': 'Succeed to import table of game!', 'code': code}
    else:
        code = 400
        payload = {'msg': 'Error, Password was wrong!', 'code': code}
    elapsed = time.time() - started
    set_log('etlGame-', str(code) + ':' + str(elapsed))
    return jsonify(payload)
@sched.scheduled_job(trigger)
def on_time_etl():
    """Scheduled job: stage raw files, import all three tables, run the
    shell ETL step, and log the result code with the total time cost."""
    started = time.time()
    shellPy.local_shell_ext(shellPy.local_mv_command)  # run the local mv command first
    get_stu_info()
    get_homework()
    get_game_info()
    etl_result = shellPy.data_etl_ext()
    elapsed = time.time() - started
    set_log('etlAutoTable-',
            'Code[' + str(etl_result) + ']TimeCost[' + str(elapsed) + ']')
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
class ParameterAlgoTest( unittest.TestCase ) :
    """Tests for IECore.ParameterAlgo helpers that traverse and copy the
    classes held by ClassParameter / ClassVectorParameter hierarchies."""

    def testFindClasses( self ) :
        # Build a compound parameter holding a ClassVectorParameter ("cv")
        # with two loaded classes and a ClassParameter ("c") whose loaded
        # class exposes a nested "cp" ClassParameter of its own.
        p = IECore.CompoundParameter(
            name = "p",
            description = "",
            members = [
                IECore.CompoundParameter(
                    name = "q",
                    description = "",
                    members = [
                        IECore.ClassVectorParameter(
                            "cv",
                            "d",
                            "IECORE_OP_PATHS",
                            [
                                ( "mult", os.path.join( "maths", "multiply" ), 2 ),
                                ( "coIO", "compoundObjectInOut", 2 ),
                            ]
                        ),
                        IECore.ClassParameter(
                            "c",
                            "d",
                            "IECORE_OP_PATHS",
                            "classParameterTest", 1
                        )
                    ]
                ),
            ]
        )
        # load a class into the nested "cp" slot and fill its own vector
        p["q"]["c"]["cp"].setClass( "classVectorParameterTest", 1 )
        p["q"]["c"]["cp"]["cv"].setClasses( [
            ( "mult", os.path.join( "maths", "multiply" ), 2 ),
            ( "coIO", "compoundObjectInOut", 2 )
        ] )
        c = IECore.ParameterAlgo.findClasses( p )
        # One record per loaded class, depth first. "uiPath" differs from
        # "parameterPath" where an entry carries a UI label ("iAmALabel").
        expected = [
            {
                "parent" : p["q"]["cv"],
                "parameterPath" : [ "q", "cv", "mult" ],
                "uiPath" : [ "q", "cv", "mult" ],
                "classInstance" : p["q"]["cv"].getClasses()[0],
            },
            {
                "parent" : p["q"]["cv"],
                "parameterPath" : [ "q", "cv", "coIO" ],
                "uiPath" : [ "q", "cv", "iAmALabel" ],
                "classInstance" : p["q"]["cv"].getClasses()[1],
            },
            {
                "parent" : p["q"]["c"],
                "parameterPath" : [ "q", "c" ],
                "uiPath" : [ "q", "c" ],
                "classInstance" : p["q"]["c"].getClass(),
            },
            {
                "parent" : p["q"]["c"]["cp"],
                "parameterPath" : [ "q", "c", "cp" ],
                "uiPath" : [ "q", "c", "cp" ],
                "classInstance" : p["q"]["c"]["cp"].getClass(),
            },
            {
                "parent" : p["q"]["c"]["cp"]["cv"],
                "parameterPath" : [ "q", "c", "cp", "cv", "mult" ],
                "uiPath" : [ "q", "c", "cp", "cv", "mult" ],
                "classInstance" : p["q"]["c"]["cp"]["cv"].getClasses()[0],
            },
            {
                "parent" : p["q"]["c"]["cp"]["cv"],
                "parameterPath" : [ "q", "c", "cp", "cv", "coIO" ],
                "uiPath" : [ "q", "c", "cp", "cv", "iAmALabel" ],
                "classInstance" : p["q"]["c"]["cp"]["cv"].getClasses()[1],
            },
        ]
        self.assertEqual( expected, c )
        # classNameFilter keeps only classes whose name matches the glob
        filteredExpected = [ expected[0], expected[4] ]
        c = IECore.ParameterAlgo.findClasses( p, classNameFilter=os.path.join( "maths", "*" ) )
        self.assertEqual( filteredExpected, c )

    def testCopyClasses( self ) :
        # Source hierarchy with loaded classes at two nesting levels.
        p = IECore.CompoundParameter(
            name = "q",
            description = "",
            members = [
                IECore.ClassVectorParameter(
                    "cv",
                    "d",
                    "IECORE_OP_PATHS",
                    [
                        ( "mult", os.path.join( "maths", "multiply" ), 2 ),
                        ( "coIO", "compoundObjectInOut", 2 ),
                    ]
                ),
                IECore.ClassParameter(
                    "c",
                    "d",
                    "IECORE_OP_PATHS",
                    "classParameterTest", 1
                )
            ]
        )
        p["c"]["cp"].setClass( "classVectorParameterTest", 1 )
        p["c"]["cp"]["cv"].setClasses( [
            ( "mult", os.path.join( "maths", "multiply" ), 2 ),
            ( "coIO", "compoundObjectInOut", 2 )
        ] )
        # Destination with the same structure but nothing loaded.
        p2 = IECore.CompoundParameter(
            name = "q",
            description = "",
            members = [
                IECore.ClassVectorParameter(
                    "cv",
                    "d",
                    "IECORE_OP_PATHS",
                ),
                IECore.ClassParameter(
                    "c",
                    "d",
                    "IECORE_OP_PATHS",
                )
            ]
        )
        IECore.ParameterAlgo.copyClasses( p, p2 )
        # After copying, both the top-level vector and the nested vector
        # must hold the same (name, class, version) triples as the source.
        cl = [ c[1:] for c in p2["cv"].getClasses( True ) ]
        self.assertEqual( cl, [
            ( "mult", os.path.join( "maths", "multiply" ), 2 ),
            ( "coIO", "compoundObjectInOut", 2 )
        ]
        )
        cl = [ c[1:] for c in p2["c"]["cp"]["cv"].getClasses( True ) ]
        self.assertEqual( cl, [
            ( "mult", os.path.join( "maths", "multiply" ), 2 ),
            ( "coIO", "compoundObjectInOut", 2 )
        ]
        )
# Run the test case directly: python thisfile.py
if __name__ == "__main__":
    unittest.main()
|
import math
from busquedas_02 import ProblemaBusqueda, aestrella
# ASCII maze: '#' marks walls, 'o' the start cell, 'x' the goal cell.
# NOTE(review): the interior spacing of these rows looks collapsed
# (rows should all be 30 characters wide like the borders) — verify the
# literal against the original source before relying on it.
MAPA = """
##############################
# # # #
# #### ######## # #
# o # # # #
# ### #### ###### #
# #### # #
# # # # #### #
# ###### # # x #
# # # #
##############################
"""
# Convert to a grid of single characters, dropping empty lines.
MAPA = [list(x) for x in MAPA.split("\n") if x]
# Step costs: 1.0 for orthogonal moves, 1.4 (~sqrt(2)) for diagonals.
COSTOS = {
    "arriba": 1.0,
    "abajo": 1.0,
    "izquierda": 1.0,
    "derecha": 1.0,
    "arriba izquierda": 1.4,
    "arriba derecha": 1.4,
    "abajo izquierda": 1.4,
    "abajo derecha": 1.4,
}
class JuegoLaberinto(ProblemaBusqueda):
    """A*-searchable maze problem: move from the 'o' cell to the 'x' cell
    on a board whose '#' cells are walls. States are (x, y) tuples."""

    def __init__(self, tablero):
        self.tablero = tablero
        # Defaults guard against boards that lack an 'o' or 'x' marker;
        # previously a missing 'o' left estado_inicial unset, crashing the
        # super().__init__ call below with an AttributeError.
        self.estado_inicial = (0, 0)
        self.estado_objetivo = (0, 0)
        for y, fila in enumerate(self.tablero):
            for x, celda in enumerate(fila):
                if celda.lower() == "o":
                    self.estado_inicial = (x, y)
                elif celda.lower() == "x":
                    self.estado_objetivo = (x, y)
        super().__init__(estado_inicial=self.estado_inicial)

    def acciones(self, estado):
        """Return every direction whose destination cell is not a wall."""
        acciones = []
        for accion in COSTOS:
            x, y = self.resultado(estado, accion)
            if self.tablero[y][x] != "#":
                acciones.append(accion)
        return acciones

    def resultado(self, estado, accion):
        """Apply an action name (possibly diagonal, e.g. 'arriba izquierda')
        to a state and return the new (x, y)."""
        x, y = estado
        if "arriba" in accion:
            y -= 1
        if "abajo" in accion:
            y += 1
        if "izquierda" in accion:
            x -= 1
        if "derecha" in accion:
            x += 1
        return (x, y)

    def es_objetivo(self, estado):
        """True when the goal cell has been reached."""
        return estado == self.estado_objetivo

    def costo(self, estado, accion, estado2):
        """Cost of a step is determined purely by its direction."""
        return COSTOS[accion]

    def heuristic(self, estado):
        """Admissible heuristic: straight-line (Euclidean) distance to goal."""
        x, y = estado
        gx, gy = self.estado_objetivo
        return math.hypot(x - gx, y - gy)
def main():
    """Solve the maze with A* and print the board, marking the path '·',
    the start 'o' and the goal 'x'."""
    problema = JuegoLaberinto(MAPA)
    resultado = aestrella(problema, busqueda_en_grafo=True)
    camino = [paso[1] for paso in resultado.camino()]
    for y, fila in enumerate(MAPA):
        for x, celda in enumerate(fila):
            if (x, y) == problema.estado_inicial:
                print("o", end='')
            elif (x, y) == problema.estado_objetivo:
                print("x", end='')
            elif (x, y) in camino:
                print("·", end='')
            else:
                print(celda, end='')
        print()
# Script entry point.
if __name__ == "__main__":
    main()
|
# Device manager
# author: ulno
# created: 2017-04-07
#
# manage a list of installed devices and enable
# sending or reacting to mqtt
import gc
# import ussl - no decent ssl possible on micropython esp8266
# gc.collect
from umqtt.simple import MQTTClient as _MQTTClient
gc.collect()
import machine
import time
import ubinascii
gc.collect()
import uiot._wifi as _wifi
import uiot._cfg as _cfg
gc.collect()
_devlist = {}    # name -> device object; filled by d(), cleared by delete()
_timestack = {}  # id -> (due_ticks_ms, callback); serviced inside run()
#### global MQTT variables
_client_id = ubinascii.hexlify(machine.unique_id())  # make unique per board
_broker = None
_topic = None
_user = None
_password = None
_client = None       # None means "not connected"; run() re-initialises it
_report_ip = True
_port = None
_last_publish = 0    # ticks_us of the last publish batch (rate limiting)
# _ssl = False
MIN_PUBLISH_TIME_US = 100000  # posting every 100ms (100000us) allowed -> only 10 messages per second (else network stacks seem to run full)
# TODO: check if this is too conservative
# ======= Devices
def d(type, name, *args, **kwargs):
    """Create a device of the given type name and register it under ``name``.

    Dynamically imports ``uiot.<type lowercased>`` and instantiates the
    class in that module whose lowercased name equals the module name.
    Returns the device instance, or None (with a message) if no class
    matches.

    NOTE(review): uses exec/eval for the dynamic import and lookup — only
    acceptable because ``type`` comes from trusted local configuration,
    never from the network.
    """
    # create a new device
    global _devlist
    gc.collect()
    import_name = type.lower()
    module = "uiot." + import_name
    x = "import " + module
    exec(x)  # TODO: consider catching an exception that this doesn't exist
    class_name = None
    # TODO: also strip out _ to search for respective class
    for n in eval("dir({})".format(module)):  # find actual class_name
        if n.lower() == import_name:
            class_name = module + '.' + n
            break
    if class_name is not None:
        _Class = eval(class_name)
        gc.collect()
        _devlist[name] = _Class(name, *args, **kwargs)
        gc.collect()
        return _devlist[name]
    else:
        print("Can't find class for {}.".format(name))
        return None
# obsolete now as everything accessible in same namespace
# from uiot.new_devices import * # allow adding new devices
# ======= Devices End
# TODO: move somewhere else
# ======= utils
def do_later(time_delta_s, callback, id=None):
    """Schedule callback(id) to run ~time_delta_s seconds from now.

    Entries are stored in the module-level _timestack and executed by the
    run() loop. Scheduling again with the same id replaces the old entry.
    Returns the id (a derived hash when none was supplied).
    """
    due = time.ticks_add(time.ticks_ms(), int(time_delta_s * 1000))
    if id is None:
        id = hash((hash(callback), due))
    _timestack[id] = (due, callback)
    return id
def linecat(filename, frm=1, to=0):
    # Thin delegator to uiot._line_edit.linecat (lazy import keeps RAM
    # free until the line editor is actually used).
    from uiot import _line_edit
    _line_edit.linecat(filename, frm=frm, to=to)
def lineedit(filename, linenr, insert=False):
    # Thin delegator to uiot._line_edit.lineedit (lazy import, see linecat).
    from uiot import _line_edit
    _line_edit.lineedit(filename, linenr, insert)
def delete(name):
    """Remove the device registered under ``name``.

    Unknown names are ignored (previously this raised KeyError, crashing
    interactive sessions on a typo).
    """
    _devlist.pop(name, None)
# ====== end utils
# ====== mqtt stuff
def _publish_status(device_list=None, ignore_time=False):
    """Publish the current values of the given devices (default: all) via MQTT.

    Batches are rate limited to one per MIN_PUBLISH_TIME_US unless
    ignore_time is True. On any network trouble the global _client is set
    to None so the main loop re-initialises the connection later.
    """
    global _client, _last_publish
    if _client is None:
        return  # no connection -> nothing to publish
    if device_list is None:
        device_list = _devlist.values()
    current_time = time.ticks_us()
    if ignore_time or \
            time.ticks_diff(current_time, _last_publish) >= MIN_PUBLISH_TIME_US:
        _last_publish = current_time
        for d in device_list:
            try:
                v = d.value()
            except Exception as e:
                print('Trouble reading value from %s. Exception:' % str(d), e)
                continue  # BUGFIX: 'v' was referenced unbound below when value() raised
            if v is not None:
                rt = (_topic + "/" + d.name).encode()
                for s in d.getters:
                    if s == "":
                        t = rt
                    else:
                        t = rt + "/" + s.encode()
                    my_value = None
                    try:
                        my_value = d.getters[s]()
                    except Exception as e:
                        print('Trouble executing getter for device %s for %s. Exception:'
                              % (str(d), s), e)
                    print('Publishing', t, my_value)
                    try:
                        if type(my_value) is bytes or type(my_value) is bytearray:
                            _client.publish(t, my_value)
                        else:
                            _client.publish(t, str(my_value).encode())
                    except Exception as e:
                        print('Trouble publishing %s to %s, re-init network. Exception:'
                              % (t, str(my_value)), e)
                        _client = None
        try:
            _publish_ip()
        except Exception as e:
            print('Trouble publishing IP, re-init network. Exception:', e)
            _client = None
# ======= Setup and execution
def mqtt(broker_host, topic, *args, user=None, password=None,
         client_id=None, report_ip=True,
         port=None, save = True):
    # ssl not really possible ,ssl=False):
    """Configure (and optionally persist) the MQTT connection settings.

    Positional extras after topic are accepted as (user, password) for
    backwards compatibility. Empty-string credentials are treated as
    "none". When save is True the settings are written to the on-flash
    config via _cfg; when report_ip is set the client connects right away
    so the device IP gets published.
    """
    global _broker, _topic, _user, _password, _client_id, _report_ip
    global _port
    # global_ssl
    if len(args) > 0:
        user = args[0]
    if len(args) > 1:
        password = args[1]
    _broker = broker_host
    _topic = topic
    if client_id is not None:
        # prefix the user-chosen id, keep the unique hardware suffix
        _client_id = client_id.encode() + b"_" + _client_id;
    if user=="":
        user=None
    if password=="":
        password=None
    _user = user
    _password = password
    _report_ip = report_ip
    if port is None:
        port = 1883
    # if ssl == True:
    #     port = 8883
    # else:
    #     port = 1883
    _port = port
    # _ssl = ssl
    if save:
        _cfg.mqtt( _broker, _topic, _user, _password)
    if _report_ip:  # already report ip
        _init_mqtt()
def _subscription_cb(topic, msg):
    """MQTT callback: dispatch an incoming message to every device setter
    whose topic (<base>/<device>[/<setter>]) matches exactly."""
    global _topic
    topic = topic.decode()
    msg = msg.decode()
    for dev in _devlist.values():
        base = _topic + "/" + dev.name
        for setter in dev.setters:
            full = base if setter == "" else base + "/" + setter
            if topic == full:
                print("Received \"%s\" for topic %s" % (msg, topic))
                dev.run_setter(setter, msg)
def _publish_ip():
    """Publish the device's current IP address (retained) when enabled.

    On failure the global _client is reset to None so the main loop
    re-initialises the connection.
    """
    global _client, _report_ip
    if _report_ip:
        t = (_topic + "/ip").encode()
        if _client is not None:
            try:
                _client.publish(t, str(_wifi.config()[0]), retain=True)
            # BUGFIX: was 'except e:' which itself raises NameError the
            # moment publish() fails; must be 'except Exception as e:'.
            except Exception as e:
                # Usually no content for exception here
                print("Trouble publishing IP. Re-init mqtt. Exception:", e)
                _client = None
def _init_mqtt():
    """(Re)connect to the broker, subscribe to <topic>/#, publish the IP.

    On any failure _client is left as None and run() retries periodically.
    """
    global _client, _port
    # global _ssl
    global _broker, _topic, _user, _password, _client_id
    print("Trying to connect to mqtt broker.")
    gc.collect()
    try:
        _client = _MQTTClient(_client_id, _broker, user=_user,
                              password=_password, port=_port, ssl=False)
        _client.set_callback(_subscription_cb)
        _client.connect()
        print("Connected to", _broker)
        t = _topic.encode() + b"/#"
        _client.subscribe(t)
        print("Subscribed to topic and subtopics of", _topic)
        _publish_ip()
    except Exception as e:
        print("Trouble to init mqtt. Exception:", e)
        _client = None
def _poll_subscripton():
    """Non-blocking check for pending MQTT messages; on any error drop the
    client so the main loop re-initialises the connection."""
    global _client
    if _client is None:
        return
    try:
        _client.check_msg()  # non blocking
    except Exception as e:
        print("Trouble to receive from mqtt. Re-init mqtt. Exception:", e)
        _client = None
def run(updates=5, sleepms=1, poll_rate_inputs=4, poll_rate_network=10):
    """Main event loop: poll inputs, service MQTT, run do_later callbacks.

    updates: send out a full status every this many seconds.
        If 0, never send time-based updates, only when a change happened.
    sleepms: how long to sleep between loop iterations (ms).
    poll_rate_network: how often to evaluate incoming network commands (ms).
    poll_rate_inputs: how often to evaluate inputs (ms).
    Never returns.
    """
    poll_rate_inputs *= 1000  # measure in us
    poll_rate_network *= 1000  # measure in us
    t = time.ticks_us()
    last_poll_input = t
    last_poll_network = t
    poll_rate_mqtt = 1000000  # retry broker connection every 1s
    last_poll_mqtt = t
    updates *= 1000000  # measure in us
    last_update = t
    while True:
        _wifi.monitor()  # make sure wifi is in good shape
        t = time.ticks_us()
        if time.ticks_diff(t, last_poll_network) >= poll_rate_network:
            _poll_subscripton()
            last_poll_network = t
        if time.ticks_diff(t, last_poll_input) >= poll_rate_inputs:
            # publish only devices whose update() reports a change
            device_list = []
            for d in _devlist.values():
                if d.update():
                    device_list.append(d)
            if len(device_list) > 0:
                _publish_status(device_list)
            last_poll_input = t
        # monitor mqtt: reconnect when down, otherwise periodic full update
        if _client is None:
            if time.ticks_diff(t, last_poll_mqtt) >= poll_rate_mqtt:
                _init_mqtt()
                last_poll_mqtt = t
        else:
            if updates != 0 and time.ticks_diff(t, last_update) >= updates:
                print("Publishing full update.")
                _publish_status(ignore_time=True)
                last_update = t
        # execute things on timestack (copy keys: cb may re-schedule)
        now = time.ticks_ms()
        for id in list(_timestack):
            t, cb = _timestack[id]
            if time.ticks_diff(now, t) >= 0:
                del (_timestack[id])
                cb(id)
        time.sleep_ms(sleepms)  # do nothing as requested for this time
# On import: restore the persisted MQTT settings without re-saving them.
mqtt(_cfg.config.mqtt_host, _cfg.config.mqtt_topic,
     _cfg.config.mqtt_user, _cfg.config.mqtt_pw, save=False)
from django.conf.urls import url, include
from django.conf import settings
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from django.contrib import admin
from payment.views import *
# URL namespace, e.g. reverse('payment:login').
app_name='payment'
# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4).
# These patterns also lack a leading '^' anchor, so r'login/$' matches any
# URL *ending* in 'login/'; consider path()/re_path() with anchors when
# upgrading.
urlpatterns = [
    url(r'login/$', loginK, name="login"),
    url(r'logout/$',logoutK,name='logout'),
    url(r'payment/$',payment,name='mainPage'),
    url(r'paymentEnquiry/$',paymentEnquiry,name='paymentEnquiry'),
    url(r'deskTeamEnquiry/$',deskTeamEnquiry,name='deskTeamEnquiry'),
    url(r'hospi/$', hospi, name="hospi"),
    url(r'giveTshirt/$',giveTshirt,name='giveTshirt'),
    # url(r'refund/$',refund,name='idCard'),
]
def digitalSum(num):
    """Return the sum of the decimal digits of a non-negative integer.

    Rewritten with a generator expression over the digit characters; the
    original indexed ``str(num)`` by position and shadowed the builtin
    ``sum`` with a local accumulator.
    """
    return sum(int(digit) for digit in str(num))
# Project Euler 56: maximal digital sum of a**b for 1 <= a, b < 100.
largest = max(digitalSum(a ** b) for a in range(1, 100) for b in range(1, 100))
print(largest)
from funlib.evaluate import rand_voi
import numpy as np
import gunpowder as gp
def evaluate_affs(pred_labels, gt_labels, return_results=False):
    """Score a predicted segmentation against ground truth with rand/VOI.

    Returns a scores dict (the same metrics under both "sample" and
    "average" keys); when return_results is True, additionally returns
    the label volumes repackaged as uint64 gunpowder Arrays.
    """
    metrics = rand_voi(gt_labels.data, pred_labels.data)
    metrics["voi_sum"] = metrics["voi_split"] + metrics["voi_merge"]
    scores = {"sample": metrics, "average": metrics}
    if not return_results:
        return scores
    arrays = {
        "pred_labels": gp.Array(
            pred_labels.data.astype(np.uint64),
            gp.ArraySpec(roi=pred_labels.spec.roi,
                         voxel_size=pred_labels.spec.voxel_size),
        ),
        "gt_labels": gp.Array(
            gt_labels.data.astype(np.uint64),
            gp.ArraySpec(roi=gt_labels.spec.roi,
                         voxel_size=gt_labels.spec.voxel_size),
        ),
    }
    return scores, arrays
|
#################################################################
# Course : CS-382 Network Centric Computing #
# Offering : Spring 2017 #
# University : Lahore University of Management Sciences #
# File name : client_v9.py #
# Assignment title : Programming Assignment 1 #
# Author : Muhammad Zain Qasmi && Hassaan Hassaan #
# Roll No. : 18100276 && 18100059 #
# Submission : Februaru 15th, 2017 #
# Instructor : Fareed Zaffar #
# Python Version : 2.7 #
#################################################################
#==============================================================================
import socket
import sys
def Main():
    # Python 2 client for the CS-382 search-engine server.
    # Wire protocol: every payload is preceded by its length encoded as a
    # 16-bit (files: 32-bit) zero-padded binary string, e.g. '0000000000001001'.
    # NOTE(review): the banner strings below look whitespace-collapsed
    # (borders should be 65 chars wide) — verify against the original file.
    #host = '127.0.0.1'
    #port = 5030
    host = sys.argv[1]
    port = int(sys.argv[2])
    s = socket.socket()
    s.connect((host, port))
    overkill = 0; #the breaker of loops, terminator of programs
    print "#################################################################"
    print "# #"
    print "# Welcome to CS 382 Search Engine Client #"
    print "# #"
    print "#################################################################"
    while True:
        print "================================================================="
        print 'Press 1 to search something'
        print 'Press 2 to download a file'
        print 'Press 3 to exit search engine'
        print "================================================================="
        option = raw_input("-> ")
        if option == "1":
            # --- search mode: send identifier, then query; read results ---
            identifierName = "----1----"
            identifiersize = len(identifierName)
            identifiersize = bin(identifiersize)[2:].zfill(16)
            s.send(identifiersize)
            s.send(identifierName)
            print 'Please enter your search query'
            message = raw_input("-> ")
            mSize = len(message)
            mSize = bin(mSize)[2:].zfill(16)
            s.send(mSize)
            s.send(message)
            while message != 'quit':
                # read one length-prefixed result line at a time
                mSize = s.recv(16)
                mSize = int(mSize, 2)
                data = s.recv(mSize)
                print str(data)
                if (data == "End of Results!!!"):
                    print 'Please enter a new query or enter quit to exit the search engine'
                    message = raw_input("-> ")
                    if message == "quit":
                        print "here"
                        break
                    message = message + " "
                    mSize = len(message)
                    mSize = bin(mSize)[2:].zfill(16)
                    s.send(mSize)
                    s.send(message)
            # s.close()
        elif option == "2":
            # --- download mode: send identifier, then "<file> <path>" requests ---
            identifierName = "----2----"
            identifiersize = len(identifierName)
            identifiersize = bin(identifiersize)[2:].zfill(16)
            s.send(identifiersize)
            s.send(identifierName)
            while True:
                filename = raw_input("Enter Filename + Path seperated by space or 'quit' to exit:\n-> ")
                if filename[:4] == "quit":
                    break
                # re-prompt until the input contains a space (or 'quit')
                while not " " in filename and filename != "quit":
                    filename = raw_input("String must be seperated by SPACE:\n-> ")
                if filename == "quit":
                    break
                size = len(filename)
                size = bin(size)[2:].zfill(16)
                s.send(size)
                s.send(filename)
                totalSent = str.split(filename)
                filename = totalSent[0]
                filePath = totalSent[1]
                # file length uses a 32-bit binary prefix
                filesize = s.recv(32)
                filesize = int(filesize, 2)
                file_to_write = open(filename, 'wb')
                chunksize = 4096
                while filesize > 0:
                    if filesize < chunksize:
                        chunksize = filesize
                    data = s.recv(chunksize)
                    if data == "File does not exist":
                        print "ERROR: FILE DOES NOT EXIST"
                        # print "Client Exited Gracefully"
                        overkill = 1 # BREAKS FROM CURRENT WHILE LOOP AND GOES TO START OF OUTER WHILE LOOP
                        break
                    file_to_write.write(data)
                    filesize -= chunksize
                if overkill == 1:
                    overkill = 0
                    continue
                file_to_write.close()
                print 'File received successfully'
        elif option == "3":
            # --- exit: notify server, close the socket, leave the loop ---
            identifierName = "----3----"
            identifiersize = len(identifierName)
            identifiersize = bin(identifiersize)[2:].zfill(16)
            s.send(identifiersize)
            s.send(identifierName)
            s.close()
            break
    print "Client Exited Gracefully"
# Script entry point: expects host and port as argv[1]/argv[2].
if __name__ == '__main__':
    Main()
|
import pytest
from gyomu.file_model import FileTransportInfo
from collections import namedtuple
# Test fixture record: the input_* fields feed the FileTransportInfo
# constructor, the remaining fields are the expected derived path values.
TransportResult = namedtuple('TransportResult', ['input_base', 'input_sdir', 'input_sname', 'input_ddir', 'input_dname',
                                                 'source_full_base', 'source_full', 'source_dir', 'source_name',
                                                 'destination_full', 'destination_dir', 'destination_name'])
class TestFileTransportInfo:
@pytest.mark.parametrize('input_data',
[
TransportResult(input_base='base', input_sdir='SDir', input_sname='Sname',
input_ddir='Ddir', input_dname='Dname',
source_full_base='base\\SDir\\Sname', source_full='SDir\\Sname',
source_dir='SDir', source_name='Sname', destination_full='Ddir\\Dname',
destination_dir='Ddir', destination_name='Dname'),
TransportResult(input_base='base', input_sdir='SDir', input_sname='Sname',
input_ddir='Ddir', input_dname='',
source_full_base='base\\SDir\\Sname', source_full='SDir\\Sname',
source_dir='SDir', source_name='Sname', destination_full='Ddir\\Sname',
destination_dir='Ddir', destination_name='Sname'),
TransportResult(input_base='base', input_sdir='SDir', input_sname='Sname',
input_ddir='', input_dname='Dname',
source_full_base='base\\SDir\\Sname', source_full='SDir\\Sname',
source_dir='SDir', source_name='Sname', destination_full='SDir\\Dname',
destination_dir='SDir', destination_name='Dname'),
TransportResult(input_base='base', input_sdir='SDir', input_sname='Sname',
input_ddir='', input_dname='', source_full_base='base\\SDir\\Sname',
source_full='SDir\\Sname', source_dir='SDir', source_name='Sname',
destination_full='SDir\\Sname', destination_dir='SDir',
destination_name='Sname'),
TransportResult(input_base='base', input_sdir='SDir', input_sname='',
input_ddir='Ddir', input_dname='', source_full_base='base\\SDir',
source_full='SDir', source_dir='SDir', source_name='',
destination_full='Ddir', destination_dir='Ddir', destination_name=''),
TransportResult(input_base='base', input_sdir='SDir', input_sname='', input_ddir='',
input_dname='', source_full_base='base\\SDir', source_full='SDir',
source_dir='SDir', source_name='', destination_full='SDir',
destination_dir='SDir', destination_name=''),
TransportResult(input_base='base', input_sdir='', input_sname='', input_ddir='',
input_dname='', source_full_base='base', source_full='', source_dir='',
source_name='', destination_full='', destination_dir='',
destination_name=''),
TransportResult(input_base='base', input_sdir='', input_sname='', input_ddir='Ddir',
input_dname='', source_full_base='base', source_full='', source_dir='',
source_name='', destination_full='Ddir', destination_dir='Ddir',
destination_name=''),
TransportResult(input_base='base', input_sdir='', input_sname='Sname',
input_ddir='Ddir', input_dname='Dname', source_full_base='base\\Sname',
source_full='Sname', source_dir='', source_name='Sname',
destination_full='Ddir\\Dname', destination_dir='Ddir',
destination_name='Dname'),
TransportResult(input_base='base', input_sdir='', input_sname='Sname',
input_ddir='Ddir', input_dname='', source_full_base='base\\Sname',
source_full='Sname', source_dir='', source_name='Sname',
destination_full='Ddir\\Sname', destination_dir='Ddir',
destination_name='Sname'),
TransportResult(input_base='base', input_sdir='', input_sname='Sname', input_ddir='',
input_dname='Dname', source_full_base='base\\Sname',
source_full='Sname', source_dir='', source_name='Sname',
destination_full='Dname', destination_dir='',
destination_name='Dname'),
TransportResult(input_base='base', input_sdir='', input_sname='Sname', input_ddir='',
input_dname='', source_full_base='base\\Sname', source_full='Sname',
source_dir='', source_name='Sname', destination_full='Sname',
destination_dir='', destination_name='Sname'),
TransportResult(input_base='', input_sdir='SDir', input_sname='Sname',
input_ddir='Ddir', input_dname='Dname', source_full_base='SDir\\Sname',
source_full='SDir\\Sname', source_dir='SDir', source_name='Sname',
destination_full='Ddir\\Dname', destination_dir='Ddir',
destination_name='Dname'),
TransportResult(input_base='', input_sdir='SDir', input_sname='Sname',
input_ddir='Ddir', input_dname='', source_full_base='SDir\\Sname',
source_full='SDir\\Sname', source_dir='SDir', source_name='Sname',
destination_full='Ddir\\Sname', destination_dir='Ddir',
destination_name='Sname'),
TransportResult(input_base='', input_sdir='SDir', input_sname='Sname', input_ddir='',
input_dname='Dname', source_full_base='SDir\\Sname',
source_full='SDir\\Sname', source_dir='SDir', source_name='Sname',
destination_full='SDir\\Dname', destination_dir='SDir',
destination_name='Dname'),
TransportResult(input_base='', input_sdir='SDir', input_sname='Sname', input_ddir='',
input_dname='', source_full_base='SDir\\Sname',
source_full='SDir\\Sname', source_dir='SDir', source_name='Sname',
destination_full='SDir\\Sname', destination_dir='SDir',
destination_name='Sname'),
TransportResult(input_base='', input_sdir='SDir', input_sname='', input_ddir='Ddir',
input_dname='', source_full_base='SDir', source_full='SDir',
source_dir='SDir', source_name='', destination_full='Ddir',
destination_dir='Ddir', destination_name=''),
TransportResult(input_base='', input_sdir='SDir', input_sname='', input_ddir='',
input_dname='', source_full_base='SDir', source_full='SDir',
source_dir='SDir', source_name='', destination_full='SDir',
destination_dir='SDir', destination_name=''),
TransportResult(input_base='', input_sdir='', input_sname='Sname', input_ddir='Ddir',
input_dname='Dname', source_full_base='Sname', source_full='Sname',
source_dir='', source_name='Sname', destination_full='Ddir\\Dname',
destination_dir='Ddir', destination_name='Dname'),
TransportResult(input_base='', input_sdir='', input_sname='Sname', input_ddir='Ddir',
input_dname='', source_full_base='Sname', source_full='Sname',
source_dir='', source_name='Sname', destination_full='Ddir\\Sname',
destination_dir='Ddir', destination_name='Sname'),
TransportResult(input_base='', input_sdir='', input_sname='Sname', input_ddir='',
input_dname='Dname', source_full_base='Sname', source_full='Sname',
source_dir='', source_name='Sname', destination_full='Dname',
destination_dir='', destination_name='Dname'),
TransportResult(input_base='', input_sdir='', input_sname='Sname', input_ddir='',
input_dname='', source_full_base='Sname', source_full='Sname',
source_dir='', source_name='Sname', destination_full='Sname',
destination_dir='', destination_name='Sname'),
])
def test_valid_transport_information(self, input_data):
info: FileTransportInfo = TestFileTransportInfo.create_transport_information(input_data)
TestFileTransportInfo.compare(input_data, info)
@pytest.mark.parametrize('input_data',
[
TransportResult(input_base='base', input_sdir='SDir', input_sname='',
input_ddir='Ddir',
input_dname='Dname', source_full_base='', source_full='',
source_dir='', source_name='',
destination_full='', destination_dir='', destination_name=''),
TransportResult(input_base='base', input_sdir='SDir', input_sname='', input_ddir='',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='base', input_sdir='', input_sname='', input_ddir='Ddir',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='base', input_sdir='', input_sname='', input_ddir='',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='', input_sdir='SDir', input_sname='', input_ddir='Ddir',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='', input_sdir='SDir', input_sname='', input_ddir='',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='', input_sdir='', input_sname='', input_ddir='Ddir',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='', input_sdir='', input_sname='', input_ddir='Ddir',
input_dname='',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
TransportResult(input_base='', input_sdir='', input_sname='', input_ddir='',
input_dname='Dname',
source_full_base='', source_full='', source_dir='', source_name='',
destination_full='',
destination_dir='', destination_name=''),
])
def test_invalid_transport_information(self, input_data: TransportResult):
    """Every parametrized TransportResult above is an invalid input
    combination; constructing the FileTransportInfo must raise ValueError."""
    with pytest.raises(ValueError):
        TestFileTransportInfo.create_transport_information(input_data)
@staticmethod
def compare(expected: TransportResult, source: FileTransportInfo):
    """Assert that every derived path/name on *source* matches the
    corresponding expectation recorded in *expected*."""
    pairs = [
        (expected.source_full_base, source.source_fullname_with_basepath),
        (expected.source_full, source.source_fullname),
        (expected.source_dir, source.source_path),
        (expected.source_name, source.source_filename),
        (expected.destination_full, source.destination_fullname),
        (expected.destination_dir, source.destination_path),
        (expected.destination_name, source.destination_filename),
    ]
    for want, got in pairs:
        assert want == got
@staticmethod
def create_transport_information(result: TransportResult) -> FileTransportInfo:
    """Build a FileTransportInfo from the raw input_* fields of *result*.

    Only the input_* fields are consumed here; the remaining fields of
    TransportResult hold the expected outputs checked by compare().
    """
    return FileTransportInfo(base_path=result.input_base,
                             source_filename=result.input_sname, source_folder_name=result.input_sdir,
                             destination_filename=result.input_dname, destination_foldername=result.input_ddir)
|
#!/usr/bin/env python
import matplotlib, argparse
import numpy as np
import matplotlib.pyplot as plt

# Command line: which data columns to plot, axis labels/scaling, and
# whether to render text through TeX (pgf backend).
parser = argparse.ArgumentParser(description='plot with error bars for a set of powers')
parser.add_argument('pdffile', help='an output PDF file')
parser.add_argument('-x', default='power/speed', help='value for the x axis')
parser.add_argument('-y', default='width', help='value for the y axis')
parser.add_argument('--xlabel', default='P/U, J/mm', help='label for the x axis')
parser.add_argument('--ylabel', default='d, µm', help='label for the y axis')
parser.add_argument('--logx', action='store_true', help='log scaling on the x axis')
parser.add_argument('--logy', action='store_true', help='log scaling on the y axis')
parser.add_argument('--use-tex', action='store_true', help='use tex for text')
args = parser.parse_args()

# Global styling for a small publication-quality figure.
matplotlib.rcParams.update({
    'font.size': 10,
    'axes.labelsize': 10,
    'legend.fontsize': 8,
    'xtick.labelsize': 9,
    'ytick.labelsize': 9,
    'lines.linewidth': 1,
    'errorbar.capsize': 3,
    'text.usetex': args.use_tex,
    'pgf.rcfonts': False,
    'font.family': 'serif',
    'pgf.preamble': '\n'.join([
        r'\usepackage{physics, siunitx}',
    ]),
    'figure.figsize': [6, 4]
})
# NOTE(review): switching the backend after matplotlib.pyplot has been
# imported only works reliably on matplotlib >= 3.x; older releases require
# matplotlib.use('pgf') *before* the pyplot import — confirm target version.
if args.use_tex:
    matplotlib.use('pgf')
def set_colors():
    """Reset the current axes' colour cycle to the qualitative 'Set1'
    palette, so each plotted series family starts from the same colours."""
    # https://stackoverflow.com/a/55652330/2531400
    cmap = matplotlib.cm.get_cmap('Set1')
    axes = plt.gca()
    axes.set_prop_cycle(color=cmap.colors)
def load_data(filename, columns):
    """Read a whitespace-separated table and map it to named 1-D arrays.

    Returns a dict {column name: numpy array}; also adds the derived
    'power/speed' column, so 'power' and 'speed' must appear in *columns*.
    """
    table = np.loadtxt(filename)
    by_column = dict(zip(columns, table.T))
    by_column['power/speed'] = by_column['power'] / by_column['speed']
    return by_column
def analyze_data(data):
    """Aggregate repeated measurements sharing the same (power, speed).

    data: dict of column name -> parallel 1-D numpy arrays.
    Returns (means, stds): dicts with the same keys, holding one value per
    non-empty (power, speed) group, groups in sorted order.
    """
    result, err = {}, {}
    for p in np.unique(data['power']):
        for s in np.unique(data['speed']):
            mask = np.argwhere((data['power'] == p) & (data['speed'] == s))
            # BUG FIX: the original tested `np.sum(mask)`, i.e. the sum of
            # the matching row *indices*, which is 0 when the only match is
            # row 0 — silently dropping that group. Test emptiness instead.
            if mask.size == 0:
                continue
            for key, value in data.items():
                if key not in result:
                    result[key], err[key] = [], []
                result[key].append(np.mean(value[mask]))
                err[key].append(np.std(value[mask]))
    for key in result:
        result[key] = np.array(result[key])
        err[key] = np.array(err[key])
    return result, err
def plot_profile(data, fmt, label, err=None, lw=1, ms=3, **kwargs):
    """Plot one errorbar series per distinct laser power found in *data*.

    data : dict of column name -> 1-D array (see load_data/analyze_data)
    fmt  : matplotlib format string
    err  : optional dict of per-column error arrays (same keys as data);
           when None, points are plotted without error bars
    Remaining kwargs are forwarded to plt.errorbar. The x/y columns come
    from the module-level `args`.
    """
    set_colors()
    # Silently skip series that lack the requested y column.
    if args.y not in data:
        return
    x, y = data[args.x], data[args.y]
    for p in np.unique(data['power']):
        mask = np.argwhere(data['power'] == p)[:,0]
        # err is None -> yerr None; otherwise slice the matching errors
        yerr = err if err is None else err[args.y][mask]
        plt.errorbar(x[mask], y[mask], yerr=yerr, fmt=fmt, lw=lw, markersize=ms,
                     label=label + f' ({p:.0f}W)', **kwargs)
# Experimental data 1: raw points, no aggregation.
data = load_data('exper1.txt', ['power', 'speed', 'width'])
plot_profile(data, '^', 'Experiment1')
# Experimental data 2: aggregated per (power, speed) with error bars.
data = load_data('exper2.txt', ['power', 'speed', 'depth', 'width', 'height'])
#plot_profile(data, 'v', 'Experiment2')
data, err = analyze_data(data)
plot_profile(data, '-', 'Experiment2', err=err)
# Simulation data: open circles.
data = load_data('numer1.txt', ['power', 'speed', 'width', 'depth'])
plot_profile(data, 'o', 'Simulation1', ms=5, mfc='none')
#plt.xlim(np.min(x), np.max(x))
#plt.ylim(np.min(y), np.max(y))
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.legend(loc="upper left")
# Reference operating point marked with a vertical dotted line.
P, U = 113, 700 # Recommended for SS316L by Trumpf
plt.axvline(x=P/U, c='b', ls=':', lw=.5)
if args.logx:
    plt.semilogx()
if args.logy:
    plt.semilogy()
plt.savefig(args.pdffile, bbox_inches='tight', transparent=True)
|
from core.enums import CommandMessageType
from core.utils import emojize, escape_markdown
def command_menu(command):
    """Render the menu header for *command*: its trigger, a short summary
    of the configured answer, and an action prompt."""
    messages_qs = command.message_set.all()
    total = messages_qs.count()
    first = messages_qs.first()
    if total == 1 and first.type == CommandMessageType.TEXT:
        answer = first.text
    elif total:
        answer = f'{total} message{"s" if total != 1 else ""}'
    else:
        answer = 'No answer'
    return (
        f'***Command***: {escape_markdown(command.caller)}\n'
        f'***Answer***: {escape_markdown(answer)}\n\n'
        'Select what you want to do:'
    )
def notification_sent(done_count, all_count):
    """Status text shown once a notification reached every subscriber."""
    progress = f'{done_count} / {all_count}'
    return emojize('Notification was sent to all subscribers. :white_check_mark:\n' + progress + ' :mega:')
def message_mailing_status(done_count, all_count):
    """Progress text while a mailing is being delivered."""
    progress = f'{done_count} / {all_count}'
    return emojize('Sending message to your subscribers.\n' + progress + ' :mega:')
def delete_command(command):
    """Confirmation prompt shown before removing *command*."""
    name = escape_markdown(command.caller)
    return f'You are about to delete the command ***{name}***. Is that correct?'
def back_text(section):
    """Label for a keyboard button that returns to *section*."""
    return '« Back to {}'.format(section)
# Button / menu captions used by the bot keyboards.
# Defect fixed: SEND_NOTIFICATION was assigned twice with the same value;
# the redundant second assignment has been removed.
DELETE_COMMAND = 'Delete command'
SHOW_ANSWER = 'Show answer'
COMMANDS = 'Commands'
DELETE_COMMAND_CONFIRM = 'Yes, delete the command'
SEND_NOTIFICATION = 'Send notification'
SETTINGS = 'Settings'
HELP = 'Help'
ADD_COMMAND = 'Add command'
EDIT_COMMAND = 'Edit command'
EDIT_ANSWER = 'Edit answer'
COMPLETE = 'Complete'
CANCEL = 'Cancel'
DELETE_ALL_MESSAGES = 'Delete all messages'
DELETE_LAST_MESSAGE = 'Delete last message'
UNDO_LAST = 'Undo last action'
EXIT_EDIT_MODE = 'Exit edit mode'
|
import calendar

# Defect fixed: the original combined `from calendar import *` with
# `import calendar`; the wildcard import was redundant and polluted the
# module namespace, so a single explicit import is used.
# Input: one line "MM DD YYYY".
month, day, year = map(int, input().split())
# calendar.weekday() returns 0 (Monday) .. 6 (Sunday).
result = calendar.weekday(year, month, day)
print((calendar.day_name[result]).upper())
from math import sin, cos, pi
from matrix import *
from draw import *
def circx(x, t, r):
    """X coordinate at parameter t (one full turn for t in [0, 1]) on a
    circle of radius r centred at x."""
    angle = 2 * pi * t
    return r * cos(angle) + x
def circy(y, t, r):
    """Y coordinate at parameter t (one full turn for t in [0, 1]) on a
    circle of radius r centred at y."""
    angle = 2 * pi * t
    return r * sin(angle) + y
def add_circle(matrix, x, y, z, r):
    """Add a circle of radius r centred at (x, y) in plane z to *matrix*,
    sampled with the parametric step 0.01 (100 segments per turn)."""
    return draw_parametric(matrix, x, y, z, r, circx, circy, 0.01)
def cubicx(x, t, c):
    """Evaluate the x-axis cubic at parameter t.

    c is a pair [x coefficients, y coefficients], highest power first; the
    x argument is unused but kept for the draw_parametric callback shape.
    """
    a3, a2, a1, a0 = c[0]
    return (a3 * (t ** 3)) + (a2 * (t ** 2)) + (a1 * t) + a0
def cubicy(y, t, c):
    """Evaluate the y-axis cubic at parameter t.

    c is a pair [x coefficients, y coefficients], highest power first; the
    y argument is unused but kept for the draw_parametric callback shape.
    """
    b3, b2, b1, b0 = c[1]
    return (b3 * (t ** 3)) + (b2 * (t ** 2)) + (b1 * t) + b0
def add_curve(matrix, type, vals):
    """Add a cubic curve to *matrix*.

    vals alternates x and y control values; per-axis cubic coefficients are
    produced by get_coefficients (presumably selecting hermite/bezier via
    *type* — its exact contract is defined in the matrix/draw modules).
    """
    xdata = vals[::2]
    ydata = vals[1::2]
    xc = get_coefficients(type, xdata)
    yc = get_coefficients(type, ydata)
    coeffs = [xc, yc]
    # start point (vals[0], vals[1]) at z=0, sampled with step 0.01
    return draw_parametric(matrix, vals[0], vals[1], 0, coeffs, cubicx, cubicy, 0.01)
def generate_sphere(cx, cy, cz, r):
    """Sample a sphere of radius r centred at (cx, cy, cz) and stitch the
    samples into triangles via add_polygon; returns the triangle list.

    Points are generated on semicircles ('circ') swept around the rotation
    axis ('rot'); each point is homogeneous [x, y, z, 1].
    """
    pts = []
    qual = 10                       # samples per parameter direction
    step = 1/float(qual)
    rot = 0.0
    while rot < 1.01:               # 1.01 so the last row (t=1.0) is included
        circ = 0.0
        while circ < 1.01:
            point = []
            point.append(r * cos(2 * pi * circ) + cx)
            point.append(r * sin(2 * pi * circ) * cos(2 * pi * rot) + cy)
            point.append(r * sin(2 * pi * circ) * sin(2 * pi * rot) + cz)
            point.append(1)
            pts.append(point)
            circ += step
        rot += step
    print("pts len: " + str(len(pts)))
    #points now has all the significant points
    plen = len(pts)
    q1 = qual + 1                   # points per semicircle (inclusive ends)
    polys = []
    # Stitch adjacent semicircles: one pole triangle, the quads in between
    # (two triangles each), then the closing pole triangle. Indices wrap
    # with % plen to connect the last semicircle back to the first.
    for i in range(0, q1):
        polys = add_polygon(polys, pts[i*q1], pts[(i*q1)+1], pts[(((i+1)*q1)+1) % plen])
        for j in range(1, q1-1):
            polys = add_polygon(polys, pts[(i*q1)+j], pts[(i*q1)+j+1], pts[(((i+1)*q1)+j) % plen])
            polys = add_polygon(polys, pts[(i*q1)+j+1], pts[(((i+1)*q1)+j+1) % plen], pts[(((i+1)*q1)+j) % plen])
        polys = add_polygon(polys, pts[(i*q1)+q1-1], pts[(((i+1)*q1)+q1-2) % plen], pts[(i*q1)+q1-2])
    return polys
def add_sphere(matrix, x, y, z, r):
    """Triangulate a sphere and append its triangles (3 points each) to
    *matrix*."""
    triangle_points = generate_sphere(x, y, z, r)
    for start in range(0, len(triangle_points), 3):
        matrix = add_polygon(matrix, triangle_points[start],
                             triangle_points[start + 1],
                             triangle_points[start + 2])
    return matrix
def generate_torus(cx, cy, cz, r, R):
    """Sample a torus (tube radius r, ring radius R) centred at
    (cx, cy, cz) and stitch the samples into triangles; returns the list.

    'rot' sweeps the tube around the ring; 'circ' sweeps around the tube
    cross-section. Each point is homogeneous [x, y, z, 1].
    """
    pts = []
    qual1 = 40                      # samples around the ring
    qual2 = 10                      # samples around the tube
    step1 = 1/float(qual1)
    step2 = 1/float(qual2)
    rot = 0.01
    while rot < 1.001:
        circ = 0.01
        while circ < 1.001:
            point = []
            point.append(cos(2 * pi * rot) * (r * cos(2 * pi * circ) + R) + cx)
            point.append(r * sin(2 * pi * circ) + cy)
            point.append(-(sin(2 * pi * rot)) * (r * cos(2 * pi * circ) + R) + cz)
            point.append(1)
            pts.append(point)
            circ += step2
        rot += step1
    #points now complete
    plen = len(pts)
    print(plen)
    q1 = qual1
    q2 = qual2
    polys = []
    # Two triangles per quad; % q2 wraps around the tube, % plen wraps the
    # last ring segment back to the first.
    for i in range(0, q1):
        for j in range(0, q2):
            print("i: " + str(i) + " j: " + str(j))
            polys = add_polygon(polys, pts[(i*q2)+j], pts[(i*q2)+((j+1)%q2)], pts[(((i+1)*q2)+j) % plen])
            polys = add_polygon(polys, pts[(i*q2)+((j+1)%q2)], pts[(((i+1)*q2)+((j+1)%q2)) % plen], pts[(((i+1)*q2)+j) % plen])
    return polys
def add_torus(matrix, x, y, z, r, R):
    """Triangulate a torus and append its triangles (3 points each) to
    *matrix*."""
    triangle_points = generate_torus(x, y, z, r, R)
    for start in range(0, len(triangle_points), 3):
        matrix = add_polygon(matrix, triangle_points[start],
                             triangle_points[start + 1],
                             triangle_points[start + 2])
    return matrix
def generate_box(cx, cy, cz, w, h, d):
    """Return the 12 triangles (36 vertices) of an axis-aligned box.

    (cx, cy, cz) is one corner; the box extends +w in x, -h in y and -d in
    z. Each vertex is a homogeneous point [x, y, z, 1].
    """
    corner = [[cx, cy, cz, 1],
              [cx+w, cy, cz, 1],
              [cx, cy-h, cz, 1],
              [cx, cy, cz-d, 1],
              [cx+w, cy-h, cz, 1],
              [cx, cy-h, cz-d, 1],
              [cx+w, cy, cz-d, 1],
              [cx+w, cy-h, cz-d, 1]]
    # Two triangles per face, listed as index triples into corner.
    face_triangles = [(0, 2, 4), (0, 4, 1),
                      (1, 4, 7), (1, 7, 6),
                      (6, 7, 5), (6, 5, 3),
                      (3, 5, 2), (3, 2, 0),
                      (3, 0, 1), (3, 1, 6),
                      (2, 5, 7), (2, 7, 4)]
    polys = []
    for a, b, c in face_triangles:
        polys.extend((corner[a], corner[b], corner[c]))
    return polys
def add_box(matrix, x, y, z, w, h, d):
    """Append the triangles of an axis-aligned box to *matrix* (see
    generate_box for the geometry)."""
    triangle_points = generate_box(x, y, z, w, h, d)
    print(str(len(triangle_points)))
    for start in range(0, len(triangle_points), 3):
        matrix = add_polygon(matrix, triangle_points[start],
                             triangle_points[start + 1],
                             triangle_points[start + 2])
    return matrix
|
# Generated by Django 3.1.7 on 2021-03-14 14:16
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Lista.usuario field (follow-up to migration 0005)."""

    dependencies = [
        ('Musica', '0005_auto_20210314_1348'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='lista',
            name='usuario',
        ),
    ]
|
class Config:
    """Application configuration values."""

    # SECURITY: these credentials were previously hard-coded with no way to
    # override them. The password is exposed in source control — rotate it
    # and prefer supplying DB_URL through the environment.
    _DEFAULT_DB_URL = "postgresql://postgres:bry4nchr1s@localhost/postgres"

    def __init__(self):
        # DB config: the DB_URL environment variable wins; the hard-coded
        # default is kept only for backward compatibility.
        import os  # local import: this module previously had no imports
        self.DB_URL = os.environ.get("DB_URL", self._DEFAULT_DB_URL)
import logging
class GlobalRouting:
    """Computes a naive global route (list of slots crossed) for every edge
    of the floorplan and derives each edge's pipeline level from it."""

    def __init__(self, floorplan, top_rtl_parser, slot_manager):
        self.floorplan = floorplan
        self.top_rtl_parser = top_rtl_parser
        self.slot_manager = slot_manager
        self.v2s = floorplan.getVertexToSlot()   # vertex -> slot
        self.s2e = floorplan.getSlotToEdges()    # slot -> edges
        self.e_name2path = {}  # from edge to all slots passed
        self.naiveGlobalRouting()
        self.updateEdgePipelineLevel()

    def updateEdgePipelineLevel(self):
        """
        update the pipeline_level field based on the routing results
        (fixed typo 'filed'; also removed a dead slot_path = [] assignment)
        """
        for e_list in self.s2e.values():
            for e in e_list:
                src_slot = self.v2s[e.src]
                dst_slot = self.v2s[e.dst]
                slot_path = self.e_name2path[e.name]
                if src_slot == dst_slot:
                    # intra-slot edge: no crossing, no pipelining
                    e.pipeline_level = 0
                else:
                    # 2 levels of pipelining for each slot crossing
                    e.pipeline_level = (len(slot_path) + 1) * 2

    def naiveGlobalRouting(self):
        """
        each edge first goes in the X direction then in the Y direction
        (note: the original docstring claimed Y-then-X, but the code routes
        X first); assume all slots are of the same size and are aligned.
        The stored slot_path excludes the src slot and the dst slot.
        """
        for e_list in self.s2e.values():
            for e in e_list:
                src_slot = self.v2s[e.src]
                dst_slot = self.v2s[e.dst]
                slot_path = [src_slot]
                curr = src_slot
                len_x = src_slot.getLenX()
                len_y = src_slot.getLenY()
                # first go in X direction ('dir' renamed: shadowed builtin)
                x_diff = curr.getPositionX() - dst_slot.getPositionX()
                if x_diff:
                    direction = 'LEFT' if x_diff > 0 else 'RIGHT'
                    for _ in range(int(abs(x_diff/len_x))):
                        curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(direction))
                        slot_path.append(curr)
                # then go in Y direction
                y_diff = curr.getPositionY() - dst_slot.getPositionY()
                if y_diff:
                    direction = 'DOWN' if y_diff > 0 else 'UP'
                    for _ in range(int(abs(y_diff/len_y))):
                        curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(direction))
                        slot_path.append(curr)
                assert curr == dst_slot
                slot_path = slot_path[1:-1] # exclude the src and the dst
                logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))
                self.e_name2path[e.name] = slot_path
|
from datetime import datetime
from django.db import models
from django.contrib import messages
from django.contrib.messages import get_messages
from .user import User
from .tag import Tag
class TaskManager(models.Manager):
    """Manager holding the request-driven helpers used by the task views."""

    def create_task(self, request):
        """Validate request.POST and create a Task plus its tags.

        Returns (new_task, tag_errors): new_task is None when validation
        failed; tag_errors is True when at least one tag failed to create.
        """
        new_task = None
        tag_errors = False
        logged_in_user = User.objects.get_logged_in_user(request)
        if logged_in_user is None:
            messages.error(request, "Must be logged in.")
        if len(request.POST['title']) < 2:
            messages.error(request, "Title must be at least 2 characters.")
        # NOTE(review): strptime raises ValueError on malformed input —
        # presumably the form enforces this format; confirm upstream.
        due = datetime.strptime(request.POST['due_date'], "%Y-%m-%dT%H:%M")
        if due < datetime.now():
            messages.error(request, 'Due date must be in the future.')
        assigned_to = User.objects.filter(
            id=request.POST['assigned_to']).first()
        if assigned_to is None:
            messages.error(request, "Assigned user not found.")
        error_messages = messages.get_messages(request)
        error_messages.used = False  # don't clear messages
        # NOTE(review): this relies on the message-storage object being
        # falsy when no messages were queued; that depends on the storage
        # backend's truthiness on this Django version — verify it really
        # detects "no errors" (otherwise tasks are never created).
        if not error_messages:
            new_task = Task(
                title=request.POST['title'],
                description=request.POST['description'],
                due_date=request.POST['due_date'],
                created_by=logged_in_user,
                assigned_to=assigned_to
            )
            new_task.save()
            tag_errors = self.add_tags(request, new_task)
        return new_task, tag_errors

    def add_tags(self, request, task):
        """Attach comma-separated tags from request.POST to *task*.

        Existing tags are reused; missing ones are created. Returns True
        when any tag creation failed.
        """
        tags = request.POST['tags'].split(",")
        errors = False
        if not tags:
            return False
        for tag_name in tags:
            tag_name = tag_name.strip()
            tag_to_add = None
            existing_tag = Tag.objects.filter(name=tag_name).first()
            if existing_tag:
                tag_to_add = existing_tag
            else:
                tag_to_add = Tag.objects.create_tag(tag_name, request)
            if tag_to_add is None:
                errors = True
            # NOTE(review): when tag_to_add is None this still calls
            # task.tags.add(None) — confirm intended.
            task.tags.add(tag_to_add)
        return errors

    def toggle_completed(self, request, task_id):
        """Flip is_complete for the task, but only for its creator or
        assignee; stamps/clears completed_at accordingly."""
        task = Task.objects.filter(id=task_id).first()
        if task is not None:
            uid = request.session.get('user_id')
            if task.created_by.id == uid or task.assigned_to.id == uid:
                task.is_complete = not task.is_complete
                if task.is_complete:
                    task.completed_at = datetime.now()
                else:
                    task.completed_at = None
                task.save()

    def delete(self, request, task_id):
        """Delete the task if the session user created it.

        Returns True on deletion, False otherwise.
        """
        uid = request.session.get('user_id')
        task = Task.objects.filter(id=task_id).first()
        if task and task.created_by.id == uid:
            task.delete()
            return True
        return False
class Task(models.Model):
    """A task created by one user and assigned to another."""
    title = models.CharField(max_length=255)
    description = models.TextField()
    due_date = models.DateTimeField()
    is_complete = models.BooleanField(default=False)
    # set by TaskManager.toggle_completed; null while incomplete
    completed_at = models.DateTimeField(null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # When a User is deleted
    # the delete will 'cascade' to delete all the user's created_tasks as well
    created_by = models.ForeignKey(
        User, related_name="created_tasks", on_delete=models.CASCADE)
    assigned_to = models.ForeignKey(
        User, related_name="assigned_tasks", on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, related_name="tasks")
    # custom manager: create_task / add_tags / toggle_completed / delete
    objects = TaskManager()
|
import FWCore.ParameterSet.Config as cms
# ESProducer fetching the CSCTF configuration from the online OMDS database.
CSCTFConfigOnline = cms.ESProducer("CSCTFConfigOnlineProd",
    onlineAuthentication = cms.string('.'),     # path to authentication file
    forceGeneration = cms.bool(False),
    onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R')
)
|
import statistics

temperatures = [10, 14, 15, 10, 9, 5]

# Hoisted: the original recomputed statistics.mean() for every element
# passed through the filter (O(n) per call).
_mean_temperature = statistics.mean(temperatures)


def if_greater_then_mean(x):
    """Predicate: is x strictly greater than the mean temperature?

    Name kept for backward compatibility — note it selects values *over*
    the mean and 'then' is a typo for 'than'.
    """
    return x > _mean_temperature


# NOTE: despite the name this holds the temperatures ABOVE the mean;
# the name is kept so existing importers keep working.
temperatures_under_mean = list(filter(if_greater_then_mean, temperatures))
from Chromosome import Chromosome
from DNN import DNN
from keras import backend as KerasBackend
from Paths import get_path_slash
from Paths import get_symbol_data_path
from Paths import get_symbol_data
from PreprocessCsv import PreprocessCsv
from framePrepDnn import framePrepDnn
from LoadData import load_training_and_testing_data
import random
import os
import os.path
#TODO: add in getting data for self.symbol to train with
class GeneticSearchDNN:
    '''
    Initializes a GeneticSearchDNN object. Specify some main test parameters here.
    Args: int _popSize = population size
          int _selection = number selected each generation
          int _mutation = number mutated each generation
          int _newblood = number of newblood each generation
          int _crossoverPairs = number of pairs to be mated each generation
          float _testPercentage = (0, 1) ratio of total inputs and outputs to be used as test data
    '''
    def __init__ (self, _popSize, _selection, _mutation, _newblood, _crossoverPairs, _testPercentage=0.1):
        # check whether search settings make sense
        if not self.verify_search_settings (_popSize, _selection, _mutation, _newblood, _crossoverPairs):
            raise Exception ('Genetic Search __init__(): search settings do not agree')
        self.popSize = _popSize
        self.selection = _selection
        self.mutation = _mutation
        self.newblood = _newblood
        self.crossoverPairs = _crossoverPairs
        self.testPercentage = _testPercentage # percentage of data to use for testing

    def searchVerbose (self, _symbol, _timeSeries, _timeInterval, _saveDirectory, _numberToSave, \
                       _generations, _epochs, _batchSize, _maxHL, _initialPopulation = None, \
                       _goodLoss = 0, _goodDA = 1):
        '''
        Conducts a genetic search on Chromosome objects representing DNNs.
        Args: string _saveDirectory = directory path to save best DNN and Chromosome objects for each generation
              int _numberToSave = number of the best DNN objects to save each generation
              int _generations = number of generation to conduct search for
              int _epochs = base number of epochs to train each DNN for
              int _batchSize = batch size for Keras training
              Chromosome[] _initialPopulation = list of Chromosomes to use as the initial population. Max hidden
                                                layer count of each Chromosome must match _maxHL
              int _maxHL = maximum number of hidden layers each Chromosome can represent
              float _goodLoss = [0, 1) loss that a model must become lower than to be considered 'good.' Good
                                models will be trained more.
              float _goodDA = [0, 1] directional accuracy that a model must be higher than to be considered
                              'good.' Good models will be trained more.
        '''
        # get training/testing data
        trainInputs, trainOutputs, testInputs, testOutputs = \
            load_training_and_testing_data(self.testPercentage, _symbol, \
                                           _timeSeries, _timeInterval)
        # initialize starting random Chromosomes
        # (removed a dead `newChromosome = Chromosome()` that was built and
        # immediately discarded on every iteration)
        chromosomes = []
        for x in range(self.popSize):
            chromosomes.append(Chromosome(_maxHL=_maxHL))
        if _initialPopulation is not None:
            chromosomes = _initialPopulation
        # loop through generations
        for generationCount in range(_generations):
            print('\n==========\nStarting New Generation:', generationCount, '\n==========\n')
            # Work with Current Generation
            dnns = []
            losses = []
            # create, train, and evaluate DNNs
            for x in range(len(chromosomes)):
                newDNN = DNN(_chromosome=chromosomes[x])
                print('dnn:', newDNN.get_model().summary())
                newDNN.compile(newDNN.optimizer, 'mean_squared_error')
                newDNN.train(trainInputs, trainOutputs, _epochs, _batchSize)
                loss, directionalAccuracy = self.get_dnn_fitness(newDNN, testInputs, testOutputs)
                losses.append([loss, directionalAccuracy])
                print('\ntrained model:', x, 'with loss:', loss, 'and directional accuracy:', \
                      directionalAccuracy, '\n')
                dnns.append(newDNN)
                # Extended training of 'good' models.
                # checks if loss is 'good'. If so, train the model some more.
                lastIndex = len(losses)-1
                if losses[lastIndex][0] < _goodLoss or losses[lastIndex][1] > _goodDA:
                    print('\nLoss is', losses[lastIndex][0], 'and directional accuracy is:', \
                          losses[lastIndex][1], '. Training some more.\n')
                    # train model more
                    newDNN.train(trainInputs, trainOutputs, _epochs, _batchSize)
                    # change loss
                    loss, directionalAccuracy = self.get_dnn_fitness(newDNN, testInputs, testOutputs)
                    losses[lastIndex] = [loss, directionalAccuracy]
                    print('\ntrained model:', x, 'with loss:', loss, 'and directional accuracy:', \
                          directionalAccuracy, '\n')
            # aggregate all DNN data (loss, chromosome, dnn) into a list of tuples
            print('aggregating model performance data')
            models = []
            for x in range(len(dnns)):
                # add data to list of tuples for sorting
                models.append((losses[x], chromosomes[x], dnns[x]))
                print('model for generation', generationCount, 'has loss', models[x][0])
                print('mode:', models[x][1])
            # sort models based on directional accuracy
            print('\nsorting models by directional accuracy\n')
            models = sorted(models, key=get_sorted_key)
            # reverse so the highest directional accuracy comes first
            newModels = []
            for x in range(len(models)-1, -1, -1):
                newModels.append(models[x])
            models = newModels
            # save models
            print('\nsaving', _numberToSave, 'best models\n')
            # make sure save directory exists
            if not os.path.exists(_saveDirectory):
                os.mkdir(_saveDirectory)
            self.save_best_models(models, _numberToSave, _saveDirectory + get_path_slash() + str(generationCount))
            # close models to prevent errors
            for dnn in dnns:
                dnn.close()
            # Prepare Next Generation
            # new generation
            newChromosomes = []
            # selection
            selected = []
            for x in range(self.selection):
                print('\nadding selection\n')
                # choose index to select (retry until it is unique)
                selection = self.tournament_selection(_max = self.popSize)
                while selection in selected:
                    selection = self.tournament_selection(_max = self.popSize)
                selected.append(selection)
                # BUG FIX: the original appended chromosomes[x] (the loop
                # counter), silently ignoring the tournament winner.
                newChromosomes.append(chromosomes[selection])
                print('added:', newChromosomes[len(newChromosomes)-1], 'index:', selection)
            # mutation
            for x in range(self.mutation):
                print('\nadding mutation\n')
                # choose index to mutate
                index = self.tournament_selection(self.popSize)
                # copy Chromosome before mutation
                newChromosome = Chromosome(_genome=chromosomes[index].get_genome_copy(), _maxHL=_maxHL)
                # mutate new Chromosome
                newChromosome.mutate()
                print('existing:', chromosomes[index])
                # add new Chromosome
                newChromosomes.append(newChromosome)
                print('added:', newChromosomes[len(newChromosomes)-1])
            # crossover
            for x in range(self.crossoverPairs):
                print('\nadding crossover\n')
                # choose indeces to mate
                a = self.tournament_selection(self.popSize)
                b = self.tournament_selection(self.popSize)
                print('a:', chromosomes[a])
                print('b:', chromosomes[b])
                # add new crossovered Chromosomes (two children per pair)
                newChromosomes.append(self.crossover(chromosomes[a], chromosomes[b]))
                print('added:', newChromosomes[len(newChromosomes)-1])
                newChromosomes.append(self.crossover(chromosomes[a], chromosomes[b]))
                print('added:', newChromosomes[len(newChromosomes)-1])
            # newblood
            for x in range(self.newblood):
                print('\nadding newblood\n')
                newChromosomes.append(Chromosome(_maxHL=_maxHL))
                print('added:', newChromosomes[len(newChromosomes)-1])
            # set chromosome population as newly created population
            chromosomes = newChromosomes

    def crossover (self, _a, _b):
        '''
        Performs crossover on 2 Chromosomes.
        Args: _a = Chromosome
              _b = Chromosome
        Returns: A Chromosome that is the crossover product of _a and _b.
        '''
        print('performing crossover')
        genomeA = _a.get_genome()
        genomeB = _b.get_genome()
        # single-point crossover at a random cut
        index = random.randint(0, len(genomeA) - 1)
        newGenome = genomeA[:index] + genomeB[index:]
        return Chromosome(_genome=newGenome, _maxHL=_a.max_hidden_layers())

    def tournament_selection (self, _max):
        '''
        Uses a tournament selection formula to select a probably low number compared to the max.
        Args: int _max = maximum number that can be chosen. _max > 0
        Returns: int = number [0, _max)
        '''
        # draw ~5% of the population and keep the smallest index
        options = []
        for x in range(int(_max * 0.05 + 1)):
            options.append(random.randint(0, _max-1))
        return sorted(options)[0]

    def save_best_models (self, _models, _numberToSave, _directoryPath):
        '''
        Saves the best models of each generation. Assumes _models is sorted
        so the 'best' models are near the front of the list.
        Args: list _models = (losses, chromosome, dnn) tuples for the generation
              int _numberToSave = number of best models to save
              string _directoryPath = path to store saved models for the generation
        '''
        # save old current working directory
        oldPath = os.getcwd()
        # check if path exists. if not, create it
        if not os.path.exists(_directoryPath):
            os.mkdir(_directoryPath)
        os.chdir(_directoryPath)
        # save models in there
        modelIndex = 0
        while modelIndex < _numberToSave and modelIndex < len(_models):
            # save model with example name: 0-loss_0.0342da_0.55
            _models[modelIndex][2].save(str(modelIndex) + '-loss_' + str(_models[modelIndex][0][0]) + 'da_' + \
                                        str(_models[modelIndex][0][1]))
            modelIndex += 1
        # restore old current working directory
        os.chdir(oldPath)

    def verify_search_settings (self, _popSize, _selection, _mutation, _newblood, _crossoverPairs):
        '''
        Checks if the search settings specified in the constructor method work together.
        Args: Genetic Search parameters
        Returns: True/False - search settings make sense / don't make sense
        '''
        if _popSize < 0 or _selection < 0 or _mutation < 0 or _newblood < 0 or _crossoverPairs < 0:
            return False
        # every generation must reproduce exactly popSize individuals
        if _selection + _mutation + _newblood + (_crossoverPairs * 2) != _popSize:
            return False
        return True

    def get_dnn_fitness (self, _dnn, _inputs, _outputs):
        '''
        Returns the main loss metric and directional accuracy for a DNN on
        the test inputs/outputs. A fitness of 0 is good: it means 0 loss.
        '''
        loss = _dnn.evaluate(_inputs, _outputs) # assumes only 1 output
        directionalAccuracy = _dnn.evaluate_directional_accuracy(_inputs, _outputs)
        return loss, directionalAccuracy
def get_sorted_key(item):
    """Sort key for (losses, chromosome, dnn) population tuples: the
    directional accuracy stored at losses[1]."""
    losses = item[0]
    return losses[1]
|
from sanic import Sanic
from sanic.response import json
import sys
app = Sanic()

@app.route("/")
async def test(request):
    # Minimal endpoint returning a static JSON payload.
    return json({"hello": "world"})

# NOTE(review): app.run() blocks until the server stops, so the lines below
# only run at shutdown; running it unconditionally (not under
# `if __name__ == "__main__":`) means merely importing this module starts
# the server — confirm that is intended.
#if __name__ == "__main__":
#    app.run(host="0.0.0.0", port=8000)
#app.run()
app.run(host= '0.0.0.0', port=8000)
print ('exiting...')
sys.exit(0)
from flask_restful import Resource
from flask_jwt_extended import jwt_required
from webargs import fields
from webargs.flaskparser import use_args
from services.meal_plan_generator import generate_meal_plan
from models.dietary_restriction import DietaryRestriction
# Accepted values for the 'gen' request argument.
GENDERS = ['Male', 'Female']
class MealPlanApi(Resource):
    """POST endpoint generating a 7-day meal plan for the caller."""

    # NOTE(review): in marshmallow, `load_only` is a boolean; passing a
    # string here ('weight'/'height'/'budget') is merely truthy and does
    # NOT rename the field — presumably `data_key` (or the legacy
    # `load_from`) was intended. Confirm against the webargs version used.
    @jwt_required
    @use_args({'wt': fields.Int(required=True, load_only='weight', validate=lambda val: val > 0),
               'ht': fields.Int(required=True, load_only='height', validate=lambda val: val > 0),
               'bud': fields.Float(required=True, load_only='budget', validate=lambda val: val > 0),
               'diet': fields.DelimitedList(fields.Str(), required=True),
               'gen': fields.Str(required=True, validate=lambda val: val in GENDERS),
               'age': fields.Float(required=True, validate=lambda val: val > 0),
               })
    def post(self, args):
        """Build a 7-day plan from the validated args.

        Note: only the first dietary restriction (diet[0]) is forwarded to
        the generator.
        """
        meal_plan = generate_meal_plan(7, args.get('wt'), args.get('ht'), args.get('bud'), args.get('diet')[0],
                                       args.get('gen'), args.get('age'))
        return meal_plan, 200
|
# -*- coding: utf-8 -*-
"""
Solve a small 3x3 linear system A @ x = b and print the solution.

Created on Wed Mar 11 19:16:52 2020
@author: Jay
"""
import numpy as np

# Coefficient matrix and right-hand side.
A = np.array([[1, 0.67, 0.33],
              [0.45, 1., 0.55],
              [0.67, 0.33, 1.]])
b = np.array([2, 2, 2])

solution = np.linalg.solve(A, b)
print(solution)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils import AddBias, where
class Categorical(nn.Module):
    """Categorical (softmax) action head for discrete policies."""

    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        """Return the raw (pre-softmax) action logits."""
        return self.linear(x)

    def sample(self, x, deterministic):
        """Pick an action per row: argmax when deterministic, otherwise a
        multinomial draw from the softmax probabilities."""
        logits = self(x)
        probs = F.softmax(logits, dim=1)
        if deterministic is False:
            # old-style Tensor.multinomial() call (pre-0.4 torch API)
            action = probs.multinomial()
        else:
            action = probs.max(1, keepdim=True)[1]
        return action

    def logprobs_and_entropy(self, x, actions):
        """Return the log-probabilities of *actions* and the mean entropy
        of the action distribution."""
        logits = self(x)
        log_probs = F.log_softmax(logits, dim=1)
        probs = F.softmax(logits, dim=1)
        action_log_probs = log_probs.gather(1, actions)
        dist_entropy = -(log_probs * probs).sum(-1).mean()
        return action_log_probs, dist_entropy
class DiagGaussian(nn.Module):
    """Diagonal-Gaussian action head: a learned mean plus a state-independent
    learned log-std (held in the AddBias module)."""

    def __init__(self, num_inputs, num_outputs):
        super(DiagGaussian, self).__init__()
        self.fc_mean = nn.Linear(num_inputs, num_outputs)
        # log-std stored as a bias added to a zero tensor (see forward)
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        """Return (action_mean, action_logstd) for each input row."""
        action_mean = self.fc_mean(x)

        # An ugly hack for my KFAC implementation.
        # NOTE(review): `volatile` was removed in torch >= 0.4 — this path
        # only works on the old autograd API; confirm the pinned version.
        zeros = Variable(torch.zeros(action_mean.size()), volatile=x.volatile)
        if x.is_cuda:
            zeros = zeros.cuda()
        action_logstd = self.logstd(zeros)
        return action_mean, action_logstd

    def sample(self, x, deterministic):
        """Sample an action: the mean when deterministic, otherwise
        mean + std * standard-normal noise (reparameterized draw)."""
        action_mean, action_logstd = self(x)

        action_std = action_logstd.exp()

        if deterministic is False:
            noise = Variable(torch.randn(action_std.size()))
            if action_std.is_cuda:
                noise = noise.cuda()
            action = action_mean + action_std * noise
        else:
            action = action_mean
        return action

    def logprobs_and_entropy(self, x, actions):
        """Return per-row Gaussian log-probabilities of *actions* (summed
        over action dims) and the mean distribution entropy."""
        action_mean, action_logstd = self(x)

        action_std = action_logstd.exp()

        # diagonal Gaussian log-density, term by term
        action_log_probs = -0.5 * ((actions - action_mean) / action_std).pow(2) - 0.5 * math.log(2 * math.pi) - action_logstd
        action_log_probs = action_log_probs.sum(-1, keepdim=True)
        # closed-form entropy of a Gaussian: 0.5 + 0.5*log(2*pi) + log(std)
        dist_entropy = 0.5 + 0.5 * math.log(2 * math.pi) + action_logstd
        dist_entropy = dist_entropy.sum(-1).mean()
        return action_log_probs, dist_entropy
|
import taichi as ti

# Prefer the Vulkan backend when this taichi build supports it, else CUDA.
arch = ti.vulkan if ti._lib.core.with_vulkan() else ti.cuda
ti.init(arch=arch)

# Cloth resolution: n x n mass points spaced quad_size apart.
n = 128
quad_size = 1.0 / n
dt = 4e-2 / n
# Number of substeps needed to cover ~1/60 s of simulated time per frame.
substeps = int(1 / 60 // dt)

gravity = ti.Vector([0, -9.8, 0])
spring_Y = 3e4          # spring stiffness
dashpot_damping = 1e4   # damping along each spring (relative velocity)
drag_damping = 1        # global exponential velocity decay

# Static collision sphere at the origin.
ball_radius = 0.3
ball_center = ti.Vector.field(3, dtype=float, shape=(1,))
ball_center[0] = [0, 0, 0]

# Cloth state: position and velocity per mass point.
x = ti.Vector.field(3, dtype=float, shape=(n, n))
v = ti.Vector.field(3, dtype=float, shape=(n, n))

# Render mesh: two triangles per grid quad, plus per-vertex colors.
num_triangles = (n - 1) * (n - 1) * 2
indices = ti.field(int, shape=num_triangles * 3)
vertices = ti.Vector.field(3, dtype=float, shape=n * n)
colors = ti.Vector.field(3, dtype=float, shape=n * n)

bending_springs = False
@ti.kernel
def initialize_mass_points():
    """Lay the cloth out as a flat n x n sheet at height 0.6 with a small
    random horizontal offset so repeated drops do not look identical."""
    # Computed once per kernel launch, so the whole sheet shares one offset
    # (the sheet is shifted rigidly, not perturbed per point).
    random_offset = ti.Vector([ti.random() - 0.5, ti.random() - 0.5]) * 0.1
    for i, j in x:
        x[i, j] = [
            i * quad_size - 0.5 + random_offset[0],
            0.6,
            j * quad_size - 0.5 + random_offset[1],
        ]
        v[i, j] = [0, 0, 0]  # start at rest
@ti.kernel
def initialize_mesh_indices():
    """Build the static triangle index list (two triangles per grid quad)
    and a checkerboard per-vertex colour pattern."""
    for i, j in ti.ndrange(n - 1, n - 1):
        quad_id = (i * (n - 1)) + j
        # 1st triangle of the square
        indices[quad_id * 6 + 0] = i * n + j
        indices[quad_id * 6 + 1] = (i + 1) * n + j
        indices[quad_id * 6 + 2] = i * n + (j + 1)
        # 2nd triangle of the square
        indices[quad_id * 6 + 3] = (i + 1) * n + j + 1
        indices[quad_id * 6 + 4] = i * n + (j + 1)
        indices[quad_id * 6 + 5] = (i + 1) * n + j
    # Checkerboard colouring in 4x4-vertex tiles.
    for i, j in ti.ndrange(n, n):
        if (i // 4 + j // 4) % 2 == 0:
            colors[i * n + j] = (0.22, 0.72, 0.52)
        else:
            colors[i * n + j] = (1, 0.334, 0.52)

initialize_mesh_indices()
spring_offsets = []

def initialize_spring_offsets():
    """Fill the global ``spring_offsets`` list with the neighbour stencil
    used by ``substep``: the 8 immediate neighbours when ``bending_springs``
    is enabled, otherwise the 12 offsets with Manhattan distance <= 2."""
    reach = 1 if bending_springs else 2
    for di in range(-reach, reach + 1):
        for dj in range(-reach, reach + 1):
            if (di, dj) == (0, 0):
                continue
            if abs(di) + abs(dj) > 2:
                continue
            spring_offsets.append(ti.Vector([di, dj]))

initialize_spring_offsets()
@ti.kernel
def substep():
    """Advance the cloth one explicit time step: gravity, spring and dashpot
    forces over the neighbour stencil, drag, then sphere collision."""
    for i in ti.grouped(x):
        v[i] += gravity * dt
    for i in ti.grouped(x):
        force = ti.Vector([0.0, 0.0, 0.0])
        # Accumulate spring + dashpot forces from every stencil neighbour.
        for spring_offset in ti.static(spring_offsets):
            j = i + spring_offset
            if 0 <= j[0] < n and 0 <= j[1] < n:
                x_ij = x[i] - x[j]
                v_ij = v[i] - v[j]
                d = x_ij.normalized()
                current_dist = x_ij.norm()
                # Rest length is the grid distance between the two points.
                original_dist = quad_size * float(i - j).norm()  # pylint: disable=no-member
                # Spring force
                force += -spring_Y * d * (current_dist / original_dist - 1)
                # Dashpot damping
                force += -v_ij.dot(d) * d * dashpot_damping * quad_size
        v[i] += force * dt
    for i in ti.grouped(x):
        v[i] *= ti.exp(-drag_damping * dt)  # exponential velocity drag
        offset_to_center = x[i] - ball_center[0]
        if offset_to_center.norm() <= ball_radius:
            # Velocity projection
            normal = offset_to_center.normalized()
            # Remove only the inward velocity component (no sticking).
            v[i] -= ti.min(v[i].dot(normal), 0) * normal
        x[i] += dt * v[i]
@ti.kernel
def update_vertices():
    """Copy the (i, j)-indexed position field into the flat vertex buffer
    consumed by the renderer."""
    for i, j in ti.ndrange(n, n):
        vertices[i * n + j] = x[i, j]
def main():
    """Render loop: step the simulation `substeps` times per frame and draw
    the cloth mesh plus the collision sphere with Taichi's GGUI scene API."""
    window = ti.ui.Window("Taichi Cloth Simulation on GGUI", (768, 768), vsync=True)
    canvas = window.get_canvas()
    canvas.set_background_color((1, 1, 1))
    scene = window.get_scene()
    camera = ti.ui.Camera()
    current_t = 0.0
    initialize_mass_points()
    while window.running:
        if current_t > 1.5:
            # Reset
            initialize_mass_points()
            current_t = 0
        for i in range(substeps):
            substep()
            current_t += dt
        update_vertices()
        camera.position(0.0, 0.0, 3)
        camera.lookat(0.0, 0.0, 0)
        scene.set_camera(camera)
        scene.point_light(pos=(0, 1, 2), color=(1, 1, 1))
        scene.ambient_light((0.5, 0.5, 0.5))
        scene.mesh(vertices, indices=indices, per_vertex_color=colors, two_sided=True)
        # Draw a smaller ball to avoid visual penetration
        scene.particles(ball_center, radius=ball_radius * 0.95, color=(0.5, 0.42, 0.8))
        canvas.scene(scene)
        window.show()

# TODO: include self-collision handling
if __name__ == "__main__":
    main()
|
import os
import numpy as np
import rasterio.features
import shapely.ops
import shapely.wkt
import shapely.geometry
import pandas as pd
import cv2
from scipy import ndimage as ndi
from skimage.morphology import watershed
from tqdm import tqdm
from fire import Fire
def _remove_interiors(line):
if "), (" in line:
line_prefix = line.split('), (')[0]
line_terminate = line.split('))",')[-1]
line = (
line_prefix +
'))",' +
line_terminate
)
return line
def my_watershed(what, mask1, mask2):
    """Label the seed mask ``mask2`` and run a watershed of ``what``
    constrained to ``mask1``, keeping one-pixel separation lines."""
    seed_labels, _ = ndi.label(mask2, output=np.uint32)
    return watershed(what, seed_labels, mask=mask1, watershed_line=True)
def wsh(mask_img, threshold, border_img, seeds, shift):
    """Binarise the probability maps and run a seeded watershed.

    ``mask_img`` thresholded at ``threshold`` gives the building mask;
    ``seeds * border_img`` thresholded at ``threshold + shift`` gives the
    smaller seed regions the watershed grows from inside the mask.
    Note: ``mask_img`` is modified in place by the thresholding.
    """
    img_copy = np.copy(mask_img)
    m = seeds * border_img
    img_copy[m <= threshold + shift] = 0
    img_copy[m > threshold + shift] = 1
    # Fix: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # equivalent dtype.
    img_copy = img_copy.astype(bool)
    mask_img[mask_img <= threshold] = 0
    mask_img[mask_img > threshold] = 1
    mask_img = mask_img.astype(bool)
    labeled_array = my_watershed(mask_img, mask_img, img_copy)
    return labeled_array
def main(folds_predict='/wdata/folds_predicts',
         prob_trs=0.3,
         shift=0.4,
         min_lolygon_area=200,
         submit_path='/wdata/submits/solution.csv',
         save_path='/wdata/submit_predicts/'):
    """Average per-fold mask predictions, separate touching buildings with a
    seeded watershed, polygonise each label and write a SpaceNet-style
    submission CSV (ImageId, PolygonWKT_Pix, Confidence)."""
    folds = os.listdir(folds_predict)
    print(folds)
    # Assumes every fold directory holds identically named files — TODO confirm.
    files = sorted(os.listdir(os.path.join(folds_predict, folds[0])))[:]
    f = open(submit_path, 'w')
    f.write('ImageId,PolygonWKT_Pix,Confidence\n')
    for _file in tqdm(files):
        # Average the predicted probability maps over all folds.
        for fold_i, fold_name in enumerate(folds):
            file_path = os.path.join(folds_predict, fold_name, _file)
            data = cv2.imread(file_path) / 255.0
            if fold_i == 0:
                final_data = data[:, :, :]
            else:
                final_data += data[:, :, :]
        # The image id is encoded in the last four underscore-separated tokens.
        fid = '_'.join(_file.split('_')[-4:]).split('.')[0]
        pred_data = final_data / len(folds)
        file_save_path = os.path.join(save_path, _file)
        cv2.imwrite(file_save_path, (pred_data * 255).astype(np.uint8))
        # Channel 0 presumably is building probability and channel 2 the
        # border probability — verify against the training pipeline.
        labels = wsh(pred_data[:, :, 0],
                     prob_trs,
                     # ( 1 - pred_data[:, :, 2])*( 1 - pred_data[:, :, 1]),
                     1 - pred_data[:, :, 2],
                     pred_data[:, :, 0],
                     shift)
        label_numbers = list(np.unique(labels))
        all_dfs = []
        for label in label_numbers:
            if label != 0:  # 0 is the background label
                submask = (labels == label).astype(np.uint8)
                if np.sum(submask) < min_lolygon_area:
                    continue
                shapes = rasterio.features.shapes(submask.astype(np.int16), submask > 0)
                # NOTE(review): cascaded_union and direct MultiPolygon
                # iteration were removed in Shapely 2.x (use unary_union and
                # mp.geoms); this code requires Shapely < 2.
                mp = shapely.ops.cascaded_union(
                    shapely.geometry.MultiPolygon([
                        shapely.geometry.shape(shape)
                        for shape, value in shapes
                    ]))
                if isinstance(mp, shapely.geometry.Polygon):
                    df = pd.DataFrame({
                        'area_size': [mp.area],
                        'poly': [mp],
                    })
                else:
                    df = pd.DataFrame({
                        'area_size': [p.area for p in mp],
                        'poly': [p for p in mp],
                    })
                df = df[df.area_size > min_lolygon_area]
                df = df.reset_index(drop=True)
                # print(df)
                if len(df) > 0:
                    all_dfs.append(df.copy())
        if len(all_dfs) > 0:
            df_poly = pd.concat(all_dfs)
            # Largest polygons first; confidence is the area ratio to the largest.
            df_poly = df_poly.sort_values(by='area_size', ascending=False)
            df_poly.loc[:, 'wkt'] = df_poly.poly.apply(lambda x: shapely.wkt.dumps(x, rounding_precision=0))
            df_poly.loc[:, 'area_ratio'] = df_poly.area_size / df_poly.area_size.max()
            for i, row in df_poly.iterrows():
                line = "{},\"{}\",{:.6f}\n".format(
                    fid,
                    row.wkt,
                    row.area_ratio)
                line = _remove_interiors(line)
                f.write(line)
        else:
            f.write("{},{},0\n".format(
                fid,
                "POLYGON EMPTY"))
    f.close()

if __name__ == '__main__':
    Fire(main)
|
import os
from typing import Optional
from unittest import mock
import pytest
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.plugins.training_type.rpc_sequential import RPCPlugin
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
@mock.patch.dict(
    os.environ,
    {
        "CUDA_VISIBLE_DEVICES": "0,1",
        "SLURM_NTASKS": "2",
        "SLURM_JOB_NAME": "SOME_NAME",
        "SLURM_NODEID": "0",
        "LOCAL_RANK": "0",
        "SLURM_PROCID": "0",
        "SLURM_LOCALID": "0",
    },
)
@mock.patch("torch.cuda.device_count", return_value=2)
@pytest.mark.parametrize(
    ["ddp_backend", "gpus", "num_processes"],
    [("ddp_cpu", None, 2), ("ddp", 2, 0), ("ddp_spawn", 2, 0)],
)
@RunIf(rpc=True)
def test_rpc_choice(tmpdir, ddp_backend, gpus, num_processes):
    """Passing RPCPlugin must make the Trainer adopt it as the training-type
    plugin for each DDP backend (SLURM environment is mocked above)."""
    class CB(Callback):
        def on_fit_start(self, trainer, pl_module):
            assert isinstance(trainer.training_type_plugin, RPCPlugin)
            # Abort the fit early — the plugin check is all we care about.
            raise RuntimeError('finished plugin check')
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=str(tmpdir),
        fast_dev_run=True,
        gpus=gpus,
        num_processes=num_processes,
        distributed_backend=ddp_backend,
        callbacks=[CB()],
        plugins=[RPCPlugin()]
    )
    with pytest.raises(RuntimeError, match='finished plugin check'):
        trainer.fit(model)
class CustomRPCPlugin(RPCPlugin):
    """RPCPlugin variant that counts hook invocations so the tests below
    can assert on how often Lightning called them."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Counters inspected by the tests.
        self.worker_optimizer_step_count = 0
        self.rpc_save_model_count = 0

    def rpc_save_model(self, *_) -> None:
        """Count model-save requests instead of actually saving anything."""
        self.rpc_save_model_count = self.rpc_save_model_count + 1

    def barrier(self, name: Optional[str] = None) -> None:
        """No-op barrier: these tests need no cross-process synchronisation."""
        return None
@RunIf(min_gpus=2, special=True, rpc=True)
def test_rpc_function_calls_ddp(tmpdir):
    """Run a short 2-GPU DDP fit with CustomRPCPlugin and verify how often
    rpc_save_model was invoked."""
    model = BoringModel()
    plugin = CustomRPCPlugin()
    max_epochs = 2
    limit_train_batches = 2
    trainer = Trainer(
        limit_train_batches=limit_train_batches,
        limit_val_batches=2,
        max_epochs=max_epochs,
        gpus=2,
        distributed_backend='ddp',
        plugins=[plugin],
        default_root_dir=tmpdir,
    )
    trainer.fit(model)
    # NOTE(review): both branches assert the identical condition — the worker
    # branch presumably should expect a different count; check upstream
    # before changing.
    if trainer.global_rank == 0:  # Main process
        assert plugin.rpc_save_model_count == max_epochs
    else:  # Worker process
        assert plugin.rpc_save_model_count == max_epochs
|
# Lewis emulator package entry point for the simulated Tektronix AFG3xxx
# function generator: export the device class and pin the framework version.
from .device import SimulatedTekafg3XXX
from ..lewis_versions import LEWIS_LATEST

framework_version = LEWIS_LATEST
__all__ = ['SimulatedTekafg3XXX']
|
def BasicCommands():
    """Interactive menu of basic Linux commands.

    Draws a banner, reads a numeric choice and shells out to the matching
    command.  0 exits, -1 returns to the main menu, 3 prompts for a command
    name to show its man page, and anything else — including non-numeric
    input, which previously crashed with ValueError — is reported as a
    wrong choice.
    """
    import os

    # Menu entries that map directly to a single shell command.
    commands = {
        1: "date",
        2: "cal",
        4: "free -m",
        5: "ifconfig",
        6: "df -h",
        7: "uptime",
        8: "whoami",
    }

    os.system("tput setaf 10")
    print("***************************************************************************************************************************************")
    os.system("tput setaf 10")
    name = "\" \t\t\t\t LINUX TERMINAL USER INTERFACE\""
    os.system("echo {0} | figlet -f wideterm -d ./fonts/".format(name))
    os.system("tput setaf 10")
    print("***************************************************************************************************************************************")
    os.system("tput setaf 11")
    print("\n\t\t\t\t..........................BASIC COMMANDS MENU.............................")
    os.system("tput setaf 6")
    print("""
\n\t\t\t\t\t1.Show Date \t\t\t2.Show Cal
\n\t\t\t\t\t3.Show manual of the command \t\t\t4.Show Free Memory
\n\t\t\t\t\t5.Show Network card \t\t\t6.Show mounted disks
\n\t\t\t\t\t7.Show uptime \t\t\t8.Check user
\n""")
    os.system("tput setaf 7")
    print("\t\t\t\t\tEnter 0 to terminate \t Enter -1 return to main menu\n")
    os.system("tput setaf 2")
    # Robustness fix: non-numeric input used to raise an uncaught ValueError.
    try:
        choice = int(input("Select the Option : "))
    except ValueError:
        choice = None
    os.system("tput setaf 7")
    if choice == 0:
        exit()
    elif choice in commands:
        print()
        os.system(commands[choice])
    elif choice == 3:
        cmd = str(input("\nEnter the command: "))
        os.system("man {}".format(cmd))
    elif choice == -1:
        os.system("clear")
        import main
        main.main()
    else:
        print("You Entered Wrong Choice ...")
    os.system("tput setaf 11")
    c = input("\nPress Enter: The Screen will be cleared")
    os.system("clear")
# Keep redisplaying the menu until the user chooses to exit (option 0).
while True:
    BasicCommands()
|
# Follow the mapping A from town 1 for K steps and print the final town.
# K can be huge, so the walk's cycle is detected and K is reduced modulo
# the cycle length (competitive-programming input format).
N, K = map(int, input().split())
A = list(map(int, input().split()))
result = 1
L = 2
current = A[0]
visited = [0] * N        # per-town visit counter used for cycle detection
visited_loop_cnt = 1     # length of the cycle currently being measured
flag = True              # True until the cycle start has been pinned down
while L < K:
    if visited[current - 1] == 3:
        # Third visit: the cycle length is known, so jump K ahead.
        J = K - L
        J %= visited_loop_cnt
        K = L + J
    elif visited[current - 1] == 2 and flag:
        # Second repeat visit: we just entered the cycle; start counting it.
        visited[current - 1] += 1
        visited_loop_cnt = 1
        flag = False
    else:
        visited[current - 1] += 1
        visited_loop_cnt += 1
    current = A[current - 1]
    L += 1
print(A[current - 1])
# When we arrive at the same place again, K must be reduced by whole cycles.
# Example walk: 1 6 2 5 3 2 5 3 2 5
|
from __future__ import print_function # Make sure this line is always at the top of the file.
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from email.mime.text import MIMEText
import base64
import pickle
import os.path
import config
from prasePrice import Price
class gmail():
    """Sends the current stock price as a text message through the Gmail API
    via the carrier's email-to-SMS gateway."""

    def __init__(self):
        self.email = config.EMAIL_ADDRESS
        self.password = config.PASSWORD
        self.carrier = '@txt.att.net'
        self.toNumber = config.PHONE_NUMBER + '{}'.format(self.carrier)
        self.SCOPES = config.SCOPES
        # NOTE(review): the built message is discarded here — presumably a
        # leftover smoke test; sendMessage() must be called to deliver it.
        self.buildMessage(self.email, self.toNumber, 'Test Email', self.textMessage())

    def textMessage(self):
        """Return the SMS body containing the currently parsed stock price."""
        message = " Your stock is currently at the price of $" + str(Price().parsePrice())
        return message

    def buildMessage(self, sender, to, subject, message_text):
        """Build a Gmail-API message payload: a base64url-encoded MIME text
        message wrapped in the {'raw': ...} dict the API expects."""
        message = MIMEText(message_text)
        message['to'] = to
        message['from'] = sender
        message['subject'] = subject
        return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

    def sendMessage(self, service, user_id, message):
        """Send a prepared message; return the API response, or None when
        the send fails."""
        try:
            message = (service.users().messages().send(userId=user_id, body=message).execute())
            print('Message Id: %s' % message['id'])
            return message
        except Exception as error:
            # Fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt and hid the failure reason.
            print("Message failed to send :( ", error)

    def getCreds(self):
        """Return an authorised Gmail API service object.

        The file token.pickle stores the user's access and refresh tokens and
        is created automatically when the auth flow completes the first time.
        """
        creds = None
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file('credentials.json', self.SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)
        service = build('gmail', 'v1', credentials=creds)
        return service
    # Call the Gmail API
    # results = service.users().labels().list(userId='me').execute()
|
# .find string03.py
# Diogo.c
# Demonstrate str.find by locating the "@" separator in an e-mail address.
email = "TheQueen@BuckinghamPalace.uk"
index = email.find("@")
print(f"The @ is at index {index}")
|
from quant.project.fund_project.fund_stock_selection_ability_tianfeng.allfund_alpha_on_factor_file import *
from quant.project.fund_project.fund_stock_selection_ability_tianfeng.allstock_alpha_on_factor import *
if __name__ == '__main__':
    # GetAllStockAllDateAlpha
    # Step 1: compute per-stock alphas on each factor over the date series
    # and dump them under `path`.
    ################################################################################
    path = 'E:\\3_Data\\4_fund_data\\7_fund_select_stock\\StockAlpha\\'
    factor_name_list = ["TotalMarketValue", "BP", "IncomeYOYDaily", "ROETTMDaily"]
    beg_date = "20140101"
    end_date = "20170730"
    # "S" presumably selects a semi-annual date series — TODO confirm.
    date_series = Date().get_normal_date_series(beg_date, end_date, "S")
    print(date_series)
    GetAllStockAllDateAlpha(path, factor_name_list, date_series)
    # GetAllFundAllDateFactorAlphaFile
    # Step 2: aggregate the stock alphas into per-fund factor-alpha files.
    ################################################################################
    in_path = 'E:\\3_Data\\4_fund_data\\7_fund_select_stock\\StockAlpha\\'
    out_path = 'E:\\3_Data\\4_fund_data\\7_fund_select_stock\\FundSelectStockAlpha\\'
    factor_name_list = ["TotalMarketValue", "BP", "IncomeYOYDaily", "ROETTMDaily", "Industry"]
    beg_date = "20170530"
    end_date = "20180630"
    date_series = Date().get_normal_date_series(beg_date, end_date, "S")
    GetAllFundAllDateFactorAlphaFile(in_path, out_path, factor_name_list, date_series)
    ################################################################################
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# größter gemeinsamer Teiler
def ggt(x, y):
    """Greatest common divisor via the subtraction form of Euclid's
    algorithm (assumes positive integers)."""
    while x != y:
        if x > y:
            x -= y
        else:
            y -= x
    return x
def ggt(x, y):
    """Greatest common divisor (iterative Euclidean algorithm).

    Fix: the previous version never reduced its operands — the bare
    `x % y` expression was a no-op statement, and the unconditional swap
    made the loop alternate between (x, y) and (y, x) forever.
    """
    while y != 0:
        x, y = y, x % y
    return x
# Exercise ggt with a few sample pairs (expected: 10, 1, 2, 1).
#print("(10, 30)", ggt(10, 30))
print("(20, 30)", ggt(20, 30))
print("(2, 5)", ggt(2, 5))
print("(8, 6)", ggt(8, 6))
print("(7, 3)", ggt(7, 3))
from PyQt5.QtWidgets import QLabel, QWidget
from PyQt5.QtCore import Qt, QMimeData, QRect
from PyQt5.QtGui import QDrag, QPixmap, QPainter, QCursor
import parametros as par
import os
#Inspirado con https://www.youtube.com/watch?v=9CJV-GGP22c
class Pinguino(QLabel):
    """Draggable penguin sprite; `color` selects which neutral sprite from
    par.PATH_PINGUINOS is displayed."""

    # Maps the constructor's colour name to its sprite key in PATH_PINGUINOS.
    _SPRITE_KEYS = {
        "celeste": "PINGUI_AZUL_NEUTRO",
        "rojo": "PINGUI_ROJO_NEUTRO",
        "amarillo": "PINGUI_AMAR_NEUTRO",
        "morado": "PINGUI_MORA_NEUTRO",
        "verde": "PINGUI_VERD_NEUTRO",
    }

    def __init__(self, parent, color, pos_x, pos_y):
        super().__init__(parent)
        self.color = color
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.rect = QRect(pos_x, pos_y, 60, 70)
        self.drag = True
        self.init_gui()

    def init_gui(self):
        """Load the neutral sprite for this colour and place the label."""
        # The five original if/elif branches were identical except for the
        # sprite key; a lookup table removes the duplication.  Unknown
        # colours simply get no pixmap, as before.
        key = self._SPRITE_KEYS.get(self.color)
        if key is not None:
            ruta_imagen = os.path.join(*par.PATH_PINGUINOS[key])
            self.pixeles = QPixmap(ruta_imagen).scaledToWidth(110)
            self.setPixmap(self.pixeles)
        self.move(self.pos_x, self.pos_y)

    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.posicion_inicial_drag = event.pos()

    def mouseMoveEvent(self, event):
        if event.buttons() == Qt.LeftButton and self.drag:
            drag = QDrag(self)
            data_label = QMimeData()  # carry the label's info along with it
            data_label.setText(self.color)
            # Fix: `toImage` was attached as a bound method (missing call
            # parentheses); the QImage itself must be set as the image data.
            data_label.setImageData(self.pixmap().toImage())
            drag.setMimeData(data_label)
            # drag-and-drop visual effects
            drop_action = drag.exec_(Qt.MoveAction)
class PinguinoBailarin(Pinguino):
    """Penguin that swaps sprites to show dance steps for arrow combinations.

    Direction codes: 0 - left / 1 - up / 2 - down / 3 - right.
    """

    # Sprite-name suffix for each single direction.
    _UNA_FLECHA = {0: "IZQUIERDA", 1: "ARRIBA", 2: "ABAJO", 3: "DERECHA"}
    # Sprite-name suffix for each supported direction pair (order-insensitive,
    # matching the original's explicit both-order checks).
    _DOS_FLECHAS = {
        frozenset({0, 1}): "ARRIBA_IZQUIERDA",
        frozenset({0, 2}): "ABAJO_IZQUIERDA",
        frozenset({2, 3}): "ABAJO_DERECHA",
        frozenset({1, 3}): "ARRIBA_DERECHA",
    }

    def __init__(self, parent, color, pos_x, pos_y):
        super().__init__(parent, color, pos_x, pos_y)
        self.sprites = []
        largo = len(self.color)
        for path in par.PATH_BAILES_PINGUINOS:
            if self.color.upper() == path[:largo]:
                self.sprites.append(par.PATH_BAILES_PINGUINOS[path])

    def _mostrar_sprite(self, sufijo):
        """Load and display the dance sprite `<COLOR>_<sufijo>`."""
        ruta_imagen = os.path.join(
            *par.PATH_BAILES_PINGUINOS[f"{self.color.upper()}_{sufijo}"])
        self.nuevos_pixeles = QPixmap(ruta_imagen).scaledToWidth(110)
        self.setPixmap(self.nuevos_pixeles)

    def actualizar_paso(self, direc):
        """Show the sprite matching the pressed arrow combination `direc`
        (a sequence of direction codes).  Unknown combinations keep the
        neutral sprite, exactly like the original branch chain."""
        # The ten near-identical if/elif branches were collapsed into
        # lookup tables plus one sprite-loading helper.
        self.setPixmap(self.pixeles)
        sufijo = None
        if len(direc) == 1:
            sufijo = self._UNA_FLECHA.get(direc[0])
        elif len(direc) == 2:
            sufijo = self._DOS_FLECHAS.get(frozenset(direc))
        elif len(direc) == 3:
            sufijo = "TRES_FLECHAS"
        if sufijo is not None:
            self._mostrar_sprite(sufijo)

    def posicion_normal(self):
        """Restore the neutral (non-dancing) sprite."""
        self.setPixmap(self.pixeles)
|
# Tutorial banner: print a framed section header followed by an introductory
# paragraph about object-oriented programming with classes.
print("################################################################################")
print('''
INTRODUCTION\n''')
print("################################################################################")
print('''
Classes can be used in many different ways. We are going to focus on using them
for object-oriented programming. The key to object oriented programming is to
think of objects as collections of both data and the methods that operate on
that data. These ideas really took off when C++ and Java arrived on the scene.
We are now going to focus on how to create new types.\n''')
|
import spotipy
import spotipy.util as util
class AlbumInfo:
    """Plain value object describing one album pulled from a playlist."""

    def __init__(self, name, artist, coverUrl, genres):
        # Album title, performing artist, cover-art URL and genre list.
        self.name, self.artist = name, artist
        self.coverUrl, self.genres = coverUrl, genres
class SpotifyInfo:
    """Thin wrapper around spotipy that authenticates a user and extracts
    album/genre information from a playlist."""
    spotify = None

    def __init__(self, username, scope, spotipy_client_id, spotipy_client_secret, spotipy_redirect_url):
        self.username = username
        self.scope = scope
        self.spotipy_client_id = spotipy_client_id
        self.spotipy_client_secret = spotipy_client_secret
        self.spotipy_redirect_url = spotipy_redirect_url
        self.valid = False

    def validate(self):
        """Prompt for an OAuth token; on success attach a Spotify client and
        mark the instance valid.  Returns the token (falsy on failure)."""
        token = util.prompt_for_user_token(self.username, self.scope, client_id = self.spotipy_client_id, client_secret = self.spotipy_client_secret, redirect_uri = self.spotipy_redirect_url)
        if not token:
            print("Cannot get token for ", self.username, "\nquitting")
            self.valid = False
        else:
            self.spotify = spotipy.Spotify(auth = token)
            self.valid = True
        return token

    # getPlaylistAlbumInfo helper methods are hidden
    def __getPlaylistTracks(self, playlist_id):  # api call to get tracks from playlist
        return self.spotify.user_playlist(self.username, playlist_id, fields = 'tracks ,next')['tracks']['items']

    # api call to get the album's genres (if there are none, just get artist genres)
    def __getAlbumGenres(self, album_id, artist_id):
        genres = self.spotify.album(album_id)['genres']
        if len(genres) < 1: genres = self.spotify.artist(artist_id)['genres']
        return genres

    # create albuminfo object from a track object
    def __getAlbumInfoFromTrack(self, trackObj):
        track = trackObj['track']
        cover = track['album']['images'][0]['url']
        artist = track['album']['artists'][0]['name']
        album = track['album']['name']
        artist_id = track['album']['artists'][0]['id']
        album_id = track['album']['id']
        genres = self.__getAlbumGenres(album_id, artist_id)
        return AlbumInfo(album, artist, cover, genres)

    # for every genre the album "has", add to the list in the dictionary associated with that genre
    def __addAlbumToAllGenres(self, album, albumDict):
        for genre in album.genres:
            # Fix: `if not albumDict[genre]` raised KeyError for any genre
            # not yet present; setdefault creates the list on first use.
            albumDict.setdefault(genre, []).append(album)

    # returns a list of albumInfo objects
    def getPlaylistAlbumInfoList(self, playlist_id):
        tracks = self.__getPlaylistTracks(playlist_id)
        if len(tracks) < 1:
            print("No songs in playlist...\nquitting")
            return []
        albums = []
        for trackObj in tracks:
            if trackObj['is_local']: continue  # ignore local files
            album = self.__getAlbumInfoFromTrack(trackObj)
            albums.append(album)
        if len(albums) < 1: print("Unable to get album art....\nquitting")
        return albums

    # returns a dictionary of albumInfo objects with genres as the keys
    def getPlaylistAlbumInfoDictionary(self, playlist_id):
        tracks = self.__getPlaylistTracks(playlist_id)
        if len(tracks) < 1:
            print("No songs in playlist...\nquitting")
            return {}
        albums = {}
        for trackObj in tracks:
            if trackObj['is_local']: continue  # ignore local files
            album = self.__getAlbumInfoFromTrack(trackObj)
            self.__addAlbumToAllGenres(album, albums)
        if len(albums) < 1: print("Unable to get album art....\nquitting")
        return albums
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: alpc32
# Date: 2017-09-12 22:29:40
# Last Modified by: alpc32
# Last Modified time: 2017-09-12 22:29:40
import time
import math
from file_handle import FileHandle
from mtools import queue, frame_info_queue, hexstr2int, AllConfig, PI, error_frame
def get_frame_info(frame):
    """
    Extract the key information from one lidar frame: for each lane, the
    maximum obstacle height and the number of hit points.  (Python 2 module.)
    """
    class FrameInfo(object):
        """
        Per-frame accumulator: `result` holds lane slots laid out as
        [max_y, hit_count] pairs (12 slots, i.e. sized for up to 6 lanes).
        """
        def __init__(self):
            self.result = [0] * 12
            self.index = 0
        def insert_point(self, x, y):
            # Find the lane whose [lane_min, lane_max] x-range contains the
            # point, then update that lane's max height and hit counter.
            index = 0
            for index in range(0, AllConfig.lane_num):
                # print index
                # print AllConfig.lane_min[index], AllConfig.lane_max[index]
                if (x >= AllConfig.lane_min[index]) and (x <= AllConfig.lane_max[index]):
                    # print 'index', index
                    self.result[index * 2] = max(self.result[index * 2], y)
                    self.result[index * 2 + 1] += 1
                    break
        # Dead code kept from an earlier single-pass variant of insert_point.
        '''
        def insert_point(self, x, y):
            if x < AllConfig.lane_min[self.index]:
                return True
            while self.index < AllConfig.lane_num and x > AllConfig.lane_max[self.index]:
                self.index += 1
            if self.index == AllConfig.lane_num:
                return False
            print 'index', self.index
            self.result[self.index * 2] = max(self.result[self.index * 2], y)
            self.result[self.index * 2 + 1] += 1
        '''
    i = 0
    frame_info = FrameInfo()
    temp_pi = PI / 180.0
    for value in frame:
        # Convert each polar (angle, range) sample to Cartesian; ranges
        # arrive as hex strings in tenths of a unit.
        angle = (i * AllConfig.lidar_resolution + 0) * temp_pi
        i += 1
        vle = hexstr2int(value) / 10.0
        temp_x = int(math.cos(angle) * vle)
        temp_y = int(AllConfig.lidar_height - math.sin(angle) * vle)
        # Discard points below the vehicle-height threshold.
        if temp_y < AllConfig.car_threshold:
            continue
        # print "insert point", temp_x, temp_y
        frame_info.insert_point(temp_x, temp_y)
    print 'result ', frame_info.result
    frame_info_queue.put(frame_info.result)
    return ' '.join(str(temp) for temp in frame_info.result)
def process_frame(ar):
    """
    Process every buffered lidar scan: drain the shared queue, validate and
    parse each frame, and loop until the shared flag ar[0] is cleared.
    (Python 2 module.)
    """
    # file_handle = FileHandle()
    frame_cnt = 0
    while True:
        # NOTE(review): error_frame is imported from mtools; `global` here
        # rebinds only this module's copy — confirm that is intended.
        global error_frame
        for temp_i in range(0, 6):
            print 'queuesize', queue.qsize(), 'frame_cnt', frame_cnt, 'error frame', error_frame
            # Process all scan data buffered in the queue.
            while not queue.empty():
                buf = queue.get()
                # print buf
                buf_split = buf.split()
                buf_split_len = len(buf_split)
                # A valid frame starts with STX (0x02) and has >= 26 fields.
                if buf[0] != '\x02' or buf_split_len < 26:
                    error_frame += 1
                    print "Erro frame"
                    continue
                frame_cnt += 1
                # Field 25 holds the point count; clamp to what is present.
                point_num = min(hexstr2int(buf_split[25]), buf_split_len - 26)
                # print 'point_num', point_num
                # print buf_split[9], buf_split[10], process_frame(buf_split[26:26+point_num])
                result_data = buf_split[9] + ' ' + \
                    get_frame_info(buf_split[26:26 + point_num])
                # result_data = process_frame(buf_split[26:26+proint_num])
                # file_handle.write(result_data + '\n')
                # return
            if ar[0] == 0:
                return
            time.sleep(0.1)
def unittest(args):
    """
    Unit test entry point (placeholder — always reports success).
    """
    return 0

if __name__ == '__main__':
    import sys
    sys.exit(unittest(sys.argv))
|
"""
Taken from https://github.com/martinohanlon/quickdraw_python
Paritally modified to suit the needs of our projects.
"""
from PIL import Image, ImageDraw
import numpy as np
class QuickDrawing():
    """
    Represents a single Quick, Draw! drawing.
    """
    def __init__(self, name, drawing_data):
        self._name = name
        self._drawing_data = drawing_data
        self._strokes = None   # lazy cache for `strokes`
        self._image = None     # lazy cache for `image`

    @property
    def image_data(self):
        """
        Returns the raw image data as list of strokes with a list of X
        co-ordinates and a list of Y co-ordinates.
        Co-ordinates are aligned to the top-left hand corner with values
        from 0 to 255.
        See https://github.com/googlecreativelab/quickdraw-dataset#simplified-drawing-files-ndjson
        for more information regarding how the data is represented.
        """
        return self._drawing_data["image"]

    @property
    def strokes(self):
        """
        Returns a list of pen strokes containing a list of (x,y) coordinates which make up the drawing.
        To iterate though the strokes data use::
            from quickdraw import QuickDrawData
            qd = QuickDrawData()
            anvil = qd.get_drawing("anvil")
            for stroke in anvil.strokes:
                for x, y in stroke:
                    print("x={} y={}".format(x, y))
        """
        # load the strokes
        if self._strokes is None:
            max_x, _, max_y, _, a = self.get_rescale_factors(self.image_data)
            self._strokes = []
            for stroke in self.image_data:
                points = []
                # NOTE: the rescale rewrites stroke[0]/stroke[1] in place, so
                # image_data is mutated the first time `strokes` is accessed.
                stroke[0] = [x for x in np.rint(np.interp(stroke[0], (max_x - a, max_x), (0, 255))).tolist()]
                stroke[1] = [y for y in np.rint(np.interp(stroke[1], (max_y - a, max_y), (0, 255))).tolist()]
                xs = stroke[0]
                ys = stroke[1]
                if len(xs) != len(ys):
                    raise Exception("something is wrong, different number of x's and y's")
                for point in range(len(xs)):
                    x = xs[point]
                    y = ys[point]
                    points.append((x, y))
                self._strokes.append(points)
        return self._strokes

    @property
    def image(self):
        """
        Returns a `PIL Image <https://pillow.readthedocs.io/en/3.0.x/reference/Image.html>`_
        object of the drawing on a white background with a black drawing. Alternative image
        parameters can be set using ``get_image()``.
        To save the image you would use the ``save`` method::
            from quickdraw import QuickDrawData
            qd = QuickDrawData()
            anvil = qd.get_drawing("anvil")
            anvil.image.save("my_anvil.gif")
        """
        if self._image is None:
            self._image = self.get_image()
        return self._image

    def get_image(self, stroke_color=(0,0,0), stroke_width=2, bg_color=(255,255,255)):
        """
        Get a `PIL Image <https://pillow.readthedocs.io/en/3.0.x/reference/Image.html>`_
        object of the drawing.
        :param list stroke_color:
            A list of RGB (red, green, blue) values for the stroke color,
            defaults to (0,0,0).
        :param int stroke_width:
            A width of the stroke, defaults to 2.
        :param list bg_color:
            A list of RGB (red, green, blue) values for the background color,
            defaults to (255,255,255).
        """
        image = Image.new("RGB", (256,256), color=bg_color)
        image_draw = ImageDraw.Draw(image)
        for stroke in self.strokes:
            image_draw.line(stroke, fill=stroke_color, width=stroke_width)
        return image

    def get_rescale_factors(self, strokes):
        """
        Get the max and min x and y value of a given image, by processing its strokes.
        `a` is the side length of a square perfectly fitting the given image.
        """
        # Initialize the return values with the smallest / largest possible values
        max_x = 0
        min_x = np.inf
        max_y = 0
        min_y = np.inf
        a = 0
        # Loop through all strokes to find min / max values
        for stroke in strokes:
            # Potential new min / max
            _max_x = max(stroke[0])
            _min_x = min(stroke[0])
            # If new min / max is found, assign to global min / max of drawing
            if _max_x > max_x:
                max_x = _max_x
            if _min_x < min_x:
                min_x = _min_x
            # Potential new min / max
            _max_y = max(stroke[1])
            _min_y = min(stroke[1])
            if _max_y > max_y:
                max_y = _max_y
            if _min_y < min_y:
                min_y = _min_y
            # Calculate the side length of a square perfectly fitting the drawing
            _a = max_x - min_x
            if max_y - min_y > a:
                a = max_y - min_y
            if _a > a:
                a = _a
        return max_x, min_x, max_y, min_y, a

    # @staticmethod
    # def tupels_to_arrays(tupel_arrays):
    #     """
    #     Takes an list of strokes, each represented as a list of (x, y) tupels and converts them into
    #     """
    #     strokes = [[], []]
    #     for arr in tupel_arrays:
    #         for t in arr:
    #             strokes[0].append(int(t[0]))
    #             strokes[1].append(int(t[1]))
    #     return strokes

    @staticmethod
    def image_to_stroke_array(image, dim=256, background=[255,255,255]):
        """
        Convert a flat RGB image into a single [xs, ys] stroke containing the
        coordinates of every non-background pixel.
        """
        stroke = [[], []]
        image = np.reshape(image, (dim, dim, 3))
        for x in range(len(image)):
            for y in range(len(image[x])):
                if (image[x,y]!=background).any():
                    stroke[0].append(x)
                    stroke[1].append(y)
        return stroke

    def __str__(self):
        # Fix: this class stores no `key_id` attribute (upstream quickdraw
        # does), so the old format raised AttributeError; report the name.
        return "QuickDrawing name={}".format(self._name)
import re
import logging
import time
# Candidate datetime layouts tried in order by get_date().
# NOTE(review): several entries look like exact duplicates — presumably the
# originals used full-width (CJK) separators that were normalised away; the
# duplicates are kept as-is.
pattern = [
    u"%Y-%m-%d",
    u"%Y-%m-%d %H:%M",
    u"%Y-%m-%d %H:%M",
    u"%Y-%m-%d %H:%M:%S",
    u"%Y-%m-%d %H:%M:%S",
    u"%Y/%m/%d",
    u"%Y/%m/%d %H:%M",
    u"%Y/%m/%d %H:%M",
    u"%Y/%m/%d %H:%M:%S",
    u"%Y/%m/%d %H:%M:%S",
    u"%Y %m/%d",
    u"%Y %m/%d %H:%M",
    u"%Y %m/%d %H:%M",
    u"%Y %m/%d %H:%M:%S",
    u"%Y %m/%d %H:%M:%S",
    u"%Y年%m月%d日",
    u"%Y年%m月%d日 %H时%M分",
    u"%Y年%m月%d日 %H时%M分",
    u"%Y年%m月%d日 %H时%M分%S秒",
    u"%Y年%m月%d日 %H时%M分%S秒"
]

def get_date(str):
    """Parse `str` against each layout in `pattern`, returning the first
    matching `time.struct_time`, or None when nothing matches.

    (The parameter shadows the builtin `str`; the name is kept for
    interface compatibility.)
    """
    for p in pattern:
        try:
            date = time.strptime(str, p)
            if date:
                return date
        # Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # strptime raises ValueError (bad format) or TypeError (bad type).
        except (ValueError, TypeError):
            continue
    return None
def get_str(date):
    """Format a struct_time as a YYYY-MM-DD string."""
    formatted = time.strftime("%Y-%m-%d", date)
    return formatted
logging.basicConfig(level=logging.INFO)

# Keyword fragments that are noise and must be deleted from a line.
removes = ["发布机构:", "文章来源:", "来源:", "时间:", "《", "》"]
# Separators on which a line may be split into several organisation names.
splits = ["/", " ", "、", "||", "-"]

# Delete every keyword from `removes` that appears in the line.
def remove_normalize(line):
    result = line.replace(" ", "")
    for r in removes:
        if result.find(r) != -1:
            # Fix: `str.strip(r)` treats `r` as a *character set* and trims
            # those characters from both ends, which can eat unrelated
            # leading/trailing characters; `replace` removes the keyword.
            result = result.replace(r, "")
    return result.strip()

# Split the line on the first separator from `splits` that it contains.
def split_normalize(line):
    result = line
    for s in splits:
        if result.find(s) != -1:
            return True, result.split(s)
    return False, [result.strip()]
# Apply matching rules in sequence.
def rule_normalize(line, rule):
    """Successively narrow `line` to the text matched by each regex in
    `rule` (case-insensitive); patterns that do not match are skipped."""
    result = line
    for regex in rule:
        found = re.search(regex, result, re.I)
        if found:
            result = found.group(0)
    return result
# Detect whether the text contains any digit.
def has_digit(processor):
    """Return a truthy re.Match when `processor` contains a digit, else None."""
    digits = re.search(r'\d+', processor)
    return digits
# Normalise a Chinese organisation/source line.
def normalize_zh(line):
    """Strip noise keywords, drop date/numeric fragments around known source
    markers, then split the remainder on the configured separators; returns
    a list of candidate organisation names."""
    pre_remove = remove_normalize(line.strip().replace("\xa0", " "))
    pre_processor = pre_remove
    if has_digit(pre_processor):
        # The line still carries digits: try to isolate the non-numeric
        # fragment around a source/update-time marker.
        if pre_processor.find("来源:") != -1:
            pre_filter = pre_remove.split("来源:")
            filter = []  # NOTE: shadows the builtin `filter`
            for f in pre_filter:
                if len(f.strip()) != 0 and (not has_digit(f)):
                    filter.append(f)
            if len(filter) != 0:
                pre_processor = filter[0]
        elif pre_processor.find("更新时间:") != -1:
            pre_filter = pre_remove.split("更新时间:")
            filter = []
            for f in pre_filter:
                if len(f.strip()) != 0 and (not has_digit(f)):
                    filter.append(f)
            if len(filter) != 0:
                pre_processor = filter[0]
            else:
                # Fall back to fragments that are not parseable dates.
                post_filter = []
                for f in pre_filter:
                    if len(f.strip()) != 0 and (not get_date(f)):
                        post_filter.append(f)
                if len(post_filter) != 0:
                    pre_processor = post_filter[0]
    else:
        # No digits: keep only the CJK portion of the line.
        pre_rule = [r'[\u4e00-\u9fa5]+']
        pre_processor = rule_normalize(pre_remove, pre_rule)
    label, split = split_normalize(pre_processor.strip())
    split_wrap = []
    if label:
        # A split was performed.
        split_wrap.extend(split)
    else:
        # The data was not split.
        split_wrap.append(split[0])
    # Post-processing rules (currently none).
    post_rule = []
    result = []
    for s in split_wrap:
        result.append(rule_normalize(s, post_rule))
    return result
# Normalize organisation information written in English.
def normalize_en(line):
    """Placeholder: English normalization is not implemented yet, so this
    implicitly returns None (callers receive None instead of a list)."""
    pass
def individual(line, options, web_site):  # site-specific special handling
    """Site-specific extraction for pages that need dedicated treatment.

    For the State Council Information Office site the organisation name
    follows the "来源:" marker; any other site yields no result.

    Args:
        line: raw text to process.
        options: language option (unused here; kept for a uniform signature).
        web_site: identifier of the originating site.

    Returns:
        List of extracted organisation names (possibly empty).
    """
    if web_site == "国务院新闻办公室(scio.gov.cn)":
        # Raw string so the \w escape reaches the regex engine verbatim
        # (the original non-raw literal relied on Python not interpreting
        # the unknown \w escape).
        line = re.compile(r"来源:(\w+)").findall(line)
        return line
    else:
        return []
def normalize(line, options, web_site):
    """Dispatch a line to site-specific or language-specific normalization.

    Returns a list of normalized strings (or None for the unimplemented
    English path); unknown options / non-string input fall through to a
    single-element list wrapping the input unchanged.
    """
    # Sites with dedicated handling bypass the generic path entirely.
    if web_site in ["国务院新闻办公室(scio.gov.cn)"]:
        return individual(line, options, web_site)
    if isinstance(line, str):
        if options == "zh":
            return normalize_zh(line)
        if options == "en":
            return normalize_en(line)
    return [line]
# Smoke test: normalize one sample line and log the result.
if __name__ == '__main__':
    lines = [
        "来源:中国体育报"
    ]
    # NOTE(review): web_site is a bool here while normalize() compares it
    # against site-name strings — the membership test is simply False, so
    # the generic "zh" path runs; presumably a placeholder value — confirm.
    web_site = False
    for line in lines:
        logging.info(normalize(line, "zh", web_site))
|
#!/usr/bin/env python
# coding: utf-8
"""
Extraire un tableau pour un Run en transitoire :
- zfond : point le plus bas du casier (issu de la géométrie des PCS)
- zmax : niveau maximum
- hmoy : hauteur moyenne maximum, calculée comme ratio Vol/Splan
Il faut que les variables Z, Vol et Splan soient disponibles aux casiers.
"""
import numpy as np
import pandas as pd
import sys
from crue10.etude import Etude
from crue10.utils import ExceptionCrue10, logger
from crue10.utils.cli_parser import MyArgParse
def crue10_extract_table_at_casiers(args):
    """Extract a per-casier summary table for a transient calculation of a
    Crue10 Run and write it as a ';'-separated CSV.

    Columns: emh_name, zfond (casier bottom level), zmax (max water level),
    hmoy (max mean height, computed as Vol/Splan).

    Exits with status 2 when one of the required variables (Z, Vol, Splan)
    is not available at the casiers.
    """
    # Read the study and select the scenario (current scenario by default).
    etude = Etude(args.etu_path)
    if args.sc_name is None:
        scenario = etude.get_scenario_courant()
    else:
        scenario = etude.get_scenario(args.sc_name)
    scenario.read_all()
    # Get results at Casiers (latest Run by default).
    if args.run_id is None:
        run = scenario.get_dernier_run()
    else:
        run = scenario.get_run(args.run_id)
    resultats = run.get_resultats_calcul()
    res_trans = resultats.get_data_trans(args.calc_trans)
    emh_names = resultats.emh['Casier']
    variables = resultats.variables['Casier']
    # Result array; indexed below as [time, casier, variable].
    res = res_trans['Casier']
    # Check that the required variables exist at Casiers.
    try:
        pos_Z = variables.index('Z')
        pos_Vol = variables.index('Vol')
        pos_Splan = variables.index('Splan')
    except ValueError as e:
        logger.critical("Au moins une variable aux casiers est manquante : %s" % e)
        sys.exit(2)
    # Restrict to the requested [start_time, end_time] window.
    time = resultats.get_res_calc_trans(args.calc_trans).time_serie()
    res = res[np.logical_and(args.start_time <= time, time <= args.end_time), :, :]
    # Compute Vol/Splan (except when Splan=0, to avoid division by zero:
    # those cells stay 0.0 via `out=`) and take the max over the time axis.
    hmoy = np.max(np.divide(res[:, :, pos_Vol], res[:, :, pos_Splan],
                  out=np.zeros_like(res[:, :, pos_Vol]), where=res[:, :, pos_Splan] != 0),
                  axis=0)
    df = pd.DataFrame({
        'emh_name': emh_names,
        'zfond': [scenario.modele.get_casier(ca_name).get_min_z() for ca_name in emh_names],
        'zmax': np.max(res[:, :, pos_Z], axis=0),
        'hmoy': hmoy,
        # 'Volmax': np.max(res[:, :, pos_Vol], axis=0),
        # 'Splanmax': np.max(res[:, :, pos_Splan], axis=0),
    })
    df.to_csv(args.csv_path, sep=';')
# Command-line interface (help texts in French, as in the rest of the tool);
# --sc_name and --run_id fall back to the current scenario / latest Run.
parser = MyArgParse(description=__doc__)
parser.add_argument('etu_path', help="chemin vers l'étude Crue10 à lire (fichier etu.xml)")
parser.add_argument('--sc_name', help="nom du scénario (avec le preffixe Sc_) (si absent alors le scénario courant est pris)")
parser.add_argument('--run_id', help="identifiant du Run à exploiter (si absent alors le dernier Run est pris)")
# Time window defaults cover all times (-inf, +inf).
parser.add_argument('--start_time', help="premier temps (en secondes) à considérer", type=float, default=-float('inf'))
parser.add_argument('--end_time', help="dernier temps (en secondes) à considérer", type=float, default=float('inf'))
parser.add_argument('calc_trans', help="nom du calcul transitoire")
parser.add_argument('csv_path', help="chemin vers le fichier CSV de sortie")
if __name__ == '__main__':
    args = parser.parse_args()
    try:
        crue10_extract_table_at_casiers(args)
    except ExceptionCrue10 as e:
        # Domain errors raised by the crue10 library abort with status 1
        # (status 2 is reserved for the missing-variable case above).
        logger.critical(e)
        sys.exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.