max_stars_repo_path
stringlengths 4
182
| max_stars_repo_name
stringlengths 6
116
| max_stars_count
int64 0
191k
| id
stringlengths 7
7
| content
stringlengths 100
10k
| size
int64 100
10k
|
---|---|---|---|---|---|
PSCN/data_imdbB.py
|
Lawlietgit/MA-GCNNs
| 6 |
2022797
|
import graph_tool
class Node:
def __init__(self,node,graph,atr = 0):
self.node = node
self.graph = graph
self.atr = atr
pnode_file = open("IMDB-MULTI/IMDB-MULTI_graph_indicator.txt","r")
pedge_file = open("REDDIT-BINARY/REDDIT-BINARY_A.txt",'r')
label_file = open("REDDIT-BINARY/REDDIT-BINARY_graph_labels.txt","r")
#node_atr = open("IMDB-BINARY/PTC_MR_node_labels.txt","r")
label = []
Graph = []
for lline in label_file:
label.append(lline)
g = graph_tool.Graph()
g.set_directed(False)
Graph.append(g)
l = len(label)
Nodes = {}
node_num = []
k = 1
n = 0
for i,node_line in enumerate(pnode_file):
#node_label = node_atr.readline().strip('\n')
if int(node_line) == k:
Nodes[i] = Node(n,k-1)
n = n + 1
else:
Graph[k-1].add_vertex(n)
vprop_value = Graph[k-1].new_vertex_property("int")
Graph[k-1].vp.label = vprop_value
k = k + 1
n = 0
Nodes[i] = Node(n,k-1)
Graph[k-1].add_vertex(n)
vprop_value = Graph[k-1].new_vertex_property("int")
Graph[k-1].vp.label = vprop_value
print("hello")
for i in range(len(Nodes)):
No = Nodes[i]
Graph[No.graph].vp.label[Graph[No.graph].vertex(No.node)] = No.atr
for i,edge_line in enumerate(pedge_file):
node1,node2 = edge_line.split(', ')
Node1 = Nodes[int(node1)-1]
Node2 = Nodes[int(node2)-1]
if Node1.node <= Node2.node:
Node1.atr += 1
Node2.atr += 1
Graph[Node1.graph].add_edge(Graph[Node1.graph].vertex(Node1.node),Graph[Node1.graph].vertex(Node2.node))
for k in range(len(Graph)):
vprop_value = Graph[k].new_vertex_property("int")
Graph[k].vp.label = vprop_value
for i in range(len(Nodes)):
No = Nodes[i]
Graph[No.graph].vp.label[Graph[No.graph].vertex(No.node)] = No.atr
f_text = open("Reddit-B/text.txt","w")
for i in range(len(Graph)):
file_name = "reddit_b_" + str(i)
Graph[i].save("Reddit-B/"+ file_name + ".xml.gz")
f_text.write(file_name + ".xml.gz" + " " + label[i])
print(Graph[0])
print(Graph[len(Graph)-1])
| 2,011 |
project/custom_session_interface.py
|
DanielGrams/gsevp
| 1 |
2023495
|
from flask import request
from flask.sessions import SecureCookieSessionInterface
class CustomSessionInterface(SecureCookieSessionInterface):
"""Prevent creating session from API requests."""
def save_session(self, *args, **kwargs):
if "authorization" in request.headers:
return
return super(CustomSessionInterface, self).save_session(*args, **kwargs)
| 391 |
scripts/run_notebook.py
|
milakis/microeconometrics
| 0 |
2022885
|
#!/usr/bin/env python
"""Run notebooks.
This script allows to run the lecture notebooks. One can either run all notebooks at once or just a
single lecture. It is enough to provide a substring for the name.
Examples
--------
>> run-notebook Run all lectures.
>> run-notebook -n 01 Run lecture 01-introduction.
"""
import subprocess as sp
import glob
import os
from auxiliary import parse_arguments
from auxiliary import LECTURES_ROOT
def run_notebook(notebook):
cmd = " jupyter nbconvert --execute {} --ExecutePreprocessor.timeout=-1".format(
notebook
)
sp.check_call(cmd, shell=True)
if __name__ == "__main__":
request = parse_arguments("Execute notebook")
os.chdir(LECTURES_ROOT)
for dirname in request:
os.chdir(dirname)
for fname in glob.glob("*.ipynb"):
print(f"\n {os.getcwd().split('/')[-1]}\n")
run_notebook(fname)
os.chdir("../")
| 947 |
get_answer.py
|
VincentGaoHJ/Spider-Zhihu
| 0 |
2023427
|
# -*- coding: utf-8 -*-
"""
@Date: Created on 2019/7/26
@Author: <NAME>
@Description:
"""
import os
import csv
from zhihu_APIs import *
from data_getter import get_data
from w3lib.html import remove_tags
from headers_pool import HEADERS_POOL
# default args
PROCESS_NUM = 4
MAX_PROCESS_NUM = 8
FLOOD_DISCHARGE_RATIO = 0.3
FLOODPLAIN_RATIO = 0.1
HEADERS_POOL = HEADERS_POOL
def load_question(topic_id):
file_name = str(topic_id) + "_topic.csv"
file_path = os.path.join("./data", file_name)
data = []
with open(file_path, encoding="utf-8-sig") as csvfile:
csv_reader = csv.reader(csvfile) # 使用csv.reader读取csvfile中的文件
for row in csv_reader: # 将csv 文件中的数据保存到birth_data中
data.append(row)
return data
def get_answer(topic_id, topic_name):
# 不考虑云端数据变化,step小于limit时,取样必然有重叠
time.perf_counter() # 计时
zhi = ZhiHu()
func = zhi.questions.answers
question_list = load_question(topic_name)
total_num = len(question_list)
print("[获取回答] ===== 正在准备请求 {} 共有问题数 {} =====".format(topic_name, total_num))
i = 0
answer_all = []
fetch_body = []
for question in question_list:
i += 1
question_id = question[0]
question_ansnum = int(question[1])
fetch_body.append({"identifier": question_id,
"query_args": ["content"],
"range": [0, question_ansnum]})
# break
print("[获取回答] ===== 正在请求数据 {} 共有问题数 {} =====".format(topic_name, total_num))
res = get_data(fetch_body, func, process_num=PROCESS_NUM,
max_process_num=MAX_PROCESS_NUM,
flood_discharge_ratio=FLOOD_DISCHARGE_RATIO,
floodplain_ratio=FLOODPLAIN_RATIO,
headers_pool=HEADERS_POOL)
# print(res)
print("[获取回答] ===== 正在处理数据 {} 共有问题数 {} =====".format(topic_name, total_num))
i = 0
for question_id, question_result in res.items():
i += 1
answer_list = question_result["data"]
if i % 1000 == 0:
print("[处理问题 {} / {}]".format(i, total_num), question_id)
for item in answer_list:
answer_id = item["id"]
raw_ans = item["content"]
question_content = item["question"]["title"]
answer_content = remove_tags(raw_ans)
answer_all.append((question_id, answer_id, question_content, answer_content))
print("[获取回答] ===== 正在保存数据 {} 共有问题数 {} =====".format(topic_name, total_num))
file_name = str(topic_name) + "_answers.csv"
file_path = os.path.join("./data", file_name)
with open(file_path, "a", encoding="utf-8-sig", newline='') as file:
writer = csv.writer(file)
for item in answer_all:
writer.writerows([item])
if __name__ == '__main__':
topic_id = "19574423"
topic_name = "ZhongGuoJinDaiShi"
get_answer(topic_id, topic_name)
| 2,904 |
fltk/util/generate_docker_compose.py
|
tudelft-eemcs-dml/fltk-testbed-gr-5
| 1 |
2023141
|
import sys
import yaml
import copy
template_path = './deploy/templates'
def load_system_template():
with open(f'{template_path}/system_stub.yml') as file:
documents = yaml.full_load(file)
return documents
def load_client_template(type='default'):
with open(f'{template_path}/client_stub_{type}.yml') as file:
documents = yaml.full_load(file)
return documents
def generate_client(id, template: dict, world_size: int, type='default'):
local_template = copy.deepcopy(template)
key_name = list(local_template.keys())[0]
container_name = f'client_{type}_{id}'
local_template[container_name] = local_template.pop(key_name)
for key, item in enumerate(local_template[container_name]['environment']):
if item == 'RANK={rank}':
local_template[container_name]['environment'][key] = item.format(rank=id)
if item == 'WORLD_SIZE={world_size}':
local_template[container_name]['environment'][key] = item.format(world_size=world_size)
local_template[container_name]['ports'] = [f'{5000+id}:5000']
return local_template, container_name
def generate(num_clients: int):
world_size = num_clients + 1
system_template :dict = load_system_template()
for key, item in enumerate(system_template['services']['fl_server']['environment']):
if item == 'WORLD_SIZE={world_size}':
system_template['services']['fl_server']['environment'][key] = item.format(world_size=world_size)
for client_id in range(1, num_clients+1):
client_type = 'default'
if client_id == 1:
client_type='slow'
if client_id == 2:
client_type='medium'
client_template: dict = load_client_template(type=client_type)
client_definition, container_name = generate_client(client_id, client_template, world_size, type=client_type)
system_template['services'].update(client_definition)
with open(r'./docker-compose.yml', 'w') as file:
yaml.dump(system_template, file, sort_keys=False)
if __name__ == '__main__':
num_clients = int(sys.argv[1])
generate(num_clients)
print('Done')
| 2,165 |
examples/host2gw_diagram.py
|
community-fabric/python-ipfabric-diagrams
| 1 |
2023573
|
"""
unicast_diagram.py
"""
from ipfabric_diagrams import IPFDiagram, PathLookupSettings, Host2GW
if __name__ == '__main__':
ipf = IPFDiagram()
h2g = Host2GW(startingPoint='10.241.1.203')
json_data = ipf.diagram_json(h2g)
settings = PathLookupSettings()
png_data = ipf.diagram_png(h2g, graph_settings=settings)
with open('tmp/host2gw.png', 'wb') as f:
f.write(png_data)
svg_data = ipf.diagram_svg(h2g)
with open('tmp/host2gw.svg', 'wb') as f:
f.write(svg_data)
ipf.close()
| 528 |
testing/unexpected_passes_common/constants.py
|
chromium/chromium
| 14,668 |
2023181
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Constants for unexpected pass finders."""
class BuilderTypes(object):
CI = 'ci'
TRY = 'try'
| 263 |
Chapter2_Freight_Train_Formation_Plan_Problems/Column_Generation.py
|
gaotianze/Gurobi_Learning
| 0 |
2023540
|
# From Gurobi Tutorial P142 - Cutting Stock Problem
# Column Generation Tutorial - https://www.cnblogs.com/dengfaheng/p/11249879.html
import gurobipy
from gurobipy import *
TypesDemand = [3, 7, 9, 16]
QuantityDemand = [25, 30, 14, 8]
LengthUsable = 20
MainProbRelax = Model()
SubProb = Model()
# 构建主问题模型,选择的初始切割方案每根钢管只切一种长度
# 添加变量、目标函数
# 添加了4个变量(分别代表4种方案): z1 z2 z3 z4
Zp = MainProbRelax.addVars(len(TypesDemand), obj=1.0, vtype=GRB.CONTINUOUS, name='z') # 先将主问题松弛成连续,并创建包含4初始方案的基变量
# 添加约束
# 拿四个初始方案做约束添加进去
# 6*z_1 >= 25; 2*z_2>=30; 2*z_3>=14; 1*z_4>=8;
ColumnIndex = MainProbRelax.addConstrs(
quicksum(Zp[p] * (LengthUsable // TypesDemand[i]) for p in range(len(TypesDemand)) if p == i) >= QuantityDemand[i]
for i in range(len(TypesDemand)))
MainProbRelax.optimize()
# 构造子问题模型
# 获得对偶值
# lambda_list=MainProbRelax.getAttr(GRB.Attr.Pi, MainProbRelax.getConstrs())
Dualsolution = MainProbRelax.getAttr(GRB.Attr.Pi, MainProbRelax.getConstrs())
# 添加变量
# 目标函数此时为: max(0.166*c_1 + 0.5*c_2 + 0.5*c_3 + c_4)
Ci = SubProb.addVars(len(TypesDemand), obj=Dualsolution, vtype=GRB.INTEGER, name='c')
# 添加约束
# 3c1+7c2+9c3+16c4 <= 20 单根卷钢长度约束
SubProb.addConstr(quicksum(Ci[i] * TypesDemand[i] for i in range(len(TypesDemand))) <= LengthUsable)
SubProb.setAttr(GRB.Attr.ModelSense, -1) # -1为maximize
SubProb.optimize()
# 判断Reduced Cost是否小于0
while SubProb.objval > 1:
# 获取变量取值
columnCoeff = SubProb.getAttr("X", SubProb.getVars())
column = Column(columnCoeff, MainProbRelax.getConstrs())
# 读取到新方案 [2,2,0,0]^T ,作为新的一列添加到RMP中
# 添加变量
MainProbRelax.addVar(obj=1.0, vtype=GRB.CONTINUOUS, name='CG', column=column)
MainProbRelax.optimize()
# 修改目标函数系数
for i in range(len(TypesDemand)):
Ci[i].obj = ColumnIndex[i].pi
SubProb.optimize()
# 将CG后的模型转为整数,并求
for v in MainProbRelax.getVars():
v.setAttr("VType", GRB.INTEGER)
MainProbRelax.optimize()
print("\nSolotion:")
for v in MainProbRelax.getVars():
if v.X != 0.0:
print('%s %g次' % (v.VarName, v.X))
| 2,005 |
sort/quicksort/quicksort.py
|
BlueRhino/algorithm-python
| 0 |
2023576
|
import random
def partition_arr(arr, start, end):
"""
对输入数组arr在范围start及end之间的元素使用快速排序,使用arr[end]元素作为排序分界
返回arr[end]元素的索引位置
:param arr:
:param start:
:param end:
:return:arr[end]元素的索引位置
"""
if start < 0 or end < start:
raise Exception("The index is not correct.")
arr_len = len(arr)
if end > arr_len - 1:
raise Exception("The end index must less than the length of arr.")
flag = arr[end]
i = start - 1
for j in range(start, end):
if arr[j] <= flag:
i += 1
__exchange_value(arr, i, j)
__exchange_value(arr, i + 1, end)
return i + 1
def quick_sort(arr, start, end):
if start < end:
index = partition_arr(arr, start, end)
quick_sort(arr, start, index - 1)
quick_sort(arr, index + 1, end)
def quick_sort_random(arr, start, end):
i = random.randint(start, end)
__exchange_value(arr, i, end)
if start < end:
index = partition_arr(arr, start, end)
quick_sort(arr, start, index - 1)
quick_sort(arr, index + 1, end)
def __exchange_value(arr, index1, index2):
arr_len = len(arr)
if 0 <= index1 < arr_len and 0 <= index2 < arr_len:
if index1 == index2:
return
tmp = arr[index1]
arr[index1] = arr[index2]
arr[index2] = tmp
else:
raise Exception("Index is not correct.")
| 1,405 |
leetcode/p54.py
|
mythnc/lab
| 0 |
2023363
|
from typing import List
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
directions = (
(0, 1),
(1, 0),
(0, -1),
(-1, 0)
)
row = len(matrix)
col = len(matrix[0])
total = row * col
count = 0
d_index = 0
result = [0] * total
r = 0
c = 0
VISITED_VALUE = 101
while count < total:
result[count] = matrix[r][c]
matrix[r][c] = VISITED_VALUE
count += 1
if count >= total:
break
next_r, next_c = directions[d_index]
while (not ((0 <= r+next_r < row) and (0 <= c+next_c < col)
and matrix[r+next_r][c+next_c] != VISITED_VALUE)):
d_index = (d_index + 1) % len(directions)
next_r, next_c = directions[d_index]
r += next_r
c += next_c
return result
print(Solution().spiralOrder([[1,2,3],[4,5,6],[7,8,9]]))
print(Solution().spiralOrder([[1,2,3,4],[5,6,7,8],[9,10,11,12]]))
| 1,107 |
examples/testProtection.py
|
Bill2462/AX3003P
| 0 |
2023243
|
""" Example that demonstrates the overcurrent protection (OVP)
and overvoltage protection (OVC).
This script sets the OVP level to 5V and OCP to 0.3A.
Then slowely increases the voltage until the OVP trips.
Next the OVP is reseted and the same procedure is repeated with the OCP.
"""
# This file is part of AX3003P library.
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the “Software”), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
# OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
from time import sleep
import AX3003P
# Get the port selection from the command line argument.
if len(sys.argv) < 2:
print("usage: python3 helloWorld.py [device]")
print("Example: python3 helloWorld.py /dev/ttyUSB0")
sys.exit(1)
port = sys.argv[1]
# connect to the power supply and enable output
psu = AX3003P.connect(port)
# Set the OVP and OCP level.
print("Setting OVP threshold to 5V...")
psu.setOvpTreshold(5)
print("Setting OCP threshold to 0.5A")
psu.setOcpTreshold(0.3)
print("\n######### Testing the OVP #########")
# set the current to 50mA
psu.setCurrent(0.05)
psu.enableOutput()
# slowly increase the voltage until the OVP trips
voltages = [1, 2, 4, 9] # voltages that we are going to test
for voltage in voltages:
# a little hack to trigger the OVP.
# Normally PSU won't allow us to set voltage higher then OVP threshold.
# However we can first turn the OVP off, set the voltage and turn it back on.
psu.disableOvp()
sleep(2)
psu.setVoltage(voltage)
sleep(4)
psu.enableOvp()
sleep(1) # delay to allow the voltage to stabilize
# check if ovp is tripped
if psu.ovpTripped():
status = "TRIP"
else:
status = "RDY"
# print the status
print("Voltage: " + str(voltage) + "V OVP: " + status)
if status == "TRIP":
break # exit if ovp tripped
# reset OVP and set voltage to 5V
print("Resetting the OVP...")
psu.resetOvp()
psu.setVoltage(5)
#now we have to short the PSU output
print("Short PSU output and press enter to continue")
input()
print("\n######### Testing the OCP #########")
psu.enableOutput()
# slowely increase the current until OCP trips
currents = [0.1, 0.2, 0.3, 1.0]
for current in currents:
psu.disableOcp()
sleep(2)
psu.setCurrent(current)
sleep(4)
psu.enableOcp()
sleep(1) # delay to allow the voltage to stabilize
# check if ovp is tripped
if psu.ocpTripped():
status = "TRIP"
else:
status = "RDY"
# print the status
print("Curent: " + str(current) + "A OCP: " + status)
if status == "TRIP":
break # exit if ocp tripped
#disable output and disconnect
psu.disableOutput()
psu.resetOcp()
psu.disconnect()
| 3,514 |
webscraper/Windsor/WebScraping.py
|
rex-lui/Hong-Kong-Mall-Shop-Directory-Web-Scraping
| 0 |
2023737
|
#Import necessary package
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
import json
#Configure parameter
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplisturl = config['url']['shoplisturl']
fnblisturl = config['url']['fnblisturl']
shopdetailbasicurl = config['url']['shopdetailbasicurl']
#Get shop category data and export into csv
def getShopCategory():
#Create empty DataFrame for shop category
shopcategory = pd.DataFrame()
for type, url in zip(['Shopping','Dining'],[shoplisturl,fnblisturl]):
#Get shop category
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for category_selected in soup.find_all('select', class_ = 'categorySelected'):
for cat in category_selected.find_all('option'):
try:
shop_category_id = cat.get('value')
except:
shop_category_id = np.nan
try:
shop_category_name = cat.text.split('\r\n')[0].strip()
except:
shop_category_name = np.nan
shopcategory = shopcategory.append(
{
'type':type,
'shop_category_id':shop_category_id,
'shop_category_name':shop_category_name
}, ignore_index=True
)
shopcategory['update_date'] = dt.date.today()
shopcategory['mall'] = mall
shopcategory.drop(shopcategory[shopcategory.shop_category_name == 'All'].index, inplace = True)
shopcategory = shopcategory.loc[:, ['mall','type','shop_category_id','shop_category_name','update_date']]
return shopcategory
#Get shop master data and export into csv
def getShopMaster():
shopcategory = getShopCategory()
#Create empty DataFrame for shop master
shoplist = pd.DataFrame()
shoplisttc = pd.DataFrame()
shopdetail = pd.DataFrame()
for type, url in zip(['Shopping','Dining'],[shoplisturl,fnblisturl]):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
for shop in soup.find_all('div', class_ = 'shop'):
try:
shop_floor = shop.get('floorid').replace('/','').strip()
except:
shop_floor = np.nan
try:
shop_category_id = shop.get('catid')
except:
shop_category_id = np.nan
try:
shop_category_name = shopcategory.loc[shopcategory['shop_category_id'] == shop_category_id, 'shop_category_name'].values[0]
except:
shop_category_name = np.nan
for shop_body in shop.find_all('div', class_= 'card shop-body'):
for shop_content in shop_body.find_all('div', class_= 'card-body shop-body-content'):
try:
shop_detail_link = shop_content.find(class_= 'card-title').find('a').get('href')
shoplinkid = shop_detail_link.find('&id=')
shop_id = shop_detail_link[shoplinkid+4:].replace('&lang=en','')
except:
shop_detail_link = np.nan
shop_id = np.nan
try:
shop_name = shop_content.find(class_= 'card-title').find('a').text
except:
shop_name = np.nan
try:
shop_number = shop_content.find(src = re.compile('ShopDetail_icon_shopNo')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
except:
shop_number = np.nan
for shop_footer in shop_body.find_all('div', class_= 'card-footer'):
try:
if shop_footer.find(class_ = 'shop-tag-club'):
loyalty_offer = 'WINDSOR CLUB Offer'
else:
loyalty_offer = np.nan
except:
loyalty_offer = np.nan
try:
if shop_footer.find(class_ = 'shop-tag-coupon'):
voucher_acceptance = '1'
else:
voucher_acceptance = np.nan
except:
voucher_acceptance = np.nan
shoplist = shoplist.append(
{
'type':type,
'shop_id':shop_id,
'shop_name_en':shop_name,
'shop_number':shop_number,
'shop_floor':shop_floor,
'shop_category_id':shop_category_id,
'shop_category_name':shop_category_name,
'loyalty_offer':loyalty_offer,
'voucher_acceptance':voucher_acceptance,
'shop_detail_link':shop_detail_link
}, ignore_index=True
)
urltc = url.replace('en','tc')
page = requests.get(urltc)
soup = BeautifulSoup(page.content, 'html.parser')
for shop in soup.find_all('div', class_ = 'shop'):
for shop_body in shop.find_all('div', class_= 'card shop-body'):
for shop_content in shop_body.find_all('div', class_= 'card-body shop-body-content'):
try:
shop_detail_link = shop_content.find(class_= 'card-title').find('a').get('href')
shoplinkid = shop_detail_link.find('&id=')
shop_id = shop_detail_link[shoplinkid+4:].replace('&lang=tc','')
except:
shop_detail_link = np.nan
shop_id = np.nan
try:
shop_name_zh = shop_content.find(class_= 'card-title').find('a').text
except:
shop_name_zh = np.nan
shoplisttc = shoplisttc.append(
{
'shop_id':shop_id,
'shop_name_tc':shop_name_zh
}, ignore_index=True
)
for shop_detail_link in shoplist['shop_detail_link']:
shopdetailurl = shopdetailbasicurl + shop_detail_link
page = requests.get(shopdetailurl)
soup = BeautifulSoup(page.content, 'html.parser')
for shop_detail in soup.find_all('div', class_ = 'shop-detail'):
for shop_table in shop_detail.find_all('table', class_ = 'shop-table'):
try:
opening_hours = shop_table.find(src = re.compile('ShopDetail_icon_time')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
opening_hours = ';'.join([opening_hour.strip() for opening_hour in opening_hours.split('\r\n')])
except:
opening_hours = np.nan
try:
phone = shop_table.find(src = re.compile('ShopDetail_icon_tel')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
phone = phone.replace(' ','')
except:
phone = np.nan
shopdetail = shopdetail.append(
{
'shop_detail_link':shop_detail_link,
'opening_hours':opening_hours,
'phone':phone
}, ignore_index=True
)
shoplist = pd.merge(shoplist, shoplisttc, on = 'shop_id')
shopmaster = pd.merge(shoplist, shopdetail, on = 'shop_detail_link')
shopmaster['update_date'] = dt.date.today()
shopmaster['mall'] = mall
shopmaster['tag'] = np.nan
shopmaster = shopmaster.loc[:, ['mall','type','shop_id','shop_name_en','shop_name_tc','shop_number','shop_floor','phone','opening_hours','loyalty_offer','voucher_acceptance','shop_category_id','shop_category_name','tag','update_date']]
shopmaster = shopmaster[shopmaster['shop_number'] != r'(非商店)']
return shopmaster
| 8,681 |
meta/asttools/tests/test_remove_trivial.py
|
tomviner/Meta
| 95 |
2022862
|
'''
Created on Aug 5, 2011
@author: sean
'''
from __future__ import print_function
import unittest
import ast
from meta.asttools.mutators.remove_trivial import remove_trivial
from meta.asttools.tests import assert_ast_eq, skip_networkx
from meta.asttools.visitors.graph_visitor import GraphGen
def simple_case(self, toremove, expected):
root = ast.parse(toremove)
remove_trivial(root)
expected_root = ast.parse(expected)
assert_ast_eq(self, root, expected_root)
@skip_networkx
class Test(unittest.TestCase):
def assertRemoved(self, toremove, expected):
root = ast.parse(toremove)
remove_trivial(root)
expected = ast.parse(expected)
assert_ast_eq(self, root, expected)
def test_single(self):
simple_case(self, 'a = 1',
'a = 1')
def test_empty(self):
simple_case(self,'',
'')
def test_simple(self):
simple_case(self, 'a = 1; a = 2',
'pass; a = 2')
def test_multi(self):
simple_case(self, 'a = 1; a = 2; a = 3',
'pass; pass; a = 3')
def test_apart(self):
simple_case(self, 'a = 1; b = 1; a = 2',
'pass; b = 1; a = 2')
def test_if(self):
simple_case(self, 'a = 1\nif x: a = 2',
'a = 1\nif x: a = 2')
def test_if2(self):
simple_case(self, 'if x: a = 2\na = 1',
'if x: a = 2\na = 1')
def test_if_else(self):
simple_case(self, 'a = 1\nif x: a = 2\nelse: a = 3',
'pass\nif x: a = 2\nelse: a = 3')
def test_if_else2(self):
simple_case(self, 'if x: a = 2\nelse: a = 3\na = 1',
'if x: pass\nelse: pass\na = 1')
def test_for(self):
simple_case(self, 'a = 1\nfor x in y: a = 2',
'a = 1\nfor x in y: a = 2')
def test_for_else(self):
simple_case(self, 'a = 1\nfor x in y: a = 2\nelse: a = 3',
'pass\nfor x in y: a = 2\nelse: a = 3')
def test_for_else_break(self):
simple_case(self, 'a = 1\nfor x in y:\n break\n a = 2\nelse: a = 3',
'a = 1\nfor x in y:\n break\n a = 2\nelse: a = 3')
def test_for_else_conti(self):
simple_case(self, 'a = 1\nfor x in y:\n continue\n a = 2\nelse: a = 3',
'a = 1\nfor x in y:\n continue\n a = 2\nelse: a = 3')
def test_while(self):
simple_case(self, 'a = 1\nwhile x: a = 2',
'a = 1\nwhile x: a = 2')
def test_while_else(self):
simple_case(self, 'a = 1\nwhile x: a = 2\nelse: a = 3',
'pass\nwhile x: a = 2\nelse: a = 3')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 2,975 |
tests/config/test_config.py
|
kevinbuchanjr/sagify
| 352 |
2023862
|
from sagify.config.config import ConfigManager, Config
def test_config(tmpdir):
config_file = tmpdir.join('config.json')
config_file.write("""
{
"image_name": "keras-app-img",
"aws_profile": "sagemaker",
"aws_region": "us-east-1",
"python_version": "3.6",
"sagify_module_dir": "keras-app-img",
"requirements_dir": "requirements.txt"
}
""")
config_manager = ConfigManager(str(config_file))
actual_config_obj = config_manager.get_config()
assert actual_config_obj.to_dict() == Config(
image_name="keras-app-img", aws_profile="sagemaker", aws_region="us-east-1", python_version="3.6", sagify_module_dir="keras-app-img",
requirements_dir="requirements.txt"
).to_dict()
| 767 |
custom_components/sensor.py
|
hombrelab/home-assistant-smartmeter-reader
| 0 |
2022719
|
# Copyright (c) 2021 Hombrelab <<EMAIL>>
import logging
import pytz
from dsmr_parser import obis_references as obis_ref
from dsmr_parser import telegram_specifications
from dsmr_parser.parsers import TelegramParser
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from . import SmartmeterDevice
from .const import (
DOMAIN,
UUID,
SERVICE,
DSMRVERSION,
PRECISION,
TIMEZONE,
GAS_CONSUMPTION_NAME,
GAS_HOURLY_CONSUMPTION_NAME,
GAS_HOURLY_LAST_UPDATE_NAME,
ENTITIES,
ENTITIES_SCHEMA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry, async_add_entities):
"""set up entities based on a config entry"""
_version = entry.data[DSMRVERSION]
_precision = entry.data[PRECISION]
_timezone = pytz.timezone(entry.data[TIMEZONE])
# Protocol version specific obis
if _version in "4":
_gas_obis = obis_ref.HOURLY_GAS_METER_READING
_parser = TelegramParser(telegram_specifications.V4)
elif _version in "5":
_gas_obis = obis_ref.HOURLY_GAS_METER_READING
_parser = TelegramParser(telegram_specifications.V5)
elif _version in ("5B",):
_gas_obis = obis_ref.BELGIUM_HOURLY_GAS_METER_READING
_parser = TelegramParser(telegram_specifications.BELGIUM_FLUVIUS)
else:
_gas_obis = obis_ref.GAS_METER_READING
_parser = TelegramParser(telegram_specifications.V2_2)
# Define mapping for electricity mappings
elements = ENTITIES
elements += [
[
GAS_CONSUMPTION_NAME,
'mdi:fire',
_gas_obis
],
]
# generate smart entities
entities = [
ElecticityEntity(name, icon, obis, _precision, _timezone, _parser)
for name, icon, obis in elements
]
elements = [
[
GAS_HOURLY_CONSUMPTION_NAME,
'mdi:fire',
_gas_obis
],
[
GAS_HOURLY_LAST_UPDATE_NAME,
'mdi:update',
_gas_obis
],
]
# generate gas entities
entities += [
GasEntity(name, icon, obis, _precision, _timezone, _parser)
for name, icon, obis in elements
]
# Set up the sensor platform
async_add_entities(entities)
async def async_consume_service(call):
"""handle calls to the service."""
telegram = call.data.get('telegram')
telegram = telegram.replace(" ", "")
telegram = telegram.replace("\\r\\n", "\r\n")
for entity in entities:
entity.set_consumed(telegram)
hass.services.async_register(
DOMAIN,
SERVICE,
async_consume_service,
schema=ENTITIES_SCHEMA,
)
class ElecticityEntity(SmartmeterDevice, RestoreEntity):
"""representation of a electricity entity"""
def __init__(self, name, icon, obis, precision, timezone, parser):
"""initialize the electricity entity"""
self._name = name
self._icon = icon
self._obis = obis
self._element = self._name.lower().replace(" ", "_")
self._unit = ''
self._obis = obis
self._precision = precision
self._timezone = timezone
self._parser = parser
self._data = ''
self._telegram = ''
self._state = '-'
self._attributes = {}
async def async_added_to_hass(self):
"""run when entity is about to be added"""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
try:
self._state = state.state
self._attributes = state.attributes
self._data = self._attributes['data']
self._telegram = self._parser.parse(self._data)
except Exception as err:
_LOGGER.warning(f"could not restore {self._name}: {err}")
def get_attribute(self, name):
"""get the attribute value if the object has it"""
attribute = self._telegram[self._obis]
return getattr(attribute, name, None)
@staticmethod
def translate_tariff(value):
# Convert 2/1 to normal/low
# DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
# used for normal rate.
if value == '0002':
return 'normal'
if value == '0001':
return 'low'
return None
def set_consumed(self, data):
"""set the telegram for the electricity reading"""
if data is not None:
self._data = data
self._telegram = self._parser.parse(self._data)
def update(self):
try:
self._unit = self.get_attribute('unit')
except:
self._unit = ''
try:
value = self.get_attribute('value')
except:
self._state = '-'
return
if self.name == 'Smartmeter Power Consumption (both)':
value = value + self._telegram[obis_ref.ELECTRICITY_USED_TARIFF_2].value
elif self._obis == obis_ref.ELECTRICITY_ACTIVE_TARIFF:
self._state = self.translate_tariff(value)
return
try:
value = round(float(value), self._precision)
except TypeError:
pass
if value is not None:
self._state = value
else:
self._state = '-'
@property
def unique_id(self) -> str:
"""return the unique id"""
return f"{UUID}.{self._element}"
@property
def name(self) -> str:
"""return the name of the entity"""
return self._name
@property
def icon(self) -> str:
"""return the icon to be used for this entity"""
return self._icon
@property
def unit_of_measurement(self):
"""return the unit of measurement"""
return self._unit
@property
def state(self):
"""return the state of the entity"""
return self._state
@property
def state_attributes(self):
"""return the state attributes"""
return {'data': self._data}
class GasEntity(ElecticityEntity):
"""representation of a gas entity"""
def __init__(self, name, icon, obis, precision, timezone, parser):
"""initialize the gas entity"""
super().__init__(name, icon, obis, precision, timezone, parser)
self._previous_state = None
self._previous_timestamp = None
def update(self):
try:
if self._name == GAS_HOURLY_CONSUMPTION_NAME:
self._unit = f"{self.get_attribute('unit')}/h"
elif self._name == GAS_HOURLY_LAST_UPDATE_NAME:
self._unit = ''
except Exception:
self._unit = ''
try:
value = self.get_attribute('value')
except:
self._state = '-'
return
try:
timestamp = self.get_attribute('datetime')
timestamp = timestamp.astimezone(self._timezone)
except:
timestamp = ''
if self._previous_state is None:
try:
self._previous_state = self._attributes['previous_state']
except:
self._previous_state = 0
if self._previous_timestamp is None:
try:
self._previous_timestamp = self._attributes['previous_timestamp']
except:
self._previous_timestamp = ''
# check if the timestamp for the object differs from the previous one
if self.name == GAS_HOURLY_CONSUMPTION_NAME:
if timestamp != self._previous_timestamp:
try:
self._state = value - self._previous_state
#diff = value - self._previous_state
#timediff = timestamp - self._previous_timestamp
#total_seconds = timediff.total_seconds()
#self._state = round(float(diff) / total_seconds * 3600, self._precision)
except:
self._state = 0
self._previous_state = self._state
self._previous_timestamp = timestamp
else:
self._state = 0
else:
self._state = timestamp.strftime('%X')
@property
def device_state_attributes(self):
"""return the state attributes"""
return {'data': self._data, 'previous_state': self._previous_state, 'previous_timestamp': self._previous_timestamp}
| 8,657 |
b-series/b294.py
|
TheLurkingCat/ZeroJudge
| 1 |
2022662
|
a = input()
s = [int(x) for x in input().split()]
ans = 0
for date, num in enumerate(s, 1):
ans += num * date
print(ans)
| 125 |
vaquero/collectors.py
|
jbn/vaquero
| 1 |
2023709
|
import jmespath
import random
# See also: fill_in_unknowns in transformations as a collector.
def sampling(items, p):
for item in items:
if random.random() < p:
yield item
class Collector:
def update(self, item):
raise NotImplementedError("Collector#update(item) not implemented")
def update_over_all(self, items):
for item in items:
self.update(item)
@property
def collected(self):
raise NotImplementedError("Collector#collected not implemented")
class SetCollector(Collector):
"""
Collect the set of values for jmespaths over applied items.
"""
def __init__(self, paths):
self._paths = {}
self._sets = {}
self.add_paths(paths)
def add_paths(self, paths):
"""
:param paths: an interable of jmespath paths
"""
for path in paths:
self.add_path(path)
def add_path(self, path):
"""
:param path: a jmespath
"""
self._paths[path] = jmespath.compile(path)
def update(self, item):
"""
Apply the paths to an item, collecting the values.
:param item: an item to process
"""
for path, jmes_obj in self._paths.items():
res = jmes_obj.search(item)
if res is not None:
result_set = self._sets.get(path)
if not result_set:
result_set = set()
self._sets[path] = result_set
result_set.add(res)
@property
def collected(self):
return self._sets
class GroupCollector(Collector):
"""
Collect one item per group.
"""
def __init__(self, group_f):
"""
:param group_f: function which returns some key representing the group
"""
self._group_f = group_f
self._groups = {}
def update(self, item):
k = self._group_f(item)
if k not in self._groups:
self._groups[k] = item
@property
def collected(self):
return self._groups
| 2,089 |
alipay/aop/api/domain/SmartAutomatScene.py
|
articuly/alipay-sdk-python-all
| 0 |
2022782
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class SmartAutomatScene(object):
def __init__(self):
self._level_1 = None
self._level_2 = None
@property
def level_1(self):
return self._level_1
@level_1.setter
def level_1(self, value):
self._level_1 = value
@property
def level_2(self):
return self._level_2
@level_2.setter
def level_2(self, value):
self._level_2 = value
def to_alipay_dict(self):
params = dict()
if self.level_1:
if hasattr(self.level_1, 'to_alipay_dict'):
params['level_1'] = self.level_1.to_alipay_dict()
else:
params['level_1'] = self.level_1
if self.level_2:
if hasattr(self.level_2, 'to_alipay_dict'):
params['level_2'] = self.level_2.to_alipay_dict()
else:
params['level_2'] = self.level_2
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SmartAutomatScene()
if 'level_1' in d:
o.level_1 = d['level_1']
if 'level_2' in d:
o.level_2 = d['level_2']
return o
| 1,314 |
ModernWarfare/XAssets/itemSources.py
|
Mario-Kart-Felix/Hyde
| 14 |
2023295
|
import logging
from typing import Any, Dict, List, TypedDict
from utility import Utility
log: logging.Logger = logging.getLogger(__name__)
class ItemSourceTable(TypedDict):
"""Structure of mp/itemsourcetable.csv"""
marketPlaceID: int
refType: str
refName: str
gameSourceID: str
equippableIW8MP: int # bool
equippableWZ: int # bool
equippableT9: int # bool
equippableS4: int # bool
lookupType: str
class ItemSources:
"""Item Source XAssets."""
def Compile(self: Any) -> None:
"""Compile the Item Source XAssets."""
sources: List[Dict[str, Any]] = []
sources = ItemSources.Table(self, sources)
Utility.WriteFile(self, f"{self.eXAssets}/itemSources.json", sources)
log.info(f"Compiled {len(sources):,} Item Sources")
def Table(self: Any, sources: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Compile the mp/itemsourcetable.csv XAsset."""
table: List[Dict[str, Any]] = Utility.ReadCSV(
self, f"{self.iXAssets}/mp/itemsourcetable.csv", ItemSourceTable
)
if table is None:
return sources
for entry in table:
sources.append(
{
"id": entry.get("marketPlaceID"),
"altId": entry.get("refName"),
"type": self.ModernWarfare.GetLootType(entry.get("marketPlaceID")),
"source": entry.get("gameSourceID"),
"iw8mp": bool(entry.get("equippableIW8MP")),
"wz": bool(entry.get("equippableWZ")),
"t9": bool(entry.get("equippableT9")),
"s4": bool(entry.get("equippableS4")),
}
)
return sources
| 1,777 |
v2/goalnet/connectors/telegram/main.py
|
DaniloZZZ/GoalNet
| 0 |
2023010
|
import zmq
import json
import tgflow as tgf
from tgflow.api.cli import cliAPI
from enum import Enum
from network import ConnectorNetwork
from utils__ import get_network_config
from database import DB
def _print(*args):
print(">telegram bot>",*args)
class States(Enum):
action=1
settings=2
login=3
start=4
def bot(netconf):
net = ConnectorNetwork(netconf,
appid='0',
name='telegram'
)
db = DB()
def handle_notif(notif):
str_notif = json.dumps(notif)
try:
user_id = str(notif['user_id'])
tgid = db.get_tg_id(user_id)
except Exception as e:
_print("Notif was not sent",e)
return "FAIL"
_print("got notif:",str_notif)
message = "Got new notif of type %s. Content: %s"%(
notif.get('type'),notif.get('content')
)
if not tgid:
print("User id %s has no telegram log"%user_id)
return "FAIL"
try:
tgf.send_raw(message, tgid)
except Exception as e:
_print("Notif was not sent",e)
return "FAIL"
return 'OK'
net.listen_for_notif(handle_notif)
def login_uid_1(i):
telegram_id = i.message.chat.id
user_id = '1'
db.save_tg_id(user_id,telegram_id)
return States.action, {'user_id': user_id}
def handle_action(i,user_id=None):
_print('inp',i)
if not user_id:
_print('user not logged in')
return States.login
text = i.text
msg_type = 'telegram'
try:
msg_type, content = text.split('\\')
except ValueError:
content = text
message = {
'type':msg_type,
'content':content,
'user_id':user_id,
}
net.send(message)
# stay silent
return -1
UI = {
States.action:{
't':'Enter an action type and content to send',
'b':[
{"Settings":tgf.action(States.settings)}
],
'react':tgf.action(handle_action,react_to='text')
},
States.settings:{
't':'Settings',
'b':[
{"Action":tgf.action(States.action)}
],
},
States.start:{
't':'Welcome!',
'b':[
{"Log in":tgf.action(States.login)},
]
},
States.login:{
't':'Please log in',
'b':[
{"Log in as 1":
tgf.action(login_uid_1)}
],
}
}
key='<KEY>'
tgf.configure(token=key,
state=States.start,
#apiModel=cliAPI,
verbose=True,
)
tgf.start(UI)
def main():
netconf = get_network_config()
print("Starting bot")
bot(netconf)
if __name__=="__main__":
main()
| 3,119 |
zamna/playlists/migrations/0007_rating_vote.py
|
nistalhelmuth/zamna_back_end
| 0 |
2023508
|
# Generated by Django 2.1.7 on 2019-05-13 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('playlists', '0006_auto_20190512_1702'),
]
operations = [
migrations.AddField(
model_name='rating',
name='vote',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
| 419 |
strongbox_fixity.py
|
rejveno/IFIscripts
| 0 |
2023192
|
#!/usr/bin/env python
'''
Analyses the CSV file reports from Strongbox.
Accepts an identifier input, at least the package ID but
the UUID would also be useful.
The script then finds the relevant entries, harvests the checksums and
stores them as a regular manifest.
It would make sense to also accept an existing sha512 manifest as an argparse
so that the script can tell if they are identical.
'''
import os
import sys
import argparse
import ififuncs
def parse_args(args_):
'''
Parse command line arguments.
'''
parser = argparse.ArgumentParser(
description='Analyses the CSV file reports from Strongbox.'
'Prints the output to the terminal if the -manifest option is not used'
'if the -manifest option is used, just the differences, if any, will appear on screen'
' Written by <NAME>.'
)
parser.add_argument(
'input', help='Input directory'
)
parser.add_argument(
'-id',
help='Enter the identifier that you would like to search for. UUID/Accession/OE.'
)
parser.add_argument(
'-manifest',
help='Enter the sha512 manifest that you would like to compare against.'
)
parsed_args = parser.parse_args(args_)
return parsed_args
def diff_manifests(args, strongbox_list):
'''
Compare the list of strongbox hashes to the original AIP manifest.
'''
print '\nStrongbox_fixity - IFIscripts'
print '\nDiffing the manifests..'
with open(args.manifest, 'r') as original_manifest:
aip_manifest = original_manifest.read().splitlines()
# A list of items in strongbox, that are different in aip sha512 manifest
strongbox_check = [item for item in strongbox_list if item not in aip_manifest]
# A list of items in the AIP manifest, that are different in the strongbox manifest
aip_check = [item for item in aip_manifest if item not in strongbox_list]
if len(strongbox_check) == 0:
print 'All files in the strongbox manifest are present in your AIP manifest and the hashes validate'
else:
for i in strongbox_check:
print '%s is different from the strongbox_csv to the AIP manifest' % i
if len(aip_check) == 0:
print 'All files in the AIP manifest are present in your strongbox manifest and the hashes validate'
else:
for i in strongbox_check:
print '%s is different from the AIP manifest to the Strongbox manifest' % i
def find_checksums(csv_file, identifier):
'''
Finds the relevant entries in the CSV and prints to terminal
'''
csv_dict = ififuncs.extract_metadata(csv_file)
manifest_lines = []
for items in csv_dict:
for x in items:
if type(x) is dict:
if identifier in x['path']:
identifier_string = "/%s/" % identifier
manifest_line = x['hash_code'] + ' ' + x['path'].replace(identifier_string, '')
manifest_lines.append(manifest_line)
strongbox_list = sorted(manifest_lines, key=lambda x: (x[130:]))
return strongbox_list
def main(args_):
args = parse_args(args_)
source = args.input
identifier = args.id
strongbox_list = find_checksums(source, identifier)
if args.manifest:
diff_manifests(args, strongbox_list)
else:
for i in strongbox_list:
print i
if __name__ == '__main__':
main(sys.argv[1:])
| 3,423 |
api/network_map.py
|
michahagg/domoticz-zigbee2mqtt-plugin
| 146 |
2023176
|
from api.command import APICommand
class NetworkMap(APICommand):
def execute(self, params):
self.publish_mqtt('bridge/networkmap/routes', 'graphviz')
def handle_mqtt_message(self, topic, message):
if topic == 'bridge/networkmap/graphviz':
self.send_response(message)
| 306 |
02-ui/pyqt5/widget_examples.py
|
cccaaannn/useful_functions
| 0 |
2023891
|
# pip install pyqt5
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, qApp
from PyQt5.QtWidgets import QCheckBox, QLabel, QLineEdit, QPushButton, QRadioButton, QButtonGroup, QTextEdit, QFileDialog, QAction, QDesktopWidget
from PyQt5 import QtGui, QtCore, QtWidgets
import sys
import os
class app(QMainWindow):
def __init__(self):
super().__init__()
self.init_variables()
self.init_ui()
def init_ui(self):
"""inits ui"""
self.setWindowTitle("example window")
# self.setGeometry(100,100,700,700)
# self.move(100, 100)
self.center()
self.setFixedSize(700,700)
self.labels()
self.buttons()
self.line_edits()
self.checkboxes()
self.radiobuttons()
self.text_edits()
self.menu_bar()
self.show()
def init_variables(self):
"""inits class variables"""
self.button_counter = 0
self.img1_path = "07-pyqt5/images/img.png"
self.file_dialog_path = ""
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def labels(self):
"""adds labels"""
self.l1 = QLabel(self)
self.l1.setText("label 1")
# self.l1.move(120, 230)
self.l1.setGeometry(120, 230, 100, 10)
# add image
self.l2 = QLabel(self)
self.l2.setPixmap(QtGui.QPixmap(self.img1_path))
self.l2.setGeometry(0, 0, 700, 200)
def buttons(self):
"""adds buttons"""
self.b1 = QPushButton(self)
self.b1.setText("counter")
self.b1.move(10, 220)
self.b1.clicked.connect(self.on_click)
self.b2 = QPushButton(self)
self.b2.setText("clear")
self.b2.move(10, 270)
self.b2.clicked.connect(self.on_click)
self.b3 = QPushButton(self)
self.b3.setText("open file dialog")
self.b3.move(10, 420)
self.b3.clicked.connect(self.on_click)
def line_edits(self):
"""adds line edits"""
self.line1 = QLineEdit(self)
self.line1.setText("")
self.line1.move(120, 270)
def checkboxes(self):
"""adds checkboxes"""
self.checkbox1 = QCheckBox(self)
self.checkbox1.setText("checkbox example")
self.checkbox1.move(10, 320)
self.checkbox1.setObjectName("checkbox1")
# self.checkbox1.setDisabled(True)
self.checkbox1.clicked.connect(self.on_click)
def radiobuttons(self):
"""adds radiobuttons"""
self.radiobutton1 = QRadioButton(self)
self.radiobutton1.setText("radiobutton 1")
self.radiobutton1.move(10, 370)
self.radiobutton1.setObjectName("radiobutton1")
self.radiobutton1.clicked.connect(self.on_click)
self.radiobutton2 = QRadioButton(self)
self.radiobutton2.setText("radiobutton 2")
self.radiobutton2.move(120, 370)
self.radiobutton2.setObjectName("radiobutton2")
self.radiobutton2.clicked.connect(self.on_click)
self.radiobutton3 = QRadioButton(self)
self.radiobutton3.setText("radiobutton 3")
self.radiobutton3.move(230, 370)
self.radiobutton3.setObjectName("radiobutton3")
self.radiobutton3.clicked.connect(self.on_click)
# button groups
self.button_group1 = QButtonGroup()
self.button_group1.addButton(self.radiobutton1)
self.button_group1.addButton(self.radiobutton2)
self.button_group1.setObjectName("button_group1")
# this can listen all buttons in the group but it has no attribute calles .text()
# so you cant use single listener function with it or use objectname()
# self.button_group1.buttonClicked.connect(self.on_click)
self.button_group2 = QButtonGroup()
self.button_group2.addButton(self.radiobutton3)
self.button_group2.setObjectName("button_group2")
def text_edits(self):
"""adds text edits"""
self.te1 = QTextEdit(self)
self.te1.setGeometry(350, 220, 300, 200)
self.te1.setText("this is a biiiiiig text field")
def menu_bar(self):
"""adds menu bar and actions under it"""
# menu items
self.bar = self.menuBar()
self.file_menu = self.bar.addMenu("file")
self.file_menu.triggered.connect(self.on_menu_click)
self.edit_menu = self.bar.addMenu("edit")
self.sub_menu = self.edit_menu.addMenu("sub menu")
# actions
self.open_file_function = QAction("open file", self)
self.open_file_function.setShortcut("Ctrl+O")
self.open_file_function.setObjectName("open_file_function")
self.open_file_function.triggered.connect(self.on_click)
self.file_menu.addAction(self.open_file_function)
self.test_trigger = QAction("test trigger", self)
self.test_trigger.setObjectName("test_trigger")
self.file_menu.addAction(self.test_trigger)
self.exit_function = QAction("exit", self)
self.exit_function.setShortcut("Ctrl+Q")
self.exit_function.setObjectName("exit")
self.exit_function.triggered.connect(self.on_click)
self.sub_menu.addAction(self.exit_function)
def open_file(self):
"""opens file dialog"""
file_name = QFileDialog.getOpenFileName(self, "file dialog example", self.file_dialog_path)
print(file_name)
if(os.path.exists(file_name[0])):
return file_name[0]
else:
return None
def on_menu_click(self, action):
"""triggers on menu clicks"""
if(action.text() == "open file"):
print("open file used")
if(action.text() == "test trigger"):
print("test trigger used")
def on_click(self):
"""button click function for listeners"""
sender = self.sender()
# buttons
if(sender.text() == "counter"):
self.button_counter += 1
self.l1.setText("counter is:{}".format(self.button_counter))
elif(sender.text() == "clear"):
self.line1.setText("")
elif(sender.text() == "open file dialog"):
self.open_file()
# checkboxes
elif(sender.objectName() == "checkbox1"):
if(self.checkbox1.isChecked()):
self.checkbox1.setText("checked")
else:
self.checkbox1.setText("not checked")
# radiobuttons
elif(sender.objectName() == "radiobutton1"):
self.radiobutton1.setText("hi there")
elif(sender.objectName() == "radiobutton2"):
self.radiobutton2.setText("hi there")
elif(sender.objectName() == "radiobutton3"):
self.radiobutton3.setText("hi there")
# menu items
elif(sender.objectName() == "exit"):
sys.exit()
elif(sender.objectName() == "open_file_function"):
self.open_file()
print(sender.objectName())
application = QApplication(sys.argv)
a = app()
sys.exit(application.exec_())
| 7,238 |
crsbi/urls.py
|
kingsdigitallab/crsbi-django
| 1 |
2023681
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.static import serve
from kdl_ldap.signal_handlers import \
register_signal_handlers as kdl_ldap_register_signal_hadlers
from mezzanine.pages.views import page
from sculpture.views.display import get_pdf
kdl_ldap_register_signal_hadlers()
admin.autodiscover()
urlpatterns = [
url(r'^', include('sculpture.urls')),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^pdf/', get_pdf, name='pdf_view'),
url(r'^search/', include('haystack.urls')),
url(r'^media(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT}),
url(r'^static(?P<path>.*)$', serve,
{'document_root': settings.STATIC_ROOT}),
# HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
# ---------------------------------------------
# This pattern gives us a normal ``Page`` object, so that your
# homepage can be managed via the page tree in the admin. If you
# use this pattern, you'll need to create a page in the page tree,
# and specify its URL (in the Meta Data section) as "/", which
# is the value used below in the ``{"slug": "/"}`` part. Make
# sure to uncheck "show in navigation" when you create the page,
# since the link to the homepage is always hard-coded into all the
# page menus that display navigation on the site. Also note that
# the normal rule of adding a custom template per page with the
# template name using the page's slug doesn't apply here, since
# we can't have a template called "/.html" - so for this case, the
# template "pages/index.html" can be used.
url("^$", page, {"slug": "/"}, name="home"),
# MEZZANINE'S URLS
# ----------------
# ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
# ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
# FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
# WILL NEVER BE MATCHED!
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
url("^", include("mezzanine.urls")),
]
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler500 = "mezzanine.core.views.server_error"
# -----------------------------------------------------------------------------
# Django Debug Toolbar URLS
# -----------------------------------------------------------------------------
try:
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/',
include(debug_toolbar.urls)),
]
except ImportError:
pass
# -----------------------------------------------------------------------------
# Static file DEBUGGING
# -----------------------------------------------------------------------------
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
import os.path
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL + 'images/',
document_root=os.path.join(settings.MEDIA_ROOT,
'images'))
| 3,442 |
roomai/RoomAILogger.py
|
yooyoo2004/RoomAI
| 0 |
2022809
|
#!/bin/python
import logging;
import sys;
project_name = "roomai";
logger = logging.getLogger(project_name);
handler = logging.StreamHandler(sys.stderr);
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s");
logger.setLevel(logging.INFO);
handler.setLevel(logging.INFO);
handler.setFormatter(formatter);
logger.addHandler(handler);
def set_level(level):
logger.setLevel(level)
handler.setLevel(level)
def get_logger():
return logger
def init_logger(opts):
global logger;
global handler;
global project_name;
print opts;
if "project_name" in opts:
project_name = opts["project_name"];
print "in Logger", project_name;
logger.removeHandler(handler);
logger = logging.getLogger(project_name);
#set longer
if "logfile" in opts:
handler = logging.FileHandler(opts["logfile"]);
else:
handler = logging.StreamHandler(sys.stderr);
#set formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s");
handler.setFormatter(formatter);
##set level
logger.setLevel(logging.INFO);
if "level" in opts:
if "notset" == opts["level"].lowcase():
logger.setLevel(logging.NOTSET)
elif "debug" == opts["level"].lowcase():
logger.setLevel(logging.DEBUG)
elif "info" == opts["level"].lowcase():
logger.setLevel(logging.INFO)
elif "warning" == opts["level"].lowcase():
logger.setLevel(logging.WARNING)
elif "error" == opts["level"].lowcase():
logger.setLevel(logging.ERROR)
elif "critical" == opts["level"].lowcase():
logger.setLevel(logging.critical)
logger.addHandler(handler);
| 1,777 |
NER/loss/adaptive_dice_loss.py
|
xueshang-liulp/diaKG-code
| 10 |
2023397
|
# encoding: utf-8
import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional
class AdaptiveDiceLoss(nn.Module):
"""
Dice coefficient for short, is an F1-oriented statistic used to gauge the similarity of two sets.
Math Function:
https://arxiv.org/abs/1911.02855.pdf
adaptive_dice_loss(p, y) = 1 - numerator / denominator
numerator = 2 * \sum_{1}^{t} (1 - p_i) ** alpha * p_i * y_i + smooth
denominator = \sum_{1}^{t} (1 - p_i) ** alpha * p_i + \sum_{1} ^{t} y_i + smooth
Args:
alpha: alpha in math function
smooth (float, optional): smooth in math function
square_denominator (bool, optional): [True, False], specifies whether to square the denominator in the loss function.
with_logits (bool, optional): [True, False], specifies whether the input tensor is normalized by Sigmoid/Softmax funcs.
True: the loss combines a `sigmoid` layer and the `BCELoss` in one single class.
False: the loss contains `BCELoss`.
Shape:
- input: (*)
- target: (*)
- mask: (*) 0,1 mask for the input sequence.
- Output: Scalar loss
Examples:
>>> loss = AdaptiveDiceLoss()
>>> input = torch.randn(3, 1, requires_grad=True)
>>> target = torch.empty(3, dtype=torch.long).random_(5)
>>> output = loss(input, target)
>>> output.backward()
"""
def __init__(self,
alpha: float = 0.1,
smooth: Optional[float] = 1e-8,
square_denominator: Optional[bool] = False,
with_logits: Optional[bool] = True,
reduction: Optional[str] = "mean") -> None:
super(AdaptiveDiceLoss, self).__init__()
self.reduction = reduction
self.with_logits = with_logits
self.alpha = alpha
self.smooth = smooth
self.square_denominator = square_denominator
def forward(self,
input: Tensor,
target: Tensor,
mask: Optional[Tensor] = None) -> Tensor:
flat_input = input.view(-1)
flat_target = target.view(-1)
if self.with_logits:
flat_input = torch.sigmoid(flat_input)
if mask is not None:
mask = mask.view(-1).float()
flat_input = flat_input * mask
flat_target = flat_target * mask
intersection = torch.sum((1-flat_input)**self.alpha * flat_input * flat_target, -1) + self.smooth
denominator = torch.sum((1-flat_input)**self.alpha * flat_input) + flat_target.sum() + self.smooth
return 1 - 2 * intersection / denominator
def __str__(self):
return f"Adaptive Dice Loss, smooth:{self.smooth}; alpha:{self.alpha}"
| 2,793 |
core/fileinspector.py
|
domenico-suriano/SentinAir
| 2 |
2023969
|
#!/usr/bin/python
# Copyright 2020 Dr. <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
# path where files containing data are placed
DATAPATH = "/var/www/html/data/"
## path where files containing data plots are placed
IMGPATH = "/img/"
## function to get plots images to insert in the web page
def get_plots(filename):
csvfile = open(filename,'r')
hd1 = csvfile.readline()
csvfile.close()
hd = hd1.rstrip("\r\n")
header = hd.split(";")
return header
## function to build the web page in the correct format
def print_page_meas(filename,head,mcn):
fn = filename.rstrip("txt")
print ("Content-type: text/html\n")
print ('<html><head>')
print ('<title>' + "Measure page in file " + fn + " on " + mcn + '</title>')
print ('<style type=\"text/css\"> body { background-image: url(\"/sentinair.jpg\");background-size: cover;}</style>')
print ('</head><body>')
print ('<p><h2><font face = \"arial\"> Here below are plots from<br>' + fn.rstrip(".") + '<br>on<br>' + mcn + '</font></h2></p>')
print ('<table>')
hnum = 0
for h in head:
if hnum == 0:
hnum=hnum+1
else:
print ('<tr><td>')
h=h.replace('%','')
h=h.replace('/','')
print ('<img alt=\"Data plot unavailable\" src=\"' + IMGPATH + fn + h + '.png\">')
print ('</td></tr>')
hnum=hnum+1
print ('</table>')
print ('</body></html>')
##### MAIN #########
fs = cgi.FieldStorage()
fn = DATAPATH + str(fs["fn"].value)
mn = str(fs["mn"].value)
hd = get_plots(fn)
print_page_meas(str(fs["fn"].value),hd,mn)
| 2,159 |
codenames/models.py
|
Schluggi/codenames
| 3 |
2023366
|
from . import db
class Game(db.Model):
__tablename__ = 'games'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), unique=True, nullable=False)
mode = db.Column(db.String(255), nullable=False)
images = db.Column(db.Text, nullable=False)
cards = db.Column(db.Text, nullable=False)
score_red = db.Column(db.Integer)
score_blue = db.Column(db.Integer)
members_red = db.Column(db.Text, nullable=False, default='[]')
members_blue = db.Column(db.Text, nullable=False, default='[]')
start_score_red = db.Column(db.Integer)
start_score_blue = db.Column(db.Integer)
fields = db.relationship('Field', backref='game', lazy='dynamic')
class Field(db.Model):
__tablename__ = 'fields'
id = db.Column(db.Integer, primary_key=True, nullable=False)
game_id = db.Column(db.Integer, db.ForeignKey('games.id'), nullable=False, primary_key=True)
hidden = db.Column(db.Boolean, nullable=False, default=True)
type = db.Column(db.String(8), nullable=False)
| 1,055 |
emails/admin.py
|
vasudeveloper001/mvc_python_django
| 0 |
2022703
|
from django.contrib import admin
# Register your models here.
from emails.models import EmailEntry
admin.site.register(EmailEntry)
| 133 |
paginas/migrations/0011_players_lobby_slug.py
|
igor-pontes/Dolex
| 0 |
2023684
|
# Generated by Django 2.1.5 on 2019-02-06 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paginas', '0010_players_lobby_slot'),
]
operations = [
migrations.AddField(
model_name='players_lobby',
name='slug',
field=models.CharField(default=None, max_length=110),
),
]
| 404 |
script_python/csvAll.py
|
goldleaf3i/generativeCMLgraphs
| 0 |
2023804
|
#!/usr/bin/python
# OPENS THE FOLDER WHERE THE SCRIPT LIVES OR, ALTERNATIVELY, argv[1].
# PARSES ALL <NAME>
# TAKES ALL THE TEXT FILES, WHICH IT TREATS AS ADJACENCY MATRICES OF A GRAPH
# INSERTS THE MATRICES FOUND INTO AN IGRAPH GRAPH
### COPIED FROM THE DROPBOX DEV FOLDER, TO BE MERGED BACK INTO THE ORIGINAL PROJECT LATER - FINISHED 28/9/14
# IN PARTICULAR, ADD TO THE LIBRARY THE METRICS FOR PRINTING THE VARIOUS GRAPH CHARACTERISTICS
# TODO MOVE THE SUPPORT FUNCTIONS INTO UTILS
from sys import argv
import re
import sys
import math
from loadGraph import *
import numpy as Math
import os
import glob
from multiprocessing import Process
mylabelschema = 'office.xml'
#for i in matrix:
#M.append( [int(j) for j in i.split(',')[:-1] +[i.split(',')[-1].split('\')[0]]])
def parseEverything(direct) :
global mylabelschema
for filename in glob.glob(direct+"/*.xml") :
#try :
print("apro il file " , filename)
loadXML(filename,mylabelschema)
#except Exception as e:
# print str(e)
# print "cannot process " , filename
# exit()
p = []
i = 0
for directories in glob.glob(direct+"/*/") :
#p.append(Process(target = parseEverything, args =(directories,)))
parseEverything(directories)
#p[i].start()
i+=1
print("apro la cartella " , directories)
#for j in range(i-1) :
# p[j].join()
return True
def plotAdiacency(filename) :
myfile = open(filename)
# initialise the data structure
matrix = []
for line in myfile:
matrix.append([int(i)for i in line.split(',')])
myfile.close()
topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
graph = topologicalmap.graph
print(".".join(filename.split(".")[:-1])+ ".png")
print(graph.vs["label"])
#print graph.vs["label"]
#exit()
vertex_shape = ['rect' if i =='C' or i =='H' or i == 'L' or i=='E' or i=='N' or i=='Q' else 'circle' for i in graph.vs["label"]]
#print vertex_shape
#exit()
plot(graph,".".join(filename.split(".")[:-1])+".png",vertex_label_size = 0, vertex_shape = vertex_shape,bbox=(700,700),layout='kk')
# #for i in matrix:
# #M.append( [int(j) for j in i.split(',')[:-1] +[i.split(',')[-1].split('\')[0]]])
# def parseEverything(direct) :
# for filename in glob.glob(direct+"/*") :
# #try :
# print "apro il file " , filename
# plotAdiacency(filename)
# #except Exception as e:
# # print str(e)
# # print "cannot process " , filename
# # exit()
# for directories in glob.glob(direct+"/*/") :
# parseEverything(directories)
# print "apro la cartella " , directories
# return True
# def plotAdiacency(filename) :
# myfile = open(filename);
# #inizializzo la struttura dati
# matrix = []
# for line in myfile:
# matrix.append([int(i)for i in line.split(',')])
# myfile.close()
# topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
# graph = topologicalmap.graph
# print ".".join(filename.split(".")[:-1])+ ".png"
# #print graph.vs["label"]
# #exit()
# vertex_shape = ['rect' if i =='C' or i =='H' or i == 'E' else 'circle' for i in graph.vs["label"]]
# #print vertex_shape
# #exit()
# plot(graph,".".join(filename.split(".")[:-1])+".png",vertex_label_size = 0, vertex_shape = vertex_shape)
def evaluateGraphs(direct, myformat = None ) :
# compute all the igraph metrics and then print them
graphStats = dict()
metrics = ['nodes','R','C','path_len','diameter','density','articulation_points','betweenness',
'mu_betweenness','scaled_betweenness','mu_scaled_betweenness','Rbetweenness','mu_Rbetweenness',
'Cbetweenness','mu_Cbetweenness','closeness','mu_closeness','Rcloseness','mu_Rcloseness',
'Ccloseness','mu_Ccloseness','eig','mu_eig','Reig', 'mu_Reig','Ceig','mu_Ceig'
]
for filename in glob.glob(direct+"/*.txt") :
#try :
print("apro il file " , filename)
graphStats[filename] = analyzeGraph(filename, myformat)
#except Exception as e:
# print str(e)
# print "cannot process " , filename
# exit()
data = aggrateMetrics(graphStats,metrics)
if data :
text_file = open(direct+"/aggregate_graph_data.log", "w")
text_file.write(str(data))
text_file.close()
for directories in glob.glob(direct+"/*/") :
evaluateGraphs(directories, myformat=myformat)
print("apro la cartella " , directories)
return True
def analyzeGraph(filename, myformat = 'adjacency') :
# format: 'adjacency' is the 0/1 matrix, values separated by "," and rows terminated by ";" (DEFAULT)
# the 'matlab' format is instead the one matlab uses to write matrices
myfile = open(filename)
# initialise the data structure
matrix = []
for line in myfile:
print(line)
if myformat == 'matlab' :
line = line.replace('[','')
line = line.replace(']','')
line = line.replace(';','')
print(line)
matrix.append([int(i)for i in line.split(',')])
myfile.close()
topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
g = topologicalmap.graph
Cs = g.vs.select(RC_label = 'C')
Rs = g.vs.select(RC_label = 'R')
indexC = [i.index for i in Cs]
indexR = [i.index for i in Rs]
data = dict()
# number of nodes
data['nodes'] = len(g.vs())
# number of R nodes
data['R'] = len(indexR)
# number of C nodes
data['C'] = len(indexC)
# average path length
data['path_len'] = g.average_path_length()
# diameter
data['diameter'] = g.diameter()
# average degree (density)
data['density'] = g.density()
# articulation points, how many there are
data['articulation_points'] = len(g.articulation_points())
# betweenness
betweenness = g.betweenness()
data['betweenness'] = betweenness
# mean betweenness
data['mu_betweenness'] = avg(betweenness)
# scaled betweenness
scaled_b = [ float(i)/(float(len(betweenness)-1))/(float(len(betweenness))-2) for i in betweenness ]
data['scaled_betweenness'] = scaled_b
# mean scaled betweenness
data['mu_scaled_betweenness'] = avg(scaled_b)
# scaled betweenness, R nodes only
data['Rbetweenness'] = selectLabelArray(scaled_b,indexR)
# average scaled betweenness, R nodes only
print(data['Rbetweenness'])
data['mu_Rbetweenness'] = avg(data['Rbetweenness'])
# scaled betweenness, C nodes only
data['Cbetweenness'] = selectLabelArray(scaled_b,indexC)
# average scaled betweenness, C nodes only
data['mu_Cbetweenness'] = avg(data['Cbetweenness'])
# closeness
closeness = g.closeness()
data['closeness'] = closeness
# average closeness
data['mu_closeness'] = avg(closeness)
# closeness, R nodes only
data['Rcloseness'] = selectLabelArray(closeness,indexR)
# average closeness, R nodes only
data['mu_Rcloseness'] = avg(data['Rcloseness'])
# closeness, C nodes only
data['Ccloseness'] = selectLabelArray(closeness,indexC)
# average closeness, C nodes only
data['mu_Ccloseness'] = avg(data['Ccloseness'])
# eigenvector centrality
eigenvec = g.eigenvector_centrality()
data['eig'] = eigenvec
# mean eigenvector centrality
data['mu_eig'] = avg(eigenvec)
# eigenvector centrality, R nodes only
data['Reig'] = selectLabelArray(eigenvec,indexR)
# mean eigenvector centrality, R nodes only
data['mu_Reig'] = avg(data['Reig'])
# eigenvector centrality, C nodes only
data['Ceig'] = selectLabelArray(eigenvec,indexC)
# mean eigenvector centrality, C nodes only
data['mu_Ceig'] = avg(data['Ceig'])
#print ".".join(filename.split(".")[:-1])+ ".png"
#plot(graph,".".join(filename.split(".")[:-1])+".png")
stringa = str()
for i in data.keys():
stringa+= str(i) + ":\n"
stringa+= str(data[i]) + "\n"
text_file = open(".".join(filename.split(".")[:-1])+"_aggregate_data.log", "w")
text_file.write(str(stringa))
text_file.close()
return data
def selectLabelArray(array,indexes) :
# returns the elements of array whose index is contained in indexes
tmp = []
for i in indexes :
tmp.append(array[i])
return tmp
def averageLabel(array,indexes):
# returns the mean of the elements of array whose index is contained in indexes
tmp = []
for i in indexes :
tmp.append(array[i])
return sum(tmp)/float(len(indexes))
def avg(array) :
return sum(array)/float(len(array))
def aggrateMetrics(dictionary,list_of_metrics) :
# for now no aggregate statistics are computed over the arrays
# takes an array of arrays and then recomputes everything
mydict = dict()
# initialise the variables
for i in list_of_metrics :
mydict[i] = variable(i)
# for each graph, parse the dictionary and feed it into the variables
for i in dictionary.keys() :
for j in dictionary[i].keys() :
if type(dictionary[i][j]) is list :
# for now no aggregate statistics are computed over the arrays.
pass
else :
mydict[j].add(dictionary[i][j])
ret_str = str()
for i in list_of_metrics :
if mydict[i].n > 0 :
ret_str += mydict[i].printVar()
return ret_str
# recursively opens all the TXT files it finds. uses the current folder if you do not specify an alternative start folder
current = os.getcwd()
#try:
# current = argv[1]
#except :
# print("non hai specificato la cartella corrente")
#print("inizio a parsare la cartella ", current , 'che diavleria e ques?')
#parseEverything(current)
#print("finito!")
count = 0
btypename = 'zoffice.xml'
#btypename = 'zoffice.xml'
for filename in glob.glob(current+"/*.xml"):
count+=1
print(filename)
# LOADXML loads the TOPOLOGICAL files. LOADXML2 loads the standard XML files
if btypename not in filename :
matrix = loadXML2(filename, btypename)
Math.savetxt("graph_"+str(count)+".csv", matrix, fmt='%s', delimiter=",")
print("done")
| 9,925 |
tests/utils/test_time.py
|
SatelCreative/toolip
| 0 |
2023696
|
from datetime import datetime, timezone
import pytz
from toolip.utils.time import make_time_aware, now, now_epoch, now_epoch_ms
def test_now():
assert now().tzinfo == timezone.utc
def test_now_epoch():
now = datetime.now(timezone.utc).timestamp()
assert now_epoch() == int(now)
def test_now_epoch_ms():
now = datetime.now(timezone.utc).timestamp() * 1000
assert now_epoch_ms() == int(now)
def test_make_time_aware():
dtime = datetime.now()
assert dtime.tzinfo != pytz.utc
assert make_time_aware(dtime).tzinfo == pytz.utc
| 563 |
app/robot/types.py
|
mogenson/tubers
| 1 |
2022621
|
from dataclasses import dataclass
from enum import Enum
from struct import unpack
from .packet import Packet
@dataclass
class Bumper:
left: bool
right: bool
@classmethod
def from_packet(cls, packet: Packet):
return Bumper(packet.payload[4] & 0x80 != 0, packet.payload[4] & 0x40 != 0)
@dataclass
class Color:
WHITE = 0
BLACK = 1
RED = 2
GREEN = 3
BLUE = 4
ORANGE = 5
YELLOW = 6
MAGENTA = 7
NONE = 15
ANY = -1
colors: list[int]
@classmethod
def from_packet(cls, packet: Packet):
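# Each payload byte packs two 4-bit colour codes, high nibble first.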
return Color([c >> i & 0xF for c in packet.payload for i in range(4, -1, -4)])
@dataclass
class Light:
DARKER = 4
RIGHT_BRIGHTER = 5
LEFT_BRIGHTER = 6
LIGHTER = 7
state: int
left: int = 0
right: int = 0
@classmethod
def from_packet(cls, packet: Packet):
return Light(
packet.payload[4],
unpack(">H", packet.payload[5:7])[0],
unpack(">H", packet.payload[7:9])[0],
)
@dataclass
class Touch:
front_left: bool
front_right: bool
back_right: bool
back_left: bool
@classmethod
def from_packet(cls, packet: Packet):
return Touch(
packet.payload[4] & 0x80 != 0,
packet.payload[4] & 0x40 != 0,
packet.payload[4] & 0x20 != 0,
packet.payload[4] & 0x10 != 0,
)
def note(note: str, A4=440) -> float:
"""Convert a note name into frequency in hertz: eg. 'C#5'"""
notes = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
octave = int(note[-1])
step = notes.index(note[0:-1])
step += ((octave - 1) * 12) + 1
return A4 * 2 ** ((step - 46) / 12)
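# Worked examples (equal temperament anchored at A4 = 440 Hz):
#   note("A4") -> 440.0
#   note("C5") -> ~523.25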
class Marker(Enum):
UP = 0
DOWN = 1
ERASE = 2
class Animation(Enum):
OFF = 0
ON = 1
BLINK = 2
SPIN = 3
class ColorSensors(Enum):
SENSORS_0_TO_7 = 0
SENSORS_8_TO_15 = 1
SENSORS_16_TO_23 = 2
SENSORS_24_TO_31 = 3
class ColorLighting(Enum):
OFF = 0
RED = 1
GREEN = 2
BLUE = 3
ALL = 4
class ColorFormat(Enum):
ADC_COUNTS = 0
MILLIVOLTS = 1
class ModulationType(Enum):
DISABLED = 0
VOLUME = 1
PULSE_WIDTH = 2
FREQUENCY = 3
| 2,238 |
split_data.py
|
smtnkc/gcn4epi
| 0 |
2023632
|
import os
import random
import pickle as pkl
import argparse
from sklearn.model_selection import train_test_split
from prepare_data import getTuples
def trainTestSplit(cell_line, cross_cell_line, id_dict, cross_begin_id, label_rate, seed):
def getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed):
"""
Returns ID portions for train, test, validation split.
Label rate is the number of labeled nodes (x) that are used
for training divided by the total number of nodes in the dataset.
Example: Label rate = 0.1
10% labeled training (x)
60% unlabeled training (ux)
10% validation (vx)
20% test (tx) !!! 20% of the same or cross cell-line !!!
allx = x + ux + vx
"""
idx = list(id_dict.values())[0:cross_begin_id] # do not include cross cell-line elements
idx_allx, idx_tx = train_test_split(idx, test_size=0.2, random_state=seed)
idx_x_vx, idx_ux = train_test_split(idx_allx, test_size=1-(label_rate*2/0.8), random_state=seed)
idx_x, idx_vx = train_test_split(idx_x_vx, test_size=0.5, random_state=seed)
if cross_begin_id == len(id_dict):
# No cross cell-line specified. Use the same cell-line for testing.
print('SAME CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
.format(len(idx_x), len(idx_vx), len(idx_tx), cell_line, len(idx_ux)))
else:
# Use cross cell-line for testing. Overwrite idx_tx.
cross_idx = list(id_dict.values())[cross_begin_id:]
_, idx_tx = train_test_split(cross_idx, test_size=0.2, random_state=seed)
print('CROSS CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
.format(len(idx_x), len(idx_vx), len(idx_tx), cross_cell_line, len(idx_ux)))
return idx_x, idx_ux, idx_vx, idx_tx
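# Worked example (label_rate = 0.2): the 80/20 allx/tx split keeps 80% of the
# nodes; test_size = 1 - (0.2*2/0.8) = 0.5 then leaves 40% in x+vx, halved
# into 20% labeled (x) and 20% validation (vx), with the remaining 40% as ux.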
# TRAIN / TEST / VALIDATION SPLIT
idx_x, idx_ux, idx_vx, idx_tx = getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed)
print('Writing index files for train/test/validation split...')
if (args.cross_cell_line != None) and (args.cross_cell_line != args.cell_line):
dump_dir = 'data/{}/'.format(cell_line + '_' + cross_cell_line)
else:
dump_dir = 'data/{}/'.format(cell_line)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
lr = '{:.2f}'.format(label_rate).split('.')[1]
idx_x_file = open('{}/x_{}.index'.format(dump_dir, lr), "wb")
pkl.dump(idx_x, idx_x_file)
idx_x_file.close()
idx_ux_file = open('{}/ux_{}.index'.format(dump_dir, lr), "wb")
pkl.dump(idx_ux, idx_ux_file)
idx_ux_file.close()
idx_vx_file = open('{}/vx_{}.index'.format(dump_dir, lr), "wb")
pkl.dump(idx_vx, idx_vx_file)
idx_vx_file.close()
idx_tx_file = open('{}/tx_{}.index'.format(dump_dir, lr), "wb")
pkl.dump(idx_tx, idx_tx_file)
idx_tx_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='gcn4epi')
parser.add_argument('--cell_line', default='GM12878', type=str)
parser.add_argument('--cross_cell_line', default=None, type=str) # set to run cross cell-line testing
parser.add_argument('--k_mer', default=5, type=int)
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--label_rate', default=0.2, type=float) # [0.2, 0.1, 0.05]
parser.add_argument('--frag_len', default=200, type=int) # set 0 to disable fragmentation and use full sequences
args = parser.parse_args()
random.seed(args.seed)
_, id_dict, cross_begin_id = getTuples(args.cell_line, args.cross_cell_line, args.k_mer) # requires successful run of prepare_gcn_data.py
trainTestSplit(args.cell_line, args.cross_cell_line, id_dict, cross_begin_id, args.label_rate, args.seed)
| 3,949 |
muk_autovacuum/__init__.py
|
Yousif-Mobark/odoo11_cutom
| 0 |
2023387
|
###################################################################################
#
# Copyright (C) 2018 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import api, SUPERUSER_ID
from . import models
def _get_value(env, model):
model_model = env['ir.model']
model_fields = env['ir.model.fields']
model = model_model.search([('model', '=', model)], limit=1)
if model.exists():
field_domain = [
('model_id', '=', model.id),
('ttype', '=', 'datetime'),
('name', '=', 'create_date')]
field = model_fields.search(field_domain, limit=1)
return model, field
return None
def _init_default_rules(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
rule = env['muk_autovacuum.rules']
values = _get_value(env, 'mail.message')
if values:
rule.create({
'name': "Delete Message Attachments after 6 Months",
'model': values[0].id,
'active': False,
'state': 'time',
'time_field': values[1].id,
'time_type': 'months',
'time': 6,
'only_attachments': True})
rule.create({
'name': "Delete Messages after 1 Year",
'model': values[0].id,
'active': False,
'state': 'time',
'time_field': values[1].id,
'time_type': 'years',
'time': 1})
values = _get_value(env, 'ir.logging')
if values:
rule.create({
'name': "Delete Logs after 2 Weeks",
'model': values[0].id,
'active': False,
'state': 'time',
'time_field': values[1].id,
'time_type': 'weeks',
'time': 2,
'protect_starred': False})
| 2,528 |
src/tvl/transforms.py
|
hyperfraise/tvl
| 0 |
2023356
|
"""Functions for transforming image data stored in PyTorch tensors.
This module is necessary since most of the transformations provided by the `torchvision` package
are applicable for PIL.Image images only. Since tvl may load video frames on the GPU, we want
to be able to take the computation to the data rather than moving the images to and from main
memory.
As an additional benefit, these functions are defined such that they also work in batched mode,
which is especially useful for videos.
"""
import math
from typing import Sequence
import torch
from torch.nn.functional import interpolate
from torchgeometry import warp_affine
def normalise(tensor, mean, stddev, inplace=False):
"""Normalise the image with channel-wise mean and standard deviation.
Args:
tensor (torch.Tensor): The image tensor to be normalised.
mean (Sequence of float): Means for each channel.
stddev (Sequence of float): Standard deviations for each channel.
inplace (bool): Perform normalisation in-place.
Returns:
Tensor: The normalised image tensor.
"""
mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None]
stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None]
if inplace:
tensor.sub_(mean)
else:
tensor = tensor.sub(mean)
tensor.div_(stddev)
return tensor
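# Usage sketch (assumes a CHW float image; the statistics below are the
# common ImageNet values, used here purely as an example):
#   img = torch.rand(3, 224, 224)
#   img = normalise(img, mean=[0.485, 0.456, 0.406], stddev=[0.229, 0.224, 0.225])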
def denormalise(tensor, mean, stddev, inplace=False):
"""Denormalise the image with channel-wise mean and standard deviation.
Args:
tensor (torch.Tensor): The image tensor to be denormalised.
mean (Sequence of float): Means for each channel.
stddev (Sequence of float): Standard deviations for each channel.
inplace (bool): Perform denormalisation in-place.
Returns:
Tensor: The denormalised image tensor.
"""
mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None]
stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None]
if inplace:
return tensor.mul_(stddev).add_(mean)
else:
return torch.addcmul(mean, tensor, stddev)
def resize(tensor, size, mode='bilinear'):
"""Resize the image.
Args:
tensor (torch.Tensor): The image tensor to be resized.
size (tuple of int): Size of the resized image (height, width).
mode (str): The pixel sampling interpolation mode to be used.
Returns:
Tensor: The resized image tensor.
"""
assert len(size) == 2
# If the tensor is already the desired size, return it immediately.
if tensor.shape[-2] == size[0] and tensor.shape[-1] == size[1]:
return tensor
if not tensor.is_floating_point():
dtype = tensor.dtype
tensor = tensor.to(torch.float32)
tensor = resize(tensor, size, mode)
return tensor.to(dtype)
out_shape = (*tensor.shape[:-2], *size)
if tensor.ndimension() < 3:
raise Exception('tensor must be at least 2D')
elif tensor.ndimension() == 3:
tensor = tensor.unsqueeze(0)
elif tensor.ndimension() > 4:
tensor = tensor.view(-1, *tensor.shape[-3:])
align_corners = None
if mode in {'linear', 'bilinear', 'trilinear'}:
align_corners = False
resized = interpolate(tensor, size=size, mode=mode, align_corners=align_corners)
return resized.view(*out_shape)
def crop(tensor, t, l, h, w, padding_mode='constant', fill=0):
"""Crop the image, padding out-of-bounds regions.
Args:
tensor (torch.Tensor): The image tensor to be cropped.
t (int): Top pixel coordinate.
l (int): Left pixel coordinate.
h (int): Height of the cropped image.
w (int): Width of the cropped image.
padding_mode (str): Padding mode (currently "constant" is the only valid option).
fill (float): Fill value to use with constant padding.
Returns:
Tensor: The cropped image tensor.
"""
# If the crop region is wholly within the image, simply narrow the tensor.
if t >= 0 and l >= 0 and t + h <= tensor.size(-2) and l + w <= tensor.size(-1):
return tensor[..., t:t+h, l:l+w]
if padding_mode == 'constant':
result = torch.full((*tensor.size()[:-2], h, w), fill,
device=tensor.device, dtype=tensor.dtype)
else:
raise Exception('crop only supports "constant" padding currently.')
sx1 = l
sy1 = t
sx2 = l + w
sy2 = t + h
dx1 = 0
dy1 = 0
if sx1 < 0:
dx1 = -sx1
w += sx1
sx1 = 0
if sy1 < 0:
dy1 = -sy1
h += sy1
sy1 = 0
if sx2 >= tensor.size(-1):
w -= sx2 - tensor.size(-1)
if sy2 >= tensor.size(-2):
h -= sy2 - tensor.size(-2)
# Copy the in-bounds sub-area of the crop region into the result tensor.
if h > 0 and w > 0:
src = tensor.narrow(-2, sy1, h).narrow(-1, sx1, w)
dst = result.narrow(-2, dy1, h).narrow(-1, dx1, w)
dst.copy_(src)
return result
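# Usage sketch: a crop extending past the top-left corner keeps the in-bounds
# pixels and pads the rest with `fill` (illustrative values):
#   x = torch.arange(16.).view(1, 4, 4)
#   patch = crop(x, t=-1, l=-1, h=3, w=3)  # 3x3 result; first row/col are fill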
def flip(tensor, horizontal=False, vertical=False):
"""Flip the image.
Args:
tensor (torch.Tensor): The image tensor to be flipped.
horizontal: Flip horizontally.
vertical: Flip vertically.
Returns:
Tensor: The flipped image tensor.
"""
if horizontal:
tensor = tensor.flip(-1)
if vertical:
tensor = tensor.flip(-2)
return tensor
def affine(tensor, matrix):
"""Apply an affine transformation to the image.
Args:
tensor (torch.Tensor): The image tensor to be warped.
matrix (torch.Tensor): The 2x3 affine transformation matrix.
Returns:
Tensor: The warped image.
"""
is_unbatched = tensor.ndimension() == 3
if is_unbatched:
tensor = tensor.unsqueeze(0)
warped = warp_affine(tensor, matrix, tensor.size()[-2:])
if is_unbatched:
warped = warped.squeeze(0)
return warped
def rotate(tensor, degrees):
"""Rotate the image anti-clockwise about the centre.
Args:
tensor (torch.Tensor): The image tensor to be rotated.
degrees (float): The angle through which to rotate.
Returns:
Tensor: The rotated image tensor.
"""
rads = math.radians(degrees)
h, w = tensor.size()[-2:]
c = math.cos(rads)
s = math.sin(rads)
x = (w - 1) / 2
y = (h - 1) / 2
# Transformation matrix for clockwise rotation about the centre of the image.
matrix = torch.tensor([[
[ c, s, -c * x - s * y + x],
[-s, c, s * x - c * y + y],
]], dtype=torch.float32, device=tensor.device)
return affine(tensor, matrix)
def fit(tensor, size, fit_mode='cover', resize_mode='bilinear', *, fill=0):
"""Fit the image within the given spatial dimensions.
Args:
tensor (torch.Tensor): The image tensor to be fit.
size (tuple of int): Size of the output (height, width).
fit_mode (str): 'fill', 'contain', or 'cover'. These behave in the same way as CSS's
`object-fit` property.
fill (float): padding value (only applicable in 'contain' mode).
Returns:
Tensor: The resized image tensor.
"""
# Modes are named after CSS object-fit values.
assert fit_mode in {'fill', 'contain', 'cover'}
if fit_mode == 'fill':
return resize(tensor, size, mode=resize_mode)
elif fit_mode == 'contain':
ih, iw = tensor.shape[-2:]
k = min(size[-1] / iw, size[-2] / ih)
oh = round(k * ih)
ow = round(k * iw)
resized = resize(tensor, (oh, ow), mode=resize_mode)
result = tensor.new_full((*tensor.size()[:-2], *size), fill)
y_off = (size[-2] - oh) // 2
x_off = (size[-1] - ow) // 2
result[..., y_off:y_off + oh, x_off:x_off + ow] = resized
return result
elif fit_mode == 'cover':
ih, iw = tensor.shape[-2:]
k = max(size[-1] / iw, size[-2] / ih)
oh = round(k * ih)
ow = round(k * iw)
resized = resize(tensor, (oh, ow), mode=resize_mode)
y_trim = (oh - size[-2]) // 2
x_trim = (ow - size[-1]) // 2
result = crop(resized, y_trim, x_trim, size[-2], size[-1])
return result
raise Exception('This code should not be reached.')
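# Usage sketch: letterboxing vs. centre-cropping a wide image into a square
# (illustrative shapes):
#   img = torch.rand(3, 100, 200)
#   boxed = fit(img, (128, 128), fit_mode='contain')  # pads top/bottom
#   full = fit(img, (128, 128), fit_mode='cover')     # crops left/right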
| 8,316 |
TE-1/PL-1/OSD/2. Socket(py)/server.py
|
Adityajn/College-Codes
| 1 |
2023780
|
import socket,sys
s=socket.socket()
#host=socket.gethostname()
port=28901 #port between 1024 and 49151
s.bind((sys.argv[1],port))
s.listen(4)
c,addr=s.accept()
print "Connected to:",addr
f1=open(sys.argv[2],"r") #open file in read mode
bytes=f1.read(1024) #read 1024 bytes
while(bytes):
c.send(bytes) #send read bytes
bytes=f1.read(1024) #read next 1024 bytes
f1.close()
c.close()
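# Hedged client-side sketch (not part of the original file): connect and
# receive the file in 1024-byte chunks until the server closes the socket.
#   import socket
#   s = socket.socket()
#   s.connect(("server-host", 28901))  # hypothetical host name
#   with open("received.txt", "wb") as f:
#       while True:
#           data = s.recv(1024)
#           if not data:
#               break
#           f.write(data)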
| 388 |
master_django/intensity/register/context_processors.py
|
kripken/intensityengine
| 31 |
2023204
|
# Copyright 2010 <NAME> ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
from intensity.models import UserAccount
import intensity.conf as intensity_conf
def account(request):
'''
A context processor that provides 'my_account', the Intensity Engine account info for a user,
and shows messages for that account
'''
ret = {
'my_account': request.account if request.user.is_authenticated() else None,
'message': request.session.get('message'),
}
request.session['message'] = None
return ret
def toplevel(request):
'''
Gives a redirect URL for the toplevel
'''
return { 'toplevel_root': intensity_conf.get('Sites', 'toplevel_root') }
| 795 |
manifest/mixins.py
|
ozgurgunes/django-manifest
| 0 |
2023580
|
# -*- coding: utf-8 -*-
""" Manifest View Mixins
"""
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.generic import FormView, View
from manifest import decorators, defaults
from manifest.utils import get_protocol
class MessageMixin:
"""
View mixin adding messages to response.
"""
success_message = ""
error_message = ""
extra_context = None
def set_success_message(self, message):
if defaults.MANIFEST_USE_MESSAGES:
messages.success(self.request, message, fail_silently=True)
def set_error_message(self, message):
if defaults.MANIFEST_USE_MESSAGES:
messages.error(self.request, message, fail_silently=True)
class SendMailMixin:
"""
Mixin that send an email to given recipients.
"""
from_email = None
email_subject_template_name = None
email_message_template_name = None
email_html_template_name = None
def create_email(self, context, recipient):
if not self.email_subject_template_name:
raise ImproperlyConfigured(
"No template name for subject. "
"Provide a email_subject_template_name."
)
if not self.email_message_template_name:
raise ImproperlyConfigured(
"No template name for message. "
"Provide a email_message_template_name."
)
subject = "".join(
render_to_string(
self.email_subject_template_name, context
).splitlines()
)
message = render_to_string(self.email_message_template_name, context)
return EmailMultiAlternatives(
subject, message, self.from_email, [recipient]
)
def send_mail(self, recipient, opts):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
context = {
"protocol": get_protocol(),
"site": Site.objects.get_current(),
}
context.update(opts)
email = self.create_email(context, recipient)
if self.email_html_template_name is not None:
html_email = render_to_string(
self.email_html_template_name, context
)
email.attach_alternative(html_email, "text/html")
return email.send()
class SendActivationMailMixin(SendMailMixin):
def send_activation_mail(self, user):
context = {
"user": user,
"activation_days": defaults.MANIFEST_ACTIVATION_DAYS,
"activation_key": user.activation_key,
}
self.send_mail(user.email, context)
class EmailChangeMixin(SendMailMixin):
email_subject_template_name_old = (
"manifest/emails/confirmation_email_subject_old.txt"
)
email_message_template_name_old = (
"manifest/emails/confirmation_email_message_old.txt"
)
email_html_template_name_old = None
email_subject_template_name_new = (
"manifest/emails/confirmation_email_subject_new.txt"
)
email_message_template_name_new = (
"manifest/emails/confirmation_email_message_new.txt"
)
email_html_template_name_new = None
def send_confirmation_mail(self, user):
context = {
"user": user,
"new_email": user.email_unconfirmed,
"confirmation_key": user.email_confirmation_key,
}
self.email_subject_template_name = self.email_subject_template_name_old
self.email_message_template_name = self.email_message_template_name_old
self.email_html_template_name = self.email_html_template_name_old
self.send_mail(user.email, context)
self.email_subject_template_name = self.email_subject_template_name_new
self.email_message_template_name = self.email_message_template_name_new
self.email_html_template_name = self.email_html_template_name_new
self.send_mail(user.email_unconfirmed, context)
class SecureRequiredMixin(View):
"""
Mixin that switches URL from http to https if
``MANIFEST_USE_HTTPS`` setting is ``True``.
"""
@method_decorator(decorators.secure_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
class LoginRequiredMixin(View):
"""
Mixin that redirects user to login form if not authenticated yet.
"""
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
# pylint: disable=bad-continuation
class UserFormMixin(
FormView, SecureRequiredMixin, LoginRequiredMixin, MessageMixin
):
"""
Mixin that sets forms user argument to ``request.user``.
"""
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
| 5,201 |
scripts/size_msgs_test.py
|
UCY-LINC-LAB/Self-Stabilization-Edge-Simulator
| 3 |
2022934
|
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import io
import base64
import os
import sys
import argparse
# See https://matplotlib.org/3.1.0/users/dflt_style_changes.html
plt.style.use('seaborn-ticks')
mpl.rcParams['grid.color'] = 'grey'
mpl.rcParams['grid.linestyle'] = ':'
mpl.rcParams['grid.linewidth'] = 0.5
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['font.size'] = 15
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.titlesize'] = 'large'
def build_graph(f, export):
if export:
f.savefig(export, format='png')
return
img = io.BytesIO()
f.set_size_inches(11.7, 8.27)
f.savefig(img, format='png')
img.seek(0)
graph_url = base64.b64encode(img.getvalue()).decode()
return graph_url
# return 'data:image/png;base64,{}'.format(graph_url)
def load_data(file, period):
data = []
last_time = 0
partial = [0., 0., 0., 0.]
with open(file, 'r') as fp:
for line in fp:
line = line.strip()
if len(line) == 0:
continue
if line.startswith('time'):
continue
toks = line.split(',')
t = int(toks[0])
control_count = int(toks[1])
control_size = int(toks[2])
data_count = int(toks[5])
data_size = int(toks[6])
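# Scale the per-sample byte counts up to bytes/second (period is in ms).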
control_size *=(1000/period)
data_size *=(1000/period)
partial[0] += control_count
partial[1] += control_size
partial[2] += data_count
partial[3] += data_size
if t - last_time > period:
last_time = t
data.append([t, partial[0], partial[1]/1024, partial[2], partial[3]/1024])
partial = [0., 0., 0., 0.]
return np.array(data)
def compute_graph2(data):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 12), sharex=False)
controlColor = 'xkcd:bright blue'
dataColor = 'xkcd:light orange'
# Time is in ms...
df_time = data[:,0]/1000
df_control_msgs_count = data[:,1]
df_control_msgs_size = data[:,2]/1024
df_data_msgs_count = data[:,3]
df_data_msgs_size = data[:,4]/1024
ax1.fill_between(x=df_time, y1=df_data_msgs_size, y2=0, color=dataColor, alpha=1, label="Data Plane")
ax1.plot(df_time ,df_data_msgs_size, color=dataColor, marker='o', markersize=2, alpha=1, linewidth=1)
ax1.fill_between(x=df_time, y1=df_control_msgs_size,y2=0, color=controlColor, alpha=0.55, label="Control Plane")
ax1.plot(df_time,df_control_msgs_size, color=controlColor, marker='D', markersize=2, alpha=0.85, linewidth=1)
ax1.legend()
# ax1.set_title('Traffic Transmitted')
ax1.set_ylabel('Network Traffic (MB/s)')
ax1.set_xlabel('Time (s)')
ax1.grid()
# Now to MBs
#df['control_msgs_sz'] /= 1024
#df['data_msgs_sz'] /= 1024
ax2.plot(df_time, df_data_msgs_size.cumsum(), color=dataColor, alpha=1, label="Data Plane")
ax2.plot(df_time, df_control_msgs_size.cumsum(), color=controlColor, alpha=1, label="Control Plane")
ax2.legend()
ax2.grid()
ax2.set_ylabel('Total Network Traffic (MB)')
ax2.set_xlabel('Time (s)')
return fig
if __name__ == '__main__':
root = os.getenv('RESULTS_ROOT',"../results/small")
scenario=os.getenv('SCENARIO',"all_failures")
experiments = os.listdir(os.path.join(os.path.abspath(root),scenario))
print("Existing experiments: "+str(experiments))
experiment= experiments[0]
print("Using experiment: "+str(experiment))
file = "stats/network/msgs.csv"
# In ms
period = 200
path = os.path.join(root,scenario,experiment, file)
data = load_data(path, period)
fig = compute_graph2(data)
plt.show()
#build_graph(fig, export=None)
| 3,864 |
src/azure-cli/azure/cli/command_modules/cognitiveservices/tests/latest/test_network_rules.py
|
xaliciayang/azure-cli
| 7 |
2023413
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
class CognitiveServicesNetworkRulesTests(ScenarioTest):
@ResourceGroupPreparer()
def test_cognitiveservices_network_rules(self, resource_group):
sname = self.create_random_name(prefix='cs_cli_test_', length=16)
customdomain = self.create_random_name(prefix='csclitest', length=16)
self.kwargs.update({
'sname': sname,
'vnetname': sname,
'kind': 'Face',
'sku': 'S0',
'location': 'westus',
'customdomain': customdomain,
})
self.cmd('network vnet create --resource-group {rg} --name {vnetname}')
subnet1 = self.cmd('network vnet subnet create --resource-group {rg} --name default'
' --vnet-name {vnetname} --address-prefixes 10.0.0.0/24').get_output_in_json()
subnet2 = self.cmd('network vnet subnet create --resource-group {rg} --name subnet'
' --vnet-name {vnetname} --address-prefixes 10.0.1.0/24').get_output_in_json()
self.cmd('az cognitiveservices account create -n {sname} -g {rg} --kind {kind} --sku {sku} -l {location}'
' --custom-domain {customdomain} --yes',
checks=[self.check('name', '{sname}'),
self.check('location', '{location}'),
self.check('sku.name', '{sku}'),
self.check('properties.provisioningState', 'Succeeded')])
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 0)
self.assertEqual(len(rules['virtualNetworkRules']), 0)
self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.58.3"')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 1)
self.assertEqual(len(rules['virtualNetworkRules']), 0)
self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 2)
self.assertEqual(len(rules['virtualNetworkRules']), 0)
self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet ' + subnet1['id'])
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 2)
self.assertEqual(len(rules['virtualNetworkRules']), 1)
self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet ' + subnet2['name'] +
' --vnet-name {vnetname}')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 2)
self.assertEqual(len(rules['virtualNetworkRules']), 2)
self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])
self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.58.3"')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 1)
self.assertEqual(len(rules['virtualNetworkRules']), 2)
self.assertEqual(rules['ipRules'][0]['value'], "172.16.17.32/24")
self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])
self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 0)
self.assertEqual(len(rules['virtualNetworkRules']), 2)
self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])
self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet1['id'])
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 0)
self.assertEqual(len(rules['virtualNetworkRules']), 1)
self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet2['id'])
self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet2['name'] +
' --vnet-name {vnetname}')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 0)
self.assertEqual(len(rules['virtualNetworkRules']), 0)
# Remove something doesn't exists in rules
self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet2['name'] +
' --vnet-name {vnetname}')
rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
self.assertEqual(len(rules['ipRules']), 0)
self.assertEqual(len(rules['virtualNetworkRules']), 0)
# delete the cognitive services account
ret = self.cmd('az cognitiveservices account delete -n {sname} -g {rg}')
self.assertEqual(ret.exit_code, 0)
if __name__ == '__main__':
unittest.main()
| 6,825 |
nbcelltests/define.py
|
timkpaine/nbcelltests
| 52 |
2022802
|
# *****************************************************************************
#
# Copyright (c) 2019, the nbcelltests authors.
#
# This file is part of the nbcelltests library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum
class LintType(Enum):
LINES_PER_CELL = "lines_per_cell"
CELLS_PER_NOTEBOOK = "cells_per_notebook"
FUNCTION_DEFINITIONS = "function_definitions"
CLASS_DEFINITIONS = "class_definitions"
LINTER = "linter"
KERNELSPEC = "kernelspec"
MAGICS = "magics"
class TestType(Enum):
CELL_COVERAGE = "cell_coverage"
CELL_TEST = "cell_test"
class LintMessage(object):
def __init__(self, cell, message, type, passed=False):
self.cell = cell
self.message = message
self.type = type
self.passed = passed
def __repr__(self):
ret = "PASSED: " if self.passed else "FAILED: "
ret += self.message
ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
return ret
def to_html(self):
ret = (
'<span style="color: green;">PASSED </span>'
if self.passed
else '<span style="color: red;">FAILED </span>'
)
ret += self.message
ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
return ret
class TestMessage(object):
def __init__(self, cell, message, type, passed=0):
self.cell = cell
self.message = message
self.type = type
self.passed = passed
def __repr__(self):
ret = (
"PASSED: "
if self.passed > 0
else "FAILED: "
if self.passed < 0
else "NOT RUN: "
)
ret += self.message
ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
return ret
def to_html(self):
ret = (
'<span style="color: green;">PASSED </span>'
if self.passed
else '<span style="color: red;">FAILED </span>'
)
ret += self.message
ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
return ret
| 2,196 |
embeddings/clean_text.py
|
onai/code-ecosystem-analyzer
| 0 |
2022976
|
'''
'''
import emoji
import json
import os
import string
import sys
def remove_emoji(text):
return emoji.get_emoji_regexp().sub(u'', text)
def clean_text(the_text):
lower = the_text.lower().split()
cleaned = ' '.join(lower)
trans_dict = {}
for key in string.punctuation:
if key == "'":
trans_dict[key] = ''
else:
trans_dict[key] = ' '
text_punct = str.maketrans(trans_dict)
text_low = cleaned.lower()
text_toks = text_low.translate(text_punct).split()
return text_toks
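# Worked example (illustrative): punctuation maps to spaces, apostrophes are
# dropped, and the output is a lowercase token list:
#   clean_text("Don't stop!")  ->  ['dont', 'stop']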
if __name__ == '__main__':
dirname = sys.argv[1]
dest = sys.argv[2]
count = 0
reply_count = 0
for root, dirs, files in os.walk(dirname):
for filename in files:
print(filename)
full_path = os.path.join(root, filename)
dest_path = os.path.join(dest, filename)
cmts = []
with open(full_path) as handle:
for new_line in handle:
the_payload = json.loads(new_line)
the_text = ''
if the_payload['kind'] == 'youtube#commentThread':
the_text = the_payload['snippet']['topLevelComment']['snippet']['textOriginal']
elif the_payload['kind'] == 'youtube#comment':
the_text = the_payload['snippet']['textOriginal']
cleaned_toks = clean_text(the_text)
the_payload['cleaned_tokens'] = cleaned_toks
cmts.append(the_payload)
with open(dest_path, 'a') as handle:
for cmt in cmts:
handle.write(json.dumps(cmt))
handle.write('\n')
| 1,781 |
unused/mlat/dim1/stringABEdge.py
|
yoongun/topological-edge-modes-of-mechanical-lattice
| 1 |
2023695
|
import numpy as np
from numpy import linalg as la
from typing import List, Tuple
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
class StringABLatticeEdge:
"""
Reference:
"A study of topological effects in 1D and 2D mechanical lattices" <NAME> (2018), et. al.
from 'Journal of the Mechanics and Physics of Solids', Volum 117, Aug 2018, 22-36,
https://www.sciencedirect.com/science/article/abs/pii/S0022509618301820
"""
def __init__(self, k: List[float], m: List[float], precision: float = .01) -> None:
"""
Represents dynamic system of 1 dimensional mechanical lattice.
e.g.) ABABAB...
:param k: Spring constants (2)
:param m: Mass (2)
:param precision: Precision for wavenumber q
"""
if len(k) != len(m):
raise ValueError(
f"The length of k={len(k)} and m={len(m)} does not match.")
self.k = k
self.M = np.diag(m)
self.qs = np.arange(-np.pi, np.pi, precision)
def H(self, q):
"""
Hamiltonian
:return: Hamiltonian defined given k and q
"""
k = self.k
Q = np.exp(1.j * q)
return np.array([[k[0] + k[1], -k[0] - k[1] * Q.conj()],
[-k[0] - k[1] * Q, k[0] + k[1]]])
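# The Bloch factor Q = exp(iq) couples the two sublattice sites across the
# unit-cell boundary; the off-diagonal terms are complex conjugates, so
# H(q) is Hermitian for real spring constants.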
def dispersion(self) -> List[Tuple[float, float]]:
"""
Calculate the dispersion relation
:return: List of angular frequency omega for each q (wavenumber) and its eigenvectors
"""
M_inv = la.inv(self.M)
eigenvals = []
eigenvecs = []
for q in self.qs:
eigen_val, eigen_vec = self._min_eigen(M_inv.dot(self.H(q)))
eigenvals.append(eigen_val)
eigenvecs.append(eigen_vec)
ws = np.sqrt(np.array(eigenvals).real)
evs = np.array(eigenvecs)
return ws, evs
def _min_eigen(self, mat: np.ndarray) -> Tuple[float, float]:
"""
Return the eigenvalues and eigenvectors sorted by ascending eigenvalue.
:return: eigenvalues, eigenvectors (sorted)
"""
eigenvals, eigenvecs = la.eig(mat)
min_idx = np.argsort(eigenvals)
return eigenvals[min_idx], eigenvecs[min_idx]
def beta(self) -> float:
"""
Calculate varying contrast beta with given spring constants
:return: Varying contrast beta
"""
k = self.k
return (k[0] - k[1]) / (k[0] + k[1])
def animate(self, q: float, N: int, mode: int, *, fps: int = 30, s: int = 3):
"""
:param q: Wavenumber to animate [-pi, pi]
:param N: Number of unit cells
:param mode: Mode to animate (0 for acoustic, 1 for optical)
:param fps: (Optional) Frame per second (/s) (default: 30 /s)
:param s: (Optional) Animation duration (s) (default: 3 s)
"""
ws, evs = self.dispersion()
# Parameters
idx = min(range(len(self.qs)), key=lambda i: abs(self.qs[i] - q))
w = ws[idx, mode] # /s
# Construct frames
frames = []
for t in range(int(s * fps)):
dt = t / fps
dphase = dt * w * 2 * np.pi
y = []
for i in range(N):
y.append(evs[idx, mode, 0] * np.exp(1.j * (q * i + dphase)))
y.append(evs[idx, mode, 1] * np.exp(1.j * (q * i + dphase)))
y = np.array(y)
frames.append(
go.Frame(data=[go.Scatter(y=y.real, line_shape='spline')]))
# Figure components
start_button = dict(
label="Play",
method="animate",
args=[
None,
{
"frame": {"duration": 1000 / fps, "redraw": False},
"fromcurrent": True,
"transition": {"duration": 100}
}])
pause_button = dict(
label="Pause",
method="animate",
args=[
[None],
{
"frame": {"duration": 0, "redraw": False},
"mode": "immediate",
"transition": {"duration": 0}
}])
# Plot
fig = go.Figure(
data=frames[0].data,
layout=go.Layout(
title="Dispersion relation animation",
yaxis=dict(range=[-1., 1.], autorange=False),
updatemenus=[
dict(
type="buttons",
buttons=[start_button, pause_button
])
]
),
frames=frames[1:])
fig.show()
def plot_dispersion_relation(self):
ws, _ = self.dispersion()
w0 = ws[:, 0]
w1 = ws[:, 1]
ws = np.append(w0, w1)
x = np.append(self.qs, self.qs)
y = ws
index = np.append(np.repeat(0, len(self.qs)),
np.repeat(1, len(self.qs)))
df = pd.DataFrame({
"q": x,
"w": y,
"index": index,
})
fig = px.line(df, x="q", y="w", color='index')
fig.show()
| 5,196 |
Lesson 4/website_alive/make_request.py
|
arechesk/PythonHW
| 0 |
2023763
|
import requests
OK = requests.codes.ok
def request(url):
r = requests.get(url)
return r
| 100 |
ddi_search_engine/Bio/dbdefs/embl.py
|
dbmi-pitt/DIKB-Evidence-analytics
| 3 |
2023274
|
# Copyright 2002 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio.config.DBRegistry import CGIDB, DBGroup
from _support import *
embl_xembl_cgi = CGIDB(
name="embl-xembl-cgi",
doc="Query XEMBL for EMBL sequence data in XML format.",
cgi="http://www.ebi.ac.uk/cgi-bin/xembl/XEMBL.pl",
url="http://www.ebi.ac.uk/xembl/",
delay=5.0,
params=[("format", "Bsml")],
key="id",
failure_cases=[(has_str("NOT EXIST"), "id does not exist")],
)
embl_dbfetch_cgi = CGIDB(
name="embl-dbfetch-cgi",
cgi="http://www.ebi.ac.uk/cgi-bin/dbfetch",
url="http://www.ebi.ac.uk/cgi-bin/dbfetch",
doc="dbfetch provides EMBL, Genbank, and SWALL sequences",
delay=5.0,
params=[("db", "embl"),
("style", "raw"),
("format", "embl"),
],
key="id",
failure_cases=[(has_str("not found in database"), "id does not exist")]
)
embl_ebi_cgi = CGIDB(
name="embl-ebi-cgi",
cgi="http://www.ebi.ac.uk/cgi-bin/emblfetch",
url="http://www.ebi.ac.uk/cgi-bin/emblfetch",
doc="Retrieve many kinds of sequences from EBI",
delay=5.0,
params=[("db", "EMBL"),
("format", "default"), # also Fasta, bsml, agave available
("style", "raw")
],
key="id",
failure_cases=[(blank_expr, "No results returned")]
)
embl = DBGroup(
name="embl",
behavior="serial",
## cache="XXX"
)
embl.add(embl_dbfetch_cgi)
embl.add(embl_ebi_cgi)
embl_xml = DBGroup(
name = "embl-xml",
behavior = "serial")
embl_fast = DBGroup(
name="embl-fast",
behavior="concurrent",
)
embl_fast.add(embl_dbfetch_cgi)
embl_fast.add(embl_ebi_cgi)
| 1,844 |
reverse_proxy/proxies/admin.py
|
optimor/reverse-proxy
| 0 |
2023991
|
from django.contrib import admin
from jet.admin import CompactInline
from .models import ProxySite, ProxyRewrite, ProxyHeader
from .forms import ProxySiteForm
class ProxyRewriteInline(CompactInline):
model = ProxyRewrite
extra = 1
fieldsets = (
(
None,
{
"fields": ("from_regex", "to_regex"),
"description": "A list of tuples in the style (from, to) where from "
"must by a valid regex expression and to a valid URL. If "
"request.get_full_path matches the from expression the "
"request will be redirected to to with an status code 302. "
"Matches groups can be used to pass parts from the from "
"URL to the to URL using numbered groups.",
},
),
)
class ProxyHeaderInline(CompactInline):
model = ProxyHeader
extra = 1
fieldsets = (
(
None,
{
"fields": ("header_name", "header_value"),
"description": "A list of tuples in the style (key, value) where key "
"must by a valid HEADER and key a valid header value.",
},
),
)
@admin.register(ProxySite)
class ProxySiteAdmin(admin.ModelAdmin):
list_display = (
"name",
"upstream",
"subdomain_name",
"subdomain_full_url",
"add_remote_user",
"default_content_type",
"retries",
)
fieldsets = (
(None, {"fields": ("name", "upstream", "thumbnail")}),
(
"Subdomain",
{
"fields": ("subdomain_name", "subdomain_full_url"),
"description": "Specify those to setup proxy that redirects based on "
"the subdomain of the current URL",
},
),
("Extra", {"fields": ("add_remote_user", "default_content_type", "retries")}),
)
form = ProxySiteForm
inlines = (ProxyRewriteInline, ProxyHeaderInline)
| 2,024 |
preprocess.py
|
Cyna298/hifi-gan
| 0 |
2023535
|
import glob
import os
from pathlib import Path
from tqdm import tqdm
import numpy as np
from TTS.utils.audio import AudioProcessor
def preprocess_wav_files(out_path, config, ap):
os.makedirs(os.path.join(out_path, "quant"), exist_ok=True)
os.makedirs(os.path.join(out_path, "mel"), exist_ok=True)
wav_files = find_wav_files(config.data_path)
for path in tqdm(wav_files):
wav_name = Path(path).stem
quant_path = os.path.join(out_path, "quant", wav_name + ".npy")
mel_path = os.path.join(out_path, "mel", wav_name + ".npy")
y = ap.load_wav(path)
mel = ap.melspectrogram(y)
np.save(mel_path, mel)
if isinstance(config.mode, int):
quant = (
ap.mulaw_encode(y, qc=config.mode)
if config.mulaw
else ap.quantize(y, bits=config.mode)
)
np.save(quant_path, quant)
def find_wav_files(data_path):
wav_paths = glob.glob(os.path.join(data_path, "**", "*.wav"), recursive=True)
return wav_paths
| 1,051 |
setup.py
|
NineteenPeriod/django-bulk-update-or-create
| 0 |
2023217
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name='django-bulk-update-or-create',
)
| 103 |
artap/tests/test_benchmark_robust.py
|
tamasorosz/artap
| 5 |
2023499
|
import unittest
from ..individual import Individual
from ..benchmark_robust import Synthetic1D, Synthetic2D, Synthetic5D, Synthetic10D
class TestSynthetic1D(unittest.TestCase):
def test_synthetic1d(self):
test = Synthetic1D()
self.assertAlmostEqual(test.evaluate(Individual([11.0]))[0], 3.23, 3)
self.assertAlmostEqual(test.evaluate(Individual([1.6]))[0], 3.205, 2)
class TestSynthetic2D(unittest.TestCase):
def test_synthetic2d(self):
test = Synthetic2D()
self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0]))[0], 1.21112, 4)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0]))[0], 1.00096, 4)
class TestSynthetic5D(unittest.TestCase):
def test_synthetic5d(self):
test = Synthetic5D()
self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.200000000, 4)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.000, 4)
self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0]))[0], .7)
self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0]))[0], .75)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2, 5)
self.assertAlmostEqual(test.evaluate(Individual([5.0, 2.0, 9.6, 7.3, 8.6]))[0], 1.0)
self.assertAlmostEqual(test.evaluate(Individual([7.5, 8.0, 9.0, 3.2, 4.6]))[0], .6, 4)
self.assertAlmostEqual(test.evaluate(Individual([5.7, 9.3, 2.2, 8.4, 7.1]))[0], .5)
self.assertAlmostEqual(test.evaluate(Individual([5.5, 7.2, 5.8, 2.3, 4.5]))[0], .2, 4)
self.assertAlmostEqual(test.evaluate(Individual([4.7, 3.2, 5.5, 7.1, 3.3]))[0], 0.4)
self.assertAlmostEqual(test.evaluate(Individual([9.7, 8.4, 0.6, 3.2, 8.5]))[0], 0.1)
class TestSynthetic10D(unittest.TestCase):
def test_synthetic10d(self):
test = Synthetic10D()
self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0],
1.200000000, 4)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.000,
4)
self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0, 1.0, 1.0, 6.0, 7.0, 8.0]))[0], 0.7)
self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0, 1.0, 3.0, 8.0, 9.5, 2.0]))[0], 0.75)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)
self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2)
| 2,815 |
tests/data/write_data.py
|
lzmch/framequery
| 66 |
2023577
|
from __future__ import print_function, division, absolute_import
import os.path
import pandas as pd
df = pd.DataFrame({
'g': [0, 0, 0, 1, 1, 2],
'i': [1, 2, 3, 4, 5, 6],
'f': [7.0, 8.0, 9.0, 0.0, 1.0, 2.0],
})
df.to_csv(
os.path.join(os.path.dirname(__file__), 'test.csv'),
sep=';',
index=False,
)
| 326 |
projects/crawl_taobao_goods_migrate/model/result.py
|
kingking888/crawler-pyspider
| 1 |
2022702
|
from crawl_taobao_goods_migrate.model.task import Task
from pyspider.core.model.mongo_base import *
from pyspider.helper.date import Date
class Result(ResultBase):
def __init__(self):
super(Result, self).__init__()
def find_by_goods_id(self, goods_id):
"""
        Look up a goods record in the goods image collection.
:param goods_id:
:return:
"""
return self.find_one({"taskid": Task.get_task_id_goods_image(goods_id)})
def find_complete_goods(self, goods_id):
"""
        Look up the goods in both the goods image and goods details collections and
        return the record with the more recent update time; if both records exist,
        one of the two goods has already been taken off the shelves.
:param goods_id:
:return:
"""
image_goods = self.find_one({"taskid": Task.get_task_id_goods_image(goods_id)})
detail_goods = self.find_one({"taskid": Task.get_task_id_goods_detail(goods_id)})
img_result = image_goods.get('result') if image_goods else "1970-01-01"
detail_result = detail_goods.get('result') if detail_goods else "1970-01-01"
img_date = img_result.get('update_time') if isinstance(img_result, dict) else "1970-01-01"
detail_date = detail_result.get('update_time') if isinstance(detail_result, dict) else "1970-01-01"
if img_date is None:
img_date = "1970-01-01"
if detail_date is None:
detail_date = "1970-01-01"
return detail_goods if Date(img_date) < Date(detail_date) else image_goods
def find_all_goods(self, shop_id=''):
"""
        Query all goods in the goods collection;
        if shop_id is given, return only the goods under that shop, otherwise return all goods.
        :param shop_id: shop ID
:return:
"""
builder = {
'goods_id': {'$exists': 'true'},
}
if shop_id:
builder['shop_id'] = shop_id
return self.find(builder)
def find_all_shop_goods(self, shop_list: list):
"""
        Get the goods IDs of all the given shops.
:param shop_list: str list
:return:
"""
builder = {
"goods_id": {"$exists": 'true'}
}
if shop_list:
shop_list = [str(item) for item in shop_list]
builder["shop_id"] = {"$in": shop_list}
return self.find(builder)
def find_filter_goods(self, shop_ids: list, update_time=0):
"""
        Query goods data with filters.
        :param shop_ids: int list
        :param update_time: if positive, fetch only goods whose updatetime is greater than or equal to it
:return:
"""
builder = {
'result.goods_id': {'$exists': 'true'},
}
if shop_ids:
shop_ids = [int(item) for item in shop_ids]
builder['result.shop_id'] = {"$in": shop_ids}
if update_time > 0:
builder['updatetime'] = {"$gte": update_time}
return self.find(builder)
def find_all_shop_id(self):
"""
        Get all shop IDs.
:return:
"""
return self.find({
'result.shop_id': {'$exists': 'true'},
'result.shop_url': {'$exists': 'true'},
'result.banner_imgs': {'$exists': 'true'},
})
def find_shop_by_id(self, shop_id):
"""
        Look up shop details in the shop details collection.
:param shop_id:
:return:
"""
return self.find_one({"taskid": Task.get_task_id_shop_details(shop_id)})
def update_shop_crawled_status(self, shop_id, status):
"""
        Update the crawled status of a shop.
:param shop_id:
:param status:
:return:
"""
return self.update_many({'taskid': Task.get_task_id_shop_details(shop_id)},
{"$set": {"result.crawled": status}})
def insert_or_update_goods(self, doc):
"""
        Insert or update a Tmall goods record.
:param doc:
:return:
"""
goods_id = doc.get("goods_id", "")
goods_name = doc.get("goods_name", "")
shop_id = doc.get("shop_id", "")
update_time = doc.get("update_time", 0)
if goods_id:
re = self.find_one({"goods_id": goods_id})
if re:
return self.update(
{'goods_id': goods_id},
{"$set": {"goods_id": goods_id, "goods_name": goods_name, "shop_id": shop_id,
"update_time": update_time}})
else:
return self.insert(doc)
else:
return self.insert(doc)
| 4,288 |
src/Tokenize.py
|
ttrung149/turquoise
| 2 |
2023117
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Turquoise - VHDL linter and compilation toolchain
# Copyright (c) 2020-2021: Turquoise team
#
# File name: Tokenize.py
#
# Description: Implementation of tokenizer class
#
# -----------------------------------------------------------------------------
from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
from pyVHDLParser.Base import ParserException
class Tokenize():
def __init__(self, filename=None):
self._filename = filename
def get_token_stream(self):
        with open(self._filename, 'r') as handle:
content = handle.read()
stream = Tokenizer.GetVHDLTokenizer(content)
return stream
def get_token_iter(self):
stream = self.get_token_stream()
token_iter = iter(stream)
return token_iter
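# Usage sketch (illustrative; assumes a VHDL source file on disk):
#   for token in Tokenize('design.vhd').get_token_iter():
#       print(token)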
| 925 |
python/oneflow/framework/docstr/unbind.py
|
L-Net-1992/oneflow
| 1 |
2023764
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.unbind,
"""
This function is equivalent to PyTorch's unbind function.
Removes a tensor dimension.
Returns a tuple of all slices along a given dimension, already without it.
Args:
x(Tensor): the tensor to unbind
dim(int): dimension to remove
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor(range(12)).reshape([3,4])
>>> flow.unbind(x)
(tensor([0, 1, 2, 3], dtype=oneflow.int64), tensor([4, 5, 6, 7], dtype=oneflow.int64), tensor([ 8, 9, 10, 11], dtype=oneflow.int64))
>>> flow.unbind(x, 1)
(tensor([0, 4, 8], dtype=oneflow.int64), tensor([1, 5, 9], dtype=oneflow.int64), tensor([ 2, 6, 10], dtype=oneflow.int64), tensor([ 3, 7, 11], dtype=oneflow.int64))
""",
)
| 1,513 |
examples/routes/resequence_multiple_stops.py
|
route4me/route4me-python-sdk
| 10 |
2023170
|
# -*- coding: utf-8 -*-
import argparse
import json
from route4me import Route4Me
def load_json(filename):
data = []
with open(filename, 'rt') as datafile:
data = json.load(datafile)
datafile.close()
return data
def main(args):
r4m = Route4Me(args.api_key)
route_data = load_json(args.route_data_filename)
route = r4m.route
print(f'Route ID: {args.route_id}')
print("Addresses to be Re-sequence")
for address in route_data['addresses']:
print(f'Address Sequence: {address["sequence_no"]:6} - '
f'Route Destination ID: {address["route_destination_id"]:9}')
print(f"After Resequence the Route {args.route_id}")
response_data = route.resequence_multiple_stops(args.route_id, route_data)
for address in response_data['addresses']:
print(f'Address Sequence: {address["sequence_no"]:6} - '
f'Route Destination ID: {address["route_destination_id"]:9} - Address: {address["address"]} ')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Resequence a Route')
parser.add_argument('--api_key', dest='api_key', help='Route4Me API KEY',
type=str, required=True)
parser.add_argument('--route_id', dest='route_id', help='Route ID',
type=str, required=True)
parser.add_argument('--route_data_filename', dest='route_data_filename',
help='JSON file name with Route Addresses ID and Sequence',
type=str, required=True)
args = parser.parse_args()
main(args)
| 1,595 |
src/plot_automaton.py
|
BurnySc2/rust-python-pyo3-test
| 1 |
2023359
|
import sys
import os
import lzma
import pickle
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
import matplotlib.pyplot as plt
from typing import Tuple, List, Iterable
from sc2.game_data import GameData
from sc2.game_info import GameInfo
from sc2.game_state import GameState
from sc2.bot_ai import BotAI
def get_map_specific_bots() -> Iterable[BotAI]:
folder = os.path.dirname(__file__)
subfolder_name = "pickle_data"
pickle_folder_path = os.path.join(folder, subfolder_name)
files = os.listdir(pickle_folder_path)
for file in (f for f in files if f.endswith(".xz")):
with lzma.open(os.path.join(folder, subfolder_name, file), "rb") as f:
raw_game_data, raw_game_info, raw_observation = pickle.load(f)
# Build fresh bot object, and load the pickle'd data into the bot object
bot = BotAI()
game_data = GameData(raw_game_data.data)
game_info = GameInfo(raw_game_info.game_info)
game_state = GameState(raw_observation)
bot._initialize_variables()
bot._prepare_start(client=None, player_id=1, game_info=game_info, game_data=game_data)
bot._prepare_step(state=game_state, proto_game_info=raw_game_info)
yield bot
# Global bot object that is used in TestClass.test_position_*
bot_object_generator = get_map_specific_bots()
# random_bot_object: BotAI = next(bot_object_generator)
# print(random_bot_object.game_info.start_locations)
# print(random_bot_object.townhalls[0].position)
# print(random_bot_object.enemy_start_locations)
def main():
# start = (90, 100)
# goal = (100, 114)
# Spawn
start = (29, 65)
goal = (154, 114)
# Ramp
# start = (32, 51)
# goal = (150, 129)
# map_grid = np.loadtxt("AutomatonLE.txt", delimiter="").astype(int)
grid = []
with open("../AutomatonLE.txt") as f:
for line in f.readlines():
values = [int(i) for i in list(line.strip())]
grid.append(values)
# print(grid)
map_grid = np.asarray(grid)
# print(map_grid)
path = []
with open("../path.txt") as f:
for line in f.readlines():
x, y = line.split(",")
path.append((int(x.strip()), int(y.strip())))
print()
# print(map_grid.shape)
plot(map_grid, route=path, start=start, goal=goal)
def plot(
grid,
route: List[Tuple[int, int]] = None,
start: Tuple[int, int] = None,
goal: Tuple[int, int] = None,
waypoints=None,
):
# extract x and y coordinates from route list
x_coords = []
y_coords = []
if route:
for i in range(0, len(route)):
x = route[i][0]
y = route[i][1]
x_coords.append(x)
y_coords.append(y)
# plot map and path
fig, ax = plt.subplots(figsize=(20, 20))
ax.imshow(grid, cmap=plt.cm.Dark2)
if start:
ax.scatter(start[0], start[1], marker="x", color="red", s=200)
if goal:
ax.scatter(goal[0], goal[1], marker="x", color="blue", s=200)
if route:
for w in route:
ax.scatter(w[0], w[1], marker="x", color="orange", s=100)
if waypoints:
for w in waypoints:
ax.scatter(w[0], w[1], marker="x", color="black", s=50)
# plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
plt.show()
if __name__ == "__main__":
main()
| 3,363 |
models/utils.py
|
Curli-quan/fewshot-select
| 0 |
2023960
|
from torch import nn
import random
from functools import wraps
import torch.nn.functional as F
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
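# Usage sketch (illustrative, BYOL-style target network): create ema = EMA(0.99)
# once, then call update_moving_average(ema, target_net, online_net) after each
# optimizer step so target_net tracks an exponential moving average of online_net.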
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
# print(f"[.] Function Name: {fn.__name__}")
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
| 1,851 |
star_printer.py
|
ChangyongKim0/programming_study
| 1 |
2022792
|
import logger as lg
class StarPrinter:
def __init__(self, print_types):
self.print_queue = [{"type": ele[0], "length": ele[1]}
for ele in print_types]
logger = lg.Logger("StarPrinter")
self.log, self.err = logger.log, logger.err
self.log("StarPrinter initialized.")
def printDiamond(self, length):
space = " " * (length // 2)
star = "*"
print()
for line in range(length):
print(space + star)
if line < length / 2 - 1:
space = space[0:-2]
star += "****"
else:
space += " "
star = star[0:-4]
print()
def printTriangle(self, length):
space = " " * length
star = "*"
print()
for line in range(length):
print(space + star)
space = space[0:-1]
star += "**"
print()
def printLeftTriangle(self, length):
star = "*"
print()
for line in range(length):
print(star)
star += "**"
print()
def addPrintQueue(self, print_type, length):
self.print_queue.append({"type": print_type, "length": length})
self.log("{} with length {} is added in print queue.".format(
print_type, length))
def printStar(self):
for ele in self.print_queue:
if ele["type"] == "diamond":
self.printDiamond(ele["length"])
elif ele["type"] == "triangle":
self.printTriangle(ele["length"])
elif ele["type"] == "left_triangle":
self.printLeftTriangle(ele["length"])
else:
self.err("Wrong print type.")
def __str__(self):
string = "<StarPrinter>\n[print_queue]\n"
for ele in self.print_queue:
string += "type: {0}; length: {1}\n".format(
ele["type"], ele["length"])
return string
if __name__ == "__main__":
star_printer = StarPrinter([["triangle", 5], ["left_triangle", 3]])
print(star_printer)
star_printer.addPrintQueue("none", 8)
star_printer.addPrintQueue("diamond", 7)
print(star_printer)
star_printer.printStar()
| 2,268 |
src/shapes.py
|
AntVil/Wetter-Daten-Visualizer
| 0 |
2023987
|
# this file contains all components needed to collect, structure and save the data from GADM
import os
import requests
from zipfile import ZipFile
from io import BytesIO
import cartopy.io.shapereader as shpreader
# constants
SHAPES_URL = "https://biogeo.ucdavis.edu/data/gadm3.6/shp/gadm36_DEU_shp.zip"
SHAPES_FOLDER = os.path.join(os.path.dirname(__file__), "data", "shapes")
os.makedirs(SHAPES_FOLDER, exist_ok = True)
def download_shapes():
"""
this function downloads data from GADM
"""
unpacked = ZipFile(BytesIO(requests.get(SHAPES_URL).content))
file_names = list(set([file.split(".")[0] for file in unpacked.namelist()]).difference({"license"}))
# saving license
with unpacked.open("license.txt", "r") as read_file:
with open(os.path.join(SHAPES_FOLDER, "license.txt"), "wb") as write_file:
write_file.write(read_file.read())
#downloading files
for file in file_names:
for extension in [".shp", ".shx", ".dbf"]:
with unpacked.open(file + extension, "r") as read_file:
# creating folder structure
path = os.path.join(SHAPES_FOLDER, file)
os.makedirs(path, exist_ok = True)
# saving file
file_name = "shape" + extension
with open(os.path.join(path, file_name), "wb") as write_file:
write_file.write(read_file.read())
def get_geometry(level=1):
"""
this function returns the administrative-area geometries for germany
"""
try:
return list(
shpreader.Reader(
os.path.join(os.path.dirname(__file__), "data", "shapes", f"gadm36_DEU_{level}", "shape")
).geometries()
)
    except Exception:
        # shapefiles not present locally yet; download them once and retry
        download_shapes()
        return get_geometry(level)
if __name__ == "__main__":
download_shapes()
| 1,918 |
tests/seekret.apitest/context/response_test.py
|
seek-ret/tavernrtl
| 4 |
2023678
|
import io
import json as _json
from typing import Optional, Union
import pytest
from requests import Response
from requests.structures import CaseInsensitiveDict
from seekret.apitest.context.response import ResponseWrapper, NullResultError
def make_wrapper(json=None,
                 headers: Optional[Union[dict[str, str],
                                         CaseInsensitiveDict[str]]] = None):
response = Response()
response.raw = io.BytesIO(_json.dumps(json).encode() if json else b'')
if headers:
response.headers = CaseInsensitiveDict(headers)
return ResponseWrapper(response)
class TestResponseWrapper:
class TestSearch:
def test_json_nested_value(self):
wrapper = make_wrapper({'a': {'b': {'c': 'd'}}})
assert {'c': 'd'} == wrapper.search('json.a.b')
def test_json_array_value(self):
wrapper = make_wrapper([1, 'b', {'c': 'd'}])
assert 'd' == wrapper.search('json[2].c')
def test_json_missing_value_causes_null_result_error(self):
wrapper = make_wrapper({'some-key': 1})
pytest.raises(NullResultError, wrapper.search, 'json."other-key"')
def test_json_value_none_causes_null_result_error(self):
wrapper = make_wrapper({'key': None})
pytest.raises(NullResultError, wrapper.search, 'json.key')
def test_json_case_sensitive(self):
wrapper = make_wrapper({'caseSensitiveKey': 1})
pytest.raises(NullResultError, wrapper.search,
'json.casesensitivekey')
def test_headers_existing_key(self):
wrapper = make_wrapper(headers={'Some-Header': 'value'})
assert wrapper.search('headers."Some-Header"') == 'value'
def test_headers_case_insensitive(self):
wrapper = make_wrapper(headers={'Some-Header': 'value'})
assert wrapper.search('headers."some-header"') == 'value'
def test_headers_missing_key_causes_null_result_error(self):
wrapper = make_wrapper(headers={'Some-Header': 'value'})
pytest.raises(NullResultError, wrapper.search,
'headers."other-header"')
def test_bad_locator_causes_null_result_error(self):
wrapper = make_wrapper(json={'a': 1}, headers={'b': 2})
pytest.raises(NullResultError, wrapper.search,
'expression.must.start.with.json.or.headers')
class TestAssertSchema:
def test_validation_success(self):
wrapper = make_wrapper({
'a': 'hello!',
'b': 1,
})
wrapper.assert_schema("""
type: map
mapping:
a:
type: str
required: true
b:
type: int
""")
def test_validation_failure_causes_assertion_error(self):
wrapper = make_wrapper({
'b': 1,
})
pytest.raises(
AssertionError, wrapper.assert_schema, """
type: map
mapping:
a:
type: str
required: true
b:
type: int
""")
| 3,353 |
src/protocols/BLE/ble_device.py
|
QWERTSKIHACK/peniot
| 143 |
2023848
|
import pexpect
class BLEDevice:
"""
Represents a BLE device.
It uses `gatttool` to connect a BLE device.
"""
def __init__(self, address):
self.device = None
self.address = address
# connect to the device specified with the given address
self.connect()
def connect(self):
"""
Connects to the BLE device
"""
print "Connecting..."
# Run gatttool interactively.
self.device = pexpect.spawn("gatttool -b " + self.address + " -I")
self.device.expect('\[LE\]>', timeout=10)
self.device.sendline('connect')
self.device.expect('Connection successful.*\[LE\]>', timeout=10)
print "Successfully connected!"
"""
Updates the value of the handle
"""
def writecmd(self, handle, value):
cmd = "char-write-cmd " + handle + " " + value
self.device.sendline(cmd)
print "Wrote " + value + " to handle: " + handle
| 979 |
keras/utils/visualize_util.py
|
nishank974/Keras
| 2 |
2023324
|
import itertools
from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge
try:
# pydot-ng is a fork of pydot that is better maintained
import pydot_ng as pydot
except ImportError:
# fall back on pydot if necessary
import pydot
if not pydot.find_graphviz():
raise RuntimeError("Failed to import pydot. You must install pydot"
" and graphviz for `pydotprint` to work.")
def layer_typename(layer):
return type(layer).__module__ + "." + type(layer).__name__
def get_layer_to_name(model):
"""Returns a dict mapping layer to their name in the model"""
if not isinstance(model, Graph):
return {}
else:
node_to_name = itertools.chain(
model.nodes.items(), model.inputs.items(), model.outputs.items()
)
return {v: k for k, v in node_to_name}
class ModelToDot(object):
"""
This is a helper class which visits a keras model (Sequential or Graph) and
returns a pydot.Graph representation.
This is implemented as a class because we need to maintain various states.
Use it as ```ModelToDot()(model)```
Keras models can have an arbitrary number of inputs and outputs. A given
layer can have multiple inputs but has a single output. We therefore
explore the model by starting at its output and crawling "up" the tree.
"""
def _pydot_node_for_layer(self, layer, label):
"""
Returns the pydot.Node corresponding to the given layer.
`label` specify the name of the layer (only used if the layer isn't yet
associated with a pydot.Node)
"""
# Check if this already exists (will be the case for nodes that
# serve as input to more than one layer)
if layer in self.layer_to_pydotnode:
node = self.layer_to_pydotnode[layer]
else:
layer_id = 'layer%d' % self.idgen
self.idgen += 1
label = label + " (" + layer_typename(layer) + ")"
if self.show_shape:
# Build the label that will actually contain a table with the
# input/output
outputlabels = str(layer.output_shape)
if hasattr(layer, 'input_shape'):
inputlabels = str(layer.input_shape)
elif hasattr(layer, 'input_shapes'):
inputlabels = ', '.join(
[str(ishape) for ishape in layer.input_shapes])
else:
inputlabels = ''
label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
label, inputlabels, outputlabels)
node = pydot.Node(layer_id, label=label)
self.g.add_node(node)
self.layer_to_pydotnode[layer] = node
return node
def _process_layer(self, layer, layer_to_name=None, connect_to=None):
"""
Process a layer, adding its node to the graph and creating edges to its
outputs.
`connect_to` specify where the output of the current layer will be
connected
`layer_to_name` is a dict mapping layer to their name in the Graph
model. Should be {} when processing a Sequential model
"""
# The layer can be a container layer, in which case we can recurse
is_graph = isinstance(layer, Graph)
is_seq = isinstance(layer, Sequential)
if self.recursive and (is_graph or is_seq):
# We got a container layer, recursively transform it
if is_graph:
child_layers = layer.outputs.values()
else:
child_layers = [layer.layers[-1]]
for l in child_layers:
self._process_layer(l, layer_to_name=get_layer_to_name(layer),
connect_to=connect_to)
else:
# This is a simple layer.
label = layer_to_name.get(layer, '')
layer_node = self._pydot_node_for_layer(layer, label=label)
if connect_to is not None:
self.g.add_edge(pydot.Edge(layer_node, connect_to))
# Proceed upwards to the parent(s). Only Merge layers have more
# than one parent
if isinstance(layer, Merge): # Merge layer
for l in layer.layers:
self._process_layer(l, layer_to_name,
connect_to=layer_node)
elif hasattr(layer, 'previous') and layer.previous is not None:
self._process_layer(layer.previous, layer_to_name,
connect_to=layer_node)
def __call__(self, model, recursive=True, show_shape=False,
connect_to=None):
self.idgen = 0
# Maps keras layer to the pydot.Node representing them
self.layer_to_pydotnode = {}
self.recursive = recursive
self.show_shape = show_shape
self.g = pydot.Dot()
self.g.set('rankdir', 'TB')
self.g.set('concentrate', True)
self.g.set_node_defaults(shape='record')
if hasattr(model, 'outputs'):
# Graph
for name, l in model.outputs.items():
self._process_layer(l, get_layer_to_name(model),
connect_to=connect_to)
else:
# Sequential container
self._process_layer(model.layers[-1], {}, connect_to=connect_to)
return self.g
def to_graph(model, **kwargs):
"""
`recursive` controls whether we recursively explore container layers
`show_shape` controls whether the shape is shown in the graph
"""
return ModelToDot()(model, **kwargs)
def plot(model, to_file='model.png', **kwargs):
graph = to_graph(model, **kwargs)
graph.write_png(to_file)
| 5,854 |
453-Minimum_Moves_to_Equal_Array_Elements.py
|
QuenLo/leecode
| 6 |
2022886
|
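# Each move increments n-1 elements by 1, which is equivalent to decrementing
# the remaining element by 1, so the answer is sum(nums) - len(nums) * min(nums).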
class Solution:
def minMoves(self, nums: List[int]) -> int:
return sum(nums)-len(nums)*min(nums)
class SolutionII:
def minMoves(self, nums: List[int]) -> int:
minin = float('inf')
time = 0
for num in nums:
time += num
minin = min( minin, num )
return time - len(nums)*minin
| 366 |
dataStructures/exercises/stacks.py
|
Ry4nW/python-wars
| 1 |
2024029
|
from collections import deque
from typing import Any, Optional
class Stack():
    def __init__(self, items: 'list[Any]', maxsize: int) -> None:
        self.items: deque = deque(items)
self.maxsize: int = maxsize
self.top: int = self.get_top()
def get_stack(self) -> deque:
return self.items
    def push(self, item) -> Optional[str]:
if len(self.items) < self.maxsize:
self.items.append(item)
self.top = item
else:
return 'Max capacity reached.'
    def pop(self) -> Any:
if self.items:
popped = self.items.pop()
self.top = self.get_top()
return popped
return 'Stack is empty.'
    def is_empty(self) -> bool:
        return not self.items
    def get_top(self) -> Any:
if not self.is_empty():
return self.items[-1]
else:
return 'Stack is empty.'
stack = Stack([1, 2, 3, 4, 5], 6)
print(stack.top)
print(stack.push(6))
print(stack.top)
print(stack.pop())
print(stack.top)
| 1,054 |
socless/models.py
|
A-Gray-Cat/socless_python
| 4 |
2022933
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class EventTableItem:
id: str
investigation_id: str
status_: str
is_duplicate: bool
created_at: str
event_type: str
playbook: Optional[str]
details: dict
data_types: dict
event_meta: dict
@dataclass
class DedupTableItem:
current_investigation_id: str
dedup_hash: str
@dataclass
class MessageResponsesTableItem:
message_id: str # PK : callback id for message responses
await_token: str # used to start next step in step_functions
receiver: str # step_functions step name
fulfilled: bool # has await_token been used
message: str # message sent to user while waiting for their response
execution_id: str
investigation_id: str
datetime: str
@dataclass
class PlaybookArtifacts:
event: EventTableItem
execution_id: str
@dataclass
class PlaybookInput:
execution_id: str
artifacts: PlaybookArtifacts
results: dict
errors: dict
| 1,018 |
cypherpunkpay/net/tor_client/base_tor_circuits.py
|
prusnak/CypherpunkPay
| 44 |
2023988
|
from abc import abstractmethod
class BaseTorCircuits(object):
    SHARED_CIRCUIT_ID = 'shared_circuit'  # for requests where linkability of actions does not matter (merchant callbacks, price tickers, blockchain height, etc)
SKIP_TOR = 'skip_tor' # for requests where the target is in the local network or Tor cannot be used for other reasons
@abstractmethod
def mark_as_broken(self, label):
pass
@abstractmethod
def get_for(self, privacy_context):
pass
@abstractmethod
def close(self):
pass
| 549 |
src/Python/1-100/88.MergeArray.py
|
Peefy/PeefyLeetCode
| 2 |
2023594
|
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
nums = []
for i in range(m):
nums.append(nums1[i])
i = 0
j = 0
index = 0
while i < m or j < n:
if i == m:
for k in range(j, n):
nums1[index] = nums2[k]
index += 1
break
if j == n:
for k in range(i, m):
nums1[index] = nums[k]
index += 1
break
if nums[i] < nums2[j]:
nums1[index] = nums[i]
i += 1
else:
nums1[index] = nums2[j]
j += 1
index += 1
return nums1
if __name__ == '__main__':
solution = Solution()
print(solution.merge([1,2,3,4,0,0,0,0,0],4, [2,5,6], 3))
else:
pass
| 1,092 |
Python/count-primes.py
|
ddyuewang/leetcode
| 4 |
2023859
|
# Time: O(n)
# Space: O(n)
# Description:
#
# Count the number of prime numbers less than a non-negative number, n
#
# Hint: The number n could be in the order of 100,000 to 5,000,000.
class Solution:
# @param {integer} n
# @return {integer}
def countPrimes(self, n):
if n <= 2:
return 0
is_prime = [True] * n
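        # num starts at n/2: that counts 2 together with every odd number in
        # [3, n); the sieve below subtracts one for each odd composite it marks.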
num = n / 2
for i in xrange(3, n, 2):
if i * i >= n:
break
if not is_prime[i]:
continue
for j in xrange(i*i, n, 2*i):
if not is_prime[j]:
continue
num -= 1
is_prime[j] = False
return num
| 713 |
python3/check_array_formation_through_concatenation.py
|
joshiaj7/CodingChallenges
| 1 |
2023897
|
"""
Space : O(n)
Time : O(n)
"""
class Solution:
def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:
d = {}
n = len(arr)
for x in pieces:
if x[0] not in d:
d[x[0]] = x
i = 0
while i < n:
if arr[i] in d:
temp = d[arr[i]]
for j in range(len(temp)):
if temp[j] == arr[i]:
i += 1
else:
return False
else:
return False
return True
| 597 |
models/__init__.py
|
marcoscale98/emojinet
| 0 |
2023214
|
from models.base_lstm_user import base_lstm_user
from models.base_lstm_cnn_user import base_lstm_cnn_user
from models.base_lstm_subword import base_lstm_subword
from models.ensemble_cnn_subword import ensemble_cnn_subword
from models.base_cnn import base_cnn
from models.base_lstm import base_lstm
from models.vdcnn import vdcnn
class ModelDefinition:
def __init__(self, func, params):
self.params = params
self.func = func
def apply(self, values: dict):
return self.func(*[values[param] for param in self.params])
def get_model(model: str) -> ModelDefinition:
models = {
"base_cnn": ModelDefinition(base_cnn, ["vocabulary_size", "embedding_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"base_lstm": ModelDefinition(base_lstm, ["vocabulary_size", "embedding_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"base_lstm_user": ModelDefinition(base_lstm_user, ["vocabulary_size", "embedding_size", "history_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"base_lstm_cnn_user": ModelDefinition(base_lstm_user, ["vocabulary_size", "embedding_size", "history_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"base_lstm_subword": ModelDefinition(base_lstm_subword, ["vocabulary_size", "embedding_size", "max_char_length", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"ensemble_cnn_subword": ModelDefinition(ensemble_cnn_subword, ["vocabulary_size", "embedding_size", "max_char_length", "max_seq_length", "embedding_matrix", "y_dictionary"]),
"vdcnn": ModelDefinition(vdcnn, ["num_classes", "depth", "sequence_length", "shortcut", "pool_type", "sorted", "use_bias"])
}
return models[model]
| 1,759 |
JUPYTER/Supervised/Feature Engineering/generate_dataset.py
|
Reynolds534/IASS_18_ML
| 1 |
2023626
|
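# Builds a toy regression dataset: X1-X5 are noisy functions of the target
# (handy for feature-selection demos) and the remaining n_features - 5 columns
# are pure Gaussian noise; the columns are randomly permuted before returning.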
import numpy as np
def generate_dataset(n_features):
    if n_features < 6:
print('Please enter a number of features strictly bigger than 5')
return None, None
target = np.random.uniform(0,10,100)
X1 = target**2 - target + np.random.uniform(0,25,100)
X2 = target + np.random.uniform(0,15,100)
X3 = target + target**2 + np.random.uniform(0,50,100)
X4 = X3 + np.random.uniform(0,5,100)
X5 = X1 + X2 + X3
random_state = np.random.RandomState(0)
X = np.array([X1,X2,X3,X4,X5]).T
X = np.c_[X, random_state.randn(100, (n_features-5) )]
Z = X[:, np.random.permutation(X.shape[1])]
return Z, target
| 651 |
mayan/apps/mayan_statistics/dependencies.py
|
Syunkolee9891/Mayan-EDMS
| 1 |
2023680
|
from __future__ import unicode_literals
from mayan.apps.dependencies.classes import JavaScriptDependency
JavaScriptDependency(
module=__name__, name='chart.js', static_folder='statistics',
version_string='=2.7.2'
)
| 225 |
config.py
|
mottenhoff/ReMarkable_Zotero_sync
| 1 |
2023380
|
def config():
return {
# Zotero
"path_to_local_zotero_storage": "<path to your local zotero storage>",
# ReMarkable
        # Get authentication code from https://my.remarkable.com/connect/desktop
        # The auth code is only necessary on the first run; you can remove it
        # afterwards.
"reMarkable_auth_code": "",
# If you want to sync to a folder called papers at
# ./papers on your reMarkable. then only "papers" as
# reMarkable_folder_name
"reMarkable_folder_name": "",
# Monitor
"check_log_every_n_minutes": 5,
"wait_for_n_seconds_idle": 60
}
| 693 |
tests/unittests/http_functions/no_return/main.py
|
gohar94/azure-functions-python-worker
| 277 |
2023972
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
logger = logging.getLogger('test')
def main(req):
logger.error('hi')
| 186 |
src/gui/window.py
|
Aldeshov/ADBFileExplorer
| 12 |
2023530
|
# ADB File Explorer `tool`
# Copyright (C) 2022 <NAME> <EMAIL>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QInputDialog, QMenuBar, QMessageBox
from core.configurations import Resources
from core.main import Adb
from core.managers import Global
from data.models import MessageData, MessageType
from data.repositories import DeviceRepository
from gui.explorer import MainExplorer
from gui.help import About
from gui.others.notification import NotificationCenter
from helpers.tools import AsyncRepositoryWorker
class MenuBar(QMenuBar):
CONNECT_WORKER_ID = 100
DISCONNECT_WORKER_ID = 101
def __init__(self, parent):
super(MenuBar, self).__init__(parent)
self.about = About()
self.file_menu = self.addMenu('&File')
self.help_menu = self.addMenu('&Help')
connect_action = QAction(QIcon(Resources.icon_link), '&Connect', self)
connect_action.setShortcut('Alt+C')
connect_action.triggered.connect(self.connect_device)
self.file_menu.addAction(connect_action)
disconnect_action = QAction(QIcon(Resources.icon_no_link), '&Disconnect', self)
disconnect_action.setShortcut('Alt+X')
disconnect_action.triggered.connect(self.disconnect)
self.file_menu.addAction(disconnect_action)
devices_action = QAction(QIcon(Resources.icon_phone), '&Show devices', self)
devices_action.setShortcut('Alt+D')
devices_action.triggered.connect(Global().communicate.devices.emit)
self.file_menu.addAction(devices_action)
exit_action = QAction('&Exit', self)
exit_action.setShortcut('Alt+Q')
exit_action.triggered.connect(qApp.quit)
self.file_menu.addAction(exit_action)
about_action = QAction('About', self)
about_action.triggered.connect(self.about.show)
self.help_menu.addAction(about_action)
def disconnect(self):
worker = AsyncRepositoryWorker(
worker_id=self.DISCONNECT_WORKER_ID,
name="Disconnecting",
repository_method=DeviceRepository.disconnect,
response_callback=self.__async_response_disconnect,
arguments=()
)
if Adb.worker().work(worker):
Global().communicate.notification.emit(
MessageData(
title='Disconnect',
body="Disconnecting from devices, please wait",
message_type=MessageType.LOADING_MESSAGE,
message_catcher=worker.set_loading_widget
)
)
Global().communicate.status_bar.emit(f'Operation: {worker.name}... Please wait.', 3000)
worker.start()
    def connect_device(self):
        text, ok = QInputDialog.getText(self, 'Connect Device', 'Enter device IP:')
        if not (ok and text):
            # only report a cancellation when the dialog was dismissed or left empty
            Global().communicate.status_bar.emit('Operation: Connecting canceled.', 3000)
        if ok and text:
worker = AsyncRepositoryWorker(
worker_id=self.CONNECT_WORKER_ID,
name="Connecting to device",
repository_method=DeviceRepository.connect,
arguments=(str(text),),
response_callback=self.__async_response_connect
)
if Adb.worker().work(worker):
Global().communicate.notification.emit(
MessageData(
title='Connect',
body="Connecting to device via IP, please wait",
message_type=MessageType.LOADING_MESSAGE,
message_catcher=worker.set_loading_widget
)
)
Global().communicate.status_bar.emit(f'Operation: {worker.name}... Please wait.', 3000)
worker.start()
@staticmethod
def __async_response_disconnect(data, error):
if data:
Global().communicate.devices.emit()
Global().communicate.notification.emit(
MessageData(
title="Disconnect",
timeout=15000,
body=data
)
)
if error:
Global().communicate.devices.emit()
Global().communicate.notification.emit(
MessageData(
timeout=15000,
title="Disconnect",
body=f"<span style='color: red; font-weight: 600'>{error}</span>"
)
)
Global().communicate.status_bar.emit('Operation: Disconnecting finished.', 3000)
@staticmethod
def __async_response_connect(data, error):
if data:
if Adb.CORE == Adb.PYTHON_ADB_SHELL:
Global().communicate.files.emit()
elif Adb.CORE == Adb.EXTERNAL_TOOL_ADB:
Global().communicate.devices.emit()
Global().communicate.notification.emit(MessageData(title="Connecting to device", timeout=15000, body=data))
if error:
Global().communicate.devices.emit()
Global().communicate.notification.emit(
MessageData(
timeout=15000,
title="Connect to device",
body=f"<span style='color: red; font-weight: 600'>{error}</span>"
)
)
Global().communicate.status_bar.emit('Operation: Connecting to device finished.', 3000)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setMenuBar(MenuBar(self))
self.setCentralWidget(MainExplorer(self))
self.resize(640, 480)
self.setMinimumSize(480, 360)
self.setWindowTitle('ADB File Explorer')
self.setWindowIcon(QIcon(Resources.icon_logo))
# Show Devices Widget
Global().communicate.devices.emit()
# Connect to Global class to use it anywhere
Global().communicate.status_bar.connect(self.statusBar().showMessage)
# Important to add last to stay on top!
self.notification_center = NotificationCenter(self)
Global().communicate.notification.connect(self.notify)
# Welcome notification texts
welcome_title = "Welcome to ADBFileExplorer!"
welcome_body = f"Here you can see the list of your connected adb devices. Click one of them to see files.<br/>"\
f"Current selected core: <strong>{Adb.current_core()}</strong><br/>" \
f"To change it <code style='color: blue'>adb.set_core()</code> in <code>app.py</code>"
Global().communicate.status_bar.emit('Ready', 5000)
Global().communicate.notification.emit(MessageData(title=welcome_title, body=welcome_body, timeout=30000))
def notify(self, data: MessageData):
message = self.notification_center.append_notification(
title=data.title,
body=data.body,
timeout=data.timeout,
message_type=data.message_type
)
if data.message_catcher:
data.message_catcher(message)
def closeEvent(self, event):
if Adb.CORE == Adb.EXTERNAL_TOOL_ADB:
reply = QMessageBox.question(self, 'ADB Server', "Do you want to kill adb server?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
Adb.stop()
elif Adb.CORE == Adb.PYTHON_ADB_SHELL:
Adb.stop()
event.accept()
# This helps the toast maintain the place after window get resized
def resizeEvent(self, e):
if self.notification_center:
self.notification_center.update_position()
return super().resizeEvent(e)
| 8,416 |
adv/mega_man.py
|
XenoXilus/dl
| 0 |
2023635
|
from core.advbase import *
from module.bleed import Bleed, mBleed
from module.x_alt import X_alt
def module():
return Mega_Man
class Skill_Ammo(Skill):
def __init__(self, name=None, acts=None):
super().__init__(name, acts)
self.c_ammo = 0
@property
def ammo(self):
return self.ac.conf.ammo
@property
def cost(self):
return self.ac.conf.cost
def check(self):
if self._static.silence == 1:
return False
return self.c_ammo >= self.cost
@allow_acl
def check_full(self):
if self._static.silence == 1:
return False
return self.c_ammo >= self.ammo
def charge_ammo(self, ammo):
self.c_ammo = min(self.ammo, self.c_ammo + ammo)
class Mega_Man(Adv):
comment = '16 hits leaf shield (max 32 hits)'
conf = {}
conf['slots.d'] = 'Gala_Mars'
conf['slots.a'] = ['Primal_Crisis', 'Levins_Champion']
conf['acl'] = """
`dragon, s=4
`s3, not buff(s3)
`s4
if bleed_stack >= 3
`s2, c_x(metalblade) or c_x(default)
`s1, c_x(metalblade)
else
`s1, c_x(default) and s1.check_full()
end
"""
conf['coabs'] = ['Blade', 'Marth', 'Dagger2']
conf['share'] = ['Karl']
# conf['dragonform'] = {
# 'act': 'c5-s',
# 'dx1.dmg': 1.20,
# 'dx1.startup': 10 / 60.0, # c1 frames
# 'dx1.hit': 3,
# 'dx2.dmg': 1.20,
    # 'dx2.startup': 13 / 60.0 - 0.03333,  # = 0.18334, c2 frames
# 'dx2.hit': 3,
# 'dx3.dmg': 1.20,
    # 'dx3.startup': 14 / 60.0 - 0.03333,  # = 0.20000, c3 frames
# 'dx3.hit': 3,
# 'dx4.dmg': 1.20,
# 'dx4.startup': 14 / 60.0, # c4 frames
# 'dx4.hit': 3,
# 'dx5.dmg': 1.20,
# 'dx5.startup': 14 / 60.0, # c5 frames
# 'dx5.recovery': 23 / 60.0, # recovery
# 'dx5.hit': 3,
# 'ds.dmg': 6.00,
# 'ds.recovery': 113 / 60, # skill frames
# 'ds.hit': 5,
# 'dodge.startup': 45 / 60.0, # dodge frames
# }
# def ds_proc(self):
# return self.dmg_make('ds',self.dragonform.conf.ds.dmg,'s')
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.a_s_dict['s1'] = Skill_Ammo('s1')
self.a_s_dict['s2'] = Skill_Ammo('s2')
def prerun(self):
self.leaf = 2 # number of hits per leaf rotation
self.s1.charge_ammo(2000)
self.s2.charge_ammo(4000)
@property
def skills(self):
return self.s3, self.s4
def hitattr_make(self, name, base, group, aseq, attr, onhit=None):
ammo = attr.get('ammo', 0)
if ammo > 0:
for s in (self.s1, self.s2):
s.charge_ammo(ammo)
elif ammo < 0:
s = self.s1 if group == 'metalblade' else self.s2
s.charge_ammo(ammo)
if s.c_ammo <= 0:
self.current_x = 'default'
if ammo != 0:
log('ammo', name, ammo, ' '.join(f'{s.c_ammo}/{s.ammo}' for s in (self.s1, self.s2)))
super().hitattr_make(name, base, group, aseq, attr, onhit=None)
def s1_proc(self, e):
if self.current_x != 'metalblade':
self.current_x = 'metalblade'
else:
self.current_x = 'default'
def s2_proc(self, e):
if self.current_x != 'leafshield':
self.current_x = 'leafshield'
else:
self.current_x = 'default'
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| 3,623 |
votes/frontend.py
|
estan/votes
| 1 |
2022742
|
from argparse import ArgumentParser
from sys import argv
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.wamp.types import SessionDetails
from autobahn.wamp.types import CloseDetails
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
import qt5reactor
from votes.ui.votes_window_ui import Ui_VotesWindow
class VotesSession(QObject, ApplicationSession):
"""Votes WAMP application session.
Simply bridges the Autobahn join and leave signals to Qt signals.
"""
joinedSession = pyqtSignal(SessionDetails)
leftSession = pyqtSignal(CloseDetails)
def __init__(self, config=None, parent=None):
QObject.__init__(self, parent)
ApplicationSession.__init__(self, config)
def onJoin(self, details):
self.joinedSession.emit(details)
def onLeave(self, details):
self.leftSession.emit(details)
class VotesWindow(QMainWindow, Ui_VotesWindow):
"""Main window of the votes demo."""
closed = pyqtSignal() # Emitted when window is closed.
def __init__(self, url, realm, parent=None):
super(VotesWindow, self).__init__(parent)
self.setupUi(self)
self.url = url
self.realm = realm
self.session = None
self.votes = {
'Banana': self.bananaVotes,
'Chocolate': self.chocolateVotes,
'Lemon': self.lemonVotes
}
# Factory method for ApplicationRunner.run(..)
def make(config):
self.session = VotesSession(config)
self.session.joinedSession.connect(self.onJoinedSession)
self.session.leftSession.connect(self.onLeftSession)
return self.session
runner = ApplicationRunner(url, realm)
runner.run(make, start_reactor=False)
def onJoinedSession(self):
self.setEnabled(True)
self.session.subscribe(self.onVoteMessage, u'io.crossbar.demo.vote.onvote')
self.session.subscribe(self.onResetMessage, u'io.crossbar.demo.vote.onreset')
self.statusBar().showMessage('Connected to realm {} at {}'
.format(self.realm, self.url))
def onLeftSession(self):
print('leave')
def onVoteMessage(self, result):
self.votes[result[u'subject']].setText(str(result[u'votes']))
def onResetMessage(self):
self.bananaVotes.setText('0')
self.chocolateVotes.setText('0')
self.lemonVotes.setText('0')
def closeEvent(self, event):
self.session.leave()
self.closed.emit()
event.accept()
@pyqtSlot()
def on_resetButton_clicked(self):
self.session.call(u'io.crossbar.demo.vote.reset')
@pyqtSlot()
def on_bananaButton_clicked(self):
self.session.call(u'io.crossbar.demo.vote.vote', 'Banana')
@pyqtSlot()
def on_chocolateButton_clicked(self):
self.session.call(u'io.crossbar.demo.vote.vote', 'Chocolate')
@pyqtSlot()
def on_lemonButton_clicked(self):
self.session.call(u'io.crossbar.demo.vote.vote', 'Lemon')
def main():
    parser = ArgumentParser(description='PyQt version of Crossbar Votes demo.')
parser.add_argument('--url',
                        type=str,
default=u'ws://127.0.0.1:8080/ws',
metavar='<url>',
help='WAMP router URL (default: ws://127.0.0.1:8080/ws).')
args = parser.parse_args()
app = QApplication(argv)
qt5reactor.install()
from twisted.internet import reactor
def quit():
if reactor.threadpool is not None:
reactor.threadpool.stop()
app.quit()
window = VotesWindow(args.url, u'crossbardemo')
window.closed.connect(quit)
window.show()
reactor.run()
if __name__ == '__main__':
main()
| 3,991 |
tests/test_ethosdistro_py.py
|
CoryKrol/ethosdistro_py
| 0 |
2023909
|
import aiohttp
import json
from ethosdistro_py import EthosAPI
import pytest
from aioresponses import aioresponses
NOT_ALL_KEYS_PRESENT = "All keys should be in the response"
CONTENT_HEADERS = {"Content-Type": "text/html"}
@pytest.mark.asyncio
async def test_get_panel(get_panel_keys, get_panel_response):
"""Tests an API call to get block count data for a panel_id"""
session = aiohttp.ClientSession()
ethosapi = EthosAPI(session=session)
assert ethosapi.panel_id_set() is True
with aioresponses() as m:
m.get(
"http://test.ethosdistro.com/?json=yes",
status=200,
body=json.dumps(get_panel_response),
headers=CONTENT_HEADERS,
)
result = await ethosapi.async_get_panel()
assert isinstance(result, dict)
assert set(get_panel_keys).issubset(result.keys()), NOT_ALL_KEYS_PRESENT
await session.close()
| 916 |
class1/exercise10.py
|
SamerLabban/Network_Automation_Course
| 1 |
2022688
|
from ciscoconfparse import CiscoConfParse
#open the cisco file and store it in a variable
cisco_cfg = CiscoConfParse("cisco_ipsec.txt")
#find parent lines in our confparse object (cisco_cfg) that begin with "crypto map CRYPTO" and do NOT have a child line matching "AES"
intf = cisco_cfg.find_objects_wo_child(parentspec = r"^crypto map CRYPTO", childspec = r"AES")
#Get all children (children and grandchildren) elements
for i in intf:
print i
for child in i.all_children:
print child.text
print "\n"
| 479 |
VirSecCon 2020/old_monitor/rsa.py
|
0xShad3/cybersec-writeups
| 10 |
2022685
|
import gmpy
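# Hastad's broadcast attack: the same message encrypted with e = 3 under three
# coprime moduli lets us recover m**3 via the Chinese Remainder Theorem and then
# take an exact integer cube root, since m**3 is smaller than n1*n2*n3 here.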
e = 3
n1 = 7156756869076785933541721538001332468058823716463367176522928415602207483494410804148006276542112924303341451770810669016327730854877940615498537882480613
n2 = 11836621785229749981615163446485056779734671669107550651518896061047640407932488359788368655821120768954153926193557467079978964149306743349885823110789383
n3 = 7860042756393802290666610238184735974292004010562137537294207072770895340863879606654646472733984175066809691749398560891393841950513254137326295011918329
c1 = 816151508695124692025633485671582530587173533405103918082547285368266333808269829205740958345854863854731967136976590635352281190694769505260562565301138
c2 = 8998140232866629819387815907247927277743959734393727442896220493056828525538465067439667506161727590154084150282859497318610746474659806170461730118307571
c3 = 3488305941609131204120284497226034329328885177230154449259214328225710811259179072441462596230940261693534332200171468394304414412261146069175272094960414
N = n1 * n2 * n3
N1 = N/n1
N2 = N/n2
N3 = N/n3
u1 = gmpy.invert(N1,n1)
u2 = gmpy.invert(N2,n2)
u3 = gmpy.invert(N3,n3)
M = (c1*u1*N1 + c2*u2*N2 + c3*u3*N3) % N
m = gmpy.root(M,e)[0]
print hex(m)[2:].rstrip("L").decode("hex")
| 1,216 |
main.py
|
PaTinLei/MHFC-FSL
| 0 |
2022825
|
import math
import os
from copy import deepcopy
from scipy.linalg import svd
import numpy as np
from tqdm import tqdm
import scipy.io as scio
import scipy.sparse
from config import config
from models.HyperG import HyperG
import sklearn
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets import CategoriesSampler, DataSet
from utils import get_embedding, mean_confidence_interval, setup_seed
def initial_embed(reduce, d):
reduce = reduce.lower()
assert reduce in ['isomap', 'itsa', 'mds', 'lle', 'se', 'pca', 'none']
if reduce == 'isomap':
from sklearn.manifold import Isomap
embed = Isomap(n_components=d)
elif reduce == 'itsa':
from sklearn.manifold import LocallyLinearEmbedding
embed = LocallyLinearEmbedding(n_components=d,
n_neighbors=5, method='ltsa')
elif reduce == 'mds':
from sklearn.manifold import MDS
embed = MDS(n_components=d, metric=False)
elif reduce == 'lle':
from sklearn.manifold import LocallyLinearEmbedding
embed = LocallyLinearEmbedding(n_components=d, n_neighbors=5,eigen_solver='dense')
elif reduce == 'se':
from sklearn.manifold import SpectralEmbedding
embed = SpectralEmbedding(n_components=d)
elif reduce == 'pca':
from sklearn.decomposition import PCA
embed = PCA(n_components=d,random_state=0)
return embed
def test(args):
setup_seed(23)
import warnings
warnings.filterwarnings('ignore')
if args.dataset == 'miniimagenet':
num_classes = 64
elif args.dataset == 'tieredimagenet':
num_classes = 351
elif args.dataset == 'cifar':
num_classes = 64
elif args.dataset == 'fc100':
num_classes = 60
if args.resume is not None:
from models.resnet12 import resnet12
model = resnet12(num_classes).to(args.device)
state_dict = torch.load(args.resume)
model.load_state_dict(state_dict)
from models.r_resnet12 import r_resnet12
r_model = r_resnet12(num_classes).to(args.device)
r_state_dict = torch.load(args.r_resume)
r_model.load_state_dict(r_state_dict)
model.to(args.device)
model.eval()
r_model.to(args.device)
r_model.eval()
if args.dataset == 'miniimagenet':
data_root = os.path.join(args.folder, '/home/wfliu/xdd_xr/LaplacianShot-master-org/LaplacianShot-master/data/')
elif args.dataset == 'tieredimagenet':
data_root = '/home/tieredimagenet'
elif args.dataset == 'cifar':
data_root = '/home/cifar'
elif args.dataset == 'fc100':
data_root = '/home/fc100'
else:
print("error!!!!!!!!!!")
hyperG = HyperG(num_class=args.num_test_ways,step=args.step, reduce=args.embed, d=args.dim)
dataset = DataSet(data_root, 'test', args.img_size)
sampler = CategoriesSampler(dataset.label, args.num_batches,
args.num_test_ways, (args.num_shots, 15, args.unlabel))
testloader = DataLoader(dataset, batch_sampler=sampler,
shuffle=False, num_workers=0, pin_memory=True)
k = args.num_shots * args.num_test_ways
loader = tqdm(testloader, ncols=0)
if(args.unlabel==0):
iterations = 22
else:
iterations = args.unlabel+2+5
acc_list = [[] for _ in range(iterations)]
acc_list_task = [[] for _ in range(iterations)]
acc_list_softmax = [[] for _ in range(iterations)]
for data, indicator in loader:
targets = torch.arange(args.num_test_ways).repeat(args.num_shots+15+args.unlabel).long()[
indicator[:args.num_test_ways*(args.num_shots+15+args.unlabel)] != 0]
data = data[indicator != 0].to(args.device)
data_r = get_embedding(r_model, data, args.device)
data_x = get_embedding(model, data, args.device)
if args.dim != 512:
if args.unlabel != 0:
data_train1 = np.concatenate((data_r[:k], data_r[k+15*args.num_test_ways:k+15*args.num_test_ways+args.unlabel*args.num_test_ways]), axis=0)
data_train2 = np.concatenate((data_x[:k], data_x[k+15*args.num_test_ways:k+15*args.num_test_ways+args.unlabel*args.num_test_ways]), axis=0)
data_train = np.concatenate((data_train1, data_train2), axis=0)
embed_data = initial_embed(args.embed, args.dim)
embed_fit = embed_data.fit(data_train)
data_r = embed_data.transform(data_r[:k+15*args.num_test_ways+args.unlabel*args.num_test_ways])
data_x = embed_data.transform(data_x[:k+15*args.num_test_ways+args.unlabel*args.num_test_ways])
else:
data_train1 = np.concatenate((data_r[:k], data_r[k:k+15*args.num_test_ways]), axis=0)
data_train2 = np.concatenate((data_x[:k], data_x[k:k+15*args.num_test_ways]), axis=0)
data_train = np.concatenate((data_train1, data_train2), axis=0)
embed_data = initial_embed(args.embed, args.dim)
embed_fit = embed_data.fit(data_train)
data_r = embed_data.transform(data_train1)
data_x = embed_data.transform(data_train2)
data_r_concat = np.concatenate((data_r, data_x), axis=1)
train_targets = targets[:k]
test_targets = targets[k:k+15*args.num_test_ways]
train_embeddings_task = data_r_concat[:k]
test_embeddings_task = data_r_concat[k:k+15*args.num_test_ways]
if args.unlabel != 0:
unlabel_embeddings_task = data_r_concat[k+15*args.num_test_ways:k+15*args.num_test_ways+args.unlabel*args.num_test_ways]
else:
unlabel_embeddings_task = None
hyperG.fit(train_embeddings_task, train_targets)
acc = hyperG.predict(test_embeddings_task,unlabel_embeddings_task, True, test_targets,args.eta)
for i in range(len(acc)):
acc_list[i].append(acc[i])
cal_accuracy(acc_list)
def cal_accuracy(acc_list_task):
mean_list_task = []
ci_list_task = []
for item in acc_list_task:
mean, ci = mean_confidence_interval(item)
mean_list_task.append(mean)
ci_list_task.append(ci)
print("Test Acc Mean_task{}".format(
' '.join([str(i*100)[:6] for i in mean_list_task])))
print("Test Acc ci_task{}".format(' '.join([str(i*100)[:6] for i in ci_list_task])))
def main(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(args)
if args.mode == 'test':
test(args)
else:
raise NameError
if __name__ == '__main__':
args = config()
main(args)
| 6,884 |
datatoaster/datatoaster.py
|
abc612008/datatoaster
| 0 |
2023483
|
import collections
""" constants """
XValue = lambda _: ""
Single = lambda _: ""
class DataSet:
def NumberOfAppearance(self, key_function):
self.number_of_appearance = True
if key_function is XValue:
self.single_dict = True
def yfunc(li):
os_list = {}
for i in li:
key = key_function(i)
os_list[key] = os_list.get(key, 0) + 1
return os_list
return yfunc
def Percentage(self, key_function):
self.percentage = True
if key_function is XValue:
self.single_dict = True
def yfunc(li):
os_list = {}
for i in li:
key = key_function(i)
os_list[key] = os_list.get(key, 0) + 1
return os_list
return yfunc
def PercentageWithinGroup(self, key_function):
self.percentage_within_group = True
if key_function is XValue:
self.single_dict = True
def yfunc(li):
os_list = {}
for i in li:
key = key_function(i)
os_list[key] = os_list.get(key, 0) + 1
return os_list
return yfunc
def __init__(self, raw_data):
self.raw_data = raw_data
self.x_function = None
self.y_function = None
self.number_of_appearance = False
self.percentage = False
self.percentage_within_group = False
self.single_dict = False
self.constraints = []
self.pre_constraints = []
self.single = False
self.order_key = None
def set_x(self, func):
if not callable(func):
raise ValueError("Expect the argument to be a function.")
self.x_function = func
return self
def set_y(self, param):
if not callable(param):
raise ValueError("Expect the argument to be a function.")
self.y_function = param
return self
def add_constraint(self, constraint, is_pre=False):
if not callable(constraint):
raise ValueError("Expect the argument to be a function.")
if is_pre:
self.pre_constraints.append(constraint)
else:
self.constraints.append(constraint)
return self
def set_single(self, param):
self.single = param
return self
def ordered_by(self, order_key):
if not callable(order_key):
raise ValueError("Expect the argument to be a function.")
self.order_key = order_key
return self
def get_result(self):
def process_result(result):
if self.single_dict:
for key in result.keys():
result[key] = result[key][""]
if self.single:
if len(result) != 1:
raise ValueError("Single mode set while there are more than one result. "
"Results: " + str(result))
return next(iter(result.values()))
else:
if self.order_key is not None:
return collections.OrderedDict(sorted(result.items(), key=self.order_key))
else:
return result
if self.x_function is None: # x_function should not be None
raise ValueError("set_x not called when calling get_result")
filtered_data = [] # data that passed all constraints
number_of_valid_data = 0 # save the total unfiltered number for percentage
all_appearance = {} # save the unfiltered number per group for percentage_within_group
for item in self.raw_data:
pass_constraints = True
for pre_constraint in self.pre_constraints: # pre constraints
if not pre_constraint(item):
pass_constraints = False
break
if not pass_constraints:
continue
number_of_valid_data += 1
for constraint in self.constraints: # constraints
if not constraint(item):
pass_constraints = False
break
if pass_constraints:
filtered_data.append(item)
if self.percentage_within_group: # for percentage within group
key = self.x_function(item)
all_appearance[key] = all_appearance.get(key, 0) + 1
# handle y_function
if self.y_function:
values = {}
for item in filtered_data:
key = self.x_function(item)
if key in values:
values[key].append(item)
else:
values[key] = [item]
for key, value in values.items():
values[key] = self.y_function(value)
if self.percentage:
for k in values[key].keys():
values[key][k] /= number_of_valid_data
elif self.percentage_within_group:
for k in values[key].keys():
values[key][k] /= all_appearance[key]
return process_result(values)
raise ValueError("set_y not called when calling get_result")
| 5,289 |
requests/requests_user_agent.py
|
BoogalooLi/python_spiders
| 1 |
2024020
|
import requests
# 定义请求的url
# url = 'https://www.lmonkey.com'
url = 'https://www.xicidaili.com/nn'
# 定义请求头信息
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'
}
# 发起get请求
res = requests.get(url=url, headers=headers)
# 获取响应状态码
code = res.status_code
print(code) # 503 服务器内部拒绝请求
# On success, write the response body to a file
if code == 200:
with open('./test.html', 'w') as fp:
fp.write(res.text)
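# A hedged variant (sketch): a Session reuses the underlying connection and
# applies the same headers to every request made through it:
# session = requests.Session()
# session.headers.update(headers)
# res = session.get(url)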
| 438 |
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_sql_1.py
|
pbarton666/virtual_classroom
| 0 |
2023927
|
#py_sql_1.py
#import the python/sqlite3 connector;
#...there are others for postgresql, mysql, etc.
import sqlite3
#create a connection object (on other RDBMSs you'd provide credentials, too)
conn = sqlite3.connect('mydb')
#creates a cursor object
curs = conn.cursor()
#SQL is case-insensitive, but most people use CAPS for keywords
#Here, we get rid of the table 'dogs' (IF EXISTS prevents a crash)
cmd = "DROP TABLE IF EXISTS dogs"
curs.execute(cmd) #this runs the SQL command
conn.commit() #... and this locks in the changes
#Build a new table's metadata (framework)
cmd = """CREATE TABLE dogs (name CHAR(10),
toy CHAR(10),
weight INT(4))"""
print(cmd)
curs.execute(cmd)
#add a row
cmd = "INSERT INTO dogs ('name', 'toy', 'weight') VALUES (?, ?, ?)"
vals= ('Fang', 'bone', 90)
curs.execute(cmd, vals)
#get some results
cmd = "SELECT * from {}".format('dogs')
print(cmd)
curs.execute(cmd)
result=curs.fetchall()
print(result)
#... and print them out (if there are any)
if result:
print("congrats, you've got some dawgs")
for row in result:
name, toy, weight=row
print(name, toy, weight)
#Here's an alternative way to insert rows
curs.executemany('INSERT INTO dogs VALUES(?,?,?)',
                 [('Biscuit', 'towel', 70),
                  ('Snoopy', 'squirrel', 60)
                  ]
                 )
#It may make sense to create names for the table and its columns
cols=('name', 'toy','weight')
tname='dogs'
val_tuple=("Fluffy", "sock", "25")
cmd = "INSERT INTO {} ({}) VALUES (?, ?, ?)".format(tname, ", ".join(cols))
curs.execute(cmd, val_tuple)
print()
#with names we can simply recycle them
def print_rows():
"a utility function you may want to keep"
cmd = "SELECT * from {}".format(tname)
print(cmd)
curs.execute(cmd)
result=curs.fetchall()
if result:
for r in result:
nice_output=''
for label, res in zip(cols, r):
nice_output+="{:>10} = {:<10}".format(label, res)
print (nice_output)
print_rows()
#Getting column names from the database
curs.execute(cmd)
for ix, name in enumerate(curs.description):
print("column {} is called {}".format(ix, name[0]))
#Figure out how many rows in the table
cmd="SELECT COUNT(*) FROM {}".format(tname)
curs.execute(cmd)
result=curs.fetchone()
number_of_rows, = result
print("Awesome, we've captured {} rows.".format (number_of_rows))
print()
#Retrieving information
#
#Ask for everything:
curs.execute('SELECT * FROM dogs')
#You can get however many results using fetchone(), fetchall() or fetchmany()
while True:
row = curs.fetchone()
if not row:
break
print(row)
print('*'*20)
curs.execute('SELECT * FROM dogs')
while True:
row = curs.fetchmany(2)
if not row:
break
print(row)
print('*'*20)
#You can make queries as complex/fancy as you want
cmd = 'SELECT name, weight FROM dogs WHERE weight >= 60'
print(cmd)
curs.execute(cmd)
print(curs.fetchall())
#... and order the results
cmd = 'SELECT name, weight FROM dogs WHERE weight >= 60 ORDER BY name'
print(cmd)
curs.execute(cmd)
for row in curs.fetchall():
    print(row)
#updates
print()
cmd="UPDATE {} SET weight=? WHERE name='Snoopy'".format(tname)
weight=(666,)
curs.execute(cmd, weight)
cmd="SELECT * FROM {} WHERE name='Snoopy'".format(tname)
print(cmd)
curs.execute(cmd)
result=curs.fetchone()
print(result)
#deletions
cmd= "DELETE FROM {} WHERE toy = ? ".format(tname)
toy = ('sock',)
curs.execute(cmd, toy)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()
cmd= "DELETE FROM {} WHERE toy LIKE ?".format(tname)
toy_selector = ('%el',)
curs.execute(cmd, toy_selector)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()
cmd= "DELETE FROM {}".format(tname)
curs.execute(cmd)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()
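#Lock in all remaining changes and release the connection
conn.commit()
conn.close()
# A hedged alternative (sketch): the connection also works as a context manager,
# committing or rolling back each transaction automatically (it does not close):
# with sqlite3.connect('mydb') as conn:
#     conn.execute("INSERT INTO dogs VALUES (?, ?, ?)", ('Rex', 'ball', 40))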
| 4,049 |
ServerML/heatmap.py
|
SmallPlanetiOS/smallplanet_Pinball
| 13 |
2023600
|
from __future__ import division
from PIL import Image
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, array_to_img
import sys
import train
import model
import images
import imageio
def ExportAnimatedHeatmapForAllImages(outputPath):
    frames = []  # a local name distinct from the imported 'images' module
    savedTrainingRunNumber = train.trainingRunNumber
    maxTrainingRun = train.ConfirmTrainingNumber()
    for runNumber in range(0, maxTrainingRun):
        frames.append(imageio.imread(train.HeatmapPath(runNumber)))
    # repeat the last frame a few times so there is a pause before the GIF loops
    frames.extend(imageio.imread(train.HeatmapPath(maxTrainingRun - 1)) for _ in range(4))
    imageio.mimsave(outputPath, frames, duration=0.5)
    train.trainingRunNumber = savedTrainingRunNumber
def ExportHeatmapForModel(runNumber, outputPath):
# 0. Load the base image
baseImg = Image.open('resources/heatmap_base.jpg', 'r')
img_w, img_h = baseImg.size
basePix = baseImg.load()
# 1. Load the ball image
ballImg = Image.open('resources/heatmap_ball.png', 'r')
ball_w, ball_h = ballImg.size
# 2. Create the scratch image
scratchImg = Image.new('RGB', (img_w, img_h), (255, 255, 255, 255))
# 3. Create the heat map
heatmapImg = Image.new('RGB', (img_w//2, img_h), (255, 255, 255, 255))
heatmapPix = heatmapImg.load()
# 4. load the model
cnn_model = model.cnn_model()
cnn_model.load_weights(train.ModelWeightsPath(runNumber+1))
# 5. prepare a numpy img to send to our model
scratchNP = np.zeros((1, img_h, img_w, 3), dtype='float32')
print("Generating heatmap:")
for x in range(0,img_w//2):
sys.stdout.write('.')
sys.stdout.flush()
for y in range(0,img_h):
scratchImg.paste(baseImg, (0,0))
scratchImg.paste(ballImg, (x-ball_w//2,y-ball_h//2), ballImg)
scratchImg.paste(ballImg, (x-ball_w//2 + img_w//2 + 5,y-ball_h//2), ballImg)
np.copyto(scratchNP[0],img_to_array(scratchImg))
predictions = cnn_model.predict(scratchNP)
pred_left = predictions[0][0]
pred_right = predictions[0][1]
#heatmapPix[x,y] = ( int(basePix[x,y][0] * 0.4 + pred_left*153.0), int(basePix[x,y][1] * 0.4 + pred_right*153.0), 0)
heatmapPix[x,y] = (int(pred_left*255.0), int(pred_right*255.0), 0)
print('done')
    heatmapImg = heatmapImg.resize((heatmapImg.size[0]*6, heatmapImg.size[1]*6), Image.ANTIALIAS)  # Image.ANTIALIAS was renamed Image.LANCZOS in Pillow >= 10
# overlay the run number on the image
r = int(runNumber)
x = heatmapImg.size[0]
while r >= 0:
n = r % 10
r = r // 10
numImg = Image.open('resources/num{}.png'.format(n), 'r')
x -= numImg.size[0]
        # pasted repeatedly, presumably to build up the digit's opacity through its alpha mask
        heatmapImg.paste(numImg, (x, heatmapImg.size[1]-numImg.size[1]), numImg)
        heatmapImg.paste(numImg, (x, heatmapImg.size[1]-numImg.size[1]), numImg)
        heatmapImg.paste(numImg, (x, heatmapImg.size[1]-numImg.size[1]), numImg)
if r == 0:
break
heatmapImg.save(outputPath)
#maxTrainingRun = train.ConfirmTrainingNumber()
#for i in range(0,maxTrainingRun-1):
# ExportHeatmapForModel(i, 'heatmap_{}.png'.format(i))
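# Example driver (sketch, mirroring the commented-out loop above):
# ExportHeatmapForModel(0, 'heatmap_0.png')
# ExportAnimatedHeatmapForAllImages('heatmap_all.gif')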
| 3,558 |
swagger_server/test/operational_controllers/test_users_with_roles_for_site.py
|
hedleyroos/core-access-control
| 0 |
2023111
|
import json
import uuid
from ge_core_shared import db_actions, decorators
from project.settings import API_KEY_HEADER
from swagger_server.test import BaseTestCase, db_create_entry
from swagger_server.models.domain import Domain
from swagger_server.models.domain_role import DomainRole
from swagger_server.models.role import Role
from swagger_server.models.site import Site
from swagger_server.models.site_create import SiteCreate
from swagger_server.models.site_role import SiteRole
from swagger_server.models.user_domain_role import UserDomainRole
from swagger_server.models.user_site_role import UserSiteRole
ROLES = [
{
"label": ("%s" % uuid.uuid1())[:30],
"description": "Role to view"
},
{
"label": ("%s" % uuid.uuid1())[:30],
"description": "Role to create",
},
{
"label": ("%s" % uuid.uuid1())[:30],
"description": "Role to update"
},
{
"label": ("%s" % uuid.uuid1())[:30],
"description": "Role to delete",
}
]
class TestUsersWithRolesForSite(BaseTestCase):
@decorators.db_exception
def setUp(self):
super().setUp()
# Parent Domain
self.domain_parent_data = {
"name": ("%s" % uuid.uuid1())[:30],
"description": "The Root Domain",
}
self.domain_parent_model = db_actions.crud(
model="Domain",
api_model=Domain,
data=self.domain_parent_data,
action="create"
)
# Child Domain
self.domain_child_data = {
"name": ("%s" % uuid.uuid1())[:30],
"description": "The Child Domain",
"parent_id": self.domain_parent_model.id
}
self.domain_child_model = db_actions.crud(
model="Domain",
api_model=Domain,
data=self.domain_child_data,
action="create"
)
# Site Child
self.site_data = {
"name": ("%s" % uuid.uuid1())[:30],
"domain_id": self.domain_child_model.id,
"description": "A Site",
"client_id": 1,
"is_active": True,
}
self.site = db_create_entry(
model="Site",
data=self.site_data,
)
# Create some roles.
self.roles = []
for role in ROLES:
role_model = db_actions.crud(
model="Role",
api_model=Role,
data=role,
action="create"
)
self.roles.append(role_model)
# Some users as well.
self.user_id_1 = "%s" % uuid.uuid1()
self.user_id_2 = "%s" % uuid.uuid1()
for role in self.roles:
domain_role_data = {
"domain_id": self.domain_parent_model.id,
"role_id": role.id,
"grant_implicitly": "view" in role.description
}
db_actions.crud(
model="DomainRole",
api_model=DomainRole,
data=domain_role_data,
action="create"
)
if not domain_role_data["grant_implicitly"]:
user_domain_role_data = {
"user_id": self.user_id_1,
"domain_id": self.domain_parent_model.id,
"role_id": role.id
}
db_actions.crud(
model="UserDomainRole",
api_model=UserDomainRole,
data=user_domain_role_data,
action="create"
)
domain_role_data = {
"domain_id": self.domain_child_model.id,
"role_id": role.id,
"grant_implicitly": "view" in role.description
}
db_actions.crud(
model="DomainRole",
api_model=DomainRole,
data=domain_role_data,
action="create"
)
if "create" in role.description:
user_domain_role_data = {
"user_id": self.user_id_2,
"domain_id": self.domain_child_model.id,
"role_id": role.id
}
db_actions.crud(
model="UserDomainRole",
api_model=UserDomainRole,
data=user_domain_role_data,
action="create"
)
site_role_data = {
"site_id": self.site.id,
"role_id": role.id,
"grant_implicitly": "view" in role.description
}
db_actions.crud(
model="SiteRole",
api_model=SiteRole,
data=site_role_data,
action="create"
)
if "update" in role.description:
user_site_role_data = {
"user_id": self.user_id_2,
"site_id": self.site.id,
"role_id": role.id
}
db_actions.crud(
model="UserSiteRole",
api_model=UserSiteRole,
data=user_site_role_data,
action="create"
)
self.headers = {API_KEY_HEADER: "test-api-key"}
def test_get_users_with_roles_for_site(self):
"""Test case for get_users_with_roles_for_site
"""
response = self.client.open(
"/api/v1/ops/users_with_roles_for_site/{site_id}".format(
site_id=self.site.id
), method='GET', headers=self.headers)
r_data = json.loads(response.data)
        self.assertEqual(len(r_data), 2)
        for user in r_data:
            # user_1 carries the three explicit domain roles (create/update/delete);
            # user_2 carries one domain role (create) plus one site role (update).
            self.assertEqual(
                len(user["role_ids"]),
                3 if user["user_id"] == self.user_id_1 else 2
            )
if __name__ == '__main__':
import unittest
unittest.main()
| 5,981 |
6 kyu/Jungersteins Math Training Room 1 How many zeros are at the end of n.py
|
mwk0408/codewars_solutions
| 6 |
2022654
|
def count_zeros_n_double_fact(n):
    # Odd double factorials contain no factor of 2, hence no trailing zeros.
    if n % 2 != 0:
        return 0
    # For even n, n!! = 2**(n//2) * (n//2)!, so trailing zeros are set by the
    # factors of 5 in (n//2)!: n//10 + n//50 + n//250 + ...
    multiply = 10
    total = 0
    while multiply <= n:  # '<=' so n itself (e.g. n == 10) is counted
        total += n // multiply
        multiply *= 5
    return total
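# Quick sanity checks (sketch):
# count_zeros_n_double_fact(10) == 1   # 10!! = 3840
# count_zeros_n_double_fact(30) == 3   # 30!! = 2**15 * 15!, and 15! has three factors of 5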
| 180 |
django_private_chat2/admin.py
|
sidarun88/django_private_chat2
| 150 |
2022778
|
# -*- coding: utf-8 -*-
from django.contrib.admin import ModelAdmin, site
from .models import MessageModel, DialogsModel
class MessageModelAdmin(ModelAdmin):
readonly_fields = ('created', 'modified',)
search_fields = ('id', 'text', 'sender__pk', 'recipient__pk')
list_display = ('id', 'sender', 'recipient', 'text', 'file', 'read')
list_display_links = ('id',)
list_filter = ('sender', 'recipient')
date_hierarchy = 'created'
class DialogsModelAdmin(ModelAdmin):
readonly_fields = ('created', 'modified',)
search_fields = ('id', 'user1__pk', 'user2__pk')
list_display = ('id', 'user1', 'user2')
list_display_links = ('id',)
date_hierarchy = 'created'
site.register(DialogsModel, DialogsModelAdmin)
site.register(MessageModel, MessageModelAdmin)
| 796 |
cam-server/camera.py
|
ckauth/swissless
| 0 |
2022875
|
from picamera import PiCamera
from time import sleep
camera = PiCamera()
camera.rotation = 180
camera.start_preview()
for x in range(30): #capture for 1 min (30 shots, 2 s apart)
sleep(2)
camera.capture('images/image.jpg')
camera.stop_preview()
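# A hedged alternative (sketch): picamera's capture_continuous() yields numbered
# frames without restarting the capture pipeline each time:
# for filename in camera.capture_continuous('images/image{counter:02d}.jpg'):
#     sleep(2)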
| 245 |
tools/Vitis-AI-Library/graph_runner/test/yolov4-tiny.py
|
hito0512/Vitis-AI
| 848 |
2023288
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
import hashlib
import xir
import vart
import numpy as np
def md5(np_array):
hash_md5 = hashlib.md5()
hash_md5.update(np_array)
return hash_md5.hexdigest()
g = xir.Graph.deserialize(
'/workspace/aisw/debug_models/tensorflow-yolov4-tiny-master/tensorflow-yolov4-tiny-master/compile/yolov4-tiny/yolov4-tiny.xmodel'
)
# dissection of subgraphs.
subgraphs = g.get_root_subgraph().toposort_child_subgraph()
dpu_subgraph1 = subgraphs[2]
print("dpu_subgraph1 = " + dpu_subgraph1.get_name()
) # must be subgraph_detector/yolo-v4-tiny/Conv/Conv2D
dpu_subgraph2 = subgraphs[4]
print("dpu_subgraph2 = " + dpu_subgraph2.get_name()
) # must be subgraph_detector/yolo-v4-tiny/Conv_3/Conv2D
dpu_subgraph3 = subgraphs[6]
print("dpu_subgraph3 = " + dpu_subgraph3.get_name()
) # must be subgraph_detector/yolo-v4-tiny/Conv_10/Conv2D
dpu_subgraph4 = subgraphs[8]
print("dpu_subgraph4 = " + dpu_subgraph4.get_name()
) # must be subgraph_detector/yolo-v4-tiny/Conv_11/Conv2D
### start to run first DPU subgraph 'subgraph_detector/yolo-v4-tiny/Conv/Conv2D'
input1 = np.fromfile(
'/scratch/models/cache/golden/74/32192dbe8b0cacdf99c2112732324b',
dtype='int8')
print("md5(input1)={}".format(md5(input1))) # 7432192dbe8b0cacdf99c2112732324b
input1 = input1.reshape([1, 416, 416, 3])
output1 = np.zeros(
[1, 104, 104, 64], dtype='int8'
) # it would be better to use fixed point, which is convenient for comparison.
dpu_1 = vart.Runner.create_runner(dpu_subgraph1, "run")
job1 = dpu_1.execute_async([input1], [output1])
dpu_1.wait(job1)
print("md5(output1)={}".format(
md5(output1))) # a47ffd19dbae3b7185f48198e024736a
### start to run second DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_3/Conv2D
### note this subgraph needs two inputs.
# copy is important, otherwise we see an error like 'ndarray is not C-contiguous'
input2_0 = output1[:, :, :, 32:64].copy()
print("md5(input2_0)={}".format(
md5(input2_0))) # aa55fc2bfef038563e5a031dbddebee9
input2_1 = output1 # dpu2 needs two inputs
output2 = np.zeros(
[1, 52, 52, 128], dtype='int8'
) # it would be better to use fixed point, which is convenient for comparison.
dpu_2 = vart.Runner.create_runner(dpu_subgraph2, "run")
job2 = dpu_2.execute_async([input2_0, input2_1], [output2])
dpu_2.wait(job2)
print("md5(output2)={}".format(
md5(output2))) # 1866755506ebdb54c7f766fd530e1cc3
### start to run 3rd DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_10/Conv2D
### similar to the second subgraph.
input3_0 = output2[:, :, :, 64:128].copy()
print("md5(input3_0)={}".format(
md5(input3_0))) # 9fe461a5deb61f09210bb4ac415ec8b7
input3_1 = output2 # dpu3 needs two inputs
output3 = np.zeros(
[1, 26, 26, 256], dtype='int8'
) # it would be better to use fixed point, which is convenient for comparison.
dpu_3 = vart.Runner.create_runner(dpu_subgraph3, "run")
print("dpu_3.get_input_tensors()={}".format(dpu_3.get_input_tensors()))
# note: the input tensors do not have stable order, we must be careful to match the order of inputs.
job3 = dpu_3.execute_async([input3_1, input3_0], [output3])
dpu_3.wait(job3)
print("md5(output3)={}".format(
md5(output3))) # 4efe5a9bf47ce2bd861632ec1a535b34
### start to run 4th DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_11/Conv2D
input4_0 = output3[:, :, :, 128:256].copy()
print("md5(input4_0)={}".format(
md5(input4_0))) # b4eb64306980a99f951ae2396edc08e4
input4_1 = output3 # dpu4 needs two inputs
output4 = np.zeros(
[1, 26, 26, 255], dtype='int8'
) # it would be better to use fixed point, which is convenient for comparison.
dpu_4 = vart.Runner.create_runner(dpu_subgraph4, "run")
print("dpu_4.get_input_tensors()={}".format(dpu_4.get_input_tensors()))
# note: the input tensors do not have stable order, we must be careful to match the order of inputs.
job4 = dpu_4.execute_async([input4_1, input4_0], [output4])
dpu_4.wait(job4)
print("md5(output4)={}".format(
md5(output4))) # 17eb158cbb6c978bb75445c3002998fb
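# A hedged convenience (sketch): assert each stage's digest against the expected
# values noted in the comments above, so any numerical drift fails loudly:
# expected = {'output1': 'a47ffd19dbae3b7185f48198e024736a',
#             'output4': '17eb158cbb6c978bb75445c3002998fb'}
# assert md5(output1) == expected['output1']
# assert md5(output4) == expected['output4']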
| 4,549 |
google/cloud/osconfig/agentendpoint/v1beta/osconfig-agentendpoint-v1beta-py/google/cloud/osconfig/agentendpoint_v1beta/types/__init__.py
|
googleapis/googleapis-gen
| 7 |
2023993
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .agentendpoint import (
ReceiveTaskNotificationRequest,
ReceiveTaskNotificationResponse,
RegisterAgentRequest,
RegisterAgentResponse,
ReportTaskCompleteRequest,
ReportTaskCompleteResponse,
ReportTaskProgressRequest,
ReportTaskProgressResponse,
StartNextTaskRequest,
StartNextTaskResponse,
)
from .guest_policies import (
AptRepository,
EffectiveGuestPolicy,
GooRepository,
LookupEffectiveGuestPolicyRequest,
Package,
PackageRepository,
SoftwareRecipe,
YumRepository,
ZypperRepository,
DesiredState,
)
from .patch_jobs import (
AptSettings,
ExecStep,
ExecStepConfig,
GcsObject,
GooSettings,
PatchConfig,
RetryStrategy,
WindowsUpdateSettings,
YumSettings,
ZypperSettings,
)
from .tasks import (
ApplyPatchesTask,
ApplyPatchesTaskOutput,
ApplyPatchesTaskProgress,
ExecStepTask,
ExecStepTaskOutput,
ExecStepTaskProgress,
Task,
TaskDirective,
TaskType,
)
__all__ = (
'ReceiveTaskNotificationRequest',
'ReceiveTaskNotificationResponse',
'RegisterAgentRequest',
'RegisterAgentResponse',
'ReportTaskCompleteRequest',
'ReportTaskCompleteResponse',
'ReportTaskProgressRequest',
'ReportTaskProgressResponse',
'StartNextTaskRequest',
'StartNextTaskResponse',
'AptRepository',
'EffectiveGuestPolicy',
'GooRepository',
'LookupEffectiveGuestPolicyRequest',
'Package',
'PackageRepository',
'SoftwareRecipe',
'YumRepository',
'ZypperRepository',
'DesiredState',
'AptSettings',
'ExecStep',
'ExecStepConfig',
'GcsObject',
'GooSettings',
'PatchConfig',
'RetryStrategy',
'WindowsUpdateSettings',
'YumSettings',
'ZypperSettings',
'ApplyPatchesTask',
'ApplyPatchesTaskOutput',
'ApplyPatchesTaskProgress',
'ExecStepTask',
'ExecStepTaskOutput',
'ExecStepTaskProgress',
'Task',
'TaskDirective',
'TaskType',
)
| 2,601 |
ee/clickhouse/sql/sessions/average_per_period.py
|
avoajaugochukwu/posthog
| 7,409 |
2022639
|
AVERAGE_PER_PERIOD_SQL = """
SELECT
AVG(session_duration_seconds) as total,
{interval}(timestamp) as day_start
FROM
({sessions})
GROUP BY
{interval}(timestamp)
"""
| 214 |
metoffice_ec2/message.py
|
tomwhite/metoffice_ec2
| 1 |
2023774
|
import os
from typing import Dict, List
import hashlib
import json
import pandas as pd
import io
import xarray as xr
import boto3
class MetOfficeMessage:
def __init__(self, sqs_message: Dict):
"""
Args:
sqs_message: An AWS Simple Queue Service message.
"""
body_json_string = sqs_message['Body']
_check_md5(body_json_string, sqs_message['MD5OfBody'])
body_dict = json.loads(body_json_string)
self.message = json.loads(body_dict['Message'])
self.sqs_message = sqs_message
def sqs_message_sent_timestamp(self) -> pd.Timestamp:
"""Returns the time the message was sent to the queue."""
attributes = self.sqs_message['Attributes']
sent_timestamp = float(attributes['SentTimestamp']) / 1000
return pd.Timestamp.fromtimestamp(sent_timestamp)
def sqs_approx_receive_count(self) -> int:
"""Returns the approx number of times a message has been received from
the queue but not deleted."""
attributes = self.sqs_message['Attributes']
return int(attributes['ApproximateReceiveCount'])
    def is_multi_level(self):
        # multiple height levels arrive as a space-separated string
        return 'height' in self.message and ' ' in self.message['height']
def is_wanted(
self, nwp_params: List[str], max_receive_count: int=10) -> bool:
"""Returns True if this message describes an NWP we want.
Args:
nwp_params: The Numerical Weather Prediction parameters we want.
max_receive_count: If this message has been received more than
`max_receive_count` times, then we don't want this message.
"""
var_name = self.message['name']
is_multi_level = self.is_multi_level()
approx_receive_count = self.sqs_approx_receive_count()
return (
var_name in nwp_params and
is_multi_level and
approx_receive_count < max_receive_count)
def source_url(self) -> str:
source_bucket = self.message['bucket']
source_key = self.message['key']
return os.path.join(source_bucket, source_key)
def load_netcdf(self) -> xr.Dataset:
boto_s3 = boto3.client('s3')
get_obj_response = boto_s3.get_object(
Bucket=self.message['bucket'],
Key=self.message['key'])
netcdf_bytes = get_obj_response['Body'].read()
netcdf_bytes_io = io.BytesIO(netcdf_bytes)
return xr.open_dataset(netcdf_bytes_io, engine='h5netcdf')
def object_size_mb(self) -> float:
return self.message['object_size'] / 1E6
def __repr__(self) -> str:
string = ''
string += 'var_name={}; '.format(self.message['name'])
string += 'is_multi_level={}; '.format(self.is_multi_level())
string += 'object_size={:,.1f} MB; '.format(self.object_size_mb())
string += 'model={}; '.format(self.message['model'])
string += 'SQS_message_sent_timestamp={}; '.format(
self.sqs_message_sent_timestamp())
string += 'forecast_reference_time={}; '.format(
self.message['forecast_reference_time'])
string += 'created_time={}; '.format(self.message['created_time'])
string += 'time={}; '.format(self.message['time'])
string += 'source_url={}; '.format(self.source_url())
string += 'SQS_approx_receive_count={}; '.format(
self.sqs_approx_receive_count())
string += 'SQS_message_ID={}'.format(self.sqs_message['MessageId'])
return string
def _check_md5(text: str, md5_of_body: str):
    md5 = hashlib.md5(text.encode('utf-8'))
    if md5.hexdigest() != md5_of_body:
        raise RuntimeError('MD5 checksum does not match!')
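# Consumption sketch (hypothetical queue URL; names other than the class and
# its methods are assumptions):
# sqs = boto3.client('sqs')
# resp = sqs.receive_message(QueueUrl=QUEUE_URL, AttributeNames=['All'])
# for raw_msg in resp.get('Messages', []):
#     msg = MetOfficeMessage(raw_msg)
#     if msg.is_wanted(nwp_params=['wet_bulb_potential_temperature']):
#         dataset = msg.load_netcdf()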
| 3,720 |
problem/migrations/0014_auto_20180618_1952.py
|
d9e7381f/onlinejudge-2.0
| 0 |
2024034
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-06-18 11:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('problem', '0013_auto_20180406_1533'),
]
operations = [
migrations.RemoveField(
model_name='problem',
name='vote_downs',
),
migrations.RemoveField(
model_name='problem',
name='vote_ups',
),
migrations.AddField(
model_name='problem',
name='vote_rank_score',
field=models.FloatField(default=0.0),
),
]
| 672 |
torch_inception_resnet_v2/blocks/inception/inception_resnet.py
|
mhconradt/inception-resnet-v2
| 9 |
2023122
|
from torch import nn
from torch_inception_resnet_v2.utils.concurrent import Concurrent
"""
Defines the base of an inception ResNet block.
"""
class InceptionResNetBlock(nn.Module):
def __init__(self, scale, combination: nn.Module, *branches: nn.Module):
super().__init__()
self.scale = scale
self.combination = combination
self.branches = Concurrent()
        for branch in branches:
            self.branches.append(branch)
self.activation = nn.ReLU(inplace=True)
def forward(self, x):
output = self.branches(x)
output = self.combination(output)
output = self.scale * output + x
return self.activation(output)
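# Usage sketch (hypothetical channel sizes; assumes Concurrent concatenates the
# branch outputs along the channel dimension, and the concrete blocks wire up
# their own branches and 1x1 combination convolution):
# block = InceptionResNetBlock(
#     0.17,                              # residual scale
#     nn.Conv2d(32, 32, kernel_size=1),  # combination: concat(16 + 16) -> 32
#     nn.Conv2d(32, 16, kernel_size=1),  # branch 1
#     nn.Conv2d(32, 16, kernel_size=1),  # branch 2
# )
# y = block(torch.randn(1, 32, 17, 17))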
| 709 |
app/migrations/0007_auto_20201116_0507.py
|
michael-huber2772/portfolio-dashboard
| 0 |
2023302
|
# Generated by Django 3.1.3 on 2020-11-16 12:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20201031_1437'),
]
operations = [
migrations.CreateModel(
name='MTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
],
),
migrations.AlterField(
model_name='productprice',
name='start_date',
field=models.DateTimeField(default=datetime.datetime(2020, 11, 16, 5, 7, 53, 23971), null=True),
),
migrations.CreateModel(
name='RawMaterial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('tag', models.ManyToManyField(to='app.MTag')),
],
),
migrations.AddField(
model_name='product',
name='raw_material',
field=models.ManyToManyField(to='app.RawMaterial'),
),
]
| 1,266 |
Solutions/Problem12.py
|
sausage948/AoC2017
| 0 |
2023911
|
import re
# ------Input----- #
answer1 = 0
answer2 = 0
print("Input the adjacency list. Calculation will start on the first empty line.")
inputList = []
while True:
inputString = input("")
if inputString == "":
break
inputList.append(inputString)
def parseString(string):
splitUp = re.split(' <-> |, ', string)
neighborsList = []
for n in splitUp[1:]:
if n != '':
neighborsList.append(int(n))
return [int(splitUp[0]), neighborsList, False]
adjacencyList = list(map(parseString, inputList))
numberOfNodes = len(adjacencyList)
numberOfDiscoveredNodes = 0
numberOfGroups = 0
# ------Parts 1 & 2------ #
def DFS(vertex):
adjacencyList[vertex][2] = True
for neighbor in adjacencyList[vertex][1]:
if not wasDiscovered(neighbor):
DFS(neighbor)
def wasDiscovered(vertex):
return adjacencyList[vertex][2]
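# An equivalent iterative DFS (sketch) sidesteps Python's recursion limit on
# very large inputs:
# def DFS(vertex):
#     stack = [vertex]
#     while stack:
#         v = stack.pop()
#         if not adjacencyList[v][2]:
#             adjacencyList[v][2] = True
#             stack.extend(adjacencyList[v][1])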
while numberOfDiscoveredNodes < numberOfNodes:
    # Find the first unvisited vertex and flood-fill its whole component.
    indexFirstUndiscovered = list(map(wasDiscovered, range(numberOfNodes))).index(False)
    DFS(indexFirstUndiscovered)
    numberOfDiscoveredNodes = sum(map(wasDiscovered, range(numberOfNodes)))
    numberOfGroups += 1
if indexFirstUndiscovered == 0:
answer1 = numberOfDiscoveredNodes
answer2 = numberOfGroups
# ------Output----- #
print("Answer 1: " + str(answer1))
print("Answer 2: " + str(answer2))
| 1,365 |
tests/speculos/test_status_word.py
|
aido/app-sskr
| 3 |
2022839
|
from pathlib import Path
from typing import List, Dict, Any, Tuple
import re
from sskr_client.exception import DeviceException
SW_RE = re.compile(r"""(?x)
\# # character '#'
define # string 'define'
\s+ # spaces
(?P<identifier>SW(?:_[A-Z0-9]+)*) # identifier (e.g. 'SW_OK')
\s+ # spaces
0x(?P<sw>[a-fA-F0-9]{4}) # 4 bytes status word
""")
def parse_sw(path: Path) -> List[Tuple[str, int]]:
if not path.is_file():
raise FileNotFoundError(f"Can't find file: '{path}'")
sw_h: str = path.read_text()
return [(identifier, int(sw, base=16))
for identifier, sw in SW_RE.findall(sw_h) if sw != "9000"]
def test_status_word(sw_h_path):
expected_status_words: List[Tuple[str, int]] = parse_sw(sw_h_path)
status_words: Dict[int, Any] = DeviceException.exc
assert len(expected_status_words) == len(status_words), (
f"{expected_status_words} doesn't match {status_words}")
# just keep status words
expected_status_words = [sw for (identifier, sw) in expected_status_words]
for sw in status_words.keys():
assert sw in expected_status_words, f"{status_words[sw]}({hex(sw)}) not found in sw.h!"
| 1,320 |
camtfpc.py
|
MikeHallettUK/RosRobotics
| 0 |
2022707
|
#!/usr/bin/env python3
# tf_pc_cam subscribes to the PointCloud2 stream from the RealSense camera, transforms it into the odom frame, and republishes it on /points2
import rospy
import tf2_ros
from tf2_sensor_msgs.tf2_sensor_msgs import PointCloud2, do_transform_cloud # to support PointCloud2
rospy.init_node("tf_pc_cam")
tf_pub = rospy.Publisher("points2", PointCloud2, queue_size=10)
tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(2))
tf_listener = tf2_ros.TransformListener(tf_buffer)
rospy.sleep(0.2) # let tf_buffer fill up a bit ...
def pc_cb(msg):
cantran = tf_buffer.can_transform("odom", msg.header.frame_id,
msg.header.stamp,
rospy.Duration(0.1))
if cantran:
trans = tf_buffer.lookup_transform("odom", msg.header.frame_id,
msg.header.stamp,
rospy.Duration(0.1))
cloud_out = do_transform_cloud(msg, trans)
tf_pub.publish(cloud_out)
print("Starting do_transform_cloud from /camera/depth/color/points v1")
rospy.Subscriber("/camera/depth/color/points", PointCloud2, pc_cb, queue_size=1, buff_size=2**24)
rospy.spin()
| 1,192 |