text stringlengths 38 1.54M |
|---|
import cv2
import numpy as np
# Load the input image and prepare an HSV version for color thresholding;
# keep an untouched copy of the original for side-by-side display.
img= cv2.imread("./input/rc-1.png")
hsv=cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
original=img.copy()
def empty(a):
    """No-op callback placeholder (ignores its argument, returns None)."""
    return None
def remove_bad_contours(conts):
    """Filter contours, keeping only plausibly target-shaped ones.

    A contour survives when its minimum-area rectangle has an aspect
    ratio of at most 5, its longer side lies strictly between 5% and 90%
    of the global image height, and the contour fills at least 40% of
    that rectangle. Degenerate rectangles (zero-length sides) are dropped.
    """
    kept = []
    for candidate in conts:
        _, (w, h), _ = cv2.minAreaRect(candidate)
        w, h = float(w), float(h)
        try:
            aspect_ok = max(w / h, h / w) <= 5
            longest = max(w, h)
            size_ok = 0.05 * img.shape[0] < longest < 0.9 * img.shape[0]
            fill_ok = cv2.contourArea(candidate) / (w * h) >= 0.4
        except ZeroDivisionError:
            continue
        if aspect_ok and size_ok and fill_ok:
            kept.append(candidate)
    return kept
def colorDetection(color,image):
    """Build a binary mask for `color` in the HSV `image` and annotate the
    global BGR image `img` with the detected contours, centroids and a
    one-letter color label.

    Args:
        color: one of "Red", "Blue", "Green", "White", "Orange", "Yellow".
        image: HSV image to threshold.

    Returns:
        The cleaned (eroded then dilated) binary mask, or the string
        "Error" for an unknown color (kept for backward compatibility).
    """
    # HSV ranges per color; Red needs two ranges because its hue wraps
    # around 0/180. The final entry is the one-letter display label.
    colordict={"Red":[[0, 50, 70],[9, 255, 255],[159, 50, 70],[180, 255, 255],"R"],"Blue":[[90, 50, 70],[128, 255, 255],"B"],"Green":[[36, 50, 70],[89, 255, 255],"G"],"White":[[0, 0, 231],[180, 18, 255],"W"],"Orange":[[10, 50, 70],[24,255,255],"O"],"Yellow":[[ 25, 50,70],[35,255,255],"Y"]}
    if color not in colordict:
        return "Error"
    if color=="Red":
        # Combine the low-hue and high-hue red bands into one mask.
        Mask_l=cv2.inRange(image,np.array(colordict[color][0]),np.array(colordict[color][1]))
        Mask_u=cv2.inRange(image,np.array(colordict[color][2]),np.array(colordict[color][3]))
        Mask=Mask_l+Mask_u
    else:
        Mask=cv2.inRange(image,np.array(colordict[color][0]),np.array(colordict[color][1]))
    # Morphological clean-up: erode to drop speckles, dilate to restore size.
    kernel=np.ones((2,2),np.uint8)
    img_Erode=cv2.erode(Mask, kernel,iterations=2)
    img_dilate=cv2.dilate(img_Erode, kernel,iterations=1)
    # Find Canny edges on the eroded mask, then external contours.
    edged = cv2.Canny(img_Erode, 20, 200)
    contours, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for c in remove_bad_contours(contours):
        cv2.drawContours(img, [c], -1, (0,0,255), 3)
        M = cv2.moments(c)
        # Bug fix: degenerate contours have zero area moment (m00 == 0);
        # skip them instead of crashing with ZeroDivisionError.
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(img,(cx,cy), 5, (0,0,255), -1)
        cv2.putText(img, colordict[color][-1], (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return img_dilate
# --- Driver: build per-color masks, OR them together, and display results ---
nullarray=np.zeros([img.shape[0],img.shape[1],3])
nullarray2=np.zeros([img.shape[0],img.shape[1],3])
nullarray.fill(255)  # white canvas (not used by the display code below)
# ----------- Blue Color Mask Creation ----------------
BlueMask=colorDetection("Blue", hsv)
# ----------- Red Color Mask Creation ----------------
RedMask=colorDetection("Red", hsv)
# ----------- Orange Color Mask Creation ----------------
OrangeMask=colorDetection("Orange", hsv)
# ----------- White Color Mask Creation ----------------
WhiteMask=colorDetection("White",hsv)
# ----------- Green Color Mask Creation ----------------
GreenMask=colorDetection("Green", hsv)
# ----------- Yellow Color Mask Creation ----------------
YellowMask=colorDetection("Yellow", hsv)
# Combine all binary masks into a single mask via bitwise OR.
result=RedMask | GreenMask | BlueMask | WhiteMask | YellowMask | OrangeMask
cv2.imshow("Original",original)
# NOTE(review): MaskedImg is computed but never displayed — confirm intent.
MaskedImg=cv2.bitwise_and(img,img, mask=result)
cv2.imshow("Image",img)
cv2.waitKey(0)  # block until a key press, then close all windows
cv2.destroyAllWindows()
|
from flask import Flask
from .middlewares import after_request_middleware, before_request_middleware, teardown_appcontext_middleware
from .middlewares import response
from .controllers import register_modules
from app.config import DB, App
def create_app():
    """Application factory: build, wire up and return the Flask app."""
    flask_app = Flask(__name__)
    # Register all blueprints/modules.
    flask_app = register_modules(flask_app)
    # Use the project's JSON response class for every view.
    flask_app.response_class = response.JSONResponse
    # Request lifecycle middleware: before, after, and app-context teardown.
    before_request_middleware(app=flask_app)
    after_request_middleware(app=flask_app)
    teardown_appcontext_middleware(app=flask_app)
    # Custom JSON error handler.
    response.json_error_handler(app=flask_app)
    # Optionally initialize the database (config flag is the string 'True').
    if App.init_db == 'True':
        from .common.database import init_db
        init_db(flask_app)
    return flask_app
|
from random import random
from time import sleep
from time import perf_counter
def cached_property(method):
    """Decorator: expose an expensive method as a lazily-cached read-only property.

    The first access calls ``method`` and stores the result on the instance
    under ``_<method name>``; subsequent accesses return the stored value
    without re-invoking ``method``.
    """
    prop_name = '_{}'.format(method.__name__)

    @functools.wraps(method)  # preserve the method's name/docstring on the property
    def wrapped_func(self, *args, **kwargs):
        # Compute-and-store only on first access.
        if not hasattr(self, prop_name):
            setattr(self, prop_name, method(self, *args, **kwargs))
        return getattr(self, prop_name)

    return property(wrapped_func)
# return prop_name
class Planet:
    """The nicest little orb this side of Orion's Belt.

    Demonstrates the module's ``cached_property`` decorator: ``mass`` is
    expensive (random value plus an artificial delay) but only computed once.
    """

    # Constants used by the fake mass computation below.
    GRAVITY_CONSTANT = 42
    TEMPORAL_SHIFT = 0.12345        # artificial delay in seconds, simulates expense
    SOLAR_MASS_UNITS = 'M\N{SUN}'   # unit suffix, e.g. "12.3456 M☉"

    def __init__(self, color):
        self.color = color

    def __repr__(self):
        return f'{self.__class__.__name__}({repr(self.color)})'

    @cached_property
    def mass(self):
        # Expensive computation: runs only on first access thanks to
        # cached_property (which stores the result as self._mass).
        print('setting mass')
        scale_factor = random()
        sleep(self.TEMPORAL_SHIFT)
        self._mass = (f'{round(scale_factor * self.GRAVITY_CONSTANT, 4)} '
                      f'{self.SOLAR_MASS_UNITS}')
        return self._mass

    @cached_property
    def something(self):
        # Shows the cache also applies to cheap values: 'accessing' prints once.
        print('accessing')
        return 981978
# def main():
# print('here ...')
# blue = Planet('blue')
# # print(blue.something)
# # print(blue.something)
# # print(blue.mass)
# # print(blue.mass)
# # print(blue.something)
# start_time = perf_counter()
# for _ in range(5):
# blue.mass
# # print(blue.mass)
# end_time = perf_counter()
# elapsed_time = end_time - start_time
# # print(elapsed_time)
# assert elapsed_time < .5
# # print(blue.something)
# # print(blue.something)
# masses = [blue.mass for _ in range(10)]
# initial_mass = masses[0]
# assert all(m == initial_mass for m in masses)
# blue.mass = 11
# if __name__ == '__main__':
# main()
|
from socket import *
import time
from threading import *
import struct
import colorama
import scapy.all
class Server:
    """Server for a keyboard-spamming game: broadcasts UDP offers, accepts
    TCP clients, splits them into two teams and counts keypresses for a
    10-second round, then announces the winner."""

    def __init__(self):
        '''Initialize the per-game data structures and let the operator
        choose which local IP address to serve on.'''
        self.clients = []   # [name, socket, addr] per connected player
        self.group1 = {}    # socket -> player name (team 1)
        self.score1 = 0     # keypress tally, team 1
        self.score2 = 0     # keypress tally, team 2
        self.group2 = {}    # socket -> player name (team 2)
        # Candidate IPs: two fixed network addresses plus the default interface.
        ips = ["172.1.0.33","172.99.0.33",scapy.all.get_if_addr(scapy.all.conf.iface)]
        for i in range(len(ips)):
            print(str(i+1) + " " + ips[i])
        n = input("enter your ip: ")
        # Re-prompt until a valid menu choice (1-3) is entered.
        while n != '1' and n != '2' and n != '3':
            n = input("enter your ip: ")
        self.my_ip = ips[int(n) - 1]
        colorama.init()
        print(f'{colorama.Fore.GREEN}Server started,listening on IP address ' + self.my_ip)

    def spread_the_message(self):
        '''Broadcast UDP "offer" packets (one per second for ~10 seconds)
        inviting clients to join the game.'''
        dest_port = 13117
        source_port = 12000
        cookie = 0xfeedbeef     # protocol magic number
        offer = 0x2             # message type: offer
        port_hexa = 0x2ee1      # advertised TCP game port (12001)
        broadcast_ip = ""
        # Choose the broadcast address that matches the chosen subnet.
        if self.my_ip.startswith("172.1"):
            broadcast_ip = "172.1.255.255"
        elif self.my_ip.startswith("172.99"):
            broadcast_ip = "172.99.255.255"
        else:
            broadcast_ip = "255.255.255.255"
        udp_socket = socket(AF_INET, SOCK_DGRAM)
        udp_socket.bind((self.my_ip, source_port))
        udp_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        t_end = time.time() + 10
        message = struct.pack('Ibh',cookie, offer, port_hexa)
        while time.time() < t_end:
            udp_socket.sendto(message, (broadcast_ip, dest_port))
            time.sleep(1)
        udp_socket.close()

    def accept_clients(self, tcp_socket):
        '''Accept joining clients on `tcp_socket` for ~10 seconds; accept()
        timeouts (the socket has a 1s timeout) are swallowed and retried.'''
        t_end = time.time() + 10
        while time.time() < t_end:
            try:
                connection, addr = tcp_socket.accept()
                self.add_new_client(connection, addr)
            except:
                continue

    def add_new_client(self, client, addr):
        '''Register a newly accepted client; its first message is the player name.'''
        client.settimeout(10)
        name = client.recv(1024)
        name = name.decode(encoding='utf-8')
        self.clients.append([name, client, addr])

    def communicate_with_client(self, client):
        '''Per-client game loop: send the welcome/teams banner, then for 10
        seconds count every received message toward the client's team score.'''
        # NOTE(review): this Lock is created per-call, so it does not actually
        # serialize score updates across the per-client threads — confirm intent.
        mutex = Lock()
        respond = f'{colorama.Fore.LIGHTMAGENTA_EX}Welcome to Keyboard Spamming Battle Royale.\n'
        respond += "Group 1:\n==\n"
        for i in self.group1:
            respond += self.group1[i]
        respond += "Group 2:\n==\n"
        for i in self.group2:
            respond += self.group2[i]
        respond += "\nStart pressing keys on your keyboard as fast as you can!!\n"
        try:
            client.send(str.encode(respond))
        except:
            print(f'{colorama.Fore.RED}connection lost')
            return
        start = time.time()
        while time.time() < start + 10:
            try:
                msg = client.recv(1024).decode(encoding='utf-8')
                if msg is not None:
                    if client in self.group1:
                        mutex.acquire()
                        self.score1 += 1
                        mutex.release()
                    if client in self.group2:
                        mutex.acquire()
                        self.score2 += 1
                        mutex.release()
            except:
                return

    def server_main_func(self):
        '''Run one full game round: broadcast offers while gathering clients,
        split them alternately into two groups, run the 10-second spam phase
        in one thread per client, then send everyone the results.'''
        dest_port = 12001
        flag = False
        clients = []
        tcp_socket = socket(AF_INET, SOCK_STREAM)
        tcp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        tcp_socket.bind((self.my_ip, dest_port))
        tcp_socket.listen(100)
        tcp_socket.settimeout(1)
        # Keep offering/accepting (in parallel threads) until someone joins.
        while not flag:
            t1 = Timer(0.1, self.spread_the_message)
            t2 = Timer(0.1, self.accept_clients, args=(tcp_socket,))
            t1.start()
            t2.start()
            t1.join()
            t2.join()
            if len(self.clients) > 0:
                flag = True
        # Recreate the listening socket for the next round.
        tcp_socket.close()
        tcp_socket = socket(AF_INET, SOCK_STREAM)
        tcp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        tcp_socket.bind((self.my_ip, dest_port))
        tcp_socket.listen(100)
        tcp_socket.settimeout(1)
        # Alternate joined clients between the two groups (even/odd index).
        for c in range(len(self.clients)):
            if c % 2 == 0:
                self.group1[self.clients[c][1]] = self.clients[c][0]
            else:
                self.group2[self.clients[c][1]] = self.clients[c][0]
        # One timer-thread per client runs the spam-counting phase.
        for i in self.clients:
            clients.append(Timer(0.1, self.communicate_with_client, args=(i[1],)))
        for i in clients:
            i.start()
        for i in clients:
            i.join()
        # Build the results message and broadcast it to every client.
        message = "Game Over!\n"
        winners = ""
        message += "Group 1 typed in " + str(self.score1) + " characters. Group 2 typed in " + str(self.score2) +\
            " characters.\n"
        if self.score1 > self.score2:
            message += "Group 1 wins!\n\n"
            for i in self.group1:
                winners += self.group1[i]
        if self.score1 < self.score2:
            message += "Group 2 wins!\n\n"
            for i in self.group2:
                winners += self.group2[i]
        message += "Congratulations to the winners:\n"
        message += "==\n"
        message += winners
        for i in self.clients:
            try:
                i[1].send(str.encode(message))
            except:
                print("client is not available")
        tcp_socket.close()
        print("Game over, sending out offer requests...")
        self.reset()

    def reset(self):
        '''Clear all per-game state, ready for the next round.'''
        self.clients.clear()
        self.group1.clear()
        self.group2.clear()
        self.score1 = 0
        self.score2 = 0
def run_server(server):
    '''Driver loop for the server: run game rounds back to back forever.'''
    while True:
        server.server_main_func()
        time.sleep(1)  # brief pause before broadcasting the next round's offers

# Module entry point: construct the server (prompts for an IP) and start it.
server = Server()
run_server(server)
|
#
# @lc app=leetcode id=523 lang=python3
#
# [523] Continuous Subarray Sum
#
# https://leetcode.com/problems/continuous-subarray-sum/description/
#
# algorithms
# Medium (24.24%)
# Likes: 1107
# Dislikes: 1559
# Total Accepted: 110K
# Total Submissions: 449.6K
# Testcase Example: '[23,2,4,6,7]\n6'
#
# Given a list of non-negative numbers and a target integer k, write a function
# to check if the array has a continuous subarray of size at least 2 that sums
# up to a multiple of k, that is, sums up to n*k where n is also an
# integer.
#
#
#
# Example 1:
#
#
# Input: [23, 2, 4, 6, 7], k=6
# Output: True
# Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to
# 6.
#
#
# Example 2:
#
#
# Input: [23, 2, 6, 4, 7], k=6
# Output: True
# Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and
# sums up to 42.
#
#
#
#
# Note:
#
#
# The length of the array won't exceed 10,000.
# You may assume the sum of all the numbers is in the range of a signed 32-bit
# integer.
#
#
#
# @lc code=start
#TAGS variant on 2-sum
#
# Several tricks
# 1. reduce to two-sum with partial sums and mod k
# 2. corner case % k
# 3. sub array length >= 2, imply to maintain cur and cur_prev
# see 974 for a simpler variant
class Solution:
    def checkSubarraySum(self, nums: List[int], k: int) -> bool:
        """Return True if some contiguous subarray of length >= 2 sums to a
        multiple of k (n*k for integer n; when k == 0 the sum must be 0).

        Two-sum on prefix sums mod k: if two prefix sums are congruent
        mod k and at least two positions apart, the slice between them sums
        to a multiple of k. ``prev`` only receives the prefix sum from two
        iterations back (``cur_prev``), which enforces the minimum length.
        """
        n = len(nums)
        # Bug fix: fewer than two elements can never form a valid subarray
        # (the original crashed with IndexError on an empty list).
        if n < 2:
            return False
        prev = {0}
        cur_prev = 0
        cur = nums[0] % k if k else nums[0]
        for i in range(1, n):
            tmp = cur + nums[i]
            cur_prev, cur = cur, (tmp % k if k else tmp)
            if cur in prev:
                return True
            prev.add(cur_prev)
        return False
# @lc code=end
|
import sys, os, Queue
import cPickle as pickle
import numpy as np
from os.path import join as pathjoin
import pixel_reg.doExtract as doExtract
"""
Yolo bad target extract paths:
yolo_s2_074/yolo_s2_074-020.png
yolo_s2_074/yolo_s2_074-044.png
yolo_s3_086/yolo_s3_086-032.png
yolo_s2_074/yolo_s2_074-083.png
yolo_s2_074/yolo_s2_074-007.png
yolo_s2_074/yolo_s2_074-027.png
yolo_s2_074/yolo_s2_074-012.png
yolo_s3_086/yolo_s3_086-035.png
yolo_s3_071/yolo_s3_071-099.png
yolo_s2_074/yolo_s2_074-046.png
yolo_s4_006/yolo_s4_006-219.png
Only the state senator contest targets were misaligned
Usage:
To reproduce Yolo bad target extraction:
python do_single_textract.py /home/arya/opencount/opencount/projects/Yolo_2012 \
/media/data1/audits2012_straight/yolo/votedballots/yolo_s2_074/yolo_s2_074-020.png bad_out
Or, as a super-simple shortcut, the following is equivalent:
python do_single_textract.py bad_out
This will do target extraction on yolo_s2_074-020.png, and dump it to bad_out/.
"""
def isimgext(f):
    """True when `f` has a recognized raster-image extension (case-insensitive)."""
    ext = os.path.splitext(f)[1]
    return ext.lower() in ('.jpg', '.png', '.jpeg')
def main():
    """Run target extraction (Python 2 / OpenCount) on one voted ballot image
    or a directory of them, dumping extracted targets, diffs and metadata
    into an output directory."""
    args = sys.argv[1:]
    if len(args) == 1:
        # Shortcut form: only the output dir; use the known-bad Yolo ballot.
        # NOTE(review): this branch never sets `imgpaths`, so the loop below
        # would raise NameError — looks like a latent bug; confirm intended use.
        outdir = args[0]
        projdir = '/home/arya/opencount/opencount/projects/Yolo_2012'
        votedpath = '/media/data1/audits2012_straight/yolo/votedballots/yolo_s2_074/yolo_s2_074-020.png'
    else:
        projdir = args[0]
        votedpath = args[1]
        if isimgext(votedpath):
            imgpaths = [votedpath]
        else:
            # Directory: collect every image file beneath it.
            imgpaths = []
            for dirpath, dirnames, filenames in os.walk(votedpath):
                for imgname in [f for f in filenames if isimgext(f)]:
                    imgpaths.append(os.path.join(dirpath, imgname))
        outdir = args[2]
    # Output subdirectories for extracted targets, diffs and metadata.
    t_imgs = pathjoin(outdir, 'extracted')
    t_diff = pathjoin(outdir, 'extracted_diff')
    t_meta = pathjoin(outdir, 'extracted_metadata')
    b_meta = pathjoin(outdir, 'ballot_metadata')
    # Best-effort creation; ignore "already exists".
    try: os.makedirs(t_imgs)
    except: pass
    try: os.makedirs(t_diff)
    except: pass
    try: os.makedirs(t_meta)
    except: pass
    try: os.makedirs(b_meta)
    except: pass
    # Load the pickled project state produced by the OpenCount pipeline.
    bal2group = pickle.load(open(pathjoin(projdir, 'ballot_to_group.p'), 'rb'))
    group2bals = pickle.load(open(pathjoin(projdir, 'group_to_ballots.p'), 'rb'))
    b2imgs = pickle.load(open(pathjoin(projdir, 'ballot_to_images.p'), 'rb'))
    img2b = pickle.load(open(pathjoin(projdir, 'image_to_ballot.p'), 'rb'))
    img2page = pickle.load(open(pathjoin(projdir, 'image_to_page.p'), 'rb'))
    img2flip = pickle.load(open(pathjoin(projdir, 'image_to_flip.p'), 'rb'))
    target_locs_map = pickle.load(open(pathjoin(projdir, 'target_locs_map.p'), 'rb'))
    group_exmpls = pickle.load(open(pathjoin(projdir, 'group_exmpls.p'), 'rb'))
    proj = pickle.load(open(pathjoin(projdir, 'proj.p'), 'rb'))
    voteddir_root = proj.voteddir
    # 0.) Set up job
    jobs = []
    def get_bbs(groupID, target_locs_map):
        # Build, per ballot side, an array of [y1, y2, x1, x2, id] target boxes.
        bbs_sides = []
        boxes_sides = target_locs_map[groupID]
        for side, contests in sorted(boxes_sides.iteritems(), key=lambda t: t[0]):
            bbs = np.empty((0, 5))
            for contest in contests:
                cbox, tboxes = contest[0], contest[1:]
                for tbox in tboxes:
                    # TODO: Temporary hack to re-run target extract
                    # on SantaCruz, without re-doing SelectTargets
                    x1 = tbox[0] + 33
                    y1 = tbox[1]
                    x2 = tbox[0] + tbox[2] - 23
                    y2 = tbox[1] + tbox[3]
                    id = tbox[4]
                    bb = np.array([y1, y2, x1, x2, id])
                    bbs = np.vstack((bbs, bb))
            bbs_sides.append(bbs)
        return bbs_sides
    # NOTE(review): the loop variable shadows `votedpath`, and `imgpaths` is
    # rebound inside the loop; iteration still uses the original list object.
    for votedpath in imgpaths:
        ballotid = img2b[votedpath]
        groupID = bal2group[ballotid]
        bbs = get_bbs(groupID, target_locs_map)
        # 1.a.) Create 'blank ballots'. This might not work so well...
        exmpl_id = group_exmpls[groupID][0]
        blankpaths = b2imgs[exmpl_id]
        blankpaths_ordered = sorted(blankpaths, key=lambda imP: img2page[imP])
        blankpaths_flips = [img2flip[blank_imP] for blank_imP in blankpaths_ordered]
        imgpaths = b2imgs[ballotid]
        imgpaths_ordered = sorted(imgpaths, key=lambda imP: img2page[imP])
        imgpaths_flips = [img2flip[imP] for imP in imgpaths_ordered]
        job = [blankpaths_ordered, blankpaths_flips, bbs, imgpaths_ordered, imgpaths_flips,
               t_imgs, t_diff, t_meta, b_meta, voteddir_root, Queue.Queue(), Queue.Queue()]
        jobs.append(job)
    '''
    res = doExtract.convertImagesSingleMAP(bal2imgs, tpl2imgs, bal2tpl, img2bal,
                                           csvPattern,
                                           t_imgs, t_meta, b_meta,
                                           pathjoin(projdir, 'quarantined.csv'),
                                           lambda: False,
                                           None)
    '''
    # Run each extraction job serially in this process.
    for job in jobs:
        doExtract.convertImagesWorkerMAP(job)

if __name__ == '__main__':
    main()
|
# LPTHW-style interactive script (Python 2): greet the user named on the
# command line, ask three questions, then echo the answers back.
from sys import argv

script, user_name = argv  # expects exactly one CLI argument: the user's name
prompt = '> '

print "Hi %s" %user_name
print "I'd like to ask you some questions."
print "Do you like me?"
likes = raw_input(prompt)

print "Where do you live %s?" %(user_name)
lives = raw_input(prompt)

print "What kind of computer do you have %s?" %(user_name)
computer = raw_input(prompt)

# Summary uses the captured answers verbatim.
print """\n\nHi %s so you said you %s like me.
You live in %s. I have no idea where that is.
And you have a %s computer.""" %(user_name, likes, lives, computer)
|
import argparse # Needs python2.7+
def check_positive(value):
    """argparse ``type=`` helper: parse `value` as a strictly positive int.

    Raises argparse.ArgumentTypeError for non-integer input as well as for
    values <= 0, so argparse reports a clean usage error instead of an
    unhandled ValueError traceback.
    """
    try:
        ivalue = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    if ivalue <= 0:
        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
    return ivalue
# Command-line interface for the k-mer association + GEMMA pipeline.
parser = argparse.ArgumentParser(description='Run k-mer associations and then GEMMA')
# Files:
# phenotype file
parser.add_argument("--pheno", dest = "fn_phenotype", type=str, required=True,
                    help='phenotype file name Format:sample name[TAB]phenotype val[NEW-LINE]...)')
# output directory
parser.add_argument("--outdir", dest = "outdir", type=str, required=True,
                    help='Directory to output pipeline results (shouldnt exist)')
# base name for output files
parser.add_argument("-o", "--out", dest = "name", type=str, default="results",
                    help='base name for all output files')
# k-mers presence/absence table
parser.add_argument("--kmers_table", dest = "kmers_table", type=str, required=True,
                    help='Base for presence/absence table and accessions list')
# Parallelism (validated as strictly positive by check_positive above)
parser.add_argument("-p", "--parallel", dest = "parallel", default=1, type=check_positive,
                    help='Maximal number of threads to use')
# k-mer length
parser.add_argument("-l", "--kmer_len", dest = "kmers_len", type=int,choices=range(15,32), metavar="[15-31]",
                    help='Length of K-mers in the database table')
# number of k-mers to take
parser.add_argument("-k", "--kmers_number", dest = "n_kmers", type=int, default=100001,
                    help='Numbers of k-mers to filter from first step (due to bug in GEMMA 0.98 number shouldnt be a multiplication of 20K)')
# number of snps to take (if a two step method is used)
parser.add_argument("--snps_number", dest = "n_snps", type=int, default=10001,
                    help='Numbers of snps to filter from first step (used only if there using a two step snps approximation)')
# Number of permutations
parser.add_argument("--permutations", dest = "n_permutations", type=int, default=0,
                    help='number of permutation for permutation test')
# Use kinship matrix derived from the k-mers table
parser.add_argument("--kinship_kmers", dest = "use_kinship_from_kmers",
                    help="Use the kinship matrix from kmers_table", action="store_true")
# Run the pipeline on k-mers
parser.add_argument("--run_on_kmers", dest = "run_kmers", help="run pipeline on k-mers",
                    action="store_true")
# Run SNPs associations in ONE step - only run GEMMA
parser.add_argument("--run_on_snps_one_step", dest = "run_one_step_snps", help="run pipeline with the same parameters on SNPs",
                    action="store_true")
# RUN SNPs association in TWO steps - for permutations, first filter likely snps and then run GEMMA on them
parser.add_argument("--run_on_snps_two_steps", dest = "run_two_steps_snps",
                    help="run pipeline with the same parameters on SNPs - first filtering using GRAMMAR-Gamma and then using GEMMA for the top ones",
                    action="store_true")
### Percent of missing values of SNPs to tolerate
##parser.add_argument("--miss_gemma", dest = "miss_gemma", type=float, default=0.5,
##                    help='Tolerance for missing values in SNPs table')
## MAF (for k-mers and also for SNPs if used)
parser.add_argument("--maf", dest = "maf", type=float, default=0.05,
                    help='Minor allele frequency')
## MAC (for k-mers and also for SNPs if used)
parser.add_argument("--mac", dest = "mac", type=float, default=5,
                    help='Minor allele count')
## Minimum number of data points required to run at all
parser.add_argument("--min_data_points", dest = "min_data_points", type=float, default=30,
                    help='Stop running if there is less data points than this threshold')
# SNP files (bed/bim/fam)
parser.add_argument("--snp_matrix", dest = "snps_matrix", type=str,
                    default = "/ebio/abt6/yvoichek/1001G_1001T_comparison/code/k_mer_clusters/ArticlePhenotypes/1001G_SNPs_info/1001genomes_snp",
                    help='base name for snps bed/bim/fam files')
# Control the verbosity of the program
parser.add_argument("-v", "--verbose", dest = "verbose", help="increase output verbosity",
                    action="store_true")
## count patterns of presence absence
parser.add_argument("--pattern_counter", dest = "kmers_pattern_counter",
                    help="Count the number of presence absence patterns k-mers pattern (has a large RAM usage)",
                    action="store_true")
## qq-plot for the k-mers is computed by default; this flag disables it
parser.add_argument("--no_qq_plot", dest = "qq_plot",
                    help="Don't calculate a qq plot (less computations)", action="store_false")
## Keep the intermediate files (mostly for debugging)
parser.add_argument("--dont_remove_intermediates", dest = "remove_intermediate",
                    help="Mostly for debugging, to keep the intermediate files", action="store_false")
# path for GEMMA
parser.add_argument("--gemma_path", dest = "gemma_path", type=str,
                    default = "/ebio/abt6/yvoichek/smallproj/prefix/bin/gemma",
                    help='path to GEMMA')
args = parser.parse_args()
|
import numpy as np
import os
# NOTE(review): machine-specific working directory; the script only runs on
# this exact Windows box as written.
os.chdir('C:/Users/DELL/Desktop/Quant_macro/Pset4/hand')
import matplotlib.pyplot as plt
import Rep_agent_labor2 as ral
# The basis functions are in the class as self.func

### Parameters passed straight through to ral.rep_ag below
### (see Rep_agent_labor2 for their roles).
para = {}
para['theta'] = 0.679
para['beta'] = 0.988
para['delta'] = 0.013
para['kappa'] = 5.24
para['nu'] = 2
para['h'] = 1
# kss: steady-state capital (per the variable name) — anchors the grid bounds.
kss = (((1-para['theta'])*para['beta'])/(1-para['beta']*(1-para['delta'])))**(1/para['theta'])
n = 200          # number of capital grid points
kmax = kss
kmin = 0.01*kss  # avoid k = 0
hmax = 1         # labor/hours bounds
hmin = 0
gridk = np.linspace(kmin, kmax, n)

# Solve the representative-agent problem, then evaluate value and policies on the grid.
cheby = ral.rep_ag(para['theta'], para['beta'], para['delta'], para['kappa'], para['nu'], kmin, kmax, hmin, hmax)
New_opt, Theta = cheby.problem()
cheby.Val_pol_fun()
Vg = cheby.V(gridk)   # value function
gc = cheby.gc(gridk)  # consumption policy
gh = cheby.gh(gridk)  # labor-supply policy

# Three-panel figure: value function plus the two policy functions.
f2, (ax3, ax4, ax5) = plt.subplots(1,3)
f2.set_figheight(5)
f2.set_figwidth(10)
ax3.plot(gridk, Vg, 'b', label='Value')
ax3.legend(loc = 'upper right')
ax3.set_xlabel('k')
ax3.set_ylabel('Level')
ax3.set_title('Value Function')
ax4.plot(gridk, gc, 'b', label='Consumption')
ax4.legend(loc = 'upper right')
ax4.set_xlabel('k')
ax4.set_ylabel('Level')
ax4.set_title('Policy Labor Consumption')
ax5.plot(gridk, gh, 'b', label='Labor Supply')
ax5.legend(loc = 'upper right')
ax5.set_xlabel('k')
ax5.set_ylabel('Level')
ax5.set_title('Policy Labor Supply')
|
#! /usr/bin/python
import util
# Project Euler #5: smallest positive number evenly divisible by all of 1..20.
#
# The original brute-force scan tried every integer above 200000000 (tens of
# millions of trial divisions). Every number divisible by all of 1..20 is a
# multiple of lcm(1..20) = 232792560, which exceeds the original starting
# point, so computing the LCM directly prints the exact same result.
from math import gcd
from functools import reduce

largest_num = reduce(lambda acc, v: acc * v // gcd(acc, v), range(1, 21), 1)
print(largest_num)
|
# ParaView trace script: create a second render view, show a sliced VTK
# dataset colored by 'volume_scalars', add two high-resolution arrows and
# two text labels (strike/dip annotations), then render. Statement order
# follows the recorded GUI session and is preserved as-is.
Arrow1 = Arrow()
my_view1 = GetRenderView()
AnimationScene1 = GetAnimationScene()
my_view0 = GetRenderViews()[1]
# Second render view with custom background and LOD/remote-render settings.
RenderView2 = CreateRenderView()
RenderView2.CompressorConfig = 'vtkSquirtCompressor 0 3'
RenderView2.UseLight = 1
RenderView2.LightSwitch = 0
RenderView2.RemoteRenderThreshold = 3.0
RenderView2.LODThreshold = 5.0
RenderView2.ViewTime = 2.0
RenderView2.LODResolution = 50.0
RenderView2.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
AnimationScene1.ViewModules = [ my_view0, my_view1, RenderView2 ]
DataRepresentation2 = Show()
DataRepresentation2.ScaleFactor = 0.1
DataRepresentation2.EdgeColor = [0.0, 0.0, 0.5000076295109483]
Delete(my_view1)
# Show the VTK dataset as a slice colored by 'volume_scalars'.
LegacyVTKReader1 = FindSource("LegacyVTKReader1")
a1_volume_scalars_PVLookupTable = GetLookupTableForArray( "volume_scalars", 1, RGBPoints=[-0.1073455885052681, 0.23, 0.299, 0.754, 0.16080981492996216, 0.706, 0.016, 0.15] )
SetActiveSource(LegacyVTKReader1)
DataRepresentation3 = Show()
DataRepresentation3.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation3.SelectionPointFieldDataArrayName = 'volume_scalars'
DataRepresentation3.ScalarOpacityFunction = []
DataRepresentation3.ColorArrayName = 'volume_scalars'
DataRepresentation3.ScalarOpacityUnitDistance = 3.5086941684454556
DataRepresentation3.LookupTable = a1_volume_scalars_PVLookupTable
DataRepresentation3.Representation = 'Slice'
DataRepresentation3.ScaleFactor = 1.5
AnimationScene1.ViewModules = [ my_view0, RenderView2 ]
# Camera setup for the new view.
RenderView2.CameraPosition = [0.5, 0.0, 1.9983228879559547]
RenderView2.CameraClippingRange = [1.8060006068164731, 2.245237091029618]
RenderView2.CameraFocalPoint = [0.5, 0.0, 0.0]
RenderView2.CameraParallelScale = 0.5172040216672718
RenderView2.CenterOfRotation = [0.5, 0.0, 0.0]
Arrow1.TipResolution = 128
Arrow1.ShaftResolution = 128
RenderView2.CameraPosition = [7.5, 3.449999999254942, 32.06175407727596]
RenderView2.CameraFocalPoint = [7.5, 3.449999999254942, 0.0]
RenderView2.CameraClippingRange = [30.707102222943664, 33.84431654696104]
RenderView2.CenterOfRotation = [7.5, 3.449999999254942, 0.0]
RenderView2.CameraParallelScale = 8.298192574592415
DataRepresentation2.Origin = [-0.8, 0.2, 0.0]
DataRepresentation2.Scale = [6.0, 6.0, 6.0]
DataRepresentation2.NonlinearSubdivisionLevel = 0
# Second arrow, same resolution.
Arrow2 = Arrow()
RenderView2.CameraClippingRange = [30.54713651871121, 34.045680410831544]
Arrow2.TipResolution = 128
Arrow2.ShaftResolution = 128
DataRepresentation4 = Show()
DataRepresentation4.ScaleFactor = 0.1
DataRepresentation4.EdgeColor = [0.0, 0.0, 0.5000076295109483]
# Text annotations: strike and dip angles (degree sign via \xb0).
Text1 = Text()
DataRepresentation4.Origin = [1.0, 0.3, 0.0]
DataRepresentation4.Scale = [6.0, 6.0, 6.0]
DataRepresentation4.NonlinearSubdivisionLevel = 0
DataRepresentation4.Orientation = [0.0, 0.0, 270.0]
Text1.Text = 'Strike 0\xb0'
Text2 = Text()
SetActiveSource(Text1)
DataRepresentation5 = Show()
DataRepresentation5.FontSize = 12
DataRepresentation5.Position = [0.45, 0.21]
RenderView2.CameraPosition = [6.799999995529651, 2.6999999955296516, 35.8492540776365]
RenderView2.CameraClippingRange = [34.29676151906815, 37.889992911197496]
RenderView2.CameraFocalPoint = [6.799999995529651, 2.6999999955296516, 0.0]
RenderView2.CameraParallelScale = 9.278469708011528
RenderView2.CenterOfRotation = [6.799999995529651, 2.6999999955296516, 0.0]
Text2.Text = 'Dip\n 0\xb0'
SetActiveSource(Text2)
DataRepresentation6 = Show()
DataRepresentation6.FontSize = 12
DataRepresentation6.Position = [0.34, 0.5]
Text2.Text = 'Dip\n90\xb0'
Render()
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/7 20:43
# @author : Mo
# @function: FastText [Bag of Tricks for Efficient Text Classification](https://arxiv.org/abs/1607.01759)
from macadam.base.graph import graph
from macadam import K, L, M, O
class FastTextGraph(graph):
    """FastText-style text classification head.

    See "Bag of Tricks for Efficient Text Classification"
    (https://arxiv.org/abs/1607.01759).
    """

    def __init__(self, hyper_parameters):
        super().__init__(hyper_parameters)

    def build_model(self, inputs, outputs):
        """Build the Keras model: pooled embeddings -> dense -> softmax-style output.

        Args:
            inputs: model input tensor(s) from the embedding stage.
            outputs: sequence embedding tensor to pool and classify.
        """
        # Pool the sequence two ways (max and average) and concatenate.
        x_m = L.GlobalMaxPooling1D()(outputs)
        x_g = L.GlobalAveragePooling1D()(outputs)
        x = L.Concatenate()([x_g, x_m])
        # Hidden projection: at least 128 units, at most the embedding size.
        x = L.Dense(min(max(self.label, 128), self.embed_size), activation=self.activate_mid)(x)
        x = L.Dropout(self.dropout)(x)
        # Final classification layer, one unit per label.
        self.outputs = L.Dense(units=self.label, activation=self.activate_end)(x)
        self.model = M.Model(inputs=inputs, outputs=self.outputs)
        self.model.summary(132)
# Note: as the corpus grows (word, bi-gram, tri-gram vocabularies), memory
# usage keeps climbing and badly slows model construction.
# 1. Ideas used here (macadam, Chinese text):
#    1. Drop the most/least frequent 5% of n-grams (not implemented)
#    2. Lower embed_size from the usual 300 to a default of 64
#    3. Convert to numpy.array as float16 instead of float32
# 2. Other ideas (English text):
#    1. Filter out words that occur rarely
#    2. Store n-grams via hashing
#    3. Switch from character granularity to word granularity (English)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 21:48:29 2018
@author: Riko
"""
from agents.parcel import Parcel
import matplotlib.pyplot as plt
def graph_function(model, simulation_time, execution_time):
    """Plot end-of-run statistics for the parcel-delivery simulation:
    a parcel dwell-age histogram, a UAV utilization histogram, and
    printed summary lines.

    Args (assumed from usage — confirm against caller):
        model: scheduler-based model exposing .schedule.agents_by_type,
            .get_steps_per_hour(), and .parcel_aggregator (delivered parcels).
        simulation_time: simulated duration used to normalize UAV flight time.
        execution_time: wall-clock seconds the run took.
    """
    # Parcel age converted from steps to hours.
    parcel_age = [p.age / model.get_steps_per_hour() for p in model.schedule.agents_by_type[Parcel]]
    plt.hist(parcel_age)
    plt.title("Parcel Dwell")
    plt.xlabel("Parcel Age [hr]")
    plt.ylabel("Qty []")
    plt.show()
    print("This instance ran for {:.2f}sec simulating {} hours \n" \
          "During which {} parcels were generated, , {:.2f} % ({}) were " \
          "delivered ".format(execution_time,
                              model.schedule.steps / model.get_steps_per_hour(),
                              len(parcel_age),
                              100*len(model.parcel_aggregator) / len(parcel_age),
                              len(model.parcel_aggregator)))
    # Imported here rather than at module top — presumably to avoid a
    # circular import; confirm before moving it.
    from agents.uav import Uav
    TFH = [u._tfh for u in model.schedule.agents_by_type[Uav]]
    utilization = [x / simulation_time*100 for x in TFH]  # percent of total time
    plt.hist(utilization)
    plt.title("UAV Utilization")
    plt.xlabel("Percent Utilization of UAV [%]")
    plt.ylabel("Qty []")
    print("The graph above is a uav utilization histogram")
    plt.show()
|
class Solution(object):
    def integerReplacement(self, n):
        """Return the minimum number of operations to reduce n to 1 (LC 397).

        Even n is halved; odd n branches into n+1 and n-1 and takes the
        cheaper path. Uses floor division (//) so the recursion stays in
        integers under Python 3 as well — plain / would produce floats
        there and break the n == 1 base case.
        """
        if n == 1:
            return 0
        if n % 2 == 0:
            return 1 + self.integerReplacement(n // 2)
        return 1 + min(self.integerReplacement(n + 1),
                       self.integerReplacement(n - 1))
# Manual smoke test (Python 2 print statement): 3 -> 2 -> 1 is 2 operations.
sol = Solution()
print sol.integerReplacement(3)
|
import smtplib
import logging
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Gmail credentials; the password is a placeholder — never commit a real one.
username = "ngtthanh1010@gmail.com"
password = "xxxxxxx"
# Shared message object the attach_* helpers below add parts to.
# NOTE(review): 'relate' looks like a typo for the standard 'related'
# multipart subtype — confirm.
msg = MIMEMultipart('relate')
def attach_text_file(filename):
    """Attach the text file `filename` to the module-level `msg`.

    Errors (e.g. a missing file) are logged and swallowed so sending can
    proceed without the attachment.
    """
    try:
        logger.info('openning file %s' % filename)
        with open(filename, 'rb') as ftext:
            msgtext = MIMEText(ftext.read(), 'text')
            msgtext.add_header("Content-ID", "<text1>")
            # NOTE(review): two Content-Disposition headers are added; the
            # second ("inline") does not replace the first ("attachment"),
            # so the part carries both — confirm which one is intended.
            msgtext.add_header("Content-Disposition",
                               "attachment", filename=filename)
            msgtext.add_header("Content-Disposition",
                               "inline", filename=filename)
            msg.attach(msgtext)
    except Exception as e:
        logger.error(e, exc_info=True)
def attach_img(img):
    """Attach the PNG image file `img` to the module-level `msg`.

    Errors (e.g. a missing file) are logged and swallowed so sending can
    proceed without the attachment.
    """
    try:
        logger.info('openning file %s' % img)
        with open(img, 'rb') as fimage:
            msgimg = MIMEImage(fimage.read(), 'png')
            msgimg.add_header("Content-ID", "<image1>")
            # NOTE(review): both "attachment" and "inline" dispositions are
            # added (same pattern as attach_text_file) — confirm intent.
            msgimg.add_header("Content-Disposition", "attachment",
                              filename=img)
            msgimg.add_header("Content-Disposition", "inline",
                              filename=img)
            msg.attach(msgimg)
    except Exception as e:
        logger.error(e, exc_info=True)
def send_mail(rctps, subject, body, textfile=None, img=None):
    """Send `body` to the `rctps` addresses via Gmail SMTP, attaching a
    text file and/or an image.

    Returns None without sending when neither attachment is given.
    SMTP failures are logged, not raised.

    NOTE(review): headers and attachments accumulate on the shared
    module-level `msg`, so calling this more than once per process
    duplicates parts — confirm it is only used as a one-shot script.
    """
    str_all_mails = ', '.join(rctps)
    if textfile is None and img is None:
        return None
    time_str = str(datetime.now())
    msg["From"] = 'ngtthanh1010@gmail.com'
    msg["To"] = str_all_mails
    msg["Subject"] = subject
    body = MIMEText(body, 'plain')
    # Attach whichever of the two optional files were provided.
    if img is not None and textfile is not None:
        attach_text_file(textfile)
        attach_img(img)
    if img is None:
        attach_text_file(textfile)
    if textfile is None:
        attach_img(img)
    msg.attach(body)
    try:
        # STARTTLS session against Gmail's submission port.
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        logger.info('%s login on mail server' % time_str )
        server.login(username, password)
        logger.info('%s logged on mail server' % time_str )
        logger.info('%s sending mail to %s' % (time_str, str_all_mails))
        server.sendmail(username, rctps, msg.as_string())
        logger.info ('%s sent mail sucessful' % time_str)
        server.quit()
    except Exception as e:
        logger.error(e, exc_info=True)
if __name__ == '__main__':
    # One-shot demo: send a text-file attachment to ourselves.
    rctps = ["ngtthanh1010@gmail.com"]
    subject = 'Test'
    body = "My test email"
    filename = "hehe.txt"
    imgfile ="Screenshot from 2014-07-11 09:29:20.png"
    send_mail(rctps, subject, body, textfile=filename)
    # send_mail(rctps, subject, body, textfile=None, img=imgfile)
    # send_mail(rctps, subject, body, textfile=filename, img=imgfile)
|
# -*- mode:python -*-
#
# Copyright (c) Dimitry Kloper <kloper@users.sf.net> 2002-2012
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# This file is part of dgscons library (https://github.com/kloper/dgscons.git)
#
# dgscons/tools/hardlink.py -- make SCons create hard links insread of
# file copies on windows
#
import sys
import string
import SCons
def link_func(dst, src):
    """Create a Windows hard link at `dst` pointing to the existing file `src`.

    Bug fix: uses CreateHardLinkW (wide-char) so Python 3 str paths are
    passed correctly — the ANSI CreateHardLinkA variant expects byte
    strings and fails when handed str objects under Python 3. On failure
    raises OSError carrying the Win32 error code (ctypes.WinError) instead
    of a bare, information-free OSError.
    """
    import ctypes
    if not ctypes.windll.kernel32.CreateHardLinkW(dst, src, 0):
        raise ctypes.WinError()
def CreateHardLink(fs, src, dst):
    """SCons hook signature adapter: *fs* (the SCons FS object) is unused; delegates to link_func."""
    link_func(dst, src)
def generate(env):
    """Install hard-link support into SCons on Windows.

    Monkeypatches SCons' internal hardlink function and replaces the
    Defaults.Link action factory so that file "copies" become NTFS hard
    links. No-op on non-Windows platforms.
    """
    if sys.platform == 'win32':
        SCons.Node.FS._hardlink_func = CreateHardLink
        SCons.Defaults.Link = SCons.Defaults.ActionFactory(
            link_func,
            # The lambda only produces the build-log message for the action.
            lambda dest, src: 'Link("{}", "{}")'.format(dest, src),
            convert=str
        )
def exists(env):
    """Report whether this tool can run in the current environment (Windows only)."""
    return sys.platform == 'win32'
|
from django import forms
from django.contrib.auth.models import User
class LoginForm(forms.Form):
    """Username/password login form validated against Django's User model."""

    username = forms.CharField(widget=forms.TextInput)
    password = forms.CharField(widget=forms.PasswordInput)

    def clean(self):
        """Validate the credentials.

        Raises ValidationError when the user does not exist or the password
        is wrong; returns cleaned_data otherwise.

        Bug fix: the original indexed ``self.cleaned_data['username']``,
        which raises KeyError when field-level validation already removed
        the field; ``.get()`` is the documented safe access in ``clean()``.
        Also collapses the exists()+filter().first() pair into one query.
        """
        cleaned = super().clean()
        username = cleaned.get('username')
        password = cleaned.get('password')
        user = User.objects.filter(username=username).first()
        if user is None:
            raise forms.ValidationError(f'Пользователь с именем {username} не был найден')
        if not user.check_password(password):
            raise forms.ValidationError('Неправильный пароль')
        return cleaned
|
import random
import numpy as np
from math import log
from netcal.metrics import ECE
from scipy.optimize import fmin_bfgs
from scipy.special import expit, xlogy
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss
import warnings
warnings.filterwarnings('once')
def sigmoid(m, b, X):
    """Logistic function of the affine score X·m + b."""
    score = np.dot(X, m) + b
    return 1.0 / (1.0 + np.exp(-score))
def smooth_labels(y_train, f_pos, f_neg):
    """Soften hard labels: positives become 1 - f_pos, non-positives become f_neg."""
    labels = np.asarray(y_train)
    return np.where(labels > 0, 1.0 - f_pos, f_neg)
def _sigmoid_calibration(X, y, T1 = None, tol = 1e-3):
    """Fit sigmoid parameters by cross-entropy minimization (Platt-style calibration).

    :param X: scores/features, shape (n,) or (n, d); 1-D input is reshaped to a column
    :param y: labels; y > 0 is treated as positive
    :param T1: optional precomputed targets (e.g. smoothed labels). When None,
               Platt's prior-based smoothed targets are derived from y.
    :param tol: gradient tolerance passed to BFGS
    :return: (A, b) — weight vector (one per column of X) and intercept
    """
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    prior0 = float(np.sum(y <= 0))   # number of negative examples
    prior1 = y.shape[0] - prior0     # number of positive examples
    if T1 is None:
        # Platt's prior-smoothed targets. NOTE: the assignment looks swapped
        # versus sklearn's version, but the objective below also uses the
        # opposite sign (expit(-z)), so the two conventions cancel out.
        T = np.zeros(y.shape)
        T[y <= 0] = (prior1 + 1.) / (prior1 + 2.)
        T[y > 0] = 1. / (prior0 + 2.)
        T1 = 1. - T
    else:
        T = 1. - T1
    def objective(AB):
        # Cross-entropy of targets (T, T1) against sigmoid predictions.
        # AB packs the d weights followed by the intercept.
        tmp = 0
        for i in range(X.shape[1]):
            tmp += AB[i] * X[:,i]
        tmp += AB[X.shape[1]]
        #P = expit(-(AB[0] * X + AB[1]))
        P = expit(-(tmp))
        loss = -(xlogy(T, P) + xlogy(T1, 1. - P))
        return loss.sum()
    def grad(AB):
        # gradient of the objective function
        tmp = 0
        for i in range(X.shape[1]):
            tmp += AB[i] * X[:,i]
        tmp += AB[X.shape[1]]
        #P = expit(-(AB[0] * X + AB[1]))
        P = expit(-(tmp))
        TEP_minus_T1P = T - P
        dA = np.dot(TEP_minus_T1P, X)
        dB = np.sum(TEP_minus_T1P)
        out_grad = np.append(dA, dB)
        return out_grad#np.array([dA, dB])
    # Initial guess: zero weights, log-odds of the class priors as intercept.
    AB0 = np.array([0.] * X.shape[1] + [log((prior0 + 1.) / (prior1 + 1.))])
    AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False, gtol = tol)
    return AB_[0:-1], AB_[-1]
class CustomLogisticRegression():
    """Binary logistic regression with optional label smoothing or Platt scaling.

    Three fitting modes, selected in fit():
      * platt_scaling=True        -> custom BFGS calibration with Platt prior targets
      * smoothing_factor_* > 0    -> custom BFGS calibration on smoothed labels
      * otherwise                 -> scikit-learn LogisticRegression
    The learned weight vector / intercept are stored in self.a / self.b.
    """
    def __init__(self, smoothing_factor_pos = 0, smoothing_factor_neg = 0, tolerance = 1e-3, regularization = 'none', regularization_strength = 0, platt_scaling = False):
        self.smoothing_factor_pos = smoothing_factor_pos
        self.smoothing_factor_neg = smoothing_factor_neg
        self.platt = platt_scaling
        # NOTE(review): sklearn >= 1.2 expects penalty=None rather than the
        # string 'none' — confirm the pinned sklearn version.
        self.regularization = regularization
        self.reg_strength = regularization_strength #Inverse of Regularization Strength (Must be positive)
        self.tolerance = tolerance
        # Seeds the GLOBAL random module, not an instance-local RNG.
        random.seed(0)
    def fit(self, X_train, y_train):
        """Learn self.a (weights) and self.b (intercept) from training data."""
        if self.platt == True:
            # Platt scaling: _sigmoid_calibration derives its own prior-based targets.
            y_train_smoothed = None
            self.a, self.b = _sigmoid_calibration(X_train, y_train, y_train_smoothed, tol = self.tolerance)
        elif self.smoothing_factor_pos > 0 or self.smoothing_factor_neg > 0:
            # Label smoothing: calibrate against softened 0/1 targets.
            y_train_smoothed = smooth_labels(y_train, self.smoothing_factor_pos, self.smoothing_factor_neg)
            self.a, self.b = _sigmoid_calibration(X_train, y_train, y_train_smoothed, tol = self.tolerance)
        else:
            if len(X_train.shape) < 2:
                X_train = X_train.reshape(-1, 1)
            # saga supports L1; lbfgs handles the remaining penalties.
            if self.regularization == 'l1':
                clf = LogisticRegression(random_state=0, solver='saga', penalty = self.regularization, C = self.reg_strength, tol=self.tolerance)
            else:
                clf = LogisticRegression(random_state=0, solver='lbfgs', penalty = self.regularization, C = self.reg_strength, tol=self.tolerance)
            clf.fit(X_train, y_train)
            self.a = clf.coef_[0]; self.b = clf.intercept_[0]
        #print('COEFFS:', self.a, self.b)
    def predict_proba(self, X):
        """Return P(y=1 | X) under the fitted sigmoid model."""
        preds_probs = sigmoid(self.a, self.b, X)
        return preds_probs
    def predict(self, X, threshold = 0.5):
        """Return boolean class predictions at the given probability threshold."""
        return self.predict_proba(X) >= threshold
    def predict_logloss(self, X, y):
        """Log loss of predicted probabilities against labels y."""
        preds_probs = self.predict_proba(X)
        return log_loss(y, preds_probs, labels = [0, 1])
    def predict_accuracy(self, X, y, threshold = 0.5):
        """Accuracy of thresholded predictions against labels y."""
        return accuracy_score(y, self.predict(X, threshold = threshold))
    def predict_ece(self, X, y, bins = 10):
        """Expected Calibration Error of the predicted probabilities."""
        ece = ECE(bins)
        calibrated_score = ece.measure(self.predict_proba(X), y)
        return calibrated_score
    def predict_ece_logloss(self, X, y, bins = 10):
        """Return (ECE, log loss) computed from one shared predict_proba call."""
        preds_probs = self.predict_proba(X)
        ece = ECE(bins)
        calibrated_score = ece.measure(preds_probs, y)
        #print(calibrated_score, y, preds_probs)
        return calibrated_score, log_loss(y, preds_probs, labels = [0, 1])
from lib.walletConnection import Connection
from lib.utils import sha_256
from lib.address import Address
import os
def showDetails(wallet):
    """Print the wallet owner, current address, balance and a freshness warning."""
    print(wallet.user_ID)
    #wallet.checkUpdate()
    print(str(wallet.addr) + " => " + str(wallet.count))
    warning = "\n\n\nIt's possible that this value was not up-to-date. Please, want some minutes to validate the last transaction"
    print(warning)
def makeTransaction(wallet):
    """Interactively collect (address, amount) transfers and submit one transaction.

    Prompts for the first transfer, then repeatedly asks whether to add more.
    Finally asks for the password and calls wallet.createTransaction().

    Bug fix: the first transfer converted the amount with int(money), but
    every additional transfer was appended as the raw input string; amounts
    are now converted consistently.
    """
    clear()
    addr = input("Please, enter the address to send the money\n")
    money = input("Please, enter the amount of money do you want to send to "+addr+"\n")
    receivers = [ (addr,int(money)) ]
    while True:
        again = input("Do you want to make an another transfer in your transaction ? (y or n)")
        while again != 'y' and again != 'n':
            again = input("Do you want to make an another transfer in your transaction ? (y or n)")
        if again == 'y':
            addr = input("Please, enter the address to send the money\n")
            money = input("Please, enter the amount of money do you want to send to "+addr+"\n")
            # Convert to int like the first transfer (was appended as str).
            receivers.append( (addr, int(money)) )
        elif again == 'n':
            print("\n\n\n")
            break
    password = input("Please, enter your password to valid the Transaction\n")
    isValid = wallet.createTransaction(password, receivers)
    if not isValid:
        print("Sorry, but your Transaction is not valid")
def manuel():
    """Print the list of supported commands (text kept byte-identical, typos included)."""
    help_lines = (
        "This is a list of possible command : \n",
        "show : Show your Wallet information with the money in your address",
        "trans : Create a new transaction",
        "back : Go back to an ald address if your last transaction was not validate",
    )
    for line in help_lines:
        print(line)
def backAddress(wallet):
    """Return to the previous address
    """
    # Thin pass-through to the wallet's own rollback logic.
    wallet.backAddress()
def clear():
    """Clear the terminal, picking the right command for the platform."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
if __name__ == '__main__':
    # Interactive wallet CLI: sign up / log in, then run a command loop.
    conn = Connection()
    clear()
    print("Hello")
    isNew = input("Are you a new user ? (y or n or help)\n")
    while isNew != 'y' and isNew != 'n':
        if isNew == 'help':
            clear()
            print("If it's the first time you use this application, type 'y' to create your Wallet")
            print("If you already have a Wallet, type 'n' to connecte to your Wallet")
        # Bug fix: always re-prompt. The original only asked again inside the
        # 'help' branch, so any other unrecognized answer spun forever.
        isNew = input("Are you a new user ? (y or n or help)\n")
    clear()
    user_ID = input("Please, enter your user ID\n")
    clear()
    password = input("Please, enter your password\n")
    clear()
    isNew = (isNew == 'y') #True if 'y', False if 'n'
    wallet = conn.allowConnection(user_ID, password, isNew)
    # Retry until the connection (sign-up or login) succeeds.
    while wallet is None:
        print("You make a mistake, please retry")
        if isNew:
            print("Maybe this user ID already exist")
        else:
            print("Maybe your user ID or password are incorrect")
        print("\n\n")
        user_ID = input("Please, enter your user ID again\n")
        clear()
        password = input("Please, enter your password again\n")
        clear()
        wallet = conn.allowConnection(user_ID, password, isNew)
    # Command loop: 'show', 'trans', 'back', 'man'; 'close' exits.
    command = 'show'
    while command != 'close':
        if command == 'show':
            showDetails(wallet)
        if command == 'trans':
            makeTransaction(wallet)
        if command == 'back':
            backAddress(wallet)
            showDetails(wallet)
        if command == 'man':
            manuel()
        print("\n\n\n")
        manuel()
        print()
        command = input("What do you what to do ? (type 'man' for the list of action)")
    print("Good Bye !")
|
'''
------------------------------------------------------------------------------------------------
DAY THREE
------------------------------------------------------------------------------------------------
PROBLEM:
Santa is delivering presents to an infinite two-dimensional grid of houses.
He begins by delivering a present to the house at his starting location, and then an elf at the
North Pole calls him via radio and tells him where to move next. Moves are always exactly one
house to the north (^), south (v), east (>), or west (<). After each move, he delivers another
present to the house at his new location.
However, the elf back at the north pole has had a little too much eggnog, and so his directions
are a little off, and Santa ends up visiting some houses more than once. How many houses receive
at least one present?
For example:
> delivers presents to 2 houses: one at the starting location, and one to the east.
^>v< delivers presents to 4 houses in a square, including twice to the house at
his starting/ending location.
^v^v^v^v^v delivers a bunch of presents to some very lucky children at only 2 houses.
------------------------------------------------------------------------------------------------
SOLUTION:
'''
# Santa's current grid position and the ordered list of distinct houses visited.
currentX = 0
currentY = 0
# NOTE(review): name is a typo for "visitedPoints"; kept for compatibility.
vistiedPoints = []
def main():
    """Run the puzzle: follow every instruction, then print the house count."""
    consumeInstructions(getListOfInstructions())
    print(len(vistiedPoints))
def getListOfInstructions():
    """Read and return the puzzle input as one string of direction characters."""
    with open('Input/DayThreeInput') as puzzle_input:
        return puzzle_input.read()
def consumeInstructions(instructionList):
    """Record the starting house, then process each movement instruction in turn."""
    vistiedPoints.append((currentX, currentY))
    for step in instructionList:
        consumeInstruction(step)
def consumeInstruction(direction):
    """Apply one move (>, <, ^, v) and record the new house if unseen.

    Unrecognized characters move nowhere but still record the current house,
    matching the original's fall-through behaviour.
    """
    global currentX
    global currentY
    moves = {'>': (1, 0), '<': (-1, 0), '^': (0, 1), 'v': (0, -1)}
    dx, dy = moves.get(direction, (0, 0))
    currentX += dx
    currentY += dy
    position = (currentX, currentY)
    if position not in vistiedPoints:
        vistiedPoints.append(position)
# Script entry point.
if __name__ == '__main__':
    main()
"""Chats app."""
# Django
from django.apps import AppConfig
class ChatsConfig(AppConfig):
    """Chats app config."""

    # Default primary-key field type for models declared in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted path of the app package.
    name = 'app.chats'
|
from django.conf.urls import url
from . import views
# URL routes for this app: the index page plus a per-route validation endpoint.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # \w+ captures the identifier passed to views.validate(request, route=...).
    url(r'^validate/(?P<route>\w+)$', views.validate, name='validate')
]
|
# Read three numbers from stdin and print their sum.
print("Enter the first number add")
first = input()
print("Enter the second number add")
second = input()
print("Enter the third number add")
third = input()
# Bug fix: the original concatenated str + int ("The sum is " + int(...)),
# which raises TypeError. Compute the sum, then format it into the message.
print("The sum is " + str(int(first) + int(second) + int(third)))
|
# Read the wall's width and height (metres) and compute its area.
L = float(input('Largura da parede em metros:'))
Al = float(input('Altura da parede em metros'))
a = L * Al
# Every 2 m² of wall needs 1 L of paint, hence litres = area / 2.
p = a/2
print('Sua parede tem dimensão de {}x{} e sua área é de {}m²'.format(L, Al, a))
print('Para pintar essa parede você precisará de {}L de tinta'.format(p))
|
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#1 load model and load data
#numpy大量矩陣維度與矩陣運算, log
import numpy as np
#Paython上Excel所有操作, 欄位的加總、分群、樞紐分析表、小計、畫折線圖、圓餅圖
import pandas as pd
#畫圖範圍框架
import matplotlib.pyplot as plt
#seaborn直方圖, heatmap
import seaborn as sns
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#2 data processing
din.info()       # column dtypes (int64, float64, object, ...)
din.describe()   # max, min, mean, std, quartiles (75%, 25%)
din.head(10)
din.tail()
din.shape        # (rows, columns) dimension
din.columns      # feature names
#2-1將training set與testing set concat起來處理
#保證這兩個sets在同feature做同樣的處理
#2-2 training set Nan補值/testing set Nan補值
din.isnull().sum()   # count nulls per column
din.drop('Cabin', axis=1, inplace=True)
din.drop(['Cabin'], axis = 1, inplace = True)
din.Embarked.fillna(din.Embarked.max(), inplace = True)
din.Age.fillna(din.Age.mean(), inplace = True)  # mean() must be CALLED, otherwise NaNs are filled with the method object
#2-3 Object to Int
#非numeric要全部轉成numeric才可以做training
#converting categorical feature to numeric
din.Sex = din.Sex.map({0:'female', 1:'male'}).astype(object)
din['Sex'] = din.Sex.map({'female':0, 'male':1}).astype(int)
#2-4多連續值切分範圍
train_df['AgeBand'] = pd.cut(train_df['Age'], 5) #cut into same width
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
#2-5查看X(AgeBand)與y(Survived)的關係
train_df[['AgeBand','Survived']].groupby(['AgeBand'], as_index = False).mean().sort_values(by = 'AgeBand', ascending = True) #groupby (single statement; was broken across two lines)
#2-6把連續X(Fare)分小類成X(Fare), 並把datatype轉成int
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
#2-7normalization
#2-8heatmap
corrmat = train_df.corr()
sns.heatmap(corrmat, vmin=-0.3, vmax=0.8, square=True)
for column in corrmat[corrmat.SalePrice > 0.6].index:
plt.subplot(2,2,2)
plt.scatter(train_df[column], train_df['SalePrice'])
plt.show()
#2-9把前10名的feature自動列出來
#2-10先使用 Neighborhood 做區域分類,再用區域內 LotFrontage 的中位數進行補值
X['LotFrontage'] = X.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))
#2-11取代值
X[X['GarageYrBlt'] == 2207].index
X.loc[X[X['GarageYrBlt'] == 2207].index, 'GarageYrBlt'] = 2007
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#3 split training set & validation set
#3-1機器學習模塊
from sklearn import model_selection
#3-2把training set分成training set & validation set
x_ = din.drop(['Cabin','Survived'], axis=1)
y_ = din.Survived
x_train, x_valid, y_train, y_valid = model_selection.train_test_split(x_, y_, random_state=0, test_size=0.33)
#3-3k-fold
#3-4label encoding
final_X = pd.DataFrame()
for columns in object_columns:
final_X = pd.concat([final_X, rank_label_encoding(X,columns)], axis=1)
for columns in not_object_columns:
final_X = pd.concat([final_X, X[columns]], axis=1)
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
#4 Model
#4-1 predict, accuracy
from sklearn import ensemble
clf = ensemble.RandomForestClassifier(n_estimators=100, max_depth=2)
clf.fit(x_train,y_train)
y_preds = clf.predict(x_valid)
from sklearn.metrics import accuracy_score
accuracy_score(y_valid, y_preds)
#4-3 XGBRegressor fit
from xgboost import XGBRegressor
xgb_model = XGBRegressor(learning_rate = 0.01, n_estimators = 3300,
objective = "reg:linear",
max_depth= 3, min_child_weight=2,
gamma = 0, subsample=0.6,
colsample_bytree=0.7,
scale_pos_weight=1,seed=0,
reg_alpha= 0, reg_lambda= 1)
xgb_model.fit(x_train, y_train)
#4-4 LGBMRegressor
from lightgbm import LGBMRegressor
lgbm_model = LGBMRegressor(learning_rate = 0.01, n_estimators = 2900,
objective='regression',
max_depth= 3,min_child_weight=0,
gamma = 0,
subsample=0.6, colsample_bytree=0.6,
scale_pos_weight=1,seed=0,
reg_alpha= 0.1, reg_lambda= 0)
lgbm_model.fit(x_train, y_train)
#4-5SVR
from sklearn.svm import SVR
SVR_model = SVR(C = 10, epsilon = 0.1, gamma = 1e-06)
SVR_model.fit(x_train, y_train)
#4-6ElasticNetCV
from sklearn.linear_model import ElasticNetCV
alphas = [0.0001, 0.0002, 0.0003]
l1ratio = [0.5, 0.6, 0.7, 0.8, 0.7]
elastic_model = ElasticNetCV(max_iter=1e7, alphas = alphas, cv = kfolds, l1_ratio = l1ratio)
elastic_model.fit(x_train, y_train)
print(elastic_model.alpha_) #印出最佳解之alpha
print(elastic_model.l1_ratio_)#印出最佳解之l1_ratio
#4-7cross validation
from sklearn.model_selection import KFold
kfolds = KFold(n_splits=6)
from sklearn.model_selection import cross_val_score
def cv_rmse(model, X, y):
return np.sqrt(-cross_val_score(model, X, y,
scoring = 'neg_mean_squared_error',
cv = kfolds))
cv_error = {"xgb": cv_rmse(xgb_model, x_train, y_train),
"lgbm":cv_rmse(lgbm_model, x_train, y_train),
"SVR": cv_rmse(SVR_model, x_train, y_train),
"elastic":cv_rmse(elastic_model, x_train, y_train)}
cv_error
#4-8參數調整
from xgboost import XGBRegressor
from sklearn.grid_search import GridSearchCV
def XGBRegressor_cv(x,y):
cv_params = {'learning_rate': [0.005,0.01, 0.05, 0.07]}
other_params = dict(learning_rate = 0.01, n_estimators = 3300,
objective = "reg:linear",
max_depth= 3, min_child_weight=2,
gamma = 0, subsample=0.6,
colsample_bytree=0.7,
scale_pos_weight=1,seed=0,
reg_alpha= 0, reg_lambda= 1)
model = XGBRegressor(**other_params)
optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring="neg_mean_squared_log_error", cv=5, verbose=1, n_jobs=4)
optimized_GBM.fit(x, y)
evalute_result = optimized_GBM.grid_scores_
print('每輪迭代運行結果:{0}'.format(evalute_result))
print('参数的最佳取值:{0}'.format(optimized_GBM.best_params_))
print('最佳模型得分:{0}'.format(optimized_GBM.best_score_))
return model
XGBRegressor_cv(train_X,train_y)
#4-9 validation error
valid_data = {"xgb":xgb_model.predict(x_valid),
"lgbm":lgbm_model.predict(x_valid),
"elastic": elastic_model.predict(x_valid),
"SVR":SVR_model.predict(x_valid)}
valid_error = dict()
for model,v in valid_data.items():
valid_error[model] = np.power((v - y_valid),2).mean()
print(valid_error)
for train_df in combine: #為什麼一定要加這一行
train_df['Embarked'] = train_df['Embarked'].fillna(freq_port)
|
# Read a base and an exponent, then print base raised to that power.
base_num = int(input('Give me the base number:'))
power_num = int(input('give me the power number:'))
result = base_num ** power_num
print('Your result is', result)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: client.py
# modified: 2019-10-25
__all__ = ["WxApiClient"]
from requests.sessions import Session
from ..const import WXAPI_PROFILE
# Credentials and HTTP client settings pulled from the WeChat API profile config.
_APP_ID = WXAPI_PROFILE["appID"]
_APP_SECRET = WXAPI_PROFILE["appSecret"]
_DEFAULT_TIMEOUT = WXAPI_PROFILE["client"]["default_timeout"]
_USER_AGENT = WXAPI_PROFILE["client"]["user_agent"]
def _get_hooks(*fn):
return {
"response": fn,
}
def _hook_verify_status_code(r, **kwargs):
if r.status_code != 200:
r.raise_for_status()
def _hook_verify_error_field(r, **kwargs):
    """Response hook placeholder.

    Currently a no-op — TODO: implement verification of the API error field
    in the response payload.
    """
    pass
class WxApiClient(object):
    """Thin HTTP client for the WeChat API built on a shared requests Session."""
    def __init__(self):
        # One session for all calls; carries the configured User-Agent.
        self._session = Session()
        self._session.headers.update({
            "User-Agent": _USER_AGENT,
        })
    def _request(self, method, url, **kwargs):
        """Issue a request on the shared session, applying the default timeout."""
        kwargs.setdefault("timeout", _DEFAULT_TIMEOUT)
        return self._session.request(method, url, **kwargs)
    def _get(self, url, params=None, **kwargs):
        """Convenience GET wrapper around _request."""
        return self._request('GET', url, params=params, **kwargs)
    def _post(self, url, data=None, json=None, **kwargs):
        """Convenience POST wrapper around _request."""
        return self._request('POST', url, data=data, json=json, **kwargs)
    def auth_code2Session(self, code):
        """Exchange a mini-program login *code* for a session via jscode2session.

        Returns the raw Response; the attached hooks raise on non-200 status.
        """
        r = self._get(
            url="https://api.weixin.qq.com/sns/jscode2session",
            params={
                "appid": _APP_ID,
                "secret": _APP_SECRET,
                "js_code": code,
                "grant_type": "authorization_code",
            },
            hooks=_get_hooks(
                _hook_verify_status_code,
                _hook_verify_error_field,
            ),
        )
        return r
|
"""
thread_server 基于线程的并发模型
重点代码
创建监听套接字
循环接收客户端连接请求
当有新的客户端连接创建线程处理客户端请求
主线程继续等待其他客户端连接
当客户端退出,则对应分支线程退出
"""
from socket import *
from threading import Thread
import sys
# Global configuration: bind on all interfaces, port 8888.
HOST = '0.0.0.0'
PORT = 8888
ADDR = (HOST, PORT)
# Client handler function (one worker thread per connection).
def handle(c):
    """Serve one client: print each received message and acknowledge with b'OK'.

    Stops (and closes the socket) when the peer disconnects, i.e. recv
    yields an empty payload.
    """
    for data in iter(lambda: c.recv(1024).decode(), ''):
        print(data)
        c.send(b'OK')
    c.close()
# Create the TCP listening socket.
s = socket()
# Allow fast restarts: reuse the address even while in TIME_WAIT.
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(ADDR)
s.listen(5)
print("Listen the port 8888...")
while True:
    # Accept client connections forever; Ctrl-C shuts the server down.
    try:
        c, addr = s.accept()
        print("Connect from", addr)
    except KeyboardInterrupt:
        sys.exit("服务器退出")
    except Exception as e:
        # Best-effort: log the accept error and keep serving.
        print(e)
        continue
    # One thread per client connection.
    t = Thread(target = handle,args=(c,))
    # Daemon thread: exits together with the main thread.
    # NOTE(review): setDaemon() is deprecated since Py3.10 — prefer t.daemon = True.
    t.setDaemon(True)
    t.start()
|
#===============================MOTIVATION================================
# This code was created for the semester project of Agent-Based Systems
# course (SAG_2020L) of master studies programme at the Warsaw University
# of Technology - Faculty of Electronics and Information Technology.
#
# Supervision and mentoring: PhD D.Ryżko
#
#===============================SUMMARY===================================
#
# The agent system performs task of a distributed image classification.
# System consists of agents that are communicating asynchronously. The decision
# of the classifier is obtained by voting. A randomly selected commanding agent
# from ordinary agents is responsible for outsourcing tasks and collecting
# classification results. System ensures operation even if contact with some
# agents is lost.
#
#===============================LICENSE===================================
#
# This code is a free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details. It can be found
# at <http://www.gnu.org/licenses/>.
#
#==========================================================================
# 2020 Warsaw University of Technology - M.Karcz, D.Orlinski, T.Szczepanski
#==========================================================================
#
# help_functions.py - used by classifying_agent.py,
# provides methods used by agents' behaviours - communication oriented
#
#==========================================================================
import agent_config as ac
from spade.message import Message
import random
import asyncio
import os
import glob
import re
from collections import Counter
import datetime
def get_file_paths(d):
    """Return the paths of all entries directly inside directory *d*."""
    pattern = os.path.join(d, '*')
    return glob.glob(pattern)
# Broadcast helper used for CONTROL / TO_CMB / MULTIPLE_COMMANDERS traffic.
async def send_to_all(obj, meta_key, meta_value, msg_body):
    """Send msg_body (with the given metadata pair) to every known agent except ourselves.

    *obj* is a SPADE behaviour; ac.agents_dict maps agent names to configs
    containing at least a 'jid' entry.
    """
    for k, v in ac.agents_dict.items():
        if str(v['jid']) != str(obj.agent.jid):
            msg_to_send = prep_msg(v['jid'], meta_key, meta_value, msg_body)
            # print("Agent {} , message to {} has been sent.".format(self.agent.jid, v['jid']))
            await obj.send(msg_to_send)
def prep_msg(to, meta_key, meta_value, msg_body):
    """Build a SPADE Message addressed to *to* with one metadata pair and a body."""
    msg_to_send = Message(to=str(to))
    msg_to_send.set_metadata(meta_key, meta_value)
    msg_to_send.body = msg_body
    return msg_to_send
def make_vote():
    """Draw a random vote in [0, 10) and wrap it in braces, e.g. '{3.14}'."""
    return '{{{}}}'.format(random.uniform(0, 10))
def get_vote(vote):
    """Parse the float between the first '{' and the first '}' of *vote*."""
    start = vote.find("{") + 1
    end = vote.find("}")
    return float(vote[start:end])
def did_you_win(all_votes, your_vote):
    """True when there are no competing votes, or ours ties/beats the maximum."""
    return not all_votes or float(max(all_votes)) <= float(your_vote)
# One could ask: what if two agents draw the same number — two commanders?
# With 100 000 agents the probability of a duplicate draw is ~1e-10 %.
# Even then, every newly promoted commander verifies it is the only one by
# sending a MULTIPLE_COMMANDERS message; on a duplicate the vote is rerun.
async def start_voting(obj, meta_key, meta_value, type_of_voting):
    """Run one voting round; return True iff this agent wins.

    Two kinds of votes exist in the system: among ordinary agents, and a
    safety vote between multiple commanding agents. Our vote is broadcast,
    then incoming votes are gathered until one second passes with none.
    """
    print("Agent {} is voting!".format(obj.agent.jid))
    my_vote = make_vote()
    all_votes = []
    msg_body = type_of_voting + my_vote + " Vote of Agent {}.".format(obj.agent.jid)
    await send_to_all(obj, meta_key, meta_value, msg_body)
    while True:
        voting_end = True
        msg = await obj.receive(timeout=1)  # waiting 1 sec from last gathered vote
        # Only count messages of the matching voting type.
        if msg and msg.body[:len(type_of_voting)] == type_of_voting:
            all_votes.append(get_vote(msg.body))  # collect the incoming votes
            # print(all_votes)
            voting_end = False
        if voting_end:
            break
    if did_you_win(all_votes, get_vote(my_vote)):
        print("Agent {} won! He will become the new Commander!".format(obj.agent.jid))
        return True
    else:
        return False
async def promotion_to_commanding(obj):
    """Promote this agent to commander.

    Called once it is established that there is exactly one commanding agent:
    the agent sends itself a CONTROL/TO_CMB message, which makes it take
    command (the receiving behaviour should then move to state one).
    """
    msg_to_send = prep_msg(obj.agent.jid, ac.CONTROL, ac.TO_CMB, "Taking the command.")
    print("Agent {} , sending promotion note to himself.".format(obj.agent.jid))
    await obj.send(msg_to_send)
    # obj.set_next_state(ac.STATE_ONE)
async def simulate_death(obj):
    """Park a behaviour forever once it has been marked as killed.

    SPADE agents/behaviours turned out to keep running after kill(); this
    workaround stops such a behaviour by sleeping in an endless loop.
    """
    if obj.is_killed():
        print("Agent {} was killed?: {}".format(obj.agent.jid, obj.is_killed()))
        while True:
            await asyncio.sleep(1000)
def get_contacts_from_roster(roster):
    """Extract contact JIDs from a SPADE roster, returned as a list of strings.

    Works on the roster's string representation: every t='<name>' occurrence
    is located, the quoted name extracted, and '@' + ac.server_name appended.
    NOTE(review): assumes each quoted name fits within 20 characters of the
    match start — confirm against the actual roster format.
    """
    contact_list = []
    con_str = str(roster)
    indexes = [m.start() for m in re.finditer('t=\'(.+?)\',', con_str)]
    for x in indexes:
        tmp = re.search('\'(.+?)\'', con_str[x:x + 20]).group(0)
        tmp = tmp[1:-1] + '@' + ac.server_name
        contact_list.append(tmp)
    return contact_list
def ballot_box(classif_list, not_classif_list):
    """Tally "yes" and "no" classification votes per label.

    Returns [yes_counts, no_counts, net_counts] as plain dicts, where
    net = yes - no per label. A negative net (e.g. horse: -3) means the
    agents believe the object is NOT in the image; 0 means a tie.

    Bug fix: ``Counter.__sub__`` keeps only positive counts, so tied and
    "no"-majority labels silently vanished from the net result (contradicting
    the documented negative/zero outcomes). ``Counter.subtract`` keeps them.
    """
    classif_list_counter = Counter(classif_list)
    not_classif_list_counter = Counter(not_classif_list)
    res = Counter(classif_list_counter)
    res.subtract(not_classif_list_counter)
    return [dict(classif_list_counter), dict(not_classif_list_counter), dict(res)]
def log_results(commander_jid, alive_agent_number, img, result):
    """Append one classification run's summary to ac.CLASSIFICATION_RESULTS_FILE.

    *result* is the 3-element list returned by ballot_box():
    [yes-votes, no-votes, net-counts].

    Fix: the file is now opened with a context manager so the handle is
    closed even if a write fails (the original opened/closed manually and
    could leak the handle on error).
    """
    with open(ac.CLASSIFICATION_RESULTS_FILE, "a+") as f:
        f.write("\nClassification date: {}.\r".format(datetime.datetime.now()))
        f.write("Commanding Agent: {}. No. Agents: {}.\r".format(commander_jid, alive_agent_number))
        f.write("Object to recognize: {}.\r".format(img))
        f.write("Classification results: {}.\r".format(result[2]))
        f.write("Votes for YES: {}.\r".format(result[0]))
        f.write("Votes for NO: {}.\r".format(result[1]))
def print_wrapper(filename):
    """Decorator factory: tee the wrapped call's first positional arg into *filename*.

    Each call appends a timestamped line to *filename* before executing the
    wrapped function (intended for wrapping print; logs.txt will be used).
    NOTE(review): assumes args[0] is a string, and forwards **kwargs to
    file.write(), which accepts no keyword args — any kwargs (e.g. print's
    end=...) would raise TypeError. Confirm call sites before relying on kwargs.
    """
    def wrap(func): #print is the function that will be wrapped
        def wrapped_func(*args,**kwargs):
            #use with statement to open, write to, and close the file safely
            with open(filename,'a+') as outputfile:
                line = str(datetime.datetime.now()) +": "+ args[0] + "\n"
                outputfile.write(line,**kwargs)
            #now original function executed with its arguments as normal
            return func(*args,**kwargs)
        return wrapped_func
    return wrap
|
import pytest
from game.hungarian_deck import HungarianDeck, HungarianCard, card
from game.hungarian_deck.deck import OutOfCardsException
def test_deck_can_be_created():
    """Constructing a HungarianDeck must not raise."""
    deck = HungarianDeck()
    # Bug fix: `assert True` can never fail; assert on the instance instead.
    assert deck is not None, "couldn't initialize deck"
def test_new_deck_contains_32_cards(deck: HungarianDeck):
    """A fresh deck (from the conftest `deck` fixture, not visible here) holds 32 cards."""
    assert len(deck) == 32
def test_card_can_be_drawn_from_deck(deck: HungarianDeck):
    """draw() returns a HungarianCard instance."""
    drawn_card = deck.draw()
    assert isinstance(drawn_card, HungarianCard)
def test_many_cards_can_be_drawn(deck: HungarianDeck):
    """draw_many(8) yields 8 pairwise-distinct cards and shrinks the deck to 24."""
    cards_drawn = deck.draw_many(8)
    unique_cards = set(cards_drawn)
    assert len(cards_drawn) == 8
    assert len(unique_cards) == 8
    assert len(deck) == 24
def test_deck_contains_32_different_cards(deck: HungarianDeck):
    """Drawing the entire deck produces 32 pairwise-distinct cards."""
    cards = {deck.draw() for _ in range(32)}
    assert len(cards) == 32
def test_drawing_more_than_32_times_causes_exception(deck: HungarianDeck):
    """Drawing from an exhausted deck raises OutOfCardsException with the right message."""
    # ok so far
    deck.draw_many(32)
    with pytest.raises(OutOfCardsException) as e:
        deck.draw()
    # Bug fix: str(e) on pytest's ExceptionInfo yields "file:line: ExcType",
    # not the exception message; the raised exception lives on e.value.
    assert "no cards left" in str(e.value)
def test_reset_recreates_the_deck(deck: HungarianDeck):
    """reset() restores a partially drawn deck back to the full 32 cards."""
    deck.draw_many(10)
    assert len(deck) == 22
    deck.reset()
    assert len(deck) == 32
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import functools
from src.save_dataset import save_dataset
from sklearn.preprocessing import LabelBinarizer
def generate_synthetic_data(numdims, noise, numsamples=1000, num_group_types=1,
                            min_subgroups=2, max_subgroups=10, min_subgroup_size=20,
                            mean_range=0, variability=1, num_uniform_features=0, intercept_scale=2,
                            binary=False, drop_group_as_feature=False,
                            save_data=False, file_dir='', file_name='',
                            random_seed=0,
                            upload_dataset_to_s3=False, bucket_name='', credentials_file=''):
    """
    Generates two matrices X, y of features and labels where for each type of groups, X is divided into numgroups
    different groups each of which has a shared linear function from which labels are sampled with noise.
    For the binary case, we convert the real valued labels into 0 or 1 by sign of label (positive or negative)
    :param numsamples : Number of instances/rows of X
    :param numdims : Dimensionality of synthetic data
    :param noise : Gaussian noise in Y
    :param num_group_types: Number of categories (e.g. race, sex, etc.) such that each instances belongs to one
                            subgroup for each groups type
    :param min_subgroups : Minimum number of subgroups for each groups type (selected uniformly at random)
    :param max_subgroups : Maximum number of subgroups for each groups type (selected uniformly at random)
    :param min_subgroup_size : Minimum number of instances for each subgroup. Generated by randomized algorithm that
                               repeats until minimum size is satisfied for all subgroups. Can't exceed average
                               subgroup size.
    :param intercept_scale : Coefficient on randomly generated intercept for each groups. Intercepts drawn from unit
                             normal and 0.0 denotes no intercept.
    :param mean_range : Mean for each feature dist. is selected uniformly at random from [-mean_range, mean_range]
    :param variability: Denotes std. dev. for normally distributed features and distance from mean to endpoint for
                        uniform features
    :param num_uniform_features: How many of `numdims` features should be drawn uniformly from the distribution
                                 defined by the mean and `variability`. Remaining features drawn from normal dist.
    :param binary : If labels should be converted to binary (0/1) for classification. Uses sign (+/-) of numeric label
    :param drop_group_as_feature : Denotes if X should drop columns corresponding to one-hot encoded groups labels
    :param random_seed : Random seed for all numpy randomization
    :param save_data : Denotes whether or not generated matrices should be saved to a file
    :param file_dir : Directory to save to if save_data is True
    :param file_name : File name in file_dir to save to if save_data is True
    :param upload_dataset_to_s3 : Forwarded to save_dataset; whether to also upload the saved dataset to S3.
                                  (Bug fix: this and the two params below were previously undefined free
                                  names at the save_dataset call site, raising NameError when save_data=True.)
    :param bucket_name : S3 bucket to upload to when upload_dataset_to_s3 is True
    :param credentials_file : Credentials file used for the S3 upload
    """
    # Set the random seed
    np.random.seed(random_seed)
    if num_uniform_features > numdims:
        raise Exception(f'Error! More uniform features ({num_uniform_features}) than total dimensions ({numdims})')
    Xs = []
    ys = []
    group_sets = []
    grouplabel_list = []
    for i in range(num_group_types):
        n_subgroups = np.random.randint(min_subgroups, max_subgroups+1)  # Determines num. subgroups for this class
        # With multiple categories of groups, we partition into subgroups of random but lower-bounded size
        # Generate a numpy array of the sizes for each groups
        groupsize = generate_random_intervals(numsamples, n_subgroups, min_subgroup_size)
        # Fill out the labels in order
        # e.g. groups 0 will be the first groupsize[0] rows in the matrix, group1 the next set of rows, etc.
        grouplabels = []
        curr_grp_index = 0
        for size in groupsize:
            for _ in range(size):
                grouplabels.append(curr_grp_index)
            curr_grp_index += 1
        grouplabels = np.array(grouplabels)  # convert to numpy array
        # Compute number of samples and generate feature matrix X
        assert numsamples == np.size(grouplabels)
        # Generate feature matrix X
        X = generate_feature_matrix(numsamples, numdims, n_subgroups, num_uniform_features,
                                    grouplabels, mean_range, variability)
        # Generate y; each groups has a different linear model
        weights = np.random.randn(n_subgroups, numdims)
        intercepts = np.zeros(n_subgroups) if drop_group_as_feature else (np.random.randn(n_subgroups) * intercept_scale)
        y = np.zeros(numsamples)
        # Create y according to X with noise
        for g in range(0, n_subgroups):
            w = weights[g]
            idx = np.where(grouplabels == g)
            X_g = X[idx, :]
            y[idx] = np.matmul(X_g, w) + noise * np.random.randn(1, np.size(idx)) + intercepts[g]
        # Give "labels" to each groups in the synthetic data
        group_sets.append(['Subgroup ' + str(1 + x) for x in range(n_subgroups)])
        assert n_subgroups == len(groupsize)
        grouplabel_list.append(grouplabels)
        Xs.append(X)
        ys.append(y)
        # End of for loop over groups type
    # Sum Xs and sum ys and divide by number of group types to get the average feature and label matrix
    X = functools.reduce(lambda x1, x2: np.add(x1, x2), Xs) / num_group_types
    y = functools.reduce(lambda y1, y2: np.add(y1, y2), ys) / num_group_types
    # Add all the groups membership variables to the feature matrix with one-hot categorical encoding
    if not drop_group_as_feature:
        matrices_to_stack = [X]  # Will store all the matrices to be horizontally concatenated to increase columns
        for i in range(num_group_types):
            lb = LabelBinarizer()
            matrices_to_stack.append(lb.fit_transform(grouplabel_list[i]))
        # Add the new columns to X
        X = np.column_stack(matrices_to_stack)
    # If we want a binary dataset, we can threshold the y labels
    if binary:
        y = (y > 0)
    grouplabel_list = np.array(grouplabel_list)
    # Saves the data as numpy objects
    if save_data:
        save_dataset(file_dir, file_name, X, y, grouplabel_list, group_sets, binary,
                     upload_dataset_to_s3, bucket_name, credentials_file)
    group_types = [f'Type {i+1}' for i in range(num_group_types)]
    return X, y, grouplabel_list, group_sets, group_types, binary
def generate_feature_matrix(numsamples, numdims, numgroups, num_uniform_features, grouplabels, mean_range, variability):
    """
    Build a feature matrix in which every group draws each feature from its own distribution.

    :param numsamples: total number of samples (rows)
    :param numdims: total dimensionality (number of columns)
    :param numgroups: number of groups
    :param num_uniform_features: how many trailing features per group are uniform rather than normal
    :param grouplabels: array assigning each sample to a group
    :param mean_range: each distribution mean is drawn uniformly at random from [-mean_range, mean_range]
    :param variability: standard deviation for normal, or distance from center to bound for uniform
    :return: numsamples x numdims matrix where each group has a unique distribution per feature
    """
    # Vanilla dataset: every feature for every group is a unit normal.
    if mean_range == 0 and variability == 1 and num_uniform_features == 0:
        return np.random.randn(numsamples, numdims)
    features = np.zeros((numsamples, numdims))
    # Each group gets its own list of numdims distribution specs; its members'
    # rows are then filled column-by-column from those distributions.
    for group in range(numgroups):
        # Spec tuple: (is_uniform, mean, variability). The last
        # num_uniform_features features are uniform, the rest normal.
        dist_specs = [
            (col >= numdims - num_uniform_features,
             np.random.uniform(-mean_range, mean_range),
             variability)
            for col in range(numdims)
        ]
        # Mask the rows belonging to the current group and populate them.
        members = np.where(grouplabels == group)
        features[members, :] = generate_group_features(dist_specs, np.size(members))
    return features
def generate_group_features(distribution_attributes, groupsize):
    """
    Build the feature sub-matrix for a single group.

    :param distribution_attributes: list of numdims (dist. type, mean, variability)
        tuples, each defining a unique distribution for one feature
    :param groupsize: number of rows to create (members of the particular group)
    :return: groupsize x numdims matrix where each column follows its own distribution
    """
    # One sampled column per distribution spec, stacked side by side.
    cols = [generate_column_from_distribution(spec, groupsize)
            for spec in distribution_attributes]
    return np.column_stack(cols)
def generate_column_from_distribution(dist, groupsize):
    """
    Sample one feature column from the distribution described by *dist*.

    :param dist: 3-tuple (is_uniform, mean, variability); variability is the
        std. dev for the normal, or the distance from center to either bound
        for the uniform
    :param groupsize: the number of rows in the column vector
    :return: (groupsize, 1) column vector sampled from the distribution
    """
    is_uniform, mean, variability = dist
    if is_uniform:  # Sampling from the uniform
        # BUG FIX: np.random.randint floors float bounds and returns integers
        # only; the documented contract is a *continuous* uniform within
        # [mean - variability, mean + variability].
        return np.random.uniform(mean - variability, mean + variability, (groupsize, 1))
    # Otherwise sample from the normal: scale a unit normal, then shift by the mean.
    return (np.random.randn(groupsize, 1) * variability) + mean
def generate_random_intervals(n_samples, n_subgroups, min_size, max_repititions=10000):
    """
    Randomly partition an array of length `n_samples` into k = `n_subgroups` contiguous
    regions by placing k-1 dividing positions uniformly at random. Every interval must
    have size >= `min_size`, or the draw is repeated up to `max_repititions` times;
    after that an exception is raised.

    :return: list of n_subgroups interval sizes, in order, summing to n_samples
    """
    if n_samples / n_subgroups < min_size:
        raise Exception(f"ERROR: Cannot subdivide {n_samples} instances into {n_subgroups} such that all subgroups "
                        f"have size at least {min_size}.")
    # BUG FIX: with a single subgroup there are no dividers and the code below
    # would index d[0] on an empty list -- the only partition is the whole array.
    if n_subgroups == 1:
        return [n_samples]
    attempts = 0
    while attempts < max_repititions:
        # BUG FIX: attempts was never incremented, so the retry bound was
        # meaningless and a persistent failure looped forever.
        attempts += 1
        # Determine the positions of the dividers randomly.
        d = sorted(np.random.randint(n_samples, size=n_subgroups - 1))
        # The ith size is the ith divider minus the (i-1)th divider; the first
        # and last sizes come from the array's outer boundaries.
        subgroup_sizes = [d[0]] + [(d[i] - d[i - 1]) for i in range(1, len(d))] + [n_samples - d[-1]]
        # If no subgroup is too small, return the sizes; otherwise repeat.
        if min(subgroup_sizes) >= min_size:
            return subgroup_sizes
    raise Exception(f'We failed to find a valid partition of {n_samples} elements into {n_subgroups} groups such that'
                    f'each groups had size >= {min_size} after {max_repititions} attempts of our randomized algorithm.'
                    f' Please lower the minimum'
                    f'threshold, increase the number of samples, or decrease the number of subgroups and try again.')
|
# Generated by Django 3.1.7 on 2021-03-04 08:27
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``desc`` field from the ``category`` model."""

    dependencies = [
        ('shopping_cart_app', '0004_auto_20210211_1518'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='desc',
        ),
    ]
|
from num2words import num2words
# Project Euler 17: total letters used writing out 1..1000 in words,
# excluding spaces and hyphens.
total = 0
for i in range(1, 1001):
    # Hoist the conversion: the original called num2words(i) three times per number.
    words = num2words(i)
    total += len(words) - words.count(" ") - words.count("-")
print(total)
|
from math import log10


def count_longer_numerators(expansions=1000):
    """
    Count how many of the first *expansions* continued-fraction expansions of
    sqrt(2) (starting with 3/2) have a numerator with more digits than the
    denominator (Project Euler 57).
    """
    # BUG FIX: the original used Python-2-only syntax (xrange, print statement),
    # which is a SyntaxError under Python 3; digit counts are now compared
    # exactly via str() instead of through float log10.
    n, d = 3, 2
    total = 0
    for _ in range(expansions):
        if len(str(n)) > len(str(d)):
            total += 1
        # Next expansion: n/d -> (n + 2d) / (n + d)
        n, d = n + 2 * d, n + d
    return total


print(count_longer_numerators())
## It is possible to show that the square root of two
## can be expressed as an infinite continued fraction.
##
## 2 = 1 + 1/(2 + 1/(2 + 1/(2 + ... ))) = 1.414213...
##
## By expanding this for the first four iterations, we get:
##
## 1 + 1/2 = 3/2 = 1.5
## 1 + 1/(2 + 1/2) = 7/5 = 1.4
## 1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666...
## 1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379...
##
## The next three expansions are 99/70, 239/169, and 577/408,
## but the eighth expansion, 1393/985, is the first example
## where the number of digits in the numerator exceeds the
## number of digits in the denominator.
##
## In the first one-thousand expansions, how many fractions
## contain a numerator with more digits than denominator?
|
# Copyright (c) 2013, Fortylines LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Command for the cron job. Daily statistics"""
import datetime, sys
import datetime
import time
from time import mktime
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from saas.models import Organization, Transaction, NewVisitors
from saas.models import Organization
class Command(BaseCommand):
    """Cron-job command: parse a web-server access log and store daily new-visitor counts."""

    help = 'Save new vistor datas into the database. This command needs the path of the log file to analyse.'

    def handle(self, args, **options):
        # NOTE(review): Django's BaseCommand.handle is normally declared as
        # handle(self, *args, **options); here ``args`` is used directly as the
        # log file path -- confirm how this command is invoked before changing it.
        visitors = []
        values = []
        log3 = []
        browser = []
        date = []
        log = open(args)  # NOTE(review): file handle is never closed -- consider ``with``
        # Filter out lines that look like crawler/bot traffic.
        rob = "bot"
        pub = "Pub"
        spy = "Spider"
        spy2 = "spider"
        goog = "google"
        rob2 = "AhrefsBot"
        for ligne in log.readlines():
            log1 = ligne
            if (not rob in ligne) and (not pub in ligne) and (not spy in ligne) and (not spy2 in ligne) and (not goog in ligne) and (not rob2 in ligne):
                visitors += [ligne]
        print(len(visitors))
        # Build one {IP, browser, date} record per retained log line.
        for i in range(len(visitors)):
            # Field 5 of the quote-split line is the user-agent string
            # (assumes Apache/Nginx combined log format -- TODO confirm).
            browser_name = (visitors[i].split('"'))[5]
            log3 = visitors[i].split("[")
            date = log3[1].split("]")
            # Keep only the dd/Mon/yyyy part of "[dd/Mon/yyyy:HH:MM:SS ...]".
            datee = (date[0].split(":"))[0]
            IP = log3[0].split(" -")[0]
            c = time.strptime(datee, "%d/%b/%Y")
            dt = datetime.strftime(datetime.fromtimestamp(mktime(c)), "%Y/%m/%d")
            browser += [{"IP": IP, "browser": browser_name,
                         "date": dt}]
        # Collect all visit dates for each unique (IP, browser) pair.
        dates_per_unique_visitor = {}
        for datas in browser:
            key = (datas["IP"], datas["browser"])
            if not key in dates_per_unique_visitor:
                dates_per_unique_visitor[key] = []
            dates_per_unique_visitor[key] += [datas["date"]]
        # Map each visitor's *first* visit date to the visitors first seen on
        # that date: a visitor counts as "new" only once.
        final_list = {}
        for it in dates_per_unique_visitor:
            key = dates_per_unique_visitor[it][0]
            if not key in final_list:
                final_list[key] = []
            final_list[key] += [it]
        table = []
        total = []
        total2 = 0
        final_list2 = sorted(final_list.items())
        for it in range(len(final_list2)):
            total += [len(final_list2[it][1])]
            total2 += len(final_list2[it][1])
            c = time.strptime(final_list2[it][0], "%Y/%m/%d")
            dt = datetime.strftime(datetime.fromtimestamp(mktime(c)), "%Y-%m-%d")
            new = NewVisitors()
            new.date = dt
            new.visitors_number = len(final_list2[it][1])
            # Check in the database whether the date already exists and only
            # save a new row when it does not.
            newvisitor = NewVisitors.objects.filter(date=dt)
            if not newvisitor:
                new.save()
|
from presidio_analyzer import Pattern, PatternRecognizer
# pylint: disable=line-too-long,abstract-method
class UsSsnRecognizer(PatternRecognizer):
    """
    Recognizes US Social Security Number (SSN) using regex.

    Patterns are ordered by increasing confidence score: a bare 9-digit run is
    weak evidence, while the canonical 3-2-4 dashed form scores highest.
    """

    PATTERNS = [
        Pattern("SSN (very weak)", r"\b(([0-9]{5})-([0-9]{4})|([0-9]{3})-([0-9]{6}))\b", 0.05),  # noqa E501
        Pattern("SSN (weak)", r"\b[0-9]{9}\b", 0.3),
        Pattern("SSN (medium)", r"\b([0-9]{3})-([0-9]{2})-([0-9]{4})\b", 0.5),
    ]

    # Context words that boost a pattern match's confidence when found nearby.
    CONTEXT = [
        "social",
        "security",
        # "sec", # Task #603: Support keyphrases ("social sec")
        "ssn",
        "ssns",
        "ssn#",
        "ss#",
        "ssid",
    ]

    def __init__(
        self,
        patterns=None,
        context=None,
        supported_language="en",
        supported_entity="US_SSN",
    ):
        """
        :param patterns: optional override of the class-level regex patterns
        :param context: optional override of the class-level context words
        :param supported_language: language code this recognizer applies to
        :param supported_entity: entity label emitted on a match
        """
        # Fall back to the class-level defaults when no overrides are given.
        patterns = patterns if patterns else self.PATTERNS
        context = context if context else self.CONTEXT
        super().__init__(
            supported_entity=supported_entity,
            patterns=patterns,
            context=context,
            supported_language=supported_language,
        )
|
"""
File for database utilities
Authors: Edward Mattout & Daniella Grimberg
"""
import logging
import sys
import mysql.connector
from config import HOST, DATABASE, USER, PASSWORD, LOG_FILE_FORMAT, LOG_FILE_NAME
# Module-level logger for the database layer: INFO and above goes to the log
# file, while only ERROR and above is echoed to stdout.
formatter = logging.Formatter(LOG_FILE_FORMAT)
logger = logging.getLogger('database')
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(LOG_FILE_NAME)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.ERROR)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
def connect_to_database():
    """
    Function creates connection to the MySQL database configured in ``config``.

    Exits the program (status 1) when the connection cannot be established.
    :return: (connection, cursor) tuple
    """
    try:
        connection = mysql.connector.connect(host=HOST,
                                             database=DATABASE,
                                             user=USER,
                                             password=PASSWORD)
    except mysql.connector.Error as error:
        # BUG FIX: logging's printf-style arguments require a placeholder; the
        # previous call passed ``error`` with no %s, which triggered a
        # "not all arguments converted" formatting error inside logging.
        logger.error("Error: Failed to connect to database. Exiting Program: %s", error)
        sys.exit(1)
    logger.info("Successfully connected to database")
    return connection, connection.cursor()
def close_database_connection(connection, cursor):
    """
    Close the cursor and the database connection when still open.

    :param connection: live database connection
    :param cursor: cursor belonging to that connection
    :return: None
    """
    # Guard clause: nothing to do when the connection is already closed.
    if not connection.is_connected():
        return
    cursor.close()
    connection.close()
    logger.info("Successfully closed database connection")
def insert_author(connection, cursor, author_name, twitter_handle):
    """
    Function inserts data into authors table.

    :param connection: connection to database
    :param cursor: cursor
    :param author_name: full name of authors
    :param twitter_handle: twitter handle of author
    :return: author_id, or None when the author cannot be found after the insert
    """
    try:
        # INSERT IGNORE: a duplicate author is silently skipped rather than erroring.
        cursor.execute("""INSERT IGNORE INTO authors (full_name, twitter_handle)
                       VALUES (%s, %s) """, (author_name, twitter_handle))
        connection.commit()
    except mysql.connector.Error as error:
        # Best-effort: log and fall through to the lookup below.
        logger.error("Failed to insert into table AUTHORS {}".format(error))
    finally:
        # Always resolve the id by name so a new, duplicate, or failed insert
        # all return whatever id is actually stored.
        cursor.execute("""SELECT author_id FROM authors WHERE full_name = (%s)""", (author_name,))
        res = cursor.fetchall()
        author_id = res[0][0] if res else None
    return author_id
def insert_article(connection, cursor, link, title, date):
    """
    Function inserts article to articles table.

    :param connection: connection to database
    :param cursor: cursor to execute sql queries
    :param link: link of article
    :param title: title of article
    :param date: publish data of article
    :return: article_id, or None when the article cannot be found after the insert
    """
    try:
        # INSERT IGNORE: a duplicate article is silently skipped rather than erroring.
        cursor.execute("""INSERT IGNORE INTO articles (link, title, date)
                       VALUES (%s, %s, %s)""", (link, title, date))
        connection.commit()
    except mysql.connector.Error as error:
        # Best-effort: log and fall through to the lookup below.
        logger.error("Failed to insert into table ARTICLES {}".format(error))
    finally:
        # Always resolve the id by title so a new, duplicate, or failed insert
        # all return whatever id is actually stored.
        cursor.execute("""SELECT article_id FROM articles WHERE title = (%s)""", (title,))
        res = cursor.fetchall()
        article_id = res[0][0] if res else None
    return article_id
def insert_tag(connection, cursor, tag, article_id):
    """
    Function inserts tags of article to tags table.

    :param connection: connection to database
    :param cursor: cursor to execute sql queries
    :param tag: tag of article
    :param article_id: article_id
    :return: None
    """
    try:
        # INSERT IGNORE: an existing tag is silently skipped rather than erroring.
        cursor.execute("""INSERT IGNORE INTO tags (tag_text) VALUES (%s)""", (tag,))
        connection.commit()
    except mysql.connector.Error as error:
        # Best-effort: log and fall through to the lookup below.
        logger.error("Failed to insert into table TAGS {}".format(error))
    finally:
        # Resolve the tag id (new or pre-existing) before linking it.
        cursor.execute("""SELECT tag_id FROM tags WHERE tag_text = (%s)""", (tag,))
        res = cursor.fetchall()
        tag_id = res[0][0] if res else None
    # Only create the article<->tag junction row when both ids are known.
    if tag_id and article_id:
        try:
            cursor.execute("""INSERT INTO article_to_tags (article_id, tag_id) VALUES (%s, %s)""",
                           (article_id, tag_id))
            connection.commit()
        except mysql.connector.Error as error:
            logger.error("Failed to insert into table ARTICLE_TO_TAGS {}".format(error))
def insert_article_author_relation(connection, cursor, article_id, author_id):
    """
    Inserts article author relationship in database.

    :param connection: connection to database
    :param cursor: cursor to execute sql queries
    :param article_id: id of the article row
    :param author_id: id of the author row
    :return: None
    """
    query = """INSERT IGNORE INTO article_to_authors (article_id, author_id) VALUES (%s, %s)"""
    try:
        cursor.execute(query, (article_id, author_id))
        connection.commit()
    except mysql.connector.Error as error:
        # Best-effort: log the failure and continue.
        logger.error("Failed to insert into table ARTICLE_TO_AUTHORS {}".format(error))
def article_in_database(cursor, title):
    """
    Check whether an article with this title is already stored, to avoid
    re-scraping it.

    :param cursor: cursor to execute sql queries
    :param title: article title to look up
    :return: True when a matching row exists, False otherwise
    """
    cursor.execute("""SELECT article_id FROM articles WHERE title = (%s)""", (title,))
    rows = cursor.fetchall()
    if not rows:
        return False
    logger.info(f"Article with title {title} already in database!")
    return True
def insert_article_entry(author_name, twitter_handle, tag_list, title, date, link):
    """
    Insert one full article entry (author, article, tags, relations).

    :param author_name: name of author
    :param date: article date
    :param link: article link
    :param twitter_handle: authors' twitter handle
    :param tag_list: list of tags associated to article
    :param title: title of article
    :return: None
    """
    connection, cursor = connect_to_database()
    # Skip everything when the article was already stored.
    if article_in_database(cursor, title):
        close_database_connection(connection, cursor)
        return
    author_id = insert_author(connection, cursor, author_name, twitter_handle)
    article_id = insert_article(connection, cursor, link, title, date)
    # Deduplicate tags before inserting.
    for tag in set(tag_list):
        insert_tag(connection, cursor, tag, article_id)
    if article_id and author_id:
        insert_article_author_relation(connection, cursor, article_id, author_id)
    else:
        logger.error(f"Error inserting author article relation for article title {title} and author {author_name}")
    close_database_connection(connection, cursor)
|
from django import forms
from .models import ModelosClustering
'''
class ModelosForm(forms.ModelForm):
class Meta:
model = Modelos
fields = ['modelo']
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.fields['modelo'].widget.attrs.update({
'class': 'form-control',
#default=1,
})
'''
class NModel(forms.ModelForm):
    """ModelForm for ModelosClustering: all metadata fields are read-only except the id and name."""

    class Meta:
        model = ModelosClustering
        fields = [
            'id_modelo',
            'nombre_modelo',
            'estado',
            'fecha_creacion',
            'algoritmo',
            'autor',
            'nombre_archivo',
            'caracteristicas',
        ]
        labels = {
            'id_modelo': 'ID Modelo',
            'nombre_modelo': 'Nombre',
            'estado': 'Estado',
            'fecha_creacion': 'Fecha de creación',
            'algoritmo': 'Algoritmo',
            'autor': 'Autor',
            'nombre_archivo': 'Nombre del archivo',
            'caracteristicas': 'Características seleccionadas',
        }
        # ``estado`` is hidden; creation metadata is rendered read-only.
        widgets = {
            'id_modelo': forms.TextInput(attrs={'class':'form-control'}),
            'nombre_modelo': forms.TextInput(attrs={'class':'form-control'}),
            'estado': forms.HiddenInput(),
            'fecha_creacion': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
            'algoritmo': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
            'autor': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
            'nombre_archivo': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
            'caracteristicas': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
        }
class ModelosForm(forms.ModelForm):
    """Edit form for ModelosClustering: the state is selectable, file metadata stays read-only."""

    class Meta:
        model = ModelosClustering
        fields = [
            'id_modelo',
            'nombre_modelo',
            'estado',
            'nombre_archivo',
            'caracteristicas',
        ]
        labels = {
            'id_modelo': 'ID Modelo',
            'nombre_modelo': 'Nombre',
            'estado': 'Estado',
            'nombre_archivo': 'Nombre del archivo',
            'caracteristicas': 'Características seleccionadas',
        }
        widgets = {
            'id_modelo': forms.TextInput(attrs={'class':'form-control'}),
            'nombre_modelo': forms.TextInput(attrs={'class':'form-control'}),
            'estado': forms.Select(attrs={'class':'form-control'}),
            'nombre_archivo': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
            'caracteristicas': forms.TextInput(attrs={'class':'form-control','readonly':'readonly'}),
        }

    # def __init__(self, *args, **kwargs):
    #     super().__init__(*args, **kwargs)
    #     self.fields['estado'].widget.attrs.update({
    #         'class': 'form-control',
    #         'default' : '1',
    #     })
def xor_with_window(bits, m, n):
    """
    XOR each bit with a sliding window of preceding bits.

    Bit i (for 1 <= i < m) is XORed, in order, with bits i-1 down to i-n+1
    (i.e. up to n-1 immediately preceding bits, clipped at the start).

    :param bits: list of 0/1 ints
    :param m: number of leading bits to process and return
    :param n: window parameter; each bit sees at most n-1 predecessors
    :return: the first m transformed bits as a list
    """
    out = list(bits)
    for i in range(1, m):
        # k runs from i-1 down to max(i-n+1, 0), matching the original
        # "while (i-k) != n and k >= 0" loop.
        for k in range(i - 1, max(i - n, -1), -1):
            out[i] ^= out[k]
    return out[:m]


if __name__ == '__main__':
    m, n = map(int, input().split())
    digits = [int(ch) for ch in input()]
    print(''.join(map(str, xor_with_window(digits, m, n))))
from abc import ABC
import geopandas as gpd
import pandas as pd
from coord2vec.common.db.postgres import get_df, connect_to_db
from coord2vec.feature_extraction.feature import Feature
class BasePostgresFeature(Feature, ABC):
    """Base class for features computed by running a SQL query against Postgres."""

    def __init__(self, **kwargs):
        """
        Args:
            table_filter_dict: a dictionary of shape: {table_name: {filter_name: filter_sql}}
                should contain all the tables, with all the filters required for this filter.
        """
        super().__init__(**kwargs)

    def _calculate_feature(self, input_gs: gpd.GeoSeries):
        # ``intersect_tbl_name_dict`` and ``input_geom_table`` are set by the
        # OSM feature factory (defined elsewhere) -- fail fast if it was skipped.
        if self.intersect_tbl_name_dict is None or self.input_geom_table is None:
            raise Exception("Must use an OSM feature factory before extracting the feature")
        # calculate the feature
        conn = connect_to_db()
        query = self._build_postgres_query()
        res = get_df(query, conn=conn)
        # One row per input geometry; geometries missing from the query result
        # fall back to the feature's default value via fillna below.
        full_df = pd.DataFrame(index=range(len(input_gs)), columns=self.feature_names)
        if len(res['geom_id']) != 0:
            # 'geom_id' indexes rows of full_df; remaining columns are the feature values.
            full_df.iloc[res['geom_id']] = res.drop('geom_id', axis=1).values
        full_df.fillna(self.default_value, inplace=True)
        full_df['geom'] = input_gs
        # NOTE(review): conn is not closed when the query raises -- consider try/finally.
        conn.close()
        return full_df
|
"""
define classe to describe information about density in cell
"""
__author__ = 'ikibalin'
__version__ = "2019_07_09"
import os
import numpy
import f_mem.cl_atom_density
import f_common.cl_variable
class CellDensity(dict):
    """
    Class to describe all information concerning the density in cell.

    Attributes are stored under the "_p_<label>" naming convention and are
    accessed generically through get_val/set_val.
    """

    def __init__(self, name=None, points_number_a=None,
                 points_number_b=None, points_number_c=None,
                 file_dir=None, file_name=None):
        super(CellDensity, self).__init__()
        # _p_points_number_{a,b,c}: grid resolution along each cell axis
        self._p_name = None
        self._p_points_number_a = None
        self._p_points_number_b = None
        self._p_points_number_c = None
        self._p_file_dir = None
        self._p_file_name = None
        # Per-atom density descriptions appended elsewhere.
        self._list_atom_density = []
        self._refresh(name, points_number_a,
                      points_number_b, points_number_c,
                      file_dir, file_name)

    def __repr__(self):
        ls_out = """CellDensity:\nname: {:}\n points_number_a: {:}
points_number_b: {:}\n points_number_c: {:}\n file_dir: {:}
file_name: {:}""".format(self._p_name, self._p_points_number_a,
                         self._p_points_number_b, self._p_points_number_c, self._p_file_dir, self._p_file_name)
        for atom_density in self._list_atom_density:
            ls_out += "{:}".format(atom_density)
        return ls_out

    def _refresh(self, name, points_number_a, points_number_b,
                 points_number_c, file_dir, file_name):
        # Only overwrite attributes for which a non-None value was supplied;
        # point counts are coerced to int.
        if name is not None:
            self._p_name = name
        if points_number_a is not None:
            self._p_points_number_a = int(points_number_a)
        if points_number_b is not None:
            self._p_points_number_b = int(points_number_b)
        if points_number_c is not None:
            self._p_points_number_c = int(points_number_c)
        if file_dir is not None:
            self._p_file_dir = file_dir
        if file_name is not None:
            self._p_file_name = file_name

    def set_val(self, name=None, points_number_a=None,
                points_number_b=None, points_number_c=None,
                file_dir=None, file_name=None):
        """Public setter: delegates to _refresh; None arguments leave values untouched."""
        self._refresh(name, points_number_a, points_number_b,
                      points_number_c, file_dir, file_name)

    def get_val(self, label):
        """Return the value stored under ``_p_<label>``, or None when unknown."""
        lab = "_p_"+label
        if lab in self.__dict__.keys():
            val = self.__dict__[lab]
            if isinstance(val, type(None)):
                # Value unset: set_val() with no args is a no-op here; the
                # re-read returns the same None -- kept as written.
                self.set_val()
                val = self.__dict__[lab]
        else:
            print("The value '{:}' is not found".format(lab))
            val = None
        return val

    def list_vals(self):
        """
        give a list of parameters with small description
        """
        lsout = """
Parameters:
name is the name of mem
        """
        print(lsout)

    def create_density(self):
        points_number_a = 1*self._p_points_number_a
        points_number_b = 1*self._p_points_number_b
        points_number_c = 1*self._p_points_number_c
        #np_frac_x = numpy.linspace(0., 1., points_number_a, endpoint=False)
        #np_frac_y = numpy.linspace(0., 1., points_number_b, endpoint=False)
        #np_frac_z = numpy.linspace(0., 1., points_number_c, endpoint=False)
        #np_frac_x_3d, np_frac_y_3d, np_frac_z_3d = numpy.meshgrid([np_frac_x, np_frac_y, np_frac_z], indexing ="ij")
        # Uniform starting density: every voxel gets 1/(total voxel count).
        val = 1./float(points_number_a*points_number_b*points_number_c)
        propability = val*numpy.ones(shape=(points_number_a, points_number_b, points_number_c), dtype=float, order='C')
        # NOTE(review): ``propability`` is neither stored nor returned, so this
        # method currently has no observable effect -- confirm whether it is an
        # unfinished stub.

    def calc_fourier_transform(self):
        # Stub: always returns None.
        entropy = None
        return entropy

    def write_density(self, f_name):
        # Stub: always returns None.
        chi_sq = None
        return chi_sq

    def read_density(self, f_name):
        # Stub: always returns None.
        minimizer = None
        return minimizer

    def calc_magnetic_structure_factor(self):
        # Stub: always returns None.
        flip_ratio = None
        return flip_ratio

    def calc_structure_factor_tensor(self):
        # Stub: always returns None.
        res = None
        return res
|
# coding: utf-8
# In[ ]:
import argparse, datetime, os, json
import statistics
# Count, per day, how many collected tweets mention 'Trump'.
# Expects directories of the form <search_term>/<yyyy-mm-dd>/*.json,
# each JSON file holding a Twitter search response with a 'statuses' list.
parser = argparse.ArgumentParser(description='Average number')
parser.add_argument('--search_term', help='search term')
parser.add_argument('--min_date', help='min day in yyyy-mm-dd')
parser.add_argument('--max_date', help='max day in yyyy-mm-dd')
args = parser.parse_args()
search_term = args.search_term
min_date = datetime.datetime.strptime(args.min_date, '%Y-%m-%d').date()
max_date = datetime.datetime.strptime(args.max_date, '%Y-%m-%d').date()
dif_days_int = (max_date - min_date).days


def add_days(days):
    """Return min_date + days as a 'yyyy-mm-dd' string."""
    return (min_date + datetime.timedelta(days=days)).strftime('%Y-%m-%d')


for i in range(dif_days_int + 1):
    day = add_days(i)
    parent_path = search_term + '/' + day
    # Texts mentioning 'Trump' collected for this single day.
    trump_texts = []
    for file_name in os.listdir(parent_path):
        if file_name.endswith('.json'):
            with open(parent_path + '/' + file_name) as f:
                content = json.load(f)
            # BUG FIX: the original iterated ``tweets`` while also indexing it
            # with a manually-incremented counter; iterate directly instead.
            for tweet in content['statuses']:
                text = tweet['text']
                if 'Trump' in text:
                    trump_texts.append(text)
    print(day)
    print('how many people talked about trump each day:')
    print(len(trump_texts))
|
#!/usr/bin/env python
import subprocess
import sys
import os
import time
import psutil
import appindicator
import gtk
import gobject
import notify2
import natsort
import pyxhook as hook
import atexit as at_exit
import pickle
import threading
statefile = os.path.expanduser('~/.vlcwrapy-nix/vlcdatabase.p')
show_notifications = True
class Vlc:
    """Thin wrapper around a single VLC subprocess playing one file."""

    def __init__(self, filename):
        self.now_playing = filename
        self.process = None
        self.play()

    def restart(self, filename):
        """Stop the current video (if any) and start playing *filename*."""
        self.kill()
        self.now_playing = filename
        self.play()

    def play(self):
        """Launch VLC on the current file unless it is already running."""
        if self.is_alive():
            return
        self.process = subprocess.Popen(['vlc', self.now_playing])

    def kill(self):
        """Terminate the VLC subprocess, if one is still running."""
        proc, self.process = self.process, None
        if proc is not None and proc.poll() is None:
            proc.kill()
            proc.wait()

    def is_alive(self):
        """Return True while the VLC subprocess exists and has not exited."""
        return self.process is not None and self.process.poll() is None
def fetch_watch_table():
    """
    Load the pickled {directory: last-played-file} table from the state file.

    Returns an empty table when the state file is absent or unreadable, and
    creates the state directory on first use.
    :return: dict of resume state
    """
    if os.path.exists(statefile):
        # BUG FIX: open the pickle in binary mode and catch only real
        # exceptions -- the previous bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit.
        with open(statefile, 'rb') as f:
            try:
                table = pickle.load(f)
            except Exception:
                # Corrupt/empty state file: fall back to a fresh table.
                table = {}
    else:
        if not os.path.exists(os.path.dirname(statefile)):
            os.makedirs(os.path.dirname(statefile))
        table = {}
    return table
def get_new_file(**kwargs):
    """
    Pick the next/previous video file in the current directory, or the
    last-played file for this directory when direction == 2.

    Keyword args: ``direction`` (-1 previous, 1 next, 2 resume) and
    ``current`` (path of the file now playing).
    :return: real path of the chosen file, or False when no resume entry exists
    """
    direction = kwargs['direction']
    current = os.path.basename(kwargs['current'])
    extensions = ['.mkv', '.flv', '.avi', '.mpg',
                  '.wmv', '.ogm', '.mp4', '.rmvb', '.m4v']
    # Natural-sort the supported video files in the working directory.
    candidates = natsort.natsorted(
        [name for name in os.listdir('.')
         if os.path.splitext(name)[-1].lower() in extensions])
    if direction == 2:
        # Resume: look up this directory's saved state.
        state = fetch_watch_table().get(os.getcwd(), None)
        if not state:
            return False
        return os.path.realpath(state)
    # Step forward/backward with wrap-around.
    position = (candidates.index(current) + direction) % len(candidates)
    return os.path.realpath(candidates[position])
def lookupIcon(icon_name):
    """Resolve *icon_name* to a filename via the default GTK icon theme (48px)."""
    return gtk.icon_theme_get_default().lookup_icon(icon_name, 48, 0).get_filename()
class SubliminalThread(threading.Thread):
    """Background thread that downloads English subtitles for one file via the ``subliminal`` CLI."""

    def __init__(self, filename):
        print 'subliminal'
        threading.Thread.__init__(self)
        self.filename = filename

    def run(self):
        # Best-effort: report success/failure via desktop notification rather
        # than letting an exception escape the thread.
        try:
            retcode = subprocess.call(['subliminal', '-q', '-s', '-f', '-l', 'en', '--', self.filename])
            if retcode == 0:
                notify.display('Subtitles Downloaded', 'text')
            else:
                notify.display('Subtitles Not Found', 'error')
        except:
            # NOTE(review): bare except; also ``e`` here is the exception
            # *type*, not the instance -- confirm the notification is intended
            # to show the type name.
            e = sys.exc_info()[0]
            notify.display(e, 'error')
class Indicator:
    """App-indicator (tray icon) that owns the VLC process and its control menu."""

    def __init__(self, path):
        self.a = appindicator.Indicator(
            'appmenu', lookupIcon('vlc'), appindicator.CATEGORY_APPLICATION_STATUS)
        self.a.set_status(appindicator.STATUS_ACTIVE)
        self.vlc = Vlc(path)
        self.build_menu()
        # Poll VLC liveness every 5 seconds; quit GTK once it stays dead.
        gobject.timeout_add(5 * 1000, self.quitCallback)
        # Persist the resume table when the process exits.
        at_exit.register(self.save_state)
        self.last_alive = 0

    def quitCallback(self):
        # Track the last time VLC was seen alive; once it has been dead for
        # more than ~2s, shut down the GTK main loop.
        if self.vlc.is_alive():
            self.last_alive = time.time()
        else:
            dead_since = time.time() - self.last_alive
            if dead_since > 2:
                gtk.mainquit()
        return True  # keep the gobject timeout installed

    def make_item(self, name, icon):
        """Build and show a menu item labelled *name* with the theme icon *icon*."""
        item = gtk.ImageMenuItem(name)
        img = gtk.Image()
        img.set_from_file(lookupIcon(icon))
        item.set_image(img)
        item.show()
        return item

    def build_menu(self):
        """Assemble the indicator menu: previous/next, resume, subtitles, quit."""
        menu = gtk.Menu()
        prev_file_item = self.make_item('Previous', 'gtk-media-next-rtl')
        prev_file_item.connect('activate', self.menuHandler, -1)
        menu.append(prev_file_item)
        next_file_item = self.make_item('Next', 'gtk-media-next-ltr')
        next_file_item.connect('activate', self.menuHandler, 1)
        menu.append(next_file_item)
        reload_item = self.make_item('Resume', 'reload')
        # NOTE(review): reload_item has no 'activate' handler -- resume appears
        # to be reachable only via the F2 key hook (direction == 2); confirm.
        menu.append(reload_item)
        subsitem = self.make_item('Download Subtitles', 'text')
        subsitem.connect('activate', self.subs)
        menu.append(subsitem)
        quitmenuitem = self.make_item('Quit', 'gtk-quit')
        quitmenuitem.connect('activate', self.quit)
        menu.append(quitmenuitem)
        self.a.set_menu(menu)

    def quit(self, item):
        # Kill VLC first so the liveness poll cannot restart anything.
        self.vlc.kill()
        gtk.mainquit()

    def subs(self, item):
        # Download subtitles for the current file on a background thread.
        f = self.vlc.now_playing
        subliminal = SubliminalThread(f)
        subliminal.start()

    def menuHandler(self, item, direction):
        # direction: -1 previous, 1 next, 2 resume last played in this directory.
        f = get_new_file(direction=direction, current=self.vlc.now_playing)
        if f:
            self.vlc.restart(f)

    def save_state(self):
        """Persist the per-directory resume table (plus the global 'lastplayed')."""
        table = fetch_watch_table()
        table[os.getcwd()] = self.vlc.now_playing
        table['lastplayed'] = os.path.join(os.getcwd(), self.vlc.now_playing)
        with open(statefile, 'w') as f:
            pickle.dump(table, f)
def seek_and_destroy(to_kill):
    """Kill every running process whose name matches *to_kill*."""
    matching = (proc for proc in psutil.get_process_list()
                if proc.name() == to_kill)
    for proc in matching:
        proc.kill()
class Message:
    """Small wrapper around notify2 desktop notifications with a fixed title."""

    def __init__(self, title):
        self.title = title
        # self.icon=icon
        notify2.init("title")
        self.notice = notify2.Notification(
            title, 'automation-indicator active')
        # self.notice.show()

    def display(self, message, icon='vlc'):
        """Show *message* with *icon*; honours the module-level show_notifications flag."""
        self.notice.update(self.title, message, icon=icon)
        self.notice.timeout = 100
        if show_notifications:
            self.notice.show()


# Module-level notifier shared by the key hook and worker threads.
notify = Message('vlcwrapy-nix')
class Hook:
    """Global keyboard hook: Home/End/F2 control VLC while a VLC window has focus."""

    def __init__(self, indicator):
        self.ind = indicator
        self.hm = hook.HookManager()
        self.hm.HookKeyboard()
        self.hm.KeyDown = self.kbeventhandler
        self.hm.start()

    def kbeventhandler(self, event):
        # Only react while the focused window belongs to VLC.
        if event.Key == 'Home' and 'vlc' in event.WindowProcName.lower():
            # Home: jump to the previous file in the directory.
            self.ind.menuHandler(None, -1)
            notify.display('[Home] Previous file', 'gtk-media-next-rtl')
        if event.Key == 'End' and 'vlc' in event.WindowProcName.lower():
            # End: jump to the next file in the directory.
            self.ind.menuHandler(None, 1)
            notify.display('[End] Next File', 'gtk-media-next-ltr')
        if event.Key == 'F2' and 'vlc' in event.WindowProcName.lower():
            # F2: resume the last played file for the current directory.
            self.ind.menuHandler(None, 2)
            notify.display(
                '[F2] Loading last played in current directory', 'reload')

    def kill(self):
        # Brief grace period so pending events flush before cancelling the hook.
        time.sleep(2)
        self.hm.cancel()
from datetime import datetime
logfile = os.path.expanduser('~/.vlcwrapy-nix/vlcwrapy-nix.log')


def log(logline):
    """Append *logline* (plus a newline) to the wrapper's log file."""
    # BUG FIX: mode 'w' truncated the file on every call, so only the last
    # line ever survived; append mode preserves the session history that
    # main()'s '\n\n\nStarted ...' header clearly expects.
    with open(logfile, 'a') as f:
        f.write(logline + '\n')
def main():
    """Entry point: resolve the target file, kill stray VLCs, start the indicator and key hook."""
    gobject.threads_init()
    log('\n\n\nStarted '+str(datetime.now()))
    if len(sys.argv) == 1:  # no argument: resume the globally last-played file
        lastwatched = fetch_watch_table().get('lastplayed', False)
        if lastwatched:
            filedir, path = os.path.split(lastwatched)
        else:
            notify.display('Run vlcwrapy-nix from a video file.', 'error')
            sys.exit()
    else:
        # Explicit file argument: play it from its own directory.
        filedir, path = os.path.split(os.path.abspath(sys.argv[1]))
    os.chdir(filedir)
    log('filename received={}\n cwd={}'.format(path, os.getcwd()))
    # notify.display('filename received={}\n cwd={}'.format(path,os.getcwd()),'vlc')
    # Ensure only our managed VLC instance is running.
    seek_and_destroy('vlc')
    indicator = Indicator(path)
    KBhook = Hook(indicator)
    gtk.main()
    KBhook.kill()


if __name__ == '__main__':
    main()
'''
arguments =['/home/thekindlyone/projects/nautilus-test.py', '/media/thekindlyone/storage/anime/Guilty Crown/[Commie] Guilty Crown - 10 [6094511C].mkv']
cwd=/home/thekindlyone
'''
|
from __future__ import print_function
from django.conf import settings
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from django.core import serializers
core_serializers = serializers
from django.http import JsonResponse
import requests
import json
from .serializers import *
from collections import namedtuple
import time
import datetime
from django.forms.models import model_to_dict
from django.shortcuts import render
from .models import *
import uuid
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from django.db.models import Q
import sys
import tldextract
import re
from urllib.parse import urlparse, parse_qs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
import io
# SECURITY(review): OAuth client credentials are hard-coded in source control;
# they should be moved into Django settings / environment variables and rotated.
CLIENT_ID = 'hUq5QmrIYSHu15LKS7nHjnXteMApeTHHTwSEWz9x'
CLIENT_SECRET = 'ealGKj90JJ3ERVVPOeFpwbZhUgNVZIvNERXSLlqmQKHcUCm8nLxAd3wtzYmbEh61ER6x4XqZGO0yd8vj9JL4PHmdPREx3VMXdOHYsDLWcXZQiiet3AP4HcOpyxkifJtB'
# Paths for the Selenium Chrome driver and static assets, taken from settings.
CHROME_DRIVE_PATH = settings.CHROME_DRIVE_PATH
STATIC_PATH = settings.STATIC_PATH
def index(request):
    """Serve the single-page application shell template."""
    template_name = 'index.html'
    return render(request, template_name)
@api_view(['POST'])
@permission_classes([AllowAny])
def register(request):
    """Create a new User from the posted "register_info" JSON payload.

    POST fields:
        register_info -- JSON with googleID, Email, name, and the selected
            school/role/subjects/grades/student-needs/general-tags.
        next_page -- the page the user came from; when it is not "/home" it is
            treated as "/<collection uuid>" and the user is linked to that
            shared collection.

    Returns {"RegisterSuccess": bool}.
    """
    json_data = json.loads(request.POST.get("register_info"))
    GmailID = json_data["googleID"]
    Email = json_data["Email"]
    selected_general_tags = json_data["selected_general_tags"]
    selected_grades = json_data["selected_grades"]
    selected_role = json_data["selected_role"]
    selected_school = json_data["selected_school"]
    selected_student_needs = json_data["selected_student_needs"]
    selected_subjects = json_data["selected_subjects"]
    firstname = json_data["Firstname"]
    lastname = json_data["Lastname"]
    # Format is "School, City, State" -- assumes none of the parts themselves
    # contain ", " (same assumption as the rest of this module).
    school, city, state_name = selected_school.split(", ")[:3]
    sh = School.objects.get(Name=school, City=city, State=State.objects.get(StateName=state_name))
    rl, _ = Role.objects.get_or_create(Title=selected_role[0]["label"])
    new_user = User(GmailID=GmailID, School=sh, Role=rl, Firstname=firstname, Lastname=lastname, Email=Email)
    new_user.save()
    for sbj in selected_subjects:
        t_sbj, _ = Subject.objects.get_or_create(Name=sbj["label"])
        new_user.Subjects.add(t_sbj)
        # Keep a trigger word pointing at this subject for later text matching.
        SubjectTrigger.objects.get_or_create(TriggerWord=sbj["label"], defaults={"Subject": t_sbj})
    # Grade triggers are assumed to pre-exist; a single loop replaces the two
    # duplicated loops (and duplicate queries) of the original.
    for gd in selected_grades:
        t_gd = GradeTrigger.objects.get(TriggerWord=gd["label"])
        new_user.Grades.add(t_gd.Grade)
        new_user.GradeTrigger.add(t_gd)
    for sn in selected_student_needs:
        t_sn, _ = Student_Need.objects.get_or_create(Population=sn["label"])
        new_user.Student_needs.add(t_sn)
    for gt in selected_general_tags:
        t_gt, _ = General_Tag.objects.get_or_create(Tag=gt["label"])
        new_user.General_Tags.add(t_gt)
    # Add shared collection when login started from a shared page.
    next_page = request.POST.get("next_page")
    if next_page != "/home":
        # Renamed from "uuid": the original shadowed the imported uuid module.
        shared_uuid = next_page[1:]
        collection_detail = Collection.objects.get(uuid=shared_uuid)
        try:
            shared_user = User.objects.get(GmailID=GmailID)
            if shared_user.pk == new_user.pk:
                SharedCollection.objects.get_or_create(
                    SharedUser=shared_user, SharedCollection=collection_detail)
        except User.DoesNotExist:
            return Response({"RegisterSuccess": False}, 200)
    return Response({"RegisterSuccess": True}, 200)
@api_view(['POST'])
def update_profile(request):
    """Overwrite the user's profile with the posted "register_info" selections.

    Clears and re-adds every many-to-many selection, and updates the Role and
    School foreign keys. Bug fix: the original assigned new Role/School
    attributes but never called save(), so those FK changes were silently
    discarded.

    Returns {"updateSuccess": True}.
    """
    json_data = json.loads(request.POST.get("register_info"))
    GmailID = json_data["googleID"]
    selected_general_tags = json_data["selected_general_tags"]
    selected_grades = json_data["selected_grades"]
    selected_role = json_data["selected_role"]
    selected_school = json_data["selected_school"]
    selected_student_needs = json_data["selected_student_needs"]
    selected_subjects = json_data["selected_subjects"]
    # Format is "School, City, State".
    school, city, state_name = selected_school.split(", ")[:3]
    sh = School.objects.get(Name=school, City=city, State=State.objects.get(StateName=state_name))
    rl, _ = Role.objects.get_or_create(Title=selected_role[0]["label"])
    user = User.objects.get(GmailID=GmailID)
    user.Role = rl
    user.School = sh
    user.save()  # persist the Role/School FK changes (missing in the original)
    user.Subjects.clear()
    for sbj in selected_subjects:
        t_sbj, _ = Subject.objects.get_or_create(Name=sbj["label"])
        user.Subjects.add(t_sbj)
        SubjectTrigger.objects.get_or_create(TriggerWord=sbj["label"], defaults={"Subject": t_sbj})
    user.Grades.clear()
    user.GradeTrigger.clear()
    for gd in selected_grades:
        t_gd = GradeTrigger.objects.get(TriggerWord=gd["label"])
        user.Grades.add(t_gd.Grade)
        user.GradeTrigger.add(t_gd)
    user.Student_needs.clear()
    for sn in selected_student_needs:
        t_sn, _ = Student_Need.objects.get_or_create(Population=sn["label"])
        user.Student_needs.add(t_sn)
    user.General_Tags.clear()
    for gt in selected_general_tags:
        t_gt, _ = General_Tag.objects.get_or_create(Tag=gt["label"])
        user.General_Tags.add(t_gt)
    return Response({"updateSuccess": True}, 200)
@api_view(['POST'])
def token(request):
    '''
    Gets tokens with username and password. Input should be in the format:
    {"username": "username", "password": "1234abcd"}
    '''
    # Forward the caller's credentials. The original posted the literal
    # placeholder strings "username"/"1234abcd" despite what the docstring
    # promised, so every caller got the same (invalid) login attempt.
    r = requests.post(
        'http://127.0.0.1:8000/o/token/',
        data={
            'grant_type': 'password',
            'username': request.data['username'],
            'password': request.data['password'],
            'client_id': CLIENT_ID,
            'client_secret': CLIENT_SECRET,
        },
    )
    return Response(r.json())
@api_view(['POST'])
@permission_classes([AllowAny])
def refresh_token(request):
    '''
    Registers user to the server. Input should be in the format:
    {"refresh_token": "<token>"}
    '''
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': request.data['refresh_token'],
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
    }
    resp = requests.post('http://127.0.0.1:8000/o/token/', data=payload)
    return Response(resp.json())
@api_view(['POST'])
@permission_classes([AllowAny])
def revoke_token(request):
    '''
    Method to revoke tokens.
    {"token": "<token>"}
    '''
    payload = {
        'token': request.data['token'],
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
    }
    resp = requests.post('http://127.0.0.1:8000/o/revoke_token/', data=payload)
    # A successful revocation has an empty body, so synthesize a message;
    # otherwise pass the provider's error payload straight through.
    if resp.status_code == requests.codes.ok:
        return Response({'message': 'token revoked'}, resp.status_code)
    return Response(resp.json(), resp.status_code)
@api_view(['POST'])
def login(request):
    """Report whether a user with the posted GmailID is already registered."""
    gmail_id = request.POST.get('GmailID')
    exists = User.objects.filter(GmailID=gmail_id).exists()
    return Response({"loginSuccess": exists}, 200)
@api_view(['GET'])
def basedata(request):
    """Return the serialized global lookup tables."""
    return Response(getBaseData())
def getBaseData():
    """Serialize the global lookup tables used by the registration UI."""
    field_names = ('roles', 'subjects', 'grades', 'student_needs', 'general_tags')
    Basedata = namedtuple('Basedata', field_names)
    payload = Basedata(
        Role.objects.all(),
        Subject.objects.all(),
        GradeTrigger.objects.all(),
        Student_Need.objects.all(),
        General_Tag.objects.all(),
    )
    return BaseDataSerializer(payload).data
@api_view(['POST'])
def get_profile_data(request):
    """Return the user's saved profile selections plus the base lookup data."""
    user = User.objects.get(GmailID=request.POST.get("google_id"))
    school = user.School
    profile = {
        "selected_school": school.Name + ", " + school.City + ", " + school.State.StateName,
        "selected_role": user.Role.Title,
        "selected_subject": [s.Name for s in user.Subjects.all()],
        "selected_grade": [g.TriggerWord for g in user.GradeTrigger.all()],
        "selected_student_need": [n.Population for n in user.Student_needs.all()],
        "selected_general_tag": [t.Tag for t in user.General_Tags.all()],
    }
    return Response({"profile_data": profile, "basedata": getBaseData()})
@api_view(["POST"])
def search_school(request):
input_length = request.POST.get("inputLength")
input_value = request.POST.get("inputValue")
min_query_length = 4
school_suggestions = []
if int(input_length) <= min_query_length - 1:
return Response({"schools": json.dumps(school_suggestions)})
else:
filtered_school = School.objects.filter(Name__icontains=input_value)
Schooldata = namedtuple('Schooldata', ('schools'))
schooldata = Schooldata(
schools=filtered_school
)
serializer = SchoolDataSerializer(schooldata)
return Response(serializer.data)
@api_view(['POST'])
def cordata(request):
    """Return the trigger/doctype lookup tables and the user's collections."""
    return Response(getCorData(request))
def getCorData(request):
    """Serialize lookup tables plus summaries of the user's own collections."""
    user = User.objects.get(GmailID=request.POST.get("GmailID"))
    collections = []
    for col in Collection.objects.filter(Owner_User=user):
        # Fall back to document thumbnails when the collection has none set.
        thumb = getThumbnailFromCollection(col) if col.Thumbnail == "" else col.Thumbnail
        collections.append({"title": col.Title, "pk": col.pk, "thumbnail": thumb})
    return {
        "general_tags": core_serializers.serialize("json", General_Tag.objects.all()),
        "subject_triggerword": core_serializers.serialize("json", SubjectTrigger.objects.all()),
        "grade_triggerword": core_serializers.serialize("json", GradeTrigger.objects.all()),
        "doctype": core_serializers.serialize("json", DocType.objects.all()),
        "collections": json.dumps(collections)
    }
@api_view(['POST'])
def getStrand(request):
    """Return the distinct standard strands for the posted subject/grade."""
    return Response(getStrandData(request))
def getStrandData(request):
    """Look up the distinct standard strands for the user's state, the posted
    subject trigger word, and the posted grade trigger word.

    Returns {"strand": <json list>, "standard_set": <set label>}; falls back
    to an empty list when any lookup fails (e.g. no matching StateStandard).
    """
    try:
        selected_subject_triggerword = request.POST.get("selected_subject_triggerword")
        selected_grade_triggerword = request.POST.get("selected_grade_triggerword")
        # Fetch the user once; the original queried the same row twice.
        user = User.objects.get(GmailID=request.POST.get("GmailID"))
        state = user.School.State
        category = "Adult" if user.School.Have_standard > 1 else "K12"
        subject = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword).Subject
        grade = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword).Grade
        standard_set = StateStandard.objects.get(State=state, Subject=subject, Category=category).StandardSet
        strand = Standard.objects.filter(StandardSet=standard_set, Grade=grade).values("Strand").distinct()
        return {"strand": json.dumps(list(strand)), "standard_set": standard_set.SetLabel}
    except Exception:
        # Best-effort: any missing row yields an empty strand list. "except
        # Exception" (not bare "except:") so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return {"strand": json.dumps([]), "standard_set": ''}
@api_view(['POST'])
def getStandard(request):
    """Return the standard codes matching the posted subject/grade/strand."""
    return Response({"code": json.dumps(getStandardData(request))})
def getStandardData(request):
    """Return the distinct standard codes (id, number, description) for the
    user's state and the posted subject/grade/strand selection.

    Returns {"code": <json list>}, empty when no matching standard set exists.
    """
    selected_subject_triggerword = request.POST.get("selected_subject_triggerword")
    selected_grade_triggerword = request.POST.get("selected_grade_triggerword")
    selected_strand = request.POST.get("selected_strand")
    # Fetch the user once; the original queried the same row twice.
    user = User.objects.get(GmailID=request.POST.get("GmailID"))
    state = user.School.State
    category = "Adult" if user.School.Have_standard > 1 else "K12"
    subject = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword).Subject
    grade = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword).Grade
    try:
        standard_set = StateStandard.objects.get(State=state, Subject=subject, Category=category).StandardSet
        code = Standard.objects.filter(StandardSet=standard_set, Grade=grade, Strand=selected_strand).values("id", "Standard_Number", "Description").distinct()
        return {"code": json.dumps(list(code))}
    except (Standard.DoesNotExist, StateStandard.DoesNotExist):
        # Bug fix: "except A or B:" evaluates to just A, so
        # StateStandard.DoesNotExist was never actually caught.
        return {"code": json.dumps([])}
@api_view(['POST'])
def uploadFile(request):
    """Persist the uploaded file info; report whether it already existed."""
    pks = upload(request)
    if pks['c_pk'] == 0:
        # upload() signals "document already in that collection" with pk 0.
        return Response({"col_id": 0, "doc_id": 0, "upload": "already exist"})
    return Response({"col_id": pks['c_pk'], "doc_id": pks['d_pk'], "upload": "success"})
def _new_collection(user, title):
    """Create and save an empty Collection owned by *user* with *title*."""
    col = Collection(
        Title=title,
        Owner_User=user,
        Description="",
        DateShared=datetime.datetime.now(),
        Thumbnail="",  # not saved
        AccessCount=1,
        uuid=uuid.uuid4().hex
    )
    col.save()
    return col


def _save_document(user, json_data):
    """Create and save a new Document from the upload payload; returns it."""
    selected_subject_triggerword = json_data["selected_subject_triggerword"]
    selected_grade_triggerword = json_data["selected_grade_triggerword"]
    subject_trigger = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword)
    grade_trigger = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword)
    doc = Document(
        Title=json_data["Title"],
        Owner_User=user,
        DocID=json_data["DocID"],
        DocType=DocType.objects.get(Type=json_data["DocType"]),
        DateShared=datetime.datetime.now(),
        OpenNumber=0,
        ServiceType=json_data["ServiceType"],
        IconUrl=json_data["iconUrl"],
        Url=json_data["url"],
        thumbnail=json_data["web_thumbnail"],
        Subject=subject_trigger.Subject,
        Grade=grade_trigger.Grade,
        subject_triggerword=subject_trigger,
        grade_triggerword=grade_trigger
    )
    doc.save()
    return doc


def _attach_standard_and_tags(doc, json_data):
    """Link the selected standard (if any) and general tags to *doc*."""
    standard_pk = json_data["standard_pk"]
    if standard_pk is not None:
        doc.Standard.add(Standard.objects.get(pk=standard_pk))
    for gt in json_data["selected_general_tags"]:
        tag, _ = General_Tag.objects.get_or_create(Tag=gt['label'])
        doc.General_Tags.add(tag)


def upload(request):
    """Save an uploaded document into a collection.

    The "collection_pk" payload field selects the target:
      * "new"     -- create a collection named "col_new_title" (or an
                     auto-numbered "New Collection<N>").
      * "default" -- create a collection named "col_default_title" (or
                     "<first_name>'s First Collection").
      * a pk      -- add the document to that existing collection.

    Returns {"c_pk": <collection pk>, "d_pk": <document pk>}, or both 0 when
    the document was already in the chosen existing collection.

    The three branches of the original were ~50 duplicated lines each; the
    shared work now lives in _new_collection/_save_document/
    _attach_standard_and_tags.
    """
    json_data = json.loads(request.POST.get("upload_file_info"))
    user = User.objects.get(GmailID=json_data["GmailID"])
    collection_pk = json_data["collection_pk"]
    if collection_pk == "new":
        df_col_name = json_data["col_new_title"]
        if df_col_name == "":
            df_col_name = "New Collection"
            # Number the default title after the existing "New Collection*" rows.
            c = len(list(Collection.objects.raw("SELECT * FROM users_collection WHERE Title LIKE 'New Collection%%'")))
            if c != 0:
                df_col_name += str(c)
        col = _new_collection(user, df_col_name)
    elif collection_pk == "default":
        df_col_name = json_data["col_default_title"]
        if df_col_name == "":
            df_col_name = json_data["first_name"] + "'s First Collection"
        col = _new_collection(user, df_col_name)
    else:
        col = Collection.objects.get(pk=collection_pk)
        try:
            doc = Document.objects.get(DocID=json_data["DocID"])
            if col in doc.Collection.all():
                # Already in this collection: signal "no-op" with zero pks.
                return {"c_pk": 0, "d_pk": 0}
            doc.Collection.add(col)
            return {"c_pk": col.pk, "d_pk": doc.pk}
        except Document.DoesNotExist:
            doc = _save_document(user, json_data)
            doc.Collection.add(col)
            _attach_standard_and_tags(doc, json_data)
            return {"c_pk": col.pk, "d_pk": doc.pk}
    # "new"/"default" path: reuse an existing Document with the same DocID,
    # otherwise create and tag a fresh one.
    try:
        doc = Document.objects.get(DocID=json_data["DocID"])
        doc.Collection.add(col)
        return {"c_pk": col.pk, "d_pk": doc.pk}
    except Document.DoesNotExist:
        doc = _save_document(user, json_data)
        doc.Collection.add(col)
        _attach_standard_and_tags(doc, json_data)
        return {"c_pk": col.pk, "d_pk": doc.pk}
@api_view(["POST"])
def updateFile(request):
json_data = json.loads(request.POST.get("upload_file_info"))
Title = json_data["Title"]
DocID = json_data["DocID"]
doc_pk = json_data["doc_pk"]
thumbnail = json_data["web_thumbnail"]
icon_url = json_data["iconUrl"]
# selected_methods = json_data["selected_methods"] # not saved
selected_general_tags = json_data["selected_general_tags"]
Url = json_data["url"]
standard_pk = json_data["standard_pk"]
selected_subject_triggerword = json_data["selected_subject_triggerword"]
selected_grade_triggerword = json_data["selected_grade_triggerword"]
doc = Document.objects.get(pk=doc_pk)
doc.Title = Title
doc.DocID = DocID
doc.thumbnail = thumbnail
doc.Url = Url
doc.Subject = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword).Subject
doc.Grade = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword).Grade
doc.subject_triggerword = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword)
doc.grade_triggerword = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword)
if icon_url != "":
doc.IconUrl = icon_url
doc.save()
doc.Standard.clear()
if standard_pk != None:
doc.Standard.add( Standard.objects.get(pk=standard_pk) )
for gt in selected_general_tags:
try:
eg = General_Tag.objects.get(Tag=gt['label'])
doc.General_Tags.add(eg)
except General_Tag.DoesNotExist:
eg = General_Tag(Tag=gt['label'])
eg.save()
doc.General_Tags.add(eg)
return Response({"message": "update successfully!"})
@api_view(["POST"])
def getMyData(request):
GmailID = request.POST.get("GmailID")
owner = User.objects.get(GmailID=GmailID)
docs = Document.objects.filter(Owner_User=owner)
# return Response({"docs": core_serializers.serialize("json", docs)})
result = []
for d in docs:
js = {}
js['id'] = d.pk
js["title"] = d.Title
js["DocID"] = d.DocID
js["subject"] = d.subject_triggerword.TriggerWord
js["DocType"] = DocType.objects.get(pk=d.DocType.pk).Type
js["standard"] = []
for sd in d.Standard.all():
js["standard"].append(sd.Standard_Number)
js["iconUrl"] = d.IconUrl
js["url"] = d.Url
js["tags"] = []
if d.General_Tags.all():
for tag in d.General_Tags.all():
js["tags"].append(tag.Tag)
result.append(js)
return Response({"docs": json.dumps(result)})
def get_youtube_id(url):
    """Extract the video id from a YouTube URL.

    Prefers the ?v= query parameter (youtube.com/watch?v=ID); otherwise
    falls back to the last path segment (youtu.be/ID style links).
    """
    parsed = urlparse(url)
    query_ids = parse_qs(parsed.query).get('v')
    if query_ids:
        return query_ids[0]
    segments = parsed.path.split('/')
    if segments:
        return segments[-1]
@api_view(["POST"])
def getWebThumbnail(request):
global CHROME_DRIVE_PATH
url = request.POST.get("web_url")
print(url)
if not 'http' in url:
url = "https://" + url
ext = tldextract.extract(url)
if ext.domain == "youtube" or ext.domain == "youtu":
video_id = get_youtube_id(url)
url = "https://img.youtube.com/vi/" + video_id + "/0.jpg"
return Response({"thumbnail_url": url})
else:
try:
filename = re.sub(r'[\\/*?:"<>.#|]',"",url)
_start = time.time()
options = Options()
options.add_argument("--headless") # Runs Chrome in headless mode.
options.add_argument('--no-sandbox') # # Bypass OS security model
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(chrome_options=options, executable_path=CHROME_DRIVE_PATH)
driver.get(url)
global STATIC_PATH
driver.save_screenshot(STATIC_PATH + filename + '.png')
print(settings.STATICFILES_DIRS[0] + '/images/' + filename + '.png')
driver.quit()
_end = time.time()
return Response({"thumbnail_url": '/static/images/' + filename + '.png'})
except:
return Response({"thumbnail_url": ""})
@api_view(["POST"])
def get_webimage_by_random_number(request):
global CHROME_DRIVE_PATH
url = request.POST.get("web_url")
number = int( request.POST.get("r_num") )
if not 'http' in url:
url = "https://" + url
ext = tldextract.extract(url)
if ext.domain == "youtube" or ext.domain == "youtu":
video_id = get_youtube_id(url)
url = "https://img.youtube.com/vi/" + video_id + "/" + str(number % 4) + ".jpg"
return Response({"thumbnail_url": url})
else:
# try:#
filename = re.sub(r'[\\/*?:"<>.#|]',"",url)
_start = time.time()
options = Options()
options.add_argument("--headless") # Runs Chrome in headless mode.
options.add_argument('--no-sandbox') # # Bypass OS security model
options.add_argument('start-maximized')
options.add_argument('disable-infobars')
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(chrome_options=options, executable_path=CHROME_DRIVE_PATH)
driver.get(url)
images = driver.find_elements_by_tag_name('img')
image_srcs = []
for image in images:
image_srcs.append(image.get_attribute('src'))
return Response({"thumbnail_url": image_srcs[number % len(image_srcs)] if image_srcs else ""})
# except:
# return Response({"thumbnail_url": json.dumps([])})
@api_view(["POST"])
def searchData(request):
gmail_id = request.POST.get("GmailID")
user = User.objects.get(GmailID=gmail_id)
keyword = request.POST.get("keyword")
option = request.POST.get("option")
community_id = request.POST.get("community_id")
if keyword == "":
return Response({"docs": json.dumps([])})
searchResult = []
if user.School.Have_standard == 2:
docs = Document.objects.filter( (Q(Title__icontains=keyword) | Q(Standard__Standard_Number__icontains=keyword) | Q(General_Tags__Tag__icontains=keyword) ) & Q(Owner_User__School__Have_standard=2) )
else:
docs = Document.objects.filter( Q(Title__icontains=keyword) | Q(Standard__Standard_Number__icontains=keyword) | Q(General_Tags__Tag__icontains=keyword) )
result = []
for d in docs:
js = {}
js["title"] = d.Title
js["DocID"] = d.DocID
js["owner"] = str(User.objects.get(pk=d.Owner_User.pk).Firstname) + " " + str(User.objects.get(pk=d.Owner_User.pk).Lastname)
js["DocType"] = DocType.objects.get(pk=d.DocType.pk).Type
js["standard"] = []
for sd in d.Standard.all():
js["standard"].append(sd.Standard_Number)
js["iconUrl"] = d.IconUrl
js["url"] = d.Url
js["tags"] = []
if d.General_Tags.all():
for tag in d.General_Tags.all():
js["tags"].append(tag.Tag)
result.append(js)
return Response({"docs": json.dumps(result)})
def getThumbnailFromCollection(collection):
    """Return up to four thumbnail URLs taken from the collection's documents.

    Website documents carry their own thumbnail; anything else falls back to
    the Google Drive thumbnail endpoint keyed by DocID.
    """
    drive_prefix = "https://drive.google.com/thumbnail?authuser=0&sz=w320&id="
    return [
        d.thumbnail if d.ServiceType == "Website" else drive_prefix + d.DocID
        for d in Document.objects.filter(Collection=collection)[0:4]
    ]
@api_view(["POST"])
def getCollectiondata(request):
GmailID = json.loads(request.POST.get("GmailID"))
user = User.objects.get(GmailID=GmailID)
cols = Collection.objects.filter(Owner_User=user)
my_collections = []
for c in cols:
json_data = {}
json_data["title"] = c.Title
json_data["pk"] = c.pk
if c.Thumbnail == "":
json_data["thumbnail"] = getThumbnailFromCollection(c)
else:
json_data["thumbnail"] = c.Thumbnail
my_collections.append(json_data)
shared_collections_key = SharedCollection.objects.filter(SharedUser=user)
shared_collections = []
for s in shared_collections_key:
json_data = {}
json_data["title"] = s.SharedCollection.Title
json_data["pk"] = s.SharedCollection.pk
if s.SharedCollection.Thumbnail == "":
json_data["thumbnail"] = getThumbnailFromCollection(s.SharedCollection)
else:
json_data["thumbnail"] = s.SharedCollection.Thumbnail
shared_collections.append(json_data)
return Response({"my_collections": json.dumps(my_collections), "share_collections": json.dumps(shared_collections) })
@api_view(["POST"])
def searchCollection(request):
GmailID = request.POST.get("GmailID")
keyword = request.POST.get("keyword")
user = User.objects.get(GmailID=GmailID)
cols = Collection.objects.filter(Q(Owner_User=user) & (Q(Title__icontains=keyword) | Q(Description__icontains=keyword)))
my_collections = []
for c in cols:
json_data = {}
json_data["title"] = c.Title
json_data["pk"] = c.pk
if c.Thumbnail == "":
json_data["thumbnail"] = getThumbnailFromCollection(c)
else:
json_data["thumbnail"] = c.Thumbnail
my_collections.append(json_data)
shared_collections_key = SharedCollection.objects.filter(SharedUser=user)
shared_collections = []
for s in shared_collections_key:
json_data = {}
json_data["title"] = s.SharedCollection.Title
json_data["pk"] = s.SharedCollection.pk
if s.SharedCollection.Thumbnail == "":
json_data["thumbnail"] = getThumbnailFromCollection(s.SharedCollection)
else:
json_data["thumbnail"] = s.SharedCollection.Thumbnail
shared_collections.append(json_data)
return Response({"my_collections": json.dumps(my_collections), "share_collections": json.dumps(shared_collections) })
@api_view(["POST"])
def getCollectionDetail(request):
collection_id = request.POST.get("collection_id")
collection_detail = Collection.objects.get(pk=collection_id)
user = collection_detail.Owner_User
docs = Document.objects.filter(Collection__pk=collection_id)
documents = []
for doc in docs:
doc_json = {}
doc_json["pk"] = doc.pk
doc_json["Title"] = doc.Title
doc_json["DocID"] = doc.DocID
doc_json["DocType"] = doc.DocType.Type
doc_json["DateShared"] = doc.DateShared.strftime("%b. %d %Y")
doc_json["Subject"] = doc.Subject.Name
doc_json["Grade"] = doc.Grade.Grade
doc_json["FileType"] = doc.ServiceType if doc.ServiceType == "Website" else "Document"
doc_json["thumbnail"] = doc.thumbnail
doc_json["iconUrl"] = doc.IconUrl
doc_json["Standards"] = []
for st in doc.Standard.all():
doc_json["Standards"].append(st.Standard_Number)
doc_json["General_Tags"] = []
for tag in doc.General_Tags.all():
doc_json["General_Tags"].append(tag.Tag)
doc_json["Url"] = doc.Url
documents.append(doc_json)
if collection_detail.Thumbnail == "":
thumbnail = getThumbnailFromCollection(collection_detail)
else:
thumbnail = collection_detail.Thumbnail
return Response({"title": collection_detail.Title, "thumbnail":thumbnail,
"description": collection_detail.Description, "uuid": collection_detail.uuid,
"role": user.Role.Title, "school": user.School.Name,
"docs": json.dumps(documents)
})
@api_view(["POST"])
def collectionShare(request):
collection_id = request.POST.get("collection_id")
target_email = request.POST.get("target_email")
try:
shared_user = User.objects.get(Email=target_email)
except User.DoesNotExist:
return Response({"message": "This user is not signed in Coteacher."})
shared_collection = Collection.objects.get(pk=collection_id)
if shared_collection.Owner_User == shared_user:
return Response({"message": "This email is yours."})
try:
sc = SharedCollection.objects.get(SharedUser=shared_user, SharedCollection=shared_collection)
return Response({"message": "This collection was already shared with this user."})
except SharedCollection.DoesNotExist:
sc = SharedCollection(SharedUser=shared_user, SharedCollection=shared_collection)
sc.save()
return Response({"message": "Successfully shared!"})
@api_view(["POST"])
def getCollectionDetailFromUUID(request):
collection_uuid = request.POST.get("uuid")
google_id = request.POST.get("GmailID")
collection_detail = Collection.objects.get(uuid=collection_uuid)
user = collection_detail.Owner_User
docs = Document.objects.filter(Collection__uuid=collection_uuid)
documents = []
for doc in docs:
doc_json = {}
doc_json["pk"] = doc.pk
doc_json["Title"] = doc.Title
doc_json["DocID"] = doc.DocID
doc_json["DocType"] = doc.DocType.Type
doc_json["DateShared"] = doc.DateShared.strftime("%b. %d %Y")
doc_json["Subject"] = doc.Subject.Name
doc_json["Grade"] = doc.Grade.Grade
doc_json["FileType"] = doc.ServiceType if doc.ServiceType == "Website" else "Document"
doc_json["thumbnail"] = doc.thumbnail
doc_json["iconUrl"] = doc.IconUrl
doc_json["Standards"] = []
for st in doc.Standard.all():
doc_json["Standards"].append(st.Standard_Number)
doc_json["General_Tags"] = []
for tag in doc.General_Tags.all():
doc_json["General_Tags"].append(tag.Tag)
doc_json["Url"] = doc.Url
documents.append(doc_json)
if collection_detail.Thumbnail == "":
thumbnail = getThumbnailFromCollection(collection_detail)
else:
thumbnail = collection_detail.Thumbnail
if google_id != None:
try:
shared_user = User.objects.get(GmailID=google_id)
if shared_user.pk != user.pk:
try:
shared_collection = SharedCollection.objects.get(SharedUser=shared_user, SharedCollection=collection_detail)
except SharedCollection.DoesNotExist:
shared_collection = SharedCollection(SharedUser=shared_user, SharedCollection=collection_detail)
shared_collection.save()
except User.DoesNotExist:
return Response({"title": collection_detail.Title, "thumbnail":thumbnail,
"description": collection_detail.Description, "uuid": collection_detail.uuid,
"role": user.Role.Title, "school": user.School.Name,
"docs": json.dumps(documents)
})
return Response({"title": collection_detail.Title, "thumbnail":thumbnail,
"description": collection_detail.Description, "uuid": collection_detail.uuid,
"role": user.Role.Title, "school": user.School.Name,
"docs": json.dumps(documents)
})
@api_view(["POST"])
def changeCollectionTitleDescription(request):
collection_id = request.POST.get("col_id")
collection_title = request.POST.get("col_title")
collection_description = request.POST.get("col_description")
obj = Collection.objects.get(pk=collection_id)
obj.Title = collection_title
obj.Description = collection_description
obj.save()
collection_detail = Collection.objects.get(pk=collection_id)
user = collection_detail.Owner_User
docs = Document.objects.filter(Collection__pk=collection_id)
documents = []
for doc in docs:
doc_json = {}
doc_json["pk"] = doc.pk
doc_json["Title"] = doc.Title
doc_json["DocID"] = doc.DocID
doc_json["DocType"] = doc.DocType.Type
doc_json["DateShared"] = doc.DateShared.strftime("%b. %d %Y")
doc_json["Subject"] = doc.Subject.Name
doc_json["Grade"] = doc.Grade.Grade
doc_json["FileType"] = doc.ServiceType if doc.ServiceType == "Website" else "Document"
doc_json["thumbnail"] = doc.thumbnail
doc_json["iconUrl"] = doc.IconUrl
doc_json["Standards"] = []
for st in doc.Standard.all():
doc_json["Standards"].append(st.Standard_Number)
doc_json["General_Tags"] = []
for tag in doc.General_Tags.all():
doc_json["General_Tags"].append(tag.Tag)
doc_json["Url"] = doc.Url
documents.append(doc_json)
if collection_detail.Thumbnail == "":
thumbnail = getThumbnailFromCollection(collection_detail)
else:
thumbnail = collection_detail.Thumbnail
return Response({"title": collection_detail.Title, "thumbnail":thumbnail,
"description": collection_detail.Description, "uuid": collection_detail.uuid,
"role": user.Role.Title, "school": user.School.Name,
"docs": json.dumps(documents)
})
@api_view(["POST"])
def createEmptyCollection(request):
    """Create a collection shell for the posted user, or update the title and
    description of an existing one.

    An empty `col_uuid` means "create new"; otherwise the collection with
    that uuid is updated in place. Returns the uuid and pk of the result.
    """
    posted = request.POST
    owner = User.objects.get(GmailID=posted.get("GmailID"))
    title = posted.get("col_title")
    description = posted.get("col_description")
    col_uuid = posted.get("col_uuid")
    if col_uuid == "":
        # Brand-new collection: mint a uuid and store its hex form.
        col = Collection(
            Title=title,
            Description=description,
            Owner_User=owner,
            DateShared=datetime.datetime.now(),
            Thumbnail="",  # not saved
            AccessCount=1,
            uuid=uuid.uuid4().hex,
        )
    else:
        col = Collection.objects.get(uuid=col_uuid)
        col.Title = title
        col.Description = description
    col.save()
    return Response({"uuid": col.uuid, "col_id": col.pk})
@api_view(["POST"])
def removeDocument(request):
    """Delete the Document whose DocID matches the posted `doc_id`."""
    Document.objects.get(DocID=request.POST.get("doc_id")).delete()
    return Response({"message": "delete document is succeeded"})
@api_view(["POST"])
def removeCollection(request):
    """Delete the Collection with the posted primary key `col_id`."""
    Collection.objects.get(pk=request.POST.get("col_id")).delete()
    return Response({"message": "delete collection is succeeded"})
@api_view(["POST"])
def remove_shared_collection(request):
    """Unlink a collection that was shared with the requesting user.

    Only the SharedCollection link rows are removed; the collection itself
    is untouched.
    """
    member = User.objects.get(GmailID=request.POST.get("GmailID"))
    target = Collection.objects.get(pk=request.POST.get("col_id"))
    SharedCollection.objects.filter(SharedUser=member, SharedCollection=target).delete()
    return Response({"message": "delete shared collection is succeeded"})
@api_view(["POST"])
def getDocumentData(request):
    """Return everything the document-edit screen needs: the document itself,
    tag/trigger/doctype lookup tables, the user's collections, and the
    strand / standard-code lists for the user's state standards.

    POST params: document_id, GmailID.
    """
    doc_id = request.POST.get("document_id")
    GmailID = request.POST.get("GmailID")
    user = User.objects.get(GmailID=GmailID)
    # Schools with Have_standard > 1 use the adult standard sets.
    category = "Adult" if user.School.Have_standard > 1 else "K12"
    general_tags = General_Tag.objects.all()
    subject_triggerword = SubjectTrigger.objects.all()
    grade_triggerword = GradeTrigger.objects.all()
    doctype = DocType.objects.all()
    collections = []
    for c in Collection.objects.filter(Owner_User=user):
        collections.append({
            "title": c.Title,
            "pk": c.pk,
            # Empty Thumbnail means "derive one from the collection's docs".
            "thumbnail": getThumbnailFromCollection(c) if c.Thumbnail == "" else c.Thumbnail,
        })
    doc = Document.objects.filter(pk=doc_id).first()
    doc_json = {}
    doc_json["pk"] = doc.pk
    doc_json["Title"] = doc.Title
    doc_json["DocID"] = doc.DocID
    doc_json["DocType"] = doc.DocType.Type
    doc_json["DateShared"] = doc.DateShared.strftime("%b. %d %Y")
    doc_json["subject_triggerword"] = doc.subject_triggerword.TriggerWord
    doc_json["grade_triggerword"] = doc.grade_triggerword.TriggerWord
    doc_json["FileType"] = doc.ServiceType if doc.ServiceType == "Website" else "Document"
    doc_json["thumbnail"] = doc.thumbnail
    doc_json["iconUrl"] = doc.IconUrl
    doc_json["Standards"] = []
    doc_json["Strands"] = []
    for st in doc.Standard.all():
        doc_json["Standards"].append(st.Standard_Number)
        doc_json["Strands"].append(st.Strand)
    # The first standard/strand (if any) pre-populate the edit form.
    doc_json["standard"] = doc_json["Standards"][0] if doc_json["Standards"] else ""
    doc_json["strand"] = doc_json["Strands"][0] if doc_json["Strands"] else ""
    doc_json["General_Tags"] = [tag.Tag for tag in doc.General_Tags.all()]
    doc_json["Url"] = doc.Url
    doc_json["standard_set"] = ""
    try:
        selected_subject_triggerword = doc_json["subject_triggerword"]
        selected_grade_triggerword = doc_json["grade_triggerword"]
        # Fix: `user` is already loaded; no need to re-query by GmailID.
        state = user.School.State
        subject = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword).Subject
        grade = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword).Grade
        standard_set = StateStandard.objects.get(State=state, Subject=subject, Category=category).StandardSet
        strand = Standard.objects.filter(StandardSet=standard_set, Grade=grade).values("Strand").distinct()
        json_strands = json.dumps(list(strand))
        doc_json["standard_set"] = standard_set.SetLabel
    except Exception:
        # Fix: was a bare `except:`; Exception still covers every ORM
        # DoesNotExist here but lets SystemExit/KeyboardInterrupt propagate.
        # No standard set exists for this state/subject/category.
        json_strands = json.dumps([])
        state = user.School.State
        subject = SubjectTrigger.objects.get(TriggerWord=selected_subject_triggerword).Subject
        grade = GradeTrigger.objects.get(TriggerWord=selected_grade_triggerword).Grade
    json_codes = json.dumps([])
    if doc_json["Strands"]:
        selected_strand = doc_json["Strands"][0]
        try:
            standard_set = StateStandard.objects.get(State=state, Subject=subject, Category=category).StandardSet
            code = Standard.objects.filter(StandardSet=standard_set, Grade=grade, Strand=selected_strand).values("id", "Standard_Number", "Description").distinct()
            json_codes = json.dumps(list(code))
        except (Standard.DoesNotExist, StateStandard.DoesNotExist):
            # Fix: the original `except A or B` evaluates to `except A` and
            # never caught StateStandard.DoesNotExist.
            json_codes = json.dumps([])
    return Response({
        "general_tags": core_serializers.serialize("json", general_tags),
        "subject_triggerword": core_serializers.serialize("json", subject_triggerword),
        "grade_triggerword": core_serializers.serialize("json", grade_triggerword),
        "doctype": core_serializers.serialize("json", doctype),
        "collections": json.dumps(collections),
        "strand": json_strands,
        "code": json_codes,
        "standard_set": doc_json["standard_set"],
        "doc": doc_json
    })
@api_view(["POST"])
def get_community(request):
    """List the communities the user belongs to (role "user") plus the ids
    of the communities they currently share to.
    """
    user = User.objects.get(GmailID=request.POST.get("GmailID"))
    memberships = CommunityMember.objects.filter(user=user, role="user")
    coms = [
        {"community_name": m.community.community_name, "pk": m.community.pk}
        for m in memberships
    ]
    shared_communities = [sc.pk for sc in user.shared_community.all()]
    return Response({
        "communities": json.dumps(coms),
        "shared_communities": json.dumps(shared_communities)
    })
@api_view(["POST"])
def get_admin_communities(request):
    """Return the communities the user administers, oldest membership first,
    each with its regular-member head count.
    """
    user = User.objects.get(GmailID=request.POST.get("GmailID"))
    admin_memberships = (CommunityMember.objects
                         .filter(user=user, role="admin")
                         .order_by("member_since_date"))
    communities = [
        {
            "pk": membership.community.pk,
            "name": membership.community.community_name,
            "memberCount": CommunityMember.objects.filter(
                role="user", community=membership.community).count(),
        }
        for membership in admin_memberships
    ]
    return Response({
        "communities": json.dumps(communities),
    })
@api_view(["POST"])
def get_community_name(request):
    """Return the display name for the community id posted as `communityID`."""
    community = Community.objects.get(pk=request.POST.get("communityID"))
    return Response({"communityName": community.community_name})
@api_view(["POST"])
def save_sharings_setting(request):
    """Replace the user's shared-community selection with the posted checkbox
    state, then echo back the fresh membership/sharing lists.
    """
    user = User.objects.get(GmailID=request.POST.get("GmailID"))
    setting_entries = json.loads(request.POST.get("setting"))
    # Rebuild the M2M from scratch: clear, then re-add only checked entries.
    user.shared_community.clear()
    for entry in setting_entries:
        if entry["isChecked"] == True:
            user.shared_community.add(Community.objects.get(pk=entry["id"]))
    memberships = CommunityMember.objects.filter(user=user, role="user")
    coms = [
        {"community_name": m.community.community_name, "pk": m.community.pk}
        for m in memberships
    ]
    shared_communities = [sc.pk for sc in user.shared_community.all()]
    return Response({
        "communities": json.dumps(coms),
        "shared_communities": json.dumps(shared_communities)
    })
@api_view(["POST"])
def is_admin(request):
    """Report whether the posted GmailID belongs to a user who administers
    at least one community. Unknown users are simply not admins.
    """
    GmailID = request.POST.get("GmailID")
    try:
        user = User.objects.get(GmailID=GmailID)
    except User.DoesNotExist:
        return Response({"isAdmin": False})
    # Fix: .exists() lets the database short-circuit instead of counting
    # every row, and the comparison already yields a bool, so the
    # `True if ... else False` wrapper was redundant.
    return Response({"isAdmin": CommunityMember.objects.filter(user=user, role="admin").exists()})
@api_view(["POST"])
def get_users_per_communities(request):
    """Return every user plus the ids of users already enrolled (role "user")
    in the posted community.
    """
    community = Community.objects.get(pk=request.POST.get("community_id"))
    members = CommunityMember.objects.filter(community=community, role="user")
    existing_users = [m.user.pk for m in members]
    return Response({
        "users": core_serializers.serialize('json', User.objects.all()),
        "existingUsers": json.dumps(existing_users)
    })
@api_view(["POST"])
def save_community_setting(request):
    """Rebuild a community's regular-member list from the posted checkbox
    state, then return all users plus the resulting membership ids.
    """
    checked_users = json.loads(request.POST.get("users"))
    community = Community.objects.get(pk=request.POST.get("communityID"))
    # Wipe the current "user" memberships and re-add only the checked ones.
    CommunityMember.objects.filter(community=community, role="user").delete()
    for entry in checked_users:
        if entry["isChecked"] == True:
            CommunityMember(
                role="user",
                member_since_date=datetime.datetime.now(),
                user=User.objects.get(pk=entry["id"]),
                community=community,
            ).save()
    members = CommunityMember.objects.filter(community=community, role="user")
    existing_users = [m.user.pk for m in members]
    return Response({
        "users": core_serializers.serialize('json', User.objects.all()),
        "existingUsers": json.dumps(existing_users)
    })
@api_view(["POST"])
def add_email_to_community(request):
    """Register an email address for a community unless that email is already
    known (for any community, matching the original lookup).

    POST params: communityID, email.
    """
    community = Community.objects.get(pk=request.POST.get("communityID"))
    email = request.POST.get("email")
    # Fix: get_or_create replaces the try/except get-then-save dance and
    # avoids the race between the existence check and the insert.
    CommunityUserEmail.objects.get_or_create(
        email=email, defaults={"community": community}
    )
    return Response({"message": "success"})
@api_view(["POST"])
def add_email_from_csv(request):
    """Bulk-register community emails from an uploaded CSV file.

    Expects columns Firstname, Lastname, Email. Rows whose email already
    exists (for any community) are skipped, matching the original behavior.
    """
    community = Community.objects.get(pk=request.data['community_id'])
    file_obj = request.data['file_object']
    decoded_file = file_obj.read().decode('utf-8')
    for line in csv.DictReader(io.StringIO(decoded_file)):
        # Fix: get_or_create replaces the race-prone get-then-save pattern.
        CommunityUserEmail.objects.get_or_create(
            email=line["Email"],
            defaults={
                "first_name": line["Firstname"],
                "last_name": line["Lastname"],
                "community": community,
            },
        )
    return Response({"message": "success", "fileName": file_obj.name})
@api_view(["POST"])
def download_csv(request):
    """Write the member email list to a server-side CSV file and report success."""
    community_id = request.POST.get("community_id")
    # Fix: the csv module requires newline='' on the file object, otherwise
    # every row is followed by a blank line on Windows.
    with open('data/download/MEMBERS LIST.csv', mode='w', newline='') as csv_file:
        fieldnames = ['Firstname', 'Lastname', 'Email']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        # NOTE(review): every CommunityUserEmail row is exported even though a
        # community_id is posted -- confirm whether this should filter by community.
        for usr in CommunityUserEmail.objects.all():
            writer.writerow({'Firstname': usr.first_name, 'Lastname': usr.last_name, 'Email': usr.email})
    return Response({"message": "success"})
# -*- coding: utf-8 -*-
import pymysql
import time
import re
class LightMysql:
    """Thin convenience wrapper around a single pymysql connection.

    Holds one connection plus one DictCursor and exposes `query` for reads
    and `dml` for writes. Errors are reported by returning False and
    stashing the pymysql error code in `_error_code`.
    """

    _dbconfig = None
    _cursor = None
    _connect = None
    _error_code = ''  # last error code reported by pymysql
    TIMEOUT_DEADLINE = 30  # give up (re)connecting after 30 s total
    TIMEOUT_THREAD = 10  # connect timeout for one attempt
    TIMEOUT_TOTAL = 0  # accumulated time spent on failed attempts

    def __init__(self, dbconfig):
        """Validate `dbconfig` and open the connection.

        Raises Exception with a formatted message if the connection fails.
        """
        try:
            self._dbconfig = dbconfig
            self.get_dbconfig(dbconfig)
            self._connect = pymysql.connect(
                host=self._dbconfig['host'],
                port=self._dbconfig['port'],
                user=self._dbconfig['user'],
                passwd=self._dbconfig['passwd'],
                db=self._dbconfig['db'],
                use_unicode=True,
                cursorclass=pymysql.cursors.DictCursor,
                charset=self._dbconfig['charset'],
                connect_timeout=self.TIMEOUT_THREAD)
        except pymysql.Error as e:
            self._error_code = e.args[0]
            # Fix: the original accidentally built a nested tuple
            # ((timestamp, name), code, detail); format one readable string.
            error_msg = "%s --- %s: %s %s" % (
                time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
                type(e).__name__,
                e.args[0],
                e.args[1] if len(e.args) > 1 else '')
            # Track the time budget even though automatic reconnection is
            # currently disabled (see the commented-out retry below).
            if self.TIMEOUT_TOTAL < self.TIMEOUT_DEADLINE:
                interval = 0
                self.TIMEOUT_TOTAL += (interval + self.TIMEOUT_THREAD)
                time.sleep(interval)
                # return self.__init__(dbconfig)
            raise Exception(error_msg)
        self._cursor = self._connect.cursor(pymysql.cursors.DictCursor)

    def get_dbconfig(self, dbconfig):
        """Validate the config dict; default 'charset' to utf8mb4.

        Raises Exception('Dbconfig Error') when `dbconfig` is not a dict or
        a required key is missing.
        """
        if type(dbconfig) is not dict:
            raise Exception('Dbconfig Error')
        for key in ['host', 'port', 'user', 'passwd', 'db']:
            if key not in dbconfig.keys():
                raise Exception('Dbconfig Error')
        # Fix: the original set the charset default whenever a *required*
        # key was missing; the intent is clearly "default a missing charset".
        if 'charset' not in dbconfig.keys():
            self._dbconfig['charset'] = 'utf8mb4'
        return True

    def query(self, sql, ret_type='all'):
        """Run a SELECT; ret_type is 'all', 'one' or 'count'.

        Returns False on any pymysql error (code kept in `_error_code`).
        """
        try:
            self._cursor.execute("SET NAMES utf8mb4")
            self._cursor.execute(sql)
            if ret_type == 'all':
                return self.rows2array(self._cursor.fetchall())
            elif ret_type == 'one':
                return self._cursor.fetchone()
            elif ret_type == 'count':
                return self._cursor.rowcount
        except pymysql.Error as e:
            self._error_code = e.args[0]
            print(e)
            return False

    def dml(self, sql):
        """Run an INSERT/UPDATE/DELETE and commit.

        Returns the new row id for INSERTs, True for other statements,
        False on error.
        """
        try:
            self._cursor.execute("SET NAMES utf8mb4")
            self._cursor.execute(sql)
            self._connect.commit()
            stype = self.dml_type(sql)
            if stype == 'insert':
                return self._connect.insert_id()
            else:
                return True
        except pymysql.Error as e:
            self._error_code = e.args[0]
            print(e)
            return False

    def dml_type(self, sql):
        """Classify a DML statement by its first word; False if unrecognized."""
        # Fix: raw string avoids the invalid-escape-sequence warning on \w/\s.
        re_dml = re.compile(r'^(?P<dml>\w+)\s+', re.I)
        m = re_dml.match(sql)
        if m:
            verb = m.group("dml").lower()
            if verb in ('delete', 'update', 'insert'):
                return verb
        return False

    def rows2array(self, data):
        """Copy the cursor's dict rows into a plain list, validating types."""
        result = []
        for da in data:
            if type(da) is not dict:
                raise Exception('Format Error: data is not a dict.')
            result.append(da)
        return result

    def __del__(self):
        """Free the cursor and connection; safe to call more than once."""
        try:
            self._cursor.close()
            self._connect.close()
        except:
            pass

    def close(self):
        """Explicit alias for releasing the connection resources."""
        self.__del__()
class Spider:
    """Data-access layer for the crawler tables (pf_*), built on LightMysql.

    NOTE(review): every query below is assembled by string interpolation and
    values are only single-quoted, not escaped -- callers must pass trusted
    values or pre-escape them (see escape_string); otherwise this is
    SQL-injectable.
    """
    _db = None

    def __init__(self, dbconfig):
        self._db = LightMysql(dbconfig)

    def close(self):
        self._db.close()

    def __load_database(self, table, fields, where, string=None, ret_type="all"):
        """Build and run a SELECT.

        where: dict (AND of equality / IN conditions) or raw SQL string;
        string: extra raw condition ANDed on; ret_type forwarded to query().
        """
        sql_select = "SELECT %s FROM %s " % (",".join(["`%s`" % v for v in fields]), table)
        strwhere = ""
        if type(where) is dict:
            arr = []
            for k, v in where.items():
                if type(v) is list:
                    # list value -> SQL IN (...)
                    arr.append("%s in (%s)" % (k, ','.join(["'%s'" % x for x in v])))
                else:
                    arr.append("%s = '%s'" % (k, v))
            if len(arr):
                strwhere = "where %s" % " AND ".join(arr)
        elif type(where) is str and len(where):
            strwhere = "where %s" % where
        if string:
            # Extra raw condition: AND it on, or start the where clause with it.
            if strwhere:
                strwhere = "%s AND %s" % (strwhere, string)
            else:
                strwhere = "where %s" % string
        if strwhere:
            sql_select += strwhere
        return self._db.query(sql_select, ret_type)

    def __insert_database(self, table, param):
        """Multi-row INSERT; param is a list of dicts sharing the keys of
        param[0]. Returns False if param is empty or a row misses a key.
        """
        if not len(param):
            return False
        field = param[0].keys()
        row = []
        for r in param:
            arr = []
            for f in field:
                if f not in r.keys():
                    return False
                arr.append("'%s'" % r[f])
            row.append("(%s)" % ",".join(arr))
        sql_insert = "INSERT INTO %s (%s) VALUES %s" % (table, ",".join(["`%s`" % v for v in field]), ",".join(row))
        return self._db.dml(sql_insert)

    def __update_database(self, table, id_key, param):
        """UPDATE rows by id; id_key is one id or a list of ids, param maps
        column -> new value. Returns False on bad input.
        """
        if type(param) is not dict:
            return False
        arr = []
        for k, v in param.items():
            arr.append("%s = '%s'" % (k, v))
        if not len(arr):
            return False
        where = ""
        if type(id_key) is list:
            where = "where id in (%s)" % ','.join(['%d' % d for d in id_key])
        else:
            where = "where id=%s" % id_key
        sql_update = "update %s set %s %s" % (table, ",".join(arr), where)
        return self._db.dml(sql_update)

    # pf_task -------------------------------------------------------------
    def load_task(self, fields, where, string=None):
        return self.__load_database("pf_task", fields, where, string)

    def update_task(self, tid, param):
        return self.__update_database("pf_task", tid, param)

    # pf_real_identity ----------------------------------------------------
    def load_real_identity(self, fields, where, string=None):
        return self.__load_database("pf_real_identity", fields, where, string)

    def insert_real_identity(self, param):
        return self.__insert_database("pf_real_identity", param)

    def update_real_identity(self, rid, param):
        return self.__update_database("pf_real_identity", rid, param)

    # twitter ---------------------------------------------------------------
    def load_twi_task(self, fields, where, string=None):
        return self.__load_database("pf_twi_task", fields, where, string)

    def load_twi_dynamic(self, fields, where, string=None):
        return self.__load_database("pf_twi_dynamic", fields, where, string)

    def insert_twi_task(self, param):
        return self.__insert_database("pf_twi_task", param)

    def update_twi_task(self, tid, param):
        return self.__update_database("pf_twi_task", tid, param)

    def load_twi_comment(self, fields, where, string=None):
        return self.__load_database("pf_twi_comment", fields, where, string)

    def update_twi_comment(self, cid, param):
        return self.__update_database("pf_twi_comment", cid, param)

    def load_twi_person(self, fields, where, string=None):
        return self.__load_database("pf_twi_person", fields, where, string)

    # facebook ----------------------------------------------------------------
    def load_fac_comment(self, fields, where, string=None):
        return self.__load_database("pf_fac_comment", fields, where, string)

    def update_fac_comment(self, cid, param):
        return self.__update_database("pf_fac_comment", cid, param)

    def load_fac_person(self, fields, where, string=None):
        return self.__load_database("pf_fac_person", fields, where, string)

    def load_fac_task(self, fields, where, string=None):
        return self.__load_database("pf_fac_task", fields, where, string)

    def load_fac_dynamic(self, fields, where, string=None):
        return self.__load_database("pf_fac_dynamic", fields, where, string)

    def insert_fac_task(self, param):
        return self.__insert_database("pf_fac_task", param)

    def update_fac_task(self, tid, param):
        return self.__update_database("pf_fac_task", tid, param)

    # instagram ------------------------------------------------------------
    def load_ins_comment(self, fields, where, string=None):
        return self.__load_database("pf_ins_comment", fields, where, string)

    def update_ins_comment(self, cid, param):
        return self.__update_database("pf_ins_comment", cid, param)

    def load_ins_person(self, fields, where, string=None):
        return self.__load_database("pf_ins_person", fields, where, string)

    def load_ins_task(self, fields, where, string=None):
        return self.__load_database("pf_ins_task", fields, where, string)

    def load_ins_dynamic(self, fields, where, string=None):
        return self.__load_database("pf_ins_dynamic", fields, where, string)

    def insert_ins_task(self, param):
        return self.__insert_database("pf_ins_task", param)

    def update_ins_task(self, tid, param):
        return self.__update_database("pf_ins_task", tid, param)

    # virtual identities -----------------------------------------------------
    def load_virtual_identity(self, fields, where, string=None):
        return self.__load_database("pf_virtual_identity", fields, where, string)

    def find_virtual_identity(self, fields, where, string=None):
        # Same as load_virtual_identity but returns a single row.
        return self.__load_database("pf_virtual_identity", fields, where, string, "one")

    def insert_virtual_identity(self, param):
        return self.__insert_database("pf_virtual_identity", param)

    def find_virtual_from_dynamic(self, dyid, platform):
        """Resolve the virtual identity behind a dynamic row on a platform."""
        sql_select = "select i.id, i.url from pf_%s_dynamic d, pf_virtual_identity i where d.vid=i.id and d.id=%s" % (
            platform.lower(),
            dyid)
        return self._db.query(sql_select, 'one')

    def get_max_vid(self):
        """Largest id currently in pf_virtual_identity."""
        sql_select = "select max(id) as maxid from pf_virtual_identity"
        ret = self._db.query(sql_select, 'one')
        return ret['maxid']

    # platform -------------------------------------------------------------
    def load_platform(self, fields, where, string=None):
        return self.__load_database("pf_platform", fields, where, string)

    # news -------------------------------------------------------------------
    def load_news_stat(self, rids):
        """Per-(rid, media) article counts for the given rid list."""
        sql = "select r.rid as rid, m.media as media, count(*) as cnt from pf_news_rela r,pf_news_media m where r.url = m.url and r.rid in (%s) group by r.rid,m.media" % ','.join(rids)
        return self._db.query(sql)

    def insert_news_task(self, param):
        return self.__insert_database("pf_news_task", param)

    def load_news_task(self, fields, where, string=None):
        return self.__load_database("pf_news_task", fields, where, string)

    def escape_string(self, string):
        # NOTE(review): pymysql.escape_string is deprecated in recent pymysql
        # releases; confirm the installed version still exposes it.
        return pymysql.escape_string(string)
# BOJ 11049 - matrix chain multiplication order (interval DP)
import sys
import math


def min_multiplications(dims):
    """Return the minimum number of scalar multiplications needed to
    multiply the whole matrix chain.

    dims[i] = (rows, cols) of matrix i; adjacent matrices are assumed
    compatible (dims[i][1] == dims[i+1][0]).

    Classic interval DP: dp[s][e] is the cheapest cost to multiply the
    sub-chain s..e, choosing the best split point `mid`.
    """
    n = len(dims)
    dp = [[0] * n for _ in range(n)]
    for gap in range(1, n):  # gap = length of the interval minus one
        for start in range(n - gap):
            end = start + gap
            dp[start][end] = math.inf  # seed with +inf, then minimize
            for mid in range(start, end):
                cost = (dp[start][mid] + dp[mid + 1][end]
                        + dims[start][0] * dims[mid][1] * dims[end][1])
                if cost < dp[start][end]:
                    dp[start][end] = cost
    return dp[0][n - 1]


def main():
    """Read the chain from stdin and print the optimal cost."""
    input = sys.stdin.readline
    n = int(input())
    arr = [list(map(int, input().split())) for _ in range(n)]
    print(min_multiplications(arr))


# Guarding the I/O lets the DP helper be imported and tested on its own.
if __name__ == "__main__":
    main()
|
from gym_pybullet_drones.envs.single_agent_rl.BaseSingleAgentAviary import ObservationType, ActionType
from track import TrackV1
import numpy as np
from gym_pybullet_drones.utils.Logger import Logger

# --- Environment setup ---------------------------------------------------
gui = True
obs = ObservationType.RGB  # observation type the agent receives, see README for details
act = ActionType.RPM  # action type the agent emits (one RPM per motor), see README
env = TrackV1(gui=gui, obs=obs, act=act)
# PyBullet client id, useful for direct pybullet calls against this sim.
PYB_CLIENT = env.getPyBulletClient()
# Logger samples at the control frequency (sim frequency / physics aggregation).
logger = Logger(logging_freq_hz=int(env.SIM_FREQ / env.AGGR_PHY_STEPS), num_drones=1)

# --- Rollout loop ----------------------------------------------------------
num_training_episodes = 10
# NOTE: `obs` is rebound here from the ObservationType above to the first observation.
obs = env.reset()
for episode in range(1, num_training_episodes + 1):
    done = False
    while not done:
        action = np.array([0.0, 0.0, 0.3, 0.3])  # TODO Implement your action, hopefully backed by RL
        obs, reward, done, info = env.step(action)
        # log current drone info, also used for graphing at the end
        logger.log(drone=0, timestamp=env.step_counter, state=env._getDroneStateVector(0))
        # Reset env when we finish an episode
        if done:
            obs = env.reset()
            print(episode)
# save any information here that you might need to use your trained drone,
# i.e. model weights, parameters, etc.
env.close()
# save .npy arrays to logs directory
logger.save()
# save csv information to desktop, comment out/delete if you don't want this
logger.save_as_csv("trial")
# plot the data on a graph
logger.plot()
# -*- coding: utf-8 -*-
# Production overrides on top of the base bkz settings: live PostgreSQL
# database, static-file root, and a legacy MySQL database.
from bkz.settings import *

DEBUG = False
TEMPLATE_DEBUG = False

# Primary database (PostgreSQL).
# NOTE(review): credentials are hardcoded; consider environment variables.
DATABASES['default'] = {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'NAME': 'bkz',  # Or path to database file if using sqlite3.
    'USER': 'bkz',  # Not used with sqlite3.
    'PASSWORD': 'bkz',  # Not used with sqlite3.
    'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
    'PORT': '5432',  # Set to empty string for default. Not used with sqlite3.
}

STATIC_ROOT = '/var/www/bkz/static/'  # (a stray "diff --git" artifact was removed from this line's comment)

# Legacy MySQL database, presumably kept for data migration -- confirm before removing.
DATABASES['old'] = {
    'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
    'NAME': 'disp',  # Or path to database file if using sqlite3.
    'USER': 'root',  # Not used with sqlite3.
    'PASSWORD': '89026441284',  # NOTE(review): plaintext root password committed to source.
    'HOST': 'server',  # Set to empty string for localhost. Not used with sqlite3.
    'PORT': '',  # Set to empty string for default. Not used with sqlite3.
}
|
# Calculator for sets: first line is the operation, second line the operands.
instruction = input()
# Fix: input() already returns str and split() yields str items, so both
# str() wrappers were redundant.
sets = input().split()
|
# Generated by Django 2.0.2 on 2018-09-03 17:09
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Auto-generated: adds the optional `compros` FK (AdminCC -> Combos_promos)."""

    dependencies = [
        ('miapp', '0002_auto_20180903_1708'),
    ]

    operations = [
        migrations.AddField(
            model_name='admincc',
            name='compros',
            # blank/null let existing rows migrate without providing a default.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='miapp.Combos_promos'),
        ),
    ]
|
import glfw
import numpy as np
from OpenGL.GL import *
class Window:
    """Thin GLFW wrapper: creates one window and owns the render loop."""

    def __init__(self, width: int, height: int, title: str):
        # GLFW must be initialized before any other glfw call.
        if not glfw.init():
            raise Exception("glfw cannot be initialized.")
        self.win = glfw.create_window(width, height, title, None, None)
        if not self.win:
            # Release the GLFW state we already acquired before bailing out.
            glfw.terminate()
            raise Exception("window cannot be created.")
        glfw.set_window_pos(self.win, 400, 200)
        # All subsequent GL calls target this window's context.
        glfw.make_context_current(self.win)
        glClearColor(0, 0, 0, 1)
        glColor3f(0, 1, 1)

    def main_loop(self):
        """Run the event/draw loop until the window closes, then shut down GLFW."""
        while not glfw.window_should_close(self.win):
            glfw.poll_events()
            glClear(GL_COLOR_BUFFER_BIT)
            glPointSize(5)
            # Draw a unit square centered at the origin in blue.
            DrawRectangle(0, 0, 1, 1, [0, 0, 1])
            glFlush()
            glfw.swap_buffers(self.win)
        glfw.terminate()
def DrawRectangle(h: float, k: float, length: float, breadth: float, color: tuple = (255, 255, 255)):
    """Draw an axis-aligned rectangle centered at (h, k) in immediate mode.

    color: RGB triple handed straight to glColor3f (OpenGL clamps color
    components, so 255 behaves like 1.0).
    """
    # Fix: the default was a mutable list shared across calls; a tuple
    # removes the mutable-default-argument hazard and stays indexable.
    glBegin(GL_POLYGON)
    glColor3f(color[0], color[1], color[2])
    glVertex2f(h + length / 2, k + breadth / 2)
    glVertex2f(h + length / 2, k - breadth / 2)
    glVertex2f(h - length / 2, k - breadth / 2)
    glVertex2f(h - length / 2, k + breadth / 2)
    glEnd()
# Create the window first (this also creates the GL context), then render.
win = Window(1280, 720, "Square")
glEnableClientState(GL_VERTEX_ARRAY)
win.main_loop()
|
# Hand-rolled spatial filters: a 5x5 Gaussian blur and a 7x7 median filter
# applied to a grayscale image chosen interactively.
import numpy as np

# 5x5 Gaussian kernel; the integer coefficients sum to 57, so dividing by 57
# normalizes the kernel (a constant image stays constant).
GAUSS_KERNEL = (1.0 / 57) * np.array([[0, 1, 2, 1, 0],
                                      [1, 3, 5, 3, 1],
                                      [2, 5, 9, 5, 2],
                                      [1, 3, 5, 3, 1],
                                      [0, 1, 2, 1, 0]])


def apply_gaussian_filter(img):
    """Return a copy of grayscale `img` convolved with the 5x5 Gaussian.

    The 2-pixel border is left unchanged (no padding is applied).
    """
    out = img.copy()
    height, width = img.shape[0], img.shape[1]
    for i in range(2, height - 2):
        for j in range(2, width - 2):
            # Fix: the original shadowed the builtin `sum` and also had a
            # dead `sum(sum(gauss))` statement; use a local accumulator.
            acc = 0.0
            for k in range(-2, 3):
                for l in range(-2, 3):
                    acc += GAUSS_KERNEL[2 + k, 2 + l] * img[i + k, j + l]
            out[i, j] = acc  # cast back to the image dtype on assignment
    return out


def apply_median_filter(img):
    """Return a copy of grayscale `img` filtered with a 7x7 median.

    The 3-pixel border is left unchanged (no padding is applied).
    """
    out = img.copy()
    height, width = img.shape[0], img.shape[1]
    for i in range(3, height - 3):
        for j in range(3, width - 3):
            neighbors = sorted(img[i + k, j + l]
                               for k in range(-3, 4)
                               for l in range(-3, 4))
            out[i, j] = neighbors[24]  # median of the 49 window values
    return out


def main():
    """Prompt for a filter, run it on the hardcoded image, show the result."""
    import cv2  # local import keeps the pure-numpy filters importable without cv2
    selection = input("1 - gaussian filter" + '\n' + "2 - median filter" + '\n' + "Enter your option number : ")
    if selection == "1" or selection == "2":
        print("processing")
        if selection == "1":
            img = cv2.imread('C:/Users/Isuru/Desktop/160153C_Filters/image.jpg', cv2.IMREAD_GRAYSCALE)
            img_out = apply_gaussian_filter(img)
            cv2.imwrite('C:/Users/Isuru/Desktop/160153C_Filters/output.jpg', img_out)
        else:
            img = cv2.imread('C:/Users/Isuru/Desktop/Filters/image.jpg', cv2.IMREAD_GRAYSCALE)
            img_out = apply_median_filter(img)
            cv2.imwrite('C:/Users/Isuru/Desktop/Filters/output.jpg', img_out)
        cv2.imshow('image', img_out)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        print("processing completed")
    else:
        print("Invalid input")


if __name__ == "__main__":
    main()
|
# random is used to pick the target word
import random
# word pool to draw from at random (animal names)
# NOTE(review): `dictionary` is a plain tuple, not a dict, despite the name.
dictionary = ("cat", "dog", "rabbit", "bear", "sheep")
def hangman(word):  # play one round of console hangman for the given word
    wrong = 0  # number of wrong guesses so far
    # ASCII-art gallows; one extra line is revealed per wrong guess.
    HP = ["",
          "_______ ",
          "| | ",
          "| | ",
          "| | ",
          "| | ",
          "| 0 ",
          "| /|\ ",
          "| / \ ",
          "| "
          ]
    data = list(word)  # letters still to be found (matched slots become '0')
    board = ["_"] * len(word)  # one underscore per letter of the target word
    win = False  # assume a loss until the board is completely filled
    print("単語当てゲームへようこそ!(動物)")
    while wrong < len(HP) - 1:  # keep playing while gallows stages remain
        print("\n")
        msg = "アルファベットを入力してください! "
        char = input(msg)  # read a single guessed letter
        if char in data:  # correct guess
            cind = data.index(char)  # position of the first unmatched occurrence
            board[cind] = char  # reveal that letter on the board
            # mark the slot consumed; duplicate letters must be guessed once each
            data[cind] = '0'
        else:  # wrong guess: advance the gallows by one stage
            wrong += 1
        print(" ".join(board))  # show the board after every guess, right or wrong
        e = wrong + 1
        print("\n".join(HP[0:e]))  # draw the gallows up to the current stage
        if "_" not in board:  # all letters revealed: the player wins
            print("勝利!")
            print(" ".join(board))
            win = True
            break
    if not win:
        print("\n".join(HP[0:wrong+1]))  # out of guesses: show the final gallows
        print("あなたは失敗しました。正解は{}".format(word))
sblsy = random.choice(dictionary)  # pick the target word at random
hangman(sblsy)
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup
import database

# Article ids known to parse correctly.
# NOTE(review): this shadows the builtin `list`.
list=[15335287,13495332,12438452,10167348,7704150,15081291,13754755,13754608,15230833]
# Article ids known to fail parsing.
ll=[20960386,10050485,9848063,10595949,10771117,9987189,21639667,21403332]
# Example article URL: http://www.edewakaru.com/archives/15335287.html
def spaide(id):
    """Scrape one edewakaru grammar article and return a populated database.TYUFA.

    The article body is split into the 【接続】/【意味】/【例文】/【説明】 sections
    (connection, meaning, example sentences, explanation) and each section is
    stored on the TYUFA record, multi-line fields joined with '######'.
    """
    url = 'http://www.edewakaru.com/archives/{}.html'.format(id)
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
    res = requests.get(url, headers=headers, timeout=10).text
    soup = BeautifulSoup(res, 'lxml')
    data = soup.select('#main-inner > article > div > div')
    # Grab everything between the first section marker 【 and the trailing <img>.
    p = re.findall('</a><br/>【(.*?)<img ', str(data[0]), re.S)
    YUFA = database.TYUFA()
    YUFA.YUFA = soup.title
    if len(p) == 0:
        # Some articles have an extra <br/> before the section markers.
        p = re.findall('</a><br/><br/>【(.*?)<img ', str(data[0]), re.S)
    #if '[意味]' in p[0]:
    if '【意味】' in p[0]:
        p = p[0].split('<br/>[')
    else:
        p = p[0].replace('【「', '「')
        p = p.split('<br/>【')
    for i in p:
        if i[0:2] == '接続':
            # "Connection" section: one pattern per <br/>-separated line.
            d = i.split('<br/>')
            JIEXU = []
            for d1 in d[1:-1]:
                d1 = d1.strip()
                if len(d1) > 0:
                    JIEXU.append(d1)
            YUFA.JIEXU = '######'.join(str(i) for i in JIEXU)
        if i[0:2] == '意味':
            # "Meaning" section; strip the section header / trailing junk lines.
            d = i.split('<br/>')
            if len(res) != 0:
                d = d[1:-1]
            else:
                d = d[1:-2]
            YISI = []
            for d1 in d:
                p = BeautifulSoup(d1, 'lxml')
                YISI.append(p.get_text())
            YUFA.YISI = '######'.join(str(i) for i in YISI)
        if i[0:2] == '例文':
            # "Example sentences" section; '→' separates the sentence from
            # its explanation, '(復習' marks review cross-references.
            d = i.split('<br/><br/>')
            for d1 in d[:-1]:
                if d.index(d1) == 0: d1 = d1[3:]  # drop the section header from the first chunk
                p = BeautifulSoup(d1, 'lxml')
                p = p.get_text().split('→')
                YUFA.LIJU.append(p[0])
                if len(p) >= 2:
                    p1 = p[1].split('(復習')
                    if len(p1) >= 2:
                        for p2 in p1[1:]:
                            YUFA.LIJU.append(p2)
                    else:
                        YUFA.LIJU.append(p1[0])
        if i[0:2] == '説明':
            # "Explanation" section: free text, one entry per <br/> line.
            d = i.split('<br/>')
            SHUOMING = []
            for d1 in d[1:]:
                # if d1!='':
                p = BeautifulSoup(d1, 'lxml')
                SHUOMING.append(p.get_text())
            YUFA.SHUOMING = '######'.join(str(i) for i in SHUOMING)
    return YUFA
#i=input('请输入id:')
print('start...........')
conn = database.opendb('yufa.sqlite')
f = open("yufaid.txt", "r")  # one article id per line
ids = f.readlines()
for id in ids:
    try:
        yufa = spaide(int(id))
        database.insert_into_T_YUFA(conn, yufa)
        for l in yufa.LIJU:
            # NOTE(review): lijuObj is built but the raw string `l` is what
            # gets inserted -- confirm which one insert_into_T_SENTENCE expects.
            lijuObj = database.toTYUFA_LIJU(l)
            database.insert_into_T_SENTENCE(conn, l)
        # NOTE(review): this break stops after the first id -- looks like a
        # debug leftover; confirm before removing.
        break
    except:
        # NOTE(review): bare except swallows every failure; only the id is printed.
        print(str(id))
print('end...........')
"""
An example of tmap visualizing data gathered in a flow cytometry
experiment. The k-nearest neighbor graph is constructed
using the Annoy library.
Data Source:
https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0057002
"""
import numpy as np
import flowkit as fk
import tmap as tm
from faerun import Faerun
from annoy import AnnoyIndex
from scipy.spatial.distance import cosine as cosine_distance
PATHS = [
"FR-FCM-ZZCF/K562 Cells_No Target Probes_002.fcs",
"FR-FCM-ZZCF/K562 Cells_BCR-A647_001.fcs",
]
SKIP = 1
def load_data(sample: fk.Sample):
    """Stack the raw events of every non-time channel into an
    (events, channels) array; channels before SKIP are excluded.
    """
    per_channel = [
        np.array(sample.get_channel_events(idx, source="raw"))
        for idx in range(SKIP, len(sample.channels))
    ]
    return np.array(per_channel).T
def load_time(sample: fk.Sample):
    """Return the raw event timestamps; channel 0 is assumed to hold the time."""
    raw_times = sample.get_channel_events(0, source="raw")
    return np.array(raw_times)
def main():
    """Build a k-NN graph over the pooled cytometry events, lay it out with
    tmap, and render an interactive Faerun plot colored by source sample.
    """
    data = []
    time = []
    for path in PATHS:
        sample = fk.Sample(path)
        data.append(load_data(sample))
        time.append(load_time(sample))
    # Remember which input file each event came from (0 = control, 1 = stained).
    sources = []
    for i, e in enumerate(data):
        sources.extend([i] * len(e))
    data = np.concatenate(data, axis=0)
    time = np.concatenate(time, axis=0)
    d = len(data[0])  # dimensionality = number of channels kept
    # Initialize a new Annoy object and index it using 10 trees
    annoy = AnnoyIndex(d, metric="angular")
    for i, v in enumerate(data):
        annoy.add_item(i, v)
    annoy.build(10)
    # Create the k-nearest neighbor graph (k = 10), weighting edges by
    # cosine distance between the connected events.
    edge_list = []
    for i in range(len(data)):
        for j in annoy.get_nns_by_item(i, 10):
            edge_list.append((i, j, cosine_distance(data[i], data[j])))
    # Compute the layout from the edge list
    x, y, s, t, _ = tm.layout_from_edge_list(len(data), edge_list)
    legend_labels = [(0, "No Target Probe Negative Control"), (1, "Stained Sample")]
    # Create the plot
    faerun = Faerun(
        view="front",
        coords=False,
        legend_title="RNA Flow Cytometry: evaluation of detection sensitivity in low abundant intracellular RNA ",
    )
    faerun.add_scatter(
        "CYTO",
        {"x": x, "y": y, "c": sources, "labels": sources},
        point_scale=1.0,
        max_point_size=10,
        shader="smoothCircle",
        colormap="Set1",
        has_legend=True,
        categorical=True,
        legend_labels=legend_labels,
        legend_title="Cell Types",
    )
    # Overlay the minimum spanning tree produced by the layout step.
    faerun.add_tree(
        "CYTO_tree", {"from": s, "to": t}, point_helper="CYTO", color="#222222"
    )
    faerun.plot("cyto")


if __name__ == "__main__":
    main()
|
# NOTE(review): Python 2 syntax (print statements) -- this module cannot run
# under Python 3 as written.
import level_data
import main
import cache
import pygame
from os.path import join
import resources

pygame.init()
levels = level_data.get_levels()
for level in levels:
    print level
    # level[2] is the mask image filename for this level.
    maskfile = join(resources.IMG_DIR, level[2])
    # Bounding boxes are expensive to compute, so they are cached per mask file.
    bboxes = cache.get_cache(maskfile, main.get_bboxes)
    print "bboxes done"
pygame.quit()
from .models import Zipcode
import django_filters
class ZipCodeFilter(django_filters.FilterSet):
    """Filter set for Zipcode queries."""
    # Case-insensitive substring match on the jurisdiction name.
    JURISDICTION_NAME = django_filters.CharFilter(lookup_expr='icontains')

    class Meta:
        model = Zipcode
        # COUNT_FEMALE / COUNT_MALE use the default exact-match filtering.
        fields = ['JURISDICTION_NAME', 'COUNT_FEMALE', 'COUNT_MALE', ]
|
# Fruit price list (name -> price in rupiah).
daftarharga = {"apel": 5000, "jeruk": 8500, "mangga": 7800, "duku": 6500}


def rataharga():
    """Print the average fruit price across `daftarharga`."""
    # sum()/len() replace the manual accumulate-and-count loop.
    rata = sum(daftarharga.values()) / len(daftarharga)
    print("Rata-Rata Harga buah adalah", rata)


rataharga()
|
{
'targets': [
{
'target_name': 'liblibaxtls',
'type': 'static_library',
'sources': [
'crypto/aes.c',
'crypto/bigint.c',
'crypto/crypto_misc.c',
'crypto/hmac.c',
'crypto/md2.c',
'crypto/md5.c',
'crypto/rc4.c',
'crypto/rsa.c',
'crypto/sha1.c',
'ssl/asn1.c',
'ssl/gen_cert.c',
'ssl/loader.c',
'ssl/openssl.c',
'ssl/os_port.c',
'ssl/p12.c',
'ssl/tls1.c',
'ssl/tls1_svr.c',
'ssl/tls1_clnt.c',
'ssl/x509.c'
],
'include_dirs': [
'ssl',
'crypto',
'config'
],
'direct_dependent_settings': {
'include_dirs': [
'ssl',
'crypto',
'config'
]
}
},
{
'target_name': 'axssl',
'type': 'executable',
'dependencies': [
'liblibaxtls'
],
'sources': [
'samples/c/axssl.c'
]
}
]
}
|
#####################################################
#
# WebScarping Data Camp Course Details
#
#####################################################
#
# Import scrapy library
import scrapy
from scrapy.crawler import CrawlerProcess
#
# DC Spider class
class DCSpider( scrapy.Spider ):
    """Scrape DataCamp's course catalogue.

    The catalogue page is written to a tab-separated file; every individual
    course page contributes an entry to the module-level ``dict_dc`` mapping
    course title -> list of chapter titles.
    """
    name = "dcspider"

    def start_requests( self ):
        """Entry point: request the full course catalogue page."""
        urls = [ "https://www.datacamp.com/courses/all" ]
        for url in urls:
            yield scrapy.Request( url = url, callback = self.parse_course_links )

    def parse_front( self, response ):
        """Alternative front-page parser (currently not wired up by
        start_requests; kept for interface compatibility)."""
        course_blocks = response.css( 'div.course-block' )
        course_links = course_blocks.xpath( './a/@href' )
        links_to_follow = course_links.extract()
        for link in links_to_follow:
            yield response.follow( url = link, callback = self.parse_pages )

    def parse_pages( self, response ):
        """Store one course's chapter titles under its course title."""
        course_title = response.xpath( '//h1[contains(@class, "title")]/text()' )
        course_title_text = course_title.extract_first().strip()
        chapter_titles = response.css( 'h4.chapter__title::text' )
        chapter_titles_text = [t.strip() for t in chapter_titles.extract()]
        # BUG FIX: the original used the (unhashable) list of chapter titles
        # as the dict key, which raises TypeError at runtime; the course
        # title is the intended key.
        dict_dc[ course_title_text ] = chapter_titles_text

    def parse_course_links( self, response ):
        """Extract title/author/link/image for every course and write a TSV."""
        titles = response.css('h4.course-block__title::text').extract()
        authors = response.css('div.course-block__author > img::attr(alt)').extract()
        links = response.css('div.course-block > a::attr(href)').extract()
        images = response.css('div.course-block__author > img::attr(src)').extract()
        DCC_file = 'DataCampCourses.csv'
        # `with` guarantees the file is closed even if a row write fails.
        with open( DCC_file, 'w' ) as f:
            f.write('Course Title' + '\t' + 'Course Author' + '\t' + 'Course Link' + '\t' + 'Authors\'s Image Link' + '\n')
            for i in range(len(titles)):
                f.write( "%s \t %s\t %s\t %s\n" % (titles[i], authors[i], links[i], images[i]) )
#
# Initialize the dictionary that parse_pages fills (course title -> chapters)
dict_dc = dict()
#
# Run the Spider in-process; process.start() blocks until the crawl finishes
process = CrawlerProcess( )
process.crawl( DCSpider )
process.start( )
|
import cv2
# Load the two source images (BGR; cv2.imread returns None on a bad path).
img1 = cv2.imread('joji.jpg')
img2 = cv2.imread('luci.png')
# Crop img1 to match img2's size -- assumes img2 is 352x200 (rows x cols); TODO confirm
img3= img1[1:353,1:201,:]
print(img3.shape)
print(img2.shape)
print(img1.shape)
# Alpha-blend: dst = 0.7*img3 + 0.3*img2 (shapes and dtypes must match exactly)
dst = cv2.addWeighted(img3,0.7,img2,0.3,0)
cv2.imshow('dst',dst)
#cv2.imwrite('dst.png',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
import json
from os import path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from scipy import stats
from scipy.io import loadmat
from sklearn import model_selection
from tools import draw_neural_net, train_neural_net
# Regression of cholesterol ("chol") with a one-hidden-unit torch network,
# cross-validated for K in {5, 10}, with and without PCA preprocessing.
# Results (per-fold losses and RMSE) are accumulated in `results` and dumped
# to results.json at the end.
# NOTE(review): X is overwritten inside the loop by the PCA branch, so the
# iterations are order-dependent; documented in place rather than restructured.
script_dir = path.dirname(__file__)  # <-- absolute dir the script is in
rel_path = "./heart.csv"
data_file = path.join(script_dir, rel_path)
plot = False
df = pd.read_csv(data_file)
target = "chol"
y = np.array([df[target]])
y = (y - y.mean()) / y.std()  # z-score the regression target
y = y.transpose()
X = df.loc[:, df.columns != target]
results = {}
for do_pca_preprocessing in [True, False]:
    results[f"pca: {do_pca_preprocessing}"] = {}
    for K in [5, 10]:
        results[f"pca: {do_pca_preprocessing}"][f"k: {K}"] = {}
        results[f"pca: {do_pca_preprocessing}"][f"k: {K}"]["losses"] = {}
        # Normalize data
        # do_pca_preprocessing = False
        if do_pca_preprocessing:
            # Project standardized features onto the first k_pca principal components.
            Y = stats.zscore(X, 0)
            U, S, V = np.linalg.svd(Y, full_matrices=False)
            V = V.T
            # Components to be included as features
            k_pca = 2
            X = X @ V[:, 0:k_pca]
            N, M = X.shape
        else:
            # Standardize the continuous variables and one-hot encode the
            # discrete ones; prepend a bias column of ones.
            norm_vars = df[
                ["age", "trestbps", "thalach", "oldpeak", "ca", "slope"]
            ]
            norm_vars = (norm_vars - norm_vars.mean()) / norm_vars.std()
            new = pd.DataFrame()
            discrete = [
                "sex",
                "cp",
                "fbs",
                "restecg",
                "exang",
                "thal",
                "target",
            ]
            for col in discrete:
                temp = pd.get_dummies(df[col], prefix=col)
                new = pd.concat([new, temp], axis=1)
            d = pd.DataFrame(np.ones((new.shape[0], 1)))
            X = pd.concat([d, new], axis=1)
            attribute_names = list(X)
            X = np.array(X)
            N, M = X.shape
        C = 2
        # Parameters for neural network classifier
        n_hidden_units = 1 # number of hidden units
        n_replicates = 15 # number of networks trained in each k-fold
        max_iter = 10000 #
        # K-fold crossvalidation
        # K = 5 # only three folds to speed up this example
        CV = model_selection.KFold(K, shuffle=True)
        # Setup figure for display of learning curves and error rates in fold
        summaries, summaries_axes = plt.subplots(1, 2, figsize=(10, 5))
        # Define the model (factory so each replicate gets fresh weights)
        model = lambda: torch.nn.Sequential(
            torch.nn.Linear(M, n_hidden_units), # M features to n_hidden_units
            torch.nn.Tanh(), # 1st transfer function,
            torch.nn.Linear(
                n_hidden_units, 1
            ) # n_hidden_units to 1 output neuron
            # no final tranfer function, i.e. "linear output"
        )
        loss_fn = (
            torch.nn.MSELoss()
        ) # notice how this is now a mean-squared-error loss
        # print("Training model of type:\n\n{}\n".format(str(model())))
        errors = (
            []
        ) # make a list for storing generalizaition error in each loop
        for (k, (train_index, test_index)) in enumerate(CV.split(X, y)):
            # print("\nCrossvalidation fold: {0}/{1}".format(k + 1, K))
            # Extract training and test set for current CV fold, convert to tensors
            X_train = torch.tensor(X[train_index, :], dtype=torch.float)
            y_train = torch.tensor(y[train_index], dtype=torch.float)
            X_test = torch.tensor(X[test_index, :], dtype=torch.float)
            # NOTE(review): dtype=torch.uint8 truncates the z-scored float
            # targets toward zero -- almost certainly should be torch.float
            # like y_train; confirm before relying on the reported MSE.
            y_test = torch.tensor(y[test_index], dtype=torch.uint8)
            # Train the net on training data
            net, final_loss, learning_curve = train_neural_net(
                model,
                loss_fn,
                X=X_train,
                y=y_train,
                n_replicates=n_replicates,
                max_iter=max_iter,
            )
            print(f"\n\tBest loss: {final_loss}\n")
            results[f"pca: {do_pca_preprocessing}"][f"k: {K}"]["losses"][
                f"fold: {k + 1}"
            ] = final_loss.tolist()
            # Determine estimated class labels for test set
            y_test_est = net(X_test)
            # Determine errors and errors
            se = (y_test_est.float() - y_test.float()) ** 2 # squared error
            mse = (sum(se).type(torch.float) / len(y_test)).data.numpy() # mean
            errors.append(mse) # store error rate for current CV fold
            # Make a list for storing assigned color of learning curve for up to K=10
            color_list = [
                "tab:orange",
                "tab:green",
                "tab:purple",
                "tab:brown",
                "tab:pink",
                "tab:gray",
                "tab:olive",
                "tab:cyan",
                "tab:red",
                "tab:blue",
            ]
            if plot:
                # Display the learning curve for the best net in the current fold
                h, = summaries_axes[0].plot(learning_curve, color=color_list[k])
                h.set_label("CV fold {0}".format(k + 1))
                summaries_axes[0].set_xlabel("Iterations")
                summaries_axes[0].set_xlim((0, max_iter))
                summaries_axes[0].set_ylabel("Loss")
                summaries_axes[0].set_title("Learning curves")
        if plot:
            # Display the MSE across folds
            summaries_axes[1].bar(
                np.arange(1, K + 1), np.squeeze(errors), color=color_list
            )
            summaries_axes[1].set_xlabel("Fold")
            summaries_axes[1].set_xticks(np.arange(1, K + 1))
            summaries_axes[1].set_ylabel("MSE")
            summaries_axes[1].set_title("Test mean-squared-error")
            print("Diagram of best neural net in last fold:")
            weights = [net[i].weight.data.numpy().T for i in [0, 2]]
            biases = [net[i].bias.data.numpy() for i in [0, 2]]
            tf = [str(net[i]) for i in [1, 2]]
            draw_neural_net(
                weights, biases, tf, attribute_names=attribute_names
            )
        # Print the average classification error rate
        print(
            "\nEstimated generalization error, RMSE: {0}".format(
                round(np.sqrt(np.mean(errors)), 4)
            )
        )
        results[f"pca: {do_pca_preprocessing}"][f"k: {K}"]["rmse"] = float(
            round(np.sqrt(np.mean(errors)), 4)
        )
        if plot:
            # When dealing with regression outputs, a simple way of looking at the quality
            # of predictions visually is by plotting the estimated value as a function of
            # the true/known value - these values should all be along a straight line "y=x",
            # and if the points are above the line, the model overestimates, whereas if the
            # points are below the y=x line, then the model underestimates the value
            plt.figure(figsize=(10, 10))
            y_est = y_test_est.data.numpy()
            y_true = y_test.data.numpy()
            axis_range = [
                np.min([y_est, y_true]) - 1,
                np.max([y_est, y_true]) + 1,
            ]
            plt.plot(axis_range, axis_range, "k--")
            plt.plot(y_true, y_est, "ob", alpha=0.25)
            plt.legend(["Perfect estimation", "Model estimations"])
            plt.title("Chol: estimated versus true value (for last CV-fold)")
            plt.ylim(axis_range)
            plt.xlim(axis_range)
            plt.xlabel("True value")
            plt.ylabel("Estimated value")
            plt.grid()
            plt.show()
# Persist the nested results dict for later analysis.
with open(path.join(script_dir, "results.json"), "w") as file:
    file.write(json.dumps(results))
|
# 2019/12/24
# Read n and m, then report (sum of i^2 mod m for i = 1..n) modulo m.
n, m = map(int, input().split())
print(sum(i * i % m for i in range(1, n + 1)) % m)
"""
Read in the "show_version.txt" file. From this file use regular expressions to extract the
os_version, serial_number, and configuration register value.
Your output should look as follows:
OS Version: 15.4(2)T1
Serial Number: FTX0000038X
Config Register: 0x2102
"""
from __future__ import print_function, unicode_literals
import re
with open("show_version.txt") as f:
show_ver = f.read()
match = re.search(r"^Cisco IOS Software,.* Version (.*),", show_ver, flags=re.M)
if match:
os = match.group(1)
match = re.search(r"^Processor board ID (.*)\s\$", show_ver, flags=re.M)
if match:
sn = match.group(1)
match = re.search(r"^Configuration register is (.*)\s*$", show_ver, flags=re.M)
if match:
conf_reg = match.group(1)
print()
print("{:>20}: {:15}".format("OS Version", os))
print("{:>20}: {:15}".format("Serial Number", sn))
print("{:>20}: {:15}".format("Configuration Register", conf_reg))
print()
|
import sys
sys.stdin = open("D3_8457_input.txt", "r")
T = int(input())
for test_case in range(T):
N, B, E = map(int, input().split())
data = list(map(int, input().split()))
ans = 0
for i in data:
temp = (B // i - 1) * i
for _ in range(3):
temp += i
if temp <= (B + E) and (B - E) <= temp:
ans += 1
break
print("#{} {}".format(test_case + 1, ans)) |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 11:12:40 2017
Data is from the reviews of movies in 'data/labeledTrainData.tsv'
Two Model:
1. Common CNN Model
2. Complex CNN Model from Yoon Kim's Paper. Merge multiple filters.
It proves the second model preforms better.
@author: teding
"""
import numpy as np
import pandas as pd
import pickle
from collections import defaultdict
import re
from bs4 import BeautifulSoup
import sys
import os
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense,Input,Flatten
from keras.layers import Conv1D, MaxPooling1D,Dropout,Concatenate
from keras.models import Model, Sequential
def clean_str(string):
    """Normalise one review string.

    Strips backslashes, single quotes and double quotes, trims surrounding
    whitespace and lower-cases the result.
    """
    for unwanted in ("\\", "'", '"'):
        string = string.replace(unwanted, "")
    return string.strip().lower()
# Parameters setting
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# Data input
data_train = pd.read_csv('data/labeledTrainData.tsv',sep='\t')
texts=[]
labels=[]
# Use BeautifulSoup to remove some html tags and remove some unwanted characters.
for idx in range(data_train.review.shape[0]):
    text = BeautifulSoup(data_train.review[idx],'lxml')
    texts.append(clean_str(text.get_text()))
    labels.append(data_train.sentiment[idx])
# Tokenize, cap the vocabulary at MAX_NB_WORDS, pad/truncate every review to
# MAX_SEQUENCE_LENGTH integer ids, and one-hot encode the binary labels.
tokenizer=Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor: ', data.shape)
print('Shape of label tensor: ',labels.shape)
# Shuffle, then split off the last VALIDATION_SPLIT fraction for validation.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
print ('Number of negative and positive reviews in training and validation set')
print(y_train.sum(axis=0))
print(y_val.sum(axis=0))
## Use pre-trained wordToVec
embeddings_index = {}
f=open('data/glove.6B/glove.6B.100d.txt',encoding='utf8')
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:],dtype='float32')
    embeddings_index[word]=coefs
f.close()
print('Total %s word vectors in Glove 6B 100d.' % len(embeddings_index))
# Words missing from GloVe keep their random initial row.
embedding_matrix = np.random.random((len(word_index)+1,EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i]=embedding_vector
# create model: a plain stacked Conv1D/MaxPooling sentiment classifier
model = Sequential()
model.add(Embedding(len(word_index) + 1,
                    EMBEDDING_DIM,
                    weights = [embedding_matrix],
                    input_length=MAX_SEQUENCE_LENGTH,
                    trainable=True))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(5))
model.add(Conv1D(128, 5, activation='relu'))
model.add(MaxPooling1D(35))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print("model fitting - convolutional 1D neural network")
model.summary()
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=10, batch_size=128)
#-----------------------Complex CNN ------------------------------------
"""
In Yoon Kim’s paper, multiple filters have been applied.
"""
print ('---Start to run Complex CNN model--------------:')
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights = [embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
# Parallel convolutions with window sizes 3/4/5, concatenated (Kim 2014 style).
convs = []
filter_sizes = [3,4,5]
for fsz in filter_sizes:
    l_conv = Conv1D(filters=128,kernel_size=fsz,activation='relu')(embedded_sequences)
    l_pool = MaxPooling1D(5)(l_conv)
    convs.append(l_pool)
l_merge = Concatenate(axis=1)(convs)
l_cov1 = Conv1D(128,5,activation='relu')(l_merge)
l_pool1 = MaxPooling1D(5)(l_cov1)
l_cov2 = Conv1D(128,5,activation='relu')(l_pool1)
l_pool2 = MaxPooling1D(30)(l_cov2)
l_flat = Flatten()(l_pool2)
l_dense = Dense(128,activation='relu')(l_flat)
out = Dense(2,activation='softmax')(l_dense)
model2 = Model(sequence_input,out)
model2.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
model2.summary()
print("model fitting - complex CNN network")
model2.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=10, batch_size=50)
|
from browser import ajax, bind, document, html, timer
def on_complete(request):
    # Append the response body as a new <p> under #response
    # (Brython's `<=` operator inserts a DOM child).
    document['response'] <= html.P(request.text)
def request_simulation():
    # Fire an asynchronous GET; on_complete handles the reply.
    ajax.get('/page-dynamic/data', oncomplete=on_complete)
# Schedule one delayed request after 2000 ms, and issue one immediately.
timer.set_timeout(request_simulation, 2000)
request_simulation()
|
from django.shortcuts import render
from django.template import Context
from models import Donation
from forms import DonationForm
from django.views.decorators.csrf import csrf_protect, csrf_exempt


# NOTE(review): this span contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>> 1506909a...), which is a SyntaxError as committed.
# Resolved in favour of the form-validated branch (the ">>>>>>>" side), which
# supersedes the HEAD side's unvalidated POST handling; confirm against the
# Donation model's current fields (name vs first_name/last_name).
@csrf_exempt
def donations_form(request):
    """Render the donation form; on a valid POST, persist the donation."""
    form = DonationForm(request.POST or None)
    context = Context({
        'donated': False,
        'form': form
    })
    if request.method == 'POST' and form.is_valid():
        data = request.POST
        new_donation = Donation(name=data.get('name'),
                                amount=data.get('amount'),
                                card_number=data.get('card_number'),
                                message=data.get('message'))
        new_donation.save()
        context['donated'] = True
        return render(request, "donations/donation_form.html", context)
    else:
        return render(request, "donations/donation_form.html", {'donated': False,
                                                                'form': form})
|
# -*- coding: utf-8 -*-
import time
import datetime
import os
import pandas as pd
import numpy as np
from db_operation import DBOperations
from db_credential import credentials, oracle_credentials
# Connect to the Wind Oracle database.
db_opt_wind = DBOperations(**oracle_credentials)
# =============================================================================
# Trading calendar (SSE trading days)
# =============================================================================
sql1 = '''
select distinct acal.TRADE_DAYS
from wind.AShareCalendar acal
where acal.S_INFO_EXCHMARKET = 'SSE'
'''
# End the sample window at yesterday (YYYYMMDD).
enddate = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime("%Y%m%d")
trad_date = db_opt_wind.read_sql(sql1).sort_values(by='TRADE_DAYS', ascending=True).set_index(
    keys="TRADE_DAYS", drop=False).loc["20031231":enddate, :]
if not os.path.exists('./data'):
    os.mkdir('./data')
trad_date.to_csv('./data/trad_date.csv')
# %%===========================================================================
# Wind consensus estimates
# -----------------------------------------------------------------------------
# Rolling per-stock consensus indicators [AShareConsensusRollingData].
# BUG FIX: a comma was missing after EST_ROE, so Oracle treated
# EST_OPER_REVENUE as a column alias of EST_ROE and silently dropped a column.
sql2 = '''select S_INFO_WINDCODE, EST_DT, ROLLING_TYPE, NET_PROFIT, EST_EPS, EST_PE, EST_PEG, EST_PB, EST_ROE,
    EST_OPER_REVENUE, EST_CFPS, EST_DPS, EST_BPS, EST_EBIT, EST_EBITDA, EST_TOTAL_PROFIT, EST_OPER_PROFIT,
    EST_OPER_COST, BENCHMARK_YR, EST_BASESHARE
    from AShareConsensusRollingData order by EST_DT
    '''
ConsensusRollingData = db_opt_wind.read_sql(sql2)
ConsensusRollingData.to_csv('./data/ConsensusRollingData.csv')
# %%===========================================================================
# Trading info: suspension / resumption dates [AShareTradingSuspension]
# -----------------------------------------------------------------------------
sql3 = '''select S_INFO_WINDCODE, S_DQ_SUSPENDDATE, S_DQ_RESUMPDATE
    from AShareTradingSuspension order by S_DQ_SUSPENDDATE
    '''
tradeornot = db_opt_wind.read_sql(sql3)
tradeornot.to_csv('./data/tradeornot.csv')
# %%===========================================================================
# Trading info: daily valuation indicators [AShareEODDerivativeIndicator]
# -----------------------------------------------------------------------------
sql4 = '''select S_INFO_WINDCODE, TRADE_DT, S_VAL_PE_TTM, S_VAL_PB_NEW, S_VAL_PCF_OCFTTM, S_VAL_PS_TTM, S_DQ_FREETURNOVER,
    S_DQ_CLOSE_TODAY, UP_DOWN_LIMIT_STATUS
    from AShareEODDerivativeIndicator order by TRADE_DT
    '''
updownlimitstatus = db_opt_wind.read_sql(sql4)
updownlimitstatus.to_csv('./data/updownlimitstatus.csv')
# %%===========================================================================
# Balance sheet [AShareBalanceSheet]
# -----------------------------------------------------------------------------
sql5 = '''select S_INFO_WINDCODE, REPORT_PERIOD, TOT_SHRHLDR_EQY_EXCL_MIN_INT, ACTUAL_ANN_DT,MONETARY_CAP, ST_BORROW,
    BONDS_PAYABLE, LT_PAYABLE
    from AShareBalanceSheet order by REPORT_PERIOD
    '''
balancesheet = db_opt_wind.read_sql(sql5)
balancesheet.to_csv('./data/balancesheet.csv')
# %%===========================================================================
# Income statement [AShareIncome]
# -----------------------------------------------------------------------------
sql6 = '''select S_INFO_WINDCODE, REPORT_PERIOD, EBIT, TOT_PROFIT, INC_TAX
    from AShareIncome order by REPORT_PERIOD
    '''
profitloss = db_opt_wind.read_sql(sql6)
profitloss.to_csv('./data/profitloss.csv')
|
__copyright__ = """\
(c). Copyright 2008-2013, Vyper Logix Corp.,
All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
__server_version__ = 'Vyper-Proxy'
__version__ = "0.2.1.2"
import sys
import BaseHTTPServer, select, socket, SocketServer, urlparse
from vyperlogix.classes.CooperativeClass import Cooperative
from vyperlogix.lists.ListWrapper import CircularList
from vyperlogix import misc
from vyperlogix.misc import ObjectTypeName
class VyperProxy(BaseHTTPServer.BaseHTTPRequestHandler):
    # Python 2 proxy request handler: GET-family requests are forwarded to the
    # next endpoint taken from VyperProxy.remotes (presumably a CircularList
    # installed by the embedding application -- verify), and CONNECT requests
    # are tunnelled byte-for-byte.
    __base = BaseHTTPServer.BaseHTTPRequestHandler
    __base_handle = __base.handle
    server_version = "%s/%s" % (__server_version__,__version__)
    rbufsize = 0 # self.rfile Be unbuffered
    def handle(self):
        # Refuse clients not whitelisted in allowed_clients (when configured)
        # with a 403; otherwise defer to the base handler.
        (ip, port) = self.client_address
        if hasattr(self, 'allowed_clients') and ip not in self.allowed_clients:
            self.raw_requestline = self.rfile.readline()
            if self.parse_request(): self.send_error(403)
        else:
            self.__base_handle()
    def _connect_to(self, netloc, soc):
        # Connect soc to "host[:port]" (port defaults to 80).
        # Returns 1 on success, 0 after sending a 404 to the client.
        toks = netloc.split(':')
        if (len(toks) == 2):
            toks[-1] = int(toks[-1])
            host_port = tuple(toks)
        else:
            host_port = netloc, 80
        print "\t%s :: connect to %s" % (ObjectTypeName.objectSignature(self),':'.join([str(t) for t in list(host_port)]))
        try: soc.connect(host_port)
        except socket.error, arg:
            try: msg = arg[1]
            except: msg = arg
            self.send_error(404, msg)
            return 0
        return 1
    def do_CONNECT(self):
        # HTTPS tunnelling: acknowledge the tunnel, then shuttle bytes both
        # ways with a longer idle allowance (300 select cycles).
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                self.wfile.write(self.protocol_version +
                                 " 200 Connection established\r\n")
                self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
                self.wfile.write("\r\n")
                self._read_write(soc, 300)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()
    def do_GET(self):
        # Forward the request to the next remote endpoint (round-robin),
        # rewritten as an origin-form request with Connection: close.
        (scm, netloc, path, params, query, fragment) = urlparse.urlparse(self.path, 'http')
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            netloc = VyperProxy.remotes.next()
            if self._connect_to(netloc, soc):
                self.log_request()
                soc.send("%s %s %s\r\n" % (
                    self.command,
                    urlparse.urlunparse(('', '', path, params, query, '')),
                    self.request_version))
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key_val in self.headers.items():
                    soc.send("%s: %s\r\n" % key_val)
                soc.send("\r\n")
                self._read_write(soc)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()
    def _read_write(self, soc, max_idling=20):
        # Pump bytes between the client connection and soc until a socket
        # reports an exceptional condition or max_idling consecutive
        # 3-second selects pass with no data.
        iw = [self.connection, soc]
        ow = []
        count = 0
        while 1:
            count += 1
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs: break
            if ins:
                for i in ins:
                    if i is soc:
                        out = self.connection
                    else:
                        out = soc
                    data = i.recv(8192)
                    if data:
                        out.send(data)
                        count = 0
            else:
                print "\t" "idle", count
            if count == max_idling: break
    # Every other verb shares the GET forwarding logic.
    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE=do_GET
# Threaded HTTP server so each proxied request is handled in its own thread.
class ThreadingHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): pass
def start_VyperProxy(server_version=None,version=None):
    # Optionally override the advertised server name/version strings, then
    # start serving via BaseHTTPServer's test harness speaking HTTP/1.1.
    global __server_version__, __version__
    if (misc.isString(server_version)):
        __server_version__ = server_version
    if (misc.isString(version)):
        __version__ = version
    BaseHTTPServer.test(VyperProxy, ThreadingHTTPServer, protocol="HTTP/1.1")
|
import requests
import lxml
import os
#需求爬取三国演义的所有章节
from bs4 import BeautifulSoup
if __name__ == "__main__":
url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
#创建小说存储地址
if not os.path.exists('./小说'):
os.makedirs('./小说')
#开始页数据请求
response = requests.get(url=url,headers=headers).text
#使用bs4解析
soup = BeautifulSoup(response,'lxml')
#查找各章节的li标签
list_all_chapter = soup.select('.book-mulu > ul > li')
fp = open("./小说/三国演义.text",'w',encoding='utf-8')
#爬取每个章节页
for list_everyList in list_all_chapter:
#章节名称
chapter_name = list_everyList.get_text()
#章节详情地址
list_href = "https://www.shicimingju.com"+ list_everyList.a['href']
#请求章节详情地址
chapter_details_response = requests.get(url=list_href,headers=headers).text
chapter_details_soup = BeautifulSoup(chapter_details_response,'lxml')
#章节详情
chapter_details_text = chapter_details_soup.find('div',class_='chapter_content').text
fp.write(chapter_name + ":"+ chapter_details_text + "\n" )
print(chapter_name + "爬取成功") |
# Generated by Django 3.1.1 on 2020-11-13 16:22
from django.db import migrations
class Migration(migrations.Migration):
    # Renames height_unit -> demission_unit (spelling suggests "dimension_unit"
    # was intended -- verify before depending on the field name) and removes
    # the now-redundant per-axis unit fields.
    dependencies = [
        ('product', '0023_auto_20201113_1419'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='height_unit',
            new_name='demission_unit',
        ),
        migrations.RemoveField(
            model_name='product',
            name='length_unit',
        ),
        migrations.RemoveField(
            model_name='product',
            name='width_unit',
        ),
    ]
|
#!/usr/bin/python
from os import listdir, getcwd
import pandas as pd
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.backends.backend_pdf import PdfPages
# Plot NMR relaxation traces (R1, R2, NOE) per residue for three pH
# conditions, one colour per condition (Python 2 script).
c = {'ph3p3':'r', 'ph4p7':'g', 'ph7p4':'b'}
psz = 4 # scatter plot marker size
raw = pl.figure(figsize=(8.5,11)) # figsize args are w,h in inches
r1 = raw.add_subplot(311)
r2 = raw.add_subplot(312, sharex=r1)
noe = raw.add_subplot(313, sharex=r1)
# One *_rlx_raw file per condition; the filename prefix selects the colour.
rawf = [i for i in listdir(getcwd()) if i.endswith('rlx_raw')]
for f in rawf:
    m = f.split('_')[0]
    n = m # n can be assigned as DataFrame while m is preserved as string
    n = pd.read_table(f, delim_whitespace=True, usecols = [0,5,6,7,8,9,10], \
        names = ['res','r1','r1r','r2','r2r','noe','noer'], \
        header = None, index_col = 'res')
    # Reindex to residues 1..149 so missing residues plot as gaps.
    data = n.reindex(index=range(1,150))
    r1.plot(data.index, data.r1, '-', markersize=psz, color=c[m])
    r2.plot(data.index, data.r2, '-', markersize=psz, color=c[m])
    noe.plot(data.index, data.noe, '-', markersize=psz, color=c[m])
    print m
    # Mean percentage error of each quantity for this dataset.
    r1pe = 100*data.r1r/data.r1
    r2pe = 100*data.r2r/data.r2
    noepe = 100*data.noer/data.noe
    print 'R1 percent error avg = %f' % r1pe.mean()
    print 'R2 percent error avg = %f' % r2pe.mean()
    print 'NOE percent error avg = %f' % noepe.mean()
# Shared residue-number x axis and fixed y ranges per panel.
seqmin = 6
seqmax = 143
r1.set_xlim(seqmin, seqmax)
r1.xaxis.set_major_locator(MaxNLocator(15))
pl.setp(r1.get_xticklabels(), visible = False)
pl.setp(r2.get_xticklabels(), visible = False)
noe.set_xlabel('Residue')
r1min = 0.95
r1max = 1.55
r1.set_ylim(r1min, r1max)
r2min = 7
r2max = 17
r2.set_ylim(r2min, r2max)
noemin = 0.6
noemax = 0.95
noe.set_ylim(noemin, noemax)
r1.yaxis.set_major_locator(MaxNLocator(8, prune ='both'))
r2.yaxis.set_major_locator(MaxNLocator(8, prune ='both'))
noe.yaxis.set_major_locator(MaxNLocator(9, prune ='both'))
r1.set_ylabel('$R_1$ (1/s)')
r2.set_ylabel('$R_2$ (1/s)')
noe.set_ylabel('NOE')
r1.grid(False, which = 'major')
r2.grid(False, which = 'major')
noe.grid(False, which = 'major')
pl.tight_layout(pad = 8.0, h_pad = 0.5)
# Write all three panels to a single-page PDF.
plotfile = PdfPages('plots_trace.pdf')
plotfile.savefig(raw)
plotfile.close()
xpected = [1,"F", 5, 6, "DFG", 7, 3, 9, 34, 3]
actual = ["F", 2, 3, "ASFFSA", 5, 2, 3]
i = 0
j = 0
missing = []
extra = []
try:
while True:
found = False
try:
while expected[i] != actual[j]:
i+=1
else:
found = True
except:
if not found:
extra.append(actual[j])
finally:
i = 0
j+=1
except:
print extra
|
import spira.all as spira
class Resistor(spira.PCell):
    """Parameterised rectangular shunt resistor on the R1 layer."""
    width = spira.NumberParameter(default=spira.RDD.R1.MIN_WIDTH, doc='Width of the shunt resistance.')
    length = spira.NumberParameter(default=spira.RDD.R1.MIN_LENGTH, doc='Length of the shunt resistance.')
    def validate_parameters(self):
        # The resistor must be longer than it is wide.
        if self.width > self.length:
            raise ValueError('`Width` cannot be larger than `length`.')
        return True
    def create_elements(self, elems):
        # Geometry: a single length-by-width box centred on the origin.
        elems += spira.Box(width=self.length, height=self.width, center=(0,0), layer=spira.RDD.PLAYER.R1.METAL)
        return elems
    def create_ports(self, ports):
        # Two terminals on the short edges.  P1 encodes the layer in its name
        # while P2 passes process= explicitly -- presumably equivalent; verify.
        w, l = self.width, self.length
        ports += spira.Port(name='P1_R1', midpoint=(-l/2,0), orientation=180, width=self.width)
        ports += spira.Port(name='P2', midpoint=(l/2,0), orientation=0, width=self.width, process=spira.RDD.PROCESS.R1)
        return ports
if __name__ == '__main__':
    # Build the default cell and write it out as Resistor.gds
    D = Resistor()
    D.gdsii_output(name='Resistor')
import random
import json
ZE_ZASEDENO = "Z"
NAPACEN_ZNAK = "#"
NAPACEN_UGIB = "&"
KONEC = "E"
NADALJUJ = "C"
TRI = "T"
seznam = [1,2,3,4,5,6,7,8,9]
ZACETEK = "S"
def transponiraj(matrika):
    """Return the transpose of *matrika* (a grid stored as a list of rows).

    Raises IndexError for an empty grid, matching the original behaviour.
    """
    sirina = len(matrika[0])
    return [[vrstica[i] for vrstica in matrika] for i in range(sirina)]
def preveri_sudoku(sudoku):
    """Return True if *sudoku* is a completely and correctly solved 9x9 grid.

    Every row, every column and every 3x3 box must contain all the digits.
    BUG FIX: the original only validated the three boxes on the main
    diagonal, so grids with conflicts in the six off-diagonal boxes were
    wrongly accepted.
    """
    stevilke = set(range(1, len(sudoku) + 1))
    for vrstica in sudoku:              # every row holds all digits
        if not stevilke <= set(vrstica):
            return False
    for stolpec in zip(*sudoku):        # every column holds all digits
        if not stevilke <= set(stolpec):
            return False
    for vr in range(0, 9, 3):           # all nine 3x3 boxes hold all digits
        for st in range(0, 9, 3):
            skatla = {sudoku[r][c] for r in range(vr, vr + 3) for c in range(st, st + 3)}
            if not stevilke <= skatla:
                return False
    return True
def preveri_delno(sudoku):
    """Return True if the partially filled grid has no conflicts (0 = empty).

    Non-zero values must be unique within every row, every column and every
    3x3 box.  BUG FIX: the original only checked the three boxes on the main
    diagonal, so conflicting guesses inside off-diagonal boxes were accepted.
    """
    def _brez_ponovitev(celice):
        # Helper: the non-empty cells of one unit must all be distinct.
        polne = [c for c in celice if c != 0]
        return len(polne) == len(set(polne))

    if not all(_brez_ponovitev(vrstica) for vrstica in sudoku):
        return False
    if not all(_brez_ponovitev(stolpec) for stolpec in zip(*sudoku)):
        return False
    for vr in range(0, 9, 3):
        for st in range(0, 9, 3):
            skatla = [sudoku[r][c] for r in range(vr, vr + 3) for c in range(st, st + 3)]
            if not _brez_ponovitev(skatla):
                return False
    return True
class Igra:
    """One sudoku game in progress: the initial board plus the player's guesses."""

    def __init__(self, plosca, ugibi=None):
        # plosca: JSON-encoded grid (0 = empty cell); ugibi: list of
        # [row, column, digit] guesses, all 1-based.
        self.plosca = plosca
        if ugibi is None:
            self.ugibi = []
        else:
            self.ugibi = ugibi

    def pravilni_del(self):
        """Return the grid with all current guesses applied."""
        mreza = json.loads(self.plosca)
        for ugib in self.ugibi:
            mreza[ugib[0] - 1][ugib[1] - 1] = ugib[2]
        return mreza

    def napisana_polja(self, seznami):
        """Render grid *seznami* as text with 3x3 box separators."""
        niz = ""
        for vrstica in seznami:
            prazno = ""
            for i in range(len(vrstica)):
                if i % 3 == 0 and i % 9 != 0:
                    prazno = prazno + " | " + str(vrstica[i])
                else:
                    prazno = prazno + " " + str(vrstica[i])
            prazno = prazno[1:]
            prazno += " \n"
            niz += prazno
            if vrstica == seznami[2] or vrstica == seznami[5]:
                niz += "──────+───────+──────\n"
        return niz

    def za_igro(self, niz):
        """Split *niz* into a list of character lists, one per line."""
        polja = []
        for vrstica in niz.split("\n"):
            polja.append(list(vrstica))
        return polja

    def konec(self):
        """True when the combined board is a correctly solved sudoku."""
        return preveri_sudoku(self.pravilni_del())

    def ugibaj(self, ugib):
        """Apply one guess and return a status code (TRI, NAPACEN_ZNAK, ...)."""
        # BUG FIX: the length must be validated BEFORE indexing into ugib;
        # the original unpacked ugib[0..2] first, so a short guess raised
        # IndexError instead of returning TRI.
        if len(ugib) != 3:
            return TRI
        vrstica, stolpec, stevilka = ugib
        # all three components must be digits between 1 and 9
        if vrstica not in seznam or stolpec not in seznam or stevilka not in seznam:
            return NAPACEN_ZNAK
        plosca = json.loads(self.plosca)
        # cells filled on the initial board cannot be overwritten
        if plosca[vrstica - 1][stolpec - 1] != 0:
            return ZE_ZASEDENO
        self.ugibi.append(ugib)
        # a new guess on an already-guessed cell replaces the earlier guess
        for i, u in enumerate(self.ugibi):
            if u[0] == vrstica and u[1] == stolpec and preveri_delno(self.pravilni_del()) == True:
                self.ugibi[i] = ugib
        # reject (and roll back) a guess that conflicts with the grid
        if preveri_delno(self.pravilni_del()) == False:
            self.ugibi = self.ugibi[:-1]
            return NAPACEN_UGIB
        if self.konec():
            return KONEC
        else:
            return NADALJUJ
with open("Plosce.txt", "r", encoding="utf-8") as datoteka_s_ploscami:
    # Strip the trailing newline from each board line so the entries match
    # what Sudoku.nova_igra() produces (which already strips).
    mozne_plosce = [vrstica.strip() for vrstica in datoteka_s_ploscami]

def nova_igra():
    """Return a fresh Igra on a randomly chosen board."""
    return Igra(random.choice(mozne_plosce))
class Sudoku:
    """Persists multiple games to a JSON state file on disk.

    Every operation reloads the state file first and writes it back after,
    so several processes can share the same file (last writer wins).
    """

    def __init__(self, datoteka_s_stanjem, datoteka_s_ploscami="Plosce.txt"):
        self.igre = {}  # id -> (Igra, last status constant)
        self.datoteka_s_ploscami = datoteka_s_ploscami
        self.datoteka_s_stanjem = datoteka_s_stanjem

    def prost_id_igre(self):
        """Return the next unused game id (one past the current maximum)."""
        if len(self.igre) == 0:
            return 0
        else:
            return max(self.igre.keys()) + 1

    def nova_igra(self):
        """Create a game on a random board, persist it, and return its id."""
        self.nalozi_igre_iz_datoteke()
        with open(self.datoteka_s_ploscami, 'r', encoding='utf-8') as dsp:
            mozne_plosce = [vrstica.strip() for vrstica in dsp]
        igra = Igra(random.choice(mozne_plosce))
        id_igre = self.prost_id_igre()
        self.igre[id_igre] = (igra, ZACETEK)
        self.zapisi_igre_v_datoteko()
        return id_igre

    def ugibaj(self, id_igre, ugib):
        """Apply a guess to a stored game and persist the resulting status."""
        self.nalozi_igre_iz_datoteke()
        igra = self.igre[id_igre][0]
        poskus = igra.ugibaj(ugib)
        self.igre[id_igre] = (igra, poskus)
        self.zapisi_igre_v_datoteko()

    def zapisi_igre_v_datoteko(self):
        """Serialize all games as {id: ((plosca, ugibi), status)}."""
        with open(self.datoteka_s_stanjem, "w", encoding="utf-8") as dss:
            igre1 = {id_igre: ((igra.plosca, igra.ugibi), poskus)
                     for id_igre, (igra, poskus) in self.igre.items()}
            json.dump(igre1, dss, ensure_ascii=False)

    def nalozi_igre_iz_datoteke(self):
        """Reload games from disk; a missing state file means "no games yet".

        BUG FIX: the very first nova_igra() call used to crash with
        FileNotFoundError because the state file had never been written.
        """
        try:
            with open(self.datoteka_s_stanjem, "r", encoding="utf-8") as dss:
                igre = json.load(dss)
        except FileNotFoundError:
            self.igre = {}
            return
        self.igre = {int(id_igre): (Igra(plosca, ugibi), poskus)
                     for id_igre, ((plosca, ugibi), poskus) in igre.items()}
|
##Group 8: Álvaro Alfayate, Andrea de la Fuente, Carla Guillén y Jorge Nuevo.
def ReadFasta(FileName):
    """Read a FASTA file and return (accession_number, dna_sequence).

    The header line is kept with its newline so the regex can split it from
    the sequence; all sequence lines are stripped, upper-cased, and joined.
    Raises AttributeError if the file does not match the expected
    NM_/XM_ RefSeq header followed by an A/C/G/T sequence.

    BUG FIX: the original called MyFile.close() AFTER the return statement,
    so it never ran; a `with` block now guarantees the handle is closed.
    """
    ReadSeq = ''  # accumulates header + concatenated sequence
    with open(FileName, 'r') as MyFile:
        for Line in MyFile:
            if '>' in Line:
                # Keep the newline: the regex separates header from sequence on '\n'.
                ReadSeq = ReadSeq + Line
            else:
                # Sequence lines are stripped so the whole sequence joins up.
                ReadSeq = ReadSeq + Line.strip().upper()
    # Group 1: accession number (NM_/XM_); group 2: the DNA sequence.
    MySeq_RE = r'([NX]M_\d+\.\d).+\n([AGCT]+)'
    MySeq_Comp = re.compile(MySeq_RE)
    SeqInfo = MySeq_Comp.search(ReadSeq).groups()
    return SeqInfo
def CreateDictionary(DicFile):
    """Build the genetic-code dictionary {codon: (one_letter, three_letter)}.

    The file is expected to contain tab-separated rows of
    codon \\t one-letter amino acid \\t three-letter amino acid.

    BUG FIX: the original called MyFile.close() AFTER the return statement
    (unreachable); a `with` block now closes the file deterministically.
    """
    # Codon, one-letter code (B/J/U/X/Z are not amino acids), three-letter code.
    MyDic_RE = r'([ATGC]{3})\t([^BJUXZ])\t([A-Z][a-z]{2})'
    MyDic_Comp = re.compile(MyDic_RE)
    GENCODE = {}
    with open(DicFile, 'r') as MyFile:
        Data2 = ''
        for Line in MyFile:
            # Join all rows into one string; the regex re-finds each row.
            Data2 = Data2 + Line.strip()
        for Codon, OneLetter, ThreeLetter in MyDic_Comp.findall(Data2):
            GENCODE[Codon] = (OneLetter, ThreeLetter)
    return GENCODE
def ComplementaryGenerator(SeqName):
    """Return the reverse complement of a DNA sequence.

    The input (the + strand, 5'->3') is read backwards and each base is
    replaced by its Watson-Crick partner, yielding the - strand in 5'->3'
    orientation.  Raises KeyError on any non-ACGT character.
    """
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(pairs[base] for base in reversed(SeqName))
def TranslateDNA(DNASeq, COMPSEQ, DicFile, ExportName):
    """Translate both DNA strands in all three reading frames.

    Writes the six translations to '<ExportName>.txt' and echoes them to
    stdout.  DicFile is the codon -> (one_letter, three_letter) dictionary
    built by CreateDictionary.
    """
    MyFile = open(ExportName + '.txt', 'w')
    Counter = '+'  # which strand we are on: '+' for DNASeq first, then '-'
    for Seq in (DNASeq, COMPSEQ):
        if Counter == '+':
            print('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
            MyFile.write('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
        if Counter == '-':
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
            print('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
        for CodingFrame in range(0, 3):  # reading frames start at offsets 0, 1, 2
            ProtSeq = ''
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\t Frame ' + str(CodingFrame + 1) + '\n\n')
            print('\n\t\t\t\t\t\t\t\t\t\t Frame ' + str(CodingFrame + 1) + '\n\n')
            while True:
                # Stop when the next codon would run past the end of Seq:
                # ((len/3 - 1) * 3) equals len - 3 when len % 3 == 0, so this
                # breaks as soon as CodingFrame+2 would be out of range.
                if CodingFrame > (((len(Seq) / 3) - 1) * 3):
                    break
                # Assemble the current codon and append its one-letter code.
                SubSeq = Seq[CodingFrame] + Seq[CodingFrame + 1] + Seq[CodingFrame + 2]
                ProtSeq = ProtSeq + DicFile[SubSeq][0]
                CodingFrame += 3  # advance to the next codon of this frame
            print(ProtSeq)
            MyFile.write(ProtSeq + '\n')
        Counter = '-'  # the second pass processes the reverse complement
    MyFile.close()
def Body():
    """Glue: read the FASTA named on the command line, build the genetic-code
    dictionary, and translate both strands in all frames."""
    DNAList = ReadFasta(sys.argv[1])  # (accession number, DNA sequence)
    GenCode = CreateDictionary('GeneticCode_standard.csv')
    CompSeq = ComplementaryGenerator(DNAList[1])  # reverse-complement strand
    # DNAList[1] is the extracted DNA sequence, DNAList[0] the accession
    # number (used as the output file name).
    Protein = TranslateDNA(DNAList[1], CompSeq, GenCode, DNAList[0])
if __name__ == '__main__':
    import sys
    import re
    # BUG FIX: the original checked len(sys.argv) < 2, printed a message, and
    # then STILL evaluated sys.argv[1] on the next line, raising IndexError.
    # The branches are now mutually exclusive.
    if len(sys.argv) < 2:
        # No input file given: ask for one instead of crashing.
        print('Please, introduce as an argument the file you want to translate.')
    elif '.fasta' not in sys.argv[1]:
        # The argument exists but is not a FASTA file.
        print('You have to introduce a fasta sequence')
    else:
        Body()
|
from PIL import Image, ImageDraw
import os
import numpy as np
from sklearn import neighbors
import sklearn
from sklearn.datasets import load_iris
def createData1(path='../data/single_code/'):
    """Load the training set of single-character images.

    Returns (xx, yy): xx holds each image's raw pixel data (PIL ImagingCore
    sequence), yy the label taken from the file-name prefix before '_'.
    """
    xx = []
    yy = []
    lists = os.listdir(path)  # every file in the training directory
    lists.sort()  # deterministic order keeps samples and labels aligned
    for i in lists:
        im = Image.open(path + i)
        data = im.getdata()
        # data = np.matrix(data, dtype='float') / 225 # optional matrix conversion
        # NOTE(review): unlike testData()/test2(), the image is neither
        # grayscaled nor normalized here — confirm the distance comparisons
        # in test() really see compatible representations.
        yy.append(i.split("_")[0])
        xx.append(data)
    return xx, yy
def testData():
    """Load the test images from ../data/t/ as normalized grayscale vectors.

    Returns (xx, yy): xx holds one flat float vector per image, yy the label
    taken from the file-name prefix before '_'.
    """
    xx = []
    yy = []
    path = '../data/t/'
    lists = os.listdir(path)  # every file in the test directory
    for i in lists:
        im = Image.open(path + i)
        im = im.convert("L")  # convert to grayscale
        data = im.getdata()
        # NOTE(review): 225 looks like a typo for 255 (the max pixel value);
        # kept as-is because the training side must divide by the same number.
        data = np.matrix(data, dtype='float') / 225  # convert to a matrix
        yy.append(i.split("_")[0])
        xx.append(np.array(data)[0])  # flatten the 1xN matrix to a vector
    return xx, yy
def distance(train, v1):
    """Return the Euclidean distance from v1 to every vector in train."""
    return [np.sqrt(np.sum((sample - v1) ** 2)) for sample in train]
def test2():
    """Load the evaluation set of 5-character captcha images.

    Each captcha is cut into five 20x30 tiles; every tile is grayscaled,
    thresholded and denoised (iamge2imbw / clear_noise are defined elsewhere
    in the project — TODO confirm their in-place semantics), then flattened
    to a normalized float vector.  Returns (xx, yy) where each xx entry is a
    list of five character vectors and yy the captcha's label string.
    """
    xx = []
    yy = []
    path = '../data/verify_code/'
    lists = os.listdir(path)  # every captcha file in the directory
    for i in lists:
        im = Image.open(path + i)
        xxx = []
        for j in range(5):
            # The j-th character occupies the 20px-wide column starting at 20*j.
            box = (20 * j, 00, (1 + j) * 20, 30)
            dm = im.crop(box)
            dm = dm.convert("L")  # grayscale
            iamge2imbw(dm, 180)   # binarize with threshold 180 (project helper)
            clear_noise(dm)       # denoise (project helper)
            data = dm.getdata()
            # NOTE(review): 225 looks like a typo for 255 — kept consistent
            # with testData().
            data = np.matrix(data, dtype='float') / 225  # convert to a matrix
            xxx.append(np.array(data)[0])  # flatten to a vector
        xx.append(xxx)
        yy.append(i.split("_")[0])
    return xx, yy
def test():
    """Evaluate 1-nearest-neighbour captcha recognition and print accuracy.

    Each of the five character tiles of every test captcha is matched to the
    closest training sample; the captcha counts as correct only when all
    five predicted characters equal the label.
    """
    train_x, train_y = createData1()
    test_x, test_y = test2()
    correct = 0
    for idx, tiles in enumerate(test_x):
        predicted = ''
        for tile in tiles:
            # Index of the nearest training sample wins.
            nearest = np.argsort(distance(train_x, tile))
            predicted += train_y[nearest[0]]
        if predicted == test_y[idx]:
            correct += 1
        print("预测值:" + predicted)
        print("实际值:" + test_y[idx])
        print("-------------------")
    print(len(test_x))
    print("正确率" + str(correct / len(test_x)))
# knn
def testt3():
    """Same evaluation as test(), but using sklearn's KNeighborsClassifier."""
    train_x, train_y = createData1()
    test_x, test_y = test2()
    clf = neighbors.KNeighborsClassifier()
    clf.fit(train_x, train_y)
    correct = 0
    for idx, tiles in enumerate(test_x):
        # Predict all five characters of the captcha at once and join them.
        predicted = ''.join(clf.predict(np.array(tiles)))
        if predicted == test_y[idx]:
            correct += 1
        print("预测值:" + predicted)
        print("实际值:" + test_y[idx])
        print("-------------------")
    print(len(test_x))
    print("正确率" + str(correct / len(test_x)))
|
import datetime
import pytest
from django.contrib.contenttypes.models import ContentType
from apps.history.metrics import (total_group_count_over_time,
total_idol_count_over_time)
from apps.people.factories import GroupFactory
pytestmark = pytest.mark.django_db
def test_total_group_count_over_time():
    """The metric must report all groups existing on the target date."""
    target = datetime.date.today()
    # BUG FIX: `xrange` is Python 2 only and raises NameError under the
    # Python 3 / pytest stack this file uses; a plain loop also avoids
    # building a throwaway list just for its side effects.
    for _ in range(10):
        GroupFactory()
    assert total_group_count_over_time(target) == {
        'tag': 'total-group-count',
        'datetime': target,
        'source': ContentType.objects.get(app_label='people', model='group'),
        'sum': 10,
    }
def test_total_idol_count_over_time():
    """With no idols created, the metric payload carries only its tag.

    NOTE(review): unlike the group-count test above, no factories are
    invoked and no datetime/source/sum keys are expected — presumably the
    metric omits them when there is nothing to count; confirm against
    apps.history.metrics.
    """
    target = datetime.date.today()
    assert total_idol_count_over_time(target) == {
        'tag': 'total-idol-count'
    }
|
#!/usr/bin/python3
"""Alta3 Research | Zach Feeser
List - An example of working with python lists"""
# define our main function (run time code goes here)
# define our main function (run time code goes here)
def main():
    """Build a small ban list of IP addresses and display it."""
    # An empty list literal; ban = list() would be equivalent.
    ban = []
    # append() adds exactly ONE value to the end of the list.
    ban.append("172.16.8.2")
    ban.append("10.8.3.22")
    # A second list, this time of host names we might also want to ban.
    ban_hosts = ["acme.example.org", "smith.example.org", "*.example.com"]
    # Ways to combine the two lists (left disabled on purpose):
    #   ban + ban_hosts        -> new concatenated list
    #   ban.extend(ban_hosts)  -> extends ban by ITERATING ban_hosts:
    #       ["172.16.8.2", "10.8.3.22", "acme.example.org", "smith.example.org", "*.example.com"]
    #   ban.append(ban_hosts)  -> would nest the whole list as one element:
    #       ["172.16.8.2", "10.8.3.22", ["acme.example.org", "smith.example.org", "*.example.com"]]
    # display our list to the screen
    print(ban)

main()
|
class Solution:
    def searchInsert(self, nums, target):
        """
        Return the index of target in sorted nums, or the index at which it
        would have to be inserted to keep nums sorted.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # BUG FIX: the recursive search evaluated nums[-1] / nums[mid] and
        # crashed with an IndexError on an empty list; an empty list always
        # inserts at index 0.
        if not nums:
            return 0
        ans = self.binary_search(nums, 0, len(nums) - 1, target)
        # binary_search can never return past the last index, so bump by one
        # when target belongs after every existing element.
        if ans == len(nums) - 1 and target > nums[-1]:
            ans += 1
        return ans

    def binary_search(self, nums, l, r, target):
        """Recursive lower-bound style search over the inclusive range [l, r]."""
        if l == r:
            return l
        mid = (l + r) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            # target lies strictly to the right of mid
            return self.binary_search(nums, mid + 1, r, target)
        else:
            # keep mid in range: nums[mid] may still be the insertion point
            return self.binary_search(nums, l, mid, target)
|
from botocore.vendored import requests
import os
import json
import gzip
from StringIO import StringIO
MAX_LINE_LENGTH = 32000  # lines longer than this are truncated in sanitizeMessage
MAX_REQUEST_TIMEOUT = 30  # seconds allowed for the ingestion POST in sendLog
def lambda_handler(event, context):
    """AWS Lambda entry point: forward a CloudWatch Logs event to LogDNA."""
    key, hostname, tags, baseurl = setup()        # config from the environment
    cw_log_lines = decodeEvent(event)             # gunzipped CloudWatch payload
    messages, options = prepare(cw_log_lines, hostname, tags)
    sendLog(messages, options, key, baseurl)
def setup():
    """Collect LogDNA configuration from the Lambda environment variables.

    Returns (key, hostname, tags, baseurl); every value except baseurl may
    be None when the corresponding variable is unset.
    """
    env = os.environ
    return (
        env.get('LOGDNA_KEY', None),
        env.get('LOGDNA_HOSTNAME', None),
        env.get('LOGDNA_TAGS', None),
        buildURL(env.get('LOGDNA_URL', None)),
    )
def buildURL(baseurl):
    """Return the ingestion endpoint, defaulting to LogDNA's public URL."""
    if baseurl is None:
        return 'https://logs.logdna.com/logs/ingest'
    return 'https://' + baseurl
def decodeEvent(event):
    """Base64-decode and gunzip the CloudWatch Logs payload into a dict.

    NOTE(review): str.decode('base64', ...) and the StringIO module are
    Python 2 only — under Python 3 this would need base64.b64decode and
    io.BytesIO.  The whole file (botocore.vendored, StringIO import) is a
    Python 2 Lambda.
    """
    cw_data = str(event['awslogs']['data'])
    cw_logs = gzip.GzipFile(fileobj=StringIO(cw_data.decode('base64', 'strict'))).read()
    return json.loads(cw_logs)
def prepare(cw_log_lines, hostname=None, tags=None):
    """Convert a decoded CloudWatch payload into (messages, options).

    messages is a list of sanitized LogDNA line dicts; options carries the
    hostname (explicit argument wins over the one derived from logStream)
    and optional tags for the ingestion request.
    """
    app = 'CloudWatch'
    meta = {'type': app}
    options = dict()
    if 'logGroup' in cw_log_lines:
        group = cw_log_lines['logGroup']
        app = group.split('/')[-1]
        meta['group'] = group
    if 'logStream' in cw_log_lines:
        stream = cw_log_lines['logStream']
        # e.g. "2020/01/01/[$LATEST]abcd" -> hostname "abcd"
        options['hostname'] = stream.split('/')[-1].split(']')[-1]
        meta['stream'] = stream
    if hostname is not None:
        options['hostname'] = hostname
    if tags is not None:
        options['tags'] = tags
    messages = [
        sanitizeMessage({
            'line': cw_log_line['message'],
            'timestamp': cw_log_line['timestamp'],
            'file': app,
            'meta': meta})
        for cw_log_line in cw_log_lines['logEvents']
    ]
    return messages, options
def sanitizeMessage(message):
    """Truncate over-long log lines so the ingestion API accepts them.

    Falsy messages (or messages with an empty 'line') pass through untouched.
    """
    if message and message['line'] and len(message['line']) > MAX_LINE_LENGTH:
        message['line'] = message['line'][:MAX_LINE_LENGTH] + ' (cut off, too long...)'
    return message
def sendLog(messages, options, key=None, baseurl=None):
    """POST the prepared messages to the LogDNA ingestion endpoint.

    Does nothing when no ingestion key is configured.

    BUG FIX: the original signature `(messages, options, key=None, baseurl)`
    put a non-default parameter after a defaulted one, which is a
    SyntaxError — the module could not even be imported.  Giving `baseurl`
    a default keeps the existing positional call site working unchanged.
    """
    if key is not None:
        data = {'e': 'ls', 'ls': messages}
        requests.post(
            url=baseurl,
            json=data,
            auth=('user', key),
            params={
                # hostname can be absent when the event carried no logStream
                # and LOGDNA_HOSTNAME is unset; .get avoids a KeyError.
                'hostname': options.get('hostname'),
                'tags': options.get('tags')},
            stream=True,
            timeout=MAX_REQUEST_TIMEOUT)
|
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plot
target_url = ("https://archive.ics.uci.edu/ml/machine-learning-"
              "databases/undocumented/connectionist-bench/sonar/sonar.all-data")
data = pd.read_csv(target_url, header=None, prefix="V")

# Second and third attributes are columns 1 and 2.
dataRow2 = data.iloc[0:208, 1]
# BUG FIX: dataRow3 previously re-read column 1, so the first scatter plot
# compared the 2nd attribute against itself (a perfect diagonal); the 3rd
# attribute is column 2.
dataRow3 = data.iloc[0:208, 2]
plot.scatter(dataRow2, dataRow3)
plot.xlabel("2nd Attribute")
plot.ylabel("3rd Attribute")
plot.show()

# Compare with a far-away column: adjacent attributes correlate strongly,
# distant ones much less.
dataRow21 = data.iloc[0:208, 20]
plot.scatter(dataRow2, dataRow21)
plot.xlabel("2nd Attribute")
plot.ylabel("21st Attribute")
plot.show()
import os
import socket
import threading
from Position import Position
class GPSReceiver:
    """Receives GPS fixes over a TCP socket on a background daemon thread.

    Each client connection is expected to send one newline-terminated line
    of the form "lat,lon,speed".
    """

    def __init__(self, deviceIP, devicePort):
        self.deviceIP = deviceIP
        self.devicePort = devicePort
        #we use TCP
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((deviceIP, devicePort))
        # handle of the receiver thread (None until start_gps_receiver runs)
        self.receive_gps_thread_id = None

    #array of gps points since last retrieval
    def start_gps_receiver(self):
        """Start the daemon thread that accepts connections and reads fixes."""
        self.receive_gps_thread_id = threading.Thread(target=self.__rcv_data_from_gps, daemon=True)
        self.receive_gps_thread_id.start()

    def stop_gps_receiver(self):
        # Cooperative stop: the worker re-checks this flag between
        # connections, so the stop only takes effect after the blocking
        # accept() returns once more.
        self.receive_gps_thread_id.do_run = False

    def __rcv_data_from_gps(self):
        """Accept one TCP connection at a time and print the received fix."""
        t = threading.currentThread()  # NOTE(review): deprecated alias of current_thread()
        self.sock.listen(1)
        while getattr(t, "do_run", True):
            connection, client_addr = self.sock.accept()
            print("accepted conn")
            data = self.__recvall(connection)
            # speed is parsed but currently unused
            lat, lon, speed = data.split(",")
            print("g:" + str(lat) + "," + str(lon))
            connection.close()

    def __recvall(self, sock):
        """Read from sock until a newline arrives; return the line without it."""
        BUFF_SIZE = 1  # one byte at a time so we never consume past the '\n'
        data = b''
        while True:
            part = sock.recv(BUFF_SIZE)
            data += part
            # Look for the terminating newline in what we have so far.
            recv_end = data.decode('utf-8').find('\n')
            if recv_end != -1:
                break
        return data.decode('utf-8')[:-1]
|
# Lists vs strings:
#   - strings are IMMUTABLE: methods like title() return a new string
#   - lists are MUTABLE: pop()/append() change the list in place
#   - (in Ruby, by contrast, strings are mutable)
s = "string"
# t = s.title()   # would yield "String" while leaving s untouched
# print(t)
# print(s)
l = ['word1', 'word2', 'word3']
l.pop()            # removes and returns the last item, 'word3'
l.append('word3')  # ...and put it straight back
print(l)
import random
score_dict = {}  # shared name -> score store used by all menu commands
def main():
    """Menu loop: 1 = enter scores, 2 = query scores, 3 or empty input = exit."""
    while True:
        prompt = 'Enter command:1. data entry, '
        prompt += '2. query, 3. exit >>'
        s = input(prompt)
        if not s:
            # Empty input exits, same as command 3.
            break
        cmd = int(s)  # NOTE(review): non-numeric input raises ValueError
        if cmd == 3:
            break
        if cmd == 1:
            add_score()
        elif cmd == 2:
            display_score()
def add_score():
    """Repeatedly prompt for a name and assign it a random score in [1, 100]."""
    while True:
        key_str = input('Input name (ENTER to exit):')
        key_str = key_str.strip()
        if not key_str:
            return
        # Despite the name, val_str is a float: a randomly generated score,
        # not something the user typed.
        val_str = random.uniform(1, 100)
        if not val_str:
            # uniform(1, 100) is always >= 1, so this guard can never fire.
            return
        score_dict[key_str] = val_str
def display_score():
    """Interactively look up names until the user presses ENTER."""
    if len(score_dict) == 0:
        print('your score dict is empty')
    else:
        while True:
            key_str = input('Enter name (ENTER to exit):')
            key_str = key_str.strip()
            if not key_str:
                return
            # .get returns None for unknown names instead of raising.
            val_str = score_dict.get(key_str)
            if val_str:
                print(val_str)
            else:
                print('Name not found. Re-enter.')
main()  # launch the interactive menu
|
# Dictionary mapping three important rivers to the country each runs through.
rivers = {'Amazonas': 'Brasil',
          'San Francisco': 'Eua',
          'Tamisa': 'England'}

# Each river together with its country.
for name in rivers:
    print(f'The {name} flows through {rivers[name]}')

# Just the river names (iterating a dict yields its keys).
for name in rivers:
    print(name)

# Just the countries.
for nation in rivers.values():
    print(nation)
|
"""
取消功能允许我们要求取消期货或协程:
"""
import asyncio
async def myCoroutine():
print("My Coroutine")
async def main():
current = asyncio.Task.current_task()
print(current)
loop = asyncio.get_event_loop()
try:
task1 = loop.create_task(myCoroutine())
task2 = loop.create_task(myCoroutine())
task3 = loop.create_task(myCoroutine())
task3.cancel()
loop.run_until_complete(main())
finally:
loop.close()
"""
My Coroutine
My Coroutine
<Task pending coro=<main() running at C:/Users/zhourudong/PycharmProjects/learn/事件驱动编程/9_cancel.py:14> cb=[_run_until_complete_cb() at C:\3.6\lib\asyncio\base_events.py:176]>
在执行前面的程序时,您应该看到task1和task2都已成功执行。我们计划的第三个任务,由于取消了我们的调用,实际上从来没有执行过。现在,这只是一个简单的例子,说明我们如何取消一个任务,我们以这样的方式做了,我们几乎可以保证我们的第三个任务被取消了。然而,在野外,不能保证取消功能肯定会取消您的待定任务:
""" |
def add_positive_numbers(x, y):
    """Return x + y, insisting via assert that both operands are positive."""
    assert x > 0 and y > 0, "Both numbers must be positive!"
    return x + y


print(add_positive_numbers(1, 2))  # 3
# print(add_positive_numbers(1, -3))  # would raise AssertionError
def eat_junk(food):
    """Return a munching message; assert that food is on the junk-food list."""
    junk = ["pizza", "ice cream", "candy", "fried butter"]
    assert food in junk, "Food must be in 'junk food' list"
    return f"nom nom nom, I'm eating {food}!"


# Ask the user what they're eating (lower-cased to match the list entries).
food = input("Please enter the food you're eating: ").lower()
print(eat_junk(food))
|
#!/usr/bin/env python
#Client
#imports
import sys
import socket
from threading import Thread
from queue import Queue
class Client:
    """Threaded chat client.

    Outgoing text arrives on `message_queue`; everything the UI should show
    (received packets plus status lines) is pushed onto `log`.  Two worker
    threads shuttle data between the queues and the server socket.
    """
    def __init__(self, log, message_queue):
        self.message_queue = message_queue  # queue.Queue of lines to send
        self.log = log                      # queue.Queue of lines to display
        self.packet_size = 1024             # bytes per recv() call
        self.info = ""
        self.sock = None                    # created per-connection in start()
        self.off = False                    # set True to make both loops exit
        self.send = None                    # sender Thread handle
        self.get = None                     # receiver Thread handle
        self.username = ""
    #threads
    def get_messages(self):
        """Receiver loop: decode every non-empty packet and push it to the log."""
        try:
            while True:
                if self.off:
                    break
                data = self.sock.recv(self.packet_size)
                if not data == b"":
                    self.log.put(data.decode("utf-8"))
                    print(data.decode("utf-8"))
        except:
            # NOTE(review): bare except swallows everything — including the
            # socket shutdown triggered by leave(), which is how this loop
            # normally terminates.
            pass
    def send_messages(self):
        """Sender loop: announce the username, then forward queued lines."""
        try:
            # First packet tells the server who we are.
            self.sock.sendall(bytearray(str.encode("/user " + self.username)))
            while True:
                info = ""
                if self.off:
                    print("breaking send")
                    break
                if not self.message_queue.empty():
                    print("getting msgs")
                    info = self.message_queue.get()
                try:
                    if not info == "":
                        self.sock.sendall(bytearray(str.encode(info)))
                        self.log.put(self.username + ": " + info)
                        # "/leave" is both sent to the server and acted on locally.
                        if info == "/leave":
                            self.leave()
                except:
                    pass
        except:
            pass
    def start(self, address, username):
        """Connect to (host, port) and spin up the sender/receiver threads."""
        self.off = False
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.username = username
        #begin connection
        print("Connecting to server at ip %s:%s" % address)
        self.log.put("Connecting to server at ip %s:%s" % address)
        try:
            self.sock.connect(address)
        except ConnectionRefusedError as cre:
            print("Error: Could not connect to ip %s:%s" % address)
            self.log.put("Error: Could not connect to ip %s:%s" % address)
            return
        except socket.gaierror as e:
            print("Error: Could not gather address info")
            self.log.put("Error: Could not gather address info")
            return
        print("Connected...")
        self.log.put("Connected")
        self.send = Thread(target=self.send_messages)
        self.get = Thread(target=self.get_messages)
        try:
            self.send.start()
            self.get.start()
        finally:
            pass
    def leave(self):
        """Tell the server we're leaving, stop both threads, close the socket."""
        if self.sock is not None:
            try:
                self.sock.sendall(bytearray(str.encode("/leave")))
            except:
                print("Server wasn't on.")
        self.off = True
        print("Closing connection.")
        self.log.put("Disconnected")
        # NOTE(review): leave() can be invoked from send_messages itself, in
        # which case self.send.join() would join the current thread — TODO
        # confirm the intended call path.
        if self.send.is_alive():
            self.send.join()
        if self.get.is_alive():
            self.get.join()
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            self.sock = None
        except:
            print("Wasn't connected in the first place")
from sklearn.externals import joblib
import os, numpy as np, sys
vsm = '/tmp/event_analaysis_output/modeling/TfIdfMatrix_False_False_doc_matrix_term_2016-11-07_2017-01-01.model'
index = joblib.load(vsm)
feature_m = index["matrix"]
for dirpath, dirnames, filenames in os.walk("/tmp/event_analaysis_output/evaluation/"):
for filename in filenames:
if "2016-11-07_2017-01-01" in filename:
path = "%s%s" % (dirpath, filename)
model = joblib.load(path)
lda = model["fitted_model"]
population_size = feature_m.T.shape[0]
sample_rate, sample_count = 0.05, 50
sample_size = population_size * sample_rate
point_estimations = []
for i in range(sample_count):
samples = feature_m.T[np.random.choice(population_size, size=sample_size, replace=False),:]
p_without_dist = lda.perplexity(samples)
point_estimations.append(p_without_dist)
print i, p_without_dist
sys.stdout.flush()
print "%s: p_without_dist=%.5e" % (filename, np.average(point_estimations))
|
def exec(instn, pch, reg, var):
    """Decode the 5-bit opcode prefix of `instn` and run the matching handler.

    NOTE: the name shadows the builtin `exec`; kept because external callers
    depend on it.  Memory instructions also receive the variable store `var`;
    every other handler takes (instn, pch, reg).  Unknown opcodes halt.
    """
    opc = instn[:5]
    # Handlers that touch the variable store.
    mem_ops = {
        "00100": load,
        "00101": store,
    }
    # Handlers that only touch registers / program counter.
    reg_ops = {
        "00000": add,
        "00001": sub,
        "00010": movI,
        "00011": movR,
        "00110": mul,
        "00111": div,
        "01000": rshift,
        "01001": lshift,
        "01010": xor,
        "01011": OR,
        "01100": AND,
        "01101": invert,
        "01110": cmpr,
        "01111": jmp,
        "10000": jlt,
        "10001": jgt,
        "10010": je,
    }
    if opc in mem_ops:
        mem_ops[opc](instn, pch, reg, var)
    elif opc in reg_ops:
        reg_ops[opc](instn, pch, reg)
    else:
        hlt(pch, reg)
def overflow(register, reg):
    """Clamp `reg[register]` to the 16-bit range and flag over/underflow.

    Values above 65535 wrap modulo 65536; negative values clamp to 0.  In
    either case the flags register ("111") is set to the overflow pattern.
    """
    value = reg[register]
    if value > 65535:
        reg[register] = value % 65536
        reg["111"] = "1000"
    elif value < 0:
        reg[register] = 0
        reg["111"] = "1000"
def add(instn, pch, reg):
    """r1 <- r2 + r3; clears flags, advances pc, clamps the 16-bit result."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] + reg[r3]
    pch[0] += 1
    overflow(r1, reg)

def sub(instn, pch, reg):
    """r1 <- r2 - r3; a negative result is clamped to 0 by overflow()."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] - reg[r3]
    pch[0] += 1
    overflow(r1, reg)

def movI(instn, pch, reg):
    """r1 <- 8-bit immediate (interpreted as an unsigned binary string)."""
    reg["111"] = "0000"
    imm = instn[-8:]
    r1 = instn[-11:-8]
    reg[r1] = int(imm, 2)
    pch[0] += 1

def movR(instn, pch, reg):
    """r1 <- r2; flags are cleared AFTER the copy so reading register 111
    captures its pre-instruction value."""
    r1 = instn[-6:-3]
    r2 = instn[-3:]
    if r2 == "111":
        # The flags register holds a bit-string; convert it to int on read.
        reg[r1] = int(reg[r2], 2)
    else:
        reg[r1] = reg[r2]
    pch[0] += 1
    reg["111"] = "0000"
def load(instn, pch, reg, var):
    """r1 <- var[addr]; a never-written address reads as 0 (and is created)."""
    reg["111"] = "0000"
    vr = instn[-8:]
    vr = int(vr, 2)  # 8-bit variable address
    r1 = instn[-11:-8]
    if vr not in var.keys():
        var[vr] = 0
    reg[r1] = var[vr]
    pch[0] += 1

def store(instn, pch, reg, var):
    """var[addr] <- r1."""
    reg["111"] = "0000"
    vr = instn[-8:]
    r1 = instn[-11:-8]
    var[int(vr, 2)] = reg[r1]
    pch[0] += 1
def mul(instn, pch, reg):
    """r1 <- r2 * r3 (clamped to 16 bits by overflow())."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] * reg[r3]
    pch[0] += 1
    overflow(r1, reg)

def div(instn, pch, reg):
    """Integer division r1 / r2: quotient -> register 000, remainder -> 001.

    Division by zero leaves all registers untouched; only the pc advances.
    """
    reg["111"] = "0000"
    r1 = instn[-6:-3]
    r2 = instn[-3:]
    if reg[r2] != 0:
        a = reg[r1] // reg[r2]
        b = reg[r1] % reg[r2]
        reg["000"] = a
        reg["001"] = b
    pch[0] += 1
def rshift(instn, pch, reg):
    """Logical right shift of r1 by the 8-bit immediate.

    Shifting by more than 15 positions clears the register entirely.
    """
    reg["111"] = "0000"
    imm = instn[-8:]
    r1 = instn[-11:-8]
    rs = int(imm, 2)
    if rs > 15:
        reg[r1] = 0
    else:
        # Floor-dividing by 2**rs is the shift for unsigned values.
        d = 2**rs
        reg[r1] //= d
    pch[0] += 1

def lshift(instn, pch, reg):
    """Logical left shift of r1 by the 8-bit immediate, wrapping at 16 bits."""
    reg["111"] = "0000"
    imm = instn[-8:]
    r1 = instn[-11:-8]
    rs = int(imm, 2)
    if rs > 15:
        reg[r1] = 0
    else:
        d = 2**rs
        temp = reg[r1]
        temp *= d
        # Bits shifted beyond bit 15 are discarded (no overflow flag here).
        reg[r1] = temp % 65536
    pch[0] += 1
def xor(instn, pch, reg):
    """r1 <- r2 XOR r3 (bitwise)."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] ^ reg[r3]
    pch[0] += 1

def OR(instn, pch, reg):
    """r1 <- r2 OR r3 (bitwise)."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] | reg[r3]
    pch[0] += 1

def AND(instn, pch, reg):
    """r1 <- r2 AND r3 (bitwise)."""
    reg["111"] = "0000"
    r1 = instn[-9:-6]
    r2 = instn[-6:-3]
    r3 = instn[-3:]
    reg[r1] = reg[r2] & reg[r3]
    pch[0] += 1
def invert(instn, pch, reg):
    """r1 <- bitwise NOT of r2 within 16 bits (65535 - value)."""
    reg["111"] = "0000"
    r1 = instn[-6:-3]
    r2 = instn[-3:]
    reg[r1] = 65535 - reg[r2]
    pch[0] += 1

def cmpr(instn, pch, reg):
    """Compare r1 with r2 and set the flags register:
    "0100" = less-than, "0010" = greater-than, "0001" = equal."""
    r1 = instn[-6:-3]
    r2 = instn[-3:]
    if reg[r1] < reg[r2]:
        reg["111"] = "0100"
    elif reg[r1] > reg[r2]:
        reg["111"] = "0010"
    else:
        reg["111"] = "0001"
    pch[0] += 1
def jmp(instn, pch, reg):
    """Unconditional jump: pc <- 8-bit address."""
    addr = instn[-8:]
    pch[0] = int(addr, 2)
    reg["111"] = "0000"

def jlt(instn, pch, reg):
    """Jump if the last cmpr set less-than (bit 1 of the flags string)."""
    addr = instn[-8:]
    if reg["111"][1] == '1':
        pch[0] = int(addr, 2)
    else:
        pch[0] += 1
    reg["111"] = "0000"  # flags are consumed by the jump

def jgt(instn, pch, reg):
    """Jump if the last cmpr set greater-than (bit 2 of the flags string)."""
    addr = instn[-8:]
    if reg["111"][2] == '1':
        pch[0] = int(addr, 2)
    else:
        pch[0] += 1
    reg["111"] = "0000"

def je(instn, pch, reg):
    """Jump if the last cmpr set equal (last bit of the flags string)."""
    addr = instn[-8:]
    if reg["111"][-1] == '1':
        pch[0] = int(addr, 2)
    else:
        pch[0] += 1
    reg["111"] = "0000"

def hlt(pch, reg):
    """Halt: advance pc once more and clear the run flag pch[1]."""
    pch[0] += 1
    pch[1] = 0
    reg["111"] = "0000"
|
from BitVector import BitVector
from constants import Constants
from utils import Utils
from typing import *
import copy
import logging
class Key:
    """AES-128 key schedule: expands a 16-byte key into 11 round keys."""

    NO_OF_ROUNDS = 10  # NO_OF_ROUNDS + 1 keys needed including original. So 'NO_OF_ROUNDS' key expansions are needed.

    def __init__(self, key_string: str):
        self.key_string = key_string
        self.key_int_array = Key.generate_key_from_string(key_string)  # Array of ints. Eg - [75, 22..]
        self.expanded_key_int_array = [self.key_int_array]  # array of arrays: one 16-int key per round
        for round_no in range(1, Key.NO_OF_ROUNDS + 1):
            # Each round key is derived solely from the previous round's key.
            new_round_key = Key.generate_new_round_key(self.expanded_key_int_array[round_no - 1], round_no)
            logging.debug(f'Key for Round: {round_no} in Hex is: {Utils.convert_int_array_to_hex_array(new_round_key)}')
            self.expanded_key_int_array.append(new_round_key)

    # Returns an int array of ASCII values of a specific round's key
    def get_round_key(self, round_no: int) -> List[int]:
        if not (0 <= round_no <= Key.NO_OF_ROUNDS):
            raise Exception("Invalid round number specified")
        return self.expanded_key_int_array[round_no]

    @staticmethod
    def generate_new_round_key(prev_round_key_int_array: List[int], round_no: int) -> List[int]:
        """Standard AES expansion: w0 = g(prev_w3) ^ prev_w0, then each
        subsequent word XORs the word just produced with the previous key's
        corresponding word."""
        prev_round_root_word = prev_round_key_int_array[12:16]  # last word of the previous round key
        updated_root_word = Key.g_function_on_root_word(prev_round_root_word, round_no)
        # generate each word of the new round key
        first_word = Utils.xor_operation_on_int_array(updated_root_word, prev_round_key_int_array[0:4])
        second_word = Utils.xor_operation_on_int_array(first_word, prev_round_key_int_array[4:8])
        third_word = Utils.xor_operation_on_int_array(second_word, prev_round_key_int_array[8:12])
        fourth_word = Utils.xor_operation_on_int_array(third_word, prev_round_key_int_array[12:16])
        new_round_key = first_word + second_word + third_word + fourth_word
        return new_round_key

    @staticmethod
    def g_function_on_root_word(root_word: List[int], round_no: int) -> List[int]:
        """AES g-function: RotWord, SubWord, then XOR the round constant into
        the most significant byte."""
        if not (1 <= round_no <= Key.NO_OF_ROUNDS):
            raise Exception("Invalid round used for generate_round_key")
        # circular byte left shift
        shifted_root_word = Key.circular_byte_left_shift(root_word)
        # byte substitution
        byte_substituted_root_word = Utils.byte_substitution_sbox_for_array(shifted_root_word)
        # adding round constant
        round_constant_int = Constants.round_constants[round_no]
        round_constant_bitvector = BitVector(intVal=round_constant_int, size=8)
        root_word_significant_byte_bitvector = BitVector(intVal=byte_substituted_root_word[0], size=8)
        updated_root_word_significant_byte_bitvector = root_word_significant_byte_bitvector.__xor__(
            round_constant_bitvector)
        # only most significant byte changes for round constant addition.
        byte_substituted_root_word[0] = updated_root_word_significant_byte_bitvector.intValue()
        return byte_substituted_root_word

    @staticmethod
    def circular_byte_left_shift(root_word_int_array: List[int]) -> List[int]:
        """Rotate [b0, b1, b2, b3] into [b1, b2, b3, b0]."""
        return [root_word_int_array[1], root_word_int_array[2], root_word_int_array[3], root_word_int_array[0]]

    @staticmethod
    def generate_key_from_string(key_string: str) -> List[int]:
        """Force the key to exactly 16 characters (truncate or NUL-pad) and
        return the ord() value of each character."""
        size_adjusted_string = key_string
        if len(key_string) > 16:
            size_adjusted_string = key_string[0:16]
        elif len(key_string) < 16:
            size_adjusted_string = key_string.ljust(16, '\0')  # pad string to the right with 0's
        key = [ord(size_adjusted_string[x]) for x in range(16)]
        logging.debug(f'Original Key: {size_adjusted_string}\nKey in Int: {key}')
        return key
|
"""create users table
Revision ID: 39e93e7ef50b
Revises: None
Create Date: 2012-08-09 21:33:28.187794
"""
# revision identifiers, used by Alembic.
revision = '39e93e7ef50b'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import CreateSequence, DropSequence
def upgrade():
    """Create the user_id_seq sequence and the users table that draws ids from it."""
    op.execute(CreateSequence(sa.Sequence("user_id_seq")))
    op.create_table(
        'users',
        sa.Column('id', sa.Integer, sa.Sequence('user_id_seq'), primary_key=True)
    )

def downgrade():
    """Reverse of upgrade: drop the table first, then its id sequence."""
    op.drop_table('users')
    op.execute(DropSequence(sa.Sequence("user_id_seq")))
|
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, IntegerField
from wtforms.validators import Required
# NOTE(review): `flask.ext.wtf` imports were removed in Flask 1.0; the
# modern spelling is `from flask_wtf import FlaskForm`.
class LoginForm(Form):
    """OpenID sign-in form with a remember-me flag."""
    openid = TextField('openid', validators = [Required()])
    remember_me = BooleanField('remember_me', default = False)

class NewTaskForm(Form):
    """Single-field form for creating a task."""
    task = TextField('task', validators = [Required()])

class TrackDurationForm(Form):
    """Log `duration` (minutes) against the named `active_task`."""
    duration = IntegerField('duration', validators = [Required()])
    active_task = TextField('active_task', validators = [Required()])
# Print the second and third dishes of the menu, each followed by the
# length of its name (list slicing yields indexes 1 and 2).
foods = ["dosa", "chapathi", "beef", "chicken", "mutton"]
for dish in foods[1:3]:
    print(dish)
    print(len(dish))
|
import pandas as pd
import numpy as np
import fdb
import time
import os
import sys
import shutil
import zipfile
from datetime import datetime
from unicodedata import normalize
# Start every run from a pristine copy of the template database (_.FDB).
if os.path.exists(os.getcwd() + '\\VSCyber.FDB'):
    os.remove(os.getcwd() + '\\VSCyber.FDB')
shutil.copyfile(os.getcwd() + '\\_.FDB', os.getcwd() + '\\VSCyber.FDB')
# Spreadsheet exported from the legacy system.
file = os.getcwd() + '\\Exportar.xls'
# Hourly rate from the command line; a decimal comma is accepted.
hora = float(sys.argv[1].replace(',', '.'))
def timeToInt(strTime):
    """Convert a duration to hours as a float.

    Integers are treated as minutes.  Strings like '1h30m' or '45m12s' are
    parsed via whichever of the h/m/s markers appear; empty or unparseable
    strings count as zero.
    """
    if isinstance(strTime, int):
        # Integer input is a number of minutes.
        return strTime / 60
    text = str(strTime)
    if text == '':
        return 0
    # Assemble the strptime format from the unit markers that are present.
    fmt = ''.join(code for marker, code in (('h', '%Hh'), ('m', '%Mm'), ('s', '%Ss'))
                  if text.find(marker) != -1)
    if fmt == '':
        return 0
    try:
        parsed = time.strptime(text, fmt)
    except ValueError:
        # Anything strptime rejects is treated as a zero duration.
        parsed = time.strptime('0s', '%Ss')
    return parsed.tm_hour + (parsed.tm_min / 60) + ((parsed.tm_sec / 60) / 60)
def insertPESSXFORMACNTT(cur, idformacntt, referencia, unidgeo, firstName, lastName, username):
    """Insert one contact row (phone, address, e-mail, ...) for a person.

    Looks the person up by first/last name (matching the login only when
    *username* is non-empty) and links the contact to the city *unidgeo*.
    Rows whose *referencia* is null/NaN are skipped.

    Security fix: the original interpolated spreadsheet values into the SQL
    with str.format (SQL injection / breakage on quotes); it now uses
    qmark-style parameters supported by the fdb driver.
    """
    if pd.notnull(referencia):
        sql = ("INSERT INTO PESSXFORMACNTT(Idpessxformacntt,Idpessoa,idformacntt,referencia,idlocd,Idinc,Dhinc,Idalt,Dhalt) "
               "select first 1 GEN_ID(PESSXFORMACNTT_GEN,1), p.IDPESSOA, ?, ?, "
               "(select first 1 idunidgeo from unidgeo where nome=? and idunidgeo in (select idlocd from locd)), "
               "1, CURRENT_TIMESTAMP, NULL, NULL "
               "from pessoa p left join login l on p.idpessoa=l.idlogin "
               "where p.NOMEFANTASIA like ? and p.NOMECOMPLETO like ? "
               "and l.login like CASE WHEN ? = '' THEN p.idpessoa ELSE ? END")
        cur.execute(sql, (idformacntt, referencia, unidgeo, firstName, lastName, username, username))
# Load the exported spreadsheet (no header row) and name the columns per
# the legacy system's export layout.
dfExportar = pd.read_excel(file, sheet_name=0, header=None)
dfExportar.columns = ['Nome', 'Username', 'Código', 'Status', 'Tipo', 'Débito', 'Cred.Tempo', 'Cred.Valor', 'Créditos Promocionais', 'Data Nasc.',
                      'Tempo Usado', 'RG', 'Endereço', 'Bairro', 'Cidade', 'UF', 'CEP', 'Sexo', 'E-mail', 'Telefone', 'Escola', 'NickName', 'Celular',
                      'Incluído Em', 'Limite Débito', 'Incluído Por', 'Alterado Em', 'Alterado Por', 'Tit. Eleitor', 'Pai', 'P.Disponíveis', 'P. Acumulados',
                      'P. Resgatados', 'Mãe', 'Censura de Horário', 'CPF']
# Drop empty rows and repeated header rows (cells literally saying "Username").
dfExportar = dfExportar[dfExportar.Username.notnull()]
dfExportar = dfExportar[dfExportar.Username.str.match('Username', na=False)==False]
# Strip single quotes everywhere -- they would break the SQL built below.
dfExportar = dfExportar.replace("'","",regex=True)
# Split the full name into first word (<=20 chars) and remainder (<=50 chars).
new = dfExportar.Nome.str.split(" ", n=1, expand=True)
dfExportar['FirstName'] = new[0].str[:20]
dfExportar['LastName'] = new[1].str[:50]
dfExportar['LastName'] = dfExportar['LastName'].replace([None], [''])
# Monetary balance: time credit (converted to hours * hourly rate) plus
# cash credit minus debt, rounded to cents.
dfExportar['Cred.Tempo'].fillna(0, inplace=True)
dfExportar['Cred.Valor'].fillna(0, inplace=True)
dfExportar['Débito'].fillna(0, inplace=True)
dfExportar['Valor'] = round((dfExportar['Cred.Tempo'].apply(timeToInt) * hora) +
                            dfExportar['Cred.Valor'] - dfExportar['Débito'], 2)
dfExportar['Valor'].fillna(0, inplace=True)
# Promotional credit ("Cortesia") valued at the same hourly rate.
dfExportar['Créditos Promocionais'] = dfExportar['Créditos Promocionais'].replace([
    None], [''])
dfExportar['Cortesia'] = round(dfExportar['Créditos Promocionais'].apply(timeToInt)* hora,2)
# Birth date: blank out null/zero cells, then format as YYYY.MM.DD.
dfExportar['Data Nasc.'] = dfExportar['Data Nasc.'].replace([None], [''])
dfExportar['Data Nasc.'] = dfExportar['Data Nasc.'].replace([0], [''])
dfExportar['DataNasc'] = dfExportar['Data Nasc.'].apply(
    lambda x: None if str(x) == '' else datetime.strftime(x, '%Y.%m.%d'))
# Geography defaults so later UNIDGEO inserts always have a name.
dfExportar.UF = dfExportar.UF.fillna('UF').str.upper()
dfExportar.Cidade = dfExportar.Cidade.fillna('Cidade')
dfExportar.Bairro = dfExportar.Bairro.fillna('Bairro')
dfExportar.Pai = dfExportar.Pai.replace([None], [''])
dfExportar.Mãe = dfExportar.Mãe.replace([None], [''])
# NOTE(review): this picks Pai when Pai is EMPTY and Mãe otherwise, so the
# father's name is never actually used -- the condition looks inverted
# (probably intended: Pai when non-empty, else Mãe). Confirm intent.
dfExportar['Responsavel'] = np.where(
    dfExportar.Pai=='', dfExportar.Pai.str[:50], dfExportar.Mãe.str[:50])
dfExportar.Username = dfExportar.Username.astype(str).str.strip()
# Connect to the freshly copied Firebird database.
con = fdb.connect(dsn="localhost:{}\\VSCyber.FDB".format(os.getcwd()),
                  user="sysdba",
                  password="masterkey",
                  port=3050)
cur = con.cursor()
# Pass 1: one PESSOA (person) row per spreadsheet line, plus the matching
# PESSOAFISICA (natural person) and CLI (client) rows.
for index, row in dfExportar.iterrows():
    cur.execute("insert into pessoa values (GEN_ID(PESSOA_GEN,1), ?, ?, 'F', 0, 1, current_timestamp, 1, current_timestamp)",
                (row.FirstName, row.LastName))
    cur.execute("insert into pessoafisica (idpessoa, sexo) select idpessoa, ? from pessoa where nomefantasia = ? and Nomecompleto = ? AND IDPESSOA NOT IN ( SELECT Idpessoa FROM pessoafisica)",
                (row.Sexo, row.FirstName, row.LastName))
    cur.execute("insert into cli (idcli, sitppgcli, flags) select idpessoa, 1, 2 from pessoa where idpessoa not in (select idcli from cli)")
# Store the hourly rate (hora is a float parsed from argv, so this
# str.format interpolation cannot inject SQL).
cur.execute("update VRFXTABHORA set valor = {}".format(hora))
# Pass 2: geographic hierarchy. First each UF (state) as a UNIDGEO + UF row...
# NOTE(review): from here down, values coming from the spreadsheet are
# interpolated with str.format -- a stray quote breaks the SQL (injection
# risk); parameterized queries would be safer.
for UF in dfExportar.UF.unique():
    sql1 = "insert into UNIDGEO values (GEN_ID(UnidGeo_GEN,1),'{}', 1, current_timestamp, NULL, NULL)".format(
        UF)
    cur.execute(sql1)
    sql2 = "insert into UF (IDUF, sigla) select idunidgeo, '{}' from unidgeo where nome ='{}'".format(
        UF, UF)
    cur.execute(sql2)
# ...then each distinct (UF, city) pair as a UNIDGEO + LOCD row linked to its UF...
for index, row in dfExportar[dfExportar.UF.notnull()].drop_duplicates(['UF', 'Cidade'])[['UF', 'Cidade']].iterrows():
    sql1 = "insert into UNIDGEO values (GEN_ID(UnidGeo_GEN,1),'{}', 1, current_timestamp, NULL, NULL)".format(
        row.Cidade)
    cur.execute(sql1)
    sql2 = "insert into LOCD (IDLOCD, IDUF) select first 1 idunidgeo, (SELECT first 1 idunidgeo FROM unidgeo WHERE nome = '{}' and idunidgeo in (select idUF from UF)) from unidgeo where nome ='{}' and idunidgeo not in (select iduf from UF) and idunidgeo not in (select idlocd from locd)".format(row.UF, row.Cidade)
    cur.execute(sql2)
# ...and finally each (city, neighbourhood) pair as a UNIDGEO + BAIRRO row.
for index, row in dfExportar[dfExportar.Cidade != ''].drop_duplicates(['Cidade', 'Bairro'])[['Cidade', 'Bairro']].iterrows():
    sql1 = "insert into UNIDGEO values (GEN_ID(UnidGeo_GEN,1),'{}', 1, current_timestamp, NULL, NULL)".format(
        row.Bairro)
    cur.execute(sql1)
    sql2 = "insert into BAIRRO (IDBAIRRO, IDLOCD) select first 1 idunidgeo, (SELECT first 1 idunidgeo FROM unidgeo WHERE nome = '{}' and idunidgeo in (select idLOCD from LOCD)) from unidgeo where nome ='{}' and idunidgeo not in (select iduf from UF) and idunidgeo not in (select idLOCD from LOCD) and idunidgeo not in (select idbairro from bairro)".format(row.Cidade, row.Bairro)
    cur.execute(sql2)
# Pass 3: per-user data -- logins, credit movements, documents and contacts.
prevUsername = ''
# Sort so duplicate usernames become adjacent and can be disambiguated below.
dfExportar = dfExportar.sort_values('Username')
for index, row in dfExportar.iterrows():
    if(row.Tipo == 'Acesso Grátis'):
        # Free-access accounts get the BFree flag on their CLI row.
        cur.execute("update cli set BFree=1 where idcli in (select idpessoa from pessoa where NOMEFANTASIA like ? and NOMECOMPLETO like ?)",
                    (row.FirstName, row.LastName))
    # 'ADMIN' is reserved; rename it, and mark a repeated username with '*'.
    if(row.Username.upper() == 'ADMIN'):
        row.Username += '_1'
    if(row.Username.strip() == prevUsername):
        row.Username += '*'
    prevUsername = row.Username
    cur.execute("INSERT INTO login (IdLogin,Login,PW,Flags) select first 1 p.idpessoa, ?, NULL, NULL from pessoa p left join mov m on p.idpessoa=m.idcli where p.NOMEFANTASIA like ? and p.NOMECOMPLETO like ? and p.idpessoa not in (select idlogin from login)",
                (row.Username, row.FirstName, row.LastName))
    # NOTE(review): the statements below interpolate spreadsheet values with
    # str.format -- quotes in the data break the SQL (injection risk).
    # Positive balance -> credit movement (SiTpOpMov = 1).
    if(row.Valor > 0):
        sql = "INSERT INTO Mov(IdMov,IdCli,DhMov,Valor,SiTpOpMov,IdCon,DtValidCred,IdInc,DhInc) select first 1 GEN_ID(Mov_GEN,1), p.IDPESSOA, CURRENT_TIMESTAMP, {}, 1, NULL, CURRENT_TIMESTAMP, 1, CURRENT_TIMESTAMP from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa NOT IN ( SELECT idcli FROM mov)".format(
            row.Valor, row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
    # Promotional credit -> movement type 6.
    if(row.Cortesia > 0):
        sql = "INSERT INTO Mov(IdMov,IdCli,DhMov,Valor,SiTpOpMov,IdCon,DtValidCred,IdInc,DhInc) select first 1 GEN_ID(Mov_GEN,1), p.IDPESSOA, CURRENT_TIMESTAMP, {}, 6, NULL, CURRENT_TIMESTAMP, 1, CURRENT_TIMESTAMP from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa NOT IN ( SELECT idcli FROM mov where sitpopmov=6)".format(
            row.Cortesia, row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
    # Birth date (SITPDATA = 1).
    if(pd.notnull(row.DataNasc)):
        sql = "INSERT INTO DATAPESSOA (IDDTPESSOA, IDPESSOA, SITPDATA, DATA, IDINC, DHINC, IDALT, DHALT) select first 1 GEN_ID(DATAPESSOA_GEN,1), p.idpessoa, 1, '{}', 1, CURRENT_TIMESTAMP,1, CURRENT_TIMESTAMP from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa not in (select idpessoa from DATAPESSOA)".format(
            row.DataNasc, row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
    # RG identity document (SITPIDENTF = 1).
    if(pd.notnull(row.RG)):
        sql = "INSERT INTO IDENTFPESS (IDIDENTFPESS, IDPESSOA, SITPIDENTF, REFERENCIA, IDINC, DHINC, IDALT, DHALT) select first 1 GEN_ID(IDENTFPESS_GEN,1), p.idpessoa, 1,'{}', 1, CURRENT_TIMESTAMP,1, CURRENT_TIMESTAMP from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa not in (select idpessoa from IDENTFPESS where sitpidentf=1)".format(
            row.RG, row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
    # CPF document (SITPIDENTF = 2), skipping CPFs already registered.
    if(pd.notnull(row.CPF)):
        sql = "INSERT INTO IDENTFPESS (IDIDENTFPESS, IDPESSOA, SITPIDENTF, REFERENCIA, IDINC, DHINC, IDALT, DHALT) select first 1 GEN_ID(IDENTFPESS_GEN,1), p.idpessoa, 2,'{}', 1, CURRENT_TIMESTAMP,1, CURRENT_TIMESTAMP from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa not in (select idpessoa from IDENTFPESS where sitpidentf=2) and '{}' not in (select referencia from identfpess where sitpidentf=2)".format(row.CPF, row.FirstName, row.LastName, row.Username, row.Username, row.CPF)
        cur.execute(sql)
    # Per-client debt limit.
    if(pd.notnull(row['Limite Débito'])):
        sql = "update cli set LIMDEB={} where idcli=(select first 1 p.idpessoa from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END and p.idpessoa in (select idcli from cli))".format(
            row['Limite Débito'], row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
    # Contact forms: 1=phone, 2=address, 3=e-mail, 4=mobile, 5=guardian.
    insertPESSXFORMACNTT(cur, 1, row.Telefone, row.Cidade,
                         row.FirstName, row.LastName, row.Username)
    insertPESSXFORMACNTT(cur, 2, row.Endereço, row.Cidade,
                         row.FirstName, row.LastName, row.Username)
    insertPESSXFORMACNTT(
        cur, 3, row["E-mail"], row.Cidade, row.FirstName, row.LastName, row.Username)
    insertPESSXFORMACNTT(cur, 4, row.Celular, row.Cidade,
                         row.FirstName, row.LastName, row.Username)
    insertPESSXFORMACNTT(cur, 5, row.Responsavel, row.Cidade,
                         row.FirstName, row.LastName, row.Username)
    # Link the address contact to its neighbourhood (ENDERECO row).
    if(pd.notnull(row.Endereço)):
        sql = "INSERT INTO ENDERECO (IDPESSXFORMACNTT, IDBAIRRO, CEP) select first 1 IDPESSXFORMACNTT,(SELECT first 1 IDUNIDGEO from unidgeo where upper(nome)=upper('{}') and IDUNIDGEO in (select idbairro from bairro)), NULL FROM PESSXFORMACNTT WHERE IDPESSXFORMACNTT not in (select idpessxformacntt from endereco) and IDPESSOA=(select first 1 p.idpessoa from pessoa p left join login l on p.idpessoa=l.idlogin where p.NOMEFANTASIA like '{}' and p.NOMECOMPLETO like '{}' and l.login like CASE WHEN '{}' = '' THEN p.idpessoa ELSE '{}' END)".format(row.Bairro, row.FirstName, row.LastName, row.Username, row.Username)
        cur.execute(sql)
con.commit()
# Package the populated database.
# NOTE(review): the ZipFile is never closed -- use `with zipfile.ZipFile(...)`
# (or call close()) to guarantee the archive is flushed; `zip` also shadows
# the builtin of the same name.
zip = zipfile.ZipFile('VSCyber.zip', 'w')
zip.write('VSCyber.FDB', compress_type=zipfile.ZIP_DEFLATED)
print('Importação concluída!')
|
# https://atcoder.jp/contests/tessoku-book/tasks/tessoku_book_bd
# https://github.com/E869120/kyopro-tessoku
# Input: N = string length, Q = number of queries, S = the string,
# then Q lines "a b c d" asking whether S[a-1:b] == S[c-1:d].
N, Q = map(int, input().split())
S = input()
queries = [ list(map(int, input().split())) for i in range(Q) ]
# print(queries)
# Convert characters to numbers 1..26 (0-indexed here, unlike the book).
# ord(c) returns the character code (ASCII) of c.
T = list(map(lambda c: ord(c) - ord('a') + 1, S))
# print(T)
# Precompute powers of 100 modulo MOD (base-100 polynomial rolling hash).
MOD = 2147483647
power100 = [ None ] * (N + 1)
power100[0] = 1
for i in range(N):
    power100[i + 1] = power100[i] * 100 % MOD
# print(power100)
# Prefix hashes: H[i] is the hash of the first i characters.
H = [ None ] * (N + 1)
H[0] = 0
for i in range(N):
    H[i + 1] = (H[i] * 100 + T[i]) % MOD
# print(H)
# Hash of the substring S[l-1:r] (1-indexed, inclusive) is
# (H[r] - H[l-1] * power100[r-l+1]) % MOD.
# Unlike C++, Python's % always yields a value in [0, MOD) even when the
# left operand is negative, so no extra adjustment is needed.
def hash_value(l, r):
    return (H[r] - H[l - 1] * power100[r - l + 1]) % MOD
# Answer each query by comparing the two substring hashes.
# NOTE(review): equal hashes are taken to mean equal substrings; with a
# single modulus collisions are possible, though unlikely for contest data.
for a, b, c, d in queries:
    hash1 = hash_value(a, b)
    hash2 = hash_value(c, d)
    if hash1 == hash2:
        print("Yes")
    else:
        print("No")
|
#!/usr/bin/env python
#
# Titanium API Coverage Merger
#
# Initial Author: Jeff Haynie, 06/03/09
#
import os, sys, types
import simplejson as json
def dequote(s):
    """Strip one pair of surrounding double quotes from *s*, if present."""
    if s.startswith('"'):
        return s[1:-1]
    return s
def is_leaf(obj, defvalue=False):
    """Return whether *obj* is a documented leaf node of the coverage tree.

    A leaf is a dict carrying a 'property' or 'method' key; it counts as a
    leaf only when it also has a 'description'.  Anything else falls back
    to *defvalue*.

    Fix: the Python 2-only `types.DictType` / `dict.has_key` were replaced
    with `dict` / `in`, which behave identically on Python 2 and also run
    on Python 3.
    """
    if type(obj) == dict and ('property' in obj or 'method' in obj):
        return 'description' in obj
    return defvalue
def flatten_values(prefix, obj):
    """Recursively flatten *obj* into a list of [dotted_key, leaf] pairs.

    *prefix* is the dotted path accumulated so far ('' at the top level);
    non-dict values contribute no entries.

    Fix: the Python 2-only `types.DictType` comparison was replaced with
    `dict` (identical behavior on Python 2, also valid on Python 3).
    """
    r = []
    if type(obj) != dict:
        return r
    for k in obj:
        entry = obj[k]
        newkey = "%s%s" % (prefix, k)
        if is_leaf(entry):
            r.append([newkey, entry])
        else:
            # Descend one level, extending the dotted path.
            a = flatten_values(("%s." % newkey), entry)
            for i in a:
                r.append(i)
    return r
def flatten(obj):
    """Collapse the nested coverage tree into a {dotted_key: leaf} dict."""
    pairs = flatten_values('', obj)
    flat_map = {}
    for dotted_key, leaf in pairs:
        flat_map[dotted_key] = leaf
    return flat_map
def normalize(obj):
    """Re-group a mapping so dotted sub-keys nest at most two levels deep.

    For each non-leaf value, a sub-key 'a.b.c' is stored as
    flat[key]['a']['b.c']; dotless sub-keys keep the whole value. Leaf
    values (per is_leaf with default True) are kept unchanged.

    Fix: Python 2-only `dict.has_key` calls were replaced with `in`, and
    the index/slice split with `str.split('.', 1)` -- identical behavior on
    Python 2, now also runs on Python 3.
    """
    flat = {}
    for key in obj.keys():
        value = obj[key]
        if is_leaf(value, True):
            flat[key] = value
        else:
            for subkey in value:
                if '.' not in subkey:
                    # No dotted path: keep the whole value under this subkey.
                    flat[subkey] = value
                    continue
                newkey, newprop = subkey.split('.', 1)
                if key not in flat:
                    flat[key] = {}
                if newkey not in flat[key]:
                    flat[key][newkey] = {}
                flat[key][newkey][newprop] = value[subkey]
    return flat
def add_recursive(key, obj, newobj):
    """Insert *obj* into nested dict *newobj* at dotted path *key*.

    Intermediate dicts are created as needed; a dotless key is a plain
    assignment.

    Fix: the Python 2-only `dict.has_key` was replaced with `setdefault`,
    preserving behavior on Python 2 while also running on Python 3.
    """
    tokens = key.split('.')
    if len(tokens) == 1:
        newobj[key] = obj
        return
    node = newobj
    for token in tokens[:-1]:
        # Create the intermediate level on first visit.
        node = node.setdefault(token, {})
    node[tokens[-1]] = obj
def denormalize(obj):
    """Rebuild the nested tree form from a dotted-key mapping."""
    tree = {}
    for dotted_key in obj:
        add_recursive(dotted_key, obj[dotted_key], tree)
    return tree
def main(mobile, a, b=None):
    """Merge coverage trees *a* and (optionally) *b*; print merged JSON.

    Both inputs are normalized and flattened to dotted keys; keys present
    only in *b* are added to the merge, then the result is denormalized
    back into a tree and dumped to stdout.  *mobile* is currently unused
    (kept for the commented-out per-platform annotation feature).

    Fix: Python 2-only `dict.has_key` and the `print` statement were
    replaced with `in` and the `print(...)` call form -- equivalent on
    Python 2 and valid on Python 3.
    """
    a_flat = flatten(normalize(a))
    merged = {}
    for key in a_flat:
        merged[key] = a_flat[key]
    if b:
        b_flat = flatten(normalize(b))
        for key in b_flat:
            if key not in merged:
                merged[key] = b_flat[key]
    newmerged = denormalize(merged)
    print(json.dumps(newmerged, sort_keys=True, indent=4))
if __name__ == '__main__':
    # Require at least one coverage JSON file path argument.
    if len(sys.argv) < 2:
        print "Usage: %s <a> <b>" % os.path.basename(sys.argv[0])
        sys.exit(1)
    # A second file switches on "mobile" mode: merge coverage of both files.
    mobile = len(sys.argv)==3
    a = None
    b = None
    # Arguments may arrive shell-quoted; dequote before expanding '~'.
    a = json.load(open(os.path.expanduser(dequote(sys.argv[1])),'r'))
    if mobile:
        b = json.load(open(os.path.expanduser(dequote(sys.argv[2])),'r'))
    main(mobile,a,b)
    sys.exit(0)
|
"""Module for the sample adapter classes."""
import os
import sys
import time
from multiprocessing import Manager, Process
import six
from activities_python.common.action_support.base import BaseAction
from activities_python.common.constants.controller import ControllerConstants
class ActionQuery3(BaseAction):
    """Sample Class for executing a python script action in jail. """

    def __init__(self, jail_options):
        # jail_options: chroot/uid/gid settings consumed by Executer.run().
        super(ActionQuery3, self).__init__()
        self.jail_options = jail_options

    def invoke(self, data, context):
        """Run data['script'] in a jailed child process and return its result.

        Expects in *data*: 'script' (required), optional 'action_timeout'
        (seconds, default 180), 'script_queries' (list of dicts with
        script_query_name/type/query) and 'script_arguments'.  Raises an
        action error (400) on any failure or when the script reports
        'Error:'.
        """
        try:
            self.logger.info('Invoked ExecutePythonScriptQuery')
            # check input parameters
            self.check_input_params(data, "script")
            script = data["script"]
            timeout = abs(data.get("action_timeout", 180))  # same default as in console
            script_queries = {}
            script_arguments = []
            if "script_queries" in data:
                # Store each query as "<type> <expression>" keyed by its name.
                for k in data['script_queries']:
                    script_queries[k['script_query_name']] = k['script_query_type'] + " " + k['script_query']
            if "script_arguments" in data:
                # Arguments end up in sys.argv; coerce non-strings to str.
                for args in data["script_arguments"]:
                    if isinstance(args, six.string_types):
                        script_arguments.append(args)
                    else:
                        script_arguments.append(str(args))
            opts = ExecuterOptions()
            opts.timeout = timeout
            opts.script = script
            opts.script_arguments = script_arguments
            opts.script_queries = script_queries
            opts.jail_options = self.jail_options
            opts.logger = self.logger
            executer = Executer(opts)
            result = executer.run_parent()
            # NOTE(review): on success run_script returns a dict, so this
            # membership test inspects dict KEYS, not the captured output
            # text; it only matches when result is the error string.
            if "Error:" in result:
                self.raise_action_error(400, result)
            return result
        except Exception as e:  # pylint: disable=broad-except
            self.raise_action_error(400, e)
class ExecuterOptions(object):
    """Container for Executer configuration.

    Fix: the original declared the defaults as CLASS attributes, so the
    mutable list/dict values (script_arguments, script_queries,
    jail_options) would have been shared across all instances; they are
    now initialized per instance in __init__.

    Attributes:
        timeout: join timeout in seconds for the child process.
        script: Python source code to execute.
        script_arguments: argv-style strings passed to the script.
        script_queries: {name: "<type> <expression>"} evaluated after the run.
        jail_options: chroot/uid/gid settings keyed by ControllerConstants.
        logger: logger used inside the child process.
    """

    def __init__(self):
        self.timeout = 0
        self.script = ""
        self.script_arguments = []
        self.script_queries = {}
        self.jail_options = {}
        self.logger = None
class Executer(Process):
    """Class for running a Python scripts. """

    # Keys of the manager-backed dict shared between child and parent.
    __STARTED = "started"
    __OUTPUT = "output"
    __EXCEPTION = "exception"

    def __init__(self, options):
        # options: an ExecuterOptions instance.
        super(Executer, self).__init__()
        self.manager = Manager()
        self.shared_dict = self.manager.dict()
        self.options = options

    def run(self):
        """Override for Process.run() """
        # jailing
        self.shared_dict[self.__STARTED] = time.time()
        if self.options.jail_options[ControllerConstants.IS_JAILED]:
            # Drop into the chroot jail, then drop privileges.
            self.options.logger.info("Executing script in chroot jail")
            os.chroot(self.options.jail_options[ControllerConstants.JAIL_DIR])
            os.setgid(self.options.jail_options[ControllerConstants.USER_GID])  # Important! Set GID first
            os.setuid(self.options.jail_options[ControllerConstants.USER_UID])
        else:
            self.options.logger.info("Executing script unjailed")
        try:
            output = run_script(self.options.script, self.options.script_arguments, self.options.script_queries)
            self.shared_dict[self.__OUTPUT] = output
        except Exception as e:  # pylint: disable=broad-except
            # Ship the exception to the parent via the shared dict.
            self.shared_dict[self.__EXCEPTION] = e

    def run_parent(self):
        """Execute self.run in forked process."""
        self.start()
        self.join(self.options.timeout)
        if self.is_alive():
            # Child exceeded the timeout: kill it and report.
            self.terminate()
            raise Exception('Activity timeout')
        if self.__EXCEPTION in self.shared_dict:
            raise self.shared_dict[self.__EXCEPTION]
        # NOTE(review): raises KeyError if the child died before storing any
        # output (e.g. crashed during jailing) -- confirm that is acceptable.
        return self.shared_dict[self.__OUTPUT]
class FileCacher(object):
    """File-like object that buffers stdout writes until flush()."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Start over with an empty buffer."""
        self.out = []

    def write(self, line):
        """Buffer one chunk of output."""
        self.out.append(line)

    def flush(self):
        """Return the buffered output joined by newlines and clear the buffer."""
        buffered = self.out
        # Drop a single stray newline chunk before joining.
        if '\n' in buffered:
            buffered.remove('\n')
        joined = '\n'.join(buffered)
        self.reset()
        return joined
class Shell(object):
    """Run Python source like an interactive interpreter, capturing stdout.

    stdout is redirected to a FileCacher while the code runs; run_code
    returns the captured text, or an "Error: ..." string on failure.

    Fix: the original only restored sys.stdout on the success path, so any
    exception in the executed script left stdout permanently redirected to
    the cache; restoration now happens in a finally block.
    """

    def __init__(self, arguments):
        self.stdout = sys.stdout
        self.cache = FileCacher()
        self.set_arguments(arguments)
        self.locals = {"__name__": "__console__", "__doc__": None}

    def run_code(self, script):
        """Run the specified script; return captured output or an error string."""
        # pylint: disable=broad-except,bare-except
        try:
            sys.stdout = self.cache
            try:
                # pylint: disable=exec-used
                exec(script, self.locals)
            except SystemExit:
                raise
            except:  # noqa: E722
                e = sys.exc_info()[1:2]
                return "Error: " + str(e)
            output = self.cache.flush()
            return output
        except:  # noqa: E722
            e = sys.exc_info()[1:2]
            return "Error: " + str(e)
        finally:
            # Always restore the real stdout, even when the script raised.
            sys.stdout = self.stdout

    @classmethod
    def set_arguments(cls, arguments):
        """Set arguments to be passed to the script via sys.argv."""
        if arguments:
            sys.argv[1:] = ""
            for arg in arguments:
                sys.argv.append(arg)
        return
def run_script(script, script_arguments, script_queries):
    """Runs the Python script with arguments and interactive queries.

    Returns a dict with 'response_body' (captured stdout) and, when
    *script_queries* is given, 'script_queries' mapping each query name to
    its value converted per the declared type ("str 0 <expr>" style entries,
    split on whitespace).  On failure returns the "Error: ..." string from
    Shell.run_code; unexpected failures are re-raised wrapped in Exception.

    Fixes: (1) the error check previously ran AFTER the int()/float()
    conversion, so an "Error: ..." string hit int(out) and raised a
    ValueError instead of being reported; it now runs first.  (2) the
    generated query used a Python 2-only print statement; the call form
    print(<expr>) is equivalent on Python 2 and valid on Python 3.
    """
    try:
        shell = Shell(script_arguments)
        result = {}
        out = shell.run_code(script)
        if "Error:" in out:
            return out
        result["response_body"] = out
        if script_queries:
            result["script_queries"] = {}
            for key in script_queries.keys():
                query = script_queries[key]
                parts = query.split()
                # Print the queried expression so run_code captures its value.
                query = "print(" + parts[1] + ")"
                out = shell.run_code(query)
                # Report errors before attempting any type conversion.
                if "Error:" in out:
                    return out
                if parts[0] == "str":
                    if isinstance(out, six.string_types):
                        output = out
                    else:
                        output = str(out)
                elif parts[0] == "int":
                    output = int(out)
                elif parts[0] == "bool":
                    output = out.lower() in ("yes", "true", "t", "1")
                elif parts[0] == "float":
                    output = float(out)
                else:
                    # Unknown type declaration: pass the raw text through.
                    output = out
                result["script_queries"][key] = output
        return result
    except Exception as e:
        raise Exception(e)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.