text stringlengths 38 1.54M |
|---|
#####################################
##| |##
##| |##
##| Rocky Vargas |##
##| |##
##| |##
#####################################
from random import randint
#Car Generator
# Twenty driver names; paired element-wise with `sponsor` below.
name = ["Jasper", "Ethyn", "Rocky", "Steve", "James", "Chris", "Cooley", "Storm", "Richard", "Sarah", "Kevin", "Karen", "Tyler", "Marcos", "Jimenez", "Rhoda", "Justin", "Patrick", "Jairo", "Madonna"]
# Twenty sponsors, one per driver (same index as `name`).
sponsor = ["Bethesda", "Chic fil a", "UAT", "Panda Express", "Nvidia", "AMC", "Fox", "ArenaNet", "Mcdonalds", "Rockstar", "Monster", "Red Bull", "Walmart", "Target", "Pringles", "Taco Bell", "Wendys", "Mobil", "EA", "Gamestop"]
# Filled with Car instances once the class below is defined.
car = []
class Car():
    """A race car with a driver name, a sponsor, and distance/speed state."""

    def __init__(self, name, sponsor):
        # All race statistics start at zero.
        self.name = name
        self.sponsor = sponsor
        self.miles = 0
        self.speed = 0
        self.avg = 0
        self.lap = 0

    def __str__(self):
        # Report card layout used by the race printout.
        template = '''\n\tDriver Name:\t{}\n\tSponsor:\t{}\n\tMiles:\t{:.2f}\n\tSpeed:\t{:.2f}\n\n'''
        return template.format(self.name, self.sponsor, self.miles, self.speed)

    #Average speed
    def update(self):
        # Pick a new random speed (mph) and credit one minute of travel.
        mph = randint(1, 120)
        self.speed = mph
        self.miles += self.speed / 60
# Build one Car per driver/sponsor pair and show the starting roster.
for idx in range(len(name)):
    entry = Car(name[idx], sponsor[idx])
    car.append(entry)
    print(entry)
# Race until the current leader has covered 500 miles.
while car[0].miles < 500:
    for racer in car:
        racer.update()
    # Leader first.
    car.sort(key=lambda c: c.miles, reverse=True)
    for racer in car:
        print(racer)
    print("----------------------------------------\n\n")
#Display Winner
print("Coming in 1st place is: %s\n And in 2nd place: %s\n Finally, in 3rd place: %s"%(car[0], car[1], car[2]))
print("----------------------------------------")
|
###########
#FILE: hmk_new_3.py
#AUTHOR(S): Kelsey Herndon & Rebekke Muench
#EMAIL: keh0023@uah.edu
#ORGANIZATION: UAH
#CREATION DATE: March 22, 2017
#LAST MOD DATE: March 29, 2017
#PURPOSE: Compare the relationship between NDVI and three census variables for 2006 and 2016
#DEPENDENCIES: arcpy, numpy
###########
#Import packages
import arcpy
from arcpy.sa import *
import numpy as np
# Enable output overwriting and the Spatial Analyst extension.
arcpy.env.overwriteOutput = 'True'
arcpy.CheckOutExtension('Spatial')
# Inputs: MODIS NDVI rasters for 2006/2016 and the CONUS county census layer.
ndvi06 = r'\\Mac\Home\Documents\UAH\ESS308\ndvi\modis_conus_ndvi2006_int.tif'
ndvi16 = r'\\Mac\Home\Documents\UAH\ESS308\ndvi\modis_conus_ndvi2016_int.tif'
census = r'\\Mac\Home\Documents\UAH\ESS308\CONUS_County_Census\County_2010Census_CONUS.shp'
# Run the same workflow for both NDVI years.
files = np.array([ndvi06, ndvi16])
for path in files:
    # Two-digit year taken from the raster filename; tags every output below.
    y = path[-10] + path[-9]
    ras = arcpy.Raster(path)
    # Work on a per-year copy of the census layer so fields are not piled
    # onto the same file.
    census_n = census[:-4] + y + '.shp'
    arcpy.CopyFeatures_management(census, census_n)
    # Zonal statistics of NDVI per county.
    out_table = r'\\Mac\Home\Documents\UAH\ESS308\out_table' + y + '.dbf'
    stats = arcpy.sa.ZonalStatisticsAsTable(census_n, 'FID', ras, out_table, ignore_nodata='DATA', statistics_type='ALL')
    # Join the statistics back onto the census copy.
    joinshp = arcpy.JoinField_management(census_n, 'FID', stats, 'OID')
    census_join = r'\\Mac\Home\Documents\UAH\ESS308\join' + y + '.dbf'
    arcpy.CopyFeatures_management(joinshp, census_join)
    # Equal-area projection so county areas are meaningful.
    census_prj = census_join[:-4] + '_prj.shp'
    arcpy.Project_management(census_join, census_prj, arcpy.SpatialReference('North America Albers Equal Area Conic'))
    # True county area (km^2, assuming map units of meters).
    arcpy.AddField_management(census_prj, field_name='area_true', field_type='FLOAT')
    arcpy.CalculateField_management(census_prj, 'area_true', "float(!SHAPE.AREA!)/1E6", "PYTHON")
    # Population density.
    arcpy.AddField_management(census_prj, 'pop_den', field_type='FLOAT')
    arcpy.CalculateField_management(census_prj, 'pop_den', "float(!DP0010001! /!area_true!)", "PYTHON")
    # FID copied into a plain integer field (used as the OLS unique ID).
    arcpy.AddField_management(census_prj, field_name='FID_real', field_type='INTEGER')
    arcpy.CalculateField_management(census_prj, 'FID_real', "int(!FID!)", "PYTHON")
    # Institutionalized population per 1000 residents.
    arcpy.AddField_management(census_prj, field_name='perc_inst', field_type='FLOAT')
    arcpy.CalculateField_management(census_prj, 'perc_inst', "(float(!DP0120015!)/float(!DP0010001!))*1000", "PYTHON")
    # Percent male of total population.
    arcpy.AddField_management(census_prj, field_name='male', field_type='FLOAT')
    arcpy.CalculateField_management(census_prj, 'male', "(float(!DP0040002!)/float(!DP0010001!))*100", "PYTHON")
    # OLS: mean NDVI vs population density.
    ols_results = r'\\Mac\Home\Documents\UAH\ESS308\ols_popden' + y + '.shp'
    outpdf = r'\\Mac\Home\Documents\UAH\ESS308\ols_popden' + y + '.pdf'
    arcpy.OrdinaryLeastSquares_stats(census_prj, 'FID_real', ols_results, 'MEAN', 'pop_den', '#', '#', outpdf)
    # OLS: mean NDVI vs percent institutionalized.
    ols_results2 = r'\\Mac\Home\Documents\UAH\ESS308\ols_inst' + y + '.shp'
    outpdf2 = r'\\Mac\Home\Documents\UAH\ESS308\ols_inst' + y + '.pdf'
    arcpy.OrdinaryLeastSquares_stats(census_prj, 'FID_real', ols_results2, 'MEAN', 'perc_inst', '#', '#', outpdf2)
    # OLS: mean NDVI vs percent male.
    ols_results3 = r'\\Mac\Home\Documents\UAH\ESS308\ols_male' + y + '.shp'
    outpdf3 = r'\\Mac\Home\Documents\UAH\ESS308\ols_male' + y + '.pdf'
    arcpy.OrdinaryLeastSquares_stats(census_prj, 'FID_real', ols_results3, 'MEAN', 'male', '#', '#', outpdf3)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Usage:
#
#
# Example:
# '/passport/user/resetPassword'
#
# Reference:
# https://docs.python.org/2/library/httplib.html
import httplib
import urllib
import json
default_domain = 'passport.qatest.didichuxing.com'
def passport_api_request(path, params, method='POST', domain=default_domain, debug=False):
params_full = dict(params)
params_full.update({'role': 1})
params_q = {'q': json.dumps(params_full)}
params_encoded = urllib.urlencode(params_q)
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
conn = httplib.HTTPConnection(domain)
if debug:
conn.set_debuglevel(1)
conn.request(method, path, params_encoded, headers)
response = conn.getresponse()
if debug:
print response.status, response.reason
data = response.read()
if debug:
print data
conn.close()
return data
|
#!/usr/bin/env python
"""
Description:
Node that performs primary vision operations for pick and place operation including:
- Camera stream thresholding
- Calculates image moments to locate objects
- Publishes necessary information for other nodes
Help from: opencv-srf.blogspot.ro/2010/09/object-detection-using-color-seperation.html
"""
import roslib
import rospy
import sys
import cv
import cv2
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
import baxter_interface
from jon_baxter.srv import Coordinate
class Pick_Place_Vision(object):
def __init__(self):
"""
Initialize Pick_Place_Vision class
"""
# Instantiate distance
self.distance = 0.0
# Instantiate all three cameras, close them, open left camera
self._left_camera = baxter_interface.CameraController('left_hand_camera')
self._right_camera = baxter_interface.CameraController('right_hand_camera')
self._head_camera = baxter_interface.CameraController('head_camera')
self.close_cameras()
self.open_camera('left')
# Open CV windows (resizable and positioned)
cv2.namedWindow("Control", cv2.WINDOW_NORMAL)
cv2.moveWindow("Control", 1990, 630)
cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
cv2.moveWindow("Original", 1990, 30)
cv2.namedWindow("Thresholded", cv2.WINDOW_NORMAL)
cv2.moveWindow("Thresholded", 3270, 30)
# Initialize thresholding values
low_h = 165
high_h = 179
low_s = 70
high_s = 255
low_v = 60
high_v = 255
# "Do nothing" callback
def nothing(x):
pass
# Create HSV trackbars (nothing on callback)
cv2.createTrackbar("Low H", "Control", low_h, 179, nothing)
cv2.createTrackbar("High H", "Control", high_h, 179, nothing)
cv2.createTrackbar("Low S", "Control", low_s, 255, nothing)
cv2.createTrackbar("High S", "Control", high_s, 255, nothing)
cv2.createTrackbar("Low V", "Control", low_v, 255, nothing)
cv2.createTrackbar("High V", "Control", high_v, 255, nothing)
# Initialize ROS bridge
self.bridge = CvBridge()
# Initialize object locator publisher
self.object_location_pub = rospy.Publisher("/camera_manager/object_location", Point)
return
def set_available(self):
"""
Sets Mover node to available
"""
# Try setting Mover node to available
try:
coordinate = rospy.ServiceProxy('coordinate', Coordinate)
resp = coordinate(
'pick_place_vision', #node
0, #status_request
1, #status_set
1) #status_setting
# Return success (1 or 0)
return resp.successful
# Service exception
except rospy.ServiceException, e:
print "==[VISION]== Service call failed: %s" %e
#Return unsuccessful
return 0
def set_busy(self):
"""
Sets Mover node to busy
"""
# Try setting Mover node to busy
try:
coordinate = rospy.ServiceProxy('coordinate', Coordinate)
resp = coordinate(
'pick_place_vision', #node
0, #status_request
1, #status_set
0) #status_setting
# Return success (1 or 0)
return resp.successful
# Service exception
except rospy.ServiceException, e:
print "==[VISION]== Service call failed: %s" %e
# Return unsuccessful
return 0
def check_availability(self, node):
"""
Checks if another node is available
"""
# Try checking node availability
try:
coordinate = rospy.ServiceProxy('coordinate', Coordinate)
resp = coordinate(
node, #node
1, #status_request
0, #status_set
0) #status_setting
# If successful, return availability (1 or 0)
if resp.successful:
return resp.available
# Service exception
except rospy.ServiceException, e:
print "==[VISION]== Service call failed: %s" %e
# Returns unsuccessful
return 0
def calibrate_distance(self):
"""
Retrieves the distance from Baxter's arm
to the table and uses the value as a constant
throughout the rest of the demo
"""
# Set node to busy
self.set_busy()
# Get left hand range state
dist = baxter_interface.analog_io.AnalogIO('left_hand_range').state()
# If > 65,000, did not get correct range
if dist > 65000:
sys.exit("==[VISION]== ERROR - calibrate_distance - no distance found")
# Set value
self.distance = dist
# Set node to available
self.set_available()
def close_cameras(self):
"""
Close all three cameras
"""
# Close cameras
self._left_camera.close()
self._right_camera.close()
self._head_camera.close()
def open_camera(self, camera):
"""
Open a camera at max resolution
"""
# Open camera at resolution of 1280 x 800
switch = {'left': self._left_camera,
'right': self._right_camera,
'head': self._head_camera}
switch[camera].open()
switch[camera].resolution = [1280, 800]
def stream_images(self):
"""
Stream ROS images from Baxter's camera
"""
# Subscibe to Baxter's camera images
_camera_sub = rospy.Subscriber("/cameras/left_hand_camera/image", Image, self._on_camera)
def _on_camera(self, data):
"""
Camera Image callback: Converts ROS Image to HSV format,
thresholds it, performs morphological opening and closing,
and shows both the original and thresholded images in a
window. Also publishes necessary information for
pick_place_controller node
"""
# Convert Image message to CV image with blue-green-red color order (bgr8)
try:
img_original = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError, e:
print("==[CAMERA MANAGER]==", e)
# Convert image to HSV format
img_hsv = cv2.cvtColor(img_original, cv2.COLOR_BGR2HSV)
# Threshold image based on trackbar values
low_h = cv2.getTrackbarPos("Low H", "Control")
high_h = cv2.getTrackbarPos("High H", "Control")
low_s = cv2.getTrackbarPos("Low S", "Control")
high_s = cv2.getTrackbarPos("High S", "Control")
low_v = cv2.getTrackbarPos("Low V", "Control")
high_v = cv2.getTrackbarPos("High V", "Control")
img_thresholded = cv2.inRange(img_hsv, np.array([low_h, low_s, low_v]), np.array([high_h, high_s, high_v]))
# Morphological opening (remove small objects from the foreground)
img_thresholded = cv2.erode(img_thresholded, np.ones((2, 2), np.uint8), iterations=1)
img_thresholded = cv2.dilate(img_thresholded, np.ones((2, 2), np.uint8), iterations=1)
# Morphological closing (fill small holes in the foreground)
img_thresholded = cv2.dilate(img_thresholded, np.ones((2, 2), np.uint8), iterations=1)
img_thresholded = cv2.erode(img_thresholded, np.ones((2, 2), np.uint8), iterations=1)
# Calculate the moments of the thresholded image
moments = cv2.moments(img_thresholded)
d_m01 = moments["m01"]
d_m10 = moments["m10"]
d_area = moments["m00"]
# If the area <= 10000, just noise
if d_area > 10000:
size = img_hsv.shape
pos_x = size[0] - (d_m01 / d_area)
pos_y = size[1] - (d_m10 / d_area)
# Publish object location
self.object_location_pub.publish(Point(pos_x, pos_y, 0.000))
# Show the CV image and wait 3ms for a keypress
cv2.imshow("Original", img_original)
cv2.imshow("Thresholded", img_thresholded)
cv2.waitKey(3)
def main():
    """Initialize the vision node, wait for the mover, calibrate, then stream."""
    print("==[VISION]== Initializing Vision")
    rospy.init_node('pick_place_vision')
    rospy.wait_for_service('coordinate')
    wait_rate = rospy.Rate(1)
    print("==[VISION]== Starting Vision...")
    vision = Pick_Place_Vision()
    # Block until the mover node reports itself available.
    while not vision.check_availability('pick_place_mover'):
        wait_rate.sleep()
    print("==[VISION]== Calibrating Vision")
    vision.calibrate_distance()
    print("==[VISION]== Streaming images")
    vision.stream_images()
    while not rospy.is_shutdown():
        rospy.spin()
    cv2.destroyAllWindows()
    print("\n==[VISION]== Vision done")


if __name__ == '__main__':
    main()
__author__ = 'Mona Jalal'
'''
Uses user's left mouse click to annotate the fingertips
Guide from the original CVAR dataset is shown to user and
user should left click on a point close to the fingertip that is
visible to her.
'''
import cv2
import itertools
import math
import os
import sys
# The dataset root must be supplied as the first command-line argument.
if len(sys.argv) > 1:
    CVAR_dataset_path = sys.argv[1]
else:
    CVAR_dataset_path = ""
    print('You should enter the absolute path to CVAR dataset!')
    sys.exit(1)
def select_point(event, x, y, flags, param):
    """Mouse callback: record left-clicks into the global `points` list."""
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    print("x is {0} and y is {1}".format(x, y))
    points.append((x, y))
# Walk every sequence directory, show each depth image with the dataset's
# five fingertip guesses drawn on it, and let the user click the fingertips
# that are actually visible.  Three output files are (re)built per directory:
#   visible_fingertips.txt - 0/1 visibility flag per fingertip
#   correct_fingertips.txt - the user-clicked coordinates
#   all_fingertips.txt     - dataset coordinates, overridden by nearby clicks
for subdirs, dirs, files in os.walk(CVAR_dataset_path):
    for dir in dirs:
        cur_path = CVAR_dataset_path + '\\' + dir
        count = 0
        visible_fingertips_file = open(cur_path + '\\' + 'visible_fingertips.txt', 'w+')
        # Remove stale outputs from a previous run; on a first run they do
        # not exist yet, so guard the removal (unconditional os.remove
        # raised FileNotFoundError here).
        for stale in ('correct_fingertips.txt', 'all_fingertips.txt'):
            stale_path = cur_path + '\\' + stale
            if os.path.exists(stale_path):
                os.remove(stale_path)
        correct_fingertips_file = open(cur_path + '\\' + 'correct_fingertips.txt', 'a+')
        all_fingertips_file = open(cur_path + '\\' + 'all_fingertips.txt', 'a+')
        with open(cur_path + '\\' + 'fingertips.txt') as fingertips_file:
            for line in fingertips_file:
                points = []
                fingertips_split = line.split(' ')
                depth_image_filename = fingertips_split[0]
                print(depth_image_filename)
                correct_fingertips_file.write(depth_image_filename + ' ')
                all_fingertips_file.write(depth_image_filename + ' ')
                visible_fingertips_file.write(depth_image_filename + ' ')
                test_image = cv2.imread(cur_path + '\\' + depth_image_filename)
                # Remaining fields are x y z triples, one per fingertip.
                line_split = ' '.join(fingertips_split[1:]).rstrip().split(' ')
                iterable = iter(line_split)
                sliced_list = list(iter(lambda: list(itertools.islice(iterable, 3)), []))
                # Draw each guide fingertip in a distinct color.
                colors = [(255, 255, 0), (0, 255, 255), (0, 255, 0), (0, 0, 255), (255, 0, 0)]
                for tip, color in zip(sliced_list, colors):
                    cv2.circle(test_image, (int(float(tip[0])), int(float(tip[1]))), 1, color, -1)
                cv2.namedWindow(depth_image_filename)
                cv2.imshow(depth_image_filename, test_image)
                cv2.setMouseCallback(depth_image_filename, select_point)
                count += 1
                cv2.waitKey(0)
                cv2.destroyAllWindows()
                # A fingertip counts as visible when some click landed within
                # 10 pixels of its dataset position; the click replaces it.
                visible_fingertips = {i: 0 for i in range(5)}
                for i in range(len(points)):
                    for j in range(len(sliced_list)):
                        dist = math.sqrt(pow(float(sliced_list[j][0]) - float(points[i][0]), 2)
                                         + pow(float(sliced_list[j][1]) - float(points[i][1]), 2))
                        if dist < 10:
                            visible_fingertips[j] = 1
                            sliced_list[j][0] = points[i][0]
                            sliced_list[j][1] = points[i][1]
                            print("i is {0} and it is 1".format(j))
                            break
                for i in range(len(sliced_list)):
                    all_fingertips_file.write(str(sliced_list[i][0]) + ' ' + str(sliced_list[i][1]) + ' ')
                for value in visible_fingertips.values():
                    visible_fingertips_file.write(str(value) + ' ')
                print(points)
                print(visible_fingertips)
                for pt in points:
                    correct_fingertips_file.write(str(pt[0]) + ' ' + str(pt[1]) + ' ')
                correct_fingertips_file.write('\n')
                visible_fingertips_file.write('\n')
                all_fingertips_file.write('\n')
        visible_fingertips_file.close()
        correct_fingertips_file.close()
        all_fingertips_file.close()
|
from flask import Flask, redirect, request, url_for, send_from_directory, Blueprint
saga_routes = Blueprint("saga_routes", __name__)

# Each handler serves one directory level of the built saga 0.1.0 HTML docs;
# a separate route per level is needed because `<path>` matches a single
# URL segment only.


@saga_routes.route("/<path>")
def send_saga_page(path):
    """Serve a top-level page of the saga HTML docs."""
    return send_from_directory("saga/0.1.0/docs/html", path)


@saga_routes.route("/_static/<path>")
def send_static_stuff(path):
    """Serve a file from the docs' _static directory."""
    return send_from_directory("saga/0.1.0/docs/html/_static", path)


@saga_routes.route("/_static/css/<path>")
def send_static_css_stuff(path):
    """Serve a stylesheet from _static/css."""
    return send_from_directory("saga/0.1.0/docs/html/_static/css", path)


@saga_routes.route("/_static/css/fonts/<path>")
def send_static_css_font_stuff(path):
    """Serve a font file from _static/css/fonts."""
    return send_from_directory("saga/0.1.0/docs/html/_static/css/fonts", path)


@saga_routes.route("/_static/js/<path>")
def send_static_js_stuff(path):
    """Serve a script from _static/js."""
    return send_from_directory("saga/0.1.0/docs/html/_static/js", path)
|
import random
def Guessing_Game_One():
    """Ask for a 1-9 guess, compare it with a random number, report the result.

    Re-prompts (recursively) when the input is not a number in range.
    """
    try:
        userInput = int(input('Guess the number between 1 and 9: '))
    except ValueError:
        # Narrowed from a bare except: only non-numeric input is retried.
        print('Error! You must enter a number between 1 and 9.')
        Guessing_Game_One()
        return
    if not 1 <= userInput <= 9:
        # The range check must run before the </> comparisons below; in the
        # original it sat after them and was unreachable.
        print('Error! You should enter a number between 1 and 9!')
        Guessing_Game_One()
        return
    random_number = random.randint(1, 9)
    if userInput == random_number:
        print('Congratulations! You guessed correct!')
    elif userInput < random_number:
        print(f'You guessed too low! The correct answer is {random_number}')
    else:
        print(f'You guessed too high! The correct answer is {random_number}')
# Play once, then keep offering replays until the user types 'exit'.
Guessing_Game_One()
while True:
    answer = input('Dou you want to play again? (yes/exit): ')
    if answer == 'yes':
        Guessing_Game_One()
    elif answer == 'exit':
        break
    else:
        print('Enter: yes or exit')
from collections import defaultdict
from random import uniform
from math import sqrt
def read_points():
    """Read the iris dataset: one whitespace-separated row of floats per line.

    Blank lines are skipped.  Returns a list of float lists.
    """
    dataset = []
    # The context manager closes the file; the explicit close() was redundant.
    with open('鸢尾花.txt', 'r') as file:
        for line in file:
            if line == '\n':
                continue
            # split() with no argument also tolerates tabs and repeated
            # spaces, which split(' ') turned into empty fields (ValueError).
            dataset.append(list(map(float, line.split())))
    return dataset
def write_results(listResult, dataset, k):
    """Append each cluster's point indices to result.txt, one block per cluster."""
    with open('result.txt', 'a') as file:
        for kind in range(k):
            file.write("CLASSINFO:%d\n" % (kind + 1))
            file.write(''.join('%d\n' % idx for idx in listResult[kind]))
            file.write('\n')
        file.write('\n\n')
def point_avg(points):
    """Component-wise mean of a list of points, rounded to 8 decimal places."""
    n = float(len(points))
    center = []
    for dim in range(len(points[0])):
        total = sum(p[dim] for p in points)
        # Same 8-decimal rounding as the original implementation.
        center.append(float("%.8f" % (total / n)))
    return center
def update_centers(data_set, assignments, k):
    """Recompute each of the k cluster centers as the mean of its points."""
    clusters = defaultdict(list)
    for label, point in zip(assignments, data_set):
        clusters[label].append(point)
    return [point_avg(clusters[label]) for label in range(k)]
def assign_points(data_points, centers):
    """Assign each point to the index of its nearest center.

    Aborts the program when some cluster ends up empty (bad random init).
    """
    assignments = []
    for point in data_points:
        best_index = 0
        best_dist = float('inf')
        for idx in range(len(centers)):
            d = distance(point, centers[idx])
            if d < best_dist:
                best_dist = d
                best_index = idx
        assignments.append(best_index)
    if len(set(assignments)) < len(centers):
        print("\n--!!!产生随机数错误,请重新运行程序!!!!--\n")
        exit()
    return assignments
def distance(a, b):
    """Euclidean distance between two equal-length points."""
    return sqrt(sum((a[i] - b[i]) ** 2 for i in range(len(a))))
def generate_k(data_set, k):
    """Draw k random points, uniform within each dimension's observed range."""
    dims = len(data_set[0])
    # Per-dimension bounds, keyed 'min_i' / 'max_i'.
    min_max = defaultdict(int)
    for point in data_set:
        for i in range(dims):
            value = point[i]
            lo_key = 'min_%d' % i
            hi_key = 'max_%d' % i
            if lo_key not in min_max or value < min_max[lo_key]:
                min_max[lo_key] = value
            if hi_key not in min_max or value > min_max[hi_key]:
                min_max[hi_key] = value
    centers = []
    for _ in range(k):
        # Same 8-decimal rounding and same uniform() call order as before.
        rand_point = [float("%.8f" % uniform(min_max['min_%d' % i], min_max['max_%d' % i]))
                      for i in range(dims)]
        centers.append(rand_point)
    return centers
def k_means(dataset, k):
    """Run Lloyd's algorithm until assignments stop changing, then report."""
    initial_centers = generate_k(dataset, k)
    assignments = assign_points(dataset, initial_centers)
    old_assignments = None
    # Iterate: recompute centers, reassign, until a fixed point is reached.
    while assignments != old_assignments:
        new_centers = update_centers(dataset, assignments, k)
        old_assignments = assignments
        assignments = assign_points(dataset, new_centers)
    result = list(zip(assignments, dataset))
    print('\n\n---------------------------------分类结果---------------------------------------\n\n')
    for out in result:
        print(out, end='\n')
    print('\n\n---------------------------------标号简记---------------------------------------\n\n')
    # listResult[c] holds the dataset row indices assigned to cluster c.
    listResult = [[] for _ in range(k)]
    for row_index, label in enumerate(assignments):
        listResult[label].append(row_index)
    write_results(listResult, dataset, k)
    for kind in range(k):
        print("第%d类数据有:" % (kind + 1))
        count = 0
        for j in listResult[kind]:
            print(j, end=' ')
            count += 1
            # Line break every 25 indices.
            if count % 25 == 0:
                print('\n')
        print('\n')
    print('\n\n--------------------------------------------------------------------------------\n\n')
def main():
    """Load the dataset and cluster it into three classes."""
    k_means(read_points(), 3)


if __name__ == "__main__":
    main()
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import math as m
import time
import serial
def phsv(img):
    """Convert a BGR frame to HSV color space."""
    return cv.cvtColor(img, cv.COLOR_BGR2HSV)
def col_r(image):
    """Locate the colored target in an HSV frame.

    Draws debug overlays in the "r1" window and returns the target centroid
    (cXr, cYr), or (-1, -1) when nothing is detected.
    """
    red = (0, 0, 255)
    blue = (255, 0, 0)
    # HSV range of the target color.
    mask = cv.inRange(image, np.array([100, 90, 90]), np.array([130, 255, 255]))
    output1 = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
    font = cv.FONT_HERSHEY_DUPLEX
    # Erode with a small kernel, then dilate with a larger one, so only
    # sizeable blobs survive.
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (11, 11), (-1, -1))
    kernel2 = cv.getStructuringElement(cv.MORPH_RECT, (19, 19), (-1, -1))
    cleaned = cv.dilate(cv.erode(mask, kernel), kernel2)
    M = cv.moments(cleaned)
    if M["m00"] == 0:
        # Zero area: nothing detected.
        tx = 'cr = no target'
        cXr = -1
        cYr = -1
    else:
        cXr = int(M["m10"] / M["m00"])
        cYr = int(M["m01"] / M["m00"])
        tx = 'cr = ' + '%.2f' % cXr + ' %.2f' % cYr
        cv.circle(output1, (cXr, cYr), 25, (255, 0, 255), 1)
    cv.putText(output1, tx, (0, 25), font, 1, (0, 255, 255), 1, 8)
    # Steering guides: vertical dead-band at x=300..340, stop line at y=350.
    cv.line(output1, (300, 0), (300, 480), blue, 1, 4)
    cv.line(output1, (340, 0), (340, 480), blue, 1, 4)
    cv.line(output1, (0, 350), (640, 350), red, 1, 4)
    cv.imshow("r1", output1)
    return cXr, cYr
def drive(rx, ry):
    """Steer toward the target when one was detected (rx != -1)."""
    if rx != -1:
        print('red detected')
        oper(rx, ry)
def oper(x, y):
    """Send a motion command over serial based on the target position.

    Commands: '5' stop, '3' turn right, '1' turn left, '2' forward.
    """
    if y > 350:
        # Target is close (below the stop line).
        print('stop')
        ser.write('5'.encode("utf-8"))
    elif x < 300:
        print('turn right')
        ser.write('3'.encode("utf-8"))
    elif x > 340:
        print('turn left')
        ser.write('1'.encode("utf-8"))
    else:
        # Target inside the 300..340 dead-band: drive straight.
        print('go forward')
        ser.write('2'.encode("utf-8"))
# Camera at 640x480 plus the serial link to the motor controller.
cap = cv.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height
td = 0
ser = serial.Serial("/dev/ttyAMA0", 9600)
# Main loop: grab a frame, find the target, drive — until ESC is pressed.
while True:
    print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')
    time_start = time.time()
    ret, frame = cap.read()
    cv.imshow('frame', frame)
    hsv_frame = phsv(frame)
    rx, ry = col_r(hsv_frame)
    drive(rx, ry)
    # Per-iteration processing time, for tuning.
    td = time.time() - time_start
    time_start = 0
    print('time delay = ', td)
    if cv.waitKey(30) & 0xff == 27:
        break
cap.release()
cv.destroyAllWindows()
import requests
import json
def get_forecast_by_lat_long(latitude, longitude, no_of_days=7):
    """Fetch a multi-day forecast from weatherapi.com and return it formatted.

    NOTE(review): the API key is hard-coded here and in
    get_climate_by_lat_long; consider moving it to configuration.
    """
    parameters = {
        'key': '6a95b306f1334a3a87c190139212005',
        'q': str(latitude) + ',' + str(longitude),
        'aqi': 'no',
        'alerts': 'no',
        'days': no_of_days
    }
    response = requests.get('http://api.weatherapi.com/v1/forecast.json', params=parameters)
    data = response.json()
    # Two leading blank lines, then the formatted forecast (original layout).
    return '\n' + '\n' + fun_parse_future_forecast(data)
def get_climate_by_lat_long(latitude, longitude):
    """Fetch current conditions from weatherapi.com and return them formatted."""
    parameters = {
        'key': '6a95b306f1334a3a87c190139212005',
        'q': str(latitude) + ',' + str(longitude),
        'aqi': 'no'
    }
    response = requests.get('http://api.weatherapi.com/v1/current.json', params=parameters)
    # Leading blank line, then the formatted report (original layout).
    return '\n' + fun_parse_climate(response.json())
def fun_parse_climate(data):
    """Format the location and current-conditions sections of an API response."""
    return b_parse_location(data['location']) + b_parse_current(data['current'])
def fun_parse_future_forecast(data):
    """Format location, current conditions and the daily forecast of a response."""
    sections = [
        b_parse_location(data['location']),
        '\n',
        b_parse_current(data['current']),
        '\n',
        b_parse_forecast_days(data['forecast']['forecastday']),
    ]
    return ''.join(sections)
def b_parse_location(data):
    """Render the 'location' section of an API response as labelled lines."""
    lines = [
        "Selected Place : " + data['name'],
        "Region : " + data['region'],
        "Country : " + data['country'],
        "Latitude : " + str(data['lat']),
        "Longitude : " + str(data['lon']),
        "Local Time : " + data['localtime'],
    ]
    return '\n'.join(lines) + '\n'
def b_parse_current(data):
    """Render the 'current' conditions section of a response as labelled lines."""
    lines = [
        "Temperature : " + str(data['temp_c']),
        "Humidity : " + str(data['humidity']),
        "Condition : " + data['condition']['text'],
        "Wind (KPH) : " + str(data['wind_kph']),
        "Wind Direction : " + data['wind_dir'],
        "Cloud : " + str(data['cloud']),
        "Last Updated on : " + data['last_updated'],
    ]
    return '\n'.join(lines) + '\n'
def b_parse_forecast_days(data):
    """Format the 'forecastday' list: a per-day summary plus hourly details.

    Args:
        data: the ``forecast['forecastday']`` list from a weatherapi.com
            forecast response.

    Returns:
        A formatted multi-line string.
    """
    s1 = ''
    for i in data:
        # Newline after the date so the details start on their own line
        # (previously the date and 'Details :' ran together).
        s1 += 'Date : ' + i['date'] + '\n'
        s1 += 'Details : ' + '\n'
        s1 += 'Maximum Temperature -' + str(i['day']['maxtemp_c']) + '\n'
        s1 += 'Minimum Temperature -' + str(i['day']['mintemp_c']) + '\n'
        s1 += 'Average Temperature -' + str(i['day']['avgtemp_c']) + '\n'
        s1 += 'Condition -' + i['day']['condition']['text'] + '\n'
        s1 += '\n'
        s1 += 'Hourly Climate Details\n\n'
        for j in i['hour']:
            s1 += 'Time - ' + j['time'] + '\n'
            s1 += 'Temperature - ' + str(j['temp_c']) + '\n'
            s1 += 'Condition - ' + j['condition']['text'] + '\n'
            s1 += 'Wind (KPH) - ' + str(j['wind_kph']) + '\n'
            s1 += 'Humidity - ' + str(j['humidity']) + '\n'
            s1 += 'Cloud - ' + str(j['cloud']) + '\n'
            # str() guard: the API returns chance_of_rain as a number, which
            # previously raised TypeError on string concatenation.
            s1 += 'Chance of Rain (%) - ' + str(j['chance_of_rain']) + '\n'
            s1 += '\n'
        s1 += '\n'
    return s1
# get_climate_by_lat_long(13.6288, 79.4192)
def jp(obj):
    """Pretty-print *obj* as indented JSON with sorted keys."""
    return json.dumps(obj, sort_keys=True, indent=4)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-05-13 20:51:13
# @Author : Mage
# @Link : http://fengmm521.taobao.com
# @Version : $Id$
import os,sys
from magetool import urltool
import json
import time
# Target bilibili user id whose stats are polled.
bilibiliID = 166287840
# REST endpoints for that user's follower count and upload view count.
fansUrl = 'https://api.bilibili.com/x/relation/stat?vmid=%d&jsonp=jsonp'%(bilibiliID)
viewsUrl = 'https://api.bilibili.com/x/space/upstat?mid=%d&jsonp=jsonp'%(bilibiliID)
def sayMsg(fans, views=None):
    """Speak the follower count (and view count, when given) via macOS `say`."""
    if views:
        cmd = '/usr/bin/say 粉丝数:%d,播放数:%d'%(fans,views)
    else:
        cmd = '/usr/bin/say 粉丝数:%d'%(fans)
    os.system(cmd)
def main():
    """Fetch the follower count via a headless Chrome request and speak it."""
    raw = urltool.getUrlWithChrome(fansUrl)
    raw = urltool.conventStrTOUtf8(raw)
    print(raw)
    # Response shape: {"code":0,...,"data":{...,"follower":N}}
    stats = json.loads(raw)
    fans = stats['data']['follower']
    time.sleep(1)
    print(viewsUrl)
    # View-count fetching is currently disabled; only followers are announced.
    sayMsg(fans)


if __name__ == '__main__':
    main()
|
import lasagne
import theano.tensor as tt
import numpy as np
import theano
def sfunc(bias, sat_func, *args, **kwargs):
    """Apply *sat_func* to the given arguments and shift the result by *bias*."""
    result = sat_func(*args, **kwargs)
    return result + bias
def gSin(m, v, i=None, e=None):
    """Moment-matched sine of a Gaussian input.

    Given input mean `m` and covariance `v`, returns [M, V, C]: the output
    mean, output covariance, and inv(input cov) times the input-output
    covariance of e*sin(x), restricted to the selected indices `i`.
    """
    D = m.shape[0]
    # Defaults: use every dimension, unit output scaling.
    if i is None:
        i = tt.arange(D)
    if e is None:
        e = tt.ones((D,))
    elif e.__class__ is list:
        e = tt.as_tensor_variable(np.array(e)).flatten()
    elif e.__class__ is np.array:
        e = tt.as_tensor_variable(e).flatten()
    mi = m[i]
    vi = v[i, :][:, i]
    vii = v[i, i]
    # Output mean: E[sin(x)] = exp(-var/2) * sin(mean).
    exp_vii_h = tt.exp(-vii / 2)
    M = exp_vii_h * tt.sin(mi)
    # Output covariance.
    vii_c = vii.dimshuffle(0, 'x')
    vii_r = vii.dimshuffle('x', 0)
    lq = -0.5 * (vii_c + vii_r)
    q = tt.exp(lq)
    mi_c = mi.dimshuffle(0, 'x')
    mi_r = mi.dimshuffle('x', 0)
    U1 = (tt.exp(lq + vi) - q) * (tt.cos(mi_c - mi_r))
    U2 = (tt.exp(lq - vi) - q) * (tt.cos(mi_c + mi_r))
    V = 0.5 * (U1 - U2)
    # inv(input covariance) dot input-output covariance.
    C = tt.diag(exp_vii_h * tt.cos(mi))
    # Scale the outputs by e.
    M = e * M
    V = tt.outer(e, e) * V
    C = e * C
    return [M, V, C]
def gSat(m, v=None, i=None, e=None):
    '''Moment-matched saturation (PILCO): sat(x) = (9*sin(x) + sin(3*x))/8.

    Squashes the input signal to (-1, 1).  When `v` is given, returns the
    output mean, covariance and inv(input cov) times the input-output
    covariance, so the joint p(input, output) can be treated as a
    multivariate Gaussian.'''
    D = m.shape[0]
    if i is None:
        i = tt.arange(D)
    if e is None:
        e = tt.ones((D,))
    elif e.__class__ is list:
        e = tt.as_tensor_variable(np.array(e)).flatten()
    elif e.__class__ is np.array:
        e = tt.as_tensor_variable(e).flatten()
    e = e.astype(m.dtype)
    # Deterministic case: no input uncertainty.
    if v is None:
        return e * (9 * tt.sin(m) + tt.sin(3 * m)) / 8
    # Joint distribution of x and 3x.
    Q = tt.vertical_stack(tt.eye(D), 3 * tt.eye(D))
    ma = Q.dot(m)
    va = Q.dot(v).dot(Q.T)
    # Moments of 9*sin(x)/8 and sin(3*x)/8 via gSin.
    i1 = tt.concatenate([i, i + D])
    e1 = tt.concatenate([9.0 * e, e]) / 8.0
    M2, V2, C2 = gSin(ma, va, i1, e1)
    # Recombine into (9*sin(x) + sin(3*x))/8.
    P = tt.vertical_stack(tt.eye(D), tt.eye(D))
    M = M2.dot(P)
    V = P.T.dot(V2).dot(P)
    C = Q.T.dot(C2).dot(P)
    return [M, V, C]
def tanhSat(u, e):
    """Saturate u to (-e, e) via a scaled tanh."""
    return e * tt.tanh(u)
def sigmoidSat(u, e):
    """Saturate u to (-e, e) via a rescaled sigmoid."""
    return e * (2 * tt.nnet.sigmoid(u) - 1)
def maxSat(u, e):
    """Hard-clip u to the interval [-e, e]."""
    return tt.minimum(tt.maximum(u, -e), e)
|
from django.contrib.auth.models import User
from rest_framework import serializers
from posts.models import Channel
class ChannelUserSerializer(serializers.ModelSerializer):
    """Minimal user representation (id and username) for channel payloads."""

    class Meta:
        model = User
        fields = ('id', 'username')
class BaseChannelSerializer(serializers.ModelSerializer):
    """Common channel fields shared by the concrete channel serializers."""

    class Meta:
        model = Channel
        fields = ('id', 'title', 'summary', 'rules',)
        read_only_fields = ('id',)
class ChannelCreateSerializer(BaseChannelSerializer):
    """Channel creation payload: the base fields plus the owning admin."""

    class Meta:
        model = BaseChannelSerializer.Meta.model
        fields = BaseChannelSerializer.Meta.fields + ('admin',)
        read_only_fields = BaseChannelSerializer.Meta.read_only_fields
class ChannelListRetrieveSerializer(BaseChannelSerializer):
    """Read serializer augmenting channels with viewer-relative flags."""

    is_author = serializers.SerializerMethodField()
    is_admin = serializers.SerializerMethodField()
    followers_count = serializers.SerializerMethodField()
    is_follower = serializers.SerializerMethodField()

    class Meta:
        model = Channel
        fields = BaseChannelSerializer.Meta.fields + ('is_admin', 'is_author', 'followers_count', 'is_follower')

    @staticmethod
    def get_followers_count(instance: Channel):
        # Total follower count, independent of the requesting user.
        return instance.followers.count()

    def get_is_follower(self, instance: Channel):
        # Whether the requesting user follows this channel.
        requester_id = self.context['request'].user.id
        return instance.followers.filter(id=requester_id).exists()

    def get_is_author(self, instance: Channel):
        # Whether the requesting user is one of the channel's authors.
        requester = self.context['request'].user
        return instance.authors.filter(id=requester.id).exists()

    def get_is_admin(self, instance: Channel):
        # Whether the requesting user administers this channel.
        return instance.admin == self.context['request'].user
class ChannelAdminSerializer(BaseChannelSerializer):
    """Admin-facing channel serializer; currently identical to the base fields."""
    class Meta:
        model = Channel
        fields = BaseChannelSerializer.Meta.fields
        read_only_fields = BaseChannelSerializer.Meta.read_only_fields
|
import argparse
import logging
from operatorcert import iib, utils
from typing import Any, List
import time
import os
from datetime import datetime, timedelta
LOGGER = logging.getLogger("operator-cert")
def setup_argparser() -> argparse.ArgumentParser:
    """
    Setup argument parser

    Returns:
        argparse.ArgumentParser: Initialized argument parser
    """
    parser = argparse.ArgumentParser(description="Publish bundle to index image")
    parser.add_argument(
        "--bundle-pullspec", required=True, help="Operator bundle pullspec"
    )
    parser.add_argument(
        "--from-index", required=True, help="Base index pullspec (without tag)"
    )
    parser.add_argument(
        "--indices",
        required=True,
        nargs="+",
        help="List of indices the bundle supports, e.g --indices registry/index:v4.9 registry/index:v4.8",
    )
    parser.add_argument(
        "--iib-url",
        default="https://iib.engineering.redhat.com",
        help="Base URL for IIB API",
    )
    parser.add_argument("--verbose", action="store_true", help="Verbose output")
    return parser
def wait_for_results(iib_url: str, batch_id: int, timeout=30 * 60, delay=20) -> Any:
    """
    Poll IIB until every build in the batch finishes or the timeout expires.

    Args:
        iib_url (str): Base URL of the IIB API
        batch_id (int): IIB batch identifier
        timeout (int, optional): Maximum wait time in seconds. Defaults to 30*60.
        delay (int, optional): Delay between build polling in seconds. Defaults to 20.

    Returns:
        Any: Last IIB batch response, or None when the timeout is reached
    """
    start_time = datetime.now()
    while True:
        response = iib.get_builds(iib_url, batch_id)
        builds = response["items"]

        # all builds have completed
        if all(build.get("state") == "complete" for build in builds):
            LOGGER.info(f"IIB batch build completed successfully: {batch_id}")
            return response

        # at least one build failed - report the failed ones only
        if any(build.get("state") == "failed" for build in builds):
            for build in builds:
                # BUGFIX: the original logged *every* build in the batch as
                # failed, including the ones that completed successfully
                if build.get("state") != "failed":
                    continue
                LOGGER.error(f"IIB build failed: {build['id']}")
                state_history = build.get("state_history", [])
                if state_history:
                    reason = state_history[0].get("state_reason")
                    LOGGER.info(f"Reason: {reason}")
            return response

        LOGGER.debug(f"Waiting for IIB batch build: {batch_id}")
        LOGGER.debug("Current states [build id - state]:")
        for build in builds:
            LOGGER.debug(f"{build['id']} - {build['state']}")

        if datetime.now() - start_time > timedelta(seconds=timeout):
            LOGGER.error(f"Timeout: Waiting for IIB batch build failed: {batch_id}.")
            return None

        LOGGER.info(f"Waiting for IIB batch build to finish: {batch_id}")
        time.sleep(delay)
def publish_bundle(
    from_index: str, bundle_pullspec: str, iib_url: str, index_versions: List[str]
) -> None:
    """
    Publish a bundle to index images using IIB.

    Args:
        from_index: base index pullspec (without tag)
        bundle_pullspec: bundle pullspec to add
        iib_url: URL of the IIB instance
        index_versions: list of index versions (tags) to publish to

    Raises:
        Exception: raised when the IIB build fails or times out

    Registry credentials are read from the QUAY_USER / QUAY_TOKEN
    environment variables.
    """
    user = os.getenv("QUAY_USER")
    token = os.getenv("QUAY_TOKEN")
    payload = {"build_requests": []}
    # one build request per supported index version
    for version in index_versions:
        payload["build_requests"].append(
            {
                "from_index": f"{from_index}:{version}",
                "bundles": [bundle_pullspec],
                "overwrite_from_index": True,
                "add_arches": ["amd64", "s390x", "ppc64le"],
                "overwrite_from_index_token": f"{user}:{token}",
            }
        )
    resp = iib.add_builds(iib_url, payload)
    # all requests submitted together share a single batch id
    batch_id = resp[0]["batch"]
    response = wait_for_results(iib_url, batch_id)
    if response is None or not all(
        [build.get("state") == "complete" for build in response["items"]]
    ):
        raise Exception("IIB build failed")
def parse_indices(indices: List[str]) -> List[str]:
    """
    Extract the version tag from each index pullspec.

    e.g [registry/index:v4.9, registry/index:v4.8] -> [v4.9, v4.8]

    Args:
        indices: List of index pullspecs

    Returns:
        Version tags, in the same order as the input

    Raises:
        Exception: when a pullspec has no ":<version>" suffix
    """
    versions = []
    for index in indices:
        # rpartition splits on the right-most ":" so a registry port
        # (e.g. registry:5000/index:v4.9) does not confuse the parse
        _, separator, version = index.rpartition(":")
        if not separator:
            # no ":" present at all
            raise Exception(f"Unable to extract version from index {index}")
        versions.append(version)
    return versions
def main() -> None:  # pragma: no cover
    """
    Main function: parse CLI args, configure logging and Kerberos,
    then publish the bundle to every requested index version.
    """
    parser = setup_argparser()
    args = parser.parse_args()

    log_level = "INFO"
    if args.verbose:
        log_level = "DEBUG"
    logging.basicConfig(level=log_level)

    # IIB authentication uses Kerberos; default keytab path can be overridden
    utils.set_client_keytab(os.environ.get("KRB_KEYTAB_FILE", "/etc/krb5.krb"))

    publish_bundle(
        args.from_index, args.bundle_pullspec, args.iib_url, parse_indices(args.indices)
    )


if __name__ == "__main__":  # pragma: no cover
    main()
|
import numpy as np
from numpy import exp, log
from scipy.special import digamma, gamma, loggamma, polygamma, logsumexp
from math import pi
from collections import Counter, OrderedDict
import pickle
import time
from _online_lda_fast import _dirichlet_expectation_2d, _dirichlet_expectation_1d_
from sklearn.feature_extraction.text import CountVectorizer
# Machine epsilon for double precision. `np.float` was deprecated in NumPy
# 1.20 and removed in 1.24; the builtin `float` is the documented
# replacement and aliases the same 64-bit type.
EPS = np.finfo(float).eps
class SAGE_VI:
    """Variational inference for a SAGE-style topic model.

    Latent variables: per-document topic proportions theta_d (Dirichlet),
    token topic assignments z_dn, log-deviation word weights eta (Gaussian
    with per-entry variance tau), and tau itself (exponential prior with
    rate `delta`). Variational family: phi (multinomial, per document),
    gamma (Dirichlet), and a Gamma(a, b) factor for tau; eta is point-
    optimized with a Newton/Armijo scheme.
    """
    def __init__(self, path_data, alpha, delta, K, sampling):
        # loading data (a pickled list of tokenized documents)
        self.data = pickle.load(open(path_data, 'rb'))
        if sampling:
            # deterministic 1000-document subsample for quick experiments
            np.random.seed(0)
            idx = np.random.choice(len(self.data), 1000, replace=False)
            self.data = [j for i, j in enumerate(self.data) if i in idx]
        self.alpha = alpha # hyperparameter; dimension: T * 1 but assume symmetric prior
        self.delta = delta # hyperparameter for exponential distribution
        self.K = K
        self.perplexity = []

    def _make_vocab(self):
        """Build the vocabulary and the document-term matrix self.X."""
        self.vocab = []
        for lst in self.data:
            self.vocab += lst
        self.vocab = sorted(list(set(self.vocab)))
        # make DTM
        self.data_join = [' '.join(doc) for doc in self.data]
        self.cv = CountVectorizer()
        self.X = self.cv.fit_transform(self.data_join).toarray()
        self.w2idx = self.cv.vocabulary_
        self.idx2w = {val: key for key, val in self.w2idx.items()}

    def _init_params(self):
        '''
        Initialize parameters for SAGE

        <variational free parameters>
        q(z_dn): Multi(phi)
        q(theta_d): Dir(gamma_d)
        q(tau_ki): Gamma(a,b)
        ## eta will be updated by newtons method

        <latent variables>
        z_dn
        theta_d
        eta
        tau
        '''
        self.V = len(self.w2idx)
        self.D = len(self.data)
        self.Nd = [len(doc) for doc in self.data]
        # m is the background (log) word distribution; zero here
        self.m = np.zeros(self.V)
        # # set initial value for free variational parameters
        # self.phi = np.ones((self.V, self.K)) # dimension: for topic d, Nd * K
        '''
        Set initial values for variational parameters
        '''
        # initialize phi: different for each document (variational parameters for z_dn)
        self.phi = {}
        for d in range(self.D):
            self.phi[d] = np.zeros((self.V, self.K))
        # initialize gamma (variational parameters for theta)
        np.random.seed(1)
        self.gam = np.random.gamma(100, 1/100, (self.D, self.K)) # dimension: D * K
        # initialize a,b (variational parameters for tau)
        np.random.seed(2)
        self.a = np.random.gamma(5, 1, (self.V, self.K)) # dimension: V * K
        np.random.seed(3)
        self.b = np.random.gamma(1, 1, (self.V, self.K)) # dimension: V * K
        # initialize latent eta parameters
        np.random.seed(4)
        self.eta = np.random.gamma(10,1/1000, (self.V, self.K))
        # initialize c_k, C_k, beta_k
        self._cal_small_c_k()
        self._cal_large_C_k()
        self._cal_beta()
        # initialize dirichlet expectation to reduce computation time
        self._update_gam_E_dir()

    def _ELBO(self):
        """Evidence lower bound, assembled term by term (see labels below)."""
        term1 = 0 # E[ log p( w | phi, z) ]
        term2 = 0 # E[ log p( z | theta) ]
        term3 = 0 # E[ log q(z) ]
        term4 = 0 # E[ log p( theta | alpha) ]
        term5 = 0 # E[ log q( theta ) ]
        term6 = 0 # E[ log p(eta | tau) ]
        term7 = 0 # E[ log p(tau | delta) ]
        term8 = 0 # E[ log q(tau) ]
        '''
        ELBO is calculated w.r.t. each document
        Update term1, term2, term5 together
        Update term3, term6 together
        Update term4, term7 together
        ELBO = term1 + term2 + term3 + term4 - term5 - term6 - term7
        '''
        # NOTE(review): the sign pattern in the string above disagrees with
        # the actual return expression at the bottom — confirm which is intended.
        # update term 1, 2, 3, 4, 5
        for d in range(self.D):
            ndw = self.X[d,:]
            for k in range(self.K):
                # update term 1
                tmp = self.eta[:,k] * self.phi[d][:,k]
                # ndw_vec = np.zeros(self.V) # V * 1
                # ndw_vec[ndw] += self.X[d,ndw]
                term1 += (tmp * ndw).sum()
                # update term 2
                tmp = (ndw * self.phi[d][:,k]).sum() # sum of V * 1 numpy arrays: scalar
                E_theta_dk = self.gam_E[d,k] # scalar
                term2 += E_theta_dk * tmp # scalar * scalar = scalar
                # update term 3
                tmp = self.phi[d][:,k] * log(self.phi[d][:,k] + 0.000000001) # for numerical stability
                term3 += (tmp * ndw).sum()
            # update term 4
            term4 += loggamma(self.K * self.alpha) - log(self.K * gamma(self.alpha))
            term4 += (self.alpha - 1) * self.gam_E[d,:].sum()
            # update term 5
            term5 += loggamma(sum(self.gam[d,:])) - sum(loggamma(self.gam[d,:]))
            term5 += ( (self.gam[d,:]-1) * self.gam_E[d,:] ).sum()
        print('Done term 1 ~ 5')

        for k in range(self.K):
            a_k = self.a[:,k]
            b_k = self.b[:,k]
            # update term 6
            term6 += -self.V/2 * np.log(2*pi) - np.log( np.prod( a_k * b_k ) )/2
            # NOTE(review): np.dot's *third* positional argument is the `out`
            # buffer, not another operand — this line likely intends a
            # quadratic form eta^T diag(1/((a-1)b)) eta; verify.
            term6 += -np.dot(self.eta[:,k], 1 / ((a_k-1) * b_k), self.eta[:,k] )/2
            # update term 7
            term7 += ( np.log(self.delta) - a_k * b_k * self.delta ).sum()
            # update term 8
            term8 += ( (a_k-1) * (digamma(a_k) + np.log(b_k)) - a_k - loggamma(a_k) - a_k * np.log(b_k) ).sum()
        print('Done term 6, 7')

        return term1 + term2 - term3 + term4 - term5 + term6 + term7 - term8

    def _eval_log_eta(self,k, eta_k):
        """Objective value (up to constants) of eta for topic k."""
        term1 = 0
        for d in range(self.D):
            N = self.X[d,:] * self.phi[d][:,k] # dimension: V*1
            term1 += np.dot(N, eta_k) # scalar
        logexpsum = np.log( np.sum( np.exp(eta_k + self.m) ) )
        E_tau_invs = 1 / ( (self.a[:,k]-1) * self.b[:,k] )
        return term1 - self.C_k[k] * logexpsum - np.dot(E_tau_invs, np.power(eta_k, 2)) / 2

    def _E_dir(self, params_mat):
        '''
        input: vector parameters of dirichlet
        output: Expecation of dirichlet - also vector
        '''
        return _dirichlet_expectation_2d(params_mat)

    def _E_dir_1d(self, params):
        # 1-D Dirichlet expectation (Cython helper)
        return _dirichlet_expectation_1d_(params)

    def _update_gam_E_dir(self):
        # cache E[log theta] for all documents at once
        self.gam_E = self._E_dir(self.gam.transpose()).transpose()

    def _cal_exp_prob(self, k, Nd_index):
        """Softmax word probabilities for topic k over the given word indices."""
        eta_k = np.exp(self.eta[Nd_index,k] + self.m)
        normalizer = eta_k.sum()
        return eta_k / normalizer

    def _cal_small_c_k(self):
        # c_k[w, k] = expected count of word w assigned to topic k
        self.c_k = np.zeros((self.V, self.K)) # dimension: V * K
        for d in range(self.D):
            self.c_k = self.c_k + self.X[d,:][:,None] * self.phi[d]

    def _cal_large_C_k(self):
        # C_k[k] = total expected count assigned to topic k
        self.C_k = np.sum(self.c_k, axis=0) # dimension: K * 1

    def _cal_beta(self):
        # initialize beta (exponential probabilites)
        numerator = self.eta + self.m[:, None] # dimension: V * K
        numerator = np.exp(numerator)
        # numerator -= np.min(numerator, axis=0)
        normalizer = np.sum(numerator, axis=0) # dimension: 1 * K
        self.beta = numerator / normalizer[None, :]

    def _simple_newtons(self, x0, delta, tol, max_iter, multivariate=False):
        """Fixed-step Newton iteration; `delta` is the precomputed step.

        NOTE(review): max_iter is accepted but never enforced — confirm
        whether an iteration cap was intended.
        """
        x1 = x0 - delta
        if multivariate:
            while sum(abs(x1 - x0)) > tol:
                x0 = x1
                x1 = x0 - delta
        else:
            while abs(x1-x0) > tol:
                x0 = x1
                x1 = x0 - delta
        return x1

    def _update_phi(self, d):
        """Coordinate update of phi for document d (then row-normalize)."""
        # duplicated words are ignored
        Nd_index = np.nonzero(self.X[d,:])[0]
        # get the proportional value in each topic t,
        # and then normalize to make as probabilities
        # the indicator of Z_dn remains only one term
        for k in range(self.K):
            prob_beta = self.beta[Nd_index, k]# Nd * 1: indexing for words in dth document
            E_theta = np.exp(self.gam_E[d,k]) # scalar: indexing for kth topic
            self.phi[d][Nd_index,k] = prob_beta * E_theta # Nd * 1
        ## vectorize to reduce time
        # to prevent overfloat
        #self.phi -= self.phi.min(axis=1)[:,None]
        #self.phi[d][Nd_index,:] = exp(self.phi[d][Nd_index,:])
        # normalize prob
        self.phi[d][Nd_index,:] /= np.sum(self.phi[d][Nd_index,:], axis=1)[:,None]
        # print(None)

    def _update_gam(self,d):
        """Coordinate update of gamma for document d; refreshes its cached E[log theta]."""
        gam_d = np.repeat(self.alpha, self.K)
        ids = np.nonzero(self.X[d,:])[0]
        n_dw = self.X[d,:][ids] # ids*1
        phi_dwk = self.phi[d][ids,:] # ids*K
        gam_d = gam_d + np.dot(n_dw, phi_dwk) # K*1 + K*1
        self.gam[d,:] = gam_d
        self.gam_E[d, :] = self._E_dir_1d(self.gam[d, :])

    def _update_eta(self, beta, tau, init_alpha):
        """Newton optimization of eta_k with Armijo backtracking.

        Args (optimizer knobs, not model parameters):
            beta: Armijo sufficient-decrease constant
            tau: step-shrink factor
            init_alpha: initial step size
        """
        # Assume small c_k and large C_k are already updated.
        # Newton Armijo Update for eta_k
        alpha = init_alpha
        for k in range(self.K):
            # u = self.C_k[k] * np.outer(beta_k, beta_k) # K*K
            # v = self.beta[:,k] # K*1
            # Hessian_k = np.outer(u,v) + A
            # Hessian_k_inverse = A_inv - self.C_k[k] * np.dot(A_inv, np.outer(beta_k, beta_k), A_inv ) \
            # / 1 + self.C_k[k] * np.dot(beta_k, A_inv, beta_k)
            E_tau_inv = 1 / ((self.a[:, k] - 1) * self.b[:, k]) # K*1
            eta0 = np.random.gamma(10,1/100,(self.V,))
            old_score = self._eval_log_eta(k, eta0)
            eta1 = np.random.gamma(100,1/100,(self.V,))
            new_score = self._eval_log_eta(k, eta1)
            tol = 0.01
            # Newtons optimization
            while abs(old_score - new_score) > tol:
                eta0 = eta1
                old_score = new_score
                numerator = eta0 + self.m
                #numerator -= np.min(numerator)
                numerator = np.exp(numerator)
                normalizer = np.sum(numerator)
                # numerator = eta0 + self.m
                # normalizer = np.exp(logsumexp(numerator))
                beta_k = numerator / normalizer
                diag_element = - (self.C_k[k] * beta_k + E_tau_inv)
                # A = np.diag(diag_element) # K*K
                tmp = beta_k / diag_element
                # A_inv = np.diag(1 / diag_element)
                # tmp = np.dot(A_inv, beta_k)
                grad_k = self.c_k[:,k] - self.C_k[k] * beta_k - E_tau_inv * eta0
                tmp2 = grad_k / diag_element
                # delta with minus (Sherman-Morrison form of the Newton step)
                step = tmp2 - self.C_k[k] * np.dot( tmp,
                                                    np.dot(beta_k, tmp2)) \
                       / (1 + self.C_k[k] * np.dot(beta_k, tmp))
                new_score = self._eval_log_eta(k, eta0 + alpha*step)
                # do armijo linear-search to find optimized step size, alpha
                # NOTE(review): for a maximization the usual Armijo test shrinks
                # alpha while the new score is *insufficiently larger* — the
                # `>` direction here looks inverted; confirm.
                while new_score > old_score + beta * alpha * np.dot(grad_k, step):
                    alpha = alpha*tau
                    new_score = self._eval_log_eta(k, eta0 + alpha*step)
                # optimize eta with adjusted step size alpha
                eta1 = eta0 + alpha*step
                alpha = 1 # walk back one step
            self.eta[:,k] = eta1
            print(f'finished {k}th topic in eta')

    def _update_a(self, max_iter):
        """Per-entry Newton update of the Gamma shape parameter a.

        NOTE(review): max_iter is accepted but the loop only stops on `tol` —
        confirm whether an iteration cap was intended.
        """
        for k in range(self.K):
            for w in range(self.V):
                b = self.b[w,k]
                eta = self.eta[w,k]
                a0 = 2
                a1 = 1
                tol = 0.001
                # Newtons optimization
                while abs(a1 - a0) > tol:
                    a0 = a1
                    numerator = -polygamma(1,a0) * (a0-1/2) + eta**2 / (2*b*(a0-1)**2) - b*self.delta + 1
                    denominator = polygamma(2,a0) * (a0-1/2) + polygamma(1,a0) + eta**2 / (b*(a0-1)**3)
                    # this delta is without minus
                    delta = numerator / denominator
                    a1 = a0 - delta
                self.a[w,k] = a1
            print(f'finished {k}th topic in a')

    def _update_b(self):
        """Closed-form update of the Gamma scale parameter b (per topic)."""
        for k in range(self.K):
            eta_k = self.eta[:,k]
            a_k = self.a[:,k]
            numerator = 1+ np.sqrt(1 + 8*self.delta * np.power(eta_k, 2) * a_k / (a_k - 1)) # K*1
            denominator = 4*self.delta*a_k # K*1
            self.b[:,k] = numerator / denominator

    def train(self, threshold, max_iter):
        """Run variational EM until the ELBO change drops below `threshold`
        or `max_iter` outer iterations are reached. ELBO/perplexity are
        evaluated every 50 iterations only (they are expensive)."""
        print('Making Vocabs...')
        self._make_vocab()
        print('Initializing Parms...')
        self._init_params()
        print(f'# of Documents: {self.D}')
        print(f'# of unique vocabs: {self.V}')
        print(f'{self.K} topics chosen')
        print('Start optimizing!')
        # initialize ELBO
        ELBO_before = 0
        ELBO_after = 99999
        self._ELBO_history = []
        print('##################### start training #####################')
        for iter in range(max_iter):
            start = time.time()
            ELBO_before = ELBO_after
            print('\n')
            print('E step: start optimizing phi, gamma...')
            self.gam = np.ones((self.D, self.K))
            self._update_gam_E_dir()
            for d in range(self.D):
                # alternate phi/gamma updates until gamma converges for doc d
                gam_before = self.gam[d,:]
                gam_after = np.repeat(999,self.K)
                while sum(abs(gam_before - gam_after)) / self.K > threshold:
                    gam_before = gam_after
                    self._update_phi(d)
                    self._update_gam(d)
                    gam_after = self.gam[d,:]
            # update small c_k, large C_k
            self._cal_small_c_k()
            self._cal_large_C_k()
            self._cal_beta()
            # update beta_star
            print('M step: Updating eta..')
            self._update_eta(beta=1e-4, tau=0.25, init_alpha=1)
            self._update_a(max_iter=1000)
            self._update_b()
            # update exponential probabilities
            print('Finished Iteration!')
            print('\n')
            if iter % 50 == 0:
                print('Now calculating ELBO...')
                ELBO_after = self._ELBO()
                self._ELBO_history.append(ELBO_after)
                self._perplexity(ELBO_after)
                print(f'Before ELBO: {ELBO_before}')
                print(f'After ELBO: {ELBO_after}')
                print('\n')
                if abs(ELBO_before - ELBO_after) < threshold:
                    break
            print(f'Computation time: {(time.time()-start)/60} mins')
        print('Done Optimizing!')

    def _perplexity(self, ELBO):
        '''
        Calculates Approximated Perplexity
        '''
        denominator = sum(self.Nd)
        self.perplexity.append( exp(-ELBO / denominator) )
|
# coding:utf8
import os
import random
from configparser import ConfigParser
class RandomProxyMiddleware(object):
    """Scrapy middleware that routes requests through a Luminati proxy.

    The session id is baked into the proxy credentials, so generating a
    new proxy URL also rotates the upstream session (and thus the exit IP).
    """

    def __init__(self):
        # pick an initial proxy; rotated again whenever a request errors out
        self.proxy = self.proxy_generator()

    def proxy_generator(self):
        """Build a proxy URL from resources/credentials.ini.

        Returns:
            The proxy URL string, or None when no [proxy] section is configured.
        """
        # read config file
        parser = ConfigParser()
        parser.read(os.path.dirname(os.path.realpath(__file__)) + '/../resources/credentials.ini')
        if parser.has_section('proxy'):
            credentials = dict(parser.items('proxy'))
            # a fresh random session id makes Luminati assign a new exit node
            session_id = random.random()
            proxy = 'http://{}-session-{}:{}@zproxy.lum-superproxy.io:{}'.format(
                credentials['username'],
                session_id,
                credentials['password'],
                credentials['port']
            )
            return proxy
        # no [proxy] section: explicit None (the original fell off the end)
        return None

    def process_request(self, request, spider):
        request.meta['proxy'] = self.proxy

    def process_exception(self, request, exception, spider):
        # BUGFIX: rotate to a fresh session instead of re-assigning the very
        # proxy that just failed (the original retried the same session forever)
        self.proxy = self.proxy_generator()
        request.meta['proxy'] = self.proxy
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
class Personel(models.Model):
    """An employee working at the company (personnel roster entry)."""
    # employee display name
    personel_adi = models.CharField(max_length=50)

    def __str__(self):
        return self.personel_adi
class Project(models.Model):
    """A construction project together with its full staffing roster.

    Most role fields are free-text names (CharField) rather than foreign
    keys to Personel; only `proje_personeli` links to Personel rows.
    """
    COUNTRIES = (('TURKIYE', 'Türkiye'), ('GERMANY', 'Almanya'), ('FRANCE', 'Fransa'))
    # NOTE(review): `name="Ülke"` overrides the *field name* (the ORM/DB
    # identifier) with a non-ASCII string — `verbose_name="Ülke"` was almost
    # certainly intended. Changing it now alters the schema; confirm first.
    ulke = models.CharField(max_length=50, name="Ülke", choices=COUNTRIES, null=True, blank=True)
    bolge = models.CharField(max_length=50)
    firma = models.CharField(max_length=200)
    proje_adi = models.CharField(max_length=200)
    proje_turu = models.CharField(max_length=50)
    kod = models.CharField(max_length=4)
    proje_suresi = models.CharField(max_length=10, blank=True, null=True)
    adres = models.TextField()
    telefon = models.CharField(max_length=15)
    faks = models.CharField(max_length=15)
    # management / engineering roles (all optional free-text names)
    genel_mudur = models.CharField(max_length=50, blank=True, null=True, verbose_name="Genel Müdür", )
    proje_direktoru = models.CharField(max_length=50, blank=True, null=True)
    proje_muduru = models.CharField(max_length=50, blank=True, null=True)
    proje_mudur_yrd = models.CharField(max_length=50, blank=True, null=True)
    santiye_sefi = models.CharField(max_length=50, blank=True, null=True)
    teknik_ofis_muduru = models.CharField(max_length=50, blank=True, null=True)
    teknik_ofis_elemani = models.CharField(max_length=50, blank=True, null=True)
    dizayn_ofis_sefi = models.CharField(max_length=50, blank=True, null=True)
    planlama_sefi = models.CharField(max_length=50, blank=True, null=True)
    butce_kontrol_muduru = models.CharField(max_length=50, blank=True, null=True)
    ince_isler_sefi = models.CharField(max_length=50, blank=True, null=True)
    elektromekanik_muduru = models.CharField(max_length=50, blank=True, null=True)
    mekanik_isler_sefi = models.CharField(max_length=50, blank=True, null=True)
    elektrik_isler_sefi = models.CharField(max_length=50, blank=True, null=True)
    idari_isler_sefi = models.CharField(max_length=50, blank=True, null=True)
    altyapi_isler_sefi = models.CharField(max_length=50, blank=True, null=True)
    ihale_ve_sozlesme_uzmani = models.CharField(max_length=50, blank=True, null=True)
    maliyet_kontrol_sorumlusu = models.CharField(max_length=50, blank=True, null=True)
    kalite_kontrol_muhendisi = models.CharField(max_length=50, blank=True, null=True)
    finans_muduru = models.CharField(max_length=50, blank=True, null=True)
    muhasebe_muduru = models.CharField(max_length=50, blank=True, null=True)
    muhasebe_sorumlusu = models.CharField(max_length=50, blank=True, null=True)
    insan_kaynaklari = models.CharField(max_length=50, blank=True, null=True)
    satinalma_genel_muduru = models.CharField(max_length=50, blank=True, null=True)
    satinalma_muduru = models.CharField(max_length=50, blank=True, null=True)
    merkez_satinalma_sorumlusu1 = models.CharField(max_length=50, blank=True, null=True)
    merkez_satinalma_sorumlusu2 = models.CharField(max_length=50, blank=True, null=True)
    santiye_satinalma_sorumlusu1 = models.CharField(max_length=50, blank=True, null=True)
    santiye_satinalma_sorumlusu2 = models.CharField(max_length=50, blank=True, null=True)
    depo_sorumlusu = models.CharField(max_length=50, blank=True, null=True)
    depo_elemani = models.CharField(max_length=50, blank=True, null=True)
    # NOTE(review): null=True has no effect on ManyToManyField (Django emits
    # warning fields.W340) — harmless, but can be dropped in a cleanup.
    proje_personeli = models.ManyToManyField(Personel,null=True)

    def __str__(self):
        return self.proje_adi
|
#!/usr/bin/env python
PACKAGE = "ocular"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# arguments: name, type, level, description, default, min, max
gen.add("scan_frame", int_t, 0, "Update every 'scan_frame'", 2, 1, 10)
# generate the dynamic_reconfigure headers and exit with its status code
exit(gen.generate(PACKAGE, "ocular", "ocularconf"))
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 08:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: removes the Lecturer model and localises
    the classroom/lecture verbose names and field labels (Turkish)."""

    dependencies = [
        ('course', '0004_auto_20170216_0811'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Lecturer',
        ),
        migrations.AlterModelOptions(
            name='classroom',
            options={'verbose_name': 'Sınıf', 'verbose_name_plural': 'Sınıflar'},
        ),
        migrations.AlterModelOptions(
            name='lecture',
            options={'verbose_name': 'Ders', 'verbose_name_plural': 'Dersler'},
        ),
        migrations.AlterField(
            model_name='lecture',
            name='day',
            field=models.SmallIntegerField(choices=[(1, 'Pazartesi'), (2, 'Salı'), (3, 'Çarşamba'), (4, 'Perşembe'), (5, 'Cuma'), (6, 'Cumartesi'), (7, 'Pazar')], verbose_name='gün'),
        ),
        migrations.AlterField(
            model_name='lecture',
            name='end_time',
            field=models.TimeField(verbose_name='bitiş saati'),
        ),
        migrations.AlterField(
            model_name='lecture',
            name='name',
            field=models.CharField(max_length=250, verbose_name='ders adı'),
        ),
        migrations.AlterField(
            model_name='lecture',
            name='start_time',
            field=models.TimeField(verbose_name='başlangıç saati'),
        ),
    ]
|
#!/usr/bin/env python3
# Converts a list of hosts to my usual config snippet
# Usage: host_to_config [host1 fqdn] ([host2 fqdn] (…))
import socket
import sys
# SmokePing config snippet templates; t_short is the first DNS label.
template = """
++ {t_short}
title = {t_fqdn} IPv4
menu = {t_short} IPv4
probe = FPing
host = {t_fqdn}"""

template_v6 = """
++ {t_short}_v6
title = {t_fqdn} IPv6
menu = {t_short} IPv6
probe = FPing6
host = {t_fqdn}"""

if len(sys.argv) == 1:
    print("Usage: host_to_config [host1 fqdn] ([host2 fqdn] (…))")
    exit(0)

for fqdn in sys.argv[1:]:
    ai = socket.getaddrinfo(fqdn, 80)
    short = fqdn.split('.')[0]
    # getaddrinfo() returns (family, type, proto, canonname, sockaddr) tuples.
    # BUGFIX: the original used `socket.AddressFamily.AF_INET in elem`, but
    # `in` compares *every* tuple member by value and, as IntEnums,
    # SOCK_DGRAM == AF_INET == 2 — so an IPv6-only host with a UDP entry was
    # misreported as having IPv4. Compare the family field explicitly.
    if any(elem[0] == socket.AF_INET for elem in ai):
        print(template.format(t_fqdn=fqdn, t_short=short))
    if any(elem[0] == socket.AF_INET6 for elem in ai):
        print(template_v6.format(t_fqdn=fqdn, t_short=short))
|
#Google Colabでやっているので少し違うかもしれない
!git clone https://github.com/neubig/nlptutorial.git #gitからCloneする
def word_count(inputs):
    """Count space-separated token frequencies over an iterable of lines.

    Note: a blank line contributes one empty-string token, matching the
    behaviour of str.split(" ") on "".
    """
    counts = {}
    for line in inputs:
        for token in line.strip().split(" "):
            counts[token] = counts.get(token, 0) + 1
    return counts
def unit_test(answers, dicts):
    """Verify computed word counts against answer-file lines.

    Args:
        answers: iterable of lines formatted as "<word> <count>"
        dicts: mapping word -> count to be checked

    Returns:
        1 when every answer line matches, 0 on the first mismatch
        (including when a word is absent from `dicts` — the original
        raised KeyError in that case).
    """
    for line in answers:
        parts = line.strip().split(" ")
        # .get() turns a missing word into a mismatch instead of a crash
        if dicts.get(parts[0]) != int(parts[1]):
            return 0
    return 1
import random
from collections import defaultdict

# Open the files needed for the self-test
t_input = open('/content/nlptutorial/test/00-input.txt')
t_answer = open('/content/nlptutorial/test/00-answer.txt')

# Self-test: count the toy input and compare against the bundled answers
t_dict = word_count(t_input)
for foo, bar in sorted(t_dict.items()):
    print("%s %r" % (foo, bar))
print(unit_test(t_answer, t_dict))
t_input.close()
t_answer.close()

# Exercise: word counts over the English Wikipedia training corpus
p_input = open('/content/nlptutorial/data/wiki-en-train.word')
p_dict = word_count(p_input)
#answer = open('/content/wiki-en-answer.txt', 'w')
print("Number of Word: %i" % len(p_dict))
# print ten randomly sampled (word, count) pairs
for i in range(10):
    foo, bar = random.choice(list(p_dict.items()))
    print("%s %r" % (foo, bar))
#for foo, bar in sorted(p_dict.items()):
# answer.writelines("%s %r\n" % (foo, bar))
# print("%s %r" % (foo, bar))
'''
1. Simulate dealing cards for 'Dou Dizhu' (Fight the Landlord).

The deck has 54 cards:
suits:
    spade('\u2660'), club('\u2663'), diamond('\u2665'), heart('\u2666')
plus the big and small jokers
ranks:
    A, 2-10, J, Q, K

1) build the 54-card deck
2) three players get 17 cards each; 3 cards stay face down
   press Enter to print player 1's 17 cards
   press Enter to print player 2's 17 cards
   press Enter to print player 3's 17 cards
   press Enter to print the 3 face-down cards
'''
import random

color = ['\u2660','\u2663','\u2665','\u2666'] # the four suits
poker = ['大王','小王'] # big joker, small joker
num = ['A','2','3','4','5','6','7','8','9','10','J','Q','K'] # ranks

for c in color: # every suit
    for x in num: # every rank
        p = c + x # combine suit and rank into one card
        poker.append(p)
print("完整扑克",poker)

random.shuffle(poker) # shuffle the deck in place
input()
print("第一个人的牌为:",poker[:17])
input()
print("第二个人的牌为:",poker[17:34])
input()
print("第三个人的牌为:",poker[34:51])
input()
print("底牌为:",poker[-3:])
from .AbsRetrievalTask import AbsRetrievalTask
class QuoraRetrieval(AbsRetrievalTask):
    """Retrieval benchmark task over Quora duplicate-question pairs."""
    # prepared dataset location and local cache filename
    download_url = 'https://public.ukp.informatik.tu-darmstadt.de/reimers/seb/datasets/quora_retrieval.json.gz'
    local_file_name = 'quora_retrieval.json.gz'

    @property
    def description(self):
        """Task metadata dict consumed by the benchmark harness."""
        return {
            "name": "QuoraRetrieval",
            "description": "QuoraRetrieval is based on questions that are marked as duplicates on the Quora platform. Given a question, find other (duplicate) questions.",
            "reference": "https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs",
            "type": "retrieval",
            "available_splits": ["dev", "test"],
            "main_score": "map",
        }
|
import urllib2
import concurrent.futures
import logging
import ConfigParser
import wx
logging.basicConfig()
#URL = 'http://svr-dev-20:8080/?cmd=isGlowing'
LAMP_ON = '1'
LAMP_OFF = '0'
LAMP_UNDEFINED = '?'
UNDEFINED_STATE = [LAMP_UNDEFINED, 'Getting invalid response(s) from server']
OK_ICON = 'data\\ok.png'
FAIL_ICON = 'data\\fail.png'
WARN_ICON = 'data\\warn.png'
CONFIG_FILE = 'data\\lava.cfg'
MAIN_SECTION = 'Main'
TIMER_INTERVAL_KEY = 'timer_interval'
URL_KEY = 'url'
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
def get_latest_status():
    """Fetch the build-lamp status from the configured server (Python 2 code).

    Returns a two-element list [lamp_state, message]; malformed responses
    fall back to UNDEFINED_STATE.
    """
    print ("getting latest status")
    #raise Exception()
    content = urllib2.urlopen(config.get(MAIN_SECTION, URL_KEY)).read()
    # expected wire format: "<0|1>|<message>"
    status = content.split('|')
    if ((status[0] == LAMP_ON or status[0] == LAMP_OFF) and len(status) == 2):
        return status
    else:
        print 'Invalid response: ' + content
        return UNDEFINED_STATE
def create_menu_item(menu, label, func):
    """Append a labelled item to a wx menu and bind `func` to its click event."""
    item = wx.MenuItem(menu, -1, label)
    menu.Bind(wx.EVT_MENU, func, id=item.GetId())
    menu.AppendItem(item)
    return item
class TaskBarIcon(wx.TaskBarIcon):
    """System-tray icon that mirrors the CI lava-lamp state.

    Polls the status server on a wx.Timer (and on left-click), doing the
    network call on a thread pool so the UI never blocks; results are
    marshalled back to the UI thread with wx.CallAfter.
    """
    def __init__(self):
        super(TaskBarIcon, self).__init__()
        self.timer = wx.Timer(self)
        #self.set_icon(OK_ICON)
        self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
        self.Bind(wx.EVT_TASKBAR_LEFT_DOWN, self.on_left_down)
        self.timer.Start(config.getint(MAIN_SECTION, TIMER_INTERVAL_KEY))
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
        self.current_future = None
        # current_state is a [lamp_state, message] pair
        self.current_state = ['','']
        self.change_state(UNDEFINED_STATE)
        # first fetch happens synchronously on startup
        self.change_state(get_latest_status())

    def CreatePopupMenu(self):
        menu = wx.Menu()
        create_menu_item(menu, 'Exit', self.on_exit)
        return menu

    def set_icon(self, path, tooltip):
        icon = wx.IconFromBitmap(wx.Bitmap(path))
        self.SetIcon(icon, tooltip)

    def on_exit(self, event):
        wx.CallAfter(self.Destroy)

    def on_timer(self, event):
        self.run_async()

    def run_async(self):
        """Fetch the status on the pool, then dispatch the result to the UI thread."""
        future = self.executor.submit(get_latest_status)
        def func(future):
            wx.CallAfter(self.on_new_result, future)
        future.add_done_callback(func)

    def on_left_down(self, event):
        self.run_async()

    def change_state(self, new_state):
        """Update icon/balloon when the lamp state changed; no-op otherwise."""
        #print("new state: "+new_state)
        if (new_state[0] == self.current_state[0]):
            return
        if (new_state[0] == LAMP_OFF):
            self.set_icon(OK_ICON, new_state[1])
            self.ShowBalloon("Build OK", new_state[1], 0, wx.ICON_INFORMATION)
        elif (new_state[0] == LAMP_ON):
            self.set_icon(FAIL_ICON, new_state[1])
            self.ShowBalloon("Build failed", new_state[1], 0, wx.ICON_ERROR)
        else:
            # anything unrecognised is normalised to the undefined state
            new_state = UNDEFINED_STATE
            self.set_icon(WARN_ICON, new_state[1])
        self.current_state = new_state

    def on_new_result(self, future):
        if future.exception() is not None:
            print("Error getting latest result");
            # NOTE(review): change_state expects a [state, message] pair but
            # gets the bare string LAMP_UNDEFINED here; it only works because
            # '?'[0] == '?' and the else-branch replaces it with
            # UNDEFINED_STATE — passing UNDEFINED_STATE directly would be safer.
            self.change_state(LAMP_UNDEFINED)
            return
        self.change_state(future.result())
def main():
    """Create the wx app and tray icon, then enter the event loop."""
    app = wx.App()
    TaskBarIcon()
    app.MainLoop()

if __name__ == '__main__':
    main()
|
from pyspark import SparkContext
from pyspark.sql import SparkSession, Row
import json
import requests

# Spark entry points (legacy SparkContext kept alongside the session)
sc = SparkContext(appName="ddapp_test")
spark = SparkSession \
    .builder \
    .appName("DDapp_model_updt") \
    .getOrCreate()

# datahub.io dump of the 2017/18 English Premier League season (JSON)
SEASON_1718 = 'https://pkgstore.datahub.io/sports-data/' \
    'english-premier-league/season-1718_json/data/' \
    'dbd8d3dc57caf91d39ffe964cf31401b/season-1718_json.json'
content_1718 = requests.get(SEASON_1718)
json1718 = json.loads(content_1718.content)

# one Row per match record
df_1718 = spark.createDataFrame(Row(**x) for x in json1718)
# the 2018/19 season is read from S3 instead
df_1819 = spark.read.load('s3://ddapi.data/season-1819.csv', format='csv')

df_1718.write.save("s3://ddapi.data/testout.csv",
                   format="csv",
                   header=True,
                   mode="overwrite")
|
import random
import test2
def tilemap_builder():
    """Build a 3x3 tilemap of 7-field tiles and publish it on the test2 module."""
    tilemap = []
    for row in range(3):
        tiles = []
        for col in range(3):
            # [random x 0-600, random y 0-100, then five zeroed fields]
            tiles.append([random.randint(0, 600), random.randint(0, 100), 0, 0, 0, 0, 0])
        tilemap.append(tiles)
    # share the freshly built map globally via the test2 module
    test2.tilemap = tilemap
    return tilemap
#!/usr/bin/python3
# -*- coding: utf8 -*-
import sys
import cx_Oracle
class Db:
    """Base class wrapping a DB-API connection with verbose tracing.

    Subclasses are expected to set `self.dbModule` (the DB-API module,
    e.g. cx_Oracle) and implement `custonConnect()` to establish
    `self.con`. NOTE(review): commit/disconnect/cursorExecute catch
    cx_Oracle.DatabaseError directly while connect() uses
    self.dbModule.DatabaseError — confirm whether non-Oracle backends
    were ever intended.
    """
    def __init__(self):
        # Vebosity level to show all
        self._VERBOSITY = 4
        self.CONTINUE_ON_ERROR = False
        self.connected = False
        self.verbosity = 0
        self.className = self.__class__.__name__
        # the DB-API driver module; must be set by the subclass
        self.dbModule = None
        # error-code -> friendly-message map; these codes exit instead of re-raising
        self.nonRaiserErrors = {}
        self.STRING = None
        self.NUMBER = None

    def custonConnect(self):
        # subclass hook: must create self.con (the DB-API connection)
        pass

    def connect(self):
        """ Connect to the database. if this fails, raise. """
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> connect start')
        try:
            self.custonConnect()
            self.connected = True
        except self.dbModule.DatabaseError as e:
            error, *args = e.args
            msgPrefix = 'Database connection error:'
            if error.code in self.nonRaiserErrors.keys():
                # known/expected error: report and exit instead of raising
                reraise = False
                msg = self.nonRaiserErrors[error.code]
            else:
                # raise "unknown" errors
                reraise = True
                msg = '(Code={}) {}'.format(error.code, e)
            if self.verbosity >= 1:
                print('{} {}'.format(msgPrefix, msg))
            if reraise:
                raise
            else:
                if self.verbosity >= self._VERBOSITY:
                    print('Exiting.')
                sys.exit(10)
        if self.connected:
            if self.verbosity >= self._VERBOSITY:
                print('Connected!')
            self.cursor = self.con.cursor()
        else:
            if self.verbosity >= self._VERBOSITY:
                print('Not connected!')
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> connect end')

    def commit(self):
        """ Commit data to the database. If this fails, don't care. """
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> commit start')
        try:
            self.con.commit()
        except cx_Oracle.DatabaseError:
            pass
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> commit end')

    def disconnect(self):
        """ Disconnect from the database. If this fails, don't care. """
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> disconnect start')
        try:
            self.cursor.close()
            self.con.close()
            self.connected = False
        except cx_Oracle.DatabaseError:
            pass
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> disconnect end')

    def cursorExecute(self, statement, data=None, halt=True):
        """ Execute statement using data and return cursor. """
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> cursorExecute start')
        try:
            if data:
                # bind-variable execution
                self.cursor.execute(statement, data)
            else:
                self.cursor.execute(statement)
        except cx_Oracle.DatabaseError as e:
            error, = e.args
            if self.verbosity >= 1:
                print('Error: {}'.format(e))
            if halt:
                raise
        if self.verbosity >= self._VERBOSITY:
            print(self.className, '-> cursorExecute end')
        return self.cursor
if __name__ == '__main__':
pass
|
#/usr/bin/python
import psycopg2
import os
import datetime
import json
import time
from flask import Blueprint, request
import calendar
ghcn_data_blueprint = Blueprint('ghcn_data', __name__)
def build_series(name, data):
    """Assemble a Highcharts series descriptor for one data set.

    The marker value is a literal JS snippet string consumed by the template.
    """
    marker_js = '{ fillColor: "white", lineWidth: 2, lineColor: Highcharts.getOptions().colors[0] }'
    series = {'name': name, 'data': data}
    series['zIndex'] = 1
    series['marker'] = marker_js
    return series
def convert_data(element, conversion_factor):
    """Map monthly records to [month-name, scaled-value] pairs.

    Passes None through unchanged so absent variables stay absent.
    Values are rounded *before* dividing, matching the original behaviour.
    """
    if element is None:
        return None
    converted = []
    for record in element:
        month_label = calendar.month_name[record['MONTH']]
        converted.append([month_label, round(record['VALUE']) / conversion_factor])
    return converted
def construct_degree_days_composite(ghcn_composite):
degree_days_composite = {'META' : None, 'VARS' : None, 'SERIES' : None}
avghddays_data = {'name' : 'avghddays', 'data' : convert_data(ghcn_composite['AVGHDDAYS'], 1)}
avgcddays_data = {'name' : 'avgcddays', 'data' : convert_data(ghcn_composite['AVGCDDAYS'], 1)}
degree_days_composite['VARS'] = [ item for item in [avghddays_data, avgcddays_data] if item['data'] is not None ]
degree_days_composite['META'] = {'title' : "'Derived Temperature Units'", 'type' : "'month'", 'yAxis-title' : "'Number of occurences'"}
degree_days_composite['SERIES'] = []
if avghddays_data['data'] is not None:
degree_days_composite['SERIES'].append(build_series("'Heating Degree Days'", "avghddays"))
if avgcddays_data['data'] is not None:
degree_days_composite['SERIES'].append(build_series("'Cooling Degree Days'", "avgcddays"))
return degree_days_composite
def construct_temperature_composite(ghcn_composite):
temperature_composite = {'META' : None, 'VARS' : None, 'SERIES' : None}
tmax_data = {'name' : 'tmax', 'data' : convert_data(ghcn_composite['TMAX'], 10)}
tmin_data = {'name' : 'tmin', 'data' : convert_data(ghcn_composite['TMIN'], 10)}
tavg_data = {'name' : 'tavg', 'data' : convert_data(ghcn_composite['TAVG'], 10)}
tmaxextreme_data = {'name' : 'tmaxextreme', 'data' : convert_data(ghcn_composite['TMAXEXTREME'], 10)}
tminextreme_data = {'name' : 'tminextreme', 'data' : convert_data(ghcn_composite['TMINEXTREME'], 10)}
temperature_composite['VARS'] = [ item for item in [tmax_data, tmin_data, tavg_data, tmaxextreme_data, tminextreme_data] if item['data'] is not None ]
temperature_composite['META'] = {'title' : "'Temperatures by Month'", 'type' : "'month'", 'yAxis-title' : "'Degrees Celsius'"}
temperature_composite['SERIES'] = []
if tmax_data['data'] is not None:
temperature_composite['SERIES'].append(build_series("'Average TMAX'", "tmax"))
if tmin_data['data'] is not None:
temperature_composite['SERIES'].append(build_series("'Average TMIN'", "tmin"))
if tavg_data['data'] is not None:
temperature_composite['SERIES'].append(build_series("'Average TAVG'", "tavg"))
if tmaxextreme_data['data'] is not None:
temperature_composite['SERIES'].append(build_series("'Extreme TMAX'", "tmaxextreme"))
if tminextreme_data['data'] is not None:
temperature_composite['SERIES'].append(build_series("'Extreme TMIN'", "tminextreme"))
return temperature_composite
def construct_precip_composite(ghcn_composite):
precip_composite = {'META' : None, 'VARS' : None, 'SERIES' : None}
prcp_data = {'name' : 'prcp', 'data' : convert_data(ghcn_composite['PRCP'], 10)}
snow_data = {'name' : 'snow', 'data' : convert_data(ghcn_composite['SNOW'], 1)}
snwd_data = {'name' : 'snwd', 'data' : convert_data(ghcn_composite['SNWD'], 1)}
precip_composite['VARS'] = [ item for item in [prcp_data, snow_data, snwd_data] if item['data'] is not None ]
precip_composite['META'] = {'title' : "'Precipitation by Month'", 'type' : "'month'", 'yAxis-title' : "'(mm)'"}
precip_composite['SERIES'] = []
if prcp_data['data'] is not None:
precip_composite['SERIES'].append(build_series("'Average Precipitation'", "prcp"))
if snow_data['data'] is not None:
precip_composite['SERIES'].append(build_series("'Average Snowfall'", "snow"))
if snow_data['data'] is not None:
precip_composite['SERIES'].append(build_series("'Average Snowdepth'", "snwd"))
return precip_composite
def run_queries(queries, station_id):
    """Run each SQL query against the ingest DB, binding station_id as %(id)s.

    Returns {query_key: [{'VALUE': v, 'MONTH': m}, ...]} with None for keys
    whose query returned no rows. Connection parameters come from the
    INGEST_* environment variables.
    """
    conn = psycopg2.connect("host='{0}' dbname='{1}' user='{2}' password='{3}'".format(os.environ['INGEST_HOST'],
                                                                                       os.environ['INGEST_DB'],
                                                                                       os.environ['INGEST_USER'],
                                                                                       os.environ['INGEST_PASS']))
    try:
        cur = conn.cursor()
        query_results = {}
        for key, query in queries.items():
            query_results[key] = None
            cur.execute(query, {'id' : station_id})
            for row in cur:
                record = {'VALUE' : row[0], 'MONTH' : row[1]}
                # First row creates the list; later rows append to it.
                if query_results[key] is not None:
                    query_results[key].append(record)
                else:
                    query_results[key] = [record]
    finally:
        # The original leaked the connection when a query raised; always close.
        conn.close()
    return query_results
def get_composite_report(station_id):
    """Fetch all monthly summary variables for one GHCN station.

    Each query pulls (VALUE, MONTH) rows from GHCN_DATA.GHCN_SUMMARY for a
    single summary TYPE; the results are keyed by the short variable name
    used by the construct_*_composite helpers.
    """
    queries = {'TMAX' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'TMAXAVG' ''',
               'TMIN' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'TMINAVG' ''',
               'PRCP' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'PRCPAVG' ''',
               'SNOW' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'SNOWAVG' ''',
               'SNWD' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'SNWDAVG' ''',
               'AWDR' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'AWDRAVG' ''',
               'AWND' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'AWNDAVG' ''',
               'TAVG' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'TAVGAVG' ''',
               'TMAXEXTREME' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'TMAXEXTREME' ''',
               'TMINEXTREME' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'TMINEXTREME' ''',
               'AVGCDDAYS' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'AVGCDDAYS' ''',
               'AVGHDDAYS' : '''SELECT "VALUE", "MONTH" FROM "GHCN_DATA"."GHCN_SUMMARY" WHERE "ID" = %(id)s AND "TYPE" = 'AVGHDDAYS' '''}
    result = run_queries(queries, station_id)
    return result
@ghcn_data_blueprint.route('/get_composite_report/')
def get_composite_report_json():
    """Flask endpoint: JSON composite report for the station given by ?id=..."""
    sid = request.args.get('id', None)
    report = get_composite_report(sid)
    return json.dumps(report)
if __name__ == '__main__':
    # Stand-alone debug server: mount the blueprint and serve it locally.
    from flask import Flask
    app = Flask(__name__)
    app.debug = True
    app.register_blueprint(ghcn_data_blueprint, url_prefix="/ghcn_data/")
    app.run()
|
class matrix(object):
    """Simple dense matrix backed by a dict mapping row index -> list of values."""

    def __init__(self, num_rows, num_columns):
        '''
        num_rows: type = int
        num_columns: type = int
        num_rows is the number of rows in the matrix
        num_columns is the number of columns in the matrix
        matrix_obj is initialised to all zeros
        '''
        self.num_rows = num_rows
        self.num_columns = num_columns
        self.matrix_obj = {i: [0 for j in range(num_columns)] for i in range(num_rows)}

    def __repr__(self):
        return 'matrix({},{})'.format(self.num_rows, self.num_columns)

    def print_matrix(self):
        '''
        Pretty-print the matrix, one row per line.
        '''
        print('\n')
        for i in range(self.num_rows):
            for j in range(self.num_columns):
                print(self.matrix_obj[i][j], end = ' ')
            print('\n')
        print('\n')

    def get_num_rows(self):
        return self.num_rows

    def get_num_columns(self):
        return self.num_columns

    def add_col_to_matrix(self, col, col_index):
        '''
        col: type = list, or any other iterable
        col_index: type = int
        col_index is the column number, 0 <= col_index <= n-1 (from left to right)
        '''
        assert col_index < self.num_columns, 'col_index not in matrix'
        assert len(col) == self.num_rows, 'column length is incorrect'
        for i in range(len(col)):
            self.matrix_obj[i][col_index] = col[i]

    def add_row_to_matrix(self, row, row_index):
        '''
        row: type = list, or any other iterable
        row_index: type = int
        row_index is the row number, 0 <= row_index <= m-1 (top to bottom)
        '''
        assert row_index < self.num_rows, 'row_index not in matrix'
        assert len(row) == self.num_columns, 'row length is incorrect'
        # BUG FIX: copy the values instead of storing the caller's list.
        # The original stored the reference, so later mutation of the
        # caller's list silently changed the matrix (and a non-list iterable
        # would have been stored as-is); add_col_to_matrix always copied.
        self.matrix_obj[row_index] = list(row)

    def get_row(self, row_index):
        '''
        Returns a list (the internal row object, not a copy).
        '''
        assert row_index < self.num_rows, 'row_index not in matrix'
        return self.matrix_obj[row_index]

    def get_column(self, col_index):
        '''
        Returns a list (a freshly built copy of the column values).
        '''
        assert col_index < self.num_columns, 'col_index not in matrix'
        ans = []
        for i in self.matrix_obj:
            ans.append(self.matrix_obj[i][col_index])
        return ans

    def __mul__(self, other):
        '''
        Multiplies the matrix object with another matrix object.
        Returns a new matrix object
        '''
        assert self.num_columns == other.num_rows, 'matrices are of incompatible dimensions'
        ans = matrix(self.num_rows, other.num_columns)
        for i in range(self.num_rows):
            temp_row = self.get_row(i)
            for j in range(other.num_columns):
                temp_column = other.get_column(j)
                # Dot product of row i and column j.
                count = 0
                for num in range(self.num_columns):
                    count += temp_row[num] * temp_column[num]
                ans.matrix_obj[i][j] = count
        return ans

    def __pow__(self, other):
        '''
        Does element-wise multiplication with another matrix object
        Returns a new matrix object
        '''
        assert self.num_columns == other.num_columns, 'matrices are of incompatible dimensions'
        assert self.num_rows == other.num_rows, 'matrices are of incompatible dimensions'
        ans = matrix(self.num_rows, other.num_columns)
        for i in range(self.num_rows):
            for j in range(self.num_columns):
                ans.matrix_obj[i][j] = self.matrix_obj[i][j] * other.matrix_obj[i][j]
        return ans

    def __add__(self, other):
        '''
        Element-wise addition. Returns a new matrix object
        '''
        assert self.num_columns == other.num_columns, 'matrices are of incompatible dimensions'
        assert self.num_rows == other.num_rows, 'matrices are of incompatible dimensions'
        ans = matrix(self.num_rows, other.num_columns)
        for i in range(self.num_rows):
            for j in range(self.num_columns):
                ans.matrix_obj[i][j] = self.matrix_obj[i][j] + other.matrix_obj[i][j]
        return ans

    def __sub__(self, other):
        '''
        Element-wise subtraction. Returns a new matrix object
        '''
        assert self.num_columns == other.num_columns, 'matrices are of incompatible dimensions'
        assert self.num_rows == other.num_rows, 'matrices are of incompatible dimensions'
        ans = matrix(self.num_rows, other.num_columns)
        for i in range(self.num_rows):
            for j in range(self.num_columns):
                ans.matrix_obj[i][j] = self.matrix_obj[i][j] - other.matrix_obj[i][j]
        return ans
# a = matrix(3,3)
# a.add_row_to_matrix([1,1,3], 0)
# a.add_row_to_matrix([2,2,2], 1)
# a.add_row_to_matrix([3,3,1], 2)
# a.print_matrix()
# b = matrix(3,3)
# b.add_col_to_matrix([1,0,1],0)
# b.add_col_to_matrix([2,0,1],1)
# b.add_col_to_matrix([3,1,2],2)
# b.print_matrix()
# # b.add_col_to_matrix([3,1,2],3)
# # b.add_col_to_matrix([3,1,3],4)
# c = a-b
# c.print_matrix()
# # # print(a.get_row(2))
# # # print(a.get_column(1))
# # a.print_matrix()
|
def deleni(cislo_1: int, cislo_2: int):
    """Divide cislo_1 by cislo_2, returning 0 when the divisor is zero."""
    return 0 if cislo_2 == 0 else cislo_1 / cislo_2


print(deleni(12, 4))
|
# Django - Jet Configure
# Allow the admin to be embedded in same-origin iframes (required by Jet's UI).
X_FRAME_OPTIONS = 'SAMEORIGIN'
JET_INDEX_DASHBOARD = 'pytube.dashboard.CustomIndexDashboard'
# Django - Jet theme colors for admin backend.
JET_DEFAULT_THEME = 'default'
# Themes selectable from the Jet user menu; 'color' is the swatch shown there.
JET_THEMES = [
    {
        'theme': 'default', # theme folder name
        'color': '#47bac1', # color of the theme's button in user menu
        'title': 'Default' # theme title
    },
    {
        'theme': 'green',
        'color': '#44b78b',
        'title': 'Green'
    },
    {
        'theme': 'light-green',
        'color': '#2faa60',
        'title': 'Light Green'
    },
    {
        'theme': 'light-violet',
        'color': '#a464c4',
        'title': 'Light Violet'
    },
    {
        'theme': 'light-blue',
        'color': '#5EADDE',
        'title': 'Light Blue'
    },
    {
        'theme': 'light-gray',
        'color': '#222',
        'title': 'Light Gray'
    }
]
# Path to Google Analytics client_secrets.json
#JET_MODULE_GOOGLE_ANALYTICS_CLIENT_SECRETS_FILE = os.path.join(BASE_DIR, 'client_secrets.json')
|
from flask import Flask
from flask_cors import CORS
import random

app = Flask(__name__)
# Enable cross-origin requests for every route on this app.
CORS(app)


@app.route('/jerry')
def yourMethod():
    """Static greeting endpoint used for connectivity testing."""
    return 'Hello World.'


if __name__ == "__main__":
    app.run()
|
from django.contrib import admin
from django.contrib.auth.models import Group, User
from django import forms
from .models import Department, Designation, Farmer,\
Supplier, SupplierFarmer, ShrimpType, ShrimpItem, \
UserManager, RowStatus, Author, Book, LogShrimpItem
from .inventorymodel import ShrimpProdItem, \
PackagingMaterial, FinishProductCode, \
BasicShrimpType, ProdType, SoakingType,\
GlazinType, BlockType, CountType, ProdItem
# Register your models here.
# Branding shown in the Django admin header, browser title and index page.
admin.site.site_header = 'Shrimp Administration'
admin.site.site_title = 'Shrimp Admin'
admin.site.index_title = 'Shrimp Admin Portal'
# Simple lookup models registered with the default ModelAdmin.
admin.site.register(BasicShrimpType)
admin.site.register(ProdType)
admin.site.register(SoakingType)
admin.site.register(GlazinType)
admin.site.register(BlockType)
admin.site.register(CountType)
class ProdItemAdmin(admin.ModelAdmin):
    """Admin for ProdItem.

    Name is derived automatically from the selected attribute foreign keys,
    and saving is skipped when an item with the identical attribute
    combination already exists.
    """
    list_display = ('Name', 'BasicShrimpTypeId', 'PrTyId', 'SoakingTypeId', 'GlazinTypeId', 'BlockTypeId', 'CountTypeId', )
    search_fields = ['PrTyId']
    list_filter = ['GlazinTypeId', 'PrTyId', 'CountTypeId']
    ordering = ['-Id']
    list_per_page = 20

    def get_form(self, request, obj=None, **kwargs):
        # Name is generated in save_model, so hide it from the edit form.
        self.exclude = ["Name"]
        form = super(ProdItemAdmin, self).get_form(request, obj, **kwargs)
        return form

    def save_model(self, request, obj, form, change):
        duplicates = ProdItem.objects.filter(BasicShrimpTypeId=obj.BasicShrimpTypeId,
                                             PrTyId=obj.PrTyId,
                                             SoakingTypeId=obj.SoakingTypeId,
                                             GlazinTypeId=obj.GlazinTypeId,
                                             BlockTypeId=obj.BlockTypeId,
                                             CountTypeId=obj.CountTypeId)
        # .exists() issues a cheap EXISTS query; the original's truthiness
        # check (``if pItem:``) fetched the matching rows just to discard them.
        if duplicates.exists():
            # Duplicate attribute combination: silently skip the save.
            return
        obj.Name = str(obj.BasicShrimpTypeId)+' '+str(obj.PrTyId)+' '+str(obj.SoakingTypeId)+' '+str(obj.GlazinTypeId)+'Glazin '+str(obj.BlockTypeId)+' '+str(obj.CountTypeId)
        return super(ProdItemAdmin, self).save_model(request, obj, form, change)
admin.site.register(ProdItem, ProdItemAdmin)
#admin.site.register(Department)
#admin.site.register(Designation)
# Reference material consulted for these admin customisations:
#https://www.dev2qa.com/how-to-manage-models-in-django-admin-site/
#https://books.agiliq.com/projects/django-admin-cookbook/en/latest/filter_fk_dropdown.html
# .values('UserName')
#https://books.agiliq.com/projects/django-admin-cookbook/en/latest/many_to_many.html
#https://reinout.vanrees.org/weblog/2011/11/29/many-to-many-field-save-method.html
#https://www.pythoncircle.com/post/28/creating-custom-user-model-and-custom-authentication-in-django/
#https://automationstepbystep.com/jenkins/
class UserManagerAdmin(admin.ModelAdmin):
    """Admin for UserManager accounts.

    NOTE(review): 'Password' is exposed in list_display -- if passwords are
    stored in plain text this leaks credentials in the changelist; confirm.
    """
    list_display = ['UserId', 'UserName', 'Password','StaffId','Mobile', 'DepartmentId', 'DesignationId']
    search_fields = ['UserId', 'StaffId', 'Mobile']
    list_filter = ['UserId', 'StaffId', 'Mobile']
    list_per_page = 20
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Explicitly provide the full queryset for both FK dropdowns.
        if db_field.name == "DepartmentId":
            kwargs["queryset"] = Department.objects.all()
        if db_field.name == "DesignationId":
            kwargs["queryset"] = Designation.objects.all()
        return super().formfield_for_foreignkey(db_field, request, **kwargs)
class FarmerAdmin(admin.ModelAdmin):
    """Changelist configuration for Farmer records."""
    list_display = ('FarmerName', 'FarmerCode', 'FarmerMobile', 'Address', 'IsActive')
    search_fields = ['FarmerName', 'FarmerCode', 'FarmerMobile']
    list_filter = ['FarmerCode', 'FarmerMobile']
    list_per_page = 20
class SupplierAdmin(admin.ModelAdmin):
    """Changelist configuration for Supplier records.

    FarmerId is a many-to-many relation edited with the horizontal
    filter widget.
    """
    list_display = ('SupplierName', 'SupplierCode', 'SupplierMobile', 'Address', 'IsActive', 'IsFarmer')
    filter_horizontal = ['FarmerId']
    search_fields = ['SupplierName', 'SupplierCode', 'SupplierMobile']
    list_filter = ['SupplierCode', 'SupplierMobile']
    list_per_page = 20
# Attach the customised admins defined above.
admin.site.register(UserManager, UserManagerAdmin)
admin.site.register(Farmer, FarmerAdmin)
admin.site.register(Supplier, SupplierAdmin)
admin.site.register(ShrimpType)
class ShrimpItemAdmin(admin.ModelAdmin):
    """Admin for ShrimpItem.

    Stamps EntryBy from the request user and mirrors every save into
    LogShrimpItem as an audit record.
    """
    list_display = ('Name', 'ItemCount', 'MeasurUnit','Price')
    search_fields = ['Name', 'Price']
    list_filter = ['Name', 'Price']
    list_per_page = 20

    def get_form(self, request, obj=None, **kwargs):
        # EntryBy is filled from the request in save_model; keep it off the form.
        self.exclude = ["EntryBy"]
        form = super(ShrimpItemAdmin, self).get_form(request, obj, **kwargs)
        return form

    def save_model(self, request, obj, form, change):
        obj.EntryBy = request.user
        obj.save()
        # Append an audit-log row mirroring the saved item.
        LogShrimpItem(Name=obj.Name, ItemCount=obj.ItemCount,
                      MeasurUnit=obj.MeasurUnit, Price=obj.Price,
                      ShrimpTypeId=obj.ShrimpTypeId, ShrimpItemId=obj,
                      EntryBy=request.user).save()
        # Django ignores save_model's return value; return None per the
        # ModelAdmin contract (the original returned obj for no consumer).
admin.site.register(ShrimpItem, ShrimpItemAdmin)
admin.site.register(ShrimpProdItem)
class PackagingMaterialAdmin(admin.ModelAdmin):
    """Admin for packaging materials; hides the record with Id=1."""
    list_display = ('Name', 'PackSize', 'Stock',)
    search_fields = ['Name', 'PackSize']
    list_filter = ['Name', 'PackSize']
    list_per_page = 20
    def get_queryset(self, request):
        qs = super(PackagingMaterialAdmin, self).get_queryset(request)
        #return qs.filter(Id=request.user)
        # NOTE(review): Id=1 is excluded from the changelist -- presumably a
        # reserved/placeholder row; confirm before changing.
        return qs.exclude(Id=1)
admin.site.register(PackagingMaterial, PackagingMaterialAdmin)
admin.site.register(FinishProductCode)
# Hide Django's built-in auth models from this admin site.
admin.site.unregister(Group)
admin.site.unregister(User)
# class BookInline(admin.TabularInline):
# model = Book
#
# class AuthorAdmin(admin.ModelAdmin):
# inlines = [
# BookInline,
# ]
#
# admin.site.register(Author, AuthorAdmin)
# class FarmerAdminForm(forms.ModelForm):
# tags = forms.ModelMultipleChoiceField(
# Farmer.objects.all(),
# widget=admin.widgets.FilteredSelectMultiple('Tags', False),
# required=False,
# )
#
# def __init__(self, *args, **kwargs):
# super(FarmerAdminForm, self).__init__(*args, **kwargs)
# if self.instance.pk:
# self.initial['tags'] = self.instance.tags.values_list('pk', flat=True)
#
# def save(self, *args, **kwargs):
# instance = super(FarmerAdminForm, self).save(*args, **kwargs)
# if instance.pk:
# instance.tags.clear()
# instance.tags.add(*self.cleaned_data['tags'])
# return instance
#
# class SupplierFarmerAdmin(admin.ModelAdmin):
# form = FarmerAdminForm
# search_fields = ['SupplierId']
#admin.site.register(SupplierFarmer, SupplierFarmerAdmin)
#admin.site.register(SupplierFarmer)
|
from datetime import datetime
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
class PosConfig(models.Model):
    """POS configuration extension adding an 'Invoicing Mandatory' toggle."""
    _inherit = 'pos.config'
    # Flag only -- any enforcement of mandatory invoicing happens elsewhere.
    invoicing_mnd = fields.Boolean(string="Invoicing Mandatory")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the zhavbmq app.

    Drops one field each from dvkdj, gwmyxcxdii and ulvookvun, and adds the
    integer field 'mhcua' (default 0) to gwmyxcxdii.
    """
    dependencies = [
        ('zhavbmq', '0007_auto_20150218_1622'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='dvkdj',
            name='jzwapass',
        ),
        migrations.RemoveField(
            model_name='gwmyxcxdii',
            name='lnioihvte',
        ),
        migrations.RemoveField(
            model_name='ulvookvun',
            name='tugjdleqdq',
        ),
        migrations.AddField(
            model_name='gwmyxcxdii',
            name='mhcua',
            field=models.IntegerField(default=0),
        ),
    ]
|
import logging
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponseNotAllowed,
HttpResponseNotFound, HttpResponseForbidden)
from django.shortcuts import render, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.utils.html import escape
from taskw import decode_task
from task import forms
from task.decorators import logged_in_or_basicauth
from task.grids import TaskDataGrid
from task.models import Task, Undo, Tag, Project
from task.util import parse_undo
from django.conf import settings
TASK_URL = 'taskdb'
# Filesystem root holding the taskwarrior data files, from project settings.
TASK_ROOT = settings.TASKDATA_ROOT
# The three taskwarrior database files this app serves and accepts.
TASK_FNAMES = ('undo.data', 'completed.data', 'pending.data')
class TaskFilter(object):
    """Applies at most one GET-parameter filter (project first, then tag)."""

    def __init__(self, request, qs=None):
        # The original used ``qs=Task.objects.all()`` as a default argument,
        # which is evaluated once at class-definition time (the mutable-default
        # pitfall); build the default lazily instead.
        self.qs = Task.objects.all() if qs is None else qs
        self.request = request

    def filter(self):
        """Return the queryset narrowed by ?project=... or, failing that, ?tag=...

        Only the first present parameter is applied; the two filters are
        never combined (mirrors the original behaviour).
        """
        proj = self.request.GET.get('project')
        if proj:
            return self.qs.filter(project__name=proj)
        tag = self.request.GET.get('tag')
        if tag:
            return self.qs.filter(tags__tag=tag)
        return self.qs
def get_tags(status=None):
    """Tag values in use; restricted to tasks with *status* when given."""
    if status is not None:
        return Tag.objects.filter(task__status=status).values('tag').distinct()
    return Tag.objects.all().values('tag')
def get_projects(status=None):
    """Project names in use; restricted to tasks with *status* when given."""
    if status is not None:
        return Project.objects.filter(task__status=status).values('name').distinct()
    return Project.objects.all().values('name')
def pending_tasks(request, template='task/index.html'):
    """Render the grid of pending tasks, honouring project/tag GET filters."""
    filtered = TaskFilter(request, Task.objects.filter(status='pending')).filter()
    grid = TaskDataGrid(request, queryset=filtered)
    extra = {
        'task_url': "http://%s/taskdb/" % request.get_host(),
        'tags': get_tags('pending'),
        'projects': get_projects('pending'),
    }
    return grid.render_to_response(template, extra_context=extra)
def completed_tasks(request, template='task/index.html'):
    """Render the grid of completed tasks, honouring project/tag GET filters."""
    filtered = TaskFilter(request, Task.objects.filter(status='completed')).filter()
    grid = TaskDataGrid(request, queryset=filtered)
    extra = {
        'task_url': "http://%s/taskdb/" % request.get_host(),
        'tags': get_tags('completed'),
        'projects': get_projects('completed'),
    }
    return grid.render_to_response(template, extra_context=extra)
@login_required
def add_task(request, template='task/add.html'):
    """Show the task form (GET) or create a new task for this user (POST)."""
    if request.method == 'POST':
        form = forms.TaskForm(request.POST, instance=Task(user=request.user))
        if form.is_valid():
            saved = form.save()
            # silly extra save to create undo object for m2m fields
            saved.save()
            return HttpResponseRedirect('/')
    else:
        form = forms.TaskForm(initial={'user': request.user})
    return render(request, template, {
        'method': 'add',
        'form': form,
        'title': "Add Task",
    })
@login_required
def add_tag(request):
    """Popup endpoint creating a Tag (admin-style add-another popup)."""
    return add_model(request, forms.TagForm, 'tags')
@login_required
def add_project(request):
    """Popup endpoint creating a Project (admin-style add-another popup)."""
    return add_model(request, forms.ProjectForm, 'project')
def add_model(request, form_cls, name, template="popup.html"):
    """Handle an admin-style popup form: save on POST and notify the opener."""
    if request.method != 'POST':
        # Blank form for the initial GET.
        return render(request, template, {'form': form_cls(), 'field': name})
    form = form_cls(request.POST)
    if not form.is_valid():
        # Re-render the bound form with its validation errors.
        return render(request, template, {'form': form, 'field': name})
    new_obj = form.save()
    # Close the popup and hand the new object back to the opener window.
    return HttpResponse('<script type="text/javascript">'
                        'opener.dismissAddAnotherPopup(window, "%s", "%s");'
                        '</script>' %
                        (escape(new_obj._get_pk_val()), escape(new_obj)))
@login_required
def done_task(request, task_id, template='task/done.html'):
    """Mark a task completed and render the confirmation page."""
    task = get_object_or_404(Task, pk=task_id)
    task.status = 'completed'
    task.save()
    return render(request, template, {'task': task})
@login_required
def edit_task(request, task_id, template='task/add.html'):
    """Show the edit form (GET) or update an existing task (POST)."""
    task = get_object_or_404(Task, pk=task_id)
    if request.method == 'POST':
        form = forms.TaskForm(request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            # silly extra save to create undo object for m2m fields
            task.save()
            return HttpResponseRedirect('/')
    else:
        form = forms.TaskForm(instance=task)
    return render(request, template, {
        'task': task,
        'method': 'edit',
        'form': form,
        'title': "Edit Task",
    })
def detail_task(request, task_id, template='task/detail_task.html'):
    """Render the read-only detail page for one task."""
    return render(request, template,
                  {'task': get_object_or_404(Task, pk=task_id)})
def detail_project(request, proj_id, template='task/detail_project.html'):
    """Render the read-only detail page for one project."""
    return render(request, template,
                  {'project': get_object_or_404(Project, pk=proj_id)})
def get_taskdb(request, filename):
    """Serve one of the three taskwarrior data files as plain text."""
    serializers = {
        'pending.data': lambda: Task.serialize('pending'),
        'completed.data': lambda: Task.serialize('completed'),
        'undo.data': lambda: Undo.serialize(),
    }
    serialize = serializers.get(filename)
    if serialize is None:
        return HttpResponseNotFound()
    taskstr = serialize()
    # ``content_type`` is the supported keyword; the old ``mimetype``
    # argument used here originally was removed in Django 1.7.
    response = HttpResponse(taskstr, content_type='text/plain')
    response['Content-Length'] = len(taskstr)
    return response
def put_taskdb(request, filename):
    """PUT is handled identically to POST (full-file replacement)."""
    return post_taskdb(request, filename)
def post_taskdb(request, filename):
    """Replace the contents of one taskwarrior data file from the request body.

    pending/completed uploads replace the requesting user's tasks of that
    status; undo uploads replace the undo log.
    """
    if filename not in TASK_FNAMES:
        return HttpResponseForbidden('Forbidden!')
    user = request.user
    data = request.raw_post_data
    if filename in ['pending.data', 'completed.data']:
        parsed = [decode_task(line) for line in data.splitlines()]
        if filename == 'pending.data':
            tasks = Task.objects.filter(status='pending', user=user)
        elif filename == 'completed.data':
            # BUG FIX: scope the delete to this user -- the original deleted
            # every user's completed/deleted tasks here, unlike the
            # pending branch which filtered on user.
            tasks = Task.objects.filter(status__in=['completed', 'deleted'],
                                        user=user)
        tasks.delete()
        for task in parsed:
            task.update({'user': user})
            Task.fromdict(task)
    elif filename == 'undo.data':
        Undo.objects.all().delete()
        parsed = parse_undo(data)
        for undo_dict in parsed:
            undo_dict.update({'user': user})
            Undo.fromdict(undo_dict)
    else:
        return HttpResponseNotFound()
    return HttpResponse()
@logged_in_or_basicauth()
def taskdb(request, filename):
    """ Serve {undo, completed, pending}.data files as requested.

    It's probably better to serve these outside of django, but
    this is much more flexible for now.
    """
    handlers = {
        'GET': get_taskdb,
        'POST': post_taskdb,
        'PUT': put_taskdb,
    }
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponseNotAllowed(['GET', 'PUT', 'POST'])
    return handler(request, filename)
|
# imports
import numpy as np
import cv2
import matplotlib.pyplot as plt
# load images, remember openCV loads color images as BGR
# NOTE(review): cv2.imread returns None when the file is missing; this script
# assumes both input files exist.
waterfall_original = cv2.imread('input/waterfall.png')
mountains_original = cv2.imread('input/mountains.png')
####### Problem - swapping blue and red color planes
waterfall_blues = waterfall_original[:,:,0] # access blue color plane (BGR index 0)
waterfall_reds = waterfall_original[:,:,2] # access the red color plane (BGR index 2)
# show image planes for reference
cv2.imshow('Red Plane', waterfall_reds)
cv2.imshow('Blue Plane', waterfall_blues)
cv2.waitKey(0)
cv2.destroyAllWindows()
# clone and swap color planes
# in OpenCV C++ you would use the .clone() method, however to copy an image in
# OpenCV Python use np.copy() from numpy (or ndarray.copy(), as here)
waterfall_red_blue_swap = waterfall_original.copy()
waterfall_red_blue_swap[:,:,0] = waterfall_reds
waterfall_red_blue_swap[:,:,2] = waterfall_blues
# show original and R-B swapped color planes
cv2.imshow('Original Image', waterfall_original)
cv2.imshow('Red & Blue Swapped', waterfall_red_blue_swap)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save red and blue swapped image
cv2.imwrite('output/waterfall_red_blue_swapped.png', waterfall_red_blue_swap)
####### Problem - create a green and red monochromatic image
waterfall_greens = waterfall_original[:,:,1] # access the green color plane (BGR index 1)
cv2.imshow('Green Monochromatic Image', waterfall_greens)
cv2.imshow('Red Monochromatic Image', waterfall_reds)
cv2.waitKey(0)
cv2.destroyAllWindows()
# because green "looks" better we will save this as the monochromatic version
cv2.imwrite('output/water_monochromatic.png', waterfall_greens)
waterfall_mono = waterfall_greens.copy()
###### Problem - Replacement of Pixels
# replace the center 100 x 100 pixel square of the mountains mono image with the
# 100 x 100 center pixel square from the waterfall mono image
# first see what mono channel looks better for the mountains image
mountains_blues = mountains_original[:,:,0]
mountains_greens = mountains_original[:,:,1]
mountains_reds = mountains_original[:,:,2]
# show images
cv2.imshow('Blue Monochromatic Image', mountains_blues)
cv2.imshow('Green Monochromatic Image', mountains_greens)
cv2.imshow('Red Monochromatic Image', mountains_reds)
cv2.waitKey(0)
cv2.destroyAllWindows()
# green is the winner, copy it
mountains_mono = mountains_greens.copy()
# find the chunk
# NOTE(review): assumes both images are at least 100 x 100 pixels.
waterfall_center = np.array(waterfall_mono.shape[:2]) / 2.
waterfall_center_chunk = waterfall_mono[
    int(waterfall_center[0] - 50):int(waterfall_center[0] + 50),
    int(waterfall_center[1] - 50):int(waterfall_center[1] + 50)
]
mountains_center = np.array(mountains_mono.shape[:2]) / 2.
mountains_mono_center_replaced = mountains_mono.copy() # copy first
mountains_mono_center_replaced[
    int(mountains_center[0] - 50):int(mountains_center[0] + 50),
    int(mountains_center[1] - 50):int(mountains_center[1] + 50)
] = waterfall_center_chunk
# show center chunk replaced mono mountains image and save on close
cv2.imshow('Center Replaced', mountains_mono_center_replaced)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output/mountains_mono_center_replaced.png',
            mountains_mono_center_replaced)
###### Problem - Arithmetic and Geometric Operations
# BUG FIX: these were Python 2 ``print`` statements, which are syntax errors
# under Python 3; print() calls below work on both interpreters.
print('The minimum value in the mono waterfall image is: {}'.format(
    waterfall_mono.min()))
print('The maximum value in the mono waterfall image is: {}'.format(
    waterfall_mono.max()))
print('The average value in the mono waterfall image is: {:.2f}'.format(
    waterfall_mono.mean()))
print('The standard deviation in the mono waterfall image is: {:.2f}'.format(
    waterfall_mono.std()))
# subtract the mean, then divide by the standard deviation, then multiply by 10
# finally add the mean back in
# NOTE(review): cv2.absdiff computes |pixel - mean|, not (pixel - mean) as the
# comment above implies -- confirm which was intended.
waterfall_arithmetic = waterfall_mono.copy()
waterfall_arithmetic = cv2.absdiff(waterfall_arithmetic, waterfall_arithmetic.mean())
waterfall_arithmetic = cv2.divide(waterfall_arithmetic, waterfall_arithmetic.std())
waterfall_arithmetic = cv2.multiply(waterfall_arithmetic, 10)
waterfall_arithmetic = cv2.add(waterfall_arithmetic, waterfall_arithmetic.mean())
cv2.imshow('Waterfall Arithmetic', waterfall_arithmetic)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output/waterfall_arithmetic.png', waterfall_arithmetic)
# shift the waterfall_greens image by 2 pixels and subtract from the original
# NOTE(review): with tx=+2 this translation moves content toward +x (right),
# although the original comment said "left" -- confirm intended direction.
M = np.float32([[1,0,2],[0,1,0]]) # the transformation matrix for Translation
rows, cols = waterfall_greens.shape[:2]
waterfall_greens_shifted = cv2.warpAffine(waterfall_greens, M, (cols, rows))
waterfall_greens_sub_shifted = cv2.subtract(
    waterfall_greens, waterfall_greens_shifted)
cv2.imshow('Waterfall Greens Sub Shifted', waterfall_greens_sub_shifted)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output/waterfall_greens_sub_shifted.png', waterfall_greens_sub_shifted)
###### Problem - Noise
# compare Gaussian Noise between blue and green channels in Waterfall image
im = np.zeros(waterfall_original.shape, np.uint8) # do not use original image, it overwrites it
mean = (0, 1, 0) # gaussian mean BGR channels (only green is non-zero)
sigma = (0, 10, 0) # gaussian sigma BGR channels (only green is non-zero)
# cv2.randn fills ``im`` in place and also returns it.
gaussian_noise = cv2.randn(im, mean, sigma)
waterfall_noise_green = cv2.add(waterfall_original, gaussian_noise)
cv2.imshow('Waterfall Green Noise', waterfall_noise_green)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output/waterfall_noise_green.png', waterfall_noise_green)
# repeat for Blue Channel
mean = (1, 0, 0) # gaussian mean only for the blue channel (BGR)
sigma = (10, 0, 0) # gaussian sigma only for the blue channel (BGR)
gaussian_noise = cv2.randn(im, mean, sigma)
waterfall_noise_blue = cv2.add(waterfall_original, gaussian_noise)
cv2.imshow('Waterfall Blue Noise', waterfall_noise_blue)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('output/waterfall_noise_blue.png', waterfall_noise_blue)
|
import state
import io
import streamreader
# using backtracking search to build an NFA
class NFAStateMachine:
    """NFA acceptance test via depth-first backtracking search.

    states       -- dict mapping state id -> state.State object
    startStateId -- id of the start state
    classes      -- dict mapping transition-class name -> set of characters;
                    the name "epsilon" marks transitions that consume no input.
    """
    def __init__(self, states, startStateId, classes):
        self.states = states
        self.startStateId = startStateId
        self.classes = classes
        for stateId in self.states:
            self.states[stateId].setClasses(classes)
    def accepts(self, strm):
        """Return True iff the stream's full contents are accepted by the NFA."""
        def acceptsSuffix(stateId):
            # Each (state, input-position) configuration is explored at most
            # once: this both prevents infinite epsilon-cycles and avoids
            # re-exploring configurations that already failed.
            if (stateId, strm.numCharsRead()) in visited:
                return False
            visited.add((stateId, strm.numCharsRead()))
            theState = self.states[stateId]
            # Peek one char so eof() reflects end-of-input; accept only when
            # the whole input is consumed in an accepting state.
            c = strm.readChar()
            if strm.eof() and theState.isAccepting():
                return True
            strm.unreadChar(c)
            for onClass, toStateId in theState.getTransitions():
                if onClass == "epsilon":
                    # Epsilon move: recurse without consuming input.
                    if acceptsSuffix(toStateId):
                        return True
                else:
                    # Consume one char if it belongs to this transition's
                    # class; un-read it on failure so sibling branches can
                    # retry from the same position (backtracking).
                    c = strm.readChar()
                    if c in self.classes[onClass] and acceptsSuffix(toStateId):
                        return True
                    strm.unreadChar(c)
            return False
        visited = set()
        return acceptsSuffix(self.startStateId)
def main():
    """Build a hard-coded NFA and interactively test strings against it."""
    # NOTE(review): the prompt text mentions "zeros and ones", but the only
    # character class defined is {"a"} -- the prompt looks stale; confirm.
    q0 = state.State(0)
    q1 = state.State(1)
    q2 = state.State(2)
    q3 = state.State(3, True)
    q4 = state.State(4)
    q5 = state.State(5)
    q6 = state.State(6)
    q7 = state.State(7)
    q8 = state.State(8, True)
    classes = {"a": frozenset(["a"])}
    # Two cycles from the start state: one of length 3 (via q1..q3) and one
    # of length 5 (via q4..q8), each ending in an accepting state.
    q0.addTransition("a", 1)
    q0.addTransition("a", 4)
    q1.addTransition("a", 2)
    q2.addTransition("a", 3)
    q3.addTransition("a", 1)
    q4.addTransition("a", 5)
    q5.addTransition("a", 6)
    q6.addTransition("a", 7)
    q7.addTransition("a", 8)
    q8.addTransition("a", 4)
    nfa = NFAStateMachine(
        {0: q0, 1: q1, 2: q2, 3: q3, 4: q4, 5: q5, 6: q6, 7: q7, 8: q8}, 0, classes)
    # Single read-check loop instead of the duplicated prompt of the original.
    while True:
        s = input(
            "Please enter a string of zeros and ones (type done to quit): ").strip()
        if s == "done":
            break
        strm = streamreader.StreamReader(io.StringIO(s))
        if nfa.accepts(strm):
            print("The string is accepted by the finite state machine.")
        else:
            print("The string is not accepted.")
    print("Program Completed.")


if __name__ == "__main__":
    main()
|
# see https://www.codewars.com/kata/534d2f5b5371ecf8d2000a08/solutions/python
from TestFunction import Test
def multiplication_table(size):
    """Return a size x size multiplication table.

    Element [i][j] equals (i+1)*(j+1), so row i holds the first `size`
    multiples of i+1.

    >>> multiplication_table(3)
    [[1, 2, 3], [2, 4, 6], [3, 6, 9]]
    """
    # The original built each row by repeated addition with two running
    # counters (k == row+1, s == running multiple); a comprehension
    # expresses the same table directly and removes the mutable state.
    return [[(row + 1) * (col + 1) for col in range(size)] for row in range(size)]
# Codewars-style smoke test; `Test` comes from the project-local
# TestFunction module (not a standard test framework).
test = Test(None)
test.describe("Basic Tests")
test.it("Should pass basic tests")
test.assert_equals(multiplication_table(3), [[1,2,3],[2,4,6],[3,6,9]])
|
import pandas as pd
import numpy as np
# Build pandas Series from dictionary data (subject name -> score).
# student1's Korean-language ('국어') score is NaN on purpose.
student1 = pd.Series({'국어':np.nan,'영어':80,'수학':90})
student2 = pd.Series({'수학':80,'국어':90})
print(student1,student2, sep='\n\n')
print()
print("# 두 학생의 과목별 점수로 사칙연산 수행")
# Element-wise arithmetic on the two students' per-subject scores.
# fill_value=0 substitutes 0 for subjects missing from either Series;
# NaN scores still propagate as NaN.
sr_add = student1.add(student2,fill_value=0)  # addition
sr_sub = student1.sub(student2,fill_value=0)  # subtraction
sr_mul = student1.mul(student2,fill_value=0)  # multiplication
sr_div = student1.div(student2,fill_value=0)  # division
print("#사칙연산 결과를 데이터프레임으로 합치기 (시리즈-> 데이터프레임)")
# Stack the four result Series as rows of one DataFrame (Series -> DataFrame).
result = pd.DataFrame([sr_add,sr_sub,sr_mul,sr_div], index=['덧셈','뺄셈','곱셈','나눗셈'])
print(result)
|
'''
Given an undirected graph G having positive weights and N vertices.
You start with having a sum of M money. For passing through a vertex i, you must pay S[i] money.
If you don't have enough money - you can't pass through that vertex. Find the shortest path from vertex 1 to vertex N,
respecting the above conditions; or state that such path doesn't exist. If there exist more than one path having the
same length, then output the cheapest one. Restrictions: 1<N<=100 ; 0<=M<=100 ; for each i, 0<=S[i]<=100.
'''
if __name__ == '__main__':
    # Dijkstra over (vertex, money-left) states: minim[i][j] is the
    # shortest distance to vertex i arriving with j money remaining.
    G = [
        [0,1,2,0],
        [1,0,0,2],
        [2,0,0,1],
        [0,2,1,0]
    ]
    n = len(G)
    M = 20
    S = [0,12,8,5]
    INF = 999999

    def Neighbours(v):
        """Vertices adjacent to v (0 in the weight matrix means no edge).

        Bug fix: this helper was called below but never defined anywhere,
        so the script crashed with NameError on the first relaxation.
        """
        return [u for u in range(n) if G[v][u] > 0]

    visited = set()
    minim = [[INF for j in range(M+1)] for i in range(n)]
    minim[0][M] = 0
    while(True):
        # Pick the unvisited (vertex, money) state with the smallest distance.
        minDist = (INF + 1)
        minState = ()
        for i in range(len(minim)):
            for j in range(len(minim[0])):
                if (i,j) not in visited:
                    if minDist > minim[i][j]:
                        minDist = minim[i][j]
                        minState = (i,j)
        if minDist == INF:
            break
        visited.add(minState)
        # NOTE(review): stops after n settled states although the state
        # space has n*(M+1) states -- confirm this early exit is intended.
        if len(visited) == n:
            break
        # Relax edges; entering nbr costs S[nbr] money, which must stay >= 0.
        for nbr in Neighbours(minState[0]):
            if (minState[1]-S[nbr])>=0 and ( minim[nbr][minState[1]-S[nbr]]>(minim[minState[0]][minState[1]] + G[minState[0]][nbr]) ):
                minim[nbr][minState[1]-S[nbr]] = minim[minState[0]][minState[1]] + G[minState[0]][nbr]
    # Among all money-left values at the target vertex, pick the shortest
    # path, breaking ties by the most money left (the cheapest path).
    minValue = INF
    moneyLeft = -1
    for j in range(M+1):
        if minValue > minim[n-1][j]:
            minValue = minim[n-1][j]
            moneyLeft = j
        if minValue == minim[n-1][j]:
            if moneyLeft < j:
                moneyLeft = j
    if minValue == INF:
        print("No path exists")
    else:
        print("Shortest path has length {0} and money left is {1}".format(minValue, moneyLeft))
import os
import csv
import pandas as pd
###DEPRECATED BY CSV FORMATTER
def masscsvformatter(filename, olm):
    """Convert a fixed-layout text export in the working directory to CSV.

    filename: input file name, relative to os.getcwd().
    olm: 1 -> parse the structured layout (column count on line 8,
         receiver x/y coordinates on lines 9-10, data from line 16 on);
         any other value -> plain tab-to-comma conversion.

    NOTE(review): `page4` is a global defined elsewhere (a GUI page);
    this function still requires it at runtime.
    """
    print("Formatting " + filename)
    page4.update_idletasks()
    file = open(os.path.join(os.getcwd(), filename))
    outpname = filename.split('.')[0] + ".csv"
    outpath = os.path.join(os.getcwd(), outpname)
    reader = csv.reader(file)
    x = 1
    if (olm == 1):
        for i, row in enumerate(reader):
            if i == 8:
                length = int(len(row[0].split())) - 1
                writer = csv.writer(open(outpath, "w"), delimiter=',', lineterminator='\n')
                header = ['year', 'day', 'hour']
                header.extend(list(range(1, length)))
                writer.writerow(header)
            if i == 9:
                new_row = []
                if x == 1:
                    # Bug fix: in Python 3 `filter` returns an iterator and
                    # cannot be sliced; materialize it with list() first.
                    fields = list(filter(None, str(row).split(" ")))[2:]
                    for j in range(1, len(fields) + 1):
                        new_row.append("Rec " + str(j))
                    loclist = pd.DataFrame(new_row)
                    new_row = []
                    x = 0
                for j in list(filter(None, str(row).split(" ")))[2:]:
                    new_row.append(float(j.strip("']")))
                loclist['x'] = new_row
            if i == 10:
                new_row = []
                for j in list(filter(None, str(row).split(" ")))[2:]:
                    new_row.append(float(j.strip("']")))
                loclist['y'] = new_row
                locname = filename + "_Locations.csv"
                loclist.to_csv(locname)
            # Bug fix: `row[0] is not ""` is an identity check, not an
            # equality check; use != to skip empty data lines reliably.
            if i > 15 and row[0] != "":
                year = row[0].split()[0]
                day = row[0].split()[1]
                hour = row[0].split()[2]
                columns = []
                new_row = [year, day, hour]
                length = int(len(row[0].split()))
                for j in range(3, length):
                    # float() can never return ""; the original
                    # `value is not ""` test was a no-op, so append directly.
                    columns.append(float(row[0].split()[j]))
                new_row.extend(columns)
                writer = csv.writer(open(outpath, "a+"), delimiter=',', lineterminator='\n')
                writer.writerow(new_row)
    else:
        # Plain conversion: tab-separated text -> comma-separated CSV.
        # Bug fix: the csv module in Python 3 needs text-mode files; the
        # original "rb"/"wb" modes raise TypeError.
        txt_file = filename
        csv_file = outpath
        in_txt = csv.reader(open(txt_file), delimiter='\t')
        out_csv = csv.writer(open(outpath, 'w', newline=''))
        out_csv.writerows(in_txt)
'''
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server='www.ntvspor.com'
port=80
server_ip=socket.gethostbyname(server)
request="GET / HTTP/1.1\nHost: "+server+"\n\n"
s.connect((server,port))
s.send(request.encode())
sonuc=s.recv(1024)
print(sonuc)
'''
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server='www.ntvspor.com'
def port_tarama(port):
    """Return True if `server` accepts a TCP connection on `port`.

    Bug fix: the original reused one module-level socket for every probe.
    A socket cannot connect twice (and is unusable after a failed
    connect), so every port after the first appeared closed. Use a fresh
    socket per probe and always close it.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.settimeout(2)  # don't hang for minutes on filtered ports
    try:
        probe.connect((server, port))
        return True
    except OSError:  # refused / timed out / DNS failure -> port closed
        return False
    finally:
        probe.close()
# Scan ports 1-25 and report each as open ("Açık") or closed ("Kapalı").
for i in range(1, 26):
    if (port_tarama(i)):
        print("Port ", i, "Açık")
    else:
        print("Port ", i, "Kapalı")
|
import unittest
from calculator import calculator
from csvreader import csvreader
class MyTestCase(unittest.TestCase):
    """Data-driven tests for the project `calculator` class.

    Each test reads rows from a CSV fixture with columns 'Value 1',
    'Value 2' and 'Result', and checks both the operation's return value
    and the calculator's stored `result` attribute.
    """
    def setUp(self) -> None:
        # Fresh instance per test so `result` cannot leak between tests.
        self.calculator = calculator()
    def test_instantiate_calculator(self):
        self.assertIsInstance(self.calculator, calculator)
    def test_addition(self):
        test_data = csvreader('../src/csv/UnitTestAddition.csv').data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.addition(row['Value 1'], row['Value 2']), result)
            # Consistency fix: every other arithmetic test also verifies
            # the stored result attribute; addition was missing it.
            self.assertEqual(self.calculator.result, result)
    def test_subtraction(self):
        test_data = csvreader('../src/csv/UnitTestSubtraction.csv').data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.subtraction(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_times(self):
        test_data = csvreader('../src/csv/UnitTestMultiplication.csv').data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.multiply(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_div(self):
        test_data = csvreader('../src/csv/UnitTestDivision.csv').data
        for row in test_data:
            result = float(row['Result'])
            # assertAlmostEqual: division results are subject to float error.
            self.assertAlmostEqual(self.calculator.division(row['Value 1'], row['Value 2']), result)
            self.assertAlmostEqual(self.calculator.result, result)
    def test_square(self):
        test_data = csvreader('../src/csv/UnitTestSquare.csv').data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.square_(row['Value 1']), result)
            self.assertEqual(self.calculator.result, result)
    def test_sqrt(self):
        test_data = csvreader('../src/csv/UnitTestSquareRoot.csv').data
        for row in test_data:
            result = float(row['Result'])
            self.assertAlmostEqual(self.calculator.sqrt_(row['Value 1']), result)
            self.assertAlmostEqual(self.calculator.result, result)
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
启动jvm
@Author : pgsheng
@Time : 2018/7/24 10:44
"""
import platform
import jpype
from public import config
from public.log import Log
class JVMStart(object):
    """Start and stop the Java virtual machine via jpype."""
    def __init__(self):
        self.log = Log("jvm初始化").get_logger()
    def start_jvm(self, jar_list):
        """Start the JVM with the given jar names on its class path.

        jar_list: jar file names located under config.sdk_path.
        Returns True when the JVM is (already) running, False otherwise.
        """
        # Default JVM library path for this machine.
        jvm_path = jpype.getDefaultJVMPath()
        # Build the class path; Windows separates entries with ';',
        # Linux with ':'.
        ext_classpath = ''
        sysstr = platform.system()
        if sysstr == "Windows":
            ext_classpath = config.sdk_path
            for name in jar_list:
                ext_classpath += ';' + config.sdk_path + '%s' % name
        elif sysstr == "Linux":
            ext_classpath = config.sdk_path + 'sdk'
            for name in jar_list:
                # Bug fix: the original used '=' here, so each jar
                # overwrote the whole class path instead of appending.
                ext_classpath += ':' + config.sdk_path + '%s' % name
        if not jpype.isJVMStarted():
            # Start the JVM and load the jars.
            jpype.startJVM(jvm_path, '-ea', '-Djava.class.path=%s' % ext_classpath)
            jpype.java.lang.System.out.println("startJVM success!")
            return jpype.isJVMStarted()
        return True
    def shutdown_jvm(self):
        """Shut the JVM down if it is running."""
        if jpype.isJVMStarted():
            self.log.info("关闭jvm")
            jpype.shutdownJVM()
if __name__ == '__main__':
    # Smoke test. These are two separate JVMStart instances, but the JVM
    # itself is process-global (jpype.isJVMStarted), so shutdown still works.
    JVMStart().start_jvm(['jar_test.jar'])
    JVMStart().shutdown_jvm()
"""
jpype安装教程:https://www.jianshu.com/p/a701f021df1d
"""
|
"""Unit Tests for the module"""
import logging
from django.test import TestCase
LOGGER = logging.getLogger(name="django-errors")
class ErrorsTestCase(TestCase):
    """Test Case for django-errors: custom error templates and redirects."""
    def setUp(self):
        """Set up common assets for tests"""
        LOGGER.debug("Tests setUp")
    def tearDown(self):
        """Remove Test Data"""
        LOGGER.debug("Tests tearDown")
    def test_not_exist_urls(self):
        """A nonexistent URL must return 404 rendered with the custom template."""
        LOGGER.debug("404 Test URL not exist")
        response = self.client.get("/UrlShouldNotExist/", follow=True)
        LOGGER.debug(response)
        self.assertEqual(404, response.status_code)
        self.assertTemplateUsed(response, "errors/404.html")
    def test_page_auth_protected(self):
        """An auth-protected page must redirect anonymous users to login."""
        LOGGER.debug("Test URL accept only get")
        response = self.client.get("/admin/", follow=True)
        LOGGER.debug(response)
        # todo: test auth protected: NOT_AUTHORIZED (should be 401)
        # self.assertEqual(403, response.status_code)
        # self.assertTemplateUsed(response, 'errors/403.html')
        self.assertRedirects(response, "/admin/login/?next=%2Fadmin%2F")
    def test_page_without_privileges(self):
        """An auth-protected page must redirect anonymous users to login.

        NOTE(review): this is an exact duplicate of
        test_page_auth_protected -- presumably it should log in a user
        *without* admin privileges and expect 403 instead.
        """
        LOGGER.debug("Test URL accept only get")
        response = self.client.get("/admin/", follow=True)
        LOGGER.debug(response)
        # todo: test auth protected: NOT_AUTHORIZED (should be 401)
        # self.assertEqual(403, response.status_code)
        # self.assertTemplateUsed(response, 'errors/403.html')
        self.assertRedirects(response, "/admin/login/?next=%2Fadmin%2F")
    # def test_X_redirect_urls(self):
    #     """Test that redirects end urls"""
    #     LOGGER.debug("Test X Redirect URLs")
    #     response = self.client.get('/403/', follow=True)
    #     self.assertRedirects(response, "http://testserver/admin/login/?next=/admin/")
    def test_method_not_allowed_get(self):
        """GET on a POST-only view must return 405 with the custom template."""
        LOGGER.debug("Test URL accept only get")
        response = self.client.get("/test-method-only-post/", follow=True)
        LOGGER.debug(response)
        self.assertEqual(405, response.status_code)
        self.assertTemplateUsed(response, "errors/405.html")
    def test_method_not_allowed_post(self):
        """POST on a GET-only view must return 405 with the custom template."""
        LOGGER.debug("Test URL accept only get")
        response = self.client.post("/test-method-only-get/", follow=True)
        LOGGER.debug(response)
        self.assertEqual(405, response.status_code)
        self.assertTemplateUsed(response, "errors/405.html")
|
from start_utils import db, login_manager, app
from datetime import datetime
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the session user by primary key."""
    pk = int(user_id)
    return User.query.get(pk)
class User(db.Model, UserMixin):
    """Application user; doubles as the Flask-Login identity via UserMixin."""
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(20), unique=True,nullable=False)
    email = db.Column(db.String(120), unique=True,nullable=False)
    # File name of the avatar image, not the image data itself.
    image_file = db.Column(db.String(20),nullable=False, default='default.jpg')
    # Stored credential; 60 chars suggests a bcrypt hash -- TODO confirm.
    password = db.Column(db.String(60),nullable=False)
    # 0/1 flag; stored as Integer rather than Boolean.
    email_verified = db.Column(db.Integer,default=0)
    posts = db.relationship('Post', backref='author',lazy=True)
    posts_liked_by_user = db.relationship('PostLikes', backref='postlikes',lazy=True)
    def get_reset_token(self, expires_sec=1800):
        """Return a signed password-reset token valid for `expires_sec` seconds.

        NOTE(review): TimedJSONWebSignatureSerializer was removed from
        itsdangerous >= 2.1, so this code requires an older itsdangerous.
        """
        s = Serializer(app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id':self.id}).decode('utf-8')
    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in `token`, or None if invalid/expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except:
            # Any failure (bad signature, expired, malformed) -> no user.
            return None
        return User.query.get(user_id)
    def like_post(self, post):
        # Adds the like to the session only; the caller must commit.
        if not self.has_liked_post(post):
            like = PostLikes(user_id=self.id, post_id=post.id)
            db.session.add(like)
    def unlike_post(self, post):
        # Deletes the matching PostLikes row, if any.
        if self.has_liked_post(post):
            PostLikes.query.filter_by(
                user_id=self.id,
                post_id=post.id).delete()
    def has_liked_post(self, post):
        # True when a PostLikes row exists for (this user, post).
        return PostLikes.query.filter(
            PostLikes.user_id == self.id,
            PostLikes.post_id == post.id).count() > 0
    def __repr__(self):
        return "User('{}','{}','{}')".format(self.username,self.email,self.image_file)
class Post(db.Model):
    """A blog post authored by a User."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(100), nullable=False)
    # Defaults to datetime.utcnow (naive UTC timestamp).
    date_posted = db.Column(db.DateTime, nullable=False,default=datetime.utcnow)
    content = db.Column(db.Text, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    likes = db.relationship('PostLikes', backref='post', lazy=True)
    def __repr__(self):
        return "Post('{}','{}')".format(self.title,self.date_posted)
class PostLikes(db.Model):
    """Association row recording that a user liked a post."""
    id = db.Column(db.Integer, primary_key = True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('post.id'))
    def __repr__(self):
        return "PostLikes('{}','{}')".format(self.user_id,self.post_id)
# Make a dictionary from two parallel lists: each name in `keys` is
# paired with the value at the same position in `values`.
keys = ["one", "two", "three", "four", "five"]
values = [1, 2, 3, 4, 5]
# zip() pairs the lists positionally and dict() consumes the pairs --
# this replaces the original manual index-counting while loop.
dic = dict(zip(keys, values))
print(dic)
"""
Capture Regions on Board
Problem Description
Given a 2-D board A of size N x M containing 'X' and 'O', capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
Problem Constraints
1 <= N, M <= 1000
Input Format
First and only argument is a N x M character matrix A.
Output Format
Make changes to the the input only as matrix is passed by reference.
Example Input
Input 1:
A = [
[X, X, X, X],
[X, O, O, X],
[X, X, O, X],
[X, O, X, X]
]
Input 2:
A = [
[X, O, O],
[X, O, X],
[O, O, O]
]
Example Output
Output 1:
After running your function, the board should be:
A = [
[X, X, X, X],
[X, X, X, X],
[X, X, X, X],
[X, O, X, X]
]
Output 2:
After running your function, the board should be:
A = [
[X, O, O],
[X, O, X],
[O, O, O]
]
Example Explanation
Explanation 1:
O in (4,2) is not surrounded by X from below.
Explanation 2:
No O's are surrounded.
"""
from collections import deque
def valid(A, i, j, visited):
    """True when (i, j) lies inside board A and holds an uncaptured 'O'.

    `visited` is accepted for interface compatibility but not used.
    """
    inside = 0 <= i < len(A) and 0 <= j < len(A[0])
    return inside and A[i][j] == 'O'
def bfs(A, visited, i, j):
    """Flood-fill: relabel the 'O' region containing (i, j) as 'Y' in place.

    A: board (list of lists of single-character strings), mutated.
    visited: accepted for interface compatibility but not used.
    Fix: the original marked cells 'Y' only when dequeued, so the same
    cell could be enqueued several times via different neighbours;
    marking at enqueue time keeps each cell in the queue at most once.
    """
    rows, cols = len(A), len(A[0])
    queue = deque([(i, j)])
    A[i][j] = 'Y'
    while queue:
        x, y = queue.popleft()
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            # In-bounds neighbour still labelled 'O' -> part of the region.
            if 0 <= nx < rows and 0 <= ny < cols and A[nx][ny] == 'O':
                A[nx][ny] = 'Y'  # mark on enqueue to prevent duplicates
                queue.append((nx, ny))
# We are traversing First column, Last column, First row and Last row and if 'O' if found:
# We do bfs starting from thatin the four directions for adjacent 'O' and mark them 'Y'
# These 'Y' marked cells can not be captured by 'X'
class Solution:
    # @param A : list of list of chars
    def solve(self, A):
        """Capture every 'O' region not connected to the border.

        Border-connected 'O' regions are flood-filled to 'Y' (safe);
        remaining 'O's are captured as 'X'; 'Y's are restored to 'O'.
        Mutates A in place and returns it.
        """
        rows, cols = len(A), len(A[0])
        visited = [[False] * cols for _ in range(rows)]
        # Every border cell, in the same order the original scanned them:
        # top row, bottom row, first column, last column.
        border = (
            [(0, j) for j in range(cols)]
            + [(rows - 1, j) for j in range(cols)]
            + [(i, 0) for i in range(rows)]
            + [(i, cols - 1) for i in range(rows)]
        )
        for i, j in border:
            if A[i][j] == 'O':
                bfs(A, visited, i, j)
        # Safe cells ('Y') become 'O' again; trapped 'O's are captured.
        for row in A:
            for j, cell in enumerate(row):
                if cell == 'Y':
                    row[j] = 'O'
                elif cell == 'O':
                    row[j] = 'X'
        return A
|
from django.contrib import admin
from .models import Comment
# Register your models here.
# Register your models here.
@admin.register(Comment)
class ImageAdmin(admin.ModelAdmin):
    """Django admin configuration for the Comment model.

    NOTE(review): the class name says "Image" but it administers
    Comment -- likely a copy-paste leftover; renaming is cosmetic only.
    """
    # Columns shown in the admin change list.
    list_display = ['owner', 'belongproduct', 'content', 'reply']
    # Sidebar filter.
    list_filter = ['created']
|
# Free-fall timer: t = sqrt(2*h/g) for a drop from height h.
from math import sqrt
# Ask the user for the tower height in metres.
h = float(input("Proporciona la altura en metros de la torre: "))
g = 9.81  # gravitational constant (m/s^2)
t = sqrt(2 * h / g)  # fall time in seconds
# Bug fix: the original used the Python 2 print statement
# (print'...'), which is a SyntaxError in Python 3; call print().
print('El tiempo de caida para una altura ', h, 'm es: ', t, 's')
|
def allowed_bar(my_age):
    """Return the given age increased by three."""
    return my_age + 3
person_allowed = allowed_bar(18)
print(person_allowed)
def gender_selector(sex='uknown'):
    """Print the expanded gender label for a one-letter code.

    'm' -> 'male', 'f' -> 'female'; any other value is printed unchanged.
    NOTE(review): the default 'uknown' looks like a typo for 'unknown';
    kept as-is to avoid changing observable output.
    """
    # Bug fix: `is` compares object identity and only worked here thanks
    # to CPython's string interning; use == for value comparison.
    if sex == 'm':
        sex = "male"
    elif sex == 'f':
        sex = "female"
    print(sex)
gender_selector()
|
import babel
from flask import request
from sqlalchemy.ext.hybrid import hybrid_property
def chunks(lst, size):
    """Yield consecutive slices of `lst`, each at most `size` items long."""
    for start in range(0, len(lst), size):
        piece = lst[start:start + size]
        yield piece
def get_locale():
    """Current UI locale from the request's 'language' cookie ('ru' default)."""
    cookie_jar = request.cookies
    return cookie_jar.get('language', 'ru')
def cast_locale(obj, locale):
    """
    Cast given locale to string. Supports also callbacks that return locales.
    :param obj:
        Object or class to use as a possible parameter to locale callable
    :param locale:
        Locale object or string or callable that returns a locale.
    """
    if callable(locale):
        # Try the zero-argument form first; if the callable needs an
        # argument, the call raises TypeError and we retry with `obj`.
        # NOTE(review): a TypeError raised *inside* a zero-arg callable is
        # indistinguishable from a wrong-arity call here.
        try:
            locale = locale()
        except TypeError:
            locale = locale(obj)
    if isinstance(locale, babel.Locale):
        return str(locale)
    # Already a string (or something string-like): return unchanged.
    return locale
class TranslationHybrid:
    """Factory for SQLAlchemy hybrid_property accessors that pick the
    model column matching the current locale.

    Call an instance with keyword arguments mapping locale name ->
    column attribute (e.g. name_ru=..., name_en=...) to get a
    hybrid_property whose getter/setter use the current locale's column.
    """
    def __init__(self, current_locale, default_locale):
        # Either locale values or callables resolved via cast_locale().
        self.current_locale = current_locale
        self.default_locale = default_locale
    def getter_factory(self, **kwargs):
        """
        Return a hybrid_property getter function for given attribute. The
        returned getter first checks if object has translation for current
        locale. If not it tries to get translation for default locale. If there
        is no translation found for default locale it returns None.

        NOTE(review): the body only looks up the current locale; a missing
        key raises KeyError rather than falling back to default_locale,
        contrary to this docstring -- confirm intended behaviour.
        """
        def getter(obj):
            current_locale = cast_locale(obj, self.current_locale)
            attr = kwargs[current_locale]
            return getattr(obj, attr.key)
        return getter
    def setter_factory(self, **kwargs):
        # Symmetric setter: write through to the current locale's column.
        def setter(obj, value):
            locale = cast_locale(obj, self.current_locale)
            attr = kwargs[locale]
            setattr(obj, attr.key, value)
        return setter
    def __call__(self, **kwargs):
        return hybrid_property(fget=self.getter_factory(**kwargs), fset=self.setter_factory(**kwargs))
|
# Version 1
# Import libraries
import pygame
from pygame.locals import *
from control import Control
from plane import Plane
from fuel import Fuel
from bullet import Bullet
from gui import GUI
from heart import Heart
# Set the screen resolution (fullscreen)
win = pygame.display.set_mode((1440, 900), FULLSCREEN)
# win = pygame.display.set_mode((500, 500))
# Set the window title
pygame.display.set_caption("Plane Game")
# Control-type variable (input handling / game flags)
control = Control()
# Plane-type objects, one per player
plane1 = Plane("yellow")
plane2 = Plane("green")
# GUI-type object
gui = GUI()
# Fuel-type objects
fuels = []
for i in range(0, 2):
    fuels.append(Fuel())
# Bullet-type objects
bullets = []
for i in range(0, 10):
    bullets.append(Bullet())
# Heart-type objects
hearts = []
for i in range(0, 2):
    hearts.append(Heart())
# Main loop: runs until control.flag_game is cleared
while control.flag_game:
    control.Control()
    control.DrawBackground(win)
    plane1.Animation(win)
    plane1.Shoot(win, plane2)
    plane1.Fuel()
    plane1.Health(win, control)
    # plane1.Health(win, (243, 224, 119))
    # plane1.Fuel(win, (243, 224, 119))
    if plane1.bexplosion:
        plane1.Explosion(win, control)
    plane2.Animation(win)
    plane2.Shoot(win, plane1)
    plane2.Fuel()
    plane2.Health(win, control)
    # plane2.Health(win, (119, 200, 176))
    # plane2.Fuel(win, (119, 200, 176))
    for fuel in fuels:
        fuel.Draw(win, plane1, plane2)
    for bullet in bullets:
        bullet.Draw(win, plane1, plane2)
    for heart in hearts:
        heart.Draw(win, plane1, plane2)
    gui.Draw(win, plane1, plane2)
exit()
# Project Euler 16: sum of the decimal digits of 2**1000.
# The original walked the string with a manual index/while loop; a
# generator expression over the digits is the idiomatic equivalent.
total = sum(int(digit) for digit in str(2 ** 1000))
print(total)
|
class Solution:
    def maxProfit(self, prices):
        """Best profit from one buy followed by one sell.

        prices: list of daily prices. Returns 0 for an empty list or
        when no profitable trade exists.

        The original kept a dp list only to read its last element; a
        running maximum gives the same answer in O(1) extra space.
        """
        if not prices:
            return 0
        best = 0
        min_price = prices[0]
        for price in prices[1:]:
            # Best trade so far: sell today at `price`, having bought at
            # the cheapest price seen before today.
            best = max(best, price - min_price)
            min_price = min(min_price, price)
        return best
if __name__ == "__main__":
    # Demo: expected best profit is 5 (buy at 1, sell at 6).
    prices = [7, 1, 5, 3, 6]
    s = Solution()
    print(s.maxProfit(prices))
|
# Google Code Jam 2016 Qualification A "Counting Sheep": find the first
# multiple of n whose cumulative multiples have shown all ten digits.
# Bug fix: the original mixed Python 3 input() with Python 2 `xrange`
# and print statements, so it could not run under either interpreter;
# ported fully to Python 3.
tests = int(input())
for t in range(1, tests + 1):
    n = int(input())
    if n == 0:
        # Multiples of 0 never show new digits.
        print("Case #{}: INSOMNIA".format(t))
        continue
    seen = set()
    i = 1
    answer = n
    while len(seen) < 10:
        answer = n * i
        # Accumulate the decimal digits seen so far.
        seen = seen.union(set(str(answer)))
        i += 1
    print("Case #{}: {}".format(t, answer))
|
# Functions to compare two files based different attributes
from nltk.tokenize import sent_tokenize
def lines(a, b):
    """Return lines in both a and b"""
    # Compare the two texts line-by-line; sets drop duplicates and make
    # the intersection cheap.
    lines_a = set(a.split("\n"))
    lines_b = set(b.split("\n"))
    shared = lines_a & lines_b
    return list(shared)
def sentences(a, b):
    """Return sentences in both a and b"""
    # Tokenize each text into sentences with nltk, dedupe via sets, and
    # keep only the sentences common to both.
    sents_a = set(sent_tokenize(a, language='english'))
    sents_b = set(sent_tokenize(b, language='english'))
    shared = sents_a & sents_b
    return list(shared)
# Define a helper function to return substrings of length n
def substr(a, n):
"""Return substrings of length n from string a"""
# List to store substrings
subs = []
# Loop through a and create substrings of length n
for i in range(len(a) - (n - 1)):
subs.append(a[i:i + n])
return subs
def substrings(a, b, n):
    """Return substrings of length n in both a and b"""
    # Intersect the two windows' substring sets; duplicates collapse.
    shared = set(substr(a, n)) & set(substr(b, n))
    return list(shared)
|
# install package elm
# pip install elm
import elm
import numpy as np
def main():
    """Train an ELM kernel classifier and write per-class predictions.

    Reads every 5th row of the training CSVs, fits elm.ELMKernel, then
    writes a Kaggle-style one-hot CSV ('predict.csv') for the test set
    over 39 crime categories.
    """
    trainFeature = np.genfromtxt('trainFeature.csv', delimiter=',')[0::5]
    trainLabel = np.genfromtxt('trainLabel.csv', delimiter='\n')[0::5]
    testFeature = np.genfromtxt('testFeature.csv', delimiter=',')
    # Dummy labels: the elm API expects a label column even for test data.
    testLabel = np.asarray([0]*testFeature.shape[0])
    train = np.concatenate((np.asarray([trainLabel]).T, trainFeature), axis=1)
    test = np.concatenate((np.asarray([testLabel]).T, testFeature), axis=1)
    elmk = elm.ELMKernel()
    elmk.search_param(train, cv="kfold", of="accuracy", eval=10)
    tr_result = elmk.train(train)
    te_result = elmk.test(test)
    predicted = te_result.predicted_targets
    predicted_class = np.round(predicted).astype(int)
    # One-hot encode the predicted class over the 39 categories.
    # Bug fix: `xrange` is Python 2 only and raised NameError here; the
    # rest of the file is Python 3, so use range.
    dec = [[0] * 39 for i in range(testFeature.shape[0])]
    for i in range(testFeature.shape[0]):
        dec[i][predicted_class[i]] = 1
    header = "Id,ARSON,ASSAULT,BAD CHECKS,BRIBERY,BURGLARY,DISORDERLY CONDUCT,DRIVING UNDER THE INFLUENCE,DRUG/NARCOTIC,DRUNKENNESS,EMBEZZLEMENT,EXTORTION,FAMILY OFFENSES,FORGERY/COUNTERFEITING,FRAUD,GAMBLING,KIDNAPPING,LARCENY/THEFT,LIQUOR LAWS,LOITERING,MISSING PERSON,NON-CRIMINAL,OTHER OFFENSES,PORNOGRAPHY/OBSCENE MAT,PROSTITUTION,RECOVERED VEHICLE,ROBBERY,RUNAWAY,SECONDARY CODES,SEX OFFENSES FORCIBLE,SEX OFFENSES NON FORCIBLE,STOLEN PROPERTY,SUICIDE,SUSPICIOUS OCC,TREA,TRESPASS,VANDALISM,VEHICLE THEFT,WARRANTS,WEAPON LAWS"
    dec = np.asarray(dec)
    # fmt built before the Id column is inserted: '%d' for Id plus one
    # '%1.3f' per class column.
    fmt=['%d'] + ['%1.3f'] * dec.shape[1]
    dec = np.insert(dec, 0, range(len(dec)), axis=1)
    np.savetxt("predict.csv", dec, delimiter=",", header=header, fmt=fmt, comments="")
if __name__ == "__main__":
    # execute only if run as a script
    main()
|
class RM3DWriter:
    """Serialise rows of string fields into the RM3D flat-file format."""
    header = 'START-OF-FILE\nDATEFORMAT=YYYYMMDD\nFIELDSEPARATOR=TAB\nSUBFIELDSEPARATOR=PIPE\nDECIMALSEPARATOR=PERIOD\nSTART-OF-DATA'
    footer = 'END-OF-DATA\nEND-OF-FILE\n'
    def __init__(self):
        pass
    def produce_string(self, rm3d_list):
        """Join each row's fields with tabs and wrap with header/footer."""
        rows = []
        for record in rm3d_list:
            rows.append('\t'.join(record))
        body = '\n'.join(rows)
        return '\n'.join([self.header, body, self.footer])
|
from StringBuilder import StringBuilder
class PythonBuilder(StringBuilder):
    """String builder specialised for Python source: tab indentation and
    '#' comments, with support for nesting child builders."""
    def __init__(self):
        self.stringList = []       # accumulated source lines
        self.indentModel = "\t"    # one level of indentation
        self.commentModel = "#"    # comment prefix
        self.level = 0             # current nesting depth
        self.builderList = []      # nested child builders
    @property
    def Level(self):
        return self.level
    def AppendBuilder(self, builder):
        """Append every line of `builder`, indented one level deeper.

        Bug fix: the original called
        `self.AppendLine("{0}{1}").Format([...])`, i.e. it invoked
        `.Format` on AppendLine's return value (AttributeError at
        runtime) and passed both arguments as a single list. Format the
        string first, then append it.
        """
        for line in builder.List:
            self.AppendLine("{0}{1}".format((self.Level + 1) * self.indentModel, line))
    # def ToString(self):
    #     for a in self.builderList:
    #         print("{0}{1}".format(a.level*a.indentModel,a.ToString()))
    #         for b in a.builderList:
    #             b.ToString()
|
"""Channels module for Zigbee Home Automation."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any
from typing_extensions import Self
import zigpy.endpoint
import zigpy.zcl.clusters.closures
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ( # noqa: F401
base,
closures,
general,
homeautomation,
hvac,
lighting,
lightlink,
manufacturerspecific,
measurement,
protocol,
security,
smartenergy,
)
from .. import (
const,
device as zha_core_device,
discovery as zha_disc,
registries as zha_regs,
)
if TYPE_CHECKING:
from ...entity import ZhaEntity
from ..device import ZHADevice
_ChannelsDictType = dict[str, base.ZigbeeChannel]
class Channels:
    """All discovered channels of a device."""
    def __init__(self, zha_device: ZHADevice) -> None:
        """Initialize instance."""
        # One ChannelPool per non-ZDO endpoint; populated via add_pool().
        self._pools: list[ChannelPool] = []
        self._power_config: base.ZigbeeChannel | None = None
        self._identify: base.ZigbeeChannel | None = None
        self._unique_id = str(zha_device.ieee)
        # Endpoint 0 is the ZDO endpoint and gets a dedicated channel.
        self._zdo_channel = base.ZDOChannel(zha_device.device.endpoints[0], zha_device)
        self._zha_device = zha_device
    @property
    def pools(self) -> list[ChannelPool]:
        """Return channel pools list."""
        return self._pools
    @property
    def power_configuration_ch(self) -> base.ZigbeeChannel | None:
        """Return power configuration channel."""
        return self._power_config
    @power_configuration_ch.setter
    def power_configuration_ch(self, channel: base.ZigbeeChannel) -> None:
        """Power configuration channel setter."""
        # First writer wins: at most one power-config channel per device.
        if self._power_config is None:
            self._power_config = channel
    @property
    def identify_ch(self) -> base.ZigbeeChannel | None:
        """Return identify channel."""
        return self._identify
    @identify_ch.setter
    def identify_ch(self, channel: base.ZigbeeChannel) -> None:
        """Identify channel setter."""
        # First writer wins: at most one identify channel per device.
        if self._identify is None:
            self._identify = channel
    @property
    def zdo_channel(self) -> base.ZDOChannel:
        """Return ZDO channel."""
        return self._zdo_channel
    @property
    def zha_device(self) -> ZHADevice:
        """Return parent ZHA device."""
        return self._zha_device
    @property
    def unique_id(self) -> str:
        """Return the unique id for this channel."""
        return self._unique_id
    @property
    def zigbee_signature(self) -> dict[int, dict[str, Any]]:
        """Get the zigbee signatures for the pools in channels."""
        # Each pool contributes an (endpoint_id, signature-dict) pair.
        return {
            signature[0]: signature[1]
            for signature in [pool.zigbee_signature for pool in self.pools]
        }
    @classmethod
    def new(cls, zha_device: ZHADevice) -> Self:
        """Create new instance."""
        channels = cls(zha_device)
        for ep_id in sorted(zha_device.device.endpoints):
            channels.add_pool(ep_id)
        return channels
    def add_pool(self, ep_id: int) -> None:
        """Add channels for a specific endpoint."""
        # Endpoint 0 (ZDO) is handled by _zdo_channel, never by a pool.
        if ep_id == 0:
            return
        self._pools.append(ChannelPool.new(self, ep_id))
    async def async_initialize(self, from_cache: bool = False) -> None:
        """Initialize claimed channels."""
        # ZDO first, then all endpoint pools concurrently.
        await self.zdo_channel.async_initialize(from_cache)
        self.zdo_channel.debug("'async_initialize' stage succeeded")
        await asyncio.gather(
            *(pool.async_initialize(from_cache) for pool in self.pools)
        )
    async def async_configure(self) -> None:
        """Configure claimed channels."""
        await self.zdo_channel.async_configure()
        self.zdo_channel.debug("'async_configure' stage succeeded")
        await asyncio.gather(*(pool.async_configure() for pool in self.pools))
        # Announce that channel configuration finished for this device.
        async_dispatcher_send(
            self.zha_device.hass,
            const.ZHA_CHANNEL_MSG,
            {
                const.ATTR_TYPE: const.ZHA_CHANNEL_CFG_DONE,
            },
        )
    @callback
    def async_new_entity(
        self,
        component: str,
        entity_class: type[ZhaEntity],
        unique_id: str,
        channels: list[base.ZigbeeChannel],
    ):
        """Signal new entity addition."""
        # Once the device finished initialization, no new entities are queued.
        if self.zha_device.status == zha_core_device.DeviceStatus.INITIALIZED:
            return
        self.zha_device.hass.data[const.DATA_ZHA][component].append(
            (entity_class, (unique_id, self.zha_device, channels))
        )
    @callback
    def async_send_signal(self, signal: str, *args: Any) -> None:
        """Send a signal through hass dispatcher."""
        async_dispatcher_send(self.zha_device.hass, signal, *args)
    @callback
    def zha_send_event(self, event_data: dict[str, str | int]) -> None:
        """Relay events to hass."""
        self.zha_device.hass.bus.async_fire(
            const.ZHA_EVENT,
            {
                const.ATTR_DEVICE_IEEE: str(self.zha_device.ieee),
                const.ATTR_UNIQUE_ID: self.unique_id,
                ATTR_DEVICE_ID: self.zha_device.device_id,
                **event_data,
            },
        )
class ChannelPool:
"""All channels of an endpoint."""
def __init__(self, channels: Channels, ep_id: int) -> None:
"""Initialize instance."""
self._all_channels: _ChannelsDictType = {}
self._channels = channels
self._claimed_channels: _ChannelsDictType = {}
self._id = ep_id
self._client_channels: dict[str, base.ClientChannel] = {}
self._unique_id = f"{channels.unique_id}-{ep_id}"
@property
def all_channels(self) -> _ChannelsDictType:
"""All server channels of an endpoint."""
return self._all_channels
@property
def claimed_channels(self) -> _ChannelsDictType:
"""Channels in use."""
return self._claimed_channels
@property
def client_channels(self) -> dict[str, base.ClientChannel]:
"""Return a dict of client channels."""
return self._client_channels
@property
def endpoint(self) -> zigpy.endpoint.Endpoint:
"""Return endpoint of zigpy device."""
return self._channels.zha_device.device.endpoints[self.id]
@property
def id(self) -> int:
"""Return endpoint id."""
return self._id
@property
def nwk(self) -> int:
"""Device NWK for logging."""
return self._channels.zha_device.nwk
@property
def is_mains_powered(self) -> bool | None:
"""Device is_mains_powered."""
return self._channels.zha_device.is_mains_powered
@property
def manufacturer(self) -> str:
"""Return device manufacturer."""
return self._channels.zha_device.manufacturer
@property
def manufacturer_code(self) -> int | None:
"""Return device manufacturer."""
return self._channels.zha_device.manufacturer_code
@property
def hass(self) -> HomeAssistant:
"""Return hass."""
return self._channels.zha_device.hass
@property
def model(self) -> str:
"""Return device model."""
return self._channels.zha_device.model
@property
def skip_configuration(self) -> bool:
"""Return True if device does not require channel configuration."""
return self._channels.zha_device.skip_configuration
@property
def unique_id(self) -> str:
"""Return the unique id for this channel."""
return self._unique_id
@property
def zigbee_signature(self) -> tuple[int, dict[str, Any]]:
"""Get the zigbee signature for the endpoint this pool represents."""
return (
self.endpoint.endpoint_id,
{
const.ATTR_PROFILE_ID: self.endpoint.profile_id,
const.ATTR_DEVICE_TYPE: f"0x{self.endpoint.device_type:04x}"
if self.endpoint.device_type is not None
else "",
const.ATTR_IN_CLUSTERS: [
f"0x{cluster_id:04x}"
for cluster_id in sorted(self.endpoint.in_clusters)
],
const.ATTR_OUT_CLUSTERS: [
f"0x{cluster_id:04x}"
for cluster_id in sorted(self.endpoint.out_clusters)
],
},
)
@classmethod
def new(cls, channels: Channels, ep_id: int) -> Self:
"""Create new channels for an endpoint."""
pool = cls(channels, ep_id)
pool.add_all_channels()
pool.add_client_channels()
if not channels.zha_device.is_coordinator:
zha_disc.PROBE.discover_entities(pool)
return pool
@callback
def add_all_channels(self) -> None:
"""Create and add channels for all input clusters."""
for cluster_id, cluster in self.endpoint.in_clusters.items():
channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base.ZigbeeChannel
)
# really ugly hack to deal with xiaomi using the door lock cluster
# incorrectly.
if (
hasattr(cluster, "ep_attribute")
and cluster_id == zigpy.zcl.clusters.closures.DoorLock.cluster_id
and cluster.ep_attribute == "multistate_input"
):
channel_class = general.MultistateInput
# end of ugly hack
channel = channel_class(cluster, self)
if channel.name == const.CHANNEL_POWER_CONFIGURATION:
if (
self._channels.power_configuration_ch
or self._channels.zha_device.is_mains_powered
):
# on power configuration channel per device
continue
self._channels.power_configuration_ch = channel
elif channel.name == const.CHANNEL_IDENTIFY:
self._channels.identify_ch = channel
self.all_channels[channel.id] = channel
@callback
def add_client_channels(self) -> None:
"""Create client channels for all output clusters if in the registry."""
for cluster_id, channel_class in zha_regs.CLIENT_CHANNELS_REGISTRY.items():
cluster = self.endpoint.out_clusters.get(cluster_id)
if cluster is not None:
channel = channel_class(cluster, self)
self.client_channels[channel.id] = channel
    async def async_initialize(self, from_cache: bool = False) -> None:
        """Initialize claimed channels.

        *from_cache* is forwarded to each channel's ``async_initialize``.
        """
        await self._execute_channel_tasks("async_initialize", from_cache)
    async def async_configure(self) -> None:
        """Configure claimed channels by running their ``async_configure``."""
        await self._execute_channel_tasks("async_configure")
async def _execute_channel_tasks(self, func_name: str, *args: Any) -> None:
"""Add a throttled channel task and swallow exceptions."""
channels = [*self.claimed_channels.values(), *self.client_channels.values()]
tasks = [getattr(ch, func_name)(*args) for ch in channels]
results = await asyncio.gather(*tasks, return_exceptions=True)
for channel, outcome in zip(channels, results):
if isinstance(outcome, Exception):
channel.warning(
"'%s' stage failed: %s", func_name, str(outcome), exc_info=outcome
)
continue
channel.debug("'%s' stage succeeded", func_name)
    @callback
    def async_new_entity(
        self,
        component: str,
        entity_class: type[ZhaEntity],
        unique_id: str,
        channels: list[base.ZigbeeChannel],
    ) -> None:
        """Signal new entity addition (delegates to the parent Channels)."""
        self._channels.async_new_entity(component, entity_class, unique_id, channels)
    @callback
    def async_send_signal(self, signal: str, *args: Any) -> None:
        """Send a signal through hass dispatcher (delegates to parent Channels)."""
        self._channels.async_send_signal(signal, *args)
@callback
def claim_channels(self, channels: list[base.ZigbeeChannel]) -> None:
"""Claim a channel."""
self.claimed_channels.update({ch.id: ch for ch in channels})
@callback
def unclaimed_channels(self) -> list[base.ZigbeeChannel]:
"""Return a list of available (unclaimed) channels."""
claimed = set(self.claimed_channels)
available = set(self.all_channels)
return [self.all_channels[chan_id] for chan_id in (available - claimed)]
    @callback
    def zha_send_event(self, event_data: dict[str, Any]) -> None:
        """Relay events to hass, tagging them with this pool's identifiers.

        The pool's unique id and ``self.id`` (as the endpoint id) are merged
        into the payload; keys already present in *event_data* win on
        collision because it is spread last.
        """
        self._channels.zha_send_event(
            {
                const.ATTR_UNIQUE_ID: self.unique_id,
                const.ATTR_ENDPOINT_ID: self.id,
                **event_data,
            }
        )
|
import hash_functions
import pandas as pd
def load_names():
    """Build a single Series of candidate names from the census data files.

    Reads the fixed-width last-name and first-name (male/female) frequency
    files, keeps last names with cumulative frequency <= 70 and first names
    with cumulative frequency <= 80, and concatenates the name columns.
    """
    widths = [14, 7, 7, 7]
    last = pd.read_fwf('Names/dist.all.last', header=None, widths=widths)
    male = pd.read_fwf('Names/dist.male.first', header=None, widths=widths)
    female = pd.read_fwf('Names/dist.female.first', header=None, widths=widths)
    # Column 2 is the cumulative frequency; column 0 is the name itself.
    frames = [
        last[last[2] <= 70][0],
        male[male[2] <= 80][0],
        female[female[2] <= 80][0],
    ]
    return pd.concat(frames, ignore_index=True)
def is_name(name, name_corpus):
    """Return True if *name*, cleaned and upper-cased, appears in the corpus.

    Cleaning is delegated to ``hash_functions.clean``; *name_corpus* is
    expected to be a pandas Series (membership tested against ``.values``).
    """
    return hash_functions.clean(name).upper() in name_corpus.values
def capital_words(text):
    """Return the whitespace-separated tokens of *text* whose first
    character is uppercase."""
    return [token for token in text.split() if token[0].isupper()]
def name_positions(text, name_corpus):
    """Return the word indices in *text* of capitalized tokens found in
    *name_corpus* (capitalization check short-circuits the corpus lookup)."""
    return [
        index
        for index, token in enumerate(text.split())
        if token[0].isupper() and is_name(token, name_corpus)
    ]
if __name__ == "__main__":
    names = load_names()
    test_case = "William is coming over to Jason's house and they will play Super Smash Bros. Kendall likes to write books. Milk is tasty. Mary likes Jack. We like Long and Hee and Smith and House"
    # BUG FIX: name_positions takes the name corpus as a required second
    # argument; the original call omitted it and raised a TypeError.
    print(name_positions(test_case, names))
# Fusion lookup table (Persona-style tarot Arcana fusion): maps a result
# Arcana to the list of [Arcana, Arcana] input pairs that produce it.
# NOTE(review): whether consumers treat each pair as ordered or symmetric
# is not visible here — confirm against the code that reads this table.
ArcanasData = {
    'Chariot': [['Fool', 'Lovers'],
                ['Magician', 'Temperance'],
                ['Priestess', 'Sun'],
                ['Empress', 'Strength'],
                ['Emperor', 'Justice'],
                ['Hierophant', 'Death'],
                ['Lovers', 'Hermit'],
                ['Lovers', 'Star'],
                ['Chariot', 'Chariot'],
                ['Strength', 'Temperance'],
                ['Strength', 'Tower'],
                ['Death', 'Devil'],
                ['Devil', 'Moon']],
    'Death': [['Fool', 'Magician'],
              ['Fool', 'Strength'],
              ['Magician', 'Hierophant'],
              ['Priestess', 'Justice'],
              ['Priestess', 'Hanged Man'],
              ['Hierophant', 'Temperance'],
              ['Lovers', 'Strength'],
              ['Strength', 'Devil'],
              ['Hanged Man', 'Temperance'],
              ['Death', 'Death'],
              ['Tower', 'Star'],
              ['Sun', 'Judgement']],
    'Devil': [['Magician', 'Lovers'],
              ['Priestess', 'Strength'],
              ['Priestess', 'Temperance'],
              ['Emperor', 'Hanged Man'],
              ['Emperor', 'Temperance'],
              ['Chariot', 'Hermit'],
              ['Chariot', 'Death'],
              ['Justice', 'Moon'],
              ['Hermit', 'Sun'],
              ['Fortune', 'Star'],
              ['Death', 'Star'],
              ['Devil', 'Devil']],
    'Emperor': [['Magician', 'Justice'],
                ['Priestess', 'Empress'],
                ['Empress', 'Tower'],
                ['Empress', 'Judgement'],
                ['Emperor', 'Emperor'],
                ['Justice', 'Fortune'],
                ['Justice', 'Temperance'],
                ['Hermit', 'Judgement'],
                ['Fortune', 'Hanged Man'],
                ['Tower', 'Sun']],
    'Empress': [['Fool', 'Tower'],
                ['Magician', 'Hanged Man'],
                ['Priestess', 'Emperor'],
                ['Empress', 'Empress'],
                ['Hierophant', 'Judgement'],
                ['Lovers', 'Tower'],
                ['Lovers', 'Sun'],
                ['Justice', 'Star'],
                ['Fortune', 'Temperance'],
                ['Moon', 'Sun']],
    'Fool': [['Fool', 'Fool'],
             ['Magician', 'Strength'],
             ['Empress', 'Hierophant'],
             ['Empress', 'Death'],
             ['Emperor', 'Lovers'],
             ['Hierophant', 'Strength'],
             ['Chariot', 'Hanged Man'],
             ['Justice', 'Death'],
             ['Justice', 'Devil'],
             ['Temperance', 'Devil'],
             ['Moon', 'Judgement']],
    'Fortune': [['Priestess', 'Lovers'],
                ['Empress', 'Moon'],
                ['Emperor', 'Hierophant'],
                ['Hierophant', 'Hermit'],
                ['Chariot', 'Tower'],
                ['Fortune', 'Fortune'],
                ['Hanged Man', 'Devil'],
                ['Temperance', 'Tower'],
                ['Temperance', 'Moon'],
                ['Star', 'Judgement']],
    'Hanged Man': [['Fool', 'Empress'],
                   ['Magician', 'Emperor'],
                   ['Priestess', 'Tower'],
                   ['Hierophant', 'Justice'],
                   ['Hierophant', 'Devil'],
                   ['Lovers', 'Judgement'],
                   ['Justice', 'Sun'],
                   ['Fortune', 'Tower'],
                   ['Hanged Man', 'Hanged Man'],
                   ['Death', 'Temperance']],
    'Hermit': [['Fool', 'Hierophant'],
               ['Magician', 'Death'],
               ['Priestess', 'Star'],
               ['Empress', 'Fortune'],
               ['Emperor', 'Death'],
               ['Chariot', 'Strength'],
               ['Hermit', 'Hermit'],
               ['Hanged Man', 'Tower'],
               ['Temperance', 'Judgement'],
               ['Devil', 'Sun'],
               ['Tower', 'Moon']],
    'Hierophant': [['Fool', 'Temperance'],
                   ['Magician', 'Devil'],
                   ['Magician', 'Sun'],
                   ['Priestess', 'Chariot'],
                   ['Priestess', 'Moon'],
                   ['Emperor', 'Hermit'],
                   ['Hierophant', 'Hierophant'],
                   ['Justice', 'Strength'],
                   ['Hermit', 'Strength'],
                   ['Fortune', 'Devil'],
                   ['Strength', 'Death'],
                   ['Hanged Man', 'Sun'],
                   ['Death', 'Moon']],
    'Judgement': [['Empress', 'Lovers'],
                  ['Emperor', 'Sun'],
                  ['Hierophant', 'Tower'],
                  ['Lovers', 'Justice'],
                  ['Hermit', 'Tower'],
                  ['Star', 'Sun'],
                  ['Judgement', 'Judgement']],
    'Justice': [['Fool', 'Moon'],
                ['Fool', 'Sun'],
                ['Magician', 'Empress'],
                ['Magician', 'Fortune'],
                ['Priestess', 'Judgement'],
                ['Empress', 'Emperor'],
                ['Emperor', 'Devil'],
                ['Hierophant', 'Fortune'],
                ['Justice', 'Justice'],
                ['Hanged Man', 'Star']],
    'Lovers': [['Fool', 'Fortune'],
               ['Magician', 'Hermit'],
               ['Magician', 'Moon'],
               ['Empress', 'Justice'],
               ['Empress', 'Star'],
               ['Emperor', 'Star'],
               ['Hierophant', 'Sun'],
               ['Lovers', 'Lovers'],
               ['Chariot', 'Moon'],
               ['Justice', 'Hanged Man'],
               ['Devil', 'Judgement']],
    'Magician': [['Fool', 'Star'],
                 ['Magician', 'Magician'],
                 ['Priestess', 'Hierophant'],
                 ['Priestess', 'Fortune'],
                 ['Priestess', 'Death'],
                 ['Lovers', 'Moon'],
                 ['Justice', 'Hermit'],
                 ['Strength', 'Moon'],
                 ['Temperance', 'Sun'],
                 ['Devil', 'Tower']],
    'Moon': [['Fool', 'Priestess'],
             ['Fool', 'Chariot'],
             ['Priestess', 'Devil'],
             ['Lovers', 'Devil'],
             ['Chariot', 'Justice'],
             ['Chariot', 'Star'],
             ['Strength', 'Star'],
             ['Strength', 'Sun'],
             ['Hanged Man', 'Death'],
             ['Tower', 'Judgement'],
             ['Moon', 'Moon']],
    'Priestess': [['Fool', 'Hermit'],
                  ['Magician', 'Chariot'],
                  ['Magician', 'Star'],
                  ['Priestess', 'Priestess'],
                  ['Empress', 'Hanged Man'],
                  ['Empress', 'Temperance'],
                  ['Emperor', 'Judgement'],
                  ['Hierophant', 'Moon'],
                  ['Chariot', 'Fortune'],
                  ['Chariot', 'Sun'],
                  ['Hermit', 'Devil'],
                  ['Hermit', 'Moon'],
                  ['Death', 'Sun']],
    'Star': [['Fool', 'Justice'],
             ['Empress', 'Chariot'],
             ['Emperor', 'Tower'],
             ['Hierophant', 'Chariot'],
             ['Hermit', 'Fortune'],
             ['Hermit', 'Hanged Man'],
             ['Fortune', 'Death'],
             ['Fortune', 'Sun'],
             ['Hanged Man', 'Judgement'],
             ['Star', 'Star']],
    'Strength': [['Fool', 'Death'],
                 ['Magician', 'Judgement'],
                 ['Empress', 'Hermit'],
                 ['Emperor', 'Chariot'],
                 ['Hierophant', 'Lovers'],
                 ['Lovers', 'Fortune'],
                 ['Lovers', 'Temperance'],
                 ['Chariot', 'Temperance'],
                 ['Hermit', 'Death'],
                 ['Hermit', 'Temperance'],
                 ['Hermit', 'Star'],
                 ['Strength', 'Strength'],
                 ['Hanged Man', 'Moon'],
                 ['Devil', 'Star']],
    'Sun': [['Fool', 'Judgement'],
            ['Empress', 'Devil'],
            ['Emperor', 'Fortune'],
            ['Hierophant', 'Hanged Man'],
            ['Lovers', 'Hanged Man'],
            ['Justice', 'Tower'],
            ['Fortune', 'Moon'],
            ['Death', 'Tower'],
            ['Temperance', 'Star'],
            ['Sun', 'Sun']],
    'Temperance': [['Fool', 'Emperor'],
                   ['Fool', 'Devil'],
                   ['Magician', 'Priestess'],
                   ['Magician', 'Tower'],
                   ['Priestess', 'Hermit'],
                   ['Lovers', 'Chariot'],
                   ['Lovers', 'Death'],
                   ['Chariot', 'Devil'],
                   ['Fortune', 'Strength'],
                   ['Strength', 'Hanged Man'],
                   ['Temperance', 'Temperance'],
                   ['Star', 'Moon']],
    'Tower': [['Fool', 'Hanged Man'],
              ['Empress', 'Sun'],
              ['Emperor', 'Strength'],
              ['Emperor', 'Moon'],
              ['Hierophant', 'Star'],
              ['Fortune', 'Judgement'],
              ['Tower', 'Tower']]}
|
"""
Given an integer rowIndex, return the rowIndexth row of the Pascal's triangle.
Notice that the row index starts from 0.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Follow up:
Could you optimize your algorithm to use only O(k) extra space?
Example 1:
Input: rowIndex = 3
Output: [1,3,3,1]
Example 2:
Input: rowIndex = 0
Output: [1]
Example 3:
Input: rowIndex = 1
Output: [1,1]
Constraints:
0 <= rowIndex <= 33
"""
class Solution(object):
    def getRow(self, rowIndex):
        """
        Return row *rowIndex* (0-indexed) of Pascal's triangle.

        :type rowIndex: int
        :rtype: List[int]

        BUG FIX: the original helper returned the whole 2-D table (not a
        number) for interior entries and never recursed, so previous rows
        were still zero and getRow was wrong for rowIndex >= 2.

        This version keeps a single row and updates it in place, using
        only O(rowIndex) extra space (the follow-up).  Each pass sweeps
        right-to-left so row[j-1] still holds the previous row's value
        when row[j] is computed.
        """
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row
|
#!/usr/bin/env ccp4-python
import sys
sys.path.insert(0, "/opt/ample-dev1/python" )
sys.path.insert(0, "/opt/ample-dev1/scripts" )
import cPickle
import csv
sys.path.append("/usr/lib/python2.7/dist-packages")
from dateutil import parser
import glob
import os
import ample_util
from analyse_run import AmpleResult
#import phaser_parser
import parse_shelxe
def sgeLogTime( log ):
    """Parse an SGE job log and return (walltime_seconds, cpu_seconds).

    Looks for the "Started at ", "Results reported at " and "CPU time"
    marker lines; stops reading at the CPU-time line.

    ROBUSTNESS FIX: the original raised UnboundLocalError on a log missing
    any marker; now a ValueError naming the log file is raised instead.
    """
    start = end = ctime = None
    with open( log, 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith("Started at "):
                start = parser.parse( line[10:] )
            if line.startswith("Results reported at "):
                end = parser.parse( line[19:] )
            if line.startswith("CPU time"):
                ctime = float( line.split()[3] )
                break
    if start is None or end is None or ctime is None:
        raise ValueError("Could not parse job times from log: {0}".format(log))
    # Walltime: elapsed seconds between start and end markers.
    # NOTE(review): .seconds ignores whole days — assumes jobs run < 24h.
    wtime = (end - start).seconds
    return wtime, int( ctime )
def ampleLogTime( log ):
    """Return the total AMPLE runtime in seconds parsed from its log.

    Scans for the "ALL DONE" summary line, whose fourth token is the
    elapsed time in hours.  Implicitly returns None when no such line
    exists (matching the original behaviour).
    """
    with open(log, 'r') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped.startswith("ALL DONE"):
                continue
            hours = float(stripped.split()[3])
            return hours * 60 * 60
def logTime(log):
    """Return elapsed seconds between the first and last timestamped lines
    of a log file.

    Assumes the first two whitespace-separated tokens of a line form a
    parseable timestamp.  NOTE(review): like sgeLogTime, the .seconds
    attribute drops whole days — assumes runs shorter than 24h.
    """
    with open( log, 'r') as f:
        lines = f.readlines()
    # Timestamps are the first two whitespace-separated chunks of a line.
    start = " ".join(lines[0].strip().split()[0:2])
    # The logger's default format appends ",milliseconds", which dateutil
    # cannot parse — strip everything after the comma.
    start = start.split(",")[0]
    l=None
    # The last few lines may be blank; walk backwards to the last
    # non-empty one.
    for i in range(1,5):
        l = lines[-i].strip()
        if l:
            break
    assert l
    end = " ".join(l.split()[0:2])
    end = end.split(",")[0]
    tstart = parser.parse( start )
    tend = parser.parse( end )
    delta = tend-tstart
    return delta.seconds
#l = "/media/data/shared/coiled-coils/ensemble/ensemble_redo_failures1/1G1J/ROSETTA_MR_0/ensemble.log"
#t = logTime(l)
#print "TIME ",t
#sys.exit()
# Top-level driver (Python 2): augments pickled AmpleResult records with
# fragment/model/ensemble/shelxe timings parsed from job logs, then writes
# an updated pickle and a CSV summary.
e1root = "/media/seagate/coiled-coils/ensemble/ensemble.run1"
runDir = os.getcwd()
os.chdir( runDir )
pfile = os.path.join( runDir, "final_results.pkl" )
with open( pfile ) as f:
    ensembleResults = cPickle.load( f )
# Map targets to directories
pdb2dir = {}
for jd in [ l.strip() for l in open( "/media/data/shared/coiled-coils/ensemble/final_results/dirs.list") if not l.startswith("#") ]:
    directory = "/".join(jd.split("/")[0:-1])
    pdbCode = jd.split("/")[-1]
    pdb2dir[pdbCode]=directory
# Hack to add extra attributes: a fresh AmpleResult supplies the attribute
# lists the unpickled (older) records lack.
a = AmpleResult()
#for pdbCode in [ "3H7Z" ]:
for r in ensembleResults:
    #if r.pdbCode not in pdb2dir:
    #    continue
    print "processing ",r.pdbCode, r.ensembleName
    # Need to add the extra attributes
    r.orderedAttrs = a.orderedAttrs
    r.orderedTitles = a.orderedTitles
    dataRoot=pdb2dir[r.pdbCode]
    # Always use the old models as we dont' have the times for the redo ones
    #if dataRoot == "/media/data/shared/coiled-coils/ensemble/ensemble.run2":
    #    modelsDir=os.path.join(e1root,r.pdbCode,"models")
    #else:
    #    modelsDir=os.path.join(dataRoot,r.pdbCode,"models")
    modelsDir=os.path.join(e1root,r.pdbCode,"models")
    # Find all fragment logs and add up times (CPU seconds only)
    fdir = os.path.join( e1root, r.pdbCode, "fragments" )
    ftime = 0.0
    for flog in glob.glob( fdir + "/frags_*log*"):
        w, c = sgeLogTime( flog )
        ftime += c
    #print "GOT ftime ",ftime
    r.fragmentTime = ftime
    # Parse model time (assumes exactly one models_* log exists)
    mlog = glob.glob( modelsDir + "/models_*")[0]
    w, mtime = sgeLogTime( mlog )
    #print "MTIME ",mtime
    r.modelTime = mtime
    # Get the ensembling time from the ensemble log
    if dataRoot == "/media/data/shared/coiled-coils/ensemble/ensemble.run2":
        elog=os.path.join(e1root,r.pdbCode,"ROSETTA_MR_0","ensemble.log")
        etime = logTime(elog)
        # First run ran 3 ensembles
        etime = etime/3
    else:
        elog=os.path.join(dataRoot,r.pdbCode,"ROSETTA_MR_0","ensemble.log")
        etime = logTime(elog)
    # Parse ample log to get ample time
    #alog = os.path.join( dataRoot, r.pdbCode, "run_ample.sh.out" )
    #atime = ampleLogTime( alog )
    #print "ETIME ",etime
    r.ensembleTime = etime
    # For all jobs add up phaser and shelxe times to get overall time
    if dataRoot == "/media/data/shared/coiled-coils/ensemble/ensemble.run2":
        mrbdir = os.path.join(dataRoot,r.pdbCode)
    else:
        mrbdir = os.path.join( dataRoot, r.pdbCode, "ROSETTA_MR_0/MRBUMP/cluster_1" )
    mrDir = os.path.join( mrbdir,
                          "search_{0}_mrbump".format( r.ensembleName ),
                          "data",
                          "loc0_ALL_{0}".format( r.ensembleName ),
                          "unmod/mr/phaser"
                          )
    #Already calculated the phaser log time
    # phaserLog = os.path.join( mrDir, "phaser_loc0_ALL_{0}_UNMOD.log".format( r.ensembleName ) )
    # ptime = 0.0
    # if os.path.isfile( phaserLog ):
    #     phaserP = phaser_parser.PhaserLogParser( phaserLog )
    #     ptime = phaserP.time
    shelxeLog = os.path.join( mrDir, "build/shelxe/shelxe_run.log" )
    stime = 0.0
    if os.path.isfile( shelxeLog ):
        shelxeP = parse_shelxe.ShelxeLogParser( shelxeLog )
        stime = shelxeP.cputime
    r.shelxeTime = stime
    #print "PTIME ",r.phaserTime
    #print "STIME ",stime
# Write the augmented results back out alongside the original pickle.
# NOTE(review): this pickle handle is never closed/flushed explicitly.
pfile = ample_util.filename_append( pfile, astr="timings")
f = open( pfile, 'w' )
ampleDict = cPickle.dump( ensembleResults, f )
cpath = os.path.join( runDir, 'final_results_timings.csv' )
csvfile = open( cpath, 'wb')
csvwriter = csv.writer(csvfile, delimiter=',',
                       quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Emit the header row once, before the first data row.
header=False
for r in ensembleResults:
    if not header:
        #csvwriter.writerow( r.titlesAsList() )
        csvwriter.writerow( r.valueAttrAsList() )
        header=True
    csvwriter.writerow( r.valuesAsList() )
csvfile.close()
|
from django.contrib.auth.models import User
from rest_framework import serializers
from fbbackend.models import UserProfile, Messages, Comments
class MsgSerializer(serializers.Serializer):
    """Plain serializer for a single free-text message payload.

    NOTE(review): a second class also named MsgSerializer is defined later
    in this module and shadows this one at import time — rename one of them.
    """
    # Free-text message body; 4000-char cap — presumably mirrors the
    # Messages model field size, confirm against the model.
    message = serializers.CharField(max_length=4000)
class FriendSerializer(serializers.ModelSerializer):
    """Serialize the public profile fields of a UserProfile."""
    class Meta:
        model = UserProfile
        fields = ('profile_pic', 'cover_pic', 'about', 'intro')
class UserSerializer(serializers.ModelSerializer):
    """Serialize a Django auth User together with nested profile data."""
    # NOTE(review): many=True assumes User.userprofile is a to-many reverse
    # relation; if UserProfile uses a OneToOneField this needs many=False —
    # confirm against the model.
    userprofile = FriendSerializer(many=True, read_only=True)
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'userprofile')
class MsgSerializer(serializers.ModelSerializer):
    """Model-backed serializer for Messages.

    NOTE(review): this redefines (and silently shadows) the plain
    MsgSerializer declared earlier in this module — rename one of them.
    """
    class Meta:
        model = Messages
        fields = ('message', 'created')
class CommentSerializer(serializers.ModelSerializer):
    """Model-backed serializer exposing a comment's text and creation time."""
    class Meta:
        model = Comments
        fields = ('comment', 'created')
from flask import Flask, jsonify, request
import cv2
import numpy as np
app= Flask(__name__)
@app.route("/",methods=['POST'])
def index():
    """Accept a POSTed file under the 'file' form field and save it locally.

    Returns a JSON message in every branch (the original returned a bare
    dict in the success path; jsonify is now used consistently).

    SECURITY: the original saved under the raw client-supplied filename,
    allowing path traversal (e.g. '../../etc/x'); basename() strips any
    directory components.  NOTE(review): consider
    werkzeug.utils.secure_filename for stricter sanitisation.
    """
    import os  # local import keeps this handler self-contained

    if not request.files:
        return jsonify({"message": "No file found!"})
    file = request.files['file']
    # Drop directory components from the untrusted filename before saving.
    file.save(os.path.basename(file.filename))
    return jsonify({"message": "File Saved."})
if __name__ =="__main__":
    # Run the Flask development server; debug=True enables auto-reload and
    # interactive tracebacks — never use in production.
    app.run(debug=True)
def mdc(a, b):
    """Greatest common divisor (maior divisor comum) via Euclid's algorithm.

    mdc(a, 0) == a, matching the recursion's base case.
    """
    if b == 0:
        return a
    return mdc(b, a % b)


def mmc(a, b):
    """Least common multiple (minimo multiplo comum) of a and b.

    BUG FIX: uses floor division instead of true division so integer
    inputs yield an exact int — true division returned a float, which
    loses precision once abs(a*b) exceeds 2**53.
    """
    return abs(a * b) // mdc(a, b)
|
class SprintInfo:
    """Read-only holder for a sprint's start and end dates."""

    def __init__(self, start_date, end_date):
        """Capture the sprint boundaries; exposed via read-only properties."""
        self._start_date = start_date
        self._end_date = end_date

    @property
    def start_date(self):
        """Date the sprint begins."""
        return self._start_date

    @property
    def end_date(self):
        """Date the sprint ends."""
        return self._end_date
|
from core.youtube_video_processor import YouTubeVideoProcessor
from core.youtube_playlist_videourl_extractor import YouTubePlaylistVideoUrlExtractor
from core.file_blob_writer import FileBlobWriter
import moviepy.editor as mp
class YouTubePlaylistCrawler():
    """Crawl a YouTube playlist: collect video info, rip MP3 audio from
    already-downloaded MP4s and optionally save thumbnails and an
    index.html listing."""

    def __init__(self):
        pass

    def crawl_playlist(self, playlist_url, max_videos=None, createHTML=False, getThumbnails=False, location="."):
        """Process up to *max_videos* videos of *playlist_url*.

        max_videos=None means "all".  Files are read/written under
        *location*; an MP4 named <video_id>.mp4 is assumed to exist there
        (the download call is commented out upstream of this view).

        BUG FIX: the original set `getThumbnails = False` inside the loop,
        unconditionally overriding the caller's argument and making the
        thumbnail branch unreachable.
        """
        print("Starting crawl of: " + playlist_url)
        url_extractor = YouTubePlaylistVideoUrlExtractor()
        urls = url_extractor.get(playlist_url).urls
        print("# of videos found: ", len(urls))
        vp = YouTubeVideoProcessor()
        i = 0
        if max_videos is None:
            max_videos = len(urls)
        writer = FileBlobWriter(location)
        videos = []
        for url in urls:
            vi = vp.get_video_info_via_url(url)
            print(vp.filename)
            videos.append(vi)
            #vp.download("mp4", location=location, video_id=vp.video_id)
            print("Ripping MP3")
            clip = mp.VideoFileClip(location + "/" + vp.video_id + ".mp4")
            clip.audio.write_audiofile(location + "/" + vp.video_id + ".mp3")
            clip.close()  # release the reader/file handles moviepy holds
            if getThumbnails:
                vp.get_thumbnails()
                for thumbnail_key in vp.thumbnails:
                    thumbnail_filename = vp.video_id + "_" + thumbnail_key + ".jpg"
                    thumbnail_bytes = vp.thumbnails[thumbnail_key]
                    writer.write(thumbnail_filename, thumbnail_bytes)
                    print("Wrote thumbnail: " + thumbnail_filename)
            i += 1
            if i == max_videos:
                print("Reached max number of videos - stopping")
                break
        if createHTML:
            # create HTML
            print("Writing HTML")
            with open(location + "/" + "index.html", "w") as tf:
                tf.write("<HTML><BODY>")
                for v in videos:
                    tf.write("<P>")
                    tf.write("<IMG SRC='" + v.video_id + "_sddefault.jpg' /><br/>")
                    tf.write("<H2>" + v.title + "</H2>")
                    tf.write("<A HREF='" + v.video_id + ".mp4'>Watch</A><BR/>")
                    tf.write("<A HREF='" + v.video_id + ".mp3'>Listen</A><BR/>")
                    tf.write("<P>")
                tf.write("</BODY></HTML>")
|
# encoding = utf-8
import threading
import time
#Python2
# from Queue mimport Queue
# Python
import queue
# Two global locks acquired in opposite order by func_1/func_2 below —
# that opposing order is what produces the deadlock this demo illustrates.
lock_1 = threading.Lock()
lock_2 = threading.Lock()
def func_1():
    """Acquire lock_1, sleep, then try for lock_2.

    Runs concurrently with func_2, which takes the locks in the opposite
    order — together they demonstrate a classic lock-ordering deadlock,
    so the code after the second acquire never runs.
    (The printed strings mean "acquired N" / "released N".)
    """
    print("func1")
    lock_1.acquire()
    print("申请1")
    time.sleep(2)
    # By now func_2 already holds lock_2, so this blocks forever.
    lock_2.acquire()
    print("申请2")
    lock_2.release()
    print("释放2")
    lock_1.release()
    print("释放1")
    print("done 1")
def func_2():
    """Acquire lock_2, sleep, then try for lock_1 — the mirror of func_1.

    func_1 holds lock_1 by the time the sleep ends, so the second acquire
    blocks forever and the releases are never reached.
    """
    print("func2")
    lock_2.acquire()
    print("申请2")
    time.sleep(4)
    # func_1 already holds lock_1 — deadlock.
    lock_1.acquire()
    print("申请1")
    lock_1.release()
    print("释放1")
    lock_2.release()
    print("释放2")
    print("done2")
if __name__ == '__main__':
    # Deliberate deadlock demo: start both workers and wait on them.
    print("主程序")
    t1 = threading.Thread(target=func_1, args=())
    t2 = threading.Thread(target=func_2, args=())
    t1.start()
    t2.start()
    # With the sleeps above, each thread ends up waiting on the lock the
    # other holds, so these joins never return.
    t1.join()
    t2.join()
import cocotb
from lib.util import assertions
from lib.cycle import clock, wait, reset
@cocotb.test()
def program_counter(dut):
    """Exercise the 4-bit program counter DUT.

    Covers: uninitialized start ('xxxx'), tri-state output enable
    ('zzzz' when disabled), clocked increment, reset, and roll-over.
    Generator-style cocotb test (pre async/await): `yield from` drives
    the simulation helpers in lib.cycle.
    """
    def assert_o_count(value, error_msg):
        """Check the value of the output count against a 4-char binstr."""
        assertions.assertEqual(dut.o_count.value.binstr, value, error_msg)
    # Test initialization
    yield from wait()
    assert_o_count('xxxx', 'o_count should start disconnected')
    # Pulse the clock, nothing should change:
    yield from clock(dut)
    assert_o_count('xxxx', 'o_count should still be disconnected')
    # Enable the output:
    dut.i_enable_out = 1
    yield from wait()
    assert_o_count('0000', 'o_count should be enabled and initialized')
    # Increment:
    dut.i_increment = 1
    yield from wait()
    assert_o_count('0000', 'o_count should not increment until clock pulse')
    yield from clock(dut)
    assert_o_count('0001', 'o_count should increment')
    yield from clock(dut)
    assert_o_count('0010', 'o_count should increment')
    yield from clock(dut)
    assert_o_count('0011', 'o_count should increment')
    # Cycle without increment:
    dut.i_increment = 0
    yield from clock(dut)
    assert_o_count('0011', 'o_count should not increment')
    # Disable and Re-enable output (count is retained while tri-stated):
    dut.i_enable_out = 0
    yield from wait()
    assert_o_count('zzzz', 'o_count should disconnect')
    dut.i_enable_out = 1
    yield from wait()
    assert_o_count('0011', 'o_count should re-enable')
    # Reset:
    yield from reset(dut)
    assert_o_count('0000', 'o_count should reset')
    # Test roll-over:
    dut.i_increment = 1
    # Increment over 8 cycles:
    yield from clock(dut, 8)
    assert_o_count('1000', 'o_count should be 8')
    # Increment over 9 cycles, rolling over the count (8+9 = 17 mod 16 = 1):
    yield from clock(dut, 9)
    assert_o_count('0001', 'o_count should roll-over back to 1')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 16:25:30 2022
@author: astertaylor
"""
import SAMUS
# Baseline run: relative tolerance 0.05 on the hyperbolic trajectory.
standard_class=SAMUS.model("standard_tolerance",a=20,b=50,c=110,mu=10**7)
standard_class.run_model(5,rtol=0.05,data_name='hyperbolic_traj')
# Comparison run: identical body/trajectory with rtol halved to 0.025
# (the original comments said "1 time step per rotation" for both runs,
# which did not match the rtol values actually passed).
halved_class=SAMUS.model("halved_tolerance",a=20,b=50,c=110,mu=10**7)
halved_class.run_model(5,rtol=0.025,data_name='hyperbolic_traj')
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def get(url, header=''):
    """Does http GET.

    Returns the requests.Response, or None when a handled request
    exception occurred.  ROBUSTNESS FIX: `response` is initialized to
    None (as in put/delete/patch) so a caught exception no longer causes
    UnboundLocalError at the return.
    """
    response = None
    try:
        response = requests.get(url, headers=header)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.get {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.get {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.get {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.get {}'.format(detail))
    return response
def head(url, header=''):
    """Does http HEAD.

    Returns the requests.Response, or None when a handled request
    exception occurred.  ROBUSTNESS FIX: `response` is initialized to
    None (as in put/delete/patch) so a caught exception no longer causes
    UnboundLocalError at the return.
    """
    response = None
    try:
        response = requests.head(url, headers=header)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.head {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.head {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.head {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.head {}'.format(detail))
    return response
def post(url, header='', body=''):
    """Does http POST.

    The body is stringified with single quotes swapped to double quotes
    (presumably to turn a Python-dict repr into JSON — note this corrupts
    payloads that legitimately contain single quotes).  Returns the
    requests.Response, or None when a handled request exception occurred.
    ROBUSTNESS FIX: `response` is initialized to None (as in
    put/delete/patch) so a caught exception no longer causes
    UnboundLocalError at the return.
    """
    body = str(body)
    body = body.replace("'", '"')
    response = None
    try:
        response = requests.post(url, headers=header, data=body)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.post {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.post {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.post {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.post {}'.format(detail))
    return response
def put(url, header='', body=''):
    """Does http PUT.

    Returns the requests.Response, or None when one of the handled
    request exceptions occurred (each is printed, not raised).
    """
    response = None
    try:
        response = requests.put(url, headers=header, data=body)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.put {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.put {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.put {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.put {}'.format(detail))
    return response
def delete(url, header=''):
    """Does http DELETE.

    Returns the requests.Response, or None when one of the handled
    request exceptions occurred (each is printed, not raised).
    """
    response = None
    try:
        response = requests.delete(url, headers=header)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.delete {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.delete {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.delete {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.delete {}'.format(detail))
    return response
def patch(url, header='', body=''):
    """Does http PATCH.

    Returns the requests.Response, or None when one of the handled
    request exceptions occurred (each is printed, not raised).
    """
    response = None
    try:
        response = requests.patch(url, headers=header, data=body)
    except requests.ConnectionError as detail:
        print('ConnectionError: Exception in http.patch {}'.format(detail))
    except requests.HTTPError as detail:
        print('HTTPError: Exception in http.patch {}'.format(detail))
    except requests.Timeout as detail:
        print('Timeout: Exception in http.patch {}'.format(detail))
    except requests.TooManyRedirects as detail:
        print('TooManyRedirects: Exception in http.patch {}'.format(detail))
    return response
|
'''
Created on 2019年6月5日
@author: juicemilk
'''
"""
function declaration:
height_train:the network topology of training net
"眼高网络的结构"
"""
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
from Eye_net_project.Eye_net.lib.Data_Shuffle import data_shuffle
from Eye_net_project.Eye_net.lib.Data_Batch import data_batch
from Eye_net_project.Eye_net.lib.Def_My_Layer import my_dense_layer,my_logits_layer
from Eye_net_project.Eye_net.lib.Remove_File import remove_file
"""
The number of neuron in every layer
"网络中每一层的神经元个数"
"""
# Neuron counts for every layer of the eye-height network: three input
# branches (Tx: 3 inputs, S-parameters: 12, Rx: 2) each with their own
# hidden layers, and a single regression output.  LAYER4..LAYER10
# presumably form the merged trunk — the layers that consume them are
# defined further down the file, outside this view.
TX_INPUT_NODE = 3
S_INPUT_NODE = 12
RX_INPUT_NODE =2
OUTPUT_NODE = 1
TX_LAYER1_NODE = 10
TX_LAYER2_NODE = 50
TX_LAYER3_NODE = 30
S_LAYER1_NODE=400
S_LAYER2_NODE=30
RX_LAYER1_NODE = 10
RX_LAYER2_NODE = 50
RX_LAYER3_NODE = 30
LAYER4_NODE = 800
LAYER5_NODE = 400
LAYER6_NODE = 200
LAYER7_NODE = 100
LAYER8_NODE = 20
LAYER9_NODE = 5
LAYER10_NODE = 1
def height_train(Activation_mode,Regularization_mode,Cost_mode,Loss_mode,Is_batch_normalization,Regularization_rate,Learning_rate,Keep_prob,MOMENTUM,Epochs,Batch_size,Train_data,Test_data,Data_max,Data_min,Use_type,Models_save_path,Model_name,Save_model_num,Use_tensorboard):
    """Build and train the eye-height regression network (TF1 graph mode).

    Three input branches (Tx, S, Rx) are embedded separately, concatenated,
    and passed through a shared dense trunk ending in one regression output.
    Trains with momentum SGD, optionally writing TensorBoard summaries,
    checkpoints every epoch, dumps loss/error-rate histories to text files
    and finally plots both curves.

    The many positional arguments mirror the experiment configuration:
    mode switches (activation/regularization/cost/loss), hyper-parameters,
    the train/test data dicts, per-label min/max used to denormalise
    predictions, and checkpoint/log paths.
    """
    # Graph inputs; `training` toggles batch-norm mode, `keep_prob` dropout.
    tx = tf.placeholder(tf.float64,[None,TX_INPUT_NODE],name = 'tx-input')
    s = tf.placeholder(tf.float64,[None,S_INPUT_NODE],name = 's-input')
    rx = tf.placeholder(tf.float64,[None,RX_INPUT_NODE],name = 'rx-input')
    y = tf.placeholder(tf.float64,[None,OUTPUT_NODE],name = 'y-input')
    training = tf.placeholder_with_default(False, shape=(), name='training')
    keep_prob = tf.placeholder(tf.float64,name='keep_prob')
    with tf.name_scope('dnn_Tx'):
        TX_LAYER1_OUTPUT = my_dense_layer(tx, TX_LAYER1_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'tx_layer1', 'tx_layer1_output')
        TX_LAYER2_OUTPUT = my_dense_layer(TX_LAYER1_OUTPUT, TX_LAYER2_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'tx_layer2', 'tx_layer2_output')
        TX_LAYER3_OUTPUT = my_dense_layer(TX_LAYER2_OUTPUT, TX_LAYER3_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'tx_layer3', 'tx_layer3_output')
    with tf.name_scope('dnn_S'):
        S_LAYER1_OUTPUT = my_dense_layer(s, S_LAYER1_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 's_layer1', 's_layer1_output')
        S_LAYER2_OUTPUT = my_dense_layer(S_LAYER1_OUTPUT, S_LAYER2_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 's_layer2', 's_layer2_output')
    with tf.name_scope('dnn_Rx'):
        RX_LAYER1_OUTPUT = my_dense_layer(rx, RX_LAYER1_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'rx_layer1', 'rx_layer1_output')
        RX_LAYER2_OUTPUT = my_dense_layer(RX_LAYER1_OUTPUT, RX_LAYER2_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'rx_layer2', 'rx_layer2_output')
        RX_LAYER3_OUTPUT = my_dense_layer(RX_LAYER2_OUTPUT, RX_LAYER3_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'rx_layer3', 'rx_layer3_output')
    with tf.name_scope('dnn_all'):
        # Merge the three branch embeddings and run the shared trunk.
        ALL_INPUT = tf.concat([TX_LAYER3_OUTPUT,S_LAYER2_OUTPUT,RX_LAYER3_OUTPUT],1,name = 'all_input')
        LAYER4_OUTPUT = my_dense_layer(ALL_INPUT, LAYER4_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer4', 'layer4_output')
        LAYER5_OUTPUT = my_dense_layer(LAYER4_OUTPUT, LAYER5_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer5', 'layer5_output')
        LAYER6_OUTPUT = my_dense_layer(LAYER5_OUTPUT, LAYER6_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer6', 'layer6_output')
        LAYER7_OUTPUT = my_dense_layer(LAYER6_OUTPUT, LAYER7_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer7', 'layer7_output')
        LAYER8_OUTPUT = my_dense_layer(LAYER7_OUTPUT, LAYER8_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer8', 'layer8_output')
        LAYER9_OUTPUT = my_dense_layer(LAYER8_OUTPUT, LAYER9_NODE, training, keep_prob, Activation_mode, Regularization_mode, Is_batch_normalization, Regularization_rate, 'layer9', 'layer9_output')
        logits = my_logits_layer(LAYER9_OUTPUT, LAYER10_NODE, training, keep_prob, Regularization_mode, Is_batch_normalization, Regularization_rate, 'logits_layer', 'logits')
    with tf.name_scope('loss'):
        # Cost mode 1: half mean squared error; mode 2: mean absolute error.
        if Cost_mode=='1':
            logits_loss = tf.div(tf.reduce_mean(tf.square(logits-y)),2.0,name = 'logits_loss')
        elif Cost_mode=='2':
            logits_loss = tf.reduce_mean(tf.abs(logits-y),name = 'logits_loss')
        else:
            print('ERROR,please input correct cost mode')
        regularization_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES),name = 'regularization_loss')
        loss = tf.add(logits_loss,regularization_loss,name = 'loss')
    with tf.name_scope('eval'):
        # Denormalise predictions/labels back to physical units before
        # computing the relative error metric selected by Loss_mode.
        label_max=np.array(Data_max.get(Use_type))
        label_min=np.array(Data_min.get(Use_type))
        logits_real=logits*(label_max-label_min)+label_min
        y_real = y*(label_max-label_min)+label_min
        if Loss_mode=='rmse' :
            error_rate=tf.div(tf.sqrt(tf.reduce_mean(tf.square(logits_real-y_real))),tf.reduce_mean(y_real),name='rmse_loss')
        elif Loss_mode=='mse':
            error_rate=tf.div(tf.reduce_mean(tf.square(logits_real-y_real)),tf.square(tf.reduce_mean(y_real)),name='mse_loss')
        elif Loss_mode=='mae':
            error_rate=tf.div(tf.reduce_mean(tf.abs(logits_real-y_real)),tf.reduce_mean(y_real),name='mae_loss')
        elif Loss_mode=='mre':
            error_rate=tf.reduce_mean(tf.div(tf.abs(logits_real-y_real),y_real),name='precent_loss')
        else:
            print('ERROR,please input correct loss mode')
    with tf.name_scope('train'):
        global_step = tf.Variable(0,trainable = False)
        optimizer = tf.train.MomentumOptimizer(Learning_rate,MOMENTUM,name = 'optimizer')
        if(Is_batch_normalization==False):
            train_op = optimizer.minimize(loss,global_step=global_step)
            saver = tf.train.Saver(max_to_keep=Save_model_num)
        else:
            # Batch norm: moving averages must be updated alongside the step.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                train_op = optimizer.minimize(loss,global_step=global_step)
            saver = tf.train.Saver(max_to_keep=Save_model_num,var_list=tf.global_variables())
    init = tf.global_variables_initializer()
    try:
        if not os.path.isdir(Models_save_path):
            os.makedirs(Models_save_path)
        if not os.path.isdir('./Logs/'+Use_type+'_net/tensorboard/train'):
            os.makedirs('./Logs/'+Use_type+'_net/tensorboard/train')
        # BUG FIX: the original tested lowercase './logs/...' but created
        # './Logs/...', so on case-sensitive filesystems the check never
        # matched and the second run died on FileExistsError -> os._exit.
        if not os.path.isdir('./Logs/'+Use_type+'_net/tensorboard/test'):
            os.makedirs('./Logs/'+Use_type+'_net/tensorboard/test')
        if not os.path.isdir('./Logs/'+Use_type+'_net/train_result'):
            os.makedirs('./Logs/'+Use_type+'_net/train_result')
    except Exception:
        print('无法创建模型存储目录或训练过程报告目录')
        os._exit(0)
    # Clear stale checkpoints and summaries from previous runs.
    remove_file(Models_save_path)
    remove_file('./Logs/'+Use_type+'_net/tensorboard/train')
    remove_file('./Logs/'+Use_type+'_net/tensorboard/test')
    if Use_tensorboard ==True:
        tf.summary.scalar('loss',loss)
        tf.summary.scalar('error rate',error_rate)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.name,var)
        merged_summary_op=tf.summary.merge_all()
    loss_list={'train_loss':[],'test_loss':[]}
    error_rate_list={'train_error_rate':[],'test_error_rate':[]}
    loss_file=open('./Logs/'+Use_type+'_net/train_result/loss_list.txt', 'w')
    error_rate_file=open('./Logs/'+Use_type+'_net/train_result/error_rate_list.txt', 'w')
    with tf.Session() as sess:
        print('开始训练......')
        if Use_tensorboard==True:
            writer1=tf.summary.FileWriter('./Logs/'+Use_type+'_net/tensorboard/train',sess.graph)
            writer2=tf.summary.FileWriter('./Logs/'+Use_type+'_net/tensorboard/test',sess.graph)
        init.run()
        Epochs_step=(int)(len(Train_data.get(Use_type))/Batch_size)
        # Full-set feeds used only for evaluation (dropout off, BN inference).
        test_feed={keep_prob:1,training:False,tx:Test_data.get('Tx'),rx:Test_data.get('Rx'),s:Test_data.get('S'),y:Test_data.get(Use_type)}
        train_feed={keep_prob:1,training:False,tx:Train_data.get('Tx'),rx:Train_data.get('Rx'),s:Train_data.get('S'),y:Train_data.get(Use_type)}
        for i in range(Epochs):
            # Reshuffle each epoch, then step through mini-batches.
            train_shuffer_data=data_shuffle(Train_data,Use_type)
            for j in range(Epochs_step) :
                train_batch_data=data_batch(train_shuffer_data, Batch_size, j)
                train_feed_batch={keep_prob:Keep_prob,training:True,tx:train_batch_data.get('Tx'),rx:train_batch_data.get('Rx'),s:train_batch_data.get('S'),y:train_batch_data.get(Use_type)}
                sess.run(train_op,feed_dict=train_feed_batch)
            if Use_tensorboard==True:
                train_loss,train_eval,summary= sess.run([loss,error_rate,merged_summary_op],feed_dict = train_feed)
                print('after %d Epochs training,the train loss is %g,the train error rate is %g'%(i,train_loss,train_eval))
                writer1.add_summary(summary,i)
                writer1.flush()
                test_loss,test_eval,summary= sess.run([loss,error_rate,merged_summary_op],feed_dict = test_feed)
                writer2.add_summary(summary,i)
                writer2.flush()
                print('after %d Epochs training,the test loss is %g,the test error rate is %g'%(i,test_loss,test_eval))
            else:
                train_loss,train_eval= sess.run([loss,error_rate],feed_dict = train_feed)
                print('after %d Epochs training,the train loss is %g,the train error rate is %g'%(i,train_loss,train_eval))
                test_loss,test_eval= sess.run([loss,error_rate],feed_dict = test_feed)
                print('after %d Epochs training,the test loss is %g,the test error rate is %g'%(i,test_loss,test_eval))
            # Checkpoint every epoch; Saver keeps the last Save_model_num.
            saver.save(sess, os.path.join(Models_save_path,Model_name),global_step = i)
            loss_list.get('train_loss').append(train_loss)
            loss_list.get('test_loss').append(test_loss)
            error_rate_list.get('train_error_rate').append(train_eval)
            error_rate_list.get('test_error_rate').append(test_eval)
        print('训练完成')
        print('正在保存训练过程数据.........')
        # Dump histories as whitespace-separated columns with a header row.
        for k in loss_list.keys():
            loss_file.write(str(k)+' ')
        loss_file.write('\n')
        for i in range(Epochs):
            for v in loss_list.values():
                loss_file.write(str(v[i])+' ')
            loss_file.write('\n')
        loss_file.close()
        for k in error_rate_list.keys():
            error_rate_file.write(str(k)+' ')
        error_rate_file.write('\n')
        for i in range(Epochs):
            for v in error_rate_list.values():
                error_rate_file.write(str(v[i])+' ')
            error_rate_file.write('\n')
        error_rate_file.close()
        print('保存完成')
        plt.figure(1)
        plt.plot(loss_list.get('train_loss'),c='red')
        plt.plot(loss_list.get('test_loss'),c='blue')
        plt.legend(['train_loss','test_loss'])
        plt.title('loss')
        plt.figure(2)
        plt.plot(error_rate_list.get('train_error_rate'),c='red')
        plt.plot(error_rate_list.get('test_error_rate'),c='blue')
        plt.legend(['train_error_rate','test_error_rate'])
        plt.title('error_rate')
        plt.show()
# Map of city name -> abbreviation.
cities = {
    'Chisinau': 'CH',
    'Orhei': 'OR',
    'Soroca': 'SO'
}
# Map of abbreviation -> raion (district) name.
# NOTE(review): duplicate 'CH' keys -- a dict literal keeps only the last
# one, so 'Buiucani' and 'Botanica' are silently dropped and
# raion['CH'] ends up 'Riscani'.  Confirm whether a list per key was meant.
raion = {
    'CH': 'Buiucani',
    'CH': 'Botanica',
    'CH': 'Riscani',
    'OR': 'Butuceni'
}
raion['SO'] = 'Bulboci'
# (Python 2 print statements throughout.)
print '=' * 27
print "Chisinau has:", raion[cities['Chisinau']]
print "Orhei has:", raion[cities['Orhei']]
print "Soroca has:", raion[cities['Soroca']]
print '=' * 27
# Iterate abbreviation/raion pairs directly.
for abbrev, rai in raion.items():
    print "%s are raionului %s" % (
        abbrev, rai)
print '=' * 27
# Chain both dicts: city -> abbreviation -> raion.
for city, abbrev in cities.items():
    print "%s abreviarea %s are raionul %s" % (
        city, abbrev, raion[abbrev])
print '=' * 27
# .get() returns None for a missing key instead of raising KeyError.
city = cities.get('Dubasare')
if not city:
    print "Gomenasai! No Dubasare!"
# .get() with an explicit default value.
rai = raion.get("DU",'NU Exista!')
print "Raionul pentru orasul 'DU': %s" % rai
# del cities['Orhei'] // sterge orheiul
# cities.clear() // sterge toate datele din dictionar
print "str() produces a printable string representaion of a dictionary %s" % str(raion)
|
from django.db import models
class Poll(models.Model):
    # A poll question; Choice and Answer rows reference it via FK.
    question = models.CharField(max_length=2500, null=False, blank=False, verbose_name='Вопрос')
    # Set once on insert (auto_now_add).
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время создания')
    def __str__(self):
        return f'{self.question} - {self.created_at}'
    class Meta:
        verbose_name = 'Опрос'
        verbose_name_plural = 'Опросы'
class Choice(models.Model):
    # One selectable option belonging to a Poll (reverse name: poll.choices).
    text = models.TextField(max_length=3000, null=False, blank=False, verbose_name='Текст варианта')
    poll = models.ForeignKey('webapp.Poll', on_delete=models.CASCADE, related_name='choices', verbose_name='Опрос')
    def __str__(self):
        return f'{self.text}'
class Answer(models.Model):
    # One submitted vote: links a Poll and the Choice that was picked.
    poll = models.ForeignKey('webapp.Poll', on_delete=models.CASCADE, related_name='answers', verbose_name='Опрос')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время создания')
    option = models.ForeignKey('webapp.Choice', on_delete=models.CASCADE, related_name='answers', verbose_name='Ответ')
    def __str__(self):
        return f'{self.poll}: {self.option}'
|
import time
from argparse import ArgumentParser
import cv2
from search_images import ImageSearcher
from optimize_utils import OrderSolver
from app_config import Config
def build_argparser():
    """Build the CLI parser and return the already-parsed arguments."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "-i", "--input",
        type=str,
        required=True,
        help="Required. Path to a image.",
    )
    return arg_parser.parse_args()
def main():
    """Find images related to the input picture, then optimise their order."""
    args = build_argparser()

    conf = Config("config_double.yml")
    searcher = ImageSearcher(
        conf.model_obj_path, conf.model_scene_path,
        conf.sqlite_path, conf.ngt_obj_path, conf.ngt_scene_path)

    query = cv2.imread(args.input)

    # Timed similarity search over the index.
    started = time.time()
    f_pathes, dists, similarity_matrix = searcher.search_related(
        query, 10)
    print("search time", time.time() - started, " sec")

    results = []
    for f_path, dist in zip(f_pathes, dists):
        print(f_path, dist)
        results.append([f_path, dist])

    # Timed re-ordering of the hits.
    solver = OrderSolver()
    started = time.time()
    success, results = solver.rebalance_order(
        similarity_matrix, results, omega=2, M=4)
    print("order optimizing time", time.time() - started, " sec")
    print("success", success)
    for f_path, dist in results:
        print(f_path, dist)
# Run the search pipeline only when executed as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from _core import worker_base
class API_Worker( worker_base.API_Worker_Base ):
    # Minimal worker: every GET request returns a static HTML greeting.
    def do_GET( self ):
        # reply(body, mime-type, status) -- presumably; confirm the exact
        # signature in worker_base.API_Worker_Base.
        self.reply( 'Hello World', 'text/html', 200 )
|
from math import sqrt, ceil
def pz(s, n):
    """Left-pad *s* with '0' to width *n* (unchanged when already wide enough)."""
    return s.rjust(n, '0')
def gen_coin(n):
    """Yield every n-character binary string that starts and ends with '1'."""
    if n <= 2:
        yield '11'
        return
    width = n - 2
    for value in range(2 ** width):
        yield '1' + format(value, 'b').zfill(width) + '1'
# def indx(i):
# j = 0
# while i > soe[j]:
# j+=1
# if i == soe[j]:
# return j
# return -1
def prime(n, i=3):
    """Return a nontrivial divisor of *n*, or 0 when none is found.

    Even n returns 2 immediately; otherwise odd candidates starting at
    *i* are trial-divided up to and including ceil(sqrt(n)).  Callers
    treat a return of 0 as "n is prime".  (Note n == 2 itself returns 2,
    matching the original behaviour.)
    """
    if n % 2 == 0:
        return 2
    # BUG FIX: the bound must be inclusive.  The original used `i < sqn`,
    # so perfect squares such as 9 and 25 -- divisible only by exactly
    # ceil(sqrt(n)) -- were misreported as prime (returned 0).
    limit = ceil(sqrt(n))
    while i <= limit:
        if n % i == 0:
            return i
        i += 2
    return 0
def s_of_e(n):
    """Sieve of Eratosthenes up to *n*: return (flags, primes).

    flags[i] is 1 when i survived the sieve (indices 0 and 1 are left
    at 1 by this implementation); primes lists all primes in [2, n].
    """
    flags = [1] * (n + 1)
    for candidate in range(2, int(sqrt(n + 1)) + 1):
        if flags[candidate]:
            # Strike out multiples starting at candidate^2; smaller
            # multiples were already covered by smaller primes.
            for multiple in range(candidate * candidate, n + 1, candidate):
                flags[multiple] = 0
    primes = [i for i in range(2, n + 1) if flags[i]]
    return flags, primes
# Global cache shared by f(): holds divisors already used and values
# already classified as prime, so repeat candidates are rejected early.
d = set()
def f(n, j, s=''):
    """Search n-digit "jamcoins", printing up to *j* with their divisors.

    A jamcoin is an n-digit 0/1 string (first and last digit 1) whose
    value in every base 2..10 is composite.  Each hit is printed as the
    coin followed by one nontrivial divisor per base, and collected into
    the returned list.  *s*, when given, fast-forwards the generator past
    a previously processed coin.  Uses the module-global cache `d` and
    the helpers gen_coin() / prime().
    """
    ji = 0
    found = [0] * (j)
    g = gen_coin(n)
    if s:
        # Skip candidates up to and including the resume point.
        while next(g) != s:
            pass
        ji = 0
    for coin in g:
        divisors = []
        for base in range(2, 11):
            value = int(coin, base)
            # A value equal to an already-used divisor or an already-seen
            # prime disqualifies this candidate early.
            if value in d:
                break
            p = prime(value)
            if not p:
                # No divisor found: remember the prime and reject the coin.
                d.add(value)
                break
            d.add(p)
            divisors.append(p)
        else:
            if ji >= len(found):
                break
            print(coin, end=' ')
            for divisor in divisors:
                print(divisor, end=' ')
            print()
            found[ji] = [coin] + divisors
            ji += 1
            # BUG FIX: the original iterated `for j in l`, clobbering the
            # requested-count parameter j, so this stop condition compared
            # the counter against the LAST DIVISOR instead of the target.
            if ji == j:
                break
    return found
# Read input: first line = number of test cases (unused; one case assumed),
# second line = "n j" (coin length and how many jamcoins to emit).
t = input()
n, j = input().split()
n = int(n)
j = int(j)
# g = input()
# print(n, j)
# soea, soe = s_of_e(int('1'*n))
# soe.append(int('1'*n))
print("Case #1:")
l = f(n, j)
# print(l)
# for i in l:
# for j in i:
# print(j, end=' ')
# print()
|
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from django.contrib.auth.views import login as login_view
from sqlshare_rest.models import CredentialsModel
from django.contrib.auth.models import User
from django.shortcuts import redirect, render_to_response
from django.conf import settings
from apiclient.discovery import build
import httplib2
import six
if six.PY2:
from urllib import quote
if six.PY3:
from urllib.parse import quote
def wayf(request):
    """'Where-are-you-from' entry point: render the stock login view."""
    # the google login flow requires a session id, so make sure the session
    # is saved early.
    request.session.modified = True
    return login_view(request)
def require_uw_login(request):
    """Log in a UW user from REMOTE_USER plus Shibboleth name/mail headers."""
    meta = request.META
    username = meta['REMOTE_USER']
    first_name = meta.get('givenName', '')
    surname = meta.get('sn', '')
    mail = meta.get('mail', '')
    return _login_user(request, username, first_name, surname, mail)
def require_google_login(request):
    """Log in a Google user; the mail header doubles as the username."""
    meta = request.META
    mail = meta['mail']
    first_name = meta.get('givenname', '')
    surname = meta.get('surname', '')
    return _login_user(request, mail, first_name, surname, mail)
def _login_user(request, login_name, name, last_name, email):
    """Authenticate *login_name*, refresh its profile fields, start the
    session, and redirect to ?next= (or the configured web URL).

    NOTE(review): authenticate() returns None on failure, which would make
    the attribute assignments below raise AttributeError -- confirm the
    backend in use always returns a user here.
    """
    user = authenticate(username=login_name, password=None)
    user.first_name = name
    user.last_name = last_name
    user.email = email
    user.save()
    login(request, user)
    next_url = request.GET.get('next', settings.SQLSHARE_WEB_URL)
    return redirect(next_url)
def google_return(request):
    """OAuth2 callback: exchange the code for credentials, persist them,
    then redirect to the Google-login view carrying the original ?next=.

    NOTE(review): FlowModel, FlowExchangeError and Storage are not imported
    in this module (they belong to oauth2client) -- confirm they are made
    available elsewhere, otherwise this view raises NameError.
    """
    f = FlowModel.objects.get(id=request.session.session_key)
    try:
        credential = f.flow.step2_exchange(request.REQUEST)
    except FlowExchangeError as ex:
        # User clicked "deny" on the consent screen.
        if ex[0] == "access_denied":
            return render_to_response("oauth2/denied.html", {})
        raise
    flow = f.flow
    # BUG FIX: the original wrote `type(flow) == 'str'`, comparing a type
    # object against a string literal -- always False, so the stored flow
    # was never deserialised.  Test the type itself instead.
    if isinstance(flow, str):
        flow = f.flow.to_python()
    storage = Storage(CredentialsModel,
                      'id',
                      request.session.session_key,
                      'credential')
    storage.put(credential)
    google_login_url = reverse('sqlshare_rest.views.auth.require_google_login')
    google_login_url = "%s?next=%s" % (google_login_url,
                                       quote(request.GET['state']))
    return redirect(google_login_url)
|
from twx.botapi import TelegramBot, ReplyKeyboardMarkup
from telegram.ext import Updater, CommandHandler
import requests
from bs4 import BeautifulSoup
url = "https://www.google.com/finance?q=HKD"
r = requests.get(url)
soup = BeautifulSoup(r.content)
g_data = soup.find_all('span', {'class': 'bld'})
c = g_data[0].text
"""Set up the Fucking bot"""
bot = TelegramBot('331382532:AAF_VdgQmBf7rxPskV2x3BeswEjbpR7f9b4')
bot.update_bot_info().wait()
print(bot.username)
#result = bot.send_message(user_id,'test message body').wait()
#print(result)
def Rate(bot, update):
    """Handle /Rate: reply with the scraped HKD quote (module global `c`)."""
    text = '1 HKD = '
    # BUG FIX: the original called reply_text(text, c), passing the rate
    # as the second positional argument (not part of the message text),
    # so the quote never reached the user.  Append it to the text instead.
    update.message.reply_text(text + c)
# Second bot instance (python-telegram-bot); wire /Rate to the handler
# above and begin polling Telegram for updates.
updater = Updater('331382532:AAF_VdgQmBf7rxPskV2x3BeswEjbpR7f9b4')
updater.dispatcher.add_handler(CommandHandler('Rate', Rate))
#""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
updater.start_polling()
|
from pymongo import MongoClient
# NOTE(review): database credentials are hard-coded -- move them to
# environment variables or a config file.
username = 'universai'
password = 'cumzone'
cluster = '127.0.0.1:1488'
client = MongoClient(f"mongodb://{username}:{password}@{cluster}")
db1 = client.dtp.dtp      # source collection of raw DTP (accident) records
db2 = client.dtpsFull     # target database: one collection per "YYYYMM"
def address(data):
    """Build a human-readable accident address from infoDtp fields.

    Returns [street, address_text].  The street is forced to 'МКАД'
    whenever the road name mentions it; otherwise the most specific
    available combination of street/house or road/km/m is used.
    """
    info = data['data']['infoDtp']
    street = str(info['street'])
    house = str(info['house']).replace(' ', '/')
    road = str(info['dor'])
    km = str(info['km'])
    meters = str(info['m'])
    if 'МКАД' in road:
        street = 'МКАД'
    if street != '' and house != '':
        result = street + ", дом " + house
    elif km != '' and road != '':
        # Append the metre offset only when it is present and non-zero.
        suffix = ', ' + meters + 'м' if meters != '' and int(meters) != 0 else ''
        result = road + ', ' + km + 'км' + suffix
    elif street != '':
        result = street
    elif road != '':
        result = road
    else:
        result = 'Точный адрес ДТП не указан'
    return [street, result]
# Re-shard the raw records into per-month collections named "YYYYMM",
# assigning sequential ids and normalised street/address fields.
for y in range(2015, 2022):
    for m in range(1, 13):
        # Zero-pad the month so collection names sort lexicographically.
        month = f"0{m}" if m < 10 else m
        print(f"{y}:{month}")
        col = db2[f"{y}{month}"]
        count = 0
        for i in db1.find({
            "year":y,
            "month":m
        }):
            i['id'] = f"{y}{month}{count}"
            res = address(i)
            i['data']['infoDtp']['street'] = res[0]
            i['data']['infoDtp']['address'] = res[1]
            count += 1
            col.insert_one(i)
|
"""Entry point."""
import argparse
import time
import torch
import graphnas.trainer as trainer
import graphnas.utils.tensor_utils as utils
import warnings
warnings.filterwarnings('ignore')
import os
def build_args():
    """Create the GraphNAS parser, register all options, return parsed args."""
    arg_parser = argparse.ArgumentParser(description='GraphNAS')
    register_default_args(arg_parser)
    return arg_parser.parse_args()
def register_default_args(parser):
    """Register all GraphNAS CLI options (controller + child model) on *parser*."""
    parser.add_argument('--mode', type=str, default='train',
                        choices=['train', 'derive'],
                        help='train: Training GraphNAS, derive: Deriving Architectures')
    parser.add_argument('--random_seed', type=int, default=123)
    # NOTE(review): argparse type=bool is a known footgun -- bool('False')
    # is True -- but changing it would alter existing CLI behaviour.
    parser.add_argument("--cuda", type=bool, default=True, required=False,
                        help="run in cuda mode")
    parser.add_argument('--save_epoch', type=int, default=2)
    parser.add_argument('--max_save_num', type=int, default=5)
    # controller
    parser.add_argument('--shared_initial_step', type=int, default=0)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--entropy_mode', type=str, default='reward', choices=['reward', 'regularizer'])
    parser.add_argument('--entropy_coeff', type=float, default=1e-4)
    parser.add_argument('--shared_rnn_max_length', type=int, default=35)
    parser.add_argument('--load_path', type=str, default='')
    parser.add_argument('--layers_of_child_model', type=int, default=1)
    parser.add_argument('--search_mode', type=str, default='Zeng')
    # BUG FIX: --num_hops was declared type=str with an int default, so the
    # default (8) and a user-supplied value ('8') had different types.
    parser.add_argument('--num_hops', type=int, default=8)
    parser.add_argument('--format', type=str, default='Zeng')
    parser.add_argument('--max_epoch', type=int, default=5)  # outer search loop
    parser.add_argument('--ema_baseline_decay', type=float, default=0.95)
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--controller_max_step', type=int, default=50,  # models sampled per epoch
                        help='step for controller parameters')
    parser.add_argument('--controller_optim', type=str, default='adam')
    parser.add_argument('--controller_lr', type=float, default=3.5e-4,
                        help="will be ignored if --controller_lr_cosine=True")
    parser.add_argument('--controller_grad_clip', type=float, default=0)
    parser.add_argument('--tanh_c', type=float, default=2.5)
    parser.add_argument('--softmax_temperature', type=float, default=5.0)
    parser.add_argument('--derive_num_sample', type=int, default=20)
    parser.add_argument('--derive_finally', type=bool, default=True)
    parser.add_argument('--derive_from_history', type=bool, default=True)
    parser.add_argument('--num_granularity', type=int, default=0)
    # child model
    parser.add_argument("--dataset", type=str, default="Citeseer", required=False,
                        help="The input dataset.")
    parser.add_argument("--epochs", type=int, default=100,
                        help="number of training epochs")
    parser.add_argument("--retrain_epochs", type=int, default=100,
                        help="number of training epochs")
    parser.add_argument("--multi_label", type=bool, default=False,
                        help="multi_label or single_label task")
    parser.add_argument("--residual", action="store_false",
                        help="use residual connection")
    parser.add_argument("--in-drop", type=float, default=0.0,
                        help="input feature dropout")
    parser.add_argument("--lr", type=float, default=0.2,
                        help="learning rate")
    parser.add_argument("--param_file", type=str, default="cora_test.pkl",
                        help="learning rate")
    parser.add_argument("--optim_file", type=str, default="opt_cora_test.pkl",
                        help="optimizer save path")
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--max_param', type=float, default=5E6)
    parser.add_argument('--supervised', type=bool, default=True)
    # (was a pointless f-string with no placeholders)
    parser.add_argument('--submanager_log_file', type=str, default="hop_8_gran_16.txt")  # sub_manager_logger_file_{time.time()}.txt
    parser.add_argument('--device', type=str, default='1')
    parser.add_argument('--normalization', type=str, default='AugNormAdj',
                        choices=['NormLap', 'Lap', 'RWalkLap', 'FirstOrderGCN',
                                 'AugNormAdj', 'NormAdj', 'RWalk', 'AugRWalk',
                                 'NoNorm', 'LowPass'],
                        help='Normalization method for the adjacency matrix.')
def main(args):  # pylint:disable=redefined-outer-name
    """Entry point: seed the RNGs, build a Trainer, then train or derive."""
    if args.cuda and not torch.cuda.is_available():
        # CUDA was requested but no device is present: fall back to CPU.
        args.cuda = False
        print("CUDA NOT AVAILABLE!")

    torch.manual_seed(args.random_seed)
    if args.cuda:
        torch.cuda.manual_seed(args.random_seed)

    utils.makedirs(args.dataset)
    trnr = trainer.Trainer(args)

    mode = args.mode
    if mode == 'train':
        print(args)
        trnr.train()
        return
    if mode == 'derive':
        trnr.derive()
        return
    raise Exception(f"[!] Mode not found: {args.mode}")
if __name__ == "__main__":
args = build_args()
main(args)
|
#!/usr/bin/env ipython3
# -*- encoding: utf-8 -*-
import sys
import os
import fileinput
import hashlib
import random
import re
from ipython_genutils.py3compat import cast_bytes, str_to_bytes
# Read the password from the first CLI argument (despite the original
# comment, it comes from argv[1], not an environment variable).
password_environment_variable = sys.argv[1]

# Hash the password: salted SHA-1 in the "algorithm:salt:hexdigest" format,
# as in https://github.com/jupyter/notebook/blob/master/notebook/auth/security.py
# NOTE(review): random.getrandbits is not a CSPRNG and SHA-1 is dated;
# the `secrets` module / passlib would be stronger choices.
salt_len = 12
algorithm = 'sha1'
h = hashlib.new(algorithm)
salt = ('%0' + str(salt_len) + 'x') % random.getrandbits(4 * salt_len)
h.update(cast_bytes(password_environment_variable, 'utf-8') + str_to_bytes(salt, 'ascii'))
password = ':'.join((algorithm, salt, h.hexdigest()))

# Store the password in the Jupyter server configuration file.
setup_line = "c.ServerApp.password = "
new_setup_line = setup_line + "u'" + password + "'"
new_setup_line = new_setup_line.replace("# ", "")
setup_file = os.getenv("HOME") + "/.jupyter/jupyter_server_config.py"

if not os.path.exists(setup_file):
    os.system('jupyter server --generate-config')

# Rewrite the config in place, replacing the password line.
for line in fileinput.input(setup_file, inplace=True):
    if re.search(setup_line, line):
        print(new_setup_line)
    else:
        # BUG FIX: `line` keeps its trailing newline; the original
        # print(line) added a second one, double-spacing the whole file.
        print(line, end='')

# Force password_required on (same newline fix applies here).
for line in fileinput.input(setup_file, inplace=True):
    print(line.replace("# c.ServerApp.password_required = False",
                       "c.ServerApp.password_required = True"), end='')
|
import art
import game_data
import random
from replit import clear
def data_format(data):
    """Render one game entry as 'Name is X, <description> and from Y'."""
    return (
        f"Name is {data['name']}, {data['description']} "
        f"and from {data['country']}"
    )
def check_answer(guess, data1_count, data2_count):
    """Return True when *guess* picked the entry with more followers.

    Ties count as 'b', matching the original behaviour.
    """
    winner = "a" if data1_count > data2_count else "b"
    return guess == winner
#printing logo of a game
print(art.logo)
score = 0
game_over = False
# Pre-draw so each round's option A is the previous round's option B.
data2 = random.choice(game_data.data)
while not game_over:
    #picking random from a dict
    data1 = data2
    data2 = random.choice(game_data.data)
    # NOTE(review): a single re-draw does not guarantee data1 != data2 --
    # the second random.choice can still return the same entry.
    if data1 == data2:
        data2 = random.choice(game_data.data)
    #print it as name,description and country
    print(f"Compare : {data_format(data1)} ")
    print(art.vs)
    print(f"Against : {data_format(data2)} ")
    data1_count = data1["follower_count"]
    data2_count = data2["follower_count"]
    #guess of a User
    guess = input("choose : 'A' or 'B' ").lower()
    #call fuction with new variable
    is_correct = check_answer(guess,data1_count,data2_count)
    # Reset the screen between rounds.
    clear()
    print(art.logo)
    #print if answer is check or wrong
    if is_correct:
        score += 1
        print(f"you are correct and youe score is {score} \n")
    else:
        game_over = True
        print(f"sorry, you are wrong and Your Final score is {score}")
|
# -*- coding: utf-8 -*-
import networkx as nx
class NetX:
    """Thin wrapper around a networkx Graph.

    Attributes:
        G: the underlying nx.Graph instance.
        nodes: mapping whose keys are the node ids to add.
        edges: placeholder for edge data (unused in this class).
    """

    def __init__(self):
        self.G = nx.Graph()
        # BUG FIX: the original declared `nodes` and `edges` as CLASS
        # attributes, so every NetX instance shared one mutable dict;
        # per-instance state belongs in __init__.
        self.nodes = dict()
        self.edges = None

    def addNeoNodes(self):
        """Add the keys of self.nodes to the graph as nodes."""
        self.G.add_nodes_from(self.nodes)
|
#!/usr/bin/env python
import sys
import subprocess
import re
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
calendars = {}
category_default = 'DEFAULT'
re_ics = re.compile('SUMMARY|DTSTAMP|CATEGORIES')
re_fields = re.compile('^([^;:]+)[^:]*:(.*)')
re_date = re.compile('^(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})(\d{2})Z$')
arg_calendar = None
arg_date_raw = None
def usage(extra):
    # Print usage plus a specific complaint to stderr, then exit(2).
    # (Python 2 print-chevron syntax.)
    argv0 = re.sub('.*/', '', sys.argv[0])
    print >>sys.stderr, "Usage: {} [-c calendar] [YYYY[-MM[-DD]]]".format(argv0)
    print >>sys.stderr, extra
    sys.exit(2)
# Parse argv by hand: an optional "-c <calendar>" plus an optional date.
# BUG FIX (two related defects in the original for-loop version):
#   * the -c value was stored into `calendar`, which is never read, so
#     arg_calendar always stayed None and the filter below was a no-op;
#   * `i += 1` inside a `for i in range(...)` has no effect on the loop
#     variable, so the option's value was re-parsed as the date argument.
# A while-loop gives real control over the index.
i = 1
while i < len(sys.argv):
    arg = sys.argv[i]
    if arg == '-c':
        i += 1
        if i == len(sys.argv):
            usage("need argument for -c")
        arg_calendar = sys.argv[i]
    elif arg_date_raw is None:
        arg_date_raw = arg
    else:
        usage('unexpected argument "{}"'.format(arg))
    i += 1

# Split an optional YYYY[-MM[-DD]] date into its components.
arg_y = arg_m = arg_d = None
if arg_date_raw:
    date_match = re.match('(\d{4})(-(\d{2})(-(\d{2}))?)?$', arg_date_raw)
    if not date_match:
        usage('invalid date "{}"'.format(arg_date_raw))
    arg_y = date_match.group(1)
    arg_m = date_match.group(3)
    arg_d = date_match.group(5)
def parse_date(s):
    """Split an ICS UTC stamp (YYYYMMDDTHHMMSSZ) into a display string
    plus its individual components (all strings)."""
    parts = re_date.split(s)
    y, m, d = parts[1], parts[2], parts[3]
    hr, minute, sec = parts[4], parts[5], parts[6]
    when = '{}-{}-{} {}:{}:{}'.format(y, m, d, hr, minute, sec)
    return when, y, m, d, hr, minute, sec
def match(a, b):
    """Wildcard equality: None on either side matches anything."""
    if a is None or b is None:
        return True
    return a == b
class Calendar:
    """A named bucket of calendar entries."""

    def __init__(self, name):
        self.name = name
        self.entries = []

    def add_entry(self, entry):
        """Append one entry to this calendar."""
        self.entries.append(entry)
class Entry:
    # One calendar event: DTSTAMP split into components, plus SUMMARY.
    def __init__(self, kvs):
        # parse_date returns (display string, y, m, d, hr, min, sec).
        self.when, self.y, self.m, self.d, \
            self.hr, self.min, self.sec = parse_date(kvs['DTSTAMP'])
        self.summary = kvs['SUMMARY']
    def __str__(self):
        return "{} {}".format(self.when, self.summary)
    def on_day(self, y, m, d):
        # None arguments act as wildcards (see match()).
        return match(y, self.y) and \
            match(m, self.m) and \
            match(d, self.d)
# Collect every .ics file under the user's Calendars directory.
files = subprocess.check_output(
    "find ~/Library/Calendars -iname '*.ics'",
    shell=True)
# Parse each file: keep only SUMMARY/DTSTAMP/CATEGORIES lines, fold them
# into a key->value dict, then bucket the entry by its CATEGORIES value
# (falling back to the default category).
for fname in filter(len, files.split('\n')):
    with open(fname, 'r') as f:
        lines = filter(lambda line: re_ics.match(line), f.readlines())
    entries = {}
    for line in lines:
        # re_fields captures "KEY...:VALUE" -> fields[1]=key, fields[2]=value.
        fields = re_fields.split(line.replace('\r', '').replace('\n', ''))
        entries[fields[1]] = fields[2]
    key = entries.get('CATEGORIES', None) or category_default
    cal = calendars.get(key, None)
    if cal is None:
        cal = Calendar(key)
        calendars[key] = cal
    cal.add_entry(Entry(entries))
# Print the entries matching the calendar/date filters (Python 2 print).
for cal in calendars.values():
    if arg_calendar and cal.name != arg_calendar:
        continue
    for entry in cal.entries:
        if entry.on_day(arg_y, arg_m, arg_d):
            print entry
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import sys, time, math
import serial
import serial.tools.list_ports
import pandas
PORT = 'COM3'
# Close any handle left over from a previous run; `ser` may be undefined
# on the first run, hence the (deliberately broad) except.
try:
    ser.close();
except:
    print();
# Open the configured port, falling back to the first detected port.
try:
    ser = serial.Serial(PORT, 115200, timeout=100)
except:
    print ('Serial port %s is not available' % PORT);
    portlist=list(serial.tools.list_ports.comports())
    print('Trying with port %s' % portlist[0][0]);
    ser = serial.Serial(portlist[0][0], 115200, timeout=100)
ser.isOpen()
# Width of the scrolling window, in samples.
xsize = 600
def data_gen():
    """Generator feeding the strip chart: one serial line per step.

    The sample value is parsed from the first five characters of each
    line, skipping any space characters among them; yields (t, value).
    """
    t = data_gen.t
    while True:
        raw = ser.readline()
        raw = raw.decode('ascii')
        digits = ""
        # Collect the non-space characters among positions 0..4.
        for k in range(5):
            ch = raw[k]
            if ch != ' ':
                digits += str(ch)
        reading = int(digits)
        t += 1
        yield t, reading
def run(data):
    """FuncAnimation callback: append one (t, y) sample and update the line."""
    t, y = data
    if t <= -1:
        return line,
    xdata.append(t)
    ydata.append(y)
    if t > xsize:
        # Scroll the x-window left so the newest sample stays visible.
        ax.set_xlim(t - xsize, t)
    line.set_data(xdata, ydata)
    return line,
def on_close_figure(event):
    # Terminate the whole script when the plot window is closed.
    sys.exit(0)
#main
# Read one line up-front; its characters at positions 5-6 encode the
# trace colour selection.
out_str1 = ""
strin1 = ser.readline();
strin1 = strin1.decode('ascii')
if strin1[5] != ' ':
    out_str1 += str(strin1[5])
if strin1[6] != ' ':
    out_str1 += str(strin1[6])
data_for_colour = int(out_str1)
# graph is blue by default
graph = 'blue'
if data_for_colour == 1:
    graph = 'blue'
elif data_for_colour == 2:
    graph = 'green'
elif data_for_colour == 3:
    graph = 'red'
elif data_for_colour == 4:
    graph = 'yellow'
else:
    graph = 'blue'
# Starting x value for the data generator.
data_gen.t = 1
fig = plt.figure()
fig.canvas.mpl_connect('close_event', on_close_figure)
ax = fig.add_subplot(111)
#sets the colour of the graph
line, = ax.plot([], [], lw=2, color=graph)
ax.set_ylim(-10, 400)
ax.set_xlim(0, xsize)
ax.grid()
xdata, ydata = [], []
# Important: Although blit=True makes graphing faster, we need blit=False to prevent
# spurious lines to appear when resizing the stripchart.
ani = animation.FuncAnimation(fig, run, data_gen, blit=False, interval=100, repeat=False)
plt.show()
|
import folium
import csv
def color_producer(elevation):
    """Map a volcano elevation in metres to a marker colour.

    Below 1000 m -> green, 1000-2999 m -> orange, 3000 m and up -> red.
    """
    if elevation < 1000:
        return "green"
    if elevation < 3000:
        return "orange"
    return "red"
# Base map centred on the continental US.
my_map = folium.Map(location=[38.9700012,-112.5009995], zoom_start=4)
fgv = folium.FeatureGroup(name="Volcanoes")
fgp = folium.FeatureGroup(name="Population")
# One circle marker per volcano, fill colour chosen by elevation band.
with open("Volcanoes_USA.txt", newline="") as csvfile:
    volcanoes = csv.DictReader(csvfile)
    for row in volcanoes:
        lat, lon = map(float, [row["LAT"], row["LON"]])
        elev = float(row["ELEV"])
        color = color_producer(elev)
        #fg.add_child(folium.Marker(location=(lat, lon), popup=str(elev)+" m", icon=folium.Icon(color=color)))
        fgv.add_child(folium.CircleMarker(location=(lat, lon), radius=9, color="grey", fill=True,
                      fill_color=color, fill_opacity=1, popup=str(elev)+" m"))
# Country polygons shaded by 2005 population.
# NOTE(review): this open() handle is never closed explicitly; it is only
# released by garbage collection.
fgp.add_child(folium.GeoJson(open("world.json", "r", encoding="utf-8-sig").read(),
              style_function=lambda x: {"fillColor":"green" if x["properties"]["POP2005"] < 10000000
              else "orange" if 10000000 <= x["properties"]["POP2005"] < 20000000 else "red"}))
my_map.add_child(fgv)
my_map.add_child(fgp)
my_map.add_child(folium.LayerControl())
my_map.save("my_map.html")
|
# This file declares all the menus related
# to the books feature,
# following Clean Architecture principles.
def booksMenu():
    """Print the banner and option list for the books sub-menu."""
    for row in (
        "****************************************",
        "* B O O K S M E N U *",
        "****************************************",
        "* a. Add New Book *",
        "* b. Modify Existing Book *",
        "* c. View Book *",
        "* d. Delete Book *",
        "* e. List Books *",
        "* f. Find a Book *",
        "* 0. Back To The Main Menu *",
        "* *",
        "****************************************\n",
    ):
        print(row)
def newBook():
    """Print the banner shown when creating a new book."""
    border = "****************************************"
    print(border)
    print("* N E W B O O K *")
    print(border + "\n")
def choosingBook(msg):
    """Ask the user for the ID of the book the action *msg* applies to."""
    border = "****************************************"
    print(border)
    print("* Choose The Book ID *")
    print(f"* You Would Like to {msg} *")
    print(border + "\n")
def internalOptionMenu(banner, option):
    """Print a single-option menu titled *banner* whose choice 1 is *option*."""
    border = "****************************************"
    print(border)
    print(f"* {banner} *")
    print(border)
    print(f"* 1. {option} *")
    print("* 0. Back To The Previous Menu *")
    print("* *")
    print(border + "\n")
def findBookMenu():
    """Print the find-a-book menu.

    Fixes: the bottom border was two characters shorter than the top one
    (38 vs 40 asterisks); both borders now share the same line. Also fixes
    the user-facing typo "autor" -> "author".
    """
    border = "****************************************"
    print(border)
    print("* L O O K I N G FOR A B O O K ? *")
    print(border)
    print("* 1. Find a book by the book name, *")
    print("* author or genre. *")
    print("* 0. Back To The Previous Menu *")
    print("* *")
    print(border + "\n")
def writingNameMenu():
    """Prompt banner asking the user to type a book name, author or genre.

    Fixes: the bottom border was one character shorter than the top one;
    also drops a pointless f-string prefix on a literal with no placeholder.
    """
    border = "**************************************************"
    print(border)
    print("* Please Type The Book Name, Author or Genre *")
    print("* All Matches Would Be Listed *")
    print(border + "\n")
def resultsFound():
    """Banner printed above the list of search results.

    Fix: the bottom border was two characters shorter than the top one;
    both borders now share the same line.
    """
    border = "*********************************************"
    print(border)
    print("* W E F O U N D T H E S E R E S U L T S *")
    print(border + "\n")
from yacs.config import CfgNode
from kale.predict.decode import GripNetLinkPrediction
from kale.prepdata.supergraph_construct import SuperGraph, SuperVertexParaSetting
def get_supervertex(sv_configs: CfgNode) -> SuperVertexParaSetting:
    """Build a SuperVertexParaSetting from a supervertex configuration node."""
    name = sv_configs.NAME
    inter_feat = sv_configs.INTER_FEAT_CHANNELS
    inter_agg = sv_configs.INTER_AGG_CHANNELS_LIST
    exter_pairs = sv_configs.EXTER_AGG_CHANNELS_LIST

    # No external aggregation channels configured: the simple setting suffices.
    if not exter_pairs:
        return SuperVertexParaSetting(name, inter_feat, inter_agg)

    return SuperVertexParaSetting(
        name,
        inter_feat,
        inter_agg,
        exter_agg_channels_dict=dict(exter_pairs),
        mode=sv_configs.MODE,
    )
def get_model(supergraph: SuperGraph, cfg: CfgNode) -> GripNetLinkPrediction:
    """Construct the link-prediction model from the supergraph and solver config."""
    return GripNetLinkPrediction(supergraph, cfg.SOLVER.BASE_LR, cfg.SOLVER.EPSILON)
|
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def explore_dataset(df):
    """Print a short structural summary (column and row counts) of *df*."""
    print("\nExploring the Dataset..\n")
    n_rows, n_cols = df.shape
    print(n_cols, " columns.")
    print(n_rows, " observations.\n")
    # Collected but not printed, as in the original version.
    datatypes = df.dtypes
def plot_correlation(df):
    """Render *df*'s correlation matrix as a heatmap, save it to PNG, and show it."""
    corr = df.corr()
    fig, ax = plt.subplots(figsize=(10, 8))
    heatmap = sns.heatmap(corr, vmax=0.8, square=True)
    heatmap.get_figure().savefig("correlation_matrix.png")
    plt.show()
def main():
    """Read the train/test CSV files, summarise each, and strip the ID column."""
    print("Reading data")
    train_df = pd.read_csv("all-data/train-set.csv")
    explore_dataset(train_df)
    test_df = pd.read_csv("all-data/test-set.csv")
    explore_dataset(test_df)
    # Remove the leading ID column from the training frame.
    train_df = train_df.iloc[:, 1:]


if __name__ == '__main__':
    main()
# Two equivalent ways of creating an empty tuple.
sampleTuple1 = ()        # literal syntax
print(sampleTuple1)

sampleTuple2 = tuple()   # built-in constructor
print(sampleTuple2)
def check(year, dates):
    """Decode a day-of-year field (carrying a +500 offset) into 'YYYYMONDD'.

    After removing the 500 offset, `dates` is interpreted against a fixed
    month-length table (February counted as 29 days, matching the original
    threshold chain) and rendered as year + 3-letter month + day number.
    """
    monthList = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN",
                 "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
    # Cumulative day count at the end of each month, Feb fixed at 29 days.
    ends = [31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]
    dates -= 500
    month = 0
    while month < len(ends) and dates > ends[month]:
        month += 1
    day = dates if month == 0 else dates - ends[month - 1]
    return year + monthList[month] + str(day)
# Read the record count, the target birthday string, then one line per record.
N = int(input())
givenBD = input()
found = False
for _ in range(N):
    line = input()
    nic = line.split(" ")[0]
    # 10-character NICs carry a two-digit year (19xx) with the day field at
    # [2:5]; longer (new-format) NICs carry a four-digit year and use [4:7].
    if len(nic) == 10:
        year = "19" + nic[0:2]
        dates = int(nic[2:5])
    else:
        year = nic[0:4]
        dates = int(nic[4:7])
    # Only day values >= 500 are decoded (presumably the female NIC
    # encoding -- smaller values are skipped entirely; TODO confirm).
    if dates >= 500 and check(year, dates) == givenBD:
        print(line)
        found = True
if not found:
    print("SORRY!HER FB IS LYING")
|
## Python has no do-while, so a plain while is used; nothing is printed
## because the condition (100 <= 10) is false on entry.
contador = 100
while contador <= 10:
    print("%d\n" % (contador))
    contador += 2
|
# Generated by Django 3.2.18 on 2023-05-19 19:03
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds an alternative-text field for the partner logo to the surveys home page model."""

    # Must be applied after the previous survey migration.
    dependencies = [
        ('survey', '0002_auto_20230421_1715'),
    ]

    operations = [
        migrations.AddField(
            model_name='surveyshomepage',
            name='partner_logo_alt',
            field=models.TextField(blank=True, default='', help_text='A concise description of the image for users of assistive technology.', verbose_name='Partner logo alternative text'),
        ),
    ]
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('home.views',
    # Home page.
    url(r'^$', 'index', name='home'),
    # BUG FIX: the pattern was an HTML-mangled named group ("(lt;short_id>"
    # instead of "(?P<short_id>"), flagged by the original comment. It is now
    # a proper named capture of exactly six word characters, so the view
    # receives `short_id` as a keyword argument.
    url(r'^(?P<short_id>\w{6})$', 'redirect_original', name='redirectoriginal'),
    url(r'^makeshort/$', 'shorten_url', name='shortenurl'),
)
|
import unittest
import os
import sys
import pathlib
from archivemanager import ArchiveManager
from backupmanager import BackupManager
from config import ConfigManager
# aPATH = pathlib.Path("archives/").resolve()
filetotest = pathlib.Path(sys.executable)
class TestArchiveManager(unittest.TestCase):
    """Exercises ArchiveManager's add/find/delete file operations.

    Tests are numbered so they run in order under unittest's default
    alphabetical sorting: add (20) must precede find (30), and so on.
    """

    def setUp(self):
        # Fresh manager per test; ConfigManager supplies its settings.
        self.config = ConfigManager()
        self.AM = ArchiveManager(self.config)

    def test_20_fileadd(self):
        """Adding a file succeeds; re-adding the same file also reports success."""
        self.assertTrue(self.AM.fileadd(filetotest))
        # Adding the same file should also return True, even though it's not added twice.
        self.assertTrue(self.AM.fileadd(filetotest))

    def test_30_filefind(self):
        """A previously added file can be found."""
        self.assertTrue(self.AM.filefind(filetotest))

    def test_40_check_allarchives(self):
        """Every archive record points at a path that exists on disk."""
        for f in self.AM.all_archives():
            # BUG FIX: the original asserted str(os.path.exists(...)), and the
            # non-empty string "False" is truthy, so the check could never
            # fail. Assert the boolean result directly instead.
            self.assertTrue(os.path.exists(f.fullpath))

    def test_60_addallpython(self):
        """All regular files next to the Python executable can be added."""
        p = pathlib.Path(sys.executable).parent
        # Add all files in the python directory.
        for x in p.glob("*"):
            if not x.is_dir():
                self.assertTrue(self.AM.fileadd(x))

    def test_70_deletefilerecord(self):
        """Deleting the first recorded file removes it from the database."""
        # Find the record to delete, delete it, then validate it's gone.
        record = pathlib.Path(self.AM.all_files()[0].fullpath)
        self.assertTrue(self.AM.filedelete(record))
        self.assertFalse(self.AM.filefind(record))
# class MyBackup(backupmanager.BackupManager):
# def __init__(self, **kwargs):
# super(MyBackup, self).__init__(**kwargs)
# self.stopcount = 0
# self.fileglob = ["*.pyc", "*.dll", "*.csv", "*.iso"]
# def _dirglob(self):
# return [] + [self._apath(),]
# def _drives(self):
# return [str(pathlib.Path(sys.executable).parent),]
# def _fileglob(self):
# return
# def _stop(self):
# # print "test: %s" % self.stopcount
# self.stopcount += 1
# if self.stopcount > 8:
# return True
# else:
# return False
# class MyBlankBackup(backupmanager.BackupManager):
# pass
# class TestBackupManager(unittest.TestCase):
# def test_10(self):
# b = MyBackup(mypath=aPATH)
# b.run()
# def test_20(self):
# # test a blank class with nothing in it.
# b = MyBlankBackup()
# b.run()
if __name__ == '__main__':
    # Run the whole unittest suite when this file is executed directly.
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.