text stringlengths 8 6.05M |
|---|
from selene import browser
def password_recovery():
browser.open_url('https://pf-client-api.partnerearning.com')
browser.element('a.nav-general__login-link:nth-child(1)').click()
browser.element('.popup-authorization__forgot-link').click()
browser.element('.popup-block__form > div:nth-child(1) > input:nth-child(1)').click().send_keys('restorepass@gmail.com')
browser.element('.popup-block__btn').click()
|
import pygame
pygame.init()
pygame.mixer.music.load('04. Djimetta - Tudo Ou Nada (feat. Bander).mp3')
pygame.mixer.music.play()
pygame.event.wait() |
import json
from collections import namedtuple
class Serializer(object):
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
@staticmethod
def from_json(json_data):
return json.loads(json_data, object_hook=Serializer.custom_decoder)
@staticmethod
def custom_decoder(json_obj):
return namedtuple('custom_decoded', json_obj.keys())(*json_obj.values())
|
from heapq import *
n, k = map(int, input().strip().split(' '))
h = list(map(int, input().strip().split(' ')))
heapify(h)
ans = 0
while len(h) >= 2 and h[0] < k:
least = heappop(h)
heappushpop(h, least + (h[0] * 2))
ans += 1
if h[0] < k:
print(-1)
else:
print(ans) |
from enum import Enum
class DateMode(Enum):
DAILY = 'B'
WEEKLY = 'W'
MONTHLY = 'BM'
YEARLY = 'BY'
|
import pyodbc
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from urllib.request import urlopen
app = Flask(__name__)
class Menu:
def __init__(self):
self.urlMenu = "https://www.mealgaadi.in/Meal_API/products_api.php?query=product_category"
self.jsonData = json.load(urlopen(self.urlMenu))
self.urlCategory = "https://www.mealgaadi.in/Meal_API/products_api.php?query=product_menus&category_Id="
self.categoryId = ""
def extractMenu(self):
menuItems = []
for menuObject in self.jsonData:
if menuObject == 'result':
for block in self.jsonData[menuObject]:
for property in block:
if property == "name":
menuItems.append(block[property])
return (menuItems)
class Category(Menu):
def extractCatergoryId(self,category):
for menuObject in self.jsonData:
if menuObject == 'result':
for block in self.jsonData[menuObject]:
for property in block:
if property == "name" and block[property] == category:
self.categoryId = block['category_Id']
self.urlCategory += self.categoryId
print(self.urlCategory)
def getdata(self):
subCatData = json.load(urlopen(self.urlCategory))
subCatItem = []
for menuObject in subCatData:
if menuObject == 'result':
for block in subCatData[menuObject]:
for property in block:
if property == "name":
subCatItem.append(block[property])
return (subCatItem)
@app.route('/webhook',methods = ['POST'])
def webhook():
req = request.get_json(silent=True,force=True)
res = makeWebhookResult(req)
res = json.dumps(res,indent = 4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeSpeech(result):
speech = "The details you asked are : \n\n " + ",".join([str(i) for i in list(result)])
print(speech)
return {'textToSpeech': speech,'displayText': speech,'fulfillmentText': speech}
def makeWebhookResult(req):
if req['queryResult']['action'] == 'showMenuAction':
menu = Menu()
return makeSpeech( menu.extractMenu())
elif req['queryResult']['action'] == 'expandMenuAction':
result = req['queryResult']
parameters = result.get('parameters')
category = parameters.get('categoryEntity')
cat = Category()
cat.extractCatergoryId(category)
return makeSpeech(cat.getdata())
if __name__ == "__main__":
port = int(os.getenv('PORT',80))
app.run(debug=True,port = port,host = '0.0.0.0')
|
import sys
import platform
from selenium import webdriver
from tests.test_login import LoginTest
from tests.test_adding_new_employees import AddingNewEmployeesTest
from tests.test_clock_in import ClockInClockOutTest
from modules.logs_module import WriteLogs
default_parameters = {'base_url': ['', None],
'test_login': [False, LoginTest],
'test_adding_new_employees': [False, AddingNewEmployeesTest],
'test_clock_in': [False, ClockInClockOutTest],
'browser': ['chrome', None],
'browser_version': ['2.24', None],
}
arg_list = sys.argv
if any('http' in i for i in arg_list) and any('base_url' in i for i in arg_list):
for i in arg_list:
for key, value in default_parameters.items():
if key in i:
i = i.replace(key, '').replace('=', '')
if str(i) in ('True', 'true', '1'):
default_parameters[key][0] = True
else:
default_parameters[key][0] = i
for key, value in default_parameters.items():
if value[0] is True and value[1]:
driver = None
if default_parameters['browser'][0] in ('chrome', 'Chrome'):
driver_path = '{}/drivers/{}/{}/{}/chromedriver'.format(WriteLogs().return_parent_path(),
default_parameters['browser'][0],
default_parameters['browser_version'][0],
platform.system().lower())
driver = webdriver.Chrome('{}'.format(driver_path))
elif default_parameters['browser'][0] in ('firefox', 'Firefox'):
driver = webdriver.Firefox()
default_parameters[key][1](default_parameters['base_url'][0], driver).start_test()
else:
print ('You did not define base_url parameter (the parameter should contain "http") '
'or some other parameter is not defined')
|
#!/usr/bin/env python3
deck = [4]*9
deck.append(16)
sd = sum(deck)
p = [x/sd for x in deck]
# Dealer hits soft 17
hit_soft_17 = True
def dealer_p(total, ace, cards):
outcomes = [0.0]*22
if (total > 21):
# Dealer busts
outcomes[0] = 1.0
elif (total >= 17):
# Dealer stands
outcomes[total] = 1.0
elif ((total==7) and ace and not(hit_soft_17)):
# Dealer stands on a soft 17
outcomes[total+10] = 1.0
elif ((8 <= total <= 11) and ace):
# Dealer stands on a soft total > 17
outcomes[total+10] = 1.0
else:
# Dealer hits
# Remove blackjacks
if (cards==1 and total==1):
high=9
else:
high=10
if (cards==1 and total==10):
low=1
else:
low=0
for i in range(low,high):
l = [p[i]*x for x in dealer_p(total+i+1,(ace or i==0),cards+1)]
outcomes[:] =[a + b for a, b in zip(outcomes, l)]
return(outcomes)
def memoize(f):
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__
@memoize
def player_p(total, ace, double, dc):
# Bust
if (total > 21):
return(-1.0, 'bust')
# Stand
if (total <= 11 and ace):
# Stand on total+10
e_stand = 1.0*sum(dealer[0:total+10]) - 1.0*sum(dealer[total+11:])
else:
# Stand on total
e_stand = 1.0*sum(dealer[0:total]) - 1.0*sum(dealer[total+1:])
# Double
if (double):
e_double = -100.0
else:
e_double = 0.0
for i in range(0,10):
e_card, strategy = player_p(total+i+1,(ace or i==0),True,dc)
e_double += 2.0*p[i]*e_card
# Hit
# We can't hit again if doubling
if (double):
e_hit = -100.0
else:
e_hit = 0.0
for i in range(0,10):
e_card, strategy = player_p(total+i+1,(ace or i==0),double,dc)
e_hit += p[i]*e_card
# If we haven't busted we can always stand
e_best = e_stand
strategy = 'stand'
if not(e_hit==None) and (e_hit > e_best):
e_best = e_hit
strategy = 'hit'
if not(e_double==None) and (e_double > e_best):
e_best = e_double
strategy = 'double'
return(e_best, strategy)
# Dealer has 10 showing
#bj = 1.0*p[0]
#d = dealer_p(10, False, 1)
#d = d/sum(d)
#print(bj)
#print(d)
# Dealer has A showing
#bj = 1.0*p[9]
#d = dealer_p(1, True, 1)
#d = d/sum(d)
#print(bj)
#print(d)
# Dealer
d_card = 7
d = dealer_p(d_card, False, 1)
dealer = d
# Dealer blackjack
p_dbj = 0.0
# Hard
for i in range(4,22):
e, s = player_p(i, False, False, d_card)
print('Dealer showing {}, hard {} : {}, E = {:4.2f}'.format(d_card,i,s,e))
# Soft
for i in range(2,12):
e, s = player_p(i, True, False, d_card)
print('Dealer showing {}, soft {} : {}, E = {:4.2f}'.format(d_card,i,s,e))
# Splitting
for i in range(0,10):
es = 0.0
for j in range(0,10):
if (i==j):
# No resplitting; we adjust for resplitting later
continue
elif (i==0 and j==9) or (i==9 and j==0):
# Doesn't typically count as a blackjack
es += p[j]*1.0
else:
# Remaining cases
e, s = player_p(i+j+2, ((i==0) or (j==0)), False, d_card)
es += p[j]*e
es = 2*es/(1-2*p[i])
print('Dealer showing {}, holding {}/{} : {}, E = {:4.2f}'.format(d_card,i+1,i+1,'split',es))
|
#! /usr/bin/env python
import argparse
import os
import re
def parse_args():
parser = argparse.ArgumentParser(description="Submit c3i one-off job(s)")
parser.add_argument(
"feedstocks",
nargs="+",
help="Feedstock(s) to submit as a one-off job(s)")
parser.add_argument(
"--pipeline-prefix",
default=os.environ.get("C3I_PIPELINE_PREFIX", "null"),
help=("Prefix for the pipeline, if not specified the value of "
"the C3I_PIPELINE_PREFIX environment variable or 'null' is "
"used.")
)
parser.add_argument(
"--split",
action="store_true",
help="Split the feedstocks into seperate pipelines, one feedstock per pipeline"
)
args = parser.parse_args()
return args
def submit_pipeline(pipeline_prefix, feedstocks):
feedstock = feedstocks[0]
match = re.match("(.*)-feedstock", feedstock)
if match is None:
raise ValueError(f"cannot determine package name in {feedstock}")
pkg_name = match.group(1)
pipeline_name = f"{pipeline_prefix}_{pkg_name}"
feedstocks_str = " ".join(feedstocks)
command = f"c3i one-off {pipeline_name} {feedstocks_str}"
print("Running:", command)
os.system(command)
def main():
args = parse_args()
if args.split:
for feedstock in args.feedstocks:
submit_pipeline(args.pipeline_prefix, [feedstock])
else:
submit_pipeline(args.pipeline_prefix, args.feedstocks)
if __name__ == "__main__":
main()
|
class seq():
seqarr = ""
def isValidLetter(self, ch):
return(ch.isalpha())
def __init__(self, seqarr):
for ch in seqarr:
if( not (self.isValidLetter(ch))):
raise ValueError("illegal argument")
else:
self.seqarr = seqarr
def seqLength(self):
return len(seqarr)
def getSeq(self):
return seqarr
def toString(self):
stringy = ""
for ch in seqarr:
stringy += ch.upper()
return stringy
def equals(self, newObj):
if(newObj is seq):
return True
elif (type(newObj).__name__ != type(self).__name__):
return False
elif (newObj.toString() == self.toString()):
return True
class proteinseq(seq):
def __init__(self, seqarr):
super().__init__(seqarr)
def isValidLetter(ch):
ch = ch.upper()
booleany = not ((ch == 'B') or (ch == 'J') or (ch == 'O') or (ch == 'U') or (ch == 'X') or (ch == 'Z'))
return (super.isValidLetter(ch) and booleany)
class DNAseq(seq):
def __init__(self, seqarr):
super().__init__(seqarr)
def isValidLetter(ch):
ch = ch.upper()
return((let=='A') or (let=='C') or (let=='G') or (let=='T'))
class CodingDNAseq(DNAseq):
def __init__(self, seqarr):
super().__init__(seqarr)
def checkStartCodon(self):
if(len(seqarr) < 3):
return False
else:
return ((seqarr[0].upper() == 'A') and (seqarr[1].upper() == 'T') and (seqarr[2].upper() == 'G'))
def translate(self):
if(not self.checkStartCodon):
raise RuntimeError("No Start Codon")
else:
maxNumberOfCodons = self.seqLength()/3
for i in xrange(maxNumberOfCodons):
n = i*3
stringed = ""
while(n<(i+1)*3):
if(stringed.getAminoAcid() == '$'):
break
else:
stringed += seqarr[n]
if(stringed.getAminoAcid() == '$'):
break
else:
translated[i] = stringed.getAminoAcid
return translated
def getAminoAcid(codon):
if ( not codon ):
return '$'
else:
return{
"AAA": 'K',
"AAC": 'N',
"AAG": 'K',
"AAT": 'N',
"ACA": 'T',
"ACC": 'T',
"ACG": 'T',
"ACT": 'T',
"AGA": 'R',
"AGC": 'S',
"AGG": 'R',
"AGT": 'S',
"ATA": 'I',
"ATC": 'I',
"ATG": 'M',
"ATT": 'I',
"CAA": 'Q',
"CAC": 'H',
"CAG": 'Q',
"CAT": 'H',
"CCA": 'P',
"CCC": 'P',
"CCG": 'P',
"CCT": 'P',
"CGA": 'R',
"CGC": 'R',
"CGG": 'R',
"CGT": 'R',
"CTA": 'L',
"CTC": 'L',
"CTG": 'L',
"CTT": 'L',
"GAA": 'E',
"GAC": 'D',
"GAG": 'E',
"GAT": 'D',
"GCA": 'A',
"GCC": 'A',
"GCG": 'A',
"GCT": 'A',
"GGA": 'G',
"GGC": 'G',
"GGG": 'G',
"GGT": 'G',
"GTA": 'V',
"GTC": 'V',
"GTG": 'V',
"GTT": 'V',
"TAA": '$',
"TAC": 'Y',
"TAG": '$',
"TAT": 'Y',
"TCA": 'S',
"TCC": 'S',
"TCG": 'S',
"TCT": 'S',
"TGA": '$',
"TGC": 'C',
"TGG": 'W',
"TGT": 'C',
"TTA": 'L',
"TTC": 'F',
"TTG": 'L',
"TTT": 'F'}[codon]
def main():
sequenced = seq(['A', 'T', 'G', 'C', 'C', 'C'])
print sequenced.toString()
if __name__ == '__main__':
main()
|
import os
from reorg import cli
def test_whats_dir_target(monkeypatch):
monkeypatch.setenv("PWD", "/foo")
assert "/foo/reorged" == cli.whats_dir_target(None)
assert "/bar/reorged" == cli.whats_dir_target("/bar")
def test_prepare_dir_target(tmpdir):
target_dir = cli.whats_dir_target(str(tmpdir))
print(target_dir)
cli.prepare_dir_target(target_dir)
assert os.path.exists(target_dir)
# Test when target directory already exists with stuff in it
with open(os.path.join(target_dir, "some-file.md"), "w+") as f:
f.write("Hello world!")
cli.prepare_dir_target(target_dir)
assert os.path.exists(target_dir) and len(os.listdir(target_dir)) == 0
|
import os.path
import math
from os import path
allNames =[
"lizard",
"shiftHappens",
"erato",
"cubes",
"sponza",
"daviaRock",
"rungholt",
"breakfast",
"sanMiguel",
"amazonLumberyardInterior",
"amazonLumberyardExterior",
"amazonLumberyardCombinedExterior",
"gallery",
]
# 0 = lizard
# 1 = shift happens
# 2 = erato
# 3 = cubes
# 4 = sponza
# 5 = daviaRock
# 6 = rungholt
# 7 = breakfast
# 8 = sanMiguel
# 9 = amazon lumberyard interior
# 10 = amazon lumberyard exterior
# 11 = amazon lumberyard combined with interior perspective
# 12 = gallery
class sceneContainer:
def __init__(self):
self.sceneNameId = 0
self.sceneName = ""
self.subdivisions = []
class storageType:
def __init__(self):
self.branch = 0
self.leaf = 0
self.branchMemory = 0
self.leafMemory = 0
self.subdivision = 0
self.triangleCount = 0
self.averageBvhDepth = 0
self.totalTime = 0
self.timeRaySum = 0
self.timeTriangleSum = 0
self.timeNodeSum = 0
class everything:
def __init__(self, workType = 0, gangType = 0):
#maximum branchingfactor and max leafsite
self.minBranchingFactorList = [[8,2],[4,2]]
self.maxBranchingFactorList = [[8,64],[4,64]]
self.minLeafSizeList = [[1,8],[1,4]]
self.maxLeafSizeList = [[64, 8], [64, 4]]
#number of subdivisions we test:
self.subdivisionRange = [0, 5]
self.subdivisionCount = self.subdivisionRange[1] - self.subdivisionRange[0] + 1
# 0 = leaf , 1 = node (need to adjust when table change!) (i separate those since i dont want to do a combined performance test since it gets messy quite fast)
self.workType = workType
self.workName = ["Leaf", "Node"]
# 0 = avx, sse = 1
self.gangType = gangType
self.gangName = ["Avx", "Sse"]
#the folder all the scene folders are in: (leave empty if no folder)
if(self.subdivisionRange[1] == 0):
#self.folder = "SavesPerf/Laptop/" +self.workName[self.workType] + "Memory" + self.gangName[self.gangType] +"/"
self.folder = "Results/"
else:
#self.folder = "SavesPerf/Laptop/" +self.workName[self.workType] + "MemorySub" + self.gangName[self.gangType] +"/"
self.folder = "Results/"
#real outputFolder is outputFolderName + names + outputPrefix
self.outputFolderName = "Summary/"
#nameIds of the tables
self.names = [0]
#self.names = [7,8,9,10,11,12]
#prefixTo the folderNames
#self.prefix = "SSESeqMemoryLeaf"
self.prefix = ""
#Prefix to the output txt (so its sceneNamePrefix.txt)
self.outputPrefix = self.workName[self.workType] + "Memory" + self.gangName[self.gangType]
self.minBranchingFactor = self.minBranchingFactorList[self.gangType][self.workType]
self.maxBranchingFactor = self.maxBranchingFactorList[self.gangType][self.workType]
self.minLeafSize = self.minLeafSizeList[self.gangType][self.workType]
self.maxLeafSize = self.maxLeafSizeList[self.gangType][self.workType]
self.storage = [None for i in range(len(self.names))]
self.possibleMemorySizes = [4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64]
def readDataFromFiles(self):
# now loop over all scenes to do the single scene file (and collect min max)
# then loop over all and get averages
for loopId, nameId in enumerate(self.names):
self.storage[loopId] = sceneContainer()
self.storage[loopId].sceneName = allNames[nameId]
self.storage[loopId].sceneNameId = nameId
self.storage[loopId].subdivisions = [[] for _ in range(self.subdivisionCount)]
firstLine = self.getFirstLine()
#first loop over different possible memory sizes for l and b (and ml and mb)
for mb in self.possibleMemorySizes:
for ml in self.possibleMemorySizes:
#now do what normal data manger does, loop over b and l and write files
for loopId, nameId in enumerate(self.names):
name = allNames[nameId]
for s in range(self.subdivisionCount):
#anyFound = False
#the folder all the scene folders are in: (leave empty if no folder)
if(self.subdivisionRange[1] == 0):
#self.folder = "SavesPerf/Laptop/" +self.workName[self.workType] + "Memory" + self.gangName[self.gangType] +"/"
folder = self.folder + name + self.gangName[self.gangType] +"Perf/"
else:
#self.folder = "SavesPerf/Laptop/" +self.workName[self.workType] + "MemorySub" + self.gangName[self.gangType] +"/"
folder = self.folder + name + self.gangName[self.gangType] + "PerfSub" + str(s) + "/"
for b in range(self.maxBranchingFactor -(self.minBranchingFactor - 1)):
for l in range(self.maxLeafSize - (self.minLeafSize - 1)):
branch = b + self.minBranchingFactor
leaf = l + self.minLeafSize
#test all variables of this file
if(self.subdivisionRange[1] == 0):
fileName = folder + name + "_b" + str(branch) + "_l" + str(leaf) + "_mb" + str(mb) + "_ml" + str(ml) + "_Perf.txt"
else:
fileName = folder + name + "_b" + str(branch) + "_l" + str(leaf) + "_mb" + str(mb) + "_ml" + str(ml) + "_Perf.txt"
if (path.exists(fileName)):
#open file and read important values
f = open(fileName, "r")
if f.mode == 'r':
storagePart = self.fillStorage(f, branch, leaf, mb, ml, s)
self.storage[loopId].subdivisions[s].append(storagePart)
#remove all empty fields (due to scenes with different max subdivision)
for s in self.storage:
for i in reversed(range(self.subdivisionCount)):
if (len(s.subdivisions[i]) == 0):
s.subdivisions.pop(i)
def printEverything(self):
#prints all files:
firstLine = "branchMemory, leafMemory, triangleCount, averageBvhDepth, raytracerTotalTime, rayTimeSum, triangleIntersectionSum, rayTimeSumWithoutTri"
for scene in self.storage:
for sub in scene.subdivisions:
for b in range(self.maxBranchingFactor -(self.minBranchingFactor - 1)):
for l in range(self.maxLeafSize - (self.minLeafSize - 1)):
branch = b + self.minBranchingFactor
leaf = l + self.minLeafSize
foundAny = False
for obj in sub:
if (obj.leaf != leaf) or (obj.branch != branch):
continue
if not foundAny:
configText = "b" + str(obj.branch) + "l" + str(obj.leaf)
name = scene.sceneName
if(self.subdivisionRange[1] == 0):
fileName = self.outputFolderName + name + "/" + name + self.prefix + self.outputPrefix + configText + "Table.txt"
if not os.path.exists(self.outputFolderName + name):
os.makedirs(self.outputFolderName +name)
else:
fileName = self.outputFolderName + name + "Sub" + str(obj.subdivision) + "/" + name + self.prefix + self.outputPrefix + configText + "Table.txt"
if not os.path.exists(self.outputFolderName + name+ "Sub" + str(obj.subdivision)):
os.makedirs(self.outputFolderName + name + "Sub" + str(obj.subdivision))
fResult = open(fileName, "w+")
fResult.write(firstLine + "\n")
foundAny = True
line = self.makeLine([obj.branchMemory, obj.leafMemory, obj.triangleCount, obj.averageBvhDepth, obj.totalTime, obj.timeRaySum, obj.timeTriangleSum, obj.timeNodeSum])
fResult.write(line + "\n")
if (foundAny):
fResult.close()
def printEverythingOneFile(self):
#idea: everything of storage array in one file
later = 0
firstLine = "name, nameId, subdivision, branch, branchMemory, leaf, leafMemory, triangleCount, averageBvhDepth, raytracerTotalTime, rayTimeSum, triangleIntersectionSum, rayTimeSumWithoutTri"
fileName = self.outputFolderName + "total" + self.workName[self.workType] + self.gangName[self.gangType]+ "PerfTable.txt"
fResult = open(fileName, "w+")
fResult.write(firstLine + "\n")
for scene in self.storage:
for sub in scene.subdivisions:
for obj in sub:
line = self.makeLine([scene.sceneName, scene.sceneNameId, obj.subdivision, obj.branch, obj.branchMemory, obj.leaf, obj.leafMemory, obj.triangleCount, obj.averageBvhDepth, obj.totalTime, obj.timeRaySum, obj.timeTriangleSum, obj.timeNodeSum])
fResult.write(line + "\n")
def makeLine(self, array):
line = "" + str(array[0])
for element in array[1:]:
line += ", " + str(element)
return line
def gatherValue(self, string, key):
result = 0
foundAnything = False
if(string.find(key) != -1):
for t in string.split():
try:
result = float(t)
foundAnything = True
break
except ValueError:
pass
return foundAnything, result
def fillStorage(self, file, branch, leaf, branchMemory, leafMemory, subdivision):
storage = storageType()
storage.branch = branch
storage.leaf = leaf
storage.subdivision = subdivision
storage.branchMemory = branchMemory
storage.leafMemory = leafMemory
for x in file:
#check our variables. Since the performance stuff doesnt change that much its hardcoded.
t = self.gatherValue(x, "Triangle Count:")
if t[0] :
storage.triangleCount = t[1]
t = self.gatherValue(x, "Average BVH depth:")
if t[0] :
storage.averageBvhDepth = t[1]
t = self.gatherValue(x, "Raytracer total time:")
if t[0] :
storage.totalTime = t[1]
t = self.gatherValue(x, "Time for all rays (SUM):")
if t[0] :
storage.timeRaySum = t[1]
t = self.gatherValue(x, "Time for triangle intersections (SUM):")
if t[0] :
storage.timeTriangleSum = t[1]
t = self.gatherValue(x, "Time all rays(sum) - triangle(sum):")
if t[0] :
storage.timeNodeSum = t[1]
return storage
#first line for files that loop over branchFactor
def getFirstLine(self):
firstLine = "branchFactor, leafSize"
return firstLine
doAll = True
# 0 = leaf , 1 = node (need to adjust when table change!) (i separate those since i dont want to do a combined performance test since it gets messy quite fast)
workType = 0
# 0 = avx, sse = 1
gangType = 0
if doAll:
for i in range(2):
for j in range(2):
e = everything(i,j)
e.readDataFromFiles()
e.printEverything()
e.printEverythingOneFile()
else:
e = everything(workType, gangType)
e.readDataFromFiles()
e.printEverything()
e.printEverythingOneFile()
|
from discord.ext import commands
import discord
import random
import datetime
import cogs._json
import cogs._utils
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
try:
if self.bot.muted_users[member.id]:
roles = cogs._json.read_json('muted_roles')
if str(member.guild.id) in roles:
role = discord.utils.get(member.guild.roles, id=int(str(roles[str(member.guild.id)])))
else:
role = discord.utils.get(member.guild.roles, name="Muted")
if role:
await member.add_roles(role)
except KeyError:
pass
@commands.Cog.listener()
async def on_message(self, message):
await self.bot.handler.propagate(message)
if self.bot.tracker.is_spamming(message):
roles = cogs._json.read_json('muted_roles')
if self.bot.tracker.get_user_count(message) == 1:
if message.author.id in [message.author.id, self.bot.user.id]:
return await message.send("You cannot warn yourself or the bot!")
current_warn_count = len(
await self.bot.warns.find_many_by_custom(
{
"user_id": message.author.id,
"guild_id": message.guild.id
}
)
) + 1
warn_filter = {"user_id": message.author.id, "guild_id": message.guild.id, "number": current_warn_count}
warn_data = {"reason": "Spamming", "timestamp": message.created_at, "warned_by": message.author.id}
await self.bot.warns.upsert_custom(warn_filter, warn_data)
embed = discord.Embed(
title="You are being warned:",
description=f"__**Reason**__:\nSpamming",
color=discord.Color.red(),
timestamp=message.created_at
)
embed.set_author(name=message.guild.name, icon_url=message.guild.icon_url)
embed.set_footer(text=f"Warn: {current_warn_count}")
try:
await message.author.send(embed=embed)
await message.send("Warned that user in dm's for you.")
except discord.HTTPException:
await message.send(message.author.mention, embed=embed)
elif self.bot.tracker.get_user_count(message) == 2:
if str(message.guild.id) in roles:
role = discord.utils.get(message.guild.roles, id=int(str(roles[str(message.guild.id)])))
else:
role = discord.utils.get(message.guild.roles, name="Muted")
data = {
'_id': message.author.id,
'mutedAt': datetime.datetime.now(),
'muteDuration': None,
'mutedBy': message.author.id,
'guildId': message.guild.id,
}
await self.bot.mutes.upsert(data)
self.bot.muted_users[message.author.id] = data
await message.author.add_roles(role)
elif self.bot.tracker.get_user_count(message) == 3:
await message.guild.kick(user=message.author, reason="Spamming")
# ETC
self.bot.tracker.remove_punishments(message)
data = cogs._json.read_json('filtered_words')
if message.guild is None or not str(message.guild.id) in data:
filtered_words = self.bot.default_filtered_messages
else:
filtered_words = data[str(message.guild.id)]
deleted = False
for word in filtered_words:
if (word.lower() in message.content.lower() or word.lower().replace(" ",
"") in message.content.lower().replace(
" ", "")) and not message.author == self.bot.user and message.guild and not deleted:
await message.delete()
deleted = True
data = cogs._json.read_json('muted_roles')
if message.guild and str(message.guild.id) in data and discord.utils.get(message.guild.roles, id=int(str(data[str(message.guild.id)]))) in message.author.roles and not message.author.guild_permissions.administrator:
await message.delete()
def setup(bot):
bot.add_cog(Events(bot))
|
import torch
import ocnn
bn_momentum, bn_eps = 0.01, 0.001
class OctreeConvBnRelu(torch.nn.Module):
def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
super(OctreeConvBnRelu, self).__init__()
self.conv = ocnn.OctreeConv(depth, channel_in, channel_out, kernel_size, stride)
self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in, octree):
out = self.conv(data_in, octree)
out = self.bn(out)
out = self.relu(out)
return out
class FcBnRelu(torch.nn.Module):
def __init__(self, channel_in, channel_out):
super(FcBnRelu, self).__init__()
self.flatten = torch.nn.Flatten(start_dim=1)
self.fc = torch.nn.Linear(channel_in, channel_out, bias=False)
self.bn = torch.nn.BatchNorm1d(channel_out, bn_eps, bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in):
out = self.flatten(data_in)
out = self.fc(out)
out = self.bn(out)
out = self.relu(out)
return out
|
"""
TICCLAT testing helper functions.
"""
from pathlib import Path
from itertools import chain
import nltk.data
from nltk import word_tokenize
import pandas
def load_test_db_data(dbsession):
"""
Insert mock data into database.
"""
files = (Path(__file__).parent / 'db_data').glob('./*.tsv')
dbsession.execute('set foreign_key_checks=0;')
for file in files:
df = pandas.read_csv(file, sep='\t')
df.to_sql(file.stem, if_exists='append', index=False, con=dbsession.bind)
# note that the just inserted data is not checked for consistency, only new data/updates will be checked
dbsession.execute('set foreign_key_checks=1;')
# This used to be in ticclat.tokenize, but it was no longer used anywhere but
# in some tests and in the add_wikipedia_documents notebook, so we took it out
# of the install dependencies.
def nltk_tokenize(texts_file, punkt='tokenizers/punkt/dutch.pickle'):
"""
Inputs:
texts_file (str): File name of a file that contains the texts. This
should contain one document per line.
punkt (str): Path to the nltk punctuation data to be used.
Yields:
Counter: term-frequency vector representing a document.
"""
nltk.download('punkt')
tokenizer = nltk.data.load(punkt)
with open(texts_file) as file_handle:
for line in file_handle:
tokens = [word_tokenize(sent)
for sent in tokenizer.tokenize(line.strip())]
yield list(chain(*tokens))
|
import threading
import time
import random
import sys
import socket
rsListenPort = sys.argv[1]
rsListenPort = int(rsListenPort)
#djdjdjd
def sendData(value,sock):
sock.send(value.encode('utf-8'))
def file_to_dict(fileName):
f = open(fileName, "r")
lst = []
dic = {}
for line in f:
for word in line.split():
lst.append(word)
counter = 0
for entry in lst:
if counter == 0:
currentKey = entry.lower()
values = []
counter = counter + 1
elif counter == 1:
values.append(entry)
counter = counter + 1
elif counter == 2:
values.append(entry)
dic[currentKey] = values
counter = 0
f.close()
return dic
# go through the dictionary and find where the tshostname is
def getTS(dictionary):
for key in dictionary:
k = dictionary[key]
if k[1] == "NS":
returnVal = key
break
return returnVal
def return_dns_query(dictionary,domain):
if domain in dictionary:
values = dictionary[domain]
ipaddress = values[0]
flag = values[1]
return domain + " " + ipaddress + " " + flag
else:
return domain + " - NS"
def server():
newDict = file_to_dict("PROJI-DNSRS.txt")
try:
rs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("[RS]: Server socket created")
except socket.error as err:
print('socket open error: {}\n'.format(err))
exit()
# get root server host name
rsServerHost = socket.gethostname()
print("[RS]: root server name is {}".format(rsServerHost))
# get root server ip address
rsHostip = socket.gethostbyname(rsServerHost)
print("[RS] root server ip is {}".format(rsHostip))
#bind host,port
rs.bind((rsServerHost,rsListenPort))
#root server listen
rs.listen(5)
# accept incoming connections
csockid, addr = rs.accept()
print("[RS]: Got a connection request from a client at {}\n".format(addr))
#create a list temporarily to hold domain names from client
domain_list = []
cond = True
while cond:
data_from_client = csockid.recv(1024).decode('ascii')
if data_from_client != "*":
domain_list.append(data_from_client)
print("[RS]: data received from client: {}".format(data_from_client))
elif data_from_client == "*":
cond = False
time.sleep(2)
time.sleep(3)
#if "*" in domain_list:
#domain_list.remove("*")
print("\n[RS] Domains received from client:")
domain_list = [str(r) for r in domain_list]
print(domain_list)
print("\n")
#a way to send the correct string back to client
for dn in domain_list:
result = return_dns_query(newDict,dn.lower())
print("[RS] sending to client: {}".format(result))
csockid.send(result.encode('ascii'))
time.sleep(3)
csockid.send("00".encode('ascii'))
time.sleep(1)
tsHostName = getTS(newDict)
csockid.send(tsHostName.encode("ascii"))
#print(domain_list)
print("\nRS DNS table as hash map:")
print(newDict)
rs.close()
exit()
if __name__ == "__main__":
t1 = threading.Thread(name='server', target=server)
t1.start()
time.sleep(random.random() * 5)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from openravepy import *
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
env.Load('data/lab1.env.xml') # load a simple scene
robot = env.GetRobots()[0] # get the first robot
with env: # lock the environment since robot will be used
print "Robot ",robot.GetName()," has ",robot.GetDOF()," joints with values:\\n",robot.GetJointValues()
robot.SetDOFValues([0.5],[0]) # set joint 0 to value 0.5
T = robot.GetLinks()[1].GetTransform() # get the transform of link 1
print "The transformation of link 1 is:\n",T
raw_input("Press Enter to exit...")
env.Destroy()
|
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
class SFIModel_gating(nn.Module):
    """Selective Fine-grained Interaction (SFI) news recommendation model.

    Pipeline (see forward): embed candidate and clicked-history titles,
    encode each title with multi-head self-attention, select the top-k
    history news per candidate via news-level attention, build word-level
    interaction matrices between candidate and selected history, and score
    clicks with a 3D CNN followed by a linear ranking layer.
    """
    def __init__(self, hparams, vocab):
        super().__init__()
        self.name = hparams['name']
        # candidates scored per impression: npratio negatives + 1 positive
        # during training, a single candidate otherwise
        self.cdd_size = (hparams['npratio'] + 1) if hparams['npratio'] > 0 else 1
        self.batch_size = hparams['batch_size']
        self.dropout_p = hparams['dropout_p']
        self.k = hparams['k']  # number of history news selected per candidate
        self.level = hparams['head_num']  # attention heads (interaction "levels")
        # concatenate category embedding and subcategory embedding
        self.signal_length = hparams['title_size']  # words per title
        self.his_size = hparams['his_size']  # length of click history
        self.embedding_dim = hparams['embedding_dim']
        self.value_dim = hparams['value_dim']
        self.query_dim = hparams['query_dim']
        self.device = hparams['device']
        # pretrained embedding, optionally fine-tuned
        if hparams['train_embedding']:
            self.embedding = nn.Parameter(vocab.vectors.clone().detach().requires_grad_(True).to(self.device))
        else:
            self.embedding = vocab.vectors.to(self.device)
        # elements in the slice along dim will sum up to 1
        self.softmax = nn.Softmax(dim=-1)
        self.DropOut = nn.Dropout(p=self.dropout_p)
        # learnable query vector for word-level additive attention
        self.query_words = nn.Parameter(torch.randn((1, self.query_dim), requires_grad=True))
        # self.query_words = nn.Parameter(torch.randn((1,self.filter_num), requires_grad=True))
        # per-head projections for multi-head self-attention
        self.queryProject_words = nn.ModuleList([]).extend([nn.Linear(self.embedding_dim,self.embedding_dim, bias=False) for _ in range(self.level)])
        self.valueProject_words = nn.ModuleList([]).extend([nn.Linear(self.embedding_dim,self.value_dim, bias=False) for _ in range(self.level)])
        self.keyProject_words = nn.Linear(self.value_dim * self.level, self.query_dim, bias=True)
        # 3D CNN over [level, k, signal_length, signal_length] interaction maps
        self.SeqCNN3D = nn.Sequential(
            nn.Conv3d(in_channels=self.level,out_channels=32,kernel_size=[3,3,3],padding=1),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=[3,3,3],stride=[3,3,3]),
            nn.Conv3d(in_channels=32,out_channels=16,kernel_size=[3,3,3],padding=1),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=[3,3,3],stride=[3,3,3])
        )
        # in-features mirror the two stride-3 poolings over the k dimension;
        # NOTE(review): the 2 * 2 * 16 factor presumes signal_length pools
        # down to 2x2 for the configured title_size — confirm.
        self.learningToRank = nn.Linear(int((int((self.k - 3)/3 + 1) - 3)/3 + 1) * 2 * 2 * 16,1)
    def _scaled_dp_attention(self,query,key,value):
        """ calculate scaled attended output of values
        Args:
            query: tensor of [*, query_num, key_dim]
            key: tensor of [batch_size, *, key_num, key_dim]
            value: tensor of [batch_size, *, key_num, value_dim]
        Returns:
            attn_output: tensor of [batch_size, *, query_num, value_dim]
        """
        # make sure dimension matches
        assert query.shape[-1] == key.shape[-1]
        key = key.transpose(-2,-1)
        # NOTE(review): scaling always uses embedding_dim, even when the keys
        # live in a different (e.g. query_dim) space — confirm this is intended.
        attn_weights = torch.matmul(query,key)/torch.sqrt(torch.tensor([self.embedding_dim],dtype=torch.float,device=self.device))
        attn_weights = self.softmax(attn_weights)
        attn_output = torch.matmul(attn_weights,value)
        return attn_output
    def _self_attention(self,input,head_idx):
        """ apply self attention of head#idx over input tensor
        Args:
            input: tensor of [batch_size, *, embedding_dim]
            head_idx: integer of attention head index
        Returns:
            self_attn_output: tensor of [batch_size, *, value_dim]
        """
        query = self.queryProject_words[head_idx](input)
        attn_output = self._scaled_dp_attention(query,input,input)
        self_attn_output = self.valueProject_words[head_idx](attn_output)
        return self_attn_output
    def _multi_head_self_attention(self,input):
        """ apply multi-head self attention over input tensor
        Args:
            input: tensor of [batch_size, *, signal_length, repr_dim]
        Returns:
            mha_embedding: tensor of [batch_size, *, level, signal_length, value_dim]
            additive_attn_repr: tensor of [batch_size, *, level * value_dim]
        """
        self_attn_outputs = [self._self_attention(input,i) for i in range(self.level)]
        # stack heads on a new "level" axis for the fusion CNN ...
        mha_embedding = torch.stack(self_attn_outputs, dim=-3)
        # ... and concatenate them for the additive-attention news repr
        mha_repr = torch.cat(self_attn_outputs, dim=-1)
        # project the embedding of each words to query subspace
        # keep the original embedding of each words as values
        multi_head_self_attn_key = torch.tanh(self.keyProject_words(mha_repr))
        additive_attn_repr = self._scaled_dp_attention(self.query_words,multi_head_self_attn_key,mha_repr).squeeze(dim=-2)
        return mha_embedding, additive_attn_repr
    def _news_encoder(self,news_batch):
        """ encode set of news to news representation
        Args:
            news_batch: tensor of [batch_size, *, signal_length] (word ids)
        Returns:
            news_embedding_attn: tensor of [batch_size, *, level, signal_length, value_dim]
            news_reprs: tensor of [batch_size, *, level * value_dim]
        """
        news_embedding = self.DropOut(self.embedding[news_batch])
        news_embedding_attn, news_reprs = self._multi_head_self_attention(news_embedding)
        return news_embedding_attn, news_reprs
    def _news_attention(self, cdd_repr, his_repr, his_embedding, his_mask):
        """ apply news-level attention
        Args:
            cdd_repr: tensor of [batch_size, cdd_size, *]
            his_repr: tensor of [batch_size, his_size, *]
            his_embedding: tensor of [batch_size, his_size, self.level, signal_length, *]
            his_mask: tensor of [batch_size, his_size, 1]
        Returns:
            his_activated: tensor of [batch_size, cdd_size, k, signal_length, *]
        """
        # [bs, cs, hs]
        attn_weights = torch.bmm(cdd_repr,his_repr.transpose(-1,-2))
        # his_activated_list = []
        # Padding in history will cause 0 in attention weights, underlying the probability that gumbel_softmax may attend to those meaningless 0 vectors.
        # Masking off these 0s will force the gumbel_softmax to attend to only non-zero histories.
        # Masking in candidate also cause such a problem, however we donot need to fix it
        # because the whole row of attention weight matrix is zero so gumbel_softmax can only capture 0 vectors
        # though reuslting in redundant calculation but it is what we want of padding 0 to negtive sampled candidates as they may be less than npratio.
        attn_weights = self.softmax(attn_weights.masked_fill(his_mask.transpose(-1,-2), -float("inf")))
        # print(attn_weights.shape, attn_weights[0,0])
        # for i in range(self.k):
        #     # attn_focus = F.gumbel_softmax(attn_weights,dim=-1,tau=0.1,hard=True)
        #     attn_focus = F.one_hot(attn_weights.argmax(dim=-1), num_classes=self.his_size).float()
        #     his_activated = torch.matmul(attn_focus,his_embedding.view(self.batch_size,self.his_size,-1)).view(self.batch_size, self.cdd_size, self.level, self.signal_length, self.filter_num)
        #     his_activated_list.append(his_activated)
        #     attn_weights = attn_weights.masked_fill(attn_focus.bool(), -float('inf'))
        # hard top-k selection: one-hot rows pick the k most-attended history news
        _, attn_weights_sorted = attn_weights.detach().sort(dim=-1, descending=True)
        attn_focus = F.one_hot(attn_weights_sorted[:,:,:self.k], num_classes=self.his_size).float()
        # [bs, cs, k, sl, rd]
        his_activated = torch.matmul(attn_focus, his_embedding.view(self.batch_size, 1, self.his_size,-1)).view(self.batch_size, self.cdd_size, self.k, self.level, self.signal_length, self.value_dim)
        # [bs, cs, k, sl, rd]
        # his_activated = torch.stack(his_activated_list, dim=2)
        return his_activated
    def _fusion(self,cdd_news_reprs,his_news_reprs):
        """ construct fusion tensor between candidate news repr and history news repr at each dilation level
        Args:
            cdd_news_reprs: tensor of [batch_size, cdd_size, level, signal_length, value_dim]
            his_news_reprs: tensor of [batch_size, cdd_size, k, level, signal_length, value_dim]
        Returns:
            fusion_tensor: tensor of [batch_size, cdd_size, 320], where 320 is derived from MaxPooling with no padding
        """
        # [batch_size, cdd_size, his_size, level, signal_length, signal_length]
        fusion_tensor = torch.matmul(cdd_news_reprs.unsqueeze(dim=2),his_news_reprs.transpose(-2,-1)) / math.sqrt(self.value_dim)
        # reshape the tensor in order to feed into 3D CNN pipeline
        fusion_tensor = fusion_tensor.view(-1, self.k, self.level, self.signal_length, self.signal_length).transpose(1,2)
        fusion_tensor = self.SeqCNN3D(fusion_tensor).view(self.batch_size,self.cdd_size,-1)
        return fusion_tensor
    def _click_predictor(self,fusion_tensors):
        """ calculate batch of click probability
        Args:
            fusion_tensors: tensor of [batch_size, cdd_size, 320]
        Returns:
            score: tensor of [batch_size, npratio+1], which is normalized click probabilty
        """
        score = self.learningToRank(fusion_tensors)
        # log-softmax across candidates when training with negatives,
        # plain sigmoid for single-candidate inference
        if self.cdd_size > 1:
            score = nn.functional.log_softmax(score,dim=1)
        else:
            score = torch.sigmoid(score)
        return score
    def forward(self,x):
        # last (possibly partial) batch: adapt the cached batch size
        if x['candidate_title'].shape[0] != self.batch_size:
            self.batch_size = x['candidate_title'].shape[0]
        # compress batch_size and cdd_size into dim0
        cdd_news = x['candidate_title'].long().to(self.device)
        cdd_news_embedding, cdd_news_reprs = self._news_encoder(cdd_news)
        # print(cdd_news_embedding.shape, cdd_news_reprs.shape)
        # compress batch_size and his_size into dim0
        his_news = x['clicked_title'].long().to(self.device)
        his_news_embedding, his_news_reprs = self._news_encoder(his_news)
        # print(his_news_embedding.shape, his_news_reprs.shape)
        his_activated = self._news_attention(cdd_news_reprs, his_news_reprs, his_news_embedding, x['his_mask'].to(self.device))
        fusion_tensors = self._fusion(cdd_news_embedding, his_activated)
        score = self._click_predictor(fusion_tensors).squeeze()
        return score
#!/usr/lib/python
# NYUAD SVM Project Preprocessor
# AI - Fall 2013
# Written by Lingliang Zhang
# A very simple Python preprocessor script that normalizes ARFF formatted
# data. It counts the number of instances in each class, determines the
# minimum. It then takes a random subset of each class of the minimum size.
import sys
import re
import random
def init(file_handle):
    """Read the ARFF preamble up to (and including) the @data line.

    Collects attribute names, asks the user which attribute is the class
    field, and swaps that attribute's line with the last attribute line so
    the class ends up as the final column in the header.

    Returns (class_index, preamble): the 0-based index of the chosen class
    attribute and the list of header lines (already swapped).
    Raises Exception if the file contains no @attribute lines.
    """
    preamble = []
    attributes = []
    attribute = re.compile('@attribute|@ATTRIBUTE')
    data = re.compile('@data|@DATA')
    for i, line in enumerate(file_handle):
        preamble.append(line)
        if data.match(line):
            break
        if attribute.match(line):
            attributes.append(line.split(" ")[1])
            last_attribute_index = i
    if not attributes:
        raise Exception("No classes found in a file, did you pass a non-ARFF file?")
    question = ""
    for i, attribute_name in enumerate(attributes):
        question += "%d: %s\n" % (i, attribute_name)
    # BUG FIX: input() returns a string on Python 3; the index arithmetic
    # below needs an int.
    class_index = int(input("%sPlease select the class field:\n" % question))
    class_abs_index = last_attribute_index - len(attributes) + class_index + 1
    preamble[last_attribute_index], preamble[class_abs_index] = \
        preamble[class_abs_index], preamble[last_attribute_index]
    return class_index, preamble
def gather_classes(file_handle, class_index):
    """Group the remaining data rows of an ARFF file by class label.

    Reads every line left in *file_handle*, swaps the class field into the
    last column of each row, and buckets the row under its class value.
    Returns a dict mapping class label -> list of row-field lists.
    """
    classes = {}
    field_count = 0
    for line in file_handle:
        fields = line.rstrip("\n").split(",")
        # the first row fixes the number of fields per record
        if not field_count:
            field_count = len(fields)
        last = field_count - 1
        fields[last], fields[class_index] = fields[class_index], fields[last]
        classes.setdefault(fields[last], []).append(fields)
    return classes
def main():
    """Balance each ARFF file given on the command line.

    For every input file: read the preamble, group rows by class, cap every
    class at min(size of smallest class, user-supplied maximum), shuffle,
    and write the result to "stripped-<filename>".
    """
    # sys.argv always contains the script name, so check for < 2 (the old
    # `== 0` test could never fire).
    if len(sys.argv) < 2:
        raise Exception("Err: please pass a file or list of files as arguments")
    for filename in sys.argv[1:]:
        with open(filename) as f:
            class_index, preamble = init(f)
            classes = gather_classes(f, class_index)
        # Materialize the counts: a lazy map() would be exhausted by min()
        # and yield 0 for the later sum().
        class_count = [len(rows) for rows in classes.values()]
        min_count = min(class_count)
        # input() returns a string; empty answer means "no cap".
        raw = input("Please enter the max instances allowed of any class:\n")
        min_allowed = int(raw) if raw else 10000000000
        if min_count > min_allowed:
            min_count = min_allowed
        data = []
        for key, value in classes.items():  # iteritems() was Python 2 only
            print("Class %s has %d entries" % (key, len(value)))
            value = [",".join(lines) for lines in value]
            random.shuffle(value)
            data.append("\n".join(value[:min_count]))
        random.shuffle(data)
        with open("stripped-" + filename, "w") as ff:
            ff.write("".join(preamble))
            ff.write("\n".join(data))
        print("\nData cleanup complete for %s, all classes have been reduced to %d entries.\
\nPrevious total entries: %d\nNew total entries: %d\n\
\nOutput in stripped-%s" % (filename, min_count, sum(class_count),
                           min_count * len(class_count), filename))
if __name__ == "__main__":
    main()
|
# Generated by Django 2.0.5 on 2018-07-11 15:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Set and SetTheme tables and link Set -> SetTheme.

    Auto-generated by Django 2.0.5 on 2018-07-11.
    """
    dependencies = [
        ('api', '0012_part_meta'),
    ]
    operations = [
        migrations.CreateModel(
            name='Set',
            fields=[
                # the official set number doubles as the primary key
                ('set_num', models.CharField(max_length=25, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('year', models.SmallIntegerField()),
                ('image_url', models.CharField(blank=True, max_length=200)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='SetTheme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # self-referential theme hierarchy; deleting a parent cascades
                ('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.SetTheme')),
            ],
        ),
        migrations.AddField(
            model_name='set',
            name='theme',
            # nullable so a theme can be removed without deleting its sets
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.SetTheme'),
        ),
    ]
|
# def table(nb):
# i = 0
# while i < 10: # tant que i est strictement inferieure à 10.
# print(i + 1, "*",nb,"=",(i+1)*nb)
# i += 1 #on incremente i de 1 à chaque tour
# table(2)
# version avec le choix du chiffre multipliant x
def table(nb, max):
    """Print the multiplication table of *nb*, from 1*nb up to max*nb."""
    # NOTE(review): the parameter name `max` shadows the builtin; kept
    # unchanged to preserve the public signature.
    for step in range(1, max + 1):
        print(step, "*", nb, "=", step * nb)
table(2, 20)
|
from django.contrib.auth import (login as auth_login, authenticate)
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import DataSchema, DataSet, Field
from .forms import FieldFormset, DataSchemaForm
def login(request):
    """Render the sign-in page and authenticate POSTed credentials.

    On success the user is logged in and redirected to 'home'; otherwise the
    page is re-rendered with an explanatory message.
    """
    message = 'Please sign in'
    if request.method == 'POST':
        user = authenticate(
            username=request.POST['username'],
            password=request.POST['password'],
        )
        if user is None:
            message = 'Invalid login, please try again.'
        elif not user.is_active:
            message = 'Your account is not activated'
        else:
            auth_login(request, user)
            return HttpResponseRedirect(reverse('home'))
    return render(request, 'login/login.html', {'message': message})
@login_required
def list_schema(request):
    """Show every DataSchema in the system."""
    context = {'schemas': DataSchema.objects.all()}
    return render(request, 'fake_data/list_schema.html', context)
@login_required
def new_schema(request):
    """Create a DataSchema together with its inline Field formset.

    GET renders empty forms; POST validates and saves both forms, then
    redirects to the schema's dataset list. Invalid input re-renders the
    bound forms with their errors.
    """
    if request.method == "POST":
        dataschema_form = DataSchemaForm(request.POST)
        # BUG FIX: bind the formset up front so an invalid schema form no
        # longer leaves `field_formset` undefined (NameError on re-render).
        field_formset = FieldFormset(request.POST)
        if dataschema_form.is_valid():
            schema = dataschema_form.save()
            field_formset = FieldFormset(request.POST, instance=schema)
            if field_formset.is_valid():
                field_formset.save()
                return redirect('list_dataset', id=schema.id)
    else:
        dataschema_form = DataSchemaForm()
        field_formset = FieldFormset()
    return render(request, 'fake_data/edit_schema.html',
                  {'field_formset': field_formset, 'dataschema_form': dataschema_form})
@login_required
def edit_schema(request, id):
    """Edit an existing DataSchema and its inline Field formset.

    GET renders forms bound to the schema; POST validates and saves, then
    redirects to the schema's dataset list. Invalid input re-renders the
    bound forms with their errors.
    """
    schema = DataSchema.objects.get(id=id)
    if request.method == "POST":
        dataschema_form = DataSchemaForm(request.POST, instance=schema)
        # BUG FIX: bind the formset up front so an invalid schema form no
        # longer leaves `field_formset` undefined (NameError on re-render).
        field_formset = FieldFormset(request.POST, instance=schema)
        if dataschema_form.is_valid():
            schema = dataschema_form.save()
            field_formset = FieldFormset(request.POST, instance=schema)
            if field_formset.is_valid():
                field_formset.save()
                return redirect('list_dataset', id=schema.id)
    else:
        dataschema_form = DataSchemaForm(instance=schema)
        field_formset = FieldFormset(instance=schema)
    return render(request, 'fake_data/edit_schema.html',
                  {'field_formset': field_formset, 'dataschema_form': dataschema_form})
@login_required
def list_dataset(request, id):
    """List all DataSets generated from the schema with primary key *id*."""
    schema = DataSchema.objects.get(id=id)
    context = {
        'schema': schema,
        'datasets': DataSet.objects.filter(schema=schema),
    }
    return render(request, 'fake_data/list_dataset.html', context)
@login_required
def delete_schema(request, id):
    """Delete the DataSchema with primary key *id* and return to the list.

    NOTE(review): deletion happens on a plain GET and there is no ownership
    or permission check beyond login — any authenticated user hitting this
    URL deletes the schema; confirm this is intended (POST + confirmation
    would be safer). An unknown id raises DataSchema.DoesNotExist (500).
    """
    schema = DataSchema.objects.get(id=id)
    schema.delete()
    return redirect('list_schema')
from django.db import models
from LandingPage.models import Course
# Create your models here.
class CorporatesTalks(models.Model):
    """Enquiry from an organization about a corporate talk, optionally tied to a Course."""
    name=models.CharField(max_length=50)
    email=models.EmailField(max_length=100)
    organization=models.CharField(max_length=250)
    # optional link to the course; deleting the course deletes its enquiries
    course= models.ForeignKey(Course,related_name='corporate_course', on_delete=models.CASCADE,null=True)
    # audit timestamps (nullable so pre-existing rows migrate cleanly)
    added = models.DateTimeField(auto_now_add=True,blank=True,null=True)
    updated = models.DateTimeField(auto_now=True,blank=True,null=True)
    def __str__(self):
        return self.name
    class Meta:
        # NOTE(review): singular verbose_name is the plural form on purpose?
        # Both read 'Corporate Talks' in the admin — confirm.
        verbose_name='Corporate Talks'
        verbose_name_plural='Corporate Talks'
|
#!/usr/bin/python
#\file crop_img.py
#\brief Crop an image.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jun.15, 2021
import numpy as np
import cv2
import sys,os
if __name__=='__main__':
  # NOTE: Python 2 script (print statements). Crops a fixed region from the
  # input image, previews it, and optionally saves it on 's'.
  file_in= sys.argv[1]
  img= cv2.imread(file_in)
  print 'Input image shape:',img.shape
  # hard-coded crop rectangle: top-left (x, y), width w, height h
  x,y,w,h= 260,180,120,300
  img_cropped= img[y:y+h,x:x+w]
  #img_cropped= cv2.flip(cv2.transpose(img_cropped),1)
  print 'Cropped image shape:',img_cropped.shape
  cv2.imshow('image', img)
  # skip the preview when the rectangle fell outside the image (empty crop)
  if 0 not in img_cropped.shape: cv2.imshow('cropped', img_cropped)
  while True:
    key= cv2.waitKey()
    if key in map(ord,[' ','q']): break  # space or 'q' quits
    if key==ord('s'):
      # save under the input's base name in the CWD; never overwrite
      file_out= os.path.basename(file_in)
      if not os.path.exists(file_out):
        cv2.imwrite(file_out,img_cropped)
        print 'Saved the image to the file:',file_out
      else:
        print 'Failed to save the image as the file already exists:',file_out
|
from rest_framework import serializers
from .models import StepUser, StepUserHistory
class StepUserHistorySerializer(serializers.HyperlinkedModelSerializer):
    """Serialize StepUserHistory records.

    NOTE(review): Meta declares neither `fields` nor `exclude`. Old DRF
    versions serialized all fields implicitly, but DRF >= 3.3 raises an
    assertion here — confirm the pinned DRF version.
    """
    class Meta:
        model = StepUserHistory
class StepUserSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize StepUser with profile fields, step count, and a link to history."""
    class Meta:
        model = StepUser
        fields = ('url', 'username', 'first_name', 'last_name', 'email', 'age', 'city', 'photo', 'steps',
                  'step_user_history',)
|
from django.contrib import admin
from .models import Profile, Role
# Register your models here.
# Expose Profile and Role in the Django admin with the default ModelAdmin.
admin.site.register(Profile)
admin.site.register(Role)
|
'''
Your goal in this kata is to implement a difference function, which subtracts one list from another and returns the result.
It should remove all values from list a, which are present in list b.
array_diff([1,2],[1]) == [2]
If a value is present in b, all of its occurrences must be removed from the other:
array_diff([1,2,2,2,3],[2]) == [1,3]
'''
def array_diff(a, b):
    """Return the elements of *a* that do not appear in *b*.

    Every occurrence of a value present in b is removed:
        array_diff([1, 2, 2, 2, 3], [2]) -> [1, 3]

    Improvements over the previous version: *a* is no longer mutated in
    place, and membership tests use a set, so the whole call runs in
    O(len(a) + len(b)) instead of repeated list.remove() scans.
    """
    removals = set(b)
    return [x for x in a if x not in removals]
|
#_*_coding:utf-8_*_
from apps.online_user.models import OnlineUser, OnlineUserSerializer
from django.shortcuts import get_object_or_404, render
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
import logging
logger = logging.getLogger(__name__)
@api_view(['GET'])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def onlineFriends(request):
    """Return the online records of the authenticated user and their friends.

    The raw SQL is parameterized (%s placeholders with a params list), so
    the email values are safely escaped.
    NOTE(review): 'o.mac id' in the SELECT aliases the mac column as 'id' —
    confirm the serializer really expects that.
    """
    # BUG FIX: removed a leftover debug line that unconditionally overwrote
    # the authenticated user's email with a hard-coded address.
    email = request.user.email
    friendList = OnlineUser.objects.raw('select o.email, o.lan_ip, o.wlan_ip, o.mac id from kx_userlogin o where o.email in (select friend from kx_user_friend where user=%s) or o.email=%s', [email,email])
    serializer = OnlineUserSerializer(friendList, many=True)
    return Response(serializer.data)
    #return render(request, 'sharefile/friendFiles.html')
|
# for gmail
# SMTP settings for sending mail through Gmail (STARTTLS on port 587).
EMAIL_USE_TLS = True
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'vlad120403@gmail.com'
# Left blank here; supply the real password via an environment variable or
# secrets store rather than committing it to the repository.
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
import json
from django.http import HttpResponse
import subprocess
import psutil
import os
from firebase_token_generator import create_token
def home(request):
    """Render the landing page."""
    context = RequestContext(request, {'body_class': 'home'})
    return render_to_response('index.html', context)
def createauthtoken(request):
    """Mint a Firebase auth token for the Rdio user given by ?userKey=.

    Returns {"token": ...} on success, or {"error": ...} when the userKey
    GET parameter is missing, as a JSON response.
    """
    rdio_user_key = request.GET.get('userKey')
    if not rdio_user_key:
        payload = {"error": "userKey is a required GET param"}
    else:
        firebase_token = create_token(
            settings.FIREBASE_TOKEN,
            {'rdio_user_key': rdio_user_key},
            {'debug': settings.DEBUG},
        )
        payload = {"token": firebase_token}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def make_room_daemon(room_name):
    """Spawn the `master` management command for *room_name* as a child process.

    First scans this process's children and returns without spawning when a
    child whose last argv element equals room_name is already running.
    """
    # NOTE(review): get_children() is the psutil < 2.0 API (renamed to
    # children() later) — confirm the pinned psutil version.
    child_processes = psutil.Process(os.getpid()).get_children()
    for process in child_processes:
        try:
            if process.cmdline() and len(process.cmdline()) > 0 and process.cmdline()[-1] == room_name:
                return
        except psutil.AccessDenied:
            # children we cannot inspect are simply skipped
            pass
    directory = os.path.dirname(os.path.realpath(__file__))
    subprocess.Popen(["python", "%s/../manage.py" % directory, "master", room_name])
@login_required
def party(request, room_name):
    """Render a party room page and make sure its room daemon is running."""
    # fall back to the default 'rdio' room when no room name was supplied
    if room_name is None:
        return redirect('/p/rdio')
    c = RequestContext(request, {
        # each room gets its own Firebase path
        'firebase_url': "%s/%s" % (settings.FIREBASE_URL, room_name),
        'room_name': room_name,
        'body_class': 'party'
    })
    make_room_daemon(room_name)
    return render_to_response('party.html', c)
def parties(request):
    """Render the list of all parties."""
    context = RequestContext(request, {
        'body_class': 'parties',
        # root Firebase path (trailing slash) so the page can list all rooms
        'firebase_url': "%s/" % (settings.FIREBASE_URL,),
    })
    return render_to_response('partylist.html', context)
def sign_out(request):
    """Log the user out and send them to the index page.

    BUG FIX: django.contrib.auth.logout() (the function imported here)
    accepts only the request — the old `next_page=` keyword raised a
    TypeError — and returns None, so wrapping its result in HttpResponse
    produced an empty response. Log out, then redirect explicitly.
    """
    logout(request)
    return redirect(reverse('index'))
def player_helper(request):
    """Render the standalone player-helper page with an empty context."""
    return render_to_response('player-helper.html',
                              {},
                              context_instance=RequestContext(request))
|
# -*- coding: UTF-8 -*-
__author__ = 'Aaron zh'
from function import *
import json
from django.db import connection, transaction
# Module-level date stamp (YYYY-MM-DD), frozen at import time.
# NOTE(review): `global` at module scope is a no-op; long-running processes
# will keep using the import-day date — confirm that is acceptable.
global current_time
current_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Python 2 only: force UTF-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding("utf-8")
from ..businessRule.actions import *
class MyThread(object):
    """Run a batch of functions on parallel threads and join them all.

    func_list is a list of dicts, each with keys "func" (the callable) and
    "args" (a tuple of positional arguments; falsy means no arguments).
    """
    def __init__(self, func_list=None):
        self.func_list = func_list
        self.threads = []
    def set_thread_func_list(self, func_list):
        """
        func_list is a list of dicts, each with "func" and "args" keys.
        """
        self.func_list = func_list
    @staticmethod
    def trace_func(func, *args, **kwargs):
        """
        Wrapper around the real thread target (replaced an earlier
        profile_func). NOTE(review): despite the original comment about
        capturing return values, func's return value is discarded here.
        """
        func(*args, **kwargs)
    def start(self):
        """
        Start one thread per entry in func_list and block until all finish.
        """
        self.threads = []
        for func_dict in self.func_list:
            if func_dict["args"]:
                # prepend the callable so trace_func receives (func, *args)
                new_arg_list = [func_dict["func"]]
                for arg in func_dict["args"]:
                    new_arg_list.append(arg)
                new_arg_tuple = tuple(new_arg_list)
                task = threading.Thread(target=self.trace_func, args=new_arg_tuple)
            else:
                task = threading.Thread(target=self.trace_func, args=(func_dict["func"],))
            self.threads.append(task)
        for thread_obj in self.threads:
            thread_obj.start()
        for thread_obj in self.threads:
            thread_obj.join()
class RuleActions(BaseActions):
def __init__(self):
    # No shared state: every action opens its own DB cursor when needed.
    pass
@rule_action()
def category_near_ndays_fee(self, tag, day=60):
    """
    For every category id, pull the (own/industry) sales of the last *day* days.
    :param tag: NOTE(review): overwritten inside the loop — the incoming
        value is never used; looks like a leftover parameter, confirm callers.
    :param day: number of days of sales to analyse (default 60)
    :return: list of dicts {name: {tag1, tag2, tag3}, value: sales}
    """
    res = calcute_all_category_fee(day)  # [FUNC] sales of every category for the n days after this date last year
    resule = []  # post-process the raw rows into dicts
    for temp in res:
        tag = dict(tag1=temp[0], tag2=temp[1], tag3=temp[2])
        temp_dict = dict(name=tag, value=temp[3])
        resule.append(temp_dict)
    return resule
@rule_action()
def calcu_n_percent(self, res, threshold=0.3):
    """
    Keep the leading categories that make up a given share of total sales.
    :param res: category result list (from category_near_ndays_fee)
    :param threshold: share of total sales the kept categories must cover
    :return: prefix of res covering that share
    """
    k = number_main_list(res, threshold)
    return res[:k]
@rule_action()
def calcu_indus_category_trend(self, res, Days=[10, 30, 45]):
    """
    Keep the categories whose industry-wide sales trend is rising.
    NOTE(review): mutable default argument `Days` — harmless while only
    read, but fragile.
    :param res: candidate category list
    :param Days: windows (in days) used to average the sales slope
    :return: categories with a positive industry slope, slope attached
    """
    temp_res = []
    for temp in res:
        # averaged sales slope over several windows (last year's industry trend for this category)
        args_industry_category = {'trigger': 'industry_category', 'tag1': temp['name']['tag1'], 'tag2': temp['name']['tag2'],
                                  'tag3': temp['name']['tag3'], 'Days': Days}
        industry_category_slopes = calculate_trend(**args_industry_category)  # [FUNC] industry sales trend of this high-share category
        if industry_category_slopes > 0:  # keep only categories trending upward
            temp['indus_category_slopes'] = industry_category_slopes  # attach the slope to the result
            temp_res.append(temp)
    return temp_res
@rule_action()
def calcu_zc_category_trend(self, res, Days=[10, 20, 30]):
    """
    Keep the categories where Zhuican's own trend lags the market trend.
    :param res: market categories with their industry slopes attached
    :param Days: windows (in days) used to average the sales slope
    :return: categories whose own slope is below the industry slope
    """
    temp_res = []
    for temp in res:
        # Days_zc = [10, 20, 30]
        args_zc_category = {'trigger': 'zc_category', 'tag1': temp['name']['tag1'], 'tag2': temp['name']['tag2'],
                            'tag3': temp['name']['tag3'], 'Days': Days}
        zc_category_slopes = calculate_trend(**args_zc_category)  # [FUNC] sales trend of Zhuican's own listings in this category
        if zc_category_slopes < temp['indus_category_slopes']:
            temp['zc_category_slopes'] = zc_category_slopes
            temp_res.append(temp)
    return temp_res
@rule_action()
def under_category_all_item(self, res, ):
    """
    Fetch every item number under each concrete category.
    NOTE(review): `temp_item_list` is created once outside the loop, so each
    category accumulates the items of all previous categories; the method
    also only prints `temp_res` and returns nothing — looks unfinished,
    confirm before relying on it.
    """
    temp_item_list = []
    temp_res = []
    for temp in res:
        cur = connection.cursor()
        sql_all_item = category_main_itemnum(temp['name']['tag1'], temp['name']['tag2'], temp['name']['tag3'])
        cur.execute(sql_all_item)
        res_item = cur.fetchall()
        for temp_item in res_item:
            # print temp_item[0]
            temp_item_list.append(temp_item[0])
        temp['item'] = temp_item_list
        temp_res.append(temp)
    # print type(res_item)
    print temp_res
# ======================= Rule: derive the main-push categories =======================
@rule_action()
def rule_main_category(self, day):
    """
    Work out the categories to push based on the last *day* days of sales.
    :param day: sales window (in days) used for the category analysis
    :return: list of result dicts (category verdicts, item trends, recommended sellers)
    """
    cur = connection.cursor()
    result_all_cagegory_fee = self.category_near_ndays_fee(day)  # sales of every category
    result_k_main_category = self.calcu_n_percent(result_all_cagegory_fee)  # categories with a large sales share
    resut_main_cate = self.calcu_indus_category_trend(result_k_main_category)  # main-push categories
    resut_zc_mian_cate = self.calcu_zc_category_trend(resut_main_cate)
    self.under_category_all_item(resut_zc_mian_cate)
    # =================== everything above exercises the standalone modules =====================
    # res = calcute_all_category_fee(day)  # [FUNC] sales of every category for the n days after this date last year
    # k = number_main_list(res, 3, 0.3)  # [FUNC] number of categories whose sales exceed the given share
    # # the middle argument is the index of the target value (sales)
    result = []  # final result set
    res = calcute_all_category_fee(day)  # [FUNC] sales of every category for the n days after this date last year
    k = 1  # NOTE(review): hard-coded, so only the top 2 categories are analysed — confirm
    for ii in range(0, k + 1):
        Days = [10, 30, 45]  # windows for the averaged sales slope (last year's industry trend for the category)
        args_industry_category = {'trigger': 'industry_category', 'tag1': res[ii][0], 'tag2': res[ii][1],
                                  'tag3': res[ii][2], 'Days': Days}
        industry_category_slopes = calculate_trend(**args_industry_category)  # [FUNC] industry trend of this high-share category
        result_category_list = []  # verdicts collected for this category
        if industry_category_slopes > 0:  # drop high-share categories whose industry trend is falling
            main_category = '%s %s %s' % (res[ii][0], res[ii][1], res[ii][2])
            main_cate = dict(Category=main_category, Tag='行业的主推类目', Slope=industry_category_slopes)
            result_category_list.append(main_cate)
            Days_zc = [10, 20, 30]
            args_zc_category = {'trigger': 'zc_category', 'tag1': res[ii][0], 'tag2': res[ii][1],
                                'tag3': res[ii][2], 'Days': Days_zc}
            zc_category_slopes = calculate_trend(**args_zc_category)  # [FUNC] Zhuican's own sales trend in this category
            if zc_category_slopes < industry_category_slopes:  # categories where Zhuican trails the industry
                main_cate_zc = dict(Category=main_category, Tag='追灿应该主推的类目', Slope=zc_category_slopes)
                result_category_list.append(main_cate_zc)
                sql_itemnum = category_main_itemnum(res[ii][0], res[ii][1], res[ii][2])
                cur.execute(sql_itemnum)
                res_itemnum = cur.fetchall()
                res_item_all_list = []  # sales trends of every item under the main-push category
                res_item_bad_list = []  # items whose trend trails the industry trend
                for i in range(0, len(res_itemnum), 5):  # analyse items in threaded batches of 5
                    tasktemp_itemnum = res_itemnum[i:i + 5]
                    mt = MyThread()
                    g_func_list = []
                    for j in range(0, len(tasktemp_itemnum)):
                        g_func_list.append(
                            {"func": self.main_itemnum,
                             "args": (
                                 tasktemp_itemnum[j][0], main_category, Days_zc, industry_category_slopes,
                                 res_item_all_list, res_item_bad_list)})
                    mt.set_thread_func_list(g_func_list)
                    mt.start()
                res_seller_recommend = self.recommend_seller(res_item_bad_list, res[ii][0], res[ii][1], res[ii][2],
                                                             res[ii][3])  # recommend existing sellers who do not yet carry these items
                result_all = dict(Category=result_category_list, ItemNum=res_item_all_list,
                                  Seller=res_seller_recommend)
                result.append(result_all)
            else:
                main_cate_zc = dict(Category=main_category, Tag='追灿销售比较好的类目', Slope=zc_category_slopes)
                result_category_list.append(main_cate_zc)
                result_all_1 = dict(Category=result_category_list)
                result.append(result_all_1)
        else:
            main_category = '%s %s %s' % (res[ii][0], res[ii][1], res[ii][2])
            main_cate = dict(Category=main_category, Tag='类目销量占比大,但是趋势下降的类目', Slope=industry_category_slopes)
            result_category_list.append(main_cate)
            result_all_2 = dict(Category=result_category_list)
            result.append(result_all_2)
    return result
def main_itemnum(self, itemnum, category, days, slope_main_category, res_item_all_list, res_item_bad_list):
    """
    Compute an item's averaged sales slope and bucket it against the industry slope.
    Runs on worker threads, so results flow out through the two shared
    list arguments rather than a return value: every item gets a verdict
    dict in res_item_all_list; items whose slope is at or below the
    industry slope are also appended to res_item_bad_list.
    :param itemnum: the item number to analyse
    :param category: display string of the owning category
    :param days: slope windows (in days) to average over
    :param slope_main_category: the industry slope to compare against
    """
    cur = connection.cursor()
    slopes = 0
    total_slopes = 0
    for day in days:
        sql = item_day_fee(itemnum, day)
        count = cur.execute(sql)
        if count > 1:  # a slope needs at least two data points
            res_zhuican_current = cur.fetchall()
            slo = culcate_slope(res_zhuican_current, count)
            slopes += slo
            total_slopes += 1
    if total_slopes > 0:  # items with no recent sales: avoid division by zero
        slopes = slopes / total_slopes
        if slope_main_category < slopes:
            res = dict(Category=category, ItemNum=itemnum, Tag='产品销售趋势大于行业趋势', Slope=slopes)
            res_item_all_list.append(res)
        else:
            res = dict(Category=category, ItemNum=itemnum, Tag='产品销售趋势小于等于行业趋势', Slope=slopes)
            res_item_all_list.append(res)
            res_item_bad_list.append(itemnum)
def recommend_seller(self, res_item_bad_list, tag1, tag2, tag3, category_total):
    """
    Recommend sellers for under-performing items of a rising, high-share category.
    :param res_item_bad_list: item numbers whose sales trend trails the industry
    :param category_total: the category's total industry sales
    :return: top-10 sellers ranked by their share of the category's market sales
    """
    cur = connection.cursor()
    List = []  # sales of each item over the last n days
    List_seller = []
    day = 30
    for item in res_item_bad_list:
        sql = item_total_fee(item, day)  # this item's sales over the last 30 days
        cur.execute(sql)
        result_item_fee = cur.fetchall()
        res = dict(Item=item, Fee=result_item_fee[0][0])
        List.append(res)
    List.sort(key=lambda obj: obj.get('Fee'), reverse=True)
    k = number_main_dict(List, 'Fee', 0.9)  # how many items cover 90% of the sales (threshold 0.9)
    for re_k in range(0, k + 1):  # analyse each of those items
        day = 60  # window for "recently joined" sellers
        sql_recommend_seller = recommend_itemnum_seller(List[re_k]['Item'], day)  # sellers not yet carrying this item
        count_recommend_seller = cur.execute(sql_recommend_seller)
        res_recommend_seller = cur.fetchall()
        if count_recommend_seller > 0:  # sellers found who do well in this item's category
            for temp_recommend_seller in res_recommend_seller:
                seller_category_per = seller_category_percent(temp_recommend_seller[0], category_total, tag1, tag2,
                                                              tag3)  # the seller's share of the category's market sales
                cur.execute(seller_category_per)
                percent = cur.fetchall()
                if percent[0][0] >= 0:
                    category = '%s %s %s - %s' % (tag1, tag2, tag3, List[re_k]['Item'])
                    seller = dict(SellerNick=temp_recommend_seller[0], ItemNum=category,
                                  Percent=percent[0][0])
                    List_seller.append(seller)
    List_seller.sort(key=lambda obj: obj.get('Percent'), reverse=True)  # rank candidate sellers by category sales share
    return List_seller[:10]
# ======================= Rule 1 =======================
@rule_action()
def rule1(self, category, day):
    """
    Classify item sales trends for every seller of *category*.
    Sellers are processed in threaded batches of 10; verdicts accumulate
    in the shared List_te (see run_seller / run_itemnum).
    :param category: category to analyse
    :param day: sales window (in days)
    :return: list of {SellerNick, ItemNum, Tag, Slope} dicts
    """
    cur = connection.cursor()
    Sql = all_seller(category, day, current_time)
    cur.execute(Sql)
    Result_seller = cur.fetchall()
    List_te = []
    for i in range(0, len(Result_seller), 10):
        tasktemp_seller = Result_seller[i:i + 10]
        mt = MyThread()
        g_func_list = []
        for j in range(0, len(tasktemp_seller)):
            g_func_list.append(
                {"func": self.run_seller,
                 "args": (category, day, tasktemp_seller[j][0], List_te)})
        mt.set_thread_func_list(g_func_list)
        mt.start()
    cur.close()
    return List_te
def run_seller(self, category, day, sellernick, List_te):
    """
    Fetch one seller's items in *category* and analyse them in threaded batches of 5.
    Worker for rule1; results are appended to the shared List_te by run_itemnum.
    """
    cur = connection.cursor()
    Sql_itemnum = seler_category(sellernick, category, day, current_time)
    cur.execute(Sql_itemnum)
    Result_itemnum = cur.fetchall()
    for i in range(0, len(Result_itemnum), 5):
        tasktemp_itemnum = Result_itemnum[i:i + 5]
        mt = MyThread()
        g_func_list = []
        for j in range(0, len(tasktemp_itemnum)):
            g_func_list.append(
                {"func": self.run_itemnum,
                 "args": (day, sellernick, tasktemp_itemnum[j][0], List_te)})
        mt.set_thread_func_list(g_func_list)
        mt.start()
    cur.close()
def run_itemnum(self, day, sellernick, itemnum, List_te):
    """
    Compute one item's sales slope for a seller and append a tagged verdict to List_te.
    Slope thresholds: > 0.2 above industry, < 0 below industry, else normal.
    """
    cur = connection.cursor()
    sql = day_fee(day, sellernick, itemnum, current_time)
    cur.execute(sql)
    result = cur.fetchall()
    slo = culcate_slope(result, day)
    if slo > 0.2:
        tag = '高于行业数据'
    elif slo < 0:
        tag = '低于行业数据'
    else:
        tag = '正常'
    a = dict(SellerNick=sellernick, ItemNum=str(itemnum), Tag=tag, Slope=slo)
    List_te.append(a)
    cur.close()
# ======================= Rule 3 =======================
@rule_action()
def rule3(self, category, day):
    """
    Rule 3 (row 6): sellers who match *category* but have NOT listed it.
    :param category: product category that needs more sellers
    :param day: window (in days) for selecting newly joined sellers
    :return: list of {SellerNick, Tag} dicts
    """
    cur = connection.cursor()
    List_te = []
    sql = rule_three(day, category, current_time)
    cur.execute(sql)
    result_none_seller = cur.fetchall()
    tag = '商家没有上架%s' % category
    for res in result_none_seller:
        a = dict(SellerNick=res[0], Tag=tag)
        List_te.append(a)
    cur.close()
    return List_te
# ======================= Rule 4 =======================
@rule_action()
def rule4(self, category, day):
    """
    Sellers who HAVE listed *category*, with item number and sales total.
    NOTE(review): the original docstring was copied from rule 3 and did not
    match this body.
    :param category: product category to look up
    :param day: window (in days) for selecting sellers
    :return: list of {SellerNick, Tag, Itemnum, Total} dicts
    """
    cur = connection.cursor()
    List_te = []
    sql = rule_four(day, category, current_time)
    cur.execute(sql)
    result_none_seller = cur.fetchall()
    tag = '商家上架了%s' % category
    for res in result_none_seller:
        a = dict(SellerNick=res[0], Tag=tag, Itemnum=res[1], Total=res[2])
        List_te.append(a)
    cur.close()
    return List_te
# ======================= Rule 6 =======================
@rule_action()
def rule6(self, category, day=30, number=10):
    """
    Top *number* recently joined sellers of *category*, ranked by sales.
    NOTE(review): the original docstring was copied from rule 7 and did not
    match this signature; parameters below reflect the actual code.
    :param category: category to analyse
    :param day: (default 30) how many days back counts as "recently joined"
    :param number: (default 10) how many top sellers to return
    :return: the top `number` entries of the fee-sorted result list
    """
    cur = connection.cursor()
    List_te = []
    sql = all_seller(category, day, current_time)  # sellers joined in the last `day` days
    cur.execute(sql)
    result_seller = cur.fetchall()
    # print sql
    for i in range(0, len(result_seller), 10):  # threaded batches of 10 sellers
        tasktemp_seller = result_seller[i:i + 10]
        mt = MyThread()
        g_func_list = []
        for j in range(0, len(tasktemp_seller)):
            # print str(tasktemp_seller[j][0])
            g_func_list.append(
                {"func": self.rule_seven_item,
                 "args": (tasktemp_seller[j][0], category, day, List_te)})
        mt.set_thread_func_list(g_func_list)
        mt.start()
    List_te.sort(key=lambda obj: obj.get('Fee'), reverse=True)
    cur.close()
    return List_te[:number]  # top sellers by fee
# ======================= Rule 7 =======================
@rule_action()
def rule7(self, category, day=30):
    """
    All recently joined sellers of *category* with their fees (unsorted, unlimited).
    Same pipeline as rule6 but without the sort/top-N cut.
    :param category: category to analyse
    :param day: (default 30) how many days back counts as "recently joined"
    :return: list of {SellerNick, Fee} dicts
    """
    cur = connection.cursor()
    List_te = []
    sql = all_seller(category, day, current_time)  # sellers joined in the last `day` days
    cur.execute(sql)
    result_seller = cur.fetchall()
    # print sql
    for i in range(0, len(result_seller), 10):  # threaded batches of 10 sellers
        tasktemp_seller = result_seller[i:i + 10]
        mt = MyThread()
        g_func_list = []
        for j in range(0, len(tasktemp_seller)):
            # print str(tasktemp_seller[j][0])
            g_func_list.append(
                {"func": self.rule_seven_item,
                 "args": (tasktemp_seller[j][0], category, day, List_te)})
        mt.set_thread_func_list(g_func_list)
        mt.start()
    cur.close()
    return List_te
def rule_seven_item(self, seller, category, day, List_te):
cur = connection.cursor()
sql = rule_six_seven(seller, category, current_time, day)
cur.execute(sql)
result = cur.fetchall()
for re in result:
a = dict(SellerNick=seller, Fee=re[2])
List_te.append(a)
# ======================= 规则-9 =======================
@rule_action()
def rule9(self, threshold):
"""
规则十四(第17行):连续30天(n天)以上未销售的商家名单
:param day: 考察的销售时间区间
:param new_seller_day:最近多少天新加入的商家
:return:
"""
cur = connection.cursor()
List_te = []
sql = rule_nine_ten()
cur.execute(sql)
result = cur.fetchall()
for i in result:
if i[2] >= threshold:
tag = '转化率高的商家产品'
a = dict(SellerNick=i[0], ItemNum=i[1], Percent=i[2], Tag=tag, Sell_total=i[3], Pv=i[4])
List_te.append(a)
cur.close()
return List_te
# ======================= 规则-10 =======================
@rule_action()
def rule10(self, threshold):
"""
规则十四(第17行):连续30天(n天)以上未销售的商家名单
:param day: 考察的销售时间区间
:param new_seller_day:最近多少天新加入的商家
:return:
"""
cur = connection.cursor()
List_te = []
sql = rule_nine_ten()
cur.execute(sql)
result = cur.fetchall()
for i in result:
if i[2] < threshold:
tag = '转化率低的商家产品'
a = dict(SellerNick=i[0], ItemNum=i[1], Percent=i[2], Tag=tag, Sell_total=i[3], Pv=i[4])
List_te.append(a)
cur.close()
return List_te
    # ======================= Rule 11 =======================
    @rule_action()
    def rule11(self, category='保暖内衣', seller_day=30, item_day=10, threshold=800):
        """
        Rule 11 (row 14): new sellers whose listed items achieved no (or
        insufficient) sales within ``item_day`` days of listing.

        :param category: product category under analysis
        :param seller_day: (default 30) look-back window for new sellers
        :param item_day: (default 10) days after listing to analyse
        :param threshold: (default 800) minimum expected sales figure
        :return: list populated by the worker threads (see rule_eleven_item)
        """
        cur = connection.cursor()
        List_te = []
        sql = rule_eleven_seller(seller_day, current_time)  # sellers signed within `seller_day` days
        cur.execute(sql)
        result_seller = cur.fetchall()
        # Fan sellers out to worker threads in batches of 5; workers append
        # result dicts to the shared List_te.
        for i in range(0, len(result_seller), 5):
            tasktemp_seller = result_seller[i:i + 5]
            mt = MyThread()
            g_func_list = []
            for j in range(0, len(tasktemp_seller)):
                g_func_list.append(
                    {"func": self.rule_eleven_item,
                     "args": (tasktemp_seller[j][0], item_day, threshold, category, List_te)})
            mt.set_thread_func_list(g_func_list)
            mt.start()
        cur.close()
        return List_te
    def rule_eleven_item(self, sellernick, item_day, threshold, category, List_te):
        """
        Worker: inspect one seller's listed items (fanned out to further
        worker threads), or flag the seller when nothing is listed at all.

        NOTE: the bare name ``rule_eleven_item`` inside this method resolves
        to the module-level SQL builder of the same name, not to this method
        (which is only reachable as a class attribute).

        :param sellernick: seller to inspect
        :param item_day: days after listing to analyse
        :param threshold: minimum expected sales figure
        :param category: product category (used in the tag text)
        :param List_te: shared result list (mutated in place)
        """
        cur = connection.cursor()
        sql = rule_eleven_item(sellernick)
        # NOTE(review): relies on cursor.execute() returning the affected
        # row count (MySQLdb behaviour) -- not guaranteed by DB-API; confirm.
        count = cur.execute(sql)
        result_itemnum = cur.fetchall()
        if count > 0:
            # Analyse the seller's items in worker-thread batches of 5.
            for i in range(0, len(result_itemnum), 5):
                tasktemp_item = result_itemnum[i:i + 5]
                mt = MyThread()
                g_func_list = []
                for j in range(0, len(tasktemp_item)):
                    g_func_list.append(
                        {"func": self.rule_eleven_find,
                         "args": (sellernick, tasktemp_item[j][0], item_day, threshold, category, List_te)})
                mt.set_thread_func_list(g_func_list)
                mt.start()
        else:  # seller has no listed items at all -- flag directly
            tag = '商家没有任何上架%s' % category
            a = dict(SellerNick=sellernick, ItemNum='NULL', Fee=0, Tag=tag)
            List_te.append(a)
        cur.close()
    def rule_eleven_find(self, sellernick, itemnum, item_day, throshold, category, List_te):
        """
        Worker: look up the most recent listing time of ``itemnum`` for the
        seller, then delegate the sales analysis to rule_eleven_analyse.

        NOTE(review): assumes rule_eleven_find_MaxTime(...) always returns at
        least one row; an empty result would raise IndexError here --
        confirm against the schema.
        """
        cur = connection.cursor()
        sql = rule_eleven_find_MaxTime(sellernick, itemnum)  # latest listing time of this item
        cur.execute(sql)
        max_time = cur.fetchall()
        MaxTime = max_time[0][0]
        self.rule_eleven_analyse(sellernick, itemnum, item_day, MaxTime, throshold, category, List_te)
        cur.close()
    def rule_eleven_analyse(self, sellernick, itemnum, item_day, MaxTime, throshold, category, List_te):
        """
        Worker: check the sales of one item in the ``item_day`` days after
        ``MaxTime`` and flag it when below the threshold (or unsold).

        NOTE: the bare name ``rule_eleven_find`` below resolves to the
        module-level SQL builder, not the sibling method of the same name.

        :param sellernick: seller being inspected
        :param itemnum: item id being inspected
        :param item_day: days after listing to analyse
        :param MaxTime: the item's most recent listing time
        :param throshold: (sic) minimum expected sales figure
        :param category: product category (used in the tag text)
        :param List_te: shared result list (mutated in place)
        """
        cur = connection.cursor()
        sql = rule_eleven_find(sellernick, itemnum, item_day, str(MaxTime))
        # NOTE(review): relies on cursor.execute() returning the row count
        # (MySQLdb behaviour); confirm for the DB driver in use.
        count = cur.execute(sql)
        result = cur.fetchall()
        if count > 0:
            for re in result:  # NOTE: `re` shadows the stdlib re module locally
                if re[1] < throshold:  # re[1] is the sales figure column
                    tag = '商家的上架了%s,有销售记录,但没有达到预定的销量' % category
                    a = dict(SellerNick=sellernick, ItemNum=itemnum, Fee=re[1], Tag=tag)
                    List_te.append(a)
        else:  # no sales record at all -- flag directly
            tag = '商家的上架了%s, 但没有任何销售记录' % category
            a = dict(SellerNick=sellernick, ItemNum=itemnum, Fee=0, Tag=tag)
            List_te.append(a)
        cur.close()
    # ======================= Rule 13 =======================
    @rule_action()
    def rule13(self, seller_day=60, day=7):
        """
        Rule 13 (row 16): sellers that went ``day`` or more days without
        listing anything after their onboarding succeeded.

        :param seller_day: (default 60) look-back window for new sellers
        :param day: (default 7) maximum tolerated listing gap, in days
        :return: list of dicts {SellerNick, Tag}
        """
        cur = connection.cursor()
        List_te = []
        sql_none = rule_thirteen_none_seller(seller_day, current_time)  # new sellers with NO listings at all
        cur.execute(sql_none)
        result_none_seller = cur.fetchall()
        for res in result_none_seller:
            a = dict(SellerNick=res[0], Tag='新招商家没有铺货')
            List_te.append(a)
        # Sellers that do have listings: inspect their listing-time gaps in
        # worker-thread batches of 5 (see rule_thirteen_create_time).
        sql_exist = rule_thirteen_exist_seller(seller_day, current_time)
        cur.execute(sql_exist)
        result_exist_seller = cur.fetchall()
        for i in range(0, len(result_exist_seller), 5):
            tasktemp_seller = result_exist_seller[i:i + 5]
            mt = MyThread()
            g_func_list = []
            for j in range(0, len(tasktemp_seller)):
                g_func_list.append(
                    {"func": self.rule_thirteen_create_time,
                     "args": (tasktemp_seller[j][0], day, List_te)})
            mt.set_thread_func_list(g_func_list)
            mt.start()
        cur.close()
        return List_te
def rule_thirteen_create_time(self, sellernick, day, List_te):
cur = connection.cursor()
sql = rule_thirteen_create_time(sellernick) # 获取特定商家的所有铺货时间
count = cur.execute(sql)
result_create_time = cur.fetchall()
if count == 1:
if (datetime.datetime.strptime(current_time, "%Y-%m-%d").date() - result_create_time[0][0]).days > day:
tag = '只有一次铺货,并且距离当前时间较长(大于给定值%s天)' % day
a = dict(SellerNick=sellernick, Tag=tag)
List_te.append(a)
else:
for j in range(0, len(result_create_time)):
for k in range(j + 1, len(result_create_time)):
if (result_create_time[k][0] - result_create_time[j][0]).days > day:
tag = '两次铺货的时间距离较长(大于给定值%s天)' % day
a = dict(SellerNick=sellernick, Tag=tag)
List_te.append(a)
return # 直接退出
# ======================= 规则-14 =======================
@rule_action()
def rule14(self, day=30, new_seller_day=365 * 1):
"""
规则十四(第17行):连续30天(n天)以上未销售的商家名单
:param day: 考察的销售时间区间
:param new_seller_day:最近多少天新加入的商家
:return:
"""
cur = connection.cursor()
List_te = []
sql = rule_fourteen(day, new_seller_day, current_time)
cur.execute(sql)
result = cur.fetchall()
for i in result:
tag = '连续%s天以上没有实现任何销售' % day
a = dict(SellerNick=i[0], Tag=tag)
List_te.append(a)
cur.close()
return List_te |
# 1. web server under construction
# 2. other than get is invalid request response with error code
# 3. /index.html, match contents.
# 4. list of directory with files and folders. (html template for printing)
# 5. Handling threads
# 1. /scripts/file.py, check for the output
# 2. /file.py, other than scripts folder
# 3. /scripts/file.py, checking with the parameters in the uri
# 4. /scripts/file.py, infinite loop checking
# 5. Handling threads
# -*- coding: utf-8 -*-
"""Test_webserver.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1--N3SeVNrh6fSAK77MMzrbz-m-MQu6V3
@author Praveen Garimella
@author Laxmi Narayana Murthy
@author Deepak Kumar Reddy
@author Vipul
@author Siva Sankar
"""
# import statements here.
import requests
# driver to run all testcases
def Testcase(r, expstatuscode, expcontenttype, exptext, count):
    """Compare a response against the expected status, content-type and body.

    Prints a banner, then a diagnostic for the first mismatch found.

    :param r: response object (status_code, headers, text attributes)
    :param expstatuscode: expected HTTP status code
    :param expcontenttype: expected Content-Type header value
    :param exptext: expected response body
    :param count: test case number, shown in the banner
    :return: True only when all three fields match
    """
    print("-----TestCase-", count, "-----")
    if r.status_code != expstatuscode:
        print(r.status_code, " :: ", expstatuscode)
        print("Wrong status code")
        return False
    content_type = r.headers['Content-Type']
    if content_type != expcontenttype:
        print("Wrong content-type")
        return False
    if r.text != exptext:
        print(r.text, ": :", exptext)
        print("Wrong content")
        return False
    return True
# Test case
# give your local host and port number here.
# Testcase 1: check that the server is handling requests at all.
# The default response should be:
#   <h1>Webserver Under construction</h1>
# This response should only be sent when the variable
# enable_directory_browsing is set to False.
r = requests.get('http://127.0.0.1:8888')
print()
if Testcase(r, 200, "text/html", "<h1>Webserver Under construction</h1>", 1):
    # Bug fix: corrected the typo "specificatoin" in the result message.
    print("Testcase 1 meets the given specification")
else:
    print("Testcase 1 failed")
|
import matplotlib.pyplot as plt
import numpy as np
def plot_surface(clf, X, y, h=.02):
    """Plot a classifier's decision surface over the first two features.

    A dense grid spanning the data (padded by 0.5 on each side) is scored
    with ``clf.predict_proba`` and drawn as a filled contour, with the
    training points scattered on top.

    :param clf: fitted classifier exposing ``predict_proba``
    :param X: 2-D sample array; only columns 0 and 1 are used
    :param y: class labels used to colour the scatter points
    :param h: grid step size
    """
    plt.figure(figsize=[6, 6])
    pad = .5
    x_lo, x_hi = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_lo, y_hi = X[:, 1].min() - pad, X[:, 1].max() + pad
    grid_x, grid_y = np.meshgrid(np.arange(x_lo, x_hi, h),
                                 np.arange(y_lo, y_hi, h))
    # Probability of the first class at every grid node, reshaped back onto
    # the grid for contour plotting.
    proba = clf.predict_proba(np.c_[grid_x.ravel(), grid_y.ravel()])[:, 0]
    proba = proba.reshape(grid_x.shape)
    plt.contourf(grid_x, grid_y, proba, cmap='RdGy', alpha=.5)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=2, cmap='RdGy')
|
import codecs
import datetime
import mimetypes
import re
import os
import json
from typing import Dict, Any, List, Union, Optional, Tuple, Type
from django.shortcuts import redirect, render
from .exceptions import ParamNotFoundError, ValueOutOfRangeError, RequestValidationError, \
InvalidParamFormatError, ObjectNotFoundError, AuthNeedError
try:
import khayyam
except ImportError:
khayyam = None
from datetime import date
from uuid import uuid4
import unicodedata
from django.core import signing
from django.contrib.auth.models import User
from django.db.models import QuerySet, Model
from django.http import HttpResponse, QueryDict, JsonResponse, HttpResponseRedirect
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import gettext as _
from django.views import View
class FileUploadResult(object):
    """
    Record describing a single uploaded file: who uploaded it, when, where
    it was stored, its original name and its size in bytes.
    """

    def __init__(self, user, upload_date, upload_path, file_name, file_sz):
        self._user = user
        self._upload_file = file_name
        self._upload_path = upload_path
        self._upload_date = upload_date
        self._file_size = file_sz

    def get_user(self) -> str:
        """Return the identifier of the user who uploaded the file."""
        return self._user

    def get_upload_date(self) -> str:
        """Return the date the file was uploaded."""
        return self._upload_date

    def get_path(self):
        """Return the path the file was stored at."""
        return self._upload_path

    def get_file_name(self):
        """Return the name of the uploaded file."""
        return self._upload_file

    def get_file_size(self):
        """Return the size of the stored file."""
        return self._file_size

    # Read-only attribute-style access to the same values.
    user_id = property(get_user)
    upload_date = property(get_upload_date)
    upload_path = property(get_path)
    file_name = property(get_file_name)
    file_size = property(get_file_size)
class SARequest(View):
"""
Class to handle generic requests
"""
def get_store(self) -> QueryDict:
"""
Get params from request.
:return: A dictionary of parameters passed by user
:rtype: QueryDict
"""
if self.request.method == 'GET':
return self.request.GET
elif self.request.method == 'POST':
return self.request.POST
return QueryDict()
# Class Properties
store = property(get_store)
body = property(lambda self: self.request.body)
user = property(lambda self: self.request.user)
staff = property(lambda self: self.user.is_staff)
superuser = property(lambda self: self.user.is_superuser)
logged_in = property(lambda self: self.user.is_authenticated)
@staticmethod
def _raise_invalid_param_error(name: str, raise_error: bool, default: Any) -> Any:
"""
Shortcut to raise error or return default value
:param name: name of the parameter to fill in exception
:param raise_error: Raise error
:param default: Default value to return
:return: depends on the type of default value
:rtype: Any
:raise: ParamNotFoundException
"""
if raise_error:
raise ParamNotFoundError(_("Parameter is not valid"), name)
return default
@staticmethod
def _raise_min_max_error(name: str, raise_error: bool, min_value: Any, max_value: Any, default: Any) -> Any:
"""
Shortcut to raise min and max for value error
:param name: Name of the parameter
:param raise_error: raise error
:param default: Default value when raise error is False
:param min_value: Min value
:param max_value: Max value
:return: default_value if raise_error is set to False
:rtype: Any
"""
if raise_error:
raise ValueOutOfRangeError(name, min_value, max_value)
return default
@staticmethod
def _raise_object_not_found(name: str, raise_error: bool, default_value: Any) -> Any:
"""
Shortcut to raise ObjectNotFoundError
:param name: param name
:param raise_error: raise error or not?
:param default_value: default value to return if raise error is False
:return: Any
:rtype: Any
"""
if raise_error:
raise ObjectNotFoundError(name)
return default_value
@staticmethod
def _raise_format_error(name: str, valid_example: str, raise_error: bool, default: Any) -> Any:
"""
Shortcut to raise format error
:param name: Param name
:param valid_example: A valid example of data to view to user
:param raise_error: raiser error
:param default: default value to return if raise error is False
:return: Any depends on default value
:rtype: Any
"""
if raise_error:
raise InvalidParamFormatError(name, valid_example)
return default
def is_ajax(self):
return self.request.META.get("HTTP_X_REQUESTED_WITH", "") == "XMLHttpRequest"
    def response_render(self, template_path: str, context: Optional[Dict] = None) -> HttpResponse:
        """
        Render a template against the current request.

        :param template_path: path of the template to render
        :param context: optional context data for the template
        :return: the rendered page
        :rtype: HttpResponse
        """
        return render(self.request, template_path, context=context)
def get_int(self, name: str,
raise_error: bool = False,
default: int = 0,
min_value: int = None,
max_value: int = None
) -> int:
"""
Get "name" as int from request
:param name: Name of the parameter.
:param raise_error: Raises error if name not found in request.
:param default: default value if raise error is set to False to return
:param min_value: Min acceptable value
:param max_value: Max acceptable value
:return: requested value converted to int
:rtype: int
:raise: ParamNotFoundError
"""
data = self.store.get(name)
if data is None:
return self._raise_invalid_param_error(name, raise_error, default)
data = data.replace(",", "")
rx = re.search(r'(-)?\d+', data)
if rx is None:
return self._raise_invalid_param_error(name, raise_error, default)
res = int(rx.group(0))
# Now let's check for min and max values
if min_value is not None:
if res < min_value:
return self._raise_min_max_error(name, raise_error, min_value, max_value, default)
# Checking Max value
if max_value is not None:
if res > max_value:
return self._raise_min_max_error(name, raise_error, min_value, max_value, default)
# Let's end this game
return res
def get_string(self, name: str,
raise_error: bool = False,
default: str = '',
min_len: int = 0,
max_len: int = 0) -> str:
"""
Get "name" from request as str
:param name: Parameter name to retrieve
:param raise_error: raise error if parameter was not found in request
:param default: default value to return if parameter was not found
:param min_len: Min len of str to accept
:param max_len: Max len of str to accept
:return: str parameter value
:rtype: str
:raise: ParamNotFoundError, ValueOutOfRangeError
"""
res = self.store.get(name, "")
if res == "":
return self._raise_invalid_param_error(name, raise_error, default)
if min_len:
if len(res) < min_len:
return self._raise_min_max_error(name, raise_error, min_len, max_len, default)
if max_len:
if len(res) > max_len:
return self._raise_min_max_error(name, raise_error, min_len, max_len, default)
return res
    @staticmethod
    def _get_paging_(list_len: int, start_point: int, per_page: int = 5) -> Dict:
        """
        Arithmetic for pagination.

        :param list_len: length of the list to paginate
        :param start_point: index of the first record to show
        :param per_page: results per page (0/falsy falls back to 10)
        :return: dict with keys 'start', 'back', 'page', 'page_count', 'end'
            consumed by paginate()
        """
        # Normalise per_page; 0 or falsy means "use the default of 10".
        if per_page:
            block_count = int(per_page)
            if block_count == 0:
                block_count = 10
        else:
            block_count = 10
        if list_len == 0:
            # Empty result set: a single empty page.
            sp = 0
            nx = 0
            p = 1
            pc = 1
            bp = -1
        elif list_len <= block_count:
            # Everything fits on one page.
            sp = 0
            nx = list_len
            p = 1
            pc = 1
            bp = -1
        elif (start_point + block_count) < list_len:
            # Somewhere before the last page: align the slice start.
            if start_point == 0:
                sp = 0
            elif start_point == block_count:
                sp = block_count
            elif (start_point % block_count) == 0:
                sp = start_point
            else:
                # NOTE(review): this value is not a multiple of the page
                # size, which looks inconsistent with the aligned branches
                # above -- confirm intended behaviour for unaligned offsets.
                sp = block_count + int(start_point / block_count) + 1
            nx = sp + block_count
            bp = start_point - block_count
            if bp < 0:
                bp = 0
            pc = int(list_len / block_count) + 1
            if (list_len % block_count) == 0:
                pc -= 1
            p = int((start_point + block_count) / block_count)
        else:
            # Last (possibly partial) page.
            sp = start_point
            bp = start_point - block_count
            if (list_len % block_count) == 0:
                nx = start_point + block_count
            else:
                nx = start_point + (list_len % block_count)
            p = int((start_point + block_count) / block_count)
            pc = int(list_len / block_count) + 1
            if (list_len % block_count) == 0:
                pc -= 1
        return {'start': sp, 'back': bp, 'page': p, 'page_count': pc, 'end': nx}
    def paginate(self, query_set: Union[List, QuerySet],
                 data_name: str,
                 extra: Dict = None,
                 default_per_page: int = 10):
        """
        Paginate a queryset or list for use in a template.

        Per-page size is read from the "pp" querystring parameter, and the
        current offset from "cp". The result dict contains:
          data_name: the paginated slice of data
          next / back: offsets to pass to "cp" for the next/previous page
          current_page: current page number (1-based)
          pages: total number of pages
          per_page: page size actually used
          last_page: offset to pass to "cp" to reach the last page
          request: the request object (to build URLs in templates)
          is_last_page / is_first_page: position flags
          total_result: total number of records
          next_pages / back_pages: page-number -> "cp" offset maps for the
              surrounding pages

        :param query_set: queryset or list to paginate
        :param data_name: key name of the data slice in the result dict
        :param extra: extra entries merged into the result
        :param default_per_page: page size used when "pp" is absent
        :return: dict as described above (empty when query_set is None)
        :rtype: Dict
        """
        if extra is None:
            extra = {}
        if query_set is None:
            return {}
        # "pp" = per page, "cp" = current offset of the first record.
        per_page = self.get_int('pp', False, default=default_per_page)
        current_page = self.get_int('cp', False, 0)
        if isinstance(query_set, QuerySet):
            query_len = query_set.count()  # COUNT(*) without fetching rows
        else:
            query_len = len(query_set)
        if current_page:
            paging = self._get_paging_(query_len, int(current_page), int(per_page))
        else:
            paging = self._get_paging_(query_len, 0, int(per_page))
        # Slice the requested page out of the source data.
        res = query_set[paging['start']: paging['end']]
        next_link = paging['end']
        back_link = paging['back']
        current_page = paging['page']
        page_count = paging['page_count']
        last_page = (page_count - 1) * int(per_page)
        is_last_page = current_page == page_count
        is_first_page = current_page == 1
        # Calculate the surrounding page links (5 ahead/behind normally,
        # widened to 10 when the whole set is small).
        links_to_show = 5
        if page_count <= 5:
            links_to_show = 10
        next_pages = {a: (a*current_page, (a-1) * per_page) for a in range(current_page, current_page + links_to_show) if 0 < a <= page_count}
        back_pages = {a-1: (a-1, (a - 2) * per_page) for a in range(current_page, current_page - links_to_show, -1) if 1 < a <= page_count}
        back_pages = {k: back_pages[k] for k in sorted(back_pages.keys())}
        rx = {data_name: res,
              'next': next_link,
              'back': back_link,
              'current_page': current_page,
              'pages': page_count,
              'per_page': int(per_page),
              'last_page': last_page,
              'request': self.request,
              'is_last_page': is_last_page,
              'is_first_page': is_first_page,
              'total_result': query_len,
              'next_pages': next_pages,
              'back_pages': back_pages
              }
        rx.update(extra)
        return rx
def get_decrypted_list(self, name: str,
raise_error: bool = False,
default: List = None) -> List:
"""
Process request and find objects by name, decrypt and return in a list
:param name: name of the collection to read
:param raise_error: raise error if parameter not found
:param default: default list to return if raise error is False
:return: A list of decrypted data
:rtype: List
"""
x = self.store.getlist(name)
if not x:
return self._raise_invalid_param_error(name, raise_error, default)
res = []
for a in x:
z = signing.dumps(a)
if z:
res.append(z)
if len(res) < 1:
return self._raise_invalid_param_error(name, raise_error, default)
return res
def get_int_list(self, name: str, raise_error: bool = False, default: List = ()) -> List:
"""
Process the request and get a list of int
:param name: The name of the parameter to check
:param raise_error: False by default
:param default: default return
:return: list of ints
:rtype: List
"""
if default is None:
default = []
i_list = self.store.getlist(name)
if not i_list:
return self._raise_invalid_param_error(name, raise_error, default)
res = []
for a in i_list:
z = re.findall(r'\d+', a)
if not z:
continue
res.append(int(z[0]))
return res
    def get_file_size(self, name: str,
                      raise_error: bool = False, default: float = 0) -> float:
        """
        Convert user input like "1024 MB" into a size in bytes.

        e.g. "2M" -> 2 * 1024 * 1024; a bare number is returned unchanged.

        :param name: parameter name
        :param raise_error: raise when the parameter is missing or malformed
        :param default: fallback when ``raise_error`` is False
        :return: the size the user entered, in bytes
        :rtype: float
        """
        user_data = self.store.get(name, "")
        if not user_data:
            return self._raise_invalid_param_error(name, raise_error, default)
        # size: the number; types: optional K/M/G/T/P unit; x: optional "B".
        x = re.match(r'(?P<size>\d+(\.\d+)?)(?P<space>\s)*(?P<types>[MmKkGgTtPp]?)(?P<x>[Bb])?',
                     user_data)
        if x is None:
            return self._raise_format_error(name, "2M 3G", raise_error, default)
        try:
            res = x.groups()
            if len(res) < 4:
                return self._raise_format_error(name, "2M 3G", raise_error, default)
            # groups: (number, fractional part, space, unit letter, trailing B)
            size, points, space, mode, extra = res[0], res[1], res[2], res[3], res[4]
            mods = ['', 'k', 'm', 'g', 't', 'p']
            real_size = float(size)
            if mode == '':
                # No unit letter: the value is already in bytes.
                return real_size
            # Multiply by 1024 once per step up to the matched unit
            # (k -> 1024, m -> 1024**2, ...).
            for m in mods:
                if m.lower() == mode.lower():
                    break
                real_size *= 1024
            return real_size
        except ValueError:
            return self._raise_format_error(name, "2M 3G", raise_error, default)
def get_regex(self,
pattern: str,
default_value: Optional[List[str]] = (),
raise_error: Optional[bool] = False
) -> List[str]:
"""
Find parameters by a regex and returns a list of matched data.
It's useful when you want read a list of parameters with a specific name pattern.
:param pattern: regex pattern to match parameter names
:param default_value: default value if noting found
:param raise_error: raise error if not result found
:return: a list of patched parameters with their values
:rtype: List[str]
"""
rx = re.compile(pattern, re.IGNORECASE)
match_keys = []
for k in self.store.keys():
if rx.search(k):
match_keys.append(k)
if not match_keys:
return self._raise_invalid_param_error("", raise_error, default_value)
return match_keys
def get_float(self, name: str,
raise_error: bool = False,
default: float = 0.0) -> float:
"""
Get "name" as float
:param name: name of the param
:param raise_error: raise error if param not found
:param default: default value if raise error is False
:return: float value of "name" parameter
:rtype: float
"""
data = self.store.get(name, None)
if data is None:
return self._raise_invalid_param_error(name, raise_error, default)
data = data.replace(",", "")
rx = re.findall(r'(\d+)(\.?\d+)?', data)
if len(rx) > 0:
return float("".join([a[0] + a[1] for a in rx]))
return self._raise_invalid_param_error(name, raise_error, default)
def get_date(self, name: str,
date_format: str = '%Y/%m/%d %H:%M',
raise_error: bool = False,
default: datetime.datetime = None) -> datetime.datetime:
"""
Get date with format
:param name: name of parameter
:param date_format: date format to convert
:param raise_error: raise error if param not found
:param default: default date to return if raise error is False
:return: datetime object
:rtype: datetime.datetime
"""
if not date_format:
raise ValueError(("Parameter date_format is empty!",))
data = self.store.get(name, None)
if data is None:
return self._raise_invalid_param_error(name, raise_error, default)
try:
res = datetime.datetime.strptime(data, date_format)
return res
except ValueError:
return self._raise_format_error(name, date_format, raise_error, default)
def get_from_persian_date(self,
name: str,
date_format: str = '%Y/%m/%d %H:%M',
raise_error: bool = False,
default: khayyam.JalaliDatetime = None) -> datetime:
"""
Converts inout to a persian datetime. Khayyam library is needed for this conversion.
pip install khayyam
:param name: Param name to convert
:param date_format: datetime format of input
:param raise_error: raise error if input was not found
:param default: default value to return if raise_error is False
:return: khayyam datetime object
:rtype: khayyam.datetime
"""
if not khayyam:
raise ImportError(("khayyam is not installed. Please install it first by pip install khayyam",))
if not date_format:
raise ValueError(("date_format is not valid",))
data = self.store.get(name, None)
if not data:
return self._raise_invalid_param_error(name, raise_error, default)
try:
return khayyam.JalaliDatetime.strptime(data, date_format)
except ValueError:
raise self._raise_format_error(name, date_format, raise_error, default)
def get_email(self, name: str,
raise_error: bool = False, default: str = None) -> str:
"""
Get email from user input
:param name: Parameter name to read
:param raise_error: raise error if param not found
:param default: Default value to return if raise error is False
:return: Email address
:rtype: str
"""
data = self.store.get(name, None)
if not data:
return self._raise_invalid_param_error(name, raise_error, default)
res = re.findall(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-z]{2,4}', data)
if len(res) > 0:
return res[0]
return self._raise_format_error(name, "user@domain.com", raise_error, default)
    def get_target_user(self,
                        name: str = 'u',
                        raise_error: bool = False,
                        default: Optional[User] = None
                        ) -> User:
        """
        Resolve a target User from an int pk carried in the request.

        Access rules applied to the CURRENT logged-in user:
          * superuser: may fetch anyone;
          * staff (non-super): may fetch anyone except superusers -- deeper
            permission validation is the caller's responsibility;
          * regular user: may fetch other regular users only.

        :param name: parameter carrying the target user's pk (int); "u" by
            default
        :param raise_error: raise when the user cannot be found/accessed
        :param default: fallback when ``raise_error`` is False
        :return: the resolved User
        :rtype: User
        """
        target_id = self.get_int(name, raise_error)
        user = User.objects.filter(pk=target_id).first()
        if not user:
            return self._raise_invalid_param_error(name, raise_error, default)
        # Security Check
        # If current logged-in user is:
        # 1- Superuser: no validation is required.
        # 2- Staff: can access lower users but not superusers.
        # 3- Neither: a normal user, who can access same-level users only.
        #
        # note: a superuser has the staff flag too.
        if self.request.user.is_staff:
            if self.request.user.is_superuser:
                return user
            elif user.is_superuser:
                # Staff may not fetch superuser accounts.
                return self._raise_object_not_found(name, raise_error, default)
            # Target is staff or a normal user -- allowed for staff callers.
            return user
            # At this point further permission validation is on the caller.
        elif user.is_staff:
            # Normal users may not fetch staff accounts.
            return self._raise_object_not_found(name, raise_error, default)
        return user
def get_decrypted_value(self,
name: str,
raise_error: bool = False,
default: Any = None) -> Any:
"""
Get the value of an encrypted data sent by user. This encrypted value must generated by django.signing.dumps
This value is an int or str, but not big objects or queryset. For queryset it's better to use
get_decrypted_object
:param name: Param name
:param raise_error: Raise error if parameter not found
:param default: default unencrypted value
:return: Any data
:rtype: Any
"""
encrypted_data = self.store.get(name, None)
if encrypted_data is None:
return self._raise_invalid_param_error(name, raise_error, default)
try:
res = signing.loads(encrypted_data)
return res
except Exception:
return self._raise_invalid_param_error(name, raise_error, default)
def get_decrypted_object(self,
value: Dict,
model_class: QuerySet,
raise_error: bool = False,
default: Any = None
) -> Optional[Model]:
"""
Data to decrypt and recover object from Database. You can encrypt PK of the row, then pass it these function
with it's model class to recover the data from DB. If data is not exists, then an exception will raise.
e.g. value={"pk": "abc"}, base_class=User. This will decrypts value first: "abc". Then
calls base_class filter to find the object:
User.objects.filter(**{"pk": decrypted("abc")})
:param value: A dictionary contains a key and a value, { database_filed_name: encrypted_value }
:param model_class: Model class to read data from
:param raise_error: Raise error if data not found, or encryption is not valid
:param default: default value if raise error was set to False
:return: An object inherited from Model
:rtype: Model
"""
key, vl = value.popitem()
try:
decrypted_value = signing.loads(vl)
except Exception:
return self._raise_object_not_found(key, raise_error, default)
if decrypted_value is None:
return None
res = model_class.objects.filter(**{key: vl}).first()
if not res:
return self._raise_object_not_found(key, raise_error, default)
return res
def decrypt_from_request(self,
key_name: str,
name: str,
model_class: QuerySet,
raise_error: bool = False,
default: Model = None
) -> Optional[Model]:
"""
Recover DB data from request.
e.g. pk of User table is encrypted and is sent by user with "user_id" name. So that would be:
decrypt_from_request("pk", "user_id", User)
:param key_name: Model field name to match
:param name: item name in QueryDict in GET or POST method
:param model_class: Model class to find data from
:param raise_error: Raise error if data not found
:param default: Default value to return if raise error is False
:return: an object inherited from Model
"""
x = self.get_string(name, raise_error)
if not x:
return default
res = self.get_decrypted_object({key_name: x}, model_class)
return res
def json(self) -> Dict:
"""
Convert body of request into json. If method is GET or body is not set, then an empty dict will be the
result
:return: Returns body of request as json.
:rtype: Dict
"""
if self.request.method == 'GET' or len(self.request.POST) > 0 or not self.request.body:
return {}
try:
res = json.loads(self.request.body)
return res
except Exception:
return {}
    def handle_upload(self,
                      base_path: str,
                      add_date: bool = False,
                      add_user: bool = False,
                      random_name: bool = False
                      ) -> List[FileUploadResult]:
        """
        Save the files of the current request under ``base_path``.

        Optional per-date and per-user sub-folders can be created, and files
        can be stored under a random name instead of the client-supplied one.

        NOTE(review): when add_user is False the files are written directly
        to ``base_path`` even if add_date is True (``today_path`` is then
        ignored) -- confirm whether that is intended.

        :param base_path: base folder to store data in (created if missing)
        :param add_date: create a sub-folder named after today's date
        :param add_user: create a sub-folder per uploading user (the user
            pk, or "ALL" for anonymous uploads)
        :param random_name: store under a uuid4 name instead of the original
        :return: one FileUploadResult per successfully saved file
        :rtype: List[FileUploadResult]
        """
        # Creating base folders
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        today = str(date.today())
        if add_date:
            today_path = os.path.join(base_path, today)
            if not os.path.exists(today_path):
                os.mkdir(today_path)
        else:
            today_path = base_path
        if add_user:
            if not self.request.user.is_authenticated:
                # Anonymous uploads all share the "ALL" folder.
                user_path = os.path.join(today_path, 'ALL')
            else:
                user_path = os.path.join(today_path, str(self.request.user.pk))
            if not os.path.exists(user_path):
                os.mkdir(user_path)
        else:
            user_path = base_path
        res = []
        # Start to write files
        for f in self.request.FILES:
            try:
                n = str(self.request.FILES.get(f))
                if random_name:
                    t = os.path.join(user_path, str(uuid4()))
                else:
                    t = os.path.join(user_path, n)
                a = codecs.open(t, 'w+b')
                for d in self.request.FILES[f]:
                    # Django UploadedFile iterates the payload in chunks.
                    a.write(d)
                a.close()
                file_size = os.stat(t).st_size
                uid = 0
                if self.request.user.is_authenticated:
                    uid = self.request.user.pk
                res.append(FileUploadResult(uid, today, t, n, file_size))
            except Exception:
                # Best-effort: skip files that fail to save, keep the rest.
                continue
        return res
def resolve_ip_address(self) -> str:
    """Return the requester's IP address, honouring X-Forwarded-For proxies.

    :return: User ip address
    """
    # The first entry in X-Forwarded-For is the original client address.
    forwarded = self.request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return self.request.META.get('REMOTE_ADDR')
ip_address = property(resolve_ip_address)
@staticmethod
def slugify(value: str):
    """Slugify data from request.

    Normalises unicode (NFKC), lower-cases, strips everything except word
    characters / whitespace / hyphens, then joins words with hyphens.

    :param value: Value to slugify. e.g. site title
    :return: Slugify value
    :rtype: str
    """
    normalized = unicodedata.normalize('NFKC', value)
    cleaned = re.sub(r'[^\w\s-]', '', normalized.lower()).strip()
    return re.sub(r'[\s]+', '-', cleaned)
def respond_as_attachment(self,
                          file_path: str,
                          original_filename: str
                          ) -> HttpResponse:
    """
    Respond a file to the user as a download. Suitable for small files only,
    since the whole file is read into memory at once.

    :param file_path: File path to read
    :param original_filename: Original name of the file (drives the headers)
    :return: HttpResponse
    :rtype: HttpResponse
    """
    if original_filename is None:
        original_filename = 'unknown_file'
    # context manager instead of manual open/close (leak-safe on errors)
    with open(str(file_path), 'rb') as fp:
        payload = fp.read()
    response = HttpResponse(payload)
    f_type, encoding = mimetypes.guess_type(original_filename)
    if f_type is None:
        f_type = 'application/octet-stream'
    response['Content-Type'] = f_type
    # length of the data actually sent equals the file size; avoids a second
    # stat() call on an utf-8-encoded path
    response['Content-Length'] = str(len(payload))
    if encoding is not None:
        response['Content-Encoding'] = encoding
    # To inspect details for the below code, see http://greenbytes.de/tech/tc2231/
    user_agent = self.request.META['HTTP_USER_AGENT']
    if u'WebKit' in user_agent:
        # Safari 3.0 and Chrome 2.0 accepts UTF-8 encoded string directly.
        filename_header = 'filename=%s' % original_filename
    elif u'MSIE' in user_agent:
        # IE does not support internationalized filename at all.
        # It can only recognize internationalized URL, so we do the trick via routing rules.
        filename_header = ''
    else:
        # For others like Firefox, we follow RFC2231 (encoding extension in HTTP headers).
        filename_header = 'filename*=UTF-8\'\'%s' % original_filename
    response['Content-Disposition'] = 'attachment; ' + filename_header
    return response
def validate_request(self,
                     methods: Tuple = ('get', 'post'),
                     check_referer: bool = False,
                     auth: bool = False,
                     superuser: bool = False,
                     staff: bool = False,
                     perm: str = ''
                     ) -> None:
    """
    Validate request against method, referer, auth, and permissions.
    E.g. validate_request(staff=True, perm="user.add_user|user.change_user")

    :param methods: Acceptable methods. Not usable for class based views.
    :param check_referer: Reject direct calls (no referer) or foreign referers.
    :param auth: Require an authenticated user (AuthNeedError otherwise).
    :param superuser: Require a superuser (AuthNeedError otherwise).
    :param staff: Require a staff user (AuthNeedError otherwise).
    :param perm: Required permission(s); several may be pipe-separated ( | ),
                 any one of them grants access.
    :return: None
    """
    request = self.request
    if request.method.lower() not in methods:
        raise RequestValidationError(_('Invalid request Method'), ())
    if check_referer:
        referer = request.META.get('HTTP_REFERER')
        if not referer:
            raise RequestValidationError(_('Direct Call is not permitted'), ())
        if request.build_absolute_uri('/') not in referer:
            raise RequestValidationError(_('You can not bypass site structure'), ())
    if auth and not request.user.is_authenticated:
        raise AuthNeedError(request)
    if superuser and not request.user.is_superuser:
        raise AuthNeedError(request)
    if staff and not request.user.is_staff:
        raise AuthNeedError(request)
    # Check for permissions: any single granted permission is enough.
    if perm:
        if not any(request.user.has_perm(p) for p in perm.split('|')):
            raise AuthNeedError(request)
@staticmethod
def response_success(response: Dict = None) -> JsonResponse:
    """
    Send response to client with status code 200 (OK).

    :param response: Dict to serialise as the JSON body; defaults to {}.
    :return: JsonResponse
    :rtype: JsonResponse
    """
    return JsonResponse(response if response is not None else {}, status=200)
@staticmethod
def response_not_found(error_message: str, param_name: str = "") -> JsonResponse:
    """
    Send error_message to the user as JSON with status code 404 (not found).

    :param error_message: Error message to show to user
    :param param_name: name of the parameter that did not exist
    :return: JsonResponse
    :rtype: JsonResponse
    """
    body = {"message": error_message, "param": param_name}
    return JsonResponse(body, status=404)
@staticmethod
def response_error(error_message: str, is_json: bool = True, status_code: int = 500, param_name: str = "") -> \
        Union[JsonResponse, HttpResponse]:
    """
    Send an error response to the client, either as JSON or plain text.

    :param error_message: Error message to send to client
    :param is_json: True if you want to respond with a JSON body
    :param status_code: Custom status code on error
    :param param_name: Parameter name if the error concerns a parameter.
                       Note: only appended to output when is_json is True.
    :return: JsonResponse or HttpResponse
    :rtype: Union[JsonResponse, HttpResponse]
    """
    if is_json:
        # BUG FIX: the JSON branch previously hard-coded status=500 and
        # silently ignored the status_code argument.
        return JsonResponse({"message": error_message, "param": param_name},
                            status=status_code)
    return HttpResponse(error_message, status=status_code)
@staticmethod
def response_redirect(address: str) -> HttpResponseRedirect:
    """
    Send a redirect to the user.

    :param address: Address to redirect to. NOTE: address is not a view name!
    :return: HttpResponseRedirect
    :rtype: HttpResponseRedirect
    """
    # Annotation fix: Union[] with a single member is just that type.
    return redirect(address)
def has_value(self, name: str) -> bool:
    """
    Check whether the request parameter *name* carries a non-empty value.

    :param name: name to check
    :return: True if any value entered for param
    :rtype: bool
    """
    # A non-empty string is truthy, so bool() is equivalent to len(...) > 0.
    return bool(self.get_string(name, default=""))
class SADeleteRequest(SARequest):
    """
    Delete request.

    Generic "delete one object via POST" view helper: subclasses supply
    config() to describe the target model/key/field and auth() to gate
    access; post() performs the lookup and the delete.
    """
    def auth(self, object_to_delete: Type) -> bool:
        """
        Authenticate user.

        :param object_to_delete: Object that is going to delete
        :return: True if current user can delete object
        :rtype: bool
        """
        raise NotImplementedError()
    @staticmethod
    def get_config(class_object: Type, key: str, encrypted: bool, field: str):
        # Convenience builder for the dict shape expected from config().
        return {"class": class_object, "key": key, "encrypted": encrypted, "field": field}
    def config(self) -> Dict:
        """
        Gets a Dict containing the configuration of the class:
        class: Object to read data from
        key: The key name to read from post
        encrypted -> bool: If key value is encrypted by django signing. default is False
        field -> str: model field to check key against. For example: name__iexact
        e.g. {"class": User, "key": "pk", "encrypted": True}

        :return: Dict
        :rtype: Dict
        """
        raise NotImplementedError()
    def deleted_success(self, deleted_item: Type) -> None:
        """
        Hook called after the object has been deleted successfully.

        :param deleted_item: deleted object (already removed from the DB)
        :return: Nothing returns
        """
        pass
    def post(self, request) -> HttpResponse:
        """
        Request to delete object.

        :param request: HttpRequest
        :return: HttpResponse
        :rtype: HttpResponse
        """
        config = self.config()
        # Check and validate config
        if not config:
            raise ValueError("Config is not correct")
        if "class" not in config:
            raise ValueError("Database model object not defined")
        if "key" not in config:
            raise ValueError("Key name is not defined")
        if "encrypted" not in config:
            config["encrypted"] = False
        # The lookup key may arrive signed (django signing) — decode it if so.
        if config["encrypted"]:
            item = self.get_decrypted_value(config["key"])
        else:
            item = self.get_string(config["key"])
        if "field" not in config:
            raise ValueError("Field name is not defined")
        if not item:
            return self.response_error("Item not found to delete")
        # Try to retrieve object
        object_to_delete = config["class"].objects.filter(**{config["field"]: item}).first()
        if not object_to_delete:
            return self.response_error("Object not found", status_code=404)
        try:
            if not self.auth(object_to_delete):
                return self.response_error("Permission Denied", status_code=403)
            object_to_delete.delete()
            # NOTE(review): the hook receives the already-deleted instance.
            self.deleted_success(object_to_delete)
        except Exception:
            return self.response_error("Failed to delete item(s)")
        return self.response_success()
class RequestParamValidator(MiddlewareMixin, SARequest):
    """Django middleware that attaches a SARequest helper to every incoming
    request as ``request.SAR`` so views can use the parameter utilities."""
    def process_request(self, request):
        # Bind the request to this helper and expose the helper on the request.
        self.request = request
        request.SAR = self
|
#!/usr/bin/python3
"""main.py: Read a file and create linked lists of word size containing no repeating words
"""
import string
from linked_list import LinkedList
from node import Node
def main():
    """main: Open paragraph.txt and build one LinkedList per word length,
    each holding the unique (punctuation-stripped, lower-cased) words of
    that length, then print every list sorted.
    """
    with open('paragraph.txt') as file:
        linked_lists = {}
        words = file.read().split()
        for word in words:
            # Strip punctuation and normalise case so duplicates compare equal.
            word = word.translate(str.maketrans('', '', string.punctuation))
            word = word.lower()
            length = len(word)  # hoisted: was recomputed on every access
            if length not in linked_lists:
                linked_lists[length] = LinkedList()
                linked_lists[length].append(Node(word))
            elif not linked_lists[length].is_in(word):
                # Node is only allocated when the word is actually inserted.
                linked_lists[length].append(Node(word))
        for length in linked_lists:
            linked_lists[length].sort()
            print('Words of length {0}: {1}'.format(length, linked_lists[length]))


if __name__ == '__main__':
    # Guard so importing this module no longer runs the script as a side effect.
    main()
|
# Print the multiplication tables for 1 through 9 (ten rows each),
# each table preceded by a dashed separator line.
for tabla in range(1, 10):
    print('-' * 12)
    for factor in range(1, 11):
        print('{} x {:2} = {}'.format(tabla, factor, tabla * factor))
|
import os
import os.path

config_file_name = 'config_file.cfg'
PATH = '.svds/config/config_file.cfg'

# Report whether the config file exists at the expected relative path.
print("true" if os.path.isfile(PATH) else "false")
import bcrypt, sys
import pymongo
from pymongo import MongoClient
from pymongo import errors
import gridfs
class Database(object):
    """Thin wrapper around the MongoDB 'xray' database: movies, shots and
    actors collections plus a GridFS file store."""
    def __init__(self):
        try:
            self.client = MongoClient()
            self.movies = self.client.xray.movies
            self.shots = self.client.xray.shots
            self.actors = self.client.xray.actors
            self.fs = gridfs.GridFS(self.client.xray)
        except errors.ServerSelectionTimeoutError as err:
            # Connection failures are only printed, not re-raised — TODO confirm
            print(err)
    def __del__(self):
        self.client.close()
    def reset(self):
        # Drops all three collections — irreversible.
        self.movies.drop()
        self.actors.drop()
        self.shots.drop()
    # movies
    def movies_find(self, query):
        return self.movies.find(query)
    def movies_findone(self, query):
        # NOTE(review): delegates to find(), returning a cursor rather than a
        # single document as the name suggests; callers rely on .count() — confirm.
        return self.movies.find(query)
    def movies_findall(self):
        return self.movies.find({})
    def movies_insertone(self, movie):
        return self.movies.insert_one(movie)
    def movies_insertone_unique(self, movie):
        # Uniqueness is keyed on (movie_name, video_path).
        if self.movies_findone({
            'movie_name': movie['movie_name'],
            'video_path': movie['video_path'],
        }).count() > 0:
            print('movie with name ' + movie['movie_name'] + ' already exists, not inserting')
            return
        return self.movies.insert_one(movie)
    def movies_createindex(self):
        # Text index enabling full-text search over name/overview/genres.
        return self.movies.create_index([
            ('movie_name', pymongo.TEXT),
            ('overview', pymongo.TEXT),
            ('genres', pymongo.TEXT),
        ])
    # shots
    def shots_find(self, query):
        return self.shots.find(query)
    def shots_findone(self, query):
        # NOTE(review): same cursor-vs-document naming mismatch as movies_findone.
        return self.shots.find(query)
    def shots_findall(self):
        return self.shots.find({})
    def shots_insertone(self, shot):
        return self.shots.insert_one(shot)
    def shots_updateone(self, shot_id, update):
        # Replaces the whole document, not a partial $set update.
        return self.shots.replace_one({'_id': shot_id}, update)
    def shots_insertone_unique(self, shot):
        if self.shots_findone({
            'movie_name': shot['movie_name'],
            'video_path': shot['video_path'],
        }).count() > 0:
            print('shot of ' + shot['movie_name'] + ' already exists, not inserting')
            return
        return self.shots.insert_one(shot)
    # actor
    def actors_find(self, query):
        return self.actors.find(query)
    def actors_findone(self, query):
        # NOTE(review): same cursor-vs-document naming mismatch as movies_findone.
        return self.actors.find(query)
    def actors_findall(self):
        return self.actors.find({})
    def actors_insertone(self, actor):
        return self.actors.insert_one(actor)
    def actors_insertone_unique(self, actor):
        # Uniqueness is checked against the full actor document.
        if self.actors_findone(actor).count() > 0:
            print('actor with name ' + actor['name'] + ' already exists, not inserting')
            return
        return self.actors.insert_one(actor)
    def actors_createindex(self):
        return self.actors.create_index([
            ('name', pymongo.TEXT),
            ('biography', pymongo.TEXT),
        ])
# class DatabaseHelper(object):
# def __init__(self):
# try:
# self.client = MongoClient()
# self.db = self.client.xray
# self.sherlock = self.db.sherlock
# self.lotr = self.db.lotr_bilbo_gandalf
# except errors.ServerSelectionTimeoutError as err:
# print(err)
# def retrieve_sherlock(self):
# return self.sherlock.find({})
# def retrieve_lotr(self):
# return self.lotr.find({})
# def retrieve_tasks(self):
# return self.tasks.find({}, {'_id': 0})
# def retrieve_task_with_title(self, title):
# return self.tasks.find_one({'title': title}, {'_id': 0})
# def retrieve_task_with_id(self, id_):
# return self.tasks.find_one({'id': id_}, {'_id': 0})
# def find_and_update_task(self, task):
# id_ = task['id']
# tasks = self.retrieve_tasks()
# try:
# task_to_change = next(task for task in tasks if task['id'] == id_)
# self._update_task(id_, task, task_to_change)
# except:
# raise ValueError("Task was not updated")
# def _update_task(self, id_, task, task_to_change):
# for key, value in task_to_change.items():
# for k, new_value in task.items():
# if key == k and value != new_value:
# self.tasks.update({'id': id_}, {'$set': {key: new_value}})
# def remove_task(self, task):
# id_ = task['id']
# task_to_remove = self.retrieve_task_with_id(id_)
# if task_to_remove == task:
# self.tasks.remove({'id': id_})
# else:
# raise ValueError("Task was not found!")
# def retrieve_users(self):
# return self.users.find({}, {'_id': 0})
# def retrieve_user_by_username(self, username):
# return self.users.find_one({'username': username}, {'_id': 0})
# class TestDB(DatabaseHelper):
# def __init__(self):
# try:
# self.client = MongoClient()
# self.db = self.client.test
# self.tasks = self.db.tasks
# self.users = self.db.users
# except errors.ServerSelectionTimeoutError as err:
# print(err)
# def create_test_users_to_test_db(self):
# self.create_non_existing_user_to_database('mojo', 'python')
# self.create_non_existing_user_to_database('kojo', 'python')
# def remove_test_users_from_db(self):
# self.users.remove({})
# if __name__ == "__main__":
# database = DatabaseHelper()
# sher = database.retrieve_sherlock()
# print(sher) |
"""
smorest_sfs.modules.codes.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
编码的资源模块
"""
from typing import Any, Dict
from flask.views import MethodView
from smorest_sfs.modules.auth import PERMISSIONS
from smorest_sfs.modules.auth.decorators import doc_login_required, permission_required
from . import blp, models, schemas
@blp.route("/options")
class CodeListView(MethodView):
    @doc_login_required
    @permission_required(PERMISSIONS.CodeQuery)
    @blp.arguments(schemas.TypeCodeSchema, location="query", as_kwargs=True)
    @blp.response(schemas.CodeListSchema)
    def get(self, type_code: str) -> Dict[str, Any]:
        # pylint: disable=unused-argument
        """Return every code option of the requested type, rendered as a tree."""
        opts_schema = schemas.CodeOptsSchema()
        tree = models.Code.get_tree(
            models.db.session,
            json=True,
            json_fields=opts_schema.dump,
            query=lambda q: q.filter_by(type_code=type_code),
        )
        return {"data": tree}
|
#!/usr/bin/env python3
import sys
sys.path.insert(0, '..')
import random
import earthquake.catalog as catalog
import models.model as model
import gaModel.parallelGAModelP_AVR as parallelGAModelP_AVR
import time
import numpy as np
def createRealModelSCwithP_AVR(
        year, region, qntYears=5, withMag=True, save=True):
    """Build the 'real' observation model for *region*/*year* from the SC
    catalog combined with the P_AVR risk map, optionally saving it to disk.

    :param year: target year of the model
    :param region: region name; '../../params/<region>.txt' must exist
    :param qntYears: kept for interface compatibility (unused here)
    :param withMag: kept for interface compatibility (unused here)
    :param save: when truthy, write the model under ../../Zona3/realSCwithP_AVR/
    """
    definicao = model.loadModelDefinition(
        '../../params/' + region + '.txt')
    catalog_ = catalog.readFromFile('../../data/SC-catalog.dat')
    catalog_ = catalog.filter(catalog_, definicao)
    observation = model.newModel(definicao)
    riskMap = catalog.readFromFileP_AVR(
        '../../data/P_AVR-MAP_T30-TTL_TTL_TTL_TOTAL_I45_PS.csv')
    riskMap = catalog.filterP_AVR(riskMap, definicao)
    observation = model.addFromCatalogP_AVR(
        observation, catalog_, riskMap, year)
    # Idiomatic truthiness check instead of the non-idiomatic `save == True`.
    if save:
        model.saveModelToFile(
            observation,
            '../../Zona3/realSCwithP_AVR/' + region + 'real' + "_" + str(year) + '.txt',
            real=True)
def execParallelGA(year, region, qntYears=5, times=10):
    """
    Creates the GAModel with SC catalog with parallel and distributed island model.

    Loads *qntYears* observation models starting at *year*, averages their
    bins, then runs the GA *times* times for the year that follows the window.
    """
    observations = list()
    means = list()
    for i in range(qntYears):
        observation = model.loadModelDB(region+'jmaData', year+i)
        aux = model.loadModelFromFile('../../Zona3/realSCwithP_AVR/'
            + region + 'real' + "_" + str(year + i) + '.txt')
        # Shift Poisson values by one — presumably to avoid zero rates; confirm.
        aux.values4poisson = [x+1 for x in aux.values4poisson]
        observation.values4poisson = aux.values4poisson
        del aux
        observation.bins = observation.bins.tolist()
        observations.append(observation)
        means.append(observation.bins)
    mean = np.mean(means, axis=0)
    for i in range(times):
        model_=model.model()
        model_ = parallelGAModelP_AVR.gaModel(
            NGEN=10,
            CXPB=0.9,
            MUTPB=0.1,
            modelOmega=observations,
            year=year +
            qntYears,
            region=region,
            mean=mean,
            n_aval=50)
        model_.executionNumber=i
        model_.year=year+qntYears
        model_.modelName = region+'parallelGA'
        # NOTE(review): loaded but unused since the save below is commented out.
        parallelGA_ = model.loadModelDB(region+'parallelGA', year)
        # if (parallelGA_.definitions==None):
        #     model.saveModelDB(model_)
def callParallelGAwithP_AVR(region):
    """
    Wrapper that generates the parallel GAModel for *region*.
    It covers the years 2000 to 2005, and the models are from 2005 to 2010.

    BUG FIX: the original called execParallelGAwithP_AVR, which is not
    defined anywhere in this module; the implementation is execParallelGA.
    """
    for year in range(2000, 2006):
        execParallelGA(year, region)
def main():
    """
    Creates the environment needed to generate, in parallel and distributed,
    GAModel and List Model with SC catalog for the regions EastJapan, Kanto,
    Kansai and Tohoku, from 2000 to 2005 (producing models for 2005 to 2010).

    BUG FIX: the original called callParallelReducedGAwithP_AVR, which does
    not exist in this module; the defined wrapper is callParallelGAwithP_AVR
    (which already iterates over the years, so the unused year locals are gone).
    """
    for region in ('Kanto', 'EastJapan', 'Tohoku', 'Kansai'):
        callParallelGAwithP_AVR(region)


if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
import sqlalchemy
#TODO {time:[food1, food2...]}
class FoodType():
    """Closed set of food-category labels. Values are Russian display strings
    and are compared directly against raw strings elsewhere in this script."""
    milk = 'молочные продукты'
    fruit = 'фрукты'
    meat = 'мясо'
    fish = 'рыба'
    vegetable = 'овощи'
    nut = 'орехи'
    fastfood = 'фастфуд'
    water = 'вода'
    drink = 'напитки'
    alco = 'алкогольные напитки'
    oil = 'масло'
    spice = 'специи'
class Food():
    def __init__(self, name, food_type, proteins, fats, carbs, calories=-1):
        """
        Create food with params:
        name = name of the product
        food_type = type of product, one of the FoodType constants
                    (milk, fruit, meat, fish, vegetable, nut, fastfood,
                    water, drink, alco, oil, spice)
        proteins, fats, carbs = macronutrients per 100 g
        calories = per 100 g; if omitted (-1) it is derived from the macros
                   (4 kcal/g protein, 9 kcal/g fat, 4 kcal/g carbs)
        """
        if calories == -1:
            self.calories = proteins * 4 + fats * 9 + carbs * 4
        else:
            self.calories = calories
        self.name = name
        self.food_type = food_type
        self.proteins = proteins
        self.fats = fats
        self.carbs = carbs

    def energy(self, mass=100):
        """
        Return a list [proteins, fats, carbs, calories] contained in *mass*
        grams of the product.

        BUG FIX: the original returned the lazy ``map`` object instead of the
        list its docstring promised; a map is single-use and cannot be indexed
        or compared, so it is now materialised as a list.
        """
        per_100g = [self.proteins, self.fats, self.carbs, self.calories]
        return [value / 100 * mass for value in per_100g]
class Recipe():
    """Placeholder: a recipe composed of Food items (not yet implemented)."""
    pass
class Event():
    """Base class for diary events (behaviour not yet implemented)."""
    def __init__(self):
        pass
class EatEvent(Event):
    """Placeholder: an Event representing a meal (not yet implemented)."""
    pass
class DailyRation():
    """Placeholder: one day's food intake (not yet implemented)."""
    pass
def main():
    """Smoke test: build a Food instance and show how food_type compares
    against both the FoodType constant and its raw string value."""
    jogurt = Food('Йогурт Danone Натуральный', FoodType.milk, 3.5, 2.5, 4.7)
    print(jogurt.food_type, jogurt.food_type == FoodType.milk, jogurt.food_type == 'молочные продукты')
if __name__ == '__main__':
    main()
# -*- coding:gb18030 -*-
# -*- coding:utf-8 -*-
from appium import webdriver
from time import sleep, ctime
def appium_start():
    """Start an Appium session against a local Android 4.4.2 emulator and
    return the remote webdriver for the news app com.ss.android.article.news."""
    desired_caps = {"platformName": "Android",
                    "deviceName": "127.0.0.1:62001",
                    "platformVersion": "4.4.2",
                    # package name of the app under test (original comment was mojibake)
                    "appPackage": "com.ss.android.article.news",
                    # the apk's launcher Activity
                    "appActivity": "com.ss.android.article.news.activity.SplashActivity",
                    # type non-ASCII text reliably, then restore the device keyboard
                    "unicodeKeyboard": True,
                    "resetKeyboard": True}
    return webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
|
def getFunc(multiplier):
    """Return a function that scales every element of a list by *multiplier*.

    *multiplier* is captured by the closure at creation time (no late binding).
    """
    def _scale(values):
        return list(map(lambda v: multiplier * v, values))
    return _scale
# Build one multiplier function per factor 0..3 and apply each to the list.
funcs = [getFunc(multiplier) for multiplier in range(4)]
l = [10, 20, 30, 40, 50]
for f in funcs:
    print(f(l))
# Read two clock times (hours minutes per line) and print their difference.
h1, m1 = map(int, input().split())
h2, m2 = map(int, input().split())
# BUG FIX: taking abs() of hours and minutes independently gives a wrong
# elapsed time whenever the minutes need to borrow from the hours
# (e.g. 2:10 vs 1:50 printed "1 40" instead of "0 20"). Work in total
# minutes so the borrow happens correctly.
total = abs((h1 * 60 + m1) - (h2 * 60 + m2))
print(total // 60, total % 60, end=" ")
|
# _*_coding:utf-8_*_
# Author : Leo
# Time : 17/12/2018
import requests
import json
import sys
class Translation:
    """Translate a query between English and Chinese via Baidu's mobile API."""

    def __init__(self, query):
        # Mobile endpoint — returns a simpler payload than the desktop site.
        self.post_url = "https://fanyi.baidu.com/basetrans"
        # Pretend to be an iPhone so the mobile endpoint accepts the request.
        self.header = {
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1"}
        self.query_string = query

    def get_language(self):
        """Detect the query's language and derive the translation target.

        Returns a (query_language, result_language) tuple.
        """
        detection = requests.post(url="https://fanyi.baidu.com/langdetect", data={"query": self.query_string},
                                  headers=self.header).content.decode()
        query_language = json.loads(detection)["lan"]
        # English translates to Chinese; anything else translates to English.
        result_language = "zh" if query_language == "en" else "en"
        return query_language, result_language

    def init_data(self, laguage):
        """Build the POST payload from the (source, target) language pair."""
        return {
            'from': laguage[0],
            'to': laguage[1],
            "query": self.query_string,
        }

    def parse_url(self, data, header):
        """POST the payload and print the first translation result."""
        reply = requests.post(url=self.post_url, data=data, headers=header)
        payload = json.loads(reply.content.decode())
        print("The result:", payload["trans"][0]["dst"])

    def run(self):
        self.parse_url(self.init_data(self.get_language()), self.header)
if __name__ == '__main__':
    # The query is taken from the first command-line argument.
    try:
        Translation(sys.argv[1]).run()
    except IndexError:
        print("Please input a query after .py file")
        print("etc. python baidu_translation.py 'How are you'")
|
# University project of a TechFarm where I should use binary search to find cows in a specific list and know if it was milked or not and also if it is on the list:
cows = [100, 101, 102, 103, 104, 105]  # kept sorted — required for binary search
# Milking status per cow number; looked up after the search below.
milkedCow = {100: 'Milked', 101: 'Not Milked', 102: 'Milked', 103: 'Milked', 104: 'Milked', 105: 'Milked'}
searchCow = int(input('Type the number of the cow you want to find: '))
def buscarVaca(cow_numbers, item):
    """Binary search for *item* in the sorted list *cow_numbers*.

    Fixes: the first parameter shadowed the builtin ``list``; the found-flag
    variable is replaced by an early return.

    :param cow_numbers: sorted list of cow numbers
    :param item: cow number to look for
    :return: a human-readable message saying whether the cow was found
    """
    prim = 0
    ult = len(cow_numbers) - 1
    while prim <= ult:
        meio = (prim + ult) // 2
        if cow_numbers[meio] == item:
            # NOTE: the leading space is kept from the original message.
            return f' The cow {item} was found'
        if item < cow_numbers[meio]:
            ult = meio - 1
        else:
            prim = meio + 1
    return f'The cow {item} was not found'
# Report whether the cow exists, then its milking status (None when unknown).
print(buscarVaca(cows, searchCow))
print(f'{milkedCow.get(searchCow)}')
|
import urllib2, json, re
from bs4 import BeautifulSoup
def weather_by_name(city):
    """requests the weather data for city from the openweathermap api

    Args:
        city: A string containing a valid city name.
    Returns:
        Json of weather data.
    """
    key = "4a2a82ee8a8dec7dc93d7bf8e048a6bc"
    url = "http://api.openweathermap.org/data/2.5/weather?q=%s&APPID=%s" % (city, key)
    return json.loads(urllib2.urlopen(url).read())
def weather_by_zip(zip_code):
    """requests the weather data at zip_code from the openweathermap api

    Args:
        zip_code: A string or int containing a valid zip code.
    Returns:
        Json of weather data.
    """
    key = "4a2a82ee8a8dec7dc93d7bf8e048a6bc"
    url = "http://api.openweathermap.org/data/2.5/weather?zip=%s,us&APPID=%s" % (zip_code, key)
    return json.loads(urllib2.urlopen(url).read())
def kelvin_to_f(kel):
    """Convert a temperature from kelvin to degrees Fahrenheit."""
    celsius = kel - 273.15
    return celsius * 1.8 + 32.0
def get_temp(city):
    """Gets the temperature in city.

    Args:
        city: A string containing a valid city name.
    Returns:
        The current temperature in city as a "%.2f" Fahrenheit string,
        or the API's error message when the lookup fails.
    """
    compact = re.sub(r'\s', '', city)
    data = weather_by_name(compact)
    if "message" in data:
        return data["message"]
    return "%.2f" % kelvin_to_f(data["main"]["temp"])
|
# Python 2 script: extract unique e-mail addresses from a PDF file.
import re, sys
import pdf
# Require exactly one argument: the PDF file to scan.
if len(sys.argv) != 2:
    print 'Syntax: ' + sys.argv[0] + ' filename'
    exit()
# Case-insensitive matching is achieved by upper-casing the text below.
emailregex = r'\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b'
pdffilename = sys.argv[1]
m = pdf.convert_pdf_to_txt(pdffilename)
res = m.split()
#for r in res:
#    print r
emails = set()  # set de-duplicates addresses found several times
for line in res:
    m = re.findall(emailregex, line.upper())
    if m:
        for i in m:
            emails.add(i)
for email in emails:
    print email.lower()
|
import sqlite3, hashlib, binascii, os, stdiomask, time, sys, pprint
from tabulate import tabulate
from datetime import datetime
from colorama import init, Fore, Back, Style
def sqlConnect():
    """Open (or create) mail.db and return a (cursor, connection) pair."""
    connection = sqlite3.connect("mail.db", timeout=10)
    return connection.cursor(), connection
def hashPassword(password):
    """Hash *password* with PBKDF2-HMAC-SHA512 and a fresh random salt.

    The result is the hex salt (64 chars) followed by the hex derived key
    (128 chars): 192 ASCII characters in total, ready to store in the DB.
    """
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    derived = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 100000)
    return (salt + binascii.hexlify(derived)).decode('ascii')
def verifyPassword(storedPassword, providedPassword):
    """Check a plaintext password against a stored salt+hash string.

    The first 64 characters of *storedPassword* are the hex salt; the rest
    is the hex PBKDF2-HMAC-SHA512 derived key to compare against.
    """
    salt, expected = storedPassword[:64], storedPassword[64:]
    candidate = hashlib.pbkdf2_hmac('sha512',
                                    providedPassword.encode('utf-8'),
                                    salt.encode('ascii'),
                                    100000)
    return binascii.hexlify(candidate).decode('ascii') == expected
def register():
    """Interactively create a new account, then hand off to auth() to sign in.

    Recurses back into itself on invalid input (taken username, empty fields).
    """
    cursor, conn = sqlConnect()
    username = input('\nEnter your username' + Fore.RED + '*' + Style.RESET_ALL + ': ')
    cursor.execute('''
        SELECT
            user_id
        FROM
            users
        WHERE
            username = ?
        ''', (username, ))
    userId = cursor.fetchall()
    # 'list' is rejected — presumably reserved elsewhere in the app; confirm.
    if len(userId) != 0 or username.lower() == 'list':
        print(Fore.CYAN + '\nThis user already exists. Please choose another username')
        register()
    else:
        password = stdiomask.getpass(prompt='Enter your password' + Fore.RED + '*' + Style.RESET_ALL + ': ')
        password_again = stdiomask.getpass(prompt='Repeat your password' + Fore.RED + '*' + Style.RESET_ALL + ': ')
        while password != password_again:
            print(Fore.RED + Style.BRIGHT + 'Passwords do not match!')
            password = stdiomask.getpass(prompt='Enter your password' + Fore.RED + '*' + Style.RESET_ALL + ': ')
            password_again = stdiomask.getpass(prompt='Repeat your password' + Fore.RED + '*' + Style.RESET_ALL + ': ')
        # Only the salted PBKDF2 hash is stored, never the plain password.
        hashedPwd = hashPassword(password)
        if username != '' and password != '':
            name = input('Enter your name' + Fore.RED + '*' + Style.RESET_ALL + ': ')
            query = '''
                INSERT INTO users
                    (username, password, name)
                VALUES
                    (?,?,?)
                '''
            data = (username, hashedPwd, name)
            cursor.execute(query, data)
            conn.commit()
            print(Fore.CYAN + '\nNice to meet you, ' + name + '! You have been successfuly registered and can now sign in.')
            auth()
        else:
            print(Fore.CYAN + '\nNor username neither password can be empty!')
            register()
def auth():
    """Prompt for credentials and sign the user in, or offer registration
    when the account does not exist. Retries itself on wrong credentials."""
    cursor, conn = sqlConnect()
    username = input('\nEnter your username: ')
    password = stdiomask.getpass(prompt='Enter your password: ')
    cursor.execute('''
        SELECT
            user_id, password, name
        FROM
            users
        WHERE
            username = ?
        ''', (username, ))  # check the account exists in the database
    userInfo = cursor.fetchone()
    if str(userInfo) != 'None':
        userId = str(userInfo[0])
        storedPwd = str(userInfo[1])
        name = str(userInfo[2])
        # Compare the typed password against the stored salted hash.
        if verifyPassword(storedPwd, password) == True:
            print(Fore.CYAN + Style.BRIGHT + '\nWelcome back, '+ name + '!')
            actionChoice(userId)
        else:
            print(Fore.WHITE + Back.RED + 'Wrong details!')
            auth()
    else:
        choice = input('\nThis account does not exist! Want to create one? (y\\n): ')
        if choice == 'y':
            register()
        else:
            print(Fore.CYAN + 'Returning...')
            time.sleep(1)
            auth()
def actionChoice(userId):
    """Show the main menu and dispatch to the chosen mailbox action.

    Re-prompts (recursively) after an invalid choice.
    """
    menu = [['1', 'Send a letter'], ['2', 'View Inbox'],
            ['3', 'View Outbox'], ['4', 'View Bin']]
    print(tabulate(menu, ["#", "Action"], tablefmt="fancy_grid"))
    action = input('What would you like to do? Choose a number: ')
    handlers = {
        '1': lambda: sendLetter(userId, None),
        '2': lambda: viewInbox(userId),
        '3': lambda: viewOutbox(userId),
        '4': lambda: viewBin(userId),
    }
    handler = handlers.get(action)
    if handler is not None:
        handler()
    else:
        print(Style.BRIGHT + Fore.RED + 'Invalid choice!')
        time.sleep(1)
        actionChoice(userId)
def viewInbox(userId):
    """Print the user's inbox and let them open, reply to or remove a letter.

    Unread letters (read = 0) are highlighted in bright cyan. Entering '0'
    returns to the main menu.
    """
    cursor, conn = sqlConnect()
    cursor.execute('''
        SELECT
            letters.id, users.name, letters.date, letters.theme, letters.read
        FROM
            letters, users
        WHERE
            receiver_id = ?
        AND
            users.user_id = letters.sender_id
        AND
            letters.removed_receiver = '0'
        ORDER BY
            id
        DESC
        ''', (userId,))
    results = cursor.fetchall()
    # Rows come back as tuples; convert to lists so we can restyle in place.
    updatedRes = []
    for element in results:
        element = list(element)
        updatedRes.append(element)
    for element in updatedRes:
        element[4] = str(element[4])
        if element[4] == '0':
            # Unread: colour sender, date and theme.
            element[1] = Fore.CYAN + Style.BRIGHT + element[1] + Style.RESET_ALL
            element[2] = Fore.CYAN + Style.BRIGHT + element[2] + Style.RESET_ALL
            element[3] = Fore.CYAN + Style.BRIGHT + element[3] + Style.RESET_ALL
        # Drop the read flag — it is not shown in the table.
        del element[4]
    print('\n This is your inbox folder:')
    print(tabulate(updatedRes, headers=['№', 'Sent By', 'Date', 'Theme'], tablefmt='fancy_grid'))
    letterId = input('Which letter would you like to interact with?: ')
    if letterId == '0':
        print(Fore.CYAN + 'Returning to menu...')
        time.sleep(1)
        actionChoice(userId)
    else:
        readLetter(userId, letterId, 'sender')
        choice = input('\nWhat would you like to do? (1 - Reply, 2 - Remove, 3 - Read another letter): ')
        if choice == '1':
            # Reply: address the new letter to the original sender.
            cursor.execute('''
                SELECT
                    sender_id
                FROM
                    letters
                WHERE
                    id = ? ''', (letterId, ))
            senderId = cursor.fetchone()[0]
            sendLetter(userId, senderId)
        elif choice == '2':
            removeLetter(userId, letterId, who='receiver')
        elif choice == '3':
            viewInbox(userId)
        else:
            actionChoice(userId)
def viewOutbox(userId):
    """Print the letters the user has sent and let them open or remove one.

    Entering '0' returns to the main menu.
    """
    cursor, conn = sqlConnect()
    # NOTE(review): the join resolves users.name via letters.sender_id, so the
    # 'Sent To' column actually shows the sender's own name — confirm intent.
    cursor.execute('''
        SELECT
            letters.id, users.name, letters.date, letters.theme
        FROM
            letters, users
        WHERE
            sender_id = ?
        AND
            users.user_id = letters.sender_id
        AND
            letters.removed_sender = 0
        ORDER BY
            id
        DESC
        ''', (userId,))
    results = cursor.fetchall()
    print(tabulate(results, headers=['№', 'Sent To', 'Date', 'Theme'], tablefmt='fancy_grid'))
    letterId = input('Which letter would you like to interact with?: ')
    if letterId == '0':
        print(Fore.CYAN + 'Returning to menu...')
        time.sleep(1)
        actionChoice(userId)
    else:
        readLetter(userId, letterId, 'sender')
        choice = input('What would you like to do? (1 - Remove, 2 - Read another letter): ')
        if choice == '1':
            removeLetter(userId, letterId, 'sender')
        elif choice == '2':
            viewOutbox(userId)
        elif choice == '0':
            print('Returning to menu...')
            time.sleep(1)
            actionChoice(userId)
        else:
            print(Fore.CYAN + 'Invalid choice! ' + Style.RESET_ALL + 'Returning...')
            time.sleep(1)
            viewOutbox(userId)
def viewBin(userId):
    """Show letters the user has moved to the bin and let them re-read one.

    Entering '0' returns to the main menu; letters belonging to other users
    are rejected.
    """
    cursor, conn = sqlConnect()
    cursor.execute('''
        SELECT
            letters.id, users.name, letters.date, letters.theme
        FROM
            letters, users
        WHERE
            receiver_id = ?
        AND
            users.user_id = letters.sender_id
        AND
            (letters.removed_receiver = '1' OR letters.removed_sender = '1')
        ORDER BY
            id
        DESC
        ''', (userId,))
    results = cursor.fetchall()
    if not results:
        print(Fore.CYAN + 'Your bin is empty! Returning to menu...')
        time.sleep(1)
        actionChoice(userId)
    else:
        # NOTE(review): headers list five columns but the query returns four,
        # so the 'Sent To' header shifts the remaining columns — confirm layout.
        print(tabulate(results, headers=['№', 'Sent By', 'Sent To', 'Date', 'Theme'], tablefmt='fancy_grid'))
        letterId = input('What letter would you like to interact with?: ')
        if letterId == '0':
            print(Fore.CYAN + 'Returning to menu...')
            time.sleep(1)
            actionChoice(userId)
        else:
            # Make sure the letter actually belongs to this user first.
            cursor.execute('''
                SELECT
                    receiver_id
                FROM
                    letters
                WHERE
                    id = ?
                ''', (letterId, ))
            _recv = cursor.fetchone()
            if _recv == None:
                print(Fore.RED + 'Do not remove other people\'s letters! ' + Style.RESET_ALL + 'Returning...')
                time.sleep(1)
                viewBin(userId)
            else:
                if int(_recv[0]) == int(userId):
                    readLetter(userId, letterId, 'sender')
                    action = input('What would you like to do? (1 - Back to bin, 0 - Back to menu: ')
                    if action == '1':
                        viewBin(userId)
                    elif action == '0':
                        print(Fore.CYAN + 'Returning to menu...')
                        time.sleep(1)
                        actionChoice(userId)
                    else:
                        print(Fore.RED + 'Invalid choice! ' + Style.RESET_ALL + 'Returning...')
                        time.sleep(1)
                        viewBin(userId)
                else:
                    print(Fore.RED + 'Do not remove other people\'s letters!' + Style.RESET_ALL + 'Returning...')
                    time.sleep(1)
                    actionChoice(userId)
def readLetter(userId, letterId, who):
    """Display letter `letterId` and mark it read.

    Parameters:
        userId: id of the user requesting the letter.
        letterId: primary key into the `letters` table.
        who: 'receiver' or 'sender' — selects which ownership column is
             checked against userId. NOTE: the check is intentionally
             crossed ('receiver' checks sender_id, 'sender' checks
             receiver_id), mirroring exactly how the existing callers
             (viewOutbox / viewBin) invoke it.

    BUG FIX: the original fell through to the display section when the
    ownership check failed (or after the not-found branch returned from
    actionChoice), crashing with UnboundLocalError on the letter fields.
    This version prints the warning and returns instead.
    """
    cursor, conn = sqlConnect()
    # Column whose value must match userId for access to be granted.
    # `owner_column` is one of two internal constants, so the format()
    # below cannot inject SQL.
    owner_column = 'sender_id' if who == 'receiver' else 'receiver_id'
    cursor.execute('''
        SELECT
            {}
        FROM
            letters
        WHERE
            id = ?
    '''.format(owner_column), (letterId, ))
    owner = cursor.fetchone()
    if owner is None or int(owner[0]) != int(userId):
        print(Fore.RED + 'Do not read other people\'s letters! ' + Style.RESET_ALL + 'Returning to menu...')
        time.sleep(1)
        actionChoice(userId)
        return
    cursor.execute('''
        SELECT
            letters.data, letters.date, letters.theme, sender_id, receiver_id
        FROM
            letters
        WHERE
            id = ?
    ''', (letterId, ))
    letter = cursor.fetchone()
    letterData = str(letter[0])
    letterDate = str(letter[1])
    letterTheme = str(letter[2])
    senderId = str(letter[3])
    receiverId = str(letter[4])

    def _userName(uid):
        # Resolve a user id to its (name,) row.
        cursor.execute('''
            SELECT
                name
            FROM
                users
            WHERE
                user_id = ?
        ''', (uid, ))
        return cursor.fetchone()

    letterFrom = _userName(senderId)
    letterTo = _userName(receiverId)
    # Flag the letter as read before displaying it.
    cursor.execute('''
        UPDATE
            letters
        SET
            read = '1'
        WHERE
            id = ?
    ''', (letterId, ))
    conn.commit()
    print('\n==================================================')
    print(Style.BRIGHT + 'Date: ' + Style.RESET_ALL + letterDate)
    print(Style.BRIGHT + 'From: ' + Style.RESET_ALL + letterFrom[0])
    print(Style.BRIGHT + 'To: ' + Style.RESET_ALL + letterTo[0])
    print(Style.BRIGHT + 'Subject: ' + Fore.YELLOW + letterTheme)
    print('\n' + Style.BRIGHT + letterData + '\n')
    print('==================================================')
def sendLetter(userId, receiverId):
    """Compose and send a letter from `userId`.

    receiverId: a known receiver id, or None to prompt the user for one
    or more comma-separated usernames ('list' prints the directory).
    """
    cursor, conn = sqlConnect()
    recvList = []
    if receiverId is None:
        receiver = input('Who do you want to send a letter to? (type \'list\' to see users list): ')
        if receiver == 'list':
            cursor.execute('''
                SELECT
                    username, name
                FROM
                    users
            ''')
            usersList = cursor.fetchall()
            print(tabulate(usersList, headers=['Username', 'Name'], tablefmt='fancy_grid'))
            sendLetter(userId, receiverId)
        else:
            for recv in receiver.replace(' ', '').split(','):
                cursor.execute('''
                    SELECT
                        user_id
                    FROM
                        users
                    WHERE
                        username = ?
                ''', (recv,))
                row = cursor.fetchone()
                # BUG FIX: read the id from the row directly. The original
                # parsed str(fetchone()) with chained .replace() calls,
                # which breaks for any id containing parentheses or commas
                # and cannot distinguish a user literally named "None".
                if row is None:
                    print('There is no user', recv)
                    sendLetter(userId, None)
                else:
                    recvList.append(str(row[0]))
    else:
        recvList.append(receiverId)
    if recvList:
        theme = input('Enter letter topic: ')
        letter = input('Write your letter: ')
        letterDate = datetime.today().strftime('%B %d %H:%M')
        choice = input('Please confirm sending this letter (y\\n): ')
        if choice == 'y':
            for receiverId in recvList:
                cursor.execute('''
                    INSERT INTO letters
                        (sender_id, receiver_id, date, theme, data)
                    VALUES
                        (?,?,?,?,?)
                ''', (userId, receiverId, letterDate, theme, letter))
                conn.commit()
            print(Fore.GREEN + 'You have successfully sent the letter. ' + Style.RESET_ALL + 'Returning to menu...')
            time.sleep(1)
            actionChoice(userId)
        else:
            print(Fore.CYAN + 'Sending has been cancelled. ' + Style.RESET_ALL + 'Returning to menu...')
            time.sleep(1)
            actionChoice(userId)
    else:
        actionChoice(userId)
def removeLetter(userId, letterId, who):
    """Soft-delete letter `letterId` for one side of the conversation.

    who == 'receiver' sets removed_receiver (ownership checked against
    receiver_id); who == 'sender' sets removed_sender (checked against
    sender_id). On any failure the user is sent back to the inbox.
    """
    cursor, conn = sqlConnect()
    # One query fetches both ownership columns (the original made two
    # round trips for the same row).
    cursor.execute('''
        SELECT
            receiver_id, sender_id
        FROM
            letters
        WHERE
            id = ?
    ''', (letterId, ))
    row = cursor.fetchone()
    if row is None:
        # Unknown letter id: nothing to remove.
        print(Fore.RED + 'Do not remove other people\'s letters! Returning...')
        time.sleep(1)
        viewInbox(userId)
    else:
        receiver_id, sender_id = row
        if who == 'receiver':
            if int(receiver_id) == int(userId):
                _markRemoved(cursor, conn, letterId, 'removed_receiver')
                print(Fore.CYAN + 'You have successfully removed the letter! ' + Style.RESET_ALL + 'Returning to menu...')
                time.sleep(1)
                actionChoice(userId)
            else:
                print(Fore.RED + 'Do not remove other people\'s letters! Returning...')
                time.sleep(1)
                viewInbox(userId)
        elif who == 'sender':
            if int(sender_id) == int(userId):
                _markRemoved(cursor, conn, letterId, 'removed_sender')
                print(Fore.CYAN + 'You have successfully removed the letter! ' + Style.RESET_ALL + 'Returning to menu...')
                time.sleep(1)
                actionChoice(userId)
            else:
                print(Fore.RED + 'Do not remove other people\'s letters! ' + Style.RESET_ALL + 'Returning...')
                time.sleep(1)
                viewInbox(userId)
        else:
            # Unknown `who` value: treat as a permission failure.
            print(Fore.RED + 'Do not remove other people\'s letters! ' + Style.RESET_ALL + 'Returning...')
            time.sleep(1)
            viewInbox(userId)
def _markRemoved(cursor, conn, letterId, column):
    # Helper: set the given removed_* flag. `column` is one of two
    # internal constants, so the format() cannot inject SQL.
    cursor.execute('''
        UPDATE
            letters
        SET
            {} = '1'
        WHERE
            id = ?
    '''.format(column), (letterId, ))
    conn.commit()
def main():
    """Entry point: show the banner, then route to login or registration."""
    init(autoreset=True)  # colorama: reset colours after every print
    os.system('title Console E-mail Client v.0.1')
    banner_rows = (
        '+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+',
        '|C|o|n|s|o|l|e| |M|e|s|s|e|n|g|e|r| |v|.|0|.|1|',
        '+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+',
    )
    for row in banner_rows:
        print(Back.BLUE + Style.BRIGHT + row.center(120, ' '), end='')
    print(Style.BRIGHT + Fore.YELLOW + '\n\nWelcome! ' + Style.RESET_ALL + '\n(You can return to menu at any time by entering 0)')
    action = input('\nWhat would you like to do? (1 - Login, 2 - Register): ')
    if action == '1':
        auth()
    elif action == '2':
        register()
    else:
        print('Invalid choice! Closing the app in 5 seconds...')
        time.sleep(5)
if __name__ == "__main__":
    # Launch the interactive client only when executed as a script.
    main()
|
import json
import time
import urllib.request
import datetime
import sys
import logging
# Enable DEBUG-level logging globally (must run before KafkaProducer is used).
logging.basicConfig(level=logging.DEBUG)
from kafka import KafkaProducer
# Run this program
# python3 analyze.py 2016-12-01
# python3 analyze.py 2016-12-01 2016-12-31
# python3 analyze.py 2018-01-01 2018-12-31
# Example call of the API
# https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia/all-access/2015/07/01
# Endpoint template; the {} slot receives a YYYY/MM/DD date path.
API_URL = "https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia/all-access/{}"
# Kafka bootstrap brokers (Kubernetes StatefulSet DNS names).
kafka_servers = ['kafka-0.kafka-svc:9093', 'kafka-1.kafka-svc:9093', 'kafka-2.kafka-svc:9093']
def download_and_publish(str_date_begin, str_date_end=None):
    """Fetch daily top-article pageview stats and publish them to Kafka.

    str_date_begin: first (oldest) day to fetch, ISO format 'YYYY-MM-DD'.
    str_date_end: last (newest) day, same format; defaults to the begin
        date, i.e. a single day. Days are walked from newest to oldest.
    """
    start_date = str2date(str_date_begin)
    if str_date_end is None:
        date = start_date
    else:
        date = str2date(str_date_end)
    producer = KafkaProducer(bootstrap_servers=kafka_servers)
    nb_requests = 0
    while date >= start_date:
        str_day = date2str(date)
        try:
            print("REQUEST", API_URL.format(str_day.replace('-', '/')))
            response = urllib.request.urlopen(API_URL.format(str_day.replace('-', '/')))
            data = json.loads(response.read().decode())
            for article in data['items'][0]['articles']:
                # Tag each record with the day it belongs to.
                article['date'] = str_day
                producer.send("articles", json.dumps(article).encode())
            print("{} Produced {} article records".format(time.time(), len(data['items'][0]['articles'])))
        except Exception as e:
            # A failed day is reported and skipped; the walk continues.
            print("Unable to load data for day: {}".format(str_day))
            print(e)
        nb_requests += 1
        # Crude rate limiting: pause after every 100 API calls.
        if nb_requests == 100:
            nb_requests = 0
            time.sleep(2)
        date -= datetime.timedelta(1)  # step one day back
    # BUG FIX: flush buffered records before returning. KafkaProducer.send
    # only enqueues; without a flush the tail of the batch can be silently
    # dropped when the process exits.
    producer.flush()
def str2date(stringDate):
    """Parse an ISO 'YYYY-MM-DD' string into a (midnight) datetime."""
    iso_day_format = "%Y-%m-%d"
    return datetime.datetime.strptime(stringDate, iso_day_format)
def date2str(date):
    """Format a date/datetime back to its 'YYYY-MM-DD' day string."""
    return "{:%Y-%m-%d}".format(date)
if __name__ == "__main__":
    # CLI: analyze.py <start_date> [end_date]  (dates in ISO format)
    if len(sys.argv) == 1:
        print("Correct usage:")
        print("{} <start_date> [end_date]".format(sys.argv[0]))
        print("<date> follows isoformat (ex: 2018-01-30)")
        print("(start_date & end_date included)")
        print("(start_date < end_date)")
        # FIX: use sys.exit; the builtin exit() is injected by the `site`
        # module for interactive use and may be absent (e.g. under -S).
        sys.exit(1)
    if len(sys.argv) == 2:
        download_and_publish(sys.argv[1])
    elif len(sys.argv) == 3:
        download_and_publish(sys.argv[1], sys.argv[2])
|
"""
back.app.models.json
This module aims at converting models in a json format.
"""
class JsonModel(object):
    """
    Mixin that serialises a table-backed model to a plain dict.

    Methods
    -------
    as_dict()
        Return a {column_name: value} mapping built from ``__table__``.
    """
    def as_dict(self):
        """Map every declared column name to the instance's current value."""
        serialised = {}
        for column in self.__table__.columns:
            serialised[column.name] = getattr(self, column.name)
        return serialised
# Read n and print an n-by-n grid: '0' above the anti-diagonal,
# 1 on the anti-diagonal, and 2 below it.
n = int(input())
grid = []
for row in range(n):
    cells = list('0' * n)
    cells[n - row - 1] = 1
    # Every column past the anti-diagonal gets a 2.
    for col in range(n - row, n):
        cells[col] = 2
    grid.append(cells)
for cells in grid:
    # Each value is followed by a single space; one line per row.
    print(' '.join(str(cell) for cell in cells) + ' ')
class MyException(Exception):
    """Raised when an account balance falls below the allowed minimum."""
    def __init__(self, arg):
        # BUG FIX: forward the message to Exception so str(exc) shows it.
        # The original skipped super().__init__, which made print(exc)
        # produce an empty string in the caller's except block.
        super().__init__(arg)
        self.msg = arg
def check(dict):
    """Print every account and raise MyException if any balance is < 2000."""
    # NOTE: the parameter shadows the builtin `dict`; the name is kept to
    # preserve the public signature.
    for account, balance in dict.items():
        print("Name = {} balance = {}".format(account, balance))
        if balance < 2000:
            raise MyException("balance amount less in {}".format(account))
bank={"raj":500,"sanju":4500}
try:
check(bank)
except MyException as me:
print(me)
|
import traceback
import asyncio
import http3
import json
import sys
import os
class RetryError(Exception):
    """Raised when a retried coroutine keeps failing on every attempt."""
class retry():
    """Decorator for async methods: retry up to 10 times before failing.

    Wraps a coroutine function; each call is attempted at most 10 times.
    If every attempt raises, a RetryError carrying the last formatted
    traceback is raised.
    """
    def __init__(self, f):
        self.f = f
    async def _handle_function(self, func, selfF, *args, **kwargs):
        tb = 'None found if you get this in the stack + error section contact codemonkey51 on discord at coderman51#8112 or by email at pypi@codemonkey51.dev'
        # BUG FIX: range(0, 11) made 11 attempts while the error message
        # advertises 10; also narrow the bare except so KeyboardInterrupt
        # and SystemExit are not swallowed.
        for _attempt in range(10):
            try:
                return await func(selfF, *args, **kwargs)
            except Exception:
                # Remember the most recent failure for the final report.
                tb = traceback.format_exc()
        raise RetryError(func.__name__+' was retried 10 times but got an error each time\n \033[91mstack + error: '+tb)
    async def __call__(self, selfF, *args, **kwargs):
        return await self._handle_function(self.f, selfF, *args, **kwargs)
#requests_async = http3.AsyncClient()
import requests_async
def DeprecationWarning(text):
    """Print *text* as a bold red deprecation notice.

    NOTE: intentionally shadows the builtin warning class of the same
    name; kept because module code calls it by this name.
    """
    message = f"\033[1;31mDeprecationWarning: {text}\033[0;0m"
    print(message)
class Client():
    """Blocking facade over AsyncClient.

    Every method drives the matching AsyncClient coroutine to completion
    with asyncio.run, so synchronous code can use the replit database.
    """
    def __init__(self, url=None):
        # BUG FIX: resolve the environment variable lazily. Evaluating
        # os.environ['REPLIT_DB_URL'] in the default expression made
        # importing this module raise KeyError whenever the variable was
        # unset, even for callers that always pass an explicit url.
        # (The dead `oldadd` code-string attribute was removed.)
        if url is None:
            url = os.environ['REPLIT_DB_URL']
        self.asyncclient = AsyncClient(url)
    def add(self, **args):
        """Deprecated alias for set()."""
        return asyncio.run(self.asyncclient.add(**args))
    def set(self, **args):
        """Store each keyword argument as a key/value pair."""
        return asyncio.run(self.asyncclient.set(**args))
    def add_dict(self, add):
        """Deprecated alias for set_dict()."""
        return asyncio.run(self.asyncclient.add_dict(add))
    def set_dict(self, add):
        """Store every key/value pair of the given dict."""
        return asyncio.run(self.asyncclient.set_dict(add))
    def remove(self, *args):
        """Delete each named key."""
        asyncio.run(self.asyncclient.remove(*args))
    def remove_list(self, remove):
        return asyncio.run(self.asyncclient.remove_list(remove))
    def view(self, view):
        """Return the value stored under `view`."""
        return asyncio.run(self.asyncclient.view(view))
    def view_multiple(self, *args):
        return asyncio.run(self.asyncclient.view_multiple(*args))
    def view_multiple_list(self, view):
        return asyncio.run(self.asyncclient.view_multiple_list(view))
    def list(self, item):
        """Return all keys starting with `item`."""
        return asyncio.run(self.asyncclient.list(item))
    def list_multiple(self, *args):
        return asyncio.run(self.asyncclient.list_multiple(*args))
    def list_multiple_list(self, args):
        return asyncio.run(self.asyncclient.list_multiple_list(args))
    def transfer(self, url):
        """Copy every key/value pair from the database at `url` into this one."""
        return asyncio.run(self.asyncclient.transfer(url))
    @property
    def all(self):
        """All keys in the database."""
        return asyncio.run(self.asyncclient.all)
    @property
    def all_dict(self):
        """The whole database as a dict."""
        return asyncio.run(self.asyncclient.all_dict)
    @property
    def wipe(self):
        """Delete every key (accessing the property triggers the wipe)."""
        asyncio.run(self.asyncclient.wipe)
class AsyncClient():
    """Async client for the repl.it key/value database HTTP API."""
    def __init__(self, url=None):
        # BUG FIX: lazy env lookup (see Client.__init__ for rationale).
        if url is None:
            url = os.environ['REPLIT_DB_URL']
        self.url = url
    async def add(self, **args):
        """Deprecated alias for set()."""
        DeprecationWarning(
            "add() is deprecated and will be removed in a later version use set() instead"
        )
        await self.set(**args)
    async def set(self, **args):
        """Store each keyword argument; non-string values are JSON-encoded."""
        for key in list(args.keys()):
            value = args.get(key)
            if isinstance(value, str):
                payload = value
            else:
                payload = str(json.dumps(value))
            await requests_async.post(self.url, data={key: payload})
    async def add_dict(self, add):
        """Deprecated alias for set_dict()."""
        DeprecationWarning(
            "add_dict() is deprecated and will be removed in a later version use set_dict() instead"
        )
        await self.set_dict(add)
    async def set_dict(self, set):
        # BUG FIX: the original looped over the keys and called
        # self.set(**set) once per key, re-posting the entire dict
        # len(set) times. One call writes every pair exactly once.
        await self.set(**set)
    async def remove(self, *args):
        """Delete each of the given keys."""
        for key in args:
            await requests_async.delete(self.url + '/' + key)
    async def remove_list(self, remove):
        return await self.remove(*remove)
    async def view(self, view):
        """Return the value for `view`, JSON-decoded when possible."""
        try:
            result = await self._view_json(view)
        except Exception:
            # Not valid JSON (or decode failed): fall back to the raw text.
            result = await self._view_str(view)
        return result
    async def view_multiple(self, *args):
        values = {}
        for key in args:
            values.update({key: await self.view(key)})
        return values
    async def view_multiple_list(self, view):
        return await self.view_multiple(*view)
    async def _view_str(self, view):
        request = await requests_async.get(self.url + '/' + view)
        return request.text
    async def _view_str_multiple(self, *args):
        values = {}
        for key in args:
            values.update({key: await self._view_str(key)})
        return values
    async def _view_str_multiple_list(self, view):
        return await self._view_str_multiple(*view)
    async def _view_json(self, view):
        request = await requests_async.get(self.url + '/' + view)
        return json.loads(request.text)
    async def _view_json_multiple(self, *args):
        values = {}
        for key in args:
            values.update({key: await self._view_json(key)})
        return values
    async def _view_json_multiple_list(self, view):
        return await self._view_json_multiple(*view)
    async def list(self, item):
        """Return every key starting with `item`."""
        request = await requests_async.get(self.url + '?prefix=' + item)
        return request.text.splitlines()
    async def list_multiple(self, *args):
        data = {}
        for key in args:
            data.update({key: await self.list(key)})
        return data
    async def list_multiple_list(self, args):
        return await self.list_multiple(*args)
    async def transfer(self, url):
        """Copy all key/value pairs from the database at `url` into this one."""
        tclient = AsyncClient(url.strip())
        await self.set_dict(await tclient.all_dict)
    @property
    async def all(self):
        """All keys in the database."""
        return await self.list('')
    @property
    async def all_dict(self):
        """The whole database as a dict."""
        return await self.view_multiple(*await self.list(''))
    @property
    async def wipe(self):
        """Delete every key."""
        for key in await self.all:
            await self.remove(key)
|
# NOTE(review): Python 2 syntax (print statement). `example` is presumably a
# compiled extension module exposing fact() -- confirm before porting.
import example
print example.fact(3)
import pytest
from pytest import mark
from time import sleep
@mark.flaky(reruns=3)
def test_snippets(browser):
    """The toolbar toggles the snippet popup; snippet clicks insert formcode."""
    browser.visit('/snippets')
    browser.wait_for_js_variable('initFormSnippets')
    browser.execute_script("""
        initFormSnippets(document.querySelector('.formcode-snippets'));
    """)
    assert browser.is_element_present_by_css('.formcode-snippets')
    assert len(browser.find_by_css('.formcode-toolbar')) == 1
    assert not browser.find_by_css('.formcode-snippet')
    # Clicking the toolbar element toggles the snippet list on and off.
    browser.find_by_css('.formcode-toolbar-element').click()
    assert browser.find_by_css('.formcode-snippet')
    browser.find_by_css('.formcode-toolbar-element').click()
    assert not browser.find_by_css('.formcode-snippet')
    # Choosing a snippet inserts the matching formcode and closes the list.
    browser.find_by_css('.formcode-toolbar-element').click()
    browser.find_by_css('.formcode-snippet-optional').click()
    assert '= ___' in browser.find_by_css('textarea').value
    assert not browser.find_by_css('.formcode-snippet')
    browser.find_by_css('.formcode-toolbar-element').click()
    browser.find_by_css('.formcode-snippet-required').click()
    assert '*= ___' in browser.find_by_css('textarea').value
def test_registry(browser):
    """Subscribers receive parsed code; unsubscribing stops further updates."""
    browser.visit('/registry')
    browser.wait_for_js_variable('formcodeWatcherRegistry')
    browser.execute_script("""
        var watcher = formcodeWatcherRegistry.new("test");
        var unsubscribe = watcher.subscribe(function(value) {
            window.code = value;
        });
        watcher.update("Label = ...");
        unsubscribe();
        watcher.update("Label = ___");
    """)
    browser.wait_for_js_variable('window.code')
    code = browser.evaluate_script("window.code")
    # Only the first update (sent while still subscribed) is observed.
    assert code == [{'human_id': 'Label', 'type': 'textarea', 'id': 'label'}]
def test_formcode_format(browser):
    """Clicking a snippet inserts the field reference into the target textarea."""
    # Todo: This test is flaky since mai 2020
    browser.visit('/formcode-format')
    browser.wait_for_js_variable('initFormcodeFormat')
    browser.execute_script("""
        var watcher = formcodeWatcherRegistry.new("test");
        var el = document.querySelector('#container');
        el.setAttribute('data-watcher', 'test');
        el.setAttribute('data-target', 'textarea');
        initFormcodeFormat(el);
        watcher.update('Textfield = ___');
    """)
    browser.find_by_css('.formcode-toolbar-element').click()
    browser.find_by_css('.formcode-snippet').click()
    assert browser.find_by_css('textarea').value == '[Textfield]'
def test_formcode_select_empty_checkbox(browser):
    """Checkbox selection mirrors the checked field names into the textarea."""
    browser.visit('/formcode-select')
    browser.wait_for_js_variable('initFormcodeSelect')
    browser.driver.execute_script("""
        var watcher = formcodeWatcherRegistry.new();
        var el = document.querySelector('#container');
        initFormcodeSelect(
            el, watcher, 'textarea', 'checkbox', ['text', 'textarea']);
        watcher.update(arguments[0]);
    """, 'A = ___\nB = ...\nC = *.png')
    # Only the two text/textarea fields are offered (C is filtered out).
    assert len(browser.find_by_css('.formcode-select input')) == 2
    browser.find_by_css('.formcode-select input')[0].click()
    browser.find_by_css('.formcode-select input')[1].click()
    assert browser.find_by_css('textarea').value == "A\nB"
    # Toggling checkboxes adds/removes the corresponding lines.
    browser.find_by_css('.formcode-select input')[1].click()
    assert browser.find_by_css('textarea').value == "A"
    browser.find_by_css('.formcode-select input')[1].click()
    assert browser.find_by_css('textarea').value == "A\nB"
    browser.find_by_css('.formcode-select input')[0].click()
    assert browser.find_by_css('textarea').value == "B"
    browser.find_by_css('.formcode-select input')[1].click()
    assert browser.find_by_css('textarea').value == ""
def test_formcode_select_empty_radio(browser):
    """Radio selection keeps exactly one field name in the textarea."""
    browser.visit('/formcode-select')
    browser.wait_for_js_variable('initFormcodeSelect')
    browser.driver.execute_script("""
        var watcher = formcodeWatcherRegistry.new();
        var el = document.querySelector('#container');
        initFormcodeSelect(
            el, watcher, 'textarea', 'radio', ['text', 'textarea']);
        watcher.update(arguments[0]);
    """, 'A = ___\nB = ...\nC = *.png')
    # Only the two text/textarea fields are offered (C is filtered out).
    assert len(browser.find_by_css('.formcode-select input')) == 2
    browser.find_by_css('.formcode-select input')[0].click()
    assert browser.find_by_css('textarea').value == "A"
    browser.find_by_css('.formcode-select input')[1].click()
    assert browser.find_by_css('textarea').value == "B"
    browser.find_by_css('.formcode-select input')[0].click()
    assert browser.find_by_css('textarea').value == "A"
    # Re-clicking the selected radio leaves the value unchanged.
    browser.find_by_css('.formcode-select input')[0].click()
    assert browser.find_by_css('textarea').value == "A"
@pytest.mark.parametrize('input_type', ('checkbox', 'radio'))
def test_formcode_select_prefilled(browser, input_type):
    """A pre-existing textarea value pre-checks the matching input."""
    browser.visit('/formcode-select')
    browser.wait_for_js_variable('initFormcodeSelect')
    browser.driver.execute_script(f"""
        var watcher = formcodeWatcherRegistry.new();
        var el = document.querySelector('#container');
        document.querySelector('textarea').value='A'
        initFormcodeSelect(
            el, watcher, 'textarea', '{input_type}', ['text', 'textarea']);
        watcher.update(arguments[0]);
    """, 'A = ___\nB = ...\nC = *.png')
    assert len(browser.find_by_css('.formcode-select input:checked')) == 1
@pytest.mark.parametrize('input_type', ('checkbox', 'radio'))
def test_formcode_keep_selection(browser, input_type):
    """The selection is re-applied whenever an update re-introduces the field."""
    browser.visit('/formcode-select')
    browser.wait_for_js_variable('initFormcodeSelect')
    browser.driver.execute_script(f"""
        var watcher = document.watcher = formcodeWatcherRegistry.new();
        var el = document.querySelector('#container');
        document.querySelector('textarea').value='A'
        initFormcodeSelect(
            el, watcher, 'textarea', '{input_type}', ['text', 'textarea']);
        watcher.update('B = ___');
    """)
    assert len(browser.find_by_css('.formcode-select input:checked')) == 0
    browser.driver.execute_script("document.watcher.update('A = ___');")
    sleep(0.1)  # give the DOM a moment to re-render before querying
    assert len(browser.find_by_css('.formcode-select input:checked')) == 1
    browser.driver.execute_script("document.watcher.update('C = ___');")
    sleep(0.1)
    assert len(browser.find_by_css('.formcode-select input:checked')) == 0
def test_field_errors_should_not_yield_updates(browser):
    """An invalid formcode update leaves the last good snippet list untouched."""
    browser.visit('/formcode-format')
    browser.wait_for_js_variable('initFormcodeFormat')
    browser.execute_script("""
        document.watcher = formcodeWatcherRegistry.new();
        var el = document.querySelector('#container')
        initFormcodeFormat(el, document.watcher, 'textarea');
        document.watcher.update('Textfield = ___');
    """)
    browser.find_by_css('.formcode-toolbar-element').click()
    assert len(browser.find_by_css('.formcode-snippet')) == 1
    assert browser.find_by_css('.formcode-snippet').text == "Textfield"
    # A syntactically broken update must not clear the snippets...
    browser.execute_script("document.watcher.update('Test =-= !invalid');")
    assert len(browser.find_by_css('.formcode-snippet')) == 1
    assert browser.find_by_css('.formcode-snippet').text == "Textfield"
    # ...but the next valid update replaces them.
    browser.execute_script("document.watcher.update('Fixed = ___');")
    assert len(browser.find_by_css('.formcode-snippet')) == 1
    assert browser.find_by_css('.formcode-snippet').text == "Fixed"
|
from library.imapclient.imapclient import IMAPClient
from app.email.model import EmailModel
from app.core import settings
import email
import os
import smtplib
import time
class EmailController:
    '''
    Email controller
    @todo: create mark message as read function
    NOTE(review): this class is Python 2 code (print statements and
    `except X, e` syntax); it will not run under Python 3 as-is.
    '''
    client = None
    ''' IMAP client library'''
    model = None
    ''' Email model'''
    offline = True
    ''' Is IMAP connected '''
    def __init__(self,controller,host,port,user,passw,ssl=True):
        '''
        Initialize EmailController -- works with imap
        @param controller: parent core L{Controller}
        @type controller: L{Controller}
        @param host: IMAP host address
        @type host: String
        @param port: IMAP port
        @type port: L{int}
        @param user: IMAP username
        @type user: String
        @param passw: IMAP password
        @type passw: String
        @param ssl: Use IMAP through SSL?
        @type ssl: Boolean
        '''
        self.controller = controller
        self.host = host
        self.port = port
        self.user = user
        self.passw = passw
        self.ssl = ssl
        try:
            self.client = IMAPClient(host,port,True,ssl)
            self.client.login(user,passw)
            self.offline = False
        except:
            # NOTE(review): bare except also hides programming errors;
            # any failure just degrades the controller to offline mode.
            print "Couldn't connect to IMAP server: ",host,port,user,passw,"ssl=",ssl
            self.offline = True
        self.model = EmailModel(self,settings.DB_PATH)
    def sendMessage(self,send_from,send_to,mimemessage):
        '''
        Send a message through smtp
        @param send_from: email of sender
        @type send_from: String
        @param send_to: email of reciever
        @type send_to: String
        @param mimemessage: messagedata
        @param mimemessage: MIMEMessage
        '''
        smtp = smtplib.SMTP_SSL(settings.SMTP_HOST,settings.SMTP_PORT)
        smtp.login(settings.IMAP_USER,settings.IMAP_PASS)
        try:
            smtp.sendmail(send_from, send_to, mimemessage.as_string())
            #return True
        except smtplib.SMTPException, e:
            # NOTE(review): if settings.SMTP_PORT is an int this string
            # concatenation raises TypeError -- confirm the setting's type.
            # Also, raising here skips smtp.close() below.
            raise Exception('SMTP to '+settings.SMTP_HOST+':'+settings.SMTP_PORT+' failed.')
            #return False
        '''try:
            self.emailmodel.updateStatus(
        except Exception, e:
            print e'''
        smtp.close()
    def addNewEmails(self):
        '''
        Add UNSEEN emails in IMAP to DB
        '''
        self.client.select_folder("INBOX")
        uids = self.client.search("UNSEEN")
        for uid in uids:
            # Fetch the raw RFC822 source and parse it into a Message.
            msg = self.client.fetch([uid],['RFC822'])
            message = email.message_from_string(msg[uid]['RFC822'])
            mid = self._addMessage(message)
    def _addMessage(self,message):
        '''
        analyze email message & add to database
        @param message: the email message data
        @type message: Message
        @return: new message id
        @rtype: L{int}
        '''
        messageid = message.get('Message-ID')
        subject = message.get('Subject')
        body = ''
        date = message.get('Date')
        metajson = ''
        attachments = []
        replyto = message.get('Reply-to')
        if message.get('From') is not None:
            fromp = message.get('From').split('\n')
        else:
            fromp = []
        if message.get('To') is not None:
            to = message.get('To').split('\n')
        else:
            to = []
        if message.is_multipart():
            # Walk at most two levels of MIME nesting: application/json
            # parts become metadata, other non-text parts are saved as
            # attachments, and text parts are appended to the body.
            payload = message.get_payload()
            for part in payload:
                if part.is_multipart():
                    payload2 = part.get_payload()
                    for part2 in payload2:
                        maintype = part2.get_content_maintype()
                        subtype = part2.get_content_subtype()
                        if maintype == "application" and subtype == "json":
                            filen = part2.get_filename()
                            metajson = str(part2.get_payload(decode=True))
                        elif maintype != "text":
                            filen = part2.get_filename()
                            if filen is not None:
                                fpath = self._saveFile(part2.get_payload(decode=True),'text','attachments/',filen)
                                attachments.append(fpath)
                            else:
                                print "odd message: ",maintype,subtype
                        else:
                            body += part2.get_payload()
                else:
                    maintype = part.get_content_maintype()
                    subtype = part.get_content_subtype()
                    if maintype == "application" and subtype == "json":
                        filen = part.get_filename()
                        metajson = str(part.get_payload(decode=True))
                    elif maintype != "text":
                        filen = part.get_filename()
                        if filen is not None:
                            fpath = self._saveFile(part.get_payload(decode=True),'text','attachments/',filen)
                            attachments.append(fpath)
                        else:
                            print "odd message: ",maintype,subtype
                    else:
                        body += part.get_payload()
        else:
            body = str(message.get_payload())
        if metajson != '':
            # JSON metadata present: resolve the template/templateset ids.
            evmailcontroller = self.controller.getEvmailController()
            setname,name,version,messagesetid = evmailcontroller.processEvmail(metajson)
            self.templatesetmodel = self.controller.getTemplatesetController().model
            tsid = self.templatesetmodel.hasTemplateset(setname,version)
            tid = self.templatesetmodel.hasTemplate(setname,name,version)
        else:
            tsid = ''
            tid = ''
            messagesetid = messageid
        datet = email.utils.parsedate(date)
        date = time.strftime('%Y-%m-%d %H:%M:%S ',datet)
        mid = self.model.addMessage(tsid,tid,subject,body,date,to,fromp,replyto,messageid,messagesetid,metajson,attachments)
        return mid
        #for uid in uids:
        #    self.client.fetch(uid,['UID','BODY','TO','SUBJECT','FROM'])
    def _saveFile(self,payload,loadtype='text',prefix="",filename=""):
        '''
        Save a file attachment
        @param payload: mime payload
        @param loadtype: payload type (text or binary)
        @type loadtype: String
        @param prefix: File prefix
        @type prefix: String
        @param filename: original filename
        @type filename: String
        @return: filepath within files directory,
        @rtype: String
        NOTE(review): `prefix` is accepted but never used in the body.
        '''
        if filename is not None and filename.find('.') != -1:
            filesplit = filename.rpartition('.')
            ftype = "."+filesplit[2]
            path = settings.FILES_PATH+filesplit[0]
        else:
            ftype = ""
            path = settings.FILES_PATH+filename
        counter = ""
        delim = ''
        # Probe for a free "<name>-<counter><ext>" slot on disk.
        while os.path.isfile(path+delim+str(counter)+ftype):
            if counter == "":
                counter = 1
            else:
                counter += 1
            delim = '-'
        print path+delim+str(counter)+ftype
        if loadtype == 'binary':
            openstr = 'wb'
        else:
            openstr = 'w'
        fp = open(path+delim+str(counter)+ftype,openstr)
        fp.write(payload)
        fp.close()
        # NOTE(review): the file is written to path+delim+counter+ftype but
        # only counter+ftype (no basename/delimiter) is returned -- confirm
        # that callers expect this.
        return str(counter)+ftype
    def isoffline(self,count=0):
        # Retry the IMAP connection up to three times by re-running
        # __init__, then report the final offline state.
        if count == 3 and self.offline is True:
            return True
        elif self.offline is False:
            return False
        else:
            self.__init__(self.controller,self.host,self.port,self.user,self.passw,self.ssl)
            return self.isoffline(count+1)
if __name__ == "__main__":
    # BUG(review): `emailcontroller` is undefined -- the class is named
    # EmailController and requires (controller, host, port, user, passw);
    # running this module directly raises NameError.
    emailcontroller()
|
import dyspatch
def test_version():
    """The package exposes a dotted version string of plausible length."""
    version = dyspatch.__version__
    assert "." in version
    assert len(version) >= 5
|
from django.contrib import admin
from .models import Event
class EventAdmin(admin.ModelAdmin):
    # Columns shown on the Event changelist page.
    list_display = ['name', 'is_published', 'start_date', 'end_date', 'location']
    # Hide date_created from the edit form.
    exclude = ('date_created',)
admin.site.register(Event, EventAdmin)
|
# Read n numbers and compare the sums at even vs odd (0-based) positions.
n = int(input())
position_sums = [0, 0]  # index 0: even positions, index 1: odd positions
for index in range(n):
    value = int(input())
    position_sums[index % 2] += value
even_sum, odd_sum = position_sums
if odd_sum == even_sum:
    print("Yes")
    print("Sum = {0}".format(odd_sum))
else:
    print("No")
    print("Diff = {0}".format(abs(odd_sum - even_sum)))
|
import yaml
import pandas as pd
import os
from autumn.projects.covid_19.vaccine_optimisation.vaccine_opti import initialise_opti_object
from autumn.settings import BASE_PATH
def load_decision_vars(file_name):
    """Read the first 17 values of the first row of an optimal-plan CSV."""
    file_path = os.path.join(
        BASE_PATH, "apps", "covid_19", "vaccine_optimisation", "optimal_plans", file_name
    )
    plan_frame = pd.read_csv(file_path, sep=",", header=None)
    first_row = plan_frame.iloc[0]
    return list(first_row)[:17]
def write_scenario_yml_file(country, decision_vars, sc_start_index=None):
    """
    Create a yml file for a scenario associated with a given decision vector
    """
    country_folder_name = country.replace("-", "_")
    opti_object = initialise_opti_object(country)
    # uniform scenario: equal 1/8 weights for all 16 groups, keeping the
    # original vector's final component
    uniform_vars = [1.0 / 8.0 for _ in range(16)] + [decision_vars[-1]]
    # elderly scenario: weight concentrated on the last two groups of each
    # half of the vector
    elderly_vars = 2 * ([0.0 for _ in range(6)] + [0.5, 0.5]) + [decision_vars[-1]]
    sc_decision_vars = [
        uniform_vars,
        elderly_vars,
        decision_vars,
    ]
    for i, _vars in enumerate(sc_decision_vars):
        # NOTE(review): sc_start_index defaults to None, in which case this
        # addition raises TypeError -- callers must always pass it.
        sc_index = sc_start_index + i
        sc_params = opti_object.scenario_func(_vars)
        sc_params["parent"] = f"apps/covid_19/regions/{country_folder_name}/params/default.yml"
        param_file_path = os.path.join(
            BASE_PATH,
            "apps",
            "covid_19",
            "regions",
            country_folder_name,
            "params",
            f"scenario-{sc_index}.yml",
        )
        with open(param_file_path, "w") as f:
            yaml.dump(sc_params, f)
def write_optimised_scenario():
    """Dump the optimised Malaysia plan as scenario yml files."""
    country = "malaysia"
    plan_file = "malaysia_mono_4mai2021.csv"
    first_scenario_index = 1
    optimised_vars = load_decision_vars(plan_file)
    write_scenario_yml_file(country, optimised_vars, first_scenario_index)
|
class GeneralizeStorageLocationException(Exception):
    """Raised when a storage location cannot be generalized."""
class UninitializedStorageLocationException(Exception):
    """Raised when a storage location is used before being initialized."""
|
"""
Auth provider that authenticates against django.contrib.auth.
This is currently half-assed and assumes that django.settings.configure() has
already been called by the time that DjangoAuth.authenticate gets called, and
that the database is set up correctly, etc.
By default, this only authenticates users who are is_staff=True, but you can
override that by subclassing and overriding user_has_access().
"""
from zope.interface import implements
from buildbot.status.web import auth
class DjangoAuth(auth.AuthBase):
    # Buildbot discovers auth providers via the IAuth zope interface.
    implements(auth.IAuth)
    def user_has_access(self, user):
        # Policy hook: override in a subclass to change who may log in.
        return user.is_staff
    def authenticate(self, username, password):
        # Imported inside the method so Django settings can be configured
        # before the models module is first loaded.
        from django.contrib.auth.models import User
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Unknown username: treat as an ordinary failed login.
            return False
        return user.check_password(password) and self.user_has_access(user)
|
from django.urls import path, include
from . import views
urlpatterns = [
    # Site landing page.
    path('', views.index, name='index'),
    # Delegate /listings/* to the listings app's urlconf.
    path('listings/', include('listings.urls')),
    path('send_mail/', views.send_email, name='send_mail'),
]
from django.contrib.auth.models import User
from django import forms
class UserForm(forms.ModelForm):
    """Registration form with a password-confirmation field."""
    # Field labels are user-facing strings (Chinese UI) and must not change.
    username = forms.CharField(label='帳號')
    password = forms.CharField(label='密碼', widget=forms.PasswordInput)
    password2 = forms.CharField(label='確認密碼', widget=forms.PasswordInput)
    first_name = forms.CharField(label='暱稱')
    class Meta:
        model = User
        fields = ('username', 'password', 'password2', 'first_name')
    def clean_password2(self):
        """Validate that both password entries match; return the confirmation."""
        password = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password2')
        if password!=password2:
            raise forms.ValidationError(' 密碼與確認密碼錯誤')
        return password2
import numpy as np
import random
import copy
import util
import itertools
class Simple_Cycles():
    """Experiments on simple-cycle graphs: runs the INR and ISS iterative
    algorithms from many initializations and records the fixed points found."""
    # the epsilon value we use (smoothing term in votes_evaluation)
    epsilon = 0.01
    # runs the iterative algorithm for this many iterations
    iterations = 2000
    # the threshold for the convergence limit = 10^-7
    convergence_threshold = 0.0000001
def votes_evaluation(self, pro, con):
return pro/(pro + con + self.epsilon)
    def test_with_uniform_initialization(self, attack_rel_init, votes_init, filename1, filename2):
        """Run INR and ISS from a grid of initializations under every node
        relabelling, and write the unique fixed points per labelling to file."""
        # the set of initial values we want to iterate from
        initial_values_set = util.get_initial_set(0.2, len(votes_init))
        # the original (identity) permutation
        original_permutation = np.array(range(len(votes_init)))
        # all possible permutations, used as different labellings
        labellings = list(itertools.permutations(original_permutation))
        attack_rel = []
        votes = []
        for label in labellings:
            # stores the results for INR
            # Of form [unique fixed points, initializations, number of times it converges to that solution]
            inr_results = []
            # stores the results for ISS
            iss_results = []
            # NOTE(review): attack_rel/votes alias the *_init arrays, so the
            # in-place permutations below mutate the caller's inputs and
            # compound across labellings -- confirm this is intended.
            attack_rel = attack_rel_init
            label = np.array(label)
            # modify the attack_rel and the votes according to new labelling
            attack_rel[original_permutation,:] = attack_rel[label,:]
            attack_rel[:, original_permutation] = attack_rel[:,label]
            votes = votes_init
            votes[original_permutation] = votes[label]
            for i in range(len(initial_values_set)):
                #initial_values = np.array([ 0.4366013 , 0.57195905, 0.16146849 , 0.8727271])
                # finds the converged values for this initialization
                conv_INR = self.run_INR(initial_values_set[i], attack_rel, votes, label)
                conv_ISS = self.run_ISS(initial_values_set[i], attack_rel, votes, label)
                # the first value is directly appended
                if(i == 0):
                    inr_results.append([conv_INR, initial_values_set[i], 1])
                    iss_results.append([conv_ISS, initial_values_set[i], 1])
                else:
                    # for the others, merge depending on whether the fixed
                    # point has already been found
                    inr_results = self.add_to_existing(inr_results, conv_INR, initial_values_set[i])
                    iss_results = self.add_to_existing(iss_results, conv_ISS, initial_values_set[i])
            # Write the results for the labelling to the file
            util.write_to_file(filename1, label, inr_results)
            util.write_to_file(filename2, label, iss_results)
def add_to_existing(self, result, fixed_point, initialization):
# result is of form [unique fixed points, initilization, frequency]
for i in result:
# the convergence threshold is decresed so has to not have multiple same fixed points
if (np.absolute(np.subtract(i[0], fixed_point)) < self.convergence_threshold*10).prod():
i[2] = i[2] + 1
return result
result.append([fixed_point, initialization, 1])
return result
def run_ISS(self, initial_values, attack_relations, votes, labelling):
# runs the ISS iterative algorithm
save_iterations = np.zeros(shape = (self.iterations,len(votes)))
save_iterations [0,:] = initial_values
i = 0
condition = 0
while (condition == 0 and i < self.iterations-1):
current_iter = copy.deepcopy(save_iterations[i,:])
i += 1
for j in range(len(votes)):
# the iteration rule
tau = self.votes_evaluation(votes[j][0],votes[j][1])
attacking_set = attack_relations[:,j]
attacking_pos = np.where(attacking_set)
temp1 = np.subtract(np.ones(len(votes)),current_iter)
temp2 = np.take(temp1,attacking_pos)
current_iter[j] = tau*np.prod(temp2)
save_iterations[i,:] = current_iter
# to store the total absolute change in value after every iteration
diff1 = np.subtract(save_iterations[i-1,:],save_iterations[i,:])
# the convergence condition, delta for each variable should be less than the convergence_threshold)
condition = (np.absolute(diff1) < self.convergence_threshold).prod()
if(i<self.iterations-1):
return save_iterations[i,:]
else:
return np.zeros(len(votes))
def run_INR(self, initial_values, attack_relations, votes, labelling):
# runs the INR iterative algorithm
save_iterations = np.zeros(shape = (self.iterations,len(votes)))
save_iterations [0,:] = initial_values
i = 0
condition = 0
while (condition == 0 and i < self.iterations-1):
# stores the value of the variables during the last iteration
current_iter = copy.deepcopy(save_iterations[i,:])
# the current_iter_1 is used to store the function f, part of the iter rule
current_iter_1 = copy.deepcopy(save_iterations[i,:])
i += 1
for j in range(len(votes)):
# the iteration rule
tau = self.votes_evaluation(votes[j][0],votes[j][1])
attacking_set = attack_relations[:,j]
attacking_pos = np.where(attacking_set)
temp1 = np.subtract(np.ones(len(votes)),current_iter)
temp2 = np.take(temp1,attacking_pos)
current_iter_1[j] = tau*np.prod(temp2)
# 2nd part of iter rule
if(len(votes) == 4):
part_2 = util.jacobian_inverse_4_cycle(current_iter, votes, labelling)
else:
part_2 = util.jacobian_inverse_5_cycle(current_iter, votes, labelling)
# if jacobian doesn't exist
if (len(part_2) == 1):
return np.zeros(len(votes))
part_2 = np.dot(part_2, np.transpose(current_iter_1))
save_iterations[i,:] = np.subtract(current_iter, part_2)
# to store the total absolute change in value after every iteration
diff1 = np.subtract(save_iterations[i-1,:],save_iterations[i,:])
# the convergence condition, delta for each variable should be less than the convergence_threshold)
condition = (np.absolute(diff1) < self.convergence_threshold).prod()
if(i<self.iterations-1):
return save_iterations[i,:]
else:
return np.zeros(len(votes))
if __name__ == '__main__':
    # standard labelling 1-2-3-4 (cyclic): each argument attacks its neighbours
    row1 = [0,1,0,1]
    row2 = [1,0,1,0]
    row3 = [0,1,0,1]
    row4 = [1,0,1,0]
    attack_rel = np.array([row1, row2, row3, row4])
    v = [1,0]
    # BUGFIX: v2 was commented out but still referenced in the votes array
    # below, which raised a NameError at startup; restore its definition.
    v2 = [2,0]
    votes = np.array([v2,v,v,v])
    # row1 = [0,1,0,0,1]
    # row2 = [1,0,1,0,0]
    # row3 = [0,1,0,1,0]
    # row4 = [0,0,1,0,1]
    # row5 = [1,0,0,1,0]
    # attack_rel = np.array([row1, row2, row3, row4, row5])
    # v = [1,0]
    # votes = np.array([v,v,v,v,v])
    # file which will contain the results for INR
    filename1 = 'INR_results.txt'
    # file which will contain the results for ISS
    filename2 = 'ISS_results.txt'
    Simple_Cycles().test_with_uniform_initialization(attack_rel, votes, filename1, filename2)
|
# Is The Number Less Than Or Equal To Zero
def lessThanZero(input):
    """Return a message stating whether *input* is less than or equal to zero."""
    non_positive = input <= 0
    return ("Yes less than or equal to zero" if non_positive
            else "Not less than zero")
print(lessThanZero(0))
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ansible.runner
import os
import sys
import test_config as config
import test_utils as utils
import time
import unittest
import uuid
class node_info_obj(object):
    """Value object describing one host read from the node config file."""

    def __init__(self, host, ip, user, role, name):
        self.host = host
        self.ip = ip
        self.user = user
        self.role = role
        self.name = name

    def __repr__(self):
        # Added for debuggability; no caller relies on the default repr.
        return ("node_info_obj(host=%r, ip=%r, user=%r, role=%r, name=%r)"
                % (self.host, self.ip, self.user, self.role, self.name))

    def getHost(self):
        """Return the hostname (YAML key)."""
        return self.host

    def getIp(self):
        """Return the node's IP address."""
        return self.ip

    def getUser(self):
        """Return the SSH user for this node."""
        return self.user

    def getRole(self):
        """Return the node's role string."""
        return self.role

    def getName(self):
        """Return the node's name (same as the YAML key)."""
        return self.name
class node_info_reader(object):
    """Parses the YAML node config file and exposes the hosts it describes."""

    def __init__(self, host_file=None):
        if host_file is None:
            print ("Host file not passed. exit")
            # BUGFIX: exit with a non-zero status so wrapping scripts can
            # detect the failure (exit code 0 signals success).
            sys.exit(1)
        self.host_file = utils.get_absolute_path_for_file(__file__, host_file)
        if not os.path.exists(self.host_file):
            print ("%s file does not exist" % self.host_file)
            # NOTE(review): returns with self.parsed_data unset, so a later
            # get_host_list() call will raise AttributeError — confirm callers
            # always pass an existing file.
            return
        self.parsed_data = utils.create_parsed_yaml(self.host_file)

    def get_host_list(self):
        """Return a list of node_info_obj, one per entry in the parsed YAML.

        Each YAML key is used as both hostname and name; 'ip', 'user' and
        'role' are read from the entry's mapping (None when missing).
        """
        host_ip_list = []
        for key, data in self.parsed_data.items():
            hostname = key
            name = key
            ip = data.get('ip')
            user = data.get('user')
            role = data.get('role')
            node = node_info_obj(hostname, ip, user, role, name)
            host_ip_list.append(node)
        return host_ip_list
class AnsibleRunner(object):
    """Thin wrapper around ansible.runner.Runner (Ansible 1.x API)."""

    def __init__(self,
                 host_list=None,
                 remote_user=None,
                 sudo=True):
        # AnsibleRunner init.
        # NOTE(review): remote_user is accepted but never stored, and
        # self.host_list / self.sudo are never read by
        # ansible_perform_operation — hosts and user are passed per call.
        self.host_list = host_list
        self.sudo = sudo

    def get_validated_data(self, results, stdout=True, stderr=False):
        """Extract stdout (or stderr when stdout is falsy) from an Ansible results dict.

        NOTE(review): `output` is reassigned on every loop pass, so with
        multiple contacted hosts only the last host's stream is returned,
        and dark/failed hosts reset it to '' without aborting — confirm
        single-host usage is assumed by the callers.
        """
        # print ("\n\nInside get_validated_data", results)
        output = ''
        ###################################################
        # First validation is to make sure connectivity to
        # all the hosts was ok.
        ###################################################
        if results['dark']:
            output = ''
        ##################################################
        # Now look for status 'failed'
        ##################################################
        for node in results['contacted'].keys():
            if 'failed' in results['contacted'][node]:
                if results['contacted'][node]['failed'] is True:
                    output = ''
        #################################################
        # Check for the return code 'rc' for each host.
        #################################################
        for node in results['contacted'].keys():
            info = results['contacted'][node]
            if stdout:
                op = info.get('stdout')
            else:
                op = info.get('stderr')
            output = op
        return output

    def ansible_perform_operation(self,
                                  host_list=None,
                                  remote_user=None,
                                  module=None,
                                  complex_args=None,
                                  module_args='',
                                  environment=None,
                                  check=False,
                                  forks=2,
                                  stderr=None,
                                  stdout=None):
        # Perform any ansible operation.
        # Builds a fresh Runner per call and funnels its results through
        # get_validated_data, returning the selected output stream.
        runner = ansible.runner.Runner(
            module_name=module,
            host_list=host_list,
            remote_user=remote_user,
            module_args=module_args,
            complex_args=complex_args,
            environment=environment,
            check=check,
            forks=forks)
        results = runner.run()
        res = self.get_validated_data(results, stdout, stderr)
        return res
class FunctionalTestMethods(unittest.TestCase):
    """End-to-end CLI tests: run cloudpulse commands over Ansible on every
    configured node and assert on the command output.

    Class attributes are initialized once from test_config and the node
    config YAML; each test method loops over node_list, caching the
    current node's connection details on the instance via
    get_node_details / reset_node_details.
    """
    ansirunner = AnsibleRunner()
    config = config.Configs()
    node_config_file_name = config.node_config_file
    node_reader = node_info_reader(node_config_file_name)
    node_list = node_reader.get_host_list()
    env_value = config.env_value
    TEST_CASE_NAME = config.test_case_name
    INVALID_TEST_CASE_NAME = config.invalid_test_case_name
    TEST_CASES = config.all_test_cases
    # expected_testcase_run = config.expected_testcase_run
    # sleep_interval = config.sleep_interval
    # input_periodic_test = config.input_periodic_test
    # container_name = config.container_name
    # update_script_file = config.update_script
    # revert_script_file = config.revert_script
    # tmp_loc = config.tmp_loc
    # conf_file_path = config.conf_file_path
    # endpoint testcase validation check
    # services_to_check = config.services_to_check
    # endpoint_testcase = config.endpoint_testcase

    @classmethod
    def setUpClass(cls):
        # set_env_variables(cls.env_value)
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def get_node_details(self, node_info):
        # Cache the current node's connection details on the instance.
        self.host_list = node_info.getIp()
        self.remote_user = node_info.getUser()
        self.host = node_info.getHost()
        self.role = node_info.getRole()

    def reset_node_details(self):
        # Clear the per-node state between loop iterations.
        self.host_list = None
        self.remote_user = None
        self.host = None
        self.role = None

    # Check for test run with invalid test case name with credentials
    def test_invalid_test_case_run(self):
        opt = utils.form_cli_env_params(self.env_value)
        cmd = "%s run %s" % (opt, self.INVALID_TEST_CASE_NAME)
        run_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=run_cmd,
                stdout=False,
                stderr=True)
            check = self.INVALID_TEST_CASE_NAME + ' is invalid'
            self.assertIn(check, res)
            self.reset_node_details()

    # Check for test run with valid test case name with credentials
    def test_valid_test_case_run_and_delete(self):
        opt = utils.form_cli_env_params(self.env_value)
        cmd = "%s run %s" % (opt, self.TEST_CASE_NAME)
        run_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=run_cmd,
                stdout=True,
                stderr=False)
            result = utils.parse_run_cmd_result(res)
            self.assertIn('Pass', result)
            # Try deleting the test case
            del_uuid = utils.get_uuid(res)
            cmd = "%s delete %s" % (opt, del_uuid)
            delete_cmd = utils.form_cmd(cmd)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=delete_cmd,
                stdout=True,
                stderr=False)
            self.assertIn('', res)
            self.reset_node_details()

    # Check for test run with invalid test case name with env variable
    def test_invalid_test_case_run_with_env(self):
        cmd = "run %s" % (self.INVALID_TEST_CASE_NAME)
        run_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=run_cmd,
                environment=self.env_value,
                stdout=False,
                stderr=True)
            check = self.INVALID_TEST_CASE_NAME + ' is invalid'
            self.assertIn(check, res)
            self.reset_node_details()

    # Check for run & show command using credentials
    def test_run_and_show_result(self):
        opt = utils.form_cli_env_params(self.env_value)
        cmd = "%s run %s" % (opt, self.TEST_CASE_NAME)
        run_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=run_cmd,
                stdout=True,
                stderr=False)
            result = utils.parse_run_cmd_result(res)
            self.assertIn('Pass', result)
            if res:
                # Wait till the run completes and then execute show cmd
                time.sleep(5)
                uuid = utils.get_uuid(res)
                cmd = "%s show %s" % (opt, uuid)
                show_cmd = utils.form_cmd(cmd)
                res = self.ansirunner.ansible_perform_operation(
                    host_list=[self.host_list],
                    remote_user=self.remote_user,
                    module="shell",
                    module_args=show_cmd,
                    stdout=True,
                    stderr=False)
                result = utils.parse_show_cmd_result(res, uuid)
                self.assertEqual('Pass', result)
            self.reset_node_details()

    # Check for run & show command using credentials
    def test_multiple_run_and_show(self):
        opt = utils.form_cli_env_params(self.env_value)
        for case in self.TEST_CASES:
            cmd = "%s run %s" % (opt, case)
            run_cmd = utils.form_cmd(cmd)
            for node in self.node_list:
                self.get_node_details(node)
                res = self.ansirunner.ansible_perform_operation(
                    host_list=[self.host_list],
                    remote_user=self.remote_user,
                    module="shell",
                    module_args=run_cmd,
                    stdout=True,
                    stderr=False)
                if res:
                    uuid = utils.get_uuid(res)
                    cmd = "%s show %s" % (opt, uuid)
                    show_cmd = utils.form_cmd(cmd)
                    res = self.ansirunner.ansible_perform_operation(
                        host_list=[self.host_list],
                        remote_user=self.remote_user,
                        module="shell",
                        module_args=show_cmd,
                        stdout=True,
                        stderr=False)
                    # Wait till the run completes and then execute show cmd
                    time.sleep(5)
                    result = utils.parse_show_cmd_result(res, uuid)
                    self.assertEqual('Pass', result)
                self.reset_node_details()

    # Check for result command is working with credentials
    def test_result_cmd(self):
        opt = utils.form_cli_env_params(self.env_value)
        # result_cmd = "cloudpulse %s result"%(opt)
        cmd = "%s result" % (opt)
        result_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=result_cmd,
                stdout=True,
                stderr=False)
            result = utils.parse_result_cmd_result(res)
            self.assertIn('Pass', result)
            self.reset_node_details()

    # Check for delete command with invalid uuid is working with credentials
    def test_delete_cmd_with_invalid_uuid(self):
        opt = utils.form_cli_env_params(self.env_value)
        # A random UUID is (practically) guaranteed not to exist server-side.
        del_uuid = str(uuid.uuid4())
        cmd = "%s delete %s" % (opt, del_uuid)
        delete_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=delete_cmd,
                stdout=True,
                stderr=False)
            check = 'Test %s could not be found' % (del_uuid)
            self.assertIn(check, res)
            self.reset_node_details()

    def test_with_invalid_user(self):
        # Expect a keystone "user not found" error on stderr.
        opt = utils.form_cli_env_params(self.env_value, invalid_uname=True)
        cmd = '%s result ' % (opt)
        result_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=result_cmd,
                stdout=False,
                stderr=True)
            check = 'Could not find user:'
            self.assertIn(check, res)
            self.reset_node_details()

    def test_with_invalid_password(self):
        # Expect an authentication error on stderr.
        opt = utils.form_cli_env_params(self.env_value, invalid_pwd=True)
        cmd = '--debug %s result ' % (opt)
        result_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            pwd_test = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=result_cmd,
                stdout=False,
                stderr=True)
            check = 'Invalid user / password'
            self.assertIn(check, pwd_test)
            self.reset_node_details()

    def test_with_invalid_tenant(self):
        # Expect a "project not found" error on stderr.
        opt = utils.form_cli_env_params(self.env_value, invalid_tenant=True)
        cmd = '--debug %s result ' % (opt)
        result_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            tenant_test = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=result_cmd,
                stdout=False,
                stderr=True)
            check = 'Could not find project'
            self.assertIn(check, tenant_test)
            self.reset_node_details()

    def test_with_invalid_auth(self):
        # Expect an authorization failure on stderr.
        opt = utils.form_cli_env_params(self.env_value, invalid_auth=True)
        cmd = '--debug %s result ' % (opt)
        result_cmd = utils.form_cmd(cmd)
        for node in self.node_list:
            self.get_node_details(node)
            res = self.ansirunner.ansible_perform_operation(
                host_list=[self.host_list],
                remote_user=self.remote_user,
                module="shell",
                module_args=result_cmd,
                stdout=False,
                stderr=True)
            check = 'Authorization Failed'
            self.assertIn(check, res)
            self.reset_node_details()
if __name__ == '__main__':
    # Run the functional test suite when executed directly.
    unittest.main()
|
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from random import randint
import sys
# Load the pre-saved image/label arrays. Using `with` guarantees both files
# are closed even if np.load raises (the original closed them manually).
with open("images.npy", "rb") as infile_i, open("labels.npy", "rb") as infile_l:
    emo = [np.load(infile_i), np.load(infile_l)]
# Shuffle images and labels with the same permutation so pairs stay aligned.
randomize = np.arange(len(emo[0]))
np.random.shuffle(randomize)
emo[0] = emo[0][randomize]
emo[1] = emo[1][randomize]
print('Data Loaded...')
# ---- TensorFlow 1.x softmax-regression baseline ----
sess = tf.InteractiveSession()
# flattened 100x100 grayscale images and one-hot labels for 10 classes
x = tf.placeholder(tf.float32, shape=[None, 10000])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([10000,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.matmul(x,W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# train on 69 mini-batches of 10 samples, logging accuracy every 10 steps
for i in range(69):
    batch = (emo[0][i*10:(i+1)*10], emo[1][i*10:(i+1)*10])
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    if i%10 == 0:
        print(accuracy.eval(feed_dict={x: batch[0], y_: batch[1]}))
batch_test = (emo[0][600:], emo[1][600:])
#generate test data
# NOTE(review): batch_test holds numpy *views* into emo, so these writes
# also overwrite emo[0][600+i] / emo[1][600+i], and randint(0,689) may draw
# samples from the 0..689 training range — confirm this overlap is intended.
for i in range(90):
    n = randint(0,689)
    batch_test[0][i] = emo[0][n]
    batch_test[1][i] = emo[1][n]
print(accuracy.eval(feed_dict={x: batch_test[0], y_: batch_test[1]}))
'''
for i in range(25):
    num = randint(0, 689)
    img = emo[0][num]
    classification = sess.run(tf.argmax(y, 1), feed_dict={x: [img]})
    #plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
    #plt.show()
    print ('Actual', np.argmax(emo[1][num]))
    print ('Logistic Regression Predicted ', classification[0])
'''
def weight_variable(shape):
    """Create a weight Variable initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with filter W, stride 1, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves spatial dimensions), SAME padding."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# ---- Convolutional network ----
# conv1: 5x5 filters, 1 -> 32 channels
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# reshape the flat 10000-vector back into a 100x100 grayscale image
x_image = tf.reshape(x, [-1,100,100,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# conv2: 5x5 filters, 32 -> 64 channels
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# fully connected: 25*25*64 = 40000 -> 1024
W_fc1 = weight_variable([40000, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 40000])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout on the FC layer (keep_prob fed 0.5 for training, 1.0 for eval)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer: 1024 -> 10 classes
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
saver = tf.train.Saver()
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
try:
    # BUGFIX: raw string — "\m" in "..\model.ckpt" triggered an
    # invalid-escape warning on modern Python (same bytes either way,
    # since \m is not an escape, but the raw literal is explicit).
    saver.restore(sess, r"..\model.ckpt")
    print("Model restored.")
    print("Continuing Training...")
except Exception:
    # BUGFIX: narrowed from a bare "except:" so KeyboardInterrupt /
    # SystemExit are no longer swallowed; a missing checkpoint still
    # falls through to fresh training.
    print("Model not found\nTraining new Model...")
for epoch in range(120):
    for i in range(69):
        try:
            batch = (emo[0][i*10:(i+1)*10], emo[1][i*10:(i+1)*10])
            if i%10 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x:batch[0], y_: batch[1], keep_prob: 1.0})
                print("epoch %d : step %d, training accuracy %g"%(epoch, i, train_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        except Exception:
            # BUGFIX: narrowed from a bare "except:"; still best-effort so
            # one bad batch does not abort the whole run.
            print('Exception caught')
    save_path = saver.save(sess, r"..\model.ckpt")
    print("Model saved in file: %s" % save_path)
print("Test accuracy %g"%accuracy.eval(feed_dict={x: emo[0][570:], y_: emo[1][570:], keep_prob: 1.0}))
sys.exit()
# Dead code preserved as a string literal (never reached: sys.exit() runs
# above); it targets the MNIST demo this script was adapted from.
'''
for i in range(25):
    num = randint(0, mnist.test.images.shape[0])
    img = mnist.test.images[num]
    prediction = tf.argmax(y_conv,1)
    classification = prediction.eval(feed_dict={x: [img],keep_prob: 1.0}, session=sess)
    #plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
    #plt.show()
    print 'Actual', np.argmax(mnist.test.labels[num])
    print 'NN predicted', classification
'''
|
#-*- coding:utf8 -*-
import time
import json
import datetime
from django.db import models
from shopback.signals import user_logged_in
from shopapp.jingdong import apis
import logging
logger = logging.getLogger('django.request')
class JDShop(models.Model):
    """Jingdong (JD.com) shop record, with incremental-sync watermarks."""
    shop_id = models.CharField(max_length=32,primary_key=True,verbose_name=u'店铺ID')
    vender_id = models.CharField(max_length=32,blank=True,verbose_name=u'商家ID')
    shop_name = models.CharField(max_length=32,blank=True,verbose_name=u'店铺名称')
    open_time = models.DateTimeField(blank=True,null=True,verbose_name=u'开店时间')
    logo_url = models.CharField(max_length=512,blank=True,verbose_name=u'LOGO')
    brief = models.TextField(max_length=2000,blank=True,verbose_name=u'店铺简介')
    category_main = models.BigIntegerField(null=True,verbose_name=u'主营类目ID')
    category_main_name = models.CharField(max_length=2000,blank=True,verbose_name=u'主营类目名称')
    # watermarks for incremental pulls of orders / refunds
    order_updated = models.DateTimeField(blank=True,null=True,
                                        verbose_name="订单增量更新时间")
    refund_updated = models.DateTimeField(blank=True,null=True,
                                        verbose_name="维权增量更新时间")

    class Meta:
        db_table = 'shop_jingdong_shop'
        verbose_name=u'京东商铺'
        verbose_name_plural = u'京东商铺列表'

    def __unicode__(self):
        return u'<%s>'%(self.shop_name)

    def updateOrderUpdated(self,updated):
        """Persist a new order-sync watermark."""
        self.order_updated = updated
        self.save()

    def updateRefundUpdated(self,updated):
        """Persist a new refund-sync watermark."""
        self.refund_updated = updated
        self.save()
class JDLogistic(models.Model):
    """A logistics (shipping) company as exposed by the JD API."""
    logistics_id = models.CharField(max_length=32,primary_key=True,verbose_name=u'物流ID')
    logistics_name = models.CharField(max_length=32,blank=True,verbose_name=u'物流名称')
    logistics_remark = models.CharField(max_length=128,blank=True,verbose_name=u'备注')
    sequence = models.CharField(max_length=32,blank=True,verbose_name=u'序列')
    company_code = models.CharField(max_length=32,blank=True,verbose_name=u'公司编码')

    class Meta:
        db_table = 'shop_jingdong_logistic'
        verbose_name=u'京东物流'
        verbose_name_plural = u'京东物流列表'

    def __unicode__(self):
        return u'<%s,%s>'%(self.logistics_id,self.logistics_name)
class JDOrder(models.Model):
    """A JD.com order, stored mostly as raw API payload fields."""
    # fulfilment models (JD order types)
    SOP = '22'
    LBP = '23'
    SOPL = '25'
    ORDER_TYPE = ((SOP,'SOP'),
                  (LBP,'LBP'),
                  (SOPL,'SOPL'),
                  )
    # payment methods
    PAY_TYPE_COD = '1'
    PAY_TYPE_POST = '2'
    PAY_TYPE_SELF = '3'
    PAY_TYPE_ONLINE = '4'
    PAY_TYPE_CTF = '5'
    PAY_TYPE_BANK = '6'
    PAY_TYPE = ((PAY_TYPE_COD,u'货到付款'),
                (PAY_TYPE_POST,u'邮局汇款'),
                (PAY_TYPE_SELF,u'自提'),
                (PAY_TYPE_ONLINE,u'在线支付'),
                (PAY_TYPE_CTF,u'公司转账'),
                (PAY_TYPE_BANK,u'银行转账'),
                )
    # order lifecycle states actually tracked (unused ones kept commented out)
    ORDER_STATE_WSTO = 'WAIT_SELLER_STOCK_OUT'
    # ORDER_STATE_STDC = 'SEND_TO_DISTRIBUTION_CENER'
    # ORDER_STATE_DCR = 'DISTRIBUTION_CENTER_RECEIVED'
    ORDER_STATE_WGRC = 'WAIT_GOODS_RECEIVE_CONFIRM'
    # ORDER_STATE_RC = 'RECEIPTS_CONFIRM'
    # ORDER_STATE_WSD = 'WAIT_SELLER_DELIVERY'
    ORDER_STATE_FL = 'FINISHED_L'
    ORDER_STATE_TC = 'TRADE_CANCELED'
    ORDER_STATE_LOCKED = 'LOCKED'
    ORDER_STATE = ((ORDER_STATE_WSTO,u'等待出库'),
                   # (ORDER_STATE_STDC,u'发往配送中心'),
                   # (ORDER_STATE_DCR,u'配送中心已收货'),
                   (ORDER_STATE_WGRC,u'等待确认收货'),
                   # (ORDER_STATE_RC,u'收款确认'),
                   # (ORDER_STATE_WSD,u'等待发货'),
                   (ORDER_STATE_FL,u'完成'),
                   (ORDER_STATE_TC,u'取消'),
                   (ORDER_STATE_LOCKED,u'已锁定'),
                   )
    order_id = models.CharField(max_length=32,primary_key=True,verbose_name=u'订单ID')
    shop = models.ForeignKey(JDShop,verbose_name=u'所属商铺')
    pay_type = models.CharField(max_length=16,blank=True
                                ,choices=PAY_TYPE,verbose_name=u'支付方式')
    order_total_price = models.FloatField(default=0.0,verbose_name=u'总金额')
    order_payment = models.FloatField(default=0.0,verbose_name=u'实付款')
    order_seller_price = models.FloatField(default=0.0,verbose_name=u'货款金额')
    freight_price = models.FloatField(default=0.0,verbose_name=u'运费')
    seller_discount = models.FloatField(default=0.0,verbose_name=u'优惠金额')
    delivery_type = models.CharField(max_length=32,blank=True,verbose_name=u'送货类型')
    invoice_info = models.CharField(max_length=256,blank=True,verbose_name=u'发票信息')
    order_start_time = models.DateTimeField(blank=True,null=True,verbose_name=u'下单时间')
    order_end_time = models.DateTimeField(blank=True,null=True,verbose_name=u'结单时间')
    modified = models.DateTimeField(blank=True,null=True,verbose_name=u'修改时间')
    payment_confirm_time = models.DateTimeField(blank=True,null=True,verbose_name=u'付款时间')
    consignee_info = models.TextField(max_length=1000,blank=True,verbose_name=u'收货人信息')
    item_info_list = models.TextField(max_length=10000,blank=True,verbose_name=u'商品列表')
    coupon_detail_list = models.TextField(max_length=2000,blank=True,verbose_name=u'优惠列表')
    return_order = models.CharField(max_length=2,blank=True,verbose_name=u'换货标识')
    pin = models.CharField(max_length=64,blank=True,verbose_name=u'账号信息')
    balance_used = models.FloatField(default=0.0,verbose_name=u'余额支付金额')
    logistics_id = models.CharField(max_length=128,blank=True,verbose_name=u'物流公司ID')
    waybill = models.CharField(max_length=128,blank=True,verbose_name=u'运单号')
    vat_invoice_info = models.CharField(max_length=512,blank=True,verbose_name=u'增值税发票')
    parent_order_id = models.CharField(max_length=32,blank=True,verbose_name=u'父订单号')
    order_remark = models.CharField(max_length=512,blank=True,verbose_name=u'买家备注')
    vender_remark = models.CharField(max_length=1000,blank=True,verbose_name=u'卖家备注')
    order_type = models.CharField(max_length=8,blank=True,
                                  choices=ORDER_TYPE,verbose_name=u'订单类型')
    order_state = models.CharField(max_length=32,blank=True,
                                   choices=ORDER_STATE,verbose_name=u'订单状态')
    order_state_remark = models.CharField(max_length=128,blank=True,verbose_name=u'订单状态说明')

    class Meta:
        db_table = 'shop_jingdong_order'
        verbose_name=u'京东订单'
        verbose_name_plural = u'京东订单列表'

    def __unicode__(self):
        return u'<%s,%s>'%(self.order_id,self.pin)

    @classmethod
    def mapTradeStatus(cls,jd_trade_status):
        """Map a JD order state to the internal paramconfig trade status."""
        from shopback import paramconfig as pcfg
        if jd_trade_status == cls.ORDER_STATE_WSTO:
            return pcfg.WAIT_SELLER_SEND_GOODS
        elif jd_trade_status == cls.ORDER_STATE_WGRC:
            return pcfg.WAIT_BUYER_CONFIRM_GOODS
        elif jd_trade_status == cls.ORDER_STATE_FL:
            return pcfg.TRADE_FINISHED
        elif jd_trade_status == cls.ORDER_STATE_TC:
            return pcfg.TRADE_CLOSED
        # any other state (e.g. LOCKED) is treated as not-yet-paid
        return pcfg.WAIT_BUYER_PAY
class JDProduct(models.Model):
    """A JD.com product (ware) mirrored from the JD API."""
    # listing states reported by JD
    NEVER_UP = 'NEVER_UP'
    CUSTORMER_DOWN = 'CUSTORMER_DOWN'
    SYSTEM_DOWN = 'SYSTEM_DOWN'
    ON_SALE = 'ON_SALE'
    AUDIT_AWAIT = 'AUDIT_AWAIT'
    AUDIT_FAIL = 'AUDIT_FAIL'
    WARE_STATUS = (
        (NEVER_UP,u'从未上架'),
        (CUSTORMER_DOWN,u'自主下架'),
        (SYSTEM_DOWN,u'系统下架'),
        (ON_SALE,u'在售'),
        (AUDIT_AWAIT,u'待审核'),
        (AUDIT_FAIL,u'审核不通过'),
    )
    # local record validity
    DELETE = 'DELETE'
    INVALID = 'INVALID'
    VALID = 'VALID'
    STATUS = (
        (DELETE,u'删除'),
        (INVALID,u'无效'),
        (VALID,u'有效')
    )
    ware_id = models.BigIntegerField(primary_key=True,verbose_name=u'商品ID')
    vender_id = models.CharField(max_length=32,blank=True,verbose_name=u'商家ID')
    shop_id = models.CharField(max_length=32,blank=True,verbose_name=u'店铺ID')
    spu_id = models.CharField(max_length=32,blank=True,verbose_name=u'SPU ID')
    cid = models.CharField(max_length=32,blank=True,verbose_name=u'分类ID')
    outer_id = models.CharField(max_length=32,blank=True,verbose_name=u'SKU外部ID')
    skus = models.TextField(max_length=10000,blank=True,verbose_name=u'商品SKU')
    title = models.CharField(max_length=64,blank=True,verbose_name=u'标题')
    item_num = models.CharField(max_length=32,blank=True,verbose_name=u'货号')
    upc_code = models.CharField(max_length=16,blank=True,verbose_name=u'UPC编码')
    transport_id = models.BigIntegerField(null=True,verbose_name=u'运费模板')
    online_time = models.DateTimeField(blank=True,null=True,verbose_name=u'最后上架时间')
    offline_time = models.DateTimeField(blank=True,null=True,verbose_name=u'最后下架时间')
    attributes = models.CharField(max_length=1024,blank=True,verbose_name=u'属性列表')
    desc = models.CharField(max_length=2000,blank=True,verbose_name=u'商品描述')
    producter = models.CharField(max_length=32,blank=True,verbose_name=u'生产厂商')
    wrap = models.CharField(max_length=32,blank=True,verbose_name=u'包装规格')
    cubage = models.CharField(max_length=16,blank=True,verbose_name=u'长:宽:高')
    weight = models.CharField(max_length=8,blank=True,verbose_name=u'重量')
    pack_listing = models.CharField(max_length=128,blank=True,verbose_name=u'包装清单')
    service = models.CharField(max_length=128,blank=True,verbose_name=u'售后服务')
    cost_price = models.FloatField(default=0.0,verbose_name=u'进货价')
    market_price = models.FloatField(default=0.0,verbose_name=u'市场价')
    jd_price = models.FloatField(default=0.0,verbose_name=u'京东价')
    stock_num = models.IntegerField(default=0,verbose_name=u'库存')
    logo = models.CharField(max_length=256,blank=True,verbose_name=u'主图')
    creator = models.CharField(max_length=32,blank=True,verbose_name=u'录入人')
    created = models.DateTimeField(blank=True,null=True,verbose_name=u'创建时间')
    modified = models.DateTimeField(blank=True,null=True,verbose_name=u'修改日期')
    is_pay_first = models.BooleanField(default=True,verbose_name=u'先款后货')
    is_can_vat = models.BooleanField(default=True,verbose_name=u'发票限制')
    ware_big_small_model = models.IntegerField(null=True,verbose_name=u'商品件型')
    ware_pack_type = models.IntegerField(null=True,verbose_name=u'商品包装')
    ad_content = models.CharField(max_length=256,blank=True,verbose_name=u'广告语')
    sync_stock = models.BooleanField(default=True,verbose_name=u'同步库存')
    shop_categorys = models.CharField(max_length=128,blank=True,verbose_name=u'店内分类')
    status = models.CharField(max_length=8,blank=True
                              ,choices=STATUS,verbose_name=u'状态')
    ware_status = models.CharField(max_length=16,blank=True
                                   ,choices=WARE_STATUS,verbose_name=u'商品状态')

    class Meta:
        db_table = 'shop_jingdong_product'
        verbose_name=u'京东商品'
        verbose_name_plural = u'京东商品列表'

    def __unicode__(self):
        return u'<%d,%s>'%(self.ware_id,self.item_num)
def add_jingdong_user(sender, user,top_session,
                top_parameters, *args, **kwargs):
    """user_logged_in signal handler: sync JD vendor/shop info after OAuth login.

    Looks up the local JD-type profile for `user`, fills in the vendor id
    and nick from the JD seller API, upserts the JDShop row from the shop
    query, then kicks off the initial product/logistics pulls.
    Python 2 code (uses dict.iteritems).
    """
    from shopback.users.models import User
    profiles = User.objects.filter(type=User.SHOP_TYPE_JD,user=user)
    if profiles.count() == 0:
        # no JD profile for this account -- nothing to sync
        return
    profile = profiles[0]
    user_dict = apis.jd_seller_vender_info_get(
        access_token=top_parameters['access_token'])
    profile.visitor_id = user_dict['vender_id']
    profile.nick = profile.nick or user_dict['shop_name']
    profile.save()
    shop_dict = apis.jd_vender_shop_query(
        access_token=top_parameters['access_token'])
    jd_shop,state = JDShop.objects.get_or_create(shop_id=shop_dict['shop_id'])
    # copy every field the model knows about from the API payload
    for k,v in shop_dict.iteritems():
        hasattr(jd_shop,k) and setattr(jd_shop,k,v)
    # JD reports open_time in epoch milliseconds; convert to a datetime
    jd_shop.open_time = datetime.datetime.fromtimestamp(shop_dict['open_time']/1000)
    jd_shop.save()
    # Initialize system data: pull the vendor's products and logistics list.
    # NOTE(review): ware_status values 1/2 presumably select different
    # listing states -- confirm against the JD API / task definitions.
    from .tasks import pullJDLogisticByVenderidTask,pullJDProductByVenderidTask
    pullJDProductByVenderidTask(user_dict['vender_id'],ware_status=1)
    pullJDProductByVenderidTask(user_dict['vender_id'],ware_status=2)
    pullJDLogisticByVenderidTask(user_dict['vender_id'])

# register on the 'jingdong' login signal (dispatch_uid prevents duplicates)
user_logged_in.connect(add_jingdong_user,
                sender='jingdong',
                dispatch_uid='jingdong_logged_in')
|
import sys
def solve(fences):
    """Largest rectangle area in the histogram `fences` (monotonic stack).

    Each stack entry is (leftmost index the bar's height extends back to,
    height). The caller appends a sentinel 0 so every bar is eventually
    popped and its rectangle scored.
    """
    best = 0
    pending = []
    for right, height in enumerate(fences):
        start = right
        # Pop every bar at least as tall as the current one; each popped
        # bar's maximal rectangle ends just before `right`.
        while pending and pending[-1][1] >= height:
            left, h = pending[-1]
            best = max(best, h * (right - left))
            pending.pop()
            start = left
        pending.append((start, height))
    return best
if __name__ == "__main__":
rl = lambda : sys.stdin.readline().rstrip(' \t\r\n\0')
for _ in xrange(int(rl())):
n = int(rl())
fences = map(int, rl().split())
fences.append(0)
print solve(fences)
|
# Generated by Django 3.1 on 2020-09-14 22:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the project_management app.

    Creates Project, the ProjectUser through-table, and wires
    Project.users as an M2M through ProjectUser.
    """

    initial = True

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.TextField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('state', models.CharField(max_length=5)),
                ('is_government', models.BooleanField()),
                ('organisation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.organisation')),
            ],
        ),
        migrations.CreateModel(
            name='ProjectUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_admin', models.BooleanField(default=False)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_management.project')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
        ),
        migrations.AddField(
            model_name='project',
            name='users',
            field=models.ManyToManyField(through='project_management.ProjectUser', to='user.User'),
        ),
    ]
|
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
torch.manual_seed(1)
class BiLSTM_CRF(nn.Module):
    """BiLSTM encoder with a CRF output layer for sequence tagging.

    The LSTM produces per-token emission scores via ``hidden2tag``;
    ``transitions`` holds tag-to-tag scores indexed as [from_tag, to_tag]
    (see ``_score_sentence``). Batch size is fixed at 1.
    """

    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        super(BiLSTM_CRF, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.tag_to_ix = tag_to_ix
        self.tag_size = len(tag_to_ix)
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        # Each direction gets hidden_dim // 2 units so the concatenated
        # bidirectional output is hidden_dim wide.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim//2, bidirectional=True)
        self.hidden2tag = nn.Linear(hidden_dim, self.tag_size)
        self.transitions = nn.Parameter(torch.randn(self.tag_size, self.tag_size))
        # Forbid transitions into START and out of STOP.
        self.transitions.data[:, self.tag_to_ix['START']] = -100000 # data required
        self.transitions.data[self.tag_to_ix['STOP'], :] = -100000
        self.hidden = self.init_hidden()

    def init_hidden(self):
        """Fresh (h0, c0) for the LSTM: (num_directions, batch=1, hidden//2)."""
        return (torch.randn(2, 1, self.hidden_dim // 2),
                torch.randn(2, 1, self.hidden_dim // 2))

    def _get_lstm_features(self, sentence):
        '''
        sentence : input sentence, represented by one-hot-encoding
        return : lstm_out which represents the emission probability
        '''
        # 1. initialize the hidden layer, no reason, but for the initial step of lstm
        # initial hidden layer required
        self.hidden = self.init_hidden()
        # 2. embedding the sentence
        # len(sentence) * embedding_dim, the second dimension is just for batch size
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        # 3. lstm_hidden : 1 * 1 * hidden_dim   lstm_out : len(sentence) * 1 * hidden_dim
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        # 4. lstm_out : len(sentence) * hidden_dim
        lstm_out = lstm_out.view(len(sentence), -1)
        # 5. each sentence should be 1 * tag_size, which indicates emission probability
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    def _score_sentence(self, feats, tags):
        '''
        recall CRF loss :
        loss = log(exp(Score) / sum (exp(Scores))) = Score - log(sum(exp(Scores)))
        return : Score (the unnormalized score of the gold tag path)
        '''
        # 1. define the score as torch format
        score = torch.zeros(1)
        # 2. as tag sequence is in the torch format, we need to catenate the start with the following tags
        tags = torch.cat([torch.tensor([self.tag_to_ix['START']], dtype=torch.long), tags])
        # 3. loop each word
        for i, feat in enumerate(feats):
            # transition score : tag[i] -> tag[i+1]
            # feat[tags[i+1]] : emission score at this timestep
            score = score + self.transitions[tags[i], tags[i+1]] + feat[tags[i+1]]
        # final transition into the STOP tag
        score = score + self.transitions[tags[-1], self.tag_to_ix['STOP']]
        return score

    def _forward_alg(self, feats):
        """Log-partition (log-sum-exp over all tag paths) via forward algorithm."""
        # 1. initialize the start timestep score, as the sequnce starts from START, so initialize with -10000
        init_alphas = torch.full((1, self.tag_size), -10000)
        # 2. the START tag is different
        init_alphas[0, self.tag_to_ix['START']] = 0
        forward_var = init_alphas
        # loop each timestep
        for feat in feats:
            alphas_t = []
            for tag in range(self.tag_size):
                emit_score = feat[tag].view(1, -1).expand(1, self.tag_size)
                trans_score = self.transitions[:, tag].view(1, -1)
                next_tag_var = forward_var + trans_score + emit_score
                alphas_t.append(self.log_sum_exp(next_tag_var).view(1))
            forward_var = torch.cat(alphas_t).view(1, -1)
        terminal_var = forward_var + self.transitions[:, self.tag_to_ix['STOP']]
        return self.log_sum_exp(terminal_var)

    def neg_log_likelihood(self, sentence, tags):
        """CRF loss: log-partition minus the gold path score (non-negative)."""
        feats = self._get_lstm_features(sentence)
        forward_score = self._forward_alg(feats)
        gold_score = self._score_sentence(feats, tags)
        return forward_score - gold_score

    def _viterbi_decode(self, feats):
        """Best-scoring tag path for `feats`; returns (path_score, tag list)."""
        backpointers = []
        init_alphas = torch.full((1, self.tag_size), -10000)
        init_alphas[0][self.tag_to_ix['START']] = 0
        forward_var = init_alphas
        for feat in feats:
            bptrs_t = []        # best previous tag for each current tag
            viterbivars_t = []  # best path score ending in each current tag
            for tag in range(self.tag_size):
                # Emission score is constant over the argmax, so add it after.
                next_tag_var = forward_var + self.transitions[:, tag]
                best_tag_id = self.argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)
        terminal_var = forward_var + self.transitions[:, self.tag_to_ix['STOP']]
        best_tag_id = self.argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]
        # Walk the backpointers to recover the path, then drop the START tag.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        best_path.pop()
        best_path.reverse()
        return path_score, best_path

    def forward(self, sentence):
        """Inference: return (viterbi score, predicted tag index sequence)."""
        lstm_feats = self._get_lstm_features(sentence)
        score, tag_seq = self._viterbi_decode(lstm_feats)
        return score, tag_seq

    def argmax(self, vec):
        """Index of the max element of a 1 x N tensor, as a Python int."""
        _, idx = torch.max(vec, 1)
        return idx.item()

    def log_sum_exp(self, vec):
        """Numerically stable log(sum(exp(vec))) for a 1 x N tensor."""
        max_score = vec[0, self.argmax(vec)]
        max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
        return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
def prepare_sequence(seq, to_ix):
    """Map a token sequence to a LongTensor of indices via `to_ix`."""
    indices = [to_ix[token] for token in seq]
    return torch.tensor(indices, dtype=torch.long)
def padding(train_x, lengths, padded):
    """Copy each sequence's first `lengths[i]` items into row i of `padded`.

    `padded` is modified in place (rows beyond each length keep their
    pre-filled pad value) and returned for convenience.
    """
    for row, seq_len in enumerate(lengths):
        padded[row, :seq_len] = train_x[row][:seq_len]
    return padded
if __name__ == '__main__':
    # Toy NER-style dataset: (token list, BIO tag list) pairs.
    training_data = [(
        "the wall street journal reported today that apple corporation made money".split(),
        "B I I I O O O B I O O".split()
    ), (
        "georgia tech is a university in georgia".split(),
        "B I O O O O B".split()
    ), (
        "a university in georgia".split(),
        "O O O B".split()
    ), (
        "reported today that apple corporation made money".split(),
        "O O O B I O O".split()
    ), (
        "tech is a university in georgia".split(),
        "I O O O O B".split()
    ), (
        "corporation made money".split(),
        "I O O".split()
    )]
    # 1. get word and index pairs (plus PAD/START/STOP specials)
    train_x = []
    train_y = []
    for x, y in training_data:
        train_x.extend(x)
        train_y.extend(y)
    train_x.extend(['PAD'])
    train_y.extend(['START', 'STOP', 'PAD'])
    vocabulary = set(train_x)
    tags = set(train_y)
    word_ix = {word : index for index, word in enumerate(vocabulary)}
    ix_word = {index : word for index, word in enumerate(vocabulary)}
    tag_ix = {tag : index for index, tag in enumerate(tags)}
    ix_tag = {index : tag for index ,tag in enumerate(tags)}
    # 2. transfer the data to index lists
    train_x = []
    train_y = []
    for x, y in training_data:
        train_x.append([word_ix[w] for w in x])
        train_y.append([tag_ix[t] for t in y])
    ###### Make all sequences in the mini-batch have the same length by padding #####
    # NOTE(review): padded_X / padded_y are built but never used below --
    # training iterates training_data directly, one sentence at a time.
    X_lengths = [len(sentence) for sentence in train_x]
    pad_token = word_ix['PAD']
    longest_sent = max(X_lengths)
    batch_size = len(train_x)
    padded_X = np.ones((batch_size, longest_sent)) * pad_token
    y_lengths = [len(tags) for tags in train_y]
    pad_token = tag_ix['PAD']
    longest_tag = max(y_lengths)
    batch_size = len(train_y)
    # NOTE(review): sized with longest_sent rather than longest_tag; benign
    # here because tag sequences match sentence lengths -- confirm if reused.
    padded_y = np.ones((batch_size, longest_sent)) * pad_token
    padded_X = padding(train_x, X_lengths, padded_X)
    padded_y = padding(train_y, y_lengths, padded_y)
    #################################################################################
    # 3 build the model (embedding_dim=5, hidden_dim=4) and train with SGD
    model = BiLSTM_CRF(len(word_ix), tag_ix, 5, 4)
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    for epoch in range(300):
        for sentence, tags in training_data:
            # PyTorch accumulates gradients; clear them per sentence.
            model.zero_grad()
            sentence_in = prepare_sequence(sentence, word_ix)
            targets = prepare_sequence(tags, tag_ix)
            loss = model.neg_log_likelihood(sentence_in, targets)
            loss.backward()
            optimizer.step()
        print(loss.item())
    # Sanity check: decode the first training sentence.
    test_seq = prepare_sequence(training_data[0][0], word_ix)
    score, tag_index = model(test_seq)
    for i in tag_index:
        print(ix_tag[i], end=' ')
# -*- coding: UTF-8 -*-
from flask import Blueprint, url_for, render_template as flask_render_template, request, make_response, g, send_from_directory, redirect, session
from werkzeug.contrib.atom import AtomFeed
from fypress.folder import Folder
from fypress.post import Post, GuestCommentForm, LoggedCommentForm, SimpleComment
from fypress.admin import Option, Theme
from fypress.utils import Paginator
from fypress.folder import Folder
from fypress.user import User
from fypress.admin.views import handle_404 as is_admin_404, render_template as admin_render_template
from fypress import __version__, FyPress
from .cache import cached, get_cache_key
# Front-office blueprint; fypress exposes site options, cache and config.
public = Blueprint('public', __name__)
fypress = FyPress()
def render_template(template, **kwargs):
    """Render a theme template, injecting the admin bar for staff users.

    Loads the logged-in user (if any) into ``g.user`` so templates can use
    it; for users with status >= 4 the admin bar HTML is spliced in just
    before the closing </html> tag.
    """
    g.user = None
    if session.get('user_id'):
        g.user = User.get(User.id == session['user_id'])
    render = flask_render_template(template, **kwargs)
    if session.get('user_id') and g.user.status >= 4:
        render = render.replace('</html>', '')
        render += admin_render_template('admin/bar.html')
        render += '</html>'
    return render
def is_404():
    """Render the active theme's 404 page with HTTP status 404."""
    return render_template(Theme.get_template('404.html')), 404
@public.context_processor
def template():
    """Expose the active theme's context variables to all public templates."""
    return Theme.context()
@public.before_request
def before_request():
    """Lazily load site options on the first request after startup."""
    if not fypress.options:
        fypress.options = Option.auto_load()
@public.route('/')
@cached(pretty=True)
def root():
    """Home page: the published 'index' page of the root folder (id 1)."""
    index = Post.filter(Post.id_folder == 1, Post.slug == 'index', Post.status == 'published', Post.type == 'page').one()
    return render_template(Theme.get_template('index.html'), index=index, this=False)
@public.route('/_preview/')
def preview():
    """Render the home page under ?theme=<name>, then restore saved options."""
    fypress.options['theme'] = request.args.get('theme')
    render = root()
    # Reload persisted options so the preview theme does not stick.
    fypress.options = Option.auto_load()
    return render
@public.route('/articles/')
@cached(pretty=True)
def posts():
    """Paginated list of all published posts under a synthetic 'Articles' folder."""
    # Build an in-memory Folder (not persisted) to reuse the folder template API.
    folder = Folder()
    folder.name = 'Articles'
    folder.guid = 'articles'
    folder.is_folder = True
    folder.posts = Paginator(
        query=Post.filter(Post.status == 'published', Post.type == 'post').order_by(Post.created, 'DESC'),
        page=request.args.get('page'),
        theme='bootstrap',
        per_page=5
    )
    return render_template(Theme.get_template('articles.html'), this=folder)
@public.route('/<path:slug>.html', methods=['GET', 'POST'])
@cached(pretty=True)
def is_post(slug):
    """Serve a single post or page addressed by its guid.

    Increments the view counter, handles comment submission for posts
    (invalidating the page cache on success), and 404s on unknown slugs.
    """
    post = Post.get(Post.guid == slug)
    if post:
        # A folder's index page lives at the folder URL, not at *.html.
        if post.slug == 'index' and post.id_folder != 0:
            return redirect('/' + post.folder.guid + '/')
        post.views += 1
        post.save()
        if post.type == 'post':
            post.is_post = True
            post.comments = SimpleComment.filter(SimpleComment.id_post == post.id, SimpleComment.status == 'valid').order_by(SimpleComment.created, 'asc').all()
            # Logged-in users get a shorter form (no name/email fields).
            if session.get('user_id'):
                form = LoggedCommentForm()
            else:
                form = GuestCommentForm()
            if form.validate_on_submit():
                comment = SimpleComment.add(request.form, post.id)
                # Drop the cached page so the new comment shows up.
                if fypress.cache:
                    fypress.cache.delete(get_cache_key())
                return redirect(request.url + '#comment_%s' % comment.id)
            return render_template(Theme.get_template('post.html'), this=post, show_sidebar=False, comment_form=form)
        else:
            post.is_page = True
            # Sibling pages of the same folder, for navigation.
            post.pages = Post.filter(Post.id_folder == post.id_folder, Post.status == 'published', Post.type == 'page').order_by(Post.created).all()
            return render_template(Theme.get_template('page.html'), this=post)
    else:
        return is_404()
@public.route('/<path:slug>/')
@cached(pretty=True)
def is_folder(slug):
    """Folder landing page: its index page, child pages, and paginated posts."""
    # /admin/... is handled elsewhere; serve the admin 404 for it.
    if slug.split('/')[0] != 'admin':
        folder = Folder.get(Folder.guid == slug)
        if folder:
            folder.is_folder = True
            folder.pages = Post.filter(Post.id_folder == folder.id, Post.status == 'published', Post.type == 'page').order_by(Post.created).all()
            folder.index = Post.filter(Post.id_folder == folder.id, Post.slug == 'index', Post.status == 'published', Post.type == 'page').one()
            folder.posts = Paginator(
                query=Post.filter(Post.id_folder == folder.id, Post.status == 'published', Post.type == 'post').order_by(Post.created),
                page=request.args.get('page'),
                theme='bootstrap',
                per_page=4
            )
            return render_template(Theme.get_template('folder.html'), this=folder)
        else:
            return is_404()
    else:
        return is_admin_404()
@public.route('/public/<path:folder>/<file>')
def static(folder, file):
    """Serve a static asset of the active theme."""
    # Theme resolves the request to an on-disk directory + filename.
    folder, file = Theme.get_template_static(folder, file, fypress.config)
    return send_from_directory(folder, file)
@public.route('/feed/<path:folder>/')
@cached()
def feed_folder(folder):
    """Atom feed of the latest 20 published posts in one folder."""
    if folder.split('/')[0] != 'admin':
        folder = Folder.get(Folder.guid == folder)
        if folder:
            posts = Post.filter(Post.id_folder == folder.id, Post.status == 'published', Post.type == 'post').order_by(Post.created).limit(20)
            feed = AtomFeed(
                fypress.options['name'] + ' • ' + folder.name,
                subtitle=folder.seo_content,
                feed_url=request.url_root + 'feed/',
                url=request.url_root,
                generator=None
            )
            for post in posts:
                feed.add(
                    post.title,
                    post.content,
                    content_type='html',
                    author=post.user.nicename,
                    url=request.url_root + post.guid,
                    updated=post.modified,
                    published=post.created
                )
            response = feed.get_response()
            response.headers["Content-Type"] = 'application/xml'
            return response
        else:
            return is_404()
    else:
        return is_admin_404()
@public.route('/feed/')
@cached()
def feed():
    """Site-wide Atom feed of the latest 20 published posts."""
    feed = AtomFeed(
        fypress.options['name'],
        subtitle=fypress.options['slogan'],
        feed_url=request.url_root + 'feed/',
        url=request.url_root,
        generator=None
    )
    posts = Post.filter(Post.status == 'published', Post.type == 'post').order_by(Post.created).limit(20)
    for post in posts:
        feed.add(
            post.title,
            post.content,
            content_type='html',
            author=post.user.nicename,
            url=request.url_root + post.guid,
            updated=post.modified,
            published=post.created
        )
    response = feed.get_response()
    response.headers["Content-Type"] = 'application/xml'
    return response
@public.route('/sitemap.xls')
@cached()
def sitemap_xls():
    """Serve the theme's sitemap stylesheet with an XML content type."""
    response = make_response(render_template(Theme.get_template('_sitemap.xls')))
    response.headers["Content-Type"] = 'application/xml'
    return response
@public.route('/sitemap.xml')
@cached()
def sitemap():
    """Build sitemap.xml: home, folders, pages, then posts."""
    # NOTE(review): this first query is overwritten below before being used.
    posts = Post.filter(Post.status == 'published', Post.type == 'post').order_by(Post.created).all()
    folders = Folder.get_all()
    pages = []
    # home
    pages.append({'url': request.url_root, 'freq': 'daily', 'prio': '1'})
    # folders
    for folder in folders:
        if folder.guid != '':
            url = request.url_root + folder.guid + '/'
            modified = folder.modified.strftime('%Y-%m-%d')
            pages.append({'url': url, 'mod': modified, 'freq': 'weekly', 'prio': '0.6'})
    # pages (folder index pages are covered by the folder URLs above)
    # NOTE(review): both queries below cap at 20 entries -- confirm the
    # sitemap is intentionally truncated.
    posts = Post.filter(Post.status == 'published', Post.type == 'page').order_by(Post.created).limit(20)
    for post in posts:
        if post.slug != 'index':
            url = request.url_root + post.guid + '.html'
            modified = post.modified.strftime('%Y-%m-%d')
            pages.append({'url': url, 'mod': modified, 'freq': 'monthly', 'prio': '0.9'})
    # posts
    posts = Post.filter(Post.status == 'published', Post.type == 'post').order_by(Post.created).limit(20,)
    for post in posts:
        url = request.url_root + post.guid + '.html'
        modified = post.modified.strftime('%Y-%m-%d')
        pages.append({'url': url, 'mod': modified, 'freq': 'monthly', 'prio': '0.8'})
    response = make_response(render_template(Theme.get_template('_sitemap.xml'), pages=pages))
    response.headers["Content-Type"] = 'application/xml'
    return response
|
from django.conf import settings
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.edit import FormView
from zenpy import Zenpy
from zenpy.lib.api_objects import CustomField, Ticket, User as ZendeskUser
from .forms import RequestAccessForm
class AccessDeniedView(FormView):
    """Shown when SSO denies access; lets the user request access via Zendesk.

    On a valid form, files a Zendesk ticket and renders a success page
    carrying the new ticket id.
    """

    template_name = 'sso/access-denied.html'
    form_class = RequestAccessForm
    success_url = reverse_lazy('contact:success')

    def form_valid(self, form):
        """File the ticket and show the success page (no redirect)."""
        ticket_id = self.create_zendesk_ticket(form.cleaned_data)
        return render(self.request, 'sso/request-access-success.html', dict(zendesk_ticket_id=ticket_id))

    def get_zendesk_client(self):
        # Zenpy will let the connection timeout after 5s and will retry 3 times
        return Zenpy(timeout=5, **settings.ZENPY_CREDENTIALS)

    def create_zendesk_ticket(self, cleaned_data):
        """Create a Zendesk ticket describing the denied access and return its id."""
        email = self.request.user.email
        # Populated by the SSO middleware when an app rejected the user.
        application = self.request.session.get('_last_failed_access_app', 'Unspecified')
        zendesk_user = ZendeskUser(name=cleaned_data['full_name'], email=email)
        description = (
            'Name: {full_name}\n'
            'Team: {team}\n'
            'Email: {email}\n'
            'Application: {application}\n'
        ).format(email=email, application=application, **cleaned_data)
        ticket = Ticket(
            subject=settings.ZENDESK_TICKET_SUBJECT,
            description=description,
            requester=zendesk_user,
            # Tag the ticket with the auth_broker service custom field.
            custom_fields=[CustomField(id='31281329', value='auth_broker')]
        )
        response = self.get_zendesk_client().tickets.create(ticket)
        return response.ticket.id
|
#!/usr/bin/python3
""" create a new class Scuare """
from models.rectangle import Rectangle
class Square(Rectangle):
    """A square: a Rectangle whose width and height are always equal.

    Args:
        Rectangle (class): base class providing width/height/x/y validation.
    """

    def __init__(self, size, x=0, y=0, id=None):
        """Initialize a Square.

        Args:
            size (int): side length of the square.
            x (int, optional): axis x. Defaults to 0.
            y (int, optional): axis y. Defaults to 0.
            id (int, optional): instance id. Defaults to None.
        """
        # FIX: the original also did `self.size = size` first, which ran the
        # width/height setters twice -- Rectangle.__init__(size, size, ...)
        # already assigns (and validates) both through the same setters.
        super().__init__(size, size, x, y, id)

    @property
    def size(self):
        """int: the side length (stored as the rectangle's width)."""
        return self.width

    @size.setter
    def size(self, value):
        """Set both width and height so the shape stays square."""
        self.width = value
        self.height = value

    def __str__(self):
        """Return '[Square] (<id>) <x>/<y> - <size>'."""
        return "[Square] ({}) {}/{} - {}".format(
            self.id,
            self.x,
            self.y,
            self.size
        )
|
"""Brain Even game logic."""
from random import randint
from typing import Tuple
MIN_NUMBER = 0
MAX_NUMBER = 100

DESCRIPTION = 'Answer "yes" if number even otherwise answer "no".'


def get_task() -> Tuple[str, str]:
    """Generate a question/answer pair for the even-number game.

    Returns:
        Tuple[str, str]: the number to show and the expected answer
        ('yes' for even, 'no' for odd).
    """
    number = randint(MIN_NUMBER, MAX_NUMBER)
    correct = 'yes' if number % 2 == 0 else 'no'
    return str(number), correct
|
# Generated by Django 2.2.5 on 2019-09-30 21:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the order app: Order and its OrderItem lines."""

    initial = True

    dependencies = [
        ('afriventapp', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True)),
                ('total_cost', models.DecimalField(decimal_places=2, max_digits=15)),
                ('authorization_url', models.URLField(blank=True)),
                ('ref_code', models.CharField(blank=True, max_length=250)),
                ('payment_confirmation', models.BooleanField(null=True)),
                ('access_code', models.CharField(max_length=1000, null=True)),
                ('order_unique_id', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_ordered', to='afriventapp.Event')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField()),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_ticket', to='order.Order')),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='afriventapp.EventTicket')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
#!/usr/bin/env python3
##############################
# Data Thread section
##############################
import logging
import os
from time import sleep
from threading import Thread
from datetime import datetime
from serial import Serial, SerialException, SerialTimeoutException, STOPBITS_ONE, EIGHTBITS, PARITY_NONE
# Define our data thread.
class ArduinoDataThread (Thread):
    """Reads CSV sensor lines from an open serial connection and logs them.

    Writes 100 data lines per file (timestamp-named CSVs in the output
    directory), re-sending a keep-alive string before each read so the
    Arduino keeps streaming.
    """

    def __init__(self, serial_connection, output_directory, headers):
        Thread.__init__(self)
        self.__ser = serial_connection
        self.__out = output_directory
        # Shared header list (owned by the supervisor) so parsed headers
        # survive across restarted data threads.
        self.headers = headers
        self.headers_parsed = False
        # Keep alive, basically.
        # Have to encode it because the serial stream only takes bytes.
        self.__keep_alive = "Hello.".encode('ascii')
        self.last_received_line = None
        self.__stop = False
        logging.debug('Data Thread: New logging thread created.')

    def stop(self):
        """Ask the run loop to exit at its next check."""
        self.__stop = True

    def run(self):
        try:
            # If we haven't been told to shut down:
            while not self.__stop:
                # Open a file to write data to and write 100 lines.
                line_count = 0
                with open(self.gen_filename(), 'wt', encoding='utf-8') as f:
                    logging.debug('Data Thread: Opened new file for sensor data: {0}'.format(f.name))
                    while line_count < 100:
                        # We have to send this to start the data flowing.
                        # Also keep writing to it just to make sure the buffer
                        # on the other end stays active.
                        self.__ser.write(self.__keep_alive)
                        # Strip end characters and decode bytes -> str.
                        response = self.__ser.readline().rstrip().decode()
                        logging.debug(str(line_count) + " : " + response)
                        # In case we haven't already done so, separate out the
                        # headers. We're going to want them for each file.
                        if not self.headers_parsed and len(response.split(",")) > 1:
                            logging.debug("Data Thread: Don't have headers, trying to parse.")
                            self.get_headers(response)
                            logging.debug(self.headers)
                            logging.debug("Data Thread: Supposedly we got headers.")
                            if len(self.headers) > 1:
                                self.headers_parsed = True
                        # If we just opened a new file and we have headers, print them to the file.
                        if line_count == 0 and self.headers_parsed:
                            logging.debug("Data Thread: We have headers, line count is zero. Writing headers to file.")
                            # Joins the headers using a comma to separate them.
                            f.write(','.join(str(x) for x in self.headers))
                            f.write('\n')
                        # Make sure the response actually has data in it
                        if len(response) > 0:
                            logging.debug("Data Thread: Writing response to file.")
                            # Write our response and attach an endline.
                            self.last_received_line = response
                            f.write(response)
                            f.write('\n')
                            f.flush()
                            # We wrote another line, increment the counter.
                            line_count += 1
                        if self.__stop:
                            break
        # BUG FIX: the original `except SerialException or SerialTimeoutException`
        # evaluated the `or` first and therefore only ever caught
        # SerialException; multiple exception types must be a tuple.
        except (SerialException, SerialTimeoutException) as err:
            logging.debug("Data Thread: Problem with serial connection. Trying to re-start one.")
            logging.debug("Error: {0}".format(err.args))
            logging.debug("Exiting thread.")

    # A small function to generate the name of the file we'll log to.
    # Format for the filename is: YYYYMMDD.HHMMSS.csv
    def gen_filename(self):
        d = datetime.today()
        fn = os.path.join(self.__out, d.strftime('%Y%m%d') + "." + d.strftime('%H%M%S') + ".csv")
        assert isinstance(fn, str)
        return fn

    def get_headers(self, to_parse):
        """Split a CSV header line and append the fields to self.headers."""
        x = to_parse.split(",")
        for l in x:
            self.headers.append(l)
        logging.debug('Data Thread: Parsing headers.')
        logging.debug('Data Thread: Before: {0}'.format(to_parse))
        logging.debug('Data Thread: After: {0}'.format(self.headers))
class ArduinoThreadSupervisor (Thread):
    """Owns the serial connection and (re)spawns an ArduinoDataThread on it."""

    def __init__(self, port, output_dir):
        Thread.__init__(self)
        self.__serial_connection = Serial()
        # Place to store headers
        self.sensor_headers = []
        # Have we parsed the headers already?
        self.headers_parsed = False
        self.__port = port
        self.__out_dir = output_dir
        self.__current_thread = None
        self.__stop = False

    def stop(self):
        """Stop both the supervisor loop and the current data thread."""
        # NOTE(review): raises AttributeError if called before run() has
        # created a data thread -- confirm callers only stop after start().
        self.__stop = True
        self.__current_thread.stop()

    def last_line(self):
        """Most recent raw line received by the data thread."""
        return self.__current_thread.last_received_line

    @property
    def current_gps_coords(self):
        # Sure, not as efficient as it could be, but this is more readable.
        # We should probably check to see if we're actually sending numbers, but the only other option
        # is that we happen to time it right after the Arduino is reset, in which case it'll probably be either
        # blank or "Latitude, Longitude". In either case it's not harmful.
        data_array = str.split(self.last_line(), ',')
        # That's really only two parts out of the array (spots 4 and 5). Slices go up to the last number, not
        # including it.
        return "{0}, {1}".format(*data_array[4:6])

    # If we have a connection, reset the Arduino by toggling DTR
    def __reset_arduino(self):
        if self.__serial_connection.isOpen():
            logging.debug('Arduino Supervisor: Resetting connection to Arduino.')
            self.__serial_connection.setDTR(True)
            sleep(1)
            self.__serial_connection.setDTR(False)
            # Flush any data there at the moment
            self.__serial_connection.flushInput()
            self.__serial_connection.flushOutput()

    # Define a function to actually establish a connection. That way, if we don't
    # get one immediately, we can try to get one any time we would try to establish
    # a new thread.
    def __setup_serial_connection(self):
        try:
            logging.debug('Arduino Supervisor: Setting up connection on {0}'.format(self.__port))
            self.__serial_connection.port = self.__port
            self.__serial_connection.baudrate = 115200
            self.__serial_connection.stopbits = STOPBITS_ONE
            self.__serial_connection.bytesize = EIGHTBITS
            self.__serial_connection.parity = PARITY_NONE
            self.__serial_connection.timeout = 2
        except SerialException as err:
            logging.warning("Arduino Supervisor: Serial Error: {0}".format(err))

    def run(self):
        """Open the port, reset the board, spawn a data thread, and babysit it."""
        # Try to establish a connection to the Arduino
        self.__setup_serial_connection()
        # So long as we're not told to stop
        while not self.__stop:
            try:
                self.__serial_connection.open()
                if self.__serial_connection.isOpen():
                    logging.debug("Arduino Supervisor: Connection open.")
                    # Reset the Arduino:
                    self.__reset_arduino()
                    # Set up a data thread:
                    self.__current_thread = ArduinoDataThread(self.__serial_connection,
                                                              self.__out_dir,
                                                              self.sensor_headers)
                    # Start the thread
                    self.__current_thread.start()
                    # Join
                    # The while not part here is so we can test this and give the join a timeout.
                    # That way a keyboard interrupt actually works on it.
                    while not self.__stop:
                        # Checking every five seconds works for testing. I'll probably boost this later.
                        self.__current_thread.join(5)
                else:
                    logging.debug("Arduino Supervisor: Connection failed to open.")
            except KeyboardInterrupt:
                logging.warning('Arduino Supervisor: Received keyboard interrupt.')
                self.__stop = True
                self.__current_thread.stop()
            finally:
                logging.info("Arduino Supervisor: Closing the serial connection before exiting.")
                self.__serial_connection.close()
if __name__ == "__main__":
    # Manual test harness: parse args, run the supervisor, and print GPS
    # coordinates a few times before shutting down.
    import getopt
    import sys

    debugLevel = logging.INFO
    logging.basicConfig(stream=sys.stderr,
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=debugLevel)

    def process_args(inargs):
        """Parse -o/--outDir and -p/--port; return (out_dir, port)."""
        # Automatically select the correct port for the OS.
        out = os.getcwd()
        port = 'COM3' if os.name == 'nt' else '/dev/ttyACM0'
        usage = """
        -o, --outDir  Where to output the videos.
        -p, --port    The port to connect to the Arduino on.
        """
        try:
            # allow -o or --outDir, -t or --time, -n or --num, and -h.
            opts, args = getopt.getopt(inargs, "ho:p:", ["outDir", "port"])
        except getopt.GetoptError as err:
            print(err.msg)
            print("\n")
            print(usage)
            sys.exit(2)
        for opt, arg in opts:
            if opt == "-h":
                print(usage)
            elif opt == "-p":
                port = arg
                print("{0} is where we'll look for the Arduino.".format(arg))
            elif opt == "-o":
                out = arg
                print('Output Dir: {0}'.format(arg))
            elif opt == "-":
                # We don't actually detect this, but sending a - on its own kills further
                # processing. Not sure why. Will find out later.
                print("What?")
            else:
                print("Unrecognized option: {0}:{1}".format(opt, arg))
        return out, port

    out_dir, serial_port = process_args(sys.argv[1:])
    sup = ArduinoThreadSupervisor(serial_port, out_dir)
    try:
        sup.start()
        sleep(3)
        # Sample the GPS position four times, ten seconds apart.
        for i in range(1, 5):
            print("{0}: {1}".format(i, sup.current_gps_coords))
            sleep(10)
        sup.stop()
    except KeyboardInterrupt:
        sup.stop()
|
contents = open('rosalind_1a.txt', 'r')
text,k = contents.readlines()
d = {}
highest_score = 0
for i in range(int(k), len(text)):
k_mer = text[i-int(k):i]
if k_mer in d:
d[k_mer] += 1
if d[k_mer] > highest_score:
highest_score = d[k_mer]
else:
d[text[i-int(k):i]] = 1
to_print = []
for keys in d:
if d[keys] == highest_score:
to_print.append(keys)
for p in to_print:
print p
|
# Generated by Django 3.0.7 on 2020-08-06 13:18
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the no-longer-used Entry.provider field."""

    dependencies = [
        ('Inventario', '0009_auto_20200806_0814'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='entry',
            name='provider',
        ),
    ]
|
import tensorflow as tf
import matplotlib.pyplot as plt
import requests
import csv
import os
import numpy as np
from tensorflow.python.framework import ops
## load data
## name of data file
birth_weight_file = 'birth_weight.csv'

#birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
#
## Download data and create data file if file does not exist in current directory
#os.remove(birth_weight_file)
#if not os.path.exists(birth_weight_file):
#    birth_file = requests.get(birthdata_url)
#    birth_data = birth_file.text.split('\r\n')
#
#
#    birth_header = birth_data[0].split('\t')
#    birth_data = [[float(x) for x in y.split('\t') if len(x) >= 1]
#                  for y in birth_data[1:] if len(y) >= 1]
#    with open(birth_weight_file, "w") as f:
#        writer = csv.writer(f)
#        writer.writerows([birth_header])
#        writer.writerows(birth_data)
#        f.close()

# read birth weight data into memory
birth_data = []
with open(birth_weight_file, newline='') as csvfile:
    csv_reader = csv.reader(csvfile)
    birth_header = next(csv_reader)
    for row in csv_reader:
        birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]

# Extract y-target (birth weight)
# NOTE(review): target assumed to live in column 8 of the CSV -- confirm
# against the file's header row.
y_vals = np.array([x[8] for x in birth_data])

# Filter for features of interest
# note: here, we are interested in SEVEN (7) features
cols_of_interest = ['AGE', 'LWT', 'RACE', 'SMOKE', 'PTL', 'HT', 'UI']
x_vals = np.array([[x[ix] for ix, feature in enumerate(birth_header) if feature in cols_of_interest] for x in birth_data])

# Reset the graph for new run (TF1 graph-mode API)
ops.reset_default_graph()
sess = tf.Session()

# Fix all RNG seeds for reproducibility
seed = 3
tf.set_random_seed(seed)
np.random.seed(seed)
batch_size = 100

# split data 80-20
train_indices = np.random.choice(len(x_vals),round(len(x_vals)*0.8),replace=False)
test_indices = np.array( list( set(range(len(x_vals))) - set(train_indices) ) )
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]

# Record TRAINING column max and min for scaling of non-training data
# (scaling the test set with training statistics avoids leakage).
train_max = np.max(x_vals_train, axis=0)
train_min = np.min(x_vals_train, axis=0)
def normalize_cols(mat, max_vals, min_vals):
    """Min-max scale each column of `mat` using the given column extrema."""
    span = max_vals - min_vals
    return (mat - min_vals) / span
# Scale both splits with TRAINING statistics; nan_to_num zeroes out any
# constant column (where max == min causes a 0/0).
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train, train_max, train_min))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test, train_max, train_min))
# Define Variable Functions (weights and bias)
def init_weight(shape, st_dev):
    """Return a trainable weight Variable drawn from N(0, st_dev^2)."""
    weight = tf.Variable(tf.random_normal(shape, stddev=st_dev))
    return weight
def init_bias(shape, st_dev):
    """Return a trainable bias Variable drawn from N(0, st_dev^2)."""
    bias = tf.Variable(tf.random_normal(shape, stddev=st_dev))
    return bias
# Create Placeholders: 7 input features, 1 regression target (batch-size free)
x_data = tf.placeholder(shape=[None, 7], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create a fully connected layer:
def fully_connected(input_layer, weights, biases):
#print('before')
#print(tf.shape(input_layer))
#print(tf.shape(biases))
layer = tf.add(tf.matmul(input_layer, weights), biases)
#layer = tf.matmul(input_layer, weights)
#print(tf.shape(layer))
#print('after')
return tf.nn.relu(layer)
# -------Create the first hidden layer (25 nodes)--------
# (the original comments claimed 50/25/5 nodes; the actual shapes are 25/10/3)
weight_1 = init_weight(shape=[7, 25], st_dev=10.0)
bias_1 = init_bias(shape=[25], st_dev=10.0)
layer_1 = fully_connected(x_data, weight_1, bias_1)
# -------Create second hidden layer (10 nodes)--------
weight_2 = init_weight(shape=[25, 10], st_dev=10.0)
bias_2 = init_bias(shape=[10], st_dev=10.0)
layer_2 = fully_connected(layer_1, weight_2, bias_2)
# -------Create third hidden layer (3 nodes)--------
weight_3 = init_weight(shape=[10, 3], st_dev=10.0)
bias_3 = init_bias(shape=[3], st_dev=10.0)
layer_3 = fully_connected(layer_2, weight_3, bias_3)
# -------Create output layer (1 output value)--------
weight_4 = init_weight(shape=[3, 1], st_dev=10.0)
bias_4 = init_bias(shape=[1], st_dev=10.0)
final_output = fully_connected(layer_3, weight_4, bias_4)
# Declare loss function: L1 (mean absolute error)
loss = tf.reduce_mean(tf.abs(y_target - final_output))
# Declare optimizer
my_opt = tf.train.AdamOptimizer(0.02)
train_step = my_opt.minimize(loss)
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Training loop: 1200 generations of mini-batch SGD, tracking train/test loss
loss_vec = []
test_loss = []
for i in range(1200):
    # sample a random mini-batch (with replacement)
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index] # size 100x7
    rand_y = np.transpose([y_vals_train[rand_index]]) # size 100x1
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    fd_train = {x_data: rand_x, y_target: rand_y}
    temp_loss = sess.run(loss, feed_dict=fd_train)
    loss_vec.append(temp_loss)
    # evaluate on the held-out test split every generation
    fd_test = {x_data: x_vals_test, y_target: np.transpose([y_vals_test])}
    test_temp_loss = sess.run(loss, feed_dict=fd_test)
    test_loss.append(test_temp_loss)
    if (i+1) % 25 == 0:
        print('Generation: ' + str(i+1) + '. Loss = ' + str(temp_loss))
# Plot loss over time
# NOTE(review): the loss defined above is L1 (mean absolute error), not MSE,
# so the chart title below is misleading.
plt.plot(loss_vec, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss (MSE) per Generation')
plt.legend(loc='upper right')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
# Model Accuracy
# using TRAINED NEURAL NET
# NOTE(review): column 0 is presumably the low-birthweight indicator
# (1 when weight < 2500g) -- confirm against the dataset header.
actuals = np.array([x[0] for x in birth_data])
test_actuals = actuals[test_indices]
train_actuals = actuals[train_indices]
test_preds = [x[0] for x in sess.run(final_output, feed_dict={x_data: x_vals_test})]
train_preds = [x[0] for x in sess.run(final_output, feed_dict={x_data: x_vals_train})]
# threshold the regression output at 2500g to get a binary class prediction
test_preds = np.array([1.0 if x < 2500.0 else 0.0 for x in test_preds])
train_preds = np.array([1.0 if x < 2500.0 else 0.0 for x in train_preds])
# Print out accuracies
test_acc = np.mean([x == y for x, y in zip(test_preds, test_actuals)])
train_acc = np.mean([x == y for x, y in zip(train_preds, train_actuals)])
print('On predicting the category of low birthweight from regression output (<2500g):')
print('Test Accuracy: {}'.format(test_acc))
print('Train Accuracy: {}'.format(train_acc))
# Evaluate new points on the model
# Need vectors of 'AGE', 'LWT', 'RACE', 'SMOKE', 'PTL', 'HT', 'UI'
new_data = np.array([[35, 185, 1., 0., 0., 0., 1.],
                     [18, 160, 0., 1., 0., 0., 1.]])
# scale with the TRAINING min/max, exactly as the training data was scaled
new_data_scaled = np.nan_to_num(normalize_cols(new_data, train_max, train_min))
new_logits = [x[0] for x in sess.run(final_output, feed_dict={x_data: new_data_scaled})]
new_preds = np.array([1.0 if x < 2500.0 else 0.0 for x in new_logits])
print('New Data Predictions: {}'.format(new_preds))
|
import numpy as np
def hm_estimator(vqsamp, targ=None):
    """Root-mean-square distance between the sorted sample values and a
    target quantile function evaluated on the mid-point grid (j+0.5)/M.

    targ: optional callable h -> target quantile; defaults to
          custom_target(h, 0, 0, 5).
    """
    sorted_q = np.sort(vqsamp, kind='mergesort')
    if targ is None:
        target_fn = lambda h: custom_target(h, 0, 0, 5)
    else:
        target_fn = targ
    n = len(vqsamp)
    grid = [(1. / n) * (0.5 + j) for j in range(n)]
    targets = np.array([float(target_fn(h)) for h in grid])
    mean_sq = sum((1. / n) * (q - t) ** 2 for q, t in zip(sorted_q, targets))
    return float(np.sqrt(mean_sq))
def custom_target(x, q0=0, q1=1, p=2):
    """Piecewise power-law target on [0, 1], clamped to q0 below 0 and q1
    above 1. Positive p curves from q0 toward q1 as x**p; non-positive p
    mirrors the curve via (1-x)**|p| anchored at q1."""
    if x < 0:
        return q0
    if x > 1:
        return q1
    if p > 0:
        return q0 + (q1 - q0) * x ** p
    return q1 + (q0 - q1) * (1 - x) ** abs(p)
def quantile_estimator(vqsamp, quantile=0.9):
    """Return the empirical `quantile`-quantile of the sample.

    The grid is h_j = (j+1)/M; the first sorted sample value whose grid
    position strictly exceeds `quantile` is returned.

    Fixes: the original left the result variable unbound (NameError /
    UnboundLocalError) when quantile >= 1 or when the sample was empty.
    Now the sample maximum is returned for quantile >= 1, and empty input
    raises an explicit ValueError.
    """
    M = len(vqsamp)
    if M == 0:
        raise ValueError("vqsamp must be non-empty")
    vq = np.sort(vqsamp, kind='mergesort')
    for j, q in enumerate(vq):
        if (1. / M) * (j + 1) > quantile:
            return float(q)
    # no grid point exceeds `quantile` (quantile >= 1): return the maximum
    return float(vq[-1])
|
# hack script to get around the OS X tkinter threading limitation
# works on Python 2 and 3
import sys
# Use the narrow ImportError (the original bare `except:` would also have
# swallowed KeyboardInterrupt/SystemExit and any unrelated import failure).
try:
    import tkinter
except ImportError:  # Python 2 fallback
    import Tkinter as tkinter
try:
    from tkinter import filedialog
except ImportError:  # Python 2 fallback
    import tkFileDialog as filedialog
# Create a hidden 0x0 root window and force it frontmost so the native file
# dialogs opened by the helpers below appear above other applications.
root = tkinter.Tk()
root.withdraw()
root.overrideredirect(True)
root.geometry('0x0+0+0')
root.deiconify()
root.lift()
root.focus_force()
root.wm_attributes("-topmost", 1)
def saveDialog():
    """Ask the user where to save a .flipbook session file."""
    session_filetypes = [('Flipbook Sessions', '.flipbook')]
    return filedialog.asksaveasfilename(parent=root,
                                        filetypes=session_filetypes,
                                        defaultextension='.flipbook')
def openFlipbookDialog():
    """Ask the user to pick an existing .flipbook session file."""
    session_filetypes = [('Flipbook Sessions', '.flipbook'), ('All', '*')]
    return filedialog.askopenfilename(parent=root, filetypes=session_filetypes)
def openPDBDialog():
    """Ask the user to pick a PDB structure file."""
    pdb_filetypes = [('PDB Files', '.pdb'), ('All', '*')]
    return filedialog.askopenfilename(parent=root, filetypes=pdb_filetypes)
def openAny():
    """Ask the user to pick any file (no type filter)."""
    chosen = filedialog.askopenfilename(parent=root)
    return chosen
if __name__ == '__main__':
    # Map the CLI action argument onto its dialog; unknown actions yield ''.
    # The chosen path is written to stdout for the parent process to read.
    action = sys.argv[1]
    dispatch = {
        'save': saveDialog,
        'openFlipbook': openFlipbookDialog,
        'openPDB': openPDBDialog,
        'openAny': openAny,
    }
    handler = dispatch.get(action)
    out = handler() if handler is not None else ''
    sys.stdout.write(out)
|
#!/usr/bin/env python3
"""https://snlp2020.github.io/a5/
Course: Statistical Language processing - SS2020
Assignment: A5
Author: Jinghua Xu
Description: experiment with ANNs
Honor Code: I pledge that this program represents my own work.
"""
import random
import numpy as np
from sklearn import preprocessing
import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
def read_data(treebank, shuffle=True, lowercase=True,
              tags=None):
    """ Read a CoNLL-U formatted treebank, return words and POS tags.

    Parameters:
    -----------
    treebank: The path to the treebank (individual file).
    shuffle: If True, shuffle the lines before extracting pairs.
    lowercase: Convert words (tokens) to lowercase.
    tags: If not None, return only the pairs whose POS tag is in `tags`.

    Returns: (a tuple)
    -----------
    words: A list of words (unique (word, pos) pairs, in encounter order).
    pos: Corresponding POS tags.
    """
    word_pos = []
    # O(1) duplicate detection; the original scanned the growing list for
    # every line, which is O(n^2) over the treebank.
    seen = set()
    with open(treebank, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    if shuffle:
        random.shuffle(lines)
    for line in lines:
        # skip blank lines and comments
        if not line.strip() or line.startswith('#'):
            continue
        vs = line.split('\t')
        # skip multi-word tokens (id "2-3") and empty nodes (id "2.1")
        idx = vs[0]
        if '-' in idx or '.' in idx:
            continue
        word = vs[1]
        pos = vs[3]
        if lowercase:
            word = word.lower()
        # keep only unique (word, pos) pairs that pass the tag filter
        if (word, pos) in seen:
            continue
        if tags is not None and pos not in tags:
            continue
        seen.add((word, pos))
        word_pos.append((word, pos))
    words = [x[0] for x in word_pos]
    pos = [x[1] for x in word_pos]
    return words, pos
class WordEncoder:
    """An encoder for a sequence of words.

    Each word is encoded as a sequence of one-hot characters. Words longer
    than `maxlen` are truncated; shorter ones are padded with 0-vectors.
    The special symbols <s> and </s> are prepended/appended to every word
    before padding or truncation, and a dedicated symbol ('uk') is reserved
    for unknown characters (distinct from the all-zero padding).

    The result is either a 2D array (all character vectors of a word
    concatenated) or a 3D array with one vector per character — see
    transform().

    Parameters:
    -----------
    maxlen: The length each word (including <s> and </s>) is padded or
            truncated to. If not specified, fit() sets it to cover the
            longest training word *plus* the two boundary symbols.
            (The original used the bare word length here, which truncated
            the closing </s> even for the longest training word,
            contradicting this docstring.)
    """

    def __init__(self, maxlen=None):
        # set up in fit(): symbol -> one-hot index, and vocabulary size
        self._maxlen = maxlen
        self._char2idx = dict()
        self._nchars = len(self._char2idx)

    def fit(self, words):
        """Fit the encoder using words.

        Collects the character vocabulary and, when maxlen was not given,
        the padded/truncated word length.

        Parameters:
        -----------
        words: The input words used for training.

        Returns: None
        """
        fit_maxlen = self._maxlen is None
        if fit_maxlen:
            self._maxlen = 0
        # special symbols: sequence boundaries and the unknown-char marker
        self._char2idx['<s>'] = 0
        self._char2idx['</s>'] = 1
        self._char2idx['uk'] = 2
        idx = 3
        for word in words:
            # +2 accounts for the <s>/</s> symbols added in transform()
            if fit_maxlen and len(word) + 2 > self._maxlen:
                self._maxlen = len(word) + 2
            for char in word:
                if char not in self._char2idx:
                    self._char2idx[char] = idx
                    idx += 1
        self._nchars = len(self._char2idx)

    def _one_hot(self, symbol):
        """Return the one-hot list for a single symbol ('uk' if unseen)."""
        vec = [0] * self._nchars
        vec[self._char2idx.get(symbol, self._char2idx['uk'])] = 1
        return vec

    def transform(self, words, pad='right', flat=True):
        """ Transform a sequence of words to a sequence of one-hot vectors.

        Parameters:
        -----------
        words: The input words to encode.
        pad: Padding direction, either 'right' or 'left'.
        flat: Return shape (n_words, maxlen * n_chars) when True,
              (n_words, maxlen, n_chars) otherwise.

        Returns:
        -----------
        encoded_data: the encoded words (a 2D or 3D numpy array).

        Raises: ValueError on an illegal `pad` or non-bool `flat`.
        """
        if not isinstance(flat, bool) or pad not in ('right', 'left'):
            raise ValueError(
                "Illegal Argument! pad can only be 'right' or 'left', flat has to be bool!")
        encoded_words = []
        padding = [0] * self._nchars
        for word in words:
            # add boundary symbols, then truncate to maxlen
            symbols = (['<s>'] + list(word) + ['</s>'])[:self._maxlen]
            vectors = [self._one_hot(s) for s in symbols]
            n_pad = self._maxlen - len(symbols)
            if pad == 'right':
                vectors = vectors + [padding] * n_pad
            else:
                vectors = [padding] * n_pad + vectors
            if flat:
                flat_vec = []
                for vec in vectors:
                    flat_vec.extend(vec)
                encoded_words.append(flat_vec)
            else:
                encoded_words.append(vectors)
        return np.array(encoded_words)
def print_stats(test_pos, y_test_pred):
    """Print macro-averaged precision, recall and F1, plus the confusion
    matrix, for the given gold/predicted POS tag sequences.

    Parameters:
    -----------
    test_pos: gold POS tags of the test data.
    y_test_pred: predicted POS tags of the test data.

    Returns: None
    """
    precision = precision_score(test_pos, y_test_pred, average="macro")
    recall = recall_score(test_pos, y_test_pred, average="macro")
    f1 = f1_score(test_pos, y_test_pred, average="macro")
    matrix = confusion_matrix(test_pos, y_test_pred)
    print(f'macro-averaged precision: {precision}')
    print(f'macro-averaged recall: {recall}')
    print(f'macro-averaged f-1: {f1}')
    print(f'confusion-matrix:\n {matrix}')
def train_test_mlp(train_x, train_pos, test_x, test_pos):
    """Train and test an MLP predicting POS tags from encoded words.

    Parameters:
    -----------
    train_x: word encodings, a 2D numpy array (n_words, maxlen * n_chars).
    train_pos: POS tag for each row of train_x.
    test_x, test_pos: as train_x/train_pos, for the test set.

    Returns: None (prints evaluation statistics via print_stats).
    """
    lb = preprocessing.LabelBinarizer()
    lb.fit(train_pos)
    encoded_train_pos = lb.transform(train_pos)
    n_classes = encoded_train_pos.shape[1]
    input_shape = (train_x.shape[1],)

    def build_model():
        # One hidden ReLU layer, softmax output. Fresh (re-initialized)
        # weights on every call so re-training really starts from scratch.
        model = Sequential()
        model.add(Dense(units=64, activation='relu', input_shape=input_shape))
        model.add(Dense(units=n_classes, activation='softmax'))
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    # Pick the best epoch on held-out data. The original used the *training*
    # loss, which typically decreases monotonically and thus always picked
    # the last epoch.
    mlp = build_model()
    hist = mlp.fit(train_x, encoded_train_pos, epochs=50, validation_split=0.2)
    val_losses = hist.history['val_loss']
    best_epoch = val_losses.index(min(val_losses)) + 1
    # Re-train from scratch on the full training set up to the best epoch.
    # (The original merely re-compiled the already-trained model, which does
    # NOT reset its weights.)
    mlp = build_model()
    mlp.fit(train_x, encoded_train_pos, epochs=best_epoch)
    # evaluate: macro precision/recall/F1 and the confusion matrix
    y_test_pred = lb.inverse_transform(mlp.predict(test_x))
    print_stats(test_pos, y_test_pred)
def train_test_rnn(trn_x, trn_pos, tst_x, tst_pos):
    """Train and test an LSTM predicting POS tags from encoded words.

    Parameters:
    -----------
    trn_x: word encodings, a 3D numpy array (n_words, maxlen, n_chars).
    trn_pos: POS tag for each row of trn_x.
    tst_x, tst_pos: as trn_x/trn_pos, for the test set.

    Returns: None (prints evaluation statistics via print_stats).
    """
    lb = preprocessing.LabelBinarizer()
    lb.fit(trn_pos)
    encoded_train_pos = lb.transform(trn_pos)
    n_classes = encoded_train_pos.shape[1]
    timesteps, n_chars = trn_x.shape[1], trn_x.shape[2]

    def build_model():
        # Fresh weights on every call so re-training starts from scratch.
        model = Sequential()
        model.add(LSTM(64, input_shape=(timesteps, n_chars), activation='relu'))
        model.add(Dense(n_classes, activation='softmax'))
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    # Pick the best epoch by *validation* loss (the original used training
    # loss, which usually decreases monotonically and always picked epoch 50).
    rnn = build_model()
    hist = rnn.fit(trn_x, encoded_train_pos, epochs=50, validation_split=0.2)
    val_losses = hist.history['val_loss']
    best_epoch = val_losses.index(min(val_losses)) + 1
    # Re-train from scratch on the full training set (the original only
    # re-compiled the trained model, which does not reset its weights).
    rnn = build_model()
    rnn.fit(trn_x, encoded_train_pos, epochs=best_epoch)
    y_test_pred = lb.inverse_transform(rnn.predict(tst_x))
    print_stats(tst_pos, y_test_pred)
# Script entry point: train/evaluate the MLP (5.3) and the LSTM (5.4)
# POS taggers on the UD English-ParTUT treebank.
if __name__ == '__main__':
    #
    # dataset downloaded from https://github.com/UniversalDependencies/UD_English-ParTUT
    words, pos_tags = read_data("en_partut-ud-train.conllu")
    test_words, test_pos = read_data("en_partut-ud-test.conllu")
    # character-level one-hot encoder, fitted on the training words only
    encoder = WordEncoder()
    encoder.fit(words)
    # 5.3: flat 2D encodings for the MLP
    encoded_array = encoder.transform(words)
    test_encoded_array = encoder.transform(test_words)
    train_test_mlp(encoded_array, pos_tags, test_encoded_array, test_pos)
    print("-------------------------------------------------------------------------------------------------------------")
    # 5.4: 3D (word x position x char) encodings for the LSTM
    rnn_train_words = encoder.transform(words, flat=False)
    rnn_test_words = encoder.transform(test_words, flat=False)
    train_test_rnn(rnn_train_words, pos_tags, rnn_test_words, test_pos)
|
#!/usr/bin/env python
from the_window import GameWindow
from screens import level_from_file
import pyglet
def go():
    """Launch the game: open the main window on level "one", unpause it,
    and hand control to the pyglet event loop."""
    game_window = GameWindow()
    game_window.thescreen = level_from_file.GameplayScreen(game_window, "one")
    game_window.unpause()
    pyglet.app.run()
# Run the game when executed as a script.
if __name__ == "__main__":
    go()
|
# -*- coding: utf-8 -*-
import scrapy
import time
from movie.items import LeetcodeItem
class MeijuSpider(scrapy.Spider):
    """Crawl bookshadow.com's LeetCode index and scrape each solution page.

    (Removed the commented-out leftovers of the earlier meijutt.com spider.)
    """
    name = 'meiju'
    allowed_domains = ['bookshadow.com']
    base_url = "http://bookshadow.com"
    start_urls = ['http://bookshadow.com/leetcode/']

    def parse(self, response):
        """Yield one detail-page request per question link.

        Items are numbered from 1. The original pre-incremented a counter
        that started at 1, so the first item got num=2 (off by one) — fixed
        with enumerate(..., start=1).
        """
        questions = response.xpath('//td/a[starts-with(@href, "/weblog")]')
        for num, question in enumerate(questions, start=1):
            item = LeetcodeItem()
            item['name'] = question.xpath('./text()').extract()[0]
            item['url'] = self.base_url + question.xpath('./@href').extract()[0]
            item['num'] = num
            yield scrapy.Request(item['url'], meta={'item': item}, callback=self.detail_parse)

    def detail_parse(self, response):
        """Fill in the solution body from the detail page and yield the item."""
        # item carried over from parse() via the request meta
        item = response.meta['item']
        # full text of the article body
        item['content'] = response.xpath("//div[@class='entry-content lead']").xpath('string(.)').extract()[0]
        yield item
|
class AlreadyExistsError(Exception):
    """Raised if a product of an onboarding process exists already."""
|
import numpy as np
from scipy import sparse, stats, linalg
from project.esn.utils import mydataclass, pre_proc_args, force_2dim
import pickle as pic
''' generator for scipy and np matrix '''
def generate_smatrix(m, n, density=1, bound=0.5, **kwargs):
    """Generate an m x n sparse random matrix in CSR format.

    Nonzero entries are drawn from Uniform(-bound, -bound + 1); with the
    default bound=0.5 this is the symmetric Uniform(-0.5, 0.5).
    NOTE(review): the scale is fixed at 1, so for bound != 0.5 the interval
    is asymmetric — confirm this is intended.
    """
    draw_value = stats.uniform(-bound, 1).rvs
    return sparse.random(m,
                         n,
                         density=density,
                         format="csr",
                         data_rvs=draw_value)
def generate_rmatrix(m, n, bound=0.5, **kwargs):
    """Generate a dense m x n matrix with entries uniform in
    [-bound, 1 - bound) (symmetric around 0 for the default bound=0.5)."""
    uniform01 = np.random.rand(m, n)
    return uniform01 - bound
def scale_spectral_smatrix(matrix: sparse.spmatrix,
                           spectral_radius=1.25,
                           in_place=False):
    """Rescale *matrix* so its spectral radius equals `spectral_radius`.

    The largest-magnitude eigenvalues are estimated with ARPACK; when the
    solver does not converge, the eigenvalues it found so far are used.
    Returns the scaled matrix when in_place is False, otherwise mutates
    *matrix* and returns None.
    """
    try:
        eigenvalues = sparse.linalg.eigs(matrix)[0]
    except sparse.linalg.ArpackNoConvergence as err:
        # use whatever eigenvalues ARPACK managed to find before giving up
        eigenvalues = err.eigenvalues
    factor = spectral_radius / max(abs(eigenvalues))
    if in_place:
        matrix *= factor
    else:
        return matrix * factor
from pprint import pprint
@pre_proc_args({"inputs": force_2dim, "states": force_2dim})
def build_extended_states(inputs: np.ndarray, states: np.ndarray, init_len=0):
    """Stack inputs and reservoir states (skipping the first `init_len`
    time steps) into the extended state matrix, one row per time step."""
    trimmed_inputs = inputs.T[:, init_len:]
    trimmed_states = states.T[:, init_len:]
    return np.vstack((trimmed_inputs, trimmed_states)).T
@mydataclass(init=True, repr=True, check=False)
class Esn_matrixs():
    """Wraps all the matrices needed by an echo-state network.

    On construction the reservoir W_res is rescaled in place to the
    requested spectral radius unless `scaled` is already True.

    NOTE(review): `sparse.issparse` in the W_res annotation names a
    function, not a type, and the np.zeros defaults are shared mutable
    objects — presumably tolerated by the custom `mydataclass` decorator;
    verify before changing.
    """
    # input weights (n_reservoir x n_inputs)
    W_in: np.ndarray
    # sparse reservoir matrix, scaled in __post_init__
    W_res: sparse.issparse
    # optional feedback weights (empty by default)
    W_feb: np.ndarray = np.zeros((0, 0))
    # readout weights, typically learned later (empty by default)
    W_out: np.ndarray = np.zeros((0, 0))
    spectral_radius: float = 1.25
    density: float = 1.
    # guards against rescaling an already-scaled reservoir
    scaled: bool = False
    def __post_init__(self):
        # scale the reservoir exactly once
        if not self.scaled :
            scale_spectral_smatrix(self.W_res,
                                   spectral_radius=self.spectral_radius,
                                   in_place=True)
def esn_matrixs(W_in, *args, **kwargs):
    """Build an Esn_matrixs whose reservoir is a freshly generated square
    sparse matrix sized to match W_in's first dimension."""
    n_units = W_in.shape[0]
    reservoir = generate_smatrix(n_units, n_units, **kwargs)
    return Esn_matrixs(W_in, reservoir, *args, **kwargs)
def load_smatrix(path, idx):
    """Load a pickled result dict and adapt it to Esn_matrixs kwargs.

    Drops the bookkeeping keys 'repetition' and 'size', renames 'result'
    to 'W_res' while selecting its idx-th entry, and keeps every other
    key/value pair unchanged.
    """
    with open(path, "rb") as f:
        stored = pic.load(f)
    adapted = {}
    for key, value in stored.items():
        if key in ("repetition", "size"):
            continue
        if key == "result":
            adapted["W_res"] = value[idx]
        else:
            adapted[key] = value
    return adapted
def read_matrix(W_in, path, idx, *args, **kwargs):
    """Build an Esn_matrixs from W_in plus matrices restored from the
    pickle file at *path* (entry *idx* of its 'result' list)."""
    restored = load_smatrix(path, idx)
    return Esn_matrixs(W_in, *args, **restored, **kwargs)
|
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from tensorboardX import SummaryWriter
import sys
import time
import json
from qanet.tvqanet import TVQANet
from tvqa_dataset import TVQADataset, pad_collate, prepare_inputs
from config import BaseOptions
import logging
logging.basicConfig()
def mask_logits(target, mask):
    """Element-wise product: zeroes out positions where mask is 0."""
    masked = target * mask
    return masked
def IOFSM(selection_greedy, targets, ts_target, ts_target_mask):
    """In/out-of-span margin loss over per-frame selection scores.

    Rewards high scores inside the ground-truth [st, ed] span of the
    correct answer and low scores outside it:
        loss = 1 + mean(out-of-span score) - mean(in-span score)

    Shapes (inferred from the view/index pattern below — confirm):
        selection_greedy: presumably (N*5, Li) per-frame scores.
        targets: (N,) index of the correct answer among the 5 candidates.
        ts_target: dict with 1-D "st"/"ed" tensors of span boundaries.
        ts_target_mask: (N, Li) 1/0 mask of valid frames.

    Returns: (summed loss, summed out-of-span mean, summed in-span mean).
    """
    bsz = targets.size(0)
    # Li (frames per answer); read from dim 1 *before* the view, consistent
    # with selection_greedy arriving as (N*5, Li)
    img_len = selection_greedy.size(1)
    selection_greedy = selection_greedy.view(bsz, 5, -1)
    # keep only the scores of the ground-truth answer
    selection_greedy = selection_greedy[torch.arange(bsz, dtype=torch.long), targets] #(N, Li)
    # binary in-span label from the inclusive [st, ed] boundaries
    label = torch.zeros(bsz, img_len).cuda()
    st_list = ts_target["st"].tolist()
    ed_list = ts_target["ed"].tolist()
    for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
        label[idx, st:ed+1] = 1
    label_inv = (label != 1).float()
    # mean score over the *masked* out-of-span frames
    rewards_greedy_inv = (selection_greedy * label_inv * ts_target_mask).sum(-1) / (label_inv * ts_target_mask).sum(-1)
    loss = 1 + rewards_greedy_inv - ((selection_greedy * label).sum(-1) / label.sum(-1))
    return loss.sum(), rewards_greedy_inv.sum(), ((selection_greedy * label).sum(-1) / label.sum(-1)).sum()
def binaryCrossEntropy(max_statement_sm_sigmoid, targets, ts_target, ts_target_mask):
    """Per-frame binary cross-entropy against the ground-truth temporal
    span of the correct answer, masked to valid frames and scaled by 0.1.

    NOTE(review): despite the "sigmoid" in the name, the inputs are treated
    as raw logits (BCE-with-logits below) — confirm against the model.
    """
    bsz = targets.size(0)
    max_statement_sm_sigmoid = max_statement_sm_sigmoid.view(bsz, 5, -1)
    img_len = max_statement_sm_sigmoid.size(2)
    # keep only the scores of the ground-truth answer
    max_statement_sm_sigmoid = max_statement_sm_sigmoid[torch.arange(bsz, dtype=torch.long), targets]
    # binary in-span label from the inclusive [st, ed] boundaries
    label = torch.zeros(bsz, img_len).cuda()
    st_list = ts_target["st"].tolist()
    ed_list = ts_target["ed"].tolist()
    for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
        label[idx, st:ed+1] = 1
    loss = nn.functional.binary_cross_entropy_with_logits(max_statement_sm_sigmoid, label, reduction="none")
    # zero out padded frames, then sum over everything
    loss = mask_logits(loss, ts_target_mask).sum()
    # down-weight relative to the classification loss
    loss *= 0.1
    return loss
def balanced_binaryCrossEntropy(max_statement_sm_sigmoid, targets, ts_target, ts_target_mask):
    """Class-balanced per-frame BCE against the ground-truth temporal span.

    Unlike binaryCrossEntropy above, the positive (in-span) and negative
    (out-of-span) losses are averaged separately and then added, so the
    usually much larger number of negative frames does not dominate.
    """
    bsz = targets.size(0)
    max_statement_sm_sigmoid = max_statement_sm_sigmoid.view(bsz, 5, -1)
    img_len = max_statement_sm_sigmoid.size(2)
    # keep only the scores of the ground-truth answer
    max_statement_sm_sigmoid = max_statement_sm_sigmoid[torch.arange(bsz, dtype=torch.long), targets] #(N, Li)
    # binary in-span label from the inclusive [st, ed] boundaries
    label = torch.zeros(bsz, img_len).cuda()
    st_list = ts_target["st"].tolist()
    ed_list = ts_target["ed"].tolist()
    for idx, (st, ed) in enumerate(zip(st_list, ed_list)):
        label[idx, st:ed+1] = 1
    label_inv = (label != 1).float()
    loss = nn.functional.binary_cross_entropy_with_logits(max_statement_sm_sigmoid, label, reduction="none")
    # mean over positive frames and mean over masked negative frames
    loss_p = mask_logits(loss, label).sum(-1) / label.sum(-1)
    loss_n = mask_logits(loss, label_inv * ts_target_mask).sum(-1) / (label_inv * ts_target_mask).sum(-1)
    loss = loss_p + loss_n
    return loss.sum()
def train(opt, dset, model, criterion, optimizer, epoch, previous_best_acc):
    """Train for one epoch, validating every `opt.log_freq` batches.

    Writes the best validation predictions to best_github.json (and a model
    checkpoint after epoch 10), appends a per-step accuracy log, and
    returns the best validation accuracy seen so far.
    """
    dset.set_mode("train")
    model.train()
    train_loader = DataLoader(dset, batch_size=opt.bsz, shuffle=True,
                              collate_fn=pad_collate, num_workers=opt.num_workers, pin_memory=True)
    # running per-batch statistics, reset after every log/validation step
    train_loss = []
    train_loss_iofsm = []
    train_loss_accu = []
    train_loss_ts = []
    train_loss_cls = []
    valid_acc_log = ["batch_idx\tacc\tacc1\tacc2"]
    train_corrects = []
    torch.set_grad_enabled(True)
    # truncation lengths for the various input streams
    max_len_dict = dict(
        max_sub_l=opt.max_sub_l,
        max_vid_l=opt.max_vid_l,
        max_vcpt_l=opt.max_vcpt_l,
        max_qa_l=opt.max_qa_l,
        max_dc_l=opt.max_dc_l,
    )
    timer_dataloading = time.time()
    for batch_idx, batch in tqdm(enumerate(train_loader)):
        timer_start = time.time()
        model_inputs, targets, qids = prepare_inputs(batch, max_len_dict=max_len_dict, device=opt.device)
        try:
            timer_start = time.time()
            outputs, max_statement_sm_sigmoid_ = model(model_inputs)
            max_statement_sm_sigmoid, max_statement_sm_sigmoid_selection = max_statement_sm_sigmoid_
            # total loss = answer classification + balanced per-frame BCE
            # + in/out-of-span margin (IOFSM)
            temporal_loss = balanced_binaryCrossEntropy(max_statement_sm_sigmoid, targets, model_inputs["ts_label"], model_inputs["ts_label_mask"])
            cls_loss = criterion(outputs, targets)
            iofsm_loss, _, _ = IOFSM(max_statement_sm_sigmoid_selection, targets, model_inputs["ts_label"], model_inputs["ts_label_mask"])
            att_loss_accu = 0
            loss = cls_loss + temporal_loss + iofsm_loss
            timer_start = time.time()
            loss.backward(retain_graph=False)
            torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
            optimizer.step()
            optimizer.zero_grad()
            # record batch statistics
            train_loss.append(loss.data.item())
            train_loss_iofsm.append(float(iofsm_loss))
            train_loss_ts.append(float(temporal_loss))
            train_loss_cls.append(cls_loss.item())
            pred_ids = outputs.data.max(1)[1]
            train_corrects += pred_ids.eq(targets.data).tolist()
        except RuntimeError as e:
            # OOM batches are skipped; any other runtime error is fatal
            if "out of memory" in str(e):
                print("WARNING: ran out of memory, skipping batch")
            else:
                print("RuntimeError {}".format(e))
                sys.exit(1)
        if batch_idx % opt.log_freq == 0:
            niter = epoch * len(train_loader) + batch_idx
            if batch_idx == 0:
                # no batch statistics accumulated yet
                train_acc = 0
                train_loss = 0
                train_loss_iofsm = 0
                train_loss_ts = 0
                train_loss_cls = 0
            else:
                # averages over the batches since the last log step
                train_acc = sum(train_corrects) / float(len(train_corrects))
                train_loss = sum(train_loss) / float(len(train_corrects))
                train_loss_iofsm = sum(train_loss_iofsm) / float(len(train_corrects))
                train_loss_cls = sum(train_loss_cls) / float(len(train_corrects))
                train_loss_ts = sum(train_loss_ts) / float(len(train_corrects))
            valid_acc, valid_loss, qid_corrects, valid_acc1, valid_acc2, submit_json_val = \
                validate(opt, dset, model, criterion, mode="valid")
            valid_log_str = "%02d\t%.4f\t%.4f\t%.4f" % (batch_idx, valid_acc, valid_acc1, valid_acc2)
            valid_acc_log.append(valid_log_str)
            if valid_acc > previous_best_acc:
                # checkpoint the best predictions (and weights after epoch 10)
                with open("best_github.json", 'w') as cqf:
                    json.dump(submit_json_val, cqf)
                previous_best_acc = valid_acc
                if epoch >= 10:
                    torch.save(model.state_dict(), os.path.join("./results/best_valid_to_keep", "best_github_7420.pth"))
            print("Epoch {:02d} [Train] acc {:.4f} loss {:.4f} loss_iofsm {:.4f} loss_ts {:.4f} loss_cls {:.4f}"
                  "[Val] acc {:.4f} loss {:.4f}"
                  .format(epoch, train_acc, train_loss, train_loss_iofsm, train_loss_ts, train_loss_cls,
                          valid_acc, valid_loss))
            # switch back to training mode after validate() and reset stats
            torch.set_grad_enabled(True)
            model.train()
            dset.set_mode("train")
            train_corrects = []
            train_loss = []
            train_loss_iofsm = []
            train_loss_ts = []
            train_loss_cls = []
            timer_dataloading = time.time()
    with open(os.path.join(opt.results_dir, "valid_acc.log"), "a") as f:
        f.write("\n".join(valid_acc_log) + "\n")
    return previous_best_acc
def validate(opt, dset, model, criterion, mode="valid"):
    """Evaluate the model on the `mode` split (no gradients).

    Returns (accuracy, mean loss, "qid\tcorrect" lines, acc_1st, acc_2nd,
    qid -> predicted-answer dict). acc_1st/acc_2nd are always 0.0 here and
    are kept only for interface compatibility with callers.
    """
    dset.set_mode(mode)
    torch.set_grad_enabled(False)
    model.eval()
    valid_loader = DataLoader(dset, batch_size=opt.test_bsz, shuffle=False,
                              collate_fn=pad_collate, num_workers=opt.num_workers, pin_memory=True)
    submit_json_val = {}
    valid_qids = []
    valid_loss = []
    valid_corrects = []
    # truncation lengths for the various input streams
    max_len_dict = dict(
        max_sub_l=opt.max_sub_l,
        max_vid_l=opt.max_vid_l,
        max_vcpt_l=opt.max_vcpt_l,
        max_qa_l=opt.max_qa_l,
        max_dc_l=opt.max_dc_l,
    )
    for val_idx, batch in enumerate(valid_loader):
        model_inputs, targets, qids = prepare_inputs(batch, max_len_dict=max_len_dict, device=opt.device)
        outputs, _= model(model_inputs)
        loss = criterion(outputs, targets)
        valid_qids += [int(x) for x in qids]
        valid_loss.append(loss.data.item())
        # predicted answer = argmax over the candidate scores
        pred_ids = outputs.data.max(1)[1]
        for qdix, q_id in enumerate(model_inputs['qid']):
            q_id_str = str(q_id)
            submit_json_val[q_id_str] = int(pred_ids[qdix].item())
        valid_corrects += pred_ids.eq(targets.data).tolist()
    # placeholder accuracies, never computed
    acc_1st, acc_2nd = 0., 0.
    valid_acc = sum(valid_corrects) / float(len(valid_corrects))
    valid_loss = sum(valid_loss) / float(len(valid_corrects))
    qid_corrects = ["%d\t%d" % (a, b) for a, b in zip(valid_qids, valid_corrects)]
    return valid_acc, valid_loss, qid_corrects, acc_1st, acc_2nd, submit_json_val
def main():
    """Build dataset/model/optimizer and run the training loop with early
    stopping; returns the results directory name component."""
    opt = BaseOptions().parse()
    # determinism: fixed seeds and deterministic cuDNN
    torch.manual_seed(opt.seed)
    cudnn.benchmark = False
    cudnn.deterministic = True
    np.random.seed(opt.seed)
    dset = TVQADataset(opt)
    opt.vocab_size = len(dset.word2idx)
    model = TVQANet(opt)
    if opt.device.type == "cuda":
        print("CUDA enabled.")
        if len(opt.device_ids) > 1:
            print("Use multi GPU", opt.device_ids)
            model = torch.nn.DataParallel(model, device_ids=opt.device_ids, output_device=0) # use multi GPU
    model.to(opt.device)
    # model.load_state_dict(torch.load("./path/best_release_7420.pth"))
    criterion = nn.CrossEntropyLoss(reduction="sum").to(opt.device)
    # optimize only the parameters that require gradients
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=opt.lr,
        weight_decay=opt.wd)
    best_acc = 0.
    start_epoch = 0
    early_stopping_cnt = 0
    early_stopping_flag = False
    for epoch in range(start_epoch, opt.n_epoch):
        if not early_stopping_flag:
            niter = epoch * np.ceil(len(dset) / float(opt.bsz))
            # train() runs periodic validation internally and returns the
            # best validation accuracy seen so far
            cur_acc = train(opt, dset, model, criterion, optimizer, epoch, best_acc)
            is_best = cur_acc > best_acc
            best_acc = max(cur_acc, best_acc)
            # stop after opt.max_es_cnt consecutive epochs without improvement
            if not is_best:
                early_stopping_cnt += 1
                if early_stopping_cnt >= opt.max_es_cnt:
                    early_stopping_flag = True
            else:
                early_stopping_cnt = 0
        else:
            print("=> early stop with valid acc %.4f" % best_acc)
            break
        # fixed learning-rate decay after epoch 10
        if epoch == 10:
            for g in optimizer.param_groups:
                g['lr'] = 0.0002
    return opt.results_dir.split("/")[1]
# Script entry point: train and keep the results directory name.
if __name__ == "__main__":
    results_dir = main()
|
import os
import csv
from enum import Enum
class Column(Enum):
    """Positional index of each field in a classes.csv row."""
    timestamp = 0
    location = 1
    classtime = 2
    # open_seats + taken_seats is used below to derive the total slot count
    open_seats = 3
    taken_seats = 4
    is_full = 5
# Keys of the per-class summary dicts.
IS_FULL = 'is_full'
TOTAL_SLOTS = 'total_slots'
# summary[location][classtime] -> {'is_full': ..., 'total_slots': ...}
summary = {}
with open(os.getcwd() + '/classes.csv', 'r') as csvfile:
    # iterate newest-first: the first row seen per class is the latest check
    for row in reversed(list(csv.reader(csvfile))):
        if not row: continue
        location_summary = summary.setdefault(row[Column.location.value], {})
        class_summary = location_summary.get(row[Column.classtime.value])
        if not class_summary:
            # this is the last time we checked this class, record if it filled up
            location_summary.setdefault(row[Column.classtime.value], {IS_FULL: row[Column.is_full.value]})
        elif not class_summary.get(TOTAL_SLOTS) and row[Column.open_seats.value]:
            # this check was made when the class wasn't full yet, record total slots
            class_summary[TOTAL_SLOTS] = str(int(row[Column.open_seats.value]) + int(row[Column.taken_seats.value]))
            location_summary.setdefault(row[Column.classtime.value], class_summary)
# Print a per-location report; setdefault supplies "NA" for values that
# were never recorded.
for location in summary.keys():
    print(location + ":")
    for classtime in summary.get(location).keys():
        print("\tclass at " + classtime +
              " had " + summary.get(location).get(classtime).setdefault(TOTAL_SLOTS, "NA") + " slots" +
              " and was full? " + summary.get(location).get(classtime).setdefault(IS_FULL, "NA"))
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
def simple_ann():
    """Train a tiny 9-5-9 OpenCV MLP on a single sample and print one
    prediction.

    The original used Python-2-only print statements; print() calls below
    are valid on both Python 2 and 3 with unchanged output.
    """
    import cv2
    import numpy as np
    ann = cv2.ml.ANN_MLP_create()
    # layer sizes: 9 inputs, 5 hidden units, 9 outputs
    ann.setLayerSizes(np.array([9,5,9],dtype=np.uint8))
    # weight updates via backpropagation
    ann.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
    ann.train(np.array([[1.2,1.3,1.9,2.2,2.3,2.9,3.0,3.2,3.3]],dtype=np.float32),
              cv2.ml.ROW_SAMPLE,
              np.array([[0,0,0,0,0,1,0,0,0]],dtype=np.float32))
    print(ann.predict(np.array([[1.0,1.5,1.2,0.0,0.5,0.8,3.0,3.1,3.8]],dtype=np.float32)))
def simple_ann2():
    """Train a 3-6-4 OpenCV MLP to classify four animal classes from
    synthetic random 3-feature samples, then print one prediction per class.

    The original used Python-2-only print statements; print() calls below
    are valid on both Python 2 and 3 with unchanged output.
    """
    import cv2
    import numpy as np
    from random import randint
    animals_net = cv2.ml.ANN_MLP_create()
    # resilient backprop, updating (not resetting) weights on each train()
    animals_net.setTrainMethod((cv2.ml.ANN_MLP_RPROP | cv2.ml.ANN_MLP_UPDATE_WEIGHTS))
    animals_net.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM)
    animals_net.setLayerSizes(np.array([3,6,4]))
    animals_net.setTermCriteria((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,1))
    # synthetic 3-feature samples, one generator per class
    def dog_sample():
        return [randint(1,5),randint(5,10),randint(15,22)]
    def cat_sample():
        return [randint(10,20),randint(45,50),randint(40,50)]
    def snake_sample():
        return [randint(22,30),randint(22,30),randint(22,30)]
    def dolphin_sample():
        return [randint(22,30),randint(1,10),randint(22,33)]
    # one-hot class targets
    def dog_class():
        return [1,0,0,0]
    def cat_class():
        return [0,1,0,0]
    def snake_class():
        return [0,0,1,0]
    def dolphin_class():
        return [0,0,0,1]
    # NOTE(review): 500,000,000 iterations will effectively never finish;
    # kept as-is to preserve behaviour, but this looks like a typo for a
    # far smaller count.
    for i in range(0,500000000):
        animals_net.train(np.array([dog_sample()],dtype=np.float32),cv2.ml.ROW_SAMPLE,np.array([dog_class()],dtype=np.float32))
        animals_net.train(np.array([cat_sample()],dtype=np.float32),cv2.ml.ROW_SAMPLE,np.array([cat_class()],dtype=np.float32))
        animals_net.train(np.array([snake_sample()],dtype=np.float32),cv2.ml.ROW_SAMPLE,np.array([snake_class()],dtype=np.float32))
        animals_net.train(np.array([dolphin_sample()],dtype=np.float32),cv2.ml.ROW_SAMPLE,np.array([dolphin_class()],dtype=np.float32))
    print(animals_net.predict(np.array([dog_sample()],dtype=np.float32)))
    print(animals_net.predict(np.array([cat_sample()],dtype=np.float32)))
    print(animals_net.predict(np.array([snake_sample()],dtype=np.float32)))
    print(animals_net.predict(np.array([dolphin_sample()],dtype=np.float32)))
simple_ann2() |
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')   # headless backend; must be set before pyplot import
import matplotlib.pyplot as plt
plt.rcParams['agg.path.chunksize'] = 20000
# The original `print 'reading data'` was Python-2-only syntax; print()
# calls are valid on both Python 2 and 3 with unchanged output.
print('reading data')
df = pd.read_csv('data.csv')
col_list = list(df.columns)
col_list.remove('y')
col_list.remove('id')
# per-row count of missing values, plus a missing-indicator column per feature
df['count_null'] = df.isnull().sum(axis = 1)
for col in col_list:
    df[str(col + '_na')] = pd.isnull(df[col]).astype(int)
# impute remaining NaNs with the column means
mean_values = df.mean(axis=0)
df.fillna(mean_values, inplace = True)
timestamp_list = sorted(np.unique(df['timestamp']))
# For every feature, plot the correlation between the feature and the
# target 'y' at each timestamp.
for feature in col_list:
    corr_list = []
    for time in timestamp_list:
        temp_df = df[df['timestamp'] == time][[feature, 'y']]
        corr_list.append(np.corrcoef(temp_df[feature].values, temp_df['y'].values)[0, 1])
    print(timestamp_list)
    print(corr_list)
    print('saving figure : ' + feature)
    plt.clf()
    plt.grid(True)
    plt.scatter(timestamp_list, corr_list)
    # NOTE(review): x holds timestamps and y holds correlations, so these
    # axis labels look misnamed — confirm intent before changing.
    plt.xlabel(feature)
    plt.ylabel('y')
    plt.savefig('images_time_corr/' + feature + '.png')
|
""" Interfaces
"""
from zope.interface import Interface
from zope import schema
from zope.i18nmessageid import MessageFactory
_ = MessageFactory("edw")
class IDataCube(Interface):
    """Marker interface for DataCube content objects (no fields declared)."""

    # -*- schema definition goes here -*-
class IDataCubeSettings(Interface):
    """ Settings for datacube
    """
    # Fallback image URL used for a DataCube that has no thumbnail of its own.
    datacube_thumbnail = schema.TextLine(
        title=_(u"DataCube thumbnail"),
        description=_(u"Default picture URL when no thumbnail is available"),
        required=True,
        default=u"++resource++scoreboard.theme.images/connect_thumbnail.png"
    )
    # Fallback image URL used for a visualization that has no thumbnail.
    visualization_thumbnail = schema.TextLine(
        title=_(u"Visualization thumbnail"),
        description=_(u"Default picture URL when no thumbnail is available"),
        required=True,
        default=u"++resource++scoreboard.theme.images/map_thumbnail.png"
    )
|
class Node(object):
    """A single link of a singly linked list: a payload plus a successor."""

    def __init__(self, val, next=None):
        self.value, self.next = val, next

    def __str__(self):
        return str(self.value)
class LinkedList(object):
    """Singly linked list with O(1) append, a tracked length, and both a
    recursive and an iterative in-place reversal."""

    def __init__(self):
        self.first = self.last = None  # head and tail references
        self.length = 0

    def __len__(self):
        return self.length

    def __iter__(self):
        """Yield the Node objects (not their values), head to tail."""
        node = self.first
        while node:
            yield node
            node = node.next

    def __str__(self):
        return 'Linked list: [{}]'.format('->'.join(map(str, iter(self))))

    def append(self, val):
        """Add a new node holding ``val`` at the tail."""
        if self.first is None:
            self.first = self.last = Node(val)
        else:
            self.last.next = Node(val)
            self.last = self.last.next
        self.length += 1

    def _remove_first(self):
        """Unlink and return the head node (caller ensures non-empty list)."""
        node = self.first
        if self.first is self.last:
            self.first = self.last = None
        else:
            self.first = self.first.next
        self.length -= 1
        return node

    def _remove_after(self, node, val):
        """Unlink and return the first node after ``node`` whose value equals
        ``val``; return None when no such node exists."""
        while node.next:
            if node.next.value == val:
                to_remove = node.next
                if node.next is self.last:
                    self.last = node  # removing the tail: back up `last`
                node.next = to_remove.next
                self.length -= 1
                return to_remove
            node = node.next

    def remove(self, val):
        """Remove and return the first node with value ``val``; None if absent."""
        if self.first is None:
            return None
        if self.first.value == val:
            return self._remove_first()
        return self._remove_after(self.first, val)

    def _reverse(self, node):
        """Recursively reverse the chain starting at ``node``; returns
        ``node`` itself, which becomes the tail of the reversed chain."""
        if node.next is None:
            return node
        prev = self._reverse(node.next)
        prev.next = node
        node.next = None
        return node

    def reverse(self):
        """Reverse the list in place (recursive; O(n) call-stack depth)."""
        if len(self) < 2:
            return
        self.first, self.last = self.last, self._reverse(self.first)

    def reverse_iterative(self):
        """Reverse the list in place with constant extra space."""
        prev = None
        current = self.first
        self.first, self.last = self.last, self.first
        while current:
            # renamed from `next` to avoid shadowing the builtin
            successor = current.next
            current.next = prev
            prev = current
            current = successor
|
def publish_sensor(project_id, topic_id,data,origin='python-sample',username='gcp'):
    """Publish a message to a Google Cloud Pub/Sub topic.

    Arguments:
        project_id (str): name of GCP project ID
        topic_id (str): name of pubsub topic
        data (str): payload to publish (UTF-8 encoded before sending)
        origin (str): value for the message's custom 'origin' attribute
        username (str): value for the message's custom 'username' attribute

    Returns:
        str: confirmation string containing the full topic path
    """
    from google.cloud import pubsub_v1
    # Credentials are taken from the environment, e.g.:
    #   os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "cred.json"
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project_id, topic_id)
    data = data.encode("utf-8")
    # publish() is asynchronous and returns a future; its result is not
    # awaited here, so delivery failures go unnoticed by the caller.
    future = publisher.publish(
        topic_path, data, origin=origin, username=username
    )
    #print(future.result())
    return "Published messages with custom attributes to {} ".format(topic_path)
# Manual smoke test; requires GCP credentials available in the environment.
if __name__ =='__main__':
    publish_sensor("elaborate-howl-285701", "iot-check","hello2")
from datetime import datetime
def take_time(func):
    """Decorator that prints the wall-clock duration of every call to *func*.

    The wrapped function's return value is passed through unchanged; the
    elapsed time is printed to stdout after each call.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # preserve func.__name__ / __doc__ on the wrapper
    def wrapper(*args, **kwargs):
        start = datetime.now()
        result = func(*args, **kwargs)
        print(func.__name__, 'duration', datetime.now() - start)
        return result
    return wrapper
|
# --------------------------------------------- #
# Imports #
# --------------------------------------------- #
from collections import OrderedDict
import pyfftw, mrcfile, itertools
import numpy as np
# --------------------------------------------- #
# Fourier transform-related #
# --------------------------------------------- #
def compute_ft(image):
    """
    Compute complex, centered DFT of an n-d numpy array using pyFFTW.

    Inputs:
    -------
    image: array of any dimensions

    Outputs:
    --------
    ft_image: array of (complex) structure factors, with the zero-frequency
        term shifted to the center of the array
    """
    # Copy the input into a byte-aligned buffer so pyFFTW can take its fast
    # SIMD code paths; the assignment casts `image` to the buffer's dtype.
    ft_image = pyfftw.empty_aligned(image.shape)
    ft_image[:] = image
    f = pyfftw.interfaces.scipy_fftpack.fftn(ft_image)
    # fftshift moves the DC component from the corner to the array center.
    return np.fft.fftshift(f)
def ft_to_I_phase(ft):
    """
    Split complex structure factors into intensity and phase arrays.

    Inputs:
    -------
    ft: array of complex structure factors

    Outputs:
    --------
    I: array of intensities, |F|^2
    phase: array of phases in radians, in (-pi, pi]
    """
    amplitudes = np.abs(ft)
    intensities = np.square(amplitudes)
    phases = np.arctan2(ft.imag, ft.real)
    return intensities, phases
# --------------------------------------------- #
# Visualization helpers #
# --------------------------------------------- #
def followup_brightness_scale(data, brightness=0.5, hsize=100):
    """
    Numpy version of Brewster/Zwart code to rescale intensities for visualization.

    Values are binned by their ratio to the mean intensity; the ~90th
    percentile level sets a correction factor, and the output maps high
    intensity to dark (0) and low intensity to bright (up to 255).

    Inputs:
    -------
    data: array of intensities
    brightness: overall brightness factor
    hsize: number of histogram bins used to locate the percentile level

    Outputs:
    --------
    outvalue: array shaped like data, with values in [0, 256)
    """
    qave = np.mean(data)
    # Bin each value once. The original code called data.flatten() inside a
    # per-element loop, which was O(n^2) in both time and allocations.
    # astype(int) truncates toward zero, matching the original int() cast;
    # out-of-range bins are clamped into the first/last bin as before.
    idx = ((hsize / 2) * data.ravel() / qave).astype(int)
    idx = np.clip(idx, 0, hsize - 1)
    histogram = np.bincount(idx, minlength=hsize)
    # First bin where the cumulative count exceeds 90% of the data.
    percentile = 0
    above = np.nonzero(np.cumsum(histogram) > 0.9 * data.size)[0]
    if above.size:
        percentile = above[0] * qave / (hsize / 2)
    adjlevel = 0.4
    if percentile > 0.:
        correction = brightness * adjlevel / percentile
    else:
        correction = brightness / 5.0
    outscale = 256
    corrected = data * correction
    outvalue = outscale * (1.0 - corrected)
    outvalue[outvalue < 0] = 0
    outvalue[outvalue >= outscale] = outscale - 1
    return outvalue
def hsv_phase_plot(I_sel, p_sel):
    """
    Convert intensities and phases to an array that can be plotted by matplotlib's
    imshow, in which hue / saturation / value is used to visualize intensities and
    phases simultaneously.

    Inputs:
    -------
    I_sel: 2d array of intensities
    p_sel: 2d array of phases; mapped as (p + 180) / 360, so presumably in
        degrees on [-180, 180] -- confirm against callers

    Outputs:
    --------
    rgb: array of shape (nrows, ncols, 3) suitable for plt.imshow
    """
    from matplotlib import colors
    I_sel = followup_brightness_scale(I_sel)
    p_sel = p_sel.astype(float)
    i_plot = (256.0 - I_sel) / 256.0
    p_plot = (p_sel + 180.0) / 360.0
    ones = np.ones(p_plot.shape)
    # list(...) keeps this working on Python 3, where zip is lazy and
    # np.array(zip(...)) would yield a useless 0-d object array.
    c = colors.hsv_to_rgb(np.array(list(zip(p_plot.flatten(),
                                            i_plot.flatten(),
                                            ones.flatten()))))
    return c.reshape(p_plot.shape[0], p_plot.shape[1], 3)
# --------------------------------------------- #
# Handling data in reciprocal space #
# --------------------------------------------- #
def wraptopi(p_vals):
    """
    Wrap array of input phases (in degrees) into the half-open domain
    [-180, 180), modifying the array in place.

    Inputs:
    -------
    p_vals: array of phase values in degrees, dtype float

    Outputs:
    --------
    p_vals: the same array, with every value wrapped into [-180, 180)
    """
    below = p_vals < -180.0
    above = p_vals > 180.0
    # Whole multiples of 360 degrees needed to bring each value into range.
    shift_up = ((-180.0 - p_vals) / 360.0 + 1).astype(int) * 360.0
    shift_down = ((180.0 - p_vals) / -360.0 + 1).astype(int) * 360.0
    p_vals[below] += shift_up[below]
    p_vals[above] -= shift_down[above]
    # Keep the interval half-open: +180 is folded onto -180.
    p_vals[p_vals == 180.0] = -180.0
    return p_vals
def average_phases(p_vals, weights=None):
    """
    Average phases using a method that works for circular data, with wrapping
    from -180 to 180. Modified code courtesy:
    https://stackoverflow.com/questions/491738/
    how-do-you-calculate-the-average-of-a-set-of-circular-data

    Inputs:
    -------
    p_vals: array of phase values in degrees
    weights: optional, weights for each phase in p_vals

    Outputs:
    --------
    p_avg: phase average in degrees
    """
    if weights is None:
        weights = np.ones(p_vals.shape)
    # Weighted mean direction on the unit circle. Vectorized: the original
    # Python loop recomputed np.sum(weights) twice per element.
    w = np.asarray(weights) / float(np.sum(weights))
    rad = np.deg2rad(p_vals)
    x = np.sum(np.cos(rad) * w)
    y = np.sum(np.sin(rad) * w)
    return np.rad2deg(np.arctan2(y, x))
def std_phases(p_vals, weights=None):
    """
    Compute the standard deviation of input phases, yielding a value that
    roughly matches the output of scipy.stats.circstd (provided the range
    of phase values is not too large). Unlike scipy.stats.circstd, enable
    value-weighting of the phases.

    Inputs:
    -------
    p_vals: array of phase values in degrees
    weights: optional, weights for each phase in p_vals

    Outputs:
    --------
    p_std: phase standard deviation in degrees
    """
    center = average_phases(p_vals, weights=weights)
    deviations = wraptopi(p_vals - center)
    return np.sqrt(np.average(np.square(deviations), weights=weights))
def stderr_phases(p_vals, weights=None):
    """
    Estimate the standard error of input phases as the square root of their
    circular variance, which yields a value in the range [0,1], divided by
    the number of samples.

    Inputs:
    -------
    p_vals: array of phase values in degrees
    weights: optional, weights of phases in p_vals

    Outputs:
    --------
    p_stderr: sqrt(circular variance / n), dimensionless
    """
    if weights is None:
        weights = np.ones(p_vals.shape)
    # Weighted mean resultant vector. Vectorized: the original loop
    # recomputed np.sum(weights) twice per element.
    w = np.asarray(weights) / float(np.sum(weights))
    rad = np.deg2rad(p_vals)
    x = np.sum(np.cos(rad) * w)
    y = np.sum(np.sin(rad) * w)
    r2 = np.square(x) + np.square(y)
    p_var = 1 - np.sqrt(r2)
    p_var = np.clip(p_var, 0.0, 1.0)  # avoid floating point errors
    return np.sqrt(p_var / float(len(p_vals)))
def residual_phases(hklp1, hklp2):
    """
    Compute the difference between the phases of reflections shared between hklp1
    and hklp2 input dictionaries. Input phases and output residual are in degrees.

    Inputs:
    -------
    hklp1: OrderedDict with keys as Millers and phases as np.array([phase])
    hklp2: OrderedDict with keys as Millers and phases as np.array([phase])

    Output:
    -------
    residuals: array of difference between shared phases in hklp1 and hklp2
    """
    p_shared = shared_dict(hklp1, hklp2)
    # list() keeps this working on Python 3, where dict.values() is a lazy
    # view that np.array would wrap as a 0-d object array.
    p_vals = np.array(list(p_shared.values()))
    p1, p2 = p_vals[:, 0], p_vals[:, 1]
    diff = p1 - p2
    # Wrap differences onto [-180, 180] before taking magnitudes.
    diff[diff > 180] -= 360
    diff[diff < -180] += 360
    return np.abs(diff)
def residual_to_avgphase(hklp):
    """
    For each reflection in hklp with more than one recorded phase value, compute
    the mean residual of the values to the mean phase -- a measure of precision.

    Inputs:
    -------
    hklp: OrderedDict with keys as Millers and phases as np.array([phase(s)])

    Output:
    -------
    residuals: array of mean delta between mean phase and constituent phases
    """
    residuals = []
    for phases in hklp.values():
        if len(phases) <= 1:
            continue  # a single observation has no spread to measure
        delta = phases - average_phases(phases)
        delta[delta > 180] -= 360
        delta[delta < -180] += 360
        residuals.append(np.mean(np.abs(delta)))
    return np.array(residuals)
def residual_phase_distribution(hklp1, hklp2):
    """
    Fit a normal distribution to the wrapped phase differences of reflections
    shared between hklp1 and hklp2. Input phases and outputs are in degrees.

    Inputs:
    -------
    hklp1: OrderedDict with keys as Millers and phases as np.array([phase])
    hklp2: OrderedDict with keys as Millers and phases as np.array([phase])

    Output:
    -------
    sigma: sigma from fitting N(mu, sigma) to phase residuals
    m_error: mean error of phase residuals
    """
    import scipy.stats

    p_shared = shared_dict(hklp1, hklp2)
    # list() keeps this working on Python 3, where dict.values() is a view.
    p_vals = np.array(list(p_shared.values()))
    p1, p2 = p_vals[:, 0], p_vals[:, 1]
    diff = p1 - p2
    # Wrap differences onto [-180, 180] before fitting.
    diff[diff > 180] -= 360
    diff[diff < -180] += 360
    mu, std = scipy.stats.norm.fit(diff)
    return std, np.mean(np.abs(diff))
def remove_Friedels(miller_list):
    """
    Remove Friedel pairs from input list of Miller indices (list of tuples
    format): of each (h,k,l)/(-h,-k,-l) pair, the first occurrence is kept.
    The list is modified in place and also returned.

    Fixes over the original: no longer mutates the list while iterating it,
    uses a set for O(1) mate lookups instead of repeated O(n) scans, and no
    longer deletes (0,0,0), which is its own Friedel mate.

    Inputs:
    -------
    miller_list: list of Miller indices, each given as a tuple

    Outputs:
    --------
    miller_list: updated list with Friedel mates removed
    """
    kept_set = set()
    kept = []
    for miller in miller_list:
        friedel = (-1*miller[0], -1*miller[1], -1*miller[2])
        # Drop this reflection only if its mate was already kept (and it is
        # not self-paired, i.e. not the (0,0,0) reflection).
        if friedel in kept_set and friedel != miller:
            continue
        kept_set.add(miller)
        kept.append(miller)
    miller_list[:] = kept  # preserve in-place mutation semantics
    return miller_list
def sym_ops_friedels(sym_ops):
    """
    Expand the input dictionary of symmetry operations with those for each of
    the Friedel mates. For each symmetry operation [R|t], a new operation
    [-R|t] is added under the next free integer key: the rotational element
    is negated while the translational component is unchanged.

    Inputs:
    -------
    sym_ops: dict of integer keys -> 3x4 arrays [R|t]

    Outputs:
    -------
    sym_ops: the same dict, expanded in place with the Friedel operations
    """
    # Snapshot the keys: we insert new entries while iterating, which would
    # raise "dictionary changed size during iteration" on Python 3.
    for key in list(sym_ops.keys()):
        new_key = max(sym_ops.keys()) + 1
        sym_ops[new_key] = np.vstack((-1*sym_ops[key][:,:-1].T,
                                      sym_ops[key][:,-1])).T
    return sym_ops
def compute_resolution(space_group, cell_constants, hkl):
    """
    Compute d-spacing / resolution for a set of scattering vectors, where q = 2*pi*s.
    If present, set (0,0,0) to arbitrarily high (but not infinite) resolution.

    Inputs:
    -------
    space_group: number corresponding to space group
    cell_constants: array of cell constants, (a, b, c, alpha, beta, gamma),
        with angles in degrees
    hkl: array of n Miller indices, n x 3

    Output:
    -------
    resolution: resolution for hkl list (empty if space group is unsupported)
    """
    a, b, c, alpha, beta, gamma = cell_constants
    h, k, l = hkl[:,0], hkl[:,1], hkl[:,2]

    # valid for orthorhombic, cubic, and tetragonal
    if ((space_group >= 16) and (space_group <=142)) or ((space_group >= 195) and (space_group <=230)):
        inv_d = np.sqrt(np.square(h/a) + np.square(k/b) + np.square(l/c))

    # valid for hexagonal (and possibly trigonal? if so change lower bound to 143)
    elif (space_group >= 168) and (space_group <=194):
        inv_d = np.sqrt(4.0*(np.square(h) + h*k + np.square(k))/(3*np.square(a)) + np.square(l/c))

    # valid for monoclinic
    elif (space_group >= 3) and (space_group <=15):
        beta = np.deg2rad(beta)
        inv_d = np.sqrt(np.square(h/(a*np.sin(beta))) + np.square(k/b) + np.square(l/(c*np.sin(beta))) \
                        + 2*h*l*np.cos(beta) / (a*c*np.square(np.sin(beta))))

    # valid for triclinic
    elif (space_group <= 2):
        alpha, beta, gamma = np.deg2rad(alpha), np.deg2rad(beta), np.deg2rad(gamma)
        s11 = np.square(b*c*np.sin(alpha))
        s22 = np.square(a*c*np.sin(beta))
        s33 = np.square(a*b*np.sin(gamma))
        s12 = a*b*np.square(c)*(np.cos(alpha)*np.cos(beta) - np.cos(gamma))
        s23 = np.square(a)*b*c*(np.cos(beta)*np.cos(gamma) - np.cos(alpha))
        s13 = a*np.square(b)*c*(np.cos(gamma)*np.cos(alpha) - np.cos(beta))
        V = a*b*c*np.sqrt(1 - np.square(np.cos(alpha)) - np.square(np.cos(beta)) - np.square(np.cos(gamma)) \
                          + 2*np.cos(alpha)*np.cos(beta)*np.cos(gamma))
        inv_d = np.sqrt(1.0/np.square(V)*(s11*np.square(h) + s22*np.square(k) + s33*np.square(l) \
                                          + 2*s12*h*k + 2*s23*k*l + 2*s13*h*l))

    else:
        # print() as a function is valid on both Python 2 and Python 3,
        # unlike the original print statement.
        print("This space group is currently unsupported.")
        return np.empty(0)

    # Avoid division by zero for the (0,0,0) reflection: give it a huge d.
    inv_d[inv_d==0] = 1e-5
    res = 1.0 / inv_d
    return res
# --------------------------------------------- #
# Data wrangling in real space #
# --------------------------------------------- #
def save_mrc(volume, savename):
    """
    Save 3d numpy array, volume, to path savename in mrc format.

    The data are cast to float32 before writing.
    """
    mrc = mrcfile.new(savename, overwrite=True)
    # Stamp the header's MAP identifier so readers recognize the file type.
    mrc.header.map = mrcfile.constants.MAP_ID
    mrc.set_data(volume.astype(np.float32))
    mrc.close()
    return
def rescale_vol(volume):
    """Linearly map the voxel values of *volume* onto the interval [0, 1]."""
    lo = volume.min()
    span = volume.max() - lo
    return (volume - lo) / span
def reformat_mdtraj_pdb(fname, outname):
    """
    Reformat the input pdb file (fname) in MDTraj format to match PDB
    conventions, writing the result to outname. Standard PDB convention has
    one more column in front of the atom label than what's written out by
    MDTraj, so a "5" is inserted at column 62 and a space at column 76 of
    every line containing "ATOM"; other lines are copied unchanged.
    """
    # Context managers guarantee both handles close even if a write fails;
    # iterating the file streams lines instead of buffering readlines().
    with open(fname, "r") as src, open(outname, "w") as dst:
        for fline in src:
            if "ATOM" in fline:
                dst.write(fline[:62] + "5" + fline[63:76] + " " + fline[76:])
            else:
                dst.write(fline)
    return
# --------------------------------------------- #
# Miscellany #
# --------------------------------------------- #
def shared_dict(d1, d2):
    """
    Return a dictionary of common reflections from the input dictionaries.

    Inputs:
    -------
    d1: dict whose keys are Millers, values are unrestricted
    d2: dict whose keys are Millers, values are unrestricted

    Outputs:
    --------
    shared_d: OrderedDict of common Millers as keys, with values
        np.array([d1[miller], d2[miller]]), in d1's key order
    """
    # Iterate d1 directly rather than over an unordered set intersection, so
    # the OrderedDict's ordering is deterministic (it follows d1).
    shared_d = OrderedDict()
    for hkl in d1:
        if hkl in d2:
            shared_d[hkl] = np.array([d1[hkl], d2[hkl]])
    return shared_d
|
from rv.modules import Behavior as B
from rv.modules import Module
from rv.modules.base.lfo import BaseLfo
class Lfo(BaseLfo, Module):
    """LFO module wrapper; its behaviors declare that it sends audio."""

    behaviors = {B.sends_audio}
|
# -*- coding: utf-8 -*-
from irc3.testing import BotTestCase
from irc3.testing import MagicMock
from irc3.testing import asyncio
import datetime
import tempfile
import shutil
import os
def hook(entries):
    """Module-level feed hook that filters out every entry."""
    return list()
class Hook:
    """Class-based feed hook: constructed with the bot, drops every entry."""

    def __init__(self, bot):
        # The bot is accepted for plugin-interface compatibility; nothing
        # is stored on the instance.
        pass

    def __call__(self, entries):
        return list()
class Dispatcher:
    """Test double for the feeds dispatcher: captures dispatched messages in
    an asyncio.Future so a test can await exactly one dispatch."""

    def __init__(self, bot):
        self.loop = bot.loop
        self.reset()

    def reset(self):
        # Fresh future per expected dispatch; returned so tests can await it.
        # NOTE(review): Future(loop=...) was removed in Python 3.10 -- fine
        # for the Python versions this suite targeted, but worth confirming.
        self.future = asyncio.Future(loop=self.loop)
        return self.future

    def __call__(self, messages):
        # Invoked by the plugin with an iterable of messages; materialize it
        # and hand the list to whichever test awaits the current future.
        self.future.set_result(list(messages))
class TestFeeds(BotTestCase):
    """Tests for the irc3.plugins.feeds plugin, using a patched HTTP layer
    and the Dispatcher test double defined in this module."""

    name = 'irc3.plugins.feeds'

    def setUp(self):
        # Each test gets a throwaway working directory for feed state.
        wd = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, wd)
        self.wd = os.path.join(wd, 'feeds')
        # NOTE(review): '%M' appears twice and 'OO' is the letter O doubled;
        # this looks like a typo for an ISO timestamp such as '%H:%M:00'.
        # The fixture only needs a recent-looking date, so it may be
        # harmless -- confirm before changing.
        dt = datetime.datetime.now().strftime('%Y-%m-%dT%M:%M:OO-08:00')
        self.patch_requests(
            filename='tests/feed.atom',
            DATE=dt,
        )

    def callFTU(self, **kwargs):
        # Build a bot configured with the feeds plugin under test.
        loop = kwargs.pop('loop', None)
        config = dict(
            directory=self.wd,
            irc3='http://xxx',
            dispatcher='tests.test_feeds.Dispatcher',
            channels='#irc3', **kwargs
        )
        config = {
            'includes': [self.name],
            self.name: config
        }
        if loop:
            config.update(loop=loop)
        return super(TestFeeds, self).callFTU(**config)

    def test_connection_made(self):
        # Connecting should schedule the periodic feed update.
        bot = self.callFTU()
        bot.loop.call_later = MagicMock()
        bot.notify('connection_made')
        self.assertTrue(bot.loop.call_later.called)

    def test_feed(self):
        # First update dispatches the new entry to the channel...
        bot = self.callFTU(loop=asyncio.new_event_loop())
        future = bot.feeds.dispatcher.reset()
        bot.feeds.update()
        bot.loop.run_until_complete(future)
        assert future.result() == [
            ('#irc3', '[irc3] coverage https://github.com/gawel/irc3/commit/'
             'ec82ae2c5f8b2954f0646a2177deb65ad9db712a')]
        # ...a second bot sees the entry as already known and sends nothing.
        bot = self.callFTU(loop=asyncio.new_event_loop())
        future = bot.feeds.dispatcher.reset()
        bot.feeds.update()
        bot.loop.run_until_complete(future)
        assert future.result() == []

    def test_hooked_feed(self):
        # A function hook that drops everything yields no dispatched messages.
        bot = self.callFTU(hook='tests.test_feeds.hook',
                           loop=asyncio.new_event_loop())
        future = bot.feeds.dispatcher.reset()
        bot.feeds.update()
        bot.loop.run_until_complete(future)
        assert future.result() == []

    def test_hooked_feed_with_class(self):
        # A class-based hook is instantiated with the bot, then filters.
        bot = self.callFTU(hook='tests.test_feeds.Hook',
                           loop=asyncio.new_event_loop())
        assert isinstance(bot.feeds.hook, Hook)
        future = bot.feeds.dispatcher.reset()
        bot.feeds.update()
        bot.loop.run_until_complete(future)
        assert future.result() == []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.