text stringlengths 38 1.54M |
|---|
#!/usr/bin/python
import csv
import datetime
import eyed3
import os
import re
import shutil
import subprocess
import urllib
import urlparse
import cgi
import string
# Root directory holding every input and output file used by this script.
directory = os.path.abspath("data")
##############
# DATA MODEL #
##############
class Event(object):
    """A competition event (level + gender + category) and its music limits.

    min_music_length / max_music_length are in seconds; a max of 0 means
    skaters do not submit their own music for this event.
    """

    def __init__(self, level, gender, category, min_music_length, max_music_length, dance):
        self.level = level
        self.gender = gender
        self.category = category
        self.min_music_length = min_music_length
        self.max_music_length = max_music_length
        # back-references (Start registers itself here)
        self.starts = []
        # computed properties
        if gender == "Female":
            infix = " Ladies "
        elif gender == "Male":
            infix = " Mens "
        else:
            infix = " "
        self.name = level + infix + category
        self.short_name = self.level + " " + self.category.replace("Solo ", "")
        self.has_submitted_music = self.max_music_length > 0
        self.dance = dance

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Event: {}".format(self.name)
class Skater(object):
    """One registered skater: identity, contact info, and announcer notes."""

    def __init__(self, usfs_number, first_name, last_name, email):
        self.usfs_number = usfs_number
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.university = ""
        self.notes = ""
        # back-references (Start registers itself here)
        self.starts = []
        # computed properties
        self.full_name = self.first_name + " " + self.last_name

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Skater: {} {} {}".format(self.full_name, self.usfs_number, self.email)
class Skaters(object):
    """Registry of Skater objects, indexed by USFS number, name, and email."""

    def __init__(self):
        self.skaters = []
        self.skaters_by_usfs = {}
        self.skaters_by_name = {}
        self.skaters_by_email = {}

    def find_or_create(self, usfs_number, first_name, last_name, email):
        """Return an existing matching skater or register a brand-new one."""
        # Placeholder USFS numbers are treated as "unknown".
        if usfs_number in ("0", "none"):
            usfs_number = ""
        full_name = first_name + " " + last_name
        existing = self.find(usfs_number, full_name, email)
        if existing:
            return existing
        skater = Skater(usfs_number, first_name, last_name, email)
        self.skaters.append(skater)
        self.skaters_by_usfs[usfs_number] = skater
        self.skaters_by_name[full_name] = skater
        self.skaters_by_email[email] = skater
        return skater

    def find(self, usfs_number, name, email):
        """Look up a skater by USFS number first, then name, then email.

        The USFS match also requires the stored last name to appear in
        `name` as a sanity check; name/email fallbacks print a warning.
        Returns None when nothing matches.
        """
        if usfs_number:
            candidate = self.skaters_by_usfs.get(usfs_number)
            if candidate is not None and candidate.last_name in name:
                return candidate
        if name and name in self.skaters_by_name:
            skater = self.skaters_by_name[name]
            print ("Warning matching skater by name", name, email, usfs_number, skater)
            return skater
        if email and email in self.skaters_by_email:
            skater = self.skaters_by_email[email]
            print ("Warning matching skater by email", name, email, usfs_number, skater)
            return skater
        return None

    def find_by_name_and_university(self, name, university):
        """Return the skater matching both name and university, else None."""
        skater = self.skaters_by_name.get(name)
        if skater is not None:
            if skater.university == university:
                return skater
            print (skater.university, university)
        return None
class Start(object):
    """One skater's entry in one event, plus any music submitted for it.

    Constructing a Start registers it on both the skater and the event.
    """

    def __init__(self, skater, event):
        self.skater = skater
        self.event = event
        self.music_submissions = []
        # Filesystem-safe key used to name this start's music files.
        self.music_key = re.sub(r"\W+", "_", event.name + " " + skater.full_name)
        self.music_length = 0
        self.confirmed = False
        # register back-references
        skater.starts.append(self)
        event.starts.append(self)

    def last_music_submission(self):
        """Return the most recent MusicSubmission, or None if none exist."""
        return self.music_submissions[-1] if self.music_submissions else None

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Start: {} {}".format(self.event, self.skater.full_name)
class MusicSubmission(object):
    """A single music upload: skater, event, URL, and spreadsheet row index."""

    def __init__(self, skater, event, url, index):
        self.skater = skater
        self.event = event
        self.url = url
        self.index = index

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "Submission: {} {} {} {}".format(self.skater, self.event, self.url, self.index)
##################
# READING INPUTS #
##################
def int_or_zero(s):
    """Parse s as an int, treating an empty/falsy value as 0."""
    return int(s) if s else 0
def read_events():
    """Load events.csv (in the working directory) into a list of Event objects."""
    with open("events.csv", "r") as file_in:
        return [
            Event(
                level=row["Level"],
                gender=row["Gender"],
                category=row["Category"],
                min_music_length=int_or_zero(row["Min Music Length"]),
                max_music_length=int_or_zero(row["Max Music Length"]),
                dance=row["Dance"]
            )
            for row in csv.DictReader(file_in)
        ]
def create_submission(skater, event_name, url, index):
    """Attach a MusicSubmission to the skater's start for event_name.

    Rows with no URL are silently skipped; a missing event name or an
    unmatched start produces a warning instead of a submission.
    """
    if not url:
        return
    if not event_name:
        print ("Missing event name", skater)
        return
    submission = MusicSubmission(skater, event_name, url, index)
    for start in skater.starts:
        if event_name == start.event.short_name:
            start.music_submissions.append(submission)
            break
    else:
        print ("Warning cannot find start", event_name, skater.starts)
def read_submissions(skaters):
    """Read the music spreadsheet (data/input.csv) and attach each submitted
    music URL to the matching skater's start.

    skaters -- a Skaters registry already populated from the entries list.
    The enumerate index i doubles as the submission's version marker.
    """
    with open(os.path.join(directory, "input.csv"), "r") as file_in:
        reader = csv.DictReader(file_in)
        for i, row in enumerate(reader):
            usfs_number = row["USFS Number"]
            # Title-case the name so it matches how entries.csv was loaded.
            name = row["Skater Name"].strip().title()
            email = row["Email Address"].strip()
            skater = skaters.find(usfs_number, name, email)
            if skater:
                notes = row["Notes for Announcer"]
                if notes:
                    skater.notes = notes
                free_dance_event = row["Free Dance Event"]
                free_dance_url = row["Free Dance Music"]
                free_skate_event = row["Free Skate Event"]
                free_skate_url = row["Free Skate Music"]
                short_event = row["Short Program Event"]
                short_url = row["Short Program Music"]
                # A single row can carry up to three submissions.
                create_submission(skater, free_dance_event, free_dance_url, i)
                create_submission(skater, free_skate_event, free_skate_url, i)
                create_submission(skater, short_event, short_url, i)
            else:
                print ("Warning cannot find skater", name, email, usfs_number)
def get_cached_music(start, subdir):
    """Return the cached file name for this start's latest submission, or None.

    Cached files are named "<submission index>_<music key>.<ext>" inside
    data/<subdir>; the extension is ignored when matching.
    """
    prefix = str(start.last_music_submission().index) + "_" + start.music_key
    for candidate in os.listdir(os.path.join(directory, subdir)):
        base, _ext = os.path.splitext(candidate)
        if base == prefix:
            return candidate
    return None
def download_music(start):
    """Download the latest music submission for a start into data/music_raw.

    Skips starts with no submissions or with an already-cached download.
    Python 2 only: uses the urlparse module, urllib.urlretrieve, and a
    print statement.
    """
    # TODO use google drive API
    if start.music_submissions and not get_cached_music(start, "music_raw"):
        submission = start.last_music_submission()
        url = submission.url
        parsed_url = urlparse.urlparse(url)
        if parsed_url.netloc == "drive.google.com":
            # Rewrite a Google Drive share link into a direct-download URL.
            query_params = urlparse.parse_qs(parsed_url.query)
            url = "https://drive.google.com/uc?export=download&id=" + query_params["id"][0]
        print "Downloading music from " + url
        (download_path, headers) = urllib.urlretrieve(url)
        # Recover the uploader's original file name (for its extension)
        # from the Content-Disposition response header.
        original_filename = None
        for disposition in headers["Content-Disposition"].split(";"):
            disposition_parts = disposition.split("=")
            # NOTE(review): parts split on ';' usually keep a leading space
            # (" filename"), which would fail this equality check -- verify
            # against real response headers.
            if len(disposition_parts) == 2 and disposition_parts[0] == "filename":
                original_filename = disposition_parts[1].strip("\"")
        file_extension = os.path.splitext(original_filename)[1]
        music_filename = str(submission.index) + "_" + start.music_key + file_extension
        music_path = os.path.join(directory, "music_raw", music_filename)
        shutil.copy(download_path, music_path)
def convert_music(start):
    """Convert the latest cached raw submission into a normalized mp3.

    Re-encodes data/music_raw/<idx>_<key>.<ext> to data/music/<key>.mp3 via
    ffmpeg at 256k, then tags it: title = "<skater> <version>" (the version
    number is how read_version() detects stale conversions) and album =
    event name. Python 2 only (uses the unicode builtin).
    """
    if start.music_submissions:
        input_file_name = get_cached_music(start, "music_raw")
        if input_file_name:
            input_path = os.path.join(directory, "music_raw", input_file_name)
            output_path = os.path.join(directory, "music", start.music_key + ".mp3")
            # Version = submission count; only reconvert when it is newer
            # than the version recorded in the existing mp3's title tag.
            version = len(start.music_submissions)
            if version > read_version(output_path):
                if version > 1:
                    print ("Overriding submission", output_path, version)
                title = start.skater.full_name + " " + str(version)
                album = start.event.name
                file_extension = os.path.splitext(input_file_name)[1]
                if file_extension.lower() in [".mp3", ".wav", ".m4a", ".aif", ".aiff", ".wma", ".mp2", ".m4v", ""]:
                    print ("Converting", input_file_name)
                    subprocess.call(["ffmpeg", "-y", "-i", input_path, "-acodec", "mp3", "-ab", "256k", output_path])
                else:
                    print ("Unknown music format", input_file_name)
                    return
                # Rewrite the ID3 tag from scratch so stale fields vanish.
                mp3_file = eyed3.load(output_path)
                if mp3_file.tag:
                    mp3_file.tag.clear()
                else:
                    mp3_file.initTag()
                mp3_file.tag.title = unicode(title)
                mp3_file.tag.album = unicode(album)
                mp3_file.tag.save(output_path)
def read_version(path):
    """Return the submission version stored in an mp3's title tag, or 0.

    convert_music() stores the version as the final whitespace-separated
    token of the title (e.g. "Jane Doe 2" -> 2); a missing file means no
    conversion has happened yet.
    """
    if not os.path.exists(path):
        return 0
    tag_title = eyed3.load(path).tag.title
    return int(tag_title.split()[-1])
def read_time(start):
    """Record the converted mp3's duration (seconds) on the start, if present."""
    music_path = os.path.join(directory, "music", start.music_key + ".mp3")
    if not os.path.exists(music_path):
        return
    start.music_length = eyed3.load(music_path).info.time_secs
# convert entries spreadsheet to events spreadsheet format
# convert entries spreadsheet to events spreadsheet format
def normalize_event_name(event):
    """Translate an entries-spreadsheet event name into the events.csv name."""
    # Strip the gender marker, remembering whether this is a men's event.
    if "(Male)" in event:
        event = event.replace(" (Male)", "")
        male_event = True
    elif "(Men)" in event:
        event = event.replace(" (Men)", "")
        male_event = True
    else:
        male_event = False
    gender_word = "Mens" if male_event else "Ladies"
    if "Short Program" in event:
        return "{} {} Short Program".format(event.split()[0], gender_word)
    if "Excel" in event or "Championship" in event:
        return "{} {} Freeskate".format(event, gender_word)
    if "Pattern Dance" in event:
        return event.split()[0] + " Solo Pattern Dance"
    # Team Maneuvers and Solo Free Dance pass through unchanged.
    return event
def read_entries(events_by_name):
    """Build the Skaters registry from data/entries.csv.

    events_by_name -- dict mapping normalized event names to Event objects.
    Creates one Start per CSV row and returns the populated registry.
    """
    skaters = Skaters()
    with open(os.path.join(directory, "entries.csv"), "r") as file_in:
        reader = csv.DictReader(file_in)
        for row in reader:
            raw_event = row["Event"].title().strip()
            event = events_by_name[normalize_event_name(raw_event)]
            if event.gender:
                # Sanity check: the roster's gender must match the event's.
                assert event.gender == row["Gender"]
            usfs_number = row["USF #"].strip()
            first_name = row["First Name"].strip().title()
            last_name = row["Last Name"].strip().title()
            email = row["E-mail"].strip()
            skater = skaters.find_or_create(usfs_number, first_name, last_name, email)
            skater.university = row["University"].strip().title()
            # Registers itself on both the skater and the event.
            Start(skater, event)
    return skaters
def format_time(seconds):
    """Format a duration in seconds as M:SS (e.g. 150 -> "2:30").

    BUG FIX: the previous str(timedelta(...))[3:] sliced off a fixed
    3-character prefix, which mangled durations of 10 minutes or more
    ("0:12:30" became "2:30"). Computing minutes/seconds directly is
    correct for any duration and produces identical output below 10:00.
    """
    minutes, secs = divmod(int(seconds), 60)
    return "{}:{:02d}".format(minutes, secs)
def generate_report(events, include_details):
    """Render the music-status report as HTML from template.html.

    events -- list of Event objects.
    include_details -- True writes the crew version into data/music/ (adds
    anthem link, music links, and announcer notes); False writes the public
    summary into data/.
    The template is copied through line by line; the two placeholder lines
    <!--TIMESTAMP--> and <!--CONTENT--> are expanded in place.
    """
    if include_details:
        output_path = os.path.join(directory, "music", "index.html")
    else:
        output_path = os.path.join(directory, "index.html")
    with open("template.html", "r") as template, open(output_path, "w") as file_out:
        for row in template:
            if row == "<!--TIMESTAMP-->\n":
                file_out.write("<p>Last Updated: ")
                file_out.write(datetime.datetime.now().strftime("%A, %B %d %I:%M %P"))
                file_out.write("</p>\n")
            elif row == "<!--CONTENT-->\n":
                if include_details:
                    file_out.write("<h2>National Anthem</h2>\n")
                    file_out.write("<a href='anthem.m4a'>Anthem</a>")
                for event in events:
                    # Only confirmed starts appear in the report.
                    confirmed_starts = [start for start in event.starts if start.confirmed]
                    if not confirmed_starts:
                        continue
                    file_out.write("<h2>" + event.name + "</h2>\n")
                    if event.dance:
                        # Dance events share pre-named warmup/track files.
                        file_prefix = event.dance.lower().replace(" ", "_") + "_"
                        file_out.write("<div class='dance-music'>")
                        file_out.write(event.dance)
                        file_out.write("<ul>")
                        file_out.write("<li><a href='" + file_prefix + "0.mp3'>Warmup</a></li>")
                        for i in range(1, 6):
                            file_out.write("<li><a href='" + file_prefix + str(i) + ".mp3'>Track " + str(i) + "</a></li>")
                        file_out.write("</ul>")
                        file_out.write("</div>")
                    if event.has_submitted_music:
                        file_out.write("<div class='time'>")
                        file_out.write("Program Length: ")
                        if event.min_music_length:
                            file_out.write("Min " + format_time(event.min_music_length) + " ")
                        if event.max_music_length:
                            file_out.write("Max " + format_time(event.max_music_length))
                        file_out.write("</div>\n")
                    file_out.write("<div>Entries below are NOT in starting order.</div>")
                    file_out.write("<table>\n")
                    file_out.write("<tr>\n")
                    file_out.write("<th>Skater</th>\n")
                    file_out.write("<th>University</th>\n")
                    if event.has_submitted_music:
                        file_out.write("<th>Music Length</th>\n")
                        file_out.write("<th>Submit Count</th>\n")
                    if include_details:
                        file_out.write("<th>Music</th>\n")
                        file_out.write("<th>Notes</th>\n")
                    file_out.write("</tr>\n")
                    for start in sorted(confirmed_starts, key=lambda s: s.skater.full_name):
                        university = start.skater.university
                        # scratch flagging is not wired up yet; always False
                        scratch = False
                        skater = start.skater.full_name
                        music_length = ""
                        music = ""
                        submit_count = str(len(start.music_submissions))
                        if start.music_length > 0:
                            music_length = format_time(start.music_length)
                            music = "<a href=" + start.music_key + ".mp3>mp3</a>"
                        if scratch:
                            file_out.write("<tr class='scratch'>\n")
                        else:
                            file_out.write("<tr>\n")
                        file_out.write("<td>" + skater + "</td>\n")
                        file_out.write("<td>" + university + "</td>\n")
                        if event.has_submitted_music:
                            file_out.write("<td>" + music_length + "</td>\n")
                            file_out.write("<td>" + submit_count + "</td>\n")
                        if include_details:
                            # Notes are free text: escape before embedding.
                            file_out.write("<td>" + music + "</td>\n")
                            file_out.write("<td>" + cgi.escape(start.skater.notes) + "</td>\n")
                        file_out.write("</tr>\n")
                    file_out.write("</table>\n")
            else:
                file_out.write(row)
def read_updated_entries(skaters, events_by_name):
    """Mark starts confirmed based on data/updated_entries.csv.

    The CSV interleaves event header rows (a name with an empty University
    column) with skater rows; each header row switches the current event
    for the skater rows that follow. Raises ValueError on an unknown
    skater; on-site additions create a brand-new confirmed Start.
    """
    with open(os.path.join(directory, "updated_entries.csv"), "r") as file_in:
        reader = csv.DictReader(file_in)
        event = None
        for row in reader:
            name = " ".join(row["Name"].split()).title() # clean up whitespace
            university = row["University"].strip().title()
            if name:
                if university:
                    if event.category != "Team Maneuvers":
                        skater = skaters.find_by_name_and_university(name, university)
                        if not skater:
                            # TODO handle this case
                            print ("Unknown Skater", name, university, event.name)
                            raise ValueError()
                        # for/else: confirm the existing start, or create one.
                        for start in skater.starts:
                            if start.event == event:
                                start.confirmed = True
                                break
                        else:
                            start = Start(skater, event)
                            start.confirmed = True
                            print ("Created new start", start, skater.starts)
                else: # event header row
                    event = events_by_name[normalize_event_name(name)]
def print_counts(events, print_missing):
    """Print music-submission progress to stdout.

    Counts confirmed starts in events that take submitted music; when
    print_missing is True, also lists each skater still missing music and
    the affected events. Python 2 only (bare print statement below).
    """
    submitted = 0
    total = 0
    missing_skaters = set()
    for event in events:
        if event.has_submitted_music:
            for start in event.starts:
                if start.confirmed:
                    total += 1
                    if start.music_submissions:
                        submitted += 1
                    else:
                        missing_skaters.add(start.skater)
    if print_missing:
        for skater in missing_skaters:
            print (skater.full_name, skater.email, skater.university)
            for start in skater.starts:
                if not start.music_submissions and start.confirmed and start.event.has_submitted_music:
                    print start.event.name
    print ("Submitted", submitted, "Total", total)
    print ("Missing Entries", total - submitted, "Missing Skaters", len(missing_skaters))
    print ("Missing Emails", [skater.email for skater in missing_skaters])
def debug_skater(skaters, name):
    """Dump a skater's record and submissions to stdout (debugging helper).

    Python 2 only (bare print statement below).
    """
    skater = skaters.find("", name, "")
    print(skater)
    print(skater.starts)
    for start in skater.starts:
        print start.last_music_submission()
############
# WORKFLOW #
############
def main():
    """End-to-end workflow: load rosters, fetch/convert music, emit reports.

    Python 2 only (bare print statements, urllib.urlretrieve).
    """
    # read events
    events = read_events()
    events_by_name = {event.name: event for event in events}
    # read entries
    skaters = read_entries(events_by_name)
    read_updated_entries(skaters, events_by_name)
    # TODO use google sheets api
    # Download Spreadsheet -- only when no cached copy exists; delete
    # data/input.csv to force a refresh.
    input_spreadsheet_path = os.path.join(directory, "input.csv")
    if os.path.exists(input_spreadsheet_path):
        print "Using cached spreadsheet"
    else:
        print "Downloading live spreadsheet"
        key_path = os.path.join(directory, "key.txt")
        with open(key_path, "r") as key_file:
            spreadsheet_key = key_file.read().strip()
        music_spreadsheet_url = "https://docs.google.com/spreadsheets/d/" + spreadsheet_key + "/export?format=csv"
        urllib.urlretrieve(music_spreadsheet_url, input_spreadsheet_path)
    # read submissions
    read_submissions(skaters)
    for event in events:
        for start in event.starts:
            # download music files
            download_music(start)
            # convert music to mp3
            convert_music(start)
            # read music length
            read_time(start)
    print_counts(events, True)
    # detailed report for the music crew, then the public summary
    generate_report(events, True)
    generate_report(events, False)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Advent of Code 2020 day 6 (part 2 style): for each group of lines
# (groups separated by blank lines), count the questions that every group
# member answered "yes" to, and sum those counts across groups.
input_file = open('aoc_6_input_sample.txt').readlines()
# letter_flags[i] counts how many members answered letter i ('a'..'z').
letter_flags=[]
for i in range(0,26):
    letter_flags.append(0)
letters=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
count_of_each_group=[]
# In[2]:
# First pass: record each group's size (number of member lines).
# NOTE(review): a final group is only recorded if the file ends with a
# blank line -- verify the input file's trailing newline convention.
group=1
group_length=0
group_length_list=[]
for line in input_file:
    if(line[0]=='\n'):
        group=group+1
        group_length_list.append(group_length)
        group_length=0
    else:
        group_length=group_length+1
# In[3]:
# Second pass: on each blank line, tally letters answered by ALL members
# of the finished group, then reset the flags for the next group.
group=1
group_length=0
group_id=0
final_sum=[]
for line in input_file:
    if(line[0]=='\n'):
        # count_of_each_group is collected but never used afterwards.
        count_of_each_group.append(sum(letter_flags))
        for i in letter_flags:
            if (i==group_length_list[group_id]):
                final_sum.append(1)
        for i in range(0,26):
            letter_flags[i]=0
        group_id=group_id+1
    else:
        for character in line:
            if character in letters:
                # NOTE(review): resetting a flag back to 1 once it reaches
                # the group size looks wrong (a plain increment would be
                # expected) -- confirm against the puzzle's expected output.
                if((letter_flags[letters.index(character)])==group_length_list[group_id]):
                    letter_flags[letters.index(character)] = 1
                else:
                    letter_flags[letters.index(character)] = letter_flags[letters.index(character)]+1
# In[4]:
# Final answer; as a bare expression this only displays in a notebook.
sum(final_sum)
|
# -*- coding: utf-8 -*-
# @Filename: index.py
# @Author: Yee
# @Email: rlk002@gmail.com
# @Link: https://wj.pe
# @Date: 2018-03-20 14:37:09
# @Copyright: :copyright: (c)2018
# @Last Modified by: Yee
# @Last Modified time: 2018-03-29 12:10:38
from yee.base.basehandler import BaseHandler, RequestMixin, JsonBodyMixin
from yee.models.users import usersModel
from playhouse.shortcuts import model_to_dict
class IndexHandler(BaseHandler, RequestMixin, JsonBodyMixin):
    """Demo endpoint: looks up a hard-coded user and echoes it as JSON."""

    def initialize(self):
        # BUG FIX: super(BaseHandler, self) started the MRO walk *past*
        # BaseHandler, so BaseHandler.initialize() itself was never run.
        # Starting from this class runs every parent's initialize in order.
        super(IndexHandler, self).initialize()
        self.um = usersModel()

    async def get(self):
        """Fetch the user with id "dkjsa" and write it out as JSON."""
        user = await self.um.get_user_by_user_id("dkjsa")
        self.jsonecho(model_to_dict(user))
        return


routes = [
    (r"/", IndexHandler),
]
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'Simon'
from hashlib import md5
import os
import sys
import random
import string
import time
import common_optMysql,common_optOracle, common_optOracle_billing
# def MD5String(str):
# m = md5()
# m.update(str)
# return m.hexdigest()
def getRandomString(number):
    """
    @param: number how many characters to return.
    @return: a random string of ASCII letters and digits.

    Fix: string.letters is Python-2-only and locale-dependent;
    string.ascii_letters exists on both Python 2 and 3 and is stable.
    NOTE: random.sample never repeats a character, so number must not
    exceed 62 (len(ascii_letters + digits)).
    """
    rule = string.ascii_letters + string.digits
    # Avoid shadowing the built-in name `str`.
    chars = random.sample(rule, number)
    return "".join(chars)
def getTimeStamp():
    """
    @return: the current local time formatted as 'YYYY-MM-DD HH:MM:SS'.
    """
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%d %H:%M:%S', now)
def getCommonRequestContent(mvnokey,service_type,service_name,api_name):
    """
    @param: mvnokey tenant key for the request.
    @param: service_type basic_service/extended_service/model_product
    @param: service_name name of the service interface
    @param: api_name interface name as defined in the API volume
    @return: dict of common request fields, with a fresh random serial
    number and the current timestamp filled in.
    """
    # Build the shared request envelope (avoids shadowing builtin `dict`).
    return {
        "mvnokey": mvnokey,
        "serial_number": getRandomString(30),
        "timestamp": getTimeStamp(),
        "service_type": service_type,
        "service_name": service_name,
        "api_name": api_name
    }
def getToken(mvnokey):
    '''
    @param mvnokey tenant key
    @return the MVNO_TOKEN stored for that key

    NOTE(review): the SQL is built with %-interpolation; acceptable for
    trusted test fixtures, but use a parameterized query if mvnokey can
    ever come from outside.
    '''
    return common_optMysql.QueryDB("SELECT `MVNO_TOKEN` FROM `esbcfgdb`.`mvno_app` WHERE `MVNO_KEY` = '%s';" % mvnokey)
def getServcieNumber(seg):
    '''
    @param seg number-segment prefix (first 7 digits) to match
    @return an available VOPI service number (status 10) in that segment

    (The misspelled name "Servcie" is kept because callers use it.)
    '''
    sql = "SELECT to_char(svc_number) FROM SVC_NUMBER WHERE svc_number_status = 10 and MVNO_BUSINESS_MARK = 'VOPI' and THOUSAND_SVC_SEG like '%s'" % seg
    return common_optOracle.QueryDB(sql)
def getIMSI(seg):
    '''
    @param seg segment prefix (first 7 digits) to match
    @return an available IMSI (status 20) within that segment
    '''
    return common_optOracle.QueryDB("SELECT IMSI FROM imsi WHERE TEN_THOUSAND_SEGMENT like '%s' and IMSI_STATUS = 20" % seg)
def getICCID(imsi):
    '''
    @param imsi IMSI whose bound ICCID is wanted
    @return the ICCID linked to that IMSI
    '''
    return common_optOracle.QueryDB("SELECT iccid FROM iccid WHERE iccid_id in (select iccid_id from imsi where imsi = '%s')" % imsi)
def getIMSI10_8():
    '''
    Find one available IMSI whose 10th digit equals 8, restricted to
    usable VOPI 170/171 number segments.
    @return the first matching (segment, iccid, imsi) row
    '''
    return common_optOracle.GetDatas_QueryDB("select im.TEN_THOUSAND_SEGMENT,icd.iccid, im.imsi from iccid icd,imsi im where icd.iccid_id = im.iccid_id \
and im.TEN_THOUSAND_SEGMENT in \
( SELECT substr(THOUSAND_SVC_SEG,0,7) FROM SVC_NUMBER WHERE svc_number_status = 10 and MVNO_BUSINESS_MARK = 'VOPI' and (THOUSAND_SVC_SEG like '170%' or THOUSAND_SVC_SEG like '171%')\
) and im.IMSI_STATUS = 20 and substr( im.imsi,10,1) = 8 and rownum = 1")[0]
def getUid_IMSI(svc_number):
    '''
    @param svc_number service number whose user id is wanted
    @return (mvno_user_id, imsi) for the active (status 1, type 2) user
    on that number
    '''
    svc_number = int(svc_number)
    return common_optOracle.GetDatas_QueryDB("select mvno_user_id,imsi from dbvop.mvno_user where mvno_user_status = 1 and mvno_user_type = 2 and svc_number = %d" %svc_number )[0]
def billing_intercept(dicts=None,Original=None):
    """
    Build mutated CDR (call detail record) lines from a template.

    Original -- the raw CDR line to mutate (a fixed template by default).
    dicts -- list of dicts; each dict describes one CDR to emit, where the
    key is the field position to replace and the value is the replacement
    content. Example: [{0:"a"},{0:"b"}] emits 2 CDRs with field 0 changed.
    Returns Original unchanged when dicts is not a list.
    """
    resOriginal = []
    if Original is None:
        Original = "20011004 143 23607858958 0100000GJYY0099089800201705090332124SZD.00.m.gz 2 1000000072 xxx m 10020000000000 17151192569 005521317184040481 17151192569 201711061030472017110510194986 460091195800113 21072 09535 61418702220 21 0 0 0 0 0 0 "
    if type(dicts) != list:
        return Original
    for eachDisc in dicts:
        for eachKey in eachDisc.keys():
            # NOTE(review): the template is re-assigned on every key, so a
            # caller-supplied Original only survives the type check above,
            # and multiple keys in one dict do not compose -- confirm this
            # is intended.
            Original = "20011004 143 23607858958 0100000GJYY0099089800201705090332124SZD.00.m.gz 2 1000000072 xxx m 10020000000000 17151192569 005521317184040481 17151192569 201711061030472017110510194986 460091195800113 21072 09535 61418702220 21 0 0 0 0 0 0 "
            # Look up the field's start position and maximum width in the
            # conf_src_record configuration table.
            sql = "select a.factor1, a.factor2 from conf_src_record a where a.file_type = 'MBVC' and a.record_serial = '0' and a.field_serial='%s' order by a.field_serial" % eachKey
            start_position, end_position = common_optOracle_billing.GetDatas_QueryDB(sql)[0]
            rl_old = Original[int(start_position): int(start_position) + int(end_position)]
            #print rl_old ,eachDisc[eachKey].ljust(end_position)
            # Splice the space-padded replacement into the fixed-width slot.
            Front = Original[:int(start_position)]
            After = Original[int(start_position) + int(end_position):]
            Middle = Original[int(start_position): int(start_position) + int(end_position)].replace(rl_old, eachDisc[eachKey].ljust(end_position))
            Original = Front + Middle + After
        resOriginal.append(Original)
    return resOriginal
def billing_file(Original,file_name=None):
    """
    __author__ = 'XT'
    Write CDR content into the files_in drop directory.

    Original -- file content to write (required).
    file_name -- optional output file name; when omitted, a default name
    is built with the current timestamp substituted in.
    """
    if not file_name:
        nowTime = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))
        file_name = '93001800700000000GJYY0099089811201711060332124SZD.C0'
        file_name = file_name.replace('20171106033212',nowTime)
        file_name = nowTime + file_name[14:]
    parent_path = os.path.dirname(sys.path[0])
    # BUG FIX: file() is a Python-2-only builtin (removed in Python 3);
    # open() behaves identically here on both versions.
    with open(parent_path + '/vop_Billing_TestCases/files_in/'+file_name,'w') as fobj: # '/app/application_mode/data_in/1/MBVC/1/in/' +
        fobj.write(Original)
if __name__ == '__main__':
    billing_file(billing_intercept(dicts = [{15:"15010793333"}]))
import subprocess
import re
import collections
def dictionary_seq(identifier, filename):
    """Parse a multi-FASTA stream into {identifier + header_id: SEQUENCE}.

    identifier -- species tag prepended to every sequence name.
    filename -- an open file (or any iterable of lines) in FASTA format.
    Only the first space-delimited token of each '>' header is kept as
    the name; sequences are uppercased with trailing whitespace stripped.
    """
    dictionary = {}
    for line in filename:
        if line.startswith(">"):
            # New record: remember its name and start an empty sequence.
            current_name = line.split(' ')[0].rstrip().lstrip('>')
            current_seq = ''
        else:
            # Accumulate the growing sequence and (re)store it.
            current_seq = (current_seq + str(line)).rstrip().upper()
            dictionary[identifier + current_name] = current_seq
    return dictionary
def mcl(file, inflation):
    """Run the MCL clustering pipeline on an .abc similarity file.

    file -- path to the .abc file (tabular BLAST output: query, subject,
    e-value).
    inflation -- MCL -I parameter; higher inflation gives finer-grained
    (more, smaller) clusters. MCL names its cluster output
    "out.<base>.mci.I<inflation*10>".

    FIX: the dump step previously hard-coded the ".I30" suffix (correct
    only for inflation=3, as the old comment itself warned); the suffix is
    now derived from the inflation argument. Also avoids rstrip('.abc'),
    which strips *characters*, not the suffix.
    """
    base = file[:-len('.abc')] if file.endswith('.abc') else file
    suffix = "I" + str(int(round(float(inflation) * 10)))
    # Build the matrix (.mci) and name-table (.tab) index files.
    subprocess.call(["mcxload -abc " + file + " --stream-mirror --stream-neg-log10 -stream-tf 'ceil(200)' -o " + base + ".mci -write-tab " + base + ".tab"], shell=True)
    # Cluster with the requested inflation.
    subprocess.call(["mcl " + base + ".mci -I " + str(inflation)], shell=True)
    # Dump clusters with original sequence names; the suffix now tracks
    # the inflation parameter automatically.
    subprocess.call(["mcxdump -icl out." + base + ".mci." + suffix + " -tabr " + base + ".tab -o dump." + base + ".mci." + suffix], shell=True)
def blast(file):
    """All-vs-all BLAST of the concatenated transcriptome file.

    Builds a nucleotide BLAST db, runs discontiguous megablast against
    itself, renames the tabular output to .abc for mcxload, then removes
    the db files. [NEEDS TO BE GENERALIZED]: paths are hard-coded to one
    user's home directory and ignore the `file` argument's directory.
    """
    # makes a BLAST directory and uses discontinuous megablast to BLAST concatenated file of all species against itself [NEEDS TO BE GENERALIZED]
    subprocess.call(["makeblastdb -in /home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast -dbtype nucl"], shell=True)
    subprocess.call(["blastn -task dc-megablast -query /home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast -db /home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast -out alltranscriptomes2mcl.blastout -evalue 1e-70 -outfmt '6 qseqid sseqid evalue length pident'"], shell=True)
    subprocess.call(["mv alltranscriptomes2mcl.blastout alltranscriptomes2mcl.abc"], shell=True)
    subprocess.call(["rm /home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast*"], shell=True)
def rc(seq):
    """Return the reverse complement of a DNA sequence, uppercased.

    Ambiguity codes W/Y/S/K/M map to N; N and gap ('-') are preserved.
    Raises KeyError on any other character.

    Cleanup: removed four dead `sort = str(type(...))` statements that had
    no effect, and replaced the list-building steps with a single join.
    """
    basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '-': '-', 'W':'N', 'Y':'N', 'S':'N', 'K':'N', 'M':'N'}
    # Complement each base while walking the sequence backwards.
    return "".join(basecomplement[base] for base in reversed(seq.upper()))
def codons(seq):
    """Return the longest stop-free codon run found over all start offsets.

    For every start offset, reads codons until the first stop (TAG/TAA/TGA)
    and records the stop-free prefix; returns the longest such run. When no
    run was recorded at all, max() raises ValueError, a message is printed,
    and None is returned implicitly.
    """
    try:
        ORF_list = []
        codon_list = []
        for i in range(len(seq)):
            codon = [seq[j:j+3] for j in range(i, len(seq), 3)]
            current_seq = []
            # NOTE(review): this inner loop reuses the name `i`, shadowing
            # the frame offset; harmless here but easy to misread.
            for i in codon:
                if i != 'TAG' and i != 'TAA' and i != 'TGA':
                    current_seq.append(i)
                else:
                    joined_seq = ''.join(current_seq)
                    ORF_list.append(joined_seq)
                    del(current_seq[:])
                    break
            # NOTE(review): an offset whose codons contain *no* stop at all
            # is never appended to ORF_list -- verify that is intended
            # (callers pre-filter with an any()-based stop-codon check).
        return(max(ORF_list, key = len))
    except ValueError:
        print('I dont think there are any sequences here')
def dists():
    """Run mothur dist.seqs on the aligned ORFs and return each sequence's
    mean pairwise distance as {sequence_name: mean_distance}.
    """
    subprocess.call(['mothur "#dist.seqs(fasta=cluster.orfs.transalign);"'], shell=True)
    f = open('cluster.orfs.dist')
    dists = f.readlines()
    # Collect every sequence name appearing in column 1 or 2.
    seqs = []
    for i in dists:
        first_seq = i.split()[0]
        second_seq = i.split()[1]
        seqs.append(first_seq)
        seqs.append(second_seq)
    seqs = set(seqs)
    seqDists = {}
    for i in seqs:
        currentSeqDists = []
        for line in dists:
            # NOTE(review): substring match -- a name that is a prefix of
            # another name will also match that sequence's rows; verify
            # names are never substrings of one another.
            if i in line:
                currentSeqDist = float(line.split()[2])
                currentSeqDists.append(currentSeqDist)
        # Mean of all distances involving this sequence.
        meanDist = sum(currentSeqDists)/len(currentSeqDists)
        seqDists[i] = meanDist
    return(seqDists)
def consensus():
    """Run consensus.R and return the percentage of alignment positions
    whose consensus call is a real base (neither 'NA' nor a gap '-')."""
    subprocess.call(["Rscript consensus.R"], shell=True)
    f = open('alignment_consensus')
    consensus = f.readlines()
    consensus_bases = 0
    total_bases = 0
    for raw_entry in consensus:
        entry = raw_entry.rstrip('\n')
        total_bases += 1
        if entry != 'NA' and entry != '-':
            consensus_bases += 1
    percent_consensus_bases = float(consensus_bases * 100) / float(total_bases)
    return percent_consensus_bases
# different options for alignment of protein sequences and translational aligner
def dna2pep():
    # Translate the longest ORFs into peptide FASTA (cluster.orfs.pep).
    subprocess.call(["python dna2pep-1.1/dna2pep.py --outformat fasta cluster_longest_orfs > cluster.orfs.pep"], shell=True)
def mafft():
    # Align the peptides with MAFFT -> cluster.orfs.pep.aln.
    subprocess.call(["mafft --quiet cluster.orfs.pep > cluster.orfs.pep.aln"], shell=True)
def tcoffee():
    # Alternative aligner: T-Coffee (output renamed to match mafft's).
    subprocess.call(["t_coffee cluster.orfs.pep -output=fasta_aln"], shell=True)
    subprocess.call(["mv cluster.orfs.fasta_aln cluster.orfs.pep.aln"], shell=True)
def revtrans():
    # Back-translate the protein alignment to codons with RevTrans.
    subprocess.call(["python RevTrans-1.4/revtrans.py cluster_longest_orfs cluster.orfs.pep.aln cluster.orfs.transalign"], shell=True)
def pal2nal():
    # Alternative back-translator: pal2nal -> cluster.orfs.transalign.
    subprocess.call(["perl pal2nal.v14/pal2nal.pl -output fasta cluster.orfs.pep.aln cluster_longest_orfs > cluster.orfs.transalign"], shell=True)
def del_intermediates():
    """Remove every per-cluster intermediate file left by the pipeline."""
    cleanup_commands = (
        'rm alignment_consensus',
        'rm *cluster.orf.raxml*',
        'rm paml_out',
        'rm mothur.*',
        'rm erraafile.*',
        'rm errnucfile.*',
        'rm RAxML_bestTree.cluster.orf.raxml',
        'rm RAxML_log.cluster.orf.raxml',
        'rm RAxML_result.cluster.orf.raxml',
        'rm RAxML_info.cluster.orf.raxml',
        'rm RAxML_parsimonyTree.cluster.orf.raxml',
    )
    for command in cleanup_commands:
        subprocess.call([command], shell=True)
# create a dict to hold all the dicts for all species
dictionary_seq_dictionaries={}
# Create an empty list to hold all species identifiers to refer to later
identifiers_list=[]
f = open('pipeline_control')
testfile = f.readlines()
# Read in contents of control file, creating a dict for each species, and adding these dicts to another dict
# (control file format: "<identifier> <path-to-fasta>" per line)
for i in testfile:
    identifier=i.split()[0]
    identifier = identifier.upper()
    identifiers_list.append(identifier)
    path=i.split()[1]
    filename = open(path)
    dictionary_seq_dictionaries['{0}'.format(identifier)]=dictionary_seq(identifier, filename)
    filename.close()
# write contents of dict of dicts to a multifasta file, filtering by sequence length
# NOTE(review): all_trans is never closed/flushed before blast() reads the
# same path below; buffered writes may be missing -- confirm.
all_trans = open('/home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast', 'w')
for i in identifiers_list:
    for x in (dictionary_seq_dictionaries[i]):
        # keep only sequences longer than 200 bp
        if int(len((dictionary_seq_dictionaries[i])[x])) > 200:
            all_trans.write('>' + x + '\n' + (dictionary_seq_dictionaries[i])[x] + '\n')
blast('/home/kemelianova/ncbi-blast-2.2.27+/db/alltranscriptomes2blast')
mcl('alltranscriptomes2mcl.abc', 3)
# Open the outfile to write results to
outfile = open('pipline_generic_outfile', 'w')
# One line of the MCL dump = one cluster of sequence names.
f = open('dump.alltranscriptomes2mcl.mci.I30')
dump = f.readlines()
# Per-cluster analysis: for every MCL cluster, write its sequences to a
# FASTA file, extract longest ORFs, align, and record copy numbers,
# consensus %, pairwise distances, and codeml kN/kS values to the outfile.
# NOTE(review): indentation below is reconstructed; nesting of the final
# del_intermediates() (per cluster, at loop level) should be confirmed.
for cluster in dump:
    # define dicts and lists to store cluster specific values to, and open file to write sequences of cluster to a FASTA file
    copyDict = {}
    seq_count = []
    seq_list = []
    file = open('cluster_seqs', 'w')
    # Count taxon specific copy numbers and store counts in a list
    for x in identifiers_list:
        count = cluster.count(x)
        copyDict[x] = count
        seq_count.append(count)
    # Store all sequence names in a list to be referred to later
    seq = cluster.split()
    for i in seq:
        seq_list.append(i)
    # write cluster sequences to FASTA file
    for i in identifiers_list:
        for x in seq:
            if x.startswith(i):
                file.write('>' + x + '\n' + (dictionary_seq_dictionaries[i])[x] + '\n')
    file.close()
    # Limit further analyses to clusters with more than one sequence
    if sum(seq_count) > 1:
        file = open('cluster_seqs')
        out = open('cluster_longest_orfs', 'w')
        # for each cluster, get the longest ORF and write to a new file
        L = {}
        for line in file:
            if line.startswith(">"):
                C_seq = ''
                C_split_line = line.split(' ')
                C_name = C_split_line[0]
                C_name = C_name.rstrip()
                C_name = C_name.lstrip('>')
            else:
                C_seq = C_seq + str(line)
                C_seq = C_seq.rstrip()
                C_seq = C_seq.upper()
                L[C_name] = C_seq
        for i in L:
            s = L[i]
            rcs = rc(s)
            stop = ('TAG','TAA','TGA')
            # check whether the sequence has no stop codons anywhere
            # NOTE(review): any(... not in ...) is true when at least ONE
            # stop codon is absent, not all of them -- verify the intent.
            if any(i not in s for i in stop) or any(i not in rcs for i in stop):
                out.write('>' + i + '\n' + s + '\n')
            else:
                # Otherwise take the longer of the best forward/reverse ORF.
                FandR = []
                F = codons(s)
                R = codons(rc(s))
                FandR.append(F)
                FandR.append(R)
                out.write('>' + i + '\n' + max(FandR, key = len) + '\n')
        # translate, align and translationally align all sequences
        dna2pep()
        mafft()
        pal2nal()
        f = open('cluster.orfs.transalign')
        test = f.read()
        # Only proceed when the translational alignment is non-empty.
        if test:
            f.close()
            # Make a ML tree using RAxML if cluster size exceeds 3
            if sum(seq_count) > 3:
                subprocess.call(["./standard-RAxML-master/raxmlHPC-PTHREADS-SSE3 -T 6 -m GTRCAT -p 12345 -s cluster.orfs.transalign -n cluster.orf.raxml"], shell=True)
            # Write taxon copy numbers to oufile
            for i in identifiers_list:
                outfile.write(str(copyDict[i]) + '\t')
            # Write % consensus bases to outfile
            consensus_bases = consensus()
            outfile.write(str(consensus_bases) + '\t')
            # Write all pairwise distances to outfile
            distDict = dists()
            outfile.write(('| start dists | ') + '\t')
            for i in distDict:
                dist = str(distDict[i])
                outfile.write(dist + '\t')
            outfile.write(('| end dists | ') + '\t')
            # Call codeml
            subprocess.call(['codeml'], shell=True)
            f = open('paml_out')
            paml = f.read()
            # Use try statement to prevent stalling due to no codeml result
            try:
                # Use split to isolate matrix output by codeml from other output and remove brackets and values within them (these contain kN and kS values)
                knks_list = []
                output = paml.split('comparison.)\n\n')
                output = (output[1])
                output = output.split('TREE')
                output = output[0]
                knks = re.sub(r'\([^)]*\)', '', output)
                # Add all values from the matrix to a list, excluding sequence names and values returned with a -1.0000 value
                for i in knks.split():
                    if not any(x in i for x in identifiers_list):
                        if i != '-1.0000':
                            knks_list.append(i)
                # Reformat bracket placement for ease of splitting
                output = output.replace(')-', ') -')
                kn_list = []
                ks_list = []
                # Write kN and kS values to separate lists
                for i in output.split():
                    if '(' in i:
                        kn = i.lstrip('(')
                        if kn != '-1.0000':
                            kn_list.append(kn)
                    elif ')' in i:
                        ks = i.rstrip(')')
                        if ks != '-1.0000':
                            ks_list.append(ks)
                # Write kN/kS, kN and kS to outfile
                outfile.write('|knks start|' + '\t')
                for i in knks_list:
                    outfile.write(str(i) + '\t')
                outfile.write('|knks end|' + '\t')
                outfile.write('|kn start|' + '\t')
                for i in kn_list:
                    outfile.write(str(i) + '\t')
                outfile.write('|kn end|' + '\t')
                outfile.write('|ks start|' + '\t')
                for i in ks_list:
                    outfile.write(str(i) + '\t')
                outfile.write('|ks end|' + '\t')
                outfile.write('|sequence name start|' + '\t')
                for i in seq_list:
                    outfile.write(i + '\t')
                outfile.write('|sequence name end|' + '\t')
                outfile.write('\n')
            except IndexError:
                print('no paml result')
                outfile.write('\n')
    # Delete all intermediate files
    del_intermediates()
|
import pygame as pg
import loading as ld
import Server.dinosaur as dino
import Server.cactus as cac
import Server.bird as bd
import Server.background as bg
def screen():
    """Run the T-Rex runner game: set up the fullscreen window, sprites and
    floor, then drive the 60 FPS input/update/render loop until the player
    quits or the dinosaur collides with a cactus."""
    pg.init()
    # Use the native display resolution for a fullscreen-sized window.
    delta_x, delta_y = (pg.display.Info().current_w, pg.display.Info().current_h)
    display = pg.display.set_mode([pg.display.Info().current_w, pg.display.Info().current_h])
    pg.display.set_caption("T-Rex Running")
    # Playfield geometry derived from the screen size.
    space_game, edge_start, edge_end, middle = ld.dimensions_game(delta_x, delta_y)
    # position_x, position_y = position_game(space_game)
    image_dino, images_cactus, image_bird, image_floor = ld.load_image("/assets/image_general.png")
    t_rex = dino.Dinosaur(edge_start+300, 350, image_dino)
    cactus1 = cac.Cactus(edge_end, 370, images_cactus)
    cactus2 = cac.Cactus(edge_start, 370, images_cactus)
    bird = bd.Bird(edge_end, 320, image_bird)
    start_floor = bg.Background(0, 420, image_floor)
    final_floor = bg.Background(2400, 420, image_floor)
    close = False
    is_jump = False
    is_down = False
    obstacle_cactus = False  # which cactus is currently being recycled
    jump_count = 12  # jump arc counter: 12 -> -12 gives a symmetric parabola
    count_frame_bird = 0  # frame counters throttle sprite animation speed
    count_frame_dino = 0
    speed = 10
    game_time = 0
    clock = pg.time.Clock()
    while close is not True:
        clock.tick(60)  # cap at 60 FPS
        # Scroll speed applied to every moving object each frame.
        start_floor.speed_up(speed)
        final_floor.speed_up(speed)
        bird.speed_up(speed)
        cactus1.speed_up(speed)
        cactus2.speed_up(speed)
        game_time += 1
        keys = pg.key.get_pressed()
        for event in pg.event.get():
            if event.type == pg.QUIT or keys[pg.K_ESCAPE]:
                close = True
        if keys[pg.K_UP]:
            is_jump = True
        if keys[pg.K_DOWN]:
            is_down = True
        if is_jump:
            # Advance the jump arc one step per frame until it completes.
            if jump_count >= -12:
                t_rex.jump(jump_count)
                jump_count -= 1
            else:
                jump_count = 12
                is_jump = False
        elif is_down:
            # Duck animation, throttled to every 4th frame.
            if count_frame_dino > 3:
                t_rex.down()
                count_frame_dino = 0
                t_rex.coordinates(start_floor)
            count_frame_dino += 1
            is_down = False
        else:
            # Regular run animation, throttled to every 4th frame.
            if count_frame_dino > 3:
                t_rex.walk()
                count_frame_dino = 0
            count_frame_dino += 1
            t_rex.coordinates(start_floor)
        # Bird flaps every 16 frames.
        if count_frame_bird > 15:
            bird.fly()
            count_frame_bird = 0
        count_frame_bird += 1
        # Recycle whichever cactus crossed the middle back to the right edge.
        if cactus1.position_x < middle and obstacle_cactus is False:
            cactus2.choose_image(edge_end, start_floor)
            obstacle_cactus = True
        if cactus2.position_x < middle and obstacle_cactus is True:
            cactus1.choose_image(edge_end, start_floor)
            obstacle_cactus = False
        # NOTE(review): this overwrites a close=True set by the event loop in
        # the same frame — confirm quitting via ESC/QUIT still works reliably.
        close = t_rex.collided([cactus1, cactus2])
        cactus1.change_position()
        cactus2.change_position()
        bird.change_position()
        start_floor.move()
        final_floor.move()
        pg.display.update()
        ld.render(display, t_rex, cactus1, cactus2, bird, start_floor, final_floor, space_game)
    pg.quit()
screen()
|
import datetime
from dataclasses import dataclass
from sqlalchemy import Column, Integer, String
from src import db
@dataclass
class Branch(db.Model):
    """
    This is the table for branches and their codes. Each account belongs to a particular branch the
    default branch is the head office (01) which will be created during the system setup process.
    """
    __tablename__ = 'branch'

    id: int = Column(Integer, primary_key=True)
    code: str = Column(String(5))
    description: str = Column(String(100))
    # FIX: pass the callable, not its result — datetime.datetime.now() was
    # evaluated once at import time, stamping every row with process start-up
    # time instead of the actual insert time.
    create_date: str = Column(String(30), default=datetime.datetime.now)
|
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
import string, random
from django.core.mail import BadHeaderError, EmailMultiAlternatives
class EmailBackend(ModelBackend):
    """Authentication backend that identifies users by email address."""

    def authenticate(self, request, email=None, password=None, **kwargs):
        """Return the user whose email matches and whose password verifies.

        Returns None when no such user exists or the password is wrong.
        """
        user_model = get_user_model()
        try:
            candidate = user_model.objects.get(email=email)
        except user_model.DoesNotExist:
            return None
        return candidate if candidate.check_password(password) else None
def send_emails(subject, html_content, to):
    """Send an HTML email to the given recipients.

    :param subject: email subject line
    :param html_content: HTML body attached as the text/html alternative
    :param to: list of recipient addresses
    :return: number of messages sent, or 0 when the header was rejected
    """
    # to = ['shaheroumwali@gmail.com']
    msg = EmailMultiAlternatives(subject, '', 'arslanmehmood051@gmail.com', to)
    msg.attach_alternative(html_content, "text/html")
    try:
        return msg.send()
    except BadHeaderError:
        # FIX: the original returned `res` here, which was never bound when
        # send() raised — that produced an UnboundLocalError instead of a
        # graceful failure. Report zero messages sent.
        return 0
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
|
import pika
def on_message(channel, method_frame, header_frame, body):
    """Print the payload of a delivered message, then acknowledge it."""
    tag = method_frame.delivery_tag
    print("Message body", body)
    channel.basic_ack(delivery_tag=tag)
# Connect to a local RabbitMQ broker using the default guest credentials.
credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters('localhost', credentials=credentials)
print(parameters)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
# Durable direct exchange; the queue is removed once the consumer goes away.
channel.exchange_declare(exchange="toulouse", exchange_type="direct", passive=False, durable=True, auto_delete=False)
channel.queue_declare(queue="notifications", auto_delete=True)
channel.queue_bind(queue="notifications", exchange="toulouse", routing_key="toulouse.state")
# Deliver at most one unacknowledged message at a time to this consumer.
channel.basic_qos(prefetch_count=1)
# NOTE(review): the (callback, queue) argument order matches pika 0.x;
# pika 1.x expects basic_consume(queue, on_message_callback) — confirm the
# installed pika version.
channel.basic_consume(on_message, "notifications")
try:
    # Blocks here dispatching messages to on_message until interrupted.
    channel.start_consuming()
except KeyboardInterrupt:
    channel.stop_consuming()
connection.close()
from openpyxl import load_workbook # Openpyxl is a Python library for reading and writing Excel 2010
import datetime
# Names of the worksheets to read.
# NOTE(review): declared as a dict but iterated like a list of sheet names
# elsewhere — confirm the intended container type.
WS_LIST = {} #тут перечисляем все названия листов, к-ые хотим прочитать
def repair_time(my_time):
    """Normalize a worksheet cell value to a datetime.time.

    datetime values are reduced to their time component, time values pass
    through unchanged, and anything else yields "".
    """
    # FIX: the original called isinstance(my_time=my_time, ...) — isinstance
    # accepts no keyword arguments, so every call raised a TypeError.
    if isinstance(my_time, datetime.datetime):
        my_time = my_time.time()
    if isinstance(my_time, datetime.time):
        return my_time
    else:
        return ""
def read_ws_data(work_sheet):
    """Collect the schedule rows of one worksheet as a list of dicts.

    Reading starts at row 6 and stops at the first row whose first cell is
    empty (or at max_row - 1). Each row yields keys
    "time_start", "time_end" and "title" from columns 1-3.
    """
    collected = list()
    for row_idx in range(6, work_sheet.max_row):
        # An empty first cell marks the end of the data block
        # (`is None`, deliberately not `== None`).
        if work_sheet.cell(row=row_idx, column=1).value is None:
            break
        collected.append({
            "time_start": work_sheet.cell(row=row_idx, column=1).value,
            "time_end": work_sheet.cell(row=row_idx, column=2).value,
            "title": work_sheet.cell(row=row_idx, column=3).value,
        })
    return collected
def read_excel(filename, ws_list=None):
    """Read the given worksheets from an Excel workbook.

    :param filename: path to the .xlsx file
    :param ws_list: iterable of sheet names to read; None (the new default)
        reads every sheet in the workbook. Making the parameter optional
        fixes the __main__ call, which passed only a filename.
    :return: dict mapping sheet name -> list of row dicts (see read_ws_data)
    """
    work_book = load_workbook(filename)
    if ws_list is None:
        ws_list = work_book.sheetnames
    result = dict()
    for ws_name in ws_list:
        work_sheet = work_book[ws_name]
        result[ws_name] = read_ws_data(work_sheet)
    return result
def show_excel_data(data):
    """Dump the parsed workbook data to stdout (placeholder presentation)."""
    print(data)
if __name__ == "__main__":
excel_data = read_excel("имя файла")
show_excel_data(excel_data)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import chainer
import cv2 as cv
import glob
import numpy as np
import os
import xml.etree.ElementTree as ET
class VOC(chainer.dataset.DatasetMixin):
    """PASCAL VOC detection dataset (one example per annotated object).

    Each example is ``(img, im_info, gt_boxes)`` where ``img`` is a CHW
    float32 BGR image with the dataset mean subtracted, ``im_info`` is
    ``[height, width, scale]`` after resizing, and ``gt_boxes`` is
    ``[xmin, ymin, xmax, ymax, class_id]``.
    """

    LABELS = ('__background__',  # always index 0
              'aeroplane', 'bicycle', 'bird', 'boat',
              'bottle', 'bus', 'car', 'cat', 'chair',
              'cow', 'diningtable', 'dog', 'horse',
              'motorbike', 'person', 'pottedplant',
              'sheep', 'sofa', 'train', 'tvmonitor')
    IMG_TARGET_SIZE = 600  # target length for the shorter image side
    IMG_MAX_SIZE = 1000  # cap for the longer image side after scaling

    def __init__(
            self, img_dir, anno_dir, list_dir, list_suffix, use_diff=False):
        """Collect image ids from ``*_<list_suffix>.txt`` split files and
        parse the XML annotations.

        :param img_dir: directory containing the JPEG images
        :param anno_dir: directory containing the XML annotation files
        :param list_dir: directory containing the ImageSets split files
        :param list_suffix: split suffix, e.g. 'train' or 'val'
        :param use_diff: include objects marked 'difficult' when True
        """
        self.mean = np.array([[[103.939, 116.779, 123.68]]])  # BGR
        self.img_dir = img_dir
        self.anno_dir = anno_dir
        self.use_diff = use_diff
        self.use_list = []
        for fn in glob.glob('{}/*_{}.txt'.format(list_dir, list_suffix)):
            for line in open(fn):
                # First token is the image id; per-class lists add a label.
                self.use_list.append(line.strip().split()[0])
        self.parse_anno()

    def parse_anno(self):
        """Build ``self.objects``: one dict per (image, object) pair."""
        self.objects = []
        for fn in glob.glob('{}/*.xml'.format(self.anno_dir)):
            tree = ET.parse(fn)
            filename = tree.find('filename').text
            img_id = os.path.splitext(filename)[0]
            if img_id not in self.use_list:
                continue
            for obj in tree.findall('object'):
                if not self.use_diff and int(obj.find('difficult').text) == 1:
                    continue
                bb = obj.find('bndbox')
                bbox = [int(bb.find('xmin').text), int(bb.find('ymin').text),
                        int(bb.find('xmax').text), int(bb.find('ymax').text)]
                # VOC coordinates are 1-based; shift to 0-based floats.
                bbox = [float(b - 1) for b in bbox]
                datum = {
                    'filename': filename,
                    'name': obj.find('name').text.lower().strip(),
                    'pose': obj.find('pose').text.lower().strip(),
                    'truncated': int(obj.find('truncated').text),
                    'difficult': int(obj.find('difficult').text),
                    'bndbox': bbox,
                }
                self.objects.append(datum)

    def __len__(self):
        return len(self.objects)

    def get_example(self, i):
        """Return ``(img, im_info, gt_boxes)`` for the i-th object."""
        obj = self.objects[i]
        bbox = obj['bndbox']
        name = obj['name']
        clsid = self.LABELS.index(name)
        gt_boxes = np.asarray([bbox[0], bbox[1], bbox[2], bbox[3], clsid],
                              dtype=np.float32)
        # Load image
        img_fn = '{}/{}'.format(self.img_dir, obj['filename'])
        # FIX: np.float was removed in NumPy 1.24; np.float64 is the exact
        # alias it used to resolve to.
        img = cv.imread(img_fn).astype(np.float64)
        img -= self.mean
        # Scaling: bring the shorter side to IMG_TARGET_SIZE.
        im_size_min = np.min(img.shape[:2])
        im_size_max = np.max(img.shape[:2])
        im_scale = float(self.IMG_TARGET_SIZE) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > self.IMG_MAX_SIZE:
            im_scale = float(self.IMG_MAX_SIZE) / float(im_size_max)
        img = cv.resize(img, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv.INTER_LINEAR)
        h, w = img.shape[:2]
        im_info = np.asarray([h, w, im_scale], dtype=np.float32)
        # HWC -> CHW for the network input.
        img = img.transpose(2, 0, 1).astype(np.float32)
        return img, im_info, gt_boxes
if __name__ == '__main__':
    # Smoke test: build the train and val splits and inspect the first example.
    dataset = VOC('data/VOCdevkit/VOC2007/JPEGImages',
                  'data/VOCdevkit/VOC2007/Annotations',
                  'data/VOCdevkit/VOC2007/ImageSets/Main', 'train')
    img, im_info, gt_boxes = dataset[0]
    print(img.shape)
    print(im_info)
    print(gt_boxes)
    print('len:', len(dataset))
    dataset = VOC('data/VOCdevkit/VOC2007/JPEGImages',
                  'data/VOCdevkit/VOC2007/Annotations',
                  'data/VOCdevkit/VOC2007/ImageSets/Main', 'val')
    img, im_info, gt_boxes = dataset[0]
    print(img.shape)
    print(im_info)
    print(gt_boxes)
    print('len:', len(dataset))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# datetime:2020/12/24 9:40
# author:qiu
from appium import webdriver
import time
# Desired capabilities describing the target device and the Tencent News app.
desired_caps = {
    'platformName': 'Android',
    'deviceName': '949c3e52',
    'platformVersion': '10.0.3',
    'appPackage': 'com.tencent.news',
    'appActivity': 'com.tencent.news.activity.SplashActivity',
    # FIX: the key was 'unicodeKeyboard ' with a trailing space, so Appium
    # never recognized the capability. Sends strings using unicode encoding.
    'unicodeKeyboard': True,
    'resetKeyboard': True  # hide the soft keyboard after input
}
# Connect to the local Appium server and launch the app.
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
time.sleep(5)
driver.implicitly_wait(20)
# Dismiss the startup dialog, accept the permission alert, open search.
driver.find_element_by_id('com.tencent.news:id/pd').click()
time.sleep(2)
driver.switch_to.alert.accept()
time.sleep(2)
driver.find_element_by_id('com.tencent.news:id/cp4').click()
time.sleep(5)
# List all available contexts (native/webview) from the news home page.
contexts = driver.contexts
print(contexts)
time.sleep(3)
print('over')
# driver.find_element_by_id('com.tencent.news:id/c8l').send_keys(u'英国')
# # driver.switch_to.alert.accept()
# time.sleep(3)
# driver.find_element_by_xpath('').click()
# time.sleep(2)
# driver.find_element_by_class_name('android.widget.LinearLayout').click()
# time.sleep(3)
# driver.find_element_by_class_name('android.widget.ImageViewo').click()
#!/usr/bin/env python3
import aiohttp
from aiohttp import web
import asyncio
from asyncio import Queue
from async_timeout import timeout
import json
import logging
import re
import signal
import os
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.exc import RPCError
from hbmqtt.client import MQTTClient, ClientException
from hbmqtt.mqtt.constants import QOS_2
# Bridge configuration, overridable through environment variables.
HTTP_URL = os.environ.get('HTTP_URL', 'http://localhost:8069/jsonrpc')  # upstream JSON-RPC endpoint
HTTP_BIND_ADDRESS = os.environ.get('HTTP_BIND_ADDRESS', '0.0.0.0')
HTTP_BIND_PORT = os.environ.get('HTTP_BIND_PORT', '8888')
HTTP_REQUEST_TIMEOUT = float(os.environ.get('HTTP_REQUEST_TIMEOUT', 4))  # seconds
MQTT_URL = os.environ.get('MQTT_URL', 'mqtt://localhost/')
CLIENT_UID = os.environ.get('CLIENT_UID', 'http_bridge')  # MQTT identity of this bridge
MQTT_REPLY_TIMEOUT = float(os.environ.get('MQTT_REPLY_TIMEOUT', 5))  # seconds
# NOTE(review): any non-empty string (even 'false') enables debug logging.
DEBUG = os.environ.get('DEBUG', False)
logger = logging.getLogger('mqtt_http_bridge')
# Shared JSON-RPC protocol parser ('potocol' spelling kept: referenced below).
rpc_potocol = JSONRPCProtocol()
class HttpMqttBridge(object):
    """Bidirectional JSON-RPC bridge between HTTP and MQTT.

    * HTTP -> MQTT: a JSON-RPC request POSTed to this bridge must carry a
      ``dst`` entry in ``params``; the request is published on
      ``rpc/<dst>/<our uid>`` and the MQTT reply (matched by request id)
      becomes the HTTP response.
    * MQTT -> HTTP: requests published on ``rpc/<our uid>/<from>`` are
      forwarded to ``HTTP_URL`` and the HTTP response is published back on
      ``rpc/<our uid>/<from>/reply``.
    """

    # Class-level state shared by all instances (a single bridge is created
    # in __main__): reply_topic -> {request_id: asyncio.Queue}.
    mqtt_replies = {}  # Keep track of sent requests
    mqtt_reply_subscriptions = []  # Topics we subscribed for reply

    def __init__(self, loop=None):
        """Create the MQTT client and install SIGINT/SIGTERM handlers."""
        if not loop:
            loop = asyncio.get_event_loop()
        self.loop = loop
        self.mqtt_url = MQTT_URL
        self.mqtt_reply_timeout = MQTT_REPLY_TIMEOUT
        self.http_url = HTTP_URL
        self.http_request_timeout = HTTP_REQUEST_TIMEOUT
        self.client_uid = CLIENT_UID
        logger.info('Bridging {} with UID {}'.format(HTTP_URL, CLIENT_UID))
        self.client = MQTTClient(client_id=CLIENT_UID)
        logger.info('Initialized')
        self.http_server = None
        # Install signal handlers
        for signame in ('SIGINT', 'SIGTERM'):
            self.loop.add_signal_handler(getattr(signal, signame),
                                         lambda: asyncio.ensure_future(self.stop()))
        logger.info('Client {} initialized'.format(CLIENT_UID))

    async def stop(self):
        """Unsubscribe, disconnect and cancel all outstanding tasks."""
        logger.info('Stopping...')
        await self.client.unsubscribe(self.mqtt_reply_subscriptions)
        await self.client.disconnect()
        tasks = [task for task in asyncio.Task.all_tasks() if task is not
                 asyncio.tasks.Task.current_task()]
        list(map(lambda task: task.cancel(), tasks))
        results = await asyncio.gather(*tasks, return_exceptions=True)
        logger.debug('Finished cancelling tasks, result: {}'.format(results))

    async def process_messages(self):
        """Start the HTTP server, connect to MQTT and pump incoming messages."""
        self.http_server = web.Server(self.process_http_requests)
        # FIX: this used the global ``loop`` (only bound under __main__);
        # use the loop this instance was constructed with.
        await self.loop.create_server(self.http_server,
                                      HTTP_BIND_ADDRESS, int(HTTP_BIND_PORT))
        await self.client.connect(self.mqtt_url)
        await self.client.subscribe([
            ('rpc/{}/+'.format(CLIENT_UID), QOS_2),
        ])
        logger.debug('Starting process messages...')
        while True:
            try:
                await self.process_mqtt_message(
                    await self.client.deliver_message())
            except asyncio.CancelledError:
                return
            except Exception as e:
                raise e

    async def process_mqtt_message(self, message):
        """Dispatch one MQTT message: an incoming request or an RPC reply."""
        await self.client._connected_state.wait()
        # Init a template for JSON-RPC error
        error = {"jsonrpc": "2.0",
                 "id": None,
                 "error": {"message": None, "code": -1}
                 }
        logger.debug('Message at topic {}: {}'.format(message.topic,
                                                      message.data))
        if re.search(r'^rpc/(\w+)/(\w+)$', message.topic):
            # Incoming request addressed to us: forward it over HTTP.
            try:
                _, _, from_uid = message.topic.split('/')
                logger.debug('Request from {}'.format(from_uid))
                js = json.loads(message.data)
                logger.debug('RPC: {}'.format(js))
                self.loop.create_task(self.process_mqtt_request(from_uid, js))
            except json.decoder.JSONDecodeError:
                logger.error('Bad JSON data for request: {}'.format(message.data))
            except Exception as e:
                logger.exception(e)
        elif re.search(r'^rpc/(\w+)/(\w+)/reply$', message.topic):
            # RPC reply
            logger.debug('RPC reply at {}'.format(message.topic))
            _, _, context, _ = message.topic.split('/')
            data_str = message.data.decode()
            waiting_replies = self.mqtt_replies.get(message.topic)
            if not waiting_replies:
                logger.warning(
                    'Got an unexpected RPC reply from {}: {}'.format(
                        message.topic, data_str))
                return
            # Valid reply received, let put it in the queue.
            try:
                data_js = json.loads(data_str)
                logger.debug("It's a JSON reply.")
            except json.decoder.JSONDecodeError:
                logger.error('RPC reply bad json data: {}'.format(data_str))
                # If there is only one waiting request then match with it.
                if len(waiting_replies) == 1:
                    request_id = list(waiting_replies.keys())[0]
                    error['error']['message'] = "Bad JSON: {}".format(data_str)
                    # FIX: was ``waiting_replies[key]`` — ``key`` was never
                    # defined and raised NameError.
                    await waiting_replies[request_id].put(error)
            else:
                # We got valid JSON from data
                request_id = data_js.get('id')
                if request_id not in waiting_replies.keys():
                    logger.error(
                        'Got a reply from {} without id: {}'.format(
                            message.topic, data_str))
                    # Try to match it with the only one request
                    if len(waiting_replies) == 1:
                        request_id = list(waiting_replies.keys())[0]
                        error['error']['message'] = "Bad reply id: {}".format(data_str)
                        # FIX: was ``waiting_replies[key]`` — same NameError.
                        await waiting_replies[request_id].put(error)
                    else:
                        # We cannot match, so forget it.
                        logger.error('Cannot match reply without id')
                else:
                    # Finally matched the request by id
                    logger.debug(
                        'Waiting reply found for request {}'.format(
                            request_id))
                    await waiting_replies[request_id].put(data_js)
        else:
            logger.warning('Unknown MQTT message, ignoring')

    async def process_http_requests(self, request):
        """Handle one HTTP JSON-RPC request by relaying it over MQTT."""
        # Init a template for JSON-RPC error
        error = {"jsonrpc": "2.0",
                 "id": None,
                 "error": {"message": None, "code": -1}
                 }
        # Validate the JSON-RPC envelope first.
        try:
            data = await request.text()
            rpc_potocol.parse_request(data)
        except RPCError as e:
            response = e.error_respond().serialize()
            logger.error('Bad RPC: {}'.format(response))
            return web.Response(text=response)
        except Exception as e:
            logger.exception(e)
            error['error']['message'] = str(e)
            return web.json_response(error)
        # We have valid RPC, let check it has MQTT destination
        js_data = json.loads(data)
        try:
            dst = js_data['params'].pop('dst')
            timeout = js_data['params'].get('timeout') \
                and js_data['params'].pop('timeout')
            if not timeout:
                logger.debug('Setting default timeout={} on MQTT reply'.format(
                    self.mqtt_reply_timeout))
                timeout = self.mqtt_reply_timeout
            else:
                logger.debug('Setting timeout={} from params'.format(timeout))
        except KeyError:
            logger.error('Bad RPC, no dst specified: {}'.format(data))
            error['error']['message'] = "No dst specified"
            return web.json_response(error)
        # Check if it a notification call, publish return True then.
        if not js_data.get('id'):
            # Publish packet..
            logger.debug('RPC Notification: {}'.format(data))
            await self.client.publish('rpc/{}/{}'.format(dst, self.client_uid),
                                      json.dumps(js_data).encode())
            return web.Response(text='')
        # Subscribe for reply topic, check there is not already a subscription.
        reply_topic = 'rpc/{}/{}/reply'.format(dst, self.client_uid)
        self.mqtt_replies.setdefault(reply_topic, {})[js_data['id']] = Queue()
        if reply_topic not in self.mqtt_reply_subscriptions:
            logger.debug('Adding subscrption to reply topic {}'.format(reply_topic))
            self.mqtt_reply_subscriptions.append(reply_topic)
            await self.client.subscribe([(reply_topic, QOS_2)])
            logger.debug('Subscribed to reply topic {}'.format(reply_topic))
        else:
            logger.debug('Already subscribed for topic {}'.format(reply_topic))
        # Publish MQTT message and wait for reply
        await self.client.publish('rpc/{}/{}'.format(dst, self.client_uid),
                                  json.dumps(js_data).encode())
        logger.debug(
            'Published request id {} to {}'.format(js_data['id'], dst))
        try:
            reply_data = await asyncio.wait_for(
                self.mqtt_replies[reply_topic][js_data['id']].get(), timeout)
            self.mqtt_replies[reply_topic][js_data['id']].task_done()
        except asyncio.TimeoutError:
            del self.mqtt_replies[reply_topic][js_data['id']]
            error['error']['message'] = 'RPC Timeout'
            return web.json_response(error)
        else:
            # We got a reply. Let send a response.
            return web.json_response(reply_data)

    async def process_mqtt_request(self, from_uid, data):
        """Forward an MQTT-originated request to HTTP_URL and publish the
        HTTP response back on the caller's reply topic."""
        response = None
        async with aiohttp.ClientSession() as session:
            try:
                async with timeout(HTTP_REQUEST_TIMEOUT, loop=self.loop) as t:
                    async with session.get(self.http_url, json=data) as resp:
                        response, status = await resp.text(), resp.status
                        logger.debug('HTTP response: [{}] {}'.format(
                            resp.status, response))
            except asyncio.TimeoutError:
                if data.get('id'):
                    logger.debug('Timeout, sending RPC reply')
                    return await self.send_mqtt_rpc_error_message(
                        from_uid, data['id'], 'HTTP request timeout')
                else:
                    logger.warning('Notification timeout: {}'.format(data))
                    # FIX: bail out here — ``response`` is still None and
                    # must not be fed to json.loads below.
                    return
        # Parse response as JSON and inject error.data if present
        try:
            js_response = json.loads(response)
            if js_response.get('error') and js_response['error'].get('data'):
                js_response['error']['message'] = '{}: {}'.format(
                    js_response['error']['message'],
                    js_response['error']['data'])
                await self.client.publish(
                    'rpc/{}/{}/reply'.format(CLIENT_UID, from_uid),
                    json.dumps(js_response).encode())
            else:
                # No error
                await self.client.publish(
                    'rpc/{}/{}/reply'.format(CLIENT_UID, from_uid),
                    response.encode())
        except json.decoder.JSONDecodeError:
            # FIX: guard with .get — notifications carry no 'id' and the
            # original data['id'] raised KeyError here.
            if data.get('id'):
                return await self.send_mqtt_rpc_error_message(
                    from_uid, data['id'], message='Cannot jsonify response')

    async def send_mqtt_rpc_error_message(self, to_uid, request_id,
                                          message, code=-1):
        """Publish a JSON-RPC error object to *to_uid*'s reply topic."""
        data = {
            "jsonrpc": "2.0",
            "id": request_id,
            "error": {'message': message, 'code': code}
        }
        await self.client.publish('rpc/{}/{}/reply'.format(CLIENT_UID, to_uid),
                                  json.dumps(data).encode())
if __name__ == '__main__':
    # Configure root logging; hbmqtt is noisy, so keep it at WARNING.
    formatter = '[%(asctime)s] %(levelname)s %(name)s %(message)s'
    logging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO,
                        format=formatter)
    logging.getLogger('hbmqtt').setLevel(level=logging.WARNING)
    loop = asyncio.get_event_loop()
    server = HttpMqttBridge()
    # Runs until the message pump is cancelled by the signal handlers.
    loop.run_until_complete(server.process_messages())
|
'''
Created on 28 nov. 2019
@author: Javier Fernández
'''
# Exception demo: this line raises ZeroDivisionError immediately, so every
# statement below it in this script is unreachable.
print(5/0)
# Would raise the exception explicitly (never reached).
raise ZeroDivisionError('5/0')
try:
    # 5/0 raises before the ZeroDivisionError instance is even constructed.
    raise ZeroDivisionError(5/0)
except ZeroDivisionError:
    print("Se lanzo la excepcion")
    # Re-raise the original exception after reporting it.
    raise
# -*- coding: utf-8 -*-
# 医疗器械广告
import pickle
import re
from selenium import webdriver
from gjypjd.utils import *
import json
import time
def main():
    """Crawl 5291 paginated listing pages of medical-device advertisements,
    open every detail page and store its raw HTML plus a parsed JSON record
    in MySQL (table t_ylqxgg)."""
    option = None
    mysql_db = DataBase()
    # Headless mode is toggled via the config file; disabled in production.
    if if_headless():
        option = webdriver.ChromeOptions()
        option.add_argument(argument='headless')
        option.add_argument('--no-sandbox')
    for i in range(1, 5292):  # iterate over the first-level listing pages
        try:
            browser = webdriver.Chrome(chrome_options=option)
            url_1 = 'http://app1.sfda.gov.cn/datasearchcnda/face3/search.jsp?tableId=40&State=1&bcId=152904606190314452995556113153&State=1&curstart='+str(i)+'&State=1&tableName=TABLE40&State=1&viewtitleName=COLUMN428&State=1&viewsubTitleName=COLUMN427,COLUMN424&State=1&tableView=%25E5%258C%25BB%25E7%2596%2597%25E5%2599%25A8%25E6%25A2%25B0%25E5%25B9%25BF%25E5%2591%258A&State=1&cid=0&State=1&ytableId=0&State=1&searchType=search&State=1'
            browser.get(url_1)
            # Strip 'amp;' so the detail-page links can be matched directly.
            s = browser.page_source.replace('amp;', '')
            m = re.findall(r'content.jsp\?tableId=40&tableName=TABLE40&tableView=医疗器械广告&Id=\d+', s, re.M)
            browser.close()
            for j in range(len(m)):
                url_2 = 'http://app1.sfda.gov.cn/datasearchcnda/face3/' + m[j]
                # A fresh browser per detail page to avoid stale sessions.
                browser = webdriver.Chrome(chrome_options=option)
                browser.get(url_2)
                sql = "insert into t_ylqxgg(c_bh, dt_insertTime, c_url, b_content, c_json,c_page) VALUES (REPLACE(UUID(),\"-\",\"\"), sysdate(), %s,%s,%s,%s)"
                mysql_db.exetcute_sql(sql, [url_2, browser.page_source, parse2json(browser.page_source),
                                            str(i) + '_' + str(j + 1)])
                # pickle.loads(s) can be used to repair garbled Chinese text
                browser.close()
        except Exception as e:
            # Best-effort crawl: log, back off briefly and move on.
            print(e)
            time.sleep(5)
def parse2json(html):
    """Extract medical-device advertisement fields from a detail page.

    Field keys (pinyin abbreviations of the Chinese labels):
        ylqxggpzwh 医疗器械广告批准文号 (approval number), dwmc 单位名称
        (company), dz 地址 (address), yzbm 邮政编码 (postcode), tymc 通用名称
        (generic name), bz 备注 (remarks), sbmc 商标名称 (brand), gglb 广告类别
        (ad category), sc 时长 (duration), ggyxq 广告有效期 (validity),
        ggfbnr 广告发布内容 (published-content URL), zczh 注册证号
        (registration number), spmc 商品名称 (product name)

    :param html: page source of one detail page
    :return: JSON string with one entry per field ('' when not found)
    """
    result_json = dict()
    reg_dict = dict()
    reg_dict['ylqxggpzwh'] = r"医疗器械广告批准文号</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['dwmc'] = r"单位名称</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['dz'] = r"地址</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['yzbm'] = r"邮政编码</td>\s*<td.*>(.*\s*.*)</td></tr>"
    # FIX: key was 'tymp', contradicting the documented field name 'tymc'.
    reg_dict['tymc'] = r"通用名称</td>\s*<td.*>(.*\s*.*)</td></tr>"
    reg_dict['bz'] = r"备注</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['sbmc'] = r"商标名称</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['gglb'] = r"广告类别</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['sc'] = r"时长</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['ggyxq'] = r"广告有效期</td>\s*<td.*>(.*\s*.*)</td></tr>"
    reg_dict['ggfbnr'] = r"广告发布内容</td>\s*<td><a href=\"(.*)\" target.*>.*</a></td></tr>"
    reg_dict['zczh'] = r"注册证号</td>\s*<td.*>(.*)</td></tr>"
    reg_dict['spmc'] = r"商品名称</td>\s*<td.*>(.*)</td></tr>"
    for key, pattern in reg_dict.items():
        reg_search = re.search(pattern, html)
        result_json[key] = reg_search.group(1) if reg_search is not None else ''
    # FIX: only prefix the host when a relative content URL was actually
    # found; the original produced a bare host string for missing fields.
    if result_json['ggfbnr']:
        result_json['ggfbnr'] = 'http://app1.sfda.gov.cn' + result_json['ggfbnr']
    return json.dumps(result_json, ensure_ascii=False)
if __name__ == '__main__':
main()
|
# Generated by Django 3.2.7 on 2021-11-03 20:34
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the defaults of user.created/updated."""

    # NOTE(review): the defaults below are a frozen datetime *instance*
    # captured when makemigrations ran — this mirrors a model that uses
    # datetime.datetime.now() instead of the callable (or auto_now/auto_now_add).
    # The fix belongs in the model; generated migrations are left untouched.
    dependencies = [
        ('usuarios', '0009_auto_20211103_1217'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='created',
            field=models.DateTimeField(default=datetime.datetime(2021, 11, 3, 14, 34, 12, 813020)),
        ),
        migrations.AlterField(
            model_name='user',
            name='updated',
            field=models.DateTimeField(default=datetime.datetime(2021, 11, 3, 14, 34, 12, 813020)),
        ),
    ]
|
#!/usr/bin/env python3
"""
xnat/xnat_archive.py: Provide information to allow direct access to an XNAT data archive.
"""
# import of built-in modules
import logging
import logging.config
import os
# import of third-party modules
# import of local modules
import utils.file_utils as file_utils
# authorship information
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016-2017, Connectome Coordination Facility"
__maintainer__ = "Timothy B. Brown"
# create a module logger
module_logger = logging.getLogger(__name__)
# Default to INFO; may be overridden by log file configuration.
# (The previous setLevel(logging.WARNING) call was dead code — it was
# overwritten immediately by this line — and has been removed.)
module_logger.setLevel(logging.INFO)
class XNAT_Archive:
    """This class provides information about direct access to an XNAT data archive.

    This access goes 'behind the scenes' and uses the actual underlying file system.
    Because of this, a change in XNAT implementation could cause this code to no longer
    be correct.
    """

    @property
    def DEFAULT_COMPUTE_PLATFORM(self):
        """The default value used for the compute platform.

        If the COMPUTE environment is not set, this value is used.
        """
        return 'CHPC'

    def __init__(self):
        """Constructs an XNAT_Archive object for direct access to an XNAT data archive."""
        module_logger.debug("xnat_archive.__init__")

    @property
    def archive_root(self):
        """Returns the path to the root of the archive.

        :raise RuntimeError: if XNAT_PBS_JOBS_ARCHIVE_ROOT is not set
        """
        XNAT_PBS_JOBS_ARCHIVE_ROOT = os.getenv('XNAT_PBS_JOBS_ARCHIVE_ROOT')
        module_logger.debug("XNAT_PBS_JOBS_ARCHIVE_ROOT = " + str(XNAT_PBS_JOBS_ARCHIVE_ROOT))
        if not XNAT_PBS_JOBS_ARCHIVE_ROOT:
            raise RuntimeError("Environment variable XNAT_PBS_JOBS_ARCHIVE_ROOT must be set")
        return XNAT_PBS_JOBS_ARCHIVE_ROOT

    @property
    def build_space_root(self):
        """Returns the path to the temporary build/processing directory root.

        :raise RuntimeError: if XNAT_PBS_JOBS_BUILD_DIR is not set
        """
        XNAT_PBS_JOBS_BUILD_DIR = os.getenv('XNAT_PBS_JOBS_BUILD_DIR')
        module_logger.debug("XNAT_PBS_JOBS_BUILD_DIR = " + str(XNAT_PBS_JOBS_BUILD_DIR))
        if not XNAT_PBS_JOBS_BUILD_DIR:
            raise RuntimeError("Environment variable XNAT_PBS_JOBS_BUILD_DIR must be set")
        return XNAT_PBS_JOBS_BUILD_DIR

    def project_archive_root(self, project_name):
        """Returns the path to the specified project's root directory in the archive.

        :param project_name: name of the project in the XNAT archive
        :type project_name: str
        """
        # FIX: build paths with os.path.join — the original mixed os.sep
        # concatenation here with a hard-coded '/' in project_resources_root.
        return os.path.join(self.archive_root, project_name, 'arc001')

    def project_resources_root(self, project_name):
        """Returns the path to the specified project's root project-level resources directory in the archive.

        :param project_name: name of the project in the XNAT archive
        :type project_name: str
        """
        return os.path.join(self.archive_root, project_name, 'resources')
def _simple_interactive_demo():
    """Print the archive paths for a sample project.

    Requires the XNAT_PBS_JOBS_ARCHIVE_ROOT and XNAT_PBS_JOBS_BUILD_DIR
    environment variables to be set.
    """
    archive = XNAT_Archive()
    project_name = 'HCP_Staging'
    print('archive_root: ' + archive.archive_root)
    print('project_archive_root(\'' + project_name + '\'): ' + archive.project_archive_root(project_name))
    print('project_resources_root(\'' + project_name + '\'): ' + archive.project_resources_root(project_name))
    print('build_space_root: ' + archive.build_space_root)


if __name__ == "__main__":
    # Configure logging from the module's companion config file, then demo.
    logging.config.fileConfig(
        file_utils.get_logging_config_file_name(__file__, False),
        disable_existing_loggers=False)
    _simple_interactive_demo()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import models
from odoo.api import Environment, SUPERUSER_ID
def _synchronize_cron(cr, registry):
    """Module hook: align the lead-enrichment cron's active flag with the
    'crm.iap.lead.enrich.setting' config parameter (active unless 'manual')."""
    env = Environment(cr, SUPERUSER_ID, {'active_test': False})
    # NOTE(review): env.ref raises if the record is missing unless
    # raise_if_not_found=False is passed, so the `if cron:` guard below
    # never sees a falsy value — confirm intended behavior.
    cron = env.ref('crm_iap_lead_enrich.ir_cron_lead_enrichment')
    if cron:
        # Second positional argument is the default when the param is unset.
        config = env['ir.config_parameter'].get_param('crm.iap.lead.enrich.setting', 'manual')
        cron.active = config != 'manual'
|
from django.db import models
from .request import Request
class RequestPhoto(models.Model):
    """Photo attached to a service Request, stored as an external URL."""

    request = models.ForeignKey(Request, related_name="photos", on_delete=models.CASCADE)
    photo_url = models.CharField(max_length=500)

    class Meta:
        verbose_name = "RequestPhoto"
        verbose_name_plural = "RequestPhotos"
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 16:03:31 2020
@author: qchat
"""
from core.api.database import system
from core.api.database.users import User
# Ad-hoc manual test script for the database/user API; most experiments are
# kept below as commented-out one-liners to re-enable interactively.
user = User(12)
#system.add_user('dummy')
#user2 = User(max(system.get_user_dict().keys()))
#system.add_user('hh')
print('user dict: ',system.get_user_dict())
# NOTE(review): mid-file import kept as-is; this is a scratch script.
from core.api import email
#email.notify_negative_balance('q.chateiller@gmail.com','Quentin',-8.77)
#
##user.set_name('Francesco')
#print('name: ',user.get_name())
#
##user.set_email('thjs@sls.fr')
#print('email: ',user.get_email())
#
#print('tag: ',user.get_tag())
##user.set_tag(user.get_tag())
#user.set_tag(user.get_tag()+1)
#
##user.set_auto_donation(0.04)
#print('autodonation: ',user.get_auto_donation())
#print('total donation: ',user.get_donations())
#print('curr year donation: ',user.get_donations(year=2020))
#
##user.add_conso()
#print('nb consos: ',user.get_consos_by_date(2020,3,22))
##user.recharge(4)
##user.recharge(-4)
#
##system.list_operations_id()
##system.is_operation_checked(375)
##system.set_operation_checked(375)
##system.set_operation_checked(375,state=False)
#
#print('consos value: ' ,user.get_consos_value())
#print('recharges value: ' ,user.get_recharges_value())
#print('balance: ' ,user.get_balance())
#
#print('global shares value:' , system.get_shares_value())
#print('global recharges value:' ,system.get_recharges_value())
#
##system.add_caps(30,10)
#system.add_caps(0,10)
#
#system.add_supplies(10)
#
#system.add_donation(2)
#print('global donations value:' , system.get_donations_value())
#print('users donations value:' , system.get_users_donations_value())
#print('donations to do:' , system.get_donation_todo())
|
"""
lecture 5
"""
#a = [1,2,3]
#b = [1,2,3]
#print(a == b)
#print(a is b)
#print(id(a))
#print(id(b))
#x = None
#print(id(None))
#print(id(x))
#print(x is None)
#print(x == None)
#y = []
#x = None
#print(type(y))
#print(y == None)
#print(y is None)
#print(x is None)
#print(True and False )
#print (True or False)
#print(not True)
#print(not None)
#print(not '0') #when paranthesis are used, it is a string
#print(not 0) #when there are no parenthesis used, it is an integer
#print(() and [])
#if 2>1 :
# if 1.5>1:
# print('1.5>1')
# print('2>1')
#if 2<=1:
# print('2<=1')
#else:
# print('2>1') |
from .utils import *
from .simulation import *
from .split_data import *
from .create_data import *
from .execute_evaluate import *
from tempural_analysis.majority_rule import *
from tempural_analysis.majority_per_problem import *
from tempural_analysis.majority_last_trials import *
|
# Generated by Django 3.0.8 on 2020-11-12 13:30
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the Questions model to QuestionBank."""

    dependencies = [
        ('home', '0031_auto_20201112_1820'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Questions',
            new_name='QuestionBank',
        ),
    ]
|
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
import time
MAX_WAIT = 10
class NewVisitorTest(LiveServerTestCase):
    """Functional tests driving the to-do app through a real Firefox browser."""

    def setUp(self):
        """Open a fresh browser before every test."""
        self.browser = webdriver.Firefox()

    def tearDown(self):
        """Close the browser after every test."""
        self.browser.quit()

    def wait_for_row_in_list_table(self, row_text):
        """Poll the list table until *row_text* appears or MAX_WAIT elapses.

        Retries on AssertionError (row not there yet) and WebDriverException
        (page still loading); re-raises once the deadline passes.
        """
        start_time = time.time()
        while True:
            try:
                table = self.browser.find_element_by_id('id_list_table')
                rows = table.find_elements_by_tag_name('tr')
                self.assertIn(row_text, [row.text for row in rows])
                return
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)

    def test_can_start_a_list_for_one_user(self):
        """A single visitor can build up a two-item to-do list."""
        # Kemarius found out about a new to-do list app. He goes
        # to the page
        self.browser.get(self.live_server_url)
        # The header mentions to-do in the page title
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # He is asked to enter a to-do item right away
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        # He types complete google classroom work
        inputbox.send_keys("Complete google classwork")
        # Upon hitting enter, the page updates and now lists
        # 1: Finish google classroom assignment
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Complete google classwork')
        # A text box exists allowing him to add another item
        # He enters complete essay for Civics about the
        # US constitution
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys("Finish US Constitution essay")
        inputbox.send_keys(Keys.ENTER)
        # The page updates and now both items are on the list
        self.wait_for_row_in_list_table('1: Complete google classwork')
        self.wait_for_row_in_list_table('2: Finish US Constitution essay')

    def test_multiple_users_can_start_lists_at_different_urls(self):
        """Two visitors get separate lists at separate unique URLs."""
        # Kemarius starts a new to do list
        self.browser.get(self.live_server_url)
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Complete google classwork')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Complete google classwork')
        # The site generates a unique URL to remember the
        # task Kemarius enters
        kemarius_list_url = self.browser.current_url
        self.assertRegex(kemarius_list_url, '/lists/.+')
        # Kemarius visits the URL and the list he made is still there
        # Now he goes back to playing video games
        # Now a new users, Jamia, comes along to the site
        ## We use a new browser session to make sure that nor information
        ## of Jamia's is coming through from cookies etc.
        self.browser.quit()
        self.browser = webdriver.Firefox()
        # Jamia vists the home page. There is no sign on Kemarius's list
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Complete google classwork', page_text)
        self.assertNotIn('Finish US Constituion essay', page_text)
        # Jamia starts a new list by entering a new item
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy flamin hot cheetos')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Buy flamin hot cheetos')
        # Jamia gets her own unique URL
        jamia_list_url = self.browser.current_url
        self.assertRegex(jamia_list_url, '/lists/.+')
        self.assertNotEqual(jamia_list_url, kemarius_list_url)
        # Again, there is no trace of Kemarius's list
        page_text = self.browser.find_element_by_tag_name('body').text
        # FIX: was self.assetNotIn (typo) — raised AttributeError at runtime.
        self.assertNotIn('Complete google classwork', page_text)
        self.assertIn("Buy flamin hot cheetos", page_text)
        # Satisfied, they both go back to sleep
|
# NOTE(review): this is a Python 2 script (statement-form "print"). The import
# is "netCDF3"; most installations ship "netCDF4" — confirm the intended package.
from netCDF3 import Dataset
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
import numpy as np

# Open the soundings file and list what it contains.
filename='soundings.nc';
nc_file=Dataset(filename)
var_names=nc_file.variables.keys()  # py2: a real list, indexable below
print "variable names: ",var_names
print "global attributes: ",nc_file.ncattrs()
print "col_names: ",nc_file.col_names

# Plot column 2 (x) against column 1 (y) for every sounding variable.
fig=plt.figure(1)
fig.clf()
ax1=fig.add_subplot(111)
for var_name,the_var in nc_file.variables.items():
    ax1.plot(the_var[:,2],the_var[:,1])

#now interpolate the first variable onto a 100 m grid
# NOTE(review): UnivariateSpline requires strictly increasing x values, so this
# assumes column 1 is monotonic for the first sounding — confirm.
the_var=nc_file.variables[var_names[0]]
interp_temp=UnivariateSpline(the_var[:,1],the_var[:,2])
z_interp=np.arange(300.,25000.,100.)
ax1.plot(interp_temp(z_interp),z_interp,'g-',linewidth=3)
fig.canvas.draw()
plt.show()
|
'''
Author: Raisa
Date: 28-12-19
Python Script for train data
'''
import keras
import sys
sys.path.append('../')
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from keras.layers import BatchNormalization
from keras import backend as K
from matplotlib import pyplot
#import matplotlib.pyplot as plt
# project modules
import config
import my_model, process_image
from keras.callbacks import LearningRateScheduler
# plot diagnostic learning curves
def summarize_diagnostics(history):
    """Plot loss and accuracy learning curves for train/validation and save
    them as a single PNG under the project's output directory."""
    panels = (
        (211, 'Cross Entropy Loss', 'loss', 'val_loss'),
        (212, 'Classification Accuracy', 'accuracy', 'val_accuracy'),
    )
    for position, title, train_key, val_key in panels:
        pyplot.subplot(position)
        pyplot.title(title)
        pyplot.plot(history.history[train_key], color='blue', label='train')
        pyplot.plot(history.history[val_key], color='orange', label='test')
    # save plot to file
    pyplot.savefig(config.project_root+'output/' + '_plot.png')
    pyplot.close()
# Load the full training set (images and labels) from disk.
x_train, Y_train = process_image.load_train_data()
print("train data shape: ", x_train.shape)
print("train data label: ", Y_train.shape)
#x_train=18627, Y_train=18627 Label
model = my_model.get_model()
model.summary()
#splitting the whole data in 0.15 size
# NOTE(review): the comment above says 0.15 but test_size is 0.20 — confirm intent.
X_train, X_test, y_train, y_test = train_test_split(x_train, Y_train, random_state=42, test_size=0.20)
#print(X_train.shape, X_test.shape,y_train.shape,y_test.shape)
#Learning rate scheduler
def lr_scheduler(epoch):
    """Drop the model's learning rate at fixed epochs (20 and 70), print the
    current rate, and return it (the LearningRateScheduler callback contract)."""
    if epoch == 20:
        K.set_value(model.optimizer.lr, config.lr_1)
    elif epoch == 70:
        K.set_value(model.optimizer.lr, config.lr_2)
    current_lr = K.get_value(model.optimizer.lr)
    print("learning rate: ", current_lr)
    return current_lr
#compile
# SGD with a small base lr; lr_scheduler would override it if registered below.
opt = SGD(lr=0.0001, momentum=0.9)
model.compile(optimizer= opt,
              loss= keras.losses.categorical_crossentropy,
              metrics = ['accuracy'])

#checkpoints
model_cp = my_model.save_model_checkpoint()
early_stopping = my_model.set_early_stopping()

# Augmentation: small shifts and rotations plus feature-wise standardization.
# NOTE(review): featurewise_center/std_normalization need datagen.fit(X_train)
# to compute the dataset statistics — confirm this is handled somewhere.
datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
                             rotation_range=5,
                             featurewise_center=True,
                             featurewise_std_normalization=True)
it_train = datagen.flow(X_train, y_train, batch_size=config.batch_size)
#change_lr = LearningRateScheduler(lr_scheduler)
steps = int(X_train.shape[0] / config.batch_size)
# NOTE(review): fit_generator is deprecated in newer Keras/TF — model.fit
# accepts generators directly.
history = model.fit_generator(it_train, steps_per_epoch=steps, epochs=200, validation_data=(X_test, y_test), verbose=2,
                              callbacks=[early_stopping,model_cp])
summarize_diagnostics(history)
#print ("%s: %.2f%%" % (model.metrics_names[1], history[1]*100))
#history= model.fit(x_train, y_train, batch_size=config.batch_size, epochs=70, verbose=2, callbacks=[early_stopping,model_cp]
#                   ,shuffle=True,validation_split=0.15)
print("Done")
|
#
# Copyright (C) 2022 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from abc import ABC
from datetime import datetime
import typedb_protocol.common.concept_pb2 as concept_proto
from typedb.api.concept.concept import ValueType
from typedb.api.concept.value.value import Value, LongValue, BooleanValue, DoubleValue, StringValue, DateTimeValue
from typedb.concept.concept import _Concept
from typedb.concept.proto import concept_proto_reader
class _Value(Value, _Concept, ABC):
    # Shared base for the concrete client-side value wrappers below.

    def as_value(self) -> "Value":
        # Identity downcast: any _Value already is a Value.
        return self
class _BooleanValue(BooleanValue, _Value):
    """Client-side concept wrapping a boolean value."""

    def __init__(self, value: bool):
        super().__init__()
        self._value = value

    @staticmethod
    def of(value_proto: concept_proto.Value):
        """Build a _BooleanValue from its protobuf representation."""
        return _BooleanValue(value_proto.value.boolean)

    def get_value(self) -> bool:
        return self._value

    def get_value_type(self) -> "ValueType":
        return ValueType.BOOLEAN
class _LongValue(LongValue, _Value):
    """Client-side concept wrapping a long (int) value."""

    def __init__(self, value: int):
        super().__init__()
        self._value = value

    @staticmethod
    def of(value_proto: concept_proto.Value):
        """Build a _LongValue from its protobuf representation."""
        return _LongValue(value_proto.value.long)

    def get_value(self) -> int:
        return self._value

    def get_value_type(self) -> "ValueType":
        return ValueType.LONG
class _DoubleValue(DoubleValue, _Value):
    """Client-side concept wrapping a double (float) value."""

    def __init__(self, value: float):
        super().__init__()
        self._value = value

    @staticmethod
    def of(value_proto: concept_proto.Value):
        """Build a _DoubleValue from its protobuf representation."""
        return _DoubleValue(value_proto.value.double)

    def get_value(self) -> float:
        return self._value

    def get_value_type(self) -> "ValueType":
        return ValueType.DOUBLE
class _StringValue(StringValue, _Value):
    """Client-side concept wrapping a string value."""

    def __init__(self, value: str):
        super().__init__()
        self._value = value

    @staticmethod
    def of(value_proto: concept_proto.Value):
        """Build a _StringValue from its protobuf representation."""
        return _StringValue(value_proto.value.string)

    def get_value(self) -> str:
        return self._value

    def get_value_type(self) -> "ValueType":
        return ValueType.STRING
class _DateTimeValue(DateTimeValue, _Value):
    """Client-side concept wrapping a datetime value."""

    def __init__(self, value: datetime):
        super().__init__()
        self._value = value

    @staticmethod
    def of(value_proto: concept_proto.Value):
        """Build a _DateTimeValue from protobuf; the wire value is epoch
        milliseconds, converted here via utcfromtimestamp."""
        millis = float(value_proto.value.date_time)
        return _DateTimeValue(datetime.utcfromtimestamp(millis / 1000.0))

    def get_value(self) -> datetime:
        return self._value

    def get_value_type(self) -> "ValueType":
        return ValueType.DATETIME
|
import inspect
from django.template import Library, Node, TemplateSyntaxError
from django.urls import reverse
from django.utils.encoding import force_str
from django.utils.translation import get_language
from parler.models import TranslatableModel, TranslationDoesNotExist
from parler.utils.context import smart_override, switch_language
register = Library()
class ObjectLanguageNode(Node):
    """Template node that renders its contents with *object* switched to the
    requested language (or the current language when none is given)."""

    def __init__(self, nodelist, object_var, language_var=None):
        self.nodelist = nodelist  # This name is special in the Node baseclass
        self.object_var = object_var
        self.language_var = language_var

    def render(self, context):
        # Read context data
        object = self.object_var.resolve(context)
        new_language = self.language_var.resolve(context) if self.language_var else get_language()
        if not isinstance(object, TranslatableModel):
            # BUG FIX: the message previously misspelled the class name as
            # "TranslableModel".
            raise TemplateSyntaxError(f"Object '{object}' is not an instance of TranslatableModel")
        with switch_language(object, new_language):
            # Render contents inside
            output = self.nodelist.render(context)
        return output
@register.tag
def objectlanguage(parser, token):
    """
    Template tag to switch an object language
    Example::
        {% objectlanguage object "en" %}
          {{ object.title }}
        {% endobjectlanguage %}
    A TranslatedObject is not affected by the ``{% language .. %}`` tag
    as it maintains its own state. This tag temporarily switches the object state.
    Note that using this tag is not thread-safe if the object is shared between threads.
    It temporarily changes the current language of the object.
    """
    bits = token.split_contents()
    if len(bits) not in (2, 3):
        raise TemplateSyntaxError(
            "'%s' takes one argument (object) and has one optional argument (language)" % bits[0]
        )
    object_var = parser.compile_filter(bits[1])
    language_var = parser.compile_filter(bits[2]) if len(bits) == 3 else None
    nodelist = parser.parse(("endobjectlanguage",))
    parser.delete_first_token()
    return ObjectLanguageNode(nodelist, object_var, language_var)
@register.simple_tag(takes_context=True)
def get_translated_url(context, lang_code, object=None):
    """
    Get the proper URL for this page in a different language.
    Note that this algorithm performs a "best effort" approach to give a proper URL.
    To make sure the proper view URL is returned, add the :class:`~parler.views.ViewUrlMixin` to your view.
    Example, to build a language menu::
        <ul>
            {% for lang_code, title in LANGUAGES %}
                {% get_language_info for lang_code as lang %}
                {% get_translated_url lang_code as tr_url %}
                {% if tr_url %}<li{% if lang_code == LANGUAGE_CODE %} class="is-selected"{% endif %}><a href="{{ tr_url }}" hreflang="{{ lang_code }}">{{ lang.name_local|capfirst }}</a></li>{% endif %}
            {% endfor %}
        </ul>
    Or to inform search engines about the translated pages::
        {% for lang_code, title in LANGUAGES %}
            {% get_translated_url lang_code as tr_url %}
            {% if tr_url %}<link rel="alternate" hreflang="{{ lang_code }}" href="{{ tr_url }}" />{% endif %}
        {% endfor %}
    Note that using this tag is not thread-safe if the object is shared between threads.
    It temporarily changes the current language of the view object.
    The query string of the current page is preserved in the translated URL.
    When the ``object`` variable is explicitly provided however, the query string will not be added.
    In such situation, *django-parler* assumes that the object may point to a completely different page,
    hence no query string is added.
    """
    view = context.get("view", None)
    request = context["request"]
    if object is not None:
        # Cannot reliably determine whether the current page is being translated,
        # or the template code provides a custom object to translate.
        # Hence, not passing the querystring of the current page
        qs = ""
    else:
        # Try a few common object variables, the SingleObjectMixin object,
        # The Django CMS "current_page" variable, or the "page" from django-fluent-pages and Mezzanine.
        # This makes this tag work with most CMSes out of the box.
        object = (
            context.get("object", None)
            or context.get("current_page", None)
            or context.get("page", None)
        )
        # Assuming current page, preserve query string filters.
        qs = request.META.get("QUERY_STRING", "")
    try:
        if view is not None:
            # Allow a view to specify what the URL should be.
            # This handles situations where the slug might be translated,
            # and gives you complete control over the results of this template tag.
            get_view_url = getattr(view, "get_view_url", None)
            if get_view_url:
                with smart_override(lang_code):
                    return _url_qs(view.get_view_url(), qs)
        # Now, the "best effort" part starts.
        # See if it's a DetailView that exposes the object.
        if object is None:
            object = getattr(view, "object", None)
        if object is not None and hasattr(object, "get_absolute_url"):
            # There is an object, get the URL in the different language.
            # NOTE: this *assumes* that there is a detail view, not some edit view.
            # In such case, a language menu would redirect a user from the edit page
            # to a detail page; which is still way better a 404 or homepage.
            if isinstance(object, TranslatableModel):
                # Need to handle object URL translations.
                # Just using smart_override() should be enough, as a translated object
                # should use `switch_language(self)` internally before returning an URL.
                # However, it doesn't hurt to help a bit here.
                with switch_language(object, lang_code):
                    return _url_qs(object.get_absolute_url(), qs)
            else:
                # Always switch the language before resolving, so i18n_patterns() are supported.
                with smart_override(lang_code):
                    return _url_qs(object.get_absolute_url(), qs)
    except TranslationDoesNotExist:
        # Typically projects have a fallback language, so even unknown languages will return something.
        # This either means fallbacks are disabled, or the fallback language is not found!
        return ""
    # Just reverse the current URL again in a new language, and see where we end up.
    # This doesn't handle translated slugs, but will resolve to the proper view name.
    resolver_match = request.resolver_match
    if resolver_match is None:
        # Can't resolve the page itself, the page is apparently a 404.
        # This can also happen for the homepage in an i18n_patterns situation.
        return ""
    with smart_override(lang_code):
        clean_kwargs = _cleanup_urlpattern_kwargs(resolver_match.kwargs)
        return _url_qs(
            reverse(
                resolver_match.view_name,
                args=resolver_match.args,
                kwargs=clean_kwargs,
                current_app=resolver_match.app_name,
            ),
            qs,
        )
def _url_qs(url, qs):
    """Return *url* as text with query string *qs* appended, unless *qs* is
    empty or the URL already carries one."""
    url_text = force_str(url)
    if not qs or "?" in url_text:
        return url_text
    return f"{url_text}?{force_str(qs)}"
@register.filter
def get_translated_field(object, field):
    """
    Fetch a translated field in a thread-safe way, using the current language.
    Example::
        {% language 'en' %}{{ object|get_translated_field:'name' }}{% endlanguage %}
    """
    # safe_translation_getter() falls back instead of raising when the
    # translation for the current language is missing.
    return object.safe_translation_getter(field, language_code=get_language())
def _cleanup_urlpattern_kwargs(kwargs):
# For old function-based views, the url kwargs can pass extra arguments to the view.
# Although these arguments don't have to be passed back to reverse(),
# it's not a problem because the reverse() function just ignores them as there is no match.
# However, for class values, an exception occurs because reverse() wants to force_text() them.
# Hence, remove the kwargs to avoid internal server errors on some exotic views.
return {k: v for k, v in kwargs.items() if not inspect.isclass(v)}
|
from django.db import models
from .mixins import TimeStampedMixin
from django.contrib.auth.models import User
class Category(TimeStampedMixin):
    # Human-readable label for the category.
    description = models.CharField(max_length=255)
    # Icon identifier rendered next to sites of this category.
    icon = models.CharField(max_length=255)

    def __str__(self):
        return self.description
class Site(TimeStampedMixin):
    """A geo-located place that users can rate (through Rating) and like
    (through Like)."""

    user = models.ManyToManyField(User, through='Rating', related_name="ratings")
    user_like = models.ManyToManyField(User, through='Like')
    description = models.CharField(max_length=255)
    detail = models.TextField(blank=True, null=True)
    latitude = models.CharField(max_length=50)
    longitude = models.CharField(max_length=50)
    tags = models.TextField()
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0 and the argument is mandatory from 2.0 on.
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    creator_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="creator")

    def get_category_icon(self):
        # self.category is already the related Category instance; the previous
        # Category.objects.get(pk=self.category.id) issued a redundant query
        # for the same row.
        return self.category.icon
class Picture(TimeStampedMixin):
    """Photo uploaded for a site."""

    picture = models.ImageField(upload_to='sites/')
    # on_delete made explicit: CASCADE was the implicit pre-2.0 default and is
    # mandatory from Django 2.0 on.
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
class Rating(TimeStampedMixin):
    """Through model for Site.user: a user's numeric rating of a site."""

    # on_delete made explicit: CASCADE was the implicit pre-2.0 default and is
    # mandatory from Django 2.0 on.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    # One decimal place, e.g. 4.5 (max 9.9 with max_digits=2).
    rating = models.DecimalField(max_digits=2, decimal_places=1)
class Like(TimeStampedMixin):
    """Through model for Site.user_like: marks that a user liked a site."""

    # on_delete made explicit: CASCADE was the implicit pre-2.0 default and is
    # mandatory from Django 2.0 on.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
|
class Employee:
    """Employee whose email address is derived from first and last name via a
    read/write/deletable property."""

    def __init__(self, fname, lname):
        self.fname = fname
        self.lname = lname

    def explain(self):
        """Return a short human-readable description of the employee."""
        return f"This employee is {self.fname} {self.lname}"

    @property
    def email(self):
        """Derived email; a placeholder message after the names were deleted."""
        # BUG FIX: use 'is None' (identity, PEP 8) instead of '== None', and
        # fix the "Emain" typo in the user-visible placeholder message.
        if self.fname is None or self.lname is None:
            return "Email is not set"
        return f"{self.fname}.{self.lname}.sandy@programmer.com"

    @email.setter
    def email(self, string):
        # Parse "First.Last@domain" back into the name parts.
        print("Setting Now")
        names = string.split("@")[0]
        parts = names.split(".")
        self.fname = parts[0]
        self.lname = parts[1]

    @email.deleter
    def email(self):
        # Clearing the names makes the getter report the email as unset.
        self.fname = None
        self.lname = None
# Demo: exercise the Employee property API.
obj1 = Employee("Sandy", "Pol")
print(type(obj1))
print(obj1.email)
# Identical string literals may be interned to one object, so both id() calls
# can print the same address (CPython implementation detail).
print(type("this is string"))
print(id("this is string"))
print(id("this is string"))
okk = "this is string"
print(dir(okk))  # list the methods defined on str objects
'''
Write a script that takes a list and turns it into a tuple.
'''
list_ = [1234, 'steve', '¥¥¥¥', 54321]
# Show the list's type, first element and length.
for info in (type(list_), list_[0], len(list_)):
    print(info)
# Convert to an immutable tuple and show the result.
list_2 = tuple(list_)
for info in (type(list_2), list_2):
    print(info)
|
import pandas as pd
import pickle
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

filename = 'preprocessing/vectorizer.pkl'

# Load data
df = pd.read_pickle("data/data.pkl")
descriptions = df["description"]

# Get features: try to reuse a previously fitted vectorizer, else fit a new one.
try:
    # vectorizer = pickle.load(open(filename, 'rb'))
    X = vectorizer.transform(descriptions)
except Exception:
    # Narrowed from a bare "except:" which also swallowed KeyboardInterrupt /
    # SystemExit. With the load line commented out, the NameError always lands
    # here and a fresh vectorizer is fitted.
    vectorizer = TfidfVectorizer(max_df=0.8, ngram_range=(1, 2))
    X = vectorizer.fit_transform(descriptions)

# Perform SVD
svd = TruncatedSVD(n_components=500, n_iter=5, random_state=42)
svd.fit(X)

# save models
# BUG FIX: the vectorizer path was misspelled 'mdoels/...', so the two models
# landed in different directories. Also use context managers to close files.
with open('models/vectorizer.pkl', 'wb') as out_file:
    pickle.dump(vectorizer, out_file)
with open('models/svd.pkl', 'wb') as out_file:
    pickle.dump(svd, out_file)
print('done!')
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
class PlotSpectrum:
    """Builds stick-spectrum xy data (and optionally a Gaussian-convoluted
    envelope) from state energies and intensities, normalizing intensities so
    the strongest stick is 1."""

    def __init__(self,
                 energies,
                 intensities,
                 plt_energy_range,
                 gauss_width=None,
                 mixed_indcs=None,
                 norm_contrib_fl=None):
        """
        Returns xy data of sticks and of gaussian convolution spectra, which you can then plot with
        :param energies: List of energies of states to be plotted. (optional) Includes states involved in mixing.
        :param intensities: List of intensities of states to be plotted. (optional) Includes states involved in mixing.
        :param plt_energy_range: the window that will be displayed when you plot. the gaussian convolution cares
            about this too
        :param gauss_width: If convoluting stick spectra with gaussians, this is sigma.
        :param mixed_indcs: if there is state mixing, include indices that correspond to states
        :param norm_contrib_fl: path to file to obtain contributions from that, norm intensity in there.
        """
        self.energies = energies
        self.intensities = intensities
        # Sort so e_range[0]/e_range[1] are always (min, max).
        self.e_range = np.sort(plt_energy_range)
        self.gauss_width = gauss_width
        self.mix_idx = mixed_indcs
        self.norm_contrib_fl = norm_contrib_fl
        self._initialize()

    def _initialize(self):
        # A Gaussian envelope is produced only when a width was supplied.
        if self.gauss_width is None:
            self.gauss_time = False
        else:
            self.gauss_time = True
        if self.norm_contrib_fl is not None:
            # Remember the directory of the contributions file so the
            # re-normalized copy can be written next to it.
            if '/' in self.norm_contrib_fl:
                self.contrib_pth = self.norm_contrib_fl.rpartition('/')[0]
            else:
                self.contrib_pth = '.'
            self.contrib_xy, self.other_stuff = self._extract_contribs()
        # Get most intense feature in energy range if not given norm_to_ind
        self._norm_intensity()

    def _extract_contribs(self):
        # Parse the contributions file: after the header, odd lines hold
        # "E, I" pairs and even lines carry free-form text kept verbatim.
        with open(f'{self.norm_contrib_fl}','r') as fll:
            linez = fll.readlines()
        line_num = 0
        contrib_xy = []
        other_stuff = []
        for line in linez:
            if line_num == 0:
                pass  # header line, skipped
            elif line_num % 2 == 0 and line_num != 0:
                other_stuff.append(line)
            else:
                line = line.split(', ')
                contrib_xy.append([float(line[0]),float(line[1])])
            line_num+=1
        return np.array(contrib_xy), other_stuff

    def _truncate_data(self):
        """Truncate data to energy range in order to normalize intensity in range to 1"""
        # NOTE(review): argsort only reorders — nothing is actually restricted
        # to e_range here, so norm_by is the *global* intensity maximum.
        # Confirm whether truncation to the plotted range was intended.
        trunc_idx = np.argsort(self.energies)
        trunc_intensities = self.intensities[trunc_idx]
        norm_by = np.amax(trunc_intensities)
        return norm_by

    def _rewrite_contrib(self):
        # Write the re-normalized contributions back out, preserving the
        # interleaved free-form lines captured by _extract_contribs().
        contribs = open(f"{self.contrib_pth}/contribs_normed.txt", "w")
        contribs.write('E I\n')
        for num, f_i in enumerate(self.contrib_xy):
            contribs.write(f'{f_i[0]:.3f}, {f_i[1]:.5f}\n ')
            contribs.write(self.other_stuff[num])
        contribs.close()

    def _norm_intensity(self):
        # Scale all intensities so the strongest stick is 1; propagate the
        # same factor to the contributions file, if any.
        norm_by = self._truncate_data()
        self.intensities = self.intensities / norm_by
        if self.norm_contrib_fl is not None:
            self.contrib_xy[:, 1] = self.contrib_xy[:, 1] / norm_by
            self._rewrite_contrib()

    def _convolute_spectrum(self):
        # Sum one Gaussian of sigma = gauss_width per stick on a 5000-point
        # grid spanning the plot range, then renormalize the envelope to 1.
        disc_e = np.linspace(self.e_range[0], self.e_range[1], 5000)
        g = np.zeros(len(disc_e))
        for i in range(len(self.intensities)):
            g += self.intensities[i] * np.exp(
                -np.square(disc_e - self.energies[i]) / (2.0 * np.square(self.gauss_width)))
        g /= np.amax(g)
        return np.array([disc_e, g]).T

    def get_xy_data(self):
        """Return stick xy data as an (N, 2) array; when a gauss_width was
        given, also return the (5000, 2) convoluted envelope."""
        stick_xy = np.column_stack((self.energies, self.intensities))
        if self.gauss_time:
            gauss_xy = self._convolute_spectrum()
            return stick_xy, gauss_xy
        else:
            return stick_xy

    def plot_xy_data(self,
                     stick_xy,
                     savefig_name='spectrum',
                     gauss_xy=None,
                     stick_color='b',
                     mixed_stick_color='r',
                     gauss_color='k',
                     xlabel='Energy (cm$^{-1}$)',
                     ylabel='Rel. Intensity',
                     pdf=False
                     ):
        """Render sticks (mixing states highlighted in mixed_stick_color), the
        optional Gaussian envelope, and save the figure as PNG or PDF."""
        params = {'text.usetex': False,
                  'mathtext.fontset': 'dejavusans',
                  'font.size': 14}
        plt.rcParams.update(params)
        # Plot sticks
        plt.stem(stick_xy[:, 0], stick_xy[:, 1], f'{stick_color}', markerfmt=" ", basefmt=" ")
        if self.mix_idx is not None:
            # Re-draw the mixed states on top in their own color.
            plt.stem(stick_xy[self.mix_idx, 0], stick_xy[self.mix_idx, 1], f'{mixed_stick_color}', markerfmt=" ",
                     basefmt=" ")
        if gauss_xy is not None:
            plt.plot(gauss_xy[:, 0], gauss_xy[:, 1], f'{gauss_color}', linewidth=2)
        # Labels
        plt.xlabel(f'{xlabel}')
        plt.ylabel(f'{ylabel}')
        # Save figure
        # Strip any extension the caller included; the format suffix is added below.
        if '.' in savefig_name:
            splt = savefig_name.split('.')
            savefig_name = splt[0]
        plt.xlim([self.e_range[0],self.e_range[1]])
        if pdf:
            plt.savefig(f'{savefig_name}.pdf', bbox_inches='tight')
        else:
            plt.savefig(f'{savefig_name}.png', dpi=300, bbox_inches='tight')
|
def distance(seqA, seqB):
    """Return the Hamming distance between two equal-length sequences.

    Raises ValueError when the sequences differ in length.
    """
    if len(seqA) != len(seqB):
        raise ValueError("Sequences must be the same length")
    # Count positions where the paired elements differ.
    return sum(a != b for a, b in zip(seqA, seqB))
|
import pygame
import sys
from Character.sprite import *
from Xuly.background import *
# Warn early when optional pygame subsystems are unavailable.
if not pygame.font: print('Warning, fonts disabled')
if not pygame.mixer: print('Warning, sound disabled')

pygame.init()
size = width, height = 640, 480
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()

# Sprite sheet for the player character.
img = pygame.image.load('image/hulk2.png')
# NOTE(review): draw() blits using hulk.getW()/getH() and 45-px column steps,
# not these constants — confirm which frame size is authoritative.
FRAME_WIDTH = 180
FRAME_HEIGHT = 260
setBackgroundImage('image/background_Play.png')

hulk=Sprite(0,0)

# Animation state shared with draw() via globals.
nextFrame = 0     # tick threshold at which the next frame is shown
frame=0           # current animation frame index (0..3)
time_clock = 0    # tick counter
frame_last_x=0    # sheet offset used when standing still
frame_last_y=0
def draw(hulk):
    """Advance the walk animation and blit the sprite-sheet frame matching the
    pressed direction, scrolling the background opposite to the movement."""
    global frame
    global nextFrame
    global time_clock
    global frame_last_y
    # Advance through the 4 animation frames every 5 ticks.
    time_clock+=1
    if time_clock>nextFrame:
        frame=(frame+1)%4
        nextFrame+=5
    # Each row of the sheet (y offset 0/65/130/195) holds one walk direction;
    # the frames within a row sit in 45-px-wide columns.
    if keyPressed("right"):
        scrollBackground(-10,0)
        screen.blit(img, (hulk.getX(), hulk.getY()), (frame*45, 130, hulk.getW(), hulk.getH()))
        frame_last_y=130
    elif keyPressed("up"):
        scrollBackground(0,10)
        screen.blit(img, (hulk.getX(), hulk.getY()), (frame*45, 195, hulk.getW(), hulk.getH()))
        frame_last_y=195
    elif keyPressed("down"):
        scrollBackground(0,-10)
        screen.blit(img, (hulk.getX(), hulk.getY()), (frame*45, 0, hulk.getW(), hulk.getH()))
        frame_last_y=0
    elif keyPressed("left"):
        scrollBackground(10,0)
        screen.blit(img, (hulk.getX(), hulk.getY()), (frame*45, 65, hulk.getW(), hulk.getH()))
        frame_last_y=65
    else:
        # Idle: redraw the last-used row.
        # NOTE(review): frame_last_x is never updated anywhere, so idle always
        # shows column 0 of the last row — confirm whether frame*45 was intended.
        scrollBackground(0,0)
        screen.blit(img, (hulk.getX(), hulk.getY()), (frame_last_x, frame_last_y, hulk.getW(), hulk.getH()))
# Main loop: capped at 20 FPS; events drive the sprite, draw() renders a frame.
while True:
    clock.tick(20)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit(0)
        handle(event,hulk)
    hulk.move()
    #screen.fill((95, 183, 229)) # sky color
    if pygame.font:
        # NOTE(review): this font object is rebuilt every frame but never used
        # (the render/blit lines are commented out) — could be hoisted or removed.
        font = pygame.font.Font(None, 36)
        #text = font.render("Hello World !", 1, (255, 0, 0))
        #textpos = text.get_rect(centerx=width/2)
        #screen.blit(text, textpos)
    time_clock+=1
    draw(hulk)
    pygame.display.flip()
|
import datetime
import sqlite3
class db_worker:
    """Thin wrapper around an SQLite database holding employees, children and
    their parents. Errors are reported by printing and returning empty/None
    results (preserving the original best-effort style)."""

    def __init__(self, path='DB/myDB.db'):
        super().__init__()
        self.conn = None
        self.cur = None
        self.connect(path)

    def __del__(self):
        # BUG FIX: if connect() failed, cur/conn are still None and the old
        # unconditional close() calls raised AttributeError during GC.
        if self.cur is not None:
            self.cur.close()
        if self.conn is not None:
            self.conn.close()

    def connect(self, path='DB/myDB.db'):
        """Open the database at *path* and keep a cursor for later queries."""
        try:
            self.conn = sqlite3.connect(path)
            self.cur = self.conn.cursor()
            print('Connected')
        except sqlite3.Error as error:
            print("Error while working with SQLite in <connect>", error)

    def check_login(self, login, password):
        """Return (fname, lname) for a matching employee, else None."""
        try:
            # Parameterized query; no commit needed for a SELECT.
            self.cur.execute("SELECT fname, lname FROM employees WHERE login=? AND password=?", (login, password))
            ans = self.cur.fetchone()
            print('Data received')
            return ans
        except sqlite3.Error as error:
            print("Error while working with SQLite in <check_login>", error)

    def search(self, request):
        """Search children by one name token (first OR last) or two tokens
        (first+last in either order). Returns a list of (fname, lname, bd)."""
        sp = request.split()
        # BUG FIX: ans is now initialized, so a failed query returns [] instead
        # of raising NameError at the return statement.
        ans = []
        if len(sp) == 1:
            try:
                self.cur.execute("SELECT fname, lname, bd FROM children WHERE fname=? OR lname=?", (sp[0], sp[0]))
                ans = self.cur.fetchall()
                print('BACK: ', ans)
            except sqlite3.Error as error:
                print("Error while working with SQLite in <search>", error)
            return ans
        elif len(sp) == 2:
            try:
                self.cur.execute(
                    "SELECT fname, lname, bd FROM children WHERE (fname=? AND lname=?) OR (fname=? AND lname=?)",
                    (sp[0], sp[1], sp[1], sp[0]))
                ans = self.cur.fetchall()
                print('BACK: ', ans)
            except sqlite3.Error as error:
                print("Error while working with SQLite in <search>", error)
            return ans
        # Zero or 3+ tokens: nothing sensible to query.
        return []

    def child_card(self, request):
        """Given (fname, lname, bd), return the child's card as
        (childid, photo, fname, lname, bd as datetime.date, [parent rows]),
        or () when no child matches."""
        ans = None
        try:
            self.cur.execute(
                "SELECT childid, photo, fname, lname, bd FROM children WHERE (fname=? AND lname=? AND bd=?)", request)
            ans = self.cur.fetchone()
        except sqlite3.Error as error:
            print("Error while working with SQLite in <child_card>", error)
        # BUG FIX: the old code called list(None) / indexed unbound results
        # when the query failed or found nothing.
        if ans is None:
            return ()
        ans = list(ans)
        ans[4] = datetime.date.fromisoformat(ans[4])
        parents = []
        try:
            # BUG FIX: the parameters were "(str(ans[0]))" — a plain string,
            # which sqlite3 treats as one parameter per character and which
            # therefore broke for any multi-digit childid. Pass a 1-tuple.
            self.cur.execute("SELECT parentid, fname,mname, lname, number, role FROM parents WHERE childid=?",
                             (ans[0],))
            parents = self.cur.fetchall()
        except sqlite3.Error as error:
            print("Error while working with SQLite in <child_card>", error)
        ans.append(parents)
        print('BACK: ', ans)
        return tuple(ans)

    def update(self, request):
        """Create (request[0] == '') or update a child card; request[5] holds
        the parent rows, each with '' as id for new parents."""
        if request[0] == '':
            try:
                self.cur.execute("INSERT INTO children(photo, fname, lname,bd) VALUES(?,?,?,?)", request[1:5])
                self.conn.commit()
                new_id = self.cur.lastrowid
                self.cur.executemany(
                    "INSERT INTO parents(childid, fname, mname, lname, number, role) VALUES(?,?,?,?,?,?)",
                    [(new_id,) + tuple(request[5][i][1:]) for i in range(len(request[5]))])
                self.conn.commit()
                print('Created new child card')
            except sqlite3.Error as error:
                print("Error while working with SQLite in <update>", error)
        else:
            try:
                self.cur.execute("UPDATE children SET photo=?, fname=?, lname=?, bd=? where childid=?",
                                 tuple(request[1:5]) + (request[0],))
                self.conn.commit()
                for parent in request[5]:
                    if parent[0] == '':
                        self.cur.execute(
                            "INSERT INTO parents(childid, fname, mname, lname, number, role) VALUES(?,?,?,?,?,?)",
                            (request[0],) + parent[1:])
                    else:
                        data = (request[0],) + tuple(parent[1:]) + (parent[0],)
                        print('BACK try:', data)
                        self.cur.execute(
                            "UPDATE parents SET childid=?, fname=?, mname=?, lname=?, number=?, role=? where parentid=?",
                            tuple(data))
                self.conn.commit()
                print('Updated child card')
            except sqlite3.Error as error:
                print("Error while working with SQLite in <update>", error)

    def delete(self, request):
        """Delete a child (by childid) together with its parent rows."""
        try:
            self.cur.execute("DELETE FROM parents WHERE childid=?", (str(request),))
            self.conn.commit()
            self.cur.execute("DELETE FROM children WHERE childid=?", (str(request),))
            self.conn.commit()
            print('Deleted: ', request)
        except sqlite3.Error as error:
            # BUG FIX: the message previously named <child_card>.
            print("Error while working with SQLite in <delete>", error)
|
from django.contrib import admin
from django.urls import path, include
from . import views
# URL namespace for reversing, e.g. "lessons:index".
app_name = 'lessons'
urlpatterns = [
    # Page views.
    path('', views.index, name = 'index'),
    path('subjects/<int:subject_id>', views.subject, name = 'subject'),
    path('itype/<int:subject_id>/<int:itype_id>', views.itype, name = 'itype'),
    # AJAX endpoints for creating/deleting/loading items and item types.
    path('add_item_ajax', views.add_item_ajax, name="add_item_ajax"),
    path('add_itype_ajax', views.add_itype_ajax, name="add_itype_ajax"),
    path('delete_itype_ajax', views.delete_itype_ajax, name="delete_itype_ajax"),
    path('addblock_items_ajax', views.addblock_items_ajax, name="addblock_items_ajax"),
    path('delete_item_ajax', views.delete_item_ajax, name="delete_item_ajax"),
    path('load_item_ajax', views.load_item_ajax, name="load_item_ajax"),
    path('item/<int:item_id>', views.item, name="item"),
]
|
# -*- coding: utf-8 -*-
import QFramework
import ROOT
def parseSystematicsList(version, config):
    """Read the systematics list file referenced by the config tag *version*
    and expand each "name, type" entry into the variation names:
    onesided -> name__1up; twosided -> name__1up and name__1down."""
    vars_path = ROOT.TString()
    if not config.getTagString(version, vars_path):
        return []
    full_path = QFramework.TQPathManager.findFileFromEnvVar(vars_path, "CAFANALYSISSHARE")
    variations = []
    with open(full_path) as list_file:
        for raw_line in list_file:
            entry = raw_line.strip()
            # Skip blank lines and comments.
            if not entry or entry.startswith("#"):
                continue
            fields = entry.split(",")
            if len(fields) != 2:
                QFramework.BREAK("Could not parse line '{:s} in file {:s}".format(entry, full_path))
            sys_name, sys_type = (field.strip() for field in fields)
            if sys_type == "onesided":
                variations.append(sys_name + "__1up")
            elif sys_type == "twosided":
                variations.append(sys_name + "__1up")
                variations.append(sys_name + "__1down")
            else:
                QFramework.BREAK("Unknown systematic type {:s} for {:s}".format(sys_type, sys_name))
    return variations
def sampleFolderHasTagValueSomewhereBool(samples, tagname, value, default):
    """Return True if any sample below *samples* carries boolean tag *tagname*
    equal to *value* (*default* is used for samples without the tag)."""
    return any(
        sample.getTagBoolDefault(tagname, default) == value
        for sample in samples.getListOfSamples()
    )
def prepareSystematics(config, samples):
    """Prepare the systematic handling: for every channel of every MC sample
    folder, create one copy per p4 systematic variation and tag the folders so
    the MCASV can pick them up later. Skipped entirely in robust/dummy mode."""
    CLI = config.getFolder("CLI+")
    # flag indicating to run a robust analysis
    robust = CLI.getTagBoolDefault("robust", False)
    # flag indicating to run a dummy analysis
    dummy = CLI.getTagBoolDefault("dummy", False)
    if not robust and not dummy:
        subfolders = samples.getListOfSampleFolders("?")
        doNominal = config.getTagBoolDefault("sysDoNominal", True)
        channels = config.getTagVStandardString("channels")
        mcasvchannels = set([c for c in channels])
        for c in channels:
            for sf in subfolders:
                if sf.getTagBoolDefault("isData", False):
                    # we're in a data SampleFolder
                    # just set some nominal tags for this channel subfolder
                    f = sf.getSampleFolder(c)
                    if not f:
                        continue
                    f.setTagString(".mcasv.channel", f.getTagStringDefault("channel", ""))
                    continue
                # we're in a MC SampleFolder
                f = sf.getSampleFolder(c)
                if not f:
                    QFramework.WARN(
                        "unable to retrieve sample folder '{:s}' from '{:s}'. ".format(c, sf.GetName())
                        + "Is this expected from your path restriction?"
                    )
                    sf.printContents()
                    # if this is not a valid folder, don't do anything with it,
                    # you'll get a null pointer exception otherwise!
                    continue
                # NOTE(review): this list is config-driven and re-parsed for
                # every (channel, subfolder) pair — it could be hoisted out of
                # both loops.
                p4_vars = parseSystematicsList("sysP4List", config)
                for p4var in p4_vars:
                    # run AFII systematics only on AFII samples
                    if "_AFII" in p4var and sampleFolderHasTagValueSomewhereBool(f, "~isAFII", False, False):
                        continue
                    if "_AF2" in p4var and sampleFolderHasTagValueSomewhereBool(f, "~isAFII", False, False):
                        continue
                    # run MC16 systematics only on fullsim samples
                    if "_MC16" in p4var and sampleFolderHasTagValueSomewhereBool(f, "~isAFII", True, False):
                        continue
                    # for each p4 variation, copy the channel folder
                    newname = c + "_" + p4var
                    newf = f.copy(newname)
                    if not newf:
                        QFramework.BREAK("Unable to copy folder {:s} to new name {:s}".format(f.GetName(), newname))
                    sf.addFolder(newf)
                    # set the appropriate tags
                    newf.setTagString(".mcasv.channel", newname)
                    newf.setTagString("p4Variation", p4var)
                    if "JET_JERPD" in p4var:
                        newf.setTagInteger("JERWeight", -1)
                    mcasvchannels.add(newname)
                # if no nominal analysis was requested, we can remove the nominal channels
                if not doNominal:
                    f.detachFromBase()
                else:
                    f.setTagString(".mcasv.channel", f.GetName())
        # Add some nominal top level tags, even if systematics aren't being added
        samples.setTagString("p4Variation", "Nominal")
        # possibly print how the folder looks like now
        if config.getTagBoolDefault("showChannels", False):
            QFramework.INFO("After taking care of channel and systematics setup, your sample folder looks like this:")
            samples.printContents("r2dt")
        # save the whole collection of channels (including now systematics)
        # for access later when creating the MCASV
        runtime = config.getFolder("runtime+")
        for i, channel in enumerate(mcasvchannels):
            if "ROOT" in str(type(channel)):
                QFramework.WARN(
                    "Please make sure that every mcasvchannel is only booked once. "
                    + "The python set doesn't help when adding c++ ROOT instances ({:s}).".format(type(channel))
                )
            runtime.setTagString("mcasvchannels." + str(i), channel)
    return
def build_weight_sys_observable_names(nominal_name, selection, config):
    """Build the observable-name list for the nominal weight plus every
    accepted weight systematic.

    nominal_name -- base observable name
    selection    -- predicate deciding which weight variations to keep
    config       -- configuration object handed to parseSystematicsList
    Returns the nominal name followed by "<nominal>:<variation>" entries.
    """
    names = [nominal_name]
    names += [
        nominal_name + ":" + variation
        for variation in parseSystematicsList("sysWeightList", config)
        if selection(variation)
    ]
    return names
|
# Generated by Django 2.2.5 on 2019-09-02 14:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the routemap app: Country, Region, PhotoSpots, Route.

    Auto-generated by Django; edit with care, the migration graph depends on it.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='PhotoSpots',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo_path', models.CharField(max_length=512)),
            ],
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Route',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('starts', models.CharField(max_length=64)),
                ('ends', models.CharField(max_length=64)),
                ('description', models.TextField()),
                # Each rating stores the running average plus a vote counter.
                ('surface_condition', models.FloatField()),
                ('surface_condition_votes', models.FloatField(null=True)),
                ('scenic_rating', models.FloatField()),
                ('scenic_rating_votes', models.FloatField(null=True)),
                ('funny_to_drive', models.FloatField()),
                ('funny_to_drive_votes', models.FloatField(null=True)),
                # NOTE(review): "overal" is a typo but renaming the field now
                # would require a follow-up migration.
                ('overal_rating', models.FloatField()),
                ('overal_rating_votes', models.FloatField(null=True)),
                ('embed_view', models.CharField(max_length=512)),
                ('country', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='routemap.Country')),
                ('region', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='routemap.Region')),
            ],
        ),
    ]
|
#
# Copyright (c) 2018 Eric Faurot <eric@faurot.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import asyncio
import inspect
import logging
import signal
import time
import types
def _logger(logger):
return logger if logger is not None else logging.getLogger(__name__)
# Module-level handle on the top-level task created by start().
_running = None

def stop():
    """Cancel the running top-level task started via start().

    NOTE(review): raises AttributeError when start() was never called
    (_running is still None) -- presumably intentional; confirm.
    """
    _running.cancel()
def start(func, logger = None):
    """Run coroutine function *func* to completion on a fresh event loop.

    Installs SIGINT/SIGTERM handlers that cancel the task, logs how the
    task ended, and always closes the loop afterwards. Only one start()
    per process (asserted via the module-level _running).

    NOTE(review): relies on asyncio.get_event_loop(), which is deprecated
    for this use in modern Python -- confirm target interpreter version.
    """
    global _running
    assert _running is None
    logger = _logger(logger)
    logger.debug("starting")
    def _signal(signame):
        # Factory: returns a handler that cancels the running task.
        def _():
            logger.debug("got signal %s", signame)
            if _running:
                _running.cancel()
        return _
    loop = asyncio.get_event_loop()
    loop.add_signal_handler(signal.SIGINT, _signal('SIGINT'))
    loop.add_signal_handler(signal.SIGTERM, _signal('SIGTERM'))
    try:
        _running = asyncio.ensure_future(func())
        loop.run_until_complete(_running)
    except asyncio.CancelledError:
        # Cancellation (e.g. via stop() or a signal) is a normal exit path.
        logger.debug("cancelled")
    else:
        logger.debug("stopped normally")
    finally:
        logger.debug("closing event loop")
        loop.close()
        logger.debug("done")
def collect_future(future, logger = None):
    """Log the outcome of a finished future: cancellation, exception,
    or an unexpected non-None result."""
    log = _logger(logger)
    if future.cancelled():
        log.warning("FUTURE CANCELLED")
        return
    exc = future.exception()
    if exc:
        # Re-raise so logger.exception captures the full traceback.
        try:
            raise exc
        except:
            log.exception("FUTURE EXCEPTION")
        return
    result = future.result()
    if result is not None:
        log.warning("FUTURE RESULT: %r", result)
class Schedulable:
    """Base class for items that can be timed on a Threadlet.

    Class-level defaults double as "reset" values: code deletes the
    instance attribute to fall back to the class default (see resume()).
    """
    # Absolute time.time() when this item is next due (None = never set).
    timestamp = None
    # Seconds between automatic reschedules (None = one-shot).
    _period = None
    _suspended = False
    _cancelled = False
    def __init__(self, thread):
        self.thread = thread
    def set_period(self, period = None):
        """Set (or clear, with None) the automatic reschedule period."""
        self._period = period
    def schedule(self, delay = 0, period = None):
        """Schedule *delay* seconds from now; optionally update the period."""
        if period is not None:
            self._period = period
        self.schedule_at(time.time() + delay)
    def schedule_at(self, timestamp):
        """Schedule at an absolute timestamp (None = now) and wake the thread."""
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = timestamp
        self.thread._scheduled.add(self)
        self.thread._wakeup()
    def cancel(self):
        """Permanently detach this item from its thread (idempotent)."""
        if self._cancelled:
            return
        self._cancelled = True
        self.unschedule()
        self.thread._uninstall(self)
    def unschedule(self):
        """Remove from the thread's scheduled set without cancelling."""
        self.thread._scheduled.discard(self)
        self.thread._wakeup()
    def is_scheduled(self):
        return self in self.thread._scheduled
    def is_pending(self):
        return self in self.thread._pending
    def suspend(self):
        """Pause scheduling until resume(); remembers the last timestamp."""
        if self._suspended:
            return
        self._suspended = True
        self.unschedule()
    def resume(self):
        """Undo suspend() and re-schedule at the remembered timestamp."""
        if not self._suspended:
            return
        # Deleting the instance attribute re-exposes the class default False.
        del self._suspended
        if not self._cancelled:
            self.schedule_at(self.timestamp)
    def _reschedule(self):
        # reschedule periodic events if not scheduled already.
        if self._period is not None and not self.is_scheduled():
            self.schedule(self._period)
        if self._suspended:
            self.unschedule()
class DataMixin:
    """Lazy dict-like key/value storage mixin.

    The backing dict is only allocated on first write, so empty instances
    stay cheap. Supports ``in``, item get/set/del, and bulk update().
    """
    # Backing store; None until the first __setitem__.
    _data = None
    def update(self, obj):
        """Copy all key/value pairs from *obj* (no-op when falsy)."""
        if obj:
            for key, value in obj.items():
                self[key] = value
    def __contains__(self, key):
        if self._data is None:
            return False
        return key in self._data
    def __getitem__(self, key):
        if self._data is None:
            raise KeyError(key)
        return self._data[key]
    def __setitem__(self, key, value):
        if self._data is None:
            self._data = {}
        self._data[key] = value
    def __delitem__(self, key):
        if self._data is None:
            raise KeyError(key)
        # Bug fix: the original did `return self._data[key]`, which looked
        # the key up but never deleted it.
        del self._data[key]
class EventMixin:
    """Mixin providing event-delivery behaviour for schedulables."""
    # Owning Threadlet; assigned by the concrete subclass.
    thread = None
    def is_signal(self):
        """Plain events are not signals; Signal overrides this."""
        return False
    def trigger(self):
        """Move this item into the pending set for immediate delivery."""
        self.timestamp = time.time()
        owner = self.thread
        owner._scheduled.discard(self)
        owner._pending.add(self)
        owner._wakeup()
class Event(Schedulable, EventMixin, DataMixin):
    """Timed event: schedulable on a Threadlet, with optional payload."""
    def __init__(self, thread, params = None):
        Schedulable.__init__(self, thread)
        # DataMixin.update is a no-op for a falsy params value.
        self.update(params)
class Signal(EventMixin, DataMixin):
    """Unscheduled, immediate event with optional payload."""
    def __init__(self, thread, params = None):
        self.thread = thread
        # DataMixin.update is a no-op for a falsy params value.
        self.update(params)
    def is_signal(self):
        """Signals identify themselves to the dispatch loop."""
        return True
class Tasklet(Schedulable, DataMixin):
    """Schedulable unit of work running a (sync or async) handler."""
    # Class-level defaults; run() shadows _running and deletes it afterwards.
    _running = False
    _handler = None
    def __init__(self, thread, params = None):
        Schedulable.__init__(self, thread)
        if params:
            self.update(params)
    def set_handler(self, handler):
        """Install the callable invoked by run(); it receives this tasklet."""
        self._handler = handler
    def is_running(self):
        return self._running
    async def run(self):
        """Invoke the handler once, awaiting its result when awaitable,
        then reschedule if a period is set."""
        if self._suspended or self._cancelled:
            return
        self._running = True
        try:
            value = self._handler(self)
            if isinstance(value, types.CoroutineType):
                value = await value
            elif isinstance(value, types.GeneratorType):
                value = await value
        except asyncio.CancelledError:
            self.thread.logger.warning("cancelled: %r", self)
        except:
            # Deliberate catch-all: a failing handler must not kill the loop.
            # NOTE(review): bare except also traps BaseExceptions -- confirm.
            self.thread.logger.exception("exception: %r", self)
        # Deleting the instance attribute re-exposes the class default False.
        del self._running
        # the task has cancelled itself.
        if self._cancelled:
            return
        self._reschedule()
class Threadlet:
    """Cooperative scheduler multiplexing Tasklets, Events and Signals
    on top of asyncio.

    Items wait in _scheduled until their timestamp is due, then move into
    _pending / the _ready batch. ``async for item in threadlet`` runs
    tasklets internally and yields events and signals to the caller.
    """
    # Wait/wakeup handshake future used by _sleep()/_wakeup().
    _future = None
    # asyncio task wrapping the main coroutine while running.
    _coro = None
    _stopping = False
    # Current batch of due items being drained by __aiter__.
    _ready = None
    def __init__(self, logger = None):
        self.logger = _logger(logger)
        self._schedulables = {}      # name -> schedulable (named lookup)
        self._schedulables_rev = {}  # schedulable -> name (for unregister)
        self._pending = set()        # triggered items awaiting delivery
        self._scheduled = set()      # items waiting for their timestamp
    def __contains__(self, key):
        return key in self._schedulables
    def __getitem__(self, key):
        return self._schedulables[key]
    def is_running(self):
        return self._coro is not None
    def is_stopping(self):
        return self._stopping
    def start(self, func = None, when_done = None, delay = 0):
        """Start the main coroutine.

        func: coroutine function taking this threadlet (defaults to idling).
        when_done: callback for the finished future (defaults to logging).
        delay: seconds to sleep before invoking func.
        Raises TypeError when func is not a coroutine function.
        """
        assert not self.is_running()
        async def default_func(thread):
            await thread.idle()
        def default_done(future):
            collect_future(future, self.logger)
        async def run():
            if delay:
                await asyncio.sleep(delay)
            await (func or default_func)(self)
        def done(future):
            self._pending.clear()
            self._scheduled.clear()
            try:
                (when_done or default_done)(future)
            except:
                self.logger.exception("done: %r", self)
            # Drop the instance attribute; is_running() then sees the
            # class-level None again.
            del self._coro
        if func is not None and not inspect.iscoroutinefunction(func):
            raise TypeError("not a coroutine function")
        self._coro = asyncio.ensure_future(run())
        self._coro.add_done_callback(done)
    def stop(self):
        """Request a graceful stop; iteration loops exit at the next check."""
        if not self._stopping:
            self._stopping = True
            self._wakeup()
    def join(self, loop = None):
        """Block the given (or current) event loop until the task finishes."""
        if loop is None:
            loop = asyncio.get_event_loop()
        loop.run_until_complete(self._coro)
    def __await__(self):
        yield from self._coro
    async def idle(self):
        """Consume (and discard) events until the threadlet stops."""
        async for _ in self:
            pass
    async def __aiter__(self):
        # Drain ready items: run tasklets, yield events/signals to the caller.
        while not self._stopping:
            if not self._ready:
                # wait for the next batch of events
                self._ready = await self._wait_for_events()
                continue
            item = self._ready.pop(0)
            if isinstance(item, Tasklet):
                await item.run()
            elif isinstance(item, Signal):
                yield item
            elif isinstance(item, Event):
                yield item
                item._reschedule()
    async def _wait_for_events(self):
        """Sleep until at least one item is due; return them sorted by
        timestamp (returns None when stopping)."""
        while not self._stopping:
            # get the set of scheduled events that are ready
            now = time.time()
            events = { evt for evt in self._scheduled if evt.timestamp <= now }
            self._scheduled.difference_update(events)
            # if there are pending events or signals, add them to the set
            if self._pending:
                events.update(self._pending)
                self._pending.clear()
            if events:
                return sorted(events, key = lambda x: x.timestamp)
            await self._sleep()
    def _task(self, func, name, params = None):
        # Build a Tasklet, registering it when a name is given.
        task = Tasklet(self, params = params)
        task.set_handler(func)
        if name is not None:
            self._register_schedulable(name, task)
        return task
    def _event(self, name, params = None):
        # Build an Event, registering it when a name is given.
        event = Event(self, params = params)
        if name is not None:
            self._register_schedulable(name, event)
        return event
    def _register_schedulable(self, name, schedulable):
        # Names are unique per threadlet.
        if name in self._schedulables:
            raise KeyError(name)
        self._schedulables[name] = schedulable
        self._schedulables_rev[schedulable] = name
    def _unregister_schedulable(self, schedulable):
        name = self._schedulables_rev.pop(schedulable, None)
        if name is not None:
            self._schedulables.pop(name)
    def event(self, name = None, **kwargs):
        """Create (and register, when named) an Event carrying **kwargs."""
        if name is not None:
            kwargs["name"] = name
        return self._event(name, params = kwargs)
    def signal(self, name = None, **kwargs):
        """Create a Signal carrying **kwargs and trigger it immediately.

        NOTE(review): unlike event(), the created Signal is not returned --
        confirm callers never need the instance.
        """
        if name is not None:
            kwargs["name"] = name
        sig = Signal(self, params = kwargs)
        sig.trigger()
    def tasklet(self, name = None, suspend = False, delay = 0, period = None, **kwargs):
        """Decorator: register the wrapped function as a (possibly periodic)
        tasklet named *name* (default: the function's own name)."""
        def _(func):
            sname = name
            if sname is None:
                sname = func.__name__
            task = self._task(func, sname, params = kwargs)
            task.set_period(period)
            task.schedule(delay)
            if suspend:
                task.suspend()
            return func
        return _
    def set_tasklet(self, func, **kwargs):
        """Non-decorator form of tasklet()."""
        self.tasklet(**kwargs)(func)
    def schedule(self, func, delay = 0, name = None):
        """
        Register a delayed call to *func* after *delay* seconds; returns
        the underlying Tasklet.
        """
        def _(task):
            return func()
        task = self._task(_, name)
        task.schedule(delay)
        return task
    async def _sleep(self):
        # Park until _wakeup(), the next scheduled timestamp, or cancellation.
        self._future = asyncio.Future()
        try:
            if self._scheduled:
                timestamp = min(entry.timestamp for entry in self._scheduled)
                delay = max(0.0001, timestamp - time.time())
                await asyncio.wait_for(self._future, delay)
            else:
                await self._future
        except asyncio.TimeoutError:
            pass
        except asyncio.CancelledError:
            pass
        finally:
            # Deleting the attribute re-exposes the class default None.
            if self._future:
                del self._future
    def _wakeup(self):
        # Resolve the sleep future so _sleep() returns promptly.
        if not self._future:
            return
        future = self._future
        del self._future
        if future.done():
            # Should not happen; log how it completed instead of crashing.
            if future.cancelled():
                self.logger.warning("%r._wakeup(): future cancelled", self)
            elif future.exception():
                self.logger.warning("%r._wakeup(): future exception: %s", self, future.exception())
            else:
                self.logger.warning("%r._wakeup(): future result: %r", self, future.result())
        else:
            future.set_result(None)
    def _uninstall(self, schedulable):
        # Called from Schedulable.cancel(): detach and unregister.
        assert schedulable.thread is self
        del schedulable.thread
        self._unregister_schedulable(schedulable)
class ThreadMixin:
    """Mixin giving an object its own lazily-created Threadlet."""
    # Name-mangled to _ThreadMixin__thread, so subclasses cannot collide.
    __thread = None
    @property
    def thread(self):
        """The owning Threadlet, created on first access."""
        if self.__thread is None:
            self.__thread = Threadlet()
        return self.__thread
    def thread_start(self):
        """Start the threadlet running thread_run(); thread_exit() fires
        when it completes."""
        self.thread.start(self.thread_run, when_done = self.__thread_exit)
    def __thread_exit(self, future):
        # Log the outcome, then hand control to the subclass hook.
        collect_future(future)
        self.thread_exit(self.thread)
    async def thread_run(self, thread):
        """Default main loop: idle until stopped. Override in subclasses."""
        await thread.idle()
    def thread_exit(self, thread):
        """Hook called after the threadlet finishes. Override as needed."""
        pass
|
# Generated by Django 3.1 on 2020-11-10 05:23
from django.db import migrations, models
import ecom.models
class Migration(migrations.Migration):
    """Add extra_image (gallery) and main_image fields to the Item model.

    Auto-generated by Django 3.1; depends on migration 0024 of the ecom app.
    """

    dependencies = [
        ('ecom', '0024_auto_20201110_1111'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='extra_image',
            field=models.ManyToManyField(to='ecom.ImageItem'),
        ),
        migrations.AddField(
            model_name='item',
            name='main_image',
            # Uploads go to the per-user path computed by user_img_path.
            field=models.ImageField(default='default.jpg', upload_to=ecom.models.user_img_path),
        ),
    ]
|
import setuptools

# Use the README as the PyPI long description.
# Bug fix: read it with an explicit UTF-8 encoding; the default encoding is
# platform-dependent and breaks on Windows for non-ASCII README content.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name='pyrosenv',
    version='0.0.4',
    author='Omri Rozenzaft',
    author_email='omrirz@gmail.com',
    url='https://github.com/omrirz/pyrosenv.git',
    description='Set an environment for easy work with ROS in python without setting things up',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Unix"
    ],
    python_requires='>=3.6',
    install_requires=[
        'pyros_setup',
        'rospkg',
        'defusedxml',
        # NOTE(review): pycrypto is unmaintained; consider pycryptodome.
        'pycrypto',
        'gnupg'
    ],
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, cookielib, urllib, urllib2, time
#-----------------------------------------
# Server id used when persisting the channel list (Channels<serv_id>.py).
serv_id = '1'
# CDN host serving the Zabava/HTLive streams.
siteUrl = 'zabava-htlive.cdn.ngenix.net'
httpSiteUrl = 'http://' + siteUrl
# Legacy cookie-jar setup, kept for reference:
#sid_file = os.path.join(os.getcwd(), siteUrl+'.sid')
#cj = cookielib.FileCookieJar(sid_file)
#hr = urllib2.HTTPCookieProcessor(cj)
#opener = urllib2.build_opener(hr)
#urllib2.install_opener(opener)
# Decode a UTF-8 byte string, silently dropping invalid sequences
# (Python 2 `unicode` builtin).
def ru(x):return unicode(x,'utf8', 'ignore')
# Resolve a Kodi special:// path to a real filesystem path.
def xt(x):return xbmc.translatePath(x)
def unmark(nm):
    """Strip numbered suffixes " #0" .. " #19" from a channel name.

    Replacements run in ascending order, so " #1" is removed before the
    two-digit markers that contain it (matches the original behaviour).
    """
    for numero in range(20):
        nm = nm.replace(" #%d" % numero, "")
    return nm
def lower(t):
    """Lower-case Latin A-Z and Cyrillic А-Я characters in *t*.

    Only these two alphabets are mapped; every other character passes
    through unchanged.
    """
    RUS={"А":"а", "Б":"б", "В":"в", "Г":"г", "Д":"д", "Е":"е", "Ё":"ё", "Ж":"ж", "З":"з", "И":"и", "Й":"й", "К":"к", "Л":"л", "М":"м", "Н":"н", "О":"о", "П":"п", "Р":"р", "С":"с", "Т":"т", "У":"у", "Ф":"ф", "Х":"х", "Ц":"ц", "Ч":"ч", "Ш":"ш", "Щ":"щ", "Ъ":"ъ", "Ы":"ы", "Ь":"ь", "Э":"э", "Ю":"ю", "Я":"я"}
    # Bug fix: range(65, 91) so 'Z' (chr(90)) is lowered too; the original
    # range(65, 90) stopped at 'Y'.
    for i in range(65, 91):
        t = t.replace(chr(i), chr(i + 32))
    for i in RUS.keys():
        t = t.replace(i, RUS[i])
    return t
def getURL(url, Referer = httpSiteUrl):
    """Fetch *url* and return the raw response body (Python 2 / urllib2).

    Referer: kept for call-site compatibility; the header itself is
    currently commented out below.
    """
    urllib2.install_opener(urllib2.build_opener())
    req = urllib2.Request(url)
    #req.add_header('User-Agent', 'Opera/10.60 (X11; openSUSE 11.3/Linux i686; U; ru) Presto/2.6.30 Version/10.60')
    req.add_header('User-Agent', 'SmartSDK')
    req.add_header('Accept', 'text/html, application/xml, application/xhtml+xml, */*')
    req.add_header('Accept-Language', 'ru,en;q=0.9')
    #req.add_header('Referer', Referer)
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    return link
def mfindal(http, ss, es):
    """Collect substrings starting at each occurrence of *ss* and ending
    just before the next *es*.

    Stops when *es* is absent or found at index 0; after each match the
    scan resumes two characters past the end marker.
    """
    matches = []
    while http.find(es) > 0:
        start = http.find(ss)
        end = http.find(es)
        matches.append(http[start:end])
        http = http[end + 2:]
    return matches
def save_channels(ns, L):
    """Persist channel list *L* as an importable Channels<ns>.py file inside
    the add-on folder (uses the global Kodi `addon` and `xbmc` objects)."""
    fp=xbmc.translatePath(os.path.join(addon.getAddonInfo('path'), 'Channels'+ns+'.py'))
    fl = open(fp, "w")
    fl.write('# -*- coding: utf-8 -*-\n')
    fl.write('Channels=[\n')
    # repr() of each dict yields a line that eval's back to the same object.
    for i in L:
        fl.write(repr(i)+',\n')
    fl.write(']\n')
    fl.close()
class PZL:
    """Client for the Rostelecom/Zabava IPTV channel API (Python 2 / Kodi).

    Streams() resolves a "zabava:<url>" playlist to its .m3u8 variants;
    Canals() downloads the channel list for a subscription package.
    """
    def __init__(self):
        pass
    def Streams(self, url):
        """Return the list of .m3u8 variant URLs from the channel playlist."""
        # Strip the leading "zabava:" pseudo-scheme (7 characters).
        url=url[7:]
        print url
        hp=getURL(url+'?version=2&hd')
        #print hp
        L=hp.splitlines()
        link=''
        LL=[]
        # Keep only stream URLs, skipping #EXT metadata lines.
        for i in L:
            if '#EXT' not in i and '.m3u8' in i: LL.append(i)
        #LL.reverse()
        return LL
    def Canals(self):
        """Fetch all channel pages and return a list of channel dicts.

        NOTE(review): only the last sdp_id assignment takes effect -- the
        first two look like leftover package ids.
        """
        sdp_id = '98771354'# KinoViP OTT package
        sdp_id = '81204872'# starter package
        sdp_id = '101327188'# maximum package
        url='https://itv.rt.ru/api/v1/channels.json?q%5Binclude_telecasts%5D=false&q%5Bsort%5D=sort_order&q%5Bsdp_id%5D='+sdp_id
        hp=getURL(url).replace('\\"','')
        # These bindings let eval() parse the JSON-ish payload literally.
        # NOTE(review): eval() on a remote HTTP response is unsafe -- prefer
        # json.loads.
        null=''
        true=True
        false=False
        try:total=int(eval(hp)['meta']['num_pages'])
        except: print "errrrr"
        LL=[]
        # Walk every results page of the channel listing.
        for n in range(total+1):
            print n
            url='https://itv.rt.ru/api/v1/channels.json?q%5Binclude_telecasts%5D=false&q%5Bsort%5D=sort_order&q%5Bsdp_id%5D='+sdp_id+'&page='+str(n)#&q%5Bkeyword%5D=&q%5Bfrom_time%5D=2019-06-09
            hp=getURL(url).replace('\\"','')
            jsn = eval(hp)
            L=jsn['list']
            for i in L:
                try:
                    # Only unencrypted channels are playable here.
                    if i["mode"] == 'uncryptedPvr':
                        title = i["name"]
                        url='zabava:'+i["asset_url"]
                        #print url
                        try:img='https://itv.rt.ru/fe-ct/images/r100x100/'+i["poster"]
                        except: img=''
                        LL.append({'url':url, 'img':img, 'title':title, 'group':""})
                except:
                    print i
        #if LL!=[]: save_channels(serv_id, LL)
        #else: showMessage('yandex.ru', 'Не удалось загрузить каналы', times = 3000)
        return LL
|
# Demonstrates try/except/else/finally ordering: "hello" + 100 raises
# TypeError, so the except block runs, else is skipped, finally always runs.
a = "hello"
b = 100
try:
    c = a + b  # TypeError: cannot concatenate str and int
    print(c)
    print("try block get executed!!!!")
# Bug fix: narrowed the bare `except:` to the one exception this code can
# raise; a bare except would also swallow KeyboardInterrupt/SystemExit.
except TypeError:
    d = a + str(b)
    print(d)
    print("except block get executed!!!!")
else:
    print("else block get executed!!!!")
finally:
    print("finally block get executed!!!!")
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, request
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import os
import os.path as path
import json
import csv
from scraper import scrape
from geocoder import get_lat_long
from optimalCenter import optimal_center_formula
# Firebase service-account credentials file lives next to this script.
firebase_credentials_path = os.getcwd()
firebase_credentials_path += "/mental-health-redistribution-firebase-adminsdk-j3xlw-617de04f19.json"
cred = credentials.Certificate(firebase_credentials_path)
firebase_admin.initialize_app(cred)
# NOTE(review): this reassignment happens after initialize_app and the
# resulting credential object is never used -- confirm it can be removed.
cred = credentials.ApplicationDefault()
db = firestore.client()
# county_dictionary: full county.csv row keyed by the "Location" column.
county_dictionary = {}
with open('county.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        county_dictionary[row["Location"]] = row
# county_coords: "<County> County, <State>" -> {"lat": ..., "lon": ...}.
county_coords = {}
with open('county_coords.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        identifier = row["COUNAME"] + " County, " + row["STNAME"]
        county_coords[identifier] = {}
        county_coords[identifier]["lat"] = row["LATITUDE"]
        county_coords[identifier]["lon"] = row["LONGITUDE"]
def store_scraped_in_google(address, name, lat, lon):
    """Upsert one scraped listing into the potentialLocations collection,
    keyed by the listing name."""
    payload = {
        u'Address': address,
        u'lat': lat,
        u'lon': lon,
    }
    db.collection(u'potentialLocations').document(name).set(payload)
def scraper():
    """Scrape LoopNet listings, geocode them, and store them in Firestore.

    Runs periodically via the background scheduler. Listing names are
    de-duplicated by suffixing an incrementing counter.
    """
    loopnetListings = scrape()
    name_set = {}  # listing name -> number of times seen so far
    for index, row in loopnetListings.iterrows():
        address_map = {}
        address = row[0]
        address_map['address'] = address
        components = address.split(",")
        try:
            address_map['street'] = components[0]
            address_map['city'] = components[1]
            address_map['state'] = components[2]
        except IndexError:
            print("Exception: invalid format of address")
            continue
        name = row[1]
        seen = name_set.get(name)
        if seen is None:
            name_set[name] = 1
        else:
            # Bug fix: the counter was never incremented, so every duplicate
            # collapsed onto the same "<name> 1" Firestore document.
            name_set[name] = seen + 1
            name = name + " " + str(seen)
        lat, lon = get_lat_long(address)
        try:
            store_scraped_in_google(address_map, name, lat, lon)
        except Exception:
            print("Exception: Could not store in Google")
# Re-run the LoopNet scraper once a day (1440 minutes) on a daemon thread.
scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(scraper,'interval',minutes=1440)
scheduler.start()
app = Flask(__name__)
@app.route("/potential_mental_health_centers")
def potential_mental_health_centers():
    """Return all potential-center documents with a real latitude as JSON."""
    docs = db.collection(u'potentialLocations').where(u'lat', u'!=', 0).stream()
    return json.dumps([doc.to_dict() for doc in docs])
@app.route("/current_mental_health_centers")
def current_mental_health_centers():
    """Return all current-center documents with a real latitude as JSON."""
    docs = db.collection(u'currentLocations').where(u'lat', u'!=', 0).stream()
    return json.dumps([doc.to_dict() for doc in docs])
@app.route("/county_info")
def county_info():
    """Return the county.csv row for the ?county=<name> query arg as JSON."""
    county = request.args.get('county')
    return json.dumps(county_dictionary[county])
@app.route("/optimal_centers")
def optimal_centers():
    """Score every potential location against the requested counties.

    Query string: repeated 'counties' params naming counties present in
    county_coords / county_dictionary. Returns a JSON object keyed by
    document id, sorted by descending average score.

    NOTE(review): raises ZeroDivisionError when no 'counties' parameter is
    supplied -- confirm callers always pass at least one.
    """
    county_list = request.args.getlist('counties')
    response = {}
    potential_locations = db.collection(u'potentialLocations').where(u'lat', u'!=', 0).stream()
    for doc in potential_locations:
        potential_lat = float(doc.to_dict()["lat"])
        potential_lon = float(doc.to_dict()["lon"])
        score = 0
        for county in county_list:
            county_lat = float(county_coords[county]["lat"])
            county_lon = float(county_coords[county]["lon"])
            county_classification = float(county_dictionary[county]["Mental Health Need Classification"])
            score += optimal_center_formula(county_lat, county_lon, potential_lat, potential_lon, county_classification)
        # Average the per-county scores for this location.
        score = score/len(county_list)
        response[str(doc.id)] = {}
        response[str(doc.id)]["details"] = doc.to_dict()
        response[str(doc.id)]["score"] = score
    # Highest-scoring locations first.
    response = {key: value for key, value in sorted(response.items(), key = lambda item: item[1]['score'], reverse=True)}
    return json.dumps(response)
# Run the Flask development server only when executed directly.
if __name__ == "__main__":
    app.run()
from SingletonMeta import SingletonMeta
class IO(metaclass=SingletonMeta):
    """Singleton helper for simple whole-file reads and writes."""
    # Class-level registry of read handles, closed in bulk by closeOpenFiles().
    openFiles = []
    def readFileIntoLines(self, file_name):
        """Open *file_name*, register the handle, and return its lines."""
        f = open(file_name, 'r')
        self.openFiles.append(f)
        return f.readlines()
    def writeLineToFile(self, file_name, lines):
        """Write *lines* to *file_name* and close the handle.

        Bug fix: the original left the write handle open (and unregistered),
        risking unflushed data; a context manager closes it deterministically.
        """
        with open(file_name, 'w') as f:
            f.writelines(lines)
    def closeOpenFiles(self):
        """Close every registered read handle and empty the registry."""
        for f in self.openFiles:
            f.close()
        # Bug fix: clear the list so a later call cannot re-close stale handles.
        del self.openFiles[:]
import pandas as pd
import matplotlib.pyplot as plt
# Daily weather exports for 2013-2017; each CSV carries 22 metadata lines
# before the real header row.
w13 = pd.read_csv("2013Weather.csv", header = 22)
w14 = pd.read_csv("2014Weather.csv", header = 22)
w15 = pd.read_csv("2015Weather.csv", header = 22)
w16 = pd.read_csv("2016Weather.csv", header = 22)
w17 = pd.read_csv("2017Weather.csv", header = 22)
# Fix: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat builds the combined frame in one pass.
w = pd.concat([w13, w14, w15, w16, w17])
w = w[['Year', 'Month', 'Day', 'Mean Temp (°C)']]
# Average daily temperature per (year, month), then across years per month.
weather = w.groupby(['Year','Month']).mean()
weather = weather.reset_index()
weather = weather.groupby('Month').mean()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pywt
'''
#Importing stock data
import yfinance as yf
from datetime import date,datetime,timedelta
ticker = '^GSPC'
first_day = datetime(2000, 1, 3)
last_day = datetime(2019, 7, 1)
data = yf.Ticker(ticker).history(interval = '1d', start=first_day, end=last_day)
data.reset_index(inplace=True)
'''
'''
#Importing our crypto data
ticker = 'QTUMUSD' #Try QTUMUSD, XBTEUR, ETCUSD, ZECXBT, GNOXBT, XBTEUR, LTCEUR, XBTUSD, EOSXBT, EOSETH, GNOUSD
data = pd.read_csv('/Users/Sanjit/Google Drive/CollectiWise/Data/high_low.csv') #change this
data = data[data['asset'] == ticker]
data.reset_index(inplace=True, drop=True)
'''
# NOTE(review): hard-coded absolute path -- parameterise before sharing.
data = pd.read_csv('/Users/Sanjit/Repos/CollectiWise/formatted_features.csv')
column = 'e_XBTUSD avg_price'
data = data[column]
from waveletDenoising import denoise, SNR, RMSE, optDenoise, standardise, gridSearch_v2, optDenoise_v2 #Store this file in the same folder as 'waveletDenoising.py'
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from skimage.restoration import denoise_wavelet
#x = np.array(data.Close)
x = np.array(data)
# Remember the original moments so the series can be descaled later.
original_mean = np.mean(x)
original_std = np.std(x)
#In the paper they used zero-mean normalization, which means the series is just shifted vertically downwards by its mean.
x = x - np.mean(x) #equivalently, standardise(x, 0, np.std(x))
#x = standardise(x, 0, 1) #N(0,1) standardisation
#See https://www.youtube.com/watch?v=HSG-gVALa84
#y = denoise_wavelet(x, wavelet='coif3', mode='hard', wavelet_levels=3, method='BayesShrink', rescale_sigma=True)
#method: 'BayesShrink' or 'VisuShrink'
#Most of the time, the denoised series is basically identical to the original. Problem is worse when we standardise to N(0, 1)
#VisuShrink doesn't capture price peaks, and these obviously can't be noise.
# Denoise with the grid-searched wavelet settings.
y = optDenoise_v2(x)
#x = x + original_mean
#y = y + original_mean
#x = standardise(x, original_mean, original_std)
#y = standardise(x, original_mean, original_std)
# Report reconstruction quality of the denoised series.
print("SNR: ", SNR(x, y))
print("RMSE: ", RMSE(x, y))
# Plot raw (green) vs denoised (red).
plt.plot(data.index, x, color='Green')
plt.plot(data.index, y, color='Red')
#plt.title(ticker)
plt.title(column)
plt.show()
'''
We see strange behaviour when the prices are very large (XBTEUR, XBTUSD, in 1000s) and very small (GNOXBT, EOSXBT, in 0.001s)
When prices are large, the denoised signal is almost identical to the raw signal
When prices are small, the denoised signal is a constant zero signal, i.e. nothing like the raw signal
It seems that in the second case, everything is considered noise since all the movements are so small, and in the first case,
nothing is considered noise since all the movements are so large.
There must be some way to 'normalise' the data, so that the absolute value of prices moves is irrelevant, and only the relative
value of price moves matters.
I've now implented this in the rescale function: it rescales the data to have any mean and std you specify. The issue with
rescaling and then descaling is that RMSE increases by a lot (for GSPC, where new_mean = sqrt(old_mean) and similarly for std).
Despite this, the plot looks alright.
Why do we descale? At some point we need to, either after feeding the data through the model or before.
Rescaling, to the squares of the orignial mean and standard deviation, works really nicely with QTUMUSD.
When the numbers are too small (<1), there seems to be some kind of numerical overflow: the denoised signal is way off. So, the
usual mean = 0 std = 1 transform is not really an option.
Many cryptos were worth extremely small amounts when they started trading. In these cases, the denoised signal at the start of the
period is way off. ZECXBT offers basically no information.
It seems that it's not easy to write one function which can properly denoise every series we give it in just one click.
There needs to be an element of inspection. Maybe we can try a grid search for each series, but I don't see anything better.
I have now implemented a grid search! Don't see how we can do much better. It works for the most part, but for certain assets,
the denoised series is still not right.
''' |
import os
import cv2
import numpy as np
caminho_lfw = "treinamento" # path to the LFW training base
nomes = os.listdir(caminho_lfw) # person names (one subfolder per person)
nomes_auxiliar = [] # label index -> person name, filled during training
def aplicaLBP(img):
    """Detect the first face in *img* and return (gray_face_roi, rect).

    Returns (None, None) when no face is detected. rect is the OpenCV
    (x, y, w, h) tuple for the first detection.
    """
    cinza = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cascata = cv2.CascadeClassifier('lbpcascade_frontalface.xml')
    rostos = cascata.detectMultiScale(cinza, scaleFactor=1.2, minNeighbors=5)
    if len(rostos) == 0:
        return None, None
    (x, y, w, h) = rostos[0]
    # Bug fix: rows are indexed by y/h and columns by x/w; the original
    # sliced cinza[y:y+w, x:x+h], swapping width and height.
    return cinza[y:y+h, x:x+w], rostos[0]
def retornaLista(subpasta):
    """Read a whitespace-separated text file into a list of token lists.

    subpasta: path fragment (with leading separator) appended to the
    current working directory, e.g. "/txt_lfw/pairs.txt".
    """
    filename = os.path.abspath(os.curdir) + subpasta
    with open(filename) as file:
        linhas = file.read().splitlines()
    return [linha.split() for linha in linhas]
def prepararDados(caminho):
    """Collect training faces from <caminho>/<person>/<image files>.

    Returns (rostos, ids): gray face crops and parallel integer labels.
    Side effect: appends each face's person-folder name to the global
    nomes_auxiliar so labels can be mapped back to names.

    NOTE(review): `aux` increments once per *image*, so every image gets a
    distinct label -- confirm whether labels were meant to be per person.
    """
    diretorio = os.listdir(caminho)  # one subfolder per person
    rostos = []
    ids = []
    aux = 0
    for i in diretorio:
        # Bug fix: build paths from the `caminho` parameter instead of the
        # global caminho_lfw, so the function works for any root folder.
        pasta_pessoa = caminho + "\\" + i
        subdiretorio = os.listdir(pasta_pessoa)
        for j in subdiretorio:
            arquivo = pasta_pessoa + "\\" + j
            imagem = cv2.imread(arquivo)
            rosto, retangulo = aplicaLBP(imagem)
            if rosto is not None:
                rostos.append(rosto)
                nomes_auxiliar.append(i)
                ids.append(aux)
            aux = aux + 1
    return rostos, ids
def draw_rectangle(img, rect):
    """Draw a green bounding box for rect = (x, y, w, h) on *img* in place."""
    x, y, w, h = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

def draw_text(img, text, x, y):
    """Render *text* in green at (x, y) on *img* in place."""
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
def predict(test_img, nome):
    """Detect a face in *test_img*, classify it with the global LBPH
    recognizer, and return a copy annotated with box and predicted name.

    nome: currently unused; kept for call-site compatibility.
    NOTE(review): aplicaLBP may return (None, None), which would crash in
    reconhecedor.predict -- confirm test images always contain a face.
    """
    img = test_img.copy()
    face, rect = aplicaLBP(img)
    label, confidence = reconhecedor.predict(face)
    # Map the numeric label back to the person name recorded at training.
    label_text = nomes_auxiliar[label]
    draw_rectangle(img, rect)
    draw_text(img, label_text, rect[0], rect[1] - 5)
    return img
def rodaTeste(dirTeste,nomes):
    """Run predictions for every image under teste/<person>/ and display them.

    dirTeste: names of subfolders inside "teste"; nomes: window titles.
    NOTE(review): `indice` advances once per image but indexes `nomes`
    (one entry per person) -- looks off when a folder holds several images.
    """
    indice = 0
    for t in dirTeste:
        dirImg = os.listdir("teste\\" + t)
        for img in dirImg:
            imagem_teste = cv2.imread("teste\\" + t + "\\" + str(img))
            preditor = predict(imagem_teste, t)
            cv2.imshow(nomes[indice], preditor)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            indice = indice + 1
# Prepare training data (faces and numeric labels) from the LFW folder.
rostos, ids = prepararDados(caminho_lfw)
# Load the LFW metadata text files.
lfw_names = retornaLista("/txt_lfw/lfw-names.txt")
pairsDevTrain = retornaLista("/txt_lfw/pairsDevTrain.txt")
pairsDevTest = retornaLista("/txt_lfw/pairsDevTest.txt")
pairs = retornaLista("/txt_lfw/pairs.txt")
# Create the LBPH face recognizer.
reconhecedor = cv2.face.LBPHFaceRecognizer_create()
# Train the recognizer on the images from the training folder.
reconhecedor.train(rostos, np.array(ids))
# Tests.
# NOTE(review): this loop treats entries of "teste" as image files, while
# rodaTeste below treats them as per-person folders -- confirm the layout.
indice = 0
testes = os.listdir("teste") # path of the test set
for t in testes:
    imagem_teste = cv2.imread("teste\\" + str(t))
    preditor = predict(imagem_teste,t)
    cv2.imshow(nomes[indice], preditor)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    indice = indice+1
rodaTeste(testes, nomes)
|
# -*- coding: utf-8 -*-
import unittest
import sys
class BaseTestCase(unittest.TestCase):
    """Shared test-case base with a helper for dumping TAP output."""
    def tap(self, out):
        """Write *out* line by line to stderr, wrapped in marker lines."""
        write = sys.stderr.write
        write("--- tap output start ---\n")
        for linha in out.splitlines():
            write(linha + '\n')
        write("--- tap output end ---\n")
class TestCase(BaseTestCase):
    """Default concrete test-case class; extend via BaseTestCase helpers."""
    pass
# vim: ai sts=4 et sw=4
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 08 19:06:53 2016
@author: caiyi
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 08 19:06:53 2016
@author: caiyi
"""
"""
counting sheep
"""
def write_res(file_name, res):
    """Write results as "Case #i: value" lines (no trailing newline).

    Bug fix: the original reused the loop variable after a zero-iteration
    loop, raising NameError for a single-element *res* (and IndexError for
    an empty one); enumerate-and-join handles every length.
    """
    with open(file_name, 'w') as f:
        f.write('\n'.join(
            "Case #{}: {}".format(i + 1, r) for i, r in enumerate(res)
        ))
def func(S):
    """Count the flips needed for a '+'/'-' string (recursive).

    S: a string containing only '+' and '-'. Returns 0 for an all-'+'
    (or empty) string, otherwise recurses after stripping or flipping a
    uniform run. S itself is never mutated.
    """
    n = len(S)
    if S.count('+') == n:           # all '+' (also matches the empty string)
        return 0
    if S.count('-') == n:           # all '-': one flip finishes
        return 1
    if S.endswith('+'):
        # Trailing '+' run is already correct: drop it and continue.
        return func(S.rstrip('+'))
    if S.startswith('-'):
        # Flip the leading '-' run for free, then invert-and-reverse the rest.
        resto = S.lstrip('-')
        invertido = ''.join('-' if c == '+' else '+' for c in resto)
        return 1 + func(invertido[::-1])
    # Leading '+' run: flip it to '-' and recurse on the same length.
    cabeca = n - len(S.lstrip('+'))
    return 1 + func('-' * cabeca + S[cabeca:])
#l = ['-','-+','+-','+++','--+-','---+','+++-','']
#for S in test_case:
#    print S
#    print func(S)
#
# Driver: read the Code Jam input file (first line is the case count),
# solve each case with func(), write answers to disk (Python 2 script).
with open('B-large.in') as f:
    str1 = f.read()
l = [S for S in str1.strip().split('\n')[1:]]
res = []
for S in l:
    #num = int(ch)
    print "the S", S
    tmp = func(S)
    print 'the result ', tmp
    #print tmp
    res.append(tmp)
write_res('res_B_large.txt', res)
|
def calculate_rectangle_area(height, width):
    """Return the area of a rectangle.

    height, width: numeric side lengths.

    Bug fix: the original ignored its parameters and re-prompted with
    input(), contradicting the signature; the unreachable print after
    `return` is removed.
    """
    rectangle_area = height * width
    return rectangle_area
def calculate_area_of_square(side):
    """Return the area of a square with the given side length.

    Bug fix: the original ignored its parameter and re-prompted with
    input(); the unreachable print after `return` is removed.
    """
    square_area = side ** 2
    return square_area
def calculate_total_plus_tip_per_person(total_bill, tip_percent, number_of_people):
    """Return each person's share of the bill including tip.

    total_bill: bill amount; tip_percent: tip as a decimal (0.2 = 20%);
    number_of_people: how many ways to split (must be non-zero).

    Bug fixes: the original ignored its parameters and re-prompted with
    input(), and one of those prompts had an unterminated string literal
    (a syntax error); the unreachable print after `return` is removed.
    """
    total_plus_tip = total_bill + (total_bill * tip_percent)
    total_plus_tip_per_person = total_plus_tip / number_of_people
    return total_plus_tip_per_person
def fahrenheit_to_celcius(degrees):
    """Convert Fahrenheit to Celsius using (F - 32) * 5/9.

    Bug fix: the original ignored its parameter and re-prompted with
    input(); the unreachable print after `return` is removed.
    """
    degrees_in_celcius = (degrees - 32) * (5 / 9)
    return degrees_in_celcius
def calculate_the_remainder(num1, num2):
    """Return num1 modulo num2.

    Bug fix: the original ignored its parameters and re-prompted with
    input(); the unreachable print after `return` is removed.
    """
    remainder = num1 % num2
    return remainder
|
#!/usr/bin/env python
# Config stub that all modules can share, instead of reloading..
import configparser
import logging
import os
# Shared configuration stub: read makeit.ini (or the file named by
# $AUTHIT_INI) once, exposing each INI section as a plain module-level
# dict that other modules can import instead of re-parsing the file.
general = {}
payments = {}
pinpayments = {}
stripe = {}
smartwaiver = {}

Config = configparser.ConfigParser()
inifile = 'makeit.ini'
if 'AUTHIT_INI' in os.environ:
    inifile = os.environ['AUTHIT_INI']
Config.read(inifile)

# Copy every option of each known section into its module-level dict;
# sections missing from the file simply leave their dict empty.
for _section, _target in (('General', general),
                          ('Payments', payments),
                          ('Pinpayments', pinpayments),
                          ('Stripe', stripe),
                          ('Smartwaiver', smartwaiver)):
    if _section in Config.sections():
        for _option in Config.options(_section):
            _target[_option] = Config.get(_section, _option)
def configLogger(l):
    """Install a default root handler and set logger *l* to DEBUG.

    Fixes the original, which called ``l.basicConfig()`` (``basicConfig``
    is a module-level function, not a Logger method) and referenced an
    undefined name ``logger`` for the DEBUG constant.
    """
    logging.basicConfig()
    l.setLevel(logging.DEBUG)
|
# Snakes and ladders: BFS over board squares for the minimum dice rolls.
import sys
from collections import deque
input = sys.stdin.readline

# Board: board[i][1] is the square you actually end up on after landing
# on i (identity unless a snake/ladder starts there). 110 slots give
# head room for rolls past square 100 (max index used is 100 + 6).
board = []
visited = [False] * 110
for i in range(0, 110):
    board.append([i, i])

# n ladder endpoints and m snake endpoints, each as "from to".
n, m = map(int, input().split())
for _ in range(n + m):
    x, y = map(int, input().split())
    board[x][1] = y

# BFS queue of [square, roll-count], starting at square 1 with 0 rolls.
li = deque([[1, 0]])

def roll(cur):
    # Try all six dice outcomes from cur[0]; follow any snake/ladder on
    # the landing square and enqueue it if unseen.
    for i in range(1, 7):
        temp = board[cur[0] + i][1]
        count = cur[1] + 1
        if visited[temp] == False:
            li.append([temp, count])
            visited[temp] = True

# Pop squares in BFS order; the first time 100 is reached the roll
# count is minimal. NOTE(review): square 1 is never marked visited, so
# a snake leading back to 1 would re-enqueue it — confirm the input
# constraints rule that out.
while True:
    cur = li.popleft()
    if cur[0] == 100:
        print(cur[1])
        break
    else:
        roll(cur)
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from app import db
# Engine/session wiring: a SQLite file DB with one scoped session shared
# by the whole app. NOTE(review): autocommit=True is an unusual choice
# for a web-app session — confirm it is intentional.
engine = create_engine('sqlite:///user.db', echo=True)
db_session = scoped_session(sessionmaker(autocommit=True,
                                         autoflush=False,
                                         bind=engine))
Base = declarative_base()
# Lets model classes be queried as Model.query.<...>.
Base.query = db_session.query_property()
# Set your classes here.
class User(Base):
    """Account record: credentials plus a one-letter role and a region.

    NOTE(review): columns are declared through the Flask-SQLAlchemy
    ``db`` object while the class derives from a plain declarative
    ``Base`` — confirm both metadata registries agree.
    """
    __tablename__ = 'Users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), unique=True)
    email = db.Column(db.String(120), unique=True)
    # NOTE(review): stored as a plain string; no hashing is visible in
    # this module — verify passwords are hashed before insert.
    password = db.Column(db.String(30))
    user_type = db.Column(db.String(1))
    region = db.Column(db.String(20))

    def __init__(self, name, email, password, user_type, region):
        self.name = name
        self.email = email
        self.password = password
        self.user_type = user_type
        self.region = region
class Transaction(Base):
    """A dated purchase amount linked to the user who made it."""
    __tablename__ = 'Transaction'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): user_id is a String(10) foreign key onto the integer
    # Users.id column — confirm the type mismatch is harmless here.
    user_id = db.Column(db.String(10), db.ForeignKey('Users.id'))
    create_user_id = db.relationship("User", foreign_keys=user_id)
    date = db.Column(db.Date)
    amount = db.Column(db.Integer)
    region = db.Column(db.String(20))

    def __init__(self, user_id, date, amount, region):
        self.user_id = user_id
        self.date = date
        self.amount = amount
        self.region = region
class Retailer(Base):
    """A retailer's asking price for one vegetable."""
    __tablename__ = 'Retailer'
    id = db.Column(db.Integer, primary_key=True)
    retailer_name = db.Column(db.String(10))
    vegetable_name = db.Column(db.String(60))
    price = db.Column(db.Float)

    def __init__(self, retailer_name, vegetable_name, price):
        self.retailer_name = retailer_name
        self.vegetable_name = vegetable_name
        self.price = price
class Wholeseller(Base):
    """A wholesaler's asking price for one vegetable.

    NOTE(review): price is Integer here but Float on Retailer — confirm
    the inconsistency is intended.
    """
    __tablename__ = 'Wholeseller'
    id = db.Column(db.Integer, primary_key=True)
    wholeseller_name = db.Column(db.String(10))
    vegetable_name = db.Column(db.String(60))
    price = db.Column(db.Integer)

    def __init__(self, wholeseller_name, vegetable_name, price):
        self.wholeseller_name = wholeseller_name
        self.vegetable_name = vegetable_name
        self.price = price
class Government(Base):
    """Government reference price for one vegetable."""
    __tablename__ = 'Government'
    id = db.Column(db.Integer, primary_key=True)
    vegetable_name = db.Column(db.String(60))
    price = db.Column(db.Integer)

    def __init__(self, vegetable_name, price):
        self.vegetable_name = vegetable_name
        self.price = price
class Feedback(Base):
    """User-submitted feedback item with a title and review status."""
    __tablename__ = 'Feedback'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('Users.id'))
    create_user_id = db.relationship("User", foreign_keys=user_id)
    title = db.Column(db.String(60))
    status = db.Column(db.String(60))
    # Fixed: was the doubled assignment `date = date = db.Column(...)`.
    date = db.Column(db.Date)

    def __init__(self, user_id, date, title, status):
        self.user_id = user_id
        self.date = date
        self.title = title
        self.status = status
# Create tables: emit CREATE TABLE for every model registered on Base,
# at import time, against the SQLite engine.
Base.metadata.create_all(bind=engine)
|
import os
import cv2
import pickle
import imutils
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_fscore_support as prfs
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential, load_model, save_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dropout, Dense, Flatten
def load_data(data_path):
    """Load every image under *data_path*, labelled by its folder name.

    Expects one sub-directory per class; each image is read with OpenCV
    (BGR) and resized to 30x30. Returns (X, y) as numpy arrays.
    """
    X = []
    y = []
    labels = os.listdir(data_path)
    # Map each class label to the full paths of all images in its folder.
    img_path_per_label = {labels[i]: [os.path.join(data_path, labels[i], img_path) for img_path in os.listdir(data_path + '/' + labels[i])] for i in range(len(labels))}
    for key in list(img_path_per_label.keys()):
        for img_path in img_path_per_label[key]:
            # NOTE(review): cv2.INTER_BITS2 is an internal fixed-point
            # constant, not a resize interpolation flag — confirm
            # cv2.INTER_AREA (or similar) was intended.
            X.append(cv2.resize(cv2.imread(img_path), (30, 30), interpolation=cv2.INTER_BITS2))
            y.append(key)
    return np.array(X), np.array(y)
def increase_brightness(img, value=20):
    """Brighten a BGR image by adding *value* to the HSV V channel.

    Pixels whose brightened value would exceed 255 are clamped to 255.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hue, sat, val = cv2.split(hsv)
    ceiling = 255 - value
    # Add where it cannot overflow, saturate the rest.
    val[val <= ceiling] += value
    val[val > ceiling] = 255
    brightened = cv2.merge((hue, sat, val))
    return cv2.cvtColor(brightened, cv2.COLOR_HSV2BGR)
def display_random_set(data, labels):
    """Show 10 randomly chosen images from *data* in a 2x5 grid, titled by label."""
    for i in range(10):
        random_val = np.random.randint(low=0, high=len(data))
        plt.subplot(2, 5, (i + 1))
        # imutils converts OpenCV's BGR ordering to RGB for matplotlib.
        plt.imshow(imutils.opencv2matplotlib(data[random_val]))
        plt.title(labels[random_val])
        plt.axis(False)
    plt.show()
def build_model(num_classes, img_dim):
    """Build and compile the traffic-sign CNN.

    Two conv blocks (64 then 128 filters, each conv followed by batch
    norm, each block by 2x2 max-pooling), light dropout, then three
    dense layers before the softmax head. Compiled with SGD (Nesterov)
    and categorical cross-entropy; tracks accuracy and MSE.
    """
    model = Sequential()
    model.add(Conv2D(filters=64, kernel_size=(2, 2), padding='same', activation='relu', input_shape=img_dim))
    model.add(BatchNormalization())
    model.add(Conv2D(filters=64, kernel_size=(2, 2), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    sgd = SGD(learning_rate=0.001, nesterov=True, name='SGD_Optimizer')
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['categorical_accuracy', 'mse'])
    print(model.summary())
    return model
def train_model(x, y, x_val, y_val, model, train=False):
    """Fit *model* when train=True, checkpointing the best val_loss weights.

    Weights go to traffic_sign_model.h5 and the training history is
    pickled to traffic_sign.pickle. A no-op when train is False (the
    previously saved model is loaded elsewhere instead).
    """
    batch_size = 64
    num_epochs = 25
    if train:
        checkpoint = ModelCheckpoint(filepath='traffic_sign_model.h5', monitor='val_loss', save_best_only=True, verbose=1)
        history = model.fit(x=x, y=y, validation_data=(x_val, y_val), shuffle=True, batch_size=batch_size, epochs=num_epochs, callbacks=[checkpoint], verbose=1)
        save_history_file(file_name='traffic_sign.pickle', history=history)
def save_history_file(file_name, history):
    """Pickle a Keras History object's ``.history`` dict to *file_name*.

    Uses a context manager so the handle is closed even if pickling
    raises (the original opened the file without a ``with``/``close``
    guarantee on the error path).
    """
    with open(file_name, 'wb') as pickle_out:
        pickle.dump(history.history, pickle_out)
def load_history(file_name):
    """Return the history dict previously pickled by save_history_file.

    The original opened the file and never closed it (a file-handle
    leak); a context manager guarantees the handle is released.
    """
    with open(file_name, 'rb') as pickle_in:
        return pickle.load(pickle_in)
def plot_curves(history):
    """Plot training vs validation loss and MSE side by side.

    *history* is the dict saved by save_history_file (keys 'loss',
    'val_loss', 'mse', 'val_mse').
    """
    plt.figure(figsize=(10, 5))
    sns.set_style(style='dark')
    plt.subplot(1, 2, 1)
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    plt.xlabel('Iterations')
    plt.ylabel('Error')
    plt.title('Training & Validation Loss')
    plt.legend(['Train loss', 'Validation loss'])
    plt.subplot(1, 2, 2)
    plt.plot(history['mse'])
    plt.plot(history['val_mse'])
    plt.xlabel('Iterations')
    plt.ylabel('Error')
    plt.title('Training & Validation MSE')
    plt.legend(['Train mse', 'Validation mse'])
    plt.show()
def accuracy_per_class(labels, precision, recall, f1):
    """Plot per-class precision, recall and F1 as three stacked bar charts.

    The three metric arrays are parallel to *labels* (one value per class).
    """
    # plt.subplots(figsize=(18, 30))
    x = range(len(labels))
    plt.subplot(3, 1, 1)
    plt.title("Precision per class")
    plt.ylim(0, 1.00)
    plt.bar(x, precision, color='Red')
    plt.xticks(x, rotation=90)
    plt.subplot(312)
    plt.title('Recall per class')
    plt.ylim(0, 1.00)
    plt.bar(x, recall, color='Green')
    plt.xticks(x, rotation=90)
    plt.subplot(313)
    plt.title('F1 score per class')
    plt.ylim(0, 1.00)
    plt.bar(x, f1, color='Blue')
    plt.xticks(x, rotation=90)
    plt.show()
def load_test_data(test_data_dir, test_data_labels_dir):
    """Load test images and integer labels from a (filename, label) CSV.

    The CSV's header row is skipped; column 0 is the image file name
    (relative to *test_data_dir*), column 1 the class label. Images are
    resized to 30x30 to match the training input shape.
    """
    # reading csv file
    data = np.loadtxt(test_data_labels_dir, delimiter=',', skiprows=1, dtype=str)
    x_test = np.array([os.path.join(test_data_dir, img_name) for img_name in data[:, 0]])
    # NOTE(review): cv2.INTER_BITS2 is not a resize interpolation flag —
    # confirm the intended flag (same issue as load_data).
    x_test = np.array([cv2.resize(cv2.imread(img_path), (30, 30), interpolation=cv2.INTER_BITS2) for img_path in x_test])
    # Fixed: np.int was deprecated and removed (NumPy >= 1.24); the
    # builtin int is the documented replacement.
    y_test = np.array(data[:, 1]).astype(int)
    return x_test, y_test
def main():
    """End-to-end pipeline: load data, train, evaluate, and plot metrics."""
    # Reading Data from folders
    X, y = load_data(data_path='./crop_dataset/crop_dataset/')
    print(f"Data shape: {X.shape}, Labels: {y.shape}\n")
    # Displaying random set of images from data
    display_random_set(data=X, labels=y)
    # Splitting data into training and testing data, training will consist of 70% of the data and 30% of the remaining
    # will be testing data.
    x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=42, shuffle=True)
    print(f"Training Data: {x_train.shape}, Training labels: {y_train.shape}\nValidation Data: {x_val.shape}, "
          f"Validation labels: {y_val.shape}\n")
    # Adjusting labels to be represented as categorical data (one-hot).
    y_train = to_categorical(y=y_train, num_classes=len(np.unique(y)))
    y_val = to_categorical(y=y_val, num_classes=len(np.unique(y)))
    # Creating Neural network model.
    model = build_model(num_classes=len(np.unique(y)), img_dim=x_train[0].shape)
    # To train the model again change train value to True, change to False to not train.
    train_model(x=x_train, y=y_train, x_val=x_val, y_val=y_val, model=model, train=True)
    print("[In progress] Loading H5 model and history file...")
    classifier = load_model(filepath='traffic_sign_model.h5')
    hist_loaded = load_history(file_name='traffic_sign.pickle')
    print("[Done] Loading H5 model and history file...")
    # Loading data for testing model.
    x_test, y_test = load_test_data(test_data_dir='./test_data/test_data', test_data_labels_dir='./test_labels.csv')
    # NOTE(review): Sequential.predict_classes was removed in newer
    # TensorFlow releases — confirm the pinned TF version still has it,
    # or switch to np.argmax(model.predict(x), axis=-1).
    predictions = classifier.predict_classes(x_test)
    accuracy = np.array([1 if predictions[i] == int(y_test[i]) else 0 for i in range(len(predictions))])
    print(f"Accuracy on test data: {np.mean(accuracy) * 100} %.")
    # plotting loss and mse curves for training and validation steps
    plot_curves(hist_loaded)
    # plotting accuracy bar graph per class
    labels = np.unique(y)
    precision, recall, f1, support = prfs(y_true=y_test, y_pred=predictions, average=None)
    accuracy_per_class(labels, precision, recall, f1)


if __name__ == '__main__':
    main()
|
from gamelib import event
# Collision-object IDs used by this level script.
B_LEFT = 1  # exit trigger to level_3a
B_TOP = 2  # exit trigger to level_3g
TRIG = 89  # dialogue trigger zone (see talk)
DOOR = 71  # door gated on the '3_gen_destroyed' flag
TURRETS = [72, 73]  # turrets deactivated once the generator is destroyed
def go_top():
    """Top exit: nudge the player out of the trigger, load level_3g, stop music."""
    event.move_player(0, -50)
    event.go_to_level('level_3g', True, True, True)
    event.stop_music()
def go_left():
    """Left exit: nudge the player out of the trigger and load level_3a."""
    event.move_player(50, 0)
    event.go_to_level('level_3a', True, True, True)
def gen_msg():
    """Show the generator objective popup (scheduled with a delay by talk)."""
    event.show_message("Objective: Destroy Generator")
def talk():
    """Dialogue trigger: play the post-generator chat once, else the intro once.

    Each branch is guarded by its own one-shot flag ('showed_msg_2' /
    'showed_msg') so the conversations never repeat.
    """
    if event.get_flag('3_gen_destroyed') and not event.get_flag('showed_msg_2'):
        event.set_flag('showed_msg_2', True)
        event.show_ai_message(msges['human2_3fbridge_1'], head='Terran_2')
        event.show_ai_message(msges['human1_3fbridge_2'], 2, head='Terran_1')
        event.show_ai_message(msges['vn4n_3fbridge_3'])
    else:
        if event.get_flag('showed_msg'): return
        event.set_flag('showed_msg', True)
        event.show_ai_message(msges['vn4n_10'])
        # Objective popup follows the intro line after a 2-unit delay.
        event.register_timed_func(gen_msg, 2)
def init():
    """Level entry hook: load messages, wire exits/dialogue, sync door state.

    NOTE(review): the door/turret sync is duplicated verbatim in
    on_load — consider a shared helper.
    """
    global msges
    msges = event.file_to_dict('Messages_3.txt')
    event.register_collision_func(B_TOP, go_top)
    event.register_collision_func(B_LEFT, go_left)
    event.register_collision_func(TRIG, talk)
    # Door stays shut until the generator objective is done; once done,
    # the door opens and the turrets are deactivated.
    if not event.get_flag("3_gen_destroyed"):
        event.close_door(DOOR)
    else:
        event.open_door(DOOR)
        for obj_id in TURRETS:
            try:
                event.get_object(obj_id).active = False
            except:
                # Best-effort: the turret may already be gone.
                # NOTE(review): bare except also hides real errors —
                # narrow it if the engine documents what get_object raises.
                pass  # don't care
def on_load():
    """Save-game load hook: reload messages and re-sync door/turret state.

    NOTE(review): duplicates init's door/turret sync verbatim — consider
    a shared helper.
    """
    global msges
    msges = event.file_to_dict('Messages_3.txt')
    if not event.get_flag("3_gen_destroyed"):
        event.close_door(DOOR)
    else:
        event.open_door(DOOR)
        for obj_id in TURRETS:
            try:
                event.get_object(obj_id).active = False
            except:
                # Best-effort: the turret may already be gone (see init).
                pass  # don't care
|
# Read whitespace-separated integers from one stdin line and print the
# smallest strictly positive one (1000 doubles as the sentinel and the
# answer when nothing smaller is found). The original bound the result
# to `min`, shadowing the builtin.
tokens = input().split()
smallest = 1000
for token in tokens:
    value = int(token)
    if (value < smallest) and (value > 0):
        smallest = value
print(smallest)
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 13:06:48 2019
@author: Ying-Fang.Kao
Pymc3 quick start
"""
# NOTE: notebook-style script — lines starting with % are IPython magics
# and only run under IPython/Jupyter, not plain `python`.
%matplotlib inline
import numpy as np
import theano.tensor as tt
import pymc3 as pm
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context('notebook')
plt.style.use('seaborn-darkgrid')
print('Running on PyMC3 v{}'.format(pm.__version__))
#%% A minimal model: one latent mean, 100 noisy observations of it.
with pm.Model() as model:
    # unobserved RV
    mu = pm.Normal('mu', mu = 0, sigma = 1)
    # observed RV, needs data to be passed into it
    obs = pm.Normal('obs', mu = mu, sigma = 1, observed = np.random.randn(100))
# print variables
model.basic_RVs
model.free_RVs
model.observed_RVs
#%% logp: evaluate the model's joint log-density at a point.
model.logp({'mu': 0})
## the second case is not faster as the guide suggested
# NOTE(review): the second %timeit re-times model.logp rather than the
# cached `logp` binding — likely meant `%timeit logp({mu: 0.1})`.
%timeit model.logp({mu: 0.1})
logp = model.logp
%timeit model.logp({mu:0.1})
#%% Probability Distributions: browse what's available.
help(pm.Normal)
dir(pm.distributions.mixture)
dir(model.mu)
#%% Deterministic transforms
# freely do the algebra on RVs (results are Theano tensors)
with pm.Model():
    x = pm.Normal('x', mu = 0, sigma = 1)
    y = pm.Gamma('y', alpha = 1, beta = 1)
    summed = x + y
    squared = x**2
    sined = pm.math.sin(x)
# to keep track of a transformed variable (record it in the trace), use pm.Deterministic
with pm.Model():
    x = pm.Normal('x', mu = 0, sigma = 1)
    plus_2 = pm.Deterministic('x plus 2', x + 2)
#%% Automatic transforms of bounded RVs
# In order to sample models more efficiently, PyMC3 automatically transforms bounded RVs to be unbounded.
with pm.Model() as model:
    x = pm.Uniform('x', lower = 0, upper = 1)
model.free_RVs
model.deterministics
# we can turn transforms off
with pm.Model() as model:
    x = pm.Uniform('x', lower = 0, upper = 1, transform = None)
print(model.free_RVs)
# or specify a different transformation than the default
import pymc3.distributions.transforms as tr
with pm.Model() as model:
    # use the default log transformation
    x1 = pm.Gamma('x1', alpha = 1, beta = 1)
    # specified a different transformation
    x2 = pm.Gamma('x2', alpha = 1, beta = 1, transform = tr.log_exp_m1)
print('The default transformation of x1 is: ' + x1.transformation.name)
print('The user specified transformation of x2 is: ' + x2.transformation.name)
class Exp(tr.ElemwiseTransform):
    """Change-of-variables transform pairing exp/log.

    ``forward`` applies exp, ``backward`` inverts it with log, and
    ``jacobian_det`` supplies the log-Jacobian term (-log x) used to
    correct the density under the change of variables.
    """
    name = "exp"

    def backward(self, x):
        return tt.log(x)

    def forward(self, x):
        return tt.exp(x)

    def jacobian_det(self, x):
        return -tt.log(x)
# A Normal pushed through Exp() should match a Lognormal — compare both
# densities on the same grid.
with pm.Model() as model:
    x1 = pm.Normal('x1', 0., 1., transform=Exp())
    x2 = pm.Lognormal('x2', 0., 1.)
lognorm1 = model.named_vars['x1_exp__']
lognorm2 = model.named_vars['x2']
_, ax = plt.subplots(1, 1, figsize=(10, 6))
x = np.linspace(0., 10., 100)
ax.plot(
    x,
    np.exp(lognorm1.distribution.logp(x).eval()),
    '--',
    alpha=.5,
    label='log(y) ~ Normal(0, 1)')
ax.plot(
    x,
    np.exp(lognorm2.distribution.logp(x).eval()),
    alpha=.5,
    label='y ~ Lognormal(0, 1)')
plt.legend();
#%% Ordered RV: x1, x2 ~ uniform(0,1) with x1 < x2, via a chained transform.
Order = tr.Ordered()
Logodd = tr.LogOdds()
chain_tran = tr.Chain([Logodd, Order])
with pm.Model() as m0:
    x = pm.Uniform(
        'x', 0., 1., shape=2,
        transform=chain_tran,
        testval=[0.1, 0.9])
    trace = pm.sample(5000, tune=1000, progressbar=False)
_, ax = plt.subplots(1, 2, figsize=(10, 5))
for ivar, varname in enumerate(trace.varnames):
    ax[ivar].scatter(trace[varname][:, 0], trace[varname][:, 1], alpha=.01)
    ax[ivar].set_xlabel(varname + '[0]')
    ax[ivar].set_ylabel(varname + '[1]')
    ax[ivar].set_title(varname)
plt.tight_layout()
#%% List of RVs / higher-dimensional RVs
# bad example: ten separate scalar RVs
with pm.Model():
    x = [pm.Normal('x_{}'.format(i), mu = 0, sigma = 1) for i in range(10)]
# good example: one vector-valued RV
# NOTE(review): `mu` here is passed positionally and resolves to the
# tensor left over from the earlier model cell — the upstream guide
# uses the literal 0 (`mu=0`); confirm.
with pm.Model() as model:
    x = pm.Normal('x', mu, sigma = 1, shape = 10)
# we can index into x and do linear algebra
with model:
    y = x[0] * x[1]
    x.dot(x.T)
#%% Initialisation with test_values
# While PyMC3 tries to automatically initialize models it is sometimes helpful to define initial values for RVs. This can be done via the testval kwarg:
# NOTE(review): the RV name below is the empty string '' — the upstream
# guide names it 'x'; confirm this was intended.
with pm.Model():
    x = pm.Normal('', mu = 0, sigma = 1, shape = 5)
    print(x.tag.test_value)
with pm.Model():
    x = pm.Normal('x', mu = 0, sigma = 1, shape = 5, testval= np.random.randn(5))
    print(x.tag.test_value)
#%% Inference - Sampling and Variational
with pm.Model() as model:
    mu = pm.Normal('mu', mu =0, sigma = 1)
    obs = pm.Normal('obs', mu = mu, sigma = 1, observed = np.random.randn(100))
    trace = pm.sample(1000, tune = 500)
#%% multiple chains in parallel using the cores kwarg
with pm.Model() as model:
    mu = pm.Normal('mu', mu = 0, sigma = 1)
    obs = pm.Normal('obs', mu = mu, sigma = 1, observed= np.random.randn(100))
    trace = pm.sample(cores=4)
trace['mu'].shape
#%%
# number of chains
trace.nchains
# get values of a single chain
trace.get_values('mu', chains = 1).shape
#%% Other samplers
# show only the methods with upper case in the beginning
list(filter(lambda x: x[0].isupper(), dir(pm.step_methods)))
# sampling methods can be passed to sample
with pm.Model() as model:
    mu = pm.Normal('mu', mu = 0, sigma = 1)
    obs = pm.Normal('obs', mu = mu, sigma = 1, observed = np.random.randn(100))
    step = pm.Metropolis()
    trace = pm.sample(1000, step = step)
#%% assign variables to different step methods
with pm.Model() as model:
    mu = pm.Normal('mu', mu=0, sigma=1)
    sd = pm.HalfNormal('sd', sigma=1)
    obs = pm.Normal('obs', mu=mu, sigma=sd, observed=np.random.randn(100))
    step1 = pm.Metropolis(vars=[mu])
    step2 = pm.Slice(vars=[sd])
    trace = pm.sample(10000, step=[step1, step2], cores=4)
#%% Analyze sampling results
# need to have arviz library for the plots
# traceplot
pm.traceplot(trace)
plt.show()
# gelman_rubin (R-hat convergence diagnostic)
pm.gelman_rubin(trace)
# forestplot
pm.forestplot(trace)
# plot_posterior
pm.plot_posterior(trace)
# energyplot (diagnostic for NUTS/HMC sampling)
with pm.Model() as model:
    x = pm.Normal('x', mu = 0, sigma = 1, shape = 100)
    trace = pm.sample(cores=4)
    pm.energyplot(trace)
#%% Variational inference
# this is much faster but less accurate - with pm.fit()
with pm.Model() as model:
    mu = pm.Normal('mu', mu=0, sigma=1)
    sd = pm.HalfNormal('sd', sigma=1)
    obs = pm.Normal('obs', mu=mu, sigma=sd, observed=np.random.randn(100))
    approx = pm.fit()
approx.sample(500)
#%% full-rank ADVI (Automatic Differentiation Variational Inference)
mu = pm.floatX([0., 0.])
cov = pm.floatX([[1, .5], [.5, 1.]])
with pm.Model() as model:
    pm.MvNormal('x', mu=mu, cov=cov, shape=2)
    approx = pm.fit(method='fullrank_advi')
# equivalently, using the object-oriented interface
with pm.Model() as model:
    pm.MvNormal('x', mu=mu, cov=cov, shape=2)
    approx = pm.FullRankADVI().fit()
plt.figure()
trace = approx.sample(10000)
sns.kdeplot(trace['x'][:,0], trace['x'][:,1])
#%% Stein Variational Gradient Descent (SVGD) uses particles to estimate the posterior
w = pm.floatX([.2, .8])
mu = pm.floatX([-.3, .5])
sd = pm.floatX([.1, .1])
with pm.Model() as model:
    pm.NormalMixture('x', w=w, mu=mu, sigma =sd)
    approx = pm.SVGD(n_particles = 200, jitter = 1).fit()
plt.figure()
trace = approx.sample(10000)
sns.distplot(trace['x']);
#%% Posterior Predictive Sampling
# The sample_posterior_predictive() function performs prediction on hold-out data and posterior predictive checks
data = np.random.randn(100)
with pm.Model() as model:
    mu = pm.Normal('mu', mu = 0, sigma = 1)
    sd = pm.HalfNormal('sd', sigma = 1)
    obs = pm.Normal('obs', mu = mu, sigma = sd, observed = data)
    trace = pm.sample()
with model:
    post_pred = pm.sample_posterior_predictive(trace, samples = 5000)
# sample_posterior_predictive() returns a dict with a key for every observed node
post_pred['obs'].shape
fig, ax = plt.subplots()
sns.distplot(post_pred['obs'].mean(axis = 1), label = 'Posterior predictive means', ax = ax)
ax.axvline(data.mean(), ls = '--', color = 'r', label = 'True mean')
ax.legend()
#%% Predicting on hold-out data
# relies on theano.shared variables: theano tensors whose values can be swapped later
import theano
x = np.random.randn(100)
y = x > 0
x_shared = theano.shared(x)
y_shared = theano.shared(y)
with pm.Model() as model:
    coeff = pm.Normal('x', mu = 0, sigma = 1)
    logistic = pm.math.sigmoid(coeff * x_shared)
    pm.Bernoulli('obs', p=logistic, observed = y_shared)
    trace = pm.sample()
# Swap in the hold-out inputs before predicting.
x_shared.set_value([-1, 0, 1])
y_shared.set_value([0,0,0]) # dummy values
#%%
with model:
    post_pred = pm.sample_posterior_predictive(trace, samples = 500)
post_pred['obs'].mean(axis=0)
|
import time
import traceback
from typing import Dict
from bearlibterminal import terminal
from debug import Debug
from engine.input.long_notation_parser import LongNotationParser
from engine.input.notation_parser import NotationParser, InvalidNotationException
from engine.map.components.board import Board, IllegalMoveException
from engine.map.util.player import Player
from engine.menu import MenuOption
from engine.render.board_console import BoardConsole
from engine.render.captures_console import CapturesConsole
from engine.render.text_console import TextConsole
from engine.stockfish import StockfishWrapper
class BackToMenuException(Exception):
    """Raised (on Escape) to abandon the current game and return to the menu."""
    pass
class GameOverException(Exception):
    """Raised after checkmate/stalemate has been rendered to end the game loop."""
    pass
class RetryMoveException(Exception):
    """Raised when a move is unparsable or illegal so the loop can re-prompt."""
    pass
class Game():
    """One chess game: renders the board, collects moves from humans or
    Stockfish, applies them, and drives hints/score/weak-square overlays.

    The owner calls loop() repeatedly; Back-to-menu, game-over and
    retry-move conditions are signalled via the exceptions above.
    """

    def __init__(self, board_con: BoardConsole, text_con: TextConsole, captures_con: CapturesConsole,
                 player_options: Dict[int, MenuOption]):
        # Player 1 moves first; captured pieces are tracked per player.
        self._player = 1
        self._captures = {1: [], 2: []}
        self._move_number = 0
        self._move_long_notations = []
        self._hint_level = 0
        self._show_score = False
        self._show_weak_squares = False
        self._text_con = text_con
        self._text_con.set_player(self._player)
        self._captures_con = captures_con
        self._board_con: BoardConsole = board_con
        self._notation_parser = NotationParser()
        self._long_notation_parser = LongNotationParser()
        self._board: Board = Board(long_notation_parser=self._long_notation_parser)
        # Any player not marked human is driven by Stockfish at the
        # configured level.
        self._stockfish_lvl_by_player = dict()
        for player in player_options.keys():
            options = player_options[player]
            if not options.is_human:
                self._stockfish_lvl_by_player[player] = options.cpu_lvl
        self._stockfish = StockfishWrapper()

    def loop(self):
        """Run one full move: render, read, apply, re-render, switch turns."""
        self._move_setup()
        self._make_move(self._get_move())
        self._render_move_end()
        self._prepare_for_next_move()

    def _move_setup(self):
        """Render the pre-move screen and precompute hint/weak-square data."""
        self._board_con.render(self._board)
        self._captures_con.render(self._captures)
        self._text_con.set_evaluation(self._stockfish.get_evaluation())
        self._render_detail()
        terminal.refresh()
        # this takes a small but noticeable amount of time so render after refreshing everything else
        if self._board.is_in_check(self._player):
            self._text_con.render_status('check')
            terminal.refresh()
        best_move_coords = self._long_notation_parser.parse_to_coords(self._stockfish.get_best_move())
        self._hint_coords = best_move_coords
        self._weak_coords = self._board.get_weak_coords(Player.other(self._player))

    def _get_move(self):
        """Return the current player's move: typed notation for humans,
        Stockfish output for CPU players. Raises RetryMoveException on
        unparsable input."""
        if self._player not in self._stockfish_lvl_by_player:
            notation_input = self._read_input(10)
            try:
                move = self._notation_parser.parse_to_move(self._player, notation_input)
            except InvalidNotationException as e:
                terminal.clear()
                self._text_con.render_error(str.format('\'{}\' {}', notation_input, str(e)))
                traceback.print_exc()
                raise RetryMoveException()
        else:
            # CPU turn: still poll input so the user can interrupt, then
            # pause briefly so the move is visible.
            self._check_input()
            time.sleep(.5)
            long_notation_input = self._stockfish.get_move_at_lvl(self._stockfish_lvl_by_player[self._player])
            Debug.log(
                str.format('stockfish lvl {} (p{}): {}', self._stockfish_lvl_by_player[self._player], self._player,
                           long_notation_input))
            move = self._long_notation_parser.parse_to_move(self._player, long_notation_input)
        return move

    def _make_move(self, move):
        """Apply *move* to the board; raises RetryMoveException if illegal."""
        try:
            move_result = self._board.make_move(move, self._captures, self._move_number)
            self._move_long_notations.append(move_result.long_notation)
        except IllegalMoveException as e:
            terminal.clear()
            self._text_con.render_error(str.format('{}', str(e)))
            raise RetryMoveException()

    def _render_move_end(self):
        """Sync Stockfish with the new position and detect mate/stalemate."""
        self._stockfish.set_position(self._move_long_notations)
        stockfish_evaluation = self._stockfish.get_evaluation()
        next_best_move = self._stockfish.get_best_move()
        # if player made a move and stockfish evaluates M0 that player just won
        if stockfish_evaluation['type'] == 'mate' and stockfish_evaluation['value'] == 0:
            self._gameover(str.format('checkmate! p{} wins (press any key)', self._player))
        # if player made a move and stockfish has no next best move then stalemate
        if next_best_move is None:
            self._gameover('stalemate! (press any key)')
        terminal.clear()

    def _gameover(self, message):
        """Render the final position with *message*, then raise GameOverException."""
        terminal.clear()
        self._board_con.render(self._board)
        self._captures_con.render(self._captures)
        self._text_con.render_green(message)
        terminal.refresh()
        raise GameOverException()

    def _prepare_for_next_move(self):
        """Switch the active player and reset per-move overlay state."""
        self._player = Player.other(self._player)
        self._text_con.set_player(self._player)
        self._move_number += 1
        self._hint_level = 0
        self._show_weak_squares = False

    def _read_input(self, max: int) -> str:
        """Read up to *max* characters of notation, handling meta keys.

        NOTE(review): locals `max` and `input` shadow the builtins of
        the same names — harmless here, but worth renaming.
        """
        input = ''
        while True:
            self._text_con.render_notation_input(input)
            key = terminal.read()
            if self._handle_meta_input(key):
                continue
            elif key == terminal.TK_RETURN:
                return input
            elif key == terminal.TK_BACKSPACE and len(input) > 0:
                input = input[:-1]
            elif terminal.check(terminal.TK_WCHAR) and len(input) < max:
                input += chr(terminal.state(terminal.TK_WCHAR))

    def _check_input(self):
        '''
        allow interrupt for a cpu vs cpu game
        '''
        if not terminal.has_input():
            return
        key = terminal.read()
        self._handle_meta_input(key)

    def _handle_meta_input(self, key):
        """Handle non-notation keys (quit, score, hint, weak squares).

        Returns True when *key* was consumed as a meta key.
        """
        if key == terminal.TK_ESCAPE:
            raise BackToMenuException()
        elif key == terminal.TK_CLOSE:
            raise SystemExit()
        elif key == terminal.TK_F1:
            self._show_score = not self._show_score
            self._render_detail()
            return True
        elif key == terminal.TK_SLASH:
            # Hint cycles through 3 states: off -> level 1 -> level 2.
            self._hint_level = (self._hint_level + 1) % 3
            if self._hint_level == 0:
                self._board_con.refresh(self._board)
            else:
                self._board_con.render_hint(hint_level=self._hint_level, hint_coords=self._hint_coords, board=self._board)
            return True
        elif key == terminal.TK_PERIOD:
            self._show_weak_squares = not self._show_weak_squares
            if self._show_weak_squares:
                self._board_con.render_weak(weak_coords=self._weak_coords, board=self._board)
            else:
                self._board_con.refresh(self._board)
            return True
        return False

    def _render_detail(self):
        """Render either the score panel or the key guide in the text console."""
        if self._show_score:
            self._text_con.render_score()
        else:
            self._text_con.render_key_guide()
|
# -*- coding: utf-8 -*-
import pandas as pd
def main():
    """Demo: build and print a 1-D pandas Series of heights keyed by name."""
    heights = {'miho': 158, 'saori': 157, 'yukari': 157}
    data = pd.Series(heights)
    print(data)


if __name__ == "__main__":
    main()
|
import cv2
import numpy as np
# Uses the laptop's built-in camera (device index 0); pass 1, 2, ... for other cameras.
class Camera:
    """Webcam capture loop that sketches an edge rendering of each frame
    and boxes detected faces, overlaying a recognition status message."""

    def __init__(self, NO):
        # NO is the OpenCV device index (0 = built-in webcam).
        self.cap = cv2.VideoCapture(NO)
        # NOTE(review): hard-coded absolute cascade path — breaks on any
        # other machine; consider cv2.data.haarcascades.
        self.face_cascade = cv2.CascadeClassifier("/anaconda3/pkgs/libopencv-3.4.2-h7c891bd_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
        # Recognition state: 2 = none yet, 0 = failed, 1 = succeeded.
        self.failed = 2

    def closeCamera(self):
        # Release the camera device.
        self.cap.release()

    def runCamera(self):
        """Main loop: read frames until 'q' quits ('w' feeds the recognizer)."""
        while True:
            sucess, img = self.cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            x = int(img.shape[0] / 2)
            y = int(img.shape[1] / 2)
            # Gaussian blur before thresholding, drawn onto a black canvas.
            temp = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
            GaussianImage = cv2.GaussianBlur(gray, (7, 7), 0)
            # NOTE(review): the third argument of adaptiveThreshold must
            # be an adaptiveMethod (cv2.ADAPTIVE_THRESH_MEAN_C or
            # _GAUSSIAN_C); cv2.CHAIN_APPROX_NONE is a contour flag —
            # confirm which method was intended.
            thresh = cv2.adaptiveThreshold(GaussianImage, 255, cv2.CHAIN_APPROX_NONE, cv2.THRESH_BINARY, 9
                                           , 8)
            # NOTE(review): 3-value findContours return is the OpenCV 3.x
            # API; OpenCV 4 returns (contours, hierarchy).
            _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
            cv2.drawContours(temp, contours, -1, (255, 255, 255), 5)
            # Detect face locations on the blurred grayscale frame.
            faces = self.face_cascade.detectMultiScale(GaussianImage, 1.3, 5)
            temp = cv2.GaussianBlur(temp, (3,3), 0)
            if sucess:
                if len(faces) > 0:
                    for faceRect in faces:
                        x, y, w, h = faceRect
                        cv2.rectangle(temp, (x, y), (x + w - 15, y + h + 15), (255, 255, 255), 10)
                        if self.failed == 0:
                            cv2.putText(img=temp, text="Distinguish failed, Please try again", fontScale=1,
                                        fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 0, 255), org=(x, y))
                        elif self.failed == 1:
                            cv2.putText(img=temp, text="Distinguish sucessful, Please waiting for a moment", fontScale=1,
                                        fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 255, 255), org=(x, y))
                cv2.imshow('img', temp)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            elif cv2.waitKey(1) & 0xFF == ord('w'):
                # Hook for the recognition model's result; z is a stub.
                z = 0
                if z == 0:
                    self.failed = 0
                else:
                    self.failed = 1
                if z == 1:
                    pass
                elif z == 2:
                    pass
                elif z == 3:
                    pass
        self.closeCamera()
# test = Camera(0)
# test.runCamera() |
import requests, xmltodict, json
class PortalData:
    """Resolve a lat/lng to open-data portals at city/county/state/country level.

    Usage order matters: get_geonames() fills place names, get_portals()
    looks them up in the portals JSON, build_response() assembles
    ``self.res``. Portal attributes are set only when a lookup succeeds;
    build_response relies on AttributeError for the missing ones.
    """

    def __init__(self, latitude, longitude):
        # NOTE(review): coordinates are concatenated into the URL, so
        # they are expected to be strings — confirm with callers.
        self.latitude = latitude
        self.longitude = longitude

    def get_geonames(self):
        """Reverse-geocode the point via the GeoNames XML API.

        NOTE(review): assumes the response always carries an 'address'
        element with all these keys — verify for non-US coordinates.
        """
        geonames_url = 'http://api.geonames.org/extendedFindNearby?lat='+self.latitude+'&lng='+self.longitude+'&username=ondrae'
        response = requests.get(geonames_url)
        geonames = xmltodict.parse(response.text)
        self.city_name = geonames['geonames']['address']['placename']
        self.county_name = geonames['geonames']['address']['adminName2']
        self.state_code = geonames['geonames']['address']['adminCode1']
        self.state_name = geonames['geonames']['address']['adminName1']
        self.country_code = geonames['geonames']['address']['countryCode']

    def get_portals(self):
        """Fetch the portals index and keep whichever levels match this place."""
        self.city_state_country = self.city_name + ', ' + self.state_name + ', ' + self.country_code
        self.county_state_country = self.county_name + ', ' + self.state_name + ', ' + self.country_code
        self.state_country = self.state_name + ', ' + self.country_code
        response = requests.get('https://raw.github.com/ondrae/portalportal/master/static/data/portals.json')
        portals = response.json()
        # Each level is optional: a KeyError just means no portal there.
        try:
            self.city_portal = portals['city'][self.city_state_country]
        except KeyError:
            pass
        try:
            self.county_portal = portals['county'][self.county_state_country]
        except KeyError:
            pass
        try:
            self.state_portal = portals['state'][self.state_country]
        except KeyError:
            pass
        try:
            self.country_portal = portals['country'][self.country_code]
        except KeyError:
            pass

    def build_response(self):
        """Assemble self.res from whichever portal attributes exist.

        AttributeError marks a level that get_portals() never set.
        """
        self.res = {}
        try:
            self.res["country"] = {"name" : self.country_code, "data_portal_url" : self.country_portal}
        except AttributeError:
            pass
        try:
            self.res["state"] = {"name" : self.state_name, "data_portal_url" : self.state_portal}
        except AttributeError:
            pass
        try:
            self.res["county"] = {"name" : self.county_name, "data_portal_url" : self.county_portal}
        except AttributeError:
            pass
        try:
            self.res["city"] = {"name" : self.city_name, "data_portal_url" : self.city_portal}
        except AttributeError:
            pass
#!/usr/bin/python
# Python 2 only (xmlrpclib, print statements): exercise an OpenERP/Odoo-style
# XML-RPC endpoint — read products and UoMs, then create an empty sale.
# NOTE(review): credentials are embedded in the URL in clear text.
import xmlrpclib
PASSWORD = 'test'
USER = 'admin'
# Get user_id and session
s = xmlrpclib.ServerProxy ('http://%s:%s@192.168.30.153:8069/test' % (USER, PASSWORD))
# Get the user context
context = s.model.res.user.get_preferences(True, {})
# Print all methods (introspection)
methods = s.system.listMethods()
# Search parties and print rec_name
product_ids = s.model.product.product.search(
    [],       # search clause
    0,        # offset
    10,       # limit
    False,    # order
    context)  # context
uoms = s.model.product.uom.search([],  # search clause
    0,        # offset
    1000,     # limit
    False,    # order
    context)  # context
print s.model.product.uom.read(uoms, ['name', 'symbol'], context)
print s.model.product.product.read(
    product_ids,                      # party ids
    ['rec_name', 'code', 'sale_uom'], # list of fields
    context)                          # context
sale = s.model.sale.sale.create('', context)
print sale
# Execute report
#type, data, _ = s.report.party.label.execute(
#    party_ids,  # party ids
#    {},         # data
#    context)    # context
|
#!/usr/bin/env python3
import sys
# Return the trie built from patterns
# in the form of a dictionary of dictionaries,
# e.g. {0:{'A':1,'T':2},1:{'C':3}}
# where the key of the external dictionary is
# the node ID (integer), and the internal dictionary
# contains all the trie edges outgoing from the corresponding
# node, and the keys are the letters on those edges, and the
# values are the node IDs to which these edges lead.
def build_trie(patterns):
    """Build a trie over *patterns* as a dict of dicts.

    The outer key is a node id (the root is 0); each inner dict maps an edge
    letter to the id of the child node, e.g. {0: {'A': 1, 'T': 2}, 1: {'C': 3}}.
    Node ids are assigned in creation order.
    """
    trie = {0: {}}
    next_id = 0
    for pattern in patterns:
        node = 0
        for letter in pattern:
            children = trie[node]
            if letter in children:
                # edge already present — just walk down
                node = children[letter]
            else:
                # allocate a fresh leaf and hang it off the current node
                next_id += 1
                trie[next_id] = {}
                children[letter] = next_id
                node = next_id
    return trie
def test():
    """Smoke-test build_trie against two hand-computed tries."""
    cases = (
        (['ATA'], {0: {'A': 1}, 1: {'T': 2}, 2: {'A': 3}, 3: {}}),
        (['AT', 'AG', 'AC'], {0: {'A': 1}, 1: {'T': 2, 'G': 3, 'C': 4}, 2: {}, 3: {}, 4: {}}),
    )
    for patterns, exp in cases:
        trie = build_trie(patterns)
        print(f'{trie}\n{exp} - expected')
        assert trie == exp
    print('OK')
def main():
    """Read patterns from stdin (first token is a count, ignored) and print
    every trie edge as 'parent->child:letter', one per line."""
    patterns = sys.stdin.read().split()[1:]
    trie = build_trie(patterns)
    for parent, edges in trie.items():
        for letter, child in edges.items():
            print("{}->{}:{}".format(parent, child, letter))


if __name__ == '__main__':
    # test()
    main()
import numpy as np
import pandas as pd
def main(ratings_path='book-dataset/BX-Book-Ratings.csv',
         output_path='book-dataset/book_ratings.txt'):
    """Convert the BX book-ratings CSV into a numbered, semicolon-separated file.

    Reads *ratings_path* (columns user_id;ISBN;rating, latin-1 encoded) and
    writes *output_path* with a leading running row number, no header, no index.
    The path parameters default to the original hard-coded locations, so
    existing callers of main() are unaffected.
    """
    c_cols = ['2user_id', '3ISBN', '4bookrating']
    ratings = pd.read_csv(ratings_path, sep=';', names=c_cols, encoding='latin-1')
    # kept from the original script: echo the first user id
    print(ratings['2user_id'][0])
    # BUG FIX (performance): the original built a one-row DataFrame per input
    # row and appended it to the file with a separate to_csv call — O(n) file
    # opens and quadratic overall.  Inserting the row number as a column and
    # writing once produces the same file content.
    ratings.insert(0, '1slno', range(len(ratings)))
    ratings.to_csv(output_path, sep=';', index=False, header=False)


if __name__ == "__main__":
    main()
|
import os
import csv
import json
import copy
class Persist:
    """Mixin that persists ``self.record`` (a dict) to a CSV or JSON file.

    Subclasses must define a ``tablename`` attribute; the target file is
    ``<dir>/<tablename>.csv``.  The output format is chosen from the file
    extension (csv is the default).
    """

    def __init__(self, dir = None):
        # defaults first, so set_dir() can safely compute the full path
        self.dir = None
        self.fullpath = None
        self.format = 'csv'
        self.__set_filename()
        # BUG FIX: the original did `self.dir = self.set_dir(dir)` — set_dir
        # returns None, which clobbered the directory it had just stored, and
        # `self.fullpath = None` afterwards wiped the computed path as well.
        self.set_dir(dir)

    def __set_filename(self):
        # `tablename` is expected to be provided by the subclass
        self.filename = self.tablename + '.csv'

    def __define_format(self):
        """Derive self.format from the filename extension."""
        if not self.filename:
            pass
        elif self.filename.endswith('.csv'):
            self.format = 'csv'
        elif self.filename.endswith('.json'):
            self.format = 'json'
        else:
            raise ValueError("'filename' must have .csv or .json extension")

    def set_dir(self, dirname):
        """Store *dirname* (created if missing) and refresh the full path.

        Non-string values are ignored, matching the original behaviour.
        """
        if isinstance(dirname, str):
            if not dirname.endswith('/'):
                dirname = dirname + '/'
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            self.dir = dirname
            self.__filename()

    def __filename(self):
        # recompute the full path once both components are known
        if self.filename and self.dir:
            self.fullpath = self.dir + self.filename

    def write(self):
        """Append self.record to the target file (header written on creation).

        Raises ValueError when the path was never configured.
        """
        if not self.fullpath:
            raise ValueError("'filename' and 'dir' values must be set")
        # first write creates the file (and the CSV header); later ones append
        mode = 'w' if not os.path.exists(self.fullpath) else 'a'
        with open(self.fullpath, mode) as f:
            if self.format == 'csv':
                writer = csv.DictWriter(f, fieldnames=self.record.keys())
                if mode == 'w':
                    writer.writeheader()
                writer.writerow(self.record)
            else:
                # BUG FIX: the original dumped the bare name `record`
                # (NameError); the instance attribute was intended.
                json.dump(self.record, f)

    def load(self):
        """Read the file back; for CSV, return one deep-copied clone of self
        per row with that row's columns merged into its __dict__."""
        with open(self.fullpath, 'r') as f:
            if self.format == 'json':
                data = json.load(f)
            else:
                reader = csv.DictReader(f, delimiter=',')
                data = []
                for row in reader:
                    _obj = copy.deepcopy(self)
                    _obj.__dict__.update(row)
                    data.append(_obj)
        return data
|
import tensorflow as tf
from layers import Layers
class HAN:
    """Hierarchical-attention sentiment network built on TF1 graph mode.

    Wires the layer factory in `Layers` into a stacked biLSTM + sentence
    attention classifier and returns the graph handles needed for training.
    """

    def __init__(self,config):
        # config is a nested dict; this class reads config['model'][...]
        self.config = config
        self.layers = Layers(config)

    def build_HAN_net(self):
        """Build the graph and return {'loss','pred','graph','train_step','saver'}."""
        X_id = self.layers.X_input()            # token-id input placeholder
        senti_Y = self.layers.senti_Y_input()   # sentiment label placeholder
        table = self.layers.word_embedding_table()
        mask = self.layers.padded_word_mask(X_id)
        X = self.layers.lookup(X_id, table, mask)
        seq_len = self.layers.sequence_length(X_id)
        sent_repr_ls = []
        # stack `sentAtt_num` biLSTM+attention layers; each layer's output X
        # feeds the next iteration
        for i in range(self.config['model']['sentAtt_num']):
            name = '_layer%d'%i
            X = self.layers.biLSTM(X,seq_len,name)
            graph = tf.get_default_graph()
            # L2-regularize both directions' LSTM kernels, collected under 'reg'
            tf.add_to_collection('reg', tf.contrib.layers.l2_regularizer(self.config['model']['reg_rate'])(
                graph.get_tensor_by_name('biLSTM%s/bidirectional_rnn/fw/lstm_cell/kernel:0'%name)))
            tf.add_to_collection('reg', tf.contrib.layers.l2_regularizer(self.config['model']['reg_rate'])(
                graph.get_tensor_by_name('biLSTM%s/bidirectional_rnn/bw/lstm_cell/kernel:0'%name)))
            sent_att = self.layers.sent_attention(X,X_id)
            # (batch size, max sent len)
            sent_repr = self.layers.sent_repr(sent_att, X)
            sent_repr_ls.append(sent_repr)
        # (batch size, sentAtt_num * max sent len)
        sent_repr = tf.concat(sent_repr_ls,axis=1)
        senti_score = self.layers.score(sent_repr)
        pred = self.layers.senti_prediction(senti_score)
        loss = self.layers.senti_loss(senti_score, senti_Y)
        train_step = tf.train.AdamOptimizer(self.config['model']['lr']).minimize(loss)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=2)
        return {'loss': loss, 'pred': pred, 'graph': tf.get_default_graph(), 'train_step': train_step, 'saver': saver}
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <marta@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp import pooler
from openerp.tools.translate import _
from datetime import date
from psycopg2 import OperationalError
import openerp
class procurement_order(orm.Model):
    """Override of procurement.order that, instead of creating procurements,
    records minimum-stock breaches in product.stock.unsafety."""
    _inherit = 'procurement.order'

    def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id = False, context=None):
        '''
        Create procurement based on Orderpoint

        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        '''
        if context is None:
            context = {}
        if use_new_cursor:
            # dedicated cursor so each orderpoint can be committed/rolled back alone
            cr = openerp.registry(cr.dbname).cursor()
        orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
        stock_unsafety = self.pool.get('product.stock.unsafety')
        procurement_obj = self.pool.get('procurement.order')
        prod_obj = self.pool.get('product.product')
        prod_ids = prod_obj.search(cr, uid, [])
        for prod in prod_obj.browse(cr, uid, prod_ids):
            # prefer orderpoints whose validity window covers today; fall back
            # to undated ones when none match
            orderpoint_ids = orderpoint_obj.search(cr, uid, [('product_id', '=', prod.id),
                                                             ('from_date', '<=', date.today()),
                                                             ('to_date', '>=', date.today()), ], offset=0, limit=None)
            if not orderpoint_ids:
                orderpoint_ids = orderpoint_obj.search(cr, uid, [('product_id', '=', prod.id),
                                                                 ('from_date', '=', False),
                                                                 ('to_date', '=', False)], offset=0, limit=None)
                if not orderpoint_ids:
                    orderpoint_ids = []
            for op in orderpoint_obj.browse(cr, uid, orderpoint_ids, context=context):
                try:
                    seller = False
                    prods = self._product_virtual_get(cr, uid, op)
                    if prods is None:
                        continue
                    if prods < op.product_min_qty:
                        # stock fell under the orderpoint minimum: log it,
                        # flagging 'exception' when no supplier is configured
                        if prod.seller_ids:
                            seller = prod.seller_ids[0].name.id
                            state = 'in_progress'
                        else:
                            state = 'exception'
                        vals = {'product_id': prod.id,
                                'supplier_id': seller,
                                'min_fixed': op.product_min_qty,
                                'max_fixed': op.product_max_qty,
                                'real_stock': prod.qty_available,
                                'virtual_stock': prods,
                                'responsible': uid,
                                'state': state,
                                'name': 'stock minimo',
                                'company_id': op.company_id.id}
                        stock_unsafety.create_or_write(cr, uid, vals,
                                                       context=context)
                    if use_new_cursor:
                        cr.commit()
                except OperationalError:
                    # concurrent update: retry this orderpoint later in batch
                    # mode, re-raise otherwise
                    if use_new_cursor:
                        orderpoint_ids.append(op.id)
                        cr.rollback()
                        continue
                    else:
                        raise
            if use_new_cursor:
                cr.commit()
        if use_new_cursor:
            cr.commit()
            cr.close()
        return {}
|
# Create your views here.
#IMPORT models
from .models import Movie,ApiUsers
#IMPORT LIBRARIRES/FUNCTIONS
#from django.shortcuts import render , HttpResponse
from django.http import JsonResponse
import json
from firstapp.customClasses import *
#IMPORT DJANGO PASSWORD HASH GENERATOR AND COMPARE
from django.contrib.auth.hashers import make_password, check_password
from django.shortcuts import render, HttpResponse
def vista(request):
    """Render the landing page."""
    template_name = 'index.html'
    return render(request, template_name)
#check_password(noHashPassword,HashedPassword) this funcion validate if the password match to the hash
def login(request):
    """POST-only login endpoint.

    Expects a JSON body with "user" and "password".  On success returns the
    user's api_key (generating and saving one if missing); on any failure
    returns a JSON error payload, status 401 for auth errors, 400 for a
    non-POST request.
    """
    #VALIDATE METHOD
    if request.method == 'POST':
        #DECLARE RESPONSE
        responseData = {}
        msgError = ""
        detectedError = False
        #CHECK JSON STRUCTURE
        validateJson = checkJson(request.body)
        if (validateJson):
            jsonBody = json.loads(request.body)
            #CHECK JSON CONTENT
            if "user" not in jsonBody:
                msgError = "Requires User"
                detectedError = True
            elif "password" not in jsonBody:
                msgError = "Requires Password"
                detectedError = True
            #CHECK IF USER EXITST
            else:
                try:
                    currentUser = ApiUsers.objects.get(user = jsonBody['user'])
                except Exception as e:
                    # deliberately the same message for unknown user and bad
                    # password, so the endpoint does not leak which one failed
                    msgError = "The user does not exist or the password is incorrect"
                    detectedError = True
                    responseData['result'] = 'error'
                    responseData['message'] = msgError
                    return JsonResponse(responseData,status=401)
                #TAKE PASSWORD OF THE USER
                passJsonBody = jsonBody['password']
                currentUserPass = currentUser.password
                #CHECK IF PASSWORD IS CORRECT
                if not check_password(passJsonBody,currentUserPass):
                    msgError = "The user does not exist or the password is incorrect"
                    detectedError = True
                #CHECK IF USER HAS API-KEY
                elif currentUser.api_key == None:
                    # first successful login: mint and persist a new api key
                    newApiKey = ApiKey().generate_key_complex()
                    currentUser.api_key = newApiKey
                    currentUser.save()
                    responseData['result'] = 'SUCCESS'
                    responseData['message'] = 'Correct Credentials'
                    responseData['userApiKey'] = currentUser.api_key
                    return JsonResponse(responseData,status=200)
                else:
                    responseData['result'] = 'SUCCESS'
                    responseData['message'] = 'Correct Credentials'
                    responseData['userApiKey'] = currentUser.api_key
                    return JsonResponse(responseData,status=200)
            # fall-through for the field-validation / wrong-password branches
            if detectedError == True:
                responseData['result'] = 'ERROR'
                responseData['message'] = msgError
                detectedError = False
                return JsonResponse(responseData,status=401)
        else:
            # NOTE(review): this branch returns status 200 while every other
            # error path returns 4xx — confirm whether that is intentional
            responseData['result'] = 'ERROR'
            msgError = "Invalid JSON Format"
            responseData['message'] = msgError
            return JsonResponse(responseData)
    #RETURN RESPONSE
    else:
        responseData = {}
        responseData['result'] = 'ERROR'
        responseData['message'] = 'Invalid Request'
        return JsonResponse(responseData, status=400)
def makepassword(request, password):
    """Hash *password* with Django's configured hasher and return it as JSON."""
    payload = {'password': make_password(password)}
    return JsonResponse(payload, status=200)
def checkJson(myJson):
    """Return True when *myJson* parses as JSON, False otherwise."""
    try:
        json.loads(myJson)
    except ValueError:
        return False
    return True
def list(request):
    """POST-only movie listing endpoint.

    Requires a "user-api-key" header plus JSON credentials ("user",
    "password"); on success returns every Movie as a numbered dict.
    NOTE(review): the function name shadows the builtin `list`; renaming would
    break URL configuration elsewhere, so it is kept.
    """
    #VALIDATE METHOD
    if request.method == 'POST':
        #DECLARE RESPONSE
        responseData = {}
        msgError = ""
        detectedError = False
        # make sure the api-key header is present before anything else
        if request.headers.get("user-api-key") == None:
            responseData['result'] = 'ERROR'
            responseData['message'] = 'Requires ApiKey'
            return JsonResponse(responseData,status=400)
        #CHECK JSON STRUCTURE
        validateJson = checkJson(request.body)
        if (validateJson):
            jsonBody = json.loads(request.body)
            #CHECK JSON CONTENT
            if "user" not in jsonBody:
                msgError = "Requires User"
                detectedError = True
            elif "password" not in jsonBody:
                msgError = "Requires Password"
                detectedError = True
            #CHECK IF USER EXITST
            else:
                try:
                    currentUser = ApiUsers.objects.get(user = jsonBody['user'])
                except Exception as e:
                    # same message for unknown user / wrong password on purpose
                    msgError = "The user does not exist or the password is incorrect"
                    detectedError = True
                    responseData['result'] = 'ERROR'
                    responseData['message'] = msgError
                    return JsonResponse(responseData,status=401)
                #TAKE PASSWORD OF THE USER
                passJsonBody = jsonBody['password']
                currentUserPass = currentUser.password
                #CHECK IF PASSWORD IS CORRECT
                if not check_password(passJsonBody,currentUserPass):
                    msgError = "The user does not exist or the password is incorrect"
                    detectedError = True
                # compare the supplied api key against the stored one; when it
                # matches, return the full movie catalogue
                elif currentUser.api_key == request.headers["user-api-key"]:
                    responseData['result'] = 'SUCCESS'
                    responseData['movie'] = {}
                    cont = 0
                    for i in Movie.objects.all():
                        responseData["movie"][cont] = {}
                        responseData["movie"][cont]["id"] = i.movieid
                        responseData["movie"][cont]['name'] = i.movietitle
                        responseData["movie"][cont]['image'] = i.imageurl
                        responseData["movie"][cont]['decription'] = i.description
                        cont = cont + 1
                    return JsonResponse(responseData,status=200)
                else:
                    responseData['result'] = 'ERROR'
                    responseData['message'] = 'Invalid Api-key'
                    return JsonResponse(responseData,status=401)
            # fall-through for the field-validation / wrong-password branches
            if detectedError == True:
                responseData['result'] = 'ERROR'
                responseData['message'] = msgError
                detectedError = False
                return JsonResponse(responseData,status=401)
        else:
            # NOTE(review): returns status 200 unlike the other error paths —
            # confirm whether that is intentional
            responseData['result'] = 'ERROR'
            msgError = "Invalid JSON Format"
            responseData['message'] = msgError
            return JsonResponse(responseData)
    #RETURN RESPONSE
    else:
        responseData = {}
        responseData['result'] = 'ERROR'
        responseData['message'] = 'Invalid Request'
        return JsonResponse(responseData, status=400)
|
def midpoint_integration(f, a, b, n=100):
    """Approximate the integral of *f* over [a, b] with the composite
    midpoint rule using *n* equal subintervals."""
    h = (b - a) / float(n)
    # evaluate f at the midpoint of each subinterval; sum() accumulates in the
    # same left-to-right order as the original loop
    return h * sum(f(a + i * h + 0.5 * h) for i in range(n))
from math import *
import sys
from scitools.StringFunction import StringFunction

# CLI: <formula> <a> <b> [n]   (Python 2 script)
f_formula = sys.argv[1]
# NOTE(review): eval of command-line input — acceptable only for trusted,
# interactive use; lets the caller write expressions like "2*pi"
a = eval(sys.argv[2])
b = eval(sys.argv[3])
if len(sys.argv) >= 5:
    n = int(sys.argv[4])
else:
    n = 200
# turn the formula string into a callable f(x)
g=StringFunction(f_formula)
I = midpoint_integration(g, a, b, n)
print 'Integral of %s on [%g, %g] with n=%d: %g' % \
    (f_formula, a, b, n, I)
def test_midpoint():
    """Verify midpoint_integration on two integrals with known exact values."""
    full_period = midpoint_integration(cos, 0, 2*pi)        # expected=0
    half_period = midpoint_integration(sin, 0, pi, n=1000)  # expected=2
    within_tol = abs(full_period) < 1e-6 and abs(half_period - 2) < 1e-6
    assert within_tol, "test failed"
#test_midpoint()
|
from __future__ import annotations
from abc import abstractmethod
from meiga import BoolResult
from petisco.base.domain.message.domain_event import DomainEvent
from petisco.base.domain.message.message_subscriber import MessageSubscriber
class DomainEventSubscriber(MessageSubscriber):
    """
    A base class to model your domain event subscribers.

    Inherit from this class to parse the domain event, configure middlewares
    and instantiate and execute a UseCase.
    """

    @abstractmethod
    def subscribed_to(self) -> list[type[DomainEvent]]:
        """
        Return the list of DomainEvent types this subscriber listens to.
        """
        raise NotImplementedError()

    @abstractmethod
    def handle(self, domain_event: DomainEvent) -> BoolResult:
        """
        Process *domain_event*; return a success result when the event was
        handled, a failure result when it could not be processed.
        """
        raise NotImplementedError()
|
import requests
from bs4 import BeautifulSoup, Tag, NavigableString
from urllib.parse import urljoin
def scrape_mccormick_courses(dept="computer-science"):
    """Print number, title, URL, prerequisites and description for every
    course on a McCormick department's course-index page.

    *dept* is the department slug used in the URL (e.g. "computer-science").
    Performs one HTTP request for the index plus one per course.
    """
    # download the index page
    index_url = "https://www.mccormick.northwestern.edu/"+dept+"/courses/"
    index_page = requests.get(index_url)
    # load the page into beautifulsoup for parsing
    index = BeautifulSoup(index_page.content, 'html.parser')
    # get the rows in the course table
    rows = index.select('#course_list tr')
    for i, row in enumerate(rows):
        # skip the first row of table headers
        if i == 0:
            continue
        # use CSS selectors to search just within this row
        number = row.select_one('td:nth-of-type(1)').text
        title = row.select_one('td:nth-of-type(2)').text
        anchor_element = row.select_one('a')
        # get the "href" attribute from the "a" element we found above
        relative_url = anchor_element['href']
        absolute_url = urljoin(index_url, relative_url)
        print(number, title, absolute_url)
        # now download the course detail page
        detail = BeautifulSoup(requests.get(absolute_url).content, 'html.parser')
        # parse prereqs and description
        prereqs = None
        description = None
        for h3 in detail.select("#main-content h3"):
            if h3.next_sibling:
                s = h3.next_sibling
                # the sibling may be a bare NavigableString rather than a Tag
                text_after_h3 = s.get_text() if isinstance(s, Tag) else s
                if h3.text == "Prerequisites":
                    prereqs = text_after_h3
                if h3.text == "Description":
                    description = text_after_h3
        print("Prereqs:", prereqs)
        print("Description:", description, "\n")
if __name__ == '__main__':
    # scrape two department listings when run as a script
    scrape_mccormick_courses("computer-science")
    scrape_mccormick_courses("industrial")
|
# The same value (ten) written as decimal, binary and hexadecimal literals;
# all three print as 10.
num = 10
b_num = 0b1010  # binary literal
h_num = 0xa     # hexadecimal literal
for literal_value in (num, b_num, h_num):
    print(literal_value)
|
class Calculator:
    """Four-function calculator state machine.

    Attributes a, b, result, choice and d are read/written by the driver
    script, so their names are part of the public surface.
    """

    def __init__(self, number1, number2, result, chc, div0):
        self.a = number1        # first operand
        self.b = number2        # second operand
        self.result = result    # last computed value
        self.choice = chc       # last menu selection
        self.d = div0           # division-error sentinel

    def add(self):
        """Return a + b, caching it in self.result."""
        self.result = self.a + self.b
        return self.result

    def sub(self):
        """Return a - b, caching it in self.result."""
        self.result = self.a - self.b
        return self.result

    def mul(self):
        """Return a * b, caching it in self.result."""
        self.result = self.a * self.b
        return self.result

    def div(self):
        """Return a / b, or an error string when b is zero.

        self.d is reset to 0 on success so the driver can tell the two
        outcomes apart.
        """
        self.d = "Cannot Divide By 0"
        if self.b:
            self.result = self.a / self.b
            self.d = 0
        else:
            self.result = self.d
        return self.result

    def getinput(self):
        """Prompt for both operands."""
        self.a = int(input("Enter 1st Number : "))
        self.b = int(input("Enter 2nd Number : "))

    def optiondisplay(self):
        """Show the menu and store the selected option in self.choice."""
        print("\n")
        for menu_line in (
            "Select Options : 1 . Addition",
            " : 2 . Subtraction",
            " : 3 . Multiplication",
            " : 4 . Division",
            " : 5 . Exit",
            " : 6 . Change Input",
        ):
            print(menu_line)
        self.choice = int(input("Choice : "))
        print("\n\n")
if __name__ == "__main__":
    # Interactive driver loop.  prev1 exists only to remember the previous
    # menu choice (prev1.choice); its operands are never used.
    a = 0
    b = 0
    r = 0
    choice = 0
    d = 0
    calc1 = Calculator(a, b, r, choice, d)
    prev1 = Calculator(a, b, r, choice, d)
    calc1.optiondisplay()
    # re-prompt while the selection is out of range (above 6)
    while calc1.choice > 6:
        print("Invalid choice")
        calc1.optiondisplay()
    if calc1.choice != 6:
        calc1.getinput()
    else:
        prev1.choice = calc1.choice
    # main loop: 5 exits, 6 re-reads the operands, 1-4 compute and print
    while calc1.choice != 5:
        if calc1.choice == 1:
            print("\n")
            print(" " + str(calc1.a) + " +")
            print(" " + str(calc1.b))
            print("---------")
            print(" " + str(calc1.add()))
            prev1.choice = calc1.choice
            calc1.optiondisplay()
        elif calc1.choice == 2:
            print("\n")
            print(" " + str(calc1.a) + " -")
            print(" " + str(calc1.b))
            print("---------")
            print(" " + str(calc1.sub()))
            prev1.choice = calc1.choice
            calc1.optiondisplay()
        elif calc1.choice == 3:
            print("\n")
            print(" " + str(calc1.a) + " *")
            print(" " + str(calc1.b))
            print("---------")
            print(" " + str(calc1.mul()))
            prev1.choice = calc1.choice
            calc1.optiondisplay()
        elif calc1.choice == 4:
            print("\n")
            print(" " + str(calc1.a) + " /")
            print(" " + str(calc1.b))
            print("---------")
            print(" " + str(calc1.div()))
            prev1.choice = calc1.choice
            # d == 0 means the division succeeded; otherwise re-read operands
            if calc1.d == 0:
                calc1.optiondisplay()
            else:
                print("\n")
                calc1.getinput()
        elif calc1.choice == 5:
            exit()
        elif calc1.choice == 6:
            # change operands, then repeat the previous operation (if any)
            calc1.getinput()
            if prev1.choice >= 6:
                calc1.optiondisplay()
            else:
                calc1.choice = prev1.choice
        else:
            print("Invalid choice")
            prev1.choice = calc1.choice
            calc1.optiondisplay()
|
import numpy as np
import pandas as pd
import unittest
from SetBinDiscretizer import SetBinDiscretizer
class TestSetBinDiscretizer(unittest.TestCase):
    """Unit tests for SetBinDiscretizer's user-supplied bin edges."""

    def test_1D_split(self):
        # one feature, edges at -0.5/0.5: expect ordinal bins 0, 1, 2
        X = [[-1], [-1], [0], [0], [0], [1], [1], [1], [1]]
        est = SetBinDiscretizer(bin_edges_internal=[[-0.5, 0.5]], encode='ordinal')
        est.fit(X)
        self.assertListEqual(est.transform(X).flatten().tolist(), [0,0,1,1,1,2,2,2,2])

    def test_2D_splits(self):
        # Test it works on 2D data
        X = [[-1, 10], [-1, 9], [0, 8], [0, 7], [0, 6], [1, 5], [1, 4], [1, 3], [1, 2]]
        # note: the second feature's edges are supplied in descending order
        est = SetBinDiscretizer(bin_edges_internal=[[-0.5, 0.5], [6.5, 5.5, 4.5, -1]], encode='ordinal')
        est.fit(X)
        self.assertListEqual(est.transform(X)[:,0].flatten().tolist(), [0,0,1,1,1,2,2,2,2])
        self.assertListEqual(est.transform(X)[:,1].flatten().tolist(), [3.0, 3.0, 3.0, 3.0, 2.0, 1.0, 0.0, 0.0, 0.0])
        # fitted edges are sorted and padded with the data min/max
        self.assertListEqual(est.bin_edges_[0].tolist(), [-1., -0.5, 0.5, 1])
        self.assertListEqual(est.bin_edges_[1].tolist(), [-1.0, 4.5, 5.5, 6.5, 10.0])
        # Test Onehot encoder
        est2 = SetBinDiscretizer(bin_edges_internal=[[-0.5, 0.5], [6.5, 5.5, 4.5, -1]], encode='onehot-dense')
        Xt2 = est2.fit_transform(X)
        self.assertListEqual(est2.get_feature_names().tolist(), ['x0_0', 'x0_1', 'x0_2', 'x1_0', 'x1_1', 'x1_2', 'x1_3'])
        self.assertListEqual(est2.get_feature_names(["feat1", "feat2"]).tolist(), ['feat1_0', 'feat1_1', 'feat1_2', 'feat2_0', 'feat2_1', 'feat2_2', 'feat2_3'])
        # Test that you can input feature names
        est3 = SetBinDiscretizer(bin_edges_internal=[[-0.5, 0.5], [6.5, 5.5, 4.5, -1]], encode='onehot', input_features=["feat1", "feat2"])
        Xt3 = est3.fit_transform(X)
        self.assertListEqual(est3.get_feature_names().tolist(), ['feat1_0', 'feat1_1', 'feat1_2', 'feat2_0', 'feat2_1', 'feat2_2', 'feat2_3'])

if __name__ == "__main__":
    unittest.main()
#
# meteo-station obv BME280 Digitale Barometer Druk en Vochtigheid Sensor Module
#
# bron: https://www.tinytronics.nl/shop/nl/sensoren/temperatuur-lucht-vochtigheid/bme280-digitale-barometer-druk-en-vochtigheid-sensor-module
# Een zeer compacte barometer die werkt via I2C of SPI. De BME280 is een 3-in-1 module
# die temperatuur, druk en vochtigheid kan meten.
#
# De module kan alleen gevoed worden met 3.3VDC. De I2C/SPI werkt dus ook met 3.3V en
# je hebt dus een level converter nodig bij gebruik van bijv. een 5V Arduino Uno.
#
# Het standaard I2C adres van deze module is 0x76. Dit moet mogelijk in de
# voorbeeldcode/library veranderd worden van 0x77 naar 0x76. Indien je de SDO pin
# verbind met Vcc, dan wordt het I2C adres 0x77.
#
# (Arduino) project met de ESP2866 en BME280 (nuttig voor aansluitingen) is te vinden op
# https://core-electronics.com.au/projects/thingspeak-temperature-pressure-logger
#
# MicroPython library voor de BME280 en ESP2866 gevonden op GitHub:
# https://github.com/triplepoint/micropython_bme280_i2c
#
# upload files to device:
# ampy --port /dev/ttyUSB0 put main.py
# ampy --port /dev/ttyUSB0 put bme280_i2c.py
#
# open console en start programma
# screen /dev/ttyUSB0 115200
# type enter-toets
# en type cntrl-D
#
# BvH, 26-05-2019
#
# LIBRARIES:
#
# We start by importing the Pin class from the machine library, as this will enable us to use
# the GPIO pins. We need to use a wait-time in the loop and import the sleep function from the
# time library.
from machine import Pin, I2C
from time import sleep
# using mqtt for exchanging data
from umqttsimple import MQTTClient
#
# The bme280_i2c library assumes the default connection of the I2C bus
# On Wymos D1 mini devices that is SCL-to-D1 (pin5), SDA-to-D2 (pin4).
#
import bme280_i2c
# Variabelen:
#temp_min = 100
#temp_max = 0
#pres_min = 10000
#pres_max = 0
#humi_min = 100
#humi_max = 0
# Functies:
def do_tripple_blink(n=3):
    """Blink the status LED *n* times (default 3) as a progress indicator.

    Uses the module-global `led` Pin.
    """
    for _ in range(n):
        led.on()
        sleep(0.5)
        led.off()
        # BUG FIX: without a pause after led.off() the next led.on() follows
        # immediately, so the n blinks merged into one long on-period
        sleep(0.5)
def update_measurements():
    """Read the BME280 sensor and refresh the module-global `values`.

    Also returns the reading so callers may use the return value directly.
    NOTE(review): the exact shape of read_compensated_data()'s result comes
    from the bme280_i2c library — confirm whether it is a dict or a tuple
    before indexing it with string keys as the main loop does.
    """
    # BUG FIX: the original bound the reading to a *local* `values` and
    # discarded it, while the main loop reads a global `values` (NameError).
    global values
    values = bme.read_compensated_data(result = None)
    return values
# INITIALISATION:
#
# `led` holds the GPIO pin used as a status indicator (output mode).
#
# see pinout on https://escapequotes.net/esp8266-wemos-d1-mini-pins-and-diagram/
# pin 16 = D0 (to the LED)
led = Pin(16, Pin.OUT)
# signal successful boot
do_tripple_blink()

# Initialise the i2c interface.
# pin 5 (= D1) SCL to BME280-SCL.
# pin 4 (= D2) SDA to BME280-SDA.
i2cbus = I2C(sda=Pin(4), scl=Pin(5))
i2cbus.scan()

# Initialise the Bosch temperature/humidity/pressure sensor.
# BUG FIX: the module is imported as `bme280_i2c` (plain `import`), so the
# class must be qualified — the bare name BME280_I2C raised NameError.
bme = bme280_i2c.BME280_I2C(i2c=i2cbus)
# signal successful sensor init
do_tripple_blink()
# setup MQTT connection
def sub_cb(topic, msg):
    """MQTT subscription callback: log every message and acknowledge the
    'received' notification."""
    print((topic, msg))
    if topic != b'notification' or msg != b'received':
        return
    print('ESP8266-wijngaar-Achthoeven received a mqtt-message!')
def connect_and_subscribe():
    """Connect to the MQTT broker and subscribe to topic_sub; return the client.

    NOTE(review): client_id, mqtt_server and topic_sub are declared global but
    never defined anywhere in this file — confirm they are provided elsewhere
    (e.g. boot.py on the device) before deploying.
    """
    global client_id, mqtt_server, topic_sub
    client = MQTTClient(client_id, mqtt_server)
    # route incoming messages through sub_cb
    client.set_callback(sub_cb)
    client.connect()
    client.subscribe(topic_sub)
    print('Connected to %s mqtt-broker, subscribed to %s topic' % (mqtt_server, topic_sub))
    return client
def restart_and_reconnect():
    """Log the failure, wait 10 seconds, then hard-reset the board so the
    boot sequence re-establishes the MQTT connection."""
    print('Failed to connect to mqtt-broker. Reconnecting...')
    # BUG FIX: only `sleep` is imported from time at the top of the file, so
    # the original `time.sleep(10)` raised NameError.
    sleep(10)
    # BUG FIX: only Pin/I2C are imported from machine at the top, so the bare
    # `machine.reset()` also raised NameError; import the module here.
    import machine
    machine.reset()
# Initial broker connection; on failure, reboot-and-retry via
# restart_and_reconnect().
try:
    client = connect_and_subscribe()
except OSError as e:
    # BUG FIX: concatenating the exception object to a str raised TypeError,
    # masking the real error — convert it explicitly.
    print('Failed connecting to mqtt-broker. Error=' + str(e))
    restart_and_reconnect()
# All in an endless loop:
while True:
# So now we need to turn on the LED, and it is as easy as this!
led.on()
# retrieve BME280-measurements:
update_measurements()
# show BME280-measurements
print('temperature : ' + values['temperature'])
print('humidity : ' + values['humidity'])
print('pressure : ' + values['pressure'])
payload = values['temperature'] + ',' + values['humidity'] + ',' + values['pressure']
# better version:
#values = read_compensated_data(result = None)
# wait
sleep(0.5)
# and turn off the LED
led.off()
# once a minute, send a message with the data to the mqtt broker
try:
client.check_msg()
if (time.time() - last_message) > message_interval:
msg = b'measurement #%d' % counter
# msg = b'measurement #%d' + payload % counter
client.publish(topic_pub, msg)
last_message = time.time()
counter += 1
except OSError as e:
restart_and_reconnect()
# wait and measure approx. every 15 secs
sleep(measure_interval-0.5)
|
class Solution:
    """LeetCode 122 — Best Time to Buy and Sell Stock II."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from unlimited buy/sell transactions.

        Equivalent to collecting every positive day-to-day price increase:
        any profitable buy-low/sell-high span decomposes into its daily
        upward moves.
        """
        total = 0
        for yesterday, today in zip(prices, prices[1:]):
            if today > yesterday:
                total += today - yesterday
        return total
|
class Queue(object):
    """FIFO queue implemented with two LIFO lists (amortized O(1) per op).

    Matches the classic LeetCode "Implement Queue using Stacks" interface:
    pop() removes the front element but returns nothing.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self._incoming = []   # elements land here on push, newest on top
        self._outgoing = []   # reversed elements; front of the queue on top

    def _shift(self):
        # why: reverse the incoming stack only when the outgoing side runs
        # dry, so each element is moved at most once
        if not self._outgoing:
            while self._incoming:
                self._outgoing.append(self._incoming.pop())

    def push(self, x):
        """
        :type x: int
        :rtype: nothing
        """
        self._incoming.append(x)

    def pop(self):
        """
        :rtype: nothing
        """
        self._shift()
        self._outgoing.pop()

    def peek(self):
        """
        :rtype: int
        """
        self._shift()
        return self._outgoing[-1]

    def empty(self):
        """
        :rtype: bool
        """
        return not (self._incoming or self._outgoing)
# -*- coding: utf-8 -*-
"""
gites.walhebcalendar
Licensed under the GPL license, see LICENCE.txt for more details.
Copyright by Affinitic sprl
"""
from zope.configuration import xmlconfig
def parseZCML(package, configFile='configure.zcml'):
    """Load and execute a package's ZCML configuration (test helper).

    :param package: the Python package whose ZCML should be included
    :param configFile: ZCML file name inside the package (default configure.zcml)
    """
    # reuse zope's shared configuration context so includes accumulate
    context = xmlconfig._getContext()
    xmlconfig.include(context, configFile, package)
    # actually apply the registered configuration actions
    context.execute_actions()
|
import os
class atm:
    """Tiny in-memory ATM: accounts are keyed by a sequential integer id.

    CheckBalance/WithdrawCash/ChangePin are interactive (they block on
    input()); the remaining methods are pure accessors.
    """

    def __init__(self):
        self.__account = {}   # account number -> {'name', 'balance', 'pin'}
        self.__counter = 0    # next account number to hand out

    def ReturnDetails(self, account_number):
        """Return the raw details dict for *account_number* (None if absent)."""
        return self.__account.get(account_number)

    def CreateAccount(self, name, balance, pin):
        """Register a new account under the next sequential number."""
        self.__account[self.__counter] = {'name': name, 'balance': balance, 'pin': pin}
        self.__counter = self.__counter + 1

    def CheckUser(self, account_number, pin):
        """Return True when *pin* matches the stored pin for the account."""
        return self.__account.get(account_number).get('pin') == pin

    def CheckBalance(self, account_number):
        """Print the balance, then pause until the user presses enter."""
        print('User balance = ', self.__account.get(account_number).get('balance'))
        a = input()

    def WithdrawCash(self, account_number):
        """Prompt for an amount and withdraw it when funds allow."""
        amount = int(input('enter amount to withdraw: '))
        balance = self.__account.get(account_number).get('balance')
        if balance - amount >= 0:
            self.__account.get(account_number)['balance'] = balance - amount
            print('new balance: ', balance - amount)
        else:
            print('insufficient balance')
        a = input()  # pause until keypress

    def ChangePin(self, account_number):
        """Prompt for the old pin and, when it matches, store a new one."""
        if self.CheckUser(account_number, int(input('Enter old pin: '))):
            self.__account.get(account_number)['pin'] = int(input('Enter new pin: '))
        else:
            print('incorrect pin')
            a = input()
def menu():
    """Clear the screen, show the main menu, and return the raw option string."""
    os.system('clear')
    print('1. Check Balance')
    print('2. Withdraw Balance')
    print('3. Change Pin')
    print('4. Exit')
    option = input('Enter option: ')
    return option
# Interactive driver: two demo accounts, up to 3 consecutive wrong pins.
ATM = atm()
ATM.CreateAccount('vedant', 1000, 1234)
ATM.CreateAccount('shantanu', 1000, 2345)
opt = 1
tag = 0  # consecutive wrong-pin counter
while tag <= 3:
    os.system('clear')
    acc_num = int(input('enter account number: '))
    pin = int(input('enter account pin: '))
    det = ATM.ReturnDetails(acc_num)
    # NOTE(review): det is None for an unknown account number, so the .get()
    # below would raise AttributeError — confirm whether that is acceptable.
    if det.get('pin') == pin:
        tag = 0
        # inner session loop until the user picks Exit (4)
        while(1):
            opt = int(menu())
            if opt == 1:
                os.system('clear')
                ATM.CheckBalance(acc_num)
            if opt == 2:
                os.system('clear')
                ATM.WithdrawCash(acc_num)
            if opt == 3:
                os.system('clear')
                ATM.ChangePin(acc_num)
            if opt == 4:
                break
    else:
        tag = tag + 1
        print('incorrect pin')
        a = input()  # pause until keypress
print('wrong pin more than 3 times....exit')
|
from typing import Optional
from datetime import datetime
from pydantic import BaseModel
import uuid as pyuuid
class GenericData(BaseModel):
    """Common payload fields shared by all sensor-data models."""
    name: str                    # human-readable source name
    timestamp: datetime          # when the sample was taken
    optionaldata: Optional[str]  # free-form extra text, may be absent
    jsondata: dict               # arbitrary structured payload
    uuid: pyuuid.UUID            # unique identifier for the record
class GPS(GenericData):
    """GPS sample; same fields as GenericData, readable from ORM objects."""
    class Config:
        # allow constructing from ORM attributes (pydantic v1 orm_mode)
        orm_mode = True
class SomeOtherData(GenericData):
    """Placeholder sample type; same fields as GenericData, ORM-readable."""
    class Config:
        # allow constructing from ORM attributes (pydantic v1 orm_mode)
        orm_mode = True
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import json
import glob
import codecs
import unicodedata
from scrapy.selector import Selector
import common
# Map output field names to the Chinese row labels used in the balance sheets
# (income/expense categories of Taiwanese political-contribution reports).
maps = dict(zip(['in_individual', 'in_profit', 'in_party', 'in_civil', 'in_anonymous', 'in_others', 'in_total', 'out_personnel', 'out_propagate', 'out_campaign_vehicle', 'out_campaign_office', 'out_rally', 'out_travel', 'out_miscellaneous', 'out_return', 'out_exchequer', 'out_public_relation', 'out_total', 'balance'], [u'個人捐贈收入', u'營利事業捐贈收入', u'政黨捐贈收入', u'人民團體捐贈收入', u'匿名捐贈收入', u'其他收入', u'收入合計', u'人事費用支出', u'宣傳支出', u'租用宣傳車輛支出', u'租用競選辦事處支出', u'集會支出', u'交通旅運支出', u'雜支支出', u'返還捐贈支出', u'繳庫支出', u'公共關係費用支出', u'支出合計', u'收支結存金額']))
files = [codecs.open(f, 'r', encoding='utf-8') for f in glob.glob('../../data/xml/*.xml')]
for f in files:
    print f.name
    fileName, fileExt = os.path.splitext(os.path.basename(f.name))
    # normalize to NFC so regex matching against the label map is reliable
    xml_text = unicodedata.normalize('NFC', f.read())
    x = Selector(text=xml_text, type='xml')
    # mayors — bookmarks are nested election -> county -> candidate
    counties = x.xpath(u'//bookmark[@title="政治獻金收支結算表"]/bookmark[contains(@title, "直轄市市長選舉")]/bookmark | //bookmark[@title="政治獻金收支結算表"]/bookmark[contains(@title, "縣(市)長選舉")]/bookmark')
    # convert the ROC year in the parent title to a Gregorian year
    year_ROC = counties.xpath(u'../@title').re(u'(\d+)年')
    year = str(int(year_ROC[0])+1911) if year_ROC else None
    items = []
    for county in counties:
        print('mayors: %s' % county)
        for people in county.xpath('bookmark'):
            item = {}
            item['election_year'] = year
            item['county'] = county.xpath('@title').extract()[0]
            item['title'] = 'mayors'
            item['election_name'] = u'%s長選舉' % (item['county'], )
            item['name'] = people.xpath('@title').extract()[0]
            # normalize middle dots and strip spaces/parentheses from names
            item['name'] = re.sub(u'[。˙・•..]', u'‧', item['name'])
            item['name'] = re.sub(u'[ \s()()]', '', item['name'])
            item['name'] = item['name'].title()
            print item['name']
            # follow the bookmark destination to the candidate's table
            structID = people.xpath('destination/@structID').extract()[0]
            head = x.xpath(u'//*[@id="%s"]' % structID)
            tds = head.xpath('following-sibling::Table[1]/TR/TD/*/text()').extract()
            # label cell is followed by its amount cell; match labels via maps
            for i in range(0, len(tds)):
                for key, value in maps.iteritems():
                    if re.match(value, tds[i]):
                        item[key] = int(re.sub('[^-\d.]', '', tds[i+1]))
                        break
            items.append(item)
    # councilors — same structure as mayors
    counties = x.xpath(u'//bookmark[@title="政治獻金收支結算表"]/bookmark[contains(@title, "議員")]/bookmark')
    year_ROC = counties.xpath(u'../@title').re(u'(\d+)年')
    year = str(int(year_ROC[0])+1911) if year_ROC else None
    for county in counties:
        print('councilors: %s' % county)
        for people in county.xpath('bookmark'):
            item = {}
            item['election_year'] = year
            item['county'] = county.xpath('@title').extract()[0]
            item['title'] = 'councilors'
            item['election_name'] = u'%s議員選舉' % (item['county'], )
            item['name'] = people.xpath('@title').extract()[0]
            item['name'] = re.sub(u'[。˙・•..]', u'‧', item['name'])
            item['name'] = re.sub(u'[ \s()()]', '', item['name'])
            item['name'] = item['name'].title()
            print item['name']
            structID = people.xpath('destination/@structID').extract()[0]
            head = x.xpath(u'//*[@id="%s"]' % structID)
            tds = head.xpath('following-sibling::Table[1]/TR/TD/*/text()').extract()
            for i in range(0, len(tds)):
                for key, value in maps.iteritems():
                    if re.match(value, tds[i]):
                        item[key] = int(re.sub('[^-\d.]', '', tds[i+1]))
                        break
            items.append(item)
    # legislators — flat report elements rather than nested bookmarks
    reports = x.xpath(u'//*[contains(text(), "立法委員選舉")]')
    print('legislators: %d' % len(reports))
    for report in reports:
        item = {}
        item['ad'] = report.xpath('text()').re('\d+')[0]
        meta = report.xpath(u'following-sibling::*/text()').extract()
        if meta:
            item['county'] = meta[0].strip()
            item['title'] = 'legislators'
            item['election_name'] = u'%s立法委員選舉' % (item['county'], )
            item['name'] = meta[1].strip()
            item['name'] = re.sub(u'[。˙・•..]', u'‧', item['name'])
            item['name'] = re.sub(u'[ \s]', '', item['name'])
            item['name'] = item['name'].title()
            print 'county: "%s"' % item['county'], 'name: "%s"' % item['name']
        else:
            print 'no meta data'
            continue
        tds = [x.strip() for x in report.xpath('following-sibling::Table[1]/TR/TD/*/text()').extract()]
        for i in range(0, len(tds)):
            for key, value in maps.iteritems():
                if re.match(value, tds[i]):
                    item[key] = int(re.sub('[^-\d.]', '', tds[i+1]))
                    break
        items.append(item)
    # dump both a pretty and a compact JSON file per source XML
    if items:
        dump_data = json.dumps(items, sort_keys=True, indent=4, ensure_ascii=False)
        common.write_file(dump_data, '../../data/json/pretty_format/political_contribution/%s.json' % fileName)
        dump_data = json.dumps(items)
        common.write_file(dump_data, '../../data/json/political_contribution/%s.json' % fileName)
|
__author__ = 'venth'
import itertools
import sys
import unittest
import superdigit
class SuperdigitTest(unittest.TestCase):
    """Black-box tests for superdigit.calc() (Python 2 test suite)."""

    def test_every_single_digit_has_equal_superdigit(self):
        # given every single digit 0..9
        # BUG FIX: xrange(0, 9) stops at 8, so the digit 9 was never tested.
        single_digits = [digit for digit in xrange(0, 10)]
        # when super digit is calculated for every single digit
        calculated = itertools.imap(
            lambda d: (d, superdigit.calc(d)),
            single_digits,
        )
        # then every single digit is the super digit as well
        for digit, calc_superdigit in calculated:
            self.assertEqual(digit, calc_superdigit)

    def test_every_negative_single_digit_has_equal_negative_superdigit(self):
        # given every negative single digit -9..-1
        # BUG FIX: xrange(-9, -1) stops at -2, so -1 was never tested.
        single_digits = [digit for digit in xrange(-9, 0)]
        # when super digit is calculated for every single digit
        calculated = itertools.imap(
            lambda d: (d, superdigit.calc(d)),
            single_digits,
        )
        # then every single digit is the super digit as well
        for digit, calc_superdigit in calculated:
            self.assertEqual(digit, calc_superdigit)

    def test_calculated_superdigit_for_numbers(self):
        # given numbers with pre-calculated super digits
        numbers = [
            dict(number=10, superdigit=1),
            dict(number=99, superdigit=9),
            dict(number=999, superdigit=9),
            dict(number=88, superdigit=7),
            dict(number=532, superdigit=1),
            dict(number=4567, superdigit=4),
            dict(number=98741, superdigit=2),
            dict(number=-10, superdigit=-1),
            dict(number=-99, superdigit=-9),
            dict(number=-999, superdigit=-9),
            dict(number=-88, superdigit=-7),
            dict(number=-532, superdigit=-1),
            dict(number=-4567, superdigit=-4),
            dict(number=-98741, superdigit=-2),
        ]
        # when super digit is calculated for every given number
        calculated = itertools.imap(
            lambda given: (given['superdigit'], superdigit.calc(given['number'])),
            numbers,
        )
        # then calculated superdigit is equal a given calculated by hand
        for given_superdigit, calc_superdigit in calculated:
            self.assertEqual(given_superdigit, calc_superdigit)

    def test_calculate_superdigit_for_max_positive_ends_with_calculated_superdigit(self):
        # when superdigit is calculated for the max positive number
        # then it completes without raising (e.g. no recursion blowup)
        superdigit.calc(sys.maxint)
class _DigitizingIteratorTest(unittest.TestCase):
    """Checks that superdigit._DigitizingIterator yields one digit per decimal
    character, and that negative numbers produce negative digits."""

    def test_for_given_positive_number_return_digits_as_were_predigitized(self):
        # given positive numbers
        positive_numbers = [0, 1, 100, 200, 1212312, 43543512, 123123, 4345435, 12312312, 34435435, 9234748753, sys.maxint]
        for number in positive_numbers:
            self._for_a_given_positive_number_returns_its_digit(number=number)

    def test_negative_numbers_are_digitized_with_negative_digits(self):
        # given negative numbers
        negative_numbers = [-1, -100, -200, -1212312, -43543512, -123123, -4345435, -12312312, -34435435, -12397654512, -sys.maxint]
        for number in negative_numbers:
            self._for_a_given_negative_number_returns_its_digit(number=number)

    def _for_a_given_positive_number_returns_its_digit(self, number):
        # when the given number is digitized
        produced = list(superdigit._DigitizingIterator(number=number))
        # then the digits match the characters of the number's decimal text
        expected = [int(char) for char in str(number)]
        self.assertItemsEqual(expected, produced, 'Expected number: %s, was: %r' % (expected, produced))

    def _for_a_given_negative_number_returns_its_digit(self, number):
        # when the given number is digitized
        produced = list(superdigit._DigitizingIterator(number=number))
        # then every digit carries the sign of the (negative) number
        expected = [-int(char) for char in str(-number)]
        self.assertItemsEqual(expected, produced, 'Expected number: %s, was: %r' % (expected, produced))
|
from .statement import Statement
from functions import Function
def functiondef(c, m):
    """Function definition. For instance:

    function myMethod(ax) returns dx {
        ; code
    }
    """
    fn = Function(
        name=m.group(1),
        params=m.group(2),
        returns=m.group(3)
    )

    # A function must not name the same parameter more than once; scan every
    # ordered pair (i, j) with i < j and raise on the first repetition found.
    params = fn.params
    for i, param in enumerate(params):
        for j in range(i + 1, len(params)):
            if param == params[j]:
                raise ValueError(f'Parameter "{param}" used twice on '
                                 f'"{fn.name}" definition (positions {i+1} '
                                 f'and {j+1})')

    c.begin_function(fn)
c.begin_function(f)
# Grammar rule: recognizes function headers of the form
#   function NAME(PARAMS) [returns NAME] {
# and delegates handling of the match to functiondef().
functiondef_statement = Statement(
    r'function (VAR) \((CSVAR)\)(?: returns (VAR))? {',
    functiondef
)
|
# -*- utf8 -*-
from PIL import Image
from PIL.ExifTags import TAGS
import matplotlib.pyplot as plt
import os
import pandas as pd
# Reference list of EXIF tag names as decoded via PIL's ExifTags.TAGS;
# bare numeric entries are tag IDs with no known name.  Not used by the
# code below — kept as developer documentation.
doc1 = """
ExifVersion
ComponentsConfiguration
ExifImageWidth
DateTimeOriginal
DateTimeDigitized
ExifInteroperabilityOffset
FlashPixVersion
MeteringMode
LightSource
Flash
FocalLength
41986
ImageDescription
Make
Model
Orientation
YCbCrPositioning
41988
XResolution
YResolution
59932
ExposureTime
ExposureProgram
ColorSpace
41990
ISOSpeedRatings
ResolutionUnit
41987
FNumber
Software
DateTime
ExifImageHeight
ExifOffset
"""
# Sample EXIF dump from the third-party "exif" module (presumably exifread)
# for comparison with the PIL output — also unused by the code below.
doc2 = """
EXIF ColorSpace (Short): sRGB
EXIF ComponentsConfiguration (Undefined): YCbCr
EXIF DateTimeDigitized (ASCII): 2012:11:22 15:35:14
EXIF DateTimeOriginal (ASCII): 2012:11:22 15:35:14
EXIF DigitalZoomRatio (Ratio): 1
EXIF ExifImageLength (Long): 2560
EXIF ExifImageWidth (Long): 1920
EXIF ExifVersion (Undefined): 0220
EXIF ExposureBiasValue (Signed Ratio): 0
EXIF ExposureMode (Short): Auto Exposure
EXIF ExposureProgram (Short): Portrait Mode
EXIF ExposureTime (Ratio): 1/256
EXIF FNumber (Ratio): 14/5
EXIF Flash (Short): Flash did not fire
EXIF FlashPixVersion (Undefined): 0100
EXIF FocalLength (Ratio): 35
EXIF ISOSpeedRatings (Short): 56
EXIF InteroperabilityOffset (Long): 4810
EXIF LightSource (Short): other light source
EXIF MeteringMode (Short): CenterWeightedAverage
EXIF Padding (Undefined): []
EXIF SceneCaptureType (Short): Portrait
EXIF WhiteBalance (Short): Auto
Image DateTime (ASCII): 2012:11:24 09:44:50
Image ExifOffset (Long): 2396
Image ImageDescription (ASCII):
Image Make (ASCII):
Image Model (ASCII):
Image Orientation (Short): Horizontal (normal)
Image Padding (Undefined): []
Image ResolutionUnit (Short): Pixels/Inch
Image Software (ASCII): Microsoft Windows Photo Viewer 6.1.7600.16385
Image XResolution (Ratio): 72
Image YCbCrPositioning (Short): Co-sited
Image YResolution (Ratio): 72
Thumbnail Compression (Short): JPEG (old-style)
Thumbnail JPEGInterchangeFormat (Long): 4970
Thumbnail JPEGInterchangeFormatLength (Long): 3883
Thumbnail Orientation (Short): Horizontal (normal)
Thumbnail ResolutionUnit (Short): Pixels/Inch
Thumbnail XResolution (Ratio): 72
Thumbnail YCbCrPositioning (Short): Co-sited
Thumbnail YResolution (Ratio): 72
"""
class Exif(object):
    """Loads an image with PIL and exposes its EXIF metadata as a pandas
    DataFrame (columns: exif_items, exif_content)."""

    def __init__(self, file_name=None):
        self.image = None          # PIL Image once loaded
        self.file_name = file_name
        self.file_path = None      # directory part of file_name
        # BUG FIX: exif_info was never initialized, so show_exif() raised
        # AttributeError when called before an image had been loaded.
        self.exif_info = None
        if isinstance(file_name, str):
            self.load_image(self.file_name)

    def load_image(self, file_name=None):
        """Open `file_name` (defaults to self.file_name) and read its EXIF.

        Returns True on success, False on any failure (message is printed).
        """
        if file_name is None:
            file_name = self.file_name
        if not isinstance(file_name, str):
            print('file name is empty!')
            return False
        self.file_name = file_name
        # BUG FIX: str.rfind returns -1 (truthy!) when not found, so the
        # original conditional never fell back to '\\' separators.  Take the
        # rightmost of both separators instead.
        p = max(file_name.rfind('/'), file_name.rfind('\\'))
        self.file_path = file_name[:p] if p > 0 else ''
        if os.path.isfile(file_name):
            try:
                self.image = Image.open(file_name)
                self.__get_exif()
            except IOError:
                print('IOERROR ' + file_name)
                return False
        elif os.path.isdir(self.file_path):
            print('file path=({}) exists, \nfile name is error!'.format(self.file_path))
            return False
        else:
            # BUG FIX: the {} placeholder was never filled in.
            print('file name error =({})!'.format(file_name))
            return False
        return True

    def show_image(self):
        """Display the loaded image with matplotlib (no-op if none loaded)."""
        if self.image is not None:
            plt.imshow(self.image)

    def show_exif(self):
        """Print the EXIF DataFrame (no-op until an image has been loaded)."""
        if self.exif_info is not None:
            print(self.exif_info)

    def __get_exif(self, file_name=None, reload=False):
        """Decode EXIF tags of the current image into self.exif_info.

        Returns the DataFrame, or None if the image could not be loaded.
        """
        if file_name is None:
            # Reuse the already-loaded image unless a reload was requested.
            if (self.image is None) or reload:
                if not self.load_image():
                    return None
        else:
            if not self.load_image(file_name):
                return None
        get_exif = {'exif_items': [], 'exif_content': []}
        if hasattr(self.image, '_getexif'):
            exifinfo = self.image._getexif()
            if exifinfo is not None:
                for tag, value in exifinfo.items():
                    # Unknown tag IDs fall back to their numeric value.
                    decoded = TAGS.get(tag, tag)
                    get_exif['exif_items'].append(format(decoded, '30s'))
                    # Truncate long values so the table stays readable.
                    value = str(value)[:50]
                    get_exif['exif_content'].append(format(str(value), '50s'))
        self.exif_info = pd.DataFrame(get_exif)
        return self.exif_info
|
import os
import cv2
import argparse
import numpy as np
import albumentations
import albumentations.pytorch
import multiprocessing as mp
import torch.nn.functional as F
import segmentation_models_pytorch as smp
from importlib import import_module
from prettyprinter import cpprint
import torch
from src.utils import seed_everything, YamlConfigManager, get_dataloader, dense_crf_wrapper
from src.model import *
def inference(cfg, limit, crf):
    """Run segmentation inference over the test set and write blended
    image/mask overlays to ./prediction/<MODEL_ARC>CRF/<step>.jpg.

    Args:
        cfg: YamlConfigManager with seed, backbone, model_arc, image_size,
            num_classes and checkpoint entries.
        limit: int -> stop after that many images; anything else -> all.
        crf: truthy -> refine the softmax maps with DenseCRF post-processing.
    """
    SEED = cfg.values.seed
    BACKBONE = cfg.values.backbone
    MODEL_ARC = cfg.values.model_arc
    IMAGE_SIZE = cfg.values.image_size
    NUM_CLASSES = cfg.values.num_classes
    SAVE_IMG_PATH = './prediction/'
    # One RGB color per class; a black row is prepended below for background.
    COLORS = [
        [129, 236, 236],
        [2, 132, 227],
        [232, 67, 147],
        [255, 234, 267],  # NOTE(review): 267 wraps to 11 under uint8 — looks like a typo, confirm intended color
        [0, 184, 148],
        [85, 239, 196],
        [48, 51, 107],
        [255, 159, 26],
        [255, 204, 204],
        [179, 57, 57],
        [248, 243, 212],
    ]
    COLORS = np.vstack([[0, 0, 0], COLORS]).astype('uint8')

    os.makedirs(os.path.join(SAVE_IMG_PATH, MODEL_ARC + 'CRF'), exist_ok=True)

    checkpoint = cfg.values.checkpoint
    test_batch_size = 1

    # for reproducibility
    seed_everything(SEED)

    data_path = '/opt/ml/input/data'
    test_annot = os.path.join(data_path, 'test.json')
    checkpoint_path = f'/opt/ml/vim-hjk/results/{MODEL_ARC}'

    test_transform = albumentations.Compose([
        albumentations.Resize(IMAGE_SIZE, IMAGE_SIZE),
        albumentations.Normalize(mean=(0.461, 0.440, 0.419), std=(0.211, 0.208, 0.216)),
        albumentations.pytorch.transforms.ToTensorV2()])

    test_loader = get_dataloader(data_dir=test_annot, mode='test', transform=None, batch_size=test_batch_size, shuffle=False)

    LIMIT = limit if isinstance(limit, int) else len(test_loader)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model_module = getattr(import_module('segmentation_models_pytorch'), MODEL_ARC)
    model = model_module(
        encoder_name=BACKBONE,
        in_channels=3,
        classes=NUM_CLASSES
    )
    model = model.to(device)
    model.load_state_dict(torch.load(os.path.join(checkpoint_path, checkpoint)))

    # PERF FIX: create the CRF worker pool once, not once per image as before.
    pool = mp.Pool(mp.cpu_count()) if crf else None

    print('Start Inference.\n')
    model.eval()
    with torch.no_grad():
        for step, (imgs, image_infos) in enumerate(test_loader):
            image = test_transform(image=np.stack(imgs)[0])['image'].unsqueeze(0)
            outs = model(image.to(device))
            probs = F.softmax(outs, dim=1).data.cpu().numpy()

            if crf:
                images = image.data.cpu().numpy().astype(np.uint8).transpose(0, 2, 3, 1)
                probs = np.array(pool.map(dense_crf_wrapper, zip(images, probs)))

            oms = np.argmax(probs, axis=1).squeeze(0)

            org = np.stack(imgs)[0]
            mask = COLORS[oms]
            # Blend: 40% original image + 60% colorized prediction mask.
            output = ((0.4 * org) + (0.6 * mask)).astype('uint8')
            cv2.imwrite(os.path.join(SAVE_IMG_PATH, MODEL_ARC + 'CRF', f'{step}.jpg'), output)

            if step % 10 == 0:
                print(f'Progress({step + 1}/{LIMIT})...')
            if (step + 1) == LIMIT:
                break

    if pool is not None:
        pool.close()
        pool.join()

    print('\nInference Done.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_config_file_path', type=str, default='./config/eval_config.yml')
    parser.add_argument('--eval_config', type=str, default='base')
    # BUG FIX: type=bool is broken for CLI flags — bool('False') is True, so
    # "--crf False" could never disable CRF.  Parse the string explicitly;
    # the default stays truthy, matching the old behavior.
    parser.add_argument('--crf', type=lambda s: str(s).lower() in ('true', '1', 'yes'), default=True)
    parser.add_argument('--limit', type=str, default='all')
    args = parser.parse_args()  # (redundant second parse_args() call removed)
    cfg = YamlConfigManager(args.eval_config_file_path, args.eval_config)
    cpprint(cfg.values, sort_dict_keys=False)
    # "--limit N" caps the number of images; any non-integer means "all".
    try:
        limit = int(args.limit)
    except ValueError:
        limit = args.limit
    print('\n')
    inference(cfg, limit, args.crf)
|
from django.contrib.auth import get_user_model
from django.db import models
from rest_framework.authtoken.models import Token as DRFAuthTokenModel
from django.utils.translation import ugettext_lazy as _
# Create your models here.
from hospital.core.models import hospitals
USER = get_user_model()
class AuthToken(DRFAuthTokenModel):
    """Per-device authentication token.

    `user` is redefined as a ForeignKey (DRF's base Token uses a
    OneToOneField), so one user can hold several tokens — one per device.
    NOTE(review): overriding a model field is only valid when the DRF Token
    base is abstract (it is when 'rest_framework.authtoken' is not in
    INSTALLED_APPS) — confirm for this project.
    """
    # Many tokens per user; reverse accessor: user.auth_tokens.
    user = models.ForeignKey(USER, on_delete=models.CASCADE, related_name='auth_tokens')
    # Client-supplied identifier of the device this token was issued to.
    device_id = models.CharField(
        verbose_name=_("Device ID"),
        help_text=_("Unique device identifier"),
        max_length=150, null=True
    )
    # Presumably stamped by the auth layer on each request — verify caller.
    last_used = models.DateTimeField(null=True, blank=True)
    # @classmethod
    # def remove_sessions(cls, user_id, exclude=None):
    #     if exclude is None:
    #         exclude = []
    #     return cls.objects.filter(
    #         user_id=user_id
    #     ).exclude(
    #         key__in=exclude
    #     ).delete()
|
def best_combo(budget, keyboards, usbs):
    """Return the most expensive keyboard+USB pair affordable within
    `budget`, or -1 when no pair fits."""
    best = -1
    for keyboard in keyboards:
        for usb in usbs:
            combo = keyboard + usb
            if combo <= budget and combo > best:
                best = combo
    return best


if __name__ == '__main__':
    # Guarded so importing this module does not consume stdin.
    # Input: "budget n m", then n keyboard prices, then m USB prices.
    s, n, m = map(int, input().split(' '))
    keyboards = list(map(int, input().split(' ')))
    usbs = list(map(int, input().split(' ')))
    print(best_combo(s, keyboards, usbs))
|
from django.urls import path, include
from .views import (
RegistrationView,
PurchaseView,
WithdrawalView,
AccountView,
)
# URL routes: OAuth2 token endpoints plus registration, account and
# balance (purchase/withdrawal) views.
urlpatterns = [
    # OAuth2 provider endpoints (django-oauth-toolkit)
    path('oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # account related
    path('register/', RegistrationView.as_view(), name='register'),
    path('account/', AccountView.as_view(), name='account'),
    # balance related
    path('purchase/', PurchaseView.as_view(), name='purchase'),
    path('withdrawal/', WithdrawalView.as_view(), name='withdrawal'),
]
|
# coding = utf-8
'''
封装 requests 方法
返回response
'''
'''
调试接口
1.4、获取一个城市所有监测点的NO2数据
地址 http://www.pm25.in/api/querys/no2.json
方法 GET
参数
* city:必选
* avg:可选
* stations:可选
返回
一个数组,其中每一项数据包括
* aqi
* area
* no2
* no2_24h
* position_name
* primary_pollutant
* quality
* station_code
* time_point
注意有些接口是放返回页面格式会出现乱码的现象,如果打印类型可能得到ISO-8859-1,需要转码成 ‘bg2321’
下面是例子
# url="https://www.baidu.com"
# res=requests.get(url)
# print(res.status_code)
# print(res.encoding)
# res.encoding = 'gb2321'
# print(res.text)
'''
import requests
import json
from common.read_excel import read_excel
# class RunMain:
#
# def __init__(self, url, method, data=None): # 初始化方法 直接调用RunMain().res 就可以得到请求后的数据
# self.res = self.run_main(url, method, data)
#
# def send_get(self, url, data=None, headers=None): # 封装get
# try:
# res = requests.get(url=url, headers=headers, params=data, verify=False).json()
# return res
# except:
# print('请求失败')
# def send_post(self, url, data=None, headers=None): # 封装post
# try:
# res = requests.post(url=url, headers=headers, data=data, verify=False).json()
# return res
# except:
# print('请求失败')
#
# def run_main(self, url, method, data=None): # 根据请求方式判断调用哪个
# res = None
# if method == "GET":
# res = self.send_get(url, data)
# else:
# res= self.send_post(url, data)
# return res
#
# if __name__ == '__main__':
# url="http://www.pm25.in/api/querys/no2.json"
# data ={
# "city":"guangzhou",
# "token": "5j1znBVAsnSf5xQyNQyq"
# }
# # print(send_get(url,data))
# run= RunMain(url,"GET",data)
# print(run.res)
class Method:
    """Thin wrapper around `requests` that returns the decoded JSON body,
    or None (with a message printed) when the request fails."""

    def send_get(self, url, data=None, header=None):
        """GET `url` with query params `data` and optional `header` dict."""
        try:
            # BUG FIX: the keyword was misspelled 'heades', so every request
            # that supplied headers raised TypeError and was swallowed below.
            if header is not None:
                res = requests.get(url=url, params=data, headers=header, verify=False).json()
            else:
                res = requests.get(url=url, params=data, verify=False).json()
            return res
        except Exception:
            print("get 请求失败")

    def send_post(self, url, data, header=None, file=None):
        """POST form `data` (and optional `file`s) to `url`."""
        try:
            # BUG FIX: same 'heades' typo; also compare against None — the
            # default is None, so the original `header != ""` was always true.
            if header is not None:
                res = requests.post(url=url, data=data, headers=header, files=file, verify=False).json()
            else:
                res = requests.post(url=url, data=data, files=file, verify=False).json()
            return res
        except Exception:
            print("post 请求失败")

    def run_method(self, url, method, data=None, header=None, file=None):
        """Dispatch to send_get for "GET", otherwise send_post."""
        try:
            if method == "GET":
                return self.send_get(url, data, header)
            return self.send_post(url, data, header, file)
        except Exception:
            print("run_method 请求失败")
if __name__ == '__main__':
    # Smoke test: query NO2 readings for Guangzhou from the pm25.in API.
    url = "http://www.pm25.in/api/querys/no2.json"
    method="GET"
    header=None
    data={'city': 'guangzhou', 'token': '5j1znBVAsnSf5xQyNQyq'}
    print(Method().run_method(url,method,data,header))
|
from yamale.validators.constraints import Constraint
from email.utils import parseaddr
class EmailDomain(Constraint):
    """Yamale constraint: the email's domain part must equal `domain`."""
    keywords = {'domain': str}
    fail = '%s does not contain a valid domain. The acceptable domain value is %s'

    def _is_valid(self, value):
        # parseaddr splits "Display Name <addr>" into (name, addr).
        name, email_addr = parseaddr(value)
        email_parts = email_addr.split(u'@')
        # BUG FIX: guard malformed addresses — the original indexed [1]
        # unconditionally and raised IndexError when there was no '@'.
        if len(email_parts) != 2:
            return False
        return self.domain == email_parts[1]

    def _fail(self, value):
        return self.fail % (value, self.domain)
class Ipv4Constraint(Constraint):
    """Yamale constraint: value must be a dotted-quad IPv4 address."""

    def _is_valid(self, value):
        # BUG FIX: the original used all(filter(...)), which (a) silently
        # dropped out-of-range octets instead of failing on them, (b) rejected
        # a legitimate 0 octet because 0 is falsy, (c) never required exactly
        # four parts, and (d) raised ValueError on non-numeric input.
        parts = value.split(".")
        if len(parts) != 4:
            return False
        try:
            return all(0 <= int(part) < 256 for part in parts)
        except ValueError:
            return False
|
from .models import Answer
from django import forms
class Answerform(forms.ModelForm):
    """ModelForm exposing an Answer's text and the question it belongs to."""
    class Meta:
        model = Answer
        fields=["answer","question"]
|
# -*- coding: utf-8 -*-
"""
Created on 12/1/2018
@author: Grace Wu
PURPOSE: This script creates Supply Curves for RESOLVE for Wind, PV, and Geothermal
Previous filename: RESOLVEsupplyCurve_py3_112918.py
"""
##--------------------------------Preamble ----------------------------------
import arcpy
import numpy
import numpy.lib.recfunctions
import scipy.stats as stats
import math
import time
import os
import csv
import re
import pandas
import collections
start_time = time.time()
print(start_time)
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
from arcpy import env
from arcpy.sa import *
import arcpy.cartography as CA
arcpy.env.overwriteOutput = True
'''
################################################################################
##---------------------Local Parameters and workspace------------------------###
################################################################################
'''
## Run configuration / assumptions:
tech = "solar" ## Select: "solar", "wind", "geothermal"
minArea = 1 # km2; all polygons below this threshold will not be included in the result
### Set workspace for saving outputs: create file geodatabase (fgdb) for run session outputs
mainInputFolder = "C:\\Users\\Grace\\Documents\\TNC_beyond50\\PathTo100\\dataCollection\\" #^^ root folder of input datasets
mainOutputFolder = "C:\\Users\\Grace\\Documents\\TNC_beyond50\\PathTo100\\siteSuitabilityOutputs\\" #^^ root folder for outputs
gdbFileName = "112918_resourceAssessment.gdb"
supplyCurveFolder = "1118_results"
env.scratchWorkspace = os.path.join(mainOutputFolder, "scratch.gdb") # sets scratchworkspace to your output workspace
env.workspace = os.path.join(mainOutputFolder, gdbFileName) # sets environment workspace to your output workspace
# set input paths:
#stateBounds = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\SRTM_W_250m_proj_cl") ##^^ enter the path to your STATE boundary shapefile
templateRaster = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\SRTM_W_250m_proj_cl") ##^^ enter path to DEM data
# set environments for raster analyses: align snap/extent/mask/cell size to the DEM template
arcpy.env.snapRaster = templateRaster
arcpy.env.extent = templateRaster
arcpy.env.mask = templateRaster
arcpy.env.cellSize = templateRaster
#################
## INPUT FILES ##
#################
## renewable resource (capacity factor) rasters
solarCF = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\CF_FixedPV_SAM_AC_CF_250m")
windCF = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\CF_WINDtoolkit_NREL_IDW_masked_NoDataVals_250m")
## Existing wind and solar power plants
existingWind = os.path.join(mainInputFolder, "existingEnergyInfrastructure\\energyInfrastructure.gdb\\Ventyx_USGS_merged_repowering")
existingSolar = os.path.join(mainInputFolder, "existingEnergyInfrastructure\\energyInfrastructure.gdb\\NationalSolarArrays_solarOnly")
## QRAs and SuperCREZs (study boundaries)
QRAfilePath = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\QRA_proj")
SuperCREZ = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\SUPERCREZ_proj")
statesFilePath = os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\stateBound_baja")
scratch = os.path.join(mainOutputFolder, "scratch.gdb")
'''
######################################################################################
##-------SITE SUITABILITY USING ALL ENV DATA : ERASE + EXTRACT BY MASK-------------###
######################################################################################
PURPOSE: Creates site suitability maps results from MapRE Script Tool B Stage 1 using
only technical exclusions and erases environmental exclusion areas
'''
#### SET PATHS TO ORIGINAL ENV EXCLUSION CATEGORIES FOR EACH TECHNOLOGY
## Note: each Category is comrpised of one or more features
## SOLAR
if tech == "solar":
    # Each OrderedDict maps an exclusion shapefile -> [input suffix, output
    # suffix]: the erase loop below reads <inputNAME + input suffix> and
    # writes <inputNAME + output suffix>, chaining outputs in order.
    Cat1 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat1_solar\\Cat1_u_d_s.shp"), ["", "_cat1a"]),
                                    (os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat1_easements_proj.shp"), ["_cat1a", "_cat1b"])])
    Cat2 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat2\\both_p1\\Both_p1.shp"), ["_cat1b", "_cat2a"]),
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\both_p2\\Both_p2.shp"), ["_cat2a", "_cat2b"]),
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\solar_p1\\Solar_p1.shp"), ["_cat2b", "_cat2c"]),
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\solar_p2\\Solar_p2.shp"), ["_cat2c", "_cat2d"]),
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\0045_AHPRC_Cat2\\0045_AHPRC\\data\\v101\\nps_identified_high_potential_for_resource_conflict.gdb\\NPS_AHPRC"), ["_cat2d", "_cat2e"]),
                                    (os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat2_feeAreas_proj.shp"), ["_cat2e", "_cat2f"])])
    Cat3 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat3\\Cat3_solar_excl_base_proj.shp"), ["_cat2f", "_cat3c"])])
    # BUG FIX: Cat3 above writes the "_cat3c" suffix, but Cat4's input suffix
    # was "_cat3" — a feature class that is never created, so the solar run
    # broke at the Cat4 erase step (the wind branch chains "_cat3c" correctly).
    Cat4 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat4\\Cat4_u_d_s_proj.shp"), ["_cat3c", "_cat4"])])
    CFraster = solarCF
    ## Path to non-environmental site suitability results (created using MapRE Script Tool B Stage 1)
    inputNAME = os.path.join(mainOutputFolder, gdbFileName, "solarPV_0_0_nonEnv_r1")
## WIND
if tech == "wind":
    # Each OrderedDict maps an exclusion shapefile -> [input suffix, output
    # suffix]; the erase loop chains these outputs category by category.
    Cat1 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat1_wind\\Cat1_wind_u_d_s.shp"), ["", "_cat1a"]),\
                                    (os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat1_easements_proj.shp"), ["_cat1a", "_cat1b"])])
    Cat2 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat2\\both_p1\\Both_p1.shp"), ["_cat1b", "_cat2aa"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\both_p2\\Both_p2.shp"), ["_cat2aa", "_cat2b"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\wind_p1\\Wind_p1.shp"), ["_cat2b", "_cat2c"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\wind_p2\\Wind_p2.shp"), ["_cat2c", "_cat2d"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat2\\0045_AHPRC_Cat2\\0045_AHPRC\\data\\v101\\nps_identified_high_potential_for_resource_conflict.gdb\\NPS_AHPRC"), ["_cat2d", "_cat2e"]),\
                                    (os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat2_feeAreas_proj.shp"), ["_cat2e", "_cat2f"])])
    # NOTE(review): the first Cat3 entry reuses the *solar* base exclusion
    # layer — presumably the solar exclusions are a baseline for wind too;
    # confirm this is intentional.
    Cat3 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat3\\Cat3_solar_excl_base_proj.shp"), ["_cat2f", "_cat3a"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat3\\Cat3_wind_excl_p1_proj.shp"), ["_cat3a", "_cat3b"]),\
                                    (os.path.join(mainInputFolder, "envData\\Cat3\\Cat3_wind_excl_p2_no0147_proj.shp"), ["_cat3b", "_cat3c"])])
    Cat4 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat4\\Cat4_u_d_s_proj.shp"), ["_cat3c", "_cat4"])])
    CFraster = windCF
    ## Path to non-environmental site suitability results (created using MapRE Script Tool B Stage 1)
    inputNAME = os.path.join(mainOutputFolder, gdbFileName, "wind_0_03_nonEnv_r3")
selectSuffix = "_gt1km2"
# Process the four exclusion categories cumulatively: each category's last
# output becomes the next category's input (via the suffix chaining above).
envEx_ls = [Cat1, Cat2, Cat3, Cat4]
## For each env exclusion category, erase the env category using the previously saved feature class
# NOTE(review): indentation reconstructed — the post-processing below is
# assumed to run once per category (matching the "for the category" comment);
# confirm against the original script.
for cat in envEx_ls:
    for ex in cat:
        ft = inputNAME + cat[ex][0]  # input feature class (previous step's output)
        print(ft)
        outputFile = inputNAME + cat[ex][1]  # output after erasing this layer
        print(outputFile)
        ## erase this exclusion layer's footprint from the suitability polygons
        print("Erasing " + str(ex))
        arcpy.Erase_analysis(ft, ex, outputFile)
    ## Get outputfilename of last element of ordered dictionary for the category
    lastOutput = inputNAME + cat[next(reversed(cat))][1]
    ## convert multipart to singlepart
    print("Converting from multipart to singlepart")
    ft_singlept_file = lastOutput + "_singlepart"
    ft_singlept = arcpy.MultipartToSinglepart_management(in_features = lastOutput, out_feature_class = ft_singlept_file)
    ## recalculate area (add the Area field only if not already present)
    fields = arcpy.ListFields(ft_singlept)
    fieldList = []
    for field in fields:
        fieldList.append(field.name)
    if "Area" not in fieldList:
        print("Adding Area field")
        arcpy.AddField_management(ft_singlept, "Area", "DOUBLE")
    arcpy.CalculateField_management(in_table = ft_singlept, field = "Area", \
                                    expression = "!Shape.Area@squarekilometers!", \
                                    expression_type = "PYTHON_9.3")
    ## select areas greater than the minArea threshold (1 or 2 km2)
    print("selecting ")
    ft_singlept_select = arcpy.Select_analysis(ft_singlept,\
                                               ft_singlept_file + selectSuffix, \
                                               '"Area" >= ' + str(minArea))
    ## Create raster of capacity factors for each category
    print("Extracting by mask")
    outExtractByMask = ExtractByMask(CFraster, ft_singlept_select)
    outExtractByMask.save(ft_singlept_file + selectSuffix + "_rast")
    print("Done: select min area " + ft_singlept_file + selectSuffix)
'''
#############################################################################################
##----------RUN SCRIPT TOOL B STAGE 2: CREATE CANDIDATE PROJECT AREAS----------------------##
#############################################################################################
PURPOSE: Takes output of above site suitability maps and creates Candidate Project Area (CPAs)
By breaking up the polygons into project-sized polygons.
'''
# Import custom toolbox
#arcpy.ImportToolbox("F:\\MapRE_misc\\REzoningGIStools_allVersions\\REzoningGIStools_v1_4\\REzoning_models.tbx", "scriptToolBStage2CreateProjectAreas")
## alias: REzoningModels
# Run tool in the custom toolbox. The tool is identified by
# the tool name and the toolbox alias for example: arcpy.scriptToolBStage2CreateProjectAreas_REzoningModelss(arguments)
## the above gives a syntax error. dunno why so just copying and pasting the script tool manually here and converting to function (website: mapre.lbl.gov/gis-tools)
def scriptToolB2 (suitableSites,projectsOut,scratch,templateRaster,countryBounds,geoUnits,fishnetSize,fishnetDirectory,whereClauseMax, whereClauseMin, whereClauseMinContArea):
############################################
## Set environments and scratch workspace ##
############################################
# set environments for any raster analyses
arcpy.env.snapRaster = Raster(templateRaster)
arcpy.env.extent = countryBounds
arcpy.env.mask = countryBounds
arcpy.env.cellSize = Raster(templateRaster)
env.workspace = scratch
env.scratchWorkspace = scratch
#################################################
## Check for fishnet file and create if needed ##
#################################################
fishnet = "in_memory/fishnet_" + str(fishnetSize) + "km" ## MUST add .shp if not putting file in gdb (for add field function)
clippedFishnet = fishnetDirectory + "\\"+ "fishnet_" + str(fishnetSize) + "km"
## NOTE(review): this span is the tail of a larger function (its `def` line is
## above this excerpt; it is called as scriptToolB2 below). templateRaster,
## clippedFishnet, fishnet, fishnetSize, countryBounds, geoUnits, suitableSites,
## projectsOut and the whereClause* strings come from that enclosing scope.
## Pipeline: build/reuse a fishnet grid -> split oversized suitability polygons
## by the grid -> remove slivers -> merge back -> write project polygons.
env.outputCoordinateSystem = templateRaster
if not(arcpy.Exists(clippedFishnet)):
# Create the fishnet only if one does not already exist:
print("Creating fishnet " + str(fishnetSize) + " km in size to file: " + fishnet)
extent = Raster(templateRaster).extent
XMin = extent.XMin ## left edge of the template raster
YMin = extent.YMin ## bottom edge
origin = str(XMin) + " " + str(YMin)
YMax = extent.YMax ## top edge
# A point directly above the origin fixes the grid's y-axis orientation:
ycoord = str(XMin) + " " + str(YMax)
# fishnetSize is in km; CreateFishnet takes cell width/height in map units (m)
arcpy.CreateFishnet_management(fishnet, origin, ycoord, \
fishnetSize * 1000,fishnetSize * 1000, '0', '0', "", "NO_LABELS", \
"#", "POLYGON")
fields = arcpy.ListFields(fishnet)
for field in fields:
print(field.name)
# Add a "Text" field to hold an alphanumeric copy of the Object ID:
arcpy.AddField_management(fishnet, "Text", "Text", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Populate it as 'A' + OID so each fishnet cell gets a stable alphanumeric key
arcpy.CalculateField_management(fishnet, "Text", "'A' + str(!OID!)", "PYTHON_9.3", "")
print("Creating country-boundary-clipped fishnet " + str(fishnetSize) + " km in size to file: " + clippedFishnet)
arcpy.Clip_analysis(fishnet, countryBounds, clippedFishnet)
print("Copying fishnet to memory :" + clippedFishnet)
fishnetInMemory = arcpy.CopyFeatures_management(clippedFishnet, "in_memory/clipped_fishnet")
# Temporary in_memory paths for intermediate geoprocessing results:
IntermediateIntersect_geoUnits = "in_memory/IntermediateIntersect_geoUnits"
Intermediate = "in_memory/intermediate_2"
IntermediateErased = "in_memory/intermediateErased_2"
IntermediateIntersect = "in_memory/IntermediateIntersect_2"
IntermediateIntersect_singlept = "in_memory/IntermediateIntersect_singlept"
#IntermediateAggregatedFeatures = "in_memory/IntermediateAggregatedFeatures_2"
#IntermediateIntersectErased = "in_memory/IntermediateIntersectErased_2"
IntermediateEliminated = "in_memory/IntermediateEliminated"
IntermediateEliminated2 = "in_memory/IntermediateEliminated2"
#IntermediateSelectedForAggregation1 = "in_memory/IntermediateSelectedForAggregation1_2"
#IntermediateSelectedForAggregation2 = "in_memory/IntermediateSelectedForAggregation2_2"
#IntermediateIntersect_geoUnits_2 = "in_memory/IntermediateIntersect_geoUnits_2"
###############
## Intersect ##
###############
## Copy the suitable-sites feature class to memory for faster processing
sites = arcpy.CopyFeatures_management(suitableSites, "in_memory/suitableSites")
## Intersect with the geographic units of analysis, if that dataset exists
if arcpy.Exists(geoUnits):
print("Intersecting by geographic units of analysis")
arcpy.Intersect_analysis([sites, geoUnits], IntermediateIntersect_geoUnits, "NO_FID")
else:
IntermediateIntersect_geoUnits = sites
# Add and populate an "Area" field (square kilometers):
arcpy.AddField_management(IntermediateIntersect_geoUnits, "Area", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Process: Calculate Field
arcpy.CalculateField_management(IntermediateIntersect_geoUnits, "Area", "!Shape.Area@squarekilometers!", "PYTHON_9.3", "")
# Select polygons greater than the max area; these will be split by the fishnet
arcpy.Select_analysis(IntermediateIntersect_geoUnits, Intermediate, whereClauseMax)
# Erase the selected areas from the potential sites (isolates all polygons
# smaller than the max, which are merged back in later)
arcpy.Erase_analysis(IntermediateIntersect_geoUnits, Intermediate, IntermediateErased)
# Split the above-max-area regions by intersecting with the fishnet
print("Intersecting by fishnet")
arcpy.Intersect_analysis([Intermediate, fishnetInMemory], IntermediateIntersect, "NO_FID")
print("finished intersecting by fishnet")
# Recalculate area after the split
arcpy.CalculateField_management(IntermediateIntersect, "Area", "!Shape.Area@squarekilometers!", "PYTHON_9.3", "")
################################
## Create singlepart polygons ##
################################
## Multi-part to single part
arcpy.MultipartToSinglepart_management(in_features = IntermediateIntersect, out_feature_class = IntermediateIntersect_singlept)
## Recalculate area again (the singlepart explosion changes per-feature areas)
arcpy.CalculateField_management(IntermediateIntersect_singlept, "Area", "!Shape.Area@squarekilometers!", "PYTHON_9.3", "")
###############################
## Eliminate slivers - twice ##
###############################
print("Starting elimination")
# Pass 1: make a feature layer (Eliminate requires a layer, not a dataset)
tempLayer = arcpy.MakeFeatureLayer_management(IntermediateIntersect_singlept, "tempLayer")
# Select the sub-minimum-area features to be eliminated
arcpy.SelectLayerByAttribute_management(in_layer_or_view = tempLayer, selection_type= "NEW_SELECTION" , where_clause = whereClauseMin)
# Merge each selected sliver into the neighbor sharing the longest border
arcpy.Eliminate_management("tempLayer", IntermediateEliminated, "LENGTH")
## Pass 2: a second Eliminate catches slivers remaining after the first pass
# Execute MakeFeatureLayer
IntermediateEliminated_tempLayer = arcpy.MakeFeatureLayer_management(IntermediateEliminated, "IntermediateEliminated")
# Execute SelectLayerByAttribute to define features to be eliminated
arcpy.SelectLayerByAttribute_management(in_layer_or_view = IntermediateEliminated_tempLayer, selection_type= "NEW_SELECTION" , where_clause = whereClauseMin)
# Execute Eliminate
arcpy.Eliminate_management(IntermediateEliminated_tempLayer, IntermediateEliminated2, "LENGTH")
################################################
## Merge aggregated with intersected features ##
################################################
# Merge the small (never-split) polygons back with the fishnet-split polygons
merged = arcpy.Merge_management([IntermediateErased, IntermediateEliminated2], "in_memory/intermediateProjects")
## AGAIN, intersect with the geographic units of analysis, if provided
if arcpy.Exists(geoUnits):
print("Intersecting by geographic units of analysis")
arcpy.Intersect_analysis([merged, geoUnits], IntermediateIntersect_geoUnits , "NO_FID")
print("Finished intersecting by geographic units of analysis")
else:
IntermediateIntersect_geoUnits = merged
# Recalculate area on the merged result
arcpy.CalculateField_management(IntermediateIntersect_geoUnits, "Area", "!Shape.Area@squarekilometers!", "PYTHON_9.3", "")
# Keep only polygons above the minimum contiguous area -> final project feature class
arcpy.Select_analysis(IntermediateIntersect_geoUnits, projectsOut, whereClauseMinContArea)
## Process: Summary Statistics (left disabled)
## arcpy.Statistics_analysis(selectOut, outputFGDB + filename + '_stats', "Area SUM", "") ## CREATE PROJECT STATS
print('Finished merging')
#############################################################
## APPLY scriptToolB2 FUNCTION TO SITE SUITABILITY OUTPUTS ##
#############################################################
## Per-technology configuration:
##   ft_ls       : category name -> site-suitability feature class name
##   zoneType_ls : zone type -> [zone-boundary feature class, output suffix]
## SOLAR:
if tech == "solar":
ft_ls = {"Cat1" : "solarPV_0_0_nonEnv_r1_cat1b_singlepart_gt1km2",\
"Cat2" : "solarPV_0_0_nonEnv_r1_cat2f_singlepart_gt1km2",\
"Cat3" : "solarPV_0_0_nonEnv_r1_cat3c_singlepart_gt1km2", \
"Cat4": "solarPV_0_0_nonEnv_r1_cat4_singlepart_gt1km2"}
zoneType_ls = {"state": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\stateBound_baja"), "_PA_state"], \
"OOS_RESOLVEZONE": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\QRA_proj_RESOLVE_ZONE_solar"), "_PA_OOS_RESOLVEZONE"], \
"CA_RESOLVEZONE": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\SUPERCREZ_proj_CA_RESOLVE_ZONE"), "_PA_CA_RESOLVEZONE"]}
existingPlants = existingSolar
# fishnetWidth is in km; maxAreaAgg/minAreaAgg are km^2 thresholds fed into the
# where clauses below (Area field is calculated in square kilometers)
fishnetWidth = "2"
maxAreaAgg = "4"
minAreaAgg = "1"
## WIND:
if tech == "wind":
ft_ls = {"Cat1" : "wind_0_03_nonEnv_r3_cat1b_singlepart_gt1km2",\
"Cat2" : "wind_0_03_nonEnv_r3_cat2f_singlepart_gt1km2",\
"Cat3" : "wind_0_03_nonEnv_r3_cat3c_singlepart_gt1km2", \
"Cat4": "wind_0_03_nonEnv_r3_cat4_singlepart_gt1km2"}
zoneType_ls = {"state": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\stateBound_baja"), "_PA_state"], \
"OOS_RESOLVEZONE": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\QRA_proj_RESOLVE_ZONE_Wind"), "_PA_OOS_RESOLVEZONE"], \
"CA_RESOLVEZONE": [os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb\\SUPERCREZ_proj_CA_RESOLVE_ZONE"), "_PA_CA_RESOLVEZONE"]}
existingPlants = existingWind
fishnetWidth = "3"
maxAreaAgg = "9"
minAreaAgg = "1"
## APPLY FUNCTION: loop through each category and zone type to create CPA feature classes
for cat in ft_ls:
print("")
print("")
print(cat)
for zoneType in zoneType_ls:
print("")
inputNAME = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat])
outputNAME = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat] + zoneType_ls[zoneType][1])
## Erase existing wind and solar projects out of state
if zoneType == "OOS_RESOLVEZONE" or zoneType == "state":
print(" Erasing existing power plants")
inputNAME_erasedExisting = arcpy.Erase_analysis(inputNAME, existingPlants, "in_memory/erasedExisting")
## if it's CA, then don't erase
else:
print(" NOT erasing existing plants")
inputNAME_erasedExisting = inputNAME
print(" Working on " + outputNAME)
# NOTE(review): whereClauseMin below writes Area without the double quotes the
# other two clauses use — confirm both quoting styles are accepted downstream.
scriptToolB2(suitableSites= inputNAME_erasedExisting, \
projectsOut = outputNAME, \
scratch = scratch, \
templateRaster= os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb/SRTM_W_250m_proj_cl"), \
countryBounds= os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb/WECC_USboundaries"), \
geoUnits= zoneType_ls[zoneType][0], \
fishnetSize=int(fishnetWidth), \
fishnetDirectory= os.path.join(mainInputFolder, "siteSuitabilityInputs_nonEnv.gdb"), \
whereClauseMax='"Area" > ' + str(maxAreaAgg), \
whereClauseMin = 'Area < ' + str(minAreaAgg), \
whereClauseMinContArea = '"Area" > ' + str("1"))
print(" Finished")
'''
#####################################################################################################################################
##----------CALCULATE OOS AVG CF AND TOTAL MW BY RESOLVE ZONE (e.g., Wyoming_Wind) AND QRA (e.g., WY_SO, WY_NO) -------------------###
#####################################################################################################################################
PURPOSE: Takes CPAs (previous output) and creates a CSV supply curve within RESOLVE ZONEs and QRAs (Constrained)
'''
## Function to calculate a supply curve for one suitability category.
## Inputs:
##   inFeature          - CPA polygon feature class (modified in place: fields deleted/added)
##   inRaster           - capacity-factor resource raster
##   inFeatureFileName  - name used only for progress messages
##   category           - e.g. "Cat1"; suffixed onto every output field name
##   QRAs               - QRA polygon feature class with a "Name" field
##   RESOLVE_ZONE_FIELD - field on QRAs mapping each QRA to its RESOLVE zone
## Returns (stateAvgCFMW_df, QRAavgCFMW_df): pandas DataFrames of per-RESOLVE-zone
## and per-QRA average CF / total MW. Relies on module-level LUF (MW per km^2),
## and on the file-level arcpy / pandas / re imports.
def calcSupplyCurve(inFeature, inRaster, inFeatureFileName, category, QRAs, RESOLVE_ZONE_FIELD):
## Delete the RESOLVE_ZONE_FIELD and "Name" fields if they already exist
## (left over from a previous run; they get re-added by the joins below):
fields = arcpy.ListFields(inFeature)
fieldList = []
for field in fields:
fieldList.append(field.name)
if RESOLVE_ZONE_FIELD in fieldList:
print("Deleting existing field: " + RESOLVE_ZONE_FIELD)
arcpy.DeleteField_management(inFeature, RESOLVE_ZONE_FIELD)
if "Name" in fieldList:
print("Deleting existing field: " + "Name")
arcpy.DeleteField_management(inFeature, "Name")
############ CF CALC ##############
## Get average CAPACITY FACTOR per QRA; result only contains the Name field from the QRA feature class
## use the resource raster dataset
CFtable = arcpy.sa.ZonalStatisticsAsTable(in_zone_data = QRAs, zone_field ="Name", \
in_value_raster = inRaster, \
out_table = "in_memory/zonalStats_CF", \
ignore_nodata = "DATA", statistics_type = "MEAN")
# Join zonal statistics table of avg CF to QRA file to add the RESOLVE_ZONE_FIELD to the stats table
arcpy.JoinField_management(in_data = CFtable, in_field = "Name", join_table = QRAs, \
join_field = "Name", fields = [RESOLVE_ZONE_FIELD])
# Rename field MEAN to CF_avg_<category>
arcpy.AlterField_management(in_table = CFtable, field = "MEAN", new_field_name = "CF_avg_" + category)
############ AREA CALC ##############
## calculating the area will require the feature class
## Clip suitable sites polygons using QRAs
QRAclip = arcpy.Clip_analysis(in_features = inFeature, clip_features = QRAs, \
# out_feature_class = inFeature + "_QRAclip")
out_feature_class = "in_memory/QRAclip")
print("Finished clipping QRA for " + inFeatureFileName)
## Calculate Area (geometry, km^2) of resources within QRAs
arcpy.CalculateField_management(in_table = QRAclip, field = "Area", \
expression = "!Shape.Area@squarekilometers!", expression_type = "PYTHON_9.3")
## Spatial join of clipped polygons with QRAs to add Name and RESOLVE_ZONE_FIELD to clipped polygons
QRAclip_joined = arcpy.SpatialJoin_analysis(target_features = QRAclip, join_features = QRAs, \
#out_feature_class = "in_memory/"+ inFeature + "_QRAclip" + "_QRAjoined", \
out_feature_class = "in_memory/QRAclip" + "_QRAjoined", \
join_operation = "JOIN_ONE_TO_ONE", join_type = "KEEP_ALL", \
match_option = "INTERSECT")
print("Finished spatial join of QRA fields to feature class for " + inFeatureFileName)
## summary statistics to get the total area per QRA ("Name") first;
## the resultant field in the table is automatically named "SUM_Area"
areaQRAtable = arcpy.Statistics_analysis(in_table = QRAclip_joined, \
out_table = "in_memory/sumStats_Area_QRA", \
statistics_fields = [["Area", "SUM"]], case_field = [RESOLVE_ZONE_FIELD, "Name"])
# Rename field SUM_Area to Area_<category>
arcpy.AlterField_management(in_table = areaQRAtable, field = "SUM_Area", new_field_name = "Area_" + category, \
new_field_alias = "Area_" + category)
## Calculate capacity of each QRA from area (LUF is MW per km^2)
arcpy.AddField_management(areaQRAtable, "cap_MW_" + category, "DOUBLE")
arcpy.CalculateField_management(in_table = areaQRAtable, field = "cap_MW_" + category, \
expression = "!Area_" + category + "!*" + str(LUF), expression_type = "PYTHON_9.3")
## Calculate capacity for each RESOLVE_ZONE using summary stats on the QRA table (which has QRA and RESOLVE_ZONE_FIELD fields)
areaRZ_table = arcpy.Statistics_analysis(in_table = areaQRAtable, \
out_table = "in_memory/sumStats_Area_RZ", \
statistics_fields = [["Area_" + category, "SUM"],["cap_MW_" + category, "SUM"]], case_field = [RESOLVE_ZONE_FIELD])
# Rename the SUM_* fields to their zone-level names
arcpy.AlterField_management(in_table = areaRZ_table, field = "SUM_cap_MW_" + category, \
new_field_name = "cap_MW_RESOLVE_ZONE_" + category, \
new_field_alias = "cap_MW_RESOLVE_ZONE_" + category)
arcpy.AlterField_management(in_table = areaRZ_table, field = "SUM_Area_" + category, \
new_field_name = "Area_" + category, \
new_field_alias = "Area_" + category)
## join table back to areaQRAtable to add the zone-level capacity to the QRA table
arcpy.JoinField_management(in_data = areaQRAtable, in_field = RESOLVE_ZONE_FIELD, join_table = areaRZ_table, \
join_field = RESOLVE_ZONE_FIELD, fields = ["cap_MW_RESOLVE_ZONE_" + category])
## Calculate capacity-weighted average CF per RESOLVE ZONE:
## sum(capacity(QRA)/capacity(zone) * CF) over the QRAs within each zone
## join areaQRAtable with the CF table using Name to get the CF_avg field in the main table
arcpy.JoinField_management(in_data = areaQRAtable, in_field = "Name", join_table = CFtable, \
join_field = "Name", fields = ["CF_avg_" + category])
## Calculate new field that is the (capacity(QRA)/capacity(zone) * CF) weight term
arcpy.AddField_management(areaQRAtable, "QRApropMW_" + category, "DOUBLE")
arcpy.CalculateField_management(in_table = areaQRAtable, field = "QRApropMW_" + category, \
expression = "(!cap_MW_" + category + "!/!cap_MW_RESOLVE_ZONE_" + category + "!)*!CF_avg_" + category + "!", \
expression_type = "PYTHON_9.3")
## sum the weight terms per zone -> capacity-weighted average CF
avgCF_byQRA_RZ = arcpy.Statistics_analysis(in_table = areaQRAtable, \
out_table = "in_memory/avgCF_byQRA_RZ", \
statistics_fields = [["QRApropMW_" + category, "SUM"]], case_field = [RESOLVE_ZONE_FIELD])
arcpy.AlterField_management(in_table = avgCF_byQRA_RZ, field = "SUM_QRApropMW_" + category, \
new_field_name = "CF_avg_RESOLVE_ZONE_" + category, \
new_field_alias = "CF_avg_RESOLVE_ZONE_" + category)
## join RESOLVE_ZONE total MW and area onto the zone-average-CF table:
arcpy.JoinField_management(in_data = avgCF_byQRA_RZ, \
in_field = RESOLVE_ZONE_FIELD, join_table = areaRZ_table, \
join_field = RESOLVE_ZONE_FIELD, fields = ["Area_" + category, "cap_MW_RESOLVE_ZONE_" + category])#fields = ["CF_avg_RESOLVE_ZONE_" + category])
########################################
## CONVERT ARCPY TABLES TO PANDAS DFs ##
########################################
##### RESOLVE ZONE AVERAGES
fields = arcpy.ListFields(avgCF_byQRA_RZ)
fieldList = []
for field in fields:
fieldList.append(field.name)
# Keep only the analysis fields (drops OBJECTID etc.)
pattern = r'RESOLVE_ZONE_\S|cap_MW_\S|Area_\S|CF_avg_\S'
fieldList = [x for x in fieldList if re.search(pattern, x)]
## convert gdb table to numpy array to Pandas DF:
stateAvgCFMW_df = pandas.DataFrame(arcpy.da.TableToNumPyArray(avgCF_byQRA_RZ, fieldList))
##### QRA AVERAGES
fields = arcpy.ListFields(areaQRAtable)
fieldList = []
for field in fields:
fieldList.append(field.name)
fieldList.remove("cap_MW_RESOLVE_ZONE_" + category) ## remove extra field
pattern = r'RESOLVE_ZONE_\S|cap_MW_\S|Area_\S|CF_avg_\S|Name'
fieldList = [x for x in fieldList if re.search(pattern, x)]
## convert gdb table to numpy array to Pandas DF:
QRAavgCFMW_df = pandas.DataFrame(arcpy.da.TableToNumPyArray(areaQRAtable, fieldList))
print("Finished processing " + inFeatureFileName)
return stateAvgCFMW_df, QRAavgCFMW_df
###################################################
## APPLY calcSupplyCurve FUNCTION TO CPA OUTPUTS ##
###################################################
## ft_ls maps category -> [PA feature class name, resource raster base name]
## SOLAR:
if tech == "solar":
ft_ls = {"Cat1" : ["solarPV_0_0_nonEnv_r1_cat1b_singlepart_gt1km2_PA_OOS_RESOLVEZONE", "solarPV_0_0_nonEnv_r1_cat1b_singlepart_gt1km2"],\
"Cat2" : ["solarPV_0_0_nonEnv_r1_cat2f_singlepart_gt1km2_PA_OOS_RESOLVEZONE", "solarPV_0_0_nonEnv_r1_cat2f_singlepart_gt1km2"],\
"Cat3" : ["solarPV_0_0_nonEnv_r1_cat3c_singlepart_gt1km2_PA_OOS_RESOLVEZONE", "solarPV_0_0_nonEnv_r1_cat3c_singlepart_gt1km2"], \
"Cat4": ["solarPV_0_0_nonEnv_r1_cat4_singlepart_gt1km2_PA_OOS_RESOLVEZONE", "solarPV_0_0_nonEnv_r1_cat4_singlepart_gt1km2"]}
LUF = 30 # land-use factor, MW per km^2 (multiplied by the Area field, which is in km^2)
RESOLVE_ZONE_FIELD_param = "RESOLVE_ZONE_solar"
## WIND:
if tech == "wind":
ft_ls = {"Cat1" : ["wind_0_03_nonEnv_r3_cat1b_singlepart_gt1km2_PA_OOS_RESOLVEZONE", "wind_0_03_nonEnv_r3_cat1b_singlepart_gt1km2"],\
"Cat2" : ["wind_0_03_nonEnv_r3_cat2f_singlepart_gt1km2_PA_OOS_RESOLVEZONE","wind_0_03_nonEnv_r3_cat2f_singlepart_gt1km2"],\
"Cat3" : ["wind_0_03_nonEnv_r3_cat3c_singlepart_gt1km2_PA_OOS_RESOLVEZONE","wind_0_03_nonEnv_r3_cat3c_singlepart_gt1km2"], \
"Cat4": ["wind_0_03_nonEnv_r3_cat4_singlepart_gt1km2_PA_OOS_RESOLVEZONE","wind_0_03_nonEnv_r3_cat4_singlepart_gt1km2"]}
LUF = 6.1 # land-use factor, MW per km^2
RESOLVE_ZONE_FIELD_param = "RESOLVE_ZONE_wind"
## Create output lists to append to
stateAvgCFMW_ls = []
QRAavgCFMW_ls = []
## loop function over list of inputs
for cat in ft_ls:
stateAvgCFMW, QRAavgCFMW = calcSupplyCurve(inFeature = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat][0]), \
inRaster = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat][1] + "_rast"), \
inFeatureFileName = ft_ls[cat][0], \
category = cat, \
QRAs = QRAfilePath,\
RESOLVE_ZONE_FIELD = RESOLVE_ZONE_FIELD_param)
## append output list
stateAvgCFMW_ls.append(stateAvgCFMW)
QRAavgCFMW_ls.append(QRAavgCFMW)
## MERGE TABLES (outer-join the four category DataFrames on the zone key)
# StateAvg: this table reports the average CF and total MW of resources within all QRAs in each state, or otherwise RESOLVE ZONE, e.g., "Wyoming_Wind"
stateAvgCFMW_merged = pandas.merge(stateAvgCFMW_ls[0], stateAvgCFMW_ls[1], how= 'outer', on = RESOLVE_ZONE_FIELD_param)
for tab in [stateAvgCFMW_ls[2], stateAvgCFMW_ls[3]]:
stateAvgCFMW_merged = pandas.merge(stateAvgCFMW_merged, tab, how= 'outer', on = RESOLVE_ZONE_FIELD_param)
# QRAavg: this table reports the average CF and total MW of resources within each QRA, e.g., WY_NO or WY_SO
QRAavgCFMW_merged = pandas.merge(QRAavgCFMW_ls[0], QRAavgCFMW_ls[1], how= 'outer',on = "Name")
for tab in [QRAavgCFMW_ls[2],QRAavgCFMW_ls[3]]:
QRAavgCFMW_merged = pandas.merge(QRAavgCFMW_merged, tab, how= 'outer',on = "Name")
## SAVE TO CSV
# This one will be used in the supply curve
stateAvgCFMW_merged.to_csv(os.path.join(mainOutputFolder, supplyCurveFolder, tech + "_OOS_RESOLVEZONE_avgCFMW_PA.csv"))
# This one will not be used (just informational)
QRAavgCFMW_merged.to_csv(os.path.join(mainOutputFolder, supplyCurveFolder, tech + "_OOS_QRA_avgCFMW_PA.csv"))
'''
############################################################################################
## Create supply curves for state-wide (outside of QRAs) or CA RESOLVE ZONE MW and Avg CF ##
############################################################################################
PURPOSE: Takes CPAs (previous output) and creates a CSV supply curve within state boundaries (Unconstrained)
Uses same method as calcSupplyCurve for getting average CF and total MW per SuperCREZ
'''
## Compute total MW and average CF per zone for one suitability category.
## Same idea as calcSupplyCurve, but aggregates over whole zone polygons
## (states or CA RESOLVE zones) instead of QRAs, and returns a single
## pandas DataFrame. inFeature is modified in place (fields deleted).
## Relies on module-level LUF (MW per km^2) and file-level imports.
def calcSupplyCurve_state(inFeature, inRaster, inFeatureFileName, category, zonesFile, zoneField):
## Delete zoneField / "Name" if they already exist from a previous run:
fields = arcpy.ListFields(inFeature)
fieldList = []
for field in fields:
fieldList.append(field.name)
if zoneField in fieldList:
print("Deleting existing field: " + zoneField)
arcpy.DeleteField_management(inFeature, zoneField)
if "Name" in fieldList:
print("Deleting existing field: " + "Name")
arcpy.DeleteField_management(inFeature, "Name")
#############################
## CALC TOTAL MW PER STATE ##
#############################
## intersect to break up the features by zone polygon
inFeature_stateInt = arcpy.Intersect_analysis(in_features = [inFeature, zonesFile], out_feature_class = "in_memory/stateInt")
print("Finished intersect for " + inFeatureFileName)
## ReCalculate Area (geometry, km^2)
arcpy.CalculateField_management(in_table = inFeature_stateInt, field = "Area", \
expression = "!Shape.Area@squarekilometers!", expression_type = "PYTHON_9.3")
## summary statistics to get the total area per zone; the resultant field in the table is "SUM_Area"
areaTable = arcpy.Statistics_analysis(in_table = inFeature_stateInt, \
out_table = "in_memory/inFeature_stateInt_AreaCalc", \
statistics_fields = [["Area", "SUM"]], case_field = [zoneField])
## Rename field from SUM_Area to Area_<category>
arcpy.AlterField_management(in_table = areaTable, field = "SUM_Area", new_field_name = "Area_" + category, \
new_field_alias = "Area_" + category)
## Calculate capacity from area (LUF is MW per km^2)
arcpy.AddField_management(areaTable, "cap_MW_" + category, "DOUBLE")
arcpy.CalculateField_management(in_table = areaTable, field = "cap_MW_" + category, \
expression = "!Area_" + category + "!*" + str(LUF), expression_type = "PYTHON_9.3")
###########################
## CALC AVG CF PER STATE ##
###########################
## Get average CAPACITY FACTOR per zone
## use the resource raster dataset
CFtable = arcpy.sa.ZonalStatisticsAsTable(in_zone_data = zonesFile, zone_field = zoneField, \
in_value_raster = inRaster, \
out_table = "in_memory/zonalStats_CF", \
ignore_nodata = "DATA", statistics_type = "MEAN")
# Rename the MEAN field to CF_avg_<category>
arcpy.AlterField_management(in_table = CFtable, field = "MEAN", new_field_name = "CF_avg_" + category)
#####################################
## Join MW, Area and CF_avg fields ##
#####################################
arcpy.JoinField_management(in_data = CFtable, in_field = zoneField, join_table = areaTable, \
join_field = zoneField, fields = ["Area_" + category, "cap_MW_" + category])
########################################
## CONVERT ARCPY TABLES TO PANDAS DFs ##
########################################
fields = arcpy.ListFields(CFtable)
fieldList = []
for field in fields:
fieldList.append(field.name)
# Keep only the analysis fields plus any of the recognized zone-name fields
pattern = r'CF_avg_\S|Area_\S|cap_MW_\S|NAME|RESOLVE_ZONE|STPOSTAL'
fieldList = [x for x in fieldList if re.search(pattern, x)]
## convert gdb table to numpy array to Pandas DF:
df = pandas.DataFrame(arcpy.da.TableToNumPyArray(CFtable, fieldList))
print("FINISHED processing " + inFeatureFileName )
return df
#########################################################
## APPLY calcSupplyCurve_state FUNCTION TO CPA OUTPUTS ##
#########################################################
## List of inputs to loop over (category -> feature class base name)
if tech == "solar":
ft_ls = {"Cat1" : "solarPV_0_0_nonEnv_r1_cat1b_singlepart_gt1km2",\
"Cat2" : "solarPV_0_0_nonEnv_r1_cat2f_singlepart_gt1km2",\
"Cat3" : "solarPV_0_0_nonEnv_r1_cat3c_singlepart_gt1km2", \
"Cat4": "solarPV_0_0_nonEnv_r1_cat4_singlepart_gt1km2"}
LUF = 30 # land-use factor, MW per km^2
statesFileFieldName = "RESOLVE_ZONE_solar"
if tech == "wind":
ft_ls = {"Cat1" : "wind_0_03_nonEnv_r3_cat1b_singlepart_gt1km2",\
"Cat2" : "wind_0_03_nonEnv_r3_cat2f_singlepart_gt1km2",\
"Cat3" : "wind_0_03_nonEnv_r3_cat3c_singlepart_gt1km2", \
"Cat4": "wind_0_03_nonEnv_r3_cat4_singlepart_gt1km2"}
LUF = 6.1 # land-use factor, MW per km^2
statesFileFieldName = "RESOLVE_ZONE_wind"
## zone type -> [zone feature class, zone-name field, output CSV suffix]
zoneType_ls = {"_PA_state": [statesFilePath, statesFileFieldName, "_OOS_state_avgCFMW_PA.csv"], \
"_PA_CA_RESOLVEZONE": [SuperCREZ, "RESOLVE_ZONE","_CA_RESOLVEZONE_avgCFMW_PA.csv"]}
## APPLY FUNCTION IN LOOP
## for each zone type (california RESOLVE Zones or OOS wall-to-wall/state)
for zone in zoneType_ls:
## output list to append
stateAvgCFMW_w2w_ls = []
inFeatureSuffix = zone ## "_PA_CA_RESOLVEZONE" or "_PA_state"
zonesFileName = zoneType_ls[zone][0] ## feature class with boundaries: statesFilePath (for states) or SuperCREZ (for superCREZ or RESOLVE_ZONES in CA)
zoneFieldName = zoneType_ls[zone][1] ## fieldname with the zone attribute: NAME (aggregate by superCREZ) or RESOLVE_ZONE (aggregate by RESOLVE_ZONE) or "STPOSTAL" (for states)
csvSuffix = zoneType_ls[zone][2] ## "_CA_RESOLVEZONE_avgCFMW.csv" (superCREZ) or "_CA_superCREZ_avgCFMW.csv" (superCREZ) or "_OOS_state_avgCFMW.csv" (state averages)
## loop function over list of inputs
for cat in ft_ls:
stateAvgCFMW_w2w = calcSupplyCurve_state(inFeature = os.path.join(mainOutputFolder,gdbFileName, ft_ls[cat] + inFeatureSuffix), \
inRaster = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat] + "_rast"), \
inFeatureFileName = ft_ls[cat] + inFeatureSuffix, \
category = cat, \
zonesFile = zonesFileName,\
zoneField = zoneFieldName)
## append output list
stateAvgCFMW_w2w_ls.append(stateAvgCFMW_w2w)
## MERGE TABLES (outer-join the four category DataFrames on the zone field)
# StateAvg
stateAvgCFMW_w2w_merged = pandas.merge(stateAvgCFMW_w2w_ls[0], stateAvgCFMW_w2w_ls[1], how= 'outer', on = zoneFieldName)
for tab in [stateAvgCFMW_w2w_ls[2], stateAvgCFMW_w2w_ls[3]]:
stateAvgCFMW_w2w_merged = pandas.merge(stateAvgCFMW_w2w_merged, tab, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
stateAvgCFMW_w2w_merged.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, tech + csvSuffix), index = False)
'''
########################################
## OPTIONAL: COMPARE WITH ORB RESULTS ##
########################################
'''
## append column with RESOLVE_ZONE from the SuperCREZ attribute table
## (uses zoneFieldName / stateAvgCFMW_w2w_merged from the preceding loop)
fields = arcpy.ListFields(os.path.join(SuperCREZ))
fieldList = []
for field in fields:
fieldList.append(field.name)
pattern = r'NAME|RESOLVE_ZONE'
fieldList = [x for x in fieldList if re.search(pattern, x)]
## convert gdb table to numpy array to Pandas DF:
df_superCREZ = pandas.DataFrame(arcpy.da.TableToNumPyArray(SuperCREZ, fieldList))
stateAvgCFMW_w2w_merged2 = pandas.merge(df_superCREZ, stateAvgCFMW_w2w_merged, how= 'outer', on = zoneFieldName)
# NOTE(review): hardcoded user-specific path — parameterize before reuse.
df_ORB = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\ORBvisualization\RPSCalculator6_relatedMaterials\allTech_PotentialAreas_RPScalc_withFreshwater_copiedForPTOcomparison.csv")
# NOTE(review): the regex patterns below are non-raw strings containing \S
# (invalid escape -> DeprecationWarning; the backslash is kept, so \S matches
# regex "non-whitespace"). Prefix with r'' and confirm \S is intended.
df_PV = df_ORB.filter(regex=('\SPV\S|\SPV|PV\S|NAME'), axis =1)
df_wind = df_ORB.filter(regex=('\Swind\S|\Swind|Wind\S|NAME'), axis =1)
stateAvgCFMW_w2w_merged_wind = pandas.merge(stateAvgCFMW_w2w_merged2, df_wind, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
## NOTE(review): this wind CSV and the PV CSV below are written to the SAME
## filename, so the wind output is overwritten by the PV output.
stateAvgCFMW_w2w_merged_wind.to_csv(path_or_buf = os.path.join(mainOutputFolder, tech + "__CAavgCFMW_w2w_superCREZ_allCat_withORB.csv"), index = False)
stateAvgCFMW_w2w_merged_PV = pandas.merge(stateAvgCFMW_w2w_merged2, df_PV, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
stateAvgCFMW_w2w_merged_PV.to_csv(path_or_buf = os.path.join(mainOutputFolder, tech + "__CAavgCFMW_w2w_superCREZ_allCat_withORB.csv"), index = False)
'''
###########################################################
## GET STATE-WIDE MW FOR GEOTHERMAL WITHIN RESOLVE ZONES ##
###########################################################
PURPOSE: Geothermal resources are point locations, not polygons, which requires a different set of analyses
Does everything above for wind and solar, but for geothermal
'''
##################################################
## CREATE SITE SUITABILITY AREAS FOR GEOTHERMAL ##
##################################################
## Geothermal environmental exclusion categories.
## Each OrderedDict maps an exclusion dataset path -> [input suffix, output
## suffix]; the suffixes chain (output of one erase is the input of the next),
## so insertion order matters — hence OrderedDict (from the file-level
## collections import).
Cat1 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat1_solar\\Cat1_u_d_s.shp"), ["", "_cat1a"]),\
(os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat1_easements_proj.shp"), ["_cat1a", "_cat1b"])])
Cat2 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat2\\both_p1\\Both_p1.shp"), ["_cat1b", "_cat2aa"]),\
(os.path.join(mainInputFolder, "envData\\Cat2\\both_p2\\Both_p2.shp"), ["_cat2aa", "_cat2b"]),\
(os.path.join(mainInputFolder, "envData\\Cat2\\geothermal_p1_p2\\Geothermal_p1.shp"), ["_cat2b", "_cat2c"]),\
(os.path.join(mainInputFolder, "envData\\Cat2\\geothermal_p1_p2\\Geothermal_p2.shp"), ["_cat2c", "_cat2d"]),\
(os.path.join(mainInputFolder, "envData\\Cat2\\0045_AHPRC_Cat2\\0045_AHPRC\\data\\v101\\nps_identified_high_potential_for_resource_conflict.gdb\\NPS_AHPRC"), ["_cat2d", "_cat2e"]),\
(os.path.join(mainInputFolder, "envData\\tnc_lands_cat1_2\\tnc_lands_cat2_feeAreas_proj.shp"), ["_cat2e", "_cat2f"])])
Cat3 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat3\\Cat3_geotherm_excl_base_proj.shp"), ["_cat2f", "_cat3"])])
Cat4 = collections.OrderedDict([(os.path.join(mainInputFolder, "envData\\Cat4\\Cat4_u_d_s_proj.shp"), ["_cat3", "_cat4"])])
gdbFileName = "070618_resourceAssessment.gdb"
inputNAME = os.path.join(mainOutputFolder, gdbFileName, "geothermal")
envEx_ls = [Cat1, Cat2, Cat3, Cat4]
## loop over each set of environmental exclusions for each category,
## erasing each exclusion dataset from the running geothermal feature class
for cat in envEx_ls:
for ex in cat:
ft = inputNAME + cat[ex][0]
print(ft)
outputFile = inputNAME + cat[ex][1]
print(outputFile)
## erase the exclusion area from the current suitability feature class
print("Erasing " + str(ex))
arcpy.Erase_analysis(ft, ex, outputFile)
########################################
## CREATE SUPPLY CURVE FOR GEOTHERMAL ##
########################################
## Compute total MW per zone for geothermal POINT features (each point carries
## its own "MW" field, so no area/LUF calculation is needed). Returns a pandas
## DataFrame of cap_MW_<category> per zone.
## NOTE(review): the zonesToSelect parameter is accepted but never used in the
## body — callers pass selection strings that have no effect.
## zonesType is used only to name the persisted spatial-join output.
def calcSupplyCurve_geothermal(inFeature, inFeatureFileName, category, zonesFile, zonesToSelect, zonesType, zoneField):
#############################
## CALC TOTAL MW PER STATE ##
#############################
## Spatial join of points with zones (QRAs or SuperCREZs); note the output is
## written alongside inFeature (not in_memory), so it persists between runs
inFeature_joined = arcpy.SpatialJoin_analysis(target_features = inFeature, join_features = zonesFile, \
out_feature_class = inFeature + "_" + zonesType + "Joined", \
join_operation = "JOIN_ONE_TO_ONE", join_type = "KEEP_ALL", match_option = "INTERSECT")
print("Finished spatial join for " + inFeatureFileName)
# Sum the per-point MW field per zone; the result field is named "SUM_MW"
MWtable = arcpy.Statistics_analysis(in_table = inFeature_joined, \
out_table = "in_memory/inFeature_MWCalc", \
statistics_fields = [["MW", "SUM"]], case_field = [zoneField])
## Rename field from SUM_MW to cap_MW_<category>
arcpy.AlterField_management(in_table = MWtable, field = "SUM_MW", new_field_name = "cap_MW_" + category, \
new_field_alias = "cap_MW_" + category)
########################################
## CONVERT ARCPY TABLES TO PANDAS DFs ##
########################################
fields = arcpy.ListFields(MWtable)
fieldList = []
for field in fields:
fieldList.append(field.name)
print(fieldList)
# Keep only the capacity field and the zone field
pattern = r'cap_MW_\S|' + zoneField
fieldList = [x for x in fieldList if re.search(pattern, x)]
print("after selection: ")
print(fieldList)
## convert gdb table to numpy array to Pandas DF:
df = pandas.DataFrame(arcpy.da.TableToNumPyArray(MWtable, fieldList))
print("FINISHED processing " + inFeatureFileName)
return df
##############################################
## APPLY calcSupplyCurve_geothermal TO QRAs ##
##############################################
## List of inputs to loop over (category -> erased geothermal feature class)
ft_ls = {"Cat1" : "geothermal_cat1b",\
"Cat2" : "geothermal_cat2f",\
"Cat3" : "geothermal_cat3", \
"Cat4": "geothermal_cat4"}
tech = "geothermal"
## output list to append
resolveZone_MW_ls = []
gdbFileName = "070618_resourceAssessment.gdb"
zoneFieldName = "RESOLVE_ZONE_geothermal"
## loop function over list of inputs
## (zonesToSelect is ignored by the function — see its definition above)
for cat in ft_ls:
resolveZone_MW = calcSupplyCurve_geothermal(inFeature = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat]), \
inFeatureFileName = ft_ls[cat], \
category = cat, \
zonesFile = QRAfilePath, \
zonesType = "QRA", \
## zoneField = "State"
zoneField = zoneFieldName, \
zonesToSelect = "Name In ('NV_EA', 'NV_WE', 'OR_SO', 'OR_WE')")
## append output list
resolveZone_MW_ls.append(resolveZone_MW)
## MERGE TABLES (outer-join the four category DataFrames on the zone field)
# StateAvg
resolveZone_MW_merged = pandas.merge(resolveZone_MW_ls[0], resolveZone_MW_ls[1], how= 'outer', on = zoneFieldName)
for tab in [resolveZone_MW_ls[2], resolveZone_MW_ls[3]]:
resolveZone_MW_merged = pandas.merge(resolveZone_MW_merged, tab, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
## NOTE(review): writes to a hardcoded "0718_results" folder, unlike the other
## runs which use supplyCurveFolder — confirm this is intentional.
resolveZone_MW_merged.to_csv(path_or_buf = os.path.join(mainOutputFolder, "0718_results", tech + "_OOS_RESOLVEZONE_MW.csv"), index = False)
#####################################################
## APPLY calcSupplyCurve_geothermal TO Super CREZs ##
#####################################################
## output list to append (reuses ft_ls from the QRA run above)
resolveZone_MW_ls = []
gdbFileName = "070618_resourceAssessment.gdb"
zoneFieldName = "RESOLVE_ZONE"
## loop function over list of inputs
## (zonesToSelect is ignored by the function — see its definition above)
for cat in ft_ls:
resolveZone_MW = calcSupplyCurve_geothermal(inFeature = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat]), \
inFeatureFileName = ft_ls[cat], \
category = cat, \
zonesFile = SuperCREZ, \
zonesType = "SuperCREZ", \
zoneField = zoneFieldName, \
zonesToSelect = "NAME In ('Imperial North', 'Imperial South', 'Round Mountain - A', 'Lassen North')")#,\
#csv_suffix = "_RESOLVEzonesMW_SuperCREZ")
## append output list
resolveZone_MW_ls.append(resolveZone_MW)
## MERGE TABLES (outer-join the four category DataFrames on the zone field)
# StateAvg
resolveZone_MW_merged = pandas.merge(resolveZone_MW_ls[0], resolveZone_MW_ls[1], how= 'outer', on = zoneFieldName)
for tab in [resolveZone_MW_ls[2], resolveZone_MW_ls[3]]:
resolveZone_MW_merged = pandas.merge(resolveZone_MW_merged, tab, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
resolveZone_MW_merged.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, tech + "_CA_RESOLVEZONE_MW.csv"), index = False)
################################################
## APPLY calcSupplyCurve_geothermal TO STATES ##
################################################
## output list to append
# gdbFileName carries over from the SuperCREZ section above.
resolveZone_MW_ls = []
zoneFieldName = "State"
## loop function over list of inputs
# NOTE(review): zonesToSelect still lists SuperCREZ zone names even though this
# run summarizes by State -- looks like a copy-paste leftover from the SuperCREZ
# block above; confirm whether a state-name filter (or no filter) was intended.
for cat in ft_ls:
    resolveZone_MW = calcSupplyCurve_geothermal(inFeature = os.path.join(mainOutputFolder, gdbFileName, ft_ls[cat]),
                                                inFeatureFileName = ft_ls[cat],
                                                category = cat,
                                                zonesFile = statesFilePath,
                                                zonesType = "state",
                                                zoneField = zoneFieldName,
                                                zonesToSelect = "NAME In ('Imperial North', 'Imperial South', 'Round Mountain - A', 'Lassen North')")
    ## append output list
    resolveZone_MW_ls.append(resolveZone_MW)
## MERGE TABLES
# StateAvg
resolveZone_MW_merged = pandas.merge(resolveZone_MW_ls[0], resolveZone_MW_ls[1], how= 'outer', on = zoneFieldName)
for tab in [resolveZone_MW_ls[2], resolveZone_MW_ls[3]]:
    resolveZone_MW_merged = pandas.merge(resolveZone_MW_merged, tab, how= 'outer', on = zoneFieldName)
## SAVE TO CSV
resolveZone_MW_merged.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, tech + "_state_MW.csv"), index = False)
# Report total wall-clock time in minutes; start_time is presumably set near the
# top of the script (not visible here) -- confirm.
elapsed_time = (time.time() - start_time)/(60)
print(str(elapsed_time) + " minutes")
'''
#################################################
## REMOVE BASELINE RESOURCES FROM SUPPLY CURVE ##
#################################################
IMPORTANT NOTE: Run after creating supply curves for all technologies
PURPOSE: aggregates baseline (existing) resources for all technologies
and subtracts it from the supply curve values
'''
############################################
## SUM BASELINE RESOURCES BY RESOLVE_ZONE ##
############################################
# Read the baseline (existing resources) table and, for each technology,
# coerce the MW column to numeric, sum it by RESOLVE_ZONE, and save the
# per-zone totals to CSV for later subtraction from the supply curves.
# NOTE(review): absolute user path -- consider parameterizing.
df_baseline = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\RESOLVE_related_data\RESOLVE-CPUCRPS_listComparison_AllTechSum_EL_noBaselineOOS.csv")
# DataFrame.convert_objects() was deprecated in pandas 0.17 and removed in
# 0.23; pandas.to_numeric(errors='coerce') is the supported replacement
# (non-numeric entries become NaN and are ignored by sum()).
df_baseline['Geothermal'] = pandas.to_numeric(df_baseline['Geothermal'], errors='coerce')
df_geo = df_baseline[["RESOLVE_ZONE", "Geothermal"]].groupby(['RESOLVE_ZONE']).sum()
df_geo.reset_index(inplace=True)
df_geo.to_csv(path_or_buf = r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\RESOLVE_related_data\geothermal_baseline_noBaselineOOS.csv", index = True)
df_baseline['Solar PV'] = pandas.to_numeric(df_baseline['Solar PV'], errors='coerce')
df_PV = df_baseline[["RESOLVE_ZONE", "Solar PV"]].groupby(['RESOLVE_ZONE']).sum()
df_PV.reset_index(inplace=True)
df_PV.to_csv(path_or_buf = r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\RESOLVE_related_data\PV_baseline_noBaselineOOS.csv", index = True)
df_baseline['Wind'] = pandas.to_numeric(df_baseline['Wind'], errors='coerce')
df_wind = df_baseline[["RESOLVE_ZONE", "Wind"]].groupby(['RESOLVE_ZONE']).sum()
df_wind.reset_index(inplace=True)
df_wind.to_csv(path_or_buf = r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\RESOLVE_related_data\wind_baseline_noBaselineOOS.csv", index = True)
####################################################
## SUBTRACT BASELINE RESOURCES FROM MW ESTIMATES  ##
###################################################
## import RESOLVE supply curve values:
# Published RESOLVE supply-curve summary used below for outer-join comparisons.
# NOTE(review): absolute user paths -- consider parameterizing.
resolveSupplyCurve = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\siteSuitabilityOutputs\0618_results_archived\summaryResults_061818.csv")
## import STPOSTAL_RESOLVEZONE_key csv:
# State-postal-code -> RESOLVE zone lookup (only referenced in commented-out
# code further below).
stpostalKey = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\RESOLVE_related_data\STPOSTAL_RESOLVEZONE_key.csv")
## FUNCTION to subtract baseline from supply curve
def subtractBaseline(df_merged, baselineColName):
    """Subtract the baseline column from every capacity column of df_merged.

    For each column whose name matches 'cap_MW_<suffix>', a new column
    '<name>_net' is added holding column - df_merged[baselineColName],
    with missing values on either side treated as 0 (fill_value=0).
    Mutates and returns df_merged.
    """
    pattern = r'cap_MW_\S'
    MWcolList = [x for x in list(df_merged) if re.search(pattern, x)]
    # Iterate over the precomputed list rather than the DataFrame itself: the
    # loop adds new '*_net' columns, and mutating a DataFrame while iterating
    # over its columns is unsafe (the original looped over df_merged directly).
    for col in MWcolList:
        df_merged[col + "_net"] = df_merged[col].sub(df_merged[baselineColName], fill_value=0)
    return df_merged
###################
## APPLY TO WIND ##
###################
## Updates on 11/30/18: No longer subtracting baseline resources from site suitability results for OOS RESOLVE ZONE or State-wide because we erased
## wind and solar existing power plants when creating the potential project area feature classes
## instead, subtract 500 MW from NM and 1500 MW from PNW
# Fixed MW debits for the two out-of-state wind zones (see note above).
extTxMW = pandas.DataFrame(data = {'RESOLVE_ZONE_wind': ["New_Mexico_Wind", "Pacific_Northwest_Wind"], 'extTxZones' : [500, 1500]})
## import wind zones - OOS
df_wind_OOS_RESOLVE = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "wind_OOS_RESOLVEZONE_avgCFMW_PA.csv"))
## subtract 500 MW from NM and 1500 MW from PNW
# Left-merge keeps all supply-curve rows; zones without a debit get NaN, which
# subtractBaseline treats as 0 via fill_value.
df_wind_OOS_RESOLVE_merged = pandas.merge(df_wind_OOS_RESOLVE, extTxMW, how= 'left', left_on = "RESOLVE_ZONE_wind", right_on = "RESOLVE_ZONE_wind")
df_wind_OOS_RESOLVE_merged_sub = subtractBaseline(df_merged= df_wind_OOS_RESOLVE_merged, baselineColName = "extTxZones")
## rename columns:
# Strip the '_RESOLVE_ZONE' suffix from column names.
newColNames =[name.replace('_RESOLVE_ZONE',"") for name in df_wind_OOS_RESOLVE_merged_sub.columns]
df_wind_OOS_RESOLVE_merged_sub.columns = newColNames
## save to csv:
df_wind_OOS_RESOLVE_merged_sub.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "wind_OOS_RESOLVEZONE_avgCFMW_PA_net.csv"), index = False)
## import wind zones - CA
df_wind_CA_RESOLVE = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "wind_CA_RESOLVEZONE_avgCFMW_PA.csv"))
## join the df_baseline table using RESOLVE_ZONE
# df_wind holds the per-zone baseline sums computed earlier in this script.
df_wind_CA_RESOLVE_merged = pandas.merge(df_wind, df_wind_CA_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE")
## apply substractBaseline function
df_wind_CA_RESOLVE_merged_sub = subtractBaseline(df_merged= df_wind_CA_RESOLVE_merged, baselineColName = "Wind")
## Append "_Wind" to end of RESOLVE_ZONE name:
# Zone labels are suffixed so they line up with "RESOLVE Resource Name" in the
# comparison merges below.
df_wind_CA_RESOLVE_merged_sub["RESOLVE_ZONE"] = df_wind_CA_RESOLVE_merged_sub['RESOLVE_ZONE'].astype(str) + "_Wind"
df_wind_CA_RESOLVE_merged_sub.rename(columns={'RESOLVE_ZONE':'RESOLVE_ZONE_wind'}, inplace=True)
## merge with RESOLVE supply curve values
#df_wind_CA_RESOLVE_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_wind_CA_RESOLVE_merged_sub, how = "outer", left_on = "RESOLVE Resource Name",right_on = "RESOLVE_ZONE")
## save to csv:
df_wind_CA_RESOLVE_merged_sub.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "wind_CA_RESOLVEZONE_avgCFMW_PA_net.csv"), index = False)
## import wind zones - WALL TO WALL
df_wind_state = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "wind_OOS_state_avgCFMW_PA.csv"))
## merge STPOSTAL key to get RESOLVEZONE names
#df_wind_state = pandas.merge(stpostalKey[["STPOSTAL", "RESOLVE_ZONE_wind"]], df_wind_state, how = "outer", left_on = "STPOSTAL",right_on = "STPOSTAL")
#df_wind_state.groupby(["RESOLVE_ZONE_wind"])[]
## subtract 500 MW from NM and 1500 MW from PNW
df_wind_state_merged = pandas.merge(df_wind_state, extTxMW, how= 'left', left_on = "RESOLVE_ZONE_wind", right_on = "RESOLVE_ZONE_wind")
df_wind_state_merged_sub = subtractBaseline(df_merged= df_wind_state_merged, baselineColName = "extTxZones")
## save to csv:
df_wind_state_merged_sub.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "wind_OOS_state_avgCFMW_PA_net.csv"), index = False)
####################
## APPLY TO SOLAR ##
####################
## import PV zones - OOS : no need to subtract baseline
df_PV_OOS_RESOLVE = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "solar_OOS_RESOLVEZONE_avgCFMW_PA.csv"))
## rename columns:
# Strip the '_RESOLVE_ZONE' suffix from column names.
newColNames =[name.replace('_RESOLVE_ZONE',"") for name in df_PV_OOS_RESOLVE.columns]
df_PV_OOS_RESOLVE.columns = newColNames
## join the df_baseline table using RESOLVE_ZONE
# df_PV holds the per-zone baseline sums computed earlier in this script.
df_PV_OOS_RESOLVE_merged = pandas.merge(df_PV, df_PV_OOS_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE_solar")
## Delete first RESOLVE_ZONE Column in df_PV:
df_PV_OOS_RESOLVE_merged = df_PV_OOS_RESOLVE_merged.drop(["RESOLVE_ZONE"], axis = 1)
## apply substractBaseline function
# NOTE(review): the section header says OOS needs no baseline subtraction, yet
# subtractBaseline is applied (and assigned back without a _sub suffix); the
# output file is also saved as "_renamedCol" rather than "_net" like the other
# sections -- confirm which behavior is intended.
df_PV_OOS_RESOLVE_merged = subtractBaseline(df_merged= df_PV_OOS_RESOLVE_merged, baselineColName = "Solar PV")
## join the df_baseline table using RESOLVE_ZONE
#df_PV_OOS_RESOLVE_merged = pandas.merge(df_PV, df_PV_OOS_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE_solar")
## apply substractBaseline function
#df_PV_OOS_RESOLVE_merged_sub = subtractBaseline(df_merged= df_PV_OOS_RESOLVE_merged, baselineColName = "Solar PV")
## merge with RESOLVE supply curve values
#df_PV_OOS_RESOLVE_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_PV_OOS_RESOLVE_merged_sub, how = "outer", left_on = "RESOLVE Resource Name", right_on = "RESOLVE_ZONE_solar")
## save to csv:
df_PV_OOS_RESOLVE_merged.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "solar_OOS_RESOLVEZONE_avgCFMW_PA_renamedCol.csv"), index = False)
## import PV zones - CA
df_PV_CA_RESOLVE = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "solar_CA_RESOLVEZONE_avgCFMW_PA.csv"))
## join the df_baseline table using RESOLVE_ZONE
df_PV_CA_RESOLVE_merged = pandas.merge(df_PV, df_PV_CA_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE")
## apply substractBaseline function
df_PV_CA_RESOLVE_merged_sub = subtractBaseline(df_merged= df_PV_CA_RESOLVE_merged, baselineColName = "Solar PV")
## Append "_Solar" to end of RESOLVE_ZONE name:
# Zone labels are suffixed so they line up with "RESOLVE Resource Name" below.
df_PV_CA_RESOLVE_merged_sub["RESOLVE_ZONE"] = df_PV_CA_RESOLVE_merged_sub['RESOLVE_ZONE'].astype(str) + "_Solar"
df_PV_CA_RESOLVE_merged_sub.rename(columns={'RESOLVE_ZONE':'RESOLVE_ZONE_solar'}, inplace=True)
## merge with RESOLVE supply curve values
df_PV_CA_RESOLVE_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_PV_CA_RESOLVE_merged_sub, how = "outer", left_on = "RESOLVE Resource Name",right_on = "RESOLVE_ZONE_solar")
## save to csv:
df_PV_CA_RESOLVE_merged_sub_compare.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "solar_CA_RESOLVEZONE_avgCFMW_PA_net.csv"), index = False)
## import PV zones - State
df_PV_state = pandas.read_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "solar_OOS_state_avgCFMW_PA.csv"))
## join the df_baseline table using RESOLVE_ZONE
df_PV_state_merged = pandas.merge(df_PV, df_PV_state, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE_solar")
## Delete first RESOLVE_ZONE Column in df_PV:
df_PV_state_merged = df_PV_state_merged.drop(["RESOLVE_ZONE"], axis = 1)
## apply substractBaseline function
df_PV_state_merged_sub = subtractBaseline(df_merged= df_PV_state_merged, baselineColName = "Solar PV")
## merge with RESOLVE supply curve values
#df_PV_state_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_PV_state_merged, how = "outer", left_on = "RESOLVE Resource Name", right_on = "RESOLVE_ZONE_solar")
## save to csv:
df_PV_state_merged_sub.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "solar_OOS_state_avgCFMW_PA_net.csv"), index = False)
########################################
## MERGE SOLAR AND WIND SUPPLY CURVES ##
########################################
####### RESOLVE ZONES:
## WIND: concat OOS and CA RESOLVE ZONE supply curves and then merge with original RESOLVE supply curve values
RESOLVE_ZONES_wind_merged = pandas.concat([df_wind_OOS_RESOLVE_merged_sub, df_wind_CA_RESOLVE_merged_sub], axis = 0)
# Standardize the zone column name so wind and solar tables can be stacked.
RESOLVE_ZONES_wind_merged.rename(columns={"RESOLVE_ZONE_wind": "RESOLVE_ZONE"}, inplace=True)
## SOLAR: concat OOS and CA RESOLVE ZONE supply curves and then merge with original RESOLVE supply curve values
RESOLVE_ZONES_solar_merged = pandas.concat([df_PV_OOS_RESOLVE_merged, df_PV_CA_RESOLVE_merged_sub], axis = 0)
RESOLVE_ZONES_solar_merged.rename(columns={"RESOLVE_ZONE_solar": "RESOLVE_ZONE"}, inplace=True)
## merge WIND AND SOLAR
RESOLVE_ZONES_merged = pandas.concat([RESOLVE_ZONES_solar_merged, RESOLVE_ZONES_wind_merged], axis = 0)
## merge with RESOLVE supply curve values
RESOLVE_ZONES_merged_compare = pandas.merge(resolveSupplyCurve, RESOLVE_ZONES_merged, how = "outer", left_on = "RESOLVE Resource Name",right_on = "RESOLVE_ZONE")
# Drop "Unnamed: 0" -- presumably a stray index column carried in from an
# upstream CSV saved with index=True; confirm.
RESOLVE_ZONES_merged_compare= RESOLVE_ZONES_merged_compare.drop(["Unnamed: 0"], axis = 1)
## save to csv
RESOLVE_ZONES_merged_compare.to_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "supplyCurvesForRESOLVE", "envSupplyCurves_RESOLVEZONES.csv"), index=False)
####### WALL TO WALL:
## merge WIND AND SOLAR
df_wind_state_merged_sub.rename(columns={"RESOLVE_ZONE_wind": "RESOLVE_ZONE"}, inplace=True)
w2w_merged = pandas.concat([df_PV_state_merged_sub,
                            df_wind_state_merged_sub], axis =0)
## merge with RESOLVE supply curve values
w2w_merged_compare = pandas.merge(resolveSupplyCurve, w2w_merged, how = "outer", left_on = "RESOLVE Resource Name",right_on = "RESOLVE_ZONE")
## save to csv
w2w_merged_compare.to_csv(os.path.join(mainOutputFolder, supplyCurveFolder, "supplyCurvesForRESOLVE", "envSupplyCurves_w2w.csv"), index=False)
#########################
## APPLY TO GEOTHERMAL ##
#########################
## import geothermal zones - OOS
# NOTE(review): absolute user paths -- consider parameterizing.
df_geo_OOS_RESOLVE = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\siteSuitabilityOutputs\0718_results\geothermal_OOS_RESOLVEZONE_MW.csv")
## join the df_baseline table using RESOLVE_ZONE
# df_geo holds the per-zone baseline sums computed earlier in this script.
df_geo_OOS_RESOLVE_merged = pandas.merge(df_geo, df_geo_OOS_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE_geothermal")
## apply substractBaseline function
df_geo_OOS_RESOLVE_merged_sub = subtractBaseline(df_merged= df_geo_OOS_RESOLVE_merged, baselineColName = "Geothermal")
## merge with RESOLVE supply curve values
df_geo_OOS_RESOLVE_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_geo_OOS_RESOLVE_merged_sub, how = "outer", left_on = "RESOLVE Resource Name", right_on = "RESOLVE_ZONE_geothermal")
## save to csv:
df_geo_OOS_RESOLVE_merged_sub_compare.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "geothermal_OOS_RESOLVEZONE_MW_net.csv"), index = False)
## import geothermal zones - CA
df_geo_CA_RESOLVE = pandas.read_csv(r"C:\Users\Grace\Documents\TNC_beyond50\PathTo100\siteSuitabilityOutputs\0718_results\geothermal_CA_RESOLVEZONE_MW.csv")
## join the df_baseline table using RESOLVE_ZONE
df_geo_CA_RESOLVE_merged = pandas.merge(df_geo, df_geo_CA_RESOLVE, how= 'right', left_on = "RESOLVE_ZONE", right_on = "RESOLVE_ZONE")
## apply substractBaseline function
df_geo_CA_RESOLVE_merged_sub = subtractBaseline(df_merged= df_geo_CA_RESOLVE_merged, baselineColName = "Geothermal")
## Append "_Geothermal" to end of RESOLVE_ZONE name:
df_geo_CA_RESOLVE_merged_sub["RESOLVE_ZONE"] = df_geo_CA_RESOLVE_merged_sub['RESOLVE_ZONE'].astype(str) + "_Geothermal"
## merge with RESOLVE supply curve values
df_geo_CA_RESOLVE_merged_sub_compare = pandas.merge(resolveSupplyCurve, df_geo_CA_RESOLVE_merged_sub, how = "outer", left_on = "RESOLVE Resource Name",right_on = "RESOLVE_ZONE")
## save to csv:
df_geo_CA_RESOLVE_merged_sub_compare.to_csv(path_or_buf = os.path.join(mainOutputFolder, supplyCurveFolder, "geothermal_CA_RESOLVEZONE_MW_net.csv"), index = False)
#! /usr/bin/env python
# Interactively create a C/C++ header file skeleton with an include guard.
# Ported from Python 2 (raw_input / print-statement) to Python 3, and the
# output file handle is now closed deterministically via `with`.
import os,sys
from time import strftime

# Prompt for the target filename; refuse to clobber an existing file.
fn = input("file to create: ")
if os.path.exists(fn):
    print("file exists!")
    sys.exit(1)
hg = input("Header guard: ")
# NOTE(review): the generated #endif immediately follows #define, so any code
# appended after it is NOT protected by the guard -- confirm the template is
# meant as a skeleton where content is inserted before the #endif.
with open(fn, 'w') as f:
    f.write("""//
// """ + fn + """: redox language
// created """ + strftime("%Y-%m-%d %H:%M:%S") + """
// created by patrick kage
//
#ifndef """ + hg + """
#define """ + hg + """
#endif
""")
|
# What is the largest prime factor of the number 600851475143?
NUMBER = 600851475143

# Seeded with 2 as in the original script (NUMBER is odd, so 2 is not a factor).
primes = [2]

def get_max():
    """Append the odd prime factors of NUMBER to `primes` in increasing order.

    The original trial-division version had two defects: for num < 9 the inner
    range(3, num // 2, 2) was empty, so the for-else appended small numbers
    (3, 5, 7) that do not divide NUMBER at all; and the outer loop scanned up
    to NUMBER / 2 (~3e11 iterations), which never terminates in practice.
    This version factorizes NUMBER directly by trial division up to
    sqrt(remaining), dividing out each factor as it is found.
    """
    remaining = NUMBER
    factor = 3  # NUMBER is odd, so only odd trial divisors are needed
    while factor * factor <= remaining:
        if remaining % factor == 0:
            if factor > primes[-1]:
                primes.append(factor)
            # Divide the factor out completely so it is recorded only once.
            while remaining % factor == 0:
                remaining //= factor
        factor += 2
    # Whatever is left greater than 1 is the largest prime factor.
    if remaining > 1 and remaining > primes[-1]:
        primes.append(remaining)

get_max()
print('prime factors found %s' % (primes))
print('highest factor %d' % (primes[-1]))
|
from matplotlib import pyplot as pl
def parser(name):
    '''
    Parse the output files for part 3
    inputs:
        name = The name of the file containing data
    outputs:
        (fpr, tpr) - parallel lists of floats taken from the first and
        last comma-separated field of every line that has more than one
        field; lines with a single field are skipped
    '''
    fpr, tpr = [], []
    with open(name) as handle:
        for raw in handle:
            fields = raw.strip().split(',')
            if len(fields) <= 1:
                continue  # skip header/blank lines with no comma
            fpr.append(float(fields[0]))
            tpr.append(float(fields[-1]))
    return fpr, tpr
# One ROC-data file per k value for the votes dataset.
pathvotes = '../plotdata/part4roc'
kvals = [10, 20, 30]
pathsvotes = [pathvotes + str(k) for k in kvals]
pathsdigits = []  # kept from the original; unused in this section
percents = list(range(10, 101, 10))  # kept from the original; unused below
# Parse each file into parallel FPR / TPR lists.
voteslengths = []
votesacc = []
for fname in pathsvotes:
    fpr_vals, tpr_vals = parser(fname)
    voteslengths.append(fpr_vals)
    votesacc.append(tpr_vals)
# Plot one ROC curve per k on a single axis and save the figure.
fig, axvotes = pl.subplots()
for idx, k in enumerate(kvals):
    axvotes.plot(voteslengths[idx], votesacc[idx], marker='.', label='k='+str(k))
axvotes.set_xlabel('FPR for votes data')
axvotes.set_ylabel('TPR for votes data')
axvotes.grid()
axvotes.legend(loc='lower right')
fig.tight_layout()
pl.savefig('../plotimages/roc_curve.pdf')
|
import torch
import torch.nn as nn
import torch.nn.init as init
import numpy as np
from torch.nn.modules.loss import _Loss
import math
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def initialize_weights(m):
    """Xavier-initialize the weight of a Conv2d layer and zero its bias.

    Layers of any other type are left untouched, so this can be passed
    directly to Module.apply().
    """
    if not isinstance(m, nn.Conv2d):
        return
    init.xavier_uniform_(m.weight, gain=1)
    # init.normal_(m.weight, std=0.001)
    if m.bias is not None:
        init.constant_(m.bias, 0)
def params_count(model):
    """Return the total number of trainable parameters in `model`."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(np.prod(p.size()) for p in trainable)
class mean_squared_error(_Loss):
    """Loss module computing the mean squared error via F.mse_loss."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        """Return the mean squared error between `input` and `target`."""
        return torch.nn.functional.mse_loss(input, target)
def get_full_name_list(list_name, stage_flag):
    """Expand a sequence-list file into a list of frame image paths.

    Each line of `list_name` names a sequence directory (e.g. '00001/0001').
    For stage_flag == 'train', every other sequence (even line index) is used
    with frames im1.png .. im7.png; for stage_flag == 'test', every sequence
    is used with only frame im4.png.  Prints the raw and expanded list sizes.

    Raises ValueError for any other stage_flag (the original instead crashed
    with an UnboundLocalError at the first print).
    """
    if stage_flag not in ('train', 'test'):
        raise ValueError("stage_flag must be 'train' or 'test', got %r" % (stage_flag,))
    full_image_list = []
    with open(list_name) as f:
        image_list = f.readlines()  # lines like '00001/0001'
    # e.g. "len(train_image_list):<N>" -- same output format as before.
    print('len(' + stage_flag + '_image_list):' + str(len(image_list)))
    for i, line in enumerate(image_list):
        if stage_flag == 'train':
            if i % 2 == 0:  # take every other sequence for training
                til = line.rstrip()
                for j in range(1, 8):
                    full_image_list.append(til + '/im' + str(j) + '.png')
        else:  # 'test': center frame only
            til = line.rstrip()
            full_image_list.append(til + '/im4.png')
    print('len(full_' + stage_flag + '_image_list):' + str(len(full_image_list)))
    return full_image_list
def calculate_psnr(input, prediction, label):
    """Return [PSNR(prediction, label), PSNR(input, label)] in dB.

    Delegates to calculate_psnr_single so the RMSE/PSNR formula lives in one
    place -- the original duplicated it, with the RMSE misleadingly named
    `mse_pre` / `mse_input`.
    """
    return [calculate_psnr_single(prediction, label),
            calculate_psnr_single(input, label)]

def calculate_psnr_single(prediction, label):
    """PSNR in dB between two tensors, assuming a peak intensity of 1.0.

    Note: raises ZeroDivisionError when prediction equals label exactly
    (RMSE == 0), matching the original behavior.
    """
    rmse = math.sqrt(torch.mean((prediction - label) ** 2.0))
    return 20 * math.log10(1.0 / rmse)
|
import pandas as pd
from requests import get
import sys
# Make the local helper package (which holds the Census API key) importable.
pathtk = r"D:\PPP"
sys.path.insert(0, pathtk)
import bfsearch
# Shared print kwargs: blank line between and after printed items.
sp = {"sep":"\n\n", "end":"\n\n"}
# Census International Data Base (IDB) 5-year time-series endpoint.
base_url = "http://api.census.gov/data/timeseries/idb/5year"
secret_key = bfsearch.key
parameters = {"key": secret_key,
              "get": ",".join(["NAME", "POP", "CBR", "CDR", "E0", "AREA_KM2"]),
              "time": "from 2013 to 2017",
              "FIPS": "*"}
response = get(base_url, params=parameters)
print(response.status_code, response.url, **sp)
# NOTE(review): the next two bare expressions are no-ops outside a REPL.
response.url
response.content
resp_obj = response.json()
# type(resp_obj) # <class 'list'>
# First row of the JSON payload is the header; the rest are data rows.
popdata = pd.DataFrame(resp_obj[1:], columns=resp_obj[0])
print(popdata.head(), popdata.tail(), **sp)
# NOTE(review): bare list literal below is a no-op (REPL leftover).
["NAME","POP","CBR","CDR","E0","AREA_KM2","time","FIPS"]
# NOTE(review): 'col' contains duplicates ("ASFR40_44" and "FMR0_4" each appear
# twice) -- possibly typos for neighboring fields (e.g. ASFR30_34/ASFR35_39);
# confirm against fullcol below.
col = ["NAME", "POP", "AREA_KM2", "ASFR15_19", "ASFR20_24", "ASFR25_29",
       "ASFR40_44", "ASFR40_44", "ASFR45_49", "CBR", "E0", "E0_F", "FIPS",
       "FMR0_4", "FMR0_4", "FMR1_4", "FPOP", "FPOP0_4"]
# Full field list of the IDB endpoint, kept for reference (unused below).
fullcol = ['AREA_KM2', 'ASFR15_19', 'ASFR20_24', 'ASFR25_29', 'ASFR30_34',
           'ASFR35_39', 'ASFR40_44', 'ASFR45_49', 'CBR', 'CDR', 'E0',
           'E0_F', 'E0_M', 'FIPS', 'FMR0_4', 'FMR1_4', 'FPOP', 'FPOP0_4',
           'FPOP10_14', 'FPOP100_', 'FPOP15_19', 'FPOP20_24', 'FPOP25_29',
           'FPOP30_34', 'FPOP35_39', 'FPOP40_44', 'FPOP45_49', 'FPOP5_9',
           'FPOP50_54', 'FPOP55_59', 'FPOP60_64', 'FPOP65_69', 'FPOP70_74',
           'FPOP75_79', 'FPOP80_84', 'FPOP85_89', 'FPOP90_94', 'FPOP95_99',
           'GR', 'GRR', 'IMR', 'IMR_F', 'IMR_M', 'MMR0_4', 'MMR1_4', 'MPOP',
           'MPOP0_4', 'MPOP10_14', 'MPOP100_', 'MPOP15_19', 'MPOP20_24',
           'MPOP30_34', 'MPOP35_39', 'MPOP40_44', 'MPOP45_49', 'MPOP5_9',
           'MPOP50_54', 'MPOP55_59', 'MPOP60_64', 'MPOP65_69', 'MPOP70_74',
           'MPOP75_79', 'MPOP80_84', 'MPOP85_89', 'MPOP90_94', 'MPOP95_99',
           'MR0_4', 'MR1_4', 'NAME', 'NMR', 'POP', 'POP0_4', 'POP10_14',
           'POP100_', 'POP15_19', 'POP20_24', 'POP25_29', 'POP30_34',
           'POP40_44', 'POP45_49', 'POP5_9', 'POP50_54', 'POP55_59',
           'POP65_69', 'POP70_74', 'POP75_79', 'POP80_84', 'POP85_89',
           'POP95_99', 'RNI', 'SRB', 'TFR', 'time', 'YR', 'MPOP25_29',
           'POP60_64', 'POP90_94', 'POP90_94', 'POP35_39']
# Second request: the wider 'col' field set over a different time span.
par2 = {'key': secret_key,
        'get': ','.join(col),
        'time': 'from 2016 to 2018',
        'FIPS': '*'}
resp2 = get(base_url, params=par2)
print(resp2.status_code, **sp)
resp2_obj = resp2.json()
ppdata = pd.DataFrame(resp2_obj[1:], columns=resp2_obj[0])
print(ppdata.head(), ppdata.tail(), **sp)
# Using url directly;
response2 = get("https://api.census.gov/data/timeseries/idb/5year?get=NAME,POP,CBR,CDR,E0,AREA_KM2&FIPS=%2A&time=2012")
response2 = response2.json()
ppdata2 = pd.DataFrame(response2[1:], columns=response2[0])
print(ppdata2.head(), ppdata2.tail(), **sp)
|
## ========================================================================= ##
## Copyright (c) 2019 Agustin Durand Diaz. ##
## This code is licensed under the MIT license. ##
## hud_steering.py ##
## ========================================================================= ##
from core.hud_base import HudBase
from enums import ScreenType
class HudSteering(HudBase):
    """HUD screen for the simple steering demo."""

    def __init__(self, width, height):
        HudBase.__init__(self, width, height)

    def init(self):
        """Create this screen's widgets: title label, Back button, image."""
        self.addLabel((80, 30), (150, 30), 'Simple Steering')
        self.addButton((725, 40), (100, 50), 'Back', self.gotoMetamap)
        self.addImage((725, 550), (50, 50), "assets/imageqt.png")

    def gotoMetamap(self):
        """Return to the meta-map screen via the screen manager."""
        self.m_manager.gotoScreen(ScreenType.META_MAP)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.