text stringlengths 8 6.05M |
|---|
import click
import os
import transaction
from decimal import Decimal
from decimal import InvalidOperation
from onegov.core.cli import abort
from onegov.core.cli import command_group
from onegov.core.cli import pass_group_context
from onegov.core.crypto import random_token
from onegov.file.utils import as_fileintent
from onegov.swissvotes.collections import SwissVoteCollection
from onegov.swissvotes.external_resources import MfgPosters
from onegov.swissvotes.external_resources import SaPosters
from onegov.swissvotes.models import SwissVote
from onegov.swissvotes.models import SwissVoteFile
from onegov.swissvotes.models.file import LocalizedFile
cli = command_group()  # root click command group all swissvotes CLI commands attach to
@cli.command(context_settings={'creates_path': True})
@pass_group_context
def add(group_context):
    """ Adds an instance to the database. For example:

    onegov-swissvotes --select '/onegov_swissvotes/swissvotes' add

    """

    def add_instance(request, app):
        # Path creation is handled by the framework ('creates_path'); here we
        # only invalidate caches and report success.
        app.cache.flush()
        click.echo('Instance was created successfully')

    return add_instance
@cli.command('import-attachments')
@click.argument('folder', type=click.Path(exists=True))
@pass_group_context
def import_attachments(group_context, folder):
    """ Import attachments from the given folder. For example:

    onegov-swissvotes \
    --select '/onegov_swissvotes/swissvotes' \
    import-attachments data_folder

    Expects a data folder structure with the first level representing an
    attachment and the second level a locale. The PDFs have to be named by
    BFS number (single number or range). For example:

    data/voting_text/de_CH/001.pdf
    data/voting_text/de_CH/038.1.pdf
    data/voting_text/de_CH/622-625.pdf

    """

    def _import(request, app):
        votes = SwissVoteCollection(app)

        # First level: only folders whose name matches a LocalizedFile
        # descriptor on SwissVote are treated as attachments.
        attachments = {}
        for name in os.listdir(folder):
            if (
                os.path.isdir(os.path.join(folder, name))
                and isinstance(SwissVote.__dict__.get(name), LocalizedFile)
            ):
                attachments[name] = os.path.join(folder, name)
            else:
                click.secho(f'Ignoring /{name}', fg='yellow')

        for attachment, attachment_folder in attachments.items():
            # Second level: folders named after one of the app's locales.
            locales = {}
            for name in os.listdir(attachment_folder):
                if (
                    os.path.isdir(os.path.join(attachment_folder, name))
                    and name in app.locales
                ):
                    locales[name] = os.path.join(attachment_folder, name)
                else:
                    click.secho(f'Ignoring /{attachment}/{name}', fg='yellow')

            for locale, locale_folder in locales.items():
                for name in sorted(os.listdir(locale_folder)):
                    if not (name.endswith('.pdf') or name.endswith('.xlsx')):
                        click.secho(
                            f'Ignoring {attachment}/{locale}/{name}',
                            fg='yellow'
                        )
                        continue

                    # Filenames carry a single BFS number ('038.1') or a
                    # range ('622-625'); a range is expanded to every whole
                    # number in it (fractional endpoints get truncated by
                    # int() — presumably ranges are only used with integer
                    # numbers, TODO confirm).
                    try:
                        numbers = name.replace('.pdf', '').replace('.xlsx', '')
                        numbers = [Decimal(x) for x in numbers.split('-')]
                        assert len(numbers) in [1, 2]
                        if len(numbers) == 2:
                            numbers = tuple(
                                Decimal(x) for x in
                                range(int(numbers[0]), int(numbers[1]) + 1)
                            )
                    except (AssertionError, InvalidOperation):
                        click.secho(
                            f'Invalid name {attachment}/{locale}/{name}',
                            fg='red'
                        )
                        continue

                    for bfs_number in numbers:
                        vote = votes.by_bfs_number(bfs_number)
                        if not vote:
                            click.secho(
                                f'No matching vote {bfs_number} for '
                                f'{attachment}/{locale}/{name}',
                                fg='red'
                            )
                            continue

                        # Store the file in the vote's localized slot for
                        # this attachment.
                        file = SwissVoteFile(id=random_token())
                        with open(
                            os.path.join(locale_folder, name), 'rb'
                        ) as f:
                            file.reference = as_fileintent(
                                f, f'{attachment}-{locale}'
                            )
                        vote.__class__.__dict__[attachment].__set_by_locale__(
                            vote, file, locale
                        )
                        click.secho(
                            f'Added {attachment}/{locale}/{name}'
                            f' to {bfs_number}',
                            fg='green'
                        )

    return _import
@cli.command('import-campaign-material')
@click.argument('folder', type=click.Path(exists=True))
@pass_group_context
def import_campaign_material(group_context, folder):
    """ Import campaign material from the given folder. For example:

    onegov-swissvotes \
    --select '/onegov_swissvotes/swissvotes' \
    import-campaign-material data_folder

    Expects all files within this folder and filenames starting with the BFS
    number. For example:

    229_Ja-PB_Argumentarium-Gründe-der-Trennung.pdf
    232-1_Nein_PB_Referentenführer.pdf

    """

    def _import(request, app):
        attachments = {}
        votes = SwissVoteCollection(app)
        # All known BFS numbers, used to validate filename prefixes.
        bfs_numbers = votes.query().with_entities(SwissVote.bfs_number)
        bfs_numbers = [r.bfs_number for r in bfs_numbers]

        # Group filenames by BFS number: the prefix before the first '_' is
        # the number; '-' encodes a decimal point ('232-1' -> '232.1').
        for name in os.listdir(folder):
            if not name.endswith('.pdf'):
                click.secho(f'Ignoring {name}', fg='yellow')
                continue
            try:
                bfs_number = (name.split('_')[0] or '').replace('-', '.')
                bfs_number = Decimal(bfs_number)
            except InvalidOperation:
                click.secho(f'Invalid name {name}', fg='red')
                continue
            if bfs_number in bfs_numbers:
                attachments.setdefault(bfs_number, [])
                attachments[bfs_number].append(name)
            else:
                click.secho(f'No matching vote for {name}', fg='red')

        for bfs_number in sorted(attachments):
            vote = app.session().query(SwissVote).filter_by(
                bfs_number=bfs_number
            ).one()
            # Skip files that are already attached (matched by filename).
            existing = [file.filename for file in vote.campaign_material_other]
            names = sorted(attachments[bfs_number])
            for name in names:
                if name in existing:
                    click.secho(f'{name} already exists', fg='yellow')
                    continue
                file = SwissVoteFile(id=random_token())
                file.name = f'campaign_material_other-{name}'
                with open(os.path.join(folder, name), 'rb') as content:
                    file.reference = as_fileintent(content, name)
                vote.files.append(file)
                click.secho(f'Added {name}', fg='green')
        transaction.commit()

    return _import
@cli.command('reindex')
@pass_group_context
def reindex_attachments(group_context):
    """ Reindexes the attachments. """

    def _reindex(request, app):
        # The query yields Row tuples of (bfs_number,); tuples sort by their
        # first element, so this orders the votes by BFS number.
        bfs_numbers = sorted(app.session().query(SwissVote.bfs_number))
        for bfs_number in bfs_numbers:
            click.secho(f'Reindexing vote {bfs_number.bfs_number}', fg='green')
            app.session().query(SwissVote).filter_by(
                bfs_number=bfs_number.bfs_number
            ).one().reindex_files()
        transaction.commit()

    return _reindex
@cli.command('update-resources')
@click.option('--details', is_flag=True, default=False)
@click.option('--mfg', is_flag=True, default=False)
@click.option('--sa', is_flag=True, default=False)
@pass_group_context
def update_resources(group_context, details, sa, mfg):
    """ Updates external resources.

    --mfg updates the MfG posters (requires a configured API token),
    --sa the SA posters; --details additionally lists failed items.
    """

    def _report(added, updated, removed, failed):
        # Shared result reporting — previously duplicated verbatim for both
        # poster sources.
        click.secho(
            f'{added} added, {updated} updated, {removed} removed, '
            f'{len(failed)} failed',
            fg='green' if not failed else 'yellow'
        )
        if failed and details:
            failed = ', '.join((str(item) for item in sorted(failed)))
            click.secho(f'Failed: {failed}', fg='yellow')

    def _update_sources(request, app):
        if mfg:
            click.echo('Updating MfG posters')
            if not app.mfg_api_token:
                abort('No token configured, aborting')
            posters = MfgPosters(app.mfg_api_token)
            _report(*posters.fetch(app.session()))
        if sa:
            click.echo('Updating SA posters')
            posters = SaPosters()
            _report(*posters.fetch(app.session()))

    return _update_sources
|
from ex4 import largest
from ex5 import findYear

# Exercise driver: print the largest of three numbers (ex4) and the value
# computed by findYear (ex5; its semantics are not visible from here).
result = largest(10,50,1000)
year = findYear(25)
print(result)
print(year)
# Though number is defined outside the def of function the variable is accessible.
# Global variable can be accessed anywhere if defined outside a function or class
'''
def getnumber( ):
print number
number=1
getnumber()
'''
# output is 2 because the function uses the local variable value
'''
def getnumber( ):
number=2
print number
number=1
getnumber()
'''
'''
#nested scope
#Function defined inside another function can have access to variable of outer function
import math
def hyp(a,b):
def square(c):
return c*c
return str(math.sqrt(square(a) + square(b)))
print("Length of side 1 pls :")
s1=int(input())
print("Length of side 2 pls :")
s2=int(input())
hyp=hyp(s1,s2)
print("The hypotenuse is : " + hyp)
'''
'''
### Newtons square root solve
print ("enter the number:")
number = int(input ())
print ("number is " + str (number))
print ("Whats your guess for square root?")
guess = int(input ())
sqrt1 = (1 / 2 ) * ((number/guess)+ guess)
print("Square root is " + str(sqrt1))
'''
import math
def square(a):
    """Return *a* squared."""
    return a*a


def squareroot(number, guess, tolerance=0.3):
    """Approximate the square root of *number* via Newton's method.

    Starting from *guess*, each recursion averages guess and number/guess
    until square(result) is within *tolerance* of *number* (the previously
    hard-coded 0.3 is now the default, so existing callers are unaffected).

    Raises ZeroDivisionError if guess is 0, as before.  Prints each
    iteration's state, matching the original's diagnostic output.
    """
    sqrt1 = (1 / 2) * ((number / guess) + guess)
    fn_result = float(math.fabs((square(sqrt1)-number)))
    print("sqrt1 is " + str(sqrt1))
    print ("fn_result is " + str (fn_result))
    if (fn_result < tolerance):
        return sqrt1
    else:
        return squareroot(number, sqrt1, tolerance)
# Interactive driver: read a number and an initial guess; if the guess is
# already close enough (|guess^2 - n| < 0.1) report it directly, otherwise
# refine it with squareroot().
print ("enter the number:")
in_number = int(input ())
print ("Whats your guess for square root?")
in_guess = float(input ())
result = float(math.fabs((square(in_guess)-in_number)))
if result < 0.1:
    print("Square root is : " + str(in_guess))
else:
    print("Square root is " + str(squareroot(in_number,in_guess)))
|
import pyimgur # imgur library
import urllib # url fetching default library
import time # time library to add delay like in Arduino

CLIENT_ID = "50e8d92be50757f" # imgur client ID

# Retrieve the snapshot from the IP camera and save it as an image in the
# chosen directory.
# NOTE(review): urllib.urlretrieve exists only in Python 2; under Python 3
# this would need urllib.request.urlretrieve — the script appears to target
# Python 2, confirm before porting.  Camera credentials are embedded in the
# URL in plain text.
urllib.urlretrieve("http://192.168.0.111:81/snapshot.cgi?user=admin&pwd=password", "mug_shot.jpg")
time.sleep(2)
PATH = "C:\\helloworld\\mug_shot.jpg" # location where the image is saved
im = pyimgur.Imgur(CLIENT_ID) # authenticates into the imgur platform using the client ID
uploaded_image = im.upload_image(PATH, title="Uploaded with PyImgur") # uploads the image privately
print(uploaded_image.link) # fetches the link of the image URL
#!/usr/bin/python
import argparse
import sys
import random as random
from fractions import gcd
import math as math
import time as time
from memory_profiler import memory_usage
def options():
    """Parse the command line; ``-message`` holds the text to encrypt."""
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument(
        '-message', type=str, help='The Message to Encrpyt')
    return cli_parser.parse_args()
def is_prime(n):
    """Deterministic trial-division primality test (6k±1 optimisation)."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        return True
    if n % 3 == 0:
        return False
    limit = int(n ** 0.5)
    candidate = 5
    while candidate <= limit:
        # Candidates of the form 6k-1 and 6k+1 cover all remaining primes.
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def egcd(a, b):
    """Extended Euclid: return (g, x, y) with a*x + b*y == g == gcd(a, b)."""
    if not a:
        # Base case: gcd(0, b) = b = 0*a + 1*b.
        return (b, 0, 1)
    g, s, t = egcd(b % a, a)
    return (g, t - (b // a) * s, s)


def modinv(a, m):
    """Return the multiplicative inverse of a modulo m.

    Raises Exception when gcd(a, m) != 1, i.e. no inverse exists.
    """
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
def pollard_rho(n):
    """Pollard's rho: return a non-trivial divisor of n (n itself if prime)."""
    if n % 2 == 0:
        return 2
    if is_prime(n):
        return n
    while True:
        # Random polynomial x^2 + c; retried whenever the cycle only
        # yields the trivial divisor n.
        offset = random.randint(2, n - 1)
        advance = lambda v: v ** 2 + offset
        tortoise = hare = 2
        divisor = 1
        while divisor == 1:
            # Floyd cycle detection: hare moves twice per tortoise step.
            tortoise = advance(tortoise) % n
            hare = advance(advance(hare)) % n
            divisor = gcd((tortoise - hare) % n, n)
        if divisor != n:
            return divisor
def bruteForce_prime_factors(n):
    """Factor n by trial division; returns prime factors in ascending order
    (with multiplicity).  Returns [] for n <= 1."""
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            n //= divisor
            factors.append(divisor)
        else:
            divisor += 1
    if n > 1:
        # Whatever remains above sqrt of the original n is itself prime.
        factors.append(n)
    return factors
def fermats(n):
    """Fermat factorisation: search t >= ceil(sqrt(n)) until t**2 - n is a
    perfect square s**2, then n = (t+s)(t-s).

    NOTE: Python 2 syntax (print statement).  The ``break`` after ``return``
    is unreachable.  s is computed with float math.sqrt, so very large n may
    hit precision loss in the s.is_integer() test.
    """
    i = 0
    while(True):
        t = math.ceil(math.sqrt(n)) + i
        s = math.sqrt((t**2) - n)
        print "i: %s, t: %s, s: %s" % (i, t, s)
        i +=1
        if (s.is_integer() == True):
            return ((t+s), (t-s))
            break
def get_phi(p, q):
    """Euler's totient of n = p*q for distinct primes p and q."""
    return (p - 1) * (q - 1)
def main():
    """Benchmark three factorisation algorithms (Pollard's rho, brute-force
    trial division, Fermat) on a fixed semiprime, reporting wall time and
    memory sampled via memory_profiler.

    NOTE: Python 2 only (print statements; ``q = n/d`` relies on integer
    division).  Each algorithm effectively runs twice: once for the result
    and once more inside memory_usage() for the memory profile.
    """
    options()
    message = options().message  # parsed but unused (see commented print below)
    #n = 8051
    #n = 10834948153
    #n = 1923023412357
    #n = 9999999999973
    #n = 1231232134590149
    n = 32193802514424469
    #n = 51923
    ################## Pollard
    print 'Beginning Pollards Factorization, Tracking time and memory footprint'
    start_time = time.time()
    d = pollard_rho(n)
    p = d
    q = n/d
    mem_usage = memory_usage((pollard_rho, (n,)))
    print ('Memory usage (in chunks of .1 seconds): %s' % mem_usage)
    print ('Maximum memory usage: %s' % max(mem_usage))
    print ('Total memory over cycle: %s' % sum(mem_usage))
    print p, q
    elapsed_time = time.time() - start_time
    print time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    ################## Brute Force
    print 'Beginning Brute Force, Tracking time and memory footprint'
    start_time = time.time()
    bruteForceFactors = bruteForce_prime_factors(n)
    mem_usage = memory_usage((bruteForce_prime_factors, (n,)))
    print ('Memory usage (in chunks of .1 seconds): %s' % mem_usage)
    print ('Maximum memory usage: %s' % max(mem_usage))
    print ('Total memory over cycle: %s' % sum(mem_usage))
    print bruteForceFactors
    elapsed_time = time.time() - start_time
    print time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    ################## Fermats
    print 'Beginning Fermats, Tracking time and memory footprint'
    start_time = time.time()
    pq = fermats(n)
    mem_usage = memory_usage((fermats, (n,)))
    print ('Memory usage (in chunks of .1 seconds): %s' % mem_usage)
    print ('Maximum memory usage: %s' % max(mem_usage))
    print ('Total memory over cycle: %s' % sum(mem_usage))
    print pq
    elapsed_time = time.time() - start_time
    print time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    #print message

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
__author__ = 'Yuvv'
# todo: analyse rules and return result
# 正、反向推理
# 规则存储
# 冲突解决
# 规则匹配方法
# 解释器
from utils.engine import param_splitter, update_factor
from utils.inference import infer
import sys
# Engine entry point: evaluate a rule target from command-line arguments.
def run_engine(args):
    """args[0] is the rule target; the remaining args are condition factors,
    parsed by param_splitter into a factor dict and fed to update_factor."""
    try:
        r_target = args[0]
        c_factors = args[1:]
        factor_dict = param_splitter(c_factors)
        # print(get_factor(r_target, factor_dict))
        print(update_factor(r_target, factor_dict))
    except (IndexError, IOError) as e:
        # Missing arguments or unreadable rule files are reported, not fatal.
        print(e)
# Inference engine entry point.
def run_infer(args):
    """Run infer() on the first four args (as ints) and print each
    result item as 'key: value'."""
    try:
        result = infer(int(args[0]), int(args[1]), int(args[2]), int(args[3]))
        for item in result.items():
            print('%s: %s' % (item[0], item[1]))
    except (IndexError, KeyError, IOError) as e:
        print(e)
def test4cir():
    """Demonstrates the remove-while-iterating pitfall.

    Removing the current element of ``a`` inside ``for i in a`` makes the
    iterator skip the next element: here 1 is printed and removed, the
    iterator then lands on 3, so the output is 1 and 3 — never 2.
    """
    a = [1, 2, 3]
    b = [1]
    for i in a:
        print(i)
        for j in b:
            if i == j:
                a.remove(i)  # mutates the list being iterated (intentional demo)
                break
if __name__ == '__main__':
    # run_engine(sys.argv[1:])
    # run_infer(sys.argv[1:])
    test4cir()  # currently only runs the iteration-pitfall demo
# 获取带阈值、带加权CF(H)测试数据:
# 10 77 5 8
# Fog: 0.10779999999999999
# Snow: 条件不满足无法推断
# Rain: 0.6002970794261923
# Thunderstorm: 0.0975
# Others: 0.1408
# 获取CF(H)测试数据:
# ["p1", "h", 0.9, 0.5],
# ["p2", "h", 0.7, 0.5],
# ["p3", "h", -0.8, 0.5],
# ["p4", "p5", "p1", 0.7, 0.2],
# ["p6", "p7", "p2", 1, 0.2]
# h p3 0.3 p4 0.9 p5 0.6 p6 0.7 p7 0.8 ==> 0.5826
# ["p1", "h", 0.8, 0.5],
# ["p2", "h", 0.5, 0.5],
# ["h", "p3", "p4", 0.8, 0.5]
# p4 p1 1 p2 1 p3 1 ==> 0.72
# ["a1", "b1", 0.8, 0.5],
# ["a2", "b1", 0.5, 0.5],
# ["b1", "a3", "b2", 0.8, 0.5]
# b1 b1 0 b2 0 a1 1 a2 1 a3 1 ==> 0.9
# b2 b1 0 b2 0 a1 1 a2 1 a3 1 ==> 0.72
# b1 b1 0.6 b2 0.6 a1 1 a2 1 a3 1 ==> 0.96
# b2 b1 0.6 b2 0.6 a1 1 a2 1 a3 1 ==> 0.9072
|
import pytest
from queue_with_stacks.queue_with_stacks import PseudoQueue
# from queue_with_stacks.queue_with_stacks import PseudoQueue
# from ..queue_with_stacks import PseudoQueue
# from .queue_with_stacks import PseudoQueue
# from queue_with_stacks import PseudoQueue
# from queue_with_stacks import Stack
@pytest.mark.skip
def test_stack_exists():
    # NOTE(review): ``Stack`` is never imported (its import at the top of the
    # file is commented out), so unskipping this test yields a NameError, not
    # a meaningful assertion.
    assert Stack

# @pytest.mark.skip
def test_pq_exists():
    """Smoke test: the PseudoQueue class is importable."""
    assert PseudoQueue

@pytest.mark.skip
def test_enqueue_one():
    """Skipped: enqueues a single item; has no assertion yet."""
    pq = PseudoQueue()
    pq.enqueue("penny")

# @pytest.fixture
# def coin_stack():
# coins = PseudoQueue()
# coins.enqueue("penny")
# coins.enqueue("nickel")
# coins.enqueue("dime")
# return coins
|
from PyQt5.QtWidgets import QWidget, QDialog, QInputDialog
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtGui import QImage, QPalette, QBrush
from PyQt5.QtCore import QSize
import os
SCREEN_SIZE = [700, 700]
class AddCustomLevel(QDialog, QWidget):
    """Dialog for placing a new button on a custom level's background.

    A click anywhere stores the position in new_levels_info/coords.txt; the
    confirm button then writes the button's config file (coordinates, sizes,
    command, target room, optionally a challenge number) under the level's
    folder.  The current level/file/button-number are exchanged with the
    rest of the application through small text files in new_levels_info/.
    """

    def __init__(self):
        super().__init__()
        self.initUI()
        # Last clicked position; kept as instance state.
        self.x = 0
        self.y = 0

    def initUI(self):
        """Build the dialog and paint the level's background image."""
        self.setWindowTitle('Выбор места кнопки')
        self.setGeometry(1000, 50, *SCREEN_SIZE)
        self.setFixedSize(*SCREEN_SIZE)
        self.btn_close = QPushButton('Выбрать', self)
        self.btn_close.resize(50, 50)
        # NOTE(review): the button is moved to (950, 950) but the window is
        # fixed at 700x700, so it seems to lie outside the visible area —
        # confirm this is intended.
        self.btn_close.move(950, 950)
        self.btn_close.clicked.connect(self.exit)
        with open('new_levels_info/level.txt', 'r', encoding='utf8') as input_file:
            level = input_file.readline()
        with open('new_levels_info/file.txt', 'r', encoding='utf8') as input_file:
            file = input_file.readline()
        oImage = QImage(f'{level}/{file}/background.png')
        sImage = oImage.scaled(QSize(*SCREEN_SIZE))
        palette = QPalette()
        palette.setBrush(QPalette.Window, QBrush(sImage))
        self.setPalette(palette)
        self.show()

    def mousePressEvent(self, event):
        """Record a clicked position (outside the confirm-button region)
        to new_levels_info/coords.txt."""
        if event.x() < 950 or event.y() < 950:
            self.x = event.x()
            self.y = event.y()
            with open('new_levels_info/coords.txt', 'w') as output_file:
                output_file.write(str(event.x()) + ' ' + str(event.y()))

    def exit(self):
        """Persist the chosen coordinates, collect the remaining button
        configuration interactively, then close the dialog."""
        with open('new_levels_info/coords.txt', 'r', encoding='utf8') as input_file:
            coords = input_file.readline().split(' ')
        with open('new_levels_info/level.txt', 'r', encoding='utf8') as input_file:
            level = input_file.readline()
        with open('new_levels_info/file.txt', 'r', encoding='utf8') as input_file:
            file = input_file.readline()
        path = os.path.join(level, file)
        with open('new_levels_info/num.txt', 'r', encoding='utf8') as input_file:
            num = input_file.readline()
        # First line of the button file: "x y".
        with open(f'{path}/button_{num}.txt', 'w', encoding='utf8') as output_file:
            output_file.write(coords[0] + ' ' + coords[1] + '\n')
        self.button_sizes(path, num)
        self.button_command(path, num)
        self.close()

    def button_way(self, path, num):
        """Ask which room the button leads to and append it to the config."""
        way, ok_pressed = QInputDialog.getText(self, "Введите строку",
        "В какую комнату будет вести кнопка(или в эту же)?")
        if ok_pressed:
            with open(f'{path}/button_{num}.txt', 'a', encoding='utf8') as output_file:
                output_file.write(way + '\n')

    def button_command(self, path, num):
        """Ask for the button's command, append it, then the target room;
        the 'look' command additionally allocates a challenge number."""
        command_lst = ['ПОСМОТРЕТЬ', 'ВОЙТИ В КОМНАТУ', 'ОБРАТНО', 'ПРОЙТИ ДАЛЬШЕ', 'ПРЕДОСТАВИТЬ ПАРОЛЬ']
        command, ok_pressed = QInputDialog.getItem(
            self, "Выбор команды", "Выберите команду",
            command_lst, 0, True)
        if ok_pressed:
            ind = command_lst.index(command)
            with open(f'{path}/button_{num}.txt', 'a', encoding='utf8') as output_file:
                output_file.write(command_lst[ind] + '\n')
            self.button_way(path, num)
            if command_lst[ind] == 'ПОСМОТРЕТЬ':
                self.new_chall(path, num)

    def new_chall(self, path, num):
        """Append the current challenge number to the button config and
        increment the global counter in new_levels_info/chall_num.txt."""
        with open('new_levels_info/chall_num.txt', 'r', encoding='utf8') as input_file:
            chall_num = input_file.readline()
        with open(f'{path}/button_{num}.txt', 'a', encoding='utf8') as output_file:
            output_file.write(chall_num + '\n')
        with open('new_levels_info/chall_num.txt', 'w', encoding='utf8') as output_file:
            output_file.write(str(int(chall_num) + 1))

    def button_sizes(self, path, num):
        """Ask for the button's width and height and append them as 'w h'."""
        btn_size_x, ok_pressed = QInputDialog.getText(self, "Введите значение",
        "Какой размер кнопки (по длине)?")
        if ok_pressed:
            with open(f'{path}/button_{num}.txt', 'a', encoding='utf8') as output_file:
                output_file.write(btn_size_x + ' ')
        btn_size_y, ok_pressed = QInputDialog.getText(self, "Введите значение",
        "Какой размер кнопки (по ширине)?")
        if ok_pressed:
            with open(f'{path}/button_{num}.txt', 'a', encoding='utf8') as output_file:
                output_file.write(btn_size_y + '\n')
|
"""Advent of Code Day 7 - Recursive Circus"""
import collections
import re
def base_dict(tower_data):
    """Build weight and holding dictionaries and identify the base program.

    Each line looks like 'name (weight) -> child, child'; the base is the
    one program nobody holds.  Returns (holding, weights, base_name).
    """
    holding = {}
    weights = {}
    candidates = []
    for line in tower_data:
        name, weight, *children = re.findall(r'\w+', line)
        candidates.append(name)
        weights[name] = int(weight)
        holding[name] = list(children)
    # Every held program is eliminated; only the bottom program survives.
    for children in holding.values():
        for child in children:
            candidates.remove(child)
    return (holding, weights, candidates[0])
def find_fix(program):
    """Traverse structure and find mismatched weight and the fix required.

    Returns the total weight of the tower rooted at *program*.  Relies on
    the module-level ``holding`` and ``weights`` dicts built in __main__.
    When exactly one child tower disagrees with its siblings, prints the
    corrected weight for the offending program (Answer Two).
    """
    above = [find_fix(held) for held in holding[program]]
    if len(set(above)) == 2:
        # Imbalanced so find most common (correct) weight and aberrant one
        mismatch = collections.Counter(above).most_common()
        desired = mismatch[0][0]
        wrong = mismatch[1][0]
        # Find imbalanced program and the weight it should be
        imbalanced = holding[program][above.index(wrong)]
        corrected = weights[imbalanced] + (desired - wrong)
        # Answer Two
        print("Corrected weight:", corrected)
        # Return corrected weight so upstream imbalances don't trigger
        return weights[program] + sum(above) + desired - wrong
    return weights[program] + sum(above)
if __name__ == '__main__':
    # Parse the puzzle input and expose holding/weights/base as module
    # globals, which find_fix() reads directly.
    with open('input.txt', 'r') as f:
        data = f.readlines()
    initial_info = base_dict(data)
    holding = initial_info[0]
    weights = initial_info[1]
    base = initial_info[2]
    # Answer One
    print("Bottom program:", base)
    # Answer Two
    find_fix(base)
|
#! /usr/bin/env python3
import os
import requests
import json
import slack
from slackbot import slackclient
# Need to clean this up to use slash command
# grab token from env var, error if not found
def extract_api_token(env_var_name):
    """Return the token held in the named environment variable.

    Prints a warning and returns None when the variable is unset or empty.
    """
    value = os.environ.get(env_var_name)
    if not value:
        print('Could not retrieve token from env var')
        return None
    return value
# grab slack token from env var
# authenticate from slack app to read emoji.list scope
SLACK_TOKEN = 'SLACK_TOKEN'  # env var name (constant not actually used below)
slack_token = extract_api_token('SLACK_TOKEN')
client = slack.WebClient(token=slack_token)
# JSON output of emoji.list
payload = str(client.api_call("emoji.list"))
# convert to dictionary
# NOTE(review): round-tripping the response through str() and eval() is
# unsafe (eval on external data) and presumably unnecessary — the response
# object already supports dict-style access; verify and use it directly.
payload = eval(payload)
# ignore k/v pairs outside of emojis
payload = payload['emoji']
def unpack_emoji(emoji_map):
    """Format every emoji name in *emoji_map* as ':name: ' and concatenate.

    The mapping's values (image links) are intentionally ignored.  The
    parameter was renamed from ``dict``, which shadowed the builtin; the
    quadratic ``+=`` loop is replaced by a single join.
    """
    return ''.join(':' + name + ': ' for name in emoji_map)
# convert emoji string to list to truncate for slack payload limits
emoji_list = unpack_emoji(payload).split(' ')
emoji_list.sort()  # sort alphabetically for ease
# Chunk into runs of 50 emoji and convert back to strings.
# FIX: the original slices started one past the previous chunk's end
# ([51:100], [101:150], ...), silently dropping emoji_list[50], [100], ...;
# contiguous slices keep every emoji.  Still worth rewriting as a loop to
# support an unlimited number of emoji.
e50 = ' '.join(emoji_list[0:50])
e100 = ' '.join(emoji_list[50:100])
e150 = ' '.join(emoji_list[100:150])
e200 = ' '.join(emoji_list[150:200])
e250 = ' '.join(emoji_list[200:250])
e300 = ' '.join(emoji_list[250:300])
e350 = ' '.join(emoji_list[300:350])
e400 = ' '.join(emoji_list[350:400])
e450 = ' '.join(emoji_list[400:450])
e500 = ' '.join(emoji_list[450:500])
e550 = ' '.join(emoji_list[500:550])
e600 = ' '.join(emoji_list[550:600])
# send payload to specified channel
# sending in sectioned blocks for ease of sending
# multiple payloads at once.
# probably need to rewrite this too for algorithm based on above:
# each section carries two 50-emoji chunks as mrkdwn text.
client.chat_postMessage(
    channel="test-alerts-keri",
    blocks=[
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e50) + str(e100)
            }
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e150) + str(e200)
            },
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e250) + str(e300)
            },
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e350) + str(e400)
            },
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e450) + str(e500)
            },
        },
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": str(e550) + str(e600)
            },
        }
    ]
)
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 11:09:30 2021
@author: DELL
"""
import torch
from torch.nn.init import xavier_normal_
from torch import nn
class mirror_conv(nn.Module):
    """3D convolution with manual mirror (reflection) padding on all three
    spatial dimensions, giving 'same'-sized output for odd kernels.

    NOTE(review): for kernel_size == 1, pad == 0 and the slice ``-pad:``
    degenerates to the whole tensor, so this module assumes an odd
    kernel_size >= 3 — confirm callers.
    """
    def __init__(self,in_channel,out_channel,kernel_size):
        super(mirror_conv,self).__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv3d(in_channel, out_channel, kernel_size)
        #xavier_normal_(self.conv.weight)

    def forward(self,img):
        # Reflection pad width for a 'same' convolution with an odd kernel.
        pad = (self.kernel_size - 1)//2
        # Depth axis (dim 2): flip the border slabs and concatenate.
        up,down = img[:,:,:pad,:,:],img[:,:,-pad:,:,:]
        up = torch.flip(up,dims = [2]); down = torch.flip(down,dims = [2])
        img = torch.cat([up,img,down],dim = 2)
        # Height axis (dim 3).
        up,down = img[:,:,:,:pad,:],img[:,:,:,-pad:,:]
        up = torch.flip(up,dims = [3]); down = torch.flip(down,dims = [3])
        img = torch.cat([up,img,down],dim = 3)
        # Width axis (dim 4).
        up,down = img[:,:,:,:,:pad],img[:,:,:,:,-pad:]
        up = torch.flip(up,dims = [4]); down = torch.flip(down,dims = [4])
        img = torch.cat([up,img,down],dim = 4)
        return self.conv(img)
class circular_conv(nn.Module):
    """3D convolution with manual circular (wrap-around) padding on all
    three spatial dimensions.

    NOTE(review): pad is kernel_size - 1 on *both* sides, i.e. more than a
    'same' convolution needs — the output is larger than the input by
    kernel_size - 1 per spatial dim; confirm this is intended.
    """
    def __init__(self,in_channel,out_channel,kernel_size):
        super(circular_conv,self).__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv3d(in_channel, out_channel, kernel_size)
        xavier_normal_(self.conv.weight)

    def forward(self,img):
        pad = self.kernel_size - 1
        # For each axis: prepend the trailing slab and append the leading
        # one, producing periodic boundary conditions.
        up,down = img[:,:,:pad,:,:],img[:,:,-pad:,:,:]
        img = torch.cat([down,img,up],dim = 2)
        up,down = img[:,:,:,:pad,:],img[:,:,:,-pad:,:]
        img = torch.cat([down,img,up],dim = 3)
        up,down = img[:,:,:,:,:pad],img[:,:,:,:,-pad:]
        img = torch.cat([down,img,up],dim = 4)
        return self.conv(img)
class conv_block(nn.Module):
    """One U-Net stage: two (Conv3d 3x3x3, BatchNorm3d, LeakyReLU) layers."""
    def __init__(self,block,depth,in_channel):
        super(conv_block,self).__init__()
        '''
        Translated from Chinese:
        depth is the index of this conv stage;
        layer_num is the number of conv layers from input to output in a stage;
        in_channel is the channel count of the first input image;
        out_channel is the channel count of the first output image.
        '''
        # Channel plan: encoder stage 0 maps in->16->32; deeper encoder
        # stages keep then double the channels; decoder stages reduce the
        # (concatenated) channels by a factor of 3.
        if block == 'encoder':
            if depth == 0:
                channel1 = [in_channel,16]
                channel2 = [16,32]
            else:
                channel1 = [in_channel,in_channel]
                channel2 = [in_channel,in_channel*2]
        else:
            channel1 = [in_channel,in_channel // 3]
            channel2 = [in_channel//3, in_channel // 3]
        self.sequential = nn.Sequential()
        for i in range(2):
            #self.sequential.add_module('conv_{}_{}'.format(depth,i),
            # mirror_conv(channel1[i], channel2[i], 3)) # the U-Net paper uses 3
            self.sequential.add_module('conv_{}_{}'.format(depth,i),
            nn.Conv3d(channel1[i], channel2[i], 3,padding=1))
            #xavier_normal_(self.sequential['conv_{}_{}'.format(depth,i)].weight)
            # Each iteration appends (conv, BN, LeakyReLU), so iteration i's
            # conv sits at flat index i*3.
            xavier_normal_(self.sequential[i*3].weight)
            self.sequential.add_module('BN_{}_{}'.format(depth,i),module = nn.BatchNorm3d(channel2[i]))
            self.sequential.add_module('Relu_{}_{}'.format(depth,i),module = nn.LeakyReLU())# nn.Relu()
        '''
        Translated from Chinese: the input/output channel counts still need
        thought — since conv blocks are used by both the decoder and the
        encoder, their channel counts may differ between the two.
        '''

    def forward(self,data):
        return self.sequential(data)
class encoder_block(nn.Module):
    """The U-Net contracting path: one conv_block per stage, with 2x2x2
    max pooling before every stage except the first."""
    def __init__(self,depth_num,in_channel):
        super(encoder_block,self).__init__()
        '''
        Translated from Chinese:
        depth_num is the number of encoder stages;
        in_channel is a list of the input channel count of stage i's first conv;
        out_channel would be the corresponding output channel counts.
        '''
        self.module_dict = nn.ModuleDict()
        self.layer_num = [2] * depth_num # tentative parameter (translated)
        for i in range(depth_num):
            self.module_dict['encoder_{}'.format(i)] = conv_block('encoder',i,
            in_channel[i])
            # Stage 0 consumes the raw input; deeper stages downsample first.
            if i != 0:
                self.module_dict['pooling_{}'.format(i)] = nn.MaxPool3d(2,stride = 2)

    def forward(self,data,depth):
        # Run a single encoder stage (pool first, except at depth 0).
        if depth != 0:
            data = self.module_dict['pooling_{}'.format(depth)](data)
        return self.module_dict['encoder_{}'.format(depth)](data)
class decoder_block(nn.Module):
    """The U-Net expanding path: transposed-conv upsampling, batch norm,
    skip-connection concat, then a conv_block, per stage."""
    def __init__(self,depth_num,in_channel):
        super(decoder_block,self).__init__()
        '''
        Translated from Chinese:
        depth_num is the number of decoder stages;
        in_channel is a list of the input channel count of stage i's first conv;
        out_channel would be the corresponding output channel counts.
        '''
        self.module_dict = nn.ModuleDict()
        self.layer_num = [2] * depth_num
        for i in range(depth_num):
            self.module_dict['decoder_{}'.format(i)] = conv_block('decoder',i,
            in_channel[i])
            #self.module_dict['upsample_{}'.format(i)] = nn.Upsample(scale_factor=2,mode='trilinear',align_corners=True)
            # Learned 2x upsampling; in_channel[i]*2//3 presumably matches the
            # channel count arriving from the previous (deeper) stage — TODO
            # confirm against the channel plan in conv_block.
            self.module_dict['upsample_{}'.format(i)] = nn.ConvTranspose3d(in_channel[i]*2//3,in_channel[i]*2//3,
            kernel_size=2,
            stride = 2)
            xavier_normal_(self.module_dict['upsample_{}'.format(i)].weight)
            self.module_dict['batch_normal_{}'.format(i)] = nn.BatchNorm3d(in_channel[i]*2//3)

    def forward(self,data,encoder,depth):
        # Upsample, normalise, concatenate the skip connection, then conv.
        data = self.module_dict['upsample_{}'.format(depth)](data)
        data = self.module_dict['batch_normal_{}'.format(depth)](data)
        data = torch.cat([encoder,data],dim = 1)
        return self.module_dict['decoder_{}'.format(depth)](data)
class U_net(nn.Module):
    """3D U-Net assembled from the encoder/decoder blocks above.

    config = (encoder_depth, decoder_depth, encoder_channels,
    decoder_channels); a final 1x1x1 conv maps to one output channel.
    """
    def __init__(self,config):
        super(U_net,self).__init__()
        #torch.manual_seed(55)
        #torch.cuda.manual_seed_all(55)
        self.encoder_depth = config[0]
        self.decoder_depth = config[1]
        self.encoder_channel = config[2]
        self.decoder_channel = config[3]
        self.encoder = encoder_block(self.encoder_depth, self.encoder_channel)
        self.decoder = decoder_block(self.decoder_depth, self.decoder_channel)
        self.last_conv = nn.Conv3d(self.decoder_channel[0]//3 , 1, 1)
        xavier_normal_(self.last_conv.weight)

    def forward(self,data):
        # Contracting path: run every encoder stage, keeping each stage's
        # output for the skip connections.
        encoder_out = {}
        depth = [i for i in range(self.encoder_depth)]
        for i in depth:
            data = self.encoder.forward(data, i)
            encoder_out['depth_{}'.format(i)] = data
        # Expanding path: decode from the deepest stage back up, feeding in
        # the matching encoder output at each depth.
        depth = [i for i in range(self.decoder_depth)]
        depth = depth[::-1]
        data = encoder_out['depth_{}'.format(self.encoder_depth-1)]
        for i in depth:
            data = self.decoder.forward(data,encoder_out['depth_{}'.format(i)],i)
        data = self.last_conv(data)
        return data
|
from sample_madlibs import hello, park, party, madlibs
import random

# Pick one of the imported madlib objects at random and run it.
if __name__ == "__main__":
    m = random.choice([hello, park, party, madlibs])
    m.madlib()
import unittest
import os
from ..BaseTestCase import BaseTestCase
from centipede.ExpressionEvaluator import ExpressionEvaluator
class SystemTest(BaseTestCase):
    """Test System expressions."""

    def testTmpdir(self):
        """
        Test that the tmpdir expression works properly.
        """
        # The expression is expected to return a path that does not exist
        # yet — presumably a fresh, not-yet-created temp dir; TODO confirm
        # against ExpressionEvaluator's implementation.
        result = ExpressionEvaluator.run("tmpdir")
        self.assertFalse(os.path.exists(result))

    def testEnv(self):
        """
        Test that the env expression works properly.
        """
        # NOTE(review): USERNAME is Windows-specific; on POSIX both sides
        # are likely None, making the test vacuous — verify intent.
        result = ExpressionEvaluator.run("env", "USERNAME")
        self.assertEqual(result, os.environ.get("USERNAME"))

if __name__ == "__main__":
    unittest.main()
|
import numpy as np
# obs = [ace, sum1, otherSum, rounds, cardsLeft]
# noinspection PyRedundantParentheses
class blackJack():
    """Multi-round, multi-deck blackjack environment whose observation
    includes the remaining shoe counts (for card-counting agents)."""

    numDeck = 5    # number of 52-card decks in the shoe
    rounds = 1     # hands played per episode
    # Card values of one suit; 10 stands in for 10/J/Q/K.
    deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]

    def __init__(self):
        # Index v holds how many cards of value v remain (index 0 unused);
        # forPlayerCardCount tracks only cards the player has seen dealt.
        self.forPlayerCardCount = [0 for i in range(11)]
        self.cardCount = [0 for i in range(11)]
        self.playerSum = 0
        self.dealerSum = 0
        self.canUseAce = False
        self.numRounds = self.rounds
        self.playerHand = []
        self.dealerHand = []
        self.isNatural = False

    def reset(self):
        """Refill the shoe and deal a fresh hand; returns (state, natural)."""
        for i in self.deck:
            self.cardCount[i] = 4 * blackJack.numDeck
            self.forPlayerCardCount[i] = 4 * blackJack.numDeck
        # Four ten-valued ranks per suit, hence the extra factor of 4.
        self.cardCount[10] = 4 * 4 * blackJack.numDeck
        self.forPlayerCardCount[10] = 4 * 4 * blackJack.numDeck
        self.numRounds = self.rounds
        self.new_hand()
        return self.return_state(), self.isNatural

    def new_hand(self):
        """Deal two cards each to player and dealer for the next round."""
        self.canUseAce = False
        self.forPlayerCardCount = self.cardCount.copy()
        self.numRounds -= 1
        self.isNatural = False
        self.playerHand = [self.new_card(1), self.new_card(1)]
        # The dealer's second card is the hole card (a=0: not player-visible).
        self.dealerHand = [self.new_card(1), self.new_card(0)]
        self.playerSum = self.find_sum(self.playerHand)
        # find_sum() clobbers self.canUseAce via can_use_ace(), so the flag
        # describing the *player's* hand is saved and restored around the
        # dealer-sum computation (the two branches are otherwise identical).
        if(self.canUseAce):
            self.dealerSum = self.find_sum(self.dealerHand)
            self.canUseAce = True
        else:
            self.dealerSum = self.find_sum(self.dealerHand)
            self.canUseAce = False
        if(sorted(self.playerHand) == [1, 10]):
            # print("Got natural in new hand")
            self.isNatural = True

    # ensure count is returned last
    def return_state(self):
        """Observation tuple: (usable ace, player sum, dealer up-card,
        rounds left, *cardCount).  NOTE(review): cardCount reflects every
        dealt card, including the dealer's hole card — confirm intended."""
        return tuple([self.canUseAce, self.playerSum, self.dealerHand[0], self.numRounds] + self.cardCount)

    def new_card(self, a):
        """Draw a random card from the shoe; a == 1 marks it player-visible."""
        if(sum(self.cardCount) <= 20 ):
            print(self.cardCount, self.numRounds)
        # Sample proportionally to the remaining copies of each value.
        c = np.random.randint(0, sum(self.cardCount))
        card = 1
        sum2 = 0
        while (sum2 <= c):
            sum2 += self.cardCount[card]
            card += 1
        card -= 1
        if (card <= 0):
            print("Error : Invalid card value in new_card ", card, c, self.cardCount)
        if (a == 1):
            self.forPlayerCardCount[card] -= 1
        self.cardCount[card] -= 1
        return card

    def find_sum(self, hand):
        """Best hand total, counting one ace as 11 when that doesn't bust."""
        if (self.can_use_ace(hand)):
            return sum(hand) + 10
        return sum(hand)

    def can_use_ace(self, hand):
        """Whether an ace may count as 11; also updates self.canUseAce."""
        if (1 in hand and sum(hand) + 10 <= 21):
            self.canUseAce = True
            return True
        else:
            self.canUseAce = False
            return False

    # action 1 is hit and 0 is stay
    def step(self, action):
        """Apply an action; returns (state, reward, done, natural)."""
        if (action == 1):
            self.playerHand.append(self.new_card(1))
            # print("Self hand in hit", self.playerHand)
            self.playerSum = self.find_sum(self.playerHand)
            if (self.playerSum > 21):
                # Bust: lose the round and immediately deal the next hand.
                self.new_hand()
                # print("Self new hand in hit", self.playerHand)
                return self.return_state(), -1, self.numRounds == -1, self.isNatural
            else:
                return self.return_state(), 0, False, self.isNatural
        else:
            # print("Self hand", self.playerHand)
            self.playerSum = self.find_sum(self.playerHand)
            # Dealer draws while the raw sum (aces counted as 1 here) < 17.
            while (sum(self.dealerHand) < 17):
                self.dealerHand.append(self.new_card(0))
            # Save/restore the player's usable-ace flag (see new_hand()).
            if (self.canUseAce):
                self.dealerSum = self.find_sum(self.dealerHand)
                self.canUseAce = True
            else:
                self.dealerSum = self.find_sum(self.dealerHand)
                self.canUseAce = False
            r = 0
            if (21 >= self.dealerSum > self.playerSum):
                r = -1
            elif (self.dealerSum == self.playerSum):
                r = 0
            elif (sorted(self.playerHand) == [1, 10] and self.dealerSum < self.playerSum):
                # Natural blackjack pays 3:2.
                r = 1.5
            else:
                r = 1
            self.new_hand()
            # print("Self new hand", self.playerHand)
            return self.return_state(), r, self.numRounds == -1, self.isNatural
#!/usr/bin/env python
import setuptools
# Read the long description from the README for display on PyPI.
with open("README.md", "r", encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name='TurkishStemmer',
    version='1.3',
    description='Turkish Stemmer',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Hanefi Onaldi',
    author_email='abdullahanefi16@gmail.com',
    url='https://github.com/hanefi/turkish-stemmer-python',
    packages=setuptools.find_packages(),
    # Ship the bundled *.txt data files with every package.
    package_data={'':['*.txt'],},
)
|
import numpy as np
def normalize(X):
    """Min-max scale each column of ``X`` into [0, 1].

    Parameters
    ----------
    X : array_like
        2-D array of shape (n_samples, n_features).

    Returns
    -------
    np.ndarray
        Float array of the same shape; each column's minimum maps to 0 and
        its maximum to 1.  A constant column divides by zero (numpy warns
        and yields nan/inf), same as the original behaviour.
    """
    # BUGFIX: the original wrote scaled values into ``np.copy(X)``; for
    # integer input that copy has an int dtype, silently truncating every
    # normalized value to 0.  Work in float instead.
    X = np.asarray(X, dtype=float)
    col_min = X.min(axis=0)
    col_max = X.max(axis=0)
    return (X - col_min) / (col_max - col_min)
def standardize(X):
    """Z-score each column of ``X`` (subtract mean, divide by std).

    Parameters
    ----------
    X : array_like
        2-D array of shape (n_samples, n_features).

    Returns
    -------
    np.ndarray
        Float array where every column has mean ~0 and (population) std ~1.
        A constant column divides by zero, as in the original.
    """
    # BUGFIX: same integer-dtype truncation as in normalize() — copying X
    # as-is and assigning float columns into it truncated int input.
    X = np.asarray(X, dtype=float)
    return (X - X.mean(axis=0)) / X.std(axis=0)
|
from collections.abc import Iterable, Iterator
from typing import Any, Generic
from typing_extensions import Literal
from wtforms.meta import _MultiDictLikeWithGetall
def clean_datetime_format_for_strptime(formats: Iterable[str]) -> list[str]: ...
class UnsetValue:
    """Stub for WTForms' "no value provided" sentinel type; always falsy."""
    def __bool__(self) -> Literal[False]: ...

# The module-level sentinel instance of the type above.
unset_value: UnsetValue
class WebobInputWrapper:
    """Stub: adapts a WebOb-style multidict to the interface WTForms
    expects (iteration over keys, length, membership, ``getlist``)."""
    def __init__(self, multidict: _MultiDictLikeWithGetall) -> None: ...
    def __iter__(self) -> Iterator[str]: ...
    def __len__(self) -> int: ...
    def __contains__(self, name: str) -> bool: ...
    # Returns every value stored under ``name`` (possibly empty).
    def getlist(self, name: str) -> list[Any]: ...
|
from colorama import Fore, Style
import time
class Logger:
    """Timestamped console logger with colorama-colored level tags.

    Purely static: the only state is the class-level ``output_file``
    handle.  When a file is configured via ``set_output_file``, every
    message is also written (uncolored) to that file.
    """

    # Optional plain-text mirror of all log output (None = console only).
    output_file = None

    # Continuation lines of multi-line messages are indented to align
    # under the message column: "HH:MM:SS" (8) + space + 8-char level
    # label + space = 18 characters.
    _INDENT = ' ' * 18

    @staticmethod
    def set_output_file(file_name):
        """Start mirroring log output to ``file_name`` (truncates it).

        NOTE(review): the handle is never explicitly closed or flushed;
        buffered output is only guaranteed at interpreter exit.
        """
        if file_name:
            Logger.output_file = open(file_name, 'w')

    @staticmethod
    def time_now():
        """Current local time as an ``HH:MM:SS`` string."""
        return time.strftime("%H:%M:%S", time.localtime())

    @staticmethod
    def _emit(color, label, text):
        """Shared body of the four public level methods.

        (This logic was duplicated verbatim in message/info/warning/error;
        labels are pre-padded to 8 chars so columns align with _INDENT.)
        """
        if Logger.output_file:
            Logger.output_file.write(text + '\n')
        # Keep multi-line messages aligned under the first line's text.
        text = text.replace('\n', '\n' + Logger._INDENT)
        print(
            f"{Fore.MAGENTA}{Style.BRIGHT}{Logger.time_now()} "
            f"{color}{label}{Style.RESET_ALL} {text}")

    @staticmethod
    def message(text):
        """Log ``text`` at MESSAGE level (blue tag)."""
        Logger._emit(Fore.BLUE, ' MESSAGE', text)

    @staticmethod
    def info(text):
        """Log ``text`` at INFO level (green tag)."""
        Logger._emit(Fore.GREEN, '    INFO', text)

    @staticmethod
    def warning(text):
        """Log ``text`` at WARNING level (yellow tag)."""
        Logger._emit(Fore.YELLOW, ' WARNING', text)

    @staticmethod
    def error(text):
        """Log ``text`` at ERROR level (red tag)."""
        Logger._emit(Fore.RED, '   ERROR', text)
|
import random
import gzip
import math
import heapq
import multiprocessing
from itertools import izip
from PIL import Image
import numpy as np
import cPickle as pickle
# --- Map / sensor / particle-filter tuning constants ---
WORLD_TO_MAP_SCALE = 15.758  # map pixels per world metre
RAY_MOD = 20
CELLS_IN_ONE_METRE = 2
LASER_MAX_RANGE = 10  # metres
LASER_SCAN_ANGLE_INCREMENT = 10  # degrees between consecutive rays
TOTAL_PARTICLES = 4200
# NOTE(review): this file targets Python 2 (izip/cPickle/tuple params),
# where "/" is integer division (700); under Python 3 it yields 700.0.
PARTICLES_PER_LANDMARK = TOTAL_PARTICLES / 6
THRESHOLD = 1.0/(TOTAL_PARTICLES * 40.0)
CENTROID_THRESHOLD = TOTAL_PARTICLES * 0.75 # %
RESAMPLE_THRESHOLD = TOTAL_PARTICLES * 0.80 # %
BOUNDING_BOX_AREA_CONVERGENCE = 4 # bounding box with area 4m^2 is considered converged
MAPWIDTH = 2000   # map image width in pixels
MAPHEIGHT = 700   # map image height in pixels
# "global" at module level is a no-op; kept to signal intent.
global cspace
cspace = None  # obstacle-distance lookup table, lazy-loaded from pickle
class PriorityQueue:
    """Minimal min-priority queue backed by ``heapq``."""

    def __init__(self):
        self.elements = []

    def empty(self):
        """True when nothing is queued."""
        return not self.elements

    def put(self, item, priority):
        """Queue ``item``; lower ``priority`` values pop first."""
        heapq.heappush(self.elements, (priority, item))

    def get(self):
        """Pop and return the item with the smallest priority."""
        priority, item = heapq.heappop(self.elements)
        return item
def distance_to_obs((mx, my), theta):
    """Pre-computed distance from map cell (mx, my) to the nearest obstacle
    along heading ``theta`` (degrees).

    Python 2 only: tuple parameters were removed in Python 3.  Lazily
    loads the pickled configuration space on first use.  Implicitly
    returns None when (my, mx) has no precomputed entry.
    """
    global cspace
    # Load the lookup table on first call.
    if cspace == None:
        initialize_cspace()
    # Entries are stored one per 2 degrees (180 bins).  NOTE(review): the
    # +270 offset's origin is not visible here — confirm against the map
    # frame conventions.
    theta += 270
    idx = int((theta / 2.0)) % 180
    # Table is keyed (row, col) = (my, mx).
    if (my, mx) in cspace:
        return cspace[(my, mx)][idx]
def initialize_cspace():
    """Load the pickled configuration-space lookup into the module global
    ``cspace``; print a hint if the pickle file is missing."""
    global cspace
    try:
        # NOTE(review): despite the .pklz extension the file is read as a
        # plain pickle here, not through gzip.
        f = open('cspace.pklz','rb')
        cspace = pickle.load(f)
        print("cspace loaded")
        f.close()
    except IOError:
        # Missing file: leave cspace as None and tell the operator.
        print("No configuration space calculated! Do that.")
def bresenham(start, end):
    """Integer grid cells along the segment ``start`` -> ``end``.

    Classic integer Bresenham.  The returned list always runs from
    ``start`` to ``end``: when the endpoints are swapped internally to
    iterate left-to-right, the result is reversed before returning.
    """
    ax, ay = int(round(start[0])), int(round(start[1]))
    bx, by = int(round(end[0])), int(round(end[1]))

    # Work in a frame where the line is shallow (|slope| <= 1).
    steep = abs(by - ay) > abs(bx - ax)
    if steep:
        ax, ay = ay, ax
        bx, by = by, bx

    # Always march left-to-right; remember whether we flipped.
    flipped = ax > bx
    if flipped:
        ax, bx = bx, ax
        ay, by = by, ay

    run = bx - ax
    rise = by - ay
    err = int(run / 2.0)
    step = 1 if ay < by else -1

    cells = []
    y = ay
    for x in range(ax, bx + 1):
        # Un-transpose when we swapped axes for the steep case.
        cells.append((y, x) if steep else (x, y))
        err -= abs(rise)
        if err < 0:
            y += step
            err += run

    if flipped:
        cells.reverse()
    return cells
def get_free_cell(map):
    """Return (row, col) of a uniformly random free cell of ``map``.

    A cell is "free" when its value equals 255.  Uses rejection sampling,
    so the grid must contain at least one free cell or this loops forever.
    """
    height = len(map)
    width = len(map[0])
    while True:
        # BUGFIX/idiom: the original used int(random.uniform(0, n)), which
        # can (with vanishing probability) return exactly n and index out
        # of range; randrange(n) is the correct integer sampler.
        x = random.randrange(height)
        y = random.randrange(width)
        if map[x][y] == 255:
            return x, y
def is_free_gcs(x, y, map):
    """
    Checks if (x, y) is free in the global coordinate system.

    Thin wrapper: convert world coordinates to map pixels, then test the
    pixel with is_free_map.
    """
    pixel = gcs2map(x, y)
    return is_free_map(pixel[0], pixel[1], map)
def is_free_map(mx, my, map):
    """
    Checks if map pixel (mx, my) is free (i.e. not a black pixel).

    ``map`` is indexed [my, mx] because image rows correspond to y while
    columns correspond to x.

    BUGFIX: the original only checked the upper bounds (mx >= 2000,
    my >= 700); negative coordinates wrapped around via Python's negative
    indexing and could be reported free.  Out-of-range coordinates in any
    direction are now "not free".
    """
    if not (0 <= mx < 2000):   # map width in pixels (matches MAPWIDTH)
        return False
    if not (0 <= my < 700):    # map height in pixels (matches MAPHEIGHT)
        return False
    # Free = any channel differs from pure black.
    return bool(np.any(map[my, mx] != (0, 0, 0)))
def map2gcs(mx, my):
    """
    Convert map pixel co-ords to the global (world) coordinate system.

    The world origin sits at pixel (1000, 350); pixel y grows downward
    while world y grows upward, hence the sign flip.
    """
    gx = (mx - 1000.0) / WORLD_TO_MAP_SCALE
    gy = (350.0 - my) / WORLD_TO_MAP_SCALE
    return gx, gy
def gcs2map(x, y):
    """
    Convert global (world) coords to map pixels.

    Inverse of map2gcs, truncating to whole pixels via int().
    """
    px = 1000 + x * WORLD_TO_MAP_SCALE
    py = 350 - y * WORLD_TO_MAP_SCALE
    return int(px), int(py)
def get_endpoint_at_angle(x, y, theta):
    """The point LASER_MAX_RANGE units from (x, y) along heading ``theta``
    (degrees), rounded to the nearest integer coordinates."""
    rad = math.radians(theta)
    ex = x + LASER_MAX_RANGE * math.cos(rad)
    ey = y + LASER_MAX_RANGE * math.sin(rad)
    return int(round(ex)), int(round(ey))
def prob_diff_readings(robot, particle):
    """
    Return how similar robot and particle readings are.

    Each actual reading is scored with an unnormalised Gaussian centred on
    the expected reading (a=1.0, c=1.8) and the per-ray scores are
    multiplied, so identical reading vectors score exactly 1.0.
    """
    a = 1.0
    c = 1.8
    similarity = 1.0
    for expected, actual in zip(robot, particle):
        # Same arithmetic as the original lambda (a * e^(-(x-b)^2/(2c^2))),
        # kept as math.e ** ... so results stay bit-identical.
        similarity *= a * math.e ** (-1.0 * (((actual - expected) ** 2.0) / (2.0 * c ** 2)))
    return similarity
|
#!/usr/bin/python
## -*- coding: utf-8 -*-
#
import json
#import db_conf
import cgi
import sys
import sqlite3
# CGI endpoint (Python 2): bins per-sample pathway GSVA/stained scores into
# low/medium/high counts per sample class and emits the tallies as JSON.
qform = cgi.FieldStorage()
inparam = qform.getvalue('inpathway')   # pathway name from the query string
#inparam = "Citrate_cycle_tca_cycle_KEGG" #autophagy_article"#
intype = qform.getvalue('pathwaytype')  # dataset flavour ("all", "aml", ...)
#intype = "all"#"leukemia"#"aml"
pw_src = inparam + intype               # suffix of the score table name
gsvapw = 0.4
# NOTE(review): the default above is immediately overwritten; float(None)
# raises TypeError when the 'gsvapw' parameter is missing.
gsvapw = float(qform.getvalue('gsvapw'))
mapsource = qform.getvalue('mapsource')
conn = sqlite3.connect('sqlite/hemap_pw2_' + intype + '.db')
conn.text_factory = str
c = conn.cursor()
# NOTE(review): debug leftover — the query-string 'mapsource' is discarded.
mapsource = "hema_annotationf"
# NOTE(review): table names are concatenated from request input; SQLite
# cannot bind identifiers, so inparam/intype must be validated against a
# whitelist to avoid SQL injection.
c.execute("select mclass, hc.gsm, stained_score from _pwstained_" + pw_src + " ha, " + mapsource + " hc where ha.gsm = hc.gsm")
rows = c.fetchall()
print "Content-type: text/html;charset=utf-8\r\n"
pa = {}
lowcts = {}   # class -> count of scores below -gsvapw
medcts = {}   # class -> count of scores within [-gsvapw, gsvapw]
highcts = {}  # class -> count of scores above gsvapw
for row in rows:
    pi = []
    cat = row[0]    # sample class (mclass)
    grade = row[2]  # stained/GSVA score
    #pa['espts'].append(pi)
    if (grade < (-1*gsvapw)):
        if (lowcts.get(cat) == None):
            lowcts[cat] = 1
        else:
            lowcts[cat] = lowcts[cat] + 1;
        #pa['low'].append(pi)
    elif (grade > gsvapw):
        #pa['high'].append(pi)
        if (highcts.get(cat) == None):
            highcts[cat] = 1
        else:
            highcts[cat] = highcts[cat] + 1;
    else:
        if (medcts.get(cat) == None):
            medcts[cat] = 1
        else:
            medcts[cat] = medcts[cat] + 1;
c.close()
pa["low"] = lowcts
pa["med"] = medcts
pa["high"] = highcts
print json.dumps(pa)
|
# Generated by Django 3.0.5 on 2020-06-18 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the CspRule model and links CspReport to it via a M2M field.

    Auto-generated by Django 3.0.5; later migrations depend on this file,
    so edit with care.
    """

    dependencies = [
        ('csp_observer', '0001_initial'),
    ]

    operations = [
        # New table for rules that classify/annotate CSP violation reports.
        migrations.CreateModel(
            name='CspRule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blocked_url', models.CharField(blank=True, max_length=255, null=True)),
                ('effective_directive', models.CharField(blank=True, max_length=255, null=True)),
                ('ignore', models.BooleanField(default=False)),
                ('title', models.CharField(blank=True, max_length=255, null=True)),
                ('description', models.TextField(blank=True, null=True)),
            ],
        ),
        # A report may match any number of rules, and vice versa.
        migrations.AddField(
            model_name='cspreport',
            name='matching_rules',
            field=models.ManyToManyField(blank=True, to='csp_observer.CspRule'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import utils
import packages.trend.github_lang as github_lang
from re import search, escape
from bs4 import BeautifulSoup
def run(string, entities):
    """Grab the GitHub trends and answer with the top repositories.

    :param string: raw user query (scanned for a programming-language name)
    :param entities: NLU entities; 'number' sets the result limit,
        'daterange' switches daily trending to weekly/monthly.
    :return: utils.output(...) end payload (or an 'unreachable' answer on
        network failure).
    """
    # Defaults: 5 repositories, daily range, no language filter.
    limit = 5
    since = 'daily'
    techslug = ''   # URL slug of the language
    tech = ''       # display name of the language
    answerkey = 'today'

    for item in entities:
        if item['entity'] == 'number':
            limit = item['resolution']['value']
        if item['entity'] == 'daterange':
            # A 'W' in the timex denotes a week-based range.
            if item['resolution']['timex'].find('W') != -1:
                since = 'weekly'
                answerkey = 'week'
            else:
                since = 'monthly'
                answerkey = 'month'

    # Find the asked language among GitHub's known languages.
    for language in github_lang.getall():
        if search(r'\b' + escape(language.lower()) + r'\b', string.lower()):
            answerkey += '_with_tech'
            tech = language
            techslug = language.lower()
            # BUGFIX: stop at the first hit.  Without this break a query
            # matching several languages appended '_with_tech' once per
            # match, yielding a nonexistent translation key.
            break

    if limit > 25:
        utils.output('inter', 'limit_max', utils.translate('limit_max', {
            'limit': limit
        }))
        limit = 25
    elif limit == 0:
        limit = 5

    utils.output('inter', 'reaching', utils.translate('reaching'))

    try:
        r = utils.http('GET', 'https://github.com/trending/' + techslug + '?since=' + since)
        soup = BeautifulSoup(r.text, features='html.parser')
        elements = soup.select('article.Box-row', limit=limit)
        result = ''

        for i, element in enumerate(elements):
            repository = element.h1.get_text(strip=True).replace(' ', '')
            # The author handle comes from the avatar's alt text ("@name").
            author = element.img.get('alt')[1:] if element.img is not None else '?'

            stars = 0
            starnodes = element.select('span.d-inline-block.float-sm-right')
            if starnodes:
                stars = starnodes[0].get_text(strip=True).split(' ')[0]
                # Strip thousands separators regardless of locale.
                for separator in (' ', ',', '.'):
                    stars = stars.replace(separator, '')

            result += utils.translate('list_element', {
                'rank': i + 1,
                'repository_url': 'https://github.com/' + repository,
                'repository_name': repository,
                'author_url': 'https://github.com/' + author,
                'author_username': author,
                'stars_nb': stars
            }
            )

        return utils.output('end', answerkey, utils.translate(answerkey, {
            'limit': limit,
            'tech': tech,
            'result': result
        }
        )
        )
    except requests.exceptions.RequestException:
        return utils.output('end', 'unreachable', utils.translate('unreachable'))
|
# coding: utf-8
# Jupyter-notebook export: a cell-by-cell tour of numpy basics.
#
# Fixes applied to the original export (NOTE: review):
#   * np.arrange(10) -> np.arange(10): numpy has no "arrange"; the typo
#     raised AttributeError and aborted the script.
#   * "x= arr=3" -> "x = arr == 3": the chained assignment rebound arr to
#     the int 3; the "#selection" cell clearly intends a boolean mask like
#     the neighbouring arr > 3 / arr < 3 lines.
#   * the local named "list" was renamed "nums" so the builtin is no
#     longer shadowed.
# The redundant per-cell "import numpy as np" lines are kept to preserve
# the notebook's cell structure.  Expression-only cells displayed their
# value in the notebook; as a script they are intentional no-ops.

# In[4]:
import numpy as np
arr = np.arange(1, 5)
print(arr)

# # numpy library
#
# In[8]:
import numpy as np
arr = np.arange(5)
print(arr)

# In[9]:
nums = [1, 2, 3, 4]
arr = np.array(nums)
print(arr)

# In[10]:
nums = [[1, 2, 6], [4, 5, 6]]
arr = np.array(nums)
print(arr)

# In[11]:
np.arange(10)  # was np.arrange(10) (typo)

# In[13]:
import numpy as np
np.arange(10)

# In[15]:
## arange
import numpy as np
np.arange(0, 6)

# In[19]:
# arange slice
import numpy as np
np.arange(2, 6, 2)

# In[20]:
# zeros and ones
import numpy as np
np.zeros(3)

# In[21]:
# zeros for 2d
import numpy as np
np.zeros((3, 3))

# In[25]:
# zeros for 2d with multiple
import numpy as np
np.zeros((3, 3)) * 2

# In[26]:
# ones for 1d
import numpy as np
np.ones(5)

# In[28]:
# ones for 2d
import numpy as np
np.ones((2, 1))

# In[29]:
# ones with multiple
import numpy as np
np.ones((2, 2)) * 3

# In[30]:
# ones with sum
import numpy as np
np.ones((2, 2)) + 3

# In[31]:
# ones with power
import numpy as np
np.ones((2, 2)) ** 2

# In[32]:
# linspace
import numpy as np
np.linspace(0, 5)

# In[36]:
# linspace
import numpy as np
np.linspace(0, 5) * 10

# In[37]:
# numpy eye
import numpy as np
np.eye(4)

# In[38]:
np.eye(4, 4)

# In[39]:
np.eye(5)

# In[40]:
# random
import numpy as np
np.random.randn(2)

# In[41]:
# random int
np.random.randint(5)

# In[42]:
# random int numbers
np.random.randint(1, 1000, 5)

# In[43]:
# random array standard normal deviation
np.random.randn(5, 5)

# In[45]:
# array with reshape
import numpy as np
arr = np.arange(20)
print(arr)

# In[46]:
arr.reshape(5, 4)

# In[47]:
# max
arr.max()

# In[48]:
# min
arr.min()

# In[50]:
# shape
arr.shape

# In[51]:
# arr reshape
arr.reshape(20, 1)

# In[53]:
# data type of the array
arr.dtype

# In[56]:
# indexing and slicing
import numpy as np
arr = np.arange(1, 5)
print(arr)

# In[57]:
arr[2]

# In[58]:
arr[0:2]

# In[59]:
arr[0:]

# In[60]:
arr[-1:]

# In[61]:
arr[-1:0]

# In[63]:
arr_copy = arr.copy()
print(arr_copy)

# In[69]:
# selection
z = arr > 3
y = arr < 3
x = arr == 3  # was "x= arr=3", which clobbered arr with the int 3
print(x, y, z)

# In[71]:
x = 2
print(arr)

# In[82]:
# numpy operations
import numpy as np
arr = np.arange(0, 10, 2)
print(arr)
print(arr + arr)
print(arr * arr)
print(arr - arr)
print(arr ** 2)
print(arr ** 3)

# In[83]:
# sin function
import numpy as np
np.sin(arr)

# In[84]:
# log function (log(0) warns and yields -inf, as in the original)
import numpy as np
np.log(arr)

# In[85]:
# multidimensional array
import numpy as np
arr = np.arange(25).reshape(5, 5)
print(arr)

# (trailing empty notebook cells removed)
|
import select
import socket
import urwid
from packet_list_box import PacketListBox
class WireDolphin:
    """Terminal packet-sniffer UI built on urwid: captures raw frames from
    an AF_PACKET socket and shows them in a filterable list with a
    per-packet detail popup.

    NOTE(review): AF_PACKET raw sockets are Linux-specific and need
    elevated privileges; the original file lost its indentation, so the
    nesting below is the reviewer's reconstruction.
    """

    # urwid display attributes: (name, foreground, background[, setting]).
    palette = [
        ('reveal focus', 'black', 'dark cyan', 'standout'),
        ('reveal focus1', 'black', 'black', 'standout'),
        ('layer_title', 'dark red', 'black'),
        ('layer_desc', 'white', 'black')
    ]

    def __init__(self):
        # Fixed-width column header matching the row layout of the list.
        header_text = urwid.Text(
            ('banner', '{:<6}{:<15}{:<19}{:<19}{:17}{:25}{} '.format(
                'Nr.', 'EtherType', 'Src. MAC', 'Dst. Mac', 'Src. IP', 'Dst. IP', 'Info')),
            align='left')
        self.header = urwid.AttrMap(header_text, 'banner')
        self.listbox = PacketListBox()
        self.listbox.offset_rows = 0
        self.listbox.inset_fraction = 1
        # Footer: free-text filter input.
        self.filter = urwid.Edit("Filter: ")
        self.footer = urwid.AttrWrap(self.filter, 'foo')
        self.view = urwid.Frame(
            self.listbox,
            header=urwid.AttrWrap(self.header, 'head'),
            footer=urwid.LineBox(self.footer))
        self.view = urwid.LineBox(self.view)
        self.loop_widget = self.view
        self.loop = urwid.MainLoop(self.loop_widget, palette=self.palette, unhandled_input=self.unhandled_input)
        # Raw layer-2 socket; protocol ntohs(3) captures every ethertype
        # (3 == ETH_P_ALL on Linux).
        self.sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
        self.sock.setblocking(0)
        # NOTE(review): this select() blocks once at startup until the
        # socket is readable; the resulting list is then reused forever in
        # _check rather than re-selecting per tick.
        self.readable, writable, exceptional = select.select([self.sock], [], [self.sock])
        self.check_for_packets()
        self.focus = True # True if list is focused False when filter
        self.details_showed = False # True while the detail popup is open

    def main(self):
        """Run the urwid main loop (blocks until exit)."""
        self.loop.run()

    def check_for_packets(self):
        """(Re-)arm the 10 ms polling alarm that drains the socket."""
        self.loop.set_alarm_in(0.01, self._check)

    def _check(self, a, b):
        # Alarm callback (args: loop, user_data).  Reads one frame per
        # readable socket; the non-blocking socket raises BlockingIOError
        # when nothing is pending.
        # NOTE(review): gated on listbox.is_searching — presumably capture
        # is paused/resumed around filtering; confirm in PacketListBox.
        if self.listbox.is_searching:
            try:
                for s in self.readable:
                    raw_data, addr = s.recvfrom(65535)
                    self.listbox.add_item(raw_data)
            except BlockingIOError as e:
                pass
        self.check_for_packets()

    def unhandled_input(self, k):
        """Global key handling: esc quits (or closes the popup), tab
        toggles list/filter focus, enter opens details or applies the
        filter depending on focus."""
        # exit on esc
        if k in ['esc'] and not self.details_showed:
            raise urwid.ExitMainLoop()
        if k == 'tab':
            if self.focus:
                self.view.original_widget.set_focus("footer")
            else:
                self.view.original_widget.set_focus("body")
            self.focus = not self.focus
        if k == 'esc':
            self.reset_layout()
            self.details_showed = False
        # enter pressed on list
        if k == 'enter':
            if self.focus:
                self.details_showed = True
                index = self.listbox.content.get_focus()[1]
                # Row text begins with the 1-based packet number in a
                # 7-char field; map it back to the data index.
                data_index = int(self.listbox.content[index].base_widget.get_text()[0][:7]) - 1
                self.popup(self.listbox.data[data_index].get_description())
            # enter pressed on filter box
            else:
                self.listbox.filter_list(self.filter.get_edit_text())

    def popup(self, text):
        """Overlay a scrollable 'Packet Details' box over the main view."""
        # Header
        header_text = urwid.Text(('banner', 'Packet Details'), align='left')
        header = urwid.AttrMap(header_text, 'banner')
        # Body
        content = urwid.SimpleListWalker([
            urwid.AttrMap(w, None, 'reveal focus1') for w in text])
        listbox = urwid.LineBox(urwid.ListBox(content))
        # Layout
        layout = urwid.Frame(
            listbox,
            header=header,
            focus_part='body'
        )
        w = urwid.Overlay(
            urwid.LineBox(layout),
            self.view,
            align='center',
            width=('relative', 75),
            valign='middle',
            height=('relative', 80)
        )
        self.loop.widget = w

    def reset_layout(self, thing=None):
        '''
        Resets the console UI to the default layout
        '''
        self.loop.widget = self.view
        self.loop.draw_screen()

if __name__ == "__main__":
    WireDolphin().main()
|
# -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
from world import *
from hud import *
from math import *
class Enemy(pygame.sprite.Sprite):
    """An enemy with a rotating field-of-view cone (Python 2 / pygame).

    NOTE(review): pygame.sprite.Sprite.__init__ is never called, so sprite
    group machinery will not work on these objects — confirm intended.
    The original file lost its indentation; nesting reconstructed.
    """

    def __init__(self, world, n, img, px, py, walk, orient = 0):
        # world: owning World; n: display name; img: pre-loaded surface;
        # (px, py): world position; walk: walkable/patrol objects;
        # orient: initial facing angle in degrees.
        self.world = world
        self.name = n
        print n
        self.image = img
        self.rect = img.get_rect()
        self.w = self.rect.w
        self.h = self.rect.h
        self.x = px
        self.y = py
        self.walk_objs = walk
        self.ww = self.world.ww
        self.wh = self.world.wh
        # delay/last look like an update-throttle pair (ms + last tick),
        # but neither is used in the code visible here — TODO confirm.
        self.delay = 100
        self.last = pygame.time.get_ticks()
        self.orientation = orient

    def sees(self, (x, y), angle, dist1, dist2):
        u"""Returns 0 if (x, y) is not seen, 1 if seen near, 2 if far.
        ``angle`` is the half-angle of the vision cone.

        NOTE(review): unfinished — only the cone slope is computed and the
        method implicitly returns None.
        """
        b = tan(radians(angle))

    def draw_sees(self, screen, cx, cy, angle, d1, d2):
        """Debug-draw the vision cone relative to camera (cx, cy): red
        lines for the near section (length d1), blue for the far extension
        (d2).  The cone rotates one degree per call."""
        b = tan(radians(angle + self.orientation))
        # Reset before tan() hits its vertical asymptote at 90 degrees.
        if (self.orientation+angle == 90):
            self.orientation = 0
        # Cone apex at sprite centre (+32 on each axis — assumes a 64x64
        # sprite; TODO confirm).
        vx = self.x + 32
        vy = self.y + 32
        # Upper and lower cone edges at distance d1 from the apex.
        y = b * (vx + d1) + vy - (b * vx)
        y2 = -b * (vx + d1) + vy - (-b * vx)
        pygame.draw.line(screen, (225,0,0), (vx - cx, vy - cy), (vx + d1 - cx, y - cy), 5)
        pygame.draw.line(screen, (225,0,0), (vx - cx, vy - cy), (vx + d1 - cx, y2 - cy), 5)
        # Continue each edge outward by d2 (drawn in blue).
        vx = vx + d1
        vy = y
        y3 = b * (vx + d2) + vy - (b * vx)
        pygame.draw.line(screen, (0,0,255), (vx - cx, vy - cy), (vx + d2 - cx, y3 - cy), 5)
        vy = y2
        y4 = -b * (vx + d2) + vy - (-b * vx)
        pygame.draw.line(screen, (0,0,255), (vx - cx, vy - cy), (vx + d2 - cx, y4 - cy), 5)
        self.orientation +=1

    def turn_left(self, angles):
        """Increase the facing angle by ``angles`` degrees."""
        self.orientation += angles

    def turn_right(self, angles):
        """Decrease the facing angle by ``angles`` degrees."""
        self.orientation -= angles

    def draw_pos(self, camera_x, camera_y):
        """World position translated into screen (camera) coordinates."""
        return ((self.x - camera_x), (self.y - camera_y))
|
import requests
class ApiClient:
    """
    Base class for API clients: a thin wrapper around requests.Session
    that handles token auth and URL construction.
    """
    api_path = None         # subclasses set the API root path
    trailing_slash = False  # subclasses may force a trailing slash

    def __init__(self, host, token):
        self.host = host
        self.base_url = '{}{}'.format(self.host, self.api_path)
        self.client = self._get_client(token)

    @staticmethod
    def _get_client(token=None):
        """
        Build a requests.Session carrying the Token auth header.
        :param token: API key (omitted from headers when falsy)
        :rtype: requests.Session
        """
        session = requests.Session()
        if token:
            session.headers.update({'Authorization': 'Token {}'.format(token)})
        return session

    def _absolute_url(self, path):
        """
        Absolute URL for a relative API path.
        :param path: endpoint path, with or without surrounding slashes
        :rtype: str
        """
        suffix = '/' if self.trailing_slash else ''
        return '{}/{}{}'.format(self.base_url, path.strip('/'), suffix)

    def prepare(self, method, path, **kwargs):
        """
        Build (but do not send) a prepared request for this client.
        :param method: HTTP method, e.g. 'GET'
        :param path: endpoint path
        :param kwargs: forwarded to requests.Request
        """
        req = requests.Request(method, self._absolute_url(path), **kwargs)
        return self.client.prepare_request(req)

    def request(self, method, path, **kwargs):
        """
        Prepare and send a request using the shared session.
        :rtype: requests.Response
        """
        return self.client.send(self.prepare(method, path, **kwargs))
|
# https://python.swaroopch.com/basics.html
# and also some https://automatetheboringstuff.com
# Tutorial scratch file: each section demonstrates one basic feature.
myNum = 100
if myNum > 50:
    print("High")
else:
    print("Low")
print("yo")
print('''This is a multi-line string. This is the first line.
This is the second line.
"What's your name?," I asked.
He said "Bond, James Bond."
''')
age = 20
verb = "wrote"
# Positional str.format (note: {0} is age here, so the output reads oddly).
print('{0} was {1} years old when he wrote this code.'.format(age, 'Grant'))
name = 'Grant'
print(f'{age} was {name} years old when he wrote this code.')
# decimal (.) precision of 3 to get it to return float '0.333'
print('{0:.3f}'.format(1/3))
# fill with underscores (_) with the text centered
# (^) to 11 width '___hello___'
print('{0:_^11}'.format('hello'))
print('{name} wrote {code}'.format(name='Grant', code='python-examples'))
# print assumes new line \n , so to avoid that do this:
print('a', end='')
print('b', end=' ')
print('c', end='')
# output: "ab c"
print('What\'s your name?')
print('First line\nSecond line')
# "This is the first sentence. \
# This is the second sentence."
# is equivalent to
# "This is the first sentence. This is the second sentence."
# This is referred to as explicit line joining:
print("This is the first sentence. \
This is the second sentence.")
# literal/raw string:
# (Always use raw strings when dealing with regular expressions.)
print(r"Newlines are indicated by \n")
# Think of \ as saying, "This instruction continues on the next line."
print('Four score and seven ' + \
      'years ago...')
# variable types can change (names are dynamically typed):
age = 20
print(age)
age = 'twenty'
print(age)
# Python will always use indentation for blocks and will never use braces
|
"""
Given an integer, write a function to determine if it is a power of two.
"""
class Solution(object):
    def isPowerOfTwo(self, n):
        """Return True iff ``n`` is a positive integer power of two.

        :type n: int
        :rtype: bool
        """
        # A power of two has exactly one set bit; n & (n - 1) clears the
        # lowest set bit, so it is 0 only for powers of two.  The n > 0
        # guard fixes the original bug: bin(-2) == '-0b10' also contains a
        # single '1', so negatives were wrongly accepted.
        return n > 0 and n & (n - 1) == 0

if __name__ == '__main__':
    sol = Solution()
    assert sol.isPowerOfTwo(2) == True
    assert sol.isPowerOfTwo(3) == False
    assert sol.isPowerOfTwo(1024) == True
    assert sol.isPowerOfTwo(0) == False
    assert sol.isPowerOfTwo(-2) == False
#!/usr/bin/python3
"""unit tests for Square class"""
import io
import sys
import unittest
from models.base import Base
from models.square import Square
class TestSquare_instantiation(unittest.TestCase):
    """Unittests for testing instantiation of the Square class."""

    def test_is_base(self):
        # A Square instance is also a Base (inheritance chain).
        self.assertIsInstance(Square(10), Base)

    def test_is_rectangle(self):
        self.assertIsInstance(Square(10), Square)

    def test_no_args(self):
        # size is mandatory.
        with self.assertRaises(TypeError):
            Square()

    # The *_args tests below assume Base hands out sequential auto ids
    # when none is supplied — NOTE(review): established by Base's
    # implementation, not visible in this file.
    def test_one_arg(self):
        s1 = Square(10)
        s2 = Square(11)
        self.assertEqual(s1.id, s2.id - 1)

    def test_two_args(self):
        s1 = Square(10, 2)
        s2 = Square(2, 10)
        self.assertEqual(s1.id, s2.id - 1)

    def test_three_args(self):
        s1 = Square(10, 2, 2)
        s2 = Square(2, 2, 10)
        self.assertEqual(s1.id, s2.id - 1)

    def test_four_args(self):
        # Fourth positional argument is an explicit id.
        self.assertEqual(7, Square(10, 2, 2, 7).id)

    def test_more_than_four_args(self):
        with self.assertRaises(TypeError):
            Square(1, 2, 3, 4, 5)

    def test_size_private(self):
        # __size is name-mangled, so direct attribute access must fail.
        with self.assertRaises(AttributeError):
            print(Square(10, 2, 3, 4).__size)

    def test_size_getter(self):
        self.assertEqual(5, Square(5, 2, 3, 9).size)

    def test_size_setter(self):
        s = Square(4, 1, 9, 2)
        s.size = 8
        self.assertEqual(8, s.size)

    # Setting size must propagate to both width and height.
    def test_width_getter(self):
        s = Square(4, 1, 9, 2)
        s.size = 8
        self.assertEqual(8, s.width)

    def test_height_getter(self):
        s = Square(4, 1, 9, 2)
        s.size = 8
        self.assertEqual(8, s.height)

    def test_x_getter(self):
        self.assertEqual(0, Square(10).x)

    def test_y_getter(self):
        self.assertEqual(0, Square(10).y)
class TestSquare_size(unittest.TestCase):
    """Unittests for testing size initialization of the Square class.

    Square stores size as its width, so the error messages reference
    "width".  One case per non-int type, then the value checks.
    """

    def test_None_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(None)

    def test_str_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square("invalid")

    def test_float_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(5.5)

    def test_complex_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(complex(5))

    def test_dict_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square({"a": 1, "b": 2}, 2)

    def test_bool_size(self):
        # bool is an int subclass; the validator must still reject it.
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(True, 2, 3)

    def test_list_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square([1, 2, 3])

    def test_set_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square({1, 2, 3}, 2)

    def test_tuple_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square((1, 2, 3), 2, 3)

    def test_frozenset_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(frozenset({1, 2, 3, 1}))

    def test_range_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(range(5))

    def test_bytes_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(b'Python')

    def test_bytearray_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(bytearray(b'abcdefg'))

    def test_memoryview_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(memoryview(b'abcdefg'))

    def test_inf_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(float('inf'))

    def test_nan_size(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square(float('nan'))

    # Test size values
    def test_negative_size(self):
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            Square(-1, 2)

    def test_zero_size(self):
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            Square(0, 2)
class TestSquare_x(unittest.TestCase):
    """Unittests for testing initialization of Square x attribute.

    Mirrors the size type-checks above, with x as the second positional
    argument.
    """

    def test_None_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, None)

    def test_str_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, "invalid")

    def test_float_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, 5.5)

    def test_complex_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, complex(5))

    def test_dict_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, {"a": 1, "b": 2}, 2)

    def test_bool_x(self):
        # bool is an int subclass; must still be rejected.
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, True)

    def test_list_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, [1, 2, 3])

    def test_set_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, {1, 2, 3})

    def test_tuple_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, (1, 2, 3))

    def test_frozenset_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, frozenset({1, 2, 3, 1}))

    def test_range_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, range(5))

    def test_bytes_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, b'Python')

    def test_bytearray_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, bytearray(b'abcdefg'))

    def test_memoryview_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, memoryview(b'abcedfg'))

    def test_inf_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, float('inf'), 2)

    def test_nan_x(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, float('nan'), 2)

    def test_negative_x(self):
        # x is a position, so zero is allowed but negatives are not.
        with self.assertRaisesRegex(ValueError, "x must be >= 0"):
            Square(5, -1, 0)
class TestSquare_y(unittest.TestCase):
    """Unittests for testing initialization of Square y attribute.

    Same type/value matrix as for x, with y as the third positional
    argument.  (Note: unlike x, there is no bool case here.)
    """

    def test_None_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, None)

    def test_str_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, "invalid")

    def test_float_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, 5.5)

    def test_complex_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, complex(5))

    def test_dict_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, {"a": 1, "b": 2})

    def test_list_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, [1, 2, 3])

    def test_set_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, {1, 2, 3})

    def test_tuple_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, (1, 2, 3))

    def test_frozenset_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, frozenset({1, 2, 3, 1}))

    def test_range_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, range(5))

    def test_bytes_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, b'Python')

    def test_bytearray_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, bytearray(b'abcdefg'))

    def test_memoryview_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 3, memoryview(b'abcedfg'))

    def test_inf_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, float('inf'))

    def test_nan_y(self):
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            Square(1, 1, float('nan'))

    def test_negative_y(self):
        with self.assertRaisesRegex(ValueError, "y must be >= 0"):
            Square(3, 0, -1)
class TestSquare_order_of_initialization(unittest.TestCase):
    """Unittests for testing order of Square attribute initialization.

    When several arguments are invalid, validation must report them in
    declaration order: size (width) first, then x, then y.
    """

    def test_size_before_x(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square("invalid size", "invalid x")

    def test_size_before_y(self):
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            Square("invalid size", 1, "invalid y")

    def test_x_before_y(self):
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            Square(1, "invalid x", "invalid y")
class TestSquare_area(unittest.TestCase):
    """Unittests for testing the area method of the Square class."""

    def test_area_small(self):
        self.assertEqual(100, Square(10, 0, 0, 1).area())

    def test_area_large(self):
        # Python ints are arbitrary precision; huge sizes must not overflow.
        s = Square(999999999999999999, 0, 0, 1)
        self.assertEqual(999999999999999998000000000000000001, s.area())

    def test_area_changed_attributes(self):
        # area() must reflect a size set after construction.
        s = Square(2, 0, 0, 1)
        s.size = 7
        self.assertEqual(49, s.area())

    def test_area_one_arg(self):
        # area() takes no arguments beyond self.
        s = Square(2, 10, 1, 1)
        with self.assertRaises(TypeError):
            s.area(1)
class TestSquare_stdout(unittest.TestCase):
    """Unittests for testing __str__ and display methods of Square class."""

    @staticmethod
    def capture_stdout(sq, method):
        """Captures and returns text printed to stdout.

        Args:
            sq (Square): The Square to print to stdout.
            method (str): The method to run on sq ("print" or "display").
        Returns:
            io.StringIO holding the text printed by calling method on sq.
        """
        capture = io.StringIO()
        # Temporarily redirect stdout; always restored via sys.__stdout__.
        sys.stdout = capture
        if method == "print":
            print(sq)
        else:
            sq.display()
        sys.stdout = sys.__stdout__
        return capture

    # __str__ format: "[Square] (<id>) <x>/<y> - <size>"
    def test_str_method_print_size(self):
        s = Square(4)
        capture = TestSquare_stdout.capture_stdout(s, "print")
        correct = "[Square] ({}) 0/0 - 4\n".format(s.id)
        self.assertEqual(correct, capture.getvalue())

    def test_str_method_size_x(self):
        s = Square(5, 5)
        correct = "[Square] ({}) 5/0 - 5".format(s.id)
        self.assertEqual(correct, s.__str__())

    def test_str_method_size_x_y(self):
        s = Square(7, 4, 22)
        correct = "[Square] ({}) 4/22 - 7".format(s.id)
        self.assertEqual(correct, str(s))

    def test_str_method_size_x_y_id(self):
        s = Square(2, 88, 4, 19)
        self.assertEqual("[Square] (19) 88/4 - 2", str(s))

    def test_str_method_changed_attributes(self):
        s = Square(7, 0, 0, [4])
        s.size = 15
        s.x = 8
        s.y = 10
        self.assertEqual("[Square] ([4]) 8/10 - 15", str(s))

    def test_str_method_one_arg(self):
        s = Square(1, 2, 3, 4)
        with self.assertRaises(TypeError):
            s.__str__(1)

    # Test display method: '#' blocks offset by x (spaces) and y (newlines).
    def test_display_size(self):
        s = Square(2, 0, 0, 9)
        capture = TestSquare_stdout.capture_stdout(s, "display")
        self.assertEqual("##\n##\n", capture.getvalue())

    def test_display_size_x(self):
        s = Square(3, 1, 0, 18)
        capture = TestSquare_stdout.capture_stdout(s, "display")
        self.assertEqual(" ###\n ###\n ###\n", capture.getvalue())

    def test_display_size_y(self):
        s = Square(4, 0, 1, 9)
        capture = TestSquare_stdout.capture_stdout(s, "display")
        display = "\n####\n####\n####\n####\n"
        self.assertEqual(display, capture.getvalue())

    def test_display_size_x_y(self):
        s = Square(2, 3, 2, 1)
        capture = TestSquare_stdout.capture_stdout(s, "display")
        display = "\n\n   ##\n   ##\n"
        self.assertEqual(display, capture.getvalue())

    def test_display_one_arg(self):
        s = Square(3, 4, 5, 2)
        with self.assertRaises(TypeError):
            s.display(1)
class TestSquare_update_args(unittest.TestCase):
    """Unittests for testing update args method of the Square class.

    Positional arguments to ``update`` are applied in the order:
    id, size, x, y.  Extra positional arguments are ignored.
    """
    # --- argument-count behaviour -------------------------------------
    def test_update_args_zero(self):
        s = Square(10, 10, 10, 10)
        s.update()
        self.assertEqual("[Square] (10) 10/10 - 10", str(s))
    def test_update_args_one(self):
        s = Square(10, 10, 10, 10)
        s.update(89)
        self.assertEqual("[Square] (89) 10/10 - 10", str(s))
    def test_update_args_two(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2)
        self.assertEqual("[Square] (89) 10/10 - 2", str(s))
    def test_update_args_three(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2, 3)
        self.assertEqual("[Square] (89) 3/10 - 2", str(s))
    def test_update_args_four(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2, 3, 4)
        self.assertEqual("[Square] (89) 3/4 - 2", str(s))
    def test_update_args_more_than_four(self):
        # A fifth positional argument is silently ignored.
        s = Square(10, 10, 10, 10)
        s.update(89, 2, 3, 4, 5)
        self.assertEqual("[Square] (89) 3/4 - 2", str(s))
    # --- size keeps width and height in sync --------------------------
    def test_update_args_width_setter(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2)
        self.assertEqual(2, s.width)
    def test_update_args_height_setter(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2)
        self.assertEqual(2, s.height)
    # --- id=None falls back to an auto-assigned id --------------------
    def test_update_args_None_id(self):
        s = Square(10, 10, 10, 10)
        s.update(None)
        correct = "[Square] ({}) 10/10 - 10".format(s.id)
        self.assertEqual(correct, str(s))
    def test_update_args_None_id_and_more(self):
        s = Square(10, 10, 10, 10)
        s.update(None, 4, 5)
        correct = "[Square] ({}) 5/10 - 4".format(s.id)
        self.assertEqual(correct, str(s))
    def test_update_args_twice(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2, 3, 4)
        s.update(4, 3, 2, 89)
        self.assertEqual("[Square] (4) 2/89 - 3", str(s))
    # --- validation of each positional slot ---------------------------
    def test_update_args_invalid_size_type(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            s.update(89, "invalid")
    def test_update_args_size_zero(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            s.update(89, 0)
    def test_update_args_size_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            s.update(89, -4)
    def test_update_args_invalid_x(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            s.update(89, 1, "invalid")
    def test_update_args_x_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "x must be >= 0"):
            s.update(98, 1, -4)
    def test_update_args_invalid_y(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            s.update(89, 1, 2, "invalid")
    def test_update_args_y_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "y must be >= 0"):
            s.update(98, 1, 2, -4)
    # --- arguments are validated left to right ------------------------
    def test_update_args_size_before_x(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            s.update(89, "invalid", "invalid")
    def test_update_args_size_before_y(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            s.update(89, "invalid", 2, "invalid")
    def test_update_args_x_before_y(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            s.update(89, 1, "invalid", "invalid")
class TestSquare_update_kwargs(unittest.TestCase):
    """Unittests for testing update kwargs method of Square class.

    Keyword arguments to ``update`` may appear in any order; unknown
    keys are ignored, and kwargs are skipped when args are present.
    """
    # --- keyword arguments in arbitrary order -------------------------
    def test_update_kwargs_one(self):
        s = Square(10, 10, 10, 10)
        s.update(id=1)
        self.assertEqual("[Square] (1) 10/10 - 10", str(s))
    def test_update_kwargs_two(self):
        s = Square(10, 10, 10, 10)
        s.update(size=1, id=2)
        self.assertEqual("[Square] (2) 10/10 - 1", str(s))
    def test_update_kwargs_three(self):
        s = Square(10, 10, 10, 10)
        s.update(y=1, size=3, id=89)
        self.assertEqual("[Square] (89) 10/1 - 3", str(s))
    def test_update_kwargs_four(self):
        s = Square(10, 10, 10, 10)
        s.update(id=89, x=1, y=3, size=4)
        self.assertEqual("[Square] (89) 1/3 - 4", str(s))
    # --- size keeps width and height in sync --------------------------
    def test_update_kwargs_width_setter(self):
        s = Square(10, 10, 10, 10)
        s.update(id=89, size=8)
        self.assertEqual(8, s.width)
    def test_update_kwargs_height_setter(self):
        s = Square(10, 10, 10, 10)
        s.update(id=89, size=9)
        self.assertEqual(9, s.height)
    # --- id=None falls back to an auto-assigned id --------------------
    def test_update_kwargs_None_id(self):
        s = Square(10, 10, 10, 10)
        s.update(id=None)
        correct = "[Square] ({}) 10/10 - 10".format(s.id)
        self.assertEqual(correct, str(s))
    def test_update_kwargs_None_id_and_more(self):
        s = Square(10, 10, 10, 10)
        s.update(id=None, size=7, x=18)
        correct = "[Square] ({}) 18/10 - 7".format(s.id)
        self.assertEqual(correct, str(s))
    def test_update_kwargs_twice(self):
        s = Square(10, 10, 10, 10)
        s.update(id=89, x=1)
        s.update(y=3, x=15, size=2)
        self.assertEqual("[Square] (89) 15/3 - 2", str(s))
    # --- validation of each keyword -----------------------------------
    def test_update_kwargs_invalid_size(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "width must be an integer"):
            s.update(size="invalid")
    def test_update_kwargs_size_zero(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            s.update(size=0)
    def test_update_kwargs_size_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "width must be > 0"):
            s.update(size=-3)
    def test_update_kwargs_invalid_x(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "x must be an integer"):
            s.update(x="invalid")
    def test_update_kwargs_x_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "x must be >= 0"):
            s.update(x=-5)
    def test_update_kwargs_invalid_y(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(TypeError, "y must be an integer"):
            s.update(y="invalid")
    def test_update_kwargs_y_negative(self):
        s = Square(10, 10, 10, 10)
        with self.assertRaisesRegex(ValueError, "y must be >= 0"):
            s.update(y=-5)
    # --- kwargs ignored when positional args are present --------------
    def test_update_args_and_kwargs(self):
        s = Square(10, 10, 10, 10)
        s.update(89, 2, y=6)
        self.assertEqual("[Square] (89) 10/10 - 2", str(s))
    # --- unknown keyword keys are ignored -----------------------------
    def test_update_kwargs_wrong_keys(self):
        s = Square(10, 10, 10, 10)
        s.update(a=5, b=10)
        self.assertEqual("[Square] (10) 10/10 - 10", str(s))
    def test_update_kwargs_some_wrong_keys(self):
        s = Square(10, 10, 10, 10)
        s.update(size=5, id=89, a=1, b=54)
        self.assertEqual("[Square] (89) 10/10 - 5", str(s))
class TestSquare_to_dictionary(unittest.TestCase):
    """Unittests for testing to_dictionary method of the Square class."""

    def test_to_dictionary_output(self):
        """to_dictionary returns id, x, size and y as a plain dict."""
        sq = Square(10, 2, 1, 1)
        expected = {'id': 1, 'x': 2, 'size': 10, 'y': 1}
        self.assertDictEqual(expected, sq.to_dictionary())

    def test_to_dictionary_no_object_changes(self):
        """update(**other.to_dictionary()) must not make the objects equal."""
        first = Square(10, 2, 1, 2)
        second = Square(1, 2, 10)
        second.update(**first.to_dictionary())
        self.assertNotEqual(first, second)

    def test_to_dictionary_arg(self):
        """to_dictionary takes no positional arguments."""
        sq = Square(10, 10, 10, 10)
        with self.assertRaises(TypeError):
            sq.to_dictionary(1)
if __name__ == "__main__":
    # Run every test case in this module when executed as a script.
    unittest.main()
|
import pytest
import pdb
from fhir_walk.model import unwrap_bundle
from fhireval.test_suite.crud import prep_server
test_id = f"{'2.2.06':10} - CRUD Observation"
test_weight = 2
example_observation_id = None
example_patient_id = None
def test_create_research_subject(host, prep_server):
    """CREATE a Patient, then an Observation referencing it (expects 201s).

    Stores the server-assigned ids in module globals for the later
    READ/UPDATE/PATCH/DELETE tests, which rely on this test running first.
    """
    global example_observation_id, example_patient_id
    example_patient = prep_server['CMG-Examples']['Patient'][0]
    example_observation = prep_server['eIII-Examples']['Observation'][0]
    # POST the patient first so the observation can reference its server id.
    response = host.post('Patient', example_patient, validate_only=False)
    assert response['status_code'] == 201, 'CREATE success'
    example_patient_id = response['response']['id']
    example_observation['subject'][
        'reference'] = f"Patient/{example_patient_id}"
    response = host.post('Observation',
                         example_observation,
                         validate_only=False)
    assert response['status_code'] == 201, 'Observation CREATE success'
    example_observation_id = response['response']['id']
def test_read_research_subject(host, prep_server):
    """READ back the Observation created by test_create_research_subject."""
    global example_observation_id, example_patient_id
    example_patient = prep_server['CMG-Examples']['Patient'][0]
    example_observation = prep_server['eIII-Examples']['Observation'][0]
    # Relies on test_create_research_subject having populated the global id.
    obs_query = host.get(f"Observation/{example_observation_id}").entries
    assert len(obs_query) == 1, "READ Success and only one was found"
    # Just make sure we got what we expected
    assert example_observation['valueQuantity']['value'] == obs_query[0][
        'valueQuantity']['value']
def test_update_research_subject(host, prep_server):
    """UPDATE the Observation's value to 142 and verify via a fresh READ."""
    global example_observation_id, example_patient_id
    example_patient = prep_server['CMG-Examples']['Patient'][0]
    example_observation = prep_server['eIII-Examples']['Observation'][0]
    # NOTE(review): dict.copy() is shallow, so assigning into
    # altered_obs['valueQuantity'] also mutates the fixture's observation.
    altered_obs = example_observation.copy()
    altered_obs['valueQuantity']['value'] = 142
    altered_obs['id'] = example_observation_id
    result = host.update('Observation', example_observation_id, altered_obs)
    assert result['status_code'] == 200
    obs_qry = host.get(f"Observation/{example_observation_id}").entries
    assert len(obs_qry) == 1, "READ success and only one was found"
    assert obs_qry[0]['valueQuantity']['value'] == 142
def test_patch_research_subject(host, prep_server):
    """PATCH the Observation's value with a JSON Patch 'replace' operation."""
    global example_observation_id, example_patient_id
    patch_ops = [{
        "op": "replace",
        "path": "/valueQuantity/value",
        "value": 99
    }]
    result = host.patch('Observation', example_observation_id, patch_ops)
    assert result['status_code'] == 200
    # The patched resource is returned directly in the response body.
    obs_qry = result['response']
    assert obs_qry['valueQuantity']['value'] == 99
def test_delete_research_subject(host, prep_server):
    """DELETE the Observation and Patient, then verify the Observation is gone."""
    global example_observation_id, example_patient_id
    example_patient = prep_server['CMG-Examples']['Patient'][0]
    example_observation = prep_server['eIII-Examples']['Observation'][0]
    example_identifier = example_observation['identifier'][0]
    delete_result = host.delete_by_record_id('Observation',
                                             example_observation_id)
    assert delete_result['status_code'] == 200
    delete_result = host.delete_by_record_id('Patient', example_patient_id)
    assert delete_result['status_code'] == 200
    # Searching by identifier must now return an empty bundle.
    response = host.get(
        f"Observation?identifier={example_identifier}").response
    del_query = unwrap_bundle(response)
    assert len(del_query) == 0, "Verify that the delete really worked"
|
import sys, getopt
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
if __name__ == "__main__":
    # Crawl Naver news list pages for a main/sub category over a date range
    # and write one CSV per sub-category (title, link, writer, date).
    from_date = ''
    to_date = ''
    main_category = ''
    sub_category = []
    # Fix: last_page was only bound when -p was supplied, so omitting it
    # crashed later with a NameError instead of printing the usage line.
    last_page = 0
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:t:m:s:p:", ["from=", "to=", "main=", "sub=", "page="])
    except getopt.GetoptError:
        print("naver_news_crawler.py -f <date> -t <date> -m <main category id> -s <sub category id> -p <page limit>")
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            print("naver_news_crawler.py -f <date> -t <date> -m <main category id> -s <sub category id> -p <page limit>")
            sys.exit(1)
        elif opt in ("-f", "--from"):
            from_date = arg
        elif opt in ("-t", "--to"):
            to_date = arg
        elif opt in ("-m", "--main"):
            main_category = arg
        elif opt in ("-s", "--sub"):
            sub_category.append(arg)
        elif opt in ("-p", "--page"):
            last_page = int(arg)
    # -f, -t, -m and -p are all required.
    if from_date == '' or to_date == '' or main_category == '' or last_page <= 0:
        print("naver_news_crawler.py -f <date> -t <date> -m <main category id> -s <sub category id> -p <page limit>")
        sys.exit(1)
    if sub_category == []:
        # No -s given: default to every known sub-category of the main one.
        if main_category == '100': # politics
            sub_category = ['264', '265', '268', '266', '267', '269']
        elif main_category == '101': # economics
            sub_category = ['259', '258', '261', '771', '260', '262', '310', '263']
        elif main_category == '102': # society
            sub_category = ['249', '250', '251', '254', '252', '255', '256', '276', '257', '59b']
        elif main_category == '103': # life & leisure
            sub_category = ['241', '239', '240', '237', '238', '376', '242', '243', '244', '248', '245']
        elif main_category == '104': # global
            sub_category = ['231', '232', '233', '234', '322']
        elif main_category == '105': # science & IT
            sub_category = ['731', '226', '227', '230', '732', '283', '229', '228']
        elif main_category == '110': # opinion
            sub_category = ['200', '201', '202', '274', '281']
    # Expand the inclusive from/to range into a list of YYYYMMDD strings.
    date_range = []
    from_d = datetime.strptime(from_date, "%Y%m%d")
    to_d = datetime.strptime(to_date, "%Y%m%d")
    while from_d <= to_d:
        date_range.append(from_d.strftime("%Y%m%d"))
        from_d = from_d + timedelta(days=1)
    for sub in sub_category:
        titles = []
        links = []
        writings = []
        news_dates = []
        prev = set()
        for date in date_range:
            for page in range(1, last_page):
                print(">>>>>>>> page: %d" % page)
                url = "http://news.naver.com/main/list.nhn?mode=LS2D&mid=shm&sid1=%s&sid2=%s&date=%s&page=%d" % (main_category, sub, date, page)
                response = requests.get(url)
                soup = BeautifulSoup(response.text, "lxml")
                div = soup.find("div", attrs={"class":"content"})
                dl_list = div.findAll("dl")
                # comparing current title list to previous things to check if or not last page
                cur = set()
                for dl in dl_list:
                    title_tag = dl.find("dt", attrs={"class":None}).find("a")
                    title = title_tag.text.strip()
                    cur.add(title)
                if (prev == cur):
                    break
                prev = cur
                # if this page is not last page, extract title, writing and date
                for dl in dl_list:
                    title_tag = dl.find("dt", attrs={"class":None}).find("a")
                    title = title_tag.text.strip()
                    link = title_tag["href"]
                    writing = dl.find("span", attrs={"class":"writing"}).text.strip()
                    news_date = dl.find("span", attrs={"class":"date"}).text.strip()
                    print("title: %s, writing: %s, date: %s, link: %s" % (title, writing, news_date, link))
                    titles.append(title)
                    links.append(link)
                    writings.append(writing)
                    news_dates.append(news_date)
                # Randomised politeness delay between requests.
                sleep_sec = np.random.randint(0, 10) + np.random.random_sample()
                time.sleep(sleep_sec)
        df = pd.DataFrame({"title":titles, "link":links, "writing":writings, "date":news_dates})
        df.to_csv("naver_news_%s_%s_%s_%s.csv" % (main_category, sub, from_date, to_date), sep=",", encoding='euc-kr')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# coding=utf-8
# Demonstrate dict.update(): keys from d2 overwrite matching keys in d.
d = dict(name='魑魅魍魉')
d2 = dict(name="python")
d.update(d2)
print(d)
|
#IMPORT HERE..
import os
import time
from zipfile import ZipFile
from selenium import webdriver
#DEFINE CONSTANTS HERE..
URL = 'http://localhost:8000/CollectMaterial/'  # local server page driven by fetch_questions
PATH = os.getcwd()  # base directory: downloads and chromedriver paths are built from it
def fetch_questions(code):
    """Download and extract the question bundle for *code* via the web page.

    Drives Chrome through Selenium: loads URL, types the code into the
    'test_code' field, submits, then inspects the resulting URL tail to
    decide success.  On success, the downloaded Question.zip is extracted
    into PATH\\Question\\<code> and the zip is removed.

    :param code: test code to submit on the page
    :return: True when material was fetched and extracted, False otherwise
    """
    chrome_options = webdriver.ChromeOptions()
    # Make Chrome download into PATH\Question without prompting.
    prefs = {'download.default_directory' : PATH+"\Question"}
    chrome_options.add_experimental_option('prefs', prefs)
    driver = webdriver.Chrome(executable_path= PATH +"\\chromedriver_win32\\chromedriver.exe", chrome_options= chrome_options)
    driver.implicitly_wait(20)
    if __name__ != "__main__":
        # Imported as a module: keep the browser window out of the way.
        driver.minimize_window()
    driver.get(URL)
    uploader = driver.find_element_by_id("test_code")
    uploader.send_keys(code)
    driver.find_element_by_id("submit").click()
    time.sleep(2)  # give the download a moment to start
    # NOTE(review): the server appears to encode an error flag in the final
    # URL, so "True" here means failure -- confirm against the server side.
    status = driver.current_url[-4:]
    driver.close()
    driver.quit()
    if status == "True":
        return False
    else:
        os.mkdir(PATH+"\\Question\\"+ code)
        with ZipFile(PATH+"\Question\Question.zip","r") as zip_ref:
            zip_ref.extractall(PATH+"\\Question\\"+ code)
        os.remove(PATH+"\Question\Question.zip")
        return True
if __name__ == '__main__':
    # Manual smoke test: show the working directory and fetch a sample code.
    print(PATH)
    print(fetch_questions(code = 'Kee'))
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

file = r'grupo06.csv'
# Two copies: ``data`` keeps the raw rows, ``data2`` is the cleaned working set.
data = pd.read_csv(file, skipinitialspace=True)
data2 = pd.read_csv(file, skipinitialspace=True)
# NOTE(review): with inplace=False the returned frame is discarded, so this
# line is a no-op and ``data`` keeps its duplicates.  Left unchanged because
# the later plots are built from ``data`` as-is; assign the result if
# deduplication of ``data`` is actually wanted.
data.drop_duplicates(keep='first', inplace=False)
data2.drop_duplicates(keep='first', inplace=True)
# Set the target column aside so the cleaning below does not touch it.
opa = data2['Performance']
del data2['Performance']
# --- Handling missing items ---
keys_list = list(data2.keys())
# Collect the [row, column] position of every 'MD' (missing data) marker,
# then replace each with NaN.
at_list = []
for i, row in data2.iterrows():
    for j in keys_list:
        if row.get(j) == 'MD':
            at_list.append([i, j])
for i in at_list:
    # Fix: DataFrame.set_value was deprecated in 0.21 and removed in
    # pandas 1.0; .at is the supported scalar setter.  np.NaN was likewise
    # removed in numpy 2.0 in favour of np.nan.
    data2.at[i[0], i[1]] = np.nan
# Sanity check: confirm no 'MD' markers remain.
for i, row in data2.iterrows():
    for j in keys_list:
        if row[j] == 'MD':
            print(str(i)+" "+str(j))
# Restore the Performance column.
data2['Performance'] = opa
# Fill the NaNs left by the 'MD' markers.  They occur in the columns
# 'Quantitative Ability 1', 'Domain Skills 1' and 'Analytical Skills 1',
# and are replaced with the column mean.
'''
import math as mth
gg =[]
for i in at_list:
    gg.append(i[1])
gg = list(set(gg))
for h in gg:
    uu = data2[['Specialization in study',h]].groupby("Specialization in study")
    for i in at_list:
        key = data2.at[i[0], 'Specialization in study']
        val_list = list(uu.get_group(key)[i[1]].values)
        val_list = list(map(float, val_list))
        a_list = []
        for j in val_list:
            if not mth.isnan(j):
                a_list.append(j)
        media = sum(a_list)/len(a_list)
        data2.set_value(i[0],i[1], media)
    uu = None
'''
# Unique list of columns that contained at least one 'MD'.
gg = list({i[1] for i in at_list})
# NOTE(review): errors='ignore' is deprecated in pandas 2.x; the intent is
# "convert the convertible columns, leave the rest alone".
data2 = data2.apply(pd.to_numeric, errors='ignore')
for h in gg:
    data2[h].fillna(data2[h].mean(), inplace=True)
# Create an axes instance
# Create the boxplot
# Save the figure
def boxplots(column1, column2):
    """Draw horizontal boxplots of *column2* grouped by *column1*.

    Operates on the module-level ``data`` frame; plot side effect only,
    nothing is returned.
    """
    oi = data[[column1, column2]].groupby(column1).boxplot(subplots=False, vert=False)
    # NOTE(review): ``oi`` is the boxplot return value, not the GroupBy, so
    # ``oi.groups`` likely raises AttributeError here -- verify before use.
    groups = list(oi.groups.keys())
def getMean(column1, column2):
    """Group *column2* of the module-level ``data`` frame by *column1*.

    Despite the name no mean is computed and nothing is returned; the
    function appears unfinished.
    """
    oi = data[[column1, column2]].groupby(column1)
    groups = list(oi.groups.keys())
#tratando outliers
def remove_outlier(df_in, col_name):
    """Clamp outliers of *col_name* in *df_in* to the Tukey fences.

    Values below Q1 - 1.5*IQR or above Q3 + 1.5*IQR are pushed to those
    limits (the boxplot whiskers).  Fix: the original ignored ``df_in``
    and mutated the global ``data2``; it now operates on the frame it is
    given (callers passed ``data2`` anyway, so behaviour is unchanged).

    :param df_in: DataFrame mutated in place
    :param col_name: numeric column to clamp
    :return: the same DataFrame, for call-chaining convenience
    """
    q1 = df_in[col_name].quantile(0.25)
    q3 = df_in[col_name].quantile(0.75)
    iqr = q3 - q1  # interquartile range
    fence_low = q1 - 1.5 * iqr
    fence_high = q3 + 1.5 * iqr
    # Series.clip performs exactly the low/high clamping of the old loop.
    df_in[col_name] = df_in[col_name].clip(fence_low, fence_high)
    return df_in
# Clamp every outlier in the data set: outliers are pushed to the boxplot
# whisker limits (see remove_outlier above).
# list_num holds the names of the numeric columns.
list_num = ["10th percentage", "12th percentage", "College percentage", "English 1", "English 2","English 3","English 4","Quantitative Ability 1", "Quantitative Ability 2", "Quantitative Ability 3", "Quantitative Ability 4","Domain Skills 1", "Domain Skills 2", "Domain Test 3", "Domain Test 4", "Analytical Skills 1", "Analytical Skills 2","Analytical Skills 3"]
# list_cat holds the names of the categorical columns.
#list_cat = ["12th Completion year", "Degree of study", "Specialization in study", "Year of Completion of college", "Performance"]
list_cat = ["Year of Birth", "Gender", "State(Location)","10th Completion Year"]
for i in list_num:
    data2 = remove_outlier(data2, i)
data2[["Gender", "Quantitative Ability 1"]].groupby("Gender").boxplot(subplots=True, vert=True)#,showfliers=False)
# Histogram of every numeric column split by each categorical column.
for i in list_cat:
    for j in list_num:
        data2.hist(j, by=i)
        plt.savefig(i+j)
# Boxplot of every numeric column grouped by each categorical column.
for i in list_cat:
    for j in list_num:
        data2[[i, j]].groupby(i).boxplot(subplots=True, vert=True)
        plt.savefig("boxplot "+i + j)
# Plain boxplot of each numeric column on its own.
for j in list_num:
    data2[j].plot(kind='box',subplots=True, vert=True)
    plt.savefig("boxplot "+j)
import statistics, io, argparse, random
import numpy as np
from collections import Counter
def accuracy(gold_word, pred_word):
    """Return 100 when the prediction matches the gold exactly, else 0."""
    return 100 if gold_word == pred_word else 0
def F1(gold_word, pred_word):
    """Precision, recall and F1 (percentages, rounded to 2 decimals).

    A predicted morpheme counts as correct when it occurs anywhere in the
    gold list (duplicates in the prediction are each counted).

    :param gold_word: list of gold morphemes
    :param pred_word: list of predicted morphemes
    :return: (precision, recall, F1) tuple
    """
    # Guard: empty gold or prediction would otherwise divide by zero below
    # (the original crashed with ZeroDivisionError on these inputs).
    if not gold_word or not pred_word:
        return 0.0, 0.0, 0
    correct_total = 0
    for m in pred_word:
        if m in gold_word:
            correct_total += 1
    gold_total = len(gold_word)
    pred_total = len(pred_word)
    precision = correct_total / pred_total
    recall = correct_total / gold_total
    # F1 is 0 when precision + recall is 0; the original used a bare
    # ``except`` to achieve this -- test the condition explicitly instead.
    if precision + recall == 0:
        f1 = 0
    else:
        f1 = round(2 * (precision * recall) / (precision + recall) * 100, 2)
    return round(precision * 100, 2), round(recall * 100, 2), f1
def call_counter(func):
    """Decorator counting invocations on the wrapper's ``calls`` attribute."""
    def wrapper(*args, **kwargs):
        wrapper.calls += 1
        return func(*args, **kwargs)
    wrapper.calls = 0
    wrapper.__name__ = func.__name__
    return wrapper
def memoize(func):
    """Cache results of *func*, keyed on the string form of its arguments."""
    cache = {}
    def memoized(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return memoized
@call_counter
@memoize
def levenshtein(s, t):
    """Recursively compute the Levenshtein (edit) distance between s and t.

    Memoised by @memoize (otherwise exponential); @call_counter tracks the
    number of invocations on ``levenshtein.calls``.
    """
    # An empty string's distance to the other is the other's length.
    if s == "":
        return len(t)
    if t == "":
        return len(s)
    # Substitution cost: free when the trailing characters already match.
    if s[-1] == t[-1]:
        cost = 0
    else:
        cost = 1
    # Minimum over insertion, deletion, and substitution/match.
    res = min([levenshtein(s[:-1], t)+1,
               levenshtein(s, t[:-1])+1,
               levenshtein(s[:-1], t[:-1]) + cost])
    return res
def copy(gold_word, pred_word):
    """Percentage of character positions (over the gold length) where the
    joined gold and predicted strings agree.

    Raises ZeroDivisionError when the gold string is empty, matching the
    original behaviour.
    """
    gold_str = ''.join(gold_word)
    pred_str = ''.join(pred_word)
    # Only the overlapping prefix can match; the two original branches
    # reduce to comparing up to the shorter length.
    overlap = min(len(gold_str), len(pred_str))
    matches = sum(1 for i in range(overlap) if gold_str[i] == pred_str[i])
    return round(matches * 100 / len(gold_str), 2)
if __name__ == '__main__':
    # Evaluate predicted segmentations against gold files and write a small
    # report with average accuracy/precision/recall/F1/distance/copy scores.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type = str, help = 'input path')
    parser.add_argument('--lang', type = str, help = 'target language')
    parser.add_argument('--split', type = str, help = '1, 2, 3, etc')
    parser.add_argument('--m', help = 'model type')
    parser.add_argument('--test', action = 'store_true', help = 'whether evaluating test files of varying sizes')
    parser.add_argument('--z', type = str, help = 'random sample; a number between 1 to 50')
    args = parser.parse_args()
    gold_list = []
    pred_list = []
    n = args.split
    # With --test, file names carry a "<split>_<sample>" suffix instead.
    test_n = ''
    if args.test:
        test_n = args.split + '_' + str(args.z)
    lang = args.lang
    # --- read the gold file: one word per line, morphemes joined by '!' ---
    gold_file = ''
    if args.test:
        gold_file = io.open(args.input + lang + '_test_tgt_' + test_n, encoding = 'utf-8')
    else:
        gold_file = io.open(args.input + lang + '_test_tgt_' + n, encoding = 'utf-8')
    for line in gold_file:
        toks = line.strip().split()
        toks = ''.join(c for c in toks)  # drop internal whitespace
        morphs = toks.split('!')
        gold_list.append(morphs)
    # --- read the matching prediction file (name depends on --m/--test) ---
    pred_file = ''
    if args.m:
        if args.test:
            pred_file = io.open(args.input + lang + '_test_pred_' + args.m + '_' + test_n, encoding = 'utf-8')
        else:
            pred_file = io.open(args.input + lang + '_test_pred_' + args.m + '_' + n, encoding = 'utf-8')
    else:
        if args.test:
            pred_file = io.open(args.input + lang + '_test_pred_' + test_n, encoding = 'utf-8')
        else:
            pred_file = io.open(args.input + lang + '_test_pred_' + n, encoding = 'utf-8')
    for line in pred_file:
        toks = line.strip().split()
        toks = ''.join(c for c in toks)
        morphs = toks.split('!')
        pred_list.append(morphs)
    # --- score every (gold, prediction) pair ---
    all_accuracy = []
    all_precision = []
    all_recall = []
    all_f1 = []
    all_dist = []
    all_copy = []
    for i in range(len(gold_list)):
        all_accuracy.append(accuracy(gold_list[i], pred_list[i]))
        precision, recall, f1 = F1(gold_list[i], pred_list[i])
        dist = levenshtein(' '.join(m for m in gold_list[i]), ' '.join(m for m in pred_list[i]))
        all_precision.append(precision)
        all_recall.append(recall)
        all_f1.append(f1)
        all_dist.append(dist)
        all_copy.append(copy(gold_list[i], pred_list[i]))
    # --- write the averages to the matching _test_eval_ file ---
    outfile = ''
    if args.m:
        if args.test:
            outfile = io.open(args.input + lang + '_test_eval_' + args.m + '_' + test_n, 'w', encoding = 'utf-8')
        else:
            outfile = io.open(args.input + lang + '_test_eval_' + args.m + '_' + n, 'w', encoding = 'utf-8')
    else:
        if args.test:
            outfile = io.open(args.input + lang + '_test_eval_' + test_n, 'w', encoding = 'utf-8')
        else:
            outfile = io.open(args.input + lang + '_test_eval_' + n, 'w', encoding = 'utf-8')
    outfile.write('Average accuracy: ' + str(round(statistics.mean(all_accuracy), 2)) + '\n')
    outfile.write('Average precision: ' + str(round(statistics.mean(all_precision), 2)) + '\n')
    outfile.write('Average recall: ' + str(round(statistics.mean(all_recall), 2)) + '\n')
    outfile.write('Average F1: ' + str(round(statistics.mean(all_f1), 2)) + '\n')
    outfile.write('Average distance: ' + str(round(statistics.mean(all_dist), 2)) + '\n')
    outfile.write('Average copy: ' + str(round(statistics.mean(all_copy), 2)) + '\n')
'''
boots_accuracy = []
boots_precision = []
boots_recall = []
boots_f1 = []
boots_dist = []
boots_copy = []
index = []
i = 0
while i < len(gold_list):
index.append(i)
i += 1
random.shuffle(index)
for z in range(10000):
select = random.choices(index, k = len(gold_list))
all_accuracy = []
all_precision = []
all_recall = []
all_f1 = []
all_dist = []
all_copy = []
for i in select:
all_accuracy.append(accuracy(gold_list[i], pred_list[i]))
precision, recall, f1 = F1(gold_list[i], pred_list[i])
dist = levenshtein(' '.join(m for m in gold_list[i]), ' '.join(m for m in pred_list[i]))
all_precision.append(precision)
all_recall.append(recall)
all_f1.append(f1)
all_dist.append(dist)
all_copy.append(copy(gold_list[i], pred_list[i]))
ave_accuracy = round(statistics.mean(all_accuracy), 2)
ave_precision = round(statistics.mean(all_precision), 2)
ave_recall = round(statistics.mean(all_recall), 2)
ave_f1 = round(statistics.mean(all_f1), 2)
ave_dist = round(statistics.mean(all_dist), 2)
ave_copy = round(statistics.mean(all_copy), 2)
boots_accuracy.append(ave_accuracy)
boots_precision.append(ave_precision)
boots_recall.append(ave_recall)
boots_f1.append(ave_f1)
boots_dist.append(ave_dist)
boots_copy.append(ave_copy)
boots_accuracy.sort()
boots_precision.sort()
boots_recall.sort()
boots_f1.sort()
boots_dist.sort()
boots_copy.sort()
print('')
print('Bootstrap: ')
print('')
print('Accuracy: ' + str(round(statistics.mean(boots_accuracy), 2)) + ' ' + str(round(boots_accuracy[250], 2)) + ' ' + str(round(boots_accuracy[9750], 2)))
print('Precision: ' + str(round(statistics.mean(boots_precision), 2)) + ' ' + str(round(boots_precision[250], 2)) + ' ' + str(round(boots_precision[9750], 2)))
print('Recall: ' + str(round(statistics.mean(boots_recall), 2)) + ' ' + str(round(boots_recall[250], 2)) + ' ' + str(round(boots_recall[9750], 2)))
print('F1: ' + str(round(statistics.mean(boots_f1), 2)) + ' ' + str(round(boots_f1[250], 2)) + ' ' + str(round(boots_f1[9750], 2)))
print('Distance: ' + str(round(statistics.mean(boots_dist), 2)) + ' ' + str(round(boots_dist[250], 2)) + ' ' + str(round(boots_dist[9750], 2)))
print('Copy: ' + str(round(statistics.mean(all_copy), 2)) + ' ' + str(round(boots_copy[250], 2)) + ' ' + str(round(boots_copy[9750], 2)))
'''
|
from Layer import *
import ast
import math
class Net:
    """A small fully connected neural network built from ``Layer`` objects.

    ``size`` lists the neuron counts of every layer including the input
    layer, so ``active`` and ``regul`` carry one entry per trainable layer.
    Fixes over the original: the layer lists are now per-instance (they
    were mutable class attributes shared by every Net, so building a second
    network appended onto the first one's layers), file handles are closed
    via ``with``, and ``LoadNet`` uses integer division for its index
    (``cnt / 2`` is a float and breaks indexing on Python 3).
    """

    def __init__(self, opt, size, active, regul, alpha):
        self.opt = opt          # optimiser name
        self.alpha = alpha      # learning rate
        self.layers = []        # trainable layers, input -> output order
        self.layerOut = []      # per-sample outputs of every layer
        for i in range(0, len(active)):
            self.layers.append(Layer(size[i + 1], active[i], size[i], regul[i]))

    def GD_SGD(self, All):
        """Average loss of the network over the (input, label) pairs in All."""
        Err = 0.0
        for i in range(0, len(All)):
            data = All[i]
            Err += self.lossFunc(self.calc(data[0]), data[1])
        Err /= float(len(All))
        return Err

    def calc(self, L):
        """Forward pass; also records every layer's output in self.layerOut."""
        layerOut = []
        L = self.layers[0].L2Norm(L)
        layerOut.append(L)
        for i in range(0, len(self.layers)):
            if i == len(self.layers) - 1:
                # Last layer: record the pre-activation ("pure") output.
                layerOut.append(self.layers[i].pureCalc(L))
            else:
                layerOut.append(self.layers[i].calc(L))
            L = self.layers[i].calc(L)
        self.layerOut.append(layerOut)
        return L

    def lossFunc(self, calcOut, char):
        """Cross-entropy of the output against the one-hot target for *char*."""
        Y = []
        for i in range(0, self.layers[len(self.layers) - 1].size):
            Y.append(0.0)
        Y[ord(char) - ord('A')] = 1.0
        Err = []
        for i in range(0, len(calcOut)):
            # The 1e-9 keeps log() finite when the sigmoid saturates to 0/1.
            Err.append(Y[i] * math.log(self.sigmoid(calcOut[i]) + 1e-9)
                       + (1.0 - Y[i]) * math.log(1.0 - self.sigmoid(calcOut[i]) + 1e-9))
        return -sum(Err) / len(Err)

    def sigmoid(self, x):
        """Logistic function, clamped outside [-5, 5] to avoid overflow."""
        if x >= 5:
            return 1
        if x <= -5:
            return 0.0
        return 1.0 / (1.0 + math.exp(-x))

    def sigmoidP(self, x):
        """Derivative of the sigmoid."""
        return self.sigmoid(x) * (1.0 - self.sigmoid(x))

    def compute(self, Y, Lout):
        """d(loss)/d(Lout) for one sample with one-hot target Y."""
        res = []
        for i in range(0, len(Y)):
            res.append(-1.0 * (Y[i] * (self.sigmoidP(Lout[i])) / (self.sigmoid(Lout[i]))
                               + (Y[i] - 1.0) * (self.sigmoidP(Lout[i])) / (1.0 - (self.sigmoid(Lout[i]))))
                       / float(len(Y)))
        return res

    def rond(self, All):
        """Average gradient d(error)/d(Lout) over the whole training set."""
        self.layerOut = []  # reset the recorded outputs for this iteration
        resAll = []
        for i in range(0, self.layers[len(self.layers) - 1].size):
            resAll.append(0.0)
        Y = []
        for j in range(0, self.layers[len(self.layers) - 1].size):
            Y.append(0.0)
        for i in range(0, len(All)):
            Y[ord(All[i][1]) - ord('A')] = 1.0
            # Forward pass fills self.layerOut[i] for this sample.
            s = self.calc(All[i][0])
            Lout = self.layerOut[i][len(self.layers)]
            computeFor_ith_data = self.compute(Y, Lout)
            for j in range(0, len(resAll)):
                resAll[j] += computeFor_ith_data[j]
            Y[ord(All[i][1]) - ord('A')] = 0.0
        # NOTE(review): the sum is divided by the output size rather than by
        # len(All); preserved from the original -- confirm this is intended.
        for i in range(0, len(resAll)):
            resAll[i] /= float(len(resAll))
        return resAll

    def matrixMult(self, A, B):
        """Plain (list-of-lists) matrix product A @ B."""
        res = []
        for i in range(0, len(A)):
            t = []
            for j in range(0, len(B[0])):
                Sum = 0.0
                for k in range(0, len(A[0])):
                    Sum += float(A[i][k]) * float(B[k][j])
                t.append(Sum)
            res.append(t)
        return res

    def transpose(self, A):
        """Transpose of a list-of-lists matrix."""
        res = []
        for i in range(0, len(A[0])):
            t = []
            for j in range(0, len(A)):
                t.append(A[j][i])
            res.append(t)
        return res

    def backProp(self, Rond):
        """Back-propagate the output gradient; return per-layer (W, B) grads."""
        W = []
        B = []
        # Average every layer's recorded output over all samples.
        avgLayerOut = []
        for j in range(0, len(self.layerOut[0])):
            t2 = []
            for k in range(0, len(self.layerOut[0][j])):
                t2.append(0.0)
            avgLayerOut.append(t2)
        for i in range(0, len(self.layerOut)):
            for j in range(0, len(self.layerOut[i])):
                for k in range(0, len(self.layerOut[i][j])):
                    avgLayerOut[j][k] += self.layerOut[i][j][k]
        for j in range(0, len(avgLayerOut)):
            for k in range(0, len(avgLayerOut[j])):
                avgLayerOut[j][k] /= float(len(self.layerOut))
        # avgLayerOut now holds, per layer, the dataset-averaged activations.
        Rond = [Rond]
        for i in range(0, len(self.layers)):
            W.append(self.matrixMult(self.transpose(Rond),
                                     [avgLayerOut[len(self.layers) - 1 - i]]))
            B.append(Rond)
            Rond = self.matrixMult(Rond, self.layers[len(self.layers) - 1 - i].weight())
        W.reverse()
        B.reverse()
        return W, B

    def train(self, All):
        """One training step: gradient, back-prop, layer updates; returns loss."""
        Rond = self.rond(All)
        W, B = self.backProp(Rond)
        for i in range(0, len(self.layers)):
            self.layers[i].train(self.alpha, W[i], B[i][0])
        return self.GD_SGD(All)

    def LoadNet(self, FileName):
        """Restore layer weights/biases from a file written by SaveNet."""
        with open(FileName, "r") as File:
            Lines = File.readlines()
        cnt = 0
        while cnt < len(Lines):
            # Fix: cnt // 2 -- "cnt / 2" is a float on Python 3 and cannot
            # index the layer list.  Lines alternate weight, bias.
            self.layers[cnt // 2].setLayer(ast.literal_eval(Lines[cnt]),
                                           ast.literal_eval(Lines[cnt + 1]))
            cnt += 2

    def SaveNet(self, FileName):
        """Write each layer's weights and bias, one Python literal per line."""
        with open(FileName, "w") as File:
            for i in range(0, len(self.layers)):
                File.write(str(self.layers[i].weight()) + "\n")
                File.write(str(self.layers[i].bias()) + "\n")
|
# 1. 문제
# (핵심) 부등호 순서를 만족하는 최대 최소를 찾아야 함
# 1.1 세부조건
# - 모든 숫자는 달라야함. (한 번 씩만 사용가능)
# -- 부등호에 부분
# -- 부등호 방향 같거나 다른 경우를 나눠서 생각.
# 2. 포인트 - 해결 아이디어
# - 최대가 되려면???
# --- 숫자를 큰 수 부터 뽑아가면서 부등호를 만족할 수 있을지를 체크한다.
# ex. 2 < >
# 9 < ? 9보다 더 큰 수가 없기 때문에 8로 내려감
# 8 < 9 만족할 수 있기 때문에 다음 부등호 체크
# 9 > 7 만족하기 때문에 끝
# 3. 플로우
# 숫자 배열을 하나 만들고 사용한 수는 제외시켜 나간다.
# 큰 수 부터 체크
# 작은 수 부터 체크
# 두 가지 경우를 생각한다.
# 테스트케이스 - 복잡하게, 다양한 경우가 나올 수 있게
# Completed solution for the inequality-sequence problem described above
# (Baekjoon 2529 style): place distinct digits 0-9 between the given '<'/'>'
# signs and report the maximum and minimum valid digit strings.
# Fixes over the stub: the ``while n:`` loop had no body (SyntaxError) and
# the candidate digits stopped at 8 (range(9)) instead of including 9.

def _holds(a, b, sign):
    """Return True when 'a sign b' is satisfied for digits a, b."""
    return a < b if sign == '<' else a > b


def find_extremes(signs):
    """Return (maximum, minimum) digit strings satisfying *signs*.

    :param signs: sequence of '<' / '>' characters
    :return: tuple of two strings of len(signs) + 1 distinct digits
    """
    k = len(signs) + 1
    found = []

    def dfs(chosen, used):
        # Depth-first search over unused digits, pruning as soon as the
        # latest inequality fails.
        if len(chosen) == k:
            found.append(''.join(map(str, chosen)))
            return
        for d in range(10):
            bit = 1 << d
            if used & bit:
                continue
            if chosen and not _holds(chosen[-1], d, signs[len(chosen) - 1]):
                continue
            dfs(chosen + [d], used | bit)

    dfs([], 0)
    return max(found), min(found)


if __name__ == "__main__":
    n = int(input())            # number of inequality signs (unused beyond input format)
    signs = list(input().split())
    maximum, minimum = find_extremes(signs)
    print(maximum)
    print(minimum)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: Chuan
# For beginner
# 1. variable - int,num,str,bool
# 2. if
# 3. > < >= <= ==
# 4. print
def main():
    """Beginner demo (Python 2): variables, comparison and if/else."""
    who = 'chuan的老妈 '
    good_price = 6  # the vendor's asking price
    good_description = "西双版纳大白菜"  # the vendor's sign (product name)
    is_cheap = False  # whether she considers it cheap
    reasonable_price = 5  # the highest price she will accept
    buy_amount = 2  # plans to buy 2 jin
    print "%s上街看到了%s, 卖 %d 元/斤" % (who, good_description, good_price)
    if good_price <= reasonable_price:
        print '她认为便宜'
        is_cheap = True
        print '她买了 %d 斤' % (buy_amount)
    else:
        print '她认为贵了 '
        is_cheap = False
        print '她并没有买,扬长而去 '
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
from __future__ import absolute_import
import sys
import re
import pandas as pd
import numpy as np
import itertools
# Fixed-column layout of an atom line in a .gro file:
# field name -> ((start, end) column slice, converter applied to the slice).
GRO_FIELDS = {
    "resid": ((0, 5), int),
    "resname": ((5, 10), str),
    "atom_name": ((10, 15), str),
    "atomid": ((15, 20), int),
    "x": ((20, 28), float),
    "y": ((28, 36), float),
    "z": ((36, 44), float),
}
# ``io`` has been in the standard library since Python 2.6.  The previous
# try/except fallback attempted ``from io import StringIO`` after ``import
# io`` had failed -- importing from the very module that just failed to
# import -- so it could never succeed and has been reduced to a plain import.
import io
class FormatError(Exception):
    """
    Exception raised when the file format is wrong.
    """
    def __init__(self, value):
        # Fix: forward the message to Exception so ``args``, repr() and
        # pickling behave like a normal exception; the original skipped this.
        super(FormatError, self).__init__(value)
        self.value = value

    def __str__(self):
        return self.value
class NoGroFile(Exception):
    # Marker exception; the name suggests it flags a non-gro input file --
    # raised elsewhere in the package (no raise site visible here).
    pass
def stop_at_empty_line(iterator):
    """
    Yield items from *iterator* until an empty line is reached.

    An empty line is a string that is empty once stripped; it is consumed
    from the iterator but not yielded.

    :param iterator: iterator of lines
    :return: None (the generator simply stops)
    """
    for item in iterator:
        if not item.strip():
            return
        yield item
def read_gro(lines):
    """
    Read the atoms, the header, and the box description from a gro file.
    Atoms are represented as dictionaries keyed by the GRO_FIELDS names.
    :Parameters:
        - lines: an iterator over atom lines from the gro file. The two header
                 lines and the bottom line describing the box have to be
                 included.
    :Returns:
        - title: the title of the system as written on line 1 of the file
        - atoms: a list of atom, each atom is stored as a dictionary
        - box: the box description as written on the last line
    :Raise:
        - FormatError: raised if the file format does not fit.
    """
    # "lines" might be a list and not a proper iterator
    lines = iter(lines)
    # The two first lines are a header
    title = next(lines)
    nb_atoms = next(lines) # This is the number of atoms, we do not care
    # Sanity check: if either header line parses cleanly as an atom line,
    # a header line is missing from the input.  The parse succeeding is the
    # error case; a ValueError from the int/float casts means all is well.
    for header in [title, nb_atoms]:
        try:
            a = dict(((key, convert(header[begin:end].strip()))
                      for key, ((begin, end), convert) in GRO_FIELDS.items()))
            raise FormatError("Something is wrong in the format")
        except ValueError:
            pass
    # Loop over the lines but act on the previous one. We are reading atoms and
    # we do not want to consider the last line (the box description) as an
    # atom.
    atoms = []
    prev_line = next(lines)
    for line in stop_at_empty_line(lines):
        try:
            atoms.append(dict(((key, convert(prev_line[begin:end].strip()))
                               for key, ((begin, end), convert)
                               in GRO_FIELDS.items())))
            prev_line = line
        except ValueError:
            raise FormatError("Something is wrong in the format")
    # The last buffered line was never parsed as an atom: it is the box.
    box = prev_line
    return (title, atoms, box)
class ReturnGro(object):
    """Simple container for the parsed content of a gro file."""

    def __init__(self, title, atoms, box):
        # The reader keeps the trailing newline on the title line; drop it.
        self.title = title.strip()
        self.atoms = atoms
        self.box = box
def renumber(atoms):
    """
    Renumber the atoms and the residues from a list of atom.

    Residues are renumbered consecutively (1, 2, ...) in order of appearance;
    atom ids are renumbered from 1.  Both counters wrap at 100000 because the
    gro format reserves only 5 digits per field.

    :Parameters:
        - atoms: a list of atom, each atom is stored as a dictionary
    :Returns:
        - new_atoms: a new list of renumbered atom dictionaries.  The input
          dictionaries are left untouched (the original implementation
          mutated the caller's dictionaries in place, contradicting the
          "new list" contract).
    """
    new_atoms = []
    resid = 0
    prev_resid = 0
    for atomid, atom in enumerate(atoms, start=1):
        if atom['resid'] != prev_resid:
            resid += 1
            prev_resid = atom['resid']
        new_atom = dict(atom)  # shallow copy so the caller's data survives
        new_atom['resid'] = resid % 100000
        new_atom['atomid'] = atomid % 100000
        new_atoms.append(new_atom)
    return new_atoms
def parse_file(filin):
    """
    Open *filin* and parse it as a gro file.

    :Parameters:
        - filin: the file name, as a str.
    :Returns:
        - the return of :read_gro:
    :Raise:
        - FormatError: raised if the file format does not fit; the file
          name is appended to the original message.
    """
    with open(filin) as handle:
        try:
            return read_gro(handle)
        except FormatError as err:
            # Re-raise with the file name for easier diagnosis.
            raise FormatError("{0} ({1})".format(err, filin))
def gro_parser(gro_file, include_solvent=True):
    """Parse a gro file into a ReturnGro object.

    :Parameters:
        - gro_file: path to a .gro file.
        - include_solvent: when False, SOL/NA/CL residues are dropped, the
          remaining atoms renumbered, and the title replaced.
    :Returns:
        - a ReturnGro with the title, a pandas DataFrame of atoms indexed
          by atomid, and the box description line.
    :Raise:
        - NoGroFile: if the path does not end in .gro or cannot be opened.
          (The original silently returned None for non-.gro paths, which
          made callers fail later with a confusing AttributeError.)
    """
    if not str(gro_file).endswith(".gro"):
        raise NoGroFile("Check if the .gro path is correct")
    try:
        # Probe that the file can be opened; the original read the whole
        # file into memory here only to delete it immediately.
        with open(gro_file, "r"):
            pass
    except OSError:
        raise NoGroFile("Check if the .gro path is correct")
    title, atoms, box = parse_file(gro_file)
    if not include_solvent:
        # Drop water and counter-ions, then renumber the survivors.
        atoms = [atom for atom in atoms
                 if atom.get("resname") not in ("SOL", "NA", "CL")]
        atoms = renumber(atoms)
        title = "Protein without water"
    atoms = pd.DataFrame.from_dict(atoms, orient="columns").set_index("atomid")
    return ReturnGro(title, atoms, box)
def get_coordinates(gro_return, include_solvent=True):
    """Return an (n_atoms, 3) array of x/y/z coordinates from a gro file.

    :param gro_return: path of the gro file to parse.
    :param include_solvent: forwarded to gro_parser.
    """
    parsed = gro_parser(gro_return, include_solvent)
    return parsed.atoms[["x", "y", "z"]].values
'''Bucket list:'''
# Define local coordinate system (We leave it as default from gmx output
# def set_coordinate_center(center_atom, ):
# Calculate coordinates relative to origin
def center_coords(atom_name, res_name, gro_parsed):
    """Return all atom coordinates expressed relative to a reference atom.

    :param atom_name: name of the reference atom (e.g. 'P').
    :param res_name: residue name of the reference atom (e.g. 'DFP').
    :param gro_parsed: an object whose ``atoms`` attribute is a DataFrame
        with 'atom_name', 'resname', 'x', 'y', 'z' columns.
    :returns: array of coordinates with the reference atom at the origin.
        NOTE(review): assumes the (atom_name, resname) selection matches a
        single atom; with several matches the subtraction broadcasts over
        all of them — confirm uniqueness for the intended inputs.

    The original declared ``global atomdf`` and leaked its selection into
    the module namespace; the selection is now a plain local.
    """
    atom_info = gro_parsed.atoms
    all_coords = atom_info[["x", "y", "z"]].values
    reference = atom_info.loc[
        (atom_info['atom_name'] == atom_name)
        & (atom_info['resname'] == res_name)
    ]
    return all_coords - reference[["x", "y", "z"]].values
if __name__ == '__main__':
    # Demo: parse the gro file given on the command line, without solvent.
    print(get_coordinates(sys.argv[1], include_solvent=False))
    parsed = gro_parser(sys.argv[1], include_solvent=False)
    print(parsed.atoms)
    # Coordinates recentered on the P atom of residue DFP.
    print(center_coords('P', 'DFP', parsed))
|
#!/usr/bin/python3
"""unit_test.py: Testing to make sure that the functions from
diagonals.py is working correctly
"""
import unittest
from diagonals import get_all_diagonals, smallest_sum_in_array, get_diagonal
class UnitTest(unittest.TestCase):
    """Unit tests for the functions exported by diagonals.py."""

    def test_get_diagonal(self):
        """get_diagonal returns the expected diagonal of the sample grid."""
        grid = [[3, 1, 5, 6, 9], [2, 4, 1, 9, 7], [3, 5, 2, 8, 10], [4, 2, 1, 6, 8],
                [1, 4, 7, 9, 1]]
        self.assertEqual(get_diagonal(grid), [3, 4, 2, 6, 1])

    def test_sum_in_array(self):
        """smallest_sum_in_array returns the expected value for the sample."""
        values = [1, 123, 312, 3, 223, 1, 323, 4, 123, 1, 23, 1]
        self.assertEqual(smallest_sum_in_array(values, 4), 4)

    def test_labsheet(self):
        """Values given on the labsheet: the overall minimum should be 10."""
        grid = [[3, 1, 5, 6, 9], [2, 4, 1, 9, 7], [3, 5, 2, 8, 10], [4, 2, 1, 6, 8],
                [1, 4, 7, 9, 1]]
        per_diagonal = [smallest_sum_in_array(diagonal, 4)
                        for diagonal in get_all_diagonals(grid, 4)]
        self.assertEqual(min(per_diagonal), 10)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import os
import pandas as pd
from autumn.projects.covid_19.mixing_optimisation.constants import OPTI_REGIONS, PHASE_2_START_TIME
from autumn.projects.covid_19.mixing_optimisation.mixing_opti import DURATIONS, MODES
from autumn.projects.covid_19.mixing_optimisation.utils import (
get_country_population_size,
get_scenario_mapping_reverse,
)
from autumn.coreb.load import load_uncertainty_table
from autumn.settings import BASE_PATH
# Destination directory for the generated output tables.
# NOTE(review): the nested "outputs/plots/outputs/figures/tables" path looks
# accidental — confirm against the repository layout.
FIGURE_PATH = os.path.join(
    BASE_PATH,
    "apps",
    "covid_19",
    "mixing_optimisation",
    "outputs",
    "plots",
    "outputs",
    "figures",
    "tables",
)
# Location of the per-country calibration/scenario PowerBI databases.
DATA_PATH = os.path.join(
    BASE_PATH,
    "apps",
    "covid_19",
    "mixing_optimisation",
    "outputs",
    "pbi_databases",
    "calibration_and_scenarios",
    "full_immunity",
)
# WHO-reported cumulative COVID-19 deaths, in thousands, per country,
# at two reference dates.
who_deaths = {
    "by_october": {
        "belgium": 10.2,
        "france": 31.7,
        "italy": 35.9,
        "spain": 32.4,
        "sweden": 5.9,
        "united-kingdom": 42.1,
    },
    "by_january": {
        "belgium": 19.6,
        "france": 64.3,
        "italy": 74.2,
        "spain": 51.4,
        "sweden": 9.7,
        "united-kingdom": 73.5,
    },
}
def main():
    """Load each region's uncertainty table and write the output tables."""
    tables = {}
    for region in OPTI_REGIONS:
        tables[region] = load_uncertainty_table(os.path.join(DATA_PATH, region))
    # Produce both the absolute and the per-capita versions.
    for per_capita in (False, True):
        make_main_outputs_tables_new_messaging(tables, per_capita=per_capita)
def get_quantile(output_df, sc_idx, quantile):
    """Return the value of *quantile* for scenario *sc_idx*.

    For the baseline scenario (sc_idx == 0) the value is read at
    PHASE_2_START_TIME; for the other scenarios it is read at the last
    available time.

    :param output_df: DataFrame with scenario/time/quantile/value columns.
    :param sc_idx: scenario index.
    :param quantile: quantile to read (e.g. 0.025, 0.5, 0.975).
    """
    scenario_df = output_df[output_df["scenario"] == sc_idx]
    time_read = PHASE_2_START_TIME if sc_idx == 0 else max(scenario_df["time"])
    at_time = scenario_df[scenario_df["time"] == time_read]
    # float(Series) only ever worked for single-element Series and was
    # removed in pandas 2.0; select the first matching value explicitly.
    return float(at_time[at_time["quantile"] == quantile]["value"].iloc[0])
def get_uncertainty_cell_value(
    country, uncertainty_df, output, mode, duration, per_capita=False, population=None
):
    """Format one table cell as "median (lower-upper)" for the given output.

    The *output* name encodes what to read: the quantity (deaths_/yoll_/
    sero_), the scenario (_before/_unmitigated/_opti_deaths/_opti_yoll),
    a "total_" prefix (adds the baseline to the scenario value) and "who_"
    outputs, which short-circuit to the WHO-reported numbers.
    Returns "" for cells that repeat across mode/duration rows.
    """
    # output is in ["deaths_before", "deaths_unmitigated", "deaths_opti_deaths", "deaths_opti_yoll",
    #  "yoll_before", "yoll_unmitigated", "yoll_opti_deaths", "yoll_opti_yoll"]
    # return blank if repeat row
    if "_before" in output or "unmitigated" in output or "who_" in output:
        if mode != MODES[0] or duration != DURATIONS[0]:
            return ""
    # return WHO estimate if requested
    if "who_" in output:
        if "_before" in output:
            value = who_deaths["by_october"][country]
        else:
            value = who_deaths["by_january"][country]
        if per_capita:
            country_name = country.title() if country != "united-kingdom" else "United Kingdom"
            pop = get_country_population_size(country_name)
            # who_deaths is in thousands; convert to deaths per million.
            value *= 1000 / pop * 1.0e6
            value = int(value)
        return value
    # NOTE: "type" shadows the builtin of the same name below.
    if "deaths_" in output:
        type = "accum_deaths"
    elif "yoll_" in output:
        type = "accum_years_of_life_lost"
    else:
        type = "proportion_seropositive"
    mask_output = uncertainty_df["type"] == type
    output_df = uncertainty_df[mask_output]
    if "opti_yoll" in output:
        objective = "yoll"
    else:
        objective = "deaths"
    # Map the output name onto a scenario index.
    if "unmitigated" in output:
        sc_idx = get_scenario_mapping_reverse(None, None, None)
    elif "_before" in output:
        sc_idx = 0
    else:
        sc_idx = get_scenario_mapping_reverse(mode, duration, objective)
    val_025 = get_quantile(output_df, sc_idx, 0.025)
    val_50 = get_quantile(output_df, sc_idx, 0.5)
    val_975 = get_quantile(output_df, sc_idx, 0.975)
    # "total_" outputs add the phase-1 (baseline) quantities on top.
    if output.startswith("total_"):
        val_025 += get_quantile(output_df, 0, 0.025)
        val_50 += get_quantile(output_df, 0, 0.5)
        val_975 += get_quantile(output_df, 0, 0.975)
    # Scaling and rounding depend on whether the table is per capita.
    if per_capita:
        multiplier = {
            "accum_deaths": 1.0e6 / population,
            "accum_years_of_life_lost": 1.0e4 / population,
            "proportion_seropositive": 100,
        }
        rounding = {"accum_deaths": 0, "accum_years_of_life_lost": 0, "proportion_seropositive": 0}
    if not per_capita:
        multiplier = {
            "accum_deaths": 1.0 / 1000.0,
            "accum_years_of_life_lost": 1.0 / 1000.0,
            "proportion_seropositive": 100,
        }
        rounding = {"accum_deaths": 1, "accum_years_of_life_lost": 0, "proportion_seropositive": 0}
    # read the percentile
    median = round(multiplier[type] * val_50, rounding[type])
    lower = round(multiplier[type] * val_025, rounding[type])
    upper = round(multiplier[type] * val_975, rounding[type])
    if rounding[type] == 0:
        median = int(median)
        lower = int(lower)
        upper = int(upper)
    cell_content = f"{median} ({lower}-{upper})"
    return cell_content
def make_main_outputs_tables(uncertainty_dfs, per_capita=False):
    """
    This now combines Table 1 and Table 2.

    One row per (country, mode, duration); one column per output name
    understood by get_uncertainty_cell_value.  The CSV is written to
    FIGURE_PATH.

    :param uncertainty_dfs: dict mapping country -> uncertainty DataFrame
    :param per_capita: scale outputs per capita instead of absolute counts
    """
    countries = ["belgium", "france", "italy", "spain", "sweden", "united-kingdom"]
    column_names = [
        "country",
        "deaths_before",
        # BUG FIX: a missing comma in the original silently concatenated
        # "who_before" and "deaths_unmitigated" into one bogus column name,
        # dropping a column relative to the new_messaging variant.
        "who_before",
        "deaths_unmitigated",
        "deaths_opti_deaths",
        "deaths_opti_yoll",
        "who_by_jan",
        "yoll_before",
        "yoll_unmitigated",
        "yoll_opti_deaths",
        "yoll_opti_yoll",
        "sero_before",
        "sero_unmitigated",
        "sero_opti_deaths",
        "sero_opti_yoll",
    ]
    table = pd.DataFrame(columns=column_names)
    i_row = -1
    for country in countries:
        uncertainty_df = uncertainty_dfs[country]
        country_name = country.title() if country != "united-kingdom" else "United Kingdom"
        population = get_country_population_size(country_name) if per_capita else None
        for mode in MODES:
            for duration in DURATIONS:
                i_row += 1
                row_as_list = [country]
                for output in column_names[1:]:
                    row_as_list.append(
                        get_uncertainty_cell_value(
                            country, uncertainty_df, output, mode, duration, per_capita, population
                        )
                    )
                table.loc[i_row] = row_as_list
    filename = "output_table_per_capita.csv" if per_capita else "output_table.csv"
    file_path = os.path.join(FIGURE_PATH, filename)
    table.to_csv(file_path)
def print_who_deaths_per_capita(by="october"):
    """Print each country's WHO-reported deaths per million population.

    :param by: reference date key suffix, "october" or "january".
    """
    deaths_thousands = who_deaths[f"by_{by}"]
    for country in ["belgium", "france", "italy", "spain", "sweden", "united-kingdom"]:
        display_name = country.title() if country != "united-kingdom" else "United Kingdom"
        population = get_country_population_size(display_name)
        # Counts are in thousands; convert to deaths per million.
        print(int(deaths_thousands[country] * 1000 / population * 1.0e6))
def make_main_outputs_tables_new_messaging(uncertainty_dfs, per_capita=False):
    """
    This now combines Table 1 and Table 2 ("new messaging" column set,
    with total_* columns that include the baseline phase).

    One row per (country, mode, duration); the CSV is written to
    FIGURE_PATH.  Removed from the original: an unused country_names
    local, an unused enumerate index, a leftover debug print of every
    column name, and f-string prefixes on strings with no placeholders.

    :param uncertainty_dfs: dict mapping country -> uncertainty DataFrame
    :param per_capita: scale outputs per capita instead of absolute counts
    """
    countries = ["belgium", "france", "italy", "spain", "sweden", "united-kingdom"]
    column_names = [
        "country",
        "deaths_before",
        "who_before",
        "total_deaths_unmitigated",
        "total_deaths_opti_deaths",
        "total_deaths_opti_yoll",
        "who_by_jan",
        "yoll_before",
        "total_yoll_unmitigated",
        "total_yoll_opti_deaths",
        "total_yoll_opti_yoll",
        "sero_before",
        "sero_unmitigated",
        "sero_opti_deaths",
        "sero_opti_yoll",
    ]
    table = pd.DataFrame(columns=column_names)
    i_row = -1
    for country in countries:
        uncertainty_df = uncertainty_dfs[country]
        country_name = country.title() if country != "united-kingdom" else "United Kingdom"
        population = get_country_population_size(country_name) if per_capita else None
        for mode in MODES:
            for duration in DURATIONS:
                i_row += 1
                row_as_list = [country]
                for output in column_names[1:]:
                    row_as_list.append(
                        get_uncertainty_cell_value(
                            country, uncertainty_df, output, mode, duration, per_capita, population
                        )
                    )
                table.loc[i_row] = row_as_list
    filename = (
        "output_table_per_capita_new_messaging.csv"
        if per_capita
        else "output_table_new_messaging.csv"
    )
    file_path = os.path.join(FIGURE_PATH, filename)
    table.to_csv(file_path)
if __name__ == "__main__":
    # Build all output tables; the per-capita WHO print is kept for
    # occasional manual use.
    main()
    # print_who_deaths_per_capita()
|
from django.shortcuts import render, redirect
from .forms import ParkForm
from .models import Parks
from .filters import ParkFilter
def home(request):
    """Render the landing page listing every park."""
    all_parks = Parks.objects.all()
    return render(request, 'home.html', {'parks': all_parks})
def addPark(request):
    """Display the add-park form; create a park owned by the current user
    on a valid POST, otherwise re-render the form with its errors."""
    if request.method == 'POST':
        form = ParkForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the owner before the first save.
            new_park = form.save(commit=False)
            new_park.user = request.user
            new_park.save()
            return redirect('home')
    else:
        form = ParkForm()
    return render(request, 'parks/add_park.html', {'form': form})
def getParks(request):
    """List parks, filtered by any GET query parameters via ParkFilter.

    (The original's last line carried a stray " |" extraction artifact
    that broke the syntax; it is removed here.)
    """
    parks = Parks.objects.all()
    myFilter = ParkFilter(request.GET, queryset=parks)
    parks = myFilter.qs
    context = {'parks': parks, 'myFilter': myFilter}
    return render(request, 'parks/parks.html', context)
#/bin/python
# parse repo tree, collect todo's, generate todo file
# Unix tool inspiration
# grep, sed, cut, awk
import os
import os.path
import sys
import re
# single file grepping class
# single file grepping class
class Grepper:
    """Collect TODO annotations from a source tree and write a TODO file.

    Fixes over the original: file handles are closed via "with" (grep_file
    leaked its handle), each file is grepped once instead of twice in
    populate_todo, the TODO file is opened in text mode ("wb" + str writes
    break under Python 3), and regex_tester is a proper staticmethod (it
    previously lacked "self" and failed when called on an instance).
    """

    def __init__(self):
        # Kept for interface compatibility; not used by the methods below.
        self.matches = {}
        self.is_empty = True

    # pseudo grep method
    def grep_file(self, filename, pattern):
        """Return 'filename:lineno:text' for the first line matching
        *pattern*, or None when the file has no match."""
        with open(filename, "r") as fid:
            for line_cnt, line in enumerate(fid, start=1):
                if re.search(pattern, line):
                    return filename + ":" + str(line_cnt) + ':' + line.strip('\n')
        return None

    # traverse root directory, and list directories as dirs and files as files
    def walk_print(self, dir_in):
        """Return a list with the path of every file below *dir_in*."""
        file_tup = []
        for root, dirs, files in os.walk(dir_in):
            for file in files:
                file_tup.append(root + "/" + file)
        return file_tup

    # alt to grep_file, ex using regex
    @staticmethod
    def regex_tester():
        """Demo helper: keep only the purely-numeric sample strings."""
        r = re.compile('^[0-9]*$')
        string_list = ['123', 'a', '467', 'a2_2', '322', '21']
        return filter(r.match, string_list)

    # writing todo file
    def populate_todo(self, base_dir):
        """Write a TODO file (in the cwd) listing every TODO: occurrence
        found below *base_dir*."""
        pat = "TODO:"
        with open("TODO", "w") as fo:
            for file_mat in self.walk_print(base_dir):
                match = self.grep_file(file_mat, pat)  # grep once, not twice
                if match:
                    fo.write(match + '\n')
fo.close()
# main: Python 2 command-line entry point.
# Usage: autotodo.py <search_dir_name> — generates a TODO file in the cwd.
if (len(sys.argv) != 2):
    print "usage: " + sys.argv[0] + " <search_dir_name>"
    print 'Argument List:', str(sys.argv)
    sys.exit(2)
g0 = Grepper()
g0.populate_todo(sys.argv[1])
print "TODO file generated via Autotodo"
|
#coding:utf-8
import datetime
import json
import traceback
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http.request import QueryDict
from django.http.response import HttpResponse
from django.middleware.csrf import get_token
from django.shortcuts import render
# Create your views here.
from rest_framework import serializers
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import list_route
from rest_framework.pagination import PageNumberPagination
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ViewSet, ModelViewSet
from webapi.models import Application, Module, ApiDoc, DocumentResponse
from httputil import SuccCallReturn,FailCallReturn
from .exp_markdown import markdown_html
class EasyUiPagination(PageNumberPagination):
    """Pagination matching the easyui datagrid contract ({total, rows})."""

    # The actual page size is supplied by easyui's paginator at request
    # time through the "rows" query parameter.
    page_query_param = 'page'
    page_size_query_param = 'rows'

    def get_paginated_response(self, data):
        payload = {
            'total': self.page.paginator.count,
            'rows': data,
        }
        return Response(payload)
class ApplicationSerializer(ModelSerializer):
    """Serializer for Application records."""
    class Meta:
        model = Application
        read_only_fields=('create_date','user')
        # read_only_fields=('user',)
        # fields = '__all__'
    def create(self, validated_data):
        """Create an Application, stamping owner and creation date.

        NOTE(review): the owner is hard-coded to User id=1 here, while the
        viewset's create() attaches request.user instead — confirm which
        path is actually used and which owner is intended.
        """
        instance = Application(**validated_data)
        instance.user = User.objects.get(id=1)
        instance.create_date = datetime.datetime.now().date()
        instance.save()
        return instance
"""
虽然开启了csrf
'django.middleware.csrf.CsrfViewMiddleware',
但实际上django的response并没有产生csrftoken返回到前端,
这会影响前端ajax程序在发送POST,UPDATE,DELETE,PATCH无法发送有效的csrftoken而请求无效.
{% csrf_token %}通过form field传递的csrftoken另当别论。
要解决这个问题,请见 django.middle.ware.csrf 的 process_response()处理函数,其检查
request.META.get("CSRF_COOKIE_USED") 是否设置,未设置则不会返回csrf的cookie
1. get_token(request) 函数可以生成新的csrftoken的cookie,所以只需要调用一下get_token()即可
2. 编写新的middleware,从 CsrfViewMiddleware派生, process_response()中调用get_token()即可。
"""
class ApplicationViewSet(ModelViewSet):
    """CRUD endpoints for Application, paginated for the easyui grid.

    Removed from the original create(): a redundant function-level
    "import json" and a dead local (an unused json.dumps result).
    """

    serializer_class = ApplicationSerializer
    queryset = Application.objects.all()
    pagination_class = EasyUiPagination
    parser_classes = (FormParser,)

    def get_queryset(self):
        # Force a csrftoken cookie into the response (see the module note
        # on CsrfViewMiddleware) so ajax writes can send a valid token.
        get_token(self.request)
        return Application.objects.all().order_by('name')

    def create(self, request, *args, **kwargs):
        """Create an Application owned by the requesting user.

        Returns {'status': 1} on validation failure, otherwise
        {'status': 0, 'result': <new id>}.
        """
        ser = ApplicationSerializer(data=request.data)
        if not ser.is_valid():
            return Response({'status': 1})
        # Build the instance manually instead of ser.save() so the current
        # user and the creation date can be attached before saving.
        app = Application(**ser.validated_data)
        app.user = request.user
        app.create_date = datetime.datetime.now().date()
        app.save()
        return Response({'status': 0, 'result': app.id})

    def retrieve(self, request, *args, **kwargs):
        ser = self.get_serializer(self.get_object())
        return Response(ser.data)

    def update(self, request, *args, **kwargs):
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        if not serializer.is_valid():
            return FailCallReturn().httpResponse()
        serializer.save()
        return SuccCallReturn().httpResponse()

    def destroy(self, request, *args, **kwargs):
        self.get_object().delete()
        return SuccCallReturn().httpResponse()
class ModuleSerializer(ModelSerializer):
    """Serializer for Module; the parent Application is attached in the view."""
    # app_id = serializers.IntegerField()
    class Meta:
        model = Module
        read_only_fields = ('app',)
    # The commented-out code below used to validate app_id and attach the
    # Application inside the serializer; the viewset's create() performs
    # that lookup instead.
    # def validate_app_id(self,value):
    #     try:
    #         self.app = Application.objects.get(id= value)
    #     except:
    #         raise serializers.ValidationError('application not existed')
    #     return value
    # def create(self, validated_data):
    #     module = Module(**validated_data)
    #     module.app = self.app
    #     module.save()
    #     return module
class ModuleViewSet(ModelViewSet):
    """CRUD endpoints for Module, scoped to one Application (app_id)."""

    serializer_class = ModuleSerializer
    pagination_class = EasyUiPagination
    parser_classes = (FormParser,)

    def get_queryset(self):
        # Only the modules of the application named in the query string.
        return Module.objects.filter(
            app__id=self.request.query_params.get('app_id')).order_by('name')

    def create(self, request, *args, **kwargs):
        """Create a Module attached to the Application in app_id."""
        ser = ModuleSerializer(data=request.data)
        if not ser.is_valid():
            return FailCallReturn().httpResponse()
        try:
            m = Module(**ser.validated_data)
            # Attach the parent application before saving.
            m.app = Application.objects.get(id=request.data['app_id'])
            m.save()
            return SuccCallReturn().assign(m.id).httpResponse()
        except Exception:
            # The original bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception.
            traceback.print_exc()
            return FailCallReturn().httpResponse()

    def update(self, request, *args, **kwargs):
        instance = Module.objects.get(id=kwargs.get('pk'))
        ser = ModuleSerializer(instance, data=request.data)
        if not ser.is_valid():
            return FailCallReturn().httpResponse()
        ser.save()
        return SuccCallReturn().httpResponse()
class SheetSerializer(ModelSerializer):
    """Serializer exposing the ApiDoc model (all fields)."""
    class Meta:
        model = ApiDoc
class SheetViewSet(ModelViewSet):
    """CRUD plus markdown export for ApiDoc sheets.

    Fixes over the original: Python-2-only dict.has_key() replaced with
    "in"; the duplicated field-copy code in create()/update() extracted
    into _fill_doc(); bare "except:" narrowed to Exception; error paths
    that silently returned None now return an explicit failure response;
    and a stray " |" extraction artifact on the last line removed.
    """

    serializer_class = SheetSerializer
    pagination_class = EasyUiPagination
    queryset = ApiDoc.objects.all()
    # parser_classes = (FormParser,JSONParser)
    parser_classes = (JSONParser,)

    def get_queryset(self):
        rs = ApiDoc.objects.all()
        # dict.has_key() was removed in Python 3; use the "in" operator.
        if 'module_id' in self.request.query_params:
            rs = rs.filter(module__id=self.request.query_params.get('module_id', 0))
        return rs.order_by('name')

    @list_route()
    def headers(self, request):
        pass

    def _fill_doc(self, doc, data):
        """Copy the request payload onto *doc* (shared by create/update)."""
        doc.name = data.get('name')
        doc.ver = data.get('ver')
        doc.description = data.get('description')
        doc.url = data.get('url')
        doc.method = data.get('method')
        doc.comment = data.get('comment')
        doc.headers = json.dumps(data.get('headers'))
        # NOTE(review): "paramters" mirrors the model field spelling — confirm.
        doc.paramters = json.dumps(data.get('parameters'))
        doc.resp_status = data.get('resp_status')
        doc.resp_headers = json.dumps(data.get('resp_headers'))
        doc.resp_data = json.dumps(data.get('resp_data'))

    def create(self, request, *args, **kwargs):
        """Create an ApiDoc under the module given by module_id."""
        try:
            module = Module.objects.get(id=request.data.get('module_id'))
            doc = ApiDoc()
            doc.module = module
            self._fill_doc(doc, request.data)
            doc.save()
            return SuccCallReturn().assign(doc.id).httpResponse()
        except Exception:
            traceback.print_exc()
            return FailCallReturn().httpResponse()

    def update(self, request, *args, **kwargs):
        doc = self.get_object()
        self._fill_doc(doc, request.data)
        doc.save()
        return SuccCallReturn().httpResponse()

    @list_route(methods=['post'])
    def markdown(self, request):
        """Render the selected doc ids as markdown HTML."""
        # import pdfkit
        try:
            doc_ids = request.data
            html = markdown_html(doc_ids)
            # pdfkit.from_string(html, 'out.pdf')
            return Response(html)
        except Exception:
            traceback.print_exc()
            return FailCallReturn().httpResponse()
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True when s and t are isomorphic.

        Tracks the last-seen position (1-based) of each character in each
        string; the mapping is consistent iff paired characters always
        agree on their last-seen position (0 meaning "never seen").
        """
        if len(s) != len(t):
            return False
        last_seen_s, last_seen_t = {}, {}
        for position, (a, b) in enumerate(zip(s, t), start=1):
            if last_seen_s.get(a, 0) != last_seen_t.get(b, 0):
                return False
            last_seen_s[a] = position
            last_seen_t[b] = position
        return True
# Second variant, using a single forward mapping plus a reverse-image check.
# (At module level this redefinition shadows the class above, as before.)
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True when s and t are isomorphic.

        Maps each character of s to its counterpart in t; fails when a
        character of s maps to two targets, or when a character of t is
        already the image of a different character of s.

        BUG FIX: the original signature omitted ``self``, so calling the
        method on an instance bound the instance to ``s`` and broke.
        """
        if len(s) != len(t):
            return False
        word = {}
        for i in range(len(s)):
            ch1, ch2 = s[i], t[i]
            if ch1 in word:
                if word[ch1] != ch2:
                    return False
            else:
                # ch2 already taken by another source character -> not 1:1.
                if ch2 in word.values():
                    return False
                word[ch1] = ch2
        return True
|
from keras.models import load_model
from PIL import Image
import os
import numpy as np
import sys
def testmodel(file_name):
print os.path.dirname(__file__)
os.chdir(os.path.curdir)
model = load_model('pic_model.h5')
if not os.path.exists(os.path.join(os.path.dirname(__file__), "Uploads", file_name)):
print "Image does not exist"
else:
im = Image.open(os.path.join(os.path.dirname(__file__), "Uploads", file_name))
im = im.resize((84,28))
im = im.convert('L')
# newim = ImageEnhance.Contrast(im)
# newim = newim.enhance(2.5).save()
pixelData = im.getdata()
pixelData = np.asarray(pixelData, dtype=np.uint8).reshape((1, im.size[0], im.size[1], 1))
pixelData *= 1 / 255.
res = model.predict(pixelData)
# print res.shape
equation = ""
equation += "%d" %(np.argmax(res[0][:10]))
operator = np.argmax(res[0][10:14])
if operator == 0:
equation += "+"
if operator == 1:
equation += "-"
if operator == 2:
equation += "x"
if operator == 3:
equation += "/"
equation += "%d" %(np.argmax(res[0][14:]))
return equation
if __name__ == "__main__":
testmodel(sys.argv[1]) |
import win32gui, win32con
def winEnumHandler(hwnd, ctx):
    """EnumWindows callback: print and minimize every visible window.

    Removed the original's dead local (``Minimize``), whose
    GetForegroundWindow() result was never used.
    NOTE(review): as written this minimizes *every* visible window; the
    discarded foreground-window lookup suggests only the foreground window
    may have been intended — confirm.
    """
    if win32gui.IsWindowVisible(hwnd):
        print (hex(hwnd), win32gui.GetWindowText(hwnd))
        win32gui.ShowWindow(hwnd, win32con.SW_MINIMIZE)
# Walk every top-level window, invoking the handler on each one.
win32gui.EnumWindows( winEnumHandler, None )
|
"""
Service Announcement
Adapted from https://stackoverflow.com/questions/21089268/python-service-discovery-advertise-a-service-across-a-local-network
"""
import time
from localshit.utils import utils
from localshit.utils.utils import logging
class ServiceAnnouncement:
    """Announce this host on the network and register replying peers."""

    def __init__(self, hosts, socket_sender):
        self.hosts = hosts
        self.socket_sender = socket_sender
        self.own_address = utils.get_host_address()

    def announce_service(self, timeout=1):
        """Multicast our address, then wait *timeout* seconds for replies."""
        data = "%s:%s" % ("SA", self.own_address)
        self.socket_sender.send_message(data, type="multicast")
        logging.debug("SA: service announcement...")
        time.sleep(timeout)
        logging.info("SA: service announcement finished.")
        logging.info("Discovered hosts: %s" % self.hosts.sorted_ring)

    def handle_service_announcement(self, addr):
        """Register a peer's announcement and reply with our own address."""
        peer = addr[0]
        if peer != self.own_address:
            self.hosts.add_host(peer)
            self.hosts.form_ring(self.own_address)
            reply = "RP:%s" % self.own_address
            self.socket_sender.send_message(reply, peer, type="unicast")
|
import os
import numpy
from sklearn.preprocessing import scale
if __name__ == '__main__':
    # Source spectrograms and destination for their normalized copies.
    loadpath = 'D:/PythonProjects_Data/CMU_MOSEI/AudioPart/Step4SAME_SpectrumGeneration/'
    savepath = 'D:/PythonProjects_Data/CMU_MOSEI/AudioPart/Step5SAME_Normalization/'
    # Pass 1: load every frame of every file so the scaling statistics are
    # computed over the whole corpus at once.
    # NOTE(review): the entire corpus is held in memory simultaneously.
    totalData = []
    for partName in os.listdir(loadpath):
        for filename in os.listdir(os.path.join(loadpath, partName)):
            data = numpy.genfromtxt(fname=os.path.join(loadpath, partName, filename),
                                    dtype=float, delimiter=',')
            totalData.extend(data)
        print('Loading', partName, numpy.shape(totalData))
    print(numpy.shape(totalData))
    # Zero-mean / unit-variance per column across the whole corpus.
    totalData = scale(totalData)
    # Pass 2: re-read each file (for its row count) and write the matching
    # slice of the scaled array to the destination tree as CSV.
    startPosition = 0
    for partName in os.listdir(loadpath):
        # NOTE(review): raises if savepath/partName already exists (rerun).
        os.makedirs(os.path.join(savepath, partName))
        for filename in os.listdir(os.path.join(loadpath, partName)):
            data = numpy.genfromtxt(fname=os.path.join(loadpath, partName, filename),
                                    dtype=float, delimiter=',')
            writeData = totalData[startPosition:startPosition + len(data)]
            with open(os.path.join(savepath, partName, filename), 'w') as file:
                for indexX in range(numpy.shape(writeData)[0]):
                    for indexY in range(numpy.shape(writeData)[1]):
                        if indexY != 0: file.write(',')
                        file.write(str(writeData[indexX][indexY]))
                    file.write('\n')
            startPosition += len(data)
            print('Writing', partName, startPosition)
|
#!/usr/bin/python
#Following sine curve.
from dxl_mikata import *
import time
import math
import numpy as np
#Setup the device
# Setup the device (Python 2 script driving a Mikata robot arm).
mikata= TMikata()
mikata.Setup()
mikata.EnableTorque()
# Move to the initial pose (one target angle per joint, in radians).
p_start= [0, 0, 1, -1.3, 0]
mikata.MoveTo({jname:p for jname,p in zip(mikata.JointNames(),p_start)})
time.sleep(0.5)
print 'Current position=',mikata.Position()
# Follow a sine curve around the start pose: per-joint amplitude (gain)
# and angular velocity (angvel), sampled every 25 ms over one period.
gain= [0.45, 0.15, 0.15, 0.7, 0.7]
angvel= [1, 2, 1, 3, 2]
for t in np.mgrid[0:2*math.pi:0.05]:
    p_trg= [p0 + g*math.sin(w*t) for p0,g,w in zip(p_start,gain,angvel)]
    #print p_trg
    # Non-blocking so the loop keeps streaming targets at a fixed rate.
    mikata.MoveTo({jname:p for jname,p in zip(mikata.JointNames(),p_trg)}, blocking=False)
    time.sleep(0.025)
    #print 'Current position=',dxl.Position()
#mikata.DisableTorque()
mikata.Quit()
|
#Pay calculator with time and a half
def overtime_pay():
try:
hrs = float(raw_input('Hours?'))
rate = float(raw_input('Rate?'))
rate_overtime = 1.5*rate
if hrs < 40:
pay = hrs*rate
elif hrs > 40:
pay = 40*rate + (hrs - 40)*rate_overtime
print 'Earned $', pay
except:
print 'Invalid Entry'
overtime_pay()
|
# @Title: 柱状图中最大的矩形 (Largest Rectangle in Histogram)
# @Author: 2464512446@qq.com
# @Date: 2020-11-16 15:33:30
# @Runtime: 64 ms
# @Memory: 15.4 MB
class Solution:
    def largestRectangleArea(self, heights):
        """Return the area of the largest rectangle in the histogram.

        Monotonic-stack solution, O(n) time / O(n) space: sentinels (0)
        are added at both ends; whenever the current bar is lower than
        the stack top, the popped bar's maximal rectangle is resolved.

        Fixes over the original: a duplicated, garbled tail
        ("eight*cur_width,res) ...") after the return made the snippet a
        SyntaxError, and the List[int] annotation referenced an
        un-imported typing.List (NameError at class creation); types are
        documented here instead.

        :param heights: list of non-negative bar heights (ints)
        :returns: the largest rectangular area, as an int
        """
        res = 0
        heights = [0] + heights + [0]
        stack = [0]
        size = len(heights)
        for i in range(1, size):
            while heights[stack[-1]] > heights[i]:
                cur_height = heights[stack.pop()]
                # Width spans between the new stack top and i, exclusive.
                cur_width = i - stack[-1] - 1
                res = max(res, cur_height * cur_width)
            stack.append(i)
        return res
|
import numpy as np
import matplotlib.pyplot as plt
from DynamicSystems.cart_pole import CartPole
from utils import simulate_system
# Physical constants for the cart-pole model.
parameters = {
    'mass_cart': 1,
    'mass_pendulum': 1,
    'pendulum_length': 1,
    'gravity': 9.8
}
cp = CartPole(parameters)
# A state near the inverted position (theta = pi with a tiny velocity).
# NOTE(review): the state layout is inferred from the pi entry — confirm
# the component ordering against the CartPole class.
test_state = np.array([0, np.pi, 0, -.01])
# Exercise the model API: derivative, jacobian and linearization at t=0
# with zero input.
deriv = cp.derivative(0, test_state, 0)
print(deriv)
jac = cp.jacobian(0, test_state, 0)
print(jac)
A, B = cp.linearized_dynamics(0, test_state, 0)
print(A)
print(B)
def passive(state):
    """Zero-input controller: let the system evolve freely."""
    return 0
# Simulate 5 seconds of the uncontrolled system and plot the trajectory.
output = simulate_system(cp, test_state, passive, 5)
plt.plot(output['t'], output['state'])
plt.show()
|
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        Return the length of the longest substring of *s* that has no
        repeating characters.

        Sliding window over the last-seen index of each character:
        O(n) time, O(min(n, alphabet)) space. The original rebuilt a
        list (with a linear index() search and a slice copy) on every
        duplicate, which is accidentally quadratic in the worst case.

        :type s: str
        :rtype: int
        """
        last_index = {}   # char -> most recent index in s
        best = 0
        window_start = 0  # start of the current duplicate-free window
        for i, ch in enumerate(s):
            if ch in last_index and last_index[ch] >= window_start:
                # ch repeats inside the window: slide past its previous spot.
                window_start = last_index[ch] + 1
            last_index[ch] = i
            best = max(best, i - window_start + 1)
        return best
|
import copy
import math
import numpy as np
import torch
from torch.nn import Parameter, init
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
    """TD3 deterministic policy: state -> action in [-max_action, max_action].

    Layer attribute names (l1/l2/l3) are kept so saved state dicts and
    target-network copies remain compatible.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two hidden layers of 256 units, as in the TD3 reference code.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        # tanh squashes to [-1, 1]; scale to the environment's action range.
        return self.max_action * torch.tanh(self.l3(hidden))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, dueling, noise):
super(Critic, self).__init__()
self.n_actions = action_dim
self.dueling = dueling
self.noise = noise
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
if dueling and noise:
print("dn")
# We separate output stream into two streams
# The one that calculates V(s)
self.l3_1 = nn.Linear(256, 128)
self.l4_1 = NoisyLinear(128, 1)
# The one that calculates A(s, a) - advantage
self.l3_2 = nn.Linear(256, 128)
self.l4_2 = NoisyLinear(128, action_dim)
elif dueling:
print("d")
# We separate output stream into two streams
# The one that calculates V(s)
self.l3_1 = nn.Linear(256, 128)
self.l4_1 = nn.Linear(128, 1)
# The one that calculates A(s, a) - advantage
self.l3_2 = nn.Linear(256, 128)
self.l4_2 = nn.Linear(128, action_dim)
elif noise:
print("n")
self.l3 = NoisyLinear(256, 1)
else:
print("nothing")
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
if dueling and noise:
# We separate output stream into two streams
# The one that calculates V(s)
self.l6_1 = nn.Linear(256, 128)
self.l7_1 = NoisyLinear(128, 1)
# The one that calculates A(s, a) - advantage
self.l6_2 = nn.Linear(256, 128)
self.l7_2 = NoisyLinear(128, action_dim)
elif dueling:
# We separate output stream into two streams
# The one that calculates V(s)
self.l6_1 = nn.Linear(256, 128)
self.l7_1 = nn.Linear(128, 1)
# The one that calculates A(s, a) - advantage
self.l6_2 = nn.Linear(256, 128)
self.l7_2 = nn.Linear(128, action_dim)
elif noise:
self.l6 = NoisyLinear(256, 1)
else:
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
if self.dueling:
val = F.relu(self.l3_1(q1))
val = self.l4_1(val)
adv = F.relu(self.l3_2(q1))
adv = self.l4_2(adv)
# Q(s, a) = V(s) + A(s, a) - (1 / |A| * sumA(s, a'))
q1 = val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.n_actions)
else:
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
if self.dueling:
val = F.relu(self.l6_1(q2))
val = self.l7_1(val)
adv = F.relu(self.l6_2(q2))
adv = self.l7_2(adv)
# Q(s, a) = V(s) + A(s, a) - (1 / |A| * sumA(s, a'))
q2 = val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.n_actions)
else:
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action, dueling):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
if dueling:
val = F.relu(self.l3_1(q1))
val = self.l4_1(val)
adv = F.relu(self.l3_2(q1))
adv = self.l4_2(adv)
# Q(s, a) = V(s) + A(s, a) - (1 / |A| * sumA(s, a'))
q1 = val + adv - adv.mean(1).unsqueeze(1).expand(state.size(0), self.n_actions)
else:
q1 = self.l3(q1)
return q1
def reset_noise(self):
    """Resample the factorised noise of every NoisyLinear layer in use."""
    if not self.noise:
        return
    if self.dueling:
        for layer in (self.l4_1, self.l4_2, self.l7_1, self.l7_2):
            layer.reset_noise()
    else:
        self.l3.reset_noise()
        self.l6.reset_noise()
class TD3(object):
    """Twin Delayed DDPG (TD3) agent with optional dueling critic heads,
    NoisyNet exploration layers and prioritized experience replay (PER)."""

    def __init__(
        self,
        state_dim,
        action_dim,
        max_action,
        discount=0.99,
        tau=0.005,
        dueling=False,
        noisy=False,
        per=False,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2
    ):
        # Actor network and its slow-moving target copy.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        # Twin critic (Q1/Q2 heads) and its target copy.
        self.critic = Critic(state_dim, action_dim, dueling, noisy).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        self.max_action = max_action
        self.discount = discount          # reward discount factor (gamma)
        self.tau = tau                    # Polyak averaging rate for target nets
        self.dueling = dueling
        self.per = per
        self.policy_noise = policy_noise  # std of target-policy smoothing noise
        self.noise_clip = noise_clip      # clipping range for that noise
        self.policy_freq = policy_freq    # actor/target update every N critic steps
        self.noisy = noisy
        self.total_it = 0                 # training-iteration counter
        print(f"Dueling: {dueling}, PER: {per}")

    def select_action(self, state):
        """Return the deterministic policy action for a single (flat) state."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, batch_size=100):
        """Run one TD3 training iteration on a minibatch from replay_buffer."""
        self.total_it += 1
        # Sample replay buffer
        # NOTE(review): PER and uniform buffers unpack their fields in a
        # different order (reward/next_state swapped) — confirm both sample()
        # implementations really return these orders.
        if self.per:
            idxs, state, action, reward, next_state, not_done, weights = replay_buffer.sample(batch_size)
        else:
            state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
        with torch.no_grad():
            # Re-sample target-critic noise before computing targets
            if self.noisy:
                # self.actor_target.reset_noise()
                self.critic_target.reset_noise()
            # Select action according to policy and add clipped noise
            noise = (
                torch.randn_like(action) * self.policy_noise
            ).clamp(-self.noise_clip, self.noise_clip)
            next_action = (
                self.actor_target(next_state) + noise
            ).clamp(-self.max_action, self.max_action)
            # Compute the target Q value (clipped double-Q: min of the twins)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q
        # Get current Q estimates
        current_Q1, current_Q2 = self.critic(state, action)
        # Compute critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        if self.per:
            # NOTE(review): mse_loss already reduces to a scalar here, so the
            # importance weights scale the whole batch loss rather than
            # weighting per-sample errors — confirm this is intended.
            (weights * critic_loss).mean().backward()  # Backpropagate importance-weighted minibatch loss
        else:
            critic_loss.backward()
        self.critic_optimizer.step()
        if self.per:
            # Update priorities of sampled transitions with their |TD error|
            errors = np.abs((current_Q1 - target_Q).detach().cpu().numpy())
            replay_buffer.update_priorities(idxs, errors)
        # Delayed policy updates
        if self.total_it % self.policy_freq == 0:
            # Compute actor loss (ascend Q1 of the actor's own action)
            actor_loss = -self.critic.Q1(state, self.actor(state), self.dueling).mean()
            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Update the frozen target models (Polyak averaging with rate tau)
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def save(self, filename):
        """Persist actor/critic weights and optimizer states under `filename_*`."""
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")

    def load(self, filename):
        """Restore weights/optimizers saved by save(); targets are rebuilt as copies."""
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)

    def reset_noise(self):
        """Resample exploration noise of the online critic's noisy layers."""
        if self.noisy:
            # self.actor.reset_noise()
            self.critic.reset_noise()
# Factorised NoisyLinear layer with bias (NoisyNet-style exploration).
class NoisyLinear(nn.Module):
    """Linear layer whose weights and biases are perturbed by learnable,
    factorised Gaussian noise: w = mu + sigma * eps. Noise is applied only
    in training mode; evaluation uses the plain means."""

    def __init__(self, in_features, out_features, std_init=0.5):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable means/std-devs; epsilon buffers hold the sampled noise.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialise means uniformly and std-devs to a scaled constant."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def _scale_noise(self, size):
        # f(x) = sign(x) * sqrt(|x|) applied to standard Gaussian samples.
        sample = torch.randn(size)
        return sample.sign() * sample.abs().sqrt()

    def reset_noise(self):
        """Resample factorised noise: eps_w = f(eps_out) outer f(eps_in)."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(torch.outer(eps_out, eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, input):
        if not self.training:
            # Evaluation: behave like an ordinary linear layer on the means.
            return F.linear(input, self.weight_mu, self.bias_mu)
        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(input, noisy_weight, noisy_bias)
|
def splitArr(arr, n, k):
    """Rotate the first n elements of arr left by k positions, in place.

    Improvements over the original: the O(n*k) repeated one-step rotation is
    replaced by a single O(n) slice assignment; k may exceed n (taken mod n);
    empty input no longer raises IndexError.
    """
    if n <= 0:
        return  # nothing to rotate
    k %= n  # rotating by n is the identity
    arr[:n] = arr[k:n] + arr[:k]
# --- Driver: read a list, rotate it by a user-chosen position, print it ---
n = int(input('Enter the no. of elements in the list'))
arr = []
for _ in range(n):  # fix: original iterated over undefined name 'm' (NameError)
    c = int(input('Enter the element'))
    arr.append(c)
position = int(input('Enter the position'))
splitArr(arr, n, position)
for i in range(0, n):
    print(arr[i], end=' ')
import torch
import torch.nn as nn
from torch.nn import functional as F
import torchvision.models as models
import numpy as np
from PIL import Image
import mynet
def main():
    """Dump the parameters of a trained MNIST CNN to ../params.txt as text.

    Layer layout implied by the loops below: conv1 16x1x3x3 (+16 biases),
    conv2 32x16x3x3 (+32 biases), fc 10x3872 (+10 biases).
    NOTE(review): the output file `f` is never closed or flushed explicitly;
    the final write depends on interpreter-exit cleanup — consider `with`.
    """
    path = '/home/zzl/zzlWorkspace/NNInference/'
    # Loads the full pickled model object (not just a state_dict).
    module = torch.load(path+'MNIST/module/my_mnist.pkl')
    '''
    test_data = Image.open(path+"test/0.png")
    test_data = np.array(test_data)
    test_data = test_data[np.newaxis, np.newaxis,:]
    tensor_test = torch.from_numpy(test_data).float()
    torch.no_grad()
    output = module(tensor_test.cuda())
    predict = output.data.max(1)[1]
    params = list(module.named_parameters())
    conv1bias = params[1][1].cpu().detach().numpy()
    #print(conv1bias)
    '''
    # named_parameters() order is relied on: [conv1.w, conv1.b, conv2.w,
    # conv2.b, fc.w, fc.b] — assumes the model defines layers in that order.
    params = list(module.named_parameters())
    f = open('../params.txt', 'w')
    # conv1 weights: 16 filters, single input channel, 3x3 kernels.
    conv1weight = params[0][1].cpu().detach().numpy()
    print(conv1weight[0,0,0,0])
    for i in range(16):
        for j in range(3):
            for k in range(3):
                f.write(str(conv1weight[i,0,j,k])+' ')
    f.write('\n')
    # conv1 biases (16 values).
    conv1bias = params[1][1].cpu().detach().numpy()
    print(conv1bias[0])
    for i in range(16):
        f.write(str(conv1bias[i])+' ')
    f.write('\n')
    # conv2 weights: 32 filters over 16 input channels, 3x3 kernels.
    conv2weight = params[2][1].cpu().detach().numpy()
    for i in range(32):
        for j in range(16):
            for k in range(3):
                for m in range(3):
                    f.write(str(conv2weight[i,j,k,m])+' ')
    conv2bias = params[3][1].cpu().detach().numpy()
    f.write('\n')
    # conv2 biases (32 values).
    for i in range(32):
        f.write(str(conv2bias[i])+' ')
    f.write('\n')
    # Fully-connected layer: 10 classes x 3872 inputs.
    fcweight = params[4][1].cpu().detach().numpy()
    for i in range(10):
        for j in range(3872):
            f.write(str(fcweight[i,j])+' ')
    f.write('\n')
    fcbias = params[5][1].cpu().detach().numpy()
    for i in range(10):
        f.write(str(fcbias[i])+' ')

if __name__ == "__main__":
    main()
|
import os
import sys
import argparse
from colorama import Fore
from lib.core.banner import BANNER
from lib.core import utils
from lib.core import log
# globals
found = 0
def main():
    """CLI entry point for the phpvuln scanner.

    Scans a PHP project directory (-p) or a single file (-f) for the selected
    vulnerability classes and reports each match via log.found().
    NOTE(review): parser.error is replaced with log.error, so argparse-level
    failures are routed through the project logger — presumably log.error
    exits the process; confirm, since later code assumes it does not return.
    """
    vuln_classes = utils.get_vulnerability_classes()
    # (display name, key name) pairs for every available vulnerability class.
    vulns_list = [(_class.name, _class.keyname) for _class in vuln_classes]
    print(BANNER)
    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
    parser.error = log.error
    parser.add_argument('-p', '--path', help='php project path', dest='path', metavar='')
    parser.add_argument('-f', '--file', help='specific file to check', dest='file', metavar='')
    # Default: check every known vulnerability class.
    parser.add_argument('-v', '--vulns', help='common vulnerabilities to look for. Default: all', dest='included', metavar='', default=','.join(x[1] for x in vulns_list))
    parser.add_argument('--exclude', help='exclude common vulnerabilities', dest='excluded', metavar='')
    parser.add_argument('--list-vulns', help='list common vulnerabilities', dest='list_vulns', action='store_true')
    args = parser.parse_args()
    # No arguments at all: show usage and quit.
    if len(sys.argv) < 2:
        parser.print_usage()
        exit()
    if args.list_vulns:
        print('list of valid vulnerabilities:')
        print('\n'.join(f'  {Fore.YELLOW}{x[1]:<8}{Fore.RESET}{x[0]}' for x in vulns_list))
        exit(0)
    if not args.path and not args.file:
        log.error('missing mandatory option: -p/--path or -f/--file')
    # -p takes precedence over -f when both are given.
    if args.path:
        args.file = None
        if not os.path.exists(args.path) or not os.path.isdir(args.path):
            log.error('directory not found')
    else:
        if not os.path.exists(args.file) or not os.path.isfile(args.file) or not args.file.endswith('.php') and not args.file.endswith('.html'):
            log.error('php file not found')
    if not args.included:
        log.error('no vulnerabilities to check is selected')
    included_vulns = args.included.lower().split(',')
    excluded_vulns = args.excluded.lower().split(',') if args.excluded else []
    # Validate the excluded names, then drop them from the included set.
    for vuln in excluded_vulns:
        if not [_class for _class in vuln_classes if _class.keyname == vuln]:
            log.error(f'unrecognized common vulnerability: {vuln}')
            exit(0)
        included_vulns.remove(vuln)
    # Validate the remaining included names.
    for vuln in included_vulns:
        if not [_class for _class in vuln_classes if _class.keyname == vuln]:
            log.error(f'unrecognized common vulnerability: {vuln}')
            exit(0)
    global found
    if args.path:
        # Walk the project tree, scanning every .php/.html file.
        for root, _, directory in os.walk(args.path):
            for file in directory:
                if not file.endswith('.php') and not file.endswith('.html'):
                    continue
                file_path = os.path.join(root, file)
                for vuln in included_vulns:
                    Vulnerability = [_class for _class in vuln_classes if _class.keyname == vuln][0]
                    vuln_obj = Vulnerability(file_path)
                    for line, no, vuln_part in vuln_obj.find():
                        # Trim trailing spaces from the reported source line.
                        while line.endswith(' '):
                            line = line[:-1]
                        log.found(file_path, line, no, vuln_part, vuln_obj.name)
                        found += 1
    else:
        # Single-file mode.
        for vuln in included_vulns:
            Vulnerability = [_class for _class in vuln_classes if _class.keyname == vuln][0]
            vuln_obj = Vulnerability(args.file)
            for line, no, vuln_part in vuln_obj.find():
                while line.endswith(' '):
                    line = line[:-1]
                log.found(args.file, line, no, vuln_part, vuln_obj.name)
                found += 1
    if found > 0:
        log.info(f'phpvuln finished with {Fore.GREEN}{found} {Fore.RESET}potential vulnerabilit{"y" if found == 1 else "ies"} found')
    else:
        log.info(f'phpvuln finished, but no potential vulnerabilities were found')

if __name__ == '__main__':
    main()
|
class Solution(object):
    """LeetCode 49: group words that are anagrams of each other."""

    def groupAnagrams(self, strs):
        """Return the anagram groups of strs, each group sorted alphabetically.

        Groups appear in order of first occurrence of their anagram key
        (the word's characters in sorted order).
        """
        buckets = {}
        for word in strs:
            key = ''.join(sorted(word))
            buckets.setdefault(key, []).append(word)
        return [sorted(group) for group in buckets.values()]
|
import time
import helper
import process
import smali_parser
# improvement - return variable - possible ?
def inject_code(file_path, file_metadata):
    '''
    Edit the methods of a .smali file, injecting a call to
    Landroid/MyCustomClass;->add_trace(...) before each valid return so the
    method's "class->method" signature is logged at runtime. The file is read
    into a buffer, rewritten line by line, then written back in place.

    :param file_path: path of the .smali file
    :param file_metadata: dictionary containing the metadata of the methods
        inside of a .smali file
    :type file_path: str
    :type file_metadata: dict

    NOTE(review): method_data is a positional tuple from helper.get_method_data;
    from usage below, [0]=class name, [1]=method name, [2]=registers,
    [3]=used-register info, [4]=already-treated flag, [5]=status colour —
    confirm against helper's implementation.
    '''
    method_data = []
    buffer = []
    is_inside = False  # True while we are between .method and .end method
    with open(file_path, "r") as file:
        for line in file:
            # We get the data concerning the method and mark it as a treated method
            if (('.method' in line) and (smali_parser.is_valid_class_or_method_directive(line)) and (is_inside == False)):
                is_inside = True
                method_data = helper.get_method_data(smali_parser.get_class_or_method_name(line), file_metadata)
                local_registers = helper.get_local_registers(method_data[2])
                local_valid_registers = helper.get_local_valid_registers(local_registers, method_data[3])
                my_message = (method_data[0] + '->' + method_data[1])
                buffer.append(line)
            # No free register available: bump .locals by one so the injected
            # const-string below has a register to use (only when <= 15 locals,
            # method not yet treated and marked 'green').
            elif (('.locals' in line) and (smali_parser.is_valid_register_directive(line)) and (len(local_valid_registers) < 1)
                  and (len(method_data[2]) <= 15) and (method_data[4] == False) and (method_data[5] == 'green') and (is_inside == True)):
                buffer.append('\t# has been edited by smali-code-injector on {0} \n' .format(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())))
                buffer.append('\t.locals {0} \n' .format(len(local_registers) + 1))
            # Before each valid return, inject the trace call.
            elif (('return' in line) and (smali_parser.is_valid_return_directive(line)) and (is_inside == True) and (method_data[5] == 'green')
                  and (method_data[4] == False)):
                if (len(local_valid_registers) >= 1):
                    # A free register exists: use it directly.
                    buffer.append('\t# has been edited by smali-code-injector on {0} \n' .format(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())))
                    buffer.append('\tconst-string {0}, "{1}" \n' .format(local_valid_registers[0], (method_data[0] + '->' + method_data[1])))
                    buffer.append('\tinvoke-static {%s}, Landroid/MyCustomClass;->add_trace(Ljava/lang/String;)V \n' %(local_valid_registers[0]))
                    buffer.append(line)
                elif ((len(local_valid_registers) < 1) and (len(method_data[2]) <= 15)):
                    # Use the extra register added by the .locals bump above.
                    buffer.append('\tconst-string v{0}, "{1}" \n' .format(len(local_registers), (method_data[0] + '->' + method_data[1])))
                    buffer.append('\tinvoke-static {v%d}, Landroid/MyCustomClass;->add_trace(Ljava/lang/String;)V \n' %(len(local_registers)))
                    buffer.append(line)
                else:
                    buffer.append(line)
            elif (('.end method' in line) and (is_inside == True) and (method_data[4] == False)):
                # Leaving the method: reset the per-method state.
                is_inside = False
                method_data = []
                buffer.append(line)
            else:
                buffer.append(line)
    # Write the (possibly modified) buffer back over the original file.
    with open(file_path, "w") as file:
        for line in buffer:
            file.write(line)
|
#!/usr/bin/python3.5
#coding: utf8
"""
sudo get-apt install python3
sudo apt-get install python3-pip
python3 -m pip --version
"""
print("~~~~~~~~~~~~~~ Start ~~~~~~~~~~~~~~\n")
from random import randint
from libery import get_list
from libery import firatFan
firatFan()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ None ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
None - singlton
None == None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ print ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print(2)
line = "Hello world"
for i in line:
print(i, end='\n')
# print(i, end='')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ dir ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import hello
dir(hello)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ type(element) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
type(element)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print(int(4 ** 0.5))
print(int(2 ** 2))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Int ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x = 42
type(x)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Float ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x = 42.23
type(x)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ complex ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x = 42j
type(x)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ String ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// - целое значение от деления
/ - чесное деления (могут быть ошибки)
% - целочисленный остаток от деления
строка - неизменяемый объект
+ - конкатенация
sub = ord('a')
sub = chr(13)
b"hello world" # <class 'bytes'>
b"hello world".decode("utf-8")
sub = s.find('a')
sub = len(s)
sub = s.count('ab')
sub = s.strip() # delete ' ' in start adn end
sub = s.replace('a', 'z')
'aaa'*3 - aaaaaaaaa
sub = s.replace('a', 'z')[1:3]
print(sub)
x = "Hello world"
for char in range(1,256):
print(ord(chr(char)), "\t", " - ", "\t" , chr(char))
x = input("Input: ")
print("\n", x*3)
string = ""
for elem in range(97,123):
string += chr(elem)
#if elem != 122:
# string += '_'
if elem % 2 == 0:
string += ' '
print('String - ', string)
print('len - ', len(string))
print('string[:] - ', string[:])
print('string[3] - ', string[3])
print('string[3:5] - ', string[3:5])
print('string[3:15:2] - ', string[3:15:2])
print('string[:13] - ', string[:13])
print('string[13:] - ', string[13:])
print('string[::-1] - ', string[::-1])
print('string[::3] - ', string[::3])
sab7 = string.split(' ')
for elem in sab7:
print(elem)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for i in range(0,10,2):
print(i)
string = "Hello world"
print(string.replace('', '\n'))
string = "Hello world"
for i in range(len(string)):
print(string[i])
string = "Hello world"
for i in string:
#print(i)
print(i, end='-')
sum = 0
count = 10000
#for i in range(1,19,2):
for i in range(0, count):
sum += randint(1,100)
#linePref = str(i) + ') '
#print(linePref,randint(1,13))
print(sum/count)
print(sum//count)
print(sum%count)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ function ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def hello(N, M, symbol):
    """Print M rows, each consisting of `symbol` repeated N times."""
    row = symbol * N + '\n'
    print(row * M)
def exeption_pass():
pass
print(exeption_pass())
def exeption_return():
return 1
print(exeption_return())
hello(2,3,'$')
def safe_div(x, y):
    """Do a safe division :-) for fun and profit"""
    if y == 0:
        print("Yippie-kay-yay, motherf___er!")
        return None
    quotient = x / y
    print(quotient)
    return quotient
safe_div(10, 1)
print(safe_div.__doc__)
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm."""
    while a:
        a, b = b % a, a  # parallel assignment keeps the pair consistent
    return b
print(gcd(21, 14))
print(gcd.__doc__)
def hello():
""" I am documentation """
return 42
print(hello.__doc__)
print(hello())
def min(x, y):
    """Return the smaller of x and y (intentionally shadows the builtin)."""
    if x < y:
        return x
    return y
print(min(10, 24))
print(min(x=10, y=11))
# def min(*args): # type(args) == tuple.
def min(first=float("inf"), *args):
    """Return the smallest of the given arguments.

    Bug fix: the original initialised the running minimum to +inf and only
    scanned *args, so `first` was never considered — min(-5, 12, 13)
    returned 12, contradicting the `# -5` comment at its call site. `first`
    now seeds the minimum; giving it a default of +inf also makes the
    documented `min() == inf` call valid instead of a TypeError.
    """
    res = first
    for arg in args:
        if arg < res:
            res = arg
    return res
print(min(-5, 12, 13)) # -5
print(min()) # inf
def bounded_min(first, *args, lo=float("-inf"), hi=float("inf")):
    """Return the smallest argument strictly inside (lo, hi).

    Falls back to hi when no argument qualifies, and the result is clamped
    from below by lo.
    """
    best = hi
    for candidate in (first, ) + args:
        if lo < candidate < hi and candidate < best:
            best = candidate
    return max(best, lo)
bounded_min(-5, 12, 13, lo=0, hi=255) # 12
def unique(iterable, seen=None):
    """Return items of iterable in order, skipping duplicates and anything
    already present in `seen` (an optional iterable of known items)."""
    seen = set(seen or [])  # None (and any falsy value) means "nothing seen"
    result = []
    for element in iterable:
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
xs = [1, 1, 2, 3]
unique(xs) # [1, 2, 3]
def flatten(xs, *, depth=None):
pass
flatten([1, [2], 3], 2)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# < > == <= >= != and or
# False 0 ''
# True 0-9 'a' 'a...z'
x = int(input('Enter number: '))
if x > 0 :
print('Yes')
elif x < 0 :
print('No')
else:
print('Maby')
x = True if 1 > 0 else False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Input ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
a = int(input())
if a % 2 == 0:
print("even")
else:
print("odd")
x = True
y = False
#if x or y :
#if x and not y :
if not y :
print('Yes')
print('Insert your name: ', end='')
name = input()
if len(name) == 0 :
print('This is a bad name')
else:
print('Hello ',name)
name = input('Enter you name: ') or 'Guest'
print('Hello '+name)
s = input()
if s == s[::-1]:
print('Poli')
else:
print('Ne poli')
print('1) ', end = '')
a = int(input())
print('2) ', end = '')
b = int(input())
if b :
print(a // b )
else:
print('Bad second value')
year = int(input('Enter number: '))
if (year % 4 == 0 and year % 100 !=0) or (year % 400 == 0):
print('Yes (visikosnii)')
else:
print('No (ne visokosnii)')
number = int(input('Enter number(1-10): '))
if number > 0 and number < 4 :
print('I'*number)
elif number == 4:
print('IV')
elif number == 5:
print('V' + 'I'*(number % 5))
elif number == 9:
print('IX')
elif number == 10:
print('X')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
x = 0
while x == 0 :
y = int(input('Insert numbber (1-14): '))
if 0 < y < 15 :
x = y
if x != 0 :
while x < 15 :
print(x)
x += 1
# Perevod v dwoi4nuiu sistemu
x = 12
line = ''
while x != 0:
# x = x // 2
# print(x % 2, end='')
line +=str(x % 2)
x //= 2
print('Na4alo - \t\t',line)
print('Preobrazovanoe - \t',line[::-1])
# Proverka 4isla na prostoio
x = 127
d = 2
while x % d != 0 :
d += 1
if d == x:
print('prime (prostoe)')
else:
print('composite (ne prostoe)')
# razlo#it 4islo na mno#iteli
x = 1000000003
d = 2
while x > 1 :
if x % d == 0:
print(d)
x //= d
else:
d += 1
# кортеж -> список -> list()
# список -> кортеж -> tuple()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tuple (кортежи) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# используется для представления неизменяемой последовательности разнородных объектов
# x = tuple('sdfsdf')
t = (2, 2.05, "Hello")
print(t)
# (2, 2.0499999999999998, 'Hello')
(a, b, c) = t
z, y, x = t
print(a,b,c)
print(z, y, x)
# 2 2.05 Hello
# 2 2.05 Hello
a=1
b=2
a,b=b,a
print(a,b)
# 2 1
x = 12,
print(x)
# (12,)
date = ("year", 2015)
len(date)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ list (списки) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# a = []
# b = [2, 2.05, "Hello"]
# b = list("Hello")
# 1
my_list = [5,1,4,100,-7]
# 2
my_list[3] = 17
# 3
my_list = list(range(1, 10))
list( range(1, 10, 3) )
list( "..." )
list( map )
my_list = [0]*10
# 4
my_list.append(123)
len(my_list)
' '.join(list(map(str,my_list))) # implode elements of a - list of string
my_list.index(4) # return index "0"
2 in my_list # terurt True or False
2 not in my_list
my_list.count(0) # количество "0"
my_list.sort()  # сортировка (ascending, in place)
my_list.sort(reverse=True)  # fix: keyword was misspelled `revers`, a TypeError
del my_list[1] # del first elem
my_list = [1] + my_list # add
my_list[:3] + [2] + my_list[3:] # insert
my_list[1:2] = [7, 7, 7] # reaplece
my_list[1:2] = []
my_list[::-1] # revers
min(my_list)
max(my_list)
sum(my_list)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Словарь ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# (хэш, предопределенный массив) – изменяемая структура данных, предназначенная для хранения элементов вида ключ: значение
h0 = {1,2,3} # set() hesh-set
h1 = {1:"one", 2:"two", 3:"three"} # dict()
h2 = {0:"zero", 5:"five"}
h3 = {"z":1, "y":2, "x":3}
h3["b"] = 4
print(h1)
print(h1[2])
print(h3["z"])
xs = {1, 2, 3, 4}
ys = {4, 5}
xs.intersection(ys) # {4}
xs & ys # {4}
xs.union(ys) # {1, 2, 3, 4, 5}
xs | ys # {1, 2, 3, 4, 5}
xs.difference(ys) # {1, 2, 3}
xs - ys # {1, 2, 3}
for elem in h1:
print(h1[elem])
# Цикл по паре ключ-значение
for key, value in h1.items():
print(key, " ", value)
# Цикл по ключам
for key in h2.keys():
print(key, " ", h2[key])
# Loop over the values (Цикл по значениям)
for v in h3.values():
    print(v)  # fix: Python 2 `print v` statement is a SyntaxError in Python 3
# Добавление элементов из другого хеша
h1.update(h3)
# Количество пар в хеше
len(h1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ генератор списков ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
my_list = [ int(input()) for i in range(3) ] # пользовательский ввод
from random import randint
my_list = [ int(randint(1,100)) for i in range(3) ] # random
my_list = [ x ** 3 for x in range(20) if x%2==1]
# 1. Read the log lines and split each into an (IP, traffic) pair.
raw = [x.split(" ") for x in open("log.txt")]
# 2. Accumulate total traffic per IP address.
rmp = {}
for ip, traffic in raw:
    if ip in rmp:
        rmp[ip] += int(traffic)
    else:
        rmp[ip] = int(traffic)
# 3. Python 3 fix: dict.items() returns a view with no .sort(), and the
#    `lambda (key, val): key` tuple-parameter syntax was removed (PEP 3113).
lst = sorted(rmp.items(), key=lambda pair: pair[0])
# 4. Print the per-host report.
print("\n".join(["%s\t%d" % (host, traff) for host, traff in lst]))
def get_list(count=10):
    """Return a list of `count` random integers drawn uniformly from [1, 100]."""
    from random import randint
    return [randint(1, 100) for _ in range(count)]
print('\t'.join(list(map(str, get_list()))))
print('\t'.join(map(str, mylist)))
line = "23 75 2 -7 0 11"
print('inner line - "' + line + '"')
list(map(int, input().split()))  # demo: parse a line of ints from stdin (result discarded)
rezult = list(map(int, line.split()))
print(' '.join(map(str, rezult)))  # fix: bare `join(...)` is undefined in Python; use str.join
mylist = get_list()
print('\t'.join(map(str,mylist)))
print('max = ',max(mylist))
print('position max = ', mylist.index(max(mylist)))
temp_list = mylist[:mylist.index(max(mylist))] + mylist[(mylist.index(max(mylist))+1):]
#print('\t'.join(map(str,temp_list)))
print('max = ',max(temp_list))
print('position 2 max = ', mylist.index(max(temp_list)))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# предназначены для работы с внешними данными (Файловые объекты должны поддерживать основные методы: read(), write(), readline(), readlines(), seek(), tell(), close() и т.п)
# import urllib
# f1 = urllib.urlopen("http://python.onego.ru")
f1 = open("file1.txt", "r")
f2 = open("file2.txt", "w")
for line in f1.readlines():
f2.write(line)
f2.close()
f1.close()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ time ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from time import time
a = 1003456345634563456345634564053478562378562347856237845
b = 1003456345634563456345634564053478562378562347856237845
start = time()
while b != 0 :
a, b = b, a % b
print( time() - start )
print("\n~~~~~~~~~~~~~~ End ~~~~~~~~~~~~~~") |
from tensorflow import keras as ke
import tensorflow.python.keras.backend as K
from tensorflow.keras.models import Model, load_model
import streamlit as st
from nltk.tokenize import word_tokenize
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
import re
import string
import nltk
import sklearn
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
import plotly.express as px
import plotly.graph_objects as go
MODEL_PATH = r"data/model/LSTM_model_1.h5"
MAX_NB_WORDS = 100000 # max no. of words for tokenizer
MAX_SEQUENCE_LENGTH = 200 # max length of each entry (sentence), including padding
VALIDATION_SPLIT = 0.2 # data for validation (not used in training)
EMBEDDING_DIM = 100
tokenizer_file = "tokenizer.pickle"
wordnet = WordNetLemmatizer()
regex = re.compile('[%s]' % re.escape(string.punctuation))
model_list = ["Logistic Regression",'Multinomial Naive Bayes Classifier','Bernoulli Naive Bayes Classifier','Gradient Boost Classifier','Decision Tree','RFC Classifier']
model_file_list = [r"data/model/LR_model.pkl",r"data/model/MNVBC_model.pkl",r"data/model/BNBC_model.pkl",r"data/model/GBC_model.pkl",r"data/model/DT_model.pkl",r"data/model/RFC_model.pkl"]
with open(tokenizer_file, 'rb') as handle:
tokenizer = pickle.load(handle)
def basic_text_cleaning(line_from_column):
    """Tokenise a single string, strip punctuation, drop English stopwords,
    lemmatise each remaining token and return the cleaned, re-joined text."""
    # 1. Tokenise, then remove punctuation characters from every token.
    stripped = [regex.sub(u'', token) for token in word_tokenize(line_from_column)]
    non_empty = [token for token in stripped if not token == u'']
    # 2. Discard English stopwords.
    content_words = [w for w in non_empty if not w in stopwords.words('english')]
    # 3. Lemmatise (module-level `wordnet` is a WordNetLemmatizer instance).
    return ' '.join(wordnet.lemmatize(w) for w in content_words)
@st.cache(allow_output_mutation=True)
def Load_model():
    """Load the Keras LSTM model from MODEL_PATH (cached by Streamlit).

    Returns:
        (model, session): the loaded model and the Keras backend session it
        was loaded into; the caller re-activates the session before predict
        so the cached graph survives Streamlit reruns.
    """
    model = load_model(MODEL_PATH)
    # model._make_predict_function()
    model.summary()  # included to make it visible when model is reloaded
    session = K.get_session()
    return model, session
if __name__ == '__main__':
    # Streamlit page: collect news text, classify with the LSTM, show result.
    st.title('Fake News Classification')
    # st.write("Classifier using a LTSM model")
    # st.info("LSTM model, tokeniser and the 6 traditional machine learning models loaded ")
    st.subheader("Input the News content below")
    sentence = st.text_area("Enter your news content here", "Some news",height=200)
    predict_btt = st.button("predict")
    model, session = Load_model()
    if predict_btt:
        clean_text = []
        # Re-activate the backend session the cached model was loaded into.
        K.set_session(session)
        i = basic_text_cleaning(sentence)
        clean_text.append(i)
        # Tokenise and pad to the fixed sequence length used at training time.
        sequences = tokenizer.texts_to_sequences(clean_text)
        data = pad_sequences(sequences, padding = 'post', maxlen = MAX_SEQUENCE_LENGTH)
        prediction = model.predict(data)
        # Per the variable names: output column 0 = "real", column 1 = "fake"
        # — TODO confirm against the training label encoding.
        prediction_prob_real = prediction[0][0]
        prediction_prob_fake = prediction[0][1]
        prediction_class = prediction.argmax(axis=-1)[0]
        st.header("Prediction using LSTM model")
        if prediction_class == 0:
            st.success('This is not fake news')
        if prediction_class == 1:
            st.warning('This is a fake news')
        # Bar chart of the two class probabilities (in percent).
        class_label = ["fake","true"]
        prob_list = [prediction[0][1]*100,prediction[0][0]*100]
        prob_dict = {"true/fake":class_label,"Probability":prob_list}
        df_prob = pd.DataFrame(prob_dict)
        fig = px.bar(df_prob, x='true/fake', y='Probability')
        model_option = "LSTM"
        # Verdict message depends on which probability clears the 0.7 threshold.
        if prediction[0][1] > 0.7:
            fig.update_layout(title_text="{} model - prediction probability comparison between true and fake".format(model_option))
            st.info("The {} model predicts that there is a higher {} probability that the news content is fake compared to a {} probability of being true".format(model_option,prediction[0][1]*100,prediction[0][0]*100))
        elif prediction[0][0] > 0.7:
            fig.update_layout(title_text="{} model - prediction probability comparison between true and fake".format(model_option))
            st.info("The {} model predicts that there is a higher {} probability that the news content is true compared to a {} probability of being fake".format(model_option,prediction[0][0]*100,prediction[0][1]*100))
        else:
            fig.update_layout(title_text="{} model - prediction probability comparison between true and fake".format(model_option))
            st.info("Your news content is rather abstract, The {} model predicts that there a almost equal {} probability that the news content is true compared to a {} probability of being fake".format(model_option,prediction[0][0]*100,prediction[0][1]*100))
        st.plotly_chart(fig, use_container_width=True)
        # Comparisons for other models
        # st.header("Prediction using 6 traditional machine learning model")
        # predictions = []
        # for model in model_file_list:
        #     filename = model
        #     model = pickle.load(open(filename, "rb"))
        #     prediction = model.predict([sentence])[0]
        #     predictions.append(prediction)
        # dict_prediction = {"Models":model_list,"predictions":predictions}
        # df = pd.DataFrame(dict_prediction)
        # num_values = df["predictions"].value_counts().tolist()
        # num_labels = df["predictions"].value_counts().keys().tolist()
        # dict_values = {"true/fake":num_labels,"values":num_values}
        # df_prediction = pd.DataFrame(dict_values)
        # fig = px.pie(df_prediction, values='values', names='true/fake')
        # fig.update_layout(title_text="Comparision between all 7 models: Prediction proportion between True/Fake")
        # st.plotly_chart(fig, use_container_width=True)
        # st.table(df)
# Print every token tagged PERSON in a CoreNLP-style XML file (nlp.txt.xml).
from lxml import etree

tree = etree.parse("nlp.txt.xml")
root = tree.getroot()
docment = root[0]  # the document element (first child of the root)
sentences = docment.find("sentences")
for sentence in sentences:
    tokens = sentence.find("tokens")
    for token in tokens:
        word = token.find("word")
        ner = token.find("NER")
        # NOTE(review): assumes every <token> has <word> and <NER> children;
        # a missing <NER> would raise AttributeError here — confirm the schema.
        if ner.text == "PERSON":
            print(word.text)
|
@chef_routes.route('/<int:id>/appointment', methods=['POST'])
def post_appointment(id):
    """Create an appointment from the posted JSON body and return it as a dict.

    NOTE(review): the route's `id` parameter is unused — chef_id is read from
    the JSON payload instead; confirm this is intentional.
    NOTE(review): `notes` comes from AppointmentForm (form data) while the
    other fields come from the JSON body — verify both sources are populated
    by the client.
    """
    form = AppointmentForm()
    data = request.get_json()
    # "date" is expected as a dash-separated "Y-m-d-H-M-S" string (6 fields).
    y, m, d, h, minute, sec = request.json["date"].split("-")
    new_date = datetime.datetime(int(y), int(m), int(d), int(h), int(minute), int(sec))
    new_appointment = Appointment(
        user_id=data["user_id"],
        chef_id=data["chef_id"],
        notes=form.data['notes'],
        date=new_date)
    db.session.add(new_appointment)
    db.session.commit()
    return new_appointment.to_dict()
|
import torch
import numpy as np
import os
import monai
from monai.data import ArrayDataset, create_test_image_2d
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, RandRotate90, RandSpatialCrop, ScaleIntensity, ToTensor, LoadNumpy, LoadNifti
from monai.visualize import plot_2d_or_3d_image
from torch.utils.data import Dataset, DataLoader
from tutils import *
tconfig.set_print_info(True)

# Image transform: to tensor, add a channel axis, then a fixed 96x96 random crop.
train_imtrans = Compose(
    [
        ToTensor(),
        AddChannel(),
        RandSpatialCrop((96, 96), random_size=False),
    ]
)
# RandRotate90(prob=0.5, spatial_axes=(0, 1)),
# AddChannel(),
# ToTensor(),
# ScaleIntensity(),
# AddChannel(),
# RandSpatialCrop((96, 96), random_size=False),
# LoadNifti(),
# Segmentation transform (defined but not used by the loaders below).
train_segtrans = Compose(
    [
        LoadNifti(),
        AddChannel(),
        RandRotate90(prob=0.5, spatial_axes=(0, 1)),
        ToTensor(),
    ]
)
# For testing
# datadir1 = "/home1/quanquan/datasets/lsw/benign_65/fpAML_55/slices/"
# image_files = np.array([x.path for x in os.scandir(datadir1+"image") if x.name.endswith(".npy")])
# label_files = np.array([x.path for x in os.scandir(datadir1+"label") if x.name.endswith(".npy")])
### Data Collection for Kits19: one imaging.nii.gz volume per case_* directory.
datadir_kits = "/home1/quanquan/datasets/kits19/resampled_data"
image_files = []
for subdir in os.scandir(datadir_kits):
    if subdir.name.startswith("case_"):
        image_name = os.path.join(subdir.path, "imaging.nii.gz")
        image_files.append(image_name)
image_files = np.array(image_files)
image_files.sort()
print(image_files[:10])
### Define array dataset, data loader
# check_ds = ArrayDataset(img=image_files, img_transform=train_imtrans, seg=None, seg_transform=None)
check_ds = monai.data.NiftiDataset(image_files=image_files, transform=train_imtrans)
check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
im = monai.utils.misc.first(check_loader)  # peek at one batch to verify shapes
print(im.shape)
### Create a training data loader (last 20 cases held out)
# NOTE(review): verify ArrayDataset accepts `label`/`label_transform` keywords
# in the installed MONAI version.
train_ds = ArrayDataset(image_files[:-20], train_imtrans, seg=None, seg_transform=None, label=None, label_transform=None)
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
p("Start Training")
for idx, batch_data in enumerate(train_loader):
    p("len(batch_data): ", len(batch_data))
    # inputs, labels = batch_data[0].cuda(), batch_data[1].cuda()
    inputs = batch_data
    import ipdb; ipdb.set_trace()  # intentional breakpoint: inspect the first batch
    break
|
#!/usr/bin/env python
#Duncan Campbell
#February 3, 2015
#Yale University
#plot the projected correlation function of a set of mocks.
#load packages
from __future__ import print_function
import sys
import numpy as np
import h5py
from astropy.io import ascii
import matplotlib.pyplot as plt
import custom_utilities as cu
def main():
    """Plot the projected correlation function comparison between the CAM
    mock and its central-shuffled counterpart.

    For one stellar-mass bin, reads wp(rp) for the 'all', quenched ('q')
    and star-forming ('sf') samples of both catalogues, plots the
    fractional difference (wp_q - wp_sf) / wp_all for each catalogue, and
    saves the figure.

    Improvement over the previous version: the six copy-pasted
    ``ascii.read`` stanzas are collapsed into a loop over
    catalogue x sample, removing the error-prone result_1a..result_2c
    variables.
    """
    catalogues = ['sm_9.5_s0.2_sfr_c-1.0_250',
                  'sm_9.5_s0.2_sfr_c-1.0_250_cen_shuffle']
    samples = ['all', 'q', 'sf']
    sm_bins = ['9.0_9.5', '9.5_10.0', '10.0_10.5', '10.5_11.0', '11.0_11.5']
    sm_bin = sm_bins[2]  # 10.0 < log(M*) < 10.5

    # open correlation functions: results[catalogue][sample] -> table('r','wp')
    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    names = ['r', 'wp']
    results = {}
    for catalogue in catalogues:
        results[catalogue] = {}
        for sample in samples:
            filename = catalogue + '_wp_' + sample + '_' + sm_bin + '.dat'
            results[catalogue][sample] = ascii.read(filepath + filename,
                                                    names=names)

    ##################################################################
    # set up plot
    ##################################################################
    fig1, ax = plt.subplots(nrows=1, ncols=1, sharex=True, sharey=True,
                            figsize=(3.3, 3.3))
    fig1.subplots_adjust(hspace=0, wspace=0, left=0.2, right=0.9,
                         bottom=0.2, top=0.9)
    ax.set_xlim([0.1, 20])
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.set_ylabel(r'$(\omega_{\rm red}-\omega_{\rm blue})/\omega_{\rm all}$')
    ax.set_xlabel(r'$r_p~[{\rm Mpc}~h^{-1}]$')
    ax.set_title(r'10.0$<\log(M_{*}/M_{\odot}h^{-2})<10.5$')

    # Fractional difference between quenched and star-forming clustering,
    # normalised by the full-sample clustering, for each catalogue.
    mock = results[catalogues[0]]
    shuffled = results[catalogues[1]]
    p1a, = ax.plot(mock['all']['r'],
                   (mock['q']['wp'] - mock['sf']['wp']) / mock['all']['wp'],
                   '-', color='black', alpha=1)
    p1b, = ax.plot(shuffled['all']['r'],
                   (shuffled['q']['wp'] - shuffled['sf']['wp']) / shuffled['all']['wp'],
                   '--', color='black', alpha=1)
    plt.legend((p1a, p1b), ('CAM mock', 'shuffled mock'),
               fontsize=10, frameon=False, loc=4)

    plt.show()
    fig1.savefig('/Users/duncan/Desktop/compare_wp.pdf')


if __name__ == '__main__':
    main()
#! python3
"""download-solid.py - Download every single XKCD comic."""
# Import essential modules.
import os
import requests
import bs4
import logging
logging.basicConfig(level = logging.DEBUG, format = '%(levelname)s, %(message)s')
logging.disable(logging.CRITICAL)  # logging fully disabled; comment out to debug
# Set basic url and create directory to save comics.
url = 'http://xkcd.com'
os.makedirs('xkcd', exist_ok=True)
# Download webpage.
# NOTE(review): the loop stops when the url ends with '1', but the prev-link
# of the first comic is '#' (url becomes 'http://xkcd.com#'), so this
# condition may never trigger - TODO confirm against the live site.
while not url.endswith('1'):
    print('Downloading page: {}'.format(url))
    result = requests.get(url)
    result.raise_for_status()
    # Parse website and retrieve image url element.
    # NOTE(review): no parser argument - bs4 warns and picks a default parser.
    result_text = bs4.BeautifulSoup(result.text)
    image_element = result_text.select('#comic img')
    if image_element == []:
        print('Could not find comic image.')
    else:
        try:
            comic_url = 'http:' + image_element[0].get('src')
            # Download comic.
            print('Downloading comic: {}'.format(os.path.basename(comic_url)))
            result = requests.get(comic_url)
            result.raise_for_status()
        except requests.exceptions.MissingSchema:
            logging.info('Not valid URL!')
            # Unusable src: jump straight to the previous page and retry.
            prev_element = result_text.select('a[rel="prev"]')[0]
            url = 'http://xkcd.com' + prev_element.get('href')
            continue
        # Save it to ./xkcd.
        image_file = open(os.path.join('xkcd', os.path.basename(comic_url)), 'wb')
        for chunk in result.iter_content(100000):
            image_file.write(chunk)
        image_file.close()
    # Change url to previous page.
    prev_element = result_text.select('a[rel="prev"]')[0]
    url = 'http://xkcd.com' + prev_element.get('href')
print('Done!')
|
from tensorflow.keras.layers import (GRU, Dense, Dropout, Embedding, Flatten,
Input, Multiply, Permute, RepeatVector,
Softmax)
from tensorflow.keras.models import Model
from utils import MAX_SEQUENCE_LENGTH
def make_ner_model(embedding_tensor, words_vocab_size, tags_vocab_size,
                   num_hidden_units=128, attention_units=64):
    """Build a GRU-based sequence-tagging model with simple soft attention.

    :param embedding_tensor: pre-trained embedding matrix; its second
        dimension gives the embedding size (row count is assumed to be
        words_vocab_size + 1 - TODO confirm against the caller)
    :param words_vocab_size: size of the input-word vocabulary
    :param tags_vocab_size: number of output tag classes
    :param num_hidden_units: GRU hidden-state size
    :param attention_units: width of the attention scoring layer
    :return: an uncompiled Model mapping int32 token-id sequences of length
        MAX_SEQUENCE_LENGTH to per-token tag probability distributions
    """
    EMBEDDING_DIM = embedding_tensor.shape[1]
    words_input = Input(dtype='int32', shape=[MAX_SEQUENCE_LENGTH])
    # Frozen pre-trained embeddings: only the downstream layers are trained.
    x = Embedding(words_vocab_size + 1,
                  EMBEDDING_DIM,
                  weights=[embedding_tensor],
                  input_length=MAX_SEQUENCE_LENGTH,
                  trainable=False)(words_input)
    outputs = GRU(num_hidden_units,
                  return_sequences=True,
                  dropout=0.5,
                  name='RNN_Layer')(x)
    # Simple attention: score each timestep with a small MLP, then softmax
    # the scalar scores over the sequence dimension.
    hidden_layer = Dense(attention_units, activation='tanh')(outputs)
    hidden_layer = Dropout(0.25)(hidden_layer)
    hidden_layer = Dense(1, activation=None)(hidden_layer)
    hidden_layer = Flatten()(hidden_layer)
    attention_vector = Softmax(name='attention_vector')(hidden_layer)
    # Broadcast the per-timestep weights across the hidden units and scale
    # the GRU outputs elementwise.
    attention = RepeatVector(num_hidden_units)(attention_vector)
    attention = Permute([2, 1])(attention)
    encoding = Multiply()([outputs, attention])
    encoding = Dropout(0.25)(encoding)
    ft1 = Dense(num_hidden_units)(encoding)
    ft1 = Dropout(0.25)(ft1)
    ft2 = Dense(tags_vocab_size)(ft1)
    out = Softmax(name='Final_Sofmax')(ft2)  # NOTE: 'Sofmax' typo kept - renaming breaks saved checkpoints
    model = Model(inputs=words_input, outputs=out)
    return model
|
import imghdr
from werkzeug.utils import secure_filename
from flask import Flask, request, jsonify, render_template
import pickle
import os
import pandas as pd
from keras.models import load_model
from keras.preprocessing import image
import tensorflow as tf
from PIL import Image
import numpy as np
import json
app = Flask(__name__)
# Resolve all resource paths relative to this file so the app can be started
# from any working directory.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
MODEL_PATH = os.path.join(APP_ROOT, "./crop_rec_model.pickle")  # pickled crop-recommendation model
LOCATION_PATH = os.path.join(APP_ROOT, "./location.csv")  # state/district -> crop lookup table
CROP_MODEL_PATH = os.path.join(APP_ROOT,"./crop.h5")  # keras disease-classification model
CROP_WEIGHTS_PATH = os.path.join(APP_ROOT,"./crop_weights.h5")  # weights loaded over the model
UPLOAD_FOLDER_PATH = os.path.join(APP_ROOT,"./temp")  # scratch folder for uploaded images
def validate_image(stream):
    """Return the canonical extension ('.png', '.jpg', '.gif') for the image
    on *stream* by sniffing its magic bytes, or None if the header is not a
    recognised image format.

    The stream position is restored so the caller can still save the file.

    Replaces the previous ``imghdr``-based implementation: imghdr was
    deprecated in Python 3.11 and removed in 3.13.  Only the formats this
    app accepts (see disease_prediction's allowed_extensions) are sniffed.
    """
    header = stream.read(512)
    stream.seek(0)
    # Magic-byte signatures.
    if header.startswith(b'\x89PNG\r\n\x1a\n'):
        return '.png'
    if header.startswith(b'\xff\xd8\xff'):  # JPEG (JFIF/Exif share this prefix)
        return '.jpg'
    if header[:6] in (b'GIF87a', b'GIF89a'):
        return '.gif'
    return None
def prepare_image(img_file):
    """Save the uploaded file to the temp folder, reload it as a 256x256
    image with pixel values scaled to [0, 1], and return it with a leading
    batch dimension, ready for model.predict.
    """
    img_file.save(os.path.join(APP_ROOT,"./temp", img_file.filename))
    # NOTE(review): assumes img_file.filename was sanitised by the caller
    # (disease_prediction runs secure_filename, but on a separate variable) -
    # verify before reusing this helper elsewhere.
    img_path = UPLOAD_FOLDER_PATH + '/' + img_file.filename
    print(img_path)
    img = image.load_img(img_path, target_size=(256, 256))
    x = image.img_to_array(img)
    x = x/255  # scale pixel values to [0, 1]
    return tf.expand_dims(x, axis=0)  # add the batch dimension
@app.route('/')
def hello():
    """Serve the single-page front-end."""
    return render_template('app.html')
@app.route('/get_crop_info', methods=['GET', 'POST'])
def crop():
    """Recommend a crop from soil/weather query parameters.

    Reads nitrogen, phosphorus, potassium, temprature (sic), humidity, PH
    and rainfall from the query string and feeds them - as strings - to the
    pickled model.  NOTE(review): values are neither validated nor cast to
    float here; presumably the model pipeline handles conversion - confirm.
    """
    # NOTE(review): the model is un-pickled on every request; loading once at
    # startup would be cheaper.  pickle.load is only safe on trusted files.
    model = pickle.load(open(MODEL_PATH, 'rb'))
    predictions = model.predict([[request.args.get("nitrogen"), request.args.get("phosphorus"), request.args.get(
        "potassium"), request.args.get("temprature"), request.args.get("humidity"), request.args.get("PH"), request.args.get("rainfall")]])
    Response = jsonify({"prediction": predictions[0]})
    Response.headers.add('Access-Control-Allow-Origin', '*')  # CORS: allow any origin
    return Response
@app.route('/location', methods=['GET', 'POST'])
def location():
    """Look up the recorded crop for a state/district pair in location.csv.

    Responds {"prediction": <crop>} on a match, or an explanatory message
    when the pair is not present.  The district query value is upper-cased
    before matching; the state must match exactly as stored.
    """
    location_df = pd.read_csv(LOCATION_PATH)  # re-read per request
    State_Name = request.args.get("state")
    District_Name = request.args.get("district")
    print([State_Name,District_Name])
    print(location_df.head())
    crop = location_df[(location_df["State_Name"] ==
                        State_Name) & (location_df["District_Name"] == District_Name.upper())]["Crop"]
    crop = crop.reset_index(drop=True)
    if(crop.size == 0):
        Response = jsonify({"prediction": "state or district does not exist"})
        Response.headers.add('Access-Control-Allow-Origin', '*')
        return Response
    # Several rows may match; only the first crop is returned.
    Response = jsonify({"prediction": str(crop[0])})
    Response.headers.add('Access-Control-Allow-Origin', '*')
    return Response
@app.route('/disease', methods=['POST'])
def disease_prediction():
    """Classify an uploaded tomato-leaf photo into one of ten disease classes.

    Expects a multipart form with a 'file' field.  The upload is rejected
    unless its extension AND its sniffed content agree on an allowed type.
    """
    allowed_extensions = ['.jpg', '.png', '.gif','.jpeg']
    if 'file' not in request.files:
        return 'there is no file in form!'
    file = request.files['file']
    filename = secure_filename(file.filename)
    if filename != '':
        file_ext = os.path.splitext(filename)[1]
        # NOTE(review): validate_image never returns '.jpeg' (it maps jpeg to
        # '.jpg'), so files named *.jpeg always fail this check - confirm
        # whether that is intended.
        if file_ext not in allowed_extensions or file_ext != validate_image(file.stream):
            return "Wrong file!"
    # Index order must match the training label order of the saved model.
    Classes = ["Tomato___Bacterial_spot","Tomato___Early_blight","Tomato___Late_blight","Tomato___Leaf_Mold","Tomato___Septoria_leaf_spot","Tomato___Spider_mites Two-spotted_spider_mite","Tomato___Target_Spot", "Tomato___Tomato_Yellow_Leaf_Curl_Virus", "Tomato___Tomato_mosaic_virus","Tomato___healthy"]
    # NOTE(review): model is rebuilt per request; loading once at startup
    # would be much faster.
    model=load_model(CROP_MODEL_PATH)
    model.load_weights(CROP_WEIGHTS_PATH)
    result = np.argmax(model.predict([prepare_image(file)]), axis=-1)
    answer = Classes[int(result)]
    print(answer)
    return json.dumps({'result': answer})
if __name__ == '__main__':
    print("starting python flask server for Best Crop Prediction")
    # Development server, reachable from the network; not for production use.
    app.run(debug=True, host='0.0.0.0')
|
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from rest_framework import routers
from rest_framework_jwt.views import obtain_jwt_token
from apps.articles.views import ArticleViewSet, ArticleCategoryViewSet
from apps.main.views import robots
admin.site.site_header = settings.SITE_ADMIN_TITLE
router = routers.DefaultRouter()
# NOTE(review): `base_name` was renamed `basename` in DRF 3.9; this code
# targets an older DRF release - confirm the pinned version before upgrading.
router.register(r'articles', ArticleViewSet, base_name='articles')
router.register(
    r'articlecategories',
    ArticleCategoryViewSet,
    base_name='article-categories'
)
urlpatterns = [
    path('api/', include(router.urls)),
    path('api/login/', obtain_jwt_token),  # JWT token endpoint
    path('robots.txt', robots),
    path('admin/', admin.site.urls),
]
# Serve static and media files directly only in development.
if settings.DEBUG:
    urlpatterns += static(
        settings.STATIC_URL, document_root=settings.STATIC_ROOT
    )
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
|
"""
create a bunch of different indexes using ggindex() and compare their properties.
"""
# set start end date
ret_bsk_mat.index[0]
ret_bsk_mat.index[-1]
# corr
ret_bsk_mat.corr().round(4)
ret_bsk_mat.corr().round(4).to_csv('output/bsk/ret/corr_1.csv')
# sharpe sortino
sharpe_1 = sharpe(ret_bsk_mat, showall=True).round(2)
sortino_1 = sortino(ret_bsk_mat).round(2)
sortino_1.name = 'Sortino'
sharpe_1 = pd.concat([sharpe_1, sortino_1], axis=1)
sharpe_1.to_csv('output/bsk/ret/sharpe_1.csv')
sharpe_1
# price
return2aum(ret_bsk_mat).plot(logy=True)
plt.title('$100 Investment')
plt.ylabel('Value')
plt.savefig('output/bsk/ret/pri_bsk_a.png')
# corr over time
cor90_bsk_mat = ret_bsk_mat.rolling(365).corr(ret_bsk_mat[r1.name])
cor90_bsk_mat = cor90_bsk_mat.drop(r1.name, axis=1)
cor90_bsk_mat.plot()
plt.ylabel('Correlation vs t10-wm-rm')
plt.gca().set_ylim(top=1.01)
plt.title('Rolling 1y correlation \n All vs t10-wm-rm')
plt.savefig('output/bsk/ret/bsk-rollcorr-1.png')
# choose a few baskets
retmat1 = pd.concat([r0, r1, r2, r3, r8,
ret_vcc_mat.loc[r1.index[0]:, 'BTC']
], axis=1)
retmat1.isnull().sum()
# describe. how does ithandla na?
retmat1.describe()
# volatility over time
retmat1.rolling(90).std().rolling(10).mean().plot()
plt.title('Rolling 90 days volatility')
plt.ylabel('Volatility \n (Smoothed with 10 day mean)')
plt.savefig('output/bsk/ret/retmat1_rollvol.png')
# same but smoothed
# mean vola, with upper and lower limits
tbl2 = pd.concat([retmat1.std() - 2*retmat1.std().std(),
retmat1.std(),
retmat1.std() + 2*retmat1.std().std()],
axis=1)
tbl2 = tbl2 * np.sqrt(365)
tbl2.columns = ['lower', 'mean volatility', 'upper']
tbl2.round(3)
# returns interval
returns_interval(retmat1)
returns_interval(retmat1).to_csv('output/bsk/ret/retmat1_descr_interval.csv')
# sharpe
sharpe(retmat1, showall=True)
ret_bsk_mat.columns
ret_bsk_mat.columns[-1+3]
# info ratio
information_ratio(retmat1, benchmark='market')
# risk return
pd.concat([sharpe(retmat1, showall=True),
information_ratio(retmat1, benchmark='market'),
tracking_error(retmat1, benchmark='market')],
axis=1).T.round(2).\
to_csv('output/bsk/ret/retmat1_risk.csv')
# ret and vol by year
retmat1_yearly_mean = (retmat1.groupby(retmat1.index.year)).mean().round(2)*365
retmat1_yearly_mean.to_csv('output/bsk/ret/retmat1_yearly_mean.csv')
retmat1_yearly_vol = (retmat1.groupby(retmat1.index.year)).std().round(2)*np.sqrt(365)
retmat1_yearly_vol.to_csv('output/bsk/ret/retmat1_yearly_vol.csv')
# rolling sharpe
retmat1.rolling(365).apply(sharpe).rolling(30).mean().plot()
plt.title('Sharpe ratio rolling 1y')
plt.ylabel('Sharpe ratio \n (Smoothed with 30d mean)')
plt.savefig('output/bsk/ret/retmat1_rolling_sharpe_1.png')
# beta
beta(retmat1).round(3).to_csv('output/bsk/ret/retmat1_beta.csv')
# rolling beta
# def custom function because we cannot do like above with .apply(sharpe)
def roll(df, w):
    """Return a groupby over rolling windows of width *w* so that
    multi-column statistics (e.g. beta) can be applied per window.

    NOTE(review): pd.Panel was removed in pandas 1.0 - this helper only
    runs on pandas < 0.25 and needs a rewrite for modern pandas.
    """
    # stack df.values w-times shifted once at each stack
    roll_array = np.dstack([df.values[i:i + w, :] for i in range(len(df.index) - w + 1)]).T
    # roll_array is now a 3-D array and can be read into
    # a pandas panel object
    panel = pd.Panel(roll_array,
                     items=df.index[w - 1:],
                     major_axis=df.columns,
                     minor_axis=pd.Index(range(w), name='roll'))
    # convert to dataframe and pivot + groupby
    # is now ready for any action normally performed
    # on a groupby object
    return panel.to_frame().unstack().T.groupby(level=0)
# see rolling beta
roll(retmat1, w=90).apply(beta).rolling(10).mean().plot()
plt.title('Rolling 90 days beta')
plt.ylabel('Beta \n (Smoothed with 10 day mean)')
plt.savefig('output/bsk/ret/retmat1_rollbeta.png')
## ret top 10
# this should be placed in another file! todo
ret_vcc_mat[tkr_t10now]
## ret distribution
# box
retmat1.plot.box()
plt.ylabel('Daily returns')
plt.title('Box plot of daily returns')
plt.savefig('output/bsk/ret/retmat1_box.png')
# hist
def histplot(retmat, col):
    """Draw a histogram of the daily returns in column *col* of *retmat*,
    titled with the column name."""
    series = retmat[col]
    series.plot.hist()
    plt.title(col)
retmat1.columns
histplot(retmat1, retmat1.columns[1])
# todo use sns kernel density plot and have 1-3 in a plot. clinux.
# Kernel density estimate of market returns.
# NOTE(review): plt.plot(dens.cdf) draws the fitted CDF values - presumably
# the density was intended; confirm before trusting the saved figure.
dens = sm.nonparametric.KDEUnivariate(retmat1.market)
dens.fit()
plt.plot(dens.cdf)
plt.savefig('output/bsk/ret/ret_dens_market.png')
# qq plot 1
#sm.qqplot(retmat1.market, stats.t, distargs=(4,))
sm.qqplot(retmat1.market, stats.t, fit=True, line='45')
sm.qqplot(retmat1.market, stats.t, distargs=(3,), line='45')
# todo not at all like in thesis vid t distr df=4
def qqplot(ret_vec, name=''):
    """Q-Q plot of *ret_vec* against a Student-t distribution (df=3), saved
    to output/bsk/ret/qqplot_<name>.png.

    Bug fix: the previous version ignored *ret_vec* and always plotted
    retmat1.market, so every saved figure showed the same series.
    """
    #sm.qqplot(ret_vec, stats.t, fit=True, line='45')
    sm.qqplot(ret_vec, stats.t, distargs=(3,))
    plt.title('Q-Q-plot ' + name)
    filename = 'output/bsk/ret/qqplot_' + name + '.png'
    plt.savefig(filename)
qqplot(retmat1.BTC, 'BTC')
qqplot(retmat1.market, 'market')
# todo read up on statsmodel regarding the params used.
# value at risk (5% daily historical quantile)
retmat1.quantile(0.05).round(3)
# skew and kurt (3rd 4th moment)
retmat1.skew()
retmat1.kurt()
## contribution
# given a date, what is the contribution of each asset in the basket?
# contribution = weight x annualised asset return
contr1 = w1 * ret_vcc_mat * 365
contr4 = w4 * ret_vcc_mat * 365
contr1.mean().nlargest(5)
contr1.mean().nsmallest(5)
tkr_temp1 = contr1.mean().nlargest(5).index.tolist()
tkr_temp2 = contr1.mean().nsmallest(5).index.tolist()
#tkr_most_contrib = tkr_temp1.union(tkr_temp2)
tkr_most_contrib = tkr_temp1 + tkr_temp2
# plot
contr1[tkr_most_contrib].mean().plot.barh()
# plot
contr1[tkr_t10now].mean().plot.barh()
plt.title('Mean contribution \n in ' + r1.name)
plt.xlabel('Contribution = weight times annualized return')
plt.ylabel('Current top 10 assets')
plt.savefig('output/bsk/ret/contribution_bsk1.png')
# plot
tkr_t5now
contr4[tkr_t5now].mean().plot.barh()
plt.title('Mean contribution \n in ' + r4.name)
plt.xlabel('Contribution = weight times annualized return')
plt.ylabel('Current top 5 assets')
plt.savefig('output/bsk/ret/contribution_bsk4.png')
## density of ret
# Pair-grid density/scatter plots, only run where seaborn is available.
clinux = False
if clinux:
    g = sns.PairGrid(retmat1, diag_sharey=False)
    g.map_lower(sns.kdeplot)
    g.map_upper(sns.scatterplot)
    g.map_diag(sns.kdeplot, lw=3)
#!/usr/bin/env python
import lcddriver
from mpu6050 import mpu6050
import math
sensor = mpu6050(0x69)  # MPU-6050 IMU on alternate I2C address 0x69
lcd = lcddriver.lcd()
# Poll the accelerometer forever and show pitch/roll (degrees) on the LCD.
while True:
    accel_data = sensor.get_accel_data()
    # NOTE(review): pitch/roll from atan2 of raw axis pairs; the axis
    # convention depends on how the sensor is mounted - confirm on hardware.
    pitch = math.atan2(accel_data['z'],accel_data['y'])
    roll = math.atan2(accel_data['z'],accel_data['x'])
    print(pitch)
    print(roll)
    lcd.lcd_display_string("pitch: " + str(round(math.degrees(pitch),2)), 1)
    lcd.lcd_display_string("roll: " + str(round(math.degrees(roll),2)), 2)
|
# Read two strings; print -1 if they are identical, otherwise the length of
# the longer one (when lengths are equal but the strings differ, either
# whole string qualifies, so its common length is printed).
str1 = input()
str2 = input()
if str1 == str2:
    print("-1")
else:
    print(max(len(str1), len(str2)))
|
from django.contrib import admin
# Register your models here.
from .models import Post, Category
class PostAdmin(admin.ModelAdmin):
    """Admin changelist configuration for blog posts."""
    # Columns shown in the changelist (names match the model's field names).
    list_display=("Title", "Author")
    # Enables the admin search box over the post body.
    search_fields=["contents"]
class CategoryAdmin(admin.ModelAdmin):
    """Admin changelist configuration for post categories."""
    list_display=("Category_name","Description")
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
|
# write a program that receives in input an integer value and
# computes and outputs '/' if the value is positive, '-' if the value is 0
# '\' otherwise
# (Python 2 script: uses raw_input.)
POS = '/'
NEG = '\\'
ZERO = '-'
val = int(raw_input())
if val > 0:
    sOut = POS
elif val == 0:
    sOut = ZERO
else:
    sOut = NEG
print(sOut)
|
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import util
def discrimAnalysis(x, y):
    """
    Estimate the parameters in LDA/QDA and visualize the LDA/QDA models
    Inputs
    ------
    x: a N-by-2 2D array contains the height/weight data of the N samples
    y: a N-by-1 1D array contains the labels of the N samples (1=male, 2=female)
    Outputs
    -----
    A tuple of five elments: mu_male,mu_female,cov,cov_male,cov_female
    in which mu_male, mu_female are mean vectors (as 1D arrays)
    cov, cov_male, cov_female are covariance matrices (as 2D arrays)
    Besides producing the five outputs, you need also to plot 1 figure for LDA
    and 1 figure for QDA in this function
    """
    ### TODO: Write your code here
    # Equal class priors assumed throughout.
    pi_male = 0.5
    pi_female = 0.5
    x_male = x[y==1,:]
    x_female = x[y==2,:]
    mu_male = np.mean(x_male,axis = 0)
    mu_female = np.mean(x_female,axis = 0)
    cov_male = np.zeros((2,2))
    cov_female = np.zeros((2,2))
    cov = np.zeros((2,2))
    # Accumulate per-class scatter matrices (sums of centred outer products).
    for i in range(0,len(x_male)):
        cov_male += np.outer(x_male[i,:]-mu_male,(x_male[i,:]-mu_male))
    for i in range(0,len(x_female)):
        # print(np.outer((x_female[i,:]-mu_female), x_female[i,:]-mu_female))
        cov_female += np.outer(x_female[i,:]-mu_female,(x_female[i,:]-mu_female))
    # Pooled (shared) covariance for LDA, per-class covariances for QDA;
    # maximum-likelihood normalisation (divide by N, not N-1).
    cov = (cov_male + cov_female)/len(x)
    cov_male = cov_male/(len(x_male))
    cov_female = cov_female/len(x_female)
    plt.figure(1)
    plt.scatter(x_male[:,0],x_male[:,1],color = 'blue')
    plt.scatter(x_female[:,0],x_female[:,1], color = 'red')
    # Evaluation grid: heights 50..80, weights 80..280.
    X,Y = np.meshgrid(np.arange(50,81), np.arange(80,281))
    # XY = np.append(X.flatten(),Y.flatten())
    # XY = XY.reshape(len(X.flatten()), 2)
    Z_male = np.zeros(X.shape)
    Z_female = np.zeros(X.shape)
    cov_inv = npl.inv(cov)
    lda_male = np.zeros(X.shape)
    lda_female = np.zeros(X.shape)
    #LDA
    for i in range(0,X.shape[0]):
        for j in range(0,X.shape[1]):
            XY = [X[i,j], Y[i,j]]
            # Class-conditional Gaussian densities with the shared covariance.
            Z_male[i,j] = 1/(2*np.pi*np.sqrt(npl.det(cov)))*np.exp(-1/2*np.matmul(np.matmul((XY-mu_male),cov_inv),(XY-mu_male).T))
            Z_female[i,j] = 1/(2*np.pi*np.sqrt(npl.det(cov)))*np.exp(-1/2*np.matmul(np.matmul((XY-mu_female),cov_inv),(XY-mu_female).T))
            # Linear discriminant scores; the decision boundary is where they tie.
            lda_male[i,j] = np.matmul(np.matmul(XY,cov_inv),mu_male) - \
                1/2*np.matmul(np.matmul(mu_male.T,cov_inv),mu_male) + np.log(pi_male)
            lda_female[i,j] = np.matmul(np.matmul(XY,cov_inv),mu_female) - \
                1/2*np.matmul(np.matmul(mu_female.T,cov_inv),mu_female) + np.log(pi_female)
    plt.contour(X,Y,Z_male)
    plt.contour(X,Y,Z_female)
    plt.contour(X,Y,lda_male-lda_female,1)  # single contour = LDA decision boundary
    plt.savefig('lda.pdf')
    plt.show()
    #QDA
    qda_male = np.zeros(X.shape)
    qda_female = np.zeros(X.shape)
    cov_male_inv = npl.inv(cov_male)
    cov_female_inv = npl.inv(cov_female)
    plt.figure(2)
    plt.scatter(x_male[:,0],x_male[:,1],color = 'blue')
    plt.scatter(x_female[:,0],x_female[:,1], color = 'red')
    for i in range(0,X.shape[0]):
        for j in range(0,X.shape[1]):
            XY = [X[i,j], Y[i,j]]
            # Per-class densities and quadratic discriminant scores.
            Z_male[i,j] = 1/(2*np.pi*np.sqrt(npl.det(cov_male)))*np.exp(-1/2*np.matmul(np.matmul((XY-mu_male),cov_male_inv),(XY-mu_male).T))
            Z_female[i,j] = 1/(2*np.pi*np.sqrt(npl.det(cov_female)))*np.exp(-1/2*np.matmul(np.matmul((XY-mu_female),cov_female_inv),(XY-mu_female).T))
            qda_male[i,j] = -1/2*np.matmul(np.matmul((XY-mu_male),cov_male_inv),(XY-mu_male).T) \
                + np.log(pi_male) - 1/2*np.log(npl.det(cov_male))
            qda_female[i,j] = -1/2*np.matmul(np.matmul((XY-mu_female),cov_female_inv),(XY-mu_female).T) \
                + np.log(pi_female) - 1/2*np.log(npl.det(cov_female))
    plt.contour(X,Y,Z_male)
    plt.contour(X,Y,Z_female)
    plt.contour(X,Y,qda_male-qda_female,1)  # QDA decision boundary
    plt.savefig('qda.pdf')
    plt.show()
    return (mu_male,mu_female,cov,cov_male,cov_female)
def misRate(mu_male,mu_female,cov,cov_male,cov_female,x,y):
    """
    Use LDA/QDA on the testing set and compute the misclassification rate
    Inputs
    ------
    mu_male,mu_female,cov,cov_male,cov_female: parameters from discrimAnalysis
    x: a N-by-2 2D array contains the height/weight data of the N samples
    y: a N-by-1 1D array contains the labels of the N samples (1=male, 2=female)
    Outputs
    -----
    A tuple of two elements: (mis rate in LDA, mis rate in QDA )
    """
    log_prior = np.log(0.5)  # equal class priors
    # Invert and precompute everything once, outside the per-sample loop.
    cov_inv = npl.inv(cov)
    cov_male_inv = npl.inv(cov_male)
    cov_female_inv = npl.inv(cov_female)
    half_log_det_m = 1/2*np.log(npl.det(cov_male))
    half_log_det_f = 1/2*np.log(npl.det(cov_female))
    n = len(x)
    y_lda = np.zeros(n)
    y_qda = np.zeros(n)
    for k in range(n):
        v = x[k,:]
        # Linear discriminant scores (shared covariance).
        lda_m = v @ cov_inv @ mu_male - 1/2*(mu_male @ cov_inv @ mu_male) + log_prior
        lda_f = v @ cov_inv @ mu_female - 1/2*(mu_female @ cov_inv @ mu_female) + log_prior
        # Quadratic discriminant scores (per-class covariance).
        dm = v - mu_male
        df = v - mu_female
        qda_m = -1/2*(dm @ cov_male_inv @ dm) + log_prior - half_log_det_m
        qda_f = -1/2*(df @ cov_female_inv @ df) + log_prior - half_log_det_f
        y_lda[k] = 1 if lda_m > lda_f else 2
        y_qda[k] = 1 if qda_m > qda_f else 2
    mis_lda = 1 - np.sum(y_lda == y)/len(y)
    mis_qda = 1 - np.sum(y_qda == y)/len(y)
    return (mis_lda, mis_qda)
if __name__ == '__main__':
    # load training data and testing data (height/weight with 1/2 labels)
    x_train, y_train = util.get_data_in_file('trainHeightWeight.txt')
    x_test, y_test = util.get_data_in_file('testHeightWeight.txt')
    # parameter estimation and visualization in LDA/QDA
    mu_male,mu_female,cov,cov_male,cov_female = discrimAnalysis(x_train,y_train)
    # misclassification rate computation on the held-out test set
    mis_LDA,mis_QDA = misRate(mu_male,mu_female,cov,cov_male,cov_female,x_test,y_test)
    print(mis_LDA)
    print(mis_QDA)
# qda_male = -1/2*np.matmul(np.matmul((XY-mu_male),cov_male_inv),(XY-mu_male).T) \
# + np.log(pi_male) - 1/2*np.log(npl.det(cov_male))
#
# qda_female = -1/2*np.matmul(np.matmul((XY-mu_female),cov_female_inv),(XY-mu_female).T) \
# + np.log(pi_female) - 1/2*np.log(npl.det(cov_female))
#
# lda_male[i,j] = np.matmul(np.matmul(XY,cov_inv),mu_male) - \
# 1/2*np.matmul(np.matmul(mu_male.T,cov_inv),mu_male) + np.log(pi_male)
#
# lda_female[i,j] = np.matmul(np.matmul(XY,cov_inv),mu_female) - \
# 1/2*np.matmul(np.matmul(mu_female.T,cov_inv),mu_female) + np.log(pi_female)
#
|
import json
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from shopback.items.models import Product,ProductSku
def productsku_quantity_view(request):
    """Ajax endpoint: report whether *num* units of the given product SKU
    can be quantity-locked; responds {"success": <bool>} as JSON.

    404s when the (sku_id, product_id) pair does not exist.
    """
    #request.POST
    # NOTE(review): request.REQUEST (merged GET+POST) was removed in
    # Django 1.9 - this view only runs on older Django releases.
    content = request.REQUEST
    product_id = content.get('product_id')
    sku_id = content.get('sku_id')
    # NOTE(review): int('') raises ValueError when 'num' is absent -
    # presumably callers always send it; confirm.
    num = int(content.get('num',''))
    sku = get_object_or_404(ProductSku,pk=sku_id,product__id=product_id)
    lock_success = Product.objects.isQuantityLockable(sku, num)
    resp = {'success':lock_success}
    return HttpResponse(json.dumps(resp),content_type='application/json')
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file
import matplotlib.pyplot as plt
from power.lib import * #@UnusedWildImport
from statistics.lib import * #@UnusedWildImport
# Plot one day of per-device charging activity: habitual vs opportunistic
# charging extents as horizontal bars, with the battery-level trace overlaid.
p = Power.load(verbose=True)
s = Statistic.load(verbose=True)
# Total opportunistic charging seconds per device over the first study day.
device_opportunistic = {}
devices = s.experiment_devices
for d in devices:
    device_opportunistic[d] = 0.0
start_time = s.experiment_days()[0]
# NOTE(review): `datetime` must come from one of the wildcard imports above.
end_time = s.experiment_days()[0] + datetime.timedelta(hours=24)
for extent in p.charging_extents:
    if extent.end() < start_time:
        continue
    if extent.start() > end_time:
        continue
    if extent.is_opportunistic():
        device_opportunistic[extent.device] += (extent.end() - extent.start()).seconds
# Keep only devices with any opportunistic charging, busiest first.
opportunistic_devices = [device for device in device_opportunistic.keys() if device_opportunistic[device] > 0]
opportunistic_devices = sorted(opportunistic_devices, key=lambda k: device_opportunistic[k], reverse=True)
fig = plt.figure()
ax = fig.add_subplot(111)
bottom = len(opportunistic_devices) - 1  # one bar row per device, top-down
# Track which legend labels were already emitted so each appears only once.
legend_done = {'Opportunistic Needed': False,
               'Opportunistic Unneeded': False,
               'Habitual': False,
               'Charge Level': False}
for device in opportunistic_devices:
    for extent in p.filtered_device_extents[device]:
        if not isinstance(extent, ChargingExtent):
            continue
        if extent.end() < start_time:
            continue
        if extent.start() > end_time:
            continue
        # Clip the extent to the 24h window.
        start = extent.start()
        if start < start_time:
            start = start_time
        end = extent.end()
        if end > end_time:
            end = end_time
        bar_start = (start - start_time).seconds / 60.0 / 60.0
        bar_width = (end - start).seconds / 60.0 / 60.0
        if extent.is_opportunistic() and end != end_time:
            if extent.needed:
                legend = 'Opportunistic Needed'
                if not legend_done[legend]:
                    legend_done[legend] = True
                else:
                    legend = '__none__'
                ax.barh(bottom, bar_width, 1.0, bar_start, linewidth=0.0, color='red', label=legend)
            else:
                legend = 'Opportunistic Unneeded'
                if not legend_done[legend]:
                    legend_done[legend] = True
                else:
                    legend = '__none__'
                ax.barh(bottom, bar_width, 1.0, bar_start, linewidth=0.0, color='blue', label=legend)
        else:
            legend = 'Habitual'
            if not legend_done[legend]:
                legend_done[legend] = True
            else:
                legend = '__none__'
            ax.barh(bottom, bar_width, 1.0, bar_start, linewidth=0.0, color='grey', label=legend)
    # Overlay the battery-level trace on this device's row.
    for extent in p.filtered_device_extents[device]:
        if extent.end() < start_time:
            continue
        if extent.start() > end_time:
            continue
        points = []
        for state in extent.states:
            if state.datetime < start_time or state.datetime > end_time:
                continue
            points.append(((state.datetime - start_time).seconds / 60.0 / 60.0, bottom + state.battery_level))
        legend = 'Charge Level'
        if not legend_done[legend]:
            legend_done[legend] = True
        else:
            legend = '__none__'
        ax.plot(*zip(*points), color='black', label=legend)
    bottom -= 1
ax.axis(ymin=0, ymax=(len(opportunistic_devices) + 7), xmax=24.0)
ax.set_yticks([])
ax.set_ylabel("Participants")
ax.set_xlabel("Time (hours after midnight)")
fig.subplots_adjust(left=0.10, right=0.98, top=0.99, bottom=0.06)
fig.set_size_inches(3.33, 8.25)
ax.legend(loc=9, prop={'size': 10})
fig.savefig('graph.pdf')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from ddz_type import CARDS_VALUE2CHAR, CARDS_CHAR2VALUE
def str2ary(cards_str, separator=','):
    """Convert a card string into a count array.

    If *cards_str* contains *separator*, each delimited hand becomes one row
    of a 2-D (n, 15) array; otherwise a single 1-D length-15 array is
    returned.  The character 'P' (pass) contributes no cards.

    Bug fix: the previous test ``cards_str.find(separator) > 0`` treated a
    separator at index 0 as "no separator"; a membership test handles that
    edge case correctly.

    :param cards_str: hand(s) encoded with CARDS_CHAR2VALUE characters
    :param separator: hand delimiter, ',' by default
    :return: np.ndarray of card counts, dtype int32
    """
    hands = cards_str.split(separator) if separator in cards_str else [cards_str]
    n = len(hands)
    counts = np.zeros([n, 15], dtype=np.int32)
    for i, hand in enumerate(hands):
        for ch in hand:
            if ch != 'P':  # 'P' marks a pass: no cards played
                counts[i][CARDS_CHAR2VALUE[ch]] += 1
    return counts[0] if n == 1 else counts
def ary2str(cards):
    """Render a 15-slot count array back into its card-character string.

    :param cards: array of 15 card counts
    :return: each card character repeated per its count, or 'P' for an
        empty hand (pass)
    """
    text = ''.join(CARDS_VALUE2CHAR[v] * cards[v] for v in range(15))
    return text or 'P'
def ary2one_hot(ary):
    """Expand a count array into a 4-row one-hot matrix: row k marks the
    slots holding exactly k+1 cards.

    :param ary: 1-D numpy array of card counts (values 0..4)
    :return: (4, 15) int32 one-hot matrix
    """
    one_hot = np.zeros([4, 15], dtype=np.int32)
    for slot, count in enumerate(ary):
        if count > 0:
            one_hot[count - 1][slot] = 1
    return one_hot
def list2ary(cards):
    """Tally a list of card values (0..14) into a 15-slot count array.

    :param cards: iterable of card value indices
    :return: length-15 int32 count array
    """
    counts = np.zeros(15, dtype=np.int32)
    for value in cards:
        counts[value] += 1
    return counts
|
# Generated by Django 2.2 on 2019-04-23 07:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add detail1/detail2 text fields to the Test model and redefine its
    name field at max_length=100."""
    dependencies = [
        ('TestModel', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='test',
            name='detail1',
            field=models.CharField(default='', max_length=1000),
        ),
        migrations.AddField(
            model_name='test',
            name='detail2',
            field=models.CharField(default='', max_length=1000),
        ),
        migrations.AlterField(
            model_name='test',
            name='name',
            field=models.CharField(max_length=100),
        ),
    ]
|
from board import *
from mario import *
from objects import *
from getch import _getChUnix as getChar
from os import system
from time import sleep
import sys
# --- World setup -----------------------------------------------------------
a = board()
a.makeArray()
move = Mario()
b = Badal()
enemyList = []    # regular enemies
sEnemyList = []   # "smart" enemies that track Mario's position
PtsBrk = []       # point-awarding bricks
a.array = b.generateBadal(a.array)
c = Bricks()
a.array = c.generateBricks(a.array)
d = Coins()
a.array = d.generateCoins(a.array)
e = Pipe()
a.array = e.generatePipe(a.array)
f = Pits()
a.array = f.generatePits(a.array)
# Randomly scatter enemies and point bricks along the level (x in [50, 1990)).
for x in range(50, 1990, 2):
    if randint(0, 100) < 2:
        sEnemyList.append(smartEnemy(x))
for x in range(50, 1990, 2):
    if randint(0, 100) < 3:
        enemyList.append(Enemy(x))
for x in range(50, 1990, 2):
    if randint(0, 100) < 2:
        y = randint(27, 32)
        PtsBrk.append(PointsBrick(x, y))
        a.array = PointsBrick(x, y).generateBricks(a.array)
# --- Main game loop --------------------------------------------------------
while True:
    system('clear')
    a.array = move.spawn(a.array)
    x, y = move.sendCoords()
    d.collision(a.array, x, y)
    # Bug fix: iterate over copies ([:]) - the original removed elements from
    # the list being iterated, which silently skips the element that follows
    # each removal.
    for enemy in enemyList[:]:
        a.array = enemy.moveEnemies(a.array)
        if enemy.checkCollision(x, y) == 1:
            enemy.clearEnemy(a.array, enemy.x)
            enemyList.remove(enemy)
    for enemy in sEnemyList[:]:
        a.array = enemy.moveEnemies(a.array, x, y)
        if enemy.checkCollision(x, y) == 1:
            enemy.clearEnemy(a.array, enemy.x)
            sEnemyList.remove(enemy)
    for brick in PtsBrk[:]:
        if brick.checkCollision(x, y, a.array) == 1:
            a.array = brick.clearBrick(a.array)
            PtsBrk.remove(brick)
    a.getPrint(move.a)
    a.array = move.moveMario(a.array)
|
class Solution:
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        The sequence starts at '1'; each term reads the previous one aloud
        as (run length, digit) pairs, e.g. '1211' -> "one 1, one 2, two 1s"
        -> '111221'.

        Improvement: the hand-rolled scanner duplicated its run-flush logic
        (inside the loop and again after it); itertools.groupby expresses
        the run-length encoding directly.

        :type n: int
        :rtype: str
        """
        from itertools import groupby  # local import keeps the class self-contained
        term = '1'
        for _ in range(n - 1):
            # Each run of equal digits becomes "<run length><digit>".
            term = ''.join(str(len(list(run))) + digit
                           for digit, run in groupby(term))
        return term
# Smoke test: the 4th count-and-say term is "1211".
print(Solution().countAndSay(4))
import matplotlib.pyplot as plt
# Numerically integrate several elementary functions with the forward Euler
# method (y += y' * deltaX) and plot each result in a 3x3 grid.
x=0.01
deltaX= 0.01  # integration step size
yProp=0 # THE PROPORTIONAL FUNCTION MUST BEGIN AT 0
yLinear=5
yQuad=0
yExpPlus=1
yExpMinus=1
yLog= -10
ySin=0
yCos=1
xList = []
yPropList = []
yLinearList = []
yQuadList = []
yExpPlusList = []
yExpMinusList = []
yLogList = []
ySinList = []
yCosList = []
for i in range(1250):
    # THESE ARE THE INTERESTING FEW LINES WHERE THE FUNCTIONS ARE CREATED FROM LEFT TO RIGHT
    # MAKE SURE YOU UNDERSTAND WHAT HAPPENS HERE!
    x = x + deltaX # x IS INCREASED ONE STEP
    yProp = yProp + 1.3 * deltaX
    yLinear = yLinear - 0.4 * deltaX
    yQuad= yQuad + 1.6 * x * deltaX
    yExpPlus = yExpPlus + yExpPlus * deltaX
    yExpMinus = yExpMinus + (-1) * yExpMinus * deltaX
    yLog= yLog + 1/x * deltaX
    # Coupled pair: sin' = cos and cos' = -sin gives the oscillator.
    ySin = ySin + yCos * deltaX
    yCos = yCos + (-ySin) * deltaX
    # THIS IS JUST TO PUT THE MOST RECENT VALUES IN THE LISTS
    xList.append(x)
    yPropList.append(yProp)
    yLinearList.append(yLinear)
    yQuadList.append(yQuad)
    yExpPlusList.append(yExpPlus)
    yExpMinusList.append(yExpMinus)
    yLogList.append(yLog)
    ySinList.append(ySin)
    yCosList.append(yCos)
# 3x3 grid of the integrated curves.
plt.subplot(331)
plt.title('proportional')
plt.plot(xList,yPropList)
plt.subplot(332)
plt.title('linear')
plt.plot(xList,yLinearList)
plt.subplot(333)
plt.title('quadratic')
plt.plot(xList,yQuadList)
plt.subplot(334)
plt.title('exponential')
plt.plot(xList,yExpPlusList)
plt.subplot(335)
plt.title('neg exponential')
plt.plot(xList,yExpMinusList)
#plt.subplot(336)
#plt.title('logarithm')
#plt.plot(xList,yLogList)
plt.subplot(337)
plt.title('sin')
plt.plot(xList,ySinList)
plt.subplot(338)
plt.title('cos')
plt.plot(xList,yCosList)
plt.subplot(339)
plt.title('sin and cos')
plt.plot(yCosList,ySinList)  # phase plot: approximately a circle
plt.tight_layout()
plt.show()
|
import re
import csv
import datetime

# Step 1: convert the tab-separated source into a comma-separated copy.
# (The original opened the .tsv without ever closing it; `with` fixes the
# leaked file handle.)
with open('3_vera.tsv', 'r') as tsv:
    fileContent = tsv.read()
fileContent = re.sub('\t', ',', fileContent)  # convert from tab to comma
with open('3_vera.csv', 'w') as csv_file:
    csv_file.write(fileContent)

# Step 2: copy 3_vera.csv to 4_vera.csv, appending a 'weekday' column.
with open('3_vera.csv', 'r') as csvinput:
    with open('4_vera.csv', 'w') as csvoutput:
        writer = csv.writer(csvoutput, lineterminator='\n')
        reader = csv.reader(csvinput)
        alls = []
        # First row is the header: extend it with the new column name.
        row = next(reader)
        row.append('weekday')
        alls.append(row)
        # TODO: placeholder value — compute the real weekday from each
        # row's date (e.g. datetime.date.fromisoformat(...).weekday()).
        num = 10
        for row in reader:
            row.append(num)
            alls.append(row)
        writer.writerows(alls)
|
from django.db import models
# Create your models here.
class Usuario(models.Model):
    """Application user account.

    NOTE(review): `senha` looks like it stores a password — confirm it is
    stored hashed rather than in plain text before this ships.
    """
    nome = models.CharField(max_length=64, unique=True)  # unique user name
    senha = models.CharField(max_length=120)  # password (see class note)
    descricao = models.CharField(max_length=500)  # free-form description
    admin = models.BooleanField(default=False)  # admin privilege flag
    def __str__(self):
        return self.nome
|
import numpy as np
from math import log2
np.set_printoptions(precision=2)  # compact float display when printing arrays
# information gain = E_start - E
# want highest value
def entropy(values):
    """Return the Shannon entropy (in bits) of a discrete distribution.

    `values` is a sequence of probabilities (expected to sum to 1).  Zero
    probabilities are skipped: lim p->0 of p*log2(p) is 0 by convention,
    whereas the original code raised a math domain error on log2(0).
    Prints "E(values) = entropy" as a side effect (original behavior).
    """
    values = np.array(values)
    total = 0.0
    for v in values:
        if v > 0:  # 0 * log2(0) == 0; log2(0) itself would raise
            total += v * log2(v)
    result = -total
    print("E({}) = {}".format(values, result))
    return result
# Worked examples: print the entropy of several discrete distributions.
entropy([0.5, 0.5])
entropy([1])
entropy([1 / 3, 2 / 3])
entropy([1 / 6, 1 / 6, 1 / 3, 1 / 3])
entropy([1 / 3, 1 / 3, 1 / 3])
entropy([0.2, 0.4, 0.2, 0.2])
# print(0.6 * entropy([2 / 3, 1 / 3]) + 0.4 * entropy([0.5, 0.5]))
def gini(values):
    """Return the Gini impurity: 1 minus the sum of squared probabilities."""
    return 1 - sum(v * v for v in values)
# print(gini([0.5, 0.5]))
|
# Two-sum: find indices of the first pair in `nums` summing to `target`.
nums = [0, 1, 4, 2, 0]
target = 5
# value -> index of its first occurrence.  Renamed from `map`, which
# shadowed the builtin; also replaces `nums.__len__()` / `.keys()` lookups.
seen = {}
ret = None  # stays None when no pair exists (was left unbound before)
for i, value in enumerate(nums):
    complement = target - value
    if complement in seen:
        # covers the value == complement case too: its first index is stored
        ret = [seen[complement], i]
        break
    seen[value] = i
|
import pytest
from rif.util import rcl
@pytest.mark.skipif('not rcl.HAVE_PYROSETTA')
def test_import_pyrosetta():
    """Smoke test: the optional pyrosetta/rosetta packages are importable."""
    import pyrosetta
    import rosetta
@pytest.mark.skipif('not rcl.HAVE_PYROSETTA')
def test_pyrosetta_init():
    """init_check is idempotent, but re-init with different options must fail."""
    rcl.init_check()
    rcl.init_check() # second call ignored
    with pytest.raises(rcl.ReInitError) as e:
        # NOTE(review): '-score:weighs' looks like a typo for '-score:weights',
        # but any option string that differs should still trigger ReInitError
        # — confirm against rcl.
        rcl.init_check('-score:weighs foo') # change options fails
    assert 'previous' in str(e)
    # NOTE(review): 'thiscall' may be meant as 'this call' in the error text
    # — verify the exact wording produced by rcl.
    assert 'thiscall' in str(e)
@pytest.mark.skipif('not rcl.HAVE_PYROSETTA')
def test_make_ideal_res():
    """make_res builds a proper pyrosetta Residue for a 3-letter code."""
    rcl.init_check()
    res = rcl.make_res('ALA')
    assert isinstance(res, rcl.core.conformation.Residue)
    assert res.name3() == 'ALA'
@pytest.mark.skipif('not rcl.HAVE_PYROSETTA')
def test_atomic_charge_deviation():
    # TODO: test not implemented yet — placeholder kept so it shows in reports
    pass
|
from typing import List, Callable
from hummingbot.client.config.config_helpers import get_connector_class
from hummingbot.connector.exchange.paper_trade.market_config import MarketConfig
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import PaperTradeExchange
from hummingbot.client.settings import CONNECTOR_SETTINGS
def get_order_book_tracker_class(connector_name: str) -> Callable:
    """Resolve a connector's OrderBookTracker class by naming convention.

    The module is ``<base_name>_order_book_tracker`` under the connector's
    package, and the class is its CamelCase form.  Raises a generic
    ``Exception`` (same type callers already catch) when it cannot be found;
    the original swallowed *all* errors with a bare ``pass``, which hid the
    real cause — the import/lookup failure is now chained via ``from``.
    """
    conn_setting = CONNECTOR_SETTINGS[connector_name]
    module_name = f"{conn_setting.base_name()}_order_book_tracker"
    class_name = "".join([o.capitalize() for o in module_name.split("_")])
    try:
        mod = __import__(f'hummingbot.connector.{conn_setting.type.name.lower()}.{conn_setting.base_name()}.'
                         f'{module_name}',
                         fromlist=[class_name])
        return getattr(mod, class_name)
    except (ImportError, AttributeError) as err:
        raise Exception(
            f"Connector {connector_name} OrderBookTracker class not found"
        ) from err
def create_paper_trade_market(exchange_name: str, trading_pairs: List[str]):
    """Build a PaperTradeExchange backed by the exchange's order book tracker.

    The tracker is instantiated with the given trading pairs (plus any
    domain parameter the connector settings add).
    """
    obt_class = get_order_book_tracker_class(exchange_name)
    conn_setting = CONNECTOR_SETTINGS[exchange_name]
    obt_params = {"trading_pairs": trading_pairs}
    return PaperTradeExchange(obt_class(**conn_setting.add_domain_parameter(obt_params)),
                              MarketConfig.default_config(),
                              get_connector_class(exchange_name))
|
#! /usr/bin/env python3
from joint import Joint
import sys
import time
def parse(argFile = None):
    """Parse a BVH (Biovision Hierarchy) file into a tree of Joint objects.

    First builds the skeleton from the HIERARCHY section, then feeds every
    frame line of the MOTION section through applyTransformations so each
    joint collects its per-frame channel values.  Returns the root Joint,
    or None when no file name is given.
    """
    if(argFile == None):
        print("No file was given to parse")
        return
    indexOfpos = 0  # line index where the motion/frame data starts
    with open(argFile, 'r') as argument:
        data = argument.read()
    lines = data.split("\n")
    currentJoint = Joint()
    root = Joint()
    compteur = 0
    transformationIndex = 0  # running offset into each frame's channel list
    MayaNumber = 1  # sequential id assigned to joints (Maya-side naming)
    for index, line in enumerate(lines):
        compteur += 1
        if(line == "HIERARCHY"):
            # print("Parsing the hierarchical model : ")
            continue
        if("ROOT" in line):
            # print("Root")
            jointName = line.split(' ')[1]
            root = Joint(jointName, True)
            currentJoint = root
            currentJoint.setNumMaya(MayaNumber)
            MayaNumber += 1
            continue
        if("OFFSET" in line):
            # print("Reading offset")
            lineData = line.split(' ')
            currentJoint.setOffset(lineData[1:])
            continue
        if("CHANNELS" in line):
            lineData = line.split(' ')
            if(lineData[1] == "6"):
                # print("Reading Root channel")
                # root carries 6 channels (3 translation + 3 rotation)
                currentJoint.setTransformationIndex(transformationIndex)
                # print(currentJoint.transformationIndex)
                transformationIndex += 6
                continue
            if(lineData[1] == "3"):
                # print("Reading joint channel")
                # ordinary joints carry 3 rotation channels
                currentJoint.setTransformationIndex(transformationIndex)
                transformationIndex += 3
                continue
        if("JOINT" in line):
            # print("ltoes" in currentJoint.name)
            # print("Joint")
            jointName = line.split(' ')[1]
            joint = Joint(jointName, False)
            # if(currentJoint == None):
            # print("Stooooooooooooop")
            joint.setParent(currentJoint)
            currentJoint.addChild(joint)
            currentJoint = joint
            currentJoint.setNumMaya(MayaNumber)
            MayaNumber += 1
            continue
        if("End" in line):
            # print("End Joint")
            # synthesize a leaf joint for the BVH "End Site" marker
            jointName = str(currentJoint.name + "-Child")
            joint = Joint(jointName, False)
            currentJoint.addChild(joint)
            joint.setParent(currentJoint)
            currentJoint = joint
            MayaNumber += 1
            continue
        if("}" in line):
            # closing brace: climb back up to the parent joint
            currentJoint = currentJoint.parent
        if("MOTION" in line):
            # frame data begins 3 lines after the MOTION keyword
            # (skips the "Frames:" and "Frame Time:" lines)
            indexOfpos = index + 3
    # print("Arbre fini")
    # return root
    for index, line in enumerate(lines[indexOfpos:]):
        transformations = line.split(" ")
        if(not transformations == ['']):  # skip blank lines
            transformations = [item for item in transformations]
            # print(len(transformations))
            applyTransformations(root, transformations)
    return root
def applyTransformations(joint, transformations):
    """Push one frame's channel values down the skeleton, depth-first.

    Leaf joints (child == None) carry no channel data, so recursion stops
    there.
    """
    if joint.child != None:
        joint.getTransformation(transformations)
        for sub_joint in joint.child:
            applyTransformations(sub_joint, transformations)
def printH(joint):
    """Debug helper: print the joint hierarchy depth-first.

    For each joint prints its name, Maya number, its children's names and
    rotation-sample counts; for the root also the translation/rotation
    sample counts.
    """
    if(joint.child == None):
        print("go back !")
        return
    print(joint.name, end=' ')
    print(" ==> joint" + str(joint.numMaya))
    if(joint.isRoot):
        # only the root carries translation channels
        print(len(joint.translation), end=' ')
        print(len(joint.rotation), end=' \n')
    print(" Has children : ", end = '')
    # for child in joint.child:
    # print(child.name + " ", end = '')
    # print("\n")
    liste = joint.child
    # for joints in liste:
    # print(joints.name + " " + str(joints.transformationIndex) , end=' ')
    for joints in liste:
        print(joints.name, end=' ')
        print(len(joints.rotation))
    print('\n\n')
    for child in joint.child:
        printH(child)
|
from typing import List
import re
from .base_factory import BaseFactory
# Integer literals: a digit run not followed by "float", "_" or another digit
# (so digit runs already turned into tokens are not re-matched).
# The original class [0-90123456789] was redundant — "0-9" already covers
# every listed digit — and "\_" is just "_".
INT_PATTERN = re.compile(r"[0-9]+(?!float|_|\d)")
# Float literals: digits '.' digits, with no adjacent extra dot/digit on
# either side (rejects "1.2.3" and partial matches inside longer numbers).
FLOAT_PATTERN = re.compile(
    r"(?<!\.|\d)[0-9]+\.[0-9]+(?!\.|\d)",
)
def gen_float_token_with_digit(floats: List[str], token: str = "_{}float{}_"):
    """Turn each float literal into a token carrying its digit counts.

    E.g. "12.345" with the default template becomes "_2float3_" — the two
    slots are filled with the lengths of the integer and fractional parts.
    """
    result = []
    for literal in floats:
        parts = INT_PATTERN.findall(literal)
        result.append(token.format(len(parts[0]), len(parts[1])))
    return result
def gen_int_token_with_digit(ints: List[str], token: str = "_{}int_"):
    """Turn each integer literal into a token carrying its digit count.

    E.g. "123" with the default template becomes "_3int_".
    """
    return [token.format(len(int_str)) for int_str in ints]
def sub_token_with_value_sequentially(
    sentence: str,
    token: str,
    value_list: List[str],
) -> str:
    """Replace successive occurrences of `token` in `sentence` with the
    corresponding entries of `value_list`, in order.

    Both the token and its whitespace-stripped form are matched, so
    space-padded token templates still split correctly.  Raises ValueError
    when the number of occurrences does not match len(value_list).
    """
    splitter = re.compile(
        '{}|{}'.format(
            token,
            token.strip(),
        ),
    )
    segments = splitter.split(sentence)
    if len(segments) != len(value_list) + 1:
        raise ValueError(
            "Number of tokens in sentence should be equal to that of values",
            "original sentence = {}".format(sentence),
            "token = {}".format(token),
            "value_list = {}".format(value_list),
        )
    # Interleave: seg0, v0, seg1, v1, ..., seg[-1]
    pieces = []
    for value, segment in zip(value_list, segments):
        pieces.append(segment)
        pieces.append(value)
    pieces.append(segments[-1])
    return ''.join(pieces)
# Supported token templates.  Each entry maps a template string to the regex
# that finds matching literals; templates with "{}" slots additionally carry
# a "gen_token_with_digit" helper that bakes the literal's digit count(s)
# into the emitted token.  The space-padded variants allow substituting the
# token together with its surrounding whitespace.
CASES = {
    "_int_": {
        "pattern": INT_PATTERN,
    },
    "_float_": {
        "pattern": FLOAT_PATTERN,
    },
    "_{}int_": {
        "pattern": INT_PATTERN,
        "gen_token_with_digit": gen_int_token_with_digit,
    },
    "_{}float{}_": {
        "pattern": FLOAT_PATTERN,
        "gen_token_with_digit": gen_float_token_with_digit,
    },
    " _int_ ": {
        "pattern": INT_PATTERN,
    },
    " _float_ ": {
        "pattern": FLOAT_PATTERN,
    },
    " _{}int_ ": {
        "pattern": INT_PATTERN,
        "gen_token_with_digit": gen_int_token_with_digit,
    },
    " _{}float{}_ ": {
        "pattern": FLOAT_PATTERN,
        "gen_token_with_digit": gen_float_token_with_digit,
    },
}
class NumberToken(BaseFactory):
    """Normalizer that replaces integer/float literals with placeholder tokens.

    `token` selects one of the CASES templates; templates containing "{}"
    additionally encode the digit counts of each replaced literal, which
    makes the substitution reversible via `denormalize`.
    """
    def __init__(
        self,
        token: str,
        denormalizable: bool = True,
        name: str = None,
    ) -> None:
        super().__init__(name=name, denormalizable=denormalizable)
        if token not in CASES:
            raise KeyError(
                "This case [{}] is not handled".format(token),
                "Handle cases {} only".format(CASES.keys()),
            )
        self.token = token
    def normalize(
        self,
        sentence: str,
    ) -> (str, dict):
        """Replace every literal matching this case's pattern with the token.

        Returns (revised_sentence, meta); meta is the mapping needed by
        `denormalize` to restore the originals, or None when this instance
        is not denormalizable.
        """
        revised_sentence = CASES[self.token]["pattern"].sub(
            repl=self.token,
            string=sentence,
        )
        value_list = CASES[self.token]["pattern"].findall(string=sentence)
        if "gen_token_with_digit" not in CASES[self.token]:
            # plain (non-digit-counting) template: one token fits all values
            if not self.denormalizable:
                return revised_sentence, None
            return revised_sentence, {self.token: value_list}
        #### token with digits ####
        # rewrite the generic tokens into digit-count-specific ones, in order
        tokens_with_digit = CASES[self.token]["gen_token_with_digit"](
            value_list,
            token=self.token,
        )
        revised_sentence = sub_token_with_value_sequentially(
            sentence=revised_sentence,
            token=self.token,
            value_list=tokens_with_digit,
        )
        if not self.denormalizable:
            return revised_sentence, None
        # group the original literal values under their specific token
        meta = {}
        for token, value in zip(tokens_with_digit, value_list):
            if token in meta:
                meta[token].append(value)
            else:
                meta[token] = [value]
        return revised_sentence, meta
    def denormalize(
        self,
        sentence: str,
        meta: dict = None,
    ) -> str:
        """Restore the literal values recorded in `meta` back into `sentence`."""
        if meta is None:
            meta = {}
        if (not self.denormalizable) or (len(meta) == 0):
            # Case1: self.denormalizable = False
            return sentence
        for token, values in meta.items():
            sentence = sub_token_with_value_sequentially(
                sentence=sentence,
                token=token,
                value_list=values,
            )
        return sentence
|
# Dump every line of the parent directory's .config file, then print the
# first character of its fifth line.
with open('../.config', 'r') as config:
    vals = config.readlines()
for val in vals: print(val)
# NOTE(review): assumes ../.config has at least 5 lines — confirm upstream.
print(vals[4][0])
|
# Numeric identifiers for the supported entry categories.
BULLET_TYPE = 1
FOOD_TYPE = 2
TRAVEL_TYPE = 3
SPORT_TYPE = 4
# (id, label) pairs — presumably a Django-style `choices` tuple; verify
# against the model field that consumes it.
TYPES = (
    (BULLET_TYPE, 'bullet'),
    (FOOD_TYPE, 'food'),
    (TRAVEL_TYPE, 'travel'),
    (SPORT_TYPE, 'sport')
)
from discord.ext import commands
import discord
import asyncio
import requests
import time
from discord import Member
from random import randint
from coinbase_commerce import Client
import os
from bit import Key
class DealCog(commands.Cog):
    """Escrow "middleman" cog: holds a buyer's BTC payment until the seller
    delivers, then pays the seller minus a 2.5% service fee.

    Review fixes applied (behavior-visible bugs only):
    * `deal` returns after a confirmation timeout (previously fell through
      and crashed on an unbound `msg`).
    * `deal` returns after the ❌ reaction deletes the channel (previously
      kept operating on a deleted channel).
    * the one-hour fee is read from the fees dict `resp`, not the reaction
      tuple `response` (which raised TypeError).
    * the 30-minute payment-timeout counter is actually incremented.
    * the payout goes to the seller's collected address instead of being
      overwritten with the escrow wallet's own address.
    """
    def __init__(self, bot):
        self.bot = bot
        # SECURITY: private key hard-coded in source — move to an
        # environment variable / secret store and rotate this key.
        self.key = Key('5J56DguGRz1hpbzT9E5KFMhuMZVf16EfGKYB5JkM2eKgsHRsSJE')
        #self.client = Client(api_key=coinbase_api_key)

    def check_addr(self):
        """Return the blockonomics transaction history for the escrow address."""
        # SECURITY: API bearer token hard-coded in source — move to config.
        headers = {
            'Authorization': 'Bearer Ugxlr1GIMG5Evu0h9d92oWGho0uwEpg1HW0c3o8Ri1U',
        }
        data = '{"addr":"1mid3EoZEkDbHsNvNKc9UscXSn2ZgGK2Q"}'
        response = requests.post('https://www.blockonomics.co/api/searchhistory', headers=headers, data=data)
        return response.json()

    @commands.command()
    async def deal(self, ctx, member : Member, *, rest):
        """Run an escrowed deal of $`rest` between ctx.author and `member`."""
        if member.id == ctx.author.id:
            await ctx.send('You cannot middleman a deal you have made with yourself.')
            return
        guild = ctx.guild
        print(rest)
        auth = ctx.author
        try:
            rest = int(rest)
        except ValueError:  # narrowed from a bare except
            await ctx.send(f'{rest} is not a valid number')
            return
        # Private channel visible only to the two parties and the bot.
        overwrites = {
            guild.default_role : discord.PermissionOverwrite(read_messages=False),
            guild.me: discord.PermissionOverwrite(read_messages=True, read_message_history=True),
            member: discord.PermissionOverwrite(read_messages=True, read_message_history=True),
            auth : discord.PermissionOverwrite(read_messages=True, read_message_history=True)
        }
        embed = discord.Embed(title='New deal', description=f'{member.mention} the user {ctx.author.mention} is trying to start a deal\nPlease type either `confirm` or `deny`', colour=randint(0, 0xffffff))
        await ctx.send(embed=embed)
        try:
            msg = await self.bot.wait_for('message', check=lambda message: message.author == member, timeout=60)
        except asyncio.TimeoutError:
            await ctx.send('User failed to respond within 60 seconds. Closing deal')
            return  # FIX: previously fell through with `msg` unbound
        if msg.content.lower() == 'confirm':
            #support = discord.ChannelPermissions(target=discord.utils.get(ctx.message.server.roles, name=config_setup.support_role), overwrite=support_perms)
            channel = await guild.create_text_channel(f'{auth.id + member.id}', overwrites=overwrites)
            await ctx.send(f'Channel set up : {channel.mention}')
            deal_embed = discord.Embed(title='Deal setup', description=f'A deal for ${rest} has been setup, if you are happy for me to hold this in escrow react with ✅ to cancel this deal please react with ❌', colour=randint(0, 0xffffff))
            sent = await channel.send(embed=deal_embed)
            await sent.add_reaction('✅')
            await sent.add_reaction('❌')
            def check(reaction, user):
                # ignore the bot's own seed reactions (its user id)
                return reaction.message.id == sent.id and user.id != 768183904523649044
            response = await self.bot.wait_for('reaction_add', check=check)
            print(response)
            print(response[0].emoji)
            if response[0].emoji == '❌':
                await channel.delete()
                return  # FIX: stop instead of continuing in a deleted channel
            if response[0].emoji == '✅':
                embed = discord.Embed(title='Who is who?', description='If you are the seller please react to this with 💼 to cancel the deal react with ❌')
                sent = await channel.send(embed=embed)
                await sent.add_reaction('💼')
                await sent.add_reaction('❌')
                def check(reaction, user):
                    return user.id == auth.id or user.id == member.id
                resp = await self.bot.wait_for('reaction_add', check=check)
                if resp[0].emoji == '❌':
                    await channel.send('Closing deal')
                    await channel.delete()
                    return
                elif resp[0].emoji == '💼':
                    # whoever reacted with 💼 is the seller
                    if resp[1].id == auth.id:
                        seller = auth
                        buyer = member
                    else:
                        seller = member
                        buyer = auth
                embed = discord.Embed(title='Just checking', description=f'Just checking that {seller.mention} is the seller and that {buyer.mention} is the buyer\n'\
                    'React with ✅ to confirm this info, React with ❌ if this is incorrect, React with 🛑 to cancel the deal ')
                sent = await channel.send(embed=embed)
                await sent.add_reaction('✅')
                await sent.add_reaction('❌')
                await sent.add_reaction('🛑')
                def check(reaction, user):
                    return user.id != 768183904523649044
                response = await self.bot.wait_for('reaction_add',check=check)
                if response[0].emoji == '✅':
                    pass
                elif response[0].emoji == '❌':
                    # roles were stated backwards: swap seller and buyer
                    if seller.id == member.id:
                        seller = auth
                        buyer = member
                    else:
                        seller = member
                        buyer = auth
                else:
                    await channel.send('Cancelling deal')
                    await channel.delete()
                    return
                #resp = 'https://api.coinmarketcap.com/v1/ticker/bitcoin/'
                # Current BTC/USD price from coingecko.
                headers = {'accept': 'application/json',}
                params = (('ids', 'bitcoin'),('vs_currencies', 'usd'),)
                btc_price = requests.get('https://api.coingecko.com/api/v3/simple/price', headers=headers, params=params)
                btc_price = btc_price.json()
                btc_price = btc_price['bitcoin']['usd']
                satoshi_price = btc_price * 100_000_000
                # Recommended network fees (satoshis/byte).
                r= requests.get('https://bitcoinfees.earn.com/api/v1/fees/recommended')
                resp = r.json()
                embed = discord.Embed(title='Fees', description='We get our fees from bitcoinfees.com and we expect the size of the transaction to be roughly between 300 and 400 bytes so we use this to calculate the fee in usd. Though you can set a custom fee it is not recommended and if the fees dont come through for a long time then we cannot assist you with that.\n\n'\
                    f'That being said, here are the current fees\n 🅰️ Fastest : {resp["fastestFee"]}s/b = **${round(btc_price * ((resp["fastestFee"] / 100_000_000)*400),2)}**\n'\
                    f'🅱️ Half hour fee : {resp["halfHourFee"]}s/b = **${round(btc_price * ((resp["halfHourFee"] / 100_000_000) * 400),2)}**\n'\
                    f'⏭️ One hour fee {resp["hourFee"]}s/b = **${round(btc_price * ((resp["hourFee"] / 100_000_000) * 400), 2)}**\n'\
                    f'👌 Custom fee')
                sent = await channel.send(embed=embed)
                await sent.add_reaction('🅰️')
                await sent.add_reaction('🅱️')
                await sent.add_reaction('⏭️')
                await sent.add_reaction('👌')
                def check(reaction, user):
                    return user.id != 768183904523649044
                response = await self.bot.wait_for('reaction_add',check=check)
                if response[0].emoji == '🅰️':
                    fee = resp['fastestFee']
                elif response[0].emoji == '🅱️':
                    fee = resp['halfHourFee']
                elif response[0].emoji == '⏭️':
                    fee = resp['hourFee']  # FIX: was response['hourFee'] (reaction tuple) -> TypeError
                elif response[0].emoji == '👌':
                    await channel.send('Please send the amount in satoshis/byte `https://www.buybitcoinworldwide.com/fee-calculator/` look here for reference, please only send a number')
                    def check(message):
                        try:
                            int(message.content)
                        except ValueError:
                            return False
                        return True
                    resp2 = await self.bot.wait_for('message', check=check)
                    fee = int(resp2.content)
                # NOTE(review): `fee` stays unbound if none of the four emoji
                # matched — confirm the reaction check restricts the emoji set.
                #https://bitcoinfees.earn.com/api/v1/fees/recommended
                fee_btc = (fee / 100_000_000) * 400  # assume ~400-byte transaction
                print(fee_btc, type(fee_btc))
                fee_usd = btc_price * fee_btc
                req_money = discord.Embed(
                    title='Send funds!',
                    description=f'{buyer.mention} Please send **__exactly__** **${rest}** + fee set above **(${round(int(rest)+fee_usd, 2)})** or **{(1/btc_price) * (rest+3) + fee_btc}** BTC to **{self.key.address}**\n'\
                    'We are monitoring for the transaction now. We will stop waiting after 30 minutes. If you have not sent exactly the correct amount it will not be detected (We check for new transactions every 5 minutes, so if its a bit slow, dont worry).\n If you have made an error please join the support server: https://discord.gg/9yuDE5u',
                    colour=randint(0, 0xFFFFFF)
                )
                #r = requests.get(f'https://api.blockcypher.com/v1/btc/main/txs/{tx}')
                await channel.send(embed=req_money)
                # Poll the escrow address every 5 minutes for the payment.
                cnt = 0
                done = False
                while 1:
                    transactions = self.check_addr()
                    print(transactions)
                    print(transactions["pending"])
                    if len(transactions["pending"]) > 0:
                        tx = transactions["pending"][0]["txid"]
                        for i in transactions["pending"]:
                            if i["value"] < 0:
                                value = i["value"] * -1
                                print(value/ 100_000_000)
                                # NOTE(review): the chained comparisons below
                                # (a >= b <= c) look unintended — confirm the
                                # expected amount window.
                                if value / 100_000_000 >= (1/btc_price) * (rest+fee_btc) <= (1/btc_price * ((rest+3) + 10)):
                                    embed = discord.Embed(title=f'New transaction detected of {i["value"] /100_000_000} BTC recieved', description='We will now wait for the funds to be confirmed at least once')
                                    await channel.send(embed=embed)
                                    done = True
                            if i["value"] / 100_000_000 >= (1/btc_price) * (rest+3) <= (1/btc_price * ((rest+3) + 10)):
                                embed = discord.Embed(title=f'New transaction detected of {i["value"] /100_000_000} BTC recieved', description='We will now wait for the funds to be confirmed at least once')
                                await channel.send(embed=embed)
                                done = True
                        if done:
                            break
                    elif cnt == 6:
                        embed = discord.Embed(title='It has been 30 minutes', description='No transaction detected in 30 minutes, cancelling deal.')
                        await channel.send(embed=embed)
                        await asyncio.sleep(30)
                        await channel.delete()
                        return
                    await asyncio.sleep(300)
                    print('here')
                    cnt += 1  # FIX: counter was never incremented, so the 30-minute timeout never fired
                # Wait (up to 2 hours) for the payment to get one confirmation.
                cnt = 0
                while 1:
                    r = requests.get(f'https://www.blockonomics.co/api/tx_detail?txid={tx}')
                    rjs = r.json()
                    print(rjs)
                    if str(rjs["status"]) != "0" and str(rjs['status']) != 'Unconfirmed':
                        print(rjs["status"])
                        embed = discord.Embed(title='Fees have been confirmed', description='The fees have at least one confirm, the deal can proceed')
                        await channel.send(embed=embed)
                        break
                    await asyncio.sleep(300)
                    cnt += 1
                    if cnt == 24:
                        embed = discord.Embed(title='Timed out', description='It has been 2 hours and no confirmations, I will assume they have been double spent. If this is incorrect please contact the support server here : https://discord.gg/9yuDE5u', colour=randint(0, 0xFFFFFF))
                        await channel.send(embed=embed)
                        return
                # Goods hand-over: buyer must confirm receipt.
                embed = discord.Embed(title='Transfer the goods', description=f'{seller.mention} Please could you send the instructions to secure the account. Once you have secured {buyer.mention} please type `confirm` if something has gone wrong please join the support server and open a ticket @ https://discord.gg/9yuDE5u', colour=0x00FF00)
                await channel.send(embed=embed)
                def check(message):
                    return message.author.id == buyer.id and message.content == 'confirm'
                resp = await self.bot.wait_for('message', check=check)
                await channel.send(f'{seller.mention} please send your btc address **__NOTHING EXTRA__**')
                def check(message):
                    return message.author.id == seller.id
                resp = await self.bot.wait_for('message', check=check)
                addy = resp.content
                # Wait for the incoming payment to be fully confirmed.
                while 1:
                    r = requests.get(f'https://www.blockonomics.co/api/tx_detail?txid={tx}')
                    rjs = r.json()
                    print(rjs)
                    if rjs['status'] != 'Confirmed':
                        await channel.send(f'The fees have not been fully transferred yet, current status : {rjs["status"]} the funds will be released to {addy} once it is fully confirmed')
                    else:
                        break
                    await asyncio.sleep(300)
                # Pay out the seller minus the 2.5% service fee.
                rest = int(rest)
                my_fee = (rest / 100) * 2.5
                to_send = rest - my_fee
                # FIX: previously `addy` was overwritten here with the escrow
                # wallet's own address, so the seller was never paid.
                output = [(addy, to_send, 'usd')]
                money_out = self.key.send(output, fee=fee)
                await channel.send(f'Thank you for using middler bot, ${to_send} has been sent to {addy} view it here : https://www.blockchain.com/btc/tx/{money_out} if the transaction fee is massively under what you paid please contact support server @ https://discord.gg/9yuDE5u')
                await channel.send(f'This bot was made by xo#0111 :) ')
                log = self.bot.get_channel(768937585434427404)
                await log.send(f'New deal\nSeller : {seller.id}\nBuyer: {buyer.id}\nValue: {rest}\nFINAL_TX : {money_out}')
                await channel.send('This channel will be deleted in 60 seconds')
                await asyncio.sleep(60)
                await channel.delete()
        #r = self.key.create_transaction([addy, int(rest), 'usd'], fee=22000)
        #Ugxlr1GIMG5Evu0h9d92oWGho0uwEpg1HW0c3o8Ri1U
        #channel = await guild.create_text_channel('secret', overwrites=overwrites)
        #await channel.send('Deal has been setup')
def setup(bot):
    """discord.py extension entry point: register the DealCog."""
    bot.add_cog(DealCog(bot))
|
import sys
def cascade(index, true_index, reach_list):
    """Return how far the domino at `index` reaches when it topples.

    `index` is a negative offset from the end of the list and `true_index`
    the equivalent non-negative position.  NOTE(review): this assumes
    entries to the right of `index` have already been rewritten with their
    own cascade reach by iterate_dominoes (right-to-left order) — confirm.
    """
    current_domino = reach_list[index]
    if current_domino == 0:
        # zero-height domino topples nothing beyond itself
        return true_index
    end = index + current_domino
    if end > 0:
        # reach extends past the end of the row: consider everything to the right
        reach_values = reach_list[index:] + [current_domino + true_index]
    else:
        # only the dominoes within direct reach, plus this domino's own reach
        reach_values = reach_list[index:index + current_domino + 1] + [current_domino + true_index]
    result = max(reach_values)
    return result # return greatest cascade+offset
def iterate_dominoes(num_dominoes, domino_heights):
    """Rewrite domino_heights in place with each domino's toppling count.

    First pass runs right-to-left so cascade() can reuse already-computed
    reaches; second pass converts absolute reach positions into counts
    relative to each domino's own position.  Returns the mutated list.
    """
    for i in range(num_dominoes):
        x = (i + 1) * -1 # x provides a neg int so that indexes start at end
        true_index = num_dominoes + x
        domino_heights[x] = cascade(x, true_index, domino_heights)
    for i in range(num_dominoes):
        domino_heights[i] -= i
    return domino_heights
# retrieve system arguments
# Read the domino count and the heights line from stdin; the left-to-right
# cascade uses the list as-is, the right-to-left cascade uses its reverse.
num_dominoes = int(sys.stdin.readline())
dominoes = sys.stdin.readline()
right = [int(x) for x in dominoes.split()]
left = right[::-1]
# get results from both cascades
result_right = iterate_dominoes(num_dominoes, right)
result_left = iterate_dominoes(num_dominoes, left)[::-1]
# for i in
# output
write = sys.stdout.write
write(" ".join(str(x) for x in result_right) + '\n')
write(" ".join(str(x) for x in result_left))
|
import json
from shutil import make_archive
from distutils.dir_util import copy_tree
from os import path
from .general import create_dir, get_run_date_times
from .asci_extractor import extract_water_level_grid
def prepare_flo2d_run(run_path, model_template_path, flo2d_lib_path):
    """Assemble a ready-to-run FLO-2D model directory under run_path.

    Copies, in order, the FLO-2D library files, the model template, and
    the run's own input files into <run_path>/model (later copies may
    overwrite earlier ones).
    """
    model_path = path.join(run_path, 'model')
    create_dir(model_path)
    sources = (
        flo2d_lib_path,                   # flo2d library files
        model_template_path,              # model template
        path.join(run_path, 'input'),     # this run's input files
    )
    for source in sources:
        copy_tree(source, model_path)
def prepare_flo2d_output(run_path):
    """Archive the run's output directory and return the archive file name.

    Returns 'output.zip' when the archive already exists or was just
    created from <run_path>/output; returns None when the output directory
    is not present yet (model still running).
    """
    archive_base = 'output'
    archive_name = archive_base + '.zip'
    # Idempotent: a previous call may already have produced the archive.
    if path.exists(path.join(run_path, archive_name)):
        return archive_name
    source_dir = path.join(run_path, archive_base)
    if not path.exists(source_dir):
        return None  # output not ready yet
    make_archive(path.join(run_path, archive_base), 'zip', source_dir)
    return archive_name
def prepare_flo2d_waterlevel_grid_asci(run_path, grid_size):
    """Extract the run's water-level ASCII grids and return the archive name.

    Idempotent: returns immediately when asci_grid.zip already exists;
    otherwise extracts the grids for the run's date range into
    <run_path>/asci_grid and zips that directory.
    """
    asci_grid_zip = 'asci_grid.zip'
    # Check whether asci_grid.zip is already created, if so just return the asci_grid_zip
    if path.exists(path.join(run_path, asci_grid_zip)):
        return asci_grid_zip
    asci_grid_dir = path.join(run_path, 'asci_grid')
    create_dir(asci_grid_dir)
    base_dt, run_dt = get_run_date_times(run_path)
    extract_water_level_grid(run_path, grid_size, base_dt, run_dt, asci_grid_dir)
    make_archive(asci_grid_dir, 'zip', asci_grid_dir)
    return asci_grid_zip
def prepare_flo2d_run_config(input_path, run_name, base_dt, run_dt):
    """Write run-config.json describing this model run into input_path.

    Fixes: the file was opened in 'w+' although it is never read back, and
    was explicitly closed inside the `with` block (redundant); json.dump
    now writes directly to the file object.
    """
    run_config = {
        'run-name': run_name,
        'base-date-time': base_dt,
        'run-date-time': run_dt
    }
    with open(path.join(input_path, 'run-config.json'), 'w') as config_file:
        json.dump(run_config, config_file)
|
import pygame
from constants import SQUARE_SIZE, WIN, BLACK, WHITE, W_KING, B_PAWN, B_ROOK, B_KNIGHT, B_BISHOP, B_QUEEN, B_KING, W_PAWN, W_ROOK, W_KNIGHT, W_BISHOP, W_QUEEN, CONTACT
class Piece:
    """A chess piece plus its pseudo-legal attack generation.

    The ``*_attack`` / ``knight_*`` methods fill a ``moves`` dict mapping a
    destination (row, col) either to CONTACT (an empty square the piece can
    reach) or to a one-element list holding the square of a capturable enemy
    piece.  The original eight ray methods and eight knight methods were
    near-identical copies; they now delegate to the private helpers
    ``_ray_attack`` and ``_knight_attack`` — every public method keeps its
    original name and signature.
    """

    def __init__(self, name, row, col, colour):
        self.name = name
        self.row = row
        self.col = col
        self.colour = colour
        self.enemy_colour = self.get_enemy_colour(colour)
        if name == 'Pawn':
            # pawns track whether their double-step first move is available
            self.pawn_first_move = True
        self.calc_pos()

    def calc_pos(self):
        """Refresh pixel coordinates; the offsets centre the sprite in its square."""
        self.x = SQUARE_SIZE * self.col + SQUARE_SIZE / 5.3
        self.y = SQUARE_SIZE * self.row + SQUARE_SIZE / 3

    def move(self, row, col):
        """Relocate the piece to (row, col) and update its pixel position."""
        self.row = row
        self.col = col
        self.pawn_first_move = False
        self.calc_pos()

    def get_enemy_colour(self, colour):
        """Return the opposing side's colour constant."""
        if colour == WHITE:
            return BLACK
        return WHITE

    def draw(self, win):
        """Blit this piece's sprite onto the display.

        NOTE(review): blits to the module-level WIN, as the original did —
        the ``win`` argument is unused but kept for interface compatibility.
        """
        # name -> (black sprite, white sprite); replaces the if/elif chain
        sprites = {
            'Pawn': (B_PAWN, W_PAWN),
            'Rook': (B_ROOK, W_ROOK),
            'Knight': (B_KNIGHT, W_KNIGHT),
            'Bishop': (B_BISHOP, W_BISHOP),
            'Queen': (B_QUEEN, W_QUEEN),
            'King': (B_KING, W_KING),
        }.get(self.name)
        if sprites is not None:  # unknown names draw nothing, as before
            black_img, white_img = sprites
            WIN.blit(black_img if self.colour == BLACK else white_img, (self.x, self.y))

    def _ray_attack(self, d_row, d_col, stop, moves, board):
        """Walk up to `stop` squares along direction (d_row, d_col).

        Empty squares are recorded as CONTACT; the walk stops at the first
        occupied square, recording it as a capture when it holds an enemy.
        No bounds checking — callers limit `stop`, as in the original.
        """
        row, col = self.row, self.col
        for _ in range(stop):
            square = board[row + d_row][col + d_col]
            if square != 0:
                if square.colour != self.colour:
                    moves[(row + d_row, col + d_col)] = [(row + d_row, col + d_col)]
                break
            moves[(row + d_row, col + d_col)] = CONTACT
            row += d_row
            col += d_col
        return moves

    def north_attack(self, stop, moves, board):
        return self._ray_attack(-1, 0, stop, moves, board)

    def north_east_attack(self, stop, moves, board):
        return self._ray_attack(-1, 1, stop, moves, board)

    def east_attack(self, stop, moves, board):
        return self._ray_attack(0, 1, stop, moves, board)

    def south_east_attack(self, stop, moves, board):
        return self._ray_attack(1, 1, stop, moves, board)

    def south_attack(self, stop, moves, board):
        return self._ray_attack(1, 0, stop, moves, board)

    def south_west_attack(self, stop, moves, board):
        return self._ray_attack(1, -1, stop, moves, board)

    def west_attack(self, stop, moves, board):
        return self._ray_attack(0, -1, stop, moves, board)

    def north_west_attack(self, stop, moves, board):
        return self._ray_attack(-1, -1, stop, moves, board)

    def _knight_attack(self, d_row, d_col, moves, board):
        """Record the single knight destination offset by (d_row, d_col).

        CONTACT when the square is empty; a capture entry when it holds an
        enemy piece; nothing when it holds a friendly piece.
        """
        r, c = self.row + d_row, self.col + d_col
        if board[r][c] != 0:
            if board[r][c].colour == self.enemy_colour:
                moves[(r, c)] = [(r, c)]
        else:
            moves[(r, c)] = CONTACT
        return moves

    def knight_north_west(self, moves, board):
        return self._knight_attack(-2, -1, moves, board)

    def knight_north_east(self, moves, board):
        return self._knight_attack(-2, 1, moves, board)

    def knight_east_north(self, moves, board):
        return self._knight_attack(-1, 2, moves, board)

    def knight_east_south(self, moves, board):
        return self._knight_attack(1, 2, moves, board)

    def knight_south_east(self, moves, board):
        return self._knight_attack(2, 1, moves, board)

    def knight_south_west(self, moves, board):
        return self._knight_attack(2, -1, moves, board)

    def knight_west_south(self, moves, board):
        return self._knight_attack(1, -2, moves, board)

    def knight_west_north(self, moves, board):
        return self._knight_attack(-1, -2, moves, board)
|
class Solution(object):
    """LeetCode 268: find the value missing from a permutation of 0..n."""

    def missingNumber(self, nums):
        """XOR every index and value together, seeded with n itself.

        Present number/index pairs cancel out, leaving only the missing
        number.  O(n) time, O(1) space.
        """
        acc = len(nums)
        for idx, val in enumerate(nums):
            acc ^= idx ^ val
        return acc
import logging
import asyncio
import aioamqp
from ... import config
from .driver_base import ExchangerBase
logger = logging.getLogger(__name__)
class Exchanger(ExchangerBase):
    """AMQP-backed exchanger: an async context manager that connects to the
    broker, declares the app/ws exchanges and the role-specific queue, and
    yields a (Sender, Receiver) pair wired for that role.
    """

    async def __aenter__(self):
        logger.debug('connecting with role {}'.format(self.role))
        params = config.get_amqp_conn_params(self.url)
        # aioamqp.connect() uses different keyword names than our config.
        params['login'] = params.pop('username')
        params['virtualhost'] = params.pop('virtual_host')
        self._transport, self._protocol = await aioamqp.connect(**params)
        # TODO: handle reconnect awaiting from self._conn
        self._chan = await self._protocol.channel()
        # FIX: the app exchange name / routing key were computed twice;
        # the duplicate pair of calls has been removed.
        app_exchange_name = self.get_app_exchange_name()
        app_routing_key = self.get_app_routing_key()
        ws_exchange_name = self.get_ws_exchange_name()
        ws_routing_key = self.get_ws_routing_key()
        ws_routing_key_bcast = self.get_ws_routing_key(broadcast=True)
        await self._chan.exchange(app_exchange_name, 'direct', durable=True)
        await self._chan.exchange(ws_exchange_name, 'topic', durable=True)
        # NOTE(review): if self.role is neither ROLE_WS nor ROLE_APP,
        # receive_queue_name / send_exchange_name are never bound and the
        # return below raises NameError — confirm roles are validated
        # upstream (e.g. in ExchangerBase).
        if self.role == self.ROLE_WS:
            # Per-client exclusive queue; receives both direct and
            # broadcast messages for this client.
            receive_queue_name = '{}.{}'.format(ws_routing_key, self.client_id)
            await self._chan.queue(receive_queue_name, exclusive=True, durable=False)
            await self._chan.queue_bind(
                exchange_name=ws_exchange_name,
                queue_name=receive_queue_name,
                routing_key=ws_routing_key
            )
            await self._chan.queue_bind(
                exchange_name=ws_exchange_name,
                queue_name=receive_queue_name,
                routing_key=ws_routing_key_bcast
            )
            # WS clients send towards the application.
            send_exchange_name, send_routing_key = app_exchange_name, app_routing_key
        if self.role == self.ROLE_APP:
            # Shared durable queue for the application side.
            receive_queue_name = 'pushpull.app'
            await self._chan.queue(receive_queue_name, durable=True)
            await self._chan.queue_bind(
                exchange_name=app_exchange_name,
                queue_name=receive_queue_name,
                routing_key=app_routing_key
            )
            # The app sends towards the websocket side.
            send_exchange_name, send_routing_key = ws_exchange_name, ws_routing_key
        logger.debug('connected ok')
        return (
            Sender(self._chan, send_exchange_name, send_routing_key),
            Receiver(self._chan, receive_queue_name)
        )

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Close the AMQP protocol; tolerate cancellation, log other errors."""
        logger.debug('closing connection and channel')
        try:
            await self._protocol.close()
        except asyncio.CancelledError:
            pass
        except Exception:
            logger.exception('error closing')
class Sender:
    """Publishes messages to a fixed exchange / routing key on an open
    aioamqp channel.
    """

    def __init__(self, channel, exchange_name, routing_key):
        self._chan = channel
        self._exchange_name = exchange_name
        self._routing_key = routing_key

    async def send(self, message):
        """Publish *message* to the configured exchange.

        FIX: the debug line used to run after basic_publish, so it was
        skipped whenever publishing raised and logged "publishing" only
        after completion; it now runs before the await for traceability.
        """
        logger.debug('publishing message %r', message)
        await self._chan.basic_publish(
            message,
            exchange_name=self._exchange_name,
            routing_key=self._routing_key
        )
class Receiver:
    """Async-iterable consumer that buffers AMQP deliveries in a local queue."""

    def __init__(self, channel, queue_name):
        self._channel = channel
        self._queue_name = queue_name
        # Bounded buffer between the broker callback and the iterator;
        # on overflow messages are dropped (see __call__).
        self._fifo = asyncio.Queue(100)

    async def __call__(self, channel, body, envelope, properties):
        # aioamqp delivery callback — the instance itself is registered as
        # the consumer in __aiter__.
        logger.debug('received message %r', body)
        try:
            self._fifo.put_nowait(body.decode())  # TODO: get encoding
        except asyncio.QueueFull:
            # Best-effort: drop the message rather than block the broker.
            logger.warning('queue full')

    async def __aiter__(self):
        # Consumption starts lazily on first iteration. no_ack=True means
        # the broker will not redeliver messages if this process dies.
        # NOTE(review): a coroutine __aiter__ was deprecated in Python
        # 3.5.2 and is rejected by `async for` from 3.8 on — confirm the
        # interpreter version this targets.
        await self._channel.basic_consume(
            self,
            self._queue_name,
            no_ack=True,
        )
        return self

    async def __anext__(self):
        data = await self._fifo.get()
        # None is treated as an end-of-stream sentinel, although nothing
        # in this class currently enqueues it.
        if data is None:
            raise StopAsyncIteration
        return data
|
import os
import json
def rarity_skeleton():
    """Return a fresh, empty Django fixture row for main.Card_Rarity."""
    fields = {"card_rarity": None}
    return {"model": "main.Card_Rarity", "pk": None, "fields": fields}
def type_skeleton():
    """Return a fresh, empty Django fixture row for main.Card_Type."""
    fields = {"card_type": None}
    return {"model": "main.Card_Type", "pk": None, "fields": fields}
def card_skeleton():
    """Return a fresh, empty Django fixture row for main.Card.

    Field order matters for stable serialized fixture output, so the
    names are listed in the model's original order.
    """
    field_names = (
        "tcgplayer_id", "mtg_json_uuid", "name", "card_image_loc",
        "mana_cost", "converted_mana_cost", "type_id", "card_text",
        "card_color", "card_keywords", "set_name", "power", "toughness",
        "collection_number", "rarity_id", "flavor_text", "artist",
    )
    return {
        "model": "main.Card",
        "pk": None,
        "fields": {name: None for name in field_names},
    }
def listing_skeleton():
    """Return a fresh Django fixture row for main.Listing.

    Unknown values start as None; the seeding defaults (product line,
    generic vendor seller) are pre-filled. Field order is preserved for
    stable serialized fixture output.
    """
    fields = {
        "product_id": None,
        "product_name": None,
        "product_line": "Magic The Gathering",
        "set_name": None,
        "price": None,
        "card_market_purchase_url": None,
        "tcg_player_purchase_url": None,
        "mtg_stocks_purchase_url": None,
        "quantity": None,
        "condition": None,
        "seller_key": "Seeding Seller",
        "seller_type": "Vendor",
        "sponsored": False,
        "user_listing": False,
        "selling_user_id": 1,
    }
    return {"model": "main.Listing", "pk": None, "fields": fields}
# All card data destined for the db, keyed by scryfall oracle id so each
# record can later be merged with its matching MTGJSON entry.
transfer_to_db = {}

# open the scryfall data and load as json
with open('detail_data.json', 'r') as json_file:
    data = json.load(json_file)
    for card in data:
        # Without an oracle id there is no way to link to the MTGJSON
        # data, so skip the card.
        if not card['oracle_id']:
            continue
        oracle_id = card['oracle_id']
        # Initialize an empty dict for this card, then copy over every
        # attribute of interest that is present.
        entry = transfer_to_db[oracle_id] = {}
        if 'tcgplayer_id' in card:
            entry['tcgplayer_id'] = card['tcgplayer_id']
        if 'name' in card:
            entry['name'] = card['name']
        if 'image_uris' in card:
            # Keep the first available image, in the original preference
            # order: png, normal, small, large.
            for size in ('png', 'normal', 'small', 'large'):
                if size in card['image_uris']:
                    entry['card_image_loc'] = card['image_uris'][size]
                    break
        if 'mana_cost' in card:
            entry['mana_cost'] = card['mana_cost']
        if 'cmc' in card:
            entry['converted_mana_cost'] = card['cmc']
        if 'type_line' in card:
            entry['type'] = card['type_line']
        if 'oracle_text' in card:
            entry['card_text'] = card['oracle_text']
        if 'power' in card:
            entry['power'] = card['power']
        if 'toughness' in card:
            entry['toughness'] = card['toughness']
        if 'colors' in card:
            entry['card_color'] = card['colors']
        if 'keywords' in card:
            entry['card_keywords'] = card['keywords']
        if 'set_name' in card:
            entry['set_name'] = card['set_name']
        if 'collector_number' in card:
            entry['collection_number'] = card['collector_number']
        if 'rarity' in card:
            entry['rarity'] = card['rarity']
        if 'flavor_text' in card:
            entry['flavor_text'] = card['flavor_text']
        if 'artist' in card:
            entry['artist'] = card['artist']
        if 'prices' in card and 'usd' in card['prices']:
            entry['price'] = card['prices']['usd']
        # BUG FIX: the guard tested 'purchase_uris' but the body indexed
        # card['purchaseUrls'], raising KeyError whenever the guard
        # passed; read the same key the guard checks.
        if 'purchase_uris' in card:
            urls = card['purchase_uris']
            if 'tcgplayer' in urls:
                entry['tcg_player_purchase_url'] = urls['tcgplayer']
            if 'cardmarket' in urls:
                entry['card_market_purchase_url'] = urls['cardmarket']
            if 'mtgstocks' in urls:
                entry['mtg_stocks_purchase_url'] = urls['mtgstocks']
# The with-block closes the file; the original's explicit close() here
# was a redundant no-op.
# open MTGJSON data and load: a dict mapping card name to a dict of card
# info. Only the values are needed, so iterate them directly.
with open('details_data', 'r') as f:
    data = json.load(f)
for card in data.values():
    # Without a scryfall oracle id we can't match to the other data; skip.
    if not card['scryfallOracleId']:
        continue
    oracle_id = card['scryfallOracleId']
    if oracle_id in transfer_to_db:
        # We have a match in the scryfall data: merge into its record.
        dict_entry = transfer_to_db[oracle_id]
        # Items special to MTGJSON take precedence.
        if 'convertedManaCost' in card:
            dict_entry['converted_mana_cost'] = card['convertedManaCost']
        if 'text' in card:
            dict_entry['card_text'] = card['text']
        if 'purchaseUrls' in card:
            urls = card['purchaseUrls']
            # cardmarket intentionally overwrites any scryfall value
            # (it carried no guard in the original).
            if 'cardmarket' in urls:
                dict_entry['card_market_purchase_url'] = urls['cardmarket']
            # BUG FIX: these were guarded on a 'purchase_urls' key that is
            # never written anywhere, so the guard was always true and the
            # scryfall URLs it was meant to protect were always clobbered;
            # check the actual fields instead.
            if 'tcgplayer' in urls and 'tcg_player_purchase_url' not in dict_entry:
                dict_entry['tcg_player_purchase_url'] = urls['tcgplayer']
            if 'mtgstocks' in urls and 'mtg_stocks_purchase_url' not in dict_entry:
                dict_entry['mtg_stocks_purchase_url'] = urls['mtgstocks']
        # save MTG JSON uuid in case we want to join with more data later
        if 'uuid' in card:
            dict_entry['mtg_json_uuid'] = card['uuid']
        # fill in any data not gotten from scryfall
        if 'name' not in dict_entry and 'name' in card:
            dict_entry['name'] = card['name']
        # NOTE(review): MTGJSON field names are camelCase (cf.
        # 'convertedManaCost' above), so 'mana_cost' here may never match
        # — confirm against the details_data schema.
        if 'mana_cost' not in dict_entry and 'mana_cost' in card:
            dict_entry['mana_cost'] = card['mana_cost']
        if 'type' not in dict_entry and 'type' in card:
            dict_entry['type'] = card['type']
        # BUG FIX: the original tested `'colors' not in dict_entry` but the
        # record stores the key 'card_color', so scryfall colors were
        # always overwritten; test the key that is actually stored.
        if 'card_color' not in dict_entry and 'colors' in card:
            dict_entry['card_color'] = card['colors']
        if 'power' not in dict_entry and 'power' in card:
            dict_entry['power'] = card['power']
        if 'toughness' not in dict_entry and 'toughness' in card:
            dict_entry['toughness'] = card['toughness']
    else:
        # No match in the dictionary: a new card known only to MTGJSON
        # (it won't have a picture).
        entry = transfer_to_db[oracle_id] = {}
        if 'convertedManaCost' in card:
            entry['converted_mana_cost'] = card['convertedManaCost']
        if 'text' in card:
            entry['card_text'] = card['text']
        if 'purchaseUrls' in card:
            urls = card['purchaseUrls']
            if 'cardmarket' in urls:
                entry['card_market_purchase_url'] = urls['cardmarket']
            if 'tcgplayer' in urls:
                entry['tcg_player_purchase_url'] = urls['tcgplayer']
            if 'mtgstocks' in urls:
                entry['mtg_stocks_purchase_url'] = urls['mtgstocks']
        # save MTG JSON uuid in case we want to join with more data later
        if 'uuid' in card:
            entry['mtg_json_uuid'] = card['uuid']
        if 'name' in card:
            entry['name'] = card['name']
        if 'mana_cost' in card:
            entry['mana_cost'] = card['mana_cost']
        if 'type' in card:
            entry['type'] = card['type']
        if 'colors' in card:
            entry['card_color'] = card['colors']
        if 'power' in card:
            entry['power'] = card['power']
        if 'toughness' in card:
            entry['toughness'] = card['toughness']
def _validated_int(card_data, key):
    """Return card_data[key] when it is present and coerces to int, else -1.

    Mirrors the fixture convention of -1 for missing or non-numeric
    values (the value itself is stored untouched, only validated).
    """
    try:
        if key in card_data:
            int(card_data[key])
            return card_data[key]
    except ValueError:
        pass
    return -1


# create json fixtures (pks for rarities/types are 1-based positions in
# the parallel *_strings lists)
rarities, types, cards, listings = [], [], [], []
rarity_strings, type_strings = [], []
# make a generic seller to use for now
for scryfall_id, card_data in transfer_to_db.items():
    # cannot add without rarity and type FKs
    if 'rarity' not in card_data or 'type' not in card_data:
        continue
    # check to see if rarity and type already added; if so reuse the pk,
    # if not add them
    if card_data['rarity'] in rarity_strings:
        rarity_id = rarity_strings.index(card_data['rarity']) + 1
    else:
        rarity = rarity_skeleton()
        rarity_id = len(rarity_strings) + 1
        rarity["pk"] = rarity_id
        rarity["fields"]["card_rarity"] = card_data['rarity']
        rarity_strings.append(card_data['rarity'])
        rarities.append(rarity)
    if card_data['type'] in type_strings:
        type_id = type_strings.index(card_data['type']) + 1
    else:
        # renamed from 'type', which shadowed the builtin
        card_type = type_skeleton()
        type_id = len(type_strings) + 1
        card_type["pk"] = type_id
        card_type["fields"]["card_type"] = card_data['type']
        type_strings.append(card_data['type'])
        types.append(card_type)
    # create the card JSON
    card = card_skeleton()
    card["pk"] = scryfall_id
    card["fields"]["tcgplayer_id"] = _validated_int(card_data, 'tcgplayer_id')
    card["fields"]["mtg_json_uuid"] = card_data.get('mtg_json_uuid', "")
    card["fields"]["name"] = card_data.get('name', "Card has no name")
    card["fields"]["card_image_loc"] = card_data.get(
        'card_image_loc', "static/main/images/cards/default.jpg")
    card["fields"]["mana_cost"] = card_data.get(
        'mana_cost', "No mana cost available")
    card["fields"]["converted_mana_cost"] = _validated_int(
        card_data, 'converted_mana_cost')
    card["fields"]["type_id"] = type_id
    card["fields"]["card_text"] = card_data.get(
        'card_text', "No text available")
    # BUG FIX: the original reused 'color' as both the accumulator and
    # the loop variable, producing garbage; join the list instead.
    colors = card_data.get('card_color', [])
    card["fields"]["card_color"] = ", ".join(colors) if colors \
        else "No color available"
    # BUG FIX: the original appended keywords onto 'color' instead of a
    # keyword accumulator, so card_keywords was never built correctly.
    keywords = card_data.get('card_keywords', [])
    card["fields"]["card_keywords"] = ", ".join(keywords) if keywords \
        else "No keywords available"
    card["fields"]["set_name"] = card_data.get(
        'set_name', "No set name available")
    card["fields"]["power"] = _validated_int(card_data, 'power')
    card["fields"]["toughness"] = _validated_int(card_data, 'toughness')
    card["fields"]["collection_number"] = _validated_int(
        card_data, 'collection_number')
    card["fields"]["rarity_id"] = rarity_id
    card["fields"]["flavor_text"] = card_data.get(
        'flavor_text', "No flavor text available")
    card["fields"]["artist"] = card_data.get(
        'artist', "No artist information available")
    cards.append(card)
    # create listing JSON
    listing = listing_skeleton()
    listing["pk"] = len(listings) + 1
    listing["fields"]["product_id"] = scryfall_id
    listing["fields"]["product_name"] = card_data.get(
        "name", "Card has no name")
    listing["fields"]["set_name"] = card_data.get(
        'set_name', "No set name available")
    # Price is kept as-is when it parses as a float, else -1.
    try:
        if card_data.get('price') is not None:
            float(card_data['price'])
            listing["fields"]["price"] = card_data['price']
        else:
            listing["fields"]["price"] = -1
    except ValueError:
        listing["fields"]["price"] = -1
    listing["fields"]["card_market_purchase_url"] = card_data.get(
        'card_market_purchase_url', "")
    listing["fields"]["tcg_player_purchase_url"] = card_data.get(
        'tcg_player_purchase_url', "")
    listing["fields"]["mtg_stocks_purchase_url"] = card_data.get(
        'mtg_stocks_purchase_url', "")
    listing["fields"]["quantity"] = card_data.get('quantity', -1)
    listing["fields"]["condition"] = card_data.get(
        'condition', "No condition information available")
    listings.append(listing)
# Write each fixture list into the Django fixtures directory. The 'with'
# statement closes each file; the original's explicit f.close() after
# every with-block was a redundant no-op.
basepath = os.path.abspath(os.path.join(
    os.path.dirname(__file__), "..", "..", "..", "fixtures", "{0}"))
for filename, payload in (
        ("rarities.json", rarities),
        ("types.json", types),
        ("cards.json", cards),
        ("listings.json", listings)):
    with open(basepath.format(filename), 'w') as f:
        json.dump(payload, f)
"""
To add these items to the database run this command for each file:
manage.py loaddata <name>.json
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.