import data

dataList = data.result
resultCount = 0
w, h = 1008, 1008
matrix = [[0 for x in range(w)] for y in range(h)]

# iterate over dataList; each entry has the form [[x, y], [w, h]]
for dataArr in dataList:
    x = dataArr[0][0]
    y = dataArr[0][1]
    w = dataArr[1][0]
    h = dataArr[1][1]
    # iterate over the rows this entry covers
    for rows in range(h):
        # iterate over the columns this entry covers
        for cols in range(w):
            matrix[y + rows][x + cols] += 1

# count every cell covered by more than one entry
for row in matrix:
    for item in row:
        if item > 1:
            resultCount += 1

print(resultCount)
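# For reference, a minimal stand-in for the imported `data` module
# (hypothetical; the [[x, y], [w, h]] layout is inferred from the comment
# above). With these three entries the script prints 4, the size of the
# 2x2 overlap between the first two rectangles:
#
#     # data.py
#     result = [
#         [[1, 3], [4, 4]],
#         [[3, 1], [4, 4]],
#         [[5, 5], [2, 2]],
#     ]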
|
from django.shortcuts import render
from datetime import datetime

# Create your views here.


def Wish_django(request):
    date = datetime.now()
    msg = 'I am proud to be an Indian'
    my_dict = {'date': date, 'msg': msg}
    return render(request, 'sixthapp/display.html', context=my_dict)
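# A matching URL pattern would live in the app's urls.py; a sketch
# (path and name are illustrative, not from the original project):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('wish/', views.Wish_django, name='wish'),
#     ]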
|
import argparse
import json
import logging
import os
import random
from collections import defaultdict

import numpy as np
import torch
import yaml
from easydict import EasyDict as edict
from tqdm import tqdm

from evaluator import final_evaluate
from mmt.metrics import get_consistency_score
from tools.registry import registry

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def get_config():
    """Set default and command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_file", required=True, type=str, help="joint config file")
    parser.add_argument("--tag", required=True, type=str, help="tag for the experiment")
    args = parser.parse_args()

    with open(args.task_file, "r") as f:
        task_cfg = edict(yaml.safe_load(f))

    set_seeds(task_cfg)
    registry.update(task_cfg)

    logger.info("-" * 20 + "Config Start" + "-" * 20)
    print(json.dumps(vars(args), indent=2))
    print(json.dumps(vars(task_cfg), indent=2))
    logger.info("-" * 20 + "Config End" + "-" * 20)
    return task_cfg, args


def set_seeds(task_cfg):
    """Set seeds for reproducibility."""
    seed = task_cfg["seed"]
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def set_device_folder(task_cfg, args):
    if torch.cuda.is_available():
        device = torch.device("cuda")
        multi_gpu = torch.cuda.device_count() > 1
    else:
        raise ValueError("Cuda not available!")

    # build experiment directory
    save_path = os.path.join("save", args.tag)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # dump full experiment configuration (helps in reproducibility)
    with open(os.path.join(save_path, "command.txt"), "w") as f:
        print(args, file=f)
        print("\n", file=f)
        print(task_cfg, file=f)

    return device, multi_gpu, save_path


def build_checkpoint(
    model, optimizer, warmup_scheduler, global_step, vqa_score, cs_scores, cs_bt_scores
):
    """Generate a storable checkpoint from the model."""
    model_to_save = model.module if hasattr(model, "module") else model
    checkpoint_dict = {
        "model_state_dict": model_to_save.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "warmup_scheduler_state_dict": warmup_scheduler.state_dict(),
        "global_step": global_step,
        "vqa_score": vqa_score,
        "cs_scores": cs_scores,
        "cs_bt_scores": cs_bt_scores,
    }
    return checkpoint_dict


def main():
    """Trains a model and evaluates it."""
    task_cfg, args = get_config()

    from mmt.mmt import MMT, BertConfig
    from mmt.task_utils import (
        clip_gradients,
        forward_train,
        get_optim_scheduler,
        load_dataset,
    )

    base_lr = task_cfg["lr"]
    device, multi_gpu, save_path = set_device_folder(task_cfg, args)

    # load datasets
    dataloaders = load_dataset(task_cfg)

    # build model
    mmt_config = BertConfig.from_dict(task_cfg["MMT"])
    text_bert_config = BertConfig.from_dict(task_cfg["TextBERT"])
    model = MMT(mmt_config, text_bert_config)
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"Training Parameters: {trainable_params}")

    # load optimizers
    optimizer_grouped_parameters = model.get_optimizer_parameters(base_lr)
    optimizer, warmup_scheduler = get_optim_scheduler(
        task_cfg, optimizer_grouped_parameters, base_lr
    )

    # send to gpu
    model.to(device)
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.cuda()
    if multi_gpu:
        model = torch.nn.DataParallel(model)

    # store best values
    eval_iter_factor = task_cfg["eval_iter_factor"]
    best_vqa, best_cs = -1, -1
    loss_hist, score_hist = [], []
    global_step = 0
    start_epoch = 0
    eval_ckpts_file = os.path.join(save_path, "ckpts.txt")

    # train loop
    num_iters = len(dataloaders["train_scl"] if registry.alt_train else dataloaders["train_ce"])
    model.train()
    for epochId in tqdm(range(start_epoch, task_cfg["num_epoch"]), desc="Epoch"):
        for step in tqdm(range(num_iters), desc="Iters"):
            assert model.training
            if global_step > registry.hard_stop:
                logger.info(f"Breaking w/ hard-stop at {registry.hard_stop}")
                break

            iter_id = step + (epochId * num_iters)

            # set run-type ("scl" vs "ce")
            if registry.alt_train and iter_id % registry.ce_freq == 1:
                train_type = "scl"
            else:
                train_type = "ce"

            loss, score = forward_train(device, dataloaders, model, train_type)
            loss.backward()

            if task_cfg["grad_clip_mode"] == "all":
                clip_gradients(
                    model, task_cfg["max_grad_norm"], task_cfg["grad_clip_mode"]
                )

            optimizer.step()
            warmup_scheduler.step()
            model.zero_grad()
            optimizer.zero_grad()
            global_step += 1

            if train_type == "ce" or (not registry.alt_train):
                loss_hist.append(float(loss))
                score_hist.append(float(score))
            del loss
            del score

            if step % 20 == 0 and step != 0:
                logger.info(
                    f"Score: {sum(score_hist)/len(score_hist)}, Loss: {sum(loss_hist)/len(loss_hist)}"
                )
                loss_hist, score_hist = [], []

            if (iter_id != 0 and iter_id % eval_iter_factor == 0) or (
                global_step == registry.hard_stop
            ):
                logger.info("Starting Validation Run....")
                curr_val_score, curr_val_loss, cs_scores, cs_bt_scores = run_evaluation(
                    dataloaders, device, model
                )

                # log current results
                ckpt_string = f"Iter: {global_step} | VQA: {curr_val_score} | CS: {cs_scores} | CS-BT: {cs_bt_scores}"
                with open(eval_ckpts_file, "a") as f:
                    f.write(ckpt_string + "\n")
                logger.info(ckpt_string)

                # build dict for storing the checkpoint
                checkpoint_dict = build_checkpoint(
                    model,
                    optimizer,
                    warmup_scheduler,
                    global_step,
                    curr_val_score,
                    cs_scores,
                    cs_bt_scores,
                )

                # checkpoint based on best vqa-score
                if task_cfg["monitor_value"] == "vqa_score":
                    if best_vqa < curr_val_score:
                        output_checkpoint = os.path.join(save_path, "vqa_best.tar")
                        torch.save(checkpoint_dict, output_checkpoint)
                        best_vqa = curr_val_score
                    logger.info(f"Monitoring vqa-score, best: {best_vqa} | current: {curr_val_score}")
                # checkpoint based on best cs-score on back-translation rephrasings
                elif task_cfg["monitor_value"] == "cs_score":
                    if best_cs < cs_bt_scores[-1]:
                        output_checkpoint = os.path.join(save_path, "cs_best.tar")
                        torch.save(checkpoint_dict, output_checkpoint)
                        best_cs = cs_bt_scores[-1]
                    logger.info(f"Monitoring CS-4 score, best: {best_cs} | current: {cs_bt_scores[-1]}")
                else:
                    raise ValueError(f"Unknown monitor_value: {task_cfg['monitor_value']}")

        # break at hard-stop
        if global_step > registry.hard_stop:
            break

    # Run final-evaluation and generate the EvalAI files.
    for split in ["test", "val"]:
        final_evaluate(
            evaluate_rephrasings, device, model, dataloaders, save_path, split
        )


def reset_evaluation_bins():
    """Reset rephrasing bins for each evaluation."""
    if registry.revqa_eval:
        from easydict import EasyDict
        dd = defaultdict(list)
        dd_bt = defaultdict(list)
        super(EasyDict, registry).__setattr__("revqa_bins", dd)
        super(EasyDict, registry).__setitem__("revqa_bins", dd)
        super(EasyDict, registry).__setattr__("revqa_bt_bins", dd_bt)
        super(EasyDict, registry).__setitem__("revqa_bt_bins", dd_bt)


def evaluate_rephrasings(dataloaders, model, device):
    """Run evaluation on human and back-translated rephrasings."""
    from mmt.task_utils import forward_eval
    reset_evaluation_bins()

    for batch in tqdm(dataloaders["revqa"], desc="Evaluate (Human Rephrasings)"):
        with torch.no_grad():  # turn off autograd engine
            forward_eval(device, batch, model, revqa_eval=True, revqa_split="revqa")
    # collect consensus results
    human_cs_scores = get_consistency_score(bins_key="revqa_bins")

    for batch in tqdm(
        dataloaders["revqa_bt"], desc="Evaluate (Back Translated Rephrasings)"
    ):
        with torch.no_grad():  # turn off autograd engine
            forward_eval(device, batch, model, revqa_eval=True, revqa_split="revqa_bt")
    # collect consensus results
    bt_cs_scores = get_consistency_score(bins_key="revqa_bt_bins")

    # filter out consensus scores
    bt_cs_scores = [bt_cs_scores[key] for key in ["1_bt", "2_bt", "3_bt", "4_bt"]]
    human_cs_scores = [human_cs_scores[str(key)] for key in [1, 2, 3, 4]]
    return human_cs_scores, bt_cs_scores


def run_evaluation(
    dataloaders,
    device,
    model,
):
    """Run evaluation on minival (VQA-score) and rephrasings (Consensus Scores)."""
    from mmt.task_utils import forward_eval
    model.eval()  # turn off dropout/batch-norm

    # run on validation-set
    val_scores, val_losses, batch_sizes = [], [], []
    for i, batch in tqdm(
        enumerate(dataloaders["minval"]),
        total=len(dataloaders["minval"]),
        desc="Evaluate (Mini-Val)",
    ):
        with torch.no_grad():  # turn off autograd engine
            loss, score, batch_size = forward_eval(
                device, batch, model, revqa_eval=False
            )
        val_scores.append(score * batch_size)
        val_losses.append(loss * batch_size)
        batch_sizes.append(batch_size)

    # run consensus evaluation on human and back-translated rephrasings
    if registry.revqa_eval:
        human_cs_scores, bt_cs_scores = evaluate_rephrasings(dataloaders, model, device)
    else:
        human_cs_scores, bt_cs_scores = None, None

    vqa_score = sum(val_scores) / sum(batch_sizes)
    vqa_loss = sum(val_losses) / sum(batch_sizes)
    model.train()  # return to train state
    return vqa_score, vqa_loss, human_cs_scores, bt_cs_scores
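
# A minimal sketch of restoring a saved checkpoint (assumed helper, not part
# of the original training flow; the dict keys mirror build_checkpoint above).
def load_checkpoint(path, model, optimizer, warmup_scheduler, device):
    ckpt = torch.load(path, map_location=device)
    target = model.module if hasattr(model, "module") else model
    target.load_state_dict(ckpt["model_state_dict"])
    optimizer.load_state_dict(ckpt["optimizer_state_dict"])
    warmup_scheduler.load_state_dict(ckpt["warmup_scheduler_state_dict"])
    return ckpt["global_step"], ckpt["vqa_score"]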
if __name__ == "__main__":
    main()
|
import calendar
from datetime import date
import requests
import json
from configparser import ConfigParser
import keyboard

hotkey = "ctrl + x"
remind = open("reminders.txt")
today = date.today()
month = today.month
year = today.year
cal = calendar.month(year, month)  # keep the calendar module name unshadowed

ascii_art = open("ascii_art")
print(ascii_art.read())
print("Options: ")
print("Calendar:\tc\t\tLeave Inputs:\tctrl + x")
print("Weather:\tw")
print("Reminder:\tr")
print("Summary:\tsum")
print("Search: \tsearch")
print("Close PA:\tstop")
print("\n")

# loop until the user enters "stop"
while True:
    pa = input("How may I be of assistance? ")
    if pa == "c":
        print("\n")
        print("Today: ", today)
        print(cal)
    elif pa == "w":
        try:
            config_object = ConfigParser()
            config_object.read("config.ini")
            weather_api_key = config_object['weather_api_key']
            city_lat = config_object['city_lat']
            city_lon = config_object['city_lon']
            api_key = weather_api_key["api_key"]
            lat = city_lat["lat"]
            lon = city_lon["lon"]
            final_url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&appid=%s&units=metric" % (lat, lon, api_key)
            weather_data = requests.get(final_url).json()
            temp = weather_data["current"]["temp"]
            print("current temp: ", temp)
        except KeyError:
            print("Please add OpenWeather api-key, latitude, and longitude to config.ini")
            continue
    elif pa == "r":
        remind.seek(0)
        print(remind.read())
        reminder = input("\nadd:\t+\nremove:\t-\nWhat would you like to do? ")
        if reminder == "+":
            remind = open("reminders.txt", "a")
            remind.write("\n")
            remind.write(input("add reminder: "))
            remind.close()
            remind = open("reminders.txt", "a+")
            remind.seek(0)
            print(remind.read())
            if keyboard.is_pressed(hotkey):
                break
            continue
        elif reminder == "-":
            fn = 'reminders.txt'
            f = open(fn)
            output = []
            prefix = input("first word/words of reminder: ")
            # keep every line that does not start with the given prefix
            for line in f:
                if not line.startswith(prefix):
                    output.append(line)
            f.close()
            f = open(fn, 'w')
            f.writelines(output)
            f.close()
            remind.seek(0)
            print(remind.read())
            if keyboard.is_pressed(hotkey):
                break
        else:
            break
    elif pa == "sum":
        remind.seek(0)
        print(remind.read())
        print("Today: ", today)
        print(cal)
        try:
            config_object = ConfigParser()
            config_object.read("config.ini")
            weather_api_key = config_object['weather_api_key']
            city_lat = config_object['city_lat']
            city_lon = config_object['city_lon']
            api_key = weather_api_key["api_key"]
            lat = city_lat["lat"]
            lon = city_lon["lon"]
            final_url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&appid=%s&units=metric" % (lat, lon, api_key)
            weather_data = requests.get(final_url).json()
            temp = weather_data["current"]["temp"]
            print("current temp: ", temp)
        except KeyError:
            print("Please complete weather parameters.")
    elif pa == "search":
        from urllib.parse import quote
        import webbrowser
        query = quote(input("Search: "))
        search = webbrowser.open("https://duckduckgo.com/?q=" + query)
        if keyboard.is_pressed(hotkey):
            break
        continue
    elif pa == "stop":
        print("Goodbye!")
        break
    else:
        print("Please Re-state")
|
# Generated by Django 2.2.2 on 2019-07-23 07:09
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('teachingtask', '0007_remove_teachingtask_level'),
        ('student', '0002_student_nick_name'),
        ('attendance', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='attendance',
            name='attendance_detail',
            field=models.CharField(max_length=20, null=True),
        ),
        migrations.AlterUniqueTogether(
            name='attendance',
            unique_together={('task', 'student', 'attendance_time', 'attendance_detail')},
        ),
    ]
|
#!/usr/bin/env python3
"""
Converts Satisfactory save games (.sav) into a Python dict
"""
import struct
import sys


def to_py(file_path):
    f = open(file_path, 'rb')

    # determine the file size so that we can capture any trailing bytes later
    f.seek(0, 2)
    file_size = f.tell()
    f.seek(0, 0)

    bytesRead = 0

    def assert_fail(message):
        print('assertion failed: ' + message, file=sys.stderr)
        # show the next bytes to help debugging
        print(read_hex(32))
        input()
        assert False

    def read_int():
        nonlocal bytesRead
        bytesRead += 4
        return struct.unpack('i', f.read(4))[0]

    def read_float():
        nonlocal bytesRead
        bytesRead += 4
        return struct.unpack('f', f.read(4))[0]

    def read_long():
        nonlocal bytesRead
        bytesRead += 8
        return struct.unpack('q', f.read(8))[0]

    def read_byte():
        nonlocal bytesRead
        bytesRead += 1
        return struct.unpack('b', f.read(1))[0]

    def assert_null_byte():
        nonlocal bytesRead
        bytesRead += 1
        zero = f.read(1)
        if zero != b'\x00':
            assert_fail('not null but ' + str(zero))

    def read_length_prefixed_string():
        """
        Reads a string that is prefixed with its length
        """
        nonlocal bytesRead
        length = read_int()
        if length == 0:
            return ''
        chars = f.read(length - 1)
        zero = f.read(1)
        bytesRead += length
        if zero != b'\x00':  # We assume that the last byte of a string is always \x00
            if length > 100:
                assert_fail('zero is ' + str(zero) + ' in ' + str(chars[0:100]))
            else:
                assert_fail('zero is ' + str(zero) + ' in ' + str(chars))
        return chars.decode('ascii')

    def read_hex(count):
        """
        Reads count bytes and returns their hex form
        """
        nonlocal bytesRead
        bytesRead += count
        chars = f.read(count)
        c = 0
        result = ''
        for i in chars:
            result += format(i, '02x') + ' '
            c += 1
            if c % 4 == 0 and c < count - 1:
                result += ' '
        return result

    # Read the file header
    save_header_type = read_int()
    save_version = read_int()  # Save Version
    build_version = read_int()  # BuildVersion
    map_name = read_length_prefixed_string()  # MapName
    map_options = read_length_prefixed_string()  # MapOptions
    session_name = read_length_prefixed_string()  # SessionName
    play_duration_seconds = read_int()  # PlayDurationSeconds
    save_date_time = read_long()  # SaveDateTime
    '''
    to convert this FDateTime to a unix timestamp use:
    saveDateSeconds = save_date_time / 10000000
    # see https://stackoverflow.com/a/1628018
    print(saveDateSeconds - 62135596800)
    '''
    session_visibility = read_byte()  # SessionVisibility

    entry_count = read_int()  # total entries

    save_dict = {
        'save_header_type': save_header_type,
        'save_version': save_version,
        'build_version': build_version,
        'map_name': map_name,
        'map_options': map_options,
        'session_name': session_name,
        'play_duration_seconds': play_duration_seconds,
        'save_date_time': save_date_time,
        'session_visibility': session_visibility,
        'objects': []
    }

    def read_actor():
        class_name = read_length_prefixed_string()
        level_name = read_length_prefixed_string()
        path_name = read_length_prefixed_string()
        need_transform = read_int()
        a = read_float()
        b = read_float()
        c = read_float()
        d = read_float()
        x = read_float()
        y = read_float()
        z = read_float()
        sx = read_float()
        sy = read_float()
        sz = read_float()
        was_placed_in_level = read_int()
        return {
            'type': 1,
            'class_name': class_name,
            'level_name': level_name,
            'path_name': path_name,
            'need_transform': need_transform,
            'transform': {
                'rotation': [a, b, c, d],
                'translation': [x, y, z],
                'scale3d': [sx, sy, sz],
            },
            'was_placed_in_level': was_placed_in_level
        }

    def read_object():
        class_name = read_length_prefixed_string()
        level_name = read_length_prefixed_string()
        path_name = read_length_prefixed_string()
        outer_path_name = read_length_prefixed_string()
        return {
            'type': 0,
            'class_name': class_name,
            'level_name': level_name,
            'path_name': path_name,
            'outer_path_name': outer_path_name
        }

    for i in range(0, entry_count):
        type = read_int()
        if type == 1:
            save_dict['objects'].append(read_actor())
        elif type == 0:
            save_dict['objects'].append(read_object())
        else:
            assert_fail('unknown type ' + str(type))

    element_count = read_int()

    # So far these counts have always been the same and
    # the entities seem to belong 1 to 1 to the actors/objects read above
    if element_count != entry_count:
        assert_fail('element_count (' + str(element_count) +
                    ') != entry_count (' + str(entry_count) + ')')

    def read_property(properties):
        name = read_length_prefixed_string()
        if name == 'None':
            return
        prop = read_length_prefixed_string()
        length = read_int()
        zero = read_int()
        if zero != 0:
            print(name + ' ' + prop)
            assert_fail('not null: ' + str(zero))
        property = {
            'name': name,
            'type': prop,
            '_length': length
        }
        if prop == 'IntProperty':
            assert_null_byte()
            property['value'] = read_int()
        elif prop == 'StrProperty':
            assert_null_byte()
            property['value'] = read_length_prefixed_string()
        elif prop == 'StructProperty':
            type = read_length_prefixed_string()
            property['structUnknown'] = read_hex(17)  # TODO
            if type == 'Vector' or type == 'Rotator':
                x = read_float()
                y = read_float()
                z = read_float()
                property['value'] = {
                    'type': type,
                    'x': x,
                    'y': y,
                    'z': z
                }
            elif type == 'Box':
                min_x = read_float()
                min_y = read_float()
                min_z = read_float()
                max_x = read_float()
                max_y = read_float()
                max_z = read_float()
                is_valid = read_byte()
                property['value'] = {
                    'type': type,
                    'min': [min_x, min_y, min_z],
                    'max': [max_x, max_y, max_z],
                    'is_valid': is_valid
                }
            elif type == 'LinearColor':
                r = read_float()
                g = read_float()
                b = read_float()
                a = read_float()
                property['value'] = {
                    'type': type,
                    'r': r,
                    'g': g,
                    'b': b,
                    'a': a
                }
            elif type == 'Transform':
                props = []
                while read_property(props):
                    pass
                property['value'] = {
                    'type': type,
                    'properties': props
                }
            elif type == 'Quat':
                a = read_float()
                b = read_float()
                c = read_float()
                d = read_float()
                property['value'] = {
                    'type': type,
                    'a': a,
                    'b': b,
                    'c': c,
                    'd': d
                }
            elif type == 'RemovedInstanceArray' or type == 'InventoryStack':
                props = []
                while read_property(props):
                    pass
                property['value'] = {
                    'type': type,
                    'properties': props
                }
            elif type == 'InventoryItem':
                unk1 = read_length_prefixed_string()  # TODO
                item_name = read_length_prefixed_string()
                level_name = read_length_prefixed_string()
                path_name = read_length_prefixed_string()
                props = []
                read_property(props)
                # can't consume null here because it is needed by the enclosing struct
                property['value'] = {
                    'type': type,
                    'unk1': unk1,
                    'item_name': item_name,
                    'level_name': level_name,
                    'path_name': path_name,
                    'properties': props
                }
            else:
                assert_fail('Unknown type: ' + type)
        elif prop == 'ArrayProperty':
            item_type = read_length_prefixed_string()
            assert_null_byte()
            count = read_int()
            values = []
            if item_type == 'ObjectProperty':
                for j in range(0, count):
                    values.append({
                        'level_name': read_length_prefixed_string(),
                        'path_name': read_length_prefixed_string()
                    })
            elif item_type == 'StructProperty':
                struct_name = read_length_prefixed_string()
                struct_type = read_length_prefixed_string()
                struct_size = read_int()
                zero = read_int()
                if zero != 0:
                    assert_fail('not zero: ' + str(zero))
                type = read_length_prefixed_string()
                property['struct_name'] = struct_name
                property['struct_type'] = struct_type
                property['structInnerType'] = type
                property['structUnknown'] = read_hex(17)  # TODO what are those?
                property['_structLength'] = struct_size
                for i in range(0, count):
                    props = []
                    while read_property(props):
                        pass
                    values.append({
                        'properties': props
                    })
            elif item_type == 'IntProperty':
                for i in range(0, count):
                    values.append(read_int())
            else:
                assert_fail('unknown item_type ' + item_type)
            property['value'] = {
                'type': item_type,
                'values': values
            }
        elif prop == 'ObjectProperty':
            assert_null_byte()
            property['value'] = {
                'level_name': read_length_prefixed_string(),
                'path_name': read_length_prefixed_string()
            }
        elif prop == 'BoolProperty':
            property['value'] = read_byte()
            assert_null_byte()
        elif prop == 'FloatProperty':
            # TimeStamps that are FloatProperties are negative relative to
            # the current time in seconds?
            assert_null_byte()
            property['value'] = read_float()
        elif prop == 'EnumProperty':
            enum_name = read_length_prefixed_string()
            assert_null_byte()
            value_name = read_length_prefixed_string()
            property['value'] = {
                'enum': enum_name,
                'value': value_name,
            }
        elif prop == 'NameProperty':
            assert_null_byte()
            property['value'] = read_length_prefixed_string()
        elif prop == 'MapProperty':
            name = read_length_prefixed_string()
            value_type = read_length_prefixed_string()
            for i in range(0, 5):
                assert_null_byte()
            count = read_int()
            values = {}
            for i in range(0, count):
                key = read_int()
                props = []
                while read_property(props):
                    pass
                values[key] = props
            property['value'] = {
                'name': name,
                'type': value_type,
                'values': values
            }
        elif prop == 'ByteProperty':  # TODO
            unk1 = read_length_prefixed_string()  # TODO
            if unk1 == 'None':
                assert_null_byte()
                property['value'] = {
                    'unk1': unk1,
                    'unk2': read_byte()
                }
            else:
                assert_null_byte()
                unk2 = read_length_prefixed_string()  # TODO
                property['value'] = {
                    'unk1': unk1,
                    'unk2': unk2
                }
        elif prop == 'TextProperty':
            assert_null_byte()
            property['textUnknown'] = read_hex(13)  # TODO
            property['value'] = read_length_prefixed_string()
        else:
            assert_fail('Unknown property type: ' + prop)
        properties.append(property)
        return True

    def read_entity(with_names, length):
        nonlocal bytesRead
        bytesRead = 0
        entity = {}
        if with_names:
            entity['level_name'] = read_length_prefixed_string()
            entity['path_name'] = read_length_prefixed_string()
            entity['children'] = []
            child_count = read_int()
            if child_count > 0:
                for i in range(0, child_count):
                    level_name = read_length_prefixed_string()
                    path_name = read_length_prefixed_string()
                    entity['children'].append({
                        'level_name': level_name,
                        'path_name': path_name
                    })
        entity['properties'] = []
        while read_property(entity['properties']):
            pass
        # read missing bytes at the end of this entity.
        # maybe we missed something while parsing the properties?
        missing = length - bytesRead
        if missing > 0:
            entity['missing'] = read_hex(missing)
        elif missing < 0:
            assert_fail('negative missing amount: ' + str(missing))
        return entity

    for i in range(0, element_count):
        length = read_int()  # length of this entry
        if save_dict['objects'][i]['type'] == 1:
            save_dict['objects'][i]['entity'] = read_entity(True, length)
        else:
            save_dict['objects'][i]['entity'] = read_entity(False, length)

    # store the remaining bytes as well so that we can recreate the exact same save file
    save_dict['missing'] = read_hex(file_size - f.tell())
    f.close()
    return save_dict
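
# A minimal usage sketch (hypothetical driver; the default path is a placeholder):
if __name__ == '__main__':
    parsed = to_py(sys.argv[1] if len(sys.argv) > 1 else 'my_game.sav')
    print(parsed['session_name'], '-', parsed['map_name'])
    print('objects parsed:', len(parsed['objects']))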
|
# -*- coding: utf-8 -*-
"""
===============================================================================
Cube_and_Cuboid -- A standard cubic pore and cuboid throat model
===============================================================================
"""
from OpenPNM.Geometry import models as gm
from OpenPNM.Geometry import GenericGeometry


class Cube_and_Cuboid(GenericGeometry):
    r"""
    Cube_and_Cuboid subclass of GenericGeometry
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._generate()

    def _generate(self):
        self.models.add(propname='pore.seed',
                        model=gm.pore_misc.random)
        self.models.add(propname='throat.seed',
                        model=gm.throat_misc.neighbor,
                        pore_prop='pore.seed',
                        mode='min')
        self.models.add(propname='pore.diameter',
                        model=gm.pore_diameter.sphere,
                        psd_name='weibull_min',
                        psd_shape=1.5,
                        psd_loc=14e-6,
                        psd_scale=2e-6)
        self.models.add(propname='pore.area',
                        model=gm.pore_area.cubic)
        self.models.add(propname='pore.volume',
                        model=gm.pore_volume.cube)
        self.models.add(propname='throat.diameter',
                        model=gm.throat_diameter.cylinder,
                        tsd_name='weibull_min',
                        tsd_shape=1.5,
                        tsd_loc=14e-6,
                        tsd_scale=2e-6)
        self.models.add(propname='throat.length',
                        model=gm.throat_length.straight)
        self.models.add(propname='throat.volume',
                        model=gm.throat_volume.cuboid)
        self.models.add(propname='throat.area',
                        model=gm.throat_area.cuboid)
        self.models.add(propname='throat.surface_area',
                        model=gm.throat_surface_area.cuboid)
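# Typical OpenPNM 1.x usage attaches the geometry to a network; a sketch
# under assumed API (argument names may differ between versions):
#
#     import OpenPNM
#     pn = OpenPNM.Network.Cubic(shape=[10, 10, 10])
#     geom = Cube_and_Cuboid(network=pn, pores=pn.pores(), throats=pn.throats())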
|
from django.conf.urls import url
from states import views

urlpatterns = [
    # [A-Za-z] instead of [A-z]: the latter also matches [ \ ] ^ _ ` and backtick
    url(r'^countries/(?P<country_code>[A-Za-z]+)/states/', views.state_list),
    # url('^<state>', views.state_delete),
    url(r'^countries/(?P<country_code>[A-Za-z]+)/(?P<state>[A-Za-z]+)', views.state_detail),
]
|
from django.db import models
# Create your models here.
from django.contrib.auth.models import User


class Company(models.Model):
    class Meta:
        db_table = 'tb_companies'

    name = models.CharField(max_length=100, unique=True)
    ceo = models.CharField(max_length=50, blank=True, null=True)
    phone = models.CharField(max_length=50, blank=True, null=True)
    company_slogan = models.TextField(blank=True, null=True)
    logo = models.ImageField(upload_to='logos', blank=True, null=True)
    created_date = models.DateField(auto_now_add=True)
    created_time = models.TimeField(auto_now_add=True)
    last_update_date = models.DateField(auto_now=True)
    last_update_time = models.TimeField(auto_now=True)  # a time-of-day needs TimeField

    def __str__(self):
        return self.name

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        self.name = str(self.name).upper()
        return super(Company, self).save(force_insert, force_update, using,
                                         update_fields)


class Office(models.Model):
    ch_mikoa = (
        ('Dodoma', 'Dodoma'),
        ('Arusha', 'Arusha'),
        ('Kilimanjaro', 'Kilimanjaro'),
        ('Tanga', 'Tanga'),
        ('Morogoro', 'Morogoro'),
        ('Pwani', 'Pwani'),
        ('Dar es Salaam', 'Dar es Salaam'),
        ('Lindi', 'Lindi'),
        ('Mtwara', 'Mtwara'),
        ('Ruvuma', 'Ruvuma'),
        ('Iringa', 'Iringa'),
        ('Mbeya', 'Mbeya'),
        ('Songwe', 'Songwe'),
        ('Singida', 'Singida'),
        ('Tabora', 'Tabora'),
        ('Rukwa', 'Rukwa'),
        ('Kigoma', 'Kigoma'),
        ('Shinyanga', 'Shinyanga'),
        ('Kagera', 'Kagera'),
        ('Mwanza', 'Mwanza'),
        ('Mara', 'Mara'),
        ('Manyara', 'Manyara'),
        ('Njombe', 'Njombe'),
        ('Katavi', 'Katavi'),
        ('Simiyu', 'Simiyu'),
        ('Geita', 'Geita'),
        ('Unguja Kaskazini', 'Unguja Kaskazini'),
        ('Unguja Kusini', 'Unguja Kusini'),
        ('Unguja Mjini Magharibi', 'Unguja Mjini Magharibi'),
        ('Pemba Kaskazini', 'Pemba Kaskazini'),
        ('Pemba Kusini', 'Pemba Kusini')
    )

    class Meta:
        db_table = 'tb_office'
        unique_together = ['company', 'name', 'region', 'location']
        ordering = ['-id']

    created_date = models.DateField(auto_now_add=True)
    created_time = models.TimeField(auto_now_add=True)
    pay_date = models.DateField(blank=True, null=True)
    last_update_date = models.DateField(auto_now=True)
    last_update_time = models.TimeField(auto_now=True)
    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    region = models.CharField(max_length=50, choices=ch_mikoa)
    location = models.CharField(max_length=50)
    phone = models.CharField(max_length=20, blank=True, null=True)
    is_active = models.BooleanField(default=True)

    def __str__(self):
        return f'{self.name} - {self.company.name}'


class Department(models.Model):
    d_name = models.CharField(max_length=100)
    location = models.CharField(max_length=100)


class Employee(models.Model):
    class Meta:
        db_table = 'tb_employee'

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    f_name = models.CharField(max_length=100)
    m_name = models.CharField(max_length=100, blank=True, null=True)
    l_name = models.CharField(max_length=100)
    gender = models.CharField(max_length=50, choices=(('M', 'Male'), ('F', 'Female')))
    phone = models.CharField(max_length=50)
    department = models.ForeignKey(Department, on_delete=models.CASCADE)
    picture = models.ImageField(upload_to='photos/employee', blank=True, null=True)
    office = models.ForeignKey(Office, on_delete=models.CASCADE, related_name='user_office', null=True, blank=True)
    companies = models.ForeignKey(Company, blank=True, null=True, on_delete=models.CASCADE,
                                  related_name='user_branches')
    created_date = models.DateField(auto_now_add=True)
    created_time = models.TimeField(auto_now_add=True)
    last_update_date = models.DateField(auto_now=True)
    last_update_time = models.TimeField(auto_now=True)

    def name(self):
        return f'{self.user.get_full_name()} - {self.user.username}'

    def __str__(self):
        return self.user.username
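
# Note that Company.save() upper-cases the name before storing, e.g.
# (illustrative):
#
#     c = Company(name='Acme Ltd')
#     c.save()          # stored as 'ACME LTD'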
|
"""
File: pydaq.py
Author: Allen Sanford (ras9841@rit.edu)
Description:
Module of python classes and functions build to aide
in data acquisition. Fitting relies on scipy's odr
module.
"""
# Imports
import matplotlib.pyplot as plt
from numpy import linspace
from scipy.stats import chi2
from scipy.odr import *
#from scipy.odr import ODR, Model, RealData
# Classes
class Graph:
""" Represents an object used for fitting and plotting """
def __init__(self, x, y, dx, dy):
"""
Builds the graph.
Keyword args
x = data points for x-axis [array]
y = data points for y-axis [array]
dx = error in x data [array]
dy = error in y data [array]
"""
self.x = x
self.y = y
self.dy = dy
self.dx = [0 for _ in x] if len(dx) == 0 else dx
self.set_labels()
def __str__(self):
""" String representation of a Graph object """
return self.title+": "+self.ylabel+" vs. "+self.xlabel
def set_labels(self, title="", xlabel="", ylabel=""):
""" Stores graphs labels """
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
class Fit:
""" Represents a fit of RealData to a Graph """
def __init__(self, args, label="", chi=None, pval=None):
"""
Stores the fit information.
Keyword args
args = fit parameters [tuple]
label = label used for graphing [string]
chi = chi-square of fit [float]
pval = p-value of chi-squared[float]
"""
self.params = args
self.label = label
self.chi = chi
self.pval = pval
def __str__(self):
""" String representation of a Fit """
return self.label
# Functions
def reduced_chi_square(xvals, yvals, sigy, func, numparam):
"""
Returns the reduced chi-squared, pvalue, and DOF of the fit.
"""
c = 0
n = len(xvals) - numparam
for x, y, s in zip(xvals, yvals, sigy):
c += (y-func(x))**2/(s**2)
return c/n, float(1-chi2.cdf(c,n)), n
def make_fit(graph, func, flabel="", x0=0, xf=0):
"""
Returns a Fit for the Graph using the fitting function.
Keyword args
graph = contains data for the fit [Graph]
func = function with arguments (*args, x) [function]
init = initial guess of fit parameter values [array]
flabel = label used to plot the function
x0 = fit starting point
xf = fit ending point
"""
xf = len(graph.x) if xf == 0 else xf
xdata = graph.x[x0:xf]
ydata = graph.y[x0:xf]
dxdata = graph.dx[x0:xf]
dydata = graph.dy[x0:xf]
print("*"*80)
print("Fitting "+str(graph)+" from x=%f to x=%f."%(xdata[0],xdata[-1]))
model = Model(func)
data = RealData(xdata, ydata, sx=dxdata, sy=dydata)
print(type(model))
print(type(data))
odr = ODR(data, model, beta0=[1.0,1.0])
out = odr.run()
f = lambda x: func(out.beta,x)
chi, p, dof = reduced_chi_square(graph.x, graph.y, graph.dy, f, len(out.beta))
fit = Fit(out.beta, label=flabel, chi=chi, pval=p)
print("\nScipy ODR fit results...")
out.pprint()
print("\nSummary of results for "+str(graph)+" ...")
print(fit)
print("Reduced chi=%f\tp=%f\tDOF=%d"%(chi, p, dof))
return fit
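
# A short usage sketch (illustrative data; a two-parameter linear model
# matches the hard-coded beta0=[1.0, 1.0] in make_fit above):
if __name__ == "__main__":
    g = Graph([1, 2, 3, 4, 5], [2.1, 3.9, 6.2, 8.1, 9.8],
              dx=[0.1] * 5, dy=[0.2] * 5)
    g.set_labels(title="Demo", xlabel="x", ylabel="y")
    make_fit(g, lambda B, x: B[0] * x + B[1], flabel="linear")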
|
#!/usr/bin/env python3
"""
Send a result to RabbitMQ.
"""

import amqp
import datetime
import pscheduler
import sys
import urllib.parse

MAX_SCHEMA = 2

log_prefix = "archiver-rabbitmq"
log = pscheduler.Log(prefix=log_prefix, quiet=True)


class AMQPExpiringConnection(object):
    """
    Maintains an expiring connection to RabbitMQ
    """

    def expired(self):
        """
        Determine if the connection is expired
        """
        return (self.connection_expires is not None) and (datetime.datetime.now() > self.connection_expires)

    def __disconnect(self):
        """
        INTERNAL: Close down the connection
        """
        try:
            self.connection.close()
        except Exception:
            pass  # This is best-effort.
        self.channel = None
        self.connection = None
        self.connection_expires = None

    def __connect(self):
        """
        INTERNAL: Establish a connection if one is needed
        """
        # If the connection has expired, drop it.
        if self.expired():
            self.__disconnect()

        if self.channel is not None:
            # Already connected.
            return

        self.connection = amqp.connection.Connection(
            host=self.host,
            ssl=self.ssl,
            virtual_host=self.virtual_host,
            login_method=self.auth_method,
            userid=self.userid,
            password=self.password,
            connect_timeout=self.timeout,
            read_timeout=self.timeout,
            write_timeout=self.timeout,
            confirm_publish=True
        )
        self.connection.connect()
        self.channel = amqp.channel.Channel(self.connection)
        self.channel.open()

        if self.expire_time is not None:
            self.connection_expires = datetime.datetime.now() + self.expire_time

    def __init__(self, url, key,
                 exchange='',
                 timeout=None,
                 expire_time=None
                 ):
        """
        Construct a connection to AMQP
        """
        self.url = url
        self.key = key

        if not isinstance(exchange, str):
            raise ValueError("Invalid exchange.")
        self.exchange = exchange

        if timeout is not None and not isinstance(timeout, datetime.timedelta):
            raise ValueError("Invalid timeout")
        # Keep None when no timeout was given; total_seconds() on None would fail.
        self.timeout = timeout.total_seconds() if timeout is not None else None

        if expire_time is not None and not isinstance(expire_time, datetime.timedelta):
            raise ValueError("Invalid expiration time")
        self.expire_time = expire_time

        parsed_url = urllib.parse.urlparse(url)

        # Set default port and SSL flag based on URL scheme
        if parsed_url.scheme == "amqp":
            port = 5672
            self.ssl = False
        elif parsed_url.scheme == "amqps":
            port = 5671
            self.ssl = True
        else:
            raise ValueError("URL must be amqp[s]://...")

        # Use port if specified in URL
        if parsed_url.port:
            port = parsed_url.port
        self.host = "%s:%s" % (parsed_url.hostname, port)

        # Remove leading slash from path to match pika parsing convention
        self.virtual_host = parsed_url.path[1:] or ""

        # These are the amqp module's defaults
        self.userid = parsed_url.username or "guest"
        self.password = parsed_url.password or "guest"
        self.auth_method = "AMQPLAIN" if (self.userid is not None or self.password is not None) else None

        self.connection = None
        self.channel = None
        self.connection_expires = None

        self.__connect()

    def __del__(self):
        """
        Destroy the connection
        """
        self.__disconnect()

    def publish(self, message):
        """
        Publish a message to the connection
        """
        self.__connect()
        try:
            self.channel.basic_publish_confirm(
                amqp.Message(message),
                exchange=self.exchange,
                routing_key=self.key,
                mandatory=True,
                immediate=False,
                timeout=self.timeout,
                confirm_timeout=self.timeout)
        except Exception as ex:
            # Any error means we start over next time.
            self.__disconnect()
            raise ex


connections = {}

GROOM_INTERVAL = datetime.timedelta(seconds=20)
next_groom = datetime.datetime.now() + GROOM_INTERVAL


def groom_connections():
    """
    Get rid of expired connections. This is intended to sweep up
    connections that are no longer used. Those in continuous use will
    self-expire and re-create themselves.
    """
    global next_groom
    if datetime.datetime.now() < next_groom:
        # Not time yet.
        return

    log.debug("Grooming connections")
    for groom in list([key
                       for key, connection in connections.items()
                       if connection.expired()
                       ]):
        log.debug("Dropping expired connection {}".format(groom))
        del connections[groom]

    next_groom += GROOM_INTERVAL
    log.debug("Next groom at {}".format(next_groom))


def archive(json):
    data = json["data"]
    log.debug("Archiving: %s" % data)

    schema = data.get("schema", 1)
    if schema > MAX_SCHEMA:
        return {
            "succeeded": False,
            "error": "Unsupported schema version %d; max is %d" % (
                schema, MAX_SCHEMA)
        }

    # Figure out the routing key
    routing_key_raw = data.get("routing-key", "")
    if isinstance(routing_key_raw, str):
        data["routing-key"] = routing_key_raw
    else:
        # JQ Transform
        log.debug("Using transform for routing key.")
        # This will already have been validated.
        transform = pscheduler.JQFilter(routing_key_raw)
        try:
            data["routing-key"] = str(transform(json["result"])[0])
        except pscheduler.JQRuntimeError as ex:
            return {
                "succeeded": False,
                "error": "Routing key transform failed: %s" % (str(ex))
            }
    log.debug("Routing key is '%s'" % (data["routing-key"]))

    key = None
    try:
        key = "%s```%s```%s```%s" % (
            data["_url"],
            data.get("exchange", ""),
            data.get("routing-key", ""),
            data.get("connection-expires", "")
        )
        expires = pscheduler.iso8601_as_timedelta(data.get("connection-expires", "PT1H"))
        timeout = pscheduler.iso8601_as_timedelta(data.get("timeout", "PT10S"))
        try:
            connection = connections[key]
        except KeyError:
            connection = AMQPExpiringConnection(data["_url"],
                                                data.get("routing-key", ""),
                                                exchange=data.get("exchange", ""),
                                                expire_time=expires,
                                                timeout=timeout
                                                )
            connections[key] = connection

        connection.publish(pscheduler.json_dump(json["result"]))
        result = {'succeeded': True}
    except Exception as ex:
        # The connection will self-recover from failures, so no need
        # to do anything other than complain about it.
        result = {
            "succeeded": False,
            "error": str(ex)
        }
        if "retry-policy" in data:
            policy = pscheduler.RetryPolicy(data["retry-policy"], iso8601=True)
            retry_time = policy.retry(json["attempts"])
            if retry_time is not None:
                result["retry"] = retry_time

    groom_connections()
    return result


PARSER = pscheduler.RFC7464Parser(sys.stdin)
EMITTER = pscheduler.RFC7464Emitter(sys.stdout)

for parsed in PARSER:
    EMITTER(archive(parsed))

pscheduler.succeed()
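
# For reference, one stdin record has roughly this shape (placeholder
# values, inferred from the fields archive() reads above):
#
#     {
#       "data": {
#         "schema": 2,
#         "_url": "amqp://guest:guest@localhost:5672/",
#         "exchange": "",
#         "routing-key": "pscheduler.results",
#         "connection-expires": "PT1H",
#         "timeout": "PT10S"
#       },
#       "result": { "succeeded": true },
#       "attempts": 0
#     }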
|
from django.db import models
from django.contrib.auth.models import User

# Create your models here.


class Mois(models.Model):
    titre = models.CharField(max_length=255)
    description = models.TextField()
    image = models.ImageField(upload_to='img', blank=True)
    statut = models.BooleanField(default=True)
    date_add = models.DateTimeField(auto_now_add=True)
    date_upd = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = "Mois"
        verbose_name_plural = "Mois"

    def __str__(self):
        return self.titre


class Module(models.Model):
    mois = models.ForeignKey('Mois', on_delete=models.CASCADE, related_name='mois_module')
    langage = models.CharField(max_length=255)
    description = models.TextField()
    image = models.ImageField(upload_to='img', blank=True)
    prix = models.IntegerField()
    statut = models.BooleanField(default=True)
    date_add = models.DateTimeField(auto_now_add=True)
    date_upd = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = "Module"
        verbose_name_plural = "Modules"

    def __str__(self):
        return self.langage


class Chapitre(models.Model):
    module = models.ForeignKey('Module', on_delete=models.CASCADE, related_name='module_chapitre')
    titre = models.CharField(max_length=255)
    description = models.TextField()
    image = models.ImageField(upload_to='img', blank=True)
    statut = models.BooleanField(default=True)
    date_add = models.DateTimeField(auto_now_add=True)
    date_upd = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = "Chapitre"
        verbose_name_plural = "Chapitres"

    def __str__(self):
        return self.titre


class Cours(models.Model):
    chapitre = models.ForeignKey('Chapitre', on_delete=models.CASCADE, related_name='chapitre_cours')
    titre = models.CharField(max_length=255)
    video = models.FileField()
    image = models.ImageField(upload_to='img', blank=True)
    statut = models.BooleanField(default=True)
    date_add = models.DateTimeField(auto_now_add=True)
    date_upd = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = "Cours"
        verbose_name_plural = "Cours"

    def __str__(self):
        return self.titre


class User_cours(models.Model):
    module = models.ForeignKey('Module', on_delete=models.CASCADE, related_name='module_user')
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='cour_user')
    statut = models.BooleanField(default=True)
    date_add = models.DateTimeField(auto_now_add=True)
    date_upd = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"

    def __str__(self):
        return self.user.username  # __str__ must return a string, not a User instance


# python3 manage.py admin_generator Api >> Api/admin.py
# python manage.py seed Api --number=15
|
import random


class jobscheduling:
    """this class handles the various functions of the scheduling algorithms"""
    process_list = []       # a list of lists that stores the processes
    total_processes = 0     # keeps track of the total number of processes
    total_time = 0          # total time the CPU will need to run all the processes
    time_slice = 5          # time slice for round robin; by default it is set to 5

    def __init__(self):
        """constructor that initializes the variables"""
        self.process_list = self.readfile()             # read the list from a file
        self.process_list.sort(key=lambda x: x[1])      # sort the processes by arrival time
        self.time()                                     # calculate the total time

    def rand_generate(self, n):
        """creates random processes with arrival time and burst time"""
        # the number of processes to create is passed as an argument from the user's input
        list1 = []
        list2 = []
        self.total_processes = 0
        for x in range(0, n):
            # each pass builds list1 = [process id, arrival time, burst time]
            # and appends it to list2, the list of lists that stores the processes
            list1.append(x)
            list1.append(random.randint(1, 20))   # random arrival time between 1 and 20
            list1.append(random.randint(1, 30))   # random burst time between 1 and 30
            list2.append(list1)                   # append list1 to list2
            self.total_processes += 1             # update the total number of processes
            list1 = []
        self.process_list = list2                 # assign list2 to the process_list variable
        self.process_list.sort(key=lambda x: x[1])  # sort the processes by arrival time
        print(self.process_list)

    def readfile(self):
        """reads the processes from the file and stores them in the respective variables"""
        with open('processes.txt') as f:
            processes = []
            for line in f:  # read the file line by line
                # separate the id, arrival time and burst time from the line
                processes.append([int(x) for x in line.split()])
                self.total_processes += 1  # update the total process count
        return processes

    def time(self):
        """calculates the total time required to run all the processes"""
        # achieved by summing the burst times of all processes and adding
        # the arrival time of the first process
        for x in range(0, self.total_processes):
            self.total_time = self.total_time + self.process_list[x][2]
        self.total_time = self.total_time + self.process_list[0][1]

    def fcfs(self):
        """implements the FCFS algorithm in verbose mode"""
        # loop from 0 to the total time needed to run all processes; at each step
        # check whether any process has arrived or terminated and print the matching
        # message. A process that arrives while another is running joins the ready queue.
        a = 0
        skip_a = 0
        b = 0
        true = 1
        time_counter = self.process_list[0][1]
        for x in range(0, self.total_time + 1):  # loop from 0 to total time
            if skip_a == 0:  # check whether all processes have arrived
                # check whether a process arrives at time x; if so, place it in the ready list
                if x == self.process_list[a][1]:
                    print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                    if a == 0:  # the first process moves straight to running
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready -> Running.")
                    # check whether any other process arrived at the same time;
                    # if so, add it to the ready list and print a message
                    while a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                    a += 1
                if a == self.total_processes:  # once the last process has arrived, stop checking for arrivals
                    skip_a = 1
            if b < self.total_processes:  # skip if all processes have finished
                # check whether the running process has used up its burst time;
                # if so, print the termination message and pick the next process
                if x == self.process_list[b][2] + time_counter:
                    time_counter = self.process_list[b][2] + time_counter
                    print("\nAt time ", x, " Process ", self.process_list[b][0], " Running -> Terminated.")
                    b = b + 1
                    true = 0
            if true == 0 and b < self.total_processes:
                # no process is running and some remain: start the next one
                print("\nAt time ", x, " Process ", self.process_list[b][0], " Ready -> Running.")
                true = 1

    def fcfs_result(self):
        """same as fcfs() but without printing status messages"""
        a = 0
        skip_a = 0
        b = 0
        true = 1
        time_counter = self.process_list[0][1]
        result = []
        for x in range(0, self.total_time + 1):
            if skip_a == 0:
                if x == self.process_list[a][1]:
                    a = a + 1
                    if a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                if a == self.total_processes:
                    skip_a = 1
            if b < self.total_processes:
                if x == self.process_list[b][2] + time_counter:
                    time_counter = self.process_list[b][2] + time_counter
                    result.append([self.process_list[b][0], x])
                    b = b + 1
                    true = 0
            if true == 0 and b < self.total_processes:
                true = 1
        for y in range(0, self.total_processes):
            print(result[y][0], " ", result[y][1])

    def sjf(self):
        """implements the SJF (shortest remaining time first) algorithm in verbose mode"""
        # loop from 0 to the total time needed to run all processes; at each step
        # check whether any process has arrived or terminated and print the matching
        # message. If a process arrives while another is running, its run time is
        # compared with the running process; if the new process needs less time it
        # runs and the current one returns to the ready list. The ready list is
        # re-sorted on every insertion, so whenever a process terminates the one
        # with the least time left runs next.
        a = 0
        skip_a = 0
        b = 0
        true = 1
        running = []
        ready_list = []
        for x in range(0, self.total_time + 1):
            if skip_a == 0:
                if x == self.process_list[a][1]:
                    print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                    ready_list.append(self.process_list[a])
                    ready_list.sort(key=lambda p: p[2])  # least remaining run time first
                    if a == 0:
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready -> Running.")
                        running = self.process_list[a]
                    a += 1
                    if a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                if a == self.total_processes:
                    skip_a = 1
            if b < self.total_processes:
                if a > 0 and running != ready_list[b] and ready_list[b][2] != 0:
                    print("\nAt time ", x, " Process ", running[0], " Running -> Ready.")
                    print("\nAt time ", x, " Process ", ready_list[b][0], " Ready -> Running.")
                    running = ready_list[b]
                if a > 0:
                    if ready_list[b][2] == 0:
                        print("\nAt time ", x, " Process ", ready_list[b][0], " Running -> Terminated.")
                        b += 1
                        true = 0
                    if x < self.total_time + 1 and b < self.total_processes:
                        ready_list[b][2] -= 1
                    if true == 0 and b < self.total_processes:
                        print("\nAt time ", x, " Process ", ready_list[b][0], " Ready -> Running.")
                        running = ready_list[b]
                        true = 1

    def sjf_result(self):
        """same as sjf() but without printing status messages"""
        a = 0
        skip_a = 0
        b = 0
        true = 1
        running = []
        ready_list = []
        result = []
        for x in range(0, self.total_time + 1):
            if skip_a == 0:
                if x == self.process_list[a][1]:
                    ready_list.append(self.process_list[a])
                    ready_list.sort(key=lambda p: p[2])
                    if a == 0:
                        running = self.process_list[a]
                    a += 1
                    if a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                if a == self.total_processes:
                    skip_a = 1
            if b < self.total_processes:
                if a > 0 and running != ready_list[b] and ready_list[b][2] != 0:
                    running = ready_list[b]
                if a > 0:
                    if ready_list[b][2] == 0:
                        result.append([ready_list[b][0], x])
                        result.sort(key=lambda r: r[0])
                        b += 1
                        true = 0
                    if x < self.total_time + 1 and b < self.total_processes:
                        ready_list[b][2] -= 1
                    if true == 0 and b < self.total_processes:
                        running = ready_list[b]
                        true = 1
        for y in range(0, self.total_processes):
            print(result[y][0], " ", result[y][1])

    def roundrobin(self):
        """implements the Round Robin algorithm in verbose mode"""
        # the implementation uses the ready list as a queue and a pointer to pick
        # the process to run next; once every process has had a turn and the ready
        # list is still non-empty, the pointer wraps around to the first entry again.
        ready_list = []         # ready list that stores the processes, i.e. works like a queue
        a = 0                   # pointer to the next process to arrive
        b = 0                   # pointer to the next process in the ready list
        skip_a = 0
        key = self.time_slice   # time allocated to a process before switching to another
        count = 0               # counts the number of cycles executed by the running process
        list_item = 0
        for x in range(0, self.total_time + 1):
            if skip_a == 0:
                if x == self.process_list[a][1]:
                    print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                    ready_list.append(self.process_list[a])
                    list_item += 1
                    if a == 0:
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready -> Running.")
                        running = self.process_list[a]
                    a += 1
                    if a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                        print("\nAt time ", x, " Process ", self.process_list[a][0], " Ready.")
                if a == self.total_processes:
                    skip_a = 1
            if len(ready_list) > 0:  # at least one process is in the ready list
                if count >= key:  # the time slice is over: move the current process back to ready
                    print("\nAt time ", x, " Process ", ready_list[b][0], " Running -> Ready.")
                    count = 0
                    b += 1  # point to the next process in the ready list
                    if b == list_item:
                        b = 0
                    print("\nAt time ", x, " Process ", ready_list[b][0], " Ready -> Running.")
                ready_list[b][2] -= 1  # each cycle reduces the remaining time of the running process by one unit
                count += 1
                if ready_list[b][2] == 0:
                    # once a process terminates, reset the counter and remove it from the ready list
                    print("\nAt time ", x, " Process ", ready_list[b][0], " Running -> Terminated.")
                    del ready_list[b]
                    count = 0
                    list_item -= 1
                    b += 1
                    if b >= list_item:
                        b = 0
                    if len(ready_list) > 0:
                        print("\nAt time ", x, " Process ", ready_list[b][0], " Ready -> Running.")

    def rr_result(self):
        """same as roundrobin() but without printing status messages"""
        ready_list = []
        a = 0
        b = 0
        skip_a = 0
        key = self.time_slice
        count = 0
        list_item = 0
        result = []
        for x in range(0, self.total_time + 1):
            if skip_a == 0:
                if x == self.process_list[a][1]:
                    ready_list.append(self.process_list[a])
                    list_item += 1
                    if a == 0:
                        running = self.process_list[a]
                    a += 1
                    if a < self.total_processes - 1 and self.process_list[a][1] == self.process_list[a + 1][1]:
                        a += 1
                if a == self.total_processes:
                    skip_a = 1
            if len(ready_list) > 0:
                if count >= key:
                    count = 0
                    b += 1
                    if b == list_item:
                        b = 0
                ready_list[b][2] -= 1
                count += 1
                if ready_list[b][2] == 0:
                    # store the result; it is displayed after all processes terminate
                    result.append([ready_list[b][0], x])
                    result.sort(key=lambda r: r[0])
                    del ready_list[b]
                    count = 0
                    list_item -= 1
                    b += 1
                    if b >= list_item:
                        b = 0
        for y in range(0, self.total_processes):
            print(result[y][0], " ", result[y][1])


command = input("Enter the command:")
obj = jobscheduling()
if command == "-f -v -r":  # FCFS, verbose, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.fcfs()
elif command == "-f -r":  # FCFS, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.fcfs_result()
elif command == "-f -processes.txt -v":  # FCFS, verbose, processes read from file
    obj.fcfs()
elif command == "-f -processes.txt":  # FCFS, processes read from file
    obj.fcfs_result()
elif command == "-sjf -v -r":  # SJF, verbose, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.sjf()
elif command == "-sjf -r":  # SJF, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.sjf_result()
elif command == "-sjf -processes.txt -v":  # SJF, verbose, processes read from file
    obj.sjf()
elif command == "-sjf -processes.txt":  # SJF, processes read from file
    obj.sjf_result()
elif command == "-rr -v -r":  # Round Robin, verbose, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.roundrobin()
elif command == "-rr -v -r -t":  # Round Robin, verbose, random processes, user's time slice
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    key = int(input("enter time slice (less than 10): "))
    obj.rand_generate(num)
    obj.time_slice = key
    obj.roundrobin()
elif command == "-rr -r":  # Round Robin, random processes
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    obj.rand_generate(num)
    obj.rr_result()
elif command == "-rr -r -t":  # Round Robin, random processes, user's time slice
    num = int(input("Enter max number of processes (greater than 3 and less than 15): "))
    key = int(input("enter time slice (less than 10): "))
    obj.rand_generate(num)
    obj.time_slice = key
    obj.rr_result()
elif command == "-rr -processes.txt -v":  # Round Robin, verbose, processes read from file
    obj.roundrobin()
elif command == "-rr -processes.txt -v -t":  # Round Robin, verbose, file, user's time slice
    key = int(input("enter time slice (less than 10): "))
    obj.time_slice = key
    obj.roundrobin()
elif command == "-rr -processes.txt":  # Round Robin, processes read from file
    obj.rr_result()
elif command == "-rr -processes.txt -t":  # Round Robin, file, user's time slice
    key = int(input("enter time slice (less than 10): "))
    obj.time_slice = key
    obj.rr_result()
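
# A processes.txt in the format readfile() expects: one process per line as
# "id arrival_time burst_time" (values illustrative):
#
#     0 1 7
#     1 3 4
#     2 5 9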
|
tabela = ('Santos','Palmeiras','Flamengo','Atlético-MG','Corinthians','São Paulo','Internacional','Athletico-PR','Botafogo','Bahia','Ceará SC','Goiás','Grêmio','Fortaleza','Vasco da Gama','Fluminense','Chapecoense','Cruzeiro','CSA','Avaí')
print(f'Os 5 primeiros colocados são {tabela[:5]}')
print(f'Os 4 últimos colocados são {tabela[-4:]}')
print(f'Organizados em ordem alfabética temos {sorted(tabela)}')
print(f'A Chapecoense está na {tabela.index("Chapecoense")+1}ª posição')
print('='*10,'Tabela','='*10)
|
import numpy as np
import pytest

import pyqtgraph as pg

app = pg.mkQApp()


@pytest.mark.parametrize('orientation', ['left', 'right', 'top', 'bottom'])
def test_PlotItem_shared_axis_items(orientation):
    """Adding an AxisItem to multiple plots raises RuntimeError"""
    ax1 = pg.AxisItem(orientation)
    ax2 = pg.AxisItem(orientation)
    layout = pg.GraphicsLayoutWidget()
    _ = layout.addPlot(axisItems={orientation: ax1})
    pi2 = layout.addPlot()
    # left or bottom replaces, right or top adds new
    pi2.setAxisItems({orientation: ax2})
    with pytest.raises(RuntimeError):
        pi2.setAxisItems({orientation: ax1})


def test_PlotItem_maxTraces():
    item = pg.PlotItem()

    curve1 = pg.PlotDataItem(np.random.normal(size=10))
    item.addItem(curve1)
    assert curve1.isVisible(), "curve1 should be visible"

    item.ctrl.maxTracesCheck.setChecked(True)
    item.ctrl.maxTracesSpin.setValue(0)
    assert not curve1.isVisible(), "curve1 should not be visible"

    item.ctrl.maxTracesCheck.setChecked(False)
    assert curve1.isVisible(), "curve1 should be visible"

    curve2 = pg.PlotDataItem(np.random.normal(size=10))
    item.addItem(curve2)
    assert curve2.isVisible(), "curve2 should be visible"

    item.ctrl.maxTracesCheck.setChecked(True)
    item.ctrl.maxTracesSpin.setValue(1)
    assert curve2.isVisible(), "curve2 should be visible"
    assert not curve1.isVisible(), "curve1 should not be visible"
    assert curve1 in item.curves, "curve1 should be in the item's curves"

    item.ctrl.forgetTracesCheck.setChecked(True)
    assert curve2 in item.curves, "curve2 should be in the item's curves"
    assert curve1 not in item.curves, "curve1 should not be in the item's curves"


def test_PlotItem_preserve_external_visibility_control():
    item = pg.PlotItem()
    curve1 = pg.PlotDataItem(np.random.normal(size=10))
    curve2 = pg.PlotDataItem(np.random.normal(size=10))

    item.addItem(curve1)
    curve1.hide()
    item.addItem(curve2)
    assert not curve1.isVisible()

    item.removeItem(curve2)
    assert not curve1.isVisible()


def test_plotitem_menu_initialize():
    """Test the menu initialization of the plotitem"""
    item = pg.PlotItem()
    assert item.menuEnabled() is True
    viewbox = item.vb
    assert viewbox is not None
    assert viewbox.menu is not None
    assert viewbox.menuEnabled() is True

    item = pg.PlotItem(enableMenu=False)
    assert item.menuEnabled() is False
    viewbox = item.vb
    assert viewbox is not None
    assert viewbox.menu is None
    assert viewbox.menuEnabled() is False

    viewbox = pg.ViewBox()
    item = pg.PlotItem(viewBox=viewbox, enableMenu=False)
    assert item.menuEnabled() is False
    viewbox = item.vb
    assert viewbox is not None
    assert viewbox.menu is not None
    assert viewbox.menuEnabled() is True

    viewbox = pg.ViewBox(enableMenu=False)
    item = pg.PlotItem(viewBox=viewbox)
    assert item.menuEnabled() is True
    viewbox = item.vb
    assert viewbox is not None
    assert viewbox.menu is None
    assert viewbox.menuEnabled() is False
|
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly
import seaborn as sns
import matplotlib.pyplot as plt
pio.renderers.default = "browser"
# https://habr.com/ru/post/468295/
def generate_data(start_x, end_x, step=0.1, spread=10, bias=0):
x = np.arange(start_x, end_x, step)
y = []
for xi in x:
y.append(-2 * xi + np.random.normal(0, spread / 3) + bias)
y = np.array(y)
return x, y
def distance(x0, y0, k, b):
return abs(-k*x0 + y0 - b) / (k**2 + 1)**0.5
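# Sanity checks (added sketch): the point-line distance above should give 0 for
# a point on y = x, and 1/sqrt(2) for the point (0, 1) against the same line.
assert abs(distance(0, 0, 1, 0)) < 1e-12
assert abs(distance(0, 1, 1, 0) - 1 / 2 ** 0.5) < 1e-12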
def formula_regularization(train, trainRes, M, koefOfReg=0):
    # M - number of basis functions (model parameters)
    # N - len(train)
def getBasisFunc(degree):
return lambda x: x**degree
basisFunctions = [getBasisFunc(i) for i in range(M)]
FI = lambda x: [func(x) for func in basisFunctions]
matrixOfPlan = [FI(x) for x in train]
matrixOfPlan = np.reshape(matrixOfPlan, (len(train), M))
    I = np.eye(M, dtype=int)
    I[0][0] = 0
    # We don't regularize w0: it's the bias term and may legitimately be large
    # (e.g. when the whole dataset sits ~1000 units above the x-axis).
w = np.dot(
np.dot(
np.linalg.inv(np.dot(matrixOfPlan.transpose(), matrixOfPlan) + I * koefOfReg),
matrixOfPlan.transpose()),
trainRes)
print(np.int32(w))
return w, FI
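# Worked example (added sketch): with M=2 the basis is [1, x], so the closed-form
# solution above reduces to plain least squares; fitting y = 2x + 1 should recover
# w ≈ [1, 2]. (The function also prints the rounded weights as a side effect.)
_xs = np.array([0.0, 1.0, 2.0, 3.0])
_w, _FI = formula_regularization(_xs, 2.0 * _xs + 1.0, 2)
assert np.allclose(_w, [1.0, 2.0])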
def getModel1(train, trainRes, M, koefOfReg=0):
    w, FI = formula_regularization(train, trainRes, M, koefOfReg)
loss = 0
for i in range(len(train)):
loss += (trainRes[i] - np.dot(w.transpose(), FI(train[i])))**2
loss = loss / 2
print(M, "loss", loss)
print()
return lambda x: np.dot(w.transpose(), FI(x))
def grad_descent_with_my_func_loss(x, y):
# cost func is func of distance
k = 0
b = 0
epochs = 10000
N = len(x)
error = 0
learning_rate = 10
previous_error = 2**64
for epoch in range(epochs):
nablaK = 0
nablaB = 0
error = 0
for xi, yi in zip(x, y):
error += distance(xi, yi, k, b) ** 2
nablaK += ((-k*xi + yi - b) / (abs(-k*xi + yi - b)) * -xi * np.sqrt(k**2 + 1) - abs(k * xi - yi + b) * k / np.sqrt(k**2 + 1)) / (k**2 + 1)
nablaB += -1 / np.sqrt(k**2 + 1) * (-k*xi + yi - b) / (abs(-k*xi + yi - b))
error /= N
if previous_error < error and abs(error - previous_error) > 1:
learning_rate /= 10
print("CHANGE lr, epoch =", epoch, "because", previous_error, error,
abs(error - previous_error), "\n")
if abs(previous_error - error) < 0.0000001 and error < 1:
            print(epoch, "stopping early: error has converged")
break
k -= (nablaK / N) * learning_rate
b -= (nablaB / N) * learning_rate
previous_error = error
print("error", error)
return k, b
def parse_data(path):
f = open(path)
xs = np.array([])
ys = np.array([])
for line in f.readlines():
l = line.split(",")
xs = np.append(xs, l[:-1])
ys = np.append(ys, l[-1])
xs, ys = xs.astype(float), ys.astype(float)
xs = xs.reshape((len(xs) // len(l[:-1]), len(l[:-1])))
return xs, ys
def loss_RMSE(ys_predict, ys):
    # NOTE: despite the name, this returns the mean squared error (no square root).
    loss = np.power(ys_predict - ys, 2)
    loss = np.sum(loss)
    loss /= len(ys)
    return loss
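# Quick check (added): identical predictions give zero loss.
assert loss_RMSE(np.array([1.0, 2.0]), np.array([1.0, 2.0])) == 0.0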
def surface_of_loss(theta0, theta1, x, y):
surf = np.zeros((len(theta0), len(theta1)))
for i, t0 in enumerate(theta0):
for j, t1 in enumerate(theta1):
y_pred = t0 + t1 * x
surf[i, j] = loss_RMSE(y_pred, y)
return surf
def get_nabla(x, y_pred, y, count_params):
grad = np.zeros(count_params)
grad[0] = (y_pred - y).sum()
grad[1:] = ((y_pred - y) * x).sum(axis=0)
grad /= x.shape[0]
return grad
def get_lin_reg(x, y, epochs, learning_rate, count_params):
error = 0
theta = np.random.random(count_params)
for epoch in range(epochs):
y_pred = theta[0] + (x * theta[1:]).sum(axis=1)
y_pred = np.reshape(y_pred, (y_pred.shape[0], 1))
error = loss_RMSE(y_pred, y)
nabla = get_nabla(x, y_pred, y, count_params)
theta -= nabla * learning_rate
if epoch % 250 == 0:
print(f"№{epoch} loss={np.round(error, 3)}"
f" nabla={np.round(nabla, 4)}"
f" theta={np.round(theta, 4)}")
print("error", error)
    return lambda x_: theta[0] + np.sum(theta[1:] * x_)
def plot_lin_reg_model(x, y, model):
plot_x = np.arange(np.min(x), np.max(x), 0.1)
plot_y = np.array([model(xi) for xi in plot_x]).reshape(-1)
fig = go.Figure()
fig.add_trace(go.Scatter(x=x.reshape(-1), y=y.reshape(-1), mode='markers'))
fig.add_trace(go.Scatter(x=plot_x, y=plot_y, mode="lines"))
fig.show()
def plot_lin_reg_model_with_2_features(x, y, model):
fig = go.Figure()
# size = x[:, 1] * std_x[1] + mean_x[1]
fig.add_trace(go.Scatter3d(x=x[:, 0].reshape(-1), y=x[:, 1].reshape(-1), z=y.reshape(-1),
mode='markers'))
plot_x0 = np.linspace(np.min(x[:, 0]), np.max(x[:, 0]), 100)
plot_x1 = np.linspace(np.min(x[:, 1]), np.max(x[:, 1]), 100)
plot_y = np.zeros((len(plot_x0), len(plot_x1)))
for i, xi0 in enumerate(plot_x0):
for j, xj1 in enumerate(plot_x1):
plot_y[i, j] = model(np.array([xi0, xj1]))
fig.add_trace(go.Surface(x=plot_x0, y=plot_x1, z=plot_y))
fig.show()
def plot_surface_of_error():
surf_theta0 = np.arange(-10, 10, 0.1)
surf_theta1 = np.arange(-10, 10, 0.1)
surface = surface_of_loss(surf_theta0, surf_theta1, train_x, train_y)
fig = plotly.subplots.make_subplots(rows=1, cols=2)
fig.add_trace(go.Surface(x=surf_theta0, y=surf_theta1, z=surface))
fig.add_trace(go.Contour(x=surf_theta0, y=surf_theta1, z=surface))
fig.update_layout(title='Loss surface', autosize=False,
width=1980, height=1080)
fig.show()
if __name__ == "__main__":
# (340412.66, 110631.05, -6649.47)
p = r"C:\Users\Norma\Downloads\files01\ex1data2.txt"
train_x, train_y = parse_data(p)
train_y = np.reshape(train_y, (train_y.shape[0], 1))
print(train_x.shape, train_y.shape)
mean_x = np.mean(train_x, axis=0)
std_x = np.std(train_x, axis=0)
norm_train_x = (train_x - mean_x) / std_x
params = train_x.shape[1] + 1
lin_reg = get_lin_reg(norm_train_x, train_y, epochs=1001, learning_rate=0.05,
count_params=params)
# plot_lin_reg_model(train_x, train_y, lin_reg)
plot_lin_reg_model_with_2_features(norm_train_x, train_y, lin_reg)
# plot_surface_of_error()
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import logging
from collections import defaultdict
from powerline.lib.threaded import ThreadedSegment
from powerline.lib.unicode import unicode
from powerline.lint.markedjson.markedvalue import MarkedUnicode
from powerline.lint.markedjson.error import DelayedEchoErr, Mark
from powerline.lint.selfcheck import havemarks
from powerline.lint.context import JStr, list_themes
from powerline.lint.imp import WithPath, import_function, import_segment
from powerline.lint.spec import Spec
from powerline.lint.inspect import getconfigargspec
list_sep = JStr(', ')
generic_keys = set((
'exclude_modes', 'include_modes',
'exclude_function', 'include_function',
'width', 'align',
'name',
'draw_soft_divider', 'draw_hard_divider',
'priority',
'after', 'before',
'display'
))
type_keys = {
'function': set(('function', 'args', 'draw_inner_divider')),
'string': set(('contents', 'type', 'highlight_groups', 'divider_highlight_group')),
'segment_list': set(('function', 'segments', 'args', 'type')),
}
required_keys = {
'function': set(('function',)),
'string': set(()),
'segment_list': set(('function', 'segments',)),
}
highlight_keys = set(('highlight_groups', 'name'))
def get_function_strings(function_name, context, ext):
if '.' in function_name:
module, function_name = function_name.rpartition('.')[::2]
else:
module = context[0][1].get(
'default_module', MarkedUnicode('powerline.segments.' + ext, None))
return module, function_name
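# Behavior note (added): dotted names split into (module, function) via
# rpartition, plain names fall back to the theme's default_module.
assert 'a.b.c'.rpartition('.')[::2] == ('a.b', 'c')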
def check_matcher_func(ext, match_name, data, context, echoerr):
havemarks(match_name)
import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])]
match_module, separator, match_function = match_name.rpartition('.')
if not separator:
match_module = 'powerline.matchers.{0}'.format(ext)
match_function = match_name
with WithPath(import_paths):
try:
func = getattr(__import__(str(match_module), fromlist=[str(match_function)]), str(match_function))
except ImportError:
echoerr(context='Error while loading matcher functions',
problem='failed to load module {0}'.format(match_module),
problem_mark=match_name.mark)
return True, False, True
except AttributeError:
echoerr(context='Error while loading matcher functions',
problem='failed to load matcher function {0}'.format(match_function),
problem_mark=match_name.mark)
return True, False, True
if not callable(func):
echoerr(context='Error while loading matcher functions',
problem='loaded “function” {0} is not callable'.format(match_function),
problem_mark=match_name.mark)
return True, False, True
if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'):
if func.func_code.co_argcount != 1:
echoerr(
context='Error while loading matcher functions',
problem=(
'function {0} accepts {1} arguments instead of 1. '
'Are you sure it is the proper function?'
).format(match_function, func.func_code.co_argcount),
problem_mark=match_name.mark
)
return True, False, False
def check_ext(ext, data, context, echoerr):
havemarks(ext)
hadsomedirs = False
hadproblem = False
if ext not in data['lists']['exts']:
hadproblem = True
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='extension configuration does not exist')
else:
for typ in ('themes', 'colorschemes'):
if ext not in data['configs'][typ] and not data['configs']['top_' + typ]:
hadproblem = True
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='{0} configuration does not exist'.format(typ))
else:
hadsomedirs = True
return hadsomedirs, hadproblem
def check_config(d, theme, data, context, echoerr):
if len(context) == 4:
ext = context[-2][0]
else:
# local_themes
ext = context[-3][0]
if ext not in data['lists']['exts']:
echoerr(context='Error while loading {0} extension configuration'.format(ext),
context_mark=ext.mark,
problem='extension configuration does not exist')
return True, False, True
if (
(ext not in data['configs'][d] or theme not in data['configs'][d][ext])
and theme not in data['configs']['top_' + d]
):
echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext),
problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme),
problem_mark=theme.mark)
return True, False, True
return True, False, False
def check_top_theme(theme, data, context, echoerr):
havemarks(theme)
if theme not in data['configs']['top_themes']:
echoerr(context='Error while checking extension configuration (key {key})'.format(key=context.key),
context_mark=context[-2][0].mark,
problem='failed to find top theme {0}'.format(theme),
problem_mark=theme.mark)
return True, False, True
return True, False, False
def check_color(color, data, context, echoerr):
havemarks(color)
if (color not in data['colors_config'].get('colors', {})
and color not in data['colors_config'].get('gradients', {})):
echoerr(
context='Error while checking highlight group in colorscheme (key {key})'.format(
key=context.key),
			problem='found nonexistent color or gradient {0}'.format(color),
problem_mark=color.mark
)
return True, False, True
return True, False, False
def check_translated_group_name(group, data, context, echoerr):
return check_group(group, data, context, echoerr)
def check_group(group, data, context, echoerr):
havemarks(group)
if not isinstance(group, unicode):
return True, False, False
colorscheme = data['colorscheme']
ext = data['ext']
configs = None
if ext:
def listed_key(d, k):
try:
return [d[k]]
except KeyError:
return []
if colorscheme == '__main__':
colorscheme_names = set(data['ext_colorscheme_configs'][ext])
colorscheme_names.update(data['top_colorscheme_configs'])
colorscheme_names.discard('__main__')
configs = [
(
name,
listed_key(data['ext_colorscheme_configs'][ext], name)
+ listed_key(data['ext_colorscheme_configs'][ext], '__main__')
+ listed_key(data['top_colorscheme_configs'], name)
)
for name in colorscheme_names
]
else:
configs = [
(
colorscheme,
listed_key(data['ext_colorscheme_configs'][ext], colorscheme)
+ listed_key(data['ext_colorscheme_configs'][ext], '__main__')
+ listed_key(data['top_colorscheme_configs'], colorscheme)
)
]
else:
try:
configs = [(colorscheme, [data['top_colorscheme_configs'][colorscheme]])]
except KeyError:
pass
hadproblem = False
for new_colorscheme, config_lst in configs:
not_found = []
new_data = data.copy()
new_data['colorscheme'] = new_colorscheme
for config in config_lst:
havemarks(config)
try:
group_data = config['groups'][group]
except KeyError:
not_found.append(config.mark.name)
else:
proceed, echo, chadproblem = check_group(
group_data,
new_data,
context,
echoerr,
)
if chadproblem:
hadproblem = True
if not proceed:
break
if not_found and len(not_found) == len(config_lst):
echoerr(
context='Error while checking group definition in colorscheme (key {key})'.format(
key=context.key),
problem='name {0} is not present anywhere in {1} {2} {3} colorschemes: {4}'.format(
group, len(not_found), ext, new_colorscheme, ', '.join(not_found)),
problem_mark=group.mark
)
hadproblem = True
return True, False, hadproblem
def check_key_compatibility(segment, data, context, echoerr):
havemarks(segment)
segment_type = segment.get('type', MarkedUnicode('function', None))
havemarks(segment_type)
if segment_type not in type_keys:
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='found segment with unknown type {0}'.format(segment_type),
problem_mark=segment_type.mark)
return False, False, True
hadproblem = False
keys = set(segment)
if not ((keys - generic_keys) < type_keys[segment_type]):
unknown_keys = keys - generic_keys - type_keys[segment_type]
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem='found keys not used with the current segment type: {0}'.format(
list_sep.join(unknown_keys)),
problem_mark=list(unknown_keys)[0].mark
)
hadproblem = True
if not (keys >= required_keys[segment_type]):
missing_keys = required_keys[segment_type] - keys
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem='found missing required keys: {0}'.format(
list_sep.join(missing_keys))
)
hadproblem = True
if not (segment_type == 'function' or (keys & highlight_keys)):
echoerr(
context='Error while checking segments (key {key})'.format(key=context.key),
context_mark=context[-1][1].mark,
problem=(
'found missing keys required to determine highlight group. '
'Either highlight_groups or name key must be present'
)
)
hadproblem = True
return True, False, hadproblem
def check_segment_module(module, data, context, echoerr):
havemarks(module)
with WithPath(data['import_paths']):
try:
__import__(str(module))
except ImportError as e:
if echoerr.logger.level >= logging.DEBUG:
echoerr.logger.exception(e)
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='failed to import module {0}'.format(module),
problem_mark=module.mark)
return True, False, True
return True, False, False
def check_full_segment_data(segment, data, context, echoerr):
if 'name' not in segment and 'function' not in segment:
return True, False, False
ext = data['ext']
theme_segment_data = context[0][1].get('segment_data', {})
main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
if not main_theme_name or data['theme'] == main_theme_name:
top_segment_data = {}
else:
top_segment_data = data['ext_theme_configs'].get(main_theme_name, {}).get('segment_data', {})
if segment.get('type', 'function') == 'function':
function_name = segment.get('function')
if function_name:
module, function_name = get_function_strings(function_name, context, ext)
names = [module + '.' + function_name, function_name]
else:
names = []
elif segment.get('name'):
names = [segment['name']]
else:
return True, False, False
segment_copy = segment.copy()
for key in ('before', 'after', 'args', 'contents'):
if key not in segment_copy:
for segment_data in [theme_segment_data, top_segment_data]:
for name in names:
try:
val = segment_data[name][key]
k = segment_data[name].keydict[key]
segment_copy[k] = val
except KeyError:
pass
return check_key_compatibility(segment_copy, data, context, echoerr)
highlight_group_spec = Spec().ident().copy
_highlight_group_spec = highlight_group_spec().context_message(
'Error while checking function documentation while checking theme (key {key})')
def check_hl_group_name(hl_group, context_mark, context, echoerr):
'''Check highlight group name: it should match naming conventions
:param str hl_group:
Checked group.
:param Mark context_mark:
Context mark. May be ``None``.
:param Context context:
Current context.
:param func echoerr:
Function used for error reporting.
:return: ``False`` if check succeeded and ``True`` if it failed.
'''
return _highlight_group_spec.match(hl_group, context_mark=context_mark, context=context, echoerr=echoerr)[1]
def check_segment_function(function_name, data, context, echoerr):
havemarks(function_name)
ext = data['ext']
module, function_name = get_function_strings(function_name, context, ext)
if context[-2][1].get('type', 'function') == 'function':
func = import_segment(function_name, data, context, echoerr, module=module)
if not func:
return True, False, True
hl_groups = []
divider_hl_group = None
hadproblem = False
if func.__doc__:
NO_H_G_USED_STR = 'No highlight groups are used (literal segment).'
H_G_USED_STR = 'Highlight groups used: '
LHGUS = len(H_G_USED_STR)
D_H_G_USED_STR = 'Divider highlight group used: '
LDHGUS = len(D_H_G_USED_STR)
pointer = 0
mark_name = '<{0} docstring>'.format(function_name)
for i, line in enumerate(func.__doc__.split('\n')):
if H_G_USED_STR in line:
idx = line.index(H_G_USED_STR) + LHGUS
if hl_groups is None:
idx -= LHGUS
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found highlight group definition in addition to sentence stating that '
'no highlight groups are used'
),
problem_mark=mark,
)
hadproblem = True
continue
hl_groups.append((
line[idx:],
(mark_name, i + 1, idx + 1, func.__doc__),
pointer + idx
))
elif D_H_G_USED_STR in line:
idx = line.index(D_H_G_USED_STR) + LDHGUS + 2
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
divider_hl_group = MarkedUnicode(line[idx:-3], mark)
elif NO_H_G_USED_STR in line:
idx = line.index(NO_H_G_USED_STR)
if hl_groups:
mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found sentence stating that no highlight groups are used '
'in addition to highlight group definition'
),
problem_mark=mark,
)
hadproblem = True
continue
hl_groups = None
pointer += len(line) + len('\n')
if divider_hl_group:
r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
context_mark=function_name.mark,
problem=(
'found highlight group {0} not defined in the following colorschemes: {1}\n'
'(Group name was obtained from function documentation.)'
).format(divider_hl_group, list_sep.join(r)),
problem_mark=divider_hl_group.mark,
)
hadproblem = True
if check_hl_group_name(divider_hl_group, function_name.mark, context, echoerr):
hadproblem = True
if hl_groups:
greg = re.compile(r'``([^`]+)``( \(gradient\))?')
parsed_hl_groups = []
for line, mark_args, pointer in hl_groups:
for s in line.split(', '):
required_pack = []
sub_pointer = pointer
for subs in s.split(' or '):
match = greg.match(subs)
try:
if not match:
continue
hl_group = MarkedUnicode(
match.group(1),
Mark(*mark_args, pointer=sub_pointer + match.start(1))
)
if check_hl_group_name(hl_group, function_name.mark, context, echoerr):
hadproblem = True
gradient = bool(match.group(2))
required_pack.append((hl_group, gradient))
finally:
sub_pointer += len(subs) + len(' or ')
parsed_hl_groups.append(required_pack)
pointer += len(s) + len(', ')
del hl_group, gradient
for required_pack in parsed_hl_groups:
rs = [
hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False))
for hl_group, gradient in required_pack
]
if all(rs):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem=(
'found highlight groups list ({0}) with all groups not defined in some colorschemes\n'
'(Group names were taken from function documentation.)'
).format(list_sep.join((h[0] for h in required_pack))),
problem_mark=function_name.mark
)
for r, h in zip(rs, required_pack):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
h[0], list_sep.join(r))
)
hadproblem = True
elif hl_groups is not None:
r = hl_exists(function_name, data, context, echoerr, allow_gradients=True)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem=(
'found highlight group {0} not defined in the following colorschemes: {1}\n'
'(If not specified otherwise in documentation, '
'highlight group for function segments\n'
'is the same as the function name.)'
).format(function_name, list_sep.join(r)),
problem_mark=function_name.mark
)
hadproblem = True
return True, False, hadproblem
elif context[-2][1].get('type') != 'segment_list':
if function_name not in context[0][1].get('segment_data', {}):
main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
if data['theme'] == main_theme_name:
main_theme = {}
else:
main_theme = data['ext_theme_configs'].get(main_theme_name, {})
if (
function_name not in main_theme.get('segment_data', {})
and function_name not in data['ext_theme_configs'].get('__main__', {}).get('segment_data', {})
and not any(((function_name in theme.get('segment_data', {})) for theme in data['top_themes'].values()))
):
echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
problem='found useless use of name key (such name is not present in theme/segment_data)',
problem_mark=function_name.mark)
return True, False, False
def hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr):
havemarks(hl_group, cconfig)
if hl_group not in cconfig.get('groups', {}):
return False
elif not allow_gradients or allow_gradients == 'force':
group_config = cconfig['groups'][hl_group]
while isinstance(group_config, unicode):
try:
group_config = cconfig['groups'][group_config]
except KeyError:
# No such group. Error was already reported when checking
# colorschemes.
return True
havemarks(group_config)
hadgradient = False
for ckey in ('fg', 'bg'):
color = group_config.get(ckey)
if not color:
# No color. Error was already reported when checking
# colorschemes.
return True
havemarks(color)
# Gradients are only allowed for function segments. Note that
# whether *either* color or gradient exists should have been
# already checked
hascolor = color in data['colors_config'].get('colors', {})
hasgradient = color in data['colors_config'].get('gradients', {})
if hasgradient:
hadgradient = True
if allow_gradients is False and not hascolor and hasgradient:
echoerr(
context='Error while checking highlight group in theme (key {key})'.format(
key=context.key),
context_mark=hl_group.mark,
problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color),
problem_mark=color.mark
)
return False
if allow_gradients == 'force' and not hadgradient:
echoerr(
context='Error while checking highlight group in theme (key {key})'.format(
key=context.key),
context_mark=hl_group.mark,
				problem='group {0} should have at least one gradient color, but it has none'.format(hl_group),
problem_mark=group_config.mark
)
return False
return True
def hl_exists(hl_group, data, context, echoerr, allow_gradients=False):
havemarks(hl_group)
ext = data['ext']
if ext not in data['colorscheme_configs']:
# No colorschemes. Error was already reported, no need to report it
# twice
return []
r = []
found = False
for colorscheme, cconfig in data['colorscheme_configs'][ext].items():
if hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr):
found = True
else:
r.append(colorscheme)
if not found:
pass
return r
def check_highlight_group(hl_group, data, context, echoerr):
havemarks(hl_group)
r = hl_exists(hl_group, data, context, echoerr)
if r:
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
hl_group, list_sep.join(r)),
problem_mark=hl_group.mark
)
return True, False, True
return True, False, False
def check_highlight_groups(hl_groups, data, context, echoerr):
havemarks(hl_groups)
rs = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups]
if all(rs):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format(
list_sep.join((unicode(h) for h in hl_groups))),
problem_mark=hl_groups.mark
)
for r, hl_group in zip(rs, hl_groups):
echoerr(
context='Error while checking theme (key {key})'.format(key=context.key),
problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
hl_group, list_sep.join(r)),
problem_mark=hl_group.mark
)
return True, False, True
return True, False, False
def check_segment_data_key(key, data, context, echoerr):
havemarks(key)
has_module_name = '.' in key
found = False
for ext, theme in list_themes(data, context):
for segments in theme.get('segments', {}).values():
for segment in segments:
if 'name' in segment:
if key == segment['name']:
found = True
break
else:
function_name = segment.get('function')
if function_name:
module, function_name = get_function_strings(function_name, ((None, theme),), ext)
if has_module_name:
full_name = module + '.' + function_name
if key == full_name:
found = True
break
else:
if key == function_name:
found = True
break
if found:
break
if found:
break
else:
if data['theme_type'] != 'top':
echoerr(context='Error while checking segment data',
problem='found key {0} that cannot be associated with any segment'.format(key),
problem_mark=key.mark)
return True, False, True
return True, False, False
threaded_args_specs = {
'interval': Spec().cmp('gt', 0.0),
'update_first': Spec().type(bool),
'shutdown_event': Spec().error('Shutdown event must be set by powerline'),
}
def check_args_variant(func, args, data, context, echoerr):
havemarks(args)
argspec = getconfigargspec(func)
present_args = set(args)
all_args = set(argspec.args)
required_args = set(argspec.args[:-len(argspec.defaults)])
hadproblem = False
if required_args - present_args:
echoerr(
context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=args.mark,
problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args))
)
hadproblem = True
if not all_args >= present_args:
echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=args.mark,
problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)),
problem_mark=next(iter(present_args - all_args)).mark)
hadproblem = True
if isinstance(func, ThreadedSegment):
for key in set(threaded_args_specs) & present_args:
proceed, khadproblem = threaded_args_specs[key].match(
args[key],
args.mark,
data,
context.enter_key(args, key),
echoerr
)
if khadproblem:
hadproblem = True
if not proceed:
return hadproblem
return hadproblem
def check_args(get_functions, args, data, context, echoerr):
new_echoerr = DelayedEchoErr(echoerr)
count = 0
hadproblem = False
for func in get_functions(data, context, new_echoerr):
count += 1
shadproblem = check_args_variant(func, args, data, context, echoerr)
if shadproblem:
hadproblem = True
if not count:
hadproblem = True
if new_echoerr:
new_echoerr.echo_all()
else:
echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
context_mark=context[-2][1].mark,
problem='no suitable segments found')
return True, False, hadproblem
def get_one_segment_function(data, context, echoerr):
ext = data['ext']
function_name = context[-2][1].get('function')
if function_name:
module, function_name = get_function_strings(function_name, context, ext)
func = import_segment(function_name, data, context, echoerr, module=module)
if func:
yield func
common_names = defaultdict(set)
def register_common_name(name, cmodule, cname):
s = cmodule + '.' + cname
cmodule_mark = Mark('<common name definition>', 1, 1, s, 1)
cname_mark = Mark('<common name definition>', 1, len(cmodule) + 1, s, len(cmodule) + 1)
common_names[name].add((MarkedUnicode(cmodule, cmodule_mark), MarkedUnicode(cname, cname_mark)))
def get_all_possible_functions(data, context, echoerr):
name = context[-2][0]
module, name = name.rpartition('.')[::2]
if module:
func = import_segment(name, data, context, echoerr, module=module)
if func:
yield func
else:
if name in common_names:
for cmodule, cname in common_names[name]:
cfunc = import_segment(cname, data, context, echoerr, module=MarkedUnicode(cmodule, None))
if cfunc:
yield cfunc
for ext, theme_config in list_themes(data, context):
for segments in theme_config.get('segments', {}).values():
for segment in segments:
if segment.get('type', 'function') == 'function':
function_name = segment.get('function')
current_name = segment.get('name')
if function_name:
module, function_name = get_function_strings(function_name, ((None, theme_config),), ext)
if current_name == name or function_name == name:
func = import_segment(function_name, data, context, echoerr, module=module)
if func:
yield func
def check_exinclude_function(name, data, context, echoerr):
ext = data['ext']
module, name = name.rpartition('.')[::2]
if not module:
module = MarkedUnicode('powerline.selectors.' + ext, None)
func = import_function('selector', name, data, context, echoerr, module=module)
if not func:
return True, False, True
return True, False, False
def check_log_file_level(this_level, data, context, echoerr):
'''Check handler level specified in :ref:`log_file key <config-common-log>`
This level must be greater or equal to the level in :ref:`log_level key
<config-common-log_level>`.
'''
havemarks(this_level)
hadproblem = False
top_level = context[0][1].get('common', {}).get('log_level', 'WARNING')
top_level_str = top_level
top_level_mark = getattr(top_level, 'mark', None)
if (
not isinstance(top_level, unicode) or not hasattr(logging, top_level)
or not isinstance(this_level, unicode) or not hasattr(logging, this_level)
):
return True, False, hadproblem
top_level = getattr(logging, top_level)
this_level_str = this_level
this_level_mark = this_level.mark
this_level = getattr(logging, this_level)
if this_level < top_level:
echoerr(
context='Error while checking log level index (key {key})'.format(
key=context.key),
context_mark=this_level_mark,
			problem='found level that is less critical than top level ({0} < {1})'.format(
this_level_str, top_level_str),
problem_mark=top_level_mark,
)
hadproblem = True
return True, False, hadproblem
def check_logging_handler(handler_name, data, context, echoerr):
havemarks(handler_name)
import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])]
handler_module, separator, handler_class = handler_name.rpartition('.')
if not separator:
handler_module = 'logging.handlers'
handler_class = handler_name
with WithPath(import_paths):
try:
handler = getattr(__import__(str(handler_module), fromlist=[str(handler_class)]), str(handler_class))
except ImportError:
echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
problem='failed to load module {0}'.format(handler_module),
problem_mark=handler_name.mark)
return True, False, True
except AttributeError:
echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
problem='failed to load handler class {0}'.format(handler_class),
problem_mark=handler_name.mark)
return True, False, True
if not issubclass(handler, logging.Handler):
echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
problem='loaded class {0} is not a logging.Handler subclass'.format(handler_class),
problem_mark=handler_name.mark)
return True, False, True
return True, False, False
|
from django.test import TestCase
from django.urls import reverse
from waffle.models import Switch
from . import forms
from .models import Registrant, Location, EmailConfirmation
from .utils import generate_random_key
from django.contrib.messages import get_messages
from portal.services import NotifyService
from portal import container
class RegisterView(TestCase):
def test_start_page(self):
response = self.client.get(reverse("register:start"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "register/start.html")
def test_email_page(self):
response = self.client.get(reverse("register:registrant_email"))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "register/registrant_email.html")
def test_name_page(self):
r = Registrant.objects.create(email="test@test.com")
session = self.client.session
session["registrant_id"] = str(r.id)
session.save()
response = self.client.get(reverse("register:registrant_name"))
self.assertEqual(response.status_code, 200)
def test_confirmation_page_logged_in(self):
email = "test@test.com"
r = Registrant.objects.create(email=email)
# add id and email to session (happens at confirmation)
session = self.client.session
session["registrant_id"] = str(r.id)
session["registrant_email"] = r.email
session.save()
response = self.client.get(reverse("register:confirmation"))
self.assertEqual(response.status_code, 200)
class RegisterEmailConfirmation(TestCase):
def setUp(self):
container.notify_service.override(NotifyService()) # Prevent sending emails
def test_email_form_empty(self):
form = forms.EmailForm(data={})
self.assertEqual(form.errors["email"], ["This field is required."])
def test_can_confirm_email(self):
email = "test@test.com"
# submit email
response = self.client.post(
reverse("register:registrant_email"), data={"email": email}
)
self.assertEqual(response.status_code, 302)
# confirmation screen
response = self.client.get(reverse("register:email_submitted"))
self.assertContains(response, "Confirm your email address")
self.assertContains(response, email)
# check the confirmation record
confirm = EmailConfirmation.objects.get(email=email)
        self.assertEqual(confirm.email, email)
# generate the confirmation link
confirm_url = reverse(
"register:email_confirm",
kwargs={"pk": confirm.pk},
)
# visit the confirmation link
self.client.get(confirm_url)
# confirmation record should be deleted
self.assertIsNone(
EmailConfirmation.objects.filter(email=email).first(),
)
        # email confirmed, should be able to get to the name step
        response = self.client.get(reverse("register:registrant_name"))
        self.assertEqual(response.status_code, 200)
class RegisterConfirmedEmailRequiredPages(TestCase):
def test_registrant_name_not_logged_in(self):
response = self.client.get(reverse("register:registrant_name"))
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_location_address_not_logged_in(self):
response = self.client.get(
reverse("register:location_step", kwargs={"step": "address"})
)
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_location_category_not_logged_in(self):
response = self.client.get(
reverse("register:location_step", kwargs={"step": "category"})
)
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_location_name_not_logged_in(self):
response = self.client.get(
reverse("register:location_step", kwargs={"step": "name"})
)
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_location_contact_not_logged_in(self):
response = self.client.get(
reverse("register:location_step", kwargs={"step": "contact"})
)
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_location_summary_not_logged_in(self):
response = self.client.get(
reverse("register:location_step", kwargs={"step": "summary"})
)
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
def test_confirmation_not_logged_in(self):
response = self.client.get(reverse("register:confirmation"))
self.assertRedirects(response, reverse("register:registrant_email"))
message = list(get_messages(response.wsgi_request))[0]
self.assertEqual(message.tags, "error")
class RegisterLocationDetailsValidation(TestCase):
def test_location_category_empty(self):
form = forms.LocationCategoryForm(data={})
self.assertEqual(form.errors["category"], ["This field is required."])
def test_location_name_empty(self):
form = forms.LocationNameForm(data={})
self.assertEqual(form.errors["name"], ["This field is required."])
def test_location_address_address_field_empty(self):
form = forms.LocationAddressForm(data={"address": ""})
self.assertEqual(form.errors["address"], ["This field is required."])
def test_location_address_city_field_empty(self):
form = forms.LocationAddressForm(data={"city": ""})
self.assertEqual(form.errors["city"], ["This field is required."])
def test_location_address_province_field_empty(self):
form = forms.LocationAddressForm(data={"province": ""})
self.assertEqual(form.errors["province"], ["This field is required."])
def test_location_address_postal_field_empty(self):
form = forms.LocationAddressForm(data={"postal_code": ""})
self.assertEqual(form.errors["postal_code"], ["This field is required."])
def test_location_contact_email_empty(self):
form = forms.LocationContactForm(data={"contact_email": ""})
self.assertEqual(form.errors["contact_email"], ["This field is required."])
def test_location_contact_email_format(self):
form = forms.LocationContactForm(data={"contact_email": "notanemail"})
self.assertEqual(form.errors["contact_email"], ["Enter a valid email address."])
def test_location_contact_phone_empty(self):
form = forms.LocationContactForm(data={"contact_phone": ""})
self.assertEqual(form.errors["contact_phone"], ["This field is required."])
def test_location_contact_phone_format(self):
form = forms.LocationContactForm(data={"contact_phone": "notaphonenumber"})
self.assertEqual(
form.errors["contact_phone"],
["Enter a valid phone number (e.g. +12125552368)."],
)
class LocationModel(TestCase):
def test_location_model_generates_short_code_on_save(self):
location = Location.objects.create(
category="category",
name="Name of venue",
address="Address line 1",
city="Ottawa",
province="ON",
postal_code="K1K 1K1",
contact_email="test@test.com",
contact_phone="613-555-5555",
)
self.assertNotEqual(location.short_code, "")
self.assertEqual(len(location.short_code), 8)
        self.assertTrue(location.short_code.isalnum())
class Utils(TestCase):
def test_generate_short_code_default_length(self):
code = generate_random_key()
self.assertEqual(len(code), 8)
def test_generate_short_code_custom_length(self):
code = generate_random_key(5)
self.assertEqual(len(code), 5)
def test_generate_short_code_alphanumeric(self):
code = generate_random_key()
self.assertTrue(code.isalnum())
|
#!/usr/bin/python3
import os
import time
import sys
from binance.client import Client
#init
api_key = os.environ.get('binance_api')
api_secret = os.environ.get('binance_secret')
pair_coin_symbol=sys.argv[1]
client = Client(api_key, api_secret)
coin_price = client.get_symbol_ticker(symbol=pair_coin_symbol)
print(coin_price["symbol"] + " : " + coin_price["price"])
|
from onegov.core.orm.abstract import AdjacencyListCollection
from onegov.gazette.models import Organization
class OrganizationCollection(AdjacencyListCollection):
""" Manage a list of organizations.
The list is ordered manually (through migration and/or backend).
"""
__listclass__ = Organization
def get_unique_child_name(self, name, parent):
""" Returns a unique name by treating the names as unique integers
and returning the next value.
"""
names = sorted([
int(result[0]) for result in self.session.query(Organization.name)
if result[0].isdigit()
])
        next_name = (names[-1] + 1) if names else 1
        return str(next_name)
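# Illustration (added sketch): the scheme above on plain data — existing names
# are treated as integers and the next free value is returned.
def _next_numeric_name(names):
    numeric = sorted(int(n) for n in names if n.isdigit())
    return str(numeric[-1] + 1) if numeric else '1'
assert _next_numeric_name(['1', '2', 'misc']) == '3'
assert _next_numeric_name([]) == '1'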
|
#!env python3
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from unittest_mock_target import add_time, get_this_month
from datetime import datetime
def get_username(user):
return user.username
class TestMock(unittest.TestCase):
def test_mock(self):
dummy_object = mock.Mock()
dummy_object.username = 'joe'
self.assertEqual(get_username(dummy_object), 'joe')
@mock.patch('unittest_mock_target.time')
def test_mock_patch(self, m):
m.return_value = 1470620400
self.assertEqual(add_time(), 1470624000)
@mock.patch('unittest_mock_target.get_now')
def test_builtin_func(self, m):
m.return_value = datetime(2015, 8, 1, 12, 32, 0)
self.assertEqual(get_this_month(), 8)
if __name__ == '__main__':
unittest.main()
|
from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# Create two D6 dice.
die_1 = Die()
die_2 = Die()
# Make some rolls, and store results in a list.
results = []
roll_times = 10000
for roll_num in range(roll_times):
result = die_1.roll() + die_2.roll()
results.append(result)
# Analyze the results.
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
for value in range(2, max_result+1):
frequency = results.count(value)
frequencies.append(frequency)
# Convert counts to percentages of all rolls and print each sum with its share.
total_rolls = sum(frequencies)
Perc = []
for value, frequency in zip(range(2, max_result + 1), frequencies):
    perc = round(frequency / total_rolls * 100, 2)
    Perc.append(perc)
    print(value, str(perc) + " %")
# Visualize the results.
x_values = list(range(2, max_result+1))
y_result = Perc #or frequencies
data = [Bar(x=x_values, y=y_result)]
x_axis_config = {'title': 'Result', 'dtick': 1}
y_axis_config = {'title': 'Percentage of ' + str(roll_times) + ' rolls'}
my_layout = Layout(title='Results of rolling two D6 ' + str(roll_times) + ' times',
xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6_d6.html')
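# Cross-check (added sketch): theoretical percentages for two fair D6 follow
# P(sum) = (6 - |sum - 7|) / 36, peaking at 7 with ~16.67 %.
theory = [round((6 - abs(total - 7)) / 36 * 100, 2) for total in range(2, 13)]
print("theoretical %:", theory)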
|
def likes(names):
    if len(names) == 0:
        return "No one likes this"
    elif len(names) == 1:
        return names[0] + " likes this"
    elif len(names) == 2:
        return names[0] + " and " + names[1] + " like this"
    elif len(names) == 3:
        return names[0] + ", " + names[1] + " and " + names[2] + " like this"
    else:
        return names[0] + ", " + names[1] + " and " + str(len(names) - 2) + " others like this"
def main():
list1=[]
list2=["Azhar"]
list3=["Azhar","Patrick"]
list4=["Azhar","Patrick","Jeeva"]
list5=["Azhar","Patrick","Jeeva","Shastri"]
print(likes(list1))
print(likes(list2))
print(likes(list3))
print(likes(list4))
print(likes(list5))
if __name__ == '__main__':
main()
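# Spot checks (added): expected strings from the kata's specification.
assert likes([]) == "No one likes this"
assert likes(["Azhar", "Patrick", "Jeeva", "Shastri"]) == "Azhar, Patrick and 2 others like this"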
|
from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from django.contrib.auth.views import (
LogoutView,
LoginView,
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from users.views import profile, register, UserLoginView
class TestUsersUrls(SimpleTestCase):
def test_register_url(self):
url_register = reverse('register')
# print(resolve(url_register))
self.assertEqual(resolve(url_register).func, register)
def test_profile_url(self):
url_profile = reverse('profile')
# print(resolve(url_profile))
self.assertEqual(resolve(url_profile).func, profile)
def test_logout_url(self):
url_logout = reverse('logout')
# print(resolve(url_logout))
self.assertEqual(resolve(url_logout).func.view_class,
LogoutView
)
def test_login_url(self):
url_login = reverse('login')
# print(resolve(url_login))
self.assertEqual(resolve(url_login).func.view_class,
LoginView
)
def test_password_reset_url(self):
url_password_reset = reverse('password_reset')
# print(resolve(url_password_reset))
self.assertEqual(resolve(url_password_reset).func.view_class,
PasswordResetView
)
def test_password_reset_done_url(self):
url_password_reset_done = reverse('password_reset_done')
# print(resolve(url_password_reset_done))
self.assertEqual(resolve(url_password_reset_done).func.view_class,
PasswordResetDoneView
)
def test_password_reset_confirm_url(self):
url_password_reset_confirm = reverse('password_reset_confirm',
args=['1234', 'abcd']
)
# print(resolve(url_password_reset_confirm))
self.assertEqual(resolve(url_password_reset_confirm).func.view_class,
PasswordResetConfirmView
)
def test_password_reset_complete_url(self):
url_password_reset_complete = reverse('password_reset_complete')
# print(resolve(url_password_reset_complete))
self.assertEqual(resolve(url_password_reset_complete).func.view_class,
PasswordResetCompleteView
)
|
# Peek at the first two bytes of a TIFF file (byte-order mark: 'II' or 'MM').
im = open('006993_photoA.tif', 'rb')
ord(im.read(1))
chr(ord(im.read(1)))
|
# add path to the src and test directory
import os
import sys
PARENT_PATH = os.getenv('PYMCTS_ROOT', '')
SRC_PATH = os.path.join(PARENT_PATH, "src")
sys.path.append(os.path.join(SRC_PATH, "algorithm"))
import mcts
import connectfour_model
import heuristic_model
# Clear the shell
os.system("clear")
# Setup for MCTS
model = heuristic_model.ConnectFour()
#model = connectfour_model.ConnectFour()
print('> Input the maximum number of iterations for MCTS...')
playout_num = int(input())
_mcts = mcts.MCTS()
_mcts.set_playout(playout_num)
_mcts.show_progress = True
# start the game !!
print("Let's ConnectFour !!")
model.display()
while True:
    # Player turn
    print('> Input the column to make a move...')
    action = int(input()) - 1
    end_flg, score = model.is_terminal(1, action)
    model.update(action)
    model.display()
    if end_flg:
        print('\nYou win !!!\n')
        break
    # MCTS CPU Turn
    root, action = _mcts.start(model)
    print('MCTS makes a move on column ' + str(action + 1))
    end_flg, score = model.is_terminal(-1, action)
    model.update(action)
    model.display()
    if end_flg:
        print('\nYou lose ...\n')
        break
|
# coding: utf-8
import csv
import os
import numpy as np
import unicodecsv
# check type of values in each coulumn
def checkType(data):
newData=list(data)
    header=newData.pop(0)
    valueTypeArray=[0 for i in range(0,len(header))]
for row in newData:
for i in range (0,len(row)):
item=row[i]
try:
int(item)
except ValueError:
try:
if valueTypeArray[i]==0:
valueTypeArray[i]=1
valueTypeArray[i]=max(valueTypeArray[i],len(item))
except:
print("Error: defected row. row is too short!\n")
print(row)
exit(0)
return valueTypeArray
# write the sql file from data file
def writeDataSqlFile(fscheme,fdata,data,tableName,valueTypeArray):
fscheme.write("CREATE TABLE {0} (\n".format(tableName))
headline=data.pop(0)
# create table
fscheme.write("\tID INT NOT NULL,\n".format(len(data)+1))
for i in range (0,len(headline)-1):
item=headline[i]
if valueTypeArray[i]==0:
fscheme.write("\t{0} INT,\n".format(item))
else:
if "comment" in item or "discription" in item or valueTypeArray[i]>1500:
fscheme.write("\t{0} TEXT,\n".format(item))
elif item=="name":
valueTypeArray[i]+=16
fscheme.write("\t{0} VARCHAR({1}) NOT NULL,\n".format(item,valueTypeArray[i]))
else:
valueTypeArray[i]+=16
fscheme.write("\t{0} VARCHAR({1}),\n".format(item, valueTypeArray[i]))
fscheme.write("\tPRIMARY KEY (ID)\n")
fscheme.write(");\n\n")
# insert values to table
for row in data:
printrow=True
# for item in row:
# try: # check encoding of row
# item.encode('utf8')
# except:
# printrow=False
# break
if printrow:
fdata.write("INSERT INTO {0} VALUES ({1},".format(tableName,row[len(row)-1]))
for i in range (0,len(row)-1):
item=row[i]
item=item.replace("\'","\''")
if valueTypeArray[i]==0:
item=int(item)
fdata.write("%d" % item)
else:
if item=='NULL':
fdata.write(item)
else:
fdata.write('\'')
# fdata.write(item.encode('utf8'))
fdata.write(item)
fdata.write('\'')
if i<len(headline)-2:
fdata.write(",")
fdata.write(");\n")
fdata.write("\n")
fdata.write("ALTER TABLE {0} MODIFY ID INT NOT NULL AUTO_INCREMENT;\n".format(tableName))
fdata.write("ALTER TABLE {0} AUTO_INCREMENT = {1};\n\n".format(tableName,len(data)+1))
# write the sql file from match file
def writeMatchSqlFile(fscheme,fdata,data,tableName,valueTypeArray):
fscheme.write("CREATE TABLE {0} (\n".format(tableName))
headline=data.pop(0)
# create table
for i in range (0,len(headline)):
item=headline[i]
fscheme.write("\t{0} INT\n".format(item))
refTable=tableName.split("_")[i]
if "Genre" in refTable and "Top" not in refTable:
refTable="MusicGenre"
fscheme.write("\t\tREFERENCES {0}(ID)".format(refTable))
if i<len(headline)-1:
fscheme.write (',\n')
else:
fscheme.write ('\n')
fscheme.write(");\n\n")
# insert values to table
for row in data:
fdata.write("INSERT INTO {0} VALUES (".format(tableName))
for i in range (0,len(row)):
item=row[i]
item=int(item)
fdata.write("%d" % item)
if i<len(headline)-1:
fdata.write(",")
fdata.write(");\n")
fdata.write("\n")
# create tables from dir
def createDataTable(fscheme,fdata,dirpath):
for filename in os.listdir(dirpath):
filepath=dirpath+'/'+filename
tableName=filename.split('.csv')[0]
if tableName == "Song" or tableName == "Single":
continue
with open(filepath) as f2:
data = list(csv.reader(f2))
            data.reverse()
valueTypeArray=checkType(data)
writeDataSqlFile(fscheme,fdata,data,tableName,valueTypeArray)
# create tables from dir
def createMatchTable(fscheme,fdata,dirpath):
for filename in os.listdir(dirpath):
filepath=dirpath+'/'+filename
tableName=filename.split('.csv')[0]
if tableName == "Song" or tableName == "Single":
continue
with open(filepath) as f2:
data = list(csv.reader(f2))
            data.reverse()
valueTypeArray=checkType(data)
writeMatchSqlFile(fscheme,fdata,data,tableName,valueTypeArray)
# write index file
def createIndex(f,dir1,dir2):
for filename in os.listdir(dir1):
tableName=filename.split('.csv')[0]
if tableName == "Song" or tableName == "Single":
continue
field="ID"
f.write("CREATE INDEX idIndex ON {0}({1});\n".format(tableName,field))
for filename in os.listdir(dir2):
with open (dir2+"/"+filename,"r") as tf:
field=str(tf.readline()).replace("\n","")
field=field.split(",")
tableName=filename.split('.csv')[0]
f.write("CREATE INDEX idIndex1 ON {0}({1});\n".format(tableName,field[0]))
f.write("CREATE INDEX idIndex2 ON {0}({1});\n".format(tableName,field[1]))
# write all DB building queries into one SQL_DB file
def createSQLTables(dir1 ,dir2):
outputSchemePath="../SQL_DB/musicDB_schema.sql"
outputDataPath="../SQL_DB/musicDB_data.sql"
with open(outputSchemePath,'w') as fscheme:
with open(outputDataPath,'w') as fdata:
createDataTable(fscheme,fdata,dir1)
createMatchTable(fscheme,fdata,dir2)
createIndex(fscheme,dir1,dir2)
|
from enum import Enum, unique
@unique
class Term(Enum):
FALL = 'F'
WINTER = 'W'
SPRING = 'SP'
INDETERMINATE = 'TBD'
@staticmethod
def from_str(string: str):
for term in Term:
if term.value == string:
return term
        raise ValueError('Cannot initialize Term from {}'.format(string))
def __repr__(self):
return self.name
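# Minimal usage sketch (added): round-trip a term through from_str.
if __name__ == '__main__':
    assert Term.from_str('W') is Term.WINTER
    print(repr(Term.from_str('SP')))  # prints: SPRING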
|
from sqlalchemy.orm import Session
# Local modules
from data.models import users_model
from data.schemas import schema_users
def create_user(db: Session, user: schema_users.UserCreate):
fake_hashed_password = user.password + "notreallyhashed"
db_user = users_model.UserModel(email=user.email,
hashed_password=fake_hashed_password)
db.add(db_user)
db.commit()
return db_user
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(users_model.UserModel).offset(skip).limit(limit).all()
def get_user(db: Session, user_id: int):
    return db.query(users_model.UserModel).filter(
        users_model.UserModel.id == user_id).first()
def get_user_by_email(db: Session, email: str):
    return db.query(users_model.UserModel).filter(
        users_model.UserModel.email == email).first()
|
# Generated by Django 3.2 on 2021-04-21 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pie', '0005_alter_showchart_customer_id'),
]
operations = [
migrations.AlterField(
model_name='showchart',
name='customer_id',
field=models.IntegerField(null=True),
),
]
|
file_name = 'pi_digits.txt'
with open(file_name) as file_object:
contents = file_object.read()
print(contents)
with open(file_name) as file_object:
for line in file_object:
print(line.rstrip())
with open(file_name) as file_object:
lines = file_object.readlines()
for line in lines:
print(line.rstrip())
pi_string = ''
for line in lines:
pi_string += line.rstrip()
print(pi_string)
print(len(pi_string))
pi_string = ''
for line in lines:
pi_string += line.strip()
print(pi_string)
print(len(pi_string))
some_number = str(16)
if some_number in pi_string:
print("it's in!!")
else:
print("Unfortunately not in!")
|
#Circular array rotation
inp = input().split(' ')
n , k , q = [int(x) for x in inp]
m = []
#print(n , k , q)
#n -- > num of ints in arrays
#k - num of rotations
#q - num of queries
arr = list(map(int , (input().split(' '))))
result = []
for i in range(q):
    m = int(input())
    # After k right-rotations, query index m maps to original index (m - k) mod n.
    result.append(arr[(m - k) % n])
#print(result)
for a in result:
print(a)
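# Spot check (added): n=3, k=2 turns [1, 2, 3] into [2, 3, 1], so query m=0
# should read original index (0 - 2) % 3 == 1, i.e. the value 2.
assert [1, 2, 3][(0 - 2) % 3] == 2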
|
#!/usr/bin/python
import sys
import os
if len(sys.argv) != 2:
    print('error ** usage: %s algorithm' % sys.argv[0])
    print('error ** please try: majority|even_odd|logistic_regression')
    sys.exit()
algorithm = sys.argv[1]
match = 0
if algorithm in ('majority', 'even_odd', 'logistic_regression'):
    match = 1
if match == 0:
    print('error ** no matching algorithm: %s' % sys.argv[1])
    print('error ** please try: majority|even_odd|logistic_regression')
    sys.exit()
print('classify algorithm: ' + algorithm)
dataList = ['vision/vision','nlp/nlp','speech/speech','finance/finance','bio/bio','synthetic/easy','synthetic/hard']
dataName = ['vision','nlp','speech','finance','bio','easy','hard']
for data in dataList:
print '<'+dataName[dataList.index(data)]+'>'
cmd = 'java -cp ../lib/commons-cli-1.3.1.jar: cs475/Classify -mode train -algorithm ' + algorithm +' -model_file ../output/'+data+'.'+algorithm+'.model -data ../data/'+data+'.train'
os.system(cmd)
cmd = 'java -cp ../lib/commons-cli-1.3.1.jar: cs475/Classify -mode test -model_file ../output/'+data+'.'+algorithm+'.model -data ../data/'+data+'.dev -predictions_file ../output/'+data+'.dev.predictions'
os.system(cmd)
#print 'Accuracy: %f (%d/%d)' % ((float(match)/float(total)), match, total)
|
import enum
from datetime import datetime
from api.app import db
class MessageStatus(enum.Enum):
RECEIVED = 'received'
CONFIRMED = 'confirmed'
REVOKED = 'revoked'
UNDELIVERABLE = 'undeliverable'
class Message(db.Model):
created_at = db.Column(db.DateTime, nullable=False, default=lambda: datetime.utcnow())
updated_at = db.Column(
db.DateTime, nullable=False, default=lambda: datetime.utcnow(), onupdate=lambda: datetime.utcnow()
)
status = db.Column(
db.Enum(MessageStatus, values_callable=lambda enum: [e.value for e in enum], native_enum=False),
default=MessageStatus.CONFIRMED)
id = db.Column(db.Integer, primary_key=True)
payload = db.Column(db.JSON)
def __repr__(self):
return f'<Message id:{self.id}>'
|
from math import pi, sqrt
import numpy as np # This is used by other modules importing * from here
# ----------physical constants----------
a0 = 5.2917721092e-11
GEVperHartree = 27.21138505e-9 # GeV per Hartree
eVperHartree = 27.21138505 # eV per Hartree
secPerAU = 2.41888e-17 # seconds per au time
c = 137.035999074 # speed of light
c2, c3 = c**2, c**3
mu0 = 4.0*pi/c2
eps0 = 1.0/(4.0*pi)
eta0 = sqrt(mu0/eps0) # impedance of free space
# ----------global constant parameters (in a.u. when applicable)----------
mm = 1 # angular L-G index
nn = 1 # radial L-G index.
pertOrder = 1 # = j_max in the BGV sum, not 2j
phi0 = 3.0*pi/2.0 # initial phase
s = 70 # cycles = s-value: 3=64, 5=178, 6=256, 7=349, 8=456, 9=577, 10=712
lambda0 = 800.e-9/a0
w0 = 785.e-9/a0
omega0 = 2.0*pi*c/lambda0 # laser frequency (angular)
k = 2.0*pi/lambda0 # wave number
zr = 0.5*k*w0**2 # Rayleigh range
# polarization choice {1:linear(x), 2:radial}
polar = 2
grid = 200 # number of x,y grid cells for plotting and normalizing.
xmin = -3.0*w0
xmax = -xmin
ymin, ymax = xmin, xmax
dx = (xmax-xmin)/float(grid)
dy = dx
mmReal = float(mm)
nnReal = float(nn)
sReal = float(s)
ii = 1.0j # imaginary i
epsilonc2 = c/(2.0*zr*omega0) # \epsilon_c^2
|
class Solution:
    def backspaceCompare(self, S: str, T: str) -> bool:
        sList = []
        for item in S:
            if item == '#':
                if len(sList) > 0:
                    sList.pop()
            else:
                sList.append(item)
        S = "".join(sList)
        tList = []
        for item in T:
            if item == '#':
                if len(tList) > 0:
                    tList.pop()
            else:
                tList.append(item)
        T = "".join(tList)
        return S == T
#leetcode solution
# class Solution(object):
# def backspaceCompare(self, S, T):
# def build(S):
# ans = []
# for c in S:
# if c != '#':
# ans.append(c)
# elif ans:
# ans.pop()
# return "".join(ans)
# return build(S) == build(T)
solution=Solution()
print(solution.backspaceCompare("ab#c","ad#c"))
print(solution.backspaceCompare("a#","c####d#"))
print(solution.backspaceCompare("a##c#c#","cccc####"))
print(solution.backspaceCompare("a#c","b"))
|
from python_helper import Constant as c
from python_helper import ObjectHelper, log
from python_framework import Service, ServiceMethod
from domain import BrowserConstants, LoginConstants
from dto import QRCodeDto
@Service()
class QRCodeService:
browser = None
booting = BrowserConstants.DEFAULT_BROWSER_BOTTING_VALUE
booted = BrowserConstants.DEFAULT_BOOTED_VALUE
@ServiceMethod(requestClass=[QRCodeDto.QRCodeRequestDto])
def save(self, dto) :
return self.service.image.save(dto.qRCodeAsBase64, LoginConstants.QR_CODE_IMAGE_NAME)
@ServiceMethod()
def show(self) :
if self.isAvailable() :
self.accessUrl(LoginConstants.QR_CODE_AUTHENTICATION_PAGE)
            self.forceRefresh()
else :
            self.openIfNeeded()
self.accessUrl(LoginConstants.QR_CODE_AUTHENTICATION_PAGE)
@ServiceMethod()
    def forceRefresh(self) :
self.client.browser.hitControF5(self.browser)
@ServiceMethod()
def closeQRCode(self) :
self.tearDown()
@ServiceMethod()
def isBooting(self) :
return self.booting
@ServiceMethod()
    def isBooted(self) :
return self.booted
@ServiceMethod()
def isAvailable(self) :
return ObjectHelper.isNotNone(self.browser) and not self.isBooting()
@ServiceMethod()
def isNotAvailable(self) :
return not self.isAvailable() or self.isBooting()
@ServiceMethod()
    def openIfNeeded(self, hidden=False) :
        log.log(self.openIfNeeded, 'Started')
        if ObjectHelper.isNone(self.browser) and self.isNotBooting() :
            self.open(hidden=hidden)
        log.log(self.openIfNeeded, 'Finished')
@ServiceMethod()
def open(self, hidden=False) :
log.log(self.open, 'Started')
self.booting = True
self.safelyClose()
self.browser = self.client.browser.getNewBrowser(hidden=hidden)
self.client.browser.maximize(self.browser)
sessionId = self.browser.session_id
commandExecutor = self.browser.command_executor._url
self.service.session.create(sessionId, commandExecutor)
self.booted = True
self.booting = False
log.log(self.open, 'Finished')
@ServiceMethod()
def accessUrl(self, url) :
self.client.browser.accessUrl(url, self.browser)
@ServiceMethod()
def tearDown(self) :
log.log(self.tearDown, 'Started')
self.safelyClose()
log.log(self.tearDown, 'Finished')
@ServiceMethod(requestClass=[str])
def existsByXpath(self, xpath) :
return self.client.browser.existsByXpath(xpath, self.browser)
def safelyClose(self) :
log.log(self.safelyClose, 'Started')
if ObjectHelper.isNotNone(self.browser) :
try :
self.client.browser.close(self.browser)
except Exception as exception :
                log.log(self.safelyClose, 'Not possible to close browser', exception=exception)
self.browser = None
self.booted = False
log.log(self.safelyClose, 'Finished')
@ServiceMethod()
def isNotBooting(self) :
return not self.isBooting()
@ServiceMethod()
    def isNotBooted(self) :
return not self.isBooted()
|
# Generated by Django 2.2 on 2019-04-25 02:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assets', '0009_content'),
]
operations = [
migrations.AddField(
model_name='content',
name='file_remark',
field=models.CharField(default='', max_length=200),
),
]
|
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
from dash.exceptions import PreventUpdate
import dash_table
from dash_table.Format import Format
import plotly.graph_objs as go
import numpy as np
from skimage import io, filters, measure
import pandas as pd
import PIL
from skimage import color, img_as_ubyte
from plotly import colors
from textwrap import dedent
def image_with_contour(img, labels, mode='lines', shape=None):
    """
    Figure with contour plot of labels superimposed on background image.
    Parameters
    ----------
    img : URL, dataURI or ndarray
        Background image. If a numpy array, it is transformed into a PIL
        Image object.
    labels : 2D ndarray
        Contours are the isolines of labels.
    mode : str, optional
        Coloring mode of the contour trace (e.g. 'lines'); if None, contours
        are drawn filled, with reduced opacity.
    shape: tuple, optional
        Shape of the arrays, to be provided if ``img`` is not a numpy array.
    """
    try:
        sh_y, sh_x = shape if shape is not None else img.shape
    except AttributeError:
        # re-raise instead of printing and continuing, which would hit a
        # NameError on sh_y / sh_x further down
        raise ValueError('the shape of the image must be provided with the '
                         '``shape`` parameter if ``img`` is not a numpy array')
if type(img) == np.ndarray:
img = img_as_ubyte(color.gray2rgb(img))
img = PIL.Image.fromarray(img)
    labels = labels.astype(float)
    # work on a copy so plotly's global PLOTLY_SCALES is not mutated
    custom_viridis = [list(pair) for pair in colors.PLOTLY_SCALES['Viridis']]
    custom_viridis.insert(0, [0, '#FFFFFF'])
    custom_viridis[1][0] = 1.e-4
# Contour plot of segmentation
opacity = 0.4 if mode is None else 1
cont = go.Contour(z=labels[::-1],
contours=dict(start=0, end=labels.max() + 1, size=1,
coloring=mode),
line=dict(width=1),
showscale=False,
colorscale=custom_viridis,
opacity=opacity,
)
# Layout
layout= go.Layout(
images = [dict(
source=img,
xref="x",
yref="y",
x=0,
y=sh_y,
sizex=sh_x,
sizey=sh_y,
sizing="contain",
layer="below")],
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
),
yaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
scaleanchor="x",
ticks='',
showticklabels=False,),
margin=dict(b=5, t=20))
fig = go.Figure(data=[cont], layout=layout)
return fig
# Image to segment
filename = 'https://upload.wikimedia.org/wikipedia/commons/a/ac/Monocyte_no_vacuoles.JPG'
img = io.imread(filename, as_gray=True)[:660:2, :800:2]
labels = measure.label(img < filters.threshold_otsu(img))
height, width = img.shape
canvas_width = 600
props = measure.regionprops(labels, img)
# Define table columns
list_columns = ['label', 'area', 'perimeter', 'eccentricity', 'euler_number', 'mean_intensity']
columns = [{"name": i, "id": i} for i in list_columns]
columns[2]['format'] = Format(precision=4)
columns[2]['type'] = 'numeric'
columns[3]['format'] = Format(precision=4)
columns[3]['type'] = 'numeric'
columns[5]['format'] = Format(precision=3)
columns[5]['type'] = 'numeric'
data = pd.DataFrame([[getattr(prop, col) for col in list_columns]
for prop in props], columns=list_columns)
app = dash.Dash(__name__)
server = app.server
app.config.suppress_callback_exceptions = True
app.layout = html.Div([html.Div([
html.Div([
        html.H4('Explore object properties'),
dcc.Graph(
id='graph',
figure=image_with_contour(img, labels, mode=None)),
], className="six columns"),
html.Div([
html.Img(src='https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png', width='30px'),
html.A(
id='gh-link',
children=['View on GitHub'],
href="http://github.com/plotly/canvas-portal/" "blob/master/apps/object-properties/app.py",
style={'color': 'black',
'border':'solid 1px black',
'float':'left'}
),
dash_table.DataTable(
id='table-line',
columns=columns,
data=data.to_dict("records"),
filtering=True,
row_deletable=True,
style_table={
'overflowY': 'scroll'
},
n_fixed_rows=1,
style_cell={'width': '85px'}
),
dcc.Store(id='cache', data=labels),
html.Div(id='row', hidden=True, children=None),
], className="six columns"),
], className="row"),
html.H4('How to use this app (see below)'),
dcc.Markdown(dedent('''
Hover over objects to highlight their properties in the table,
select cell in table to highlight object in image, or
filter objects in the table to display a subset of objects.
Learn more about [DataTable filtering syntax](https://dash.plot.ly/datatable/filtering)
for selecting ranges of properties.
''')
),
html.Img(id='help',
src='assets/properties.gif',
width='80%',
style={'border': '2px solid black',
'display': 'block',
'margin-left':'auto',
'margin-right':'auto'}
)
])
@app.callback(Output('table-line', 'style_data_conditional'),
[Input('graph', 'hoverData')])
def highlight_row(string):
    """
    When hovering over a label, highlight the corresponding row in the
    table, using the label column.
    """
index = string['points'][0]['z']
return [{
"if": {
'filter': 'label eq num(%d)'%index
},
"backgroundColor": "#3D9970",
'color': 'white'
}]
@app.callback([Output('graph', 'figure'),
Output('cache', 'data'),
Output('row', 'children')],
[Input('table-line', 'derived_virtual_indices'),
Input('table-line', 'active_cell'),
Input('table-line', 'data')],
[State('cache', 'data'),
State('row', 'children')]
)
def highlight_filter(indices, cell_index, data, current_labels, previous_row):
    """
    Updates the figure and labels array when a selection is made in the table.
    When a cell is selected (active_cell), highlight this particular label
    with a white outline. When the set of filtered labels changes, or when a
    row is deleted, redraw the figure with only the remaining labels.
    """
if cell_index and cell_index[0] != previous_row:
current_labels = np.asanyarray(current_labels)
label = indices[cell_index[0]] + 1
        mask = (labels == label).astype(float)
cont = go.Contour(z=mask[::-1],
contours=dict(coloring='lines'),
showscale=False,
line=dict(width=6),
colorscale='YlOrRd',
opacity=0.8,
hoverinfo='skip',
)
fig = image_with_contour(img, current_labels, mode=None)
fig.add_trace(cont)
return [fig, current_labels, cell_index[0]]
filtered_labels = np.array(pd.DataFrame(data).lookup(np.array(indices),
['label',]*len(indices)))
mask = np.in1d(labels.ravel(), filtered_labels).reshape(labels.shape)
new_labels = np.copy(labels)
new_labels *= mask
fig = image_with_contour(img, new_labels, mode=None)
return [fig, new_labels, previous_row]
if __name__ == '__main__':
app.run_server(debug=True)
|
a=1
#I went back to change 1
#changed in dev
#edited in master and dev
|
from sklearn.feature_selection import VarianceThreshold
X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
X_sel = sel.fit_transform(X)
print(X)
print(X_sel)
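# The threshold .8 * (1 - .8) is the variance of a Bernoulli feature that takes
# one value in 80% of the samples, so near-constant boolean columns get
# dropped; get_support() reports which columns survived:
print(sel.get_support())              # boolean mask over the input columns
print(sel.get_support(indices=True))  # indices of the retained columns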
|
import pytest
from lis import *
program = '(begin (define r 10) (* pi (* r r)))'
def test_tokenize():
result = ['(', 'begin', '(', 'define', 'r', '10', ')', '(', '*', 'pi', '(', '*', 'r', 'r', ')', ')', ')']
assert tokenize(program) == result
def test_parse():
result = ['begin', ['define', 'r', 10], ['*', 'pi', ['*', 'r', 'r']]]
assert parse(program) == result
def test_eval():
assert eval(parse(program)) == 314.1592653589793
def test_lambda_procedure():
program = '(begin (define circle-area (lambda (r) (* pi (* r r)))) (circle-area 10))'
assert eval(parse(program)) == 314.1592653589793
def test_norvig_suite():
"""
For each (expr, expected) test case, see if eval(parse(expr)) == expected.
"""
lis_tests = [
("(quote (testing 1 (2.0) -3.14e159))", ['testing', 1, [2.0], -3.14e159]),
("(+ 2 2)", 4),
("(+ (* 2 100) (* 1 10))", 210),
("(if (> 6 5) (+ 1 1) (+ 2 2))", 2),
("(if (< 6 5) (+ 1 1) (+ 2 2))", 4),
("(define x 3)", None), ("x", 3), ("(+ x x)", 6),
("(begin (define x 1) (set! x (+ x 1)) (+ x 1))", 3),
("((lambda (x) (+ x x)) 5)", 10),
("(define twice (lambda (x) (* 2 x)))", None), ("(twice 5)", 10),
("(define compose (lambda (f g) (lambda (x) (f (g x)))))", None),
("((compose list twice) 5)", [10]),
("(define repeat (lambda (f) (compose f f)))", None),
("((repeat twice) 5)", 20), ("((repeat (repeat twice)) 5)", 80),
("(define fact (lambda (n) (if (<= n 1) 1 (* n (fact (- n 1))))))", None),
("(fact 3)", 6),
("(fact 50)", 30414093201713378043612608166064768844377641568960512000000000000),
("(define abs (lambda (n) ((if (> n 0) + -) 0 n)))", None),
("(list (abs -3) (abs 0) (abs 3))", [3, 0, 3]),
("""(define combine (lambda (f)
(lambda (x y)
(if (null? x) (quote ())
(f (list (car x) (car y))
((combine f) (cdr x) (cdr y)))))))""", None),
("(define zip (combine cons))", None),
("(zip (list 1 2 3 4) (list 5 6 7 8))", [[1, 5], [2, 6], [3, 7], [4, 8]]),
("""(define riff-shuffle (lambda (deck) (begin
(define take (lambda (n seq) (if (<= n 0) (quote ()) (cons (car seq) (take (- n 1) (cdr seq))))))
(define drop (lambda (n seq) (if (<= n 0) seq (drop (- n 1) (cdr seq)))))
(define mid (lambda (seq) (/ (length seq) 2)))
((combine append) (take (mid deck) deck) (drop (mid deck) deck)))))""", None),
("(riff-shuffle (list 1 2 3 4 5 6 7 8))", [1, 5, 2, 6, 3, 7, 4, 8]),
("((repeat riff-shuffle) (list 1 2 3 4 5 6 7 8))", [1, 3, 5, 7, 2, 4, 6, 8]),
("(riff-shuffle (riff-shuffle (riff-shuffle (list 1 2 3 4 5 6 7 8))))", [1,2,3,4,5,6,7,8]),
]
for (expr, expected) in lis_tests:
assert eval(parse(expr)) == expected
def test_schemestr_str():
assert schemestr(123) == '123'
def test_schemestr_list():
assert schemestr([1, 2, 3]) == '(1 2 3)'
def test_parse_empty_line():
with pytest.raises(SyntaxError):
parse('')
def test_parse_extra_paren():
with pytest.raises(SyntaxError):
parse(')')
|
#! /usr/bin/env python3
# coding: utf-8
""" Contains the functions to get the map size (map_size) and the map generator (map_initialize) """
from fonction.get_file import get_file_path
from config import *
def map_size(file_to_open, folder_file):
""" Get the size of the map for pygame windows size"""
# loading of the file as "file"
loading_file = get_file_path(file_to_open, folder_file)
with open(loading_file, "r") as file:
        # the map is square: the window size is the length of any line
        for line in file:
            # delete the \n character at the end of the line
            length_map = len(line.rstrip("\n"))
    return length_map
def map_initialize(file_to_open, folder_file):
"""Get each letter in the file and add to a list"""
try:
loading_file = get_file_path(file_to_open, folder_file)
with open(loading_file, "r") as file:
mapping = []
# Reading each line
for line in file:
# delete the \n characters at the end of line
line = line.rstrip("\n")
line_list = []
# Read each letter in line
for letter in line:
line_list.append(letter)
# Add each line to mapping list
mapping.append(line_list)
return mapping
    except OSError:
        print("Error occurred during the map generation, please check the file.txt")
def main():
"""Print the level on a board with Pandas """
import pandas as pd
map_list_df = pd.DataFrame(map_initialize("LevelGame.txt", "map"))
print(map_list_df)
if __name__ == "__main__":
main()
|
# Create packed tuple.
pair = ("dog", "cat", "horse")
# Unpack tuple.
(key, value, key2, value2) = pair
# Display unpacked variables.
print(key)
print(value)
|
# Simulate a multitasking system with generators
def music(duration):
index = 1
while index <= duration:
print("音乐进行到第%d分钟" % index)
index += 1
yield None
def movie(duration):
index = 1
while index <= duration:
print("电影进行到第%d分钟" % index)
index += 1
yield None
def main():
music_iter = music(10)
movie_iter = movie(20)
music_stop = False
movie_stop = False
while True:
if movie_stop and music_stop:
break
if movie_stop is not True:
try:
next(movie_iter)
except StopIteration:
movie_stop = True
print("电影已经播放完了")
if music_stop is not True:
try:
next(music_iter)
except StopIteration:
music_stop = True
print("音乐已经播放完了")
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 16:58:04 2021
@author: Gustavo
@mail: gustavogodoy85@gmail.com
"""
# =============================================================================
# # 2.2 file handling
# =============================================================================
# %%
with open('../Data/camion.csv', 'rt') as f:
data = f.read()
# calling the variable data gives all the info unformatted
# print(data) gives the info formatted
# This opens the whole file and loads it into memory, which is only
# practical when the file is small.
print(data)
# %%
# For large files it is better to load the file into memory in parts
with open('../Data/camion.csv', 'rt') as f:
for line in f:
print(line, end='')
# %%
with open('../Data/camion.csv', 'rt') as f:
header = next(f).split(',')
print(header)
for line in f:
row = line.split(',')
print(row)
#%% Exercise 2.3
with open('../Data/precios.csv', 'rt') as f:
for line in f:
row = line.split(',')
print(line)
if 'Uva' in row:
print(row[1])
break
# %%
# the gzip module lets us open compressed files
# with this module we must use 'rt', otherwise we would read byte strings
import gzip
with gzip.open('../Data/camion.csv.gz', 'rt') as f:
for line in f:
print(line, end='')
# =============================================================================
# 2.3 Functions
# =============================================================================
#%% function definitions
def sumcount(n):
total = 0
while n>0:
total += n
n -= 1
return total
a = sumcount(100)
#%% exceptions
# For exception handling, Python uses try-except.
numero_valido=False
while not numero_valido:
try:
        a = input('Enter an integer: ')
n = int(a)
numero_valido = True
except ValueError:
        print('Not valid. Try again.')
print(f'You entered {n}.')
# This provides the arguments for the function to run normally; if it
# raises an error, that error can be caught with an "except <error>"
# clause, where whatever handling is needed is performed.
#%% raising exceptions
# exceptions are raised with the raise statement
# =============================================================================
# it may be necessary to raise, from our own function, an exception that is
# not already raised by the system, and then catch it to perform
# whatever handling is needed.
# =============================================================================
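#%% a minimal sketch of raising our own exception
def safe_sqrt(x):
    """Return the square root of x, raising ValueError for negative input."""
    if x < 0:
        # raise hands the error to the caller, who can catch it with except
        raise ValueError('x must be non-negative')
    return x ** 0.5
try:
    safe_sqrt(-1)
except ValueError as e:
    print(f'Caught: {e}')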
|
import re
import spacy
from tqdm import tqdm
# Compiled regex for tokenizer
HTML_TAGS = re.compile(r"</*\w+>", re.IGNORECASE)
PUNCT_START_END = re.compile(r"^\W+|\W+$")
PUNCT_ANYWHERE = re.compile(r"\W")
NON_ALPHANUMERIC = re.compile(r"[^a-zA-Z0-9\-\'\.]")
ONLY_NUMBERS = re.compile(r"^(\d\W*)+$")
# spacy English model
NLP = spacy.load('en_core_web_sm', disable=['ner', 'parser'])
def pre_process(line):
"""Pre-processes a line by stripping HTML tags and removing punctuation except dots (.),
hyphens and apostrophes."""
tokens = HTML_TAGS.sub('', line).split(' ')
return ' '.join([PUNCT_START_END.sub('', word) for word in tokens])
def post_process(token):
"""Processes a token by removing punctuation, usually dots and apostrophes. Retains hyphens if
needed."""
token = PUNCT_START_END.sub('', token)
token = ONLY_NUMBERS.sub('', token).strip()
results = PUNCT_ANYWHERE.split(token)
    # filter: skip spaCy's pronoun lemma placeholder, keep multi-char tokens
    # (the original checked t == 'PRON' after the length test, which made the
    # check unreachable since len('PRON') > 1)
    for t in results:
        if t == 'PRON':
            continue
        if len(t) > 1:
            yield t
def lemmatize(line, doc_id):
"""Lemmatize and tokenize a line found in some document."""
doc = NLP(line)
for token in doc:
for el in post_process(token.lemma_):
yield (el, doc_id)
def token_stream(files):
"""Generates a stream of tokens from a list of files."""
for doc_id, file in tqdm(enumerate(files)):
with open(file, 'r') as fp:
# lines = [x.decode('utf-8') for x in fp.readlines()] # python 2.7 unicode issue
lines = fp.readlines() # python 3.7
for line in lines:
line = pre_process(line)
for term_doc in lemmatize(line, doc_id):
yield term_doc
if __name__ == '__main__':
import glob
res = list(token_stream(glob.glob('../../tokenizer/Cranfield/*')))
raw_lemmas = sorted(set([el[0] for el in res]))
processed = sorted(set([el[1] for el in res]))
with open('lemmas.txt', 'w') as fp:
for el in raw_lemmas:
fp.write(el)
fp.write('\n')
with open('processed.txt', 'w') as fp:
for el in processed:
fp.write(el)
fp.write('\n')
|
"""
Probability Calculator by Sofia Zavala
04/14/2021
"""
import random
import copy
class Hat:
def __init__(self, **kwargs):
""" Define the sample space.
Keyword args:
keys -- types of balls
values -- quantity of each type
"""
self.contents = [key for key, value in kwargs.items()
for x in range(value)]
def draw(self, num: int):
"""
Pick a ball at (pseudo)random from contents num times, w/o replacement.
Return a list of strings of the type of balls picked.
"""
if num >= len(self.contents): return self.contents
rec = []
for trial in range(num):
ball = random.choice(self.contents)
# Select a random ball.
rec.append(ball)
self.contents.remove(ball)
# Remove the picked ball from contents.
return rec
def experiment(hat, expected_balls: dict, num_balls_drawn: int,
num_experiments: int):
"""
Simulate an experiment of drawing a combination of balls from a hat.
Args:
hat -- a Hat object
expected_balls -- a dict with keys of ball colors and
values of expected number of draws eg. {'red': 2, 'blue': 3}
num_balls_drawn -- number of balls drawn for each experiment
        num_experiments -- number of trials
    Returns the probability of drawing a certain combination.
"""
def compare(expect, result):
copy = result[:]
for item in expect:
if item in copy:
copy.remove(item)
        return not copy
event = Hat(**expected_balls)
success = 0
for num in range(num_experiments):
cp = copy.deepcopy(hat)
res = cp.draw(num_balls_drawn)
if compare(event.contents, res):
success += 1
else: continue
prob = success / num_experiments
return prob
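# Minimal usage sketch: estimate the probability of drawing at least 2 red
# and 1 blue in 4 draws from a hat with 5 red, 4 blue and 2 green balls.
if __name__ == '__main__':
    hat = Hat(red=5, blue=4, green=2)
    probability = experiment(hat, expected_balls={'red': 2, 'blue': 1},
                             num_balls_drawn=4, num_experiments=2000)
    print(probability)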
|
#!/bin/python
import sys
import copy
import re
import math
infile = open(sys.argv[1], "r")
instructions = []
for line in infile:
line = line.rstrip()
lineM = re.match(r"(N|S|E|W|L|R|F)(\d+)", line)
    if not lineM:
        print("WARNING: could not parse instruction:", line)
instructions.append((lineM.group(1), int(lineM.group(2))))
#print(instructions)
class Waypoint:
def __init__(self, startDir):
self.ewpos = 10
self.nspos = 1
def execute_N(self, value):
self.nspos += value
def execute_S(self, value):
self.nspos -= value
def execute_E(self, value):
self.ewpos += value
def execute_W(self, value):
self.ewpos -= value
def genericRotate(self, value):
cth = round(math.cos(value))
sth = round(math.sin(value))
ewpos = int(self.ewpos * cth - self.nspos * sth)
nspos = int(self.ewpos * sth + self.nspos * cth)
self.ewpos = ewpos
self.nspos = nspos
def execute_L(self, value):
self.genericRotate(math.radians(value))
def execute_R(self, value):
value *= -1
self.genericRotate(math.radians(value))
def executeInstruction(self, instruction):
(action, value) = instruction
#print(instruction, self.ewpos, self.nspos)
eval("self.execute_%s(%d)" % (action, value))
#print("\t", self.ewpos, self.nspos)
def getVector(self):
return (self.ewpos, self.nspos)
class Ship:
def __init__(self):
self.ewpos = 0
self.nspos = 0
self.waypoint = Waypoint(90)
def execute_F(self, value):
(wewpos, wnspos) = self.waypoint.getVector()
self.ewpos += wewpos * value
self.nspos += wnspos * value
def executeInstruction(self, instruction):
(action, value) = instruction
#print(instruction, self.ewpos, self.nspos)
if action == 'F':
self.execute_F(value)
else:
self.waypoint.executeInstruction(instruction)
#print(instruction, self.ewpos, self.nspos)
def computeManhattan(self):
return (abs(self.nspos) + abs(self.ewpos))
ship = Ship()
for instruction in instructions:
ship.executeInstruction(instruction)
print(ship.computeManhattan())
|
'''
Title : sWAP cASE
Subdomain : Strings
Domain : Python
Author : Darpan Zope
Created :
Problem : https://www.hackerrank.com/challenges/swap-case/problem
'''
def swap_case(s):
newstring = ""
for item in s:
if item.isupper():
newstring += item.lower()
else:
newstring += item.upper()
return newstring
#------------------------------------------
print(swap_case(input()))
|
import logging
import threading
import flask
from .requests import Request
__all__ = ['Skill']
class Skill(flask.Flask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sessions = {}
self._session_lock = threading.RLock()
def script(self, generator):
@self.route("/", methods=['POST'])
def handle_post():
flask.g.request = Request(flask.request.get_json())
logging.debug('Request: %r', flask.g.request)
content = self._switch_state(generator)
response = {
'version': flask.g.request['version'],
'session': flask.g.request['session'],
'response': content,
}
logging.debug('Response: %r', response)
return flask.jsonify(response)
return generator
def _switch_state(self, generator):
session_id = flask.g.request['session']['session_id']
with self._session_lock:
if session_id not in self._sessions:
state = self._sessions[session_id] = generator()
else:
state = self._sessions[session_id]
content = next(state)
if content['end_session']:
with self._session_lock:
del self._sessions[session_id]
return content
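# Hypothetical usage sketch: _switch_state advances the script generator one
# step per request, so a dialog is just a generator yielding response
# payloads, each carrying the 'end_session' flag checked above.
#
#     skill = Skill(__name__)
#
#     @skill.script
#     def dialog():
#         yield {'text': 'Hello! Say something.', 'end_session': False}
#         yield {'text': 'Goodbye!', 'end_session': True}
#
#     skill.run()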
|
# 61. Rotate List
#
# Given a linked list, rotate the list to the right by k places, where k is non-negative.
#
# Example 1:
#
# Input: 1->2->3->4->5->NULL, k = 2
# Output: 4->5->1->2->3->NULL
# Explanation:
# rotate 1 steps to the right: 5->1->2->3->4->NULL
# rotate 2 steps to the right: 4->5->1->2->3->NULL
# Example 2:
#
# Input: 0->1->2->NULL, k = 4
# Output: 2->0->1->NULL
# Explanation:
# rotate 1 steps to the right: 2->0->1->NULL
# rotate 2 steps to the right: 1->2->0->NULL
# rotate 3 steps to the right: 0->1->2->NULL
# rotate 4 steps to the right: 2->0->1->NULL
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __iter__(self):
item = self
while item is not None:
yield item.val
item = item.next
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if k == 0 or not head: return head
dummy = ListNode(-1)
dummy.next = head
length = 0
while dummy.next:
length += 1
dummy = dummy.next
dummy.next = head
steps = length - k % length - 1
p = head
for i in range(steps):
p = p.next
head = p.next
p.next = None
return head
if __name__ == '__main__':
obj = Solution()
head = ListNode(1)
node1 = ListNode(2)
node2 = ListNode(3)
node3 = ListNode(4)
node4 = ListNode(5)
head.next = node1
node1.next = node2
node2.next = node3
node3.next = node4
head = obj.rotateRight(head, 2)
assert list(head) == [4, 5, 1, 2, 3]
head = obj.rotateRight(ListNode(1), 1)
assert list(head) == [1]
head = ListNode(1)
node = ListNode(2)
head.next = node
head = obj.rotateRight(head, 1)
assert list(head) == [2, 1]
|
# def get_power_of3(x):
# '''
# purpose is to do this
# :param x: input character
# :return: a list
# :type x: integer
# '''
# import itertools as it
# '''itertools is a very powerful module
# '''
# assert isinstance(x,int)
# assert x>=1 and x<=40
#
# try:
# return get_powers_of3.d[n]
# except AttributeError:
# d -dict()
# for i,j,k,l in (it.product([-1,0,1],repeat=4)):
# d[sum([1*i+3*j+9*k+27*l])]=(i,j,k,l)
# get_powers_of3.d = d
# return get_power_of3.d[n]
def get_power_of3(n, mxpwr=3):
    '''
    Return the balanced-ternary digits of n: a tuple t with entries in
    {-1, 0, 1} such that sum(t[i] * 3**i for i in range(mxpwr + 1)) == n.
    Results are memoized on the function object.
    :param n: integer to represent
    :param mxpwr: highest power of 3 to use
    :return: a tuple of coefficients, one per power of 3
    '''
    import itertools as it
    from collections import defaultdict
    assert isinstance(n, int)
    assert isinstance(mxpwr, int)
    try:
        return get_power_of3.d[mxpwr][n]
    except (AttributeError, KeyError):
        # build (and cache) the lookup table for this mxpwr
        d = getattr(get_power_of3, 'd', defaultdict(dict))
        items = [[-1, 0, 1]] * (mxpwr + 1)
        for t in it.product(*items):
            sm = sum(k * 3 ** i for i, k in enumerate(t))
            d[mxpwr][sm] = t
        get_power_of3.d = d
        return get_power_of3.d[mxpwr][n]
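# Sanity check (editor's addition): 5 = -1*3**0 - 1*3**1 + 1*3**2, so its
# balanced-ternary digits for powers 0..3 are (-1, -1, 1, 0).
if __name__ == '__main__':
    assert get_power_of3(5) == (-1, -1, 1, 0)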
|
import globals
import numpy
from scipy.optimize import minimize
def chisquared(vars, alignm, data, eps_data = None):
if not eps_data:
eps_data = len(data)*[1.0]
hit = numpy.array([vars[0],vars[1]])
sum_array = [ ((data[i] - numpy.sqrt((hit - alignm[i]).dot(hit-alignm[i])) + vars[2])/eps_data[i])**2 for i in range(len(data)) ]
return sum(sum_array)
def HitMinimizer(hit, hit_errors = None, start_hit = [globals.LENGTH * 0.5, globals.LENGTH * 0.5]):
tlist = hit.pulses
timelist = [ t.time for t in tlist]
redtimelist = [ t - min(timelist) for t in timelist]
r_var_list = [ globals.SPEED * t for t in redtimelist ]
if not hit_errors:
hit_errors = len(tlist) * [ 0.5 * globals.NS ]
r_err_list = [ globals.SPEED * t for t in hit_errors ]
vars = [start_hit[0], start_hit[1], 0.1*globals.LENGTH]
align = [numpy.array([pmt[0],pmt[1]]) for pmt in globals.PMT_COORDS]
output = minimize(chisquared, vars, args=(align, r_var_list, r_err_list), bounds=((0,globals.LENGTH),(0,globals.LENGTH),(0,globals.LENGTH)), method='L-BFGS-B')
vec_hit = numpy.array([output.x[0],output.x[1]])
#vec_hit_err = numpy.array([numpy.sqrt(output.hess_inv[0][0]), numpy.sqrt(output.hess_inv[1][1])]) # NO COVARIANCE MATRIX WITH BOUNDS FIXME
time = sum([tlist[i].time - numpy.sqrt((vec_hit - align[i]).dot(vec_hit - align[i]))/globals.SPEED for i in range(len(align))])/len(align)
#time_err = numpy.sqrt(sum([(((vec_hit[0] - pmt[0]) * vec_hit_err[0])**2 + ((vec_hit[1] - pmt[1]) * vec_hit_err[1])**2) / (vec_hit - pmt).dot(vec_hit - pmt) for pmt in align])/len(align)/globals.SPEED + sum([err**2 for err in hit_errors])/len(align))
vec_hit_err = numpy.array([0.,0.])
time_err = 0.
hit.setCoords(vec_hit[0], vec_hit[1], vec_hit_err[0], vec_hit_err[1], time, time_err, output.fun)
|
import nltk
from nltk import word_tokenize
import json
states = []
observations = []
transition_probability = {'<S>' : {"<E>" : 0}}
emission_probability = {}
previousObservations = []
previousTags = []
def train(data="data.txt"):
file = open("data.txt", "r")
for line in file:
text = word_tokenize(line.lower())
tags = nltk.pos_tag(text)
for idx, tag in enumerate(tags):
try:
previousTag = tags[idx - 1][1]
nextTag = tags[idx + 1][1]
except:
previousTag = "<S>"
nextTag = "<E>"
if tag[0] not in states:
states.append(tag[0])
if tag[1] not in observations:
observations.append(tag[1])
if tag[1] not in previousTags:
emission_probability[tag[1]] = {}
transition_probability[tag[1]] = {}
transition_probability["<S>"][tag[1]] = 0
transition_probability[tag[1]]["<E>"] = 0
for observation in previousObservations:
emission_probability[tag[1]][observation] = 0
previousTags.append(tag[1])
for ptag in previousTags:
transition_probability[ptag][tag[1]] = 0
transition_probability[tag[1]][ptag] = 0
if tag[0] not in previousObservations:
for ptag in previousTags:
emission_probability[ptag][tag[0]] = 0
previousObservations.append(tag[0])
# filling emission_probability
if tag[0] not in emission_probability[tag[1]]:
emission_probability[tag[1]][tag[0]] = 1
else:
emission_probability[tag[1]][tag[0]] += 1
# filling transition_probability
if previousTag == "<S>":
if tag[1] not in transition_probability["<S>"]:
transition_probability["<S>"][tag[1]] = 1
else:
transition_probability["<S>"][tag[1]] += 1
else:
if nextTag not in transition_probability[tag[1]]:
transition_probability[tag[1]][nextTag] = 1
else:
transition_probability[tag[1]][nextTag] += 1
print("Training succeeded")
file.close()
return(states, observations, transition_probability, emission_probability)
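# Note: despite their names, the tables built above hold raw counts. A minimal
# sketch (not part of the original trainer) for turning each row of counts
# into a conditional probability distribution:
def normalize(table):
    for key, row in table.items():
        total = float(sum(row.values()))
        if total > 0:
            table[key] = {k: v / total for k, v in row.items()}
    return table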
if __name__ == "__main__":
train()
|
#!/usr/bin/env python
# By Jacek Zienkiewicz and Andrew Davison, Imperial College London, 2014
# Based on original C code by Adrien Angeli, 2009
import random
import os
import time
import math
import brickpi
interface=brickpi.Interface()
interface.initialize()
#Sonar Motor
motor = 2
#Wheel motors
motors = [3,1]
interface.motorEnable(motor)
interface.motorEnable(motors[0])
interface.motorEnable(motors[1])
#Sonar port
sonar = 2
interface.sensorEnable(sonar, brickpi.SensorType.SENSOR_ULTRASONIC);
#Touch sensors
touch_portL = 1
touch_portR = 3
interface.sensorEnable(touch_portL, brickpi.SensorType.SENSOR_TOUCH)
interface.sensorEnable(touch_portR, brickpi.SensorType.SENSOR_TOUCH)
#Set motor params
motorParams = interface.MotorAngleControllerParameters()
motorParams.maxRotationAcceleration = 6.0
motorParams.maxRotationSpeed = 12.0
motorParams.feedForwardGain = 255/20.0
motorParams.minPWM = 20.0
motorParams.pidParameters.minOutput = -255
motorParams.pidParameters.maxOutput = 255
motorParams.pidParameters.k_p = 400
motorParams.pidParameters.k_i = 100
motorParams.pidParameters.k_d = 0
#Apply motor params to motors
interface.setMotorAngleControllerParameters(motors[0],motorParams)
interface.setMotorAngleControllerParameters(motors[1],motorParams)
motorParams = interface.MotorAngleControllerParameters()
motorParams.maxRotationAcceleration = 6.0
motorParams.maxRotationSpeed = 12.0
motorParams.feedForwardGain = 255/20.0
motorParams.minPWM = 20.0
motorParams.pidParameters.minOutput = -255
motorParams.pidParameters.maxOutput = 255
motorParams.pidParameters.k_p = 300.0
motorParams.pidParameters.k_i = 100.0
motorParams.pidParameters.k_d = 0.0
interface.setMotorAngleControllerParameters(motor,motorParams)
# Location signature class: stores a signature characterizing one location
class LocationSignature:
def __init__(self, no_bins = 180):
self.sig = [0] * no_bins
def print_signature(self):
for i in range(len(self.sig)):
print self.sig[i]
# --------------------- File management class ---------------
class SignatureContainer():
def __init__(self, size = 5):
self.size = size; # max number of signatures that can be stored
self.filenames = [];
# Fills the filenames variable with names like loc_%%.dat
# where %% are 2 digits (00, 01, 02...) indicating the location number.
for i in range(self.size):
self.filenames.append('loc_{0:02d}.dat'.format(i))
# Get the index of a filename for the new signature. If all filenames are
# used, it returns -1;
def get_free_index(self):
n = 0
while n < self.size:
if (os.path.isfile(self.filenames[n]) == False):
break
n += 1
if (n >= self.size):
return -1;
else:
return n;
# Delete all loc_%%.dat files
def delete_loc_files(self):
print "STATUS: All signature files removed."
for n in range(self.size):
if os.path.isfile(self.filenames[n]):
os.remove(self.filenames[n])
    # Writes the signature to the file identified by index (e.g., if index is 1
# it will be file loc_01.dat). If file already exists, it will be replaced.
def save(self, signature, index):
filename = self.filenames[index]
if os.path.isfile(filename):
os.remove(filename)
f = open(filename, 'w')
for i in range(len(signature.sig)):
s = str(signature.sig[i]) + "\n"
f.write(s)
f.close();
# Read signature file identified by index. If the file doesn't exist
# it returns an empty signature.
def read(self, index):
ls = LocationSignature()
filename = self.filenames[index]
if os.path.isfile(filename):
f = open(filename, 'r')
for i in range(len(ls.sig)):
                s = f.readline().strip()  # save() writes plain ints, one per line
if (s != ''):
ls.sig[i] = int(s)
f.close();
else:
print "WARNING: Signature does not exist."
return ls
def readSonar():
readings = []
i = 0
while (i < 1):
r = interface.getSensorValue(sonar)[0]
readings.append(r)
i += 1
#print(readings)
return max(set(readings), key = readings.count)
def stop():
interface.setMotorRotationSpeedReferences([motor],[0.01])
interface.setMotorPwm(motor,0)
return
class Canvas:
def __init__(self,map_size=210):
self.map_size = map_size; # in cm;
self.canvas_size = 768; # in pixels;
self.margin = 0.05*map_size;
self.scale = self.canvas_size/(map_size+2*self.margin);
def drawLine(self,line):
x1 = self.__screenX(line[0]);
y1 = self.__screenY(line[1]);
x2 = self.__screenX(line[2]);
y2 = self.__screenY(line[3]);
print "drawLine:" + str((x1,y1,x2,y2))
def drawParticles(self,data):
display = [(self.__screenX(d[0]),self.__screenY(d[1])) + d[2:] for d in data];
print "drawParticles:" + str(display);
def __screenX(self,x):
return (x + self.margin)*self.scale
def __screenY(self,y):
return (self.map_size + self.margin - y)*self.scale
# A Map class containing walls
class Map:
def __init__(self):
self.walls = [];
def add_wall(self,wall):
self.walls.append(wall);
def clear(self):
self.walls = [];
def draw(self):
for wall in self.walls:
canvas.drawLine(wall);
canvas = Canvas(); # global canvas we are going to draw on
mymap = Map();
# Definitions of walls
# a: O to A
# b: A to B
# c: C to D
# d: D to E
# e: E to F
# f: F to G
# g: G to H
# h: H to O
mymap.add_wall((0,0,0,168)); # a
mymap.add_wall((0,168,84,168)); # b
mymap.add_wall((84,126,84,210)); # c
mymap.add_wall((84,210,168,210)); # d
mymap.add_wall((168,210,168,84)); # e
mymap.add_wall((168,84,210,84)); # f
mymap.add_wall((210,84,210,0)); # g
mymap.add_wall((210,0,0,0)); # h
mymap.draw();
# FILL IN: spin robot or sonar to capture a signature and store it in ls
def characterize_location(ls, x=84, y=30):  # defaults let 1-arg calls work; (84, 30) matches the scan origin used below
no = len(ls.sig)
angle = interface.getMotorAngle(sonar)[0]
for i in range(no):
if (i == no / 2):
interface.increaseMotorAngleReference(motor, -(math.pi * 2))
while not interface.motorAngleReferenceReached(motor) :
time.sleep(0.1)
time.sleep(2)
newangle = interface.getMotorAngle(sonar)[0]
print math.degrees(abs(angle - newangle))
angle = newangle
ls.sig[i] = readSonar()
interface.increaseMotorAngleReference(motor, (math.pi * 2) / no)
while not interface.motorAngleReferenceReached(motor) :
time.sleep(0.1)
#stop()
#time.sleep(1)
newangle = interface.getMotorAngle(sonar)[0]
print math.degrees(abs(angle - newangle))
angle = newangle
l = ls.sig[i]
a = i * (math.pi * 2) / no
X = (math.cos(a) * l) + x
Y = (math.sin(a) * l) + y
canvas.drawLine((x, y, X, Y))
#interface.increaseMotorAngleReference(motor, -(math.pi * 2))
#while not interface.motorAngleReferenceReached(motor) :
#time.sleep(0.01)
return
def print_loc(ls, x, y):
for i in range(len(ls.sig)):
l, a = ls.sig[i][0], ls.sig[i][1]
X = (math.cos(a) * l) + x
Y = (math.sin(a) * l) + y
canvas.drawLine((x, y, X, Y))
return
# FILL IN: compare two signatures
def compare_signatures(ls1, ls2):
dist = 0
for i in range(len(ls2.sig)):
dist += (ls1.sig[i] - ls2.sig[i])**2
return dist
# This function characterizes the current location, and stores the obtained
# signature into the next available file.
def learn_location():
ls = LocationSignature()
characterize_location(ls)
#print_loc(ls, 84, 30)
idx = signatures.get_free_index();
if (idx == -1): # run out of signature files
print "\nWARNING:"
print "No signature file is available. NOTHING NEW will be learned and stored."
print "Please remove some loc_%%.dat files.\n"
return
signatures.save(ls,idx)
print "STATUS: Location " + str(idx) + " learned and saved."
# This function tries to recognize the current location.
# 1. Characterize current location
# 2. For every learned locations
# 2.1. Read signature of learned location from file
# 2.2. Compare signature to signature coming from actual characterization
# 3. Retain the learned location whose minimum distance with
# actual characterization is the smallest.
# 4. Display the index of the recognized location on the screen
def recognize_location():
ls_obs = LocationSignature()
characterize_location(ls_obs)
lowest_dist = 30000000
lowest_idx = -1
# FILL IN: COMPARE ls_read with ls_obs and find the best match
for idx in range(signatures.size):
print "STATUS: Comparing signature " + str(idx) + " with the observed signature."
ls_read = signatures.read(idx)
dist = compare_signatures(ls_obs, ls_read)
if(dist < lowest_dist):
lowest_dist = dist
lowest_idx = idx
print "Spot is probably ", lowest_idx + 1
return lowest_idx + 1
def create_invariant_signature(readings):
histogram = [0] * 260
for reading in readings.sig:
histogram[int(reading)] += 1
return histogram
def compare_signatures_inv(ls1, ls2):
dist = 0
for i in range(len(ls2)):
dist += (ls1[i] - ls2[i])**2
return dist
def compare_signatures_angle(ls1, ls2, j):
dist = 0
for i in range(len(ls2.sig)):
dist += (ls1.sig[i+j if (i+j) < 180 else (i+j) - 180] - ls2.sig[i])**2
return dist
def calculate_angle(ls_sig, ls_obs):
lowest_dist = 30000000
angle = 0
for i in range(180):
dist = compare_signatures_angle(ls_sig, ls_obs, i)
if(dist < lowest_dist):
lowest_dist = dist
angle = i
return angle * 2
def recognize_location_inv():
ls_obs = LocationSignature()
characterize_location(ls_obs)
lowest_dist = 30000000
lowest_idx = -1
# FILL IN: COMPARE ls_read with ls_obs and find the best match
for idx in range(signatures.size):
print "STATUS: Comparing signature " + str(idx) + " with the observed signature."
ls_read = signatures.read(idx)
hist_ls_obs = create_invariant_signature(ls_obs)
hist_ls_read = create_invariant_signature(ls_read)
dist = compare_signatures_inv(hist_ls_obs, hist_ls_read)
if(dist < lowest_dist):
lowest_dist = dist
lowest_idx = idx
angle = calculate_angle(signatures.read(lowest_idx), ls_obs)
print "Spot is probably ", lowest_idx + 1,
print " with angle of ", angle
return lowest_idx + 1
def constant_scan():
interface.setMotorRotationSpeedReferences(motors, [2,2])
return
print "Running"
speed = 0.5
#interface.setMotorRotationSpeedReferences([motor], [speed])
#while not interface.motorRotationSpeedReferenceReached(motor):
# time.sleep(0.1)
start_angle = interface.getMotorAngles([motor])[0][0]
angle_range = math.pi * 0.7
right = -1
readings = []
interface.increaseMotorAngleReference(motor, angle_range)
while True:
if abs(interface.getMotorAngleReferences([motor])[0] - interface.getMotorAngle(motor)[0]) < 0.1 :
stop()
time.sleep(2)
interface.increaseMotorAngleReference(motor, right*angle_range)
right = right * -1
start_angle=interface.getMotorAngles([motor])[0][0]
        if readings:
            mean = sum(readings) / float(len(readings))
            print "Overall mean: ", mean
i = 0
rs = []
for r in readings:
rs.append(r)
i += 1
if i == 9:
rmean = sum(rs) / float(len(rs))
print "Mean: ", rmean
rs = []
i = 0
readings = []
print "reached"
print interface.getMotorAngle(motor)[0]
if right == -1:
reading = readSonar()
readings.append(reading)
#print "reading ", readSonar()
new_angle = interface.getMotorAngles([motor])[0][0]
a = (angle_range / 2) - abs(new_angle-start_angle)
a *= right
x = 84
y = 30
X = (math.cos(a) * reading) + x
Y = (math.sin(a) * reading) + y
canvas.drawLine((x, y, X, Y))
time.sleep(0.01)
interface.terminate()
# Prior to starting learning the locations, it should delete files from previous
# learning either manually or by calling signatures.delete_loc_files().
# Then, either learn a location, until all the locations are learned, or try to
# recognize one of them, if locations have already been learned.
#signatures = SignatureContainer(5);
#signatures.delete_loc_files()
#learn_location();
#recognize_location_inv();
|
"""Author Arianna Delgado
Created on May 28, 2020
"""
"""Display numbers from 50 t0 70"""
for i in range(50,71):
print(i)
|
import os
import sys
cur_path = os.getcwd()
sys.path.insert(0, '/'.join(cur_path.split('/')[:-1]))
import unittest
from parser.course import Course
from parser.coursecode import CourseCode
from parser.term import Term
from parser.unitrange import UnitRange
from storage.DBProxy import DBProxy
from storage.preprocessor import Preprocessor
from storage.DBPublisher import DBPublisher
class TestDBPublisher(unittest.TestCase):
def setUp(self) -> None:
self.proxy = DBProxy()
self.preprocessor = Preprocessor()
self.db = DBPublisher(self.proxy, self.preprocessor)
self.db.set_table_prefix("test_")
def tearDown(self) -> None:
self.proxy.disconnect()
# Wet run test, will add to database on Frank
def test_publish_catalog(self):
self.db.cleanup()
test_course_code = CourseCode("CSC/CPE", 466)
        test_course_name = "Knowledge Discovery from Data"
test_unit_range = UnitRange(1, 4)
test_terms = [Term.from_str("F"), Term.from_str("SP")]
test_iscrnc = 0
test_prereqs = "CSC 349 and one of the following: STAT 302, STAT 312, " \
"STAT 321 or STAT 350."
test_desc = "Overview of modern knowledge discovery from data (KDD) " \
"methods and technologies. Topics in data mining " \
"(association rules mining, classification, clustering), " \
"information retrieval, web mining. Emphasis on use of KDD " \
"techniques in modern software applications. 3 lectures, " \
"1 laboratory. "
test_courses = [Course(test_course_code,
                               test_course_name,
test_unit_range,
test_terms,
test_iscrnc,
test_prereqs,
test_desc)]
self.db.publish_catalog(test_courses)
def test_publish_schedule(self):
cur_schedule = {466: [("Foaad Khosmood", "TR 12:10 AM-1:30 PM")],
471: [("Zoe J. Wood", "TR 8:10 AM-9:30AM")]}
next_schedule = {482: [("Foaad Khosmood", "TR 8:10 AM-9:30 PM")],
466: [("Foaad Khosmood", "TR 12:10 AM-1:30 PM"),
("Foaad Khosmood", "MWF 10:10 AM-11:00 AM")]}
self.db.publish_schedule((cur_schedule, next_schedule))
if __name__ == "__main__":
unittest.main()
|
# wrapper script around py.test so coverage can run py.test from inside tox
import sys
import pytest
sys.exit(pytest.main())
|
from flask import Flask, request, jsonify, render_template
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import service_pb2_grpc
import fs
stub = service_pb2_grpc.V2Stub(ClarifaiChannel.get_grpc_channel())
from clarifai_grpc.grpc.api import service_pb2, resources_pb2
from clarifai_grpc.grpc.api.status import status_code_pb2
import base64
from flask_cors import CORS
import json
GROUP_INDEX = 0
PROBABILITY_INDEX = 1
SUB_INDEX = 2
food_dict = {
    'DRINKS': ['DRINKS', 'coffee', 'tea', 'milkshake', 'iced tea', 'ALCOHOLIC BEVERAGES', 'liqueur', 'aliment', 'drink',
'alcohol', 'ale', 'aperitif', 'beer', 'wine', 'white wine', 'whisky', 'vodka', 'vino', 'champagne',
'cocktail', 'vermouth', 'cobbler', 'cognac', 'rum', 'tequila', 'stout', 'shandy', 'liquor', 'martini',
'red wine', 'gin', 'Liqueur', 'sake', 'lager', 'mead', 'booze', 'brandy', 'DRINKS', 'SOFT DRINKS',
               'beverage', 'drink', 'coke', 'iced tea', 'milkshake', 'tonic', 'soda', 'fruit tea', 'ginger ale', 'DRINKS',
'WATER OR JUICE', 'beverage', 'drink', 'cider', 'water', 'compote', 'nectar', 'orange juice', 'smoothie',
'lemonade', 'juice', 'lime cordial', 'ice', 'DRINKS', 'HOT DRINKS', 'beverage', 'drink', 'black tea',
'cappuccino', 'espresso', 'black coffee', 'indian tea', 'decaffeinated coffee', 'herbal tea',
               'hot chocolate', 'green tea', 'coffee', 'black coffee', 'mocha'],
'VEGETABLES': ['VEGETABLES', 'komatsuna', "miner's lettuce", 'lettuce', 'cucumber', 'bitter gourd', 'VEG DISH',
'polenta', 'summer squash', 'acorn squash', 'succotash', 'ginger', 'orange squash', 'galangal',
'marinated cucumber', 'puree', 'pattypan squash', 'yellow summer squash', 'VEGETABLES', 'STARCHY VEG',
'zucchin', 'acorn squash', 'arracacha', 'ahipa', 'yam', 'carrot', 'ulluco', 'tuber', 'taro',
'sweet potato', 'spuds', 'garlic', 'potato', "lamb's quarters", 'julienne', 'maize', 'mashed potatoes',
'mashua', 'puree', 'GREEN VEG', 'LEAFY GREEN', 'afalfa sprouts', 'arugula', 'water spinach', 'bay leaf',
'brussels sprout', 'cabbage', 'cauliflower', 'chard', 'chaya', 'collards', 'common purslane',
'coriander', 'cress', 'dandelion greens', 'dill', 'tatsoi', 'endive', 'fat hen', 'fennel', 'fiddlehead',
'swiss chard', 'florence fennel', 'summer purslane', 'french beans', 'spearmint', 'parsley', 'sorrel',
'lemongrass', 'guar', "lamb's lettuce", 'romaine', 'greater plantain', 'komatsuna/luffa',
"lettuce/miner's lettuce", 'luffa', 'malabar spinach', 'pak choy', 'radicchio', 'salal', 'spinach',
'yao choy', 'alfalfa sprouts', 'NON LEAFY GREEN', 'bok choy', 'bamboo shoots', 'courgette',
'broccolini', 'asparagus', 'cauliflower', 'cardoon', 'celeriac', 'celery', 'celtuce', 'drumstick',
'chinese artichoke', 'cilantro', 'squash', 'edamame', 'fava beans', 'string bean', 'tomatillo',
'snow pea', 'snap pea', 'indian pea', 'green pepper', 'ivy gourd', 'iceberg lettuce', 'orache',
'new zealand spinach', 'mizuna greens', 'malabar', 'spinach', 'kohlrabi', 'artichoke', 'broccoli',
'hot pepper', 'prussian asparagus', 'samphire', 'ROOT', 'beet', 'cassava', 'ulluco', 'turnip', 'tuber',
'truffle', 'taro', 'swede', 'sprouts', 'skirret', 'potato', 'rutabaga', 'radish', 'prairie turnip',
'horse radish', 'jicama', 'jerusalem artichoke', 'ginseng', 'parsnip', 'lotus root', 'hamburg parsley',
'beet root', 'burdock', 'salsify', 'NIGHTSHADE VEG OR OTHER', 'brinjal', 'cauliflower', 'cayenne',
'cherry tomato', 'chili pepper', 'aubergine', 'daikon', 'tomato', 'tomatillo', 'tamarillo',
'sweet pepper', 'mushroom', 'olive', 'onion', 'green onion', 'habanero pepper', 'cayenne pepper',
'red cabbage', 'jalapeno', 'rhubarb', 'pepper', 'radicchio', 'red pepper', 'vegetable'],
'FRUIT': ['breadfruit', 'rambutan', 'berry', 'mango', 'watercress', 'water caltrop', 'dried apricot', 'damson',
'dried fruit', 'fig', 'fluted pumpkin', 'sultanas', 'strawberry', 'grape', 'pineapple', 'pumpkin', 'pomelo',
'pomegranate', 'plum', 'jabouticaba', 'peppera', 'pear', 'kiwi fruit', 'passionfruit', 'miracle fruit',
'lychee', 'loquat', 'gourd', 'apple', 'prune', 'redcurrant', 'quince', 'raisin', 'TROPICAL', 'chayote',
'coconut', 'dragonfruit', 'durian', 'feijoa', 'mango', 'star fruit', 'papaya', 'honeydew melon', 'guava',
              'jackfruit', 'jambul', 'banana', 'cherimoya', 'ensete', 'purple mangosteen', 'rapini', 'bitter gourd',
'STONE FRUIT (PIT)', 'date', 'sweet cherry', 'physalis', 'jujube', 'cherry', 'MELON', 'watermelon',
'wintermelon', 'bitter melon', 'cantaloupe', 'melon', 'tinda', 'winter melon', 'BERRIES', 'blueberry',
'boysenberry', 'bilberry', 'black currant', 'blackberry', 'whortleberry', 'cloudberry', 'cranberry',
'cranberries', 'currant', 'elderberry', 'mulberry', 'strawberry', 'marionberry', 'gooseberry', 'huckleberry',
'salmonberry', 'raspberry', 'persimmon', 'peppercorn', 'juniper berry', 'goji berry', 'avocado',
'CITRIC FRUIT', 'blood orange', 'citron', 'citrus', 'clementine', 'orange', 'ugli fruit', 'tangerine',
'lemon', 'grapefruit', 'grilled salmon', 'peach', 'lime', 'nectarine', 'mandarin orange', 'kumquat',
'satsuma'],
'GRAIN OR CEREAL BASED': ['GRAIN OR CEREAL', 'noodle', 'bran', 'barley', 'wheat flake', 'buckwheat', 'corn',
'cornflakes', 'couscous', 'lentil', 'grain', 'horse gram', 'granola', 'garbanzo',
'oatmeal cereal', 'oatmeal', 'frozen peas', 'oat', 'muesli bar', 'muesli', 'mochi', 'malt',
'groats', 'straw', 'PASTA AND NOODLES', 'angel-hair pasta', 'vermicelli', 'tortellini',
'tagliatelle', 'farfalle', 'fettuccine', 'spaghetti carbonara', 'spaghetti bolognese',
'spaghetti', 'ravioli', 'ramen', 'penne', 'macaroni', 'linguine', 'lasagne', 'pad thai',
'pasta', 'fusilli', 'BREAKFAST OR SNACK', 'cereal', 'cereal bar', 'tortilla chips',
'porridge', 'popcorn', 'nachos', 'kettle corn', 'crunch', 'supper', 'goody',
'oatmeal cookie', 'pumpernickel', 'RICE', 'brown rice', 'rice', 'rice flake', 'fried rice',
'pilaf', 'pike', 'paddy', 'BREAD', 'BREAD PRODUCTS', 'yeast', 'wheat', 'cracker', 'dough',
'tapioca', 'rye', 'garlic bread', 'hamburger bun', 'BREAD',
'CRACKER OR ALTERNATIVE BREAD BASE', 'crispbread', 'crouton', 'crust', 'toast', 'dumpling',
'flatbread', 'cracker', 'BREAD', 'BREAD PREPARED', 'bread rolls', 'breadcrumb', 'bread',
'bread pudding', 'brioche', 'brown bread', 'bun', 'canape', 'bread', 'bagel', 'wheat bread',
                              'baked alaska', 'ciabatta', 'corn bread', 'toast', 'french bread', 'sourdough bread',
'soda bread', 'sliced loaf', 'gingerbread', 'rye bread', 'raisin bread', 'meatloaf',
'baguette', 'breadstick', 'hot dog bun', 'pita bread'],
'PLANT-BASED': ['BEANS AND TOFU', 'pumpkin seeds', 'beans', 'velvet bean', 'baked beans', 'broad beans',
'yardlong bean', 'winged bean', 'common bean', 'urad bean', 'tofu', 'tepary bean', 'dolichos bean',
'soy', 'lima bean', 'mung bean', 'kidney bean', 'azuki bean', 'black beans', 'runner bean',
'moth bean', 'ricebean', 'quinoa', 'AROMATICS', 'wild leek', 'welsh onion', 'chives', 'cloves',
'tree onion', 'spring onion', 'shallot', 'scallion', 'potato onion', 'peppermint', 'pearl onion',
'oyster', 'garlic chives', 'marjoram', 'licorice', 'leek', 'land cress', 'lagos bologi', 'onion',
                    'SEEDS', 'amaranth', 'apricot pits', 'sunflower seeds', 'flax', 'sesame seed', 'pumpkin seeds',
'pistachio', 'pea', 'okra', 'nopal', 'anise', 'millet', 'PICKLED OR PACKAGED', 'sweet corn',
'sour cabbage', 'relish', 'pickled cucumber', 'pickle', 'napa cabbage', 'gherkin', 'sauerkraut',
'LEGUMES OR NUTS', 'almond', 'walnut', 'water chestnut', 'beancurd', 'cashew', 'hazelnut', 'chickpeas',
'tigernut', 'tarwi', 'tamarind', 'split peas', 'soy', 'pine nut', 'pignut', 'pigeon pea', 'pecan',
'peanut', 'pea', 'macadamia nut', 'ground beef', 'chestnut', 'legume', 'nut'],
'MEAT AND CHICKEN': ['RAW MEAT', 'prime rib', 'loin', 'beef', 'chicken', 'meat', 'tartare', 'antipasto', 'venison',
'bruschetta', 'carpaccio', 'beef carpaccio', 'beef tartare', 'meat', 'elephant foot yam',
'elephant garlic', 'escargots', 'filet mignon', 'octopus', 'fowl', 'mutton', 'mince',
'prosciutto', 'SAUSAGE OR PROCESSED', 'pastrami', 'suet', 'bratwurst', 'wiener', 'blood sausage',
'chorizo', 'corned beef', 'spam', 'smoked sausage', 'sausage roll', 'sausage', 'salami',
'pepperoni', 'link sausages', 'FRIED MEAT', 'fritter', 'chicken', 'meat', 'steak', 'bacon',
'barbecue', 'bird', 'veal', 'chicken', 'chicken breast', 'chicken leg', 'chicken wings',
'beef steak', 'turkey breast', 'tenderloin', 'nugget', 'GRILLED/ROASTED', 'chicken', 'meat',
'steak', 'baby back ribs', 'brisket', 'veal cutlet', 'bird', 'veal', 'chicken', 'chicken breast',
'chicken leg', 'chicken wings', 'beef steak', 'ham', 'venison', 'turkey breast', 'tenderloin',
                         'frankfurters', 'spare ribs', 'skewer', 'sirloin', 'shish kebab', 'pork', 'rib', 'prime rib',
'pot roast', 'pork chop', 'pancetta', 'lamb chops', 'lamb', 'MEAT DISH', 'marrow', 'chicken',
'meat', 'cutlet', 'casserole', 'chicken', 'chicken breast', 'chicken curry', 'chicken leg',
'chicken quesadilla', 'chicken wings', 'beef steak', 'cooked meat', 'turkey', 'stir-fry',
'venison', 'turkey breast', 'tongue', 'duck', 'spare ribs', 'skewer', 'sirloin', 'shish kebab',
'hamburger', 'kebab', 'roast beef', 'pate', 'paella', 'meatball', 'lunchmeat', 'goose',
'foie gras', 'hash'],
'DAIRY OR EGG': ['DAIRY', 'yogurt', 'scrambled', 'omelette', 'NON CHEESE', 'whey', 'chevre', 'chocolate ice cream',
'chocolate mousse', 'milk', 'curd', 'custard', 'dairy product', 'dairy', 'DAIRY', 'CHEESE',
'camembert', 'chayote', 'brie', 'cottage cheese', 'cheesecake', 'dairy product', 'cream cheese',
'edam cheese', 'emmental', 'swiss cheese', 'mozzarella', 'roquefort', 'parmesan', 'gouda cheese',
'goats cheese', 'blue cheese', 'cheddar', 'fondue', 'cheese', 'dairy', 'gouda', 'EGG', 'Omelette',
'yarrow', 'yolk', 'egg', 'deviled eggs', 'egg white', 'egg yolk', 'eggplant', 'fried egg', 'omelet',
'meringue', 'frittata', 'scrambled egg', 'dairy'],
'SEAFOOD': ['SEAFOOD DISH', 'ceviche', 'chowder', 'clam chowder', 'crab cakes', 'tempura', 'fried calamari', 'squid',
'lobster bisque', 'salmon steak', 'bream', 'seafood', 'RAW SEAFOOD', 'tuna tartare', 'PLANT', 'seaweed',
'dulse', 'sea lettuce', 'sea grape', 'seaweed salad', 'sea kale', 'sea beet', 'laver', 'kale',
'good king henry', 'hijiki', 'kombu', 'wakame', 'DELICACY', 'caviar', 'lobster', 'roe', 'FISH', 'anchovy',
'bass', 'carp', 'cockle', 'tuna', 'sardine', 'crab', 'cuttlefish', 'trout', 'fillet', 'fillet of sole',
'fish steak', 'flatfish', 'sturgeon', 'salted fish', 'smoked fish', 'smoked salmon', 'snapper',
'sea perch', 'sea bass', 'lox', 'haddock', 'halibut', 'herring', 'salmon steak', 'salmon', 'plaice',
'pilchard', 'perch', 'mackerel', 'kingfish', 'kipper', 'fish fillet', 'prawn', 'scampi', 'fish', 'marron',
'SHELLFISH', 'calamari', 'clam', 'crayfish', 'eel', 'mussel', 'shellfish', 'scallop', 'shrimp'],
'DESSERT': ['CAKE', 'red velvet cake', 'cookie', 'baked alaska', 'brownie', 'cake', 'cake mix', 'cake pop',
'carrot cake', 'cheesecake', 'chocolate cake', 'chocolate chip cake', 'cupcake', 'tiramisu', 'flan',
'spongecake', 'sponge cake', 'souffle', 'shortcake', 'panna cotta', 'fruitcake', 'knish', 'birthday cake',
            'red velvet cake', 'PIE OR TART', 'apple pie', 'blueberry pie', 'whoopie pie', 'crumble', 'tartlet', 'tart',
'strudel', 'porridge', 'pie', 'mousse', 'meat pie', 'pork pie', 'quiche', 'BAKED GOODS',
'chocolate cookie', 'cinnamon roll', 'biscuits', 'viennese', 'croissant', 'crescent roll', 'crumble',
'eclair', 'english muffin', 'muffin', 'sesame roll', 'poppy seed roll', 'scone', 'raisin muffin', 'PASTRY',
'blueberry muffin', 'viennese', 'Baklava', 'beignets', 'roulade', 'millefeuille', 'blancmange', 'cannoli',
'chocolate cupcake', 'cinnamon roll', 'macaroon', 'baklava', 'danish pastry', 'galette', 'pastry',
'BATTER BASED', 'waffle', 'doughnut', 'macaron', 'pavlova', 'pancake', 'crepe', 'pavlova', 'popovers',
'CONFECTION', 'brittle', 'sweetmeat', 'sprinkles', 'sweet', 'CANDY', 'bonbon', 'candy', 'candy apple',
'candy bar', 'caramel apple', 'chocolate candy', 'chocolate cookie', 'chocolate ice cream', 'toffee',
'popsicle', 'marshmallow', 'jelly beans', 'jordan almonds', 'nougat', 'caramel', 'marzipan', 'bonbons',
'lollipop', 'GEL/LIQUID OR ICECREAM', 'apple sauce', 'brulee', 'parfait', 'chocolate mousse',
'cranberry sauce', 'creme brulee', 'tiramisu', 'syrup', 'sundae', 'chocolate ice cream', 'milk chocolate',
'spread', 'honey', 'sorbet', 'sherbet', 'ice cream', 'pudding', 'praline', 'frozen yogurt', 'mole sauce',
'marmalade', 'maple syrup', 'grape jelly', 'gelatin', 'jam', 'jelly', 'CHOCOLATE', 'wafer', 'candy bar',
'chocolate bar', 'chocolate cake', 'chocolate candy', 'm&m', 'torte', 'granola bar', 'souffle',
'granola bar', 'fudge', 'chocolate', 'FRIED', 'churros'],
'PREPARED DISHES OR SNACKS': ['ITALIAN', 'gyoza', 'pizza', 'spaghetti carbonara', 'spaghetti bolognese', 'spaghetti',
'ravioli', 'gorgonzola', 'parmesan', 'frozen pizza', 'risotto', 'DEEP FRIED',
'croquette', 'tempura', 'falafel', 'fish and chips', 'fish fingers', 'french fries',
'french toast', 'fried calamari', 'onion rings', 'samposa', 'SOUP OR STEW', 'chowder',
'casserole', 'soup', 'french onion soup', 'miso soup', 'pho', 'gazpacho',
'SALTY SPREADS', 'hummus', 'liver pate', 'BREAKFAST DISH', 'croque madame', 'falafel',
'huevos rancheros', 'ASIAN', 'takoyaki', 'sushi', 'spring rolls', 'bibimbap', 'sashimi',
'nigiri', 'mozuku', 'ogonori', 'hijiki', 'kombu', 'pad thai', 'nori', 'curry',
'MIDDLE-EASTERN', 'falafel', 'melokhia', 'tajine', 'tabouli', 'samosa', 'pita bread',
'ITALIAN', 'lasagna', 'focaccia', 'gnocchi', 'MEXICAN', 'tacos', 'huevos rancheros',
'SAVORY SNACK', 'chips', 'tempura', 'grissini', 'kombu', 'pretzel',
'SANDWICHES AND WRAPS', 'burrito', 'sandwich', 'gyro', 'shawarma',
'grilled cheese sandwich', 'hot dog', 'SALADS', 'caesar salad', 'caprese salad',
'corn salad', 'salad', 'cole slaw', 'tzatziki', 'slaw', 'seaweed salad', 'fruit salad',
'greek salad', 'coleslaw', 'hijiki', 'tamale', 'collation', 'marinated herring',
'ricotta', 'poutine', 'tabouli', 'beet salad'],
'MISC': ['VINEGAR', 'gravy', 'balsamic', 'vinaigrette', 'vinegar', 'SAUCE OR CONDIMENT', 'barbecue sauce', 'bechamel',
'white sauce', 'chili sauce', 'chutney', 'condiment', 'coulis', 'tomato sauce', 'teriyaki',
'sweet-and-sour sauce', 'steak sauce', 'french dressing', 'mustard', 'spaghetti sauce', 'soy sauce',
'sour cream', 'italian dressing', 'salsa', 'salad dressing', 'pesto', 'pasta sauce', 'garlic sauce',
'meat sauce', 'mayonnaise', 'wasabi', 'hot sauce', 'ketchup', 'guacamole', 'russian dressing', 'sauce',
'SAVORY LIQUID', 'broth', 'aspic', 'soup', 'goulash', 'OIL OR CREAM', 'butter', 'buttercream',
'whipped cream', 'cream', 'peanut butter', 'olive oil', 'margarine', 'butter', 'lard', 'oil'],
'HIDE': ['BAKING INGREDIENTS', 'ladle', 'mate', 'batter', 'cocoa', 'flour', 'plain flour', 'molasses', 'SPICES',
'allspice', 'caraway', 'cardamom', 'cayenne', 'chicory', 'chili', 'chili powder', 'anice', 'cinnamon',
'cumin', 'turmeric', 'thyme', 'salt', 'paprika', 'pepper', 'oregano', 'marzipan', 'garam masala',
'lemon peel', 'saffron', 'matcha', 'nutmeg', 'spices', 'HERBS', 'basil', 'chickweed', 'chicory', 'tarragon',
'thyme', 'sunflower', 'squash blossoms', 'sorghum', 'scorzonera', 'rosemary', 'rose', 'paracress', 'oregano',
'borage', 'sierra leone bologi', 'lavender', 'herb', 'mint', 'Brassicaceae', 'NONFOOD', 'aliment', 'aonori',
'broil', 'carbohydrate', 'comestible', 'dessert', 'dollop', 'feast', 'fodder', 'gastronomy', 'gem', 'grass',
'grub', 'hay', 'jug', 'kettle', 'micronutrient', 'nibble', 'papillote', 'pasture', 'peapod', 'platter',
'puff', 'ratatouille', 'ration', 'sage', 'saute', 'spatula', 'spork', 'supper', 'unleavened', 'cucurbitaceae']
}
pizzaSub = [
{"name": "Margarita Pizza", 'size': 0.6},
{"name": "Olive Pizza", 'size': 0.6},
{"name": "Mushroom Pizza", 'size': 0.6},
{"name": "Corn Pizza", 'size': 0.6}
]
def add_to_probability_list(probability_list, food_group, food_to_append, food_probability):
    '''
    Try to add a specific food to the probability list when its food_group is already in the list.
    If the food_group is already there, the entry is updated in place (the probability is added up
    and the food is appended to the SUB list) and the function returns True.
    Otherwise nothing is added and the function returns False.
    # probability_list = [[GROUP, PROBABILITY, [SUB]], [GROUP, PROBABILITY, [SUB]], ...]
    '''
    found = False  # Flag for the loop below; together with the break it keeps the loop from endlessly re-finding the entry it just re-appended.
for sub_list in probability_list: # Loop to find the item's group
if food_group == sub_list[GROUP_INDEX]:
sub = sub_list
probability_list.remove(sub_list)
sub[PROBABILITY_INDEX] += food_probability # Adding up the probability
sub[SUB_INDEX].append(food_to_append) # Appending the key to the SUB list
probability_list.append(sub)
            found = True  # Without this flag and the break, the loop would keep finding the re-appended entry.
            break  # Stop the loop, we found what we need.
return found
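# A minimal usage sketch (illustration only, never called), assuming the
# GROUP_INDEX / PROBABILITY_INDEX / SUB_INDEX constants defined earlier are 0 / 1 / 2:
def _demo_add_to_probability_list():
    plist = [['FRUITS', 0.8, ['apple']]]
    assert add_to_probability_list(plist, 'FRUITS', 'banana', 0.6) is True
    # plist is now [['FRUITS', ~1.4, ['apple', 'banana']]]
    assert add_to_probability_list(plist, 'SEAFOOD', 'tuna', 0.5) is False
    # unknown groups are not appended here; the caller adds them itself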
def bubbles_backend(result):
"""
    This function takes the food suggestions from Clarifai ("result") and returns a JSON string of the form:
{data: [
{name: <GROUP_NAME>, size: <PROBABILITY>, sub: [{name:<FOOD_NAME>, size: <PROBABILITY>}, {name:<FOOD_NAME>, size: <PROBABILITY>}, ...]},
{name: <GROUP_NAME>, size: <PROBABILITY>, sub: [{name:<FOOD_NAME>, size: <PROBABILITY>}, {name:<FOOD_NAME>, size: <PROBABILITY>}, ...]},
{name: <GROUP_NAME>, size: <PROBABILITY>, sub: [{name:<FOOD_NAME>, size: <PROBABILITY>}, {name:<FOOD_NAME>, size: <PROBABILITY>}, ...]},
...
]}
"""
group_found = False
# probability_list = [[GROUP, PROBABILITY, [SUB]], [GROUP, PROBABILITY, [SUB]], [GROUP, PROBABILITY, [SUB]]]
probability_list = []
# This block of code will fill the probability list (above)
for key in result.keys(): # Running on all the outputs of Clarifai. key = the specific food
group_found = False # For each key
for group in food_dict.keys(): # Running on all the food groups in the food dict
            # If the output from Clarifai is in this food group
if key in food_dict[group]:
# Declares that a (level B) food group was found
group_found = True
if add_to_probability_list(probability_list, group, key,
result[key]): # If the food group is already in the probability list
continue
else: # If the food group is not in the list
# Add new element: GROUP = group, SUB = [key]
probability_list.append([group, result[key], [key]])
        if not group_found:  # If no food group matched this food, file it under 'OTHER'
if add_to_probability_list(probability_list, 'OTHER', key, result[key]):
continue
else:
# Add new element: GROUP = group, SUB = [key]
probability_list.append(['OTHER', result[key], [key]])
# This block of code will divide the probability sum by the num of items found in this group.
main_list = []
# probability_list = [[GROUP, PROBABILITY, [SUB]], [GROUP, PROBABILITY, [SUB]], [GROUP, PROBABILITY, [SUB]]]
for item in probability_list:
if item[GROUP_INDEX] == 'HIDE':
            # Special case: anything from the group 'HIDE' (see food_dict) shouldn't be shown, for now at least.
continue
# {name: <GROUP_NAME>, size: <PROBABILITY>, sub: [{name:<FOOD_NAME>, size: <PROBABILITY>}, {name:<FOOD_NAME>, size: <PROBABILITY>}, ...]}
temp_dict = {}
# [{name:<FOOD_NAME>, size: <PROBABILITY>}, {name:<FOOD_NAME>, size: <PROBABILITY>}, ...]
temp_sub_list = []
for food in item[SUB_INDEX]:
            if food.lower() == "pizza":
temp_sub_list.append(
{'name': food, 'size': result[food], "sub": pizzaSub})
else:
temp_sub_list.append(
{'name': food, 'size': result[food]})
temp_dict['name'] = item[GROUP_INDEX]
temp_dict['size'] = float(
item[PROBABILITY_INDEX]) / len(item[SUB_INDEX])
temp_dict['sub'] = temp_sub_list
main_list.append(temp_dict)
main_dict = {'data': main_list}
return json.dumps(main_dict)
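# Note on group sizes: a group's size is its summed probability divided by the number
# of foods in it, e.g. two foods at 0.8 and 0.6 give a group size of (0.8 + 0.6) / 2 = 0.7.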
# with open("C:/Users/adams/Downloads/Eq_it-na_pizza-margherita_sep2005_sml.jpg", "rb") as img_file:
# my_string = base64.b64encode(img_file.read())
filename = 'pizza.jpg' # I assume you have a way of picking unique filenames
def convert(my_string):
imgdata = base64.b64decode(my_string)
# print(imgdata)
with open(filename, 'wb') as f:
f.write(imgdata)
with open(filename, "rb") as f:
file_bytes = f.read()
# print(file_bytes)
print(1)
metadata = (('authorization', 'Key c8891da1d32a44f0b6af0b7133996c9b'),)
post_model_outputs_response = stub.PostModelOutputs(
service_pb2.PostModelOutputsRequest(
model_id="bd367be194cf45149e75f01d59f77ba7",
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(
image=resources_pb2.Image(
base64=file_bytes
)
)
)
]
),
metadata=metadata
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
raise Exception("Post model outputs failed, status: " + post_model_outputs_response.status.description)
# Since we have one input, one output will exist here.
output = post_model_outputs_response.outputs[0]
print(output.data.concepts[0])
    z = {}
for x in output.data.concepts:
z[x.name] = x.value
return z
def bmi(weight, height):
return weight / (height ** 2)
def pounds(weight_in_kg):
return weight_in_kg * 2.205
def cal(weight, activity):
return pounds(weight) * 15 - (activity * 100)
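# Worked example (illustration only): for a 70 kg user with activity level 2,
# pounds(70) = 154.35 lb, so cal(70, 2) = 154.35 * 15 - 2 * 100 = 2115.25 kcal/day.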
def protein(weight, recommendation_dict):
if recommendation_dict['weight_gain'] is False and recommendation_dict['muscle_gain'] is False:
return weight * 0.7
if recommendation_dict['weight_gain'] is True and recommendation_dict['muscle_gain'] is False:
return weight * 1.5
    if recommendation_dict['muscle_gain'] is True:
return weight * 2
def fiber(sex, age, calorie_intake):
modifier = (calorie_intake - 2000) / 1000 * 14 if calorie_intake >= 2000 else 0
if sex == 'Male' and age < 50:
return 35 + modifier
if sex == 'Female' and age < 50:
return 25 + modifier
if sex == 'Male' and age > 50:
return 30 + modifier
if sex == 'Female' and age > 50:
return 20 + modifier
def carbs(calories, recommendation_dict):
return calories / 8 if recommendation_dict['reduce_carbs'] is False else calories / 10
def sugar(calories, age, recommendation_dict):
if age > 18 and recommendation_dict['reduce_sugar'] is False:
return calories / 53
if age < 18 and recommendation_dict['reduce_sugar'] is False:
return calories / 53 - 5
if age > 18 and recommendation_dict['reduce_sugar'] is True:
return calories / 53 - 10
if age < 18 and recommendation_dict['reduce_sugar'] is True:
return calories / 53 - 15
def total_lipid(calories, recommendation_dict):
if recommendation_dict['reduce_fats'] is False:
return calories / 25
else:
return calories / 30
def saturated(calories, recommendation_dict):
tot_lip = total_lipid(calories, recommendation_dict)
if recommendation_dict['reduce_fats'] is False:
return tot_lip / 3
else:
return tot_lip / 3 - 5
def trans(calories, recommendation_dict):
tot_lip = total_lipid(calories, recommendation_dict)
if recommendation_dict['reduce_fats'] is False:
return tot_lip / 12
else:
return tot_lip / 15
def cholesterol(recommendation_dict):
if recommendation_dict['reduce_cholesterol'] is False:
return 300
else:
return 200
def sodium(calories, age, recommendation_dict):
if age > 18 and recommendation_dict['reduce_sodium'] is False:
return calories * 1.15
elif age < 18 and recommendation_dict['reduce_sodium'] is False:
return calories * 1.15 - 200
else:
return calories * 0.9
def recommendation(height, weight, activity, sex, age, recommendation_dict):
"""
recommendation_dict = {
'weight_loss': bool,
'weight_gain': bool,
'muscle_gain': bool,
'weight_maintain': bool,
'reduce_carbs': bool,
'reduce_sugar': bool,
'increase_fiber': bool,
'reduce_fats': bool,
'reduce_cholesterol': bool,
'reduce_sodium': bool
}
^^ RECOMMENDATION DICT PASSED IN THIS FORMAT (ALL GOALS MUST EXIST)
:param bmi:
:param cal:
:param recommendation_dict:
:return:
"""
bmi_user = bmi(weight, height)
if bmi_user <= 19.5:
recommendation_dict['weight_loss'] = False
elif bmi_user > 32:
recommendation_dict['weight_gain'] = False
calories = cal(weight, activity)
nutrient_dict = {
'proteins': protein(weight, recommendation_dict),
'fibers': fiber(sex, age, calories),
'carbs': carbs(calories, recommendation_dict),
'sugars': sugar(calories, age, recommendation_dict),
'total_lipids': total_lipid(calories, recommendation_dict),
'saturated_fats': saturated(calories, recommendation_dict),
'trans_fats': trans(calories, recommendation_dict),
'cholesterol': cholesterol(recommendation_dict),
'sodium': sodium(calories, age, recommendation_dict)
}
return nutrient_dict
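# A minimal usage sketch (illustration only; the values are made up):
# goals = {'weight_loss': False, 'weight_gain': False, 'muscle_gain': False,
#          'weight_maintain': True, 'reduce_carbs': False, 'reduce_sugar': False,
#          'increase_fiber': False, 'reduce_fats': False,
#          'reduce_cholesterol': False, 'reduce_sodium': False}
# recommendation(1.75, 70, 2, 'Male', 30, goals)['proteins']  # -> 49.0 (70 * 0.7)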
def CompareNut(json,nutrient_dict):
GoodDict = {}
BadDict = {}
if json['Carbohydrate, by difference'] >= nutrient_dict['carbs'] * 7:
GoodDict['carbs'] = json['Carbohydrate, by difference'] - (nutrient_dict['carbs'] * 7)
else:
BadDict['carbs'] = (nutrient_dict['carbs'] * 7) - json['Carbohydrate, by difference']
if json['Protein'] >= nutrient_dict['proteins'] * 7:
GoodDict['proteins'] = json['Protein'] - (nutrient_dict['proteins'] * 7)
else:
BadDict['proteins'] = (nutrient_dict['proteins'] * 7) - json['Protein']
if json['Fiber, total dietary'] <= nutrient_dict['fibers'] * 7:
GoodDict['fibers'] = (nutrient_dict['fibers'] * 7) - json['Fiber, total dietary']
else:
BadDict['fibers'] = json['Fiber, total dietary'] - (nutrient_dict['fibers'] * 7)
if json['Sugars, total including NLEA'] >= nutrient_dict['sugars'] * 7:
GoodDict['sugars'] = json['Sugars, total including NLEA'] - (nutrient_dict['sugars'] * 7)
else:
BadDict['sugars'] = (nutrient_dict['sugars'] * 7) - json['Sugars, total including NLEA']
if json['Fatty acids, total saturated'] >= nutrient_dict['saturated_fats'] * 7:
GoodDict['saturated_fats'] = json['Fatty acids, total saturated'] - (nutrient_dict['saturated_fats'] * 7)
else:
BadDict['saturated_fats'] = (nutrient_dict['saturated_fats'] * 7) - json['Fatty acids, total saturated']
if json['Total lipid (fat)'] >= nutrient_dict['total_lipids'] * 7:
GoodDict['total_lipids'] = json['Total lipid (fat)'] - (nutrient_dict['total_lipids'] * 7)
else:
BadDict['total_lipids'] = (nutrient_dict['total_lipids'] * 7) - json['Total lipid (fat)']
if json['Fatty acids, total trans'] >= nutrient_dict['trans_fats'] * 7:
GoodDict['trans_fats'] = json['Fatty acids, total trans'] - (nutrient_dict['trans_fats'] * 7)
else:
BadDict['trans_fats'] = (nutrient_dict['trans_fats'] * 7) - json['Fatty acids, total trans']
if json['Cholesterol'] >= nutrient_dict['cholesterol'] * 7:
GoodDict['cholesterol'] = json['Cholesterol'] - (nutrient_dict['cholesterol'] * 7)
else:
BadDict['cholesterol'] = (nutrient_dict['cholesterol'] * 7) - json['Cholesterol']
if json['Sodium, Na'] >= nutrient_dict['sodium'] * 7:
GoodDict['sodium'] = json['Sodium, Na'] - (nutrient_dict['sodium'] * 7)
else:
BadDict['sodium'] = (nutrient_dict['sodium'] * 7) - json['Sodium, Na']
    return GoodDict, BadDict
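# CompareNut compares a week of intake (hence the * 7 factors) against the daily
# targets, e.g. a 49 g/day protein target becomes a 343 g weekly threshold.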
app = Flask(__name__)
CORS(app)
@app.route('/')
def main():
return render_template("main.html")
@app.route('/img/', methods=['GET', 'POST'])
def index():
content = request.json
img = content['image_base64']
result = convert(img)
return result
@app.route('/imgfull/', methods=['GET', 'POST'])
def imgfull():
content = request.json
img = content['image_base64']
result = convert(img)
result['pizza'] = 0.9896452903747559
print(result)
return bubbles_backend(result)
@app.route('/recommendation/', methods=['GET', 'POST'])
def recommendation_view():
    # named differently so it does not shadow (and recursively call) the recommendation() helper above
    content = request.json
    nutrient_dict = recommendation(content['height'], content['weight'], content['activity'],
                                   content['sex'], content['age'], content['recommendation_dict'])
    return json.dumps(nutrient_dict)
if __name__ == '__main__':
app.run(debug=True)
|
from sqlobject import *
from sqlobject.sqlbuilder import *
from ceo import conf
from ceo import members
from ceo import terms
import time
from datetime import datetime, timedelta
CONFIG_FILE = "/etc/csc/library.cf"
cfg = {}
def configure():
"""
Load configuration
"""
cfg_fields = [ "library_connect_string" ]
temp_cfg = conf.read(CONFIG_FILE)
conf.check_string_fields(CONFIG_FILE, cfg_fields, temp_cfg)
cfg.update(temp_cfg)
sqlhub.processConnection = connectionForURI(cfg["library_connect_string"])
class Book(SQLObject):
"""
A book. This does all the stuff we could
ever want to do with a book.
"""
isbn = StringCol()
title = StringCol()
year = StringCol()
publisher = StringCol()
authors = SQLRelatedJoin("Author")
signouts = SQLMultipleJoin("Signout")
def sign_out(self, u):
"""
Call this with a username to sign out
a book.
"""
if members.registered(u, terms.current()):
s = Signout(username=u, book=self,
outdate=datetime.today(), indate=None)
def sign_in(self, u):
"""
Call this to check a book back in to
the library. Username is used to
disambiguate in case more than one
copy of this book has been signed out.
"""
s = self.signouts.filter(AND(Signout.q.indate==None, Signout.q.username==u))
if s.count() > 0:
list(s.orderBy(Signout.q.outdate).limit(1))[0].sign_in()
return True
else:
raise Exception("PEBKAC: Book not signed out!")
def __str__(self):
"""
Magic drugs to make books display
nicely.
"""
book = "%s [%s]" % (self.title, self.year)
book += "\nBy: "
for a in self.authors:
book += a.name
book += ", "
if self.authors.count() < 1:
book += "(unknown)"
book = book.strip(", ")
signouts = self.signouts.filter(Signout.q.indate==None)
if signouts.count() > 0:
book += "\nSigned Out: "
for s in signouts:
book += s.username + " (" + str(s.due_date) + "), "
book = book.strip(", ")
return book
class Author(SQLObject):
"""
An author can author many books, and a book
can have many authors. This lets us map
both ways.
"""
name = StringCol()
books = RelatedJoin("Book")
class Signout(SQLObject):
"""
An instance of a signout associates usernames,
books, signout dates, and return dates to mark
that a book has been signed out by a particular
user.
"""
username = StringCol()
book = ForeignKey("Book")
outdate = DateCol()
indate = DateCol()
def sign_in(self):
"""
Terminate the signout (return the book).
"""
self.indate = datetime.today()
def _get_due_date(self):
"""
Compute the due date of the book based on the sign-out
date.
"""
return self.outdate + timedelta(weeks=2)
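        # e.g. a book signed out on 2019-01-01 falls due on 2019-01-15, two weeks later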
if __name__ == "__main__":
print "This functionality isn't implemented yet."
|
from ED6ScenarioHelper import *
def main():
    # Debug map
CreateScenaFile(
FileName = 'T0034 ._SN',
MapName = 'map1',
Location = 'T0030.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'00130雪拉待机', # 9
'00131雪拉移动', # 10
'00132雪拉攻击', # 11
'00133雪拉挨打', # 12
'00134雪拉倒下', # 13
'00135雪拉魔法咏唱', # 14
'00136雪拉魔法发动', # 15
'00137雪拉胜利', # 16
'00160提妲待机', # 17
'00161提妲移动', # 18
'00162提妲攻击', # 19
'00163提妲挨打', # 20
'00164提妲倒下', # 21
'00165提妲魔法咏唱', # 22
'00166提妲魔法发动', # 23
'00167提妲胜利', # 24
'00140科洛丝待机', # 25
'00141科洛丝移动', # 26
'00142科洛丝攻击', # 27
'00143科洛丝挨打', # 28
'00144科洛丝倒下', # 29
'00145科洛丝魔法咏唱', # 30
'00146科洛丝魔法发动', # 31
'00147科洛丝胜利', # 32
'00110约修亚待机', # 33
'00111约修亚移动', # 34
'00112约修亚攻击', # 35
'00113约修亚挨打', # 36
'00114约修亚倒下', # 37
'00115约修亚魔法咏唱', # 38
'00116约修亚魔法发动', # 39
'00117约修亚胜利', # 40
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 0,
Unknown_0C = 4,
Unknown_0E = 5,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 315,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH00110 ._CH', # 00
'ED6_DT07/CH00111 ._CH', # 01
'ED6_DT07/CH00112 ._CH', # 02
'ED6_DT07/CH00113 ._CH', # 03
'ED6_DT07/CH00114 ._CH', # 04
'ED6_DT07/CH00115 ._CH', # 05
'ED6_DT07/CH00116 ._CH', # 06
'ED6_DT07/CH00117 ._CH', # 07
'ED6_DT07/CH00113 ._CH', # 08
'ED6_DT07/CH00113 ._CH', # 09
'ED6_DT07/CH00113 ._CH', # 0A
'ED6_DT07/CH00113 ._CH', # 0B
'ED6_DT07/CH00120 ._CH', # 0C
'ED6_DT07/CH00121 ._CH', # 0D
'ED6_DT07/CH00122 ._CH', # 0E
'ED6_DT07/CH00123 ._CH', # 0F
'ED6_DT07/CH00124 ._CH', # 10
'ED6_DT07/CH00125 ._CH', # 11
'ED6_DT07/CH00126 ._CH', # 12
'ED6_DT07/CH00127 ._CH', # 13
'ED6_DT07/CH00123 ._CH', # 14
'ED6_DT07/CH00123 ._CH', # 15
'ED6_DT07/CH00123 ._CH', # 16
'ED6_DT07/CH00123 ._CH', # 17
'ED6_DT07/CH00160 ._CH', # 18
'ED6_DT07/CH00161 ._CH', # 19
'ED6_DT07/CH00162 ._CH', # 1A
'ED6_DT07/CH00163 ._CH', # 1B
'ED6_DT07/CH00164 ._CH', # 1C
'ED6_DT07/CH00165 ._CH', # 1D
'ED6_DT07/CH00166 ._CH', # 1E
'ED6_DT07/CH00167 ._CH', # 1F
'ED6_DT07/CH00163 ._CH', # 20
'ED6_DT07/CH00163 ._CH', # 21
'ED6_DT07/CH00163 ._CH', # 22
'ED6_DT07/CH00163 ._CH', # 23
'ED6_DT07/CH00140 ._CH', # 24
'ED6_DT07/CH00141 ._CH', # 25
'ED6_DT07/CH00142 ._CH', # 26
'ED6_DT07/CH00143 ._CH', # 27
'ED6_DT07/CH00144 ._CH', # 28
'ED6_DT07/CH00145 ._CH', # 29
'ED6_DT07/CH00146 ._CH', # 2A
'ED6_DT07/CH00147 ._CH', # 2B
'ED6_DT07/CH00143 ._CH', # 2C
'ED6_DT07/CH00143 ._CH', # 2D
'ED6_DT07/CH00143 ._CH', # 2E
'ED6_DT07/CH00143 ._CH', # 2F
)
AddCharChipPat(
'ED6_DT07/CH00110P._CP', # 00
'ED6_DT07/CH00111P._CP', # 01
'ED6_DT07/CH00112P._CP', # 02
'ED6_DT07/CH00113P._CP', # 03
'ED6_DT07/CH00114P._CP', # 04
'ED6_DT07/CH00115P._CP', # 05
'ED6_DT07/CH00116P._CP', # 06
'ED6_DT07/CH00117P._CP', # 07
'ED6_DT07/CH00113P._CP', # 08
'ED6_DT07/CH00113P._CP', # 09
'ED6_DT07/CH00113P._CP', # 0A
'ED6_DT07/CH00113P._CP', # 0B
'ED6_DT07/CH00120P._CP', # 0C
'ED6_DT07/CH00121P._CP', # 0D
'ED6_DT07/CH00122P._CP', # 0E
'ED6_DT07/CH00123P._CP', # 0F
'ED6_DT07/CH00124P._CP', # 10
'ED6_DT07/CH00125P._CP', # 11
'ED6_DT07/CH00126P._CP', # 12
'ED6_DT07/CH00127P._CP', # 13
'ED6_DT07/CH00123P._CP', # 14
'ED6_DT07/CH00123P._CP', # 15
'ED6_DT07/CH00123P._CP', # 16
'ED6_DT07/CH00123P._CP', # 17
'ED6_DT07/CH00160P._CP', # 18
'ED6_DT07/CH00161P._CP', # 19
'ED6_DT07/CH00162P._CP', # 1A
'ED6_DT07/CH00163P._CP', # 1B
'ED6_DT07/CH00164P._CP', # 1C
'ED6_DT07/CH00165P._CP', # 1D
'ED6_DT07/CH00166P._CP', # 1E
'ED6_DT07/CH00167P._CP', # 1F
'ED6_DT07/CH00163P._CP', # 20
'ED6_DT07/CH00163P._CP', # 21
'ED6_DT07/CH00163P._CP', # 22
'ED6_DT07/CH00163P._CP', # 23
'ED6_DT07/CH00140P._CP', # 24
'ED6_DT07/CH00141P._CP', # 25
'ED6_DT07/CH00142P._CP', # 26
'ED6_DT07/CH00143P._CP', # 27
'ED6_DT07/CH00144P._CP', # 28
'ED6_DT07/CH00145P._CP', # 29
'ED6_DT07/CH00146P._CP', # 2A
'ED6_DT07/CH00147P._CP', # 2B
'ED6_DT07/CH00143P._CP', # 2C
'ED6_DT07/CH00143P._CP', # 2D
'ED6_DT07/CH00143P._CP', # 2E
'ED6_DT07/CH00143P._CP', # 2F
)
DeclNpc(
X = 8000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 12,
ChipIndex = 0xC,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 13,
ChipIndex = 0xD,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 14,
ChipIndex = 0xE,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 11,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 15,
ChipIndex = 0xF,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 17,
ChipIndex = 0x11,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 12,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 18,
ChipIndex = 0x12,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 13,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 8000,
Z = 0,
Y = 32000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 19,
ChipIndex = 0x13,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 14,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 24,
ChipIndex = 0x18,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 25,
ChipIndex = 0x19,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 26,
ChipIndex = 0x1A,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 15,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 27,
ChipIndex = 0x1B,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 28,
ChipIndex = 0x1C,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 29,
ChipIndex = 0x1D,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 16,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 30,
ChipIndex = 0x1E,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 17,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 12000,
Z = 0,
Y = 32000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 31,
ChipIndex = 0x1F,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 18,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 36,
ChipIndex = 0x24,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 37,
ChipIndex = 0x25,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 38,
ChipIndex = 0x26,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 19,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 39,
ChipIndex = 0x27,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 40,
ChipIndex = 0x28,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 41,
ChipIndex = 0x29,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 20,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 42,
ChipIndex = 0x2A,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 21,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 16000,
Z = 0,
Y = 32000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 43,
ChipIndex = 0x2B,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 22,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 4000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 8000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 3,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 12000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 27,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 16000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 4,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 20000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 5,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 24000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 28,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 28000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 29,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = 24000,
Z = 0,
Y = 32000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 30,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
ScpFunction(
"Function_0_62A", # 00, 0
"Function_1_62B", # 01, 1
"Function_2_62C", # 02, 2
"Function_3_642", # 03, 3
"Function_4_658", # 04, 4
"Function_5_673", # 05, 5
"Function_6_68E", # 06, 6
"Function_7_6A9", # 07, 7
"Function_8_6C4", # 08, 8
"Function_9_6DA", # 09, 9
"Function_10_711", # 0A, 10
"Function_11_72C", # 0B, 11
"Function_12_747", # 0C, 12
"Function_13_75D", # 0D, 13
"Function_14_794", # 0E, 14
"Function_15_7AF", # 0F, 15
"Function_16_7CA", # 10, 16
"Function_17_7E0", # 11, 17
"Function_18_817", # 12, 18
"Function_19_832", # 13, 19
"Function_20_84D", # 14, 20
"Function_21_863", # 15, 21
"Function_22_89A", # 16, 22
"Function_23_8B5", # 17, 23
"Function_24_8D0", # 18, 24
"Function_25_8E6", # 19, 25
"Function_26_91D", # 1A, 26
"Function_27_938", # 1B, 27
"Function_28_953", # 1C, 28
"Function_29_969", # 1D, 29
"Function_30_9A0", # 1E, 30
"Function_31_9BB", # 1F, 31
"Function_32_9D6", # 20, 32
)
def Function_0_62A(): pass
label("Function_0_62A")
Return()
# Function_0_62A end
def Function_1_62B(): pass
label("Function_1_62B")
Return()
# Function_1_62B end
def Function_2_62C(): pass
label("Function_2_62C")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_641")
OP_99(0xFE, 0x0, 0x7, 0x708)
Jump("Function_2_62C")
label("loc_641")
Return()
# Function_2_62C end
def Function_3_642(): pass
label("Function_3_642")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_657")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Jump("Function_3_642")
label("loc_657")
Return()
# Function_3_642 end
def Function_4_658(): pass
label("Function_4_658")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_672")
OP_99(0xFE, 0x0, 0x0, 0x5DC)
Sleep(500)
Jump("Function_4_658")
label("loc_672")
Return()
# Function_4_658 end
def Function_5_673(): pass
label("Function_5_673")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_68D")
OP_99(0xFE, 0x0, 0x3, 0x7D0)
Sleep(500)
Jump("Function_5_673")
label("loc_68D")
Return()
# Function_5_673 end
def Function_6_68E(): pass
label("Function_6_68E")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_6A8")
OP_99(0xFE, 0x0, 0x3, 0x7D0)
Sleep(500)
Jump("Function_6_68E")
label("loc_6A8")
Return()
# Function_6_68E end
def Function_7_6A9(): pass
label("Function_7_6A9")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_6C3")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Sleep(500)
Jump("Function_7_6A9")
label("loc_6C3")
Return()
# Function_7_6A9 end
def Function_8_6C4(): pass
label("Function_8_6C4")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_6D9")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_8_6C4")
label("loc_6D9")
Return()
# Function_8_6C4 end
def Function_9_6DA(): pass
label("Function_9_6DA")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_710")
SetChrChipByIndex(0xFE, 5)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 6)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_9_6DA")
label("loc_710")
Return()
# Function_9_6DA end
def Function_10_711(): pass
label("Function_10_711")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_72B")
OP_99(0xFE, 0x0, 0xE, 0x7D0)
Sleep(500)
Jump("Function_10_711")
label("loc_72B")
Return()
# Function_10_711 end
def Function_11_72C(): pass
label("Function_11_72C")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_746")
OP_99(0xFE, 0x0, 0x9, 0x7D0)
Sleep(500)
Jump("Function_11_72C")
label("loc_746")
Return()
# Function_11_72C end
def Function_12_747(): pass
label("Function_12_747")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_75C")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_12_747")
label("loc_75C")
Return()
# Function_12_747 end
def Function_13_75D(): pass
label("Function_13_75D")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_793")
SetChrChipByIndex(0xFE, 17)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 18)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_13_75D")
label("loc_793")
Return()
# Function_13_75D end
def Function_14_794(): pass
label("Function_14_794")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_7AE")
OP_99(0xFE, 0x0, 0xD, 0x7D0)
Sleep(500)
Jump("Function_14_794")
label("loc_7AE")
Return()
# Function_14_794 end
def Function_15_7AF(): pass
label("Function_15_7AF")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_7C9")
OP_99(0xFE, 0x0, 0xD, 0x7D0)
Sleep(500)
Jump("Function_15_7AF")
label("loc_7C9")
Return()
# Function_15_7AF end
def Function_16_7CA(): pass
label("Function_16_7CA")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_7DF")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_16_7CA")
label("loc_7DF")
Return()
# Function_16_7CA end
def Function_17_7E0(): pass
label("Function_17_7E0")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_816")
SetChrChipByIndex(0xFE, 29)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 30)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_17_7E0")
label("loc_816")
Return()
# Function_17_7E0 end
def Function_18_817(): pass
label("Function_18_817")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_831")
OP_99(0xFE, 0x0, 0xE, 0x3E8)
Sleep(1000)
Jump("Function_18_817")
label("loc_831")
Return()
# Function_18_817 end
def Function_19_832(): pass
label("Function_19_832")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_84C")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Sleep(500)
Jump("Function_19_832")
label("loc_84C")
Return()
# Function_19_832 end
def Function_20_84D(): pass
label("Function_20_84D")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_862")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_20_84D")
label("loc_862")
Return()
# Function_20_84D end
def Function_21_863(): pass
label("Function_21_863")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_899")
SetChrChipByIndex(0xFE, 41)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 42)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_21_863")
label("loc_899")
Return()
# Function_21_863 end
def Function_22_89A(): pass
label("Function_22_89A")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_8B4")
OP_99(0xFE, 0x0, 0x14, 0x7D0)
Sleep(500)
Jump("Function_22_89A")
label("loc_8B4")
Return()
# Function_22_89A end
def Function_23_8B5(): pass
label("Function_23_8B5")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_8CF")
OP_99(0xFE, 0x0, 0xC, 0x7D0)
Sleep(500)
Jump("Function_23_8B5")
label("loc_8CF")
Return()
# Function_23_8B5 end
def Function_24_8D0(): pass
label("Function_24_8D0")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_8E5")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_24_8D0")
label("loc_8E5")
Return()
# Function_24_8D0 end
def Function_25_8E6(): pass
label("Function_25_8E6")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_91C")
SetChrChipByIndex(0xFE, 53)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 54)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_25_8E6")
label("loc_91C")
Return()
# Function_25_8E6 end
def Function_26_91D(): pass
label("Function_26_91D")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_937")
OP_99(0xFE, 0x0, 0x13, 0x7D0)
Sleep(500)
Jump("Function_26_91D")
label("loc_937")
Return()
# Function_26_91D end
def Function_27_938(): pass
label("Function_27_938")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_952")
OP_99(0xFE, 0x0, 0xC, 0x7D0)
Sleep(500)
Jump("Function_27_938")
label("loc_952")
Return()
# Function_27_938 end
def Function_28_953(): pass
label("Function_28_953")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_968")
OP_99(0xFE, 0x0, 0x3, 0x4B0)
Jump("Function_28_953")
label("loc_968")
Return()
# Function_28_953 end
def Function_29_969(): pass
label("Function_29_969")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_99F")
SetChrChipByIndex(0xFE, 5)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
OP_99(0xFE, 0x0, 0x3, 0x4B0)
SetChrChipByIndex(0xFE, 6)
OP_99(0xFE, 0x0, 0x1, 0x4B0)
Sleep(1000)
Jump("Function_29_969")
label("loc_99F")
Return()
# Function_29_969 end
def Function_30_9A0(): pass
label("Function_30_9A0")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_9BA")
OP_99(0xFE, 0x0, 0x21, 0x7D0)
Sleep(500)
Jump("Function_30_9A0")
label("loc_9BA")
Return()
# Function_30_9A0 end
def Function_31_9BB(): pass
label("Function_31_9BB")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_9D5")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Sleep(500)
Jump("Function_31_9BB")
label("loc_9D5")
Return()
# Function_31_9BB end
def Function_32_9D6(): pass
label("Function_32_9D6")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"你好。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_32_9D6 end
SaveToFile()
Try(main)
|
def quicksort(lista):
quicksort_aux(lista,0,len(lista)-1)
def quicksort_aux(lista, inicio, fin):
if inicio < fin:
pivote = particion(lista, inicio, fin)
quicksort_aux(lista, inicio, pivote-1)
quicksort_aux(lista, pivote+1, fin)
def particion(lista, inicio, fin):
pivote = lista[inicio]
print("Valor del pivote {}".format(pivote))
izquierda = incio+1
derecha = fin
print("Indice izquierdo {}".format(izquierda))
print("Indice derecha {}".format(derecha))
bandera = False
while not bandera:
while izquierda <= derecha and lista[izquierda] <= pivote:
izquierda = izquierda + 1
while lista[derecha] >= pivote and derecha >= izquierda:
derecha = derecha -1
if derecha < izquierda:
bandera = True
else:
temp=lista[izquierda]
lista[izquierda]=lista[derecha]
lista[derecha]=temp
print(lista)
temp=lista[inicio]
lista[inicio]=lista[derecha]
lista[derecha]=temp
return derecha
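# Example trace (worked by hand): particion([3, 1, 4, 2], 0, 3) partitions around the
# pivot 3, leaving the list as [2, 1, 3, 4], and returns 2, the pivot's final index.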
lista = [21, 10, 0, 11, 9, 24, 20, 14, 1]
print ("Lista desordenada {}".format (lista))
quicksort(lista)
print("Lista ordenada {}".format(lista))
|
from tkinter import *
from tkinter import messagebox
import listfile as fileloc
import module_addword as modAddword
import module_checkfile as modCheckfile
import module_find as modFind
import module_history as modHist
main = Tk()
main.bind("<Escape>", exit)
main.geometry("700x700")
main.resizable(width = False, height = False)
main.title('Program Translate')
background_image = PhotoImage(file='landscape2.png')
background_label = Label(main, image=background_image)
background_label.place(relwidth=1, relheight=1)
#--------------------------------------------------------------
def Pop_result():
val = entry.get()
result = modFind.main(val)
root = Tk()
root.title('คำแปล')
S = Scrollbar(root)
T = Text(root, height=18, width=50,font=("Courier", 16))
S.pack(side=RIGHT, fill=Y)
T.pack(side=LEFT, fill=Y)
S.config(command=T.yview)
T.config(yscrollcommand=S.set)
quote = result
T.insert(END, quote)
T.config(state=DISABLED)
mainloop()
#--------------------------------------------------------------
def Pop_Rank():
hist = modHist.main()
list_hist = hist.top()
hist_word = ""
j = 1
main = Tk()
main.geometry('500x225')
main.resizable(width = False, height = False)
main.title('Top 5 Search')
frame = Frame(main)
frame.place(relheight=1, relwidth=1)
for i in list_hist:
if j == 6:
break
hist_word += "%d. %s %s ครั้ง\n" % (j, i[0], i[1])
j += 1
    msg1 = Message(frame, font=("Perpetua", 20), text=hist_word, width=300)  # background
    msg1.place(relx=0.25, rely=0, relwidth=1, relheight=1, anchor='n')  # background
#--------------------------------------------------------------
def ask_user():
    if messagebox.askretrycancel('Check', 'Not found, please try again'):
entry.delete(0, END)
else:
pass
#---------------------------------------------------------
def to_History():
hist = modHist.main()
list_hist = hist.printhist()
hist_word = ""
j = 1
main = Tk()
main.geometry('700x700')
main.resizable(width = False, height = False)
main.title('History')
frame = Frame(main)
frame.place(relheight=1, relwidth=1)
    label = Label(main, text='History', font=("Perpetua", 20), fg='blue')
    label.place(relx=0.5, rely=0, relwidth=0.75, relheight=0.1, anchor='n')
for i in list_hist:
if j == 11:
break
hist_word += "%d. %s %s\n" % (j, i[0], i[1])
j += 1
    msg = Message(frame, font=("Perpetua", 12), text=hist_word, width=1000)  # background
    msg.place(relx=0.5, rely=0.01, relwidth=10, relheight=1, anchor='n')  # background
button = Button(frame, text="Clear History", command = (clear_hist),bg = 'gray' ,fg = 'white')
button.place(relx=0.5, rely=0.87, relwidth=0.25, relheight=0.1,anchor='n')
#---------------------------------------------------------
def clear_hist():
messagebox.showinfo("Clear", "Done!")
hist = modHist.main()
hist.clear()
#---------------------------------------------------------
def about():
main = Tk()
main.geometry('550x300')
main.resizable(width = False, height = False)
main.title('เอกสารอ้างอิง & โปรแกรม')
label = Label(main,text ='เอกสารที่ใช้ : พจนานุกรมคอมพิวเตอร์และอินเตอร์เน็ต \n (ผู้เขียน ทีมบรรณาธิการหนังสือคอมพิวเตอร์) \n ------------------------------------------------- \n โปรแกรมที่ใช้ : Python (version 3.7),กล้องถ่ายรูป,Google Sheets \n Adobe Photoshop CC,Adobe Photoshop C6,Sublime Text \n Microsoft Powerpoint,Microsoft Word,Github' ,font = ("Perpetua", 15), fg = 'blue')
label.place(relx=0.5, relwidth=10, relheight=1, anchor='n')
#---------------------------------------------------------
def develop():
main = Tk()
main.geometry('650x450')
main.resizable(width = False, height = False)
main.title('Group : She sell seashell by the seashore')
frame = Frame(main)
frame.place(relheight=1, relwidth=1)
label = Label(main,text ='ผู้จัดทำ',font = ("Perpetua", 30), fg = 'blue')
label.place(relx=0.5, rely=0.09, relwidth=0.75, relheight=0.1,anchor='n')
msg = Message(frame,font=("Perpetua", 20),text = 'น.ส.วชิรดา ท้าวนอก 600510575\nน.ส.ชนกานต์ เอกอนันต์กุล 610510648\nน.ส.นนทรพร จันทร์มณีวงศ์ 610510654\nน.ส.จุฬาลักษณ์ สุจันทร์ 610510801\nนายธเนศ สิงห์ลอ 610510803\nนายวสันต์ แพทย์รัตน์ 610510809',width = 500)#พื้นหลัง
msg.place(relx=0.5,rely=0.01, relwidth=1, relheight=1, anchor='n')#พื้นหลัง
#---------------------------------------------------------Menu Bar
menubar = Menu(main)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Top 5 Search", command=Pop_Rank)
filemenu.add_command(label="History", command=to_History)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=main.quit)
menubar.add_cascade(label="File", menu=filemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Developer", command=develop)
helpmenu.add_command(label="About...", command=about)
menubar.add_cascade(label="Help", menu=helpmenu)
main.config(menu=menubar)
#--------------------------------------------------------- Program Translate frame
top_frame = Frame(main, bg='white', bd=10)
top_frame.place(relx=0.28, rely=0.05, relwidth=0.45, relheight=0.125)
label = Label(top_frame,text ='Program Translate',font = ("Perpetua", 20), bg='#BFE9FF')
label.place(relwidth=1, relheight=1)
#--------------------------------------------------------- text-entry frame
frame = Frame(main, bg='#80c1ff', bd=5)
frame.place(relx=0.5, rely=0.2, relwidth=0.75, relheight=0.1,anchor='n')
entry = Entry(frame, font=40,justify='center')
entry.place(relwidth=1, relheight=1)
#--------------------------------------------------------- button frame
frame = Frame(main, bg='#80c1ff', bd=5)
frame.place(relx=0.5, rely=0.32, relwidth=0.25, relheight=0.1,anchor='n')
#---------------------------------------------------------
def click_translate():
    msg1 = Message(lower_frame, font=("Perpetua", 20), bd=3)  # background
    msg1.place(relx=0.5, rely=0, relwidth=1, relheight=1, anchor='n')  # background
    msg2 = Message(lower_frame, text='คำอธิบาย', font=("Cordia New", 18), bd=3, width=100000)  # caption label
msg2.place(relx=0.4)
val = entry.get()
result = modFind.main(val)
if result == None:
ask_user()
elif len(result) <= 200:
        msg = Message(lower_frame, text=result, font=("Cordia New", 16), width=420)  # fits about 30 words / 6 lines
msg.place(relx=0.05, rely=0.1341)
elif len(result) >= 201:
Pop_result()
#------------------------------------------------------------------ Enter to trans
def enter(obj):
    msg1 = Message(lower_frame, font=("Perpetua", 20), bd=3)  # background
    msg1.place(relx=0.5, rely=0, relwidth=1, relheight=1, anchor='n')  # background
    msg2 = Message(lower_frame, text='คำอธิบาย', font=("Cordia New", 18), bd=3, width=100000)  # caption label
    msg2.place(relx=0.4)  # caption label
val = entry.get()
result = modFind.main(val)
if result == None:
ask_user()
elif len(result) <= 200:
        msg = Message(lower_frame, text=result, font=("Cordia New", 16), width=420)  # fits about 30 words / 6 lines
msg.place(relx=0.05, rely=0.1341)
elif len(result) >= 201:
Pop_result()
button = Button(frame, text="Translate", font=40, command = click_translate)
button.place(relx=0.5, rely=0.009, relwidth=1, relheight=1,anchor='n')
main.bind('<Return>', enter)
#------------------------------------------------------------------
lower_frame = Frame(main, bg='#80c1ff', bd=10)
lower_frame.place(relx=0.5, rely=0.45, relwidth=0.75, relheight=0.5, anchor='n')
msg1 = Message(lower_frame, font=("Cordia New", 20), bd=3)  # background
msg1.place(relx=0.5, rely=0, relwidth=1, relheight=1, anchor='n')  # background
msg2 = Message(lower_frame, text='คำอธิบาย', font=("Cordia New", 18), bd=3, width=100000)  # caption label
msg2.place(relx=0.4)  # caption label
main.mainloop()
|
__author__ = 'gjbelang'
import pickle
from athlete_list import AthleteList
def get_coach_data(filename):
try:
with open(filename) as f:
data= f.readline()
templ = data.strip().split(',')
return AthleteList(templ.pop(0), templ.pop(0), templ)
except IOError as ioerr:
print('File error: ' + str(ioerr))
return None
def put_to_store(files_list):
all_athletes = {}
for each_file in files_list:
ath = (get_coach_data(each_file))
all_athletes[ath.name] = ath
try:
with open('athletedata.pickle', 'wb') as athf:
pickle.dump(all_athletes, athf)
except IOError as ioerr:
print('File error (put_and_store):' + str(ioerr))
return all_athletes
def get_from_store():
all_athletes = {}
try:
with open('athletedata.pickle', 'rb') as athf:
all_athletes = pickle.load(athf)
except IOError as ioerr:
print('File error (get_from_store):' + str(ioerr))
return all_athletes
def get_names_from_store():
athletes = get_from_store()
response = [athletes[each_ath].name for each_ath in athletes]
return response
"""
the_files = ['sarah2.txt', 'james2.txt', 'mikey2.txt', 'julie2.txt']
data = put_to_store(the_files)
print(data)
for each_athlete in data:
    print(data[each_athlete].name + ' ' + data[each_athlete].dob)
print("---------------------------")
data_copy = get_from_store()
for each_athlete in data_copy:
print(data_copy[each_athlete].name + ' ' + data_copy[each_athlete].dob)"""
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from gen import views
urlpatterns = [
path('gen/', views.generate)
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import Practice
from contacts.models import Contact
from core.actions.export_to_csv import export_to_csv
class ContactInline(admin.StackedInline):
model = Contact
extra = 1
class PracticeAdmin(SimpleHistoryAdmin):
change_list_filter_template = "admin/filter_listing.html"
list_select_related = True
fieldsets = (
('Author', {'fields': ('author',)}),
('Address', {'classes': ('grp-collapse grp-open',),
'fields': ('practice', 'practice_address_line1',
'practice_address_line2',
'city', 'county', 'post_code')}),
('Practice Type', {'fields': ('category',)}),
('Web site', {'fields': ('web_site',)}),
('Note', {'fields': ('practice_note',)})
)
list_display = ["practice",
"category",
"city",
"county",
"tob_sent",
"tob_received",
"practice_note", ]
list_editable = ('practice_note',)
list_per_page = 15
list_filter = ("practice",
"category__category",
"city",
"county",
"post_code",)
search_fields = ["^practice",
"^city",
"^county__county",
"^post_code", ]
actions = [export_to_csv]
inlines = [
ContactInline,
]
admin.site.register(Practice, PracticeAdmin)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.optim as optim
from utils.helpers import Experience
class Agent(object):
def __init__(self, args, env_prototype, circuit_prototype):
# logging
self.mode = args.mode # NOTE: when mode==2 we visualize accessor states
self.logger = args.logger
# prototypes for env & model & memory
self.env_prototype = env_prototype # NOTE: instantiated in inherited Agents
self.env_params = args.env_params
self.circuit_prototype = (
circuit_prototype
) # NOTE: instantiated in inherited Agents
self.circuit_params = args.circuit_params
# TODO: let's decide what to save later
# params
self.model_name = (
args.model_name
) # NOTE: will save the current model to model_name
self.model_file = (
args.model_file
) # NOTE: will load pretrained model_file if not None
self.render = args.render
self.visualize = args.visualize
if self.visualize:
self.vis = args.vis
self.refs = args.refs
self.save_best = args.save_best
if self.save_best:
self.best_step = None # NOTE: achieves best_reward at this step
self.best_reward = (
None
) # NOTE: only save a new model if achieves higher reward
self.use_cuda = args.use_cuda
self.dtype = args.dtype
# agent_params
# criteria and optimizer
self.criteria = args.criteria
self.optim = args.optim
# hyperparameters
self.steps = args.steps
self.batch_size = args.batch_size
self.early_stop = args.early_stop
self.clip_grad = args.clip_grad
# self.clip_value = args.clip_value
self.lr = args.lr
self.optim_eps = args.optim_eps
self.optim_alpha = args.optim_alpha
self.eval_freq = args.eval_freq
self.eval_steps = args.eval_steps
self.prog_freq = args.prog_freq
self.test_nepisodes = args.test_nepisodes
def _reset_experience(self):
self.experience = Experience(
state0=None, action=None, reward=None, state1=None, terminal1=False
)
def _load_model(self, model_file):
if model_file:
self.logger.warning("Loading Model: " + self.model_file + " ...")
self.circuit.load_state_dict(torch.load(model_file))
self.logger.warning("Loaded Model: " + self.model_file + " ...")
else:
self.logger.warning("No Pretrained Model. Will Train From Scratch.")
def _save_model(self, step, curr_reward=0.0):
self.logger.warning(
"Saving Model @ Step: " + str(step) + ": " + self.model_name + " ..."
)
if self.save_best:
if self.best_step is None:
self.best_step = step
self.best_reward = curr_reward
if curr_reward >= self.best_reward:
self.best_step = step
self.best_reward = curr_reward
torch.save(self.circuit.state_dict(), self.model_name)
self.logger.warning(
"Saved Model @ Step: "
+ str(step)
+ ": "
+ self.model_name
+ ". {Best Step: "
+ str(self.best_step)
+ " | Best Reward: "
+ str(self.best_reward)
+ "}"
)
else:
torch.save(self.circuit.state_dict(), self.model_name)
self.logger.warning(
"Saved Model @ Step: " + str(step) + ": " + self.model_name + "."
)
    def _forward(self, observation):
        raise NotImplementedError("not implemented in base class")
    def _backward(self, reward, terminal):
        raise NotImplementedError("not implemented in base class")
    def fit_model(self):  # training
        raise NotImplementedError("not implemented in base class")
    def _eval_model(self):  # evaluation during training
        raise NotImplementedError("not implemented in base class")
    def test_model(self):  # testing pre-trained models
        raise NotImplementedError("not implemented in base class")
|
# Python Do-While loop to catch upper and lower boundaries levels
while True:
N = int(input("Type the number of competitors: "))
if 2 <= N <= 10000:
break
# create a list
lst = []
# append the values to the list, iterating over the number of competitors
for i in range(N):
numbers = int(input())
lst.append(numbers)
    # stop early if the running total reaches the upper-bound limit
if sum(lst) >= 100000:
break
num = max(lst)  # num is the biggest number in the list
# define a function that checks whether all the values are the same
def all_same(items):
    return len(set(items)) < 2
# uses set() and len(): fewer than two distinct values means they are all equal
for i in range(N):
# check if the biggest number in the list is the first one
if num == lst[0]:
print("S")
break
    # call the all_same function to check whether all values are equal
elif all_same(lst) == True:
print("S")
break
else:
print("N")
break
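# Example (illustration only): entries [5, 5, 5] print "S" since all values are equal,
# while [2, 7, 3] prints "N" because the largest value is not the first entry.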
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.views.generic.base import View
from pure_pagination import Paginator, PageNotAnInteger
from django.shortcuts import redirect
from itertools import chain
import docker
import socket
import urllib3
import time
import random
from .models import Experiment, Docker
from ctf.models import Docker as ctfDocker
class ExpView(View):
"""
    Vulnerability experience list view.
"""
def get(self, request):
CATEGORY_CHOICES = {
"sql": u"SQL注入漏洞",
"xss": u"跨站脚本漏洞",
"weak_password": u"弱口令漏洞",
"http": u"HTTP报头追踪漏洞",
"struct2": u"Struct2远程命令执行漏洞",
"fishing": u"框架钓鱼漏洞",
"file_upload": u"文件上传漏洞",
"script": u"应用程序测试脚本泄露",
"ip": u"私有IP地址泄露漏洞",
"login": u"未加密登录请求",
"message": u"敏感信息泄露漏洞",
"comprehensive": u"综合"
}
CATEGORY_CHOICES2 = {
u"SQL注入漏洞": "sql",
u"跨站脚本漏洞": "xss",
u"弱口令漏洞": "weak_password",
u"HTTP报头追踪漏洞": "http",
u"Struct2远程命令执行漏洞": "struct2",
u"框架钓鱼漏洞": "fishing",
u"文件上传漏洞": "file_upload",
u"应用程序测试脚本泄露": "script",
u"私有IP地址泄露漏洞": "ip",
u"未加密登录请求": "login",
u"敏感信息泄露漏洞": "message",
u"综合": "comprehensive"
}
DEGREE = {
"cj": u"初级",
"zj": u"中级",
"gj": u"高级",
}
all_exps = Experiment.objects.all().order_by("-click_nums")
tags = []
for exp in all_exps:
tags.append(CATEGORY_CHOICES[exp.category])
tags = list(set(tags))
        # Category selected by the user
category = request.GET.get('category', "")
if not category:
pass
else:
try:
category = CATEGORY_CHOICES2[category]
except:
category = ''
if category:
all_exps = Experiment.objects.filter(category=category).order_by("-click_nums")
try:
category = CATEGORY_CHOICES[category]
except:
category = ''
        # Display labels for the page
for exp in all_exps:
exp.degree = DEGREE[exp.degree]
exp.category = CATEGORY_CHOICES[exp.category]
        # Pagination
try:
exp_page = request.GET.get('exp_page', 1)
except PageNotAnInteger:
exp_page = 1
        # Show 4 records per page
p1 = Paginator(all_exps, 4, request=request)
hot_exps = p1.page(exp_page)
return render(request, 'exp_list.html', {
"category": category,
"tags": tags,
"hot_exps": hot_exps,
})
class ExpDetailView(View):
"""
    Vulnerability docker page.
"""
def get(self, request, exp_id):
if not request.user.is_authenticated():
return render(request, "login.html", {"logintimes": 0})
else:
exp = Experiment.objects.get(id=int(exp_id))
exp.students += 1
exp.save()
            # Invoke docker
exist = Docker.objects.filter(image=exp.images, user=request.user.username)
            # Get the local IP
try:
my_ip = get_ip_address()
except:
my_ip = "127.0.0.1"
if not exist:
client = docker.from_env()
                # get the dockers this user instantiated previously
existed_exp = Docker.objects.filter(user=request.user.username)
existed_ctf = ctfDocker.objects.filter(user=request.user.username)
                # collect the ports that are already taken
exp_ports = list(Docker.objects.values_list('port', flat=True))
ctf_ports = list(ctfDocker.objects.values_list('port', flat=True))
                # remove the containers previously started for this user
for doc in chain(existed_exp, existed_ctf):
id = doc.con_id
try:
container = client.containers.get(id)
container.kill()
container.remove(force=True)
except:
pass
finally:
doc.delete()
                # pick a port that is not yet taken
ports = [i for i in range(1024, 65535) if i not in (exp_ports + ctf_ports)]
port = ports[random.randint(0, len(ports) - 1)]
while port_is_used(my_ip, port):
ports.pop(ports.index(port))
port = ports[random.randint(0, len(ports) - 1)]
con = client.containers.run(exp.images, detach=True, ports={str(exp.port) + '/tcp': str(port)})
container = Docker(user=request.user.username, image=exp.images, port=port, con_id=con.id)
container.save()
url = "http://%s:" % (my_ip) + str(Docker.objects.get(user=request.user, image=exp.images).port)
            # poll until the docker container is reachable
http = urllib3.PoolManager()
try:
content = http.request('GET', url)
except:
content = []
while not content:
try:
content = http.request('GET', url)
except:
content = []
time.sleep(0.5)
return redirect(url)
def get_ip_address():
"""
    Get the local IP address.
    Reference: https://www.chenyudong.com/archives/python-get-local-ip-graceful.html
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def port_is_used(ip, port):
"""
    Check whether the given port on the given IP is in use (i.e., open).
    Returns True if in use, False otherwise.
    Reference: http://www.jb51.net/article/79000.html
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False
|
import sys
input = sys.stdin.readline
from collections import defaultdict
def main():
N, M = map( int, input().split())
X = list( map( int, input().split()))
d = defaultdict( int)
Same = [0]*M
Mod = [0]*M
for x in X:
Mod[x%M] += 1
if d[x] > 0:
d[x] = 0
Same[x%M] += 2
else:
d[x] += 1
ans = 0
ans += Mod[0]//2
Mod[0] = Mod[0]%2
if M%2 == 0:
ans += Mod[M//2]//2
Mod[M//2] = Mod[M//2]%2
for i in range(1,(M+1)//2):
m = min(Mod[i], Mod[-i])
ans += m
Mod[i] = Mod[i] - m
Mod[-i] = Mod[-i] - m
for i in range(M):
ans += min(Mod[i], Same[i])//2
print(ans)
if __name__ == '__main__':
main()
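# A worked example of the pairing logic above (illustrative numbers of my own):
# N=6, M=3, X=[1, 2, 3, 4, 5, 6] gives residues Mod=[2, 2, 2] and no duplicate
# values, so Same=[0, 0, 0]. Mod[0]//2 contributes 1 pair (3, 6); residues 1 and
# 2 then pair up as min(Mod[1], Mod[2]) = 2 pairs, e.g. (1, 2) and (4, 5); ans = 3.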
|
from flask import Flask
from flask_ask import Ask, statement, question, session
from bs4 import BeautifulSoup
import json
import requests
import time
import unidecode
# import urllib2
# import ssl
# This restores the same behavior as before.
# context = ssl._create_unverified_context()
# urllib.urlopen("https://no-valid-cert", context=context)
app = Flask(__name__)
ask = Ask(app, "/")
def get_headerlines(part):
sess = requests.Session()
time.sleep(1)
url = 'https://news.northwestern.edu/' + part
html = sess.get(url)
    headlines = ""
    soup = BeautifulSoup(html.content.decode('utf-8'), 'html.parser')
    for article in soup.find_all('article'):
        for h3 in article.find_all('h3'):
            headlines += '...' + h3.find('a').string
    return headlines
@app.route('/')
def homepage():
return "hi there, how ya doin?"
@ask.launch
def start_skill():
    welcome_message = 'Hello there, here are the Northwestern News headlines. What kind of news would you like to hear?...'
    # welcome_message += 'We have University, Health, Science and Tech, Arts and humanities, nation and world, people and perspectives'
return question(welcome_message)
@ask.intent("YesIntent")
def share_headlines1():
return process_headline('university')
@ask.intent('UniversityIntent')
def university_intent():
return process_headline('university')
@ask.intent('HealthIntent')
def health_intent():
return process_headline('health')
@ask.intent('TechIntent')
def tech_intent():
return process_headline('science-and-tech')
@ask.intent('ArtsIntent')
def arts_intent():
return process_headline('arts-and-humanities')
@ask.intent('WorldIntent')
def world_intent():
return process_headline('nation-and-world')
@ask.intent('PeopleIntent')
def people_intent():
return process_headline('people-and-perspectives')
@ask.intent("NoIntent")
def no_intent():
    bye_text = 'I am not sure why you asked me to run then, but okay ... bye'
return statement(bye_text)
def process_headline(part):
headlines = get_headerlines(part)
    headlines_msg = 'The current ' + part + ' news headlines at Northwestern are' + headlines
return statement(headlines_msg)
@ask.intent('HelpIntent')
def help_intent():
    help_text = 'This app is very easy to use. All you need to do is to tell Alexa what kind of news you want to hear, '\
'such as University, Health, Science and Tech, Arts and humanities, nation and world, people and perspectives'
return question(help_text)
@ask.intent('StopIntent')
def stop_headline():
return statement('Okay ... see you later')
@ask.session_ended
def session_ended():
return "{}", 200
if __name__ == '__main__':
app.run(debug=True)
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
switch_pin = 23
GPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
if GPIO.input(switch_pin) == False:
print("Button Pressed")
time.sleep(0.2)
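# An interrupt-driven alternative sketch (assumption: RPi.GPIO's event API is
# available; bouncetime debounces the mechanical switch in milliseconds):
# GPIO.add_event_detect(switch_pin, GPIO.FALLING,
#                       callback=lambda ch: print("Button Pressed"),
#                       bouncetime=200)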
|
import tensorflow as tf
from tensorflow.contrib import slim
from graph_lm.models.networks.utils.dag_utils import message_passing
from ...stats import get_bias
def vae_decoder_dag_supervised(latent, dag, dag_bw, vocab_size, params, tags, tag_size, weights_regularizer=None, is_training=True):
# latent (N, L, D)
with tf.variable_scope('decoder'):
tag_embeddings = tf.get_variable(
dtype=tf.float32,
name="tag_embeddings",
shape=[tag_size, params.decoder_dim],
initializer=tf.initializers.truncated_normal(
stddev=1. / tf.sqrt(tf.constant(params.encoder_dim, dtype=tf.float32))))
h_tags = tf.nn.embedding_lookup(params=tag_embeddings, ids=tags)
h = tf.concat([latent, h_tags], axis=-1)
with tf.variable_scope("forward"):
h = message_passing(
latent=h,
dag_bw=dag_bw,
params=params
)
with tf.variable_scope("backward"):
h = message_passing(
latent=h,
dag_bw=dag,
params=params
)
with tf.variable_scope('output_mlp'):
h = slim.fully_connected(
inputs=h,
activation_fn=tf.nn.leaky_relu,
num_outputs=params.decoder_dim,
scope='output_1'
)
logits = slim.fully_connected(
inputs=h,
num_outputs=vocab_size,
activation_fn=None,
scope='output_2',
weights_regularizer=weights_regularizer,
biases_initializer=tf.initializers.constant(
value=get_bias(smoothing=params.bias_smoothing),
verify_shape=True)
) # (N,L,V)
return logits
|
from django.contrib import admin
from .models import Post
# Register your models here.
class PostAdmin(admin.ModelAdmin):
list_display=('id','title','created','author')
admin.site.register(Post,PostAdmin)
|
from .S2Identified import S2Identified
from .terms import Prov
from .terms import SBOL2
from rdflib import URIRef
from rdflib.namespace import RDF
class S2ProvAssociation(S2Identified):
def __init__(self, g, uri):
super(S2ProvAssociation, self).__init__(g, uri)
@property
def agent(self):
return self.get_identified_property(Prov.agent)
@agent.setter
def agent(self, agent):
self.set_identified_property(Prov.agent, agent)
@property
def plan(self):
return self.get_identified_property(Prov.hadPlan)
@plan.setter
def plan(self, plan):
self.set_identified_property(Prov.hadPlan, plan)
@property
def role(self):
return self.get_uri_property(Prov.hadRole)
@role.setter
def role(self, role):
self.set_uri_property(Prov.hadRole, role)
|
"""discreteplot
Make plots from data for discrete distributions.
Usage:
plot.py [-v] [-q] infect <filename1> <filename2>
plot.py [-v] [-q] detect <filename1>
plot.py [-v] [-q] quarantine <filename1>
plot.py [-v] [-q] locations <filename1> [--cutoff=CUTOFF]
plot.py [-v] [-q] disease <filename1> <filename2> [--runcnt=RUNCNT]
Options:
-h --help Print this screen.
-v Verbose
-q Quiet
-t Testing flag
"""
import logging
import sys
import docopt
import shutil
import os
import itertools
import math
import re
import tempfile
import numpy as np
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import h5py
import yaml
from docopt import docopt
import lifelines
from PyPDF2 import PdfFileWriter, PdfFileReader
import pyfarms.util as util
import pyfarms.dataformat as dataformat
import pyfarms.naadsm as naadsm
import pyfarms.farms as farms
logger=logging.getLogger("pyfarms.plot")
def AddPdfKeys(pdf_file, keydict):
"""
Add a dictionary of key-value pairs to a pdf file.
Adds metadata to PDF files so you know what generated them.
"""
multifiles=list()
if isinstance(pdf_file, list):
multifiles=pdf_file
else:
multifiles=[pdf_file]
for of in multifiles:
outstream=tempfile.NamedTemporaryFile(mode="wb", delete=False)
logger.debug("AddPdfKeys created {0}".format(outstream.name))
CopyPdfKeys(of, outstream, keydict)
outstream.close()
shutil.copyfile(outstream.name, of)
os.remove(outstream.name)
def SaveFig(pdfname, md):
pdfname=normalize_name(pdfname)
logger.info("Writing {0}".format(pdfname))
plt.savefig(pdfname)
AddPdfKeys(pdfname, md)
def CopyPdfKeys(pdf_file, outstream, keydict):
pdfDict={u"/{0}".format(k) : v for (k, v) in keydict.items()}
infile=PdfFileReader(pdf_file)
outfile=PdfFileWriter()
outfile.addMetadata(pdfDict)
for page in range(infile.getNumPages()):
outfile.addPage(infile.getPage(page))
outfile.write(outstream)
def normalize_name(title):
    # strip these characters anywhere in the name (character class, not a literal sequence)
    return re.sub(r"[-\\\"(){}]", "", re.sub(r"\s", "_", title))
def plot_unit_survival(kmf, ax, fired, fired_max, when_max, name):
if len(fired)<fired_max:
P=[1]*len(fired) + [0]*(fired_max-len(fired))
ftoplot=fired+([when_max]*(fired_max-len(fired)))
else:
P=[1]*len(fired)
ftoplot=fired
kmf.fit(ftoplot, P, label=name)
if ax:
ay=kmf.plot(ax=ax)
else:
ay=kmf.plot()
return ay
def compare_unit_survival(infect0, infect1, unit,
traj_cnt0, traj_cnt1, when_max, md):
kmf=lifelines.KaplanMeierFitter()
ax=plot_unit_survival(kmf, None, infect0, traj_cnt0, when_max, "Continuous")
plot_unit_survival(kmf, ax, infect1, traj_cnt1, when_max, "NAADSM")
SaveFig("unit_survival{0}.pdf".format(unit), md)
plt.clf()
plt.close()
def plot_infect(filenames, md):
"""
The goal is to make a comparison plot between two simulations,
one plot per unit. Each plot shows the distribution of the number
of days it takes for that unit to become infected. It's
a survival plot, so it starts at one. If it doesn't go to zero,
that means that, for some runs, that unit doesn't get infected.
"""
infection_times0, traj_cnt0=dataformat.infection_time(filenames[0])
# The NAADSM output is numbered from 0.
in1, traj_cnt1=dataformat.infection_time(filenames[1])
infection_times1={n+1 : v for (n,v) in in1.items()}
logger.debug(infection_times0.keys())
logger.debug(infection_times1.keys())
unitsa=set([int(x) for x in infection_times0.keys()])
unitsb=set([int(x) for x in infection_times1.keys()])
units=unitsa&unitsb
logger.info("Units in a, not b {0}".format(unitsa-unitsb))
logger.info("Units in b, not a {0}".format(unitsb-unitsa))
fired_max=-1
when_max=-1
for events in itertools.chain(infection_times0.values(),
infection_times1.values()):
fired_max=max(fired_max, len(events))
when_max=max(when_max, max(events))
    # units that never fire are censored with a last event at when_max, much later.
for unit in sorted(units):
inf0=infection_times0[unit]
inf1=infection_times1[unit]
compare_unit_survival(inf0, inf1, unit,
traj_cnt0, traj_cnt1, when_max, md)
# Now make a bubble plot.
just_sizes0=[len(x) for x in infection_times0.values()]
largest=[max(just_sizes0)]
smallest=[min(just_sizes0)]
just_sizes1=[len(x) for x in infection_times1.values()]
largest.append(max(just_sizes1))
smallest.append(min(just_sizes1))
print("Trajectory counts for 0 {0} 1 {1}".format(traj_cnt0, traj_cnt1))
print("unit, times0 times1 percent0 percent1")
for sunit in sorted(units):
print("{0} {1} {2} {3} {4}".format(sunit, len(infection_times0[sunit]),
len(infection_times1[sunit]), len(infection_times0[sunit])/traj_cnt0,
len(infection_times1[sunit])/traj_cnt1))
landscape=locations_from_filename(filenames[0])
x=list()
y=list()
color=list()
colors=["b", "r"]
size=list()
events=list()
for farm_name in units:
events.append((farm_name-1,
len(infection_times0[farm_name])/traj_cnt0, 0))
events.append((farm_name-1,
len(infection_times1[farm_name])/traj_cnt1, 1))
# sort by inverse size, so print large ones first.
events.sort(key=lambda x: -x[1])
logger.debug("sorted events {0}".format(events))
for (idx, big, who) in events:
loc=landscape.farm_locations[idx]
x.append(loc[1])
y.append(loc[0])
color.append(colors[who])
size.append(3.14*(10*big)**2)
fig=plt.figure(1)
ax=fig.add_subplot(111)
ax.set_title("Infection Frequency")
logger.debug("sizes {0}".format(size))
logger.debug("colors {0}".format(color))
plt.scatter(np.array(x), np.array(y), s=np.array(size),
marker='o', c=np.array(color))
SaveFig("scatter_compare.pdf", md)
def plot_detect(filename, name, event_id, md):
"""
What is the distribution of times that infection is first detected.
"""
detection_times, none_detected=dataformat.first_of_event(filename, event_id)
logger.info("Detected {0} times out of {1}".format(len(detection_times),
len(detection_times)+none_detected))
    if len(detection_times) == 0:
logger.info("The event {0} did not happen.".format(event_id))
sys.exit(0)
kmf=lifelines.KaplanMeierFitter()
last=max(detection_times)+1
detection=np.hstack([np.array(detection_times),
last*np.ones((none_detected,), dtype=np.double)])
P=[1]*len(detection_times)+[0]*none_detected
kmf.fit(detection, P, label=name)
ax=kmf.plot()
ax.set_title(name)
ax.set_xlabel("Days")
ax.set_ylabel("Survival")
SaveFig("{0}_survival.pdf".format(name), md)
plt.clf()
plt.close()
def locations_from_filename(filename):
f=h5py.File(filename, "r")["/trajectory"]
logger.debug(f.keys())
run=f[list(f.keys())[0]]
herd=run.attrs["herd"]
scenario=run.attrs["scenario"]
sxml, hxml, ns, initial, monitor=naadsm.load_naadsm_scenario(scenario, herd)
landscape=farms.Landscape()
landscape.from_naadsm_file(hxml, ns)
return landscape
def plot_locations(filename, color_cutoff, md):
landscape=locations_from_filename(filename)
for idx, farm in enumerate(landscape.premises):
if int(farm.name) == 21:
print("farm 21 is idx {0}".format(idx))
print(landscape.distances[20])
total=0.0
for dx in landscape.distances[20]:
if dx>1e-6:
total+=0.05**dx
print("Total hazard for exponential is {0}".format(total))
total=0.0
for dx in landscape.distances[20]:
if dx>1e-6 and dx<20:
total+=0.05*(20-dx)/19
print("Total hazard for linear is {0}".format(total))
infects=dataformat.causal_infection(filename, 0)
fig=plt.figure(1)
ax=fig.add_subplot(111)
ax.set_title("Cause of Infection")
locations=landscape.farm_locations
names=[f.name for f in landscape.premises]
prods=[f.production_type for f in landscape.premises]
prod_colors={"backyard" : "r", "broilers" : "b",
"layers" : "g", "turkeys" : "c"}
logger.info("Unit sizes {0}".format([f.size for f in landscape.premises]))
for idx in range(len(names)):
x0, y0=locations[idx]
plt.plot((y0,), (x0,), prod_colors[prods[idx]]+"o")
idxof={int(n) : i for (i, n) in enumerate(names)}
totals=list()
for (i, j) in itertools.combinations(range(1, len(locations)+1), 2):
if i < j:
total=0
if (i, j) in infects:
total+=infects[(i,j)]
if (j, i) in infects:
total+=infects[(j,i)]
totals.append((i, j, total))
maximum=max([x[2] for x in totals])
totals.sort(key=lambda x: x[2])
for (i, j, t) in totals:
x0, y0=locations[idxof[i]]
x1, y1=locations[idxof[j]]
dx=x0-x1
dy=y0-y1
if math.sqrt(dx*dx+dy*dy)<color_cutoff:
plt.plot([y0, y1], [x0, x1],
color=plt.cm.gray((1-t/maximum)**3),
alpha=t/maximum, linewidth=3)
else:
if t>0:
plt.plot([y0, y1], [x0, x1],
color=plt.cm.autumn(1-t/maximum), alpha=t/maximum,
linewidth=3)
ax.legend([Circle((0,0), radius=3, color="r"),
Circle((0,0), radius=3, color="b"),
Circle((0,0), radius=3, color="g"),
Circle((0,0), radius=3, color="c")],
["backyard", "broilers", "layers", "turkeys"],
loc="upper left")
SaveFig("connectivity.pdf", md)
plt.clf()
fig=plt.figure(1)
ax=fig.add_subplot(111)
ax.set_title("Farm Locations by Name")
idxof={int(n) : i for (i, n) in enumerate(names)}
for idx in range(len(names)):
x0, y0=locations[idx]
plt.text(y0, x0, str(names[idx]))
plt.plot((y0,), (x0,), prod_colors[prods[idx]]+"o")
SaveFig("locations.pdf", md)
def disease_comparison(times0, times1, name, md):
logger.debug("times0 len {0} times1 len {1}".format(
len(times0), len(times1)))
plt.clf()
fig=plt.figure(1, figsize=(4,3))
ax=fig.add_subplot(111)
kmf=lifelines.KaplanMeierFitter()
logger.info("Truncating times at 50.")
for tidx in range(len(times0)):
if times0[tidx]>50:
times0[tidx]=50
P0=[1]*len(times0)
kmf.fit(times0, P0, label="Continuous")
ax=kmf.plot(ax=ax)
ax.set_title(name)
P1=[1]*len(times1)
kmf.fit(times1, P1, label="NAADSM")
kmf.plot(ax=ax)
plt.tight_layout()
SaveFig("disease_comparison{0}.pdf".format(name), md)
def plot_disease(filenames, runcnt, md):
ds0=dataformat.disease_states(filenames[0], 21, runcnt)
ds1=dataformat.disease_states(filenames[1], 21, runcnt)
disease_comparison(ds0["latclin"], ds1["latclin"], "Latent to Clinical", md)
disease_comparison(ds0["clinrec"], ds1["clinrec"], "Clinical to Recovered", md)
if __name__ == "__main__":
arguments = docopt(__doc__, version="pyfarms.plot 1.0")
if arguments["-v"]:
logging.basicConfig(level=logging.DEBUG)
elif arguments["-q"]:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
md={
"script" : __file__, "arguments" : str(arguments),
"date" : util.when()
}
if arguments["infect"]:
plot_infect([arguments["<filename1>"], arguments["<filename2>"]], md)
if arguments["detect"]:
plot_detect(arguments["<filename1>"], "First Detection", 10, md)
if arguments["quarantine"]:
plot_detect(arguments["<filename1>"], "First Quarantine", 11, md)
if arguments["locations"]:
if arguments["--cutoff"]:
color_cutoff=float(arguments["--cutoff"])
else:
color_cutoff=float("inf")
plot_locations(arguments["<filename1>"], color_cutoff, md)
if arguments["disease"]:
if arguments["--runcnt"]:
runcnt=int(arguments["--runcnt"])
logger.info("Using runcnt={0}".format(runcnt))
else:
runcnt=float("inf")
plot_disease([arguments["<filename1>"], arguments["<filename2>"]],
runcnt, md)
|
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Login credentials
login = {
'userName': '<inputUserName>',
'password': '<inputPassword>'
}
# Form information
credentials = {
'firstName': '<inputFirstName>',
'lastName': '<inputLastName>',
'city': '<inputCity>',
'email': '<inputEmail>',
'phone': '<inputPhone>',
'age': '<inputAge>'
}
def initialize_browser(website):
browser = webdriver.Chrome()
print("---- Initializing Browser ----")
# Browser will load website specified in parameter
browser.get(website)
return browser
def get_login(browser):
print("---- Clicking on login link ----")
# Click on login link
browser.execute_script("document.getElementsByClassName('ex-content-click log-in-link capture_modal_open')[0].click();")
print("---- Filling in login credentials ----")
# Wait until login page loads or after 5 seconds and then fill in username and password
usernameInput = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.ID,'capture_signIn_traditionalSignIn_emailAddress')))
usernameInput.send_keys(login['userName'])
passwordInput = browser.find_element_by_id('capture_signIn_traditionalSignIn_password')
passwordInput.send_keys(login['password'])
print("---- Submitting ----")
# Find the login button and submit
submitInput = browser.find_element_by_id('capture_signIn_traditionalSignIn_signInButton')
submitInput.submit()
def fill_submit_form(browser):
print("---- Finding iFrame form ----")
# Wait until the iFrame loads and switch selection into it
wait = WebDriverWait(browser, 10).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, 'iframe[data-ss-embed="iframe"][scrolling="no"]')))
print("---- Finding Input in iFrame form ----")
# Wait until the class name loads inside iFrame and select for it by class name
wait = WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'ssRegistrationField')))
frameElems = browser.find_elements_by_class_name('ssRegistrationField')
labels = browser.find_elements_by_css_selector('span.ssLabelText')
# Fill in form according to the credentials dictionary
for i, elem in enumerate(frameElems):
print(f"---- Filling in credentials: {labels[i].text} with {list(credentials.values())[i]} ----")
elem.send_keys(list(credentials.values())[i])
print("---- Submitting ----")
# Find submit button and click it
browser.execute_script("document.getElementsByClassName('ssButtonContinue')[0].click();")
def closeBrowser(browser):
print("---- Closing browser in 5 seconds ----")
time.sleep(5)
browser.quit()
# Entry point for program
if __name__ == '__main__':
if len(sys.argv) > 1:
browser = initialize_browser(sys.argv[1])
else:
browser = initialize_browser('https://www.680news.com/2019/02/05/win-passes-to-the-2019-canadian-international-autoshow/')
get_login(browser)
fill_submit_form(browser)
closeBrowser(browser)
|
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
""" Allow users to edit their own profiles """
def has_object_permission(self, request, view, obj):
""" Check user has permissions to edit their own profiles """
#checks if the action falls under SAFE_METHODS : GET
if request.method in permissions.SAFE_METHODS:
return True
        # compare the id of the profile being edited with the id of the
        # authenticated user; edits are only allowed on the profile the
        # user is logged in with
return obj.id == request.user.id
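# Usage sketch (hypothetical viewset; the names below are illustrative only):
# class UserProfileViewSet(viewsets.ModelViewSet):
#     queryset = UserProfile.objects.all()
#     serializer_class = UserProfileSerializer
#     permission_classes = (UpdateOwnProfile,)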
|
"""Abstract base classes for GaussianFamily statistics.
"""
from __future__ import annotations
from typing import Optional, Sequence, final
from abc import abstractmethod
import sacc
import pyccl
import pyccl.nl_pt
from .....modeling_tools import ModelingTools
from .....parameters import ParamsMap
from .....updatable import Updatable
class SourceSystematic(Updatable):
"""An abstract systematic class (e.g., shear biases, photo-z shifts, etc.).
This class currently has no methods at all, because the argument types for
the `apply` method of different subclasses are different."""
def read(self, sacc_data: sacc.Sacc):
"""This method is called to allow the systematic object to read from the
appropriated sacc data."""
class Source(Updatable):
"""An abstract source class (e.g., a sample of lenses).
Parameters
----------
systematics : list of str, optional
A list of the source-level systematics to apply to the source. The
default of `None` implies no systematics.
"""
systematics: Sequence[SourceSystematic]
cosmo_hash: Optional[int]
tracers: Sequence[Tracer]
@final
def read(self, sacc_data: sacc.Sacc):
"""Read the data for this source from the SACC file."""
if hasattr(self, "systematics"):
for systematic in self.systematics:
systematic.read(sacc_data)
self._read(sacc_data)
@abstractmethod
def _read(self, sacc_data: sacc.Sacc):
"""Abstract method to read the data for this source from the SACC file."""
def _update_source(self, params: ParamsMap):
"""Method to update the source from the given ParamsMap. Any subclass
that needs to do more than update its contained :python:`Updatable`
objects should implement this method."""
@abstractmethod
def _reset_source(self):
"""Abstract method to reset the source."""
@final
def _update(self, params: ParamsMap):
"""Implementation of Updatable interface method `_update`.
This clears the current hash and tracer, and calls the abstract method
`_update_source`, which must be implemented in all subclasses."""
self.cosmo_hash = None
self.tracers = []
self._update_source(params)
@final
def _reset(self) -> None:
"""Implementation of the Updatable interface method `_reset`.
This calls the abstract method `_reset_source`, which must be implemented by
all subclasses."""
self._reset_source()
@abstractmethod
def get_scale(self) -> float:
"""Abstract method to return the scales for this `Source`."""
@abstractmethod
def create_tracers(self, tools: ModelingTools):
"""Abstract method to create tracers for this `Source`, for the given
cosmology."""
@final
def get_tracers(self, tools: ModelingTools) -> Sequence[Tracer]:
"""Return the tracer for the given cosmology.
This method caches its result, so if called a second time with the same
cosmology, no calculation needs to be done."""
ccl_cosmo = tools.get_ccl_cosmology()
cur_hash = hash(ccl_cosmo)
if hasattr(self, "cosmo_hash") and self.cosmo_hash == cur_hash:
return self.tracers
self.tracers, _ = self.create_tracers(tools)
self.cosmo_hash = cur_hash
return self.tracers
class Tracer:
"""Bundles together a pyccl.Tracer object with optional information about the
underlying 3D field, a pyccl.nl_pt.PTTracer, and halo profiles."""
@staticmethod
def determine_field_name(field: Optional[str], tracer: Optional[str]) -> str:
"""This function encapsulates the policy for determining the value to be
assigned to the :python:`field` member variable of a :python:`Tracer`.
It is a static method only to keep it grouped with the class for which it is
defining the initialization policy.
"""
if field is not None:
return field
if tracer is not None:
return tracer
return "delta_matter"
def __init__(
self,
tracer: pyccl.Tracer,
tracer_name: Optional[str] = None,
field: Optional[str] = None,
pt_tracer: Optional[pyccl.nl_pt.PTTracer] = None,
halo_profile: Optional[pyccl.halos.HaloProfile] = None,
halo_2pt: Optional[pyccl.halos.Profile2pt] = None,
):
"""Initialize a new Tracer based on the given pyccl.Tracer which must not be
None.
Note that the :python:`pyccl.Tracer` is not copied; we store a reference to the
original tracer. Be careful not to accidentally share :python:`pyccl.Tracer`s.
If no tracer_name is supplied, then the tracer_name is set to the name of the
:python:`pyccl.Tracer` class that was used.
If no field is given, then field is set to either (1) the tracer_name, if one
        was given, or (2) 'delta_matter'.
"""
assert tracer is not None
self.ccl_tracer = tracer
self.tracer_name: str = tracer_name or tracer.__class__.__name__
self.field = Tracer.determine_field_name(field, tracer_name)
self.pt_tracer = pt_tracer
self.halo_profile = halo_profile
self.halo_2pt = halo_2pt
@property
def has_pt(self) -> bool:
"""Return True if we have a pt_tracer, and False if not."""
return self.pt_tracer is not None
@property
def has_hm(self) -> bool:
"""Return True if we have a halo_profile, and False if not."""
return self.halo_profile is not None
|
#!/usr/bin/env python
# coding: utf-8
# In[11]:
import math
import re
import sys
import matplotlib.pyplot as plt
import os
import numpy as np
# In[12]:
train_data_directory = "train/"
# ### Word Count
# In[13]:
ham_dictionary = {} # stores words in ham files and their frequencies
spam_dictionary = {} # stores words in spam files and their frequencies
vocabulary = set() # stores unique words present in all files (spam and ham)
def word_count_directory(train_data_directory):
# list of file paths for files in train_data_directory
file_list = [os.path.join(train_data_directory,f) for f in os.listdir(train_data_directory)]
    # initialize the number of spam and ham files
no_of_spam_files = 0
no_of_ham_files = 0
for file_path in file_list:
with open(file_path,encoding='latin-1') as infile:
# to store type of file 'spam' or 'ham'
file_type = ''
if 'spam' in file_path:
file_type = 'spam'
no_of_spam_files += 1
elif 'ham' in file_path:
file_type = 'ham'
no_of_ham_files += 1
# Loop through each line of the file
for line in infile:
line = line.strip() # Remove the leading spaces and newline character
lower_line = str.lower(line) # Convert characters in line to lowercase to avoid case mismatch
                valid_words = re.split('[^a-zA-Z]',lower_line) # split on non-alphabetic characters
                valid_words = list(filter(None, valid_words)) # drop empty strings
# Iterate over each word in line
for word in valid_words:
if file_type == 'ham':
# Check if the word is already in dictionary
if word in ham_dictionary:
ham_dictionary[word] += 1
else:
ham_dictionary[word] = 1 # add word to dictionary with count 1
vocabulary.add(word) # add word to vocabulary set
# if this word is not present in spam_dictionary, add it with count 0
if word not in spam_dictionary:
spam_dictionary[word] = 0
elif file_type == 'spam':
# Check if the word is already in dictionary
if word in spam_dictionary:
spam_dictionary[word] += 1
else:
spam_dictionary[word] = 1 # add word to dictionary with count 1
vocabulary.add(word) # add word to vocabulary set
# if this word is not present in ham_dictionary, add it with count 0
if word not in ham_dictionary:
ham_dictionary[word] = 0
return no_of_spam_files,no_of_ham_files
# ### Prior Probabilities
# In[14]:
no_of_spam_files, no_of_ham_files = word_count_directory(train_data_directory)
total_no_of_files = no_of_spam_files + no_of_ham_files
prior_prob_of_spam = no_of_spam_files / total_no_of_files
prior_prob_of_ham = no_of_ham_files / total_no_of_files
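# Worked example (illustrative numbers, not from the actual dataset): with
# 400 spam files and 600 ham files, total = 1000, so prior_prob_of_spam =
# 400/1000 = 0.4 and prior_prob_of_ham = 600/1000 = 0.6.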
# ### Train model
# In[15]:
def create_model(vocabulary,ham_dictionary,spam_dictionary):
vocabulary = sorted(vocabulary) # sorting the vocabulary to maintain order in model.txt
f = open("model.txt","w+") # creating file that would store the model
N = len(vocabulary) # getting size of vocabulary
delta = 0.5 # smoothing value
smoothed_N = (delta * N)
    # smoothed denominator for the conditional probability of ham words
    ham_denominator = sum(ham_dictionary.values()) + smoothed_N
    # smoothed denominator for the conditional probability of spam words
    spam_denominator = sum(spam_dictionary.values()) + smoothed_N
    for i,word in enumerate(vocabulary):
        freq_in_ham = ham_dictionary[word] # frequency of word in ham dictionary
        c_p_in_ham = (freq_in_ham + delta) / ham_denominator # conditional probability of word in ham
        freq_in_spam = spam_dictionary[word] # frequency of word in spam dictionary
        c_p_in_spam = (freq_in_spam + delta) / spam_denominator # conditional probability of word in spam
ham_dictionary[word] = c_p_in_ham
spam_dictionary[word] = c_p_in_spam
# writing all data to model.txt
f.write(str(i+1)+' '+word+' '+str(freq_in_ham)+' '+str( "{:.8f}".format(float( c_p_in_ham )) )+' '+str(freq_in_spam)+' '+str( "{:.8f}".format(float( c_p_in_spam )) )+'\n')
f.close() # closing the file
create_model(vocabulary,ham_dictionary,spam_dictionary)
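# Worked example of the smoothed estimate above (illustrative numbers): with
# delta = 0.5, a vocabulary of N = 10 words, and 100 total ham word occurrences,
# a word seen 4 times in ham gets P(word|ham) = (4 + 0.5) / (100 + 0.5*10)
# = 4.5 / 105 ≈ 0.0429, so unseen words (count 0) still get a non-zero probability.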
# ### Evaluate the model
# In[16]:
test_data_directory = "test/"
# applying log10 on prior probabilities
log_of_ham = math.log10(prior_prob_of_ham)
log_of_spam = math.log10(prior_prob_of_spam)
# initialising variables needed for confusion matrix
true_positive = 0 # correct Ham -> result Ham
true_negative = 0 # correct Spam -> result Spam
false_positive = 0 # correct Spam -> result Ham
false_negative = 0 # correct Ham -> result Spam
file_list = [os.path.join(test_data_directory,f) for f in os.listdir(test_data_directory)] # file paths of test files
temp_counter = 0 # counter to store the test file count
f = open("result.txt", "w+") # 'w+' for reading and writing
f.truncate(0)
for file_path in file_list:
with open(file_path,encoding = 'latin-1') as infile:
file_name = file_path.rsplit('/',1)[1] # file name to store in result.txt
temp_counter = temp_counter + 1
score_log_ham = log_of_ham # score for ham
score_log_spam = log_of_spam # score for spam
if("test-ham" in file_path):
correct_classification = "ham"
else:
correct_classification = "spam"
vocab_test = [] # stores words in test file
for line in infile:
line = line.strip() # Remove the leading spaces and newline character
lower_line = str.lower(line) # Convert characters in line to lowercase to avoid case mismatch
valid_words = re.split('[^a-zA-Z]',lower_line) # filter words following the given regex
valid_words = list(filter(None, valid_words)) # filter words with length greater than 0
vocab_test = vocab_test + valid_words # appending valid_words to vocab_test
for word in vocab_test:
if word in vocabulary:
                # add log10 of conditional probability of word in ham_dictionary
                score_log_ham = score_log_ham + math.log10(ham_dictionary[word])
                # add log10 of conditional probability of word in spam_dictionary
                score_log_spam = score_log_spam + math.log10(spam_dictionary[word])
if(score_log_ham > score_log_spam):
predicted_classification = "ham"
else:
predicted_classification = "spam"
if(correct_classification == predicted_classification):
label = "right"
else:
label = "wrong"
if(correct_classification == "ham" and predicted_classification == "ham"):
true_positive = true_positive + 1
elif(correct_classification == "spam" and predicted_classification == "spam"):
true_negative = true_negative + 1
elif(correct_classification == "spam" and predicted_classification == "ham"):
false_positive = false_positive + 1
elif(correct_classification == "ham" and predicted_classification == "spam"):
false_negative = false_negative + 1
# format scores to appropriate string value
score_log_ham = str( "{:.8f}".format(float(score_log_ham)))
score_log_spam = str( "{:.8f}".format(float(score_log_spam)))
# writing results to result.txt
f.write(str(str(temp_counter)+" "+str(file_name)+" "+str(predicted_classification)+" "+str(score_log_ham)+" "+str(score_log_spam)+" "+str(correct_classification)+" "+str(label)+"\n"))
f.close() # closing file
# In[24]:
spam_true_positive = true_negative
spam_true_negative = true_positive
spam_false_positive = false_negative
spam_false_negative = false_positive
# ### Confusion Matrix
# In[17]:
def confusion_matrix(values , class_name , title='Confusion matrix'):
label_array=np.array([[ "(True Positive)", "(False Positive)"],
[ "(False Negative)", "(True Negative)"]])
plt.figure(figsize=(4, 4))
plt.imshow(values)
plt.title(title)
plt.colorbar()
tick = np.arange(len(class_name))
plt.xticks(tick, class_name)
plt.yticks(tick, class_name)
for i in range (values.shape[0]):
for j in range (values.shape[1]):
plt.text(j, i, "{:,}".format(values[i, j]),
color="red",horizontalalignment="center",verticalalignment="bottom")
plt.text(j, i,label_array[i,j],
color="red",horizontalalignment="center",verticalalignment="top")
plt.tight_layout()
plt.ylabel('Predicted ')
plt.xlabel('Actual ')
plt.show()
# In[25]:
confusion_matrix(values= np.array([[ true_positive, false_positive ],
[ false_negative, true_negative]]), class_name = ['Ham', 'Spam'],title="Confusion matrix for Ham Class")
confusion_matrix(values= np.array([[ spam_true_positive , spam_false_positive ],
[ spam_false_negative, spam_true_negative]]), class_name = ['Spam', 'Ham'],title="Confusion matrix for Spam class")
# ### Evaluation Metrics
# In[26]:
accuracy_ham = true_positive/(true_positive+false_negative)*100
accuracy_spam = spam_true_positive/(spam_true_positive+spam_false_negative)*100
accuracy_model=(true_positive+true_negative)/(true_positive+true_negative+false_positive+false_negative)*100
precision_ham = true_positive/(true_positive+false_positive)
precision_spam = spam_true_positive/(spam_true_positive+spam_false_positive)
recall_ham = true_positive/ (true_positive + false_negative)
recall_spam = spam_true_positive/(spam_true_positive+spam_false_negative)
F1measure_ham = 2*(recall_ham * precision_ham) / (recall_ham + precision_ham)
F1measure_spam = 2*(recall_spam * precision_spam) / (recall_spam + precision_spam)
# In[27]:
data = [["Ham", accuracy_ham, str("{:.3f}".format(float(precision_ham))), recall_ham, str("{:.3f}".format(float(F1measure_ham))) ],
["Spam", accuracy_spam, str("{:.3f}".format(float(precision_spam))) , recall_spam, str("{:.3f}".format(float(F1measure_spam))) ],
["Model", accuracy_model, "", "", ""]]
fig, axs =plt.subplots(2,1)
collabel=("Class","Accuracy", "Precision", "Recall","F1-measure")
axs[0].axis('tight')
axs[0].axis('off')
axs[1].axis("off")
the_table = axs[0].table(cellText=data,colLabels=collabel,loc='center')
the_table.auto_set_font_size(False)
the_table.set_fontsize(10)
the_table.scale(1, 2)
plt.show()
# In[ ]:
|
import pygame
from inputManager import InputManager
from gameObject import game_objects, GameObject, add
from map.canvas import Background
from map.matrixMap import generate_map
from point import Point
from boxes import Boxes
from createPlayer import createplayer,playerL
from createBoxes import createBoxes, bs
from createLines import createLines, lines
# 0. setup game
inputManager = InputManager()
# bg = Background()
# add(bg)
# player = Player(inputManager)
createplayer(inputManager)
point = Point()
add(point)
# add(player)
createBoxes()
createLines()
generate_map("assets/maps/map2.json")
# 1. Init pygame
pygame.init()
# 2. Set screen
SIZE = (32*25, 20*32)
canvas = pygame.display.set_mode(SIZE)
pygame.display.set_caption('Hello baibe')
# 3. Clock
clock = pygame.time.Clock()
loop = True
while loop:
# 1. Event processing
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
loop = False
else:
inputManager.update(event)
canvas.fill((0, 0, 0))
for player in playerL:
player.update()
player.render(canvas)
for obj in game_objects:
obj.render(canvas)
obj.update()
for b in bs:
b.update()
b.render(canvas)
for l in lines:
l.update()
l.render(canvas)
# 3. Flip
pygame.display.flip()
clock.tick(60)
|
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import UserProfile, City, Post
from django.contrib.auth.models import User
class RegistrationForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput, label="Password")
class Meta:
model = UserProfile
fields = ("email", "username", "password")
def clean_email(self):
email = self.cleaned_data.get("email")
qs = UserProfile.objects.filter(email=email)
if qs.exists():
raise forms.ValidationError("email is taken")
return email
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
            user.save()
        return user
class ProfileForm(forms.ModelForm):
password1 = forms.CharField(
label="Password", widget=forms.PasswordInput)
password2 = forms.CharField(
label="Password Confirmation", widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'username')
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords Don't Match!")
return password2
    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField()
class Meta:
model = UserProfile
fields = ('email', 'first_name', 'last_name', 'location',
'picture')
def clean_password(self):
return self.initial["password"]
class CityForm(forms.ModelForm):
class Meta:
model = City
fields = ['city']
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ['user', 'title', 'body', 'city']
|
participant = ["iksu", "ingyu", "inan", "iksu"]
completion = ["iksu"]
d = {}
for x in participant:
d[x] = d.get(x, 0) + 1
for x in completion:
d[x]-=1
dnf = [k for k,v in d.items() if v > 0] # keys in dict, if v > 0 append to array
print(dnf)
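# Equivalent sketch with collections.Counter (same output for this input):
# from collections import Counter
# diff = Counter(participant) - Counter(completion)  # drops non-positive counts
# print(list(diff))  # ['iksu', 'ingyu', 'inan']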
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.models import Group, User
from .forms import CreateUserForm
from .decorators import unauthenticated_user, allowed_users
@unauthenticated_user
def registerPage(request):
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
user = form.save()
username = form.cleaned_data.get('username')
group = Group.objects.get(name='member')
user.groups.add(group)
messages.success(request, 'An account was created for ' + username )
return redirect('login')
context = {'form' : form }
return render(request, 'app_users/register.html', context)
@unauthenticated_user
def loginPage(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
        # allow logging in with an email address by resolving it to a username
        try:
            user = authenticate(request, username=User.objects.get(email=username).username, password=password)
        except User.DoesNotExist:
            user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username or Password is Incorrect')
return render(request, 'app_users/login.html')
def logoutUser(request):
logout(request)
return redirect('login')
|
import pandas as pd
import yfinance as yf
import csv
import requests
import numpy as np
from pathlib import Path
import sqlalchemy as sql
# Pulling S&P Data from wiki and outputing html
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
# Read html
sp500_html = pd.read_html(url)
# Obtain first table
sp500_html = sp500_html[0]
# Create dataframe
sp500_df = pd.DataFrame(sp500_html)
# sp500_df.head()
sp500_all_sectors_df = pd.DataFrame(
columns=['GICS Sector', 'Symbol'],
data=sp500_df
)
# sp500_all_sectors_df.head()
# Delete index
sp500_df_wo_index = sp500_all_sectors_df.set_index("Symbol")
# sp500_df_wo_index
# isolate symbols in order to pass list to yfinance to get market cap info
sp500_all_symbols = sp500_all_sectors_df['Symbol'].values.tolist()
# sp500_all_symbols
# Wikipedia lists some tickers with a "." (e.g. BRK.B) while Yahoo Finance
# expects a "-" (BRK-B), so convert the symbols before pulling data
# this step might need to go in front of the part where we break the sectors out individually
stocks = []
for stock_ticker in sp500_all_symbols:
ticker = stock_ticker.replace(".","-")
stocks.append(ticker)
# print(stocks)
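# Equivalent one-liner for the conversion above (same behavior, more compact):
# stocks = [t.replace(".", "-") for t in sp500_all_symbols]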
def market_cap(stocks):
market_cap = {}
for stock in stocks:
ticker = yf.Ticker(stock)
market_cap[stock] = ticker.info['marketCap']
    # we want to return a sorted Pandas DataFrame based on market cap
    # since the columns will originally be the tickers, we use ".T" to transpose the table
    # then we use .sort_values to sort by the "first column" [0] in descending order
    # on average this takes about 2400 seconds (40 minutes) to run for the entire SP500
return pd.DataFrame(market_cap, index=[0]).T.sort_values(by=[0], ascending=False)
market_cap_df = market_cap(stocks)
# market_cap_df
# rename the column and index to be merged
market_cap_df.columns = ['Market_Cap']
market_cap_df.index.names = ['Symbol']
# merge sp500_df_wo_index and market_cap_df to create 1 complete data frame to be sliced for analysis
stock_industry_marketcap = pd.merge(sp500_df_wo_index, market_cap_df, left_index=True, right_index=True)
stock_industry_marketcap.sort_values(by=['GICS Sector', 'Market_Cap'], ascending=False, inplace=True)
# save new dataframe to csv to be used in other code
stock_industry_marketcap.to_csv("stock_industry_marketcap.csv")
|
from datetime import datetime
from auth import jwt
import json
import psycopg2
def authenticate(cursor, unique_id, token, secret):
valid, data = jwt.verifyJWT(token, secret)
if valid:
        # fetch token from the database (parameterized to avoid SQL injection)
        query = 'SELECT token FROM tokens WHERE owner_unique=%s;'
        cursor.execute(query, (unique_id,))
        query_results = cursor.fetchone()
cursor.close()
#check if there is such a user
if not query_results:
return (False, 'owner_no_token')
else:
dbtoken = query_results[0]
#check if the token for the user is the one in the database
if not dbtoken == token:
return (False, 'token_different_owner')
else:
return (True, data['owner_unique'])
else:
cursor.close()
return (False, 'token_bad_signature')
def generateToken(cursor, unique_id, secret):
header = {}
header['alg'] = 'HS256'
header['typ'] = 'JWT'
data = {}
data['owner_unique'] = unique_id
#make the token
token = jwt.makeJWT(json.dumps(header), json.dumps(data), secret)
    # check whether there is already a token for the user (parameterized query)
    query = 'SELECT token FROM tokens WHERE owner_unique=%s;'
    cursor.execute(query, (unique_id,))
    result = cursor.fetchone()
    # if there isn't one, insert it into the database; otherwise replace it
    if not result:
        query = 'INSERT INTO tokens (owner_unique, token) VALUES (%s, %s);'
        cursor.execute(query, (unique_id, token))
    else:
        query = 'UPDATE tokens SET token=%s WHERE owner_unique=%s;'
        cursor.execute(query, (token, unique_id))
cursor.close()
return token
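# Minimal usage sketch (assumptions: a local PostgreSQL database with a
# 'tokens' table already exists, and 'secret' holds the HS256 signing key):
# conn = psycopg2.connect(dbname='mydb')
# token = generateToken(conn.cursor(), 'user-123', secret)
# conn.commit()
# ok, owner = authenticate(conn.cursor(), 'user-123', token, secret)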
|
#!/usr/bin/python3
def weight_average(my_list=[]):
if not my_list:
return 0
else:
numerator = float(sum(x[0]*x[1] for x in my_list))
denominator = float(sum(y[1] for y in my_list))
return numerator / denominator
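# Usage sketch: each tuple is (score, weight), so
# weight_average([(1, 2), (2, 3)]) == (1*2 + 2*3) / (2 + 3) == 8/5 == 1.6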
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 11:05:38 2018
@author: fahad
@contributor: Lianxin Zhang
"""
import time
import xlsxwriter
import random
from collections import deque
import globalvar as gl
from ina219 import INA219
from ina219 import DeviceRangeError
def sensor():
global heading
global PWM1
global PWM2
Shunt_OHMS = 0.1 # For this sensor it is 0.1 ohm
try:
print('Starting Current Sensor')
print('Collecting Sensor Values...')
start = time.time() # Start Time
#global DataPoints
DataPoints = deque(maxlen=None) # Creating Array of datatype Deque to store values
a = 0.9664 # Regression Fitting Parameter
b = 0.0285 # Regression Fitting Parameter
ina = INA219(Shunt_OHMS) # Auto Gain
ina.configure()
print('Current Sensor Configured Successfully')
while True:
if gl.get_value('flag'):
#print('Breaking loop')
# Break when flag = True
break
#print('Bus Voltage: %.3f V' % ina.voltage())
try:
#print('Bus Current: %.3f mA' % ina.current())
#print('Power: %.3f mW' % ina.power())
currentvalue = round((a*ina.current())+b) # Rounding off values to nearest integer
voltagevalue = float('{0:.1f}'.format(ina.voltage())) # Floating point up to one decimal point
powervalue = round(currentvalue*voltagevalue)
timevalue = float('{0:.1f}'.format(time.time()-start)) # Elapsed time in Seconds with 1 decimal point floating number
headingvalue = float('{0:.2f}'.format(gl.get_value('heading')))
DataPoints.append([timevalue, currentvalue, voltagevalue, powervalue, gl.get_value('PWM1'), gl.get_value('PWM2'), headingvalue]) # Updating DataPoints Array
#print('Current: ',currentvalue,'Voltage: ',voltagevalue,'Power: ',powervalue)
except DeviceRangeError:
print('Device Range Error')
time.sleep(0.5) # Reading value after 0.5 second
except:
print('Exception Occurred, Current Sensor Stopped \n')
        Wt = input('Do you want to store the sensor values Y/N? ')
        if Wt.lower() == 'y':
            writing(DataPoints)
        else:
            print('Ending without saving sensor data \n')
print('Sensor Stopped!\n')
#------------------------------------------------
#sensor()
def writing(Data):
rnd = random.randint(1,100)
runDate = time.ctime()
workbook = xlsxwriter.Workbook('SensorValues(%d).xlsx'%rnd,{'constant_memory': True}) # Creating XLSX File for Data Keeping
worksheet = workbook.add_worksheet() # Generating worksheet
    bold = workbook.add_format({'bold':True}) # Formatting for bold text
worksheet.write('A1', 'Time', bold) # Writing Column Titles
worksheet.write('B1', 'Current (mA)', bold)
worksheet.write('C1', 'Voltage (v)', bold)
worksheet.write('D1', 'Power (mW)', bold)
worksheet.write('E1', 'PWM1', bold)
worksheet.write('F1', 'PWM2', bold)
worksheet.write('G1', 'Heading Angle', bold)
worksheet.write('H1', 'Start Time', bold)
worksheet.write('H2', runDate)
row = 1 # Starting Row (0 indexed)
col = 0 # Starting Column (0 indexed)
n = len(Data) # Total number of rows
print('Total number of rows: ',n)
print('Writing Data into Worksheet')
for Time, value1, value2, value3, value4, value5, value6 in (Data):
# Writing Data in XLSX file
worksheet.write(row, col, Time)
worksheet.write(row, col+1, value1)
worksheet.write(row, col+2, value2)
worksheet.write(row, col+3, value3)
worksheet.write(row, col+4, value4)
worksheet.write(row, col+5, value5)
worksheet.write(row, col+6, value6)
row += 1
chart1 = workbook.add_chart({'type': 'line'}) # adding chart of type 'Line' for Current values
chart2 = workbook.add_chart({'type': 'line'}) # Chart for Voltage
chart3 = workbook.add_chart({'type': 'line'}) # Chart for Power
chart1.add_series({'name':['Sheet1',0,1],
'categories': ['Sheet1', 1,0,n,0],
'values': ['Sheet1', 1,1,n,1]
})
chart2.add_series({'name':['Sheet1',0,2],
'categories': ['Sheet1', 1,0,n,0],
'values': ['Sheet1', 1,2,n,2]
})
chart3.add_series({'name':['Sheet1',0,3],
'categories': ['Sheet1', 1,0,n,0],
'values': ['Sheet1', 1,3,n,3]
})
chart1.set_title({'name': 'Current Chart'}) # Setting Title name
chart1.set_x_axis({'name': 'Elapsed Time (s)'}) # Setting X-Axis name
chart1.set_y_axis({'name': 'Value'}) # Setting Y-Axis name
chart2.set_title({'name': 'Voltage Chart'})
chart2.set_x_axis({'name': 'Elapsed Time (s)'})
chart2.set_y_axis({'name': 'Value'})
chart3.set_title({'name': 'Power Chart'})
chart3.set_x_axis({'name': 'Elapsed Time (s)'})
chart3.set_y_axis({'name': 'Value'})
    chart1.set_style(8) # Setting Chart Colors
    chart2.set_style(5)
    chart3.set_style(9)
worksheet.insert_chart('D2', chart1, {'x_offset': 25, 'y_offset': 10}) # Inserting Charts in the Worksheet
worksheet.insert_chart('D2', chart2, {'x_offset': 25, 'y_offset': 10}) # //
worksheet.insert_chart('D5', chart3, {'x_offset': 25, 'y_offset': 10}) # //
workbook.close() # Closing Workbook
time.sleep(1)
    print('Sensor Writing successful \n')
#-------------------------------------------------
|
def print_frames(audio_frames):
print(audio_frames)
|
from collections import OrderedDict
import json
import os
import matplotlib.pyplot
import numpy as np
import sklearn.cluster
import distance
from nltk.stem.wordnet import WordNetLemmatizer
class HelperFunctions(object):
ASC = 1
DESC = 0
KEY = 0
VALUE = 1
@staticmethod
def get_ordered_dict(original_dict, order, element):
'''
        Orders a dictionary by its keys or values.
        :param original_dict: the dictionary to sort
        :param order: must be set to ASC or DESC
        :param element: must be set to KEY or VALUE
        :return: an ordered dictionary
'''
ordered_tags = OrderedDict(sorted(original_dict.items(),
key=lambda kv: kv[element], reverse=order))
return ordered_tags
@staticmethod
def get_dict_from_json(file_name):
'''
:param file_name:
:return a dictionary:
'''
current_path = os.path.dirname(__file__)
with open(file_name, 'r') as file_reader:
dict_tag = json.load(file_reader)
return dict_tag
@staticmethod
def print_dict(dict_to_be_printed):
'''
Prints a dictionary to the console
:param dict_to_be_printed:
'''
for key, value in dict_to_be_printed.iteritems():
print key, value
@staticmethod
def group_tags_by_subtags(original_tags):
tags = original_tags.keys()
list = []
for first_tag in tags:
similar_tags = [first_tag]
for second_tag in tags:
if first_tag in second_tag and first_tag != second_tag:
similar_tags.append(second_tag)
# tags.remove(second_tag)
# print similar_tags
list.append(similar_tags)
grouped_tags = []
exceptions = ['c', 'int', 'r']
for el in list:
if len(el) > 2 and el[0] not in exceptions:
grouped_tags.append(el)
for grouped_tag in grouped_tags:
for index in xrange(1, len(grouped_tag)):
original_tags[grouped_tag[0]] += original_tags[grouped_tag[index]]
for grouped_tag in grouped_tags:
for index in xrange(1, len(grouped_tag)):
if grouped_tag[index] in original_tags.keys():
del original_tags[grouped_tag[index]]
return original_tags
@staticmethod
def plot(tags):
new_dict = {'others': 0}
i = 0
for elem, key in tags.iteritems():
if i < 19:
new_dict[elem] = key
else:
new_dict['others'] += key
i += 1
new_d = OrderedDict(sorted(new_dict.items(),
key=lambda kv: kv[1], reverse=True))
total_sum = 0
for elem, key in new_d.iteritems():
total_sum += key
# print elem, key
for elem, key in new_d.iteritems():
# print elem,key,total_sum
new_d[elem] = float(key) / total_sum * 100
for elem, key in new_d.iteritems():
print elem, key
matplotlib.pyplot.pie([float(v) for v in new_d.values()], labels=[(k) for k in new_d.keys()],
autopct=None)
matplotlib.pyplot.savefig('initial_pie_chart.jpg')
matplotlib.pyplot.show()
if __name__ == '__main__':
test = {'python': 2, 'flask': 4, 'a': 1, 'b': 3, 'c': 2}
print 'Started'
print HelperFunctions.ASC
print (HelperFunctions.KEY)
new_test = HelperFunctions.get_ordered_dict(test, HelperFunctions.ASC, HelperFunctions.KEY)
print new_test
print HelperFunctions.get_dict_from_json('DataFiles/tags.json')
|
import requests
import json
import random
VK_API = 'https://api.vk.com/method/{0}?{1}'
class VkApi:
def __init__(self, domain, method_name):
self.domain = domain
self.method_name = method_name
self.count = '10'
if domain.startswith('-') == False:
self.paramaters = 'domain={0}&count={1}'.format(
self.domain,
self.count
)
if domain.startswith('-'):
self.paramaters = 'owner_id={0}&count={1}'.format(
self.domain,
self.count
)
self.json_object = requests.get(VK_API.format(
self.method_name,
self.paramaters
)
)
self.response = self.json_object.json()['response']
def generate_video_list(self):
video_list = []
vid = 0
video_owner_id = 0
for dict_response in self.response[1:]:
attachments = dict_response['attachments'][0]
if 'video' in attachments:
video_owner_id = attachments['video']['owner_id']
vid = attachments['video']['vid']
video_list.append(vid)
return video_list, video_owner_id
def generate_photo_list(self):
photos = []
for dict_response in self.response[1:]:
attachments = dict_response['attachments'][0]
if 'photo' in attachments:
link_of_photo = attachments['photo']['src_big']
photos.append(link_of_photo)
return photos
def generate_gif_list(self):
gifs = []
for dict_response in self.response[1:]:
attachments = dict_response['attachments'][0]
if 'doc' in attachments:
link_of_gif = attachments['doc']['url']
gifs.append(link_of_gif)
return gifs
def generate_post_list(self):
posts = []
for dict_response in self.response[1:]:
posts.append(dict_response['id'])
owner_id = dict_response['from_id']
return posts, owner_id
class VkRandom:
def __init__(self, posts, video, photos, gifs):
self.posts = posts
self.video = video
self.photos = photos
self.gifs = gifs
def random_post(self):
if len(self.posts) > 0:
random_number = random.randint(0, len(self.posts) - 1)
return str(self.posts[random_number])
def last_post(self):
if len(self.posts) > 0:
return str(self.posts[1])
def random_video(self):
if len(self.video) > 0:
random_number = random.randint(0, len(self.video) - 1)
return str(self.video[random_number])
def random_photo(self):
if len(self.photos) > 0:
random_number = random.randint(0, len(self.photos) - 1)
return str(self.photos[random_number])
def random_gif(self):
if len(self.gifs) > 0:
random_number = random.randint(0, len(self.gifs) - 1)
return str(self.gifs[random_number])
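    # Note: indexing with random.randint(0, len(seq) - 1) as above is
    # equivalent to random.choice(seq), e.g. str(random.choice(self.photos)).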
class VkLinks:
def __init__(self, domain, group_id, post_id, video_owner_id, vid, last):
self.domain = domain
self.group_id = group_id
self.post_id = post_id
self.video_owner_id = video_owner_id
self.vid = vid
self.last = last
    def generate_link(self):
        if not self.domain.startswith('-') and len(self.post_id) > 1:
            link = 'https://vk.com/{0}?w=wall{1}_{2}'.format(
                self.domain,
                str(self.group_id),
                str(self.post_id)
            )
            return str(link)
        if self.domain.startswith('-') and len(self.post_id) > 1:
            link = 'https://vk.com/club?{0}w=wall{1}_{2}'.format(
                self.domain[1:],
                str(self.group_id),
                str(self.post_id)
            )
            return str(link)
        return None
    def generate_last_link(self):
        if not self.domain.startswith('-') and len(self.post_id) > 1:
            link = 'https://vk.com/{0}?w=wall{1}_{2}'.format(
                self.domain,
                str(self.group_id),
                str(self.last)
            )
            return str(link)
        if self.domain.startswith('-') and len(self.post_id) > 1:
            link = 'https://vk.com/club?{0}w=wall{1}_{2}'.format(
                self.domain[1:],
                str(self.group_id),
                str(self.last)
            )
            return str(link)
        return None
    def generate_video_link(self):
        if not self.domain.startswith('-') and len(self.vid) > 1:
            if self.video_owner_id > 1:
                link = 'https://vk.com/{0}?z=video{1}_{2}'.format(
                    self.domain,
                    str(self.video_owner_id),
                    str(self.vid)
                )
                return str(link)
            return None
        if self.domain.startswith('-') and len(self.vid) > 1:
            if self.video_owner_id > 1:
                link = 'https://vk.com/club{0}?z=video{1}_{2}'.format(
                    self.domain[1:],
                    str(self.video_owner_id),
                    str(self.vid)
                )
                return str(link)
            return None
        return None
class GenerateElements:
def __init__(self, post_link, video_link, photo_link, gif_link, last_link):
self.post_link = post_link
self.video_link = video_link
self.photo_link = photo_link
self.gif_link = gif_link
self.last_link = last_link
def generate_vk_post(self):
return self.post_link
def generate_vk_last_post(self):
return self.last_link
def generate_vk_video(self):
return self.video_link
def generate_vk_photo(self):
return self.photo_link
def generate_vk_gif(self):
return self.gif_link
def vk_main(domain, method_name):
    vkapi = VkApi(domain, method_name)
    # parse the wall response once and reuse the results
    posts, group_id = vkapi.generate_post_list()
    videos, video_owner_id = vkapi.generate_video_list()
    vkrandom = VkRandom(
        posts,
        videos,
        vkapi.generate_photo_list(),
        vkapi.generate_gif_list()
    )
    links = VkLinks(
        domain,
        group_id,
        vkrandom.random_post(),
        video_owner_id,
        vkrandom.random_video(),
        vkrandom.last_post()
    )
    generate_elements = GenerateElements(
        links.generate_link(),
        links.generate_video_link(),
        vkrandom.random_photo(),
        vkrandom.random_gif(),
        links.generate_last_link()
    )
    return generate_elements
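# A minimal usage sketch (assumes VkApi is the wall-fetching class these
# parser methods belong to, and 'some_group' is a hypothetical short name):
# elements = vk_main('some_group', 'wall.get')
# print(elements.generate_vk_post())   # link to a random post, or None
# print(elements.generate_vk_photo())  # direct photo URL, or None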
|
# (c) 2012 Urban Airship and Contributors
from django.test import TestCase
from mithril.decorators import exempt, resettable
import random
class TestOfMithrilDecorators(TestCase):
def test_exempt_attaches_appropriate_flag(self):
anything = lambda *a: a
expected = random.randint(0, 10)
anything = exempt(anything)
self.assertTrue(anything.mithril_exempt)
self.assertEqual(anything(expected), (expected,))
def test_resettable_attaches_arg_to_fn(self):
anything = lambda *a: a
expected = random.randint(0, 10)
anything = resettable(expected)(anything)
self.assertEqual(anything.mithril_reset, expected)
self.assertEqual(anything(expected), (expected,))
|
n = int(input("Enter number n: "))
m = int(input("Enter number m: "))
if n % 10 > m % 10:
print(n)
elif n % 10 < m % 10:
print(m)
else:
if n > m:
print(n)
elif n < m:
print(m)
else:
print("n is equal to m")
|
# encoding:utf-8
__author__ = 'hanzhao'
def run(msg):
    print('[info] loading the Mofang helper module...')
    if '<br/>' in msg:  # group chat message: strip the sender prefix
        [FromUser, msg] = msg.split('<br/>')
    else:  # direct message: nothing to strip
        pass
    if msg in ['打开计时器', '.计时器']:  # trigger phrases ("open timer" / ".timer")
        print('[tool] automatic reply')
        return 'http://zhtimer.cn/htimer/simple'
    else:
        print('[tool] no matching command')
        return None
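# Example calls (the '<sender><br/><text>' message format is an assumption
# based on the group-chat branch above):
# run('Alice<br/>.计时器')  # -> 'http://zhtimer.cn/htimer/simple'
# run('Alice<br/>hello')    # -> None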
|
from flask import Flask, render_template, request, session, redirect, url_for
##import utils
app = Flask(__name__)
# A secret key is required for session support; the value here is a
# placeholder and should be replaced with a real secret.
app.secret_key = "change-me"
@app.route("/")
@app.route("/home")
@app.route("/home/")
def home():
if "logged_in" in session and session["logged_in"]:
return render_template("home.html")
else:
return redirect(url_for("login"))
@app.route("/login", methods = ["GET","POST"])
@app.route("/login/", methods = ["GET","POST"])
def login():
    if request.method == "GET":
        if "logged_in" in session and session["logged_in"]:
            return redirect(url_for("home"))
        else:
            return render_template("login.html")
    else:
        # POST handling (credential check) is not implemented yet
        return "not get"
if __name__ == "__main__":
    app.debug = True
    app.run(host="0.0.0.0", port=8000)
|
# Find two-digit "cancelling fractions": (10a+b)/(10b+c) equals a/c after
# striking the shared digit b. Integer cross-multiplication avoids
# float-equality pitfalls.
for a in range(1, 10):
    for b in range(1, 10):
        for c in range(a + 1, 10):
            if c * (10 * a + b) == a * (10 * b + c):
                print(10 * a + b, 10 * b + c)
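# This prints the four classic anomalous-cancellation pairs:
# 16 64, 19 95, 26 65, 49 98 (e.g. 16/64 == 1/4 after "cancelling" the 6s).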
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from .models import *
from .forms import *
from django.views.generic import TemplateView, CreateView, DetailView, UpdateView, DeleteView
# Create your views here.
#def index(request):
# return HttpResponse("You're at the login index.")
class ClientesView(TemplateView):
    # class-based listing view; the function-based clientes_view below
    # renders the same template with an explicit queryset
    template_name = 'ver_clientes.html'
    def get_context_data(self, **kwargs):
        context = super(ClientesView, self).get_context_data(**kwargs)
        return context
def DetalleCliente(request, id):
    empresas = Empresa.objects.filter(Cliente=id)
    return render(request, 'detalle_cliente.html', {'empresas': empresas})
def clientes_view(request):
    clientes = Cliente.objects.all()
    return render(request, 'ver_clientes.html', {'clientes': clientes})
def principalcliente_view(request):
return render(request,'principal_clientes.html', {})
def nuevocliente_view(request):
    if request.method == 'POST':
        formulario = clientesForm(request.POST)
        formulario2 = empresaForm(request.POST)
        formulario3 = sucursalForm(request.POST)
        if formulario.is_valid() and formulario2.is_valid() and formulario3.is_valid():
            # save the client first, then chain the foreign keys downwards
            cliente = formulario.save()
            empresa = formulario2.save(commit=False)
            empresa.Cliente = cliente
            empresa.save()
            sucursal = formulario3.save(commit=False)
            sucursal.Empresa = empresa
            sucursal.save()
            return HttpResponseRedirect('/clientes/nuevocliente/')
    else:
        formulario = clientesForm()
        formulario2 = empresaForm()
        formulario3 = sucursalForm()
    return render(request, 'nuevo_cliente.html', {'formulario': formulario, 'formulario2': formulario2, 'formulario3': formulario3})
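# A hypothetical URLconf wiring for these views (the project's real urls.py
# is not shown in this snippet; names and paths below are assumptions):
# from django.conf.urls import url
# from . import views
# urlpatterns = [
#     url(r'^clientes/$', views.clientes_view),
#     url(r'^clientes/(?P<id>\d+)/$', views.DetalleCliente),
#     url(r'^clientes/nuevocliente/$', views.nuevocliente_view),
# ]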
|
# If temperature is greater than 30, it's a hot day; otherwise, if it's less
# than 10, it's a cold day; otherwise, it's neither hot nor cold.
temperature = int(input("enter the temperature: "))
if temperature > 30:
    print("it's a hot day")
elif temperature < 10:
    print("it's a cold day")
else:
    print("it's neither hot nor cold")
|
from OOP.PlanetSystem_VV import solarsystem, planet
import numpy as np
n = 10000
h = 0.01
Earth_mass = 6.0E24/2.0E30  # Earth mass in solar masses
Sun_mass = 1
Earth_posx = 1.0
Earth_posy = 0
Sun_posx = 0
Sun_posy = 0
Earth_velx = 0
velocities = np.linspace(8.5, 9.5, num=100)
# Grid search over possible initial velocities
for Earth_vely in velocities:
    planetlist = [[0, 0, 0, 0, 1, "sun"],
                  [Earth_posx, Earth_posy, Earth_velx, Earth_vely, Earth_mass, "earth"]]
    Model = solarsystem(h, n, planetlist)
    Model.run()
    print("ending radius ", np.sqrt(Model.planets[1].x**2 + Model.planets[1].y**2), " for a velocity of ", Earth_vely)
    # accept the velocity as the escape velocity if the planet gets past 100 AU
    if np.sqrt(Model.planets[1].x**2 + Model.planets[1].y**2) > 100:
        print("Escape velocity (AU/yr): ", Earth_vely)
        break
Model.displaypaths()
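# Rough analytic check (assuming the solver uses G = 4*pi^2 in units of AU,
# years and solar masses, which the searched range suggests): the escape
# velocity at 1 AU is sqrt(2 * 4*pi^2 / 1) = 2*pi*sqrt(2) ≈ 8.886 AU/yr,
# comfortably inside the 8.5-9.5 grid.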
|