text stringlengths 8 6.05M |
|---|
from django.apps import AppConfig
class MultipleauthConfig(AppConfig):
    """Django application config for the ``multipleauth`` app."""
    name = 'multipleauth'
|
import contextlib
import datetime
import json
import logging
import random
import string
import pytest
from django.db import transaction
from django.utils import timezone
from oauth2_provider.models import AccessToken, Application
from share.models import NormalizedData, RawDatum
from share.models import ShareUser
from share.models import SourceUniqueIdentifier
from tests import factories
from tests.share.normalize.factories import GraphBuilder
logger = logging.getLogger(__name__)
@pytest.fixture
def client():
    """Django test client whose JSON parsing insists on the JSON:API content type."""
    from django.test.client import Client

    class JSONAPIClient(Client):
        def _parse_json(self, response, **extra):
            content_type = response.get('Content-Type')
            if 'application/vnd.api+json' not in content_type:
                raise ValueError(
                    'Content-Type header is "{0}", not "application/vnd.api+json"'
                    .format(content_type)
                )
            return json.loads(response.content.decode(), **extra)

    return JSONAPIClient()
@pytest.fixture(autouse=True)
def apply_test_settings(settings):
settings.CELERY_ALWAYS_EAGER = True
@pytest.fixture
def trusted_user():
user = ShareUser(username='trusted_tester', is_trusted=True)
user.save()
return user
@pytest.fixture
def robot_user(settings):
username = 'robot_tester'
user = ShareUser.objects.create_robot_user(username=username, robot='Tester')
application_user = ShareUser.objects.get(username=settings.APPLICATION_USERNAME)
application = Application.objects.get(user=application_user)
client_secret = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(64))
AccessToken.objects.create(
user=user,
application=application,
expires=(timezone.now() + datetime.timedelta(weeks=20 * 52)), # 20 yrs
scope=settings.HARVESTER_SCOPES,
token=client_secret
)
return user
@pytest.fixture
def system_user(settings):
return ShareUser.objects.get(username=settings.APPLICATION_USERNAME)
@pytest.fixture
def share_user():
    """A plain ShareUser with an associated Source."""
    user = ShareUser(
        username='tester',
    )
    user.save()
    # Attach a source to the user; the factory links it via `user=`.
    # (Removed a stray trailing comma that wrapped this call's result
    # in a discarded 1-tuple.)
    factories.SourceFactory(user=user)
    return user
@pytest.fixture
def source(share_user):
return share_user.source
@pytest.fixture
def source_config():
return factories.SourceConfigFactory()
@pytest.fixture
def suid(source_config):
suid = SourceUniqueIdentifier(identifier='this is a record', source_config=source_config)
suid.save()
return suid
@pytest.fixture
def raw_data(suid):
raw_data = RawDatum(suid=suid, datum='{}')
raw_data.save()
return raw_data
@pytest.fixture
def raw_data_id(raw_data):
return raw_data.id
@pytest.fixture
def normalized_data(share_user):
normalized_data = NormalizedData(source=share_user, data={})
normalized_data.save()
return normalized_data
@pytest.fixture
def normalized_data_id(normalized_data):
return normalized_data.id
@pytest.fixture
def Graph():
return GraphBuilder()
@pytest.fixture
def ExpectedGraph(Graph):
def expected_graph(*args, **kwargs):
return Graph(*args, **kwargs, normalize_fields=True)
return expected_graph
@contextlib.contextmanager
def rolledback_transaction(loglabel):
    """Run the body inside a transaction that is always rolled back.

    After the body finishes, an internal exception is raised so that
    ``transaction.atomic`` aborts instead of committing; that exception is
    then swallowed here.
    """
    class ExpectedRollback(Exception):
        pass
    try:
        with transaction.atomic():
            print(f'{loglabel}: started transaction')
            yield
            raise ExpectedRollback('this is an expected rollback; all is well')
    except ExpectedRollback:
        print(f'{loglabel}: rolled back transaction (as planned)')
    else:
        # Defensive: only reachable if the body somehow exits without the
        # ExpectedRollback being raised (e.g. the exception was suppressed).
        raise ExpectedRollback('expected a rollback but did not get one; something is wrong')
@pytest.fixture(scope='class')
def class_scoped_django_db(django_db_setup, django_db_blocker, request):
"""a class-scoped version of the `django_db` mark
(so we can use class-scoped fixtures to set up data
for use across several tests)
recommend using via the `nested_django_db` fixture,
or use directly in another class-scoped fixture.
"""
with django_db_blocker.unblock():
with rolledback_transaction(f'class_scoped_django_db({request.node})'):
yield
@pytest.fixture(scope='function')
def nested_django_db(class_scoped_django_db, request):
"""wrap each function and the entire class in transactions
(so fixtures can have scope='class' for reuse across tests,
but what happens in each test stays in that test)
recommend using via the `nested_django_db` mark
"""
with rolledback_transaction(f'nested_django_db({request.node})'):
yield
|
# Read a count (unused afterwards) and a whitespace-separated list of
# tokens, then print each token followed by its zero-based index.
count = int(input())
tokens = input().split()
for index, token in enumerate(tokens):
    print(token, index)
|
"""
csrf 通过服务器生成随机值,通过cookie下发到浏览器,在POST提交数据时携带此随机值到server,保证请求的合法性 。
# html页面的form加入
{% csrf_token %}
"""
"""
cors - 跨域访问管理
其主要是对 HEADER,HTTP-METHOD的控制
浏览器在跨域请求时提交 OPTIONS请求到目标server,server返回是否支持 GET/POST/PUT/DELETE/FETCH等操作.
CORS还可以返回客户机http请求header中可以包含哪些字段
"""
""""
pip install djang-cors-headers
"""
# Enable django-cors-headers' middleware so cross-origin requests
# (including OPTIONS preflights) get CORS headers attached.
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
)
# Accept requests from any origin (development-style; restrict in production).
CORS_ORIGIN_ALLOW_ALL = True
# HTTP methods a cross-origin client may use.
CORS_ALLOW_METHODS = (
    'GET',
    'POST',
    'PUT',
    'PATCH',
    'DELETE',
    'OPTIONS'
)
# Do not forward cookies/credentials on cross-origin requests.
CORS_ALLOW_CREDENTIALS = False
# Request headers a cross-origin client may send.
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken',
    'if-version',
    'session-token',
    'token'
)
"""
ajax使用django生成的csrf-token
虽然开启了csrf
'django.middleware.csrf.CsrfViewMiddleware',
但实际上django的response并没有产生csrftoken返回到前端,
这会影响前端ajax程序在发送POST,UPDATE,DELETE,PATCH无法发送有效的csrftoken而请求无效.
{% csrf_token %}通过form field传递的csrftoken另当别论。
要解决这个问题,请见 django.middle.ware.csrf 的 process_response()处理函数,其检查
request.META.get("CSRF_COOKIE_USED") 是否设置,未设置则不会返回csrf的cookie
1. get_token(request) 函数可以生成新的csrftoken的cookie,所以只需要调用一下get_token()即可
2. 编写新的middleware,从 CsrfViewMiddleware派生, process_response()中调用get_token()即可。
ajax使用csrftoken最简单的方式使用 restframework/js/csrf.js
"""
|
#! python3
# please install tkcalendar and babel (if needed) before using.
from datetime import datetime
try:
import tkinter as tk
from tkinter import ttk
except ImportError:
import Tkinter as tk
import ttk
from tkcalendar import Calendar, DateEntry
# convert string to date with multiple formats
def str_to_date(date_text):
    """Parse *date_text* using the first of several common date formats that matches."""
    formats = ('%Y-%m-%d', '%Y/%m/%d', '%d-%m-%Y', '%d/%m/%Y')
    for fmt in formats:
        try:
            return datetime.strptime(date_text, fmt).date()
        except ValueError:
            continue
    raise ValueError('No valid date format found, see the list below for valid date formats:\n 1. %Y-%m-%d\n 2. %Y/%m/%d\n 3. %d-%m-%Y\n 4. %d/%m/%Y')
class Todo():
    """In-memory to-do item; instances register themselves in a class-level list."""
    # Class-level registry of all Todo instances, kept sorted by start_date.
    __TodoList = []
    # Grows by one per created instance; its length mints sequential IDs.
    __IDcount = []
    def __init__(self, work_name, start_date, end_date, status):
        self.ID = 'ID_' + str(len(Todo.__IDcount) + 1)
        self.work_name = work_name
        self.start_date = str_to_date(start_date)
        self.end_date = str_to_date(end_date)
        if status in ['Planning', 'Doing', 'Complete']:
            self.status = status
        else:
            # Invalid status: report and bail out WITHOUT registering the job.
            print('The status can only be: Planning, Doing or Complete')
            return
        Todo.__TodoList.append(self)
        Todo.__IDcount.append(len(Todo.__IDcount) + 1)
        Todo.__sort()
    # NOTE(review): the helpers below take no `self`; they are only ever called
    # on the class itself (via name mangling), effectively as static methods.
    def __sort():
        """Keep the registry ordered by start date."""
        Todo.__TodoList.sort(key = lambda x: x.start_date)
    def __edit(ID, **kwargs):
        """Update fields of the job with the given ID; date strings are parsed."""
        for job in Todo.__TodoList:
            if job.ID == ID:
                for k,w in kwargs.items():
                    # `|` is bitwise-or on bools here; equivalent to `or` for these operands.
                    if (k == 'start_date') | (k == 'end_date'):
                        w = str_to_date(w)
                    job.__dict__[k] = w
        Todo.__sort()
    def __delete(ID):
        """Remove the job with the given ID from the registry."""
        for job in Todo.__TodoList:
            if job.ID == ID:
                # NOTE(review): removes from the list being iterated; safe here
                # only because IDs are unique (at most one removal).
                Todo.__TodoList.remove(job)
        Todo.__sort()
# Controller functions
def TDL_add(work_name, start_date, end_date, status):
    """Create a new Todo entry and confirm on stdout."""
    Todo(work_name, start_date, end_date, status)
    print('Your job is added.')
def TDL_edit(ID, **kwargs):
    """Edit fields of the job with the given ID (dates given as strings)."""
    Todo._Todo__edit(ID, **kwargs)
    print('Your job is edited.')
def TDL_delete(ID):
    """Delete the job with the given ID."""
    Todo._Todo__delete(ID)
    print('Your job is deleted.')
def TDL_view():
    """Show all jobs on a tkcalendar Calendar window (blocks in mainloop)."""
    TDL = Todo._Todo__TodoList.copy()
    root = tk.Tk()
    root.title('To do list viewer')
    cal = Calendar(root)
    for job in TDL:
        cal.calevent_create(job.start_date, job.work_name, job.status)
    view_config(cal)
    # Bring the window to the front, then release topmost so it behaves normally.
    root.lift()
    root.attributes('-topmost', True)
    root.attributes('-topmost', False)
    root.mainloop()
def TDL_print():
    """Print every job with its ID, dates and status."""
    jobs = Todo._Todo__TodoList.copy()
    for number, job in enumerate(jobs, start=1):
        print('{}. {}'.format(number, job.work_name))
        print('Job\'s ID: {} (use this to refer to the job)'.format(job.ID))
        print('Start Date: ' + str(job.start_date))
        print('End Date: ' + str(job.end_date))
        print('Status: ' + job.status)
# View configurations
def view_config(cal_obj):
    """Color-code calendar events by status tag and fill the window."""
    cal_obj.tag_config('Planning', background = 'blue')
    cal_obj.tag_config('Doing', background = 'red')
    cal_obj.tag_config('Complete', background = 'green')
    cal_obj.pack(fill = 'both')
|
from hcj import snds
from pippi import dsp, tune
def play(ctl):
    """Render one randomized, layered piano chord (pippi instrument callback)."""
    pianos = ['sawvib', 'piano', 'pianooct1', 'harp', 'saw']
    # NOTE(review): immediately overwritten — only the two sounds below are used.
    pianos = ['sawvib', 'piano']
    piano = snds.load('genie/%s.wav' % dsp.randchoose(pianos))
    # Scale degrees 1/3/5, each possibly shifted up an octave (+8 degrees).
    notes = [1,3,5]
    chord = tune.fromdegrees([ dsp.randchoose(notes) + (dsp.randchoose([0, 1]) * 8) for _ in range(dsp.randint(1, 3)) ], octave=0)
    length = dsp.stf(dsp.rand(1, 4))
    layers = []
    for freq in chord:
        # Pitch the sample relative to 220 Hz (A3).
        p = dsp.transpose(piano, freq / 220.0)
        p = dsp.amp(p, 0.35)
        #p = dsp.pan(p, dsp.rand())
        p = dsp.fill(p, length)
        if dsp.rand() > 0.25:
            # ~75% of layers: chop into 100-500ms grains, randomize each
            # grain's pan/amp/pitch, taper, pad, shuffle, then rejoin.
            p = dsp.vsplit(p, dsp.mstf(100), dsp.mstf(500))
            p = [ dsp.pan(pp, dsp.rand()) for pp in p ]
            p = [ dsp.amp(pp, dsp.rand(0,2)) for pp in p ]
            p = [ dsp.transpose(pp, dsp.randchoose([1,2,4,8])) for pp in p ]
            p = [ dsp.taper(pp, 20) for pp in p ]
            p = [ dsp.pad(pp, 0, dsp.mstf(dsp.rand(0, 100))) for pp in p ]
            p = dsp.randshuffle(p)
            # Presumably pippi buffers are strings here, so join concatenates
            # the audio segments — TODO confirm against the pippi version in use.
            p = ''.join(p)
        if dsp.rand() > 0.75:
            p = dsp.alias(p)
        #p = dsp.env(p, 'phasor')
        layers += [ p ]
    out = dsp.mix(layers)
    return out
|
# -*- coding: utf-8 -*-
import requests
def get_money(pair, base):
    """Return the exchange rate of *pair* against *base* (fixer.io) as a string."""
    payload = requests.get('https://api.fixer.io/latest?base=' + base).json()
    return str(payload['rates'][pair])
def get_btc():
    """Return the last BTC/USD price (yobit) as a string with ' usd' appended."""
    payload = requests.get('https://yobit.net/api/2/btc_usd/ticker').json()
    return str(payload['ticker']['last']) + ' usd'
import pytest
from person import Person
class TestPerson(object):
    """Tests for Person construction and attribute values."""
    @pytest.fixture
    def person(self):
        # A fresh Person per test.
        return Person('Daisuke', 99)
    def test_type(self, person):
        assert isinstance(person, Person)
    def test_val(self, person):
        assert person.age == 99
class TestPerson2(object):
    """NOTE(review): exact duplicate of TestPerson — consider parametrizing."""
    @pytest.fixture
    def person(self):
        return Person('Daisuke', 99)
    def test_type(self, person):
        assert isinstance(person, Person)
    def test_val(self, person):
        assert person.age == 99
if __name__ == '__main__':
pytest.main()
|
# Create a file, then demonstrate several ways of reading it back.
# (Comments translated from Korean.)

# Creating a file: file object = open(filename, mode)
with open('새파일.txt', 'w') as f:
    for i in range(1, 11):
        # BUG FIX: the original wrote '%d rine.\n' — typo for 'line'.
        f.write('%d line.\n' % i)

# Several ways of reading a file stored outside the program.
# Using readline(): one line at a time.
with open('새파일.txt', 'r') as f:
    line = f.readline()
print('한 줄만 출력 : ')
print(line)
print()

print('여러줄 출력: ')
with open('새파일.txt', 'r') as f:
    while True:
        line = f.readline()
        if not line:
            break
        print(line)
print()

# Using readlines(): returns a list with one element per line.
print('readlines() 함수 이용')
with open('새파일.txt', 'r') as f:
    for line in f.readlines():
        print(line)

# Using read(): the whole file as one string.
print('read() 함수 이용')
with open('새파일.txt', 'r') as f:
    print(f.read())
print()

# Appending new content to the file.
print('내용 추가')
with open('새파일.txt', 'a') as f:
    for i in range(11, 20):
        # NOTE(review): unlike the writes above there is no '\n' here, so the
        # appended entries run together on one line — confirm that is intended.
        f.write('%d line.' % i)
print()

# Using the with statement (files are closed automatically).
with open('foo.txt', 'w') as f:
    f.write('Life is too short,you need python\n')
with open('foo.txt', 'a') as f:
    f.write('Life is too long')
|
# Read numbers until the user types 'done' (or enters a blank line),
# then print them sorted along with the max and min.
num = []
while True:
    x = input("Enter a number or type done if no more:" )
    if x == 'done' or len(x) < 1:
        break
    # (The original guarded this with `elif x != 'done'`, which was redundant:
    # the loop has already broken for 'done'.)
    num.append(x)
mx = [float(i) for i in num]
mx.sort()
print(mx)
print("max :", max(mx))
print("min :", min(mx))
#!/usr/bin/python
# Print exported Openbox keybindings in conky markup.
import xml.etree.ElementTree as etree

ns = {"openbox_config": "http://openbox.org/3.4/rc"}
keybindings = []
root = etree.parse("/home/xavier/.config/openbox/rc.xml").getroot()
keyboard = root.find("openbox_config:keyboard", ns)
for keybind in keyboard.findall("openbox_config:keybind", ns):
    if keybind.get("export") != "true":
        continue
    kbdg = [keybind.get("key")]
    action = keybind[0]
    if action is not None:
        startup = action[0]
        if startup is not None and len(startup) > 0:
            name = startup[1]
            if name is not None and name.text is not None:
                kbdg.append(name.text)
    if len(kbdg) < 2:
        # No startup-notification name found; fall back to the action's name.
        kbdg.append(keybind.find("openbox_config:action", ns).get("name"))
    keybindings.append(kbdg)
    print("${color grey}" + kbdg[0] + "${tab 20}$color " + kbdg[1])
#print(keybindings)
|
from django.contrib import admin
from .models import Sudoku
# Expose the Sudoku model in the Django admin site.
admin.site.register(Sudoku)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import utils
def run(string, entities):
    """Leon says good bye (returns the 'end' output with the translated answer)."""
    return utils.output('end', 'good_bye', utils.translate('good_bye'))
|
L = 1
R = 1000
def check_arm(num):
    """Return 1 if *num* equals the sum of the cubes of its digits, else 0.

    The power is fixed at 3 (as in the original); a general Armstrong test
    would use the digit count as the exponent.
    """
    total = 0
    n = num
    while n:
        # divmod replaces the original num % 10 / int(num / 10) pair; using
        # floor division also avoids float rounding for very large ints.
        n, dig = divmod(n, 10)
        total += dig ** 3
    return 1 if total == num else 0
num_arm = 0
for num in range(L,R):
num_arm+=check_arm(num)
print(num_arm)
|
# Split the first two tab-separated columns of hightemp.txt into
# col1.txt and col2.txt, one value per line.
with open("hightemp.txt") as f:
    lines = f.readlines()
# (Removed the unused `cols` variable computed from the first line.)
with open("col1.txt", "w") as f1, open("col2.txt", "w") as f2:
    for l in lines:
        splitted = l.split("\t")
        # Each input row is expected to have exactly 4 tab-separated fields.
        # NOTE(review): `assert` is stripped under -O; raise if this must hold.
        assert len(splitted) == 4
        f1.write("{}\n".format(splitted[0]))
        f2.write("{}\n".format(splitted[1]))
|
import pickle
import requests

# Download the iris dataset, split it into lines, and load a previously
# pickled copy back from disk.
irisdata = requests.get("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data")
# (The original wrapped this in iter() then list() — split already returns a list.)
p = irisdata.text.split("\n")
# create pickle (kept disabled, as in the original)
"""file = "iris.pkl"
fileobj = open(file,'wb')
pickle.dump(p,fileobj)
fileobj.close()"""
file = "iris.pkl"
# with-statement guarantees the handle is closed (the original never closed it).
with open(file, 'rb') as fileobj:
    irris = pickle.load(fileobj)
print(irris)
|
import math

# Rounding and absolute value built-ins.
x = 2.9
print(round(x))
print(abs(-2.9))

# Math module
print(math.ceil(2.9))
print(math.floor(2.9))

# If statements
# is_hot = True
is_hot = False
is_cold = False
if is_hot:
    print("It's a hot day")
    print("Drink plenty of water")
elif is_cold:
    print("Its a cold day")
    print("Wear warm cloths")
else:
    print("It's a lovely day")
    print("Enjoy your day.")

price = 1000000
has_good_credit = False
down_payment = (price * 10) / 100 if has_good_credit else (price * 20) / 100
# print(F"Down Payment: ${down_payment}")

# Logical operators
has_high_income = False
has_good_credit = True
if has_good_credit and has_high_income:
    print("Eligible for Loan-and")
if has_good_credit or has_high_income:
    print("Eligible for Loan-or")
has_criminal_record = True
if has_good_credit and not has_criminal_record:
    print("Eligible for Loan-and-not")

temperature = 30
if temperature == 30:
    print("It's a hot day")
else:
    print("It's not a hot day")

name = "ab"
if len(name) < 3:
    print("Name must be atleast 3 char")
elif len(name) > 50:
    print("Name must be longer than 50 char")
|
# Labo 01 - basis variabelen ("#" = commentaar)
# print("Hello world, dag Brent") #String= tekst
# #print= methode, argumenten bevinden zich tussen ()
# #1= integer, 1.0= double
# print('Hello world, dag Brent') #''=""
# print("Hello world, \t dag Brent") #\= escape funtcie = speciale actie, \t= tab, \n= new line
# print("Hello world, \n dag Brent")
#oef1
naam = input("Geef mij jouw naam: ")#prompt= input = een vraagstelling
voornaam = input("Geef mij uw voornaam: ")
leeftijd = input("Geef mij uw leeftijd: ")
#datatype conversie int, float ... = Leeftijd = int(leeftijd)
print("\nVoornaam: {0} \nAchternaam: {1} \nLeeftijd: {2}".format(voornaam , naam , leeftijd))#print("\nVoornaam: {0} \nAchternaam: {1}").format(voornaam , naam)format werkt niet na prints, wel op strings.
|
# Generated by Django 3.1.7 on 2021-03-20 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Join model and widens Contact.subject to 700 chars."""
    dependencies = [
        ('project', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Join',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uname', models.CharField(max_length=200)),
                ('pname', models.CharField(max_length=200)),
                ('uemail', models.EmailField(max_length=200)),
                # NOTE(review): password stored as a plain CharField — presumably
                # unhashed; verify this is intentional.
                ('pword', models.CharField(max_length=200)),
            ],
        ),
        migrations.AlterField(
            model_name='contact',
            name='subject',
            field=models.TextField(max_length=700),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2020-03-17 03:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds User.sidus_avatar and makes User.avatar optional with a default."""
    dependencies = [
        ('user', '0011_auto_20200316_1537'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='sidus_avatar',
            field=models.CharField(default='', max_length=125, verbose_name='Sidus头像'),
        ),
        migrations.AlterField(
            model_name='user',
            name='avatar',
            # NOTE(review): upload_to ends in '%s', which is not a strftime
            # directive Django's date formatting understands — confirm the
            # intended upload path pattern.
            field=models.ImageField(blank=True, default='user_avatar/default.jpg', null=True, upload_to='user_avatar/%Y/%m/%d/%s', verbose_name='头像'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import torch
from collections import OrderedDict
from functools import reduce
class PreDefinedVocab(object):
    """Defines a vocabulary object that will be used to numericalize a field.

    Attributes:
        stoi: An OrderedDict mapping token strings to numerical identifiers.
        itos: A list of token strings indexed by their numerical identifiers.
    """
    # Keyword arguments recognized as special-token definitions; each one also
    # gets a matching `<prefix>_id` attribute (e.g. unk_token -> unk_id).
    SPECIAL_TOKEN_ATTRIBUTES = [
        'unk_token', 'pad_token', 'bos_token', 'eos_token',
        'mask_token', 'sep_token', 'cls_token',
    ]
    def __init__(self, vocab_file, **kwargs):
        """Create a Vocab from a newline-delimited vocabulary file.

        Arguments:
            vocab_file: A path to vocabulary file (one token per line).
            unk_token: The string token used to represent OOV words.
            pad_token: The string token used as padding.
            bos_token: A token that will be prepended to every example using this field
            eos_token: A token that will be appended to every example using this field
            mask_token: The string token used as masking.
            sep_token: The string token used to separate sentences.
            cls_token: The string token used to classification.
        """
        with open(vocab_file, 'r') as f:
            self.itos = [w.rstrip() for w in f]
        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKEN_ATTRIBUTES and value is not None:
                setattr(self, key, value)
                # Resolve the token's id by its position in the file; raises
                # ValueError if the token is not present in the vocabulary.
                setattr(self, key.split('_')[0] + '_id', self.itos.index(value))
        self.stoi = OrderedDict({w: i for i, w in enumerate(self.itos)})
    def __getitem__(self, token):
        # Unknown tokens fall back to the unk token's id.
        return self.stoi.get(token, self.stoi.get(self.unk_token))
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm they are never used as dict keys/set members.
        if self.stoi != other.stoi:
            return False
        if self.itos != other.itos:
            return False
        return True
    def __len__(self):
        return len(self.itos)
def whitespace_tokenize(text):
    """Strip *text* and split it on runs of whitespace; empty list for blank input."""
    stripped = text.strip()
    return stripped.split() if stripped else []
class WordpieceTokenizer(object):
    """WordPiece tokenization.

    Greedy longest-match-first splitting of each whitespace-separated token
    into sub-tokens from the vocabulary; tokens that cannot be fully covered
    become ``unk_token``.
    """
    def __init__(self, vocab, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = vocab.unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Split *text* into wordpiece sub-tokens (longest-match-first)."""
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr  # continuation-piece marker
                    if substr in self.vocab.stoi:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
    def string2ids(self, tokens):
        """Map token strings to ids (unknown tokens map to the unk id)."""
        # BUG FIX: was `self.vocab(w)` — the vocab is not callable; item access
        # goes through PreDefinedVocab.__getitem__, which falls back to unk.
        return [self.vocab[w] for w in tokens]
    def ids2string(self, arr):
        """Map ids back to token strings."""
        return [self.vocab.itos[i] for i in arr]
    def encode(self, x):
        """Tokenize then numericalize *x*."""
        tokenized = self.tokenize(x)
        # BUG FIX: was a bare `string2ids(tokenized)` (NameError) — it is a method.
        return self.string2ids(tokenized)
    def decode(self, arr):
        """Join ids into a string, merging '##' continuation pieces; truncates
        at eos_id if the vocabulary defines one."""
        if hasattr(self.vocab, 'eos_id') and self.vocab.eos_id in arr:
            arr = arr[:arr.index(self.vocab.eos_id)]
        arr = self.ids2string(arr)
        return reduce(lambda x, y: f"{x}{y.lstrip('##')}"
                      if y.startswith('##') else f"{x} {y}", arr)
class TextField(object):
    """Field that tokenizes text with WordPiece and numericalizes it into tensors."""
    def __init__(self, vocab_file, eos_token="[EOS]", bos_token="[BOS]",
                 pad_token="[PAD]", unk_token="[UNK]", mask_token=None, sep_token=None,
                 cls_token=None, lower=False, preprocessing=None,
                 postprocessing=None, include_lengths=False, batch_first=False,
                 dtype=torch.long, truncate_first=False, stop_words=None, is_target=False):
        self.vocab = PreDefinedVocab(
            vocab_file=vocab_file,
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            mask_token=mask_token,
            sep_token=sep_token,
            cls_token=cls_token,
        )
        self.preprocessing = preprocessing
        self.postprocessing = postprocessing
        self.dtype = dtype
        self.lower = lower
        self.tokenizer = WordpieceTokenizer(self.vocab)
        self.include_lengths = include_lengths
        self.batch_first = batch_first
        self.truncate_first = truncate_first
        try:
            self.stop_words = set(stop_words) if stop_words is not None else None
        except TypeError:
            raise ValueError("Stop words must be convertible to a set")
        self.is_target = is_target
    def __eq__(self, other):
        # BUG FIX: the original compared against the undefined name
        # `CandleField` (NameError at call time); the intended type is this class.
        if not isinstance(other, TextField):
            return False
        return self.__dict__ == other.__dict__
    def preprocess(self, x):
        """Load a single example using this field."""
        if self.lower:
            x = x.lower()
        x = self.tokenizer.tokenize(x.rstrip('\n'))
        if self.stop_words is not None:
            x = [w for w in x if w not in self.stop_words]
        if self.preprocessing is not None:
            return self.preprocessing(x)
        else:
            return x
    def process(self, batch, device=None):
        """ Process a list of examples to create a torch.Tensor.
        Pad, numericalize, and postprocess a batch and create a tensor.
        Args:
            batch (list(object)): A list of object from a batch of examples.
        Returns:
            torch.autograd.Variable: Processed object given the input
                and custom postprocessing Pipeline.
        """
        padded = self.pad(batch)
        tensor = self.numericalize(padded, device=device)
        return tensor
    def pad(self, minibatch):
        """Pad a batch of examples using this field.
        Pads to the length of the longest example in the batch. Prepends
        self.vocab.bos_token and appends self.eos_token if those attributes are
        not None. Returns a tuple of the padded list and a list containing
        lengths of each example if `self.include_lengths` is `True`, else
        just returns the padded list.
        """
        minibatch = list(minibatch)
        max_len = max(len(x) for x in minibatch)
        padded, lengths = [], []
        for x in minibatch:
            padded.append(
                ([] if self.vocab.bos_token is None else [self.vocab.bos_token])
                + list(x[-max_len:] if self.truncate_first else x[:max_len])
                + ([] if self.vocab.eos_token is None else [self.vocab.eos_token])
                + [self.vocab.pad_token] * max(0, max_len - len(x)))
            # Length of the real (unpadded) content, including bos/eos.
            lengths.append(len(padded[-1]) - max(0, max_len - len(x)))
        if self.include_lengths:
            return (padded, lengths)
        return padded
    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.
        If the field has include_lengths=True, a tensor of lengths will be
        included in the return value.
        Arguments:
            arr (List[List[str]], or tuple of (List[List[str]], List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True.
            device (str or torch.device): A string or instance of `torch.device`
                specifying which device the Variables are going to be created on.
                If left as default, the tensors will be created on cpu. Default: None.
        """
        if self.include_lengths and not isinstance(arr, tuple):
            # BUG FIX: added the missing space between the two adjacent string
            # literals (the message previously read "...is nota tuple...").
            raise ValueError(
                "Field has include_lengths set to True, but input data is not "
                "a tuple of (data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=self.dtype, device=device)
        arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, self.vocab)
        var = torch.tensor(arr, dtype=self.dtype, device=device)
        if not self.batch_first:
            # Default layout is (seq_len, batch); transpose in place.
            var.t_()
        var = var.contiguous()
        if self.include_lengths:
            return var, lengths
        return var
    def decode(self, arr):
        """ Convert a index sequence into a token sequence"""
        if isinstance(arr, torch.Tensor):
            arr = arr.tolist()
        return self.tokenizer.decode(arr)
|
# -*- coding: utf-8 -*-
import turtle
import math
import random
import pygame
import tkinter
pygame.init()
from pygame.mixer import Sound
# criando a tela
turtle.setup(800,800)
wn = turtle.Screen()
wn.bgcolor('black')
wn.title('Space Invaders')
wn.bgpic('bg.gif')
#registrando as imagens (shapes)
turtle.register_shape('invader.gif')
turtle.register_shape('player.gif')
turtle.register_shape('foguete.gif')
turtle.register_shape('heart.gif')
#criando os sons
explod = pygame.mixer.Sound('explosion.wav')
laser = pygame.mixer.Sound('laser.wav')
amb = pygame.mixer.music
amb.load('amb.wav')
amb.set_volume(0.2)
amb.play()
laser.set_volume(0.5)
explod.set_volume(0.2)
# desenhando as bordas
caneta = turtle.Turtle()
caneta.speed(0)
caneta.color('white')
caneta.penup()
caneta.setposition(-300, -300)
caneta.pendown()
caneta.pensize(3)
for side in range(4):
caneta.fd(600)
caneta.lt(90)
caneta.hideturtle()
#criando e desenhando o score(placar)
score = 0
#desenhandoo score
scorepen = turtle.Turtle()
scorepen.color('white')
scorepen.speed(0)
scorepen.penup()
scorepen.setposition(-290,275)
#texto do score + a variavel
scorestring = (f'SCORE= {score}')
scorepen.write(scorestring, False, align = 'left', font = ('Arial', 14,'normal'))
scorepen.hideturtle()
#criando as vidas
n_vida = 5#variavel de controle de vidas, iniciando com 5 vidas
vidas = [] #lista que contem as vidas
for v in range (n_vida):
vidas.append(turtle.Turtle())
#desenhando as vidas
y=300
for vida in vidas:
#vida= turtle.Turtle()
vida.hideturtle()
vida.color('red')
vida.shape('heart.gif')
vida.speed(0)
vida.penup()
vida.setposition(-320,y)
vida.showturtle()
y-=35
#criando niveis de dificuldade
nivel =1 #variavel de controle de nivel, iniciada em 1
nivelpen = turtle.Turtle()
nivelpen.color('white')
nivelpen.speed(0)
nivelpen.penup()
nivelpen.setposition(200,275)
#escrevendo o nivel na tela
nivelpen.clear()
nivelstring = f'Nivel: {nivel}'
nivelpen.write(nivelstring,False, align= 'left', font =('Arial',14,'normal'))
nivelpen.ht()
#Criando o player
player = turtle.Turtle()
player.color('blue')
player.shape('player.gif')
player.penup()
player.speed(0)
player.setposition(0,-250)
player.setheading(90)
playerspeed = 15
#criando os inimigos
n_inimigos = 5 #variavel de controle da quantidade de inimigos em tela
inimigos = [] # lista de inimigos em tela
for i in range (n_inimigos):
inimigos.append(turtle.Turtle()) #preenchendo a lista com inimigos
#dando caracteristicas a todos os inimigos da lista
for inimigo in inimigos:
inimigo.color('red')
inimigo.shape('invader.gif')
inimigo.penup()
inimigo.speed(0)
#sorteando a próxima posição de aparição do inimigo
x = random.randint(-200,200)
y = random.randint(100,180)
inimigo.setposition(x,y)
inimigospeed = 2
#criando o foguete do player:
foguete = turtle.Turtle()
foguete.color('yellow')
foguete.shape('foguete.gif')
foguete.penup()
foguete.speed(0)
foguete.setheading(90)
foguete.shapesize(0.25,0.25)
foguete.hideturtle()
foguetespeed=20
#definindo estado do foguete
#pronto = pronto para atirar
#fogo = foguete disparado
estado_foguete = 'pronto'
#funções de movimento do player
def moveesquerda():
    """Move the player one step left, clamped at the arena's left wall (x=-280)."""
    player.setx(max(player.xcor() - playerspeed, -280))
def movedireita():
    """Move the player one step right, clamped at the arena's right wall (x=280)."""
    player.setx(min(player.xcor() + playerspeed, 280))
def movefrente():
    """Move the player one step up, clamped at the midline (y=0)."""
    player.sety(min(player.ycor() + playerspeed, 0))
def movetras():
    """Move the player one step down, clamped at the bottom wall (y=-280)."""
    player.sety(max(player.ycor() - playerspeed, -280))
# shooting function (comments translated from Portuguese)
def atirar():
    """Fire the player's rocket if it is ready."""
    # estado_foguete is reassigned below, so declare it global
    global estado_foguete
    # only fire when the rocket is in the 'ready' state
    if estado_foguete == 'pronto':
        # rocket sound
        laser.play()
        estado_foguete = 'atirando'
        # position the rocket just above the player
        x = player.xcor()
        y = player.ycor()+10
        foguete.setposition(x,y)
        foguete.showturtle()
#função que define se existe colisão entre os objetos do jogo (inimigo com player, foguete com inimigo...)
def isCollision(t1, t2):
    """Return True when two turtle-like objects are within 15 px of each other.

    Works on anything exposing ``xcor()``/``ycor()`` accessors.
    """
    # math.hypot computes the Euclidean distance in one call, replacing the
    # original sqrt/pow chain; the boolean is returned directly instead of
    # via an explicit if/else.
    return math.hypot(t1.xcor() - t2.xcor(), t1.ycor() - t2.ycor()) < 15
def sair():
    """Quit the game: stop the soundtrack and tear down the turtle window."""
    amb.stop()
    turtle.done()
    wn.clear()
    wn.bye()
#criando conexões de entrada de teclado
turtle.listen()
turtle.onkey(moveesquerda,'Left')
turtle.onkey(movedireita,'Right')
turtle.onkey(movefrente,'Up')
turtle.onkey(movetras,'Down')
turtle.onkey(atirar,'space')
turtle.onkey(sair,'Escape')
# criando o loop do jogo (maingameloop)
while True:
#definindo parametros para aumento de nivel
if score>=100 and score%(100*nivel)==0:
nivel+=1
n_inimigos *=nivel
nivelpen.clear()
nivelstring = f'Nivel: {nivel}'
nivelpen.write(nivelstring,False, align= 'left', font =('Arial',14,'normal'))
nivelpen.ht()
#aumentando os inimigos segundo os níveis
if len(inimigos)<=30: #limitando o máximo de inimigos em trinta
inimigo = turtle.Turtle()
inimigo.shape('invader.gif')
inimigo.penup()
inimigo.speed(0)
#sorteando a próxima posição de aparição do inimigo
x = random.randint(-200,200)
y = random.randint(100,180)
inimigo.setposition(x,y)
for i in range (nivel):
inimigos.append(inimigo)
for inimigo in inimigos:
#movendo todos os inimigos inimigo:
x = inimigo.xcor()
x+=inimigospeed
inimigo.setx(x)
if inimigo.xcor() >280:
for e in inimigos:
y = e.ycor()
y-=10
e.sety(y)
inimigospeed*=-1
if inimigo.xcor()<-280:
for e in inimigos:
y = e.ycor()
y-=10
e.sety(y)
inimigospeed*=-1
#verificando se há colisão entre foguete e inimigo
if isCollision(foguete,inimigo):
#som de explosão do inimigo
explod.play()
#resetando o foguete
foguete.hideturtle()#tornando o foguete invisivel("destruindo")
estado_foguete = 'pronto'# mudando estado do foguete pra que possa ser disparado denovo
foguete.setposition(0,-400)#escondendo para novo tiro
#resetando o inimigo
#sorteando a próxima posição de aparição do inimigo
x = random.randint(-200,200)
y = random.randint(100,180)
inimigo.setposition(x,y)
#atualizando score a cada inimigo abatido:
score+=10
scorestring = (f'SCORE= {score}')
scorepen.clear()
scorepen.write(scorestring, False, align = 'left', font = ('Arial', 14,'normal'))
#verificando se o inimigo atingiu o player
if isCollision(player,inimigo):
if n_vida >0:# tirando a vida até zerar
explod.play()
#resetando o inimigo
#sorteando a próxima posição de aparição do inimigo
x = random.randint(-200,200)
y = random.randint(100,180)
inimigo.setposition(x,y)
n_vida-=1#zerando as vidas
vidas[n_vida].ht()
continue
else:
explod.play()
player.hideturtle()
inimigo.hideturtle()
#criando o texto game over
gopen = turtle.Turtle()#criando a caneta que escreverá o texto
gopen.speed(0)
gopen.color('yellow')
gotext = ('GAME OVER!')#texto
gopen.write(gotext, False, align = 'center', font = ('Arial', 48,'bold'))
gopen.hideturtle()
amb.stop()
break
#movendo o foguete quando atirado
y = foguete.ycor()
y+=foguetespeed
foguete.sety(y)
#verificando se o foguete chegou no extremo da tela,
#destruindo o e mudando estado do foguete para novo tiro
if foguete.ycor()>275:
foguete.hideturtle()
estado_foguete = 'pronto'
amb.stop()
|
from enum import Enum
class ConnectionState(Enum):
    """States a connection can be in (names suggest a closed -> unauthenticated
    -> broadcast progression — confirm against the consuming state machine)."""
    CLOSED = 1
    UNAUTHENTICATED = 2
    BROADCAST = 3
|
from logics.dice import *
from logics.weapon import Weapon
class Barbarian(object):
    """A barbarian combatant; all stats are rolled randomly at creation."""
    # NOTE(review): these class attributes are always shadowed by the instance
    # attributes set in __init__; they act only as declarations.
    name = None
    strength = None
    stamina = None
    health_point = None
    base_damage = None
    def __init__(self, name):
        self.name = name
        self.strength = roll_d10()
        self.stamina = roll_d10()
        # 50 + a roll (range depends on roll_custom's contract) + stamina bonus.
        self.health_point = roll_custom(51) + 50 + self.stamina
        self.base_damage = roll_d10() + self.strength
    def battle_cry(self):
        """Heal d10 + stamina HP and return a report string."""
        heal = roll_d10() + self.stamina
        self.health_point += heal
        return '%s has just used BattleCry, and healed %d hp. He has %d hp left.;' % (self.name, heal, self.health_point)
    def use_weapon(self, mage):
        """Attack *mage* with a randomly chosen weapon; returns a report string.

        NOTE(review): the three branches are near-duplicates differing only in
        weapon and message; a table-driven refactor would remove the repetition.
        """
        sword = Weapon(True, False)
        axe = Weapon(False, True)
        hammer = Weapon(True, True)
        which_weapon = roll_custom(3)
        if which_weapon == 1:
            damage = sword.weapon_damage + self.base_damage
            self.health_point += sword.self_heal
            self.health_point -= sword.self_damage
            mage.health_point -= damage
            return '%s used his Enchanted Sword, and caused %d damage on %s and healed %d on himself.;' % (
                self.name, damage, mage.name, sword.self_heal)
        elif which_weapon == 2:
            damage = axe.weapon_damage + self.base_damage
            self.health_point += axe.self_heal
            self.health_point -= axe.self_damage
            mage.health_point -= damage
            return '%s used his Cursed Axe, and caused %d damage on %s and damaged %d on himself.;' % (
                self.name, damage, mage.name, axe.self_damage)
        else:
            damage = hammer.weapon_damage + self.base_damage
            self.health_point += hammer.self_heal
            self.health_point -= hammer.self_damage
            mage.health_point -= damage
            return '%s used his Hammer of Absolute Destruction, and caused %d damage on %s and %d/%d on himself.;' % (
                self.name, damage, mage.name, hammer.self_heal, hammer.self_damage)
    def act(self, mage):
        """Randomly choose between battle_cry and use_weapon."""
        which_skill = roll_custom(2)
        if which_skill == 1:
            return self.battle_cry()
        else:
            return self.use_weapon(mage)
    def get_status(self):
        """Return a one-line stat summary."""
        return '%s has the following stats: Strength: %d Stamina %d Base damage: %d HP: %d ;' % (self.name, self.strength, self.stamina, self.base_damage, self.health_point)
|
'''
Day 12
You walk through the village and record the ID of each program and the IDs
with which it can communicate directly (your puzzle input).
Each program has one or more programs with which it can communicate,
and these pipes are bidirectional; if 8 says it can communicate with 11,
then 11 will say it can communicate with 8.
You need to figure out how many programs are in the group that contains program ID 0.
For example, suppose you go door-to-door like a travelling salesman and record the following list:
0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5
In this example, the following programs are in the group that contains program ID 0:
Program 0 by definition.
Program 2, directly connected to program 0.
Program 3 via program 2.
Program 4 via program 2.
Program 5 via programs 6, then 4, then 2.
Program 6 via programs 4, then 2.
Therefore, a total of 6 programs are in this group; all but program 1, which has a pipe that connects it to itself.
How many programs are in the group that contains program ID 0?
'''
import re
def digitalPlumber(file):
    """Return the IDs of all programs in the group containing program '0'.

    Parses lines of the form ``"2 <-> 0, 3, 4"`` into an adjacency map,
    then walks the graph from node '0'.  The puzzle input lists every pipe
    symmetrically, so the map alone is enough for traversal.

    This replaces the original multi-pass fix-point scan (which shadowed
    the ``iter`` builtin and relied on an arbitrary 20000-iteration
    bailout) with a plain O(V + E) graph search.

    Parameters
    ----------
    file : str
        Path to the puzzle-input text file.

    Returns
    -------
    list of str
        The program IDs reachable from '0' (including '0' itself).
    """
    adjacency = {}
    with open(file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            node, _, neighbours = line.partition(' <-> ')
            adjacency[node] = [part.strip() for part in neighbours.split(',')]
    # Iterative depth-first search from program '0'.
    group = set()
    pending = ['0']
    while pending:
        current = pending.pop()
        if current in group:
            continue
        group.add(current)
        pending.extend(adjacency.get(current, []))
    return list(group)
# Run the puzzle against the real input and report the size of the group
# containing program 0.
# NOTE(review): executes at import time; consider guarding with
# `if __name__ == "__main__":`.
lst = digitalPlumber("input/day12.txt")
lst = sorted(lst)
print("\nAnswer: " + str(len(lst)))
#!/usr/bin/env python
import caffe
import string
import numpy as np
import subprocess
import heapq
import os, sys, stat
# # the last 1000 have the largest loss weight.
# class PriorityQueue:
# def __init__(self):
# self._queue = []
# self._index = 0
# def push(self, item, priority):
# heapq.heappush(self._queue, (-priority, self._index, item))
# self._index += 1
# def pop(self):
# return heapq.heappop(self._queue)[-1]
# # weight * variation
# PRETRAIN_FILE = 'vgg.caffemodel'
# for s in range(16,17):
# MODEL_FILE = 'vgg-A.prototxt'
# np.set_printoptions(threshold='nan')
# net1 = caffe.Net(MODEL_FILE, PRETRAIN_FILE, caffe.TEST)
# top1000 = PriorityQueue()
# params_txt0 = 'vgg_fault'+str(s)+'/'+'0.txt'
# params_txt1 = 'vgg_fault'+str(s)+'/'+'1.txt'
# pf0 = open(params_txt0, 'r')
# pf1 = open(params_txt1, 'r')
# # for param_name in net1.params.keys():
# # weight = net1.params[param_name][0].data
# # bias = net1.params[param_name][1].data
# # if len(weight.shape)==4:
# # for x in range(weight.shape[0]):
# # for y in range(weight.shape[1]):
# # for z in range(weight.shape[2]):
# # for m in range(weight.shape[3]):
# # org_weight = weight[x,y,z,m]
# # variation0=string.atof(pf0.readline())
# # variation1=string.atof(pf1.readline())
# # if abs(variation0 - 1) > abs(variation1 - 1):
# # delta_weight = weight[x,y,z,m] * variation1
# # else:
# # delta_weight = weight[x,y,z,m] * variation0
# # #net1.params[param_name][0].data[x,y,z,m]= delta_weight
# # loss_weight = abs(org_weight - delta_weight)
# # top1000.push([param_name,org_weight,variation1,x,y,z,m],loss_weight)
# # elif len(weight.shape)==3:
# # for x in range(weight.shape[0]):
# # for y in range(weight.shape[1]):
# # for z in range(weight.shape[2]):
# # org_weight = weight[x,y,z]
# # variation0=string.atof(pf0.readline())
# # variation1=string.atof(pf1.readline())
# # if abs(variation0 - 1) > abs(variation1 - 1):
# # delta_weight = weight[x,y,z] * variation1
# # else:
# # delta_weight = weight[x,y,z] * variation0
# # #net1.params[param_name][0].data[x,y,z,m]= delta_weight
# # loss_weight = abs(org_weight - delta_weight)
# # top1000.push([param_name,org_weight,variation1,x,y,z],loss_weight)
# # elif len(weight.shape)==2:
# # for x in range(weight.shape[0]):
# # for y in range(weight.shape[1]):
# # org_weight = weight[x,y]
# # variation0=string.atof(pf0.readline())
# # variation1=string.atof(pf1.readline())
# # if abs(variation0 - 1) > abs(variation1 - 1):
# # delta_weight = weight[x,y] * variation1
# # else:
# # delta_weight = weight[x,y] * variation0
# # #net1.params[param_name][0].data[x,y,z,m]= delta_weight
# # loss_weight = abs(org_weight - delta_weight)
# # top1000.push([param_name,org_weight,variation1,x,y],loss_weight)
# # for x in range(bias.shape[0]):
# # org_weight = bias[x]
# # variation0=string.atof(pf0.readline())
# # variation1=string.atof(pf1.readline())
# # if abs(variation0 - 1) > abs(variation1 - 1):
# # delta_weight = bias[x] * variation1
# # else:
# # delta_weight = bias[x] * variation0
# # #net1.params[param_name][0].data[x,y,z,m]= delta_weight
# # loss_weight = abs(org_weight - delta_weight)
# # top1000.push([param_name,org_weight,variation1,x],loss_weight)
# net1.save('16_test.caffemodel')
# pf0.close()
# pf1.close()
# # keep the large loss weight stable
# net1 = caffe.Net(MODEL_FILE, '16_test.caffemodel', caffe.TEST)
# for jj in range(1000):
# original_data = top1000.pop()
# if len(original_data)==7:
# net1.params[original_data[0]][0].data[original_data[3],original_data[4],original_data[5],original_data[6]] = 0
# if len(original_data)==6:
# net1.params[original_data[0]][0].data[original_data[3],original_data[4],original_data[5]] = 0
# if len(original_data)==5:
# net1.params[original_data[0]][0].data[original_data[3],original_data[4]] = 0
# if len(original_data)==4:
# net1.params[original_data[0]][1].data[original_data[3]] = 0
# net1.save('16_test.caffemodel')
# do the training process
# Launch Caffe training as a blocking child process.  The command string is
# split into an argv list, so this runs with shell=False (no shell-quoting
# concerns).  Relative paths assume the script is run from the caffe
# examples directory — TODO confirm working directory.
subprocess.call('../build/tools/caffe train -solver vgg_solver_adadelta_01_100000.prototxt -gpu 0'.split())
# os.chmod("vgg_adadelta_iter_30000.caffemodel", stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
# # transform .h5 to caffemodel
# np.set_printoptions(threshold='nan')
# #MODEL_FILE = 'cifar10_quick.prototxt'
# CHANGED_FILE = 'vgg_adadelta_iter_30000.caffemodel'
# net1 = caffe.Net(MODEL_FILE, CHANGED_FILE, caffe.TEST)
# net1.save('16_test.caffemodel')
# PRETRAIN_FILE = '16_test.caffemodel'
# np.set_printoptions(threshold='nan')
# net1 = caffe.Net(MODEL_FILE, PRETRAIN_FILE, caffe.TEST)
# params_txt0 = 'vgg_fault'+str(s)+'/'+'0.txt'
# params_txt1 = 'vgg_fault'+str(s)+'/'+'1.txt'
# pf0 = open(params_txt0, 'r')
# pf1 = open(params_txt1, 'r')
# for param_name in net1.params.keys():
# weight = net1.params[param_name][0].data
# bias = net1.params[param_name][1].data
# if len(weight.shape)==4:
# for x in range(weight.shape[0]):
# for y in range(weight.shape[1]):
# for z in range(weight.shape[2]):
# for m in range(weight.shape[3]):
# variation0=string.atof(pf0.readline())
# variation1=string.atof(pf1.readline())
# if abs(variation0 - 1) > abs(variation1 - 1):
# delta_weight = weight[x,y,z,m] * variation1
# else:
# delta_weight = weight[x,y,z,m] * variation0
# net1.params[param_name][0].data[x,y,z,m]= delta_weight
# elif len(weight.shape)==3:
# for x in range(weight.shape[0]):
# for y in range(weight.shape[1]):
# for z in range(weight.shape[2]):
# variation0=string.atof(pf0.readline())
# variation1=string.atof(pf1.readline())
# if abs(variation0 - 1) > abs(variation1 - 1):
# delta_weight = weight[x,y,z] * variation1
# else:
# delta_weight = weight[x,y,z] * variation0
# net1.params[param_name][0].data[x,y,z]= delta_weight
# elif len(weight.shape)==2:
# for x in range(weight.shape[0]):
# for y in range(weight.shape[1]):
# variation0=string.atof(pf0.readline())
# variation1=string.atof(pf1.readline())
# if abs(variation0 - 1) > abs(variation1 - 1):
# delta_weight = weight[x,y] * variation1
# else:
# delta_weight = weight[x,y] * variation0
# net1.params[param_name][0].data[x,y]= delta_weight
# for x in range(bias.shape[0]):
# variation0=string.atof(pf0.readline())
# variation1=string.atof(pf1.readline())
# if abs(variation0 - 1) > abs(variation1 - 1):
# delta_weight = bias[x] * variation1
# else:
# delta_weight = bias[x] * variation0
# net1.params[param_name][1].data[x]= delta_weight
# net1.save('test.caffemodel')
# pf0.close()
# pf1.close()
# subprocess.call('../build/tools/caffe test -model vgg-A.prototxt -weights test.caffemodel -gpu 0'.split())
|
# -*- coding: utf-8 -*-
import pandas as pd
from keras.optimizers import SGD, Adam
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.utils.np_utils import to_categorical
#%matplotlib inline
# Load the training data; the CSV has a 'label' column plus one column per
# pixel.  NOTE(review): hard-coded local Windows path — parameterize before
# sharing.
dataset = pd.read_csv("C:\\Users\\10651\\Desktop\\train.csv")
y_train = dataset['label']
x_train = dataset.drop('label', axis=1)
del dataset  # free the raw frame early
x_train.describe()
x_train['pixel0'].plot()
# Drop constant-valued columns: they carry no information for the classifier.
dropped_columns = []
for column in x_train.columns:
    if x_train[column].max() == x_train[column].min():
        dropped_columns.append(column)
x_train.drop(dropped_columns, axis = 1, inplace = True)
print('Dropped columns:', len(dropped_columns))
print('New shape of training dataset:', x_train.shape)
# Sanity check: report any missing values.
for column in x_train.columns:
    if x_train[column].isnull().any():
        print('Null value detected in the feature:', column)
# Min-max normalise each remaining column to [0, 1], remembering the
# per-column amplitudes so the identical scaling can be applied to test
# data later.
min_train = {}
max_train = {}
for column in x_train.columns:
    min_train[column] = x_train[column].min()
    max_train[column] = x_train[column].max()
    x_train[column] = (x_train[column] - min_train[column]) / (max_train[column] - min_train[column])
x_train = x_train.values
y_train.value_counts(normalize=False, sort=True, ascending=False, bins=None, dropna=False)
y_train = to_categorical(y_train, num_classes = 10)
print(y_train)
# BUG FIX: derive the input dimension from the data instead of hard-coding
# 708, so the model still builds if a different number of constant columns
# was dropped.
FEATURES = x_train.shape[1]
layer_1 = 128
LABELS = 10
# Single-hidden-layer softmax classifier.
model_LR = Sequential()
model_LR.add(Dense(layer_1, input_shape = (FEATURES,)))
model_LR.add(Activation('relu'))
model_LR.add(Dense(LABELS))
model_LR.add(Activation('softmax'))
model_LR.summary()
model_LR.compile(loss = 'categorical_crossentropy', optimizer = Adam(), metrics = ['accuracy'])
EPOCHS = 10
BATCH_SIZE = 100
VALIDATION_SIZE = 0.1
training_history = model_LR.fit(x_train,
                                y_train,
                                batch_size = BATCH_SIZE,
                                epochs = EPOCHS,
                                verbose = 1,
                                validation_split = VALIDATION_SIZE)
__version__ = "1.9"  # package version string
# Re-export the public classes at package level.
from .ac import AC
from .trie import Trie
|
from django.urls import path

from posts import views

# URL configuration for the posts app: feed listing, post creation and
# post detail.
urlpatterns = [
    path(route='', view=views.PostFeedView.as_view(), name='feed'),
    path(route='new/', view=views.CreatePostView.as_view(), name='new'),
    path(route='detail/<int:pk>', view=views.PostDetailView.as_view(), name='posts_detail'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 20:00:29 2020
@author: nicolai
"""
import cv2
import numpy as np
from glob import glob
from subscripts.classes import movie_obj
import threading
import time
import pandas as pd
# NOTE(review): threading._shutdown() is a private CPython internal that the
# interpreter normally calls at exit to join non-daemon threads; invoking it
# here at import time is highly unusual — confirm it is intentional.
threading._shutdown()
"""
glob - Til at finde filer
pandas (også kaldet pd) - Til at arbejde med data og csv filer
cv2 - Computer Vision 2, bruges til at arbejde med billeder og videoer
numpy (også kaldet np) - Bruges til at lave matematik i python (gennemsnit osv)
"""
""" Change here for input video specifics """
video_paths = glob("analyse_files/*.mp4")
which_cam = "cam2" # change this depending on whether it is cam1 or cam2!
take_every_x_video = 2  # analyse only every 2nd video found
input_video_fps = 25.0
# Regions of interest per camera: [x0, x1] / [y0, y1] pixel bounds for six
# boxes.  focus_x[i] pairs with focus_y[i].
focus_x_cam1 = [[145, 470], [140, 470], [132, 462], [505, 828], [495, 835], [505, 835]]
focus_y_cam1 = [[25, 230], [245, 455], [475, 690], [25, 230], [248, 465], [478, 708]]
focus_x_cam2 = [[560, 867], [534, 885], [537, 865], [924, 1273], [912, 1273], [903, 1263]]
focus_y_cam2 = [[46, 221], [251, 439], [460, 695], [40, 232], [265, 480], [490, 723]]
""" Change here for output video specifics """
save_every_x_image = 15 # process/save only every 15th frame
fps = 25 # how fast should the output videos be
frame_in_vid = 30000  # cap on the number of processed frames per output video
color_factor = 20 # how much color should be in the coloured output video
datafile_output_ending = "_datafile.csv"
datafile_output_folder = "datafiles"
""" Change here for median filter specifics """
n = 100 # how far back should the filter look
use_every_x_frame = 8 # for the median filter
no_of_frames_in_array = n/use_every_x_frame  # float; only used in a ">" comparison
darkness_threshold = 50 # when does the light turn off
move_threshold_light = 15
move_threshold_dark = 5
""" Change here for analysing several videos simultaneously """
max_no_of_threads = 5
# Text-overlay settings (cv2.putText parameters).
font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
thickness = 2
make_movies = True
""" Morphological operator settings to reduce noise (dont worry about it) """
do_morph_ops = True
kernel_size = 3
kernel = np.ones([kernel_size, kernel_size])
opening_iterations = 1
closing_iterations = 1
# Select the ROI set for the configured camera.
# NOTE(review): no fallback branch — any other which_cam value leaves
# focus_x/focus_y undefined and crashes later.
if which_cam.lower() == "cam1":
    focus_x = focus_x_cam1
    focus_y = focus_y_cam1
elif which_cam.lower() == "cam2":
    focus_x = focus_x_cam2
    focus_y = focus_y_cam2
def bright_or_dark(frame_gray):
    """Pick the motion threshold appropriate for the scene brightness.

    Returns the dark-scene threshold when the mean grey level of
    *frame_gray* falls below ``darkness_threshold`` (lights off),
    otherwise the light-scene threshold.
    """
    scene_is_dark = np.mean(frame_gray) < darkness_threshold
    return move_threshold_dark if scene_is_dark else move_threshold_light
def analyse_video(path):
    """Run background-subtraction motion analysis on one video file.

    Builds a rolling median background from every ``use_every_x_frame``-th
    frame, thresholds each processed frame against it, writes per-ROI mean
    motion values to a CSV in ``datafile_output_folder``, and (via
    ``movie_obj``) records a coloured and a binary-motion output video.

    NOTE(review): ``np.float`` was removed in NumPy 1.24 and
    ``DataFrame.append`` in pandas 2.0 — this function needs older
    library versions to run.
    """
    df_motion = pd.DataFrame({"Frame Number":[], "Time": []})
    start_time = time.time()
    # Windows-style path split — TODO confirm this only runs on Windows.
    video_name = path.split("\\")[-1][:-4]
    cap = cv2.VideoCapture(path)
    # Process at most n + save_every_x_image*frame_in_vid frames.
    no_of_frames = np.min([n+save_every_x_image*frame_in_vid, int(cap.get(cv2.CAP_PROP_FRAME_COUNT))])
    d = {}
    print("Video %s is now being analysed"%(path))
    for index in range(no_of_frames):
        ret, frame = cap.read()
        # If the first frame
        if index == 0:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Check if light is on in room
            move_threshold = bright_or_dark(gray)
            # Initiate the background_images array
            background_images = np.array([gray])
            # Find its median
            background = background_images[0]
            # First frame is purely black
            gray_motion = np.zeros(gray.shape)
            # Make the first frame for the coloured movie
            frame_plus_motion = frame.copy().astype(np.float)
            # Initiate the movie files
            # NOTE(review): the movie objects are created even when
            # make_movies is False — confirm movie_obj is cheap in that case.
            save_path = "processed_videos/" + video_name
            movie = movie_obj(save_path+"_colored", frame_plus_motion.astype(np.uint8), fps)
            movie_mot = movie_obj(save_path+"_motion", gray_motion.astype(np.uint8), fps)
            # Make the first datarow for the csv datafile
            d["Frame Number"] = index
            d["Time"] = 0
            # Process all the defined areas
            for i in range(len(focus_x)):
                focus_area = gray_motion[focus_y[i][0]:focus_y[i][1], focus_x[i][0]:focus_x[i][1]]
                d["Image Mean Box %d"%(i)] = np.mean(focus_area)
            # Add datarow to the dataframe
            df_motion = df_motion.append(d, ignore_index=True)
        # If not the first frame
        else:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Check if you should add another frame to the background_images array
            if index%use_every_x_frame == 0:
                background_images = np.append(background_images, [gray], 0)
                # If you too many images in background_images, remove the oldest frame
                if background_images.shape[0]>no_of_frames_in_array:
                    background_images = background_images[1:]
            # Check if you should process the frame at hand
            if (index%save_every_x_image)==0:
                # Check if light is on in room
                move_threshold = bright_or_dark(gray)
                # Find background by taking the median of previous frames
                background = np.median(background_images, axis=0)
                # Subtract the background from the current image to find differences
                gray_motion = np.abs(gray-background)
                # If differences are small, set them to zero, otherwise 255
                gray_motion[gray_motion<move_threshold] = 0
                gray_motion[gray_motion!=0] = 255
                # This is to reduce little noise speckles
                if do_morph_ops:
                    closing = cv2.morphologyEx(gray_motion, cv2.MORPH_CLOSE, kernel, iterations=closing_iterations)
                    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel, iterations=opening_iterations)
                    gray_motion = opening
                # Make the frame for the coloured movie, and add threshold value
                # in top corner
                frame_plus_motion = frame.copy().astype(np.float)
                frame_plus_motion[:,:,1] += (color_factor*gray_motion)
                frame_plus_motion[frame_plus_motion>255] = 255
                # Add processed frames to the movies
                movie.add_image(frame_plus_motion.astype(np.uint8))
                movie_mot.add_image(gray_motion.astype(np.uint8))
                # Add data to a new datarow
                d["Frame Number"] = index
                d["Time"] = index/input_video_fps
                # Process all the defined areas
                for i in range(len(focus_x)):
                    focus_area = gray_motion[focus_y[i][0]:focus_y[i][1], focus_x[i][0]:focus_x[i][1]]
                    d["Image Mean Box %d"%(i)] = np.mean(focus_area)
                # Add datarow to the dataframe and save the temporary datafile
                df_motion = df_motion.append(d, ignore_index=True)
                df_motion.to_csv("%s/%s%s"%(datafile_output_folder,video_name,datafile_output_ending))
    # Close the opened video file
    cap.release()
    # If movies were made, close them before exiting
    # NOTE(review): the else branch releases cap a second time (harmless in
    # OpenCV but redundant — cap.release() above already ran).
    if make_movies:
        movie.close_movie()
        movie_mot.close_movie()
        print("%s done in %.2f min"%(video_name, (time.time()-start_time)/60.0))
    else:
        cap.release()
        print("%s done in %.2f min"%(video_name, (time.time()-start_time)/60.0))
""" Here is the code that runs the "analyse_video" function on all videos in folder """
# Here some of the video paths are put away in order to only analyse some of the videos
# NOTE(review): this resume check globs 'datafiles/*/*.csv' (one directory
# deep) while analyse_video writes directly into 'datafiles/' — confirm the
# pattern, otherwise already-analysed videos are never skipped.
datafile_paths = glob('datafiles/*/*.csv')
for i in range(len(datafile_paths)):
    temp = datafile_paths[i]
    temp = temp.replace('_datafile.csv', '').split("\\")[-1]
    datafile_paths[i] = temp
print("Starting")
# Take every take_every_x_video-th video and keep only those without an
# existing datafile.
choices = np.arange(0, len(video_paths), take_every_x_video)
new_video_paths = []
for i in choices:
    temp = video_paths[i].replace('.mp4', '').split("\\")[-1]
    if not temp in datafile_paths:
        new_video_paths.append(video_paths[i])
if len(new_video_paths) == 0:
    print("No new videos to analyse")
video_paths = new_video_paths
threads = []
time_start = time.time()
# While some paths are remaining, or some of the threads are still running
while len(video_paths) != 0 or len(threads) != 0:
    # While there are more paths, and the maximum number of threads are not started
    while len(threads) < max_no_of_threads and len(video_paths) != 0:
        # Start a thread that runs the "analyse_video" function on the video_path
        # next in line
        t = threading.Thread(target=analyse_video, args=(video_paths[0],))
        threads.append(t)
        t.start()
        # Remove the video path so the next thread will take the next in line
        del video_paths[0]
    # Print a status of the threads
    print("%d/%d (%.2f min)"%(
            len(threads),
            len(threads)+len(video_paths),
            (time.time()-time_start)/60.0))
    time.sleep(10)
    # Drop finished threads; the outer loop exits once both lists are empty.
    threads = [t for t in threads if t.is_alive()]
print("Finished")
|
# Demonstrate an equality comparison: prints True because 10 equals 10.
print(10 == 10)
import numpy as np
import logging
# Module-level logger with a single INFO-level stream handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)-8s] [%(name)-8s] [%(levelname)-1s] [%(message)s]')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
# NOTE(review): runs at import time; re-executing this module (e.g. via
# importlib.reload) would attach a duplicate handler.
logger.addHandler(stream_handler)
class EMGaussianMixture(object):
    """Gaussian Mixture Model fitted with Expectation-Maximization.

    EM is run from ``restarts`` random initializations and the parameters
    achieving the best variational lower bound are kept.
    """

    def __init__(self, X, num_clusters, max_iter=100, restarts=10):
        self.X = X                        # (N x d) data matrix
        self.num_clusters = num_clusters  # number of mixture components C
        self.rtol = 1e-3                  # relative convergence tolerance
        self.max_iter = max_iter          # EM iterations per restart
        self.restarts = restarts          # number of random restarts

    def _E_step(self, pi, mu, sigma):
        """
        Performs E-step on GMM model
        Each input is numpy array:
        X: (N x d), data points
        pi: (C), mixture component weights
        mu: (C x d), mixture component means
        sigma: (C x d x d), mixture component covariance matrices
        Returns:
        gamma: (N x C), probabilities of clusters for objects
        """
        N = self.X.shape[0]  # number of objects
        gamma = np.zeros((N, self.num_clusters))  # distribution q(T)
        aux = np.zeros((N, self.num_clusters))
        norm = np.zeros(self.num_clusters)
        for c in range(self.num_clusters):
            # 1/sqrt(det) part of the Gaussian normalizer (the (2*pi)^(d/2)
            # factor cancels in the ratio below).
            norm[c] = 1.0 / np.sqrt(np.linalg.det(sigma[c, :, :]))
            z = (self.X - mu[c, :]).T  # (X-mu_c)
            # Solve SIGMA*y = (X-mu_c) instead of inverting SIGMA explicitly.
            y = np.linalg.solve(sigma[c, :, :], z)
            aux[:, c] = np.asarray([np.inner(z[:, n], y[:, n]) for n in range(N)])
        expo = norm * np.exp(-0.5 * (aux))
        Z = np.sum(expo * pi, axis=1)  # per-point partition function
        for c in range(self.num_clusters):
            gamma[:, c] = pi[c] * expo[:, c] / Z
        return gamma

    def _M_step(self, gamma):
        """
        Performs M-step on GMM model
        Each input is numpy array:
        X: (N x d), data points
        gamma: (N x C), distribution q(T)
        Returns:
        pi: (C)
        mu: (C x d)
        sigma: (C x d x d)
        """
        N = self.X.shape[0]  # number of objects
        d = self.X.shape[1]  # dimension of each object
        mu = np.zeros((self.num_clusters, d))
        sigma = np.zeros((self.num_clusters, d, d))
        pi = np.zeros(self.num_clusters)
        for c in range(self.num_clusters):
            # Responsibility-weighted mean.  (Removed an unused local that
            # duplicated this computation.)
            mu[c, :] = np.sum(gamma[:, c].reshape(-1, 1) * self.X, axis=0) / np.sum(gamma[:, c])
            z = (self.X - mu[c, :])
            # Responsibility-weighted outer products -> covariance.
            aux = np.asarray([gamma[n, c] * np.outer(z[n, :], z[n, :].T) for n in range(N)])
            sigma[c, :, :] = np.sum(aux, axis=0) / np.sum(gamma[:, c])
            pi[c] = np.sum(gamma[:, c]) / N
        pi = pi / np.sum(pi)  # guard against rounding drift
        return pi, mu, sigma

    def _compute_vlb(self, pi, mu, sigma, gamma):
        """
        Each input is numpy array:
        X: (N x d), data points
        gamma: (N x C), distribution q(T)
        pi: (C)
        mu: (C x d)
        sigma: (C x d x d)
        Returns value of variational lower bound
        """
        N = self.X.shape[0]  # number of objects
        d = self.X.shape[1]  # dimension of each object
        logdet = np.zeros(self.num_clusters)
        logpow = np.zeros((N, self.num_clusters))
        for c in range(self.num_clusters):
            logdet[c] = -0.5 * np.log((2 * np.pi) ** d * np.linalg.det(sigma[c, :, :]))
            z = (self.X - mu[c, :]).T  # (X-mu_c)
            y = np.linalg.solve(sigma[c, :, :], z)  # SIGMA^-1*(X-mu_c) via solve
            logpow[:, c] = -0.5 * np.asarray([np.inner(z[:, n], y[:, n]) for n in range(N)])
        loss = np.sum(np.sum(gamma * (np.log(pi) + logdet + logpow) - gamma * np.log(gamma), axis=1))
        return loss

    def train(self):
        '''
        Starts with random initialization *restarts* times
        Runs optimization until saturation with *rtol* reached
        or *max_iter* iterations were made.
        X: (N, d), data points
        C: int, number of clusters
        '''
        N = self.X.shape[0]  # number of objects
        d = self.X.shape[1]  # dimension of each object
        best_pi = None
        best_mu = None
        best_sigma = None
        best_loss = -1e+5
        for _ in range(self.restarts):
            logger.info("Restart {}".format(_))
            loss_old = 1.0
            ### Randomly initialize theta ###
            pi = np.random.rand(self.num_clusters)
            pi = pi / np.sum(pi)
            mu = self.X.min(axis=0) + np.random.rand(self.num_clusters, d) * (self.X.max(axis=0) - self.X.min(axis=0))
            # Covariances start as identity matrices.
            sigma = np.random.rand(self.num_clusters, d, d)
            for c in range(self.num_clusters):
                sigma[c, :, :] = np.diag(np.ones(d))
            ### Start iterating ###
            for it in range(self.max_iter):
                gamma = self._E_step(pi, mu, sigma)
                pi, mu, sigma = self._M_step(gamma)
                loss = self._compute_vlb(pi, mu, sigma, gamma)
                if np.isnan(loss):
                    # Numerical failure (e.g. singular covariance): abandon
                    # this restart.
                    break
                else:
                    if loss >= best_loss:
                        best_loss, best_pi, best_mu, best_sigma = loss, pi, mu, sigma
                    ### Check convergence criterion ###
                    eps = np.abs((loss - loss_old) / loss_old)
                    loss_old = loss
                    if eps <= self.rtol:
                        logger.info("Converged after {} iterations! Loss: {:.2f}".format(it, loss))
                        break
        ### Postprocessing ###
        best_gamma = self._E_step(best_pi, best_mu, best_sigma)
        labels = best_gamma.argmax(1)
        self.loss = best_loss
        self.pi = best_pi
        self.mu = best_mu
        # BUG FIX: previously stored the *last* restart's sigma here instead
        # of the best one, leaving self.sigma inconsistent with self.pi/mu.
        self.sigma = best_sigma
        self.labels = labels
        # BUG FIX: report the best loss, not the last computed one.
        logger.info("Training complete! Best loss: {:.2f}".format(best_loss))
        return labels, best_pi, best_mu, best_sigma
# EXTERNAL PACKAGES
from flask import Flask
from flask_sqlalchemy import SQLAlchemy, inspect
from sqlalchemy.ext.hybrid import hybrid_property
from flask_migrate import Migrate
import pandas as pd
# PYTHON STANDARD LIBRARY
import os
import csv
# CONFIGURE APPLICATION
app = Flask(__name__)
# NOTE(review): four slashes after 'sqlite:' means an *absolute* path, i.e.
# /database.db at the filesystem root — confirm this is intended rather
# than a project-relative 'sqlite:///database.db'.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////database.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class BaseMixin(object):
    """Shared ``id`` primary key plus convenience query/CRUD helpers.

    Intended to be mixed into every ``db.Model`` subclass in the app.
    """
    # COLUMNS
    id = db.Column(db.Integer, primary_key=True)
    @hybrid_property
    def columns(self):
        """Return the column names of the model's table."""
        return self.__table__.columns.keys()
    @classmethod
    def dataframe(cls):
        """Return a pandas DataFrame holding every row of the table.

        NOTE(review): loads the entire table into memory; fine for small
        tables, reconsider for large ones.
        """
        data = [{k: v for (k, v) in vars(obj).items() if k in obj.columns} for obj in cls.query.all()]
        return pd.DataFrame.from_records(data)
    @classmethod
    def create(cls, **kw):
        """Instantiate from keyword args, persist, and return the object."""
        obj = cls(**kw)
        db.session.add(obj)
        db.session.commit()
        return obj
    @classmethod
    def get(cls, id):
        """Return the object with the given primary key, or None."""
        return db.session.query(cls).filter_by(id=id).one_or_none()
    @classmethod
    def unique(cls, **kw):
        """Return one object where kw=value and kw is a column with unique constraint"""
        return db.session.query(cls).filter_by(**kw).one_or_none()
    @classmethod
    def all(cls):
        """Return all objects of class"""
        return db.session.query(cls).all()
    @classmethod
    def filter_by(cls, **kw):
        """Return a query filtered by the given column=value pairs."""
        return db.session.query(cls).filter_by(**kw)
class MyModel(db.Model, BaseMixin):
    """Example table: one unique string column and two numeric columns."""
    __tablename__ = 'my_model'
    # COLUMNS
    column1 = db.Column(db.String, unique=True, nullable=False)
    column2 = db.Column(db.Integer)
    # BUG FIX: SQLAlchemy has no `Decimal` column type; the fixed-precision
    # decimal type is `Numeric` (values come back as decimal.Decimal).
    column3 = db.Column(db.Numeric)
@app.route('/')
def index():
    """Render every row of MyModel as an HTML table."""
    # BUG FIX: the original referenced an undefined `FirstModel` (the model
    # defined in this file is `MyModel`) and returned the bound method
    # `df.to_html` without calling it, so Flask would receive a method
    # object instead of an HTML string.
    df = MyModel.dataframe()
    return df.to_html()
# Run the development server when executed directly (debug mode — not for
# production use).
if __name__ == '__main__':
    app.run(debug=True)
|
"""Python ChartMogul Enrichment API Wrapper
This file implements a simple wrapper around the ChartMogul enrichment API.
"""
import requests
class ChartMogulEnrichmentClient:
    """Enrichment API Wrapper Class

    Thin wrapper around the ChartMogul enrichment endpoints, authenticating
    with (account_token, secret_key) basic auth.
    """
    def __init__(self, account_token=None, secret_key=None,
                 base_url='https://api.chartmogul.com/v1/'):
        self.auth = (account_token, secret_key)
        self.base_url = base_url

    def _get(self, endpoint, params=None):
        """GET *endpoint* with auth, retrying once on HTTPError.

        Consolidates the retry boilerplate that was duplicated in every
        public method.  Raises requests.exceptions.HTTPError if the final
        response has an error status.
        """
        try:
            response = requests.get(endpoint, auth=self.auth, params=params)
        except requests.exceptions.HTTPError:
            # Single best-effort retry, matching the original behaviour.
            response = requests.get(endpoint, auth=self.auth, params=params)
        response.raise_for_status()
        return response.json()

    def list_customers(self, page=1, per_page=200):
        """Return one page of customers as parsed JSON."""
        payload = {
            'page': page,
            'per_page': per_page
        }
        return self._get(self.base_url + 'customers', params=payload)

    def get_customer(self, uuid):
        """Return the customer identified by *uuid* as parsed JSON."""
        return self._get(self.base_url + 'customers/' + uuid)

    def search_customers(self, email):
        """Return customers matching *email* as parsed JSON.

        BUG FIX: the address is now passed via ``params`` so requests
        URL-encodes it; the original interpolated the raw string into the
        query, which breaks for addresses containing '+', '&', etc.
        """
        return self._get(self.base_url + 'customers/search',
                         params={'email': email})
|
import datetime
from django.db import models
from django.utils import timezone
class BaseModel(models.Model):
    """Abstract base adding creation/modification timestamps to every model."""
    created_date = models.DateTimeField(auto_now_add=True)  # set once on insert
    modified_date = models.DateTimeField(auto_now=True)     # refreshed on every save
    class Meta:
        abstract = True  # no table of its own; fields are inherited
class Genre(BaseModel):
    """A book genre (e.g. a category a Book belongs to)."""
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
class Author(BaseModel):
    """A book author."""
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
class Book(BaseModel):
    """A book written by an Author within a Genre."""
    author = models.ForeignKey(Author, on_delete=models.CASCADE, related_name='books')
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE, related_name='books')
    title = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    def __str__(self):
        return self.title
    @property
    def was_published_recently(self):
        """True when pub_date falls within the last 100 days.

        BUG FIX: the original one-sided check also returned True for books
        whose pub_date lies in the future; the test is now bounded on both
        sides.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=100) <= self.pub_date <= now
|
import pandas as pd
from PIL import Image
import numpy as np
from skimage import metrics
import imagehash
from .flag_util import FlagUtil
import operator
import os
class FlagIdentifier:
def __init__(self):
'''
Initializes a FlagIdentifier object that has a FlagUtil object and DataFrame of
countries and their flags.
'''
self.util = FlagUtil()
# reading in a df of file names which store numpy arrays
self.flag_df = pd.read_csv(os.path.join(os.path.dirname(__file__), "flag_df.csv"), index_col = "country")
# converting the files to numpy arrays
self.flag_df["flag"] = self.flag_df["flag"].apply(self.util.makeArray)
def get_flag_df(self):
'''
Returns a copy of this FlagIdentifier's flag DataFrame.
Returns
-------
DataFrame
A copy of this FlagIdentifier's flag DataFrame
'''
return self.flag_df.copy()
def get_country_list(self):
'''
Returns a list of all 195 current countries stored in this FlagIdentifier's DataFrame
Return
------
list
A list of all the countries in this FlagIdentifier's DataFrame
'''
return list(self.get_flag_df().index.values)
def display(self, country):
'''
Displays the flag of the given country.
Parameters
----------
country : str
The country whose flag is to be displayed
'''
self.get_flag_img(country).show()
def get_flag_img(self, country):
'''
Returns an Image object of the flag of the given country.
Parameters
----------
country : str
The country whose flag is returned
Returns
-------
Image
An Image object of the flag of the given country
'''
return Image.fromarray(self.flag_df["flag"].loc[country.title()])
def flag_dist(self, countryA, countryB, method = "mse"):
'''
Uses the given method (one of mse, ssim, or hash) to find the distance
between the two flags of the two given countries.
Parameters
----------
countryA : str
The name of the first country
countryB : str
The name of the second country
method : str
The method (one of mse, ssim, or hash) used to find distance between the
flags of the two given countries. For both mse and hash, smaller values mean higher
similarity and a value of 0 means the two flags are identical. For ssim, higher values mean
higher similarity and the value must be between -1 and 1. A value of 1 for ssim
means the two flags are identical.
Returns
-------
float
The distance between the two flags of the two given countries
'''
flagA = self.flag_df["flag"].loc[countryA.title()]
flagB = self.flag_df["flag"].loc[countryB.title()]
if method == "mse":
return self.__mse(flagA, flagB)
elif method == "ssim":
return self.__ssim(flagA, flagB)
elif method == "hash":
return self.__hash(flagA, flagB)
else:
raise ValueError("method must be one of: mse, ssim, hash")
def __mse(self, imageA, imageB):
    '''
    Returns the mean-squared error between the two given images. Lower values
    mean a higher similarity between the two images (0 is perfect similarity).

    Parameters
    ----------
    imageA : array
        A numpy array representing the first image
    imageB : array
        A numpy array representing the second image

    Returns
    -------
    float
        The mean-squared error between the two given images
    '''
    # credit to: https://www.pyimagesearch.com/2014/09/15/python-compare-two-images/
    # Note: normalised by height * width only (not channel count), matching
    # the reference implementation.
    diff = imageA.astype("float") - imageB.astype("float")
    pixel_count = float(imageA.shape[0] * imageA.shape[1])
    return np.sum(diff ** 2) / pixel_count
def __hash(self, imageA, imageB):
    '''
    Returns the hash distance between the two given images. Lower values mean
    a higher similarity between the two images (0 is perfect similarity).

    Parameters
    ----------
    imageA : array
        A numpy array representing the first image
    imageB : array
        A numpy array representing the second image

    Returns
    -------
    int
        The hash distance between the two given images
    '''
    hashA = imagehash.average_hash(Image.fromarray(imageA))
    hashB = imagehash.average_hash(Image.fromarray(imageB))
    # imagehash overloads subtraction to return the Hamming distance
    # between the two hashes.
    return hashA - hashB
def __ssim(self, imageA, imageB):
    '''
    Returns the structural similarity index measure (ssim) between the two
    given images. Higher values mean a higher similarity between the two
    images (1 is perfect similarity).

    Parameters
    ----------
    imageA : array
        A numpy array representing the first image
    imageB : array
        A numpy array representing the second image

    Returns
    -------
    float
        The structural similarity index measure (ssim) between the two images
    '''
    similarity = metrics.structural_similarity(imageA, imageB, multichannel=True)
    return similarity
def closest_flag(self, country, method = "mse"):
    '''
    Returns the name of the country whose flag is most similar to the flag
    of the given country. Uses the given method to find the closest flag.

    For mse and hash a *smaller* distance means more similar, so the
    comparison operator is operator.lt; for ssim a *larger* score means
    more similar, so operator.gt is used.

    Parameters
    ----------
    country : str
        The name of the country
    method : str
        The method (one of mse, ssim, or hash) used to find the flag that is
        most similar to that of the given country

    Returns
    -------
    str
        The name of the country whose flag is most similar to that of the
        given country
    '''
    comparators = {"mse": operator.lt, "hash": operator.lt, "ssim": operator.gt}
    if method not in comparators:
        raise ValueError("method must be one of: mse, ssim, or hash")
    return self.__abstract_compare_flags(country.title(), comparators[method], method)
def farthest_flag(self, country, method = "mse"):
    '''
    Returns the name of the country whose flag is least similar to the flag
    of the given country. Uses the given method to find the farthest flag.

    The comparison operators are the reverse of ``closest_flag``: for
    mse/hash a *larger* distance is farther (operator.gt); for ssim a
    *smaller* score is farther (operator.lt).

    Parameters
    ----------
    country : str
        The name of the country
    method : str
        The method (one of mse, ssim, or hash) used to find the flag that is
        least similar to that of the given country

    Returns
    -------
    str
        The name of the country whose flag is least similar to that of the
        given country
    '''
    comparators = {"mse": operator.gt, "hash": operator.gt, "ssim": operator.lt}
    if method not in comparators:
        raise ValueError("method must be one of: mse, ssim, or hash")
    return self.__abstract_compare_flags(country.title(), comparators[method], method)
def __abstract_compare_flags(self, country, op, method):
    '''
    Finds the country whose flag is "closest" (according to the given operation)
    to the flag of the given country. Uses the given method to calculate the distance
    between flags.

    Parameters
    ----------
    country : str
        The name of the country
    op : operator
        The operator used to find the "closest" flag
    method : str
        The method (one of mse, ssim, or hash) used to find the distance between
        flags

    Returns
    -------
    str
        The name of the country whose flag is "closest" to the flag of the given
        country
    '''
    # -1 is a sentinel meaning "no distance recorded yet"; the first real
    # distance always wins via the `best_dist == -1` clause below.
    best_dist = -1
    # NOTE(review): despite the name, this holds the best match for either
    # direction of comparison (closest or farthest, depending on `op`).
    max_country = 0
    for c in self.flag_df.index:
        dist = self.flag_dist(country, c, method = method)
        # Skip the country itself so it never matches its own flag.
        if (op(dist, best_dist) or best_dist == -1) and c != country:
            best_dist = dist
            max_country = c
    return max_country
def identify(self, url, method = "mse"):
    '''
    Returns the name of the country whose flag is most similar to the flag
    in the image represented by the given url.

    Parameters
    ----------
    url : str
        The url that links to an image of a flag to be identified
    method : str
        The method (one of mse, ssim, or hash) used to find the flag that
        is most similar to the one in the image of the given url

    Returns
    -------
    str
        The name of the country whose flag is most similar to the one
        represented by the url

    Raises
    ------
    ValueError
        If method is not one of mse, ssim, hash
    '''
    # mse/hash: smaller distance is better (lt); ssim: larger score is
    # better (gt).
    if method == "mse":
        return self.__abstract_identify(url, self.__mse, operator.lt)
    elif method == "ssim":
        return self.__abstract_identify(url, self.__ssim, operator.gt)
    elif method == "hash":
        return self.__abstract_identify(url, self.__hash, operator.lt)
    # Fixed: the message previously read "method must one of" (missing
    # "be") and listed the options in a different order than siblings.
    raise ValueError("method must be one of: mse, ssim, hash")
def __abstract_identify(self, url, dist_func, op):
    '''
    Returns the name of the country whose flag is most similar to the
    image represented by the url. Uses the given dist_func and operator to find the
    closest flag.

    Parameters
    ----------
    url : str
        The url that links to an image of a flag to be identified
    dist_func : function
        A distance function (either mse, ssim, or hash) to find the closest
        flag to the one in the url
    op : operator
        The operator used to find the flag closest to the one represented
        by the url

    Returns
    -------
    str
        The name of the country whose flag is most similar to the one in the
        image of the url
    '''
    # Download/normalise the image so it is comparable to the stored flags.
    flag = self.util.process_img(url)
    # -1 is a sentinel for "no distance recorded yet"; the first computed
    # distance always becomes the initial best via the clause below.
    best_dist = -1
    closest_index = 0
    for c in self.flag_df.index:
        cur_flag = self.flag_df["flag"].loc[c]
        dist = dist_func(flag, cur_flag)
        if op(dist, best_dist) or best_dist == -1:
            best_dist = dist
            closest_index = c
    return closest_index
#q1
# keys = ['Ten', 'Twenty', 'Thirty']
# values = [10, 20, 30]
# l=[]
# for i in keys:
# for j in values:
# l.append((i,j))
# a={}
# a.update(l)
# print(a)
##second method
# l=[]
# i=0
# while i<len(keys):
# l.append((keys[i],values[i]))
# i+=1
# a={}
# a.update(l)
# print(a)
# q2
# dict1 = {'Ten': 10, 'Twenty': 20, 'Thirty': 30}
# dict2 = {'Thirty': 30, 'Fourty': 40, 'Fifty': 50}
# l={}
# l.update(dict1)
# l.update(dict2)
# print(l)
#q3
# sampleDict = {
# "class":{
# "student":{
# "name":"Mike",
# "marks":{
# "physics":70,
# "history":80
# }
# }
# }
# }
# print(sampleDict["class"]["student"]["marks"]["history"])
#q4
# q4: build one dictionary mapping every employee to the same defaults.
employees = ['Kelly', 'Emma', 'John']
defaults = {"designation": 'Application Developer', "salary": 8000}
# Note: every key deliberately shares the *same* defaults dict object
# (identical to dict.fromkeys(employees, defaults)).
Dict1 = {employee: defaults for employee in employees}
print(Dict1)
print(Dict1["Kelly"])
#q5
# sampleDict = {
# "name": "Kelly",
# "age":25,
# "salary": 8000,
# "city": "New york" }
# keys = ["name", "salary"] #not done
# for i in keys:
# dic1={}
# dic1.update({i:sampleDict[i]})
# print(dic1)
# l={}
# l.update(dic1)
# print(l)
#q6
# sampleDict = {
# "name": "Kelly", #not
# "age":25,
# "salary": 8000,
# "city": "New york"
# }
# keysToRemove = ["name", "salary"]
# # for i in sampleDict:
# # if "name" and "salary" in sampleDict:
# print()
|
from tkinter import *
import tkinter.filedialog
import tkinter.messagebox
import os
class Menus:
    """Builds the editor's menu bar (File / Appearance / About)."""

    def __init__(self, master):
        self.master = master
        self.file_operator_obj = FileOperators()
        self.text_appear_obj = TextAppearance()
        menubar = Menu(master)
        master.config(menu=menubar)
        # File menu: document lifecycle commands.
        file_menu = Menu(menubar, tearoff=FALSE)
        menubar.add_cascade(label="File", menu=file_menu)
        for label, command in (
            ("New", self.file_operator_obj.new_file),
            ("Open", self.file_operator_obj.open_file),
            ("Save", self.file_operator_obj.save_file),
            ("Save As...", self.file_operator_obj.save_as),
        ):
            file_menu.add_command(label=label, command=command)
        file_menu.add_separator()
        file_menu.add_command(label="Exit", command=master.destroy)
        # Appearance menu: selection formatting commands.
        appear_menu = Menu(menubar, tearoff=FALSE)
        menubar.add_cascade(label="Appearance", menu=appear_menu)
        for label, command in (
            ("Bold", self.text_appear_obj.bold),
            ("Italic", self.text_appear_obj.italic),
            ("Underline", self.text_appear_obj.underline),
            ("Highlight", self.text_appear_obj.highlight),
        ):
            appear_menu.add_command(label=label, command=command)
        appear_menu.add_separator()
        appear_menu.add_command(label="Clear", command=self.text_appear_obj.clear)
        # About menu.
        about_menu = Menu(menubar, tearoff=FALSE)
        menubar.add_cascade(label="About", menu=about_menu)
        about_menu.add_command(label="Info", command=self.file_operator_obj.about_window)
class Buttons:
    """Builds the toolbar row of shortcut buttons."""

    def __init__(self, master):
        self.master = master
        self.file_operator_obj = FileOperators()
        self.text_appear_obj = TextAppearance()
        toolbar = Frame(master)
        toolbar.pack()
        # (label, callback) pairs, packed left-to-right in this order.
        button_specs = (
            ("New", self.file_operator_obj.new_file),
            ("Save", self.file_operator_obj.save_file),
            ("Bold", self.text_appear_obj.bold),
            ("Italic", self.text_appear_obj.italic),
            ("Quit", master.destroy),
        )
        for label, command in button_specs:
            Button(toolbar, text=label, command=command).pack(side=LEFT, padx=2)
class FileOperators:
    """File-level operations (new/open/save) for the global ``text`` widget.

    The name of the most recently opened file is kept in the module-level
    global ``x`` so that ``save_file`` can write back to the same file.
    """

    def __init__(self):
        # Name of the current document, or None before any file is used.
        self.filename = None

    def new_file(self):
        """Clear the editor and start an untitled document."""
        self.filename = "Untitled"
        text.delete(1.0, END)

    def open_file(self):
        """Ask for a file and load it; do nothing if the dialog is cancelled."""
        f = tkinter.filedialog.askopenfile(mode="r")
        # Fixed: a cancelled dialog returns None, and the old code then
        # crashed on `f.name` outside its try/except.
        if f is None:
            return
        try:
            t = f.read()
            text.delete(1.0, END)
            text.insert(1.0, t)
        finally:
            f.close()
        global x
        x = os.path.split(f.name)[1]

    def save_file(self):
        """Write the editor contents back to the last opened file, if any."""
        global x
        try:
            t = text.get(1.0, END)
            # Fixed: close the handle even if the write fails.
            f = open(x, "w")
            try:
                f.write(t)
            finally:
                f.close()
        except NameError:
            # No file has been opened yet, so the global x is undefined.
            pass

    def save_as(self):
        """Ask for a target file and write the editor contents to it."""
        f = tkinter.filedialog.asksaveasfile(mode="w", defaultextension=".txt")
        # Cancelled dialog returns None (previously handled by catching
        # AttributeError); also close the handle, which the old code leaked.
        if f is None:
            return
        try:
            f.write(text.get(1.0, END).rstrip())
        finally:
            f.close()

    def about_window(self):
        """Show the About dialog."""
        tkinter.messagebox.showinfo("About", "Created by Pei Lin Li \n 2015")
class TextAppearance:
    """Character formatting applied to the current selection of ``text``."""

    def _tag_selection(self, tag, **config):
        """Configure *tag* and apply it to the selection; ignore empty selections."""
        text.tag_config(tag, **config)
        try:
            text.tag_add(tag, "sel.first", "sel.last")
        except TclError:
            # Nothing is selected.
            pass

    def bold(self):
        self._tag_selection("bt", font=("Georgia", "12", "bold"))

    def italic(self):
        self._tag_selection("it", font=("Georgia", "12", "italic"))

    def underline(self):
        self._tag_selection("ut", font=("Georgia", "12", "underline"))

    def highlight(self):
        # Fixed: the font family was "Georgia," (stray comma inside the
        # string), which is not a valid family name; use "Georgia" like
        # the other styles.
        self._tag_selection("ht", font=("Georgia", "12"), background="yellow")

    def clear(self):
        """Remove all formatting tags from the current selection."""
        try:
            for tag in ("bt", "it", "ut", "ht"):
                text.tag_remove(tag, "sel.first", "sel.last")
        except TclError:
            # Nothing is selected.
            pass
# Build the main window: a fixed-size editor with a title banner, the menu
# bar and toolbar, and the text area that all commands operate on.
root = Tk()
root.title("Python Tkinter Text Editor")
root.minsize(width=450, height=580)
root.maxsize(width=450, height=580)
name = Label(root, text="Python Tkinter Text Editor v3.0", bd=2, anchor=N, font=24, relief=GROOVE)
name.pack(side=TOP, fill=X)
run_1 = Menus(root)
run_2 = Buttons(root)
# Fixed: the font family was "Georgia," (stray comma inside the string);
# "Georgia" is the intended family name.
text = Text(root, font=("Georgia", "12"), width=55, height=32, relief=SUNKEN)
text.pack()
root.mainloop()
|
from openerp import models, fields
class Product_merk(models.Model):
    # Master table of product brands ("merk" presumably means "brand" —
    # TODO confirm intended translation).
    _name = 'product.merk'

    # Required brand name, limited to 50 characters.
    name = fields.Char('Merk', size=50, required=True)
class Product_template(models.Model):
    # Extends the existing product.template model in place: _inherit with
    # the same _name adds fields to the original model rather than creating
    # a new one.  NOTE(review): the explicit _name is redundant here.
    _name = 'product.template'
    _inherit = 'product.template'

    # Link from a product template to its brand record.
    merk_id = fields.Many2one('product.merk','Merk')
|
"""
"""
from .optimizations import \
( BasicOptimization
, ASTOptimization
, ByteCodeOptimization
, all_optimizations
, ast_optimizations
, bytecode_optimizations
, install
, uninstall
)
from .transform import \
( optimize
, get_source
)
from .hook import \
( activate
, deactivate
)
__version__ = (0, 2)
__author__ = 'Alexander Marshalov'
__email__ = '_@marshalov.org'
__url__ = 'https://github.com/Amper/opyum'

# Names re-exported as the package's public API.
__all__ = [
    "optimizations",
    "BasicOptimization",
    "ASTOptimization",
    "ByteCodeOptimization",
    "optimize",
    "get_source",
    "activate",
    "deactivate",
    "all_optimizations",
    "ast_optimizations",
    "bytecode_optimizations",
    "install",
    "uninstall",
]
|
# Distributed processing: compute the last digit of A**B (printing 10 when
# the digit is 0 — presumably because the answers are numbered 1..10; TODO
# confirm against the original problem statement).
import sys

N = int(sys.stdin.readline().rstrip())  # number of test cases
for _ in range(N):
    A,B = map(int, sys.stdin.readline().rstrip().split())
    # Bases whose last digit never changes under multiplication are
    # special-cased up front.
    if A == 1:
        print(1)
        continue
    elif A == 5:
        print(5)
        continue
    elif A == 6:
        print(6)
        continue
    # Building A**B as a string is far too slow (times out); instead derive
    # the repeating cycle of last digits produced by successive powers of A.
    result = []
    temp = 1
    for _ in range(B):
        temp *= A
        temp %= 10
        if temp in result:
            # Cycle detected: `result` now holds exactly one full period.
            break
        result.append(temp)
    # Pick the digit for exponent B from the cycle.  When B % len(result)
    # is 0 the index -1 selects the last digit of the cycle, as intended.
    final_result = result[B % len(result) - 1]
    if final_result == 0:
        print(10)
    else: print(final_result)
|
from collections import namedtuple
from io import BytesIO
from os.path import splitext, basename
from sqlalchemy import and_
from onegov.org.models import GeneralFileCollection, GeneralFile
from onegov.translator_directory import _
from docxtpl import DocxTemplate, InlineImage
def fill_docx_with_variables(
    original_docx, t, request, signature_file=None, **kwargs
):
    """ Fills the variables in a docx file with the given key-value pairs.

    The original_docx template contains Jinja-Variables that map to keys
    in the template_variables dictionary.

    Parameters:
        original_docx: the docx file the DocxTemplate is built from
        t: the translator whose attributes fill most template variables
        request: used to translate the language-category labels
        signature_file: optional image file rendered as ``sender_signature``
        **kwargs: additional template variables; falsy values become ''

    Returns A tuple containing two elements:
    - Variables that were found to be None or empty.
    - The rendered docx file (bytes).
    """
    docx_template = DocxTemplate(original_docx)
    # Static translator attributes mapped onto the template's Jinja names.
    template_variables = {
        'translator_last_name': t.last_name,
        'translator_first_name': t.first_name,
        'translator_nationality': t.nationality,
        'translator_address': t.address,
        'translator_city': t.city,
        'translator_zip_code': t.zip_code,
        'translator_occupation': t.occupation,
        # One line per non-empty language category, e.g.
        # "Spoken languages: French, German".
        'translator_languages': '\n'.join(
            ''.join(
                [request.translate(lang_type) + ': ']
                + [', '.join([str(language) for language in langs])]
            )
            for langs, lang_type in (
                (t.spoken_languages, _('Spoken languages')),
                (t.written_languages, _('Written languages')),
                (t.monitoring_languages, _('Monitoring languages')),
            )
            if langs
        ),
        'greeting': gendered_greeting(t),
        'translator_functions': ', '.join(list(translator_functions(t))),
    }
    # Caller-supplied variables; falsy values are coerced to ''.
    for key, value in kwargs.items():
        template_variables[key] = value or ''
    if signature_file:
        template_variables['sender_signature'] = FixedInplaceInlineImage(
            docx_template, signature_file
        )
    # Report empty variables separately so callers can warn about them;
    # render only with the non-empty values.
    found_nulls = {k: v for k, v in template_variables.items() if not v}
    if found_nulls:
        non_null_values = {
            k: v for k, v in template_variables.items() if
            k not in found_nulls
        }
        return found_nulls, render_docx(docx_template, non_null_values)
    else:
        return {}, render_docx(docx_template, template_variables)
class FixedInplaceInlineImage(InlineImage):
    """InlineImage variant that zeroes the image's text distances so it
    sits flush with the surrounding text."""

    def _insert_image(self):
        inline_pic = self.tpl.current_rendering_part.new_pic_inline(
            self.image_descriptor, self.width, self.height
        ).xml
        inline_pic = self.fix_inline_image_alignment(inline_pic)
        return (
            '</w:t></w:r><w:r><w:drawing>%s</w:drawing></w:r><w:r>'
            '<w:t xml:space="default">' % inline_pic
        )

    def fix_inline_image_alignment(self, orig_xml):
        """ Fixes the position of the image by setting the `distL` to zero."""
        zero_distances = ' distT="0" distB="0" distL="0" distR="0"'
        # Inject the attributes right after the first 'wp:inline' tag name;
        # if the tag is absent the XML is returned untouched.
        return orig_xml.replace('wp:inline', 'wp:inline' + zero_distances, 1)
def render_docx(docx_template, template_variables):
    """ Renders the template with the given variables and returns the raw
    bytes of the finished word file.

    template_variables: dictionary of values to find and replace in the
    final word file. Values not present are simply ignored.
    """
    docx_template.render(template_variables)
    buffer = BytesIO()
    docx_template.save(buffer)
    return buffer.getvalue()
def translator_functions(translator):
    """Yield the German labels of the functions the translator performs,
    based on which language lists are non-empty."""
    categories = (
        ('written_languages', 'Übersetzen'),
        ('spoken_languages', 'Dolmetschen'),
        ('monitoring_languages', 'Kommunikationsüberwachung'),
    )
    for attribute, label in categories:
        if getattr(translator, attribute):
            yield label
def gendered_greeting(translator):
    """Return the German salutation matching the translator's gender,
    falling back to the gender-neutral form."""
    greetings = {
        "M": "Sehr geehrter Herr",
        "F": "Sehr geehrte Frau",
    }
    return greetings.get(translator.gender, "Sehr geehrte*r Herr/Frau")
def parse_from_filename(abs_signature_filename):
    """ Parses information from the filename. The delimiter is '__'.

    This is kind of implicit here, information about the user is stored in
    the filename of the signature image of the user, e.g.
    'Unterschrift__AB__John_Doe__Some_Function.png'.
    """
    stem = splitext(basename(abs_signature_filename))[0]
    parts = stem.replace('Unterschrift__', '').split('__')
    Signature = namedtuple(
        'Signature',
        ['sender_abbrev', 'sender_full_name', 'sender_function'],
    )
    # Underscores within a field stand for spaces.
    return Signature(
        sender_abbrev=parts[0],
        sender_full_name=parts[1].replace('_', ' '),
        sender_function=parts[2].replace('_', ' '),
    )
def signature_for_mail_templates(request):
    """ The signature of the current user. It is an image that is manually
    uploaded. It should contain the string 'Unterschrift', as well as the
    first and last name of the user.

    Returns the first matching GeneralFile, or None if no signature image
    has been uploaded for the current user.
    """
    # NOTE(review): assumes realname is exactly "First Last"; a missing
    # realname or a middle name raises here — confirm upstream guarantees.
    first_name, last_name = request.current_user.realname.split(' ')
    # Match any general file whose name starts with 'Unterschrift' and
    # contains both name parts.
    query = GeneralFileCollection(request.session).query().filter(
        and_(
            GeneralFile.name.like('Unterschrift%'),
            GeneralFile.name.like(f'%{first_name}%'),
            GeneralFile.name.like(f'%{last_name}%'),
        )
    )
    return query.first()
|
import inspect
import os
import re
import arg
from django.db import transaction
import djarg
import daf.contrib
import daf.registry
import daf.utils
class ActionMeta(type):
    """A metaclass for validating and registering actions"""

    def __new__(meta, name, bases, class_dict):
        cls = super().__new__(meta, name, bases, class_dict)
        if not cls.is_abstract:
            # Concrete actions must pass the definition checks, and are
            # registered unless explicitly opted out.
            cls.check_class_definition()
            if not cls.unregistered:
                daf.registry._register_action(cls)
        return cls
class Action(metaclass=ActionMeta):
    """
    The core Action class.

    Given an ``app_label`` and ``callable``, the Action class automatically
    generates attributes that can be overridden by a user. These attributes
    influence every interface built directly from the Action. Change attributes
    on the Action object to affect every interface.
    """

    ###
    # Static action properties.
    #
    # Static action properties can only be set directly on the class.
    # These properties are all queryable in the action registry.
    ###

    @daf.utils.classproperty
    def name(cls):
        """The identifying name of the action"""
        # Derived from the wrapped callable's underlying function name.
        return arg.s()(cls.func).func.__name__

    #: The app to which the action belongs.
    app_label = ''

    @daf.utils.classproperty
    def uri(cls):
        """The URI is the unique identifier for the action."""
        return f'{cls.app_label}.{cls.name}'

    @daf.utils.classproperty
    def url_name(cls):
        """The default URL name for URL-based interfaces"""
        return f'{cls.app_label}_{cls.name}'

    @daf.utils.classproperty
    def url_path(cls):
        """The default URL path for URL-based interfaces"""
        # Underscores become dashes, e.g. "my_app/my_action" -> "my-app/my-action".
        return os.path.join(
            cls.app_label.replace('_', '-'), cls.name.replace('_', '-')
        )

    @daf.utils.classproperty
    def permission_codename(cls):
        """
        Returns the name of the permission associated with the action
        """
        return f'{cls.app_label}_{cls.name}_action'

    @daf.utils.classproperty
    def permission_uri(cls):
        """
        The full permission URI, which includes the "daf" app label
        under which all DAF permissions are saved
        """
        return f'daf.{cls.permission_codename}'

    ###
    # Dynamic action properties
    #
    # Dynamic action properties can be set on the class or dynamically
    # determined with an associated get_{property_name} function.
    # Some dynamic properties will take different arguments depending on
    # the context of how they are called. For example, the success URL
    # is only obtained after a successful action run, so it contains
    # all returned values.
    ###

    @daf.utils.classproperty
    def display_name(cls):
        """The display name is used to render UI headings and other elements"""
        return cls.name.replace('_', ' ').title()

    @daf.utils.classproperty
    def success_message(cls):
        """The success message displayed after successful action runs"""
        return f'Successfully performed "{cls.display_name.lower()}"'

    @classmethod
    def get_success_message(cls, args, results):
        """Obtains a success message based on callable args and results"""
        return cls.success_message

    #: The URL one goes to after a successful action
    success_url = '.'

    @classmethod
    def get_success_url(cls, args, results):
        """Obtain a success url based on callable args and results"""
        return cls.success_url

    ###
    # Action running.
    #
    # The wrapper around the action function in constructed, and the
    # action itself can be executed with __call__.
    ###

    #: The main action callable
    callable = None

    #: The wrapper around the callable. Attach exception metadata
    #: by default for interoperability with other tools
    wrapper = arg.contexts(daf.contrib.attach_error_metadata)

    @classmethod
    def get_wrapper(cls):
        # A utility so that instance methods can safely access
        # the class wrapper variable. self.wrapper() will use
        # "self" as an argument when calling
        return cls.wrapper

    @daf.utils.classproperty
    def func(cls):
        """The function called by the action"""
        # The wrapper is applied lazily so subclasses can override either
        # `wrapper` or `callable` independently.
        return cls.get_wrapper()(cls.callable)

    def __call__(self, *args, **kwargs):
        """
        A utility for calling the main action. Note that this is not
        used
        """
        return self.func(*args, **kwargs)

    ###
    # Action interfaces.
    #
    # These properties are not meant to be overridden. They are
    # determined as interface classes are created for an action.
    ###

    # The interfaces registered to the action
    interfaces = {}

    ###
    # Abstract properties.
    #
    # These properties help in creating abstract actions. Abstract
    # actions are not registered and are used to build other actions.
    ###

    # True if the class is abstract. Note this property must be
    # overridden in each child class to declare it as abstract.
    abstract = True

    @daf.utils.classproperty
    def is_abstract(cls):
        """
        True if the action is an abstract action, False otherwise

        Do not override this helper, otherwise actual abstract
        actions could appear as concrete
        """
        # Read from __dict__ (not inherited) so each subclass must
        # re-declare `abstract = True` explicitly to stay abstract.
        return cls.__dict__.get('abstract', False)

    # True if the action should not populate the registry
    unregistered = False

    ###
    # Action class checkers.
    #
    # When actions are registered, class definitions are checked to ensure
    # actions are set up correctly.
    ###

    @classmethod
    def definition_error(cls, msg):
        # Raised as AttributeError so a broken class definition surfaces
        # at class-creation (registration) time.
        raise AttributeError(f'{cls.__name__} - {msg}')

    @classmethod
    def check_class_definition(cls):
        """
        Verifies all properties have been filled out properly for the action
        class. Called by the metaclass only on concrete actions
        """
        if not cls.callable:
            cls.definition_error('Must provide "callable" attribute.')

        if not re.match(r'\w+', cls.name):
            cls.definition_error('Must provide alphanumeric "name" attribute.')

        if not re.match(r'\w+', cls.app_label):
            cls.definition_error(
                'Must provide alphanumeric "app_label" attribute.'
            )

        # Django permission codenames are limited to 100 characters.
        if len(cls.permission_codename) > 100:
            cls.definition_error(
                f'The permission_codename "{cls.permission_codename}"'
                ' exceeds 100 characters. Try making a shorter action name'
                ' or manually overridding the permission_codename attribute.'
            )
class ModelAction(Action):
    """
    An action associated with a model.

    Requires that the ``model`` attribute point to the
    Django ``Model`` class associated with the action.

    Includes all of the core properties of `Action`, but also defines
    other properties and creates automatic default values for others:
    """

    abstract = True

    #: The model the action is associated with
    model = None

    @daf.utils.classproperty
    def app_label(cls):
        """The app label to which this action belongs"""
        # Inherited from the model's app rather than set manually.
        return cls.model_meta.app_label

    @daf.utils.classproperty
    def model_meta(cls):
        """The model._meta instance"""
        return cls.model._meta

    @daf.utils.classproperty
    def queryset(cls):
        """The main queryset, if any, the action is associated with"""
        return cls.model._default_manager.all()

    @classmethod
    def check_class_definition(cls):
        """
        Verifies all properties have been filled out properly for the action
        class. Called by the metaclass only on concrete actions
        """
        super().check_class_definition()

        if not cls.model:
            cls.definition_error('Must provide "model" attribute.')
class ObjectAction(ModelAction):
    """
    An action associated with a single model object.

    Similar to `ModelAction`, an `ObjectAction` updates a single model
    object. It requires an ``object_arg`` attribute which specifies which
    argument of ``callable`` is the model object.

    `ObjectAction` exposes an ``object`` variable that is automatically
    included as a default argument when running the wrapped callable.
    Allowing your function to take an ``object`` parameter will make it
    work seamlessly with object actions.

    By default, the ``wrapper`` for `ObjectAction` automatically:

    1. Parametrizes the run of the individual callable over multiple
       objects if the ``objects`` parameter is passed to the callable.
    2. Traps errors on each parametrized run of the callable and raises
       all trapped errors as one ``django.core.exceptions.ValidationError``
       if more than one error is trapped in a parameterized run.
    3. Automatically maps the ``object`` argument to the argument identified
       by the ``object_arg`` attribute.
    4. Wraps everything in a transaction and applies a select_for_update to
       the queryset if select_for_update is supplied.
    """

    abstract = True

    #: The name of the object arg for the action callable
    object_arg = None

    #: Select_for_update parameters if the action is atomic
    select_for_update = ['self']

    # Object actions default to operating on "object" or "objects"
    # arguments. Object actions also trap individual errors and raise
    # aggregate errors by default
    @daf.utils.classproperty
    def wrapper(cls):
        # NOTE: the order of these decorators is load-bearing: the
        # transaction wraps everything, defaults are resolved before the
        # per-object parametrization, and error trapping surrounds each
        # parametrized run.
        arg_decs = []
        if cls.select_for_update is not None:  # pragma: no branch
            arg_decs = [arg.contexts(transaction.atomic)]

        arg_decs += [
            arg.contexts(trapped_errors=daf.contrib.raise_trapped_errors),
            # Accept a single "object" (or object_arg) by lifting it into a
            # one-element "objects" list.
            arg.defaults(
                objects=arg.first(
                    'objects',
                    daf.contrib.single_list('object'),
                    daf.contrib.single_list(cls.object_arg),
                )
            ),
            # Resolve "objects" against the action queryset (optionally
            # locking rows with select_for_update).
            arg.defaults(
                objects=djarg.qset(
                    'objects',
                    qset=cls.queryset,
                    select_for_update=cls.select_for_update,
                )
            ),
            # Run the callable once per object.
            arg.parametrize(**{cls.object_arg: arg.val('objects')}),
            arg.contexts(daf.contrib.trap_errors),
            super().wrapper,
        ]
        return arg.s(*arg_decs)

    @classmethod
    def check_class_definition(cls):
        """
        Verifies all properties have been filled out properly for the action
        class. Called by the metaclass only on concrete actions
        """
        super().check_class_definition()

        if not cls.object_arg:
            cls.definition_error('Must provide "object_arg" attribute.')

        func_parameters = inspect.signature(arg.s()(cls.func).func).parameters
        if cls.object_arg not in func_parameters:
            cls.definition_error(
                f'object_arg "{cls.object_arg}" not an argument to callable.'
                f' Possible parameters={func_parameters}'
            )
class ObjectsAction(ModelAction):
    """An action associated with multiple model objects.

    The action is similar to `ObjectAction` except one
    must define an ``objects_arg`` attribute that tells ``daf`` which
    parameter to ``callable`` takes the list of objects. The callable must
    work with a list of objects at once.

    By default, the ``wrapper`` attribute ensures passing an ``object``
    argument will be automatically expanded into a single-element list
    (ensuring interoperability with object views). In contrast to
    `ObjectAction`, `ObjectsAction` cannot trap and re-raise multiple
    errors since it is up to the author of the bulk callable to handle
    raising multiple failures at once. `ObjectsAction` is intended to
    provide engineers the flexibility to optimize bulk routines if
    the automatic parametrization of `ObjectAction` is insufficient for
    their needs.
    """

    abstract = True

    #: The name of the objects arg for the action callable
    objects_arg = None

    #: Select_for_update parameters if the action is atomic
    select_for_update = ['self']

    # Objects actions default to operating on "object" or "objects"
    # arguments.
    @daf.utils.classproperty
    def wrapper(cls):
        # NOTE: decorator order matters — the transaction wraps everything,
        # and defaults are resolved before the queryset lookup.
        arg_decs = []
        if cls.select_for_update is not None:  # pragma: no branch
            arg_decs = [arg.contexts(transaction.atomic)]

        arg_decs += [
            # Accept a single "object" by lifting it into a one-element
            # list bound to the callable's objects_arg.
            arg.defaults(
                **{
                    cls.objects_arg: arg.first(
                        'objects',
                        daf.contrib.single_list('object'),
                        cls.objects_arg,
                    )
                }
            ),
            # Resolve the objects against the action queryset (optionally
            # locking rows with select_for_update).
            arg.defaults(
                **{
                    cls.objects_arg: djarg.qset(
                        cls.objects_arg,
                        qset=cls.queryset,
                        select_for_update=cls.select_for_update,
                    )
                }
            ),
            super().wrapper,
        ]
        return arg.s(*arg_decs)

    @classmethod
    def check_class_definition(cls):
        """
        Verifies all properties have been filled out properly for the action
        class. Called by the metaclass only on concrete actions
        """
        super().check_class_definition()

        if not cls.objects_arg:
            cls.definition_error('Must provide "objects_arg" attribute.')

        func_parameters = inspect.signature(arg.s()(cls.func).func).parameters
        if cls.objects_arg not in func_parameters:
            cls.definition_error(
                f'objects_arg "{cls.objects_arg}" not an argument to callable.'
                f' Possible parameters={func_parameters}'
            )
|
# Copyright 2017 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides command 'check_compat'."""
from __future__ import print_function
import argparse
import logging
from gsi_util.checkers import checker
from gsi_util.commands.common import image_sources
class CheckReporter(object):
    """Outputs the checker result with formatting."""

    # The output will look like:
    #
    #   check result 1 : pass
    #   check result 2 : pass
    #
    #   ------------------------------------
    #   Pass/Total : 2/2
    _OUTPUT_FORMAT = '{:30}: {}'
    _ERR_MSE_FORMAT = ' {}'
    _OUTPUT_MAX_LEN = 36
    _SUMMARY_NAME = 'Pass/Total'

    def __init__(self):
        # When True, per-item lines are suppressed and only the summary
        # (pass count / total count) is printed.
        self._only_summary = False

    def set_only_summary(self):
        """Only outputs summary result.

        When _only_summary is set, only shows the number of pass items over
        the number of total check items.
        """
        self._only_summary = True

    @staticmethod
    def _get_result_str(result_ok):
        """Gets the result string 'pass' or 'fail' based on the check result."""
        return 'pass' if result_ok else 'fail'

    def _output_result_item(self, result_item):
        """Outputs the result of a CheckResultItem().

        Args:
            result_item: a namedtuple of check_result.CheckResultItem().

        Returns:
            True if the test result passed. False otherwise.
        """
        title, result_ok, stderr = result_item
        if not self._only_summary:
            print(self._OUTPUT_FORMAT.format(title, self._get_result_str(result_ok)))
            if stderr:
                print(self._ERR_MSE_FORMAT.format(stderr))
        return result_ok

    def _output_summary(self, num_pass_items, num_all_items):
        """Outputs a summary of all checker tests.

        Args:
            num_pass_items: The number of passing tests.
            num_all_items: Total number of finished tests.
        """
        print('-' * self._OUTPUT_MAX_LEN)
        summary_result_str = '{}/{}'.format(num_pass_items, num_all_items)
        print(self._OUTPUT_FORMAT.format(self._SUMMARY_NAME, summary_result_str))

    def output(self, check_result_items):
        """The main public method to output a sequence of CheckResultItem()s."""
        results = [self._output_result_item(item) for item in check_result_items]
        passed = sum(1 for ok in results if ok)
        self._output_summary(passed, len(results))
def _format_check_list(check_list):
    """Returns a string of check list item names."""
    # The string is like: "'check_item1', 'check_item2', 'check_item3'".
    quoted_names = ('{!r}'.format(item.check_item) for item in check_list)
    return ', '.join(quoted_names)
def do_list_checks(_):
    """Prints all supported check items."""
    all_checks = checker.Checker.get_all_check_list()
    print(_format_check_list(all_checks))
def do_check_compat(args):
"""The actual function to do 'gsi_util check_compat' command.
Args:
args: parsed argparse namespace with system/vendor image sources,
only_summary flag and optional CHECK_ITEM names.
"""
logging.info('==== CHECK_COMPAT ====')
logging.info('  system=%s vendor=%s', args.system, args.vendor)
# With no explicit CHECK_ITEMs, run every known check.
check_list = (checker.Checker.make_check_list(args.CHECK_ITEM)
if args.CHECK_ITEM else checker.Checker.get_all_check_list())
logging.debug('Starting check list: %s', _format_check_list(check_list))
# Mount all requested images; checks run against the mounted file tree.
mounter = image_sources.create_composite_mounter_by_args(args)
with mounter as file_accessor:
the_checker = checker.Checker(file_accessor)
check_result = the_checker.check(check_list)
reporter = CheckReporter()
if args.only_summary:
reporter.set_only_summary()
reporter.output(check_result)
logging.info('==== DONE ====')
# argparse description text for the 'check_compat' sub-command.
_CHECK_COMPAT_DESC = """
'check_compat' command checks compatibility between images.
You must assign both image sources by SYSTEM and VENDOR.
You could use command 'list_checks' to query all check items:
$ ./gsi_util.py list_checks
Here is an examples to check a system.img and a device are compatible:
$ ./gsi_util.py check_compat --system system.img --vendor adb"""
def setup_command_args(parser):
"""Sets up command 'list_checks' and 'check_compat'.
Args:
parser: an argparse sub-parsers object to register the commands on.
"""
# Command 'list_checks'.
list_check_parser = parser.add_parser(
'list_checks', help='lists all possible check items. Run')
list_check_parser.set_defaults(func=do_list_checks)
# command 'check_compat'
# RawTextHelpFormatter keeps the newlines in _CHECK_COMPAT_DESC.
check_compat_parser = parser.add_parser(
'check_compat',
help='checks compatibility between a system and a vendor',
description=_CHECK_COMPAT_DESC,
formatter_class=argparse.RawTextHelpFormatter)
check_compat_parser.add_argument(
'-s',
'--only-summary',
action='store_true',
help='only output the summary result')
# Adds --system/--vendor image source options (both required).
image_sources.add_argument_group(
check_compat_parser,
required_images=['system', 'vendor'])
check_compat_parser.add_argument(
'CHECK_ITEM',
type=str,
nargs='*',
help=('the check item to be performed\n'
'select one from: {}\n'.format(_format_check_list(
checker.Checker.get_all_check_list())) +
'if not given, it will check all'))
check_compat_parser.set_defaults(func=do_check_compat)
|
import requests
import json
import pickle
import random
## pun code
def findSubject():
    """Returns a random noun (newline stripped) from the pickled "nouns" file."""
    # Bug fix: the original opened the file inline and never closed it;
    # a context manager releases the handle deterministically.
    with open("nouns", "rb") as noun_file:
        nouns = pickle.load(noun_file)
    word = random.choice(nouns)
    return word.replace('\n', '')
def findActionOrLocation():
    """Returns a random action/location word (newline stripped) from the
    pickled "actions" file."""
    # Bug fix: use a context manager; the original leaked the file handle.
    with open("actions", "rb") as action_file:
        words = pickle.load(action_file)
    word = random.choice(words)
    return word.replace('\n', '')
#Query the DataMuse API and retrieve words similar to the input word
def findWordsSimilarTo(word):
# Network call: DataMuse "means like" endpoint returns a JSON list of
# {"word": ..., ...} objects.
res = requests.get("https://api.datamuse.com/words?ml="+word).json()
words = []
for wor in res:
# Multi-word phrases are skipped; the pun builder needs single tokens.
if ' ' not in wor['word']:
words.append(wor['word'])
#Test Helpers
# Hard-coded candidates so the commented-out examples at the bottom of the
# file behave deterministically.
if word == "space":
words.append('sputnik')
if word == "barbarian":
words.append('visigoth')
if word == "that you can't see":
#words = []
words.append("invisible")
#print(words)
return words
#Query the DataMuse API for synonyms to the provided word
def findSynonymsTo(word):
# Same shape as findWordsSimilarTo(), but hits the synonym relation
# endpoint; multi-word results are dropped.
res = requests.get("https://api.datamuse.com/words?rel_syn="+word).json()
words = []
for wor in res:
if ' ' not in wor['word']:
words.append(wor['word'])
return words
#Find a subject, and action or location word, and build a pun
def buildPun():
    """Picks a random subject and action/location, then builds and prints a pun."""
    subject = findSubject()
    actionOrLocation = findActionOrLocation()
    # Bug fix: the original referenced `actOrLocation` (undefined locally),
    # silently picking up the module-level global instead of the word chosen
    # just above.
    answers = createPunAnswer(subject, actionOrLocation)
    bestAnswer = searchForBestAnswer(answers)
    sentence = constructSentence(subject, actionOrLocation, bestAnswer)
    print(sentence)
#given a subject word, and an action or a location, retrieve similar words, and compare them until a suitable answer is created to the pun
# Candidate answers are retrieved if a common sequence of characters is identified, if so, the shorter word is inserted into the larger word as a candidate answer
# Each answer is saved and scored
def createPunAnswer(subject, actionOrLocation):
# Result shape: {similar_subject: {similar_action: [candidate, ...]}}.
subjectActionOptionPairs = {}
similar_to_subject = findWordsSimilarTo(subject)
similar_to_actionOrLocation = findWordsSimilarTo(actionOrLocation)
for similarSubjectWord in similar_to_subject:
subjectActionOptionPairs[similarSubjectWord] = {}
for similarActOrLocWord in similar_to_actionOrLocation:
subjectActionOptionPairs[similarSubjectWord][similarActOrLocWord] = []
# One candidate per shared substring found between the pair.
substrings = findSubstringBetweenSubjectandActionOrLocation(similarSubjectWord, similarActOrLocWord)
for substring in substrings:
subjectActionOptionPairs[similarSubjectWord][similarActOrLocWord].append(combineWords(similarSubjectWord, similarActOrLocWord, substring))
return subjectActionOptionPairs
#Searches for a common string of characters between the subject and the action or location
# Each substring found is returned
def findSubstringBetweenSubjectandActionOrLocation(subject, actionOrLocation):
substrings = []
#search for the shorter string in the longer string
if len(subject) < len(actionOrLocation):
shorter = subject
longer = actionOrLocation
else:
shorter = actionOrLocation
longer = subject
# Slide over every suffix of the longer word; findSubstring() returns a
# shared run of >= 3 characters or False.
for i in range(0, len(longer)):
substring = findSubstring(longer[i:], shorter)
if substring:
substrings.append(substring)
return substrings
#find a substring longer than 3 characters that is common between both strings
def findSubstring(string1, string2):
# Anchor: first occurrence (in string2) of string1's first character.
# NOTE(review): str.find() returns -1 on a miss, which then negative-indexes
# both strings below; and the loop compares string1[i] to string2[i] at the
# *same* offset, so it only detects runs aligned at identical positions.
# Suspected bugs -- verify intent before changing.
index = string2.find(string1[0])
end = index
for i in range(index, min(len(string1), len(string2))):
if string1[i] == string2[i]:
end+=1
else:
break
# NOTE: despite the header comment ("longer than 3"), runs of exactly 3
# characters are accepted here.
if end - index >= 3:
return string1[index:end]
return False
#given a common substring shared between subject and action or location, insert the smaller word into the larger word at the index of that substring
def combineWords(subject, actionOrLocation, substring):
    """Merges the shorter word into the longer one at the shared substring.

    The longer word is the root and the shorter word the inserter.  When the
    substring starts the root, the inserter is prepended to the root's tail;
    otherwise the root's prefix up to the substring is kept and the inserter
    is appended to it.
    """
    subject_pos = subject.find(substring)
    action_pos = actionOrLocation.find(substring)
    # The root is the longer word (ties go to actionOrLocation).
    if len(subject) > len(actionOrLocation):
        root, inserter, splice_at = subject, actionOrLocation, subject_pos
    else:
        root, inserter, splice_at = actionOrLocation, subject, action_pos
    if root.find(substring) == 0:
        # Prepending: keep the root's tail after an inserter-sized chunk.
        return inserter + root[splice_at + len(inserter):]
    # Inserting: root prefix up to the substring, then the inserter.
    return root[:splice_at] + inserter
#Score each answer based on length, difference between subject and actOrLoc, and grammatical correctness
def scoringFunction(subject, actOrLocation, answer):
answerLength = len(answer)
#Score the answer by how much different the word is from the subject or actOrLocation, between 25% and 50% different is optimal
differenceScore = scoreDifference(subject, actOrLocation, answer)
#score the answer by change in length, the larger the increase the worse the score
lengthCompare = max(len(subject), len(actOrLocation))
lengthScore = (lengthCompare/answerLength)
#if the answer is shorter then the words, we probably have an issue
if lengthCompare < answerLength:
lengthScore = 0
#if the answer is too short to be a witty response, zero its score
if len(answer) < 5:
lengthScore = 0
#score the answer by its ability to maintain proper grammatical and pronunciation standards
# NOTE(review): grammar scoring is not implemented; every answer gets the
# constant 1 here, so the final score is (difference + length + 1) / 3.
grammarScore = 1
score = (differenceScore + lengthScore + grammarScore)/3
return score
#Generate a score based on how different the answer is from the subject and actionOrLocation, if the answer is between 25% and 50% different
# give the answer a score of 1, otherwise give the answer a score of zero
def scoreDifference(subject, actOrLocation, answer):
    """Scores `answer` against both source words by positional mismatch ratio.

    Returns 0 when either mismatch ratio equals exactly 1, and 1 when either
    ratio falls in [0.25, 0.5]; otherwise 0.
    """
    answerLength = len(answer)
    # Count positional character mismatches over the overlapping prefix of
    # each source word; both counters start at 1 (kept from the original).
    subjectDiffCount = 1
    for i in range(min(len(subject), answerLength)):
        if subject[i] != answer[i]:
            subjectDiffCount += 1
    actOrLocCount = 1
    for i in range(min(len(actOrLocation), answerLength)):
        if actOrLocation[i] != answer[i]:
            actOrLocCount += 1
    # Bug fix: the original tested `subjectDiffCount/answerLength == 11`,
    # a typo for `== 1` (the parallel actOrLocCount test uses 1), so this
    # rejection branch never fired for the subject.
    if subjectDiffCount / answerLength == 1 or actOrLocCount / answerLength == 1:
        return 0
    differenceScore = 0
    if 0.25 <= subjectDiffCount / answerLength <= 0.5:
        differenceScore = 1
    if 0.25 <= actOrLocCount / answerLength <= 0.5:
        differenceScore = 1
    return differenceScore
#loop through each answer and its score, and return the answer with the highest score
def searchForBestAnswer(answers):
    """Scores every candidate and returns the single highest-scoring answer.

    Args:
        answers: nested dict {subject_word: {action_word: [candidate, ...]}}
    Returns:
        The best-scoring candidate string, or "" when nothing scores > 0.
    """
    bestScore = 0
    bestAnswer = ""
    for subjectWord in answers:
        for actionWord in answers[subjectWord]:
            for candidate in answers[subjectWord][actionWord]:
                score = scoringFunction(subjectWord, actionWord, candidate)
                # Strictly-greater keeps the first of equally-scored
                # candidates, matching the original behavior.
                if score > bestScore:
                    bestScore = score
                    bestAnswer = candidate
    return bestAnswer
    # Fix: the original rebound the `answers` parameter to a dead tie-list
    # that was appended to but never returned; that shadowing is removed.
#given a subject and an action or a location, build the pun question and answer to complete the problem
def constructSentence(subject, actOrLocation, bestAnswer):
    """Formats the pun as "What do you call a <subject> that can <act>? <answer>."."""
    # The "in <location>" phrasing is disabled: isLocation() was never
    # implemented, so every pun uses the "that can" connector.
    if False:  # isLocation(actOrLocation):
        connector = "in "
    else:
        connector = "that can "
    parts = ["What do you call a ", subject, " ", connector, actOrLocation, "?", " ", bestAnswer, "."]
    return "".join(parts)
# Module-level driver: runs at import time.  Generates 100 puns (network
# heavy -- each attempt queries the DataMuse API) and appends them to
# output.txt in the working directory.
subject = 'potato'
actOrLocation = 'space'
#answers = createPunAnswer(subject, actOrLocation)
#bestAnswer = searchForBestAnswer(answers)
#sentence = constructSentence(subject, actOrLocation, bestAnswer)
#print(sentence)
#subject = 'barbarian'
#actOrLocation = "that you can't see"
#answers = createPunAnswer(subject, actOrLocation)
#print(answers)
#bestAnswer = searchForBestAnswer(answers)
#sentence = constructSentence(subject, actOrLocation, bestAnswer)
#print(sentence)
lines = ""
index = 0
# Only attempts that produced a non-empty best answer count toward the 100.
while index < 100:
subject = findSubject()
actOrLocation = findActionOrLocation()
#print("what do you call a " + subject + " that can " + actOrLocation + "?")
answers = createPunAnswer(subject, actOrLocation)
#print(answers)
bestAnswer = searchForBestAnswer(answers)
sentence = constructSentence(subject, actOrLocation, bestAnswer)
if (bestAnswer != ""):
index +=1
lines = lines + sentence + '\n'
fp = open('output.txt', 'a')
fp.write(lines)
fp.close()
#TestyBois
#print(findSubstringBetweenSubjectandActionOrLocation('visigoth', 'invisible'))
#print(findSubstringBetweenSubjectandActionOrLocation('spud', 'sputnik'))
#print(combineWords('visigoth', 'invisible', 'visi'))
#print(combineWords('spud', 'sputnik', 'spu'))
#splitWordIntoSyllables('sputnik')
#print(scoringFunction('spud', 'sputnik', 'spudnik'))
|
from django import forms
from unavis.forms import SecureBotForm
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.contrib.auth import authenticate
import logging
logger = logging.getLogger('django.forms')
class LoginForm(SecureBotForm):
# Combined login / sign-up form: existing emails are authenticated,
# unknown emails get an account created on the spot.
email = forms.EmailField(required=True)
password = forms.CharField(required=True,
widget=forms.PasswordInput)
def clean(self):
email = self.cleaned_data.get('email', None)
password = self.cleaned_data.get('password', None)
UserModel = get_user_model()
if not self.errors:
# we havent any field errors
if UserModel.objects.filter(email=email).exists():
# we are in a simple login
# NOTE(review): "logged in" is logged *before* authenticate() runs,
# so failed password attempts are also recorded as logins.
logger.info('User {!s} logged in'.format(email))
self.user_session = authenticate(email=email,
password=password)
if self.user_session is None:
raise forms.ValidationError(
_('Password provided doesnt match the account'),
code='login_invalid_password',
)
else:
# we are signin
# NOTE(review): any unknown email silently creates an account with
# the supplied password.  Confirm this auto-registration from the
# login form is intended (account-enumeration implications).
user = UserModel.objects.create(email=email)
user.set_password(password)
user.save()
logger.info('User signed-in: {!s}'.format(email))
self.user_session = authenticate(email=email,
password=password)
return self.cleaned_data
class RegenPasswordForm(SecureBotForm):
    """Password-reset request form: requires an email that has an account."""
    email = forms.EmailField(label=_('Email address'), required=True)

    def clean_email(self):
        UserModel = get_user_model()
        email = self.cleaned_data['email']
        if not UserModel.objects_all.filter(email=email).exists():
            raise forms.ValidationError(
                _('This email doesnt exists'),
                code='regen_password_email_not_exists',
            )
        # Bug fix: Django clean_<field> methods must return the cleaned
        # value; the original returned None, wiping email from cleaned_data.
        return email
|
from app import app
# turn on dev mode for development
# Entry-point guard: only start the development server when executed
# directly, not when imported (e.g. by a WSGI container).
if __name__ == "__main__":
app.run()
|
# -*- coding: utf-8 -*-
import os

import pandas as pd
# Bug fix: SparkContext lives in the top-level pyspark package, not in
# pyspark.ml.classification; the original import raised ImportError.
from pyspark import SparkConf, SparkContext
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

# Point PySpark workers at the local Anaconda interpreter (Windows path).
os.environ['PYSPARK_PYTHON'] = 'D:\\Anaconda3\\envs\\model\\python.exe'
# a = [x for x in range(5)]
# print(a)
#
# spark = SparkSession \
# .builder \
# .master("local[16]")\
# .appName('my_first_app_name') \
# .getOrCreate()
# data_1 = pd.read_csv(r'C:\Users\10651\Desktop\model\lucheng_data.csv', encoding='gbk')
# dfData = spark.createDataFrame(data_1)
# c = dfData.rdd.map(
# lambda row: (Vectors.dense(row[5],row[4]),row[61])
# )
# e = c.map(lambda x: (x[0],x[1])).toDF(["features",'label'])
# f = e.dropna()
# f.show()
# g = c.map(lambda x:(x[0], )).toDF(["features"])
# g.show()
# lr = LogisticRegression(maxIter=10, regParam=0.3)
# lrModel = lr.fit(f)
# print(lrModel.summary)
# pdValue = lrModel.transform(g).select('probability').toPandas()
# d = pdValue.applymap(lambda x :(x[1]))
# fpr, tpr, thresholds = roc_curve(data_1['Y'],d,pos_label = 1)
# plt.plot(fpr,tpr,linewidth=2,label="ROC")
# plt.xlabel("false presitive rate")
# plt.ylabel("true presitive rate")
# plt.ylim(0,1.05)
# plt.xlim(0,1.05)
# plt.legend(loc=4)
# plt.show()
# roc_auc = auc(fpr, tpr)
# gini = 2*roc_auc - 1
# print('AUC:',roc_auc,'\n','GINI:',gini,'\n')
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): numpy/matplotlib are re-imported here mid-module (matplotlib
# is already imported at the top); harmless but redundant.
data_path = "C:\\Users\\10651\\Desktop\\spark-2.3.0-bin-hadoop2.7\\data\\mllib\\sample_linear_regression_data.txt"
# Local Spark context/session running with 16 worker threads.
sc_conf = SparkConf()
sc_conf.setAppName("app")
sc_conf.setMaster('local[16]')
#sc_conf.set('spark.executor.memory', '2g')
#sc_conf.set('spark.executor.cores', '4')
#sc_conf.set('spark.cores.max', '40')
#sc_conf.set('spark.logConf', True)
sc = SparkContext(conf=sc_conf)
spark = SparkSession(sc)
rawData = sc.textFile(data_path)
def filterVar(line):
    """Parses one libsvm-style line ("label 1:v1 2:v2 ... 10:v10").

    Returns a list of 11 strings: the label followed by the first ten
    feature values with their "index:" prefixes stripped.
    """
    fields = line.split(' ')
    # Feature fields look like "3:0.25"; keep only the value part.
    return [fields[0]] + [field.split(':')[1] for field in fields[1:11]]
# Hand-rolled linear-regression gradient descent on the sample data.
Data = rawData.map(lambda line : filterVar(line))
trainingData,testData = Data.randomSplit([0.5,0.5],6)
hasattr(trainingData, "toDF")
## True
# NOTE(review): "traingData" is a typo for trainingData but is used
# consistently below, so it works as written.
traingData = trainingData.toDF().toPandas().values
testData = testData.toDF().toPandas().values
rate = 0.01
# one-dimensional variables (initial random coefficients)
a = np.random.normal() # coefficient for X1
#b = np.random.normal()
c = np.random.normal()
d = np.random.normal()
e = np.random.normal()
xs = np.random.normal()
# 10000 * 10
for i in range(100): # train for 100 iterations
sum_a = 0
#sum_b = 0
sum_c = 0
sum_d = 0
sum_e = 0
# Accumulate the (scaled) gradient of squared error for each coefficient.
for line in traingData:
sum_a += rate * (float(line[0]) - (a * float(line[1]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) )) * (float(line[1]))
#sum_b += rate * (float(line[0]) - (a * float(line[1]) + b * float(line[2]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) + xs)) * (float(line[2]))
sum_c += rate * (float(line[0]) - (a * float(line[1]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) )) * (float(line[3]))
sum_d += rate * (float(line[0]) - (a * float(line[1]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) )) * (float(line[4]))
sum_e += rate * (float(line[0]) - (a * float(line[1]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) )) * (float(line[5]))
#sum_xs+= rate * (float(line[0]) - (a * float(line[1]) + c * float(line[3]) + d * float(line[4]) + e * float(line[5]) + xs)) * (1)
a += sum_a
#b += sum_b
c += sum_c
d += sum_d
e += sum_e
# Evaluate on the held-out half after each iteration.
preData = [(a * float(data[1])+c * float(data[3])+d * float(data[4])+e * float(data[5]) )for data in testData]
index = 0
sum_error = 0
# NOTE: sum_error accumulates *signed* residuals (no abs), so positive and
# negative errors cancel out.
for line in testData:
sum_error += float(preData[index]) - float(line[0])
index+=1
print(str(i)+' 次迭代,误差值为:'+str(sum_error))
import sys
# Reads g (number of seats) and p requests from stdin; counts how many
# requests can be seated when an occupied seat falls back to walking left.
g=int(input())
p=int(input())
k=[]
for i in range(p):
val=int(input())
k.append(val)
# s[i] == 1 marks seat i+1 as taken.
s=[0 for _ in range (g)]
cnt=0
for a in k:
if s[a-1]==0:
s[a-1]=1
cnt+=1
else:
# Requested seat taken: walk left looking for a free one.
t=0
while s[a-1-t]==1 and a-1-t>0:
t+=1
if a-1-t==0:
break
# NOTE(review): when the walk stops on a free seat, that seat is at
# index a-1-t, but a-t is marked here; also cnt is incremented even when
# the walk broke out without finding a free seat.  Suspected off-by-one /
# overcount -- verify against the intended puzzle rules.
s[a-t]=1
cnt+=1
print(cnt)
|
from .eLABJournalObject import *
class StorageType(eLABJournalObject):

    def __init__(self, api, data):
        """
        Internal use only: initialize storage type object.

        Raises an Exception when `data` is not a dict containing "name".
        """
        # Bug fix: the original chained these checks with the bitwise `&`
        # operator, which does not short-circuit, so `"name" in data` raised
        # TypeError when data was None instead of reaching the intended
        # Exception below.  isinstance() also accepts dict subclasses.
        if data is not None and isinstance(data, dict) and "name" in data:
            super().__init__(api, data, "storageTypeID", str(data["name"]))
        else:
            raise Exception("no (valid) storage type data")

    def storages(self):
        """
        Get storages for this storage type.
        """
        return self._eLABJournalObject__api.storages(storageTypeID=self.id())

    def type(self):
        """
        Get deviceType for this storage type, or None when absent.
        """
        data = self.data()
        if "deviceType" in data:
            return data["deviceType"]
        return None
|
../dummy_robot2.py |
from django.shortcuts import render, redirect
from books.models import Book
from django.contrib import auth
from libary.forms import SendMail
from .settings import ADMIN_EMAIL
from django.core.mail import send_mail as s_mail
from django.core.mail import EmailMessage
def base(request):
# Renders the base template with the currently authenticated user.
User = auth.get_user(request)
return render(request, 'base.html', {'User':User})
def login(request):
# Authenticates posted credentials; redirects home on success, re-renders
# the base template (with an error message on bad credentials) otherwise.
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
User = auth.authenticate(username=username, password=password)
if User is not None:
auth.login(request, User)
return redirect('/')
else:
return render(request, 'base.html', {'error': 'Invalid login or password'})
else:
return render(request, 'base.html')
def contact(requst):
    """Contact-page view: emails the admin, optionally with one attachment.

    On send failure the form is re-rendered with a 'Fail' message; on
    success the user is redirected home.
    """
    if requst.method == "POST":
        form = SendMail(requst.POST, requst.FILES)
        if form.is_valid():
            from_user = requst.POST.get('from_user')
            title = requst.POST.get('title')
            message = requst.POST.get('message')
            msg = EmailMessage(title, message, from_user, [ADMIN_EMAIL, ])
            if requst.FILES:
                file = requst.FILES['file']
                msg.attach(file.name, file.read(), file.content_type)
            try:
                msg.send()
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; the best-effort error page is kept.
            except Exception:
                return render(requst, 'contact.html', {'form': SendMail(requst.POST),
                                                       'message': 'Fail'})
            else:
                return redirect('/')
        else:
            return render(requst, 'contact.html', {'form': SendMail(requst.POST)})
    return render(requst, 'contact.html', {'form': SendMail})
|
try:
import urllib2
except ImportError:
import urllib.request as urllib2
import csv
import logging
import os
import re
import io
import requests
import scrapy
import zipfile
# to improve performance, regex statements are compiled only once per module
# Captures the URL of GDELT's "latest export" CSV zip from the fetched page.
re_export = re.compile(r'.*?(http.*?export\.CSV\.zip)')
class GdeltCrawler(scrapy.Spider):
# Scrapy spider that reads GDELT's "last update" page, downloads the
# referenced export zip, and crawls every article URL it lists.
name = "GdeltCrawler"
ignored_allowed_domains = None
start_urls = None
original_url = None
log = None
config = None
helper = None
def __init__(self, helper, url, config, ignore_regex, *args, **kwargs):
"""
Args:
helper: project helper exposing url_extractor and parse_crawler.
url: the GDELT "last update" page to start from.
config: crawler configuration object (stored, not used here).
ignore_regex: accepted for interface compatibility; unused.
"""
self.log = logging.getLogger(__name__)
self.config = config
self.helper = helper
self.original_url = url
self.ignored_allowed_domain = self.helper.url_extractor \
.get_allowed_domain(url)
self.start_urls = [url]  # [self.helper.url_extractor.get_start_url(url)]
super(GdeltCrawler, self).__init__(*args, **kwargs)
def parse(self, response):
"""
Parse the Rss Feed
:param obj response: The scrapy response
"""
return self.rss_parse(response)
def rss_parse(self, response):
"""
Extracts all article links and initiates crawling them.
:param obj response: The scrapy response
"""
# get last_update zip url
match = re.match(re_export, response.text)
if match:
last_update_zip_url = match.group(1)
# fetch zip file
r = requests.get(last_update_zip_url)
# unzip
z = zipfile.ZipFile(io.BytesIO(r.content))
extracted = z.namelist()
z.extractall('/tmp')
csv_file_path = '/tmp/%s' % extracted[0]
# read csv to get all urls
# The article URL is the last column of each tab-separated row.
urls = set()  # set to remove duplicates
with open(csv_file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
for row in csv_reader:
urls.add(row[-1])
# rm the file
os.remove(csv_file_path)
for url in urls:
yield scrapy.Request(url, lambda resp: self.article_parse(
resp, 'gdelt'))
def article_parse(self, response, rss_title=None):
"""
Checks any given response on being an article and if positiv,
passes the response to the pipeline.
:param obj response: The scrapy response
:param str rss_title: Title extracted from the rss feed
"""
if not self.helper.parse_crawler.content_type(response):
return
yield self.helper.parse_crawler.pass_to_pipeline_if_article(
response, self.ignored_allowed_domain, self.original_url,
rss_title)
@staticmethod
def only_extracts_articles():
"""
Meta-Method, so if the heuristic "crawler_contains_only_article_alikes"
is called, the heuristic will return True on this crawler.
"""
return True
@staticmethod
def supports_site(url):
"""
Rss Crawler is supported if the url is a valid rss feed
Determines if this crawler works on the given url.
:param str url: The url to test
:return bool: Determines wether this crawler work on the given url
"""
# TODO: check if the url is a valid RSS feed
return True
|
import os.path
class score_card:
    """Keeping score in a plain text file, one score per line."""

    def __init__(self):
        # Score file lives in the current working directory.
        self.path = "./score.txt"

    def addScore(self, score):
        """Appends `score` (a string) as a new line.

        Bug fix: all file operations now use context managers; the original
        opened handles and never closed them.
        """
        with open(self.path, 'a') as file:
            file.write(score + '\n')

    def readScores(self):
        """Returns the entire score file as one string."""
        with open(self.path, 'r') as file:
            return file.read()

    def createScores(self):
        """Creates an empty score file if one does not already exist."""
        if not self.checkIfScoresExist():
            with open(self.path, 'w'):
                pass

    def checkIfScoresExist(self):
        """True when the score file exists."""
        return os.path.isfile(self.path)

    def deleteCard(self):
        """Removes the score file (raises if it does not exist)."""
        os.remove(self.path)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Part2. Reference
===============================================================================
This part describes the various features of the Python language in detail.
"""
|
import torch as tr
import numpy as np
import torchvision as trv
def iou(b1, b2):
# Overlap ratio of one box against N boxes.
# Assumes layout [x1, y1, x2, y2]: b1 is 1-D (4,), b2 is (N, 4) -- TODO
# confirm against callers.
# NOTE(review): the denominator u_a is the area of the box *enclosing* both
# inputs (max corner minus min corner), not the union area, so this is not
# textbook IoU.  Verify this is intentional before "fixing" it.
eps = 1e-7
top_rightx, top_righty = tr.min(b1[0], b2[:, 0]), tr.min(b1[1], b2[:, 1])
bot_leftx, bot_lefty = tr.max(b1[2], b2[:, 2]), tr.max(b1[3], b2[:, 3])
inter_rightx, inter_righty = tr.min(b1[2], b2[:, 2]), tr.min(b1[3], b2[:, 3])
inter_leftx, inter_lefty = tr.max(b1[0], b2[:, 0]), tr.max(b1[1], b2[:, 1])
u_a = (bot_leftx - top_rightx) * (bot_lefty - top_righty)
# Intersection area; clamp(0) zeroes out non-overlapping pairs.
i_a = (inter_rightx - inter_leftx).clamp(0) * (inter_righty - inter_lefty).clamp(0)
return i_a/(u_a+eps)
def NMS(labels, cind=-1, conf_ind=-2, nc=None, thresh=0.2):
# Class-wise non-maximum suppression over detection rows.
# When nc is None, labels is assumed to be
# [x1, y1, x2, y2, obj_conf, class_scores...] -- TODO confirm with callers.
c_list = []
boxes = []
if not nc:
c = labels[:, 5:]
nc = labels[:, 5:].shape[-1]
c = c.argmax(dim=-1)[:, None]
# NOTE(review): labels[:, 5:][c] uses the (N,1) class-index tensor to index
# *rows* of the score matrix (giving shape (N,1,nc)) rather than gathering
# each row's own class score; torch.gather(labels[:, 5:], 1, c) looks like
# the intent.  Verify before relying on this branch.
c_conf = labels[:, 5:][c]*labels[:, 4:5]
labels = tr.cat((labels[:, :4], c_conf, c), dim=-1)
# Partition rows by predicted class.
for c in range(nc):
mask = labels[:, cind] == c
c_list.append(labels[mask])
# Greedy suppression within each class: keep the highest-confidence box,
# drop remaining boxes whose overlap with it reaches thresh, repeat.
for box in c_list:
count = 0
while box.shape[0]:
values, ind_c = box[:, conf_ind].max(0)
true_box = box[ind_c]
boxes.append(true_box)
box = box[tr.arange(box.shape[0], device=box.device) != ind_c]
count += 1
ious = iou(true_box, box)
mask = ious < thresh
box = box[mask]
return tr.stack(boxes)
|
#! /usr/bin/env python3
from collections import defaultdict
from functools import partial
import numpy as np
# this is now specific for my analysis
# expects to be handed all pieces with
# all scales still present, no other index
class get_delta(object):
# Computes per-scale (sig_bsm - sig_sm)/bkg arrays and their scale-variation
# envelope.  Inputs are dicts keyed by scale tuples with array values.
def __init__(self, sig_sm, sig_bsm, bkg):
self.sig_sm = sig_sm
self.sig_bsm = sig_bsm
self.bkg = bkg
# self.delta = defaultdict(partial(np.ndarray, 0))
self.delta = defaultdict(partial(np.ndarray, 0))
self.sigrat = None
self.deltavar = defaultdict(partial(np.ndarray, 0))
# Values below _tiny are treated as numerically zero.
self._tiny = 1E-9
def _delta(self):
# (sig_bsm - sig_sm)/sig_bkg
for scale in self.sig_sm.keys():
self.delta[scale] = np.divide(self.sig_bsm[scale] - self.sig_sm[scale], self.bkg[scale])
def _sigrat(self):
# (sig_bsm + bkg)/(sig_sm + bkg)
# NOTE(review): this loops over .values() but never uses `scale`, and adds
# whole dicts rather than per-scale arrays -- as written it would raise a
# TypeError.  Suspected unfinished/buggy; verify before use.
for scale in self.sig_sm.values():
self.sigrat = np.divide(self.sig_bsm + self.bkg, self.sig_sm + self.bkg)
def varyscaledelta(self, central='0.50'):
# Builds the central delta plus pointwise min/max over all scale choices.
self._delta()
if len(list(self.sig_sm.keys())[0]) == 2:
scale_central = (central, central)
elif len(list(self.sig_sm.keys())[0]) == 3:
scale_central = (central, central, central)
else:
# NOTE(review): for any other key arity scale_central is left unbound
# and the next line raises NameError -- confirm only 2/3-tuples occur.
pass
self.deltavar['central'] = self.delta[scale_central]
delta_min = self.deltavar['central']
delta_max = self.deltavar['central']
for scale in self.sig_sm.keys():
_delta = self.delta[scale]
# Scales whose delta is numerically zero everywhere are excluded
# from the envelope.
if np.all(_delta < self._tiny):
pass
else:
delta_min = np.minimum(delta_min, _delta)
delta_max = np.maximum(delta_max, _delta)
self.deltavar['minimum'] = delta_min
self.deltavar['maximum'] = delta_max
def varyone(self, sig_sm, sig_bsm, bkg):
# Widens the existing envelope with one extra (sm, bsm, bkg) variation.
newdelta = np.divide(sig_bsm - sig_sm, bkg)
delta_min = np.minimum(self.deltavar['minimum'], newdelta)
delta_max = np.maximum(self.deltavar['maximum'], newdelta)
self.deltavar['minimum'] = delta_min
self.deltavar['maximum'] = delta_max
import datetime
from cities.models import City
from django.db.models import Count
# Canadian province/territory name -> two-letter postal abbreviation.
regions_names = {
"Alberta": "AB",
"British Columbia": "BC",
"Manitoba": "MB",
"New Brunswick": "NB",
"Newfoundland and Labrador": "NL",
"Northwest Territories": "NT",
"Nova Scotia": "NS",
"Nunavut": "NU",
"Ontario": "ON",
"Prince Edward Island": "PE",
"Quebec": "QC",
"Saskatchewan": "SK",
"Yukon": "YT"
}
def find_nearest_city(location, cities=None):
    """Returns the city nearest to `location`, or None on failure.

    When `cities` is not supplied, candidates are the cities hosting events
    that start in the future.
    """
    if not cities:
        cities = City.objects.filter(venue__event__single_events__start_time__gte=datetime.datetime.now()).select_related("region", "region__country").annotate(Count('id'))
    try:
        return cities.distance(location).select_related("region", "region__country").order_by('distance')[0]
    # Bug fix: narrowed from a bare `except:` so SystemExit and
    # KeyboardInterrupt are no longer swallowed; the best-effort None
    # (e.g. on an empty queryset's IndexError) is preserved.
    except Exception:
        return None
def get_dates_from_request(request):
# Parses optional "start_date"/"end_date" GET params in
# "%Y-%m-%d %H:%M:%S" format; each missing value defaults to now().
start_date = request.GET.get("start_date", None)
end_date = request.GET.get("end_date", None)
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d %H:%M:%S")
else:
start_date = datetime.datetime.now()
if end_date:
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d %H:%M:%S")
else:
end_date = datetime.datetime.now()
return start_date, end_date
def get_times_from_request(request):
# NOTE(review): when the GET params are present they are returned as
# *strings*, while the defaults (13, 20) are ints -- callers must cope
# with both types; confirm this mixed typing is intended.
start_time = request.GET.get("start_time", 13)
end_time = request.GET.get("end_time", 20)
return start_time, end_time
def get_region_shortcut(region_name=None):
    """Maps a Canadian region name to its two-letter code.

    Unknown names (including None) are returned unchanged.
    """
    return regions_names.get(region_name, region_name)
import numpy as np
import matplotlib.pyplot as plt
import math
def tail_prob(dist, t):
    """Empirical tail probabilities Pr{X - mean(X) > t_i} for each t_i in t.

    The sample mean is subtracted so the variable is centered before the
    tail is measured.  Returns a list of floats, one per threshold.
    """
    sample_mean = np.mean(dist)
    sample_count = 1.0 * len(dist)
    return [dist[dist > (threshold + sample_mean)].shape[0] / sample_count
            for threshold in t]
np.random.seed(42)
k = 10000
# Bug fix: np.linspace requires an integer sample count; the float literal
# 1e5 raises TypeError on modern NumPy.
t = np.linspace(0.5, 1000, int(1e5))
data = np.zeros((k,))
###########################################################
# Empirical data generation: k samples of a sum of n iid Uniform(-a, a).
a = 10
n = 10
for i in range(n):
    x = np.random.uniform(-a, a, (k,))
    data = data + x
###########################################################
tail_probs = np.array(tail_prob(data, t))  # computing the tail probability
# Reference-gaussian standard deviation.
# NOTE(review): the exact std of the sum is a*sqrt(n/3) ~= 18.3; 10*n = 100
# is a much looser spread -- confirm it is meant as an upper bound.
std = 10 * n
gaussian = np.random.normal(0, std, (k,))
bound = np.array(tail_prob(gaussian, t))  # computing the bound
###########################################################
fig = plt.figure()
plt.plot(t, tail_probs, 'b')
plt.plot(t, bound, 'r')
labels = ['Sum of bounded random variables(Uniform [-10,10])', 'Reference Gaussian']
plt.legend(labels)
fig.suptitle('Sum of bounded random variables with zero mean is sub gaussian')
plt.xlabel('t')
plt.ylabel('Pr({X-u >t})')
plt.show()
|
a=1 # global variable
# Global variables live in heap storage and remain valid until the program ends.
def vartest(a): # the parameter `a` is a local variable that shadows the global
# (the original comment referred to a non-existent `b`)
a=a+1
return a
print(vartest(a)) # prints 2: the global `a` stays 1, the local copy becomes 2
import csv
import inspect
import json
import sys
import traceback
import unittest
# We want to proceed nicely on systems that don't have termcolor installed.
try:
from termcolor import cprint
except ImportError:
sys.stderr.write('termcolor module not found\n')
def cprint(msg, *args, **kwargs):
"""
Fallback definition of cprint in case the termcolor module is not available.
Args:
msg: a str to be printed to stdout.
args: additional positional arguments for cprint that are ignored.
kwargs: keyword arguments for cprint. It is ignored in dummy cprint.
"""
print(msg)
from hawkeye_utils import logger, ResponseInfo
class HawkeyeTestCase(unittest.TestCase):
"""
Extension of unittest.TestCase. It has `app` attribute for easy access to the
tested application.
"""
def __init__(self, methodName, application):
"""
Args:
methodName: A string representing test method name.
application: An Application object.
"""
super(HawkeyeTestCase, self).__init__(methodName)
self.app = application
@classmethod
def all_cases(cls, app):
"""
Helper method which is used for building suites. It creates a list of
test cases with specified `app` attribute. Test case is created for each
method with name starting with 'test' or method with name 'runTest'.
Args:
app: An Application object to be passed to __init__ of HawkeyeTestCase.
Returns:
A list of instances of cls (an instance for each test method).
"""
return [cls(method_name, app) for method_name in cls._cases_names()]
@classmethod
def _cases_names(cls):
return [
name for name, _ in inspect.getmembers(cls, predicate=inspect.ismethod)
if name.startswith("test") or name == "runTest"
]
class HawkeyeTestSuite(unittest.TestSuite):
    """
    A plain unittest.TestSuite that additionally carries a display name and a
    short unique name; hawkeye uses the short name for log files and
    command-line options.
    """

    def __init__(self, name, short_name, **kwargs):
        """
        Args:
          name: A descriptive name for the test suite.
          short_name: A shorter but unique name (ideally one word) used to
            name log files and related command-line options.
          kwargs: keyword arguments forwarded to unittest.TestSuite.__init__.
        """
        super(HawkeyeTestSuite, self).__init__(**kwargs)
        self.short_name = short_name
        self.name = name
class HawkeyeTestResult(unittest.TextTestResult):
"""
Like a usual unittest.TextTestResult but it writes logs to hawkeye logger
and saves report to dictionary like:
{
"tests.search_tests.SearchTest.runTest": "ok",
"tests.search_tests.GetRangeTest.test_default_limit": "FAIL",
"tests.search_tests.GetTest.test_unknown_doc": "skip",
"tests.search_tests.PutTest.test_invalid_id": "ERROR",
...
}
"""
ERROR = "ERROR"
FAILURE = "FAIL"
SUCCESS = "ok"
SKIP = "skip"
EXPECTED_FAILURE = "expected-failure"
UNEXPECTED_SUCCESS = "unexpected-success"
def __init__(self, stream, descriptions, verbosity):
super(HawkeyeTestResult, self).__init__(stream, descriptions, verbosity)
self.verbosity = verbosity
self.report_dict = {}
"""
Item of self.report_dict is pair of test IDs ('<class_name>.<method_name>')
and test status (one of 'ERROR', 'ok', ...)
"""
def startTest(self, test):
super(HawkeyeTestResult, self).startTest(test)
logger.info(
"==========================================\n"
"Starting {test_id}".format(test_id=test.id())
)
def addError(self, test, err):
super(HawkeyeTestResult, self).addError(test, err)
self.report_dict[test.id()] = self.ERROR
logger.error("{test_id} - failed with error:\n{trace}"
.format(test_id=test.id(),
trace=self._render_cut_traceback(test, err)))
def addFailure(self, test, err):
super(HawkeyeTestResult, self).addFailure(test, err)
self.report_dict[test.id()] = self.FAILURE
logger.error("{test_id} - failed with error:\n{trace}"
.format(test_id=test.id(),
trace=self._render_cut_traceback(test, err)))
def addSuccess(self, test):
super(HawkeyeTestResult, self).addSuccess(test)
self.report_dict[test.id()] = self.SUCCESS
logger.debug("{test_id} - succeeded".format(test_id=test.id()))
def addSkip(self, test, reason):
super(HawkeyeTestResult, self).addSkip(test, reason)
self.report_dict[test.id()] = self.SKIP
logger.debug("{test_id} - skipped".format(test_id=test.id()))
def addExpectedFailure(self, test, err):
super(HawkeyeTestResult, self).addExpectedFailure(test, err)
self.report_dict[test.id()] = self.EXPECTED_FAILURE
logger.info("{test_id} - failed as expected".format(test_id=test.id()))
def addUnexpectedSuccess(self, test):
super(HawkeyeTestResult, self).addUnexpectedSuccess(test)
self.report_dict[test.id()] = self.UNEXPECTED_SUCCESS
logger.warn("{test_id} - unexpectedly succeeded"
.format(test_id=test.id()))
def printErrors(self):
if self.verbosity > 1:
super(HawkeyeTestResult, self).printErrors()
else:
self.stream.write("\n")
  def _render_cut_traceback(self, test, err):
    """
    Simplified modification of
    unittest.TestResult._exc_info_to_string(self, err, test).
    It renders only cut stacktrace of error.
    Args:
      test: A TestCase object.
      err: A tuple of exctype, value and tb.
    Returns:
      A string with stacktrace of error.
    """
    exctype, value, tb = err
    # Skip test runner traceback levels
    while tb and self._is_relevant_tb_level(tb):
      tb = tb.tb_next
    if exctype is test.failureException:
      # Skip assert*() traceback levels so only user frames are shown
      length = self._count_relevant_tb_levels(tb)
      msg_lines = traceback.format_exception(exctype, value, tb, length)
    else:
      msg_lines = traceback.format_exception(exctype, value, tb)
    return "".join(msg_lines)
def save_report_dict_to_csv(report_dict, file_name):
  """
  Persists dictionary to csv file in alphabetical order of keys.
  Args:
    report_dict: A dict with statuses of tests (<test_id>: <status>).
    file_name: A string - name of csv file where report should be saved.
  """
  with open(file_name, "w") as csv_file:
    writer = csv.writer(csv_file)
    for test_id in sorted(report_dict):
      writer.writerow((test_id, report_dict[test_id]))
def load_report_dict_from_csv(file_name):
  """
  Loads test statuses report from csv file.
  Args:
    file_name: A string representing name of source csv file.
  Returns:
    A dictionary with statuses of tests (<test_id>: <status>).
  """
  with open(file_name, "r") as csv_file:
    report = {}
    for test_id, result in csv.reader(csv_file):
      # rstrip drops any stray trailing whitespace/CR from the status cell
      report[test_id] = result.rstrip()
    return report
class ReportsDiff(object):
  """
  Util class which defines structure for storing
  difference between test reports.
  """
  def __init__(self):
    # (test_id, status) pairs with equal status in both reports
    self.match = []
    # (test_id, first, second) triples whose statuses disagree
    self.do_not_match = []
    # (test_id, second) pairs present only in the second report
    self.missed_in_first = []
    # (test_id, first) pairs present only in the first report
    self.missed_in_second = []
def compare_test_reports(first_report_dict, second_report_dict):
  """
  Compares two reports and returns ReportsDiff with detailed difference of
  two reports. Supposed to be used for comparison to baseline.
  Args:
    first_report_dict: A dict with statuses of tests (<test_id>: <status>).
    second_report_dict: A dict with statuses of tests (<test_id>: <status>).
  Returns:
    A ReportsDiff with details about difference between 1st and 2nd reports.
  """
  diff = ReportsDiff()
  # .iteritems() was Python 2 only; .items() works on both 2 and 3.
  for test_id, first in first_report_dict.items():
    # Check tests which are presented in the first report
    second = second_report_dict.get(test_id)
    if second == first:
      diff.match.append((test_id, first))
    elif second is None:
      diff.missed_in_second.append((test_id, first))
    else:
      # remaining case: both present but statuses differ
      diff.do_not_match.append((test_id, first, second))
  for test_id, second in second_report_dict.items():
    # Find tests which are not presented in the first report
    if test_id not in first_report_dict:
      diff.missed_in_first.append((test_id, second))
  # Order lists by test_id for stable, readable output
  diff.match.sort(key=lambda item: item[0])
  diff.do_not_match.sort(key=lambda item: item[0])
  diff.missed_in_first.sort(key=lambda item: item[0])
  diff.missed_in_second.sort(key=lambda item: item[0])
  return diff
class HawkeyeSuitesRunner(object):
  # Executes a list of Hawkeye test suites, accumulates a merged
  # {test_id: status} report, and compares it against a baseline file.
  def __init__(self, language, logs_dir, baseline_file, verbosity=1):
    """
    Args:
      language: A string ('python' or 'java').
      logs_dir: A string - path to directory to save files with errors report.
      baseline_file: A string representing name of baseline file.
      verbosity: A flag. Is passed to TextTestRunner and HawkeyeTestResult.
        Defines how many details will be written to stdout.
    """
    self.language = language
    self.logs_dir = logs_dir
    self.baseline_file = baseline_file
    self.verbosity = verbosity
    # Merged report across all suites run so far.
    self.suites_report = {}
  def run_suites(self, hawkeye_suites):
    """
    Iterates through hawkeye_suites and executes containing tests.
    For each failed suite file with error details is saved.
    Summarized report is built.
    Args:
      hawkeye_suites: A list of HawkeyeTestSuite objects.
    """
    for suite in hawkeye_suites:
      print("\n{}".format(suite.name))
      print("=" * len(suite.name))
      test_runner = unittest.TextTestRunner(resultclass=HawkeyeTestResult,
                                            verbosity=self.verbosity,
                                            stream=sys.stdout)
      result = test_runner.run(suite)
      """:type result: HawkeyeTestResult """
      self.suites_report.update(result.report_dict)
      if result.errors or result.failures:
        self._save_error_details(suite.short_name, result)
  # Template for one error/failure entry in the per-suite details file.
  ERR_TEMPLATE = (
    "======================================================================\n"
    "{flavour}: {test_id}\n"
    "----------------------------------------------------------------------\n"
    "{error}\n"
  )
  def _save_error_details(self, suite_short_name, text_test_result):
    """
    Saves error details (related to specific suite)
    to a file in logs directory.
    Args:
      suite_short_name: A string - short name of HawkeyeTestSuite.
      text_test_result: A HawkeyeTestResult object - test result with errors.
    """
    error_details_file = "{logs_dir}/{suite}-{lang}-errors.log".format(
      logs_dir=self.logs_dir, suite=suite_short_name, lang=self.language)
    with open(error_details_file, "w") as error_log:
      error_log.writelines((
        self.ERR_TEMPLATE.format(flavour="ERROR", test_id=test.id(), error=err)
        for test, err in text_test_result.errors
      ))
      error_log.writelines((
        self.ERR_TEMPLATE.format(flavour="FAIL", test_id=test.id(), error=err)
        for test, err in text_test_result.failures
      ))
  def print_summary(self, verbosity):
    """
    Prints comparison to baseline.
    Args:
      verbosity: An integer - if > 1 details about difference is printed.
    """
    baseline_report = load_report_dict_from_csv(self.baseline_file)
    diff = compare_test_reports(baseline_report, self.suites_report)
    # Specify output styles depending on success or failure
    if diff.do_not_match:
      matched_style = None
      different_style = ["bold"]
    else:
      matched_style = ["bold"]
      different_style = None
    # Print summary with nice formatting
    cprint("\nComparison to baseline:", attrs=["bold"])
    cprint(" {match:<3} tests matched baseline result"
           .format(match=len(diff.match)), "green", attrs=matched_style)
    cprint(" {different:<3} tests did not match baseline result"
           .format(different=len(diff.do_not_match)), "red", attrs=different_style)
    if verbosity > 1 and diff.do_not_match:
      # Optionally print details about test which do not match baseline
      different = "\n ".join((
        "{} ... {} ({} was expected)".format(test_id, actual, expected)
        for test_id, expected, actual in diff.do_not_match
      ))
      cprint(" " + different, color="red")
    cprint(" {missed_in_baseline:<3} tests ran, but not found in baseline"
           .format(missed_in_baseline=len(diff.missed_in_first)), attrs=["bold"])
    if verbosity > 1 and diff.missed_in_first:
      # Optionally print details about test which are missed in baseline
      missed_in_baseline = "\n ".join((
        "{} ... {}".format(test_id, actual)
        for test_id, actual in diff.missed_in_first
      ))
      cprint(" " + missed_in_baseline)
    cprint(" {missed_in_suites:<3} tests in baseline, but not ran"
           .format(missed_in_suites=len(diff.missed_in_second)), attrs=["bold"])
    if verbosity > 1 and diff.missed_in_second:
      # Optionally print details about test which are missed in test suites
      missed_in_suites = "\n ".join((
        "{} ... {}".format(test_id, expected)
        for test_id, expected in diff.missed_in_second
      ))
      cprint(" " + missed_in_suites)
class DeprecatedHawkeyeTestCase(HawkeyeTestCase):
  """
  This DEPRECATED abstract class provides a skeleton to implement actual
  Hawkeye test cases. To implement a test case, this class must
  be extended by providing an implementation for the runTest
  method. Use the http_* methods to perform HTTP calls on backend
  endpoints. All the HTTP calls performed via these methods are
  traced and logged to hawkeye-logs/<lang>-detailed <datetime>.log.
  """
  # Language prefix ('python'/'java') that is prepended to request paths.
  LANG = None

  def __init__(self, methodName, application):
    """
    Args:
      methodName: A string representing name of test method (e.g.: "runTest").
      application: An Application object.
    """
    super(DeprecatedHawkeyeTestCase, self).__init__(methodName, application)
    self.description_printed = False

  def runTest(self):
    """
    Called by the unittest framework to run a test case.
    """
    self.run_hawkeye_test()

  def run_hawkeye_test(self):
    """
    Subclasses must implement this method and provide the actual
    test case logic.
    """
    raise NotImplementedError

  @staticmethod
  def _with_form_content_type(headers):
    """
    Return `headers` with a form-urlencoded Content-Type default.

    Shared by http_post/http_put, which previously duplicated this logic.
    Mutates and returns the given dict when one is provided.
    """
    if headers is None:
      return {'Content-Type': 'application/x-www-form-urlencoded'}
    if 'Content-Type' not in headers:
      headers['Content-Type'] = 'application/x-www-form-urlencoded'
    return headers

  def http_get(self, path, headers=None, prepend_lang=True,
               use_ssl=False, **kwargs):
    """
    Perform a HTTP GET request on the specified URL path using Application
    object provided by new HawkeyeTestCase.
    Args:
      path: A URL path fragment (eg: /foo).
      headers: A dictionary to be sent as HTTP headers.
      prepend_lang: If True the value of hawkeye_utils.LANG will be
        prepended to the provided URL path. Default is True.
      use_ssl: If True use HTTPS to make the connection. Defaults to False.
    Returns:
      An instance of ResponseInfo.
    """
    return self.__make_request('GET', path, headers=headers,
                               prepend_lang=prepend_lang, use_ssl=use_ssl, **kwargs)

  def http_post(self, path, payload, headers=None, prepend_lang=True, **kwargs):
    """
    Perform a HTTP POST request on the specified URL path using Application
    object provided by new HawkeyeTestCase. If a content-type header is not set
    defaults to 'application/x-www-form-urlencoded' content type.
    Args:
      path: A URL path fragment (eg: /foo).
      payload: A string payload to be POSTed.
      headers: A dictionary of headers.
      prepend_lang: If True the value of hawkeye_utils.LANG will be
        prepended to the provided URL path. Default is True.
    Returns:
      An instance of ResponseInfo.
    """
    headers = self._with_form_content_type(headers)
    return self.__make_request('POST', path, payload, headers=headers,
                               prepend_lang=prepend_lang, **kwargs)

  def http_put(self, path, payload, headers=None, prepend_lang=True, **kwargs):
    """
    Perform a HTTP PUT request on the specified URL path using Application
    object provided by new HawkeyeTestCase. If a content-type header is not set
    defaults to 'application/x-www-form-urlencoded' content type.
    Args:
      path: A URL path fragment (eg: /foo).
      payload: A string payload to be sent.
      headers: A dictionary of headers.
      prepend_lang: If True the value of hawkeye_utils.LANG will be
        prepended to the provided URL path. Default is True.
    Returns:
      An instance of ResponseInfo.
    """
    headers = self._with_form_content_type(headers)
    return self.__make_request('PUT', path, payload, headers=headers,
                               prepend_lang=prepend_lang, **kwargs)

  def http_delete(self, path, prepend_lang=True, **kwargs):
    """
    Perform a HTTP DELETE request on the specified URL path using Application
    object provided by new HawkeyeTestCase.
    Args:
      path: A URL path fragment (eg: /foo).
      prepend_lang: If True the value of hawkeye_utils.LANG will be
        prepended to the provided URL path. Default is True.
    Returns:
      An instance of ResponseInfo.
    """
    return self.__make_request(
      'DELETE', path, prepend_lang=prepend_lang, **kwargs)

  def assert_and_get_list(self, path):
    """
    Executes a HTTP GET on the specified URL path and parse the output
    payload into a list of entities. Semantics of the GET request are
    similar to that of the http_get function of this class. Additionally
    this function also asserts that the resulting list has at least one
    element.
    Args:
      path: A URL path.
    Returns:
      A list of entities.
    Raises:
      AssertionError If the resulting list is empty.
    """
    response = self.http_get(path)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(response.status, 200)
    # Renamed from `list`, which shadowed the builtin.
    entities = json.loads(response.payload)
    self.assertTrue(len(entities) > 0)
    return entities

  def __make_request(self, method, path, payload=None, headers=None,
                     prepend_lang=True, use_ssl=False, **kwargs):
    """
    Make a HTTP call using the provided arguments. HTTP request and response
    are traced and logged to hawkeye-logs/<lang>-detailed <datetime>.log.
    Args:
      method: HTTP method (eg: GET, POST).
      path: URL path to execute on.
      payload: Payload to be sent. Only used if the method is POST or PUT.
      headers: Any HTTP headers to be sent as a dictionary.
      prepend_lang: If True the value of hawkeye_utils.LANG will be prepended
        to the URL.
      use_ssl: If True use HTTPS to make the connection. Defaults to False.
    Returns:
      An instance of ResponseInfo.
    """
    if prepend_lang:
      path = "/" + self.LANG + path
    response = self.app.request(
      method, path, https=use_ssl, data=payload, headers=headers, **kwargs
    )
    return ResponseInfo(response)
|
# plot the energies
# Created by Martin Gren 2014-10-25.
# (the stray leading '1' expression statement has been removed)

# imports
import matplotlib.pylab as plt
import numpy as np

# input file
filename = 'values.dat'

# import data: column 0 is the lag k, column 1 is the autocorrelation
data = np.loadtxt(filename)

# initial size of plot window
plt.figure(figsize=(8, 6))

# plot the curve and mark phi = e^-2 at k = 10.40
plt.plot(data[:, 0], data[:, 1], '-')
plt.plot(10.40, np.exp(-2), 'rx', markersize=15, label=r'$\phi=e^{-2}$' + ' at k=10.40')

# labels (raw strings keep the LaTeX backslashes intact)
plt.xlabel('k', fontsize=20)
plt.ylabel(r'$\phi$(k)', fontsize=20)
plt.title('Plot of autocorrelation function with respect to the lag k')

# legend
plt.legend(loc='upper right')
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=12)

# axis limits
#plt.xlim([0,data[-1,0]])
#plt.ylim([min(data[:,3])-0.002,max(data[:,1])+0.002])

# tick fontsize
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)

# save, then display the plot
plt.savefig('afc.png')
plt.show()
|
print("hello space") |
# Explicit imports instead of `from flask import *`, which hid where
# Blueprint/g/render_template came from and polluted the namespace.
from flask import Blueprint, g, render_template

clientes = Blueprint('clientes', __name__, url_prefix='/clientes', template_folder='clientes_templates')
@clientes.route('/')
def home():
    """Render the client list page with every row of the `clientes` table."""
    rows = g.data.select_many('SELECT * FROM clientes')
    return render_template('clientes.home.html', datos=rows)
|
from tests.test_base import TestBase
from front.pages.login_page import LogInPage
import unittest
class TestLog(TestBase):
    """UI smoke test: fill in credentials on the login page and submit."""

    def test_login_in(self):
        # Only the side effect of the page-object chain matters, so the
        # returned page object (previously the unused `groups_page`) is
        # not kept.
        LogInPage(self.driver)\
            .user_name("dmytro")\
            .user_password('1234')\
            .submit_log_in()
# Allow running this test module directly via the unittest CLI.
if __name__ == '__main__':
    unittest.main()
|
# Read two space-separated integers and print their least common multiple.
import math

x = input()
a = x.split()
q = int(a[0])
w = int(a[1])
# Both bounds are now inclusive; the original checked `a[1] < 1000`,
# which looks like a typo next to `a[0] <= 1000`.
if q <= 1000 and w <= 1000:
    # lcm(q, w) = q * w // gcd(q, w) -- O(log n) instead of the original
    # brute-force increment loop, and no NameError on the guard's else path
    # (the original referenced an undefined `lcm` when the check failed).
    lcm = q * w // math.gcd(q, w)
    print(lcm)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-23 15:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3: changes the created_at defaults on
    # the `threads` app models.  The hard-coded datetime on `thread` is an
    # artifact of makemigrations capturing `timezone.now()` at generation
    # time; it only affects rows created during this migration's lifetime.
    dependencies = [
        ('threads', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='posts',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='thread',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2016, 11, 23, 15, 1, 39, 472000, tzinfo=utc)),
        ),
    ]
|
from .channels.ses import SES
from .channels.flash import Flash
class Publisher(object):
    """Class to publish notification objects to all channels"""

    def __init__(self):
        # Every configured delivery channel receives each published notice.
        self.channels = [SES(), Flash()]

    def publish(self, notice):
        """Publish the provided `notice` object to all channels"""
        for channel in self.channels:
            channel.send(notice)
|
#!/usr/bin/python3
"""
Fetch the URL given as the first CLI argument and print the value of its
X-Request-Id response header (prints None when the header is absent).
"""
if __name__ == "__main__":
    import urllib.request
    from sys import argv
    # Build the request from the command-line URL and read the header
    # from the live response; `with` closes the connection afterwards.
    req = urllib.request.Request(argv[1])
    with urllib.request.urlopen(req) as response:
        print(response.headers.get('X-Request-Id'))
|
#!/usr/bin/python
import sys
import fileinput
# CLI args: n (range start), m (range end, inclusive), k (output suffix).
n = sys.argv[1]
m = sys.argv[2]
k = sys.argv[3]
# Inclusive sum of the integer range [n, m].
sumFinal = sum(range(int(n), int(m) + 1))
fname = 'out.' + str(k)
# `with` guarantees the file is closed even if the write raises.
with open(fname, 'w') as f:
    f.write(str(sumFinal))
|
import os
import pytz
##############################
# Config
##############################
# Record lifecycle states: (db value, display label).
STATUS = (
    ('active', 'Active'),
    ('inactive', 'Inactive'),
    ('deleted', 'Deleted'),
)
# Lookup of db value -> display label.
STATUS_DICT = dict(STATUS)
# Menu categories: (db value, display label).
CATEGORIES = (
    ('All', 'All'),
    ('Main Dishes', 'Main Dishes'),
    ('Kids Menus', 'Kids Menus'),
    ('Hot Baguette', 'Hot Baguette'),
    ('Burger Bar', 'Burger Bar'),
)
# Lookup of db value -> display label.
CATEGORIES_DICT = dict(CATEGORIES)
|
from glypy.algorithms.subtree_search.inclusion import subtree_of
from glypy.structure.glycan import fragment_to_substructure
import time
import multiprocessing
from . import glycan_io
from . import __init__
from . import json_utility
import re
from glypy.io import wurcs
from glypy.io import glycoct
#### proposed function
def extract_substructure_wurcs_idx(a_glycan, sub_gly, branch=5, linkage_specific=True):
    """
    Count, per substructure fragment of ``a_glycan``, how many times the
    glycan contains it, keyed by WURCS strings.

    :param a_glycan: Glycan obj
    :param sub_gly: dict mapping WURCS substructure string -> dict of
        {glycan WURCS: occurrence count}.  Mutated in place.
    :param branch: max number of cleavages when enumerating fragments
    :param linkage_specific: if False, linkage positions are stripped before
        serializing
    :return: None (results accumulate in ``sub_gly``)
    """
    if linkage_specific:
        gw = wurcs.dumps(a_glycan)
    else:
        # Strip linkage information: serialize to GlycoCT, replace every
        # linkage position with -1, then re-parse and dump as WURCS.
        gct = glycoct.dumps(a_glycan)
        gct = re.sub(r'\(\d', '(-1', gct)
        a_glycan = glycoct.loads(gct)
        gw = wurcs.dumps(a_glycan)
    for frag in a_glycan.fragments(max_cleavages=branch):
        frag_key = wurcs.dumps(fragment_to_substructure(frag, a_glycan))
        # The original bare try/except chain lost the first occurrence
        # whenever frag_key was new: it created sub_gly[frag_key] = {}
        # without ever recording the count.  setdefault + get fixes that.
        counts = sub_gly.setdefault(frag_key, {})
        counts[gw] = counts.get(gw, 0) + 1
def extract_substructure(a_glycan, branch=5):
    """
    Group every fragment substructure of ``a_glycan`` by its length.

    :param a_glycan: Glycan obj
    :param branch: max number of cleavages when enumerating fragments
    :return: dict mapping str(length) -> list of substructures
    """
    by_length = {}
    for frag in a_glycan.fragments(max_cleavages=branch):
        substructure = fragment_to_substructure(frag, a_glycan)
        by_length.setdefault(str(len(substructure)), []).append(substructure)
    # The intact glycan replaces any fragment bucket of the same length,
    # exactly as before.
    by_length[str(len(a_glycan))] = [a_glycan]
    return by_length
def extract_substructure_wrapper(a_name, a_glycan_str, substructure_dic):
    """
    Extract substructures for one glycan and record them under its ID,
    timing the work and swallowing per-glycan parse errors.

    :param a_name: the ID of the Glycan
    :param a_glycan_str: Glycan obj
    :param substructure_dic: dict for storing the extracted substructure dict
    :return:
    """
    try:
        print('start', a_name)
        start_time = time.time()
        substructure_dic[a_name] = extract_substructure(a_glycan_str)
        end_time = time.time()
        print(a_name, len(substructure_dic[a_name]), end_time - start_time)
    except (TypeError, AttributeError):
        # A malformed glycan should not kill the whole worker pool.
        print(a_name, 'has error')
    except KeyboardInterrupt:
        print('break')
def extract_substructures_pip(glycan_dict, gly_len, output_file, num_processors):
    """Please set the prior=True to get the data file please run the NBT_GLYCAN_preprocess file
    If prior=False, it will generate glycan substructure for all glycan in glytoucan database
    1. load {glyacn_id: glycan_str}
    2. convert to glypy.glycan obj {glyacn_id: glycan_}
    3. extract {glyacn_id: substructure_dict}
    4. save glycan_substructure_dic
    :param num_processors: worker process count for the multiprocessing pool
    :param glycan_dict: {glycan_id: glycan obj}
    :param gly_len: the max degree of the glycan that can be processed
    :param output_file: store str type of glycan_substructure_dict
    """
    # multiprocess: a Manager dict is shared across worker processes
    glycan_io.check_glycan_dict(glycan_dict)
    manager = multiprocessing.Manager()
    substructure_dic = manager.dict()
    print('start parallel parsing', len(glycan_dict), 'glycans')
    pool = multiprocessing.Pool(processes=num_processors)
    pool_list = []
    for idex, i in enumerate(glycan_dict):
        # Skip glycans larger than the configured maximum degree.
        if len(glycan_dict[i]) > gly_len:
            print(i, 'larger than max')
            continue
        """ using get substructure with count wrapper
        Also check exists wrapper
        """
        pool_list.append(pool.apply_async(extract_substructure_wrapper, args=(i, glycan_dict[i], substructure_dic)))
    # .get() blocks until each async task finishes (and re-raises errors).
    result_list = [xx.get() for xx in pool_list]
    pool.close()
    pool.join()
    print('finished pool')
    print('glycan_dict', len(substructure_dic))
    # Materialize the shared proxy dict into a plain dict.
    glycan_substructure_dic = dict(substructure_dic)
    # Build a JSON-friendly copy with substructures rendered as strings.
    str_substructure = {}
    for i in glycan_dict:
        if i not in glycan_substructure_dic.keys():
            print('missing', i)
            continue
        str_substructure[i] = {}
        for j in glycan_substructure_dic[i]:
            str_substructure[i][j] = [str(k) for k in glycan_substructure_dic[i][j]]
    if output_file != '':
        json_utility.store_json(output_file, str_substructure)
    return glycan_substructure_dic
# def main():
# # glycan_dict = glycan_io.load_glycan_obj_from_database(topology_list_addr=__init__.topology_list_addr, output_file=__init__.glycan_dict_addr, loader=glycoct)
# # glycan_substructure_dic = get_substructure_pip(glycan_dict=glycan_dict, gly_len=23, output_file=__init__.glycan_substructure_dict_addr)
# pass
#
#
# if __name__ == '__main__':
# main()
|
from __future__ import print_function
from os.path import join, dirname, abspath
from xlrd.sheet import ctype_text
import xlrd
import xlwt
import sys
def readFile(xl_workbook, indice):
    # Read one worksheet and, for each material key (column 0), keep the row
    # with the highest quantity, packed into the module-level `config` dict
    # as "<cantidad>_<descripcion>".
    # NOTE(review): relies on Python 2 `unicode`; this script is Py2-only.
    xl_sheet = xl_workbook.sheet_by_index(indice)
    num_cols = xl_sheet.ncols   # Number of columns
    for row_idx in range(1, xl_sheet.nrows):    # Iterate through rows, skipping the header
        key = (xl_sheet.cell(row_idx, 0));
        key = str(key.value);
        descrip = str(xl_sheet.cell(row_idx, 1).value);
        descrip = unicode(descrip, errors='replace')
        cantidad = int(xl_sheet.cell(row_idx, 2).value);
        print(xl_sheet.cell(row_idx, 2));
        if (not config.get(key)):
            config[key] = str(cantidad) + '_' + descrip;
        else:
            # Keep whichever row has the larger quantity for this key.
            valor = int(config.get(key).split("_")[0]);
            if (cantidad > valor):
                config[key] = str(cantidad) + '_' + descrip;
# Accumulates the best (highest-quantity) row per material across sheets.
config = {}
# Python 2 idiom to allow UTF-8 default encoding for the cell text.
reload(sys)
sys.setdefaultencoding('utf8')
# --------------------- Reading file ---------------------
fname = 'Existencia.xls'
xl_workbook = xlrd.open_workbook(fname)
print("Reading page 0")
readFile(xl_workbook,0)
print("Reading page 1")
readFile(xl_workbook,1)
# --------------------- Saving new file ---------------------
workbook = xlwt.Workbook()
sheet = workbook.add_sheet('Hoja1')
date_format = xlwt.XFStyle()
date_format.num_format_str = '#,##0.00'
# Row 0 is reserved for the header, so data rows start at 1.
cant = 1;
for valor in config:
    # Unpack the "<cantidad>_<descripcion>" value built by readFile.
    sheet.write(cant, 0, valor)
    sheet.write(cant, 1, config.get(valor).split("_")[1])
    sheet.write(cant, 2, int(config.get(valor).split("_")[0]))
    cant = cant+1;
sheet.write(0, 0, 'Material')
sheet.write(0, 1, 'Texto')
sheet.write(0, 2, 'Libre utiliz')
workbook.save('Existencia_Edited.xls')
|
from flask import url_for, redirect, flash, render_template
from flask_login import login_required, current_user
from app.models.base import db
from app.models.wish import Wish
from app.viewmodel.my_wish import WishViewModel
from . import web
__author__ = '七月'
@web.route('/my/wish')
@login_required
def my_wish():
    # Render the current user's wish list together with, per book, the
    # number of other users wishing for it.
    uid = current_user.id
    wishes = Wish.get_user_wish(uid)
    isbn_list = [wish.isbn for wish in wishes]
    wish_count = Wish.wish_count(isbn_list)
    wish_model = WishViewModel(wishes, wish_count)
    return render_template('my_wish.html', wishes=wish_model.assamble())
@web.route('/wish/book/<isbn>')
@login_required
def save_to_wish(isbn):
    # Add the book to the current user's wish list unless it is already on
    # one of their lists; then bounce back to the book detail page.
    if current_user.can_save_to_list(isbn):
        wish = Wish()
        wish.isbn = isbn
        wish.uid = current_user.id
        # auto_commit commits on success and rolls back on error.
        with db.auto_commit():
            db.session.add(wish)
    else:
        flash("这本书已经添加进你的心愿清单或者赠送清单, 请勿重新添加")
    return redirect(url_for("web.book_detail", isbn=isbn))
@web.route('/satisfy/wish/<int:wid>')
def satisfy_wish(wid):
    # TODO: not implemented yet -- should fulfil wish `wid`.
    pass
@web.route('/wish/book/<isbn>/redraw')
def redraw_from_wish(isbn):
    # TODO: not implemented yet -- should remove `isbn` from the wish list.
    pass
|
#! python3
# mapIt.py - Launches a map in the browser using an address from the
# command line or clipboard
import sys, webbrowser, pyperclip  # `sys` was imported twice before

print(sys.version)
if len(sys.argv) > 1:
    # Get the address from the command line.
    # (was `sysargv[1:]`, a NameError at runtime)
    address = ' '.join(sys.argv[1:])
else:
    # Get the address from the clipboard.
    address = pyperclip.paste()
webbrowser.open('https://www.google.com.mx/maps/place/' + address)
|
#Collatz Conjecture
# if x is odd then next term 3*x + 1
# if x is even then next term x//2
def even(n):
    """Return True when n is evenly divisible by 2."""
    return not n % 2
def nextVal(n):
    """Return the next Collatz term: n // 2 when n is even, 3n + 1 when odd."""
    return n // 2 if n % 2 == 0 else 3 * n + 1
num = int(input("Enter start parameter :- "))
i = 0
while(num != 1):
num = nextVal(num)
print(num)
i += 1
print("\n\n")
print(i)
#--------------------------------- |
"""Dijkstra's Algorithm.
Given a weighted graph, find the shortest paths to all nodes.
1. Initialize a set of all nodes. Give all nodes the value of infinity.
2. Initialize an empty set of all visited nodes.
3. Give the starting node a value of 0 and visit it
4. Remove the visited node from the set of all nodes.
5. Iterate through its children
- new_value = node.value+edge
   - if new_value >= child.value: continue
- child.value = new_value
- if child in unvisited: visit(child) (go to 4)
# Alternative Algorithm: A star (A*)
Very similar to dijkstra's except that we are given a pre-defined heuristic function.
This algorithm visits each node a maximum of one time, using the heuristic weight
to determine the next node to query.
- Only "neighbor" nodes of the starting node (etc etc) are queried.
- The operation is performed only while discovered nodes are unvisited (to
  avoid non-connected graphs)
- The "next node" to look at is determined by a minheap of the heuristic set.
- The heuristic value is determined by `cost_from_start + heuristic_cost(self, goal)`
"""
import copy
INF = float("inf")
class Node(object):
    """A graph node: outgoing (edge, child) pairs, best-known value, path."""

    def __init__(self, children=None):
        self.children = children if children is not None else []
        self.value = INF
        self.path = []

    def update(self, parent, edge):
        """Perform the node update.

        Return True if self.value was changed.
        """
        candidate = parent.value + edge
        if candidate >= self.value:
            return False
        self.value = candidate
        self.path = copy.copy(parent.path)
        self.path.append(self)
        return True
class Visiting(object):
    """The visiting state, i.e. which nodes are visited/unvisited.
    The `visit` method is used to implement Dijkstra's algorithm.
    """
    def __init__(self, unvisited):
        # unvisited: {id(node): node} for every node not yet finalized.
        self.unvisited = unvisited
        self.visited = {}
    def visit(self, node, parent, edge):
        # Relax `node` via `parent`, then recurse into its children.
        # A node is re-entered only when its value improved, so the
        # recursion terminates even on cyclic graphs.
        nid = id(node)
        if not node.update(parent, edge) and nid in self.visited:
            # no update and already visited
            return
        self.mark_visited(nid, node)
        for (edge, child) in node.children:
            self.visit(child, node, edge)
    def mark_visited(self, nid, node):
        # Move the node from the unvisited to the visited set (only once).
        if nid in self.unvisited:
            self.visited[nid] = node
            del self.unvisited[nid]
# Acts as the dummy starting "parent" node.
STARTING_NODE = Node()
STARTING_NODE.value = 0
def shortest_path(starting):
    """Find the shortest path to all items in the graph, using the starting
    node as the start.

    This method mutates the input values.
    Returns the {id: Node} dictionary of the algorithm computed.
    """
    state = Visiting(get_nodes(starting))
    state.visit(starting, STARTING_NODE, 0)
    return state.visited
def get_nodes(root) -> "Dict[id,Node]":
    """Get all reachable nodes in the graph using a depth-first search."""
    found = {}
    def collect(node):
        nid = id(node)
        if nid in found:
            return
        found[nid] = node
        for _, child in node.children:
            collect(child)
    collect(root)
    return found
def _get_nodes(out, node):
nid = id(node)
if nid in out:
return
out[nid] = node
for (_, child) in node.children:
_get_nodes(out, child)
## TEST STUFF
class TestNodes(object):
    """
    Fixture graph used by the tests below (edge weights on the arrows):

    a-->9-->b--4-->e
    |       ^
    2       |
    |       1
    v       |
    c---3---d
    """
    def __init__(self):
        self.a = Node()
        self.b = Node()
        self.c = Node()
        self.d = Node()
        self.e = Node()
        # children are (edge_weight, node) pairs
        self.a.children = [
            (9, self.b),
            (2, self.c),
        ]
        self.b.children = [
            (4, self.e),
        ]
        self.c.children = [
            (3, self.d),
        ]
        self.d.children = [
            (1, self.b),
        ]
        self.all_nodes = [
            self.a,
            self.b,
            self.c,
            self.d,
            self.e
        ]
        # {id(node): node}, the shape returned by get_nodes()
        self.node_map = {id(n): n for n in self.all_nodes}
def test_get_nodes():
    # Every node of the fixture graph is reachable from `a`.
    nodes = TestNodes()
    assert get_nodes(nodes.a) == nodes.node_map
def test_shortest_path():
    # Shortest distances from `a`: the a->c->d->b route (2+3+1=6) beats the
    # direct a->b edge (9), so e is reached at 6+4=10.
    nodes = TestNodes()
    visited = shortest_path(nodes.a)
    assert nodes.a.value == 0
    assert nodes.c.value == 2
    assert nodes.d.value == 5
    assert nodes.b.value == 6
    assert nodes.e.value == 10
|
import unittest
from math_series.series import fibonacci, lucas, sum_series
# fibonacci tests for 0, 1, 5, 8
# NOTE(review): these pin a 1-indexed series starting at 0 where index 0 is
# None -- confirm against math_series.series.fibonacci.
def test_zero_fib():
    actual = fibonacci(0)
    expected = None
    assert actual == expected
def test_one_fib():
    actual = fibonacci(1)
    expected = 0
    assert actual == expected
def test_five_fib():
    actual = fibonacci(5)
    expected = 3
    assert actual == expected
def test_eight_fib():
    actual = fibonacci(8)
    expected = 13
    assert actual == expected
# lucas tests for 0, 1, 5, 8
# Lucas numbers start 2, 1, 3, 4, 7, 11, ... with the same recurrence as
# Fibonacci; index 0 is again expected to be None.
def test_zero_lucas():
    actual = lucas(0)
    expected = None
    assert actual == expected
def test_one_lucas():
    actual = lucas(1)
    expected = 2
    assert actual == expected
def test_five_lucas():
    actual = lucas(5)
    expected = 7
    assert actual == expected
def test_eight_lucas():
    actual = lucas(8)
    expected = 29
    assert actual == expected
# sum series tests for fib
# With no optional seeds, sum_series is expected to behave like fibonacci.
def test_optionals_fib_zero():
    actual = sum_series(0)
    expected = None
    assert actual == expected
def test_optionals_fib_one():
    actual = sum_series(1)
    expected = 0
    assert actual == expected
def test_optionals_fib_five():
    actual = sum_series(5)
    expected = 3
    assert actual == expected
def test_optionals_fib_eight():
    actual = sum_series(8)
    expected = 13
    assert actual == expected
# sum series tests for lucas
# Seeds (2, 1) make sum_series behave like the lucas series.
def test_optionals_lucas_zero():
    actual = sum_series(0)
    expected = None
    assert actual == expected
def test_optionals_lucas_one():
    actual = sum_series(1, 2, 1)
    expected = 2
    assert actual == expected
def test_optionals_lucas_five():
    actual = sum_series(5, 2, 1)
    expected = 7
    assert actual == expected
def test_optionals_lucas_eight():
    actual = sum_series(8, 2, 1)
    expected = 29
    assert actual == expected
# sum series tests for different series
# Seeds other than the fibonacci/lucas pairs are expected to be rejected
# with the literal string 'different series'.
def test_optionals_different_series():
    actual = sum_series(3, 1, 1)
    expected = 'different series'
    assert actual == expected
|
import sys
import json
import ntpath
def minify(filepath):
    """Write a minified copy of the JSON file at `filepath`.

    The output is named `<root>_minified<ext>` and is written to the
    current working directory.

    Args:
        filepath: path to a JSON file.
    """
    filename = ntpath.basename(filepath)
    # splitext handles names with multiple dots ("a.b.json") and names with
    # no extension, unlike the original split(".")[0]/[1] indexing.
    fileroot, fileext = ntpath.splitext(filename)
    newfilename = fileroot + "_minified" + fileext
    with open(filepath) as json_file:
        data = json.load(json_file)
    # No spaces after separators -> smallest valid JSON encoding.
    json_minified = json.dumps(data, separators=(',', ':'))
    with open(newfilename, "w") as write_file:
        write_file.write(json_minified)
if __name__ == "__main__":
minify(sys.argv[1])
|
import base64, logging
from typing import Union
def log_response(response) -> str:
    """
    renders a python-requests response as json or as a string
    """
    try:
        return response.json()
    except ValueError:
        # Not JSON: fall back to a short prefix of the raw body.
        return response.content[:40]
def create_logger(name: str, log_level: Union[str, int]) -> logging.Logger:
    """
    Return the logger named `name`, set to `log_level`.

    A bare StreamHandler that emits just the message text is attached the
    first time; repeat calls reuse the already-configured handlers.
    """
    configured = logging.getLogger(name)
    configured.setLevel(log_level)
    if configured.hasHandlers():
        return configured
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(message)s"))
    configured.addHandler(stream_handler)
    return configured
def safe_base64(un_encoded_data: Union[str, bytes]) -> str:
"return ACME-safe base64 encoding of un_encoded_data"
if isinstance(un_encoded_data, str):
un_encoded_data = un_encoded_data.encode("utf8")
r = base64.urlsafe_b64encode(un_encoded_data).rstrip(b"=")
return r.decode("utf8")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/get_pet_labels.py
#
# PROGRAMMER: Marcus Bremer
# DATE CREATED: 03/02/2021
# REVISED DATE:
# PURPOSE: Create the function get_pet_labels that creates the pet labels from
# the image's filename. This function inputs:
# - The Image Folder as image_dir within get_pet_labels function and
# as in_arg.dir for the function call within the main function.
# This function creates and returns the results dictionary as results_dic
# within get_pet_labels function and as results within main.
# The results_dic dictionary has a 'key' that's the image filename and
# a 'value' that's a list. This list will contain the following item
# at index 0 : pet image label (string).
#
##
# Imports python modules
from os import listdir
# TODO 2: Define get_pet_labels function below please be certain to replace None
# in the return statement with results_dic dictionary that you create
# with this function
#
def get_pet_labels(image_dir):
    """Create pet labels from the image filenames in *image_dir*.

    Returns a dict mapping each filename to a one-item list holding the
    label: the lowercased alphabetic words of the filename joined by single
    spaces (e.g. 'Boston_terrier_02259.jpg' -> 'boston terrier').  Hidden
    files (names starting with '.') are skipped.

    Bug fixed: the original appended ``word + ""`` so multi-word labels were
    run together ('bostonterrier'); words are now space-separated.
    """
    in_files = listdir(image_dir)
    results_dic = dict()
    for filename in in_files:
        if filename[0] == ".":
            continue  # skip hidden files such as .DS_Store
        # Keep only the alphabetic words and join them with spaces.
        pet_label = " ".join(
            word for word in filename.lower().split("_") if word.isalpha()
        )
        if filename not in results_dic:
            results_dic[filename] = [pet_label]
        else:
            # listdir cannot return duplicates, but keep the original warning.
            print('There might be a duplicate!', filename)
    return results_dic
|
class Assignment:
    """AST node for ``left = right``.

    Evaluation stores the evaluated right-hand side in the environment
    under the left-hand side's identifier.
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def eval(self, env):
        # Bind the evaluated RHS under the target identifier; returns None.
        env[self.left.id] = self.right.eval(env)

    def __repr__(self):
        return "Assignment: {0} = {1}".format(self.left, self.right)
|
from sys import meta_path
import psycopg2
from flask import Flask, render_template, request
import json
from json import JSONEncoder
import datetime
# Module-level PostgreSQL connection shared by every request handler.
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file before deploying.
connect = psycopg2.connect(user='postgres',
                           password='javb1999',
                           host='127.0.0.1',
                           port='5433',
                           database='bd_Reto_Geteco')
# Flask application object; routes are registered on it below.
app = Flask(__name__)
# VIEW RENDERING
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/create_data')
def create_data():
    """Serve the record-creation form."""
    return render_template('crear.html')
@app.route('/edit_or_delete')
def edit_or_delete():
    """Serve the edit/delete form."""
    return render_template('editar_eliminar.html')
# READ EVERY ROW OF THE TABLE
@app.route('/show_data', methods=['POST'])
def show_datas():
    """Return all rows of the `crud` table as a JSON string.

    date/time/datetime columns are serialized as ISO-8601 strings.
    """
    if request.method == 'POST':
        class DateTimeEncoder(JSONEncoder):
            def default(self, obj):
                if isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
                    return obj.isoformat()
                # Defer to the base class so unknown types raise TypeError
                # instead of being silently encoded as null (the original
                # implicitly returned None here).
                return super().default(obj)

        cursor = connect.cursor()
        try:
            cursor.execute('select * from crud')
            rows = cursor.fetchall()
        finally:
            # Release the cursor even when the query fails (was leaked on error).
            cursor.close()
        return json.dumps(rows, indent=7, cls=DateTimeEncoder)
    return 'error'
# CREATE A NEW RECORD (INSERT INTO THE TABLE)
@app.route('/insert_data', methods=['POST'])
def insert_data():
    """Insert one attention record built from the posted form fields."""
    if request.method == 'POST':
        row = (request.form['fecha_hora'],
               request.form['empresa'],
               request.form['ciudad'],
               request.form['asunto'],
               request.form['respuesta'])
        cur = connect.cursor()
        cur.execute('insert into crud (fecha_hora_atencion, hora_final, empresa, ciudad, asunto, respuesta, fecha_solicitud) values (%s,CURRENT_TIME,%s,%s,%s,%s,CURRENT_DATE)',
                    row)
        connect.commit()
        return 'si'
    return 'error'
# LIST THE IDS OF EXISTING RECORDS
@app.route('/refresh', methods=['POST'])
def refresh():
    """Return every `crud` id as a JSON-encoded string."""
    cursor = connect.cursor()
    cursor.execute('SELECT id FROM crud')
    rows = cursor.fetchall()
    return str(json.dumps(rows))
# FETCH THE EDITABLE FIELDS FOR A GIVEN ID
@app.route('/datas_from_id', methods=['POST'])
def datas_from_id():
    """Return empresa/ciudad/asunto/respuesta for the requested id as JSON."""
    if request.method == 'POST':
        # Renamed from `id`, which shadowed the builtin.
        row_id = request.form['id']
        cur = connect.cursor()
        cur.execute(
            'select empresa, ciudad, asunto, respuesta from crud where id=%s', (row_id,))
        dato = cur.fetchall()
        return str(json.dumps(dato))
    # Explicit fallthrough, consistent with the other handlers
    # (previously returned None implicitly).
    return 'error'
# UPDATE A RECORD
@app.route('/edit', methods=['POST'])
def edit():
    """Apply the posted *_ed_del form fields to one attention record."""
    if request.method == 'POST':
        record_id = request.form['id_atencion']
        # NOTE(review): 'repuesta_ed_del' looks like a typo of
        # 'respuesta_ed_del'; it must match the HTML form field name,
        # so confirm against the template before renaming.
        new_values = (request.form['empresa_ed_del'],
                      request.form['ciudad_ed_del'],
                      request.form['asunto_ed_del'],
                      request.form['repuesta_ed_del'])
        cur = connect.cursor()
        cur.execute('UPDATE crud SET empresa=%s, ciudad=%s, asunto=%s, respuesta=%s WHERE id=%s',
                    new_values + (record_id,))
        connect.commit()
        return 'si'
# DELETE A RECORD
@app.route('/delete', methods=['POST'])
def delete():
    """Delete the attention record whose id was posted."""
    if request.method == 'POST':
        record_id = request.form['id_atencion']
        cur = connect.cursor()
        cur.execute('DELETE FROM crud WHERE id=%s', (record_id,))
        connect.commit()
        return 'si'


if __name__ == '__main__':
    app.run(port=3000, debug=True)
|
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import AuthenticationFailed
from datetime import timedelta, datetime
from django.utils import timezone
from django.conf import settings
import pytz
from rest_framework import status
import datetime
# Time remaining before the token expires.
def expires_in(token):
    """Return the timedelta left before *token* expires.

    A token is valid from its creation time until 08:00 of the day after
    "now"; the remaining lifetime is that allowed window minus the time
    already elapsed.  (Commented-out debug prints removed.)
    """
    time_elapsed = timezone.now() - token.created
    # Expiry moment: tomorrow at 08:00:00 (same tz as timezone.now()).
    tomorrow_8am = (timezone.now() + datetime.timedelta(days=1)).replace(
        hour=8, minute=0, second=0, microsecond=0)
    # Allowed lifetime in (fractional) hours from creation to tomorrow 08:00;
    # diff.seconds deliberately ignores microseconds, as before.
    diff = tomorrow_8am - token.created
    hours = diff.days * 24 + diff.seconds / 3600
    return timedelta(hours=hours) - time_elapsed
def is_token_expired(token):
    """True when the token's remaining lifetime has run out."""
    return expires_in(token).total_seconds() < 0
# When a token has expired it is deleted and replaced with a fresh one
# (carrying a new key) for the same user.
def token_expire_handler(token):
    """Return ``(is_expired, token)``; an expired token is rotated."""
    expired = is_token_expired(token)
    if expired:
        owner = token.user
        token.delete()
        token = Token.objects.create(user=owner)
    return expired, token
#DEFAULT_AUTHENTICATION_CLASSES
class ExpiringTokenAuthentication(TokenAuthentication):
    """
    If token is expired then it will be removed
    and new one with different key will be created
    """
    def authenticate_credentials(self, key):
        # Unknown keys are reported back as a structured error payload.
        try:
            token = Token.objects.get(key = key)
        except Token.DoesNotExist:
            msg = {'status': status.HTTP_400_BAD_REQUEST,"message":"Invalid Token.",'error': True}
            raise AuthenticationFailed(msg)
        # Deactivated accounts may still hold tokens; reject them explicitly.
        if not token.user.is_active:
            amsg = {'status': status.HTTP_400_BAD_REQUEST,"message":"User is not active.",'error': True}
            raise AuthenticationFailed(amsg)
        # Rotates the token if it has expired (see token_expire_handler).
        is_expired, token = token_expire_handler(token)
        if is_expired:
            # NOTE(review): the freshly rotated token is discarded here because
            # the request still fails — confirm the client re-authenticates to
            # obtain the new key.
            tmsg = {'status': status.HTTP_400_BAD_REQUEST,"message":"The Token is expired.",'error': True}
            raise AuthenticationFailed(tmsg)
        return (token.user, token)
def multiple_expire_handler(token):
    """Rotate *token*: delete it and return a brand-new token for the same user."""
    owner = token.user
    token.delete()
    return Token.objects.create(user=owner)
import pymatgen
import sys
from pymatgen.io.cif import CifParser
from pymatgen.core.lattice import Lattice
import os
import math
def cif_structure(file_name):
    """Parse a CIF file and return the pairwise site distance matrix
    of its first structure (a numpy array from pymatgen)."""
    parser = CifParser(file_name)
    # A CIF file may hold several structures; use the first one.
    structure = parser.get_structures()[0]
    temp = structure.distance_matrix
    return temp
def distance(x, y, z):
    """Euclidean distance of the point (x, y, z) from the origin."""
    return math.sqrt(x * x + y * y + z * z)
def main():
    """Compute the distance matrix for the bundled example CIF file."""
    # Input data lives one level up from this script.
    os.chdir("../data/")
    cif_path = "AHOKOX_clean.cif"  # renamed from `file`, which shadowed the builtin
    distance_matrix = cif_structure(cif_path)  # computed but currently unused


if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects
    # (main() was previously called unconditionally at import time).
    main()
from CountOfMy import my_count
# Run the counter only when executed as a script (not on import).
if __name__ == '__main__':
    my_count()
|
"""Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note: The solution set must not contain duplicate quadruplets.
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
"""
class Solution(object):
    def fourSum(self, nums, target):
        """
        Return all unique quadruplets in `nums` that sum to `target`.

        Hash-based O(n^2)-pairs approach: precompute every two-element sum,
        then for each ordered pair (i, j) look up complementary pairs whose
        first index lies beyond j, guaranteeing four distinct, ordered
        indices.  Note: sorts `nums` in place, as before.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        if len(nums) < 4:
            return []
        nums.sort()
        # pair_sums maps a two-element sum to its (i, j) index pairs, i < j.
        # (Renamed from `dict`, which shadowed the builtin; entries are now
        # consistently tuples instead of a mix of tuples and lists.)
        pair_sums, quads = {}, set()
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                pair_sums.setdefault(nums[i] + nums[j], []).append((i, j))
        for i in range(len(nums)):
            for j in range(i + 1, len(nums) - 2):
                complement = target - (nums[i] + nums[j])
                for k, l in pair_sums.get(complement, ()):
                    if k > j:
                        quads.add((nums[i], nums[j], nums[k], nums[l]))
        return [list(q) for q in quads]
# Smoke test: the example from the problem statement, order-insensitive.
if __name__ == '__main__':
    nums = [1,0,-1,0,-2,2]
    sol = Solution()
    exp = [
        [-1, 0, 0, 1],
        [-2, -1, 1, 2],
        [-2, 0, 0, 2]
    ]
    res = sol.fourSum(nums, 0)
    res.sort()
    exp.sort()
    assert res == exp
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import inspect
# print(inspect.getargspec(func))
if sys.version_info > (3, 4):
    def signature():
        """Demonstrate ``inspect.signature()``.

        It exposes a callable's parameters and their default values
        (``inspect._empty`` when a parameter has no default).

        ref: https://docs.python.org/3/library/inspect.html#inspect.signature
        """
        def func(a, b, c=1, **kwargs):
            return a + b

        params = inspect.signature(func).parameters
        assert params["a"].default is inspect._empty
        assert params["b"].default is inspect._empty
        assert params["c"].default == 1
        assert params["kwargs"].default is inspect._empty

    signature()
|
from pprint import pprint
import matcher
def test_narc1():
    """Kansallisarkisto (narc) card citation: repo name and person as page."""
    text = "Kaatuneiden henkilöasiakirjat (kokoelma) - Perhonen Onni Aleksi, 16.10.1907; Kansallisarkisto: https://astia.narc.fi/uusiastia/kortti_aineisto.html?id=2684857838 / Viitattu 26.5.2022"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Kaatuneiden henkilöasiakirjat (kokoelma)"
    assert m.citationpage == "Perhonen Onni Aleksi, 16.10.1907"
def test_narc2():
    """Kansallisarkisto viewer citation: the 'jakso' section becomes the page."""
    text = "Kaatuneiden henkilöasiakirjat (kokoelma) - Perhonen Onni Aleksi, 16.10.1907, jakso 1; Kansallisarkisto: https://astia.narc.fi/uusiastia/viewer/?fileId=5527332269&aineistoId=2684857838 / Viitattu 26.5.2022"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Kaatuneiden henkilöasiakirjat (kokoelma)"
    assert m.citationpage == "jakso 1"
def test_sshy1():
    """SSHY citation with '>' separator: page text after the page number."""
    text = "Tampereen tuomiokirkkoseurakunta - rippikirja, 1878-1887 (MKO166-181 I Aa:17) > 39: Clayhills tjenstespersoner; SSHY: http://www.sukuhistoria.fi/sshy/sivut/jasenille/paikat.php?bid=18233&pnum=39 / Viitattu 6.11.2018"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Tampereen tuomiokirkkoseurakunta"
    assert m.citationpage == "Clayhills tjenstespersoner"
def test_sshy2a():
    """SSHY citation without separators: 'N:o ...' text becomes the page."""
    text = "Tampereen tuomiokirkkoseurakunta rippikirja 1795-1800 (TK630 I Aa:2) N:o 1 Häggman, Kask, Grefvelin ; SSHY http://www.sukuhistoria.fi/sshy/sivut/jasenille/paikat.php?bid=15950&pnum=8 / Viitattu 03.02.2022"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Tampereen tuomiokirkkoseurakunta"
    assert m.citationpage == "N:o 1 Häggman, Kask, Grefvelin"
def test_sshy2b():
    """SSHY citation where the page is given as 'Sivu <n> ...'."""
    text = "Alastaro rippikirja 1751-1757 (JK478 I Aa1:3) Sivu 10 Laurois Nepponen ; SSHY http://www.sukuhistoria.fi/sshy/sivut/jasenille/paikat.php?bid=15846&pnum=13 / Viitattu 03.02.2022"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Alastaro"
    assert m.citationpage == "Sivu 10 Laurois Nepponen"
def test_sshy2c():
    """SSHY citation with no page text: citationpage is empty, title kept whole."""
    text = "Turun suomalainen syntyneet-vihityt-kuolleet 1721-1744 (AP I C1:2) ; SSHY http://www.sukuhistoria.fi/sshy/sivut/jasenille/paikat.php?bid=24277&pnum=144 / Viitattu 14.10.2022"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Turun suomalainen"
    assert m.sourcetitle == "Turun suomalainen syntyneet-vihityt-kuolleet 1721-1744 (AP I C1:2)"
    assert m.citationpage == ""
def test_svar():
    """Swedish SVAR citation: bildid is surfaced as the citation page."""
    text = "Hajoms kyrkoarkiv, Husförhörslängder, SE/GLA/13195/A I/12 (1861-1872), bildid: C0045710_00045"
    m = matcher.matchline([text])
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Hajoms kyrkoarkiv"
    assert m.citationpage == "SVAR bildid: C0045710_00045"
def test_kansalliskirjasto():
    """Multi-line Kansalliskirjasto (digi) newspaper citation."""
    lines = """Vasabladet, 18.11.1911, nro 138, s. 4
https://digi.kansalliskirjasto.fi/sanomalehti/binding/1340877?page=4
Kansalliskirjaston Digitoidut aineistot""".splitlines()
    print(lines)
    m = matcher.matchline(lines)
    assert m is not None
    pprint(m.__dict__)
    assert m.reponame == "Kansalliskirjaston Digitoidut aineistot"
    assert m.citationpage == "18.11.1911, nro 138, s. 4"
def test_astia():
    """Astia-style archive citation — matcher support not implemented yet,
    so the assertions are kept disabled."""
    text = "Turun tuomiokirkkoseurakunnan arkisto. I C SYNTYNEIDEN JA KASTETTUJEN LUETTELOT. I C1 Suomalaisen seurakunnan syntyneet. Suomalaisen seurakunnan syntyneiden ja kastettujen luettelo (1721-1743). Tiedosto 146. Kansallisarkisto. Viitattu 14.10.2022."
    m = matcher.matchline([text])
    #assert m is not None
    #assert m.reponame == ""
    #assert m.citationpage == ""
def test_xxx():
    """Placeholder for a future citation format; assertions disabled."""
    text = ""
    m = matcher.matchline([text])
    #assert m is not None
    #assert m.reponame == ""
    #assert m.citationpage == ""
|
"""
Initialization for unit tests.
"""
from path import Path
from unittest import TestCase
from i18n import config
# Locations of the fixture trees used by the i18n tool tests.
TEST_DATA_DIR = Path('.').abspath() / 'tests' / 'data'
MOCK_APPLICATION_DIR = TEST_DATA_DIR / 'mock-application'
MOCK_DJANGO_APP_DIR = TEST_DATA_DIR / 'mock-django-app'
class I18nToolTestCase(TestCase):
    """
    Base class for all i18n tool test cases.

    Gives every test a `configuration` rooted at the mock-application fixture.
    """
    def setUp(self):
        super(I18nToolTestCase, self).setUp()
        # Fresh Configuration per test so tests cannot leak settings.
        self.configuration = config.Configuration(root_dir=MOCK_APPLICATION_DIR)
|
#!/usr/bin/env python
import unittest
import os
import os.path
import logging
import threading
import shutil
from nose.plugins.attrib import attr
from WMCore.Agent.Configuration import Configuration
from WMQuality.TestInit import TestInit
from WMCore.Operations.RecoveryCode import PurgeJobs
from subprocess import Popen, PIPE
import WMCore.WMInit
class TestRecoveryCode(unittest.TestCase):
    """
    Test for recoveryCode; disaster recovery system
    """
    #This is an integration test
    __integration__ = "So this guy walks into a bar..."
    def setUp(self):
        """
        Mimic remains of destroyed job

        Creates a fake job-cache tree under ./test plus the submit/ and
        logs/ directories the components expect.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.nJobs = 10
        os.mkdir('test')
        os.mkdir('test/Basic')
        os.mkdir('test/Basic/Crap')
        os.mkdir('test/Basic/Crap/JobCollection_1')
        for i in range(self.nJobs):
            os.mkdir('test/Basic/Crap/JobCollection_1/Job_%i' % (i))
        self.logLevel = 'INFO'
        #There must be a submit directory for jobSubmitter
        if not os.path.isdir('submit'):
            os.mkdir('submit')
        #There must be a log directory for jobArchiver
        if not os.path.isdir('logs'):
            os.mkdir('logs')
    def tearDown(self):
        """
        Check to make sure that everything is good.
        """
        if os.path.isdir('test'):
            shutil.rmtree('test')
        # Renamed the loop variable from `file`, which shadowed the builtin.
        for logFile in os.listdir('logs'):
            #You have to remove the staged out logs
            os.remove(os.path.join('logs', logFile))
    def getConfig(self):
        """
        _getConfig_
        For now, build a config file from the ground up.
        Later, use this as a model for the JSM master config
        """
        myThread = threading.currentThread()
        self.testInit = TestInit()
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        config = self.testInit.getConfiguration()
        self.tempDir = self.testInit.generateWorkDir( config = config )
        #Now we go by component
        #First the JobCreator
        config.component_("JobCreator")
        config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
        config.JobCreator.logLevel = self.logLevel
        config.JobCreator.maxThreads = 1
        config.JobCreator.UpdateFromSiteDB = True
        config.JobCreator.pollInterval = 10
        config.JobCreator.jobCacheDir = os.path.join(os.getcwd(), 'test')
        config.JobCreator.defaultJobType = 'processing' #Type of jobs that we run, used for resource control
        config.JobCreator.workerThreads = 2
        config.JobCreator.componentDir = os.path.join(os.getcwd(), 'Components/JobCreator')
        #JobMaker
        config.component_('JobMaker')
        config.JobMaker.logLevel = self.logLevel
        config.JobMaker.namespace = 'WMCore.WMSpec.Makers.JobMaker'
        config.JobMaker.maxThreads = 1
        config.JobMaker.makeJobsHandler = 'WMCore.WMSpec.Makers.Handlers.MakeJobs'
        #JobStateMachine
        config.component_('JobStateMachine')
        config.JobStateMachine.couchurl = os.getenv('COUCHURL', 'mnorman:theworst@cmssrv48.fnal.gov:5984')
        config.JobStateMachine.couchDBName = "mnorman_test"
        #JobSubmitter
        config.component_("JobSubmitter")
        config.JobSubmitter.logLevel = self.logLevel
        config.JobSubmitter.maxThreads = 1
        config.JobSubmitter.pollInterval = 10
        config.JobSubmitter.pluginName = 'ShadowPoolPlugin'
        config.JobSubmitter.pluginDir = 'JobSubmitter.Plugins'
        config.JobSubmitter.submitDir = os.path.join(os.getcwd(), 'submit')
        config.JobSubmitter.submitNode = os.getenv("HOSTNAME", 'badtest.fnal.gov')
        config.JobSubmitter.submitScript = os.path.join(os.getcwd(), 'submit.sh')
        config.JobSubmitter.componentDir = os.path.join(os.getcwd(), 'Components/JobSubmitter')
        config.JobSubmitter.inputFile = os.path.join(os.getcwd(), 'FrameworkJobReport-4540.xml')
        config.JobSubmitter.workerThreads = 1
        config.JobSubmitter.jobsPerWorker = 100
        #JobTracker
        config.component_("JobTracker")
        config.JobTracker.logLevel = self.logLevel
        config.JobTracker.pollInterval = 10
        config.JobTracker.trackerName = 'TestTracker'
        config.JobTracker.pluginDir = 'WMComponent.JobTracker.Plugins'
        config.JobTracker.runTimeLimit = 7776000 #Jobs expire after 90 days
        config.JobTracker.idleTimeLimit = 7776000
        config.JobTracker.heldTimeLimit = 7776000
        config.JobTracker.unknTimeLimit = 7776000
        #ErrorHandler
        config.component_("ErrorHandler")
        config.ErrorHandler.logLevel = self.logLevel
        config.ErrorHandler.namespace = 'WMComponent.ErrorHandler.ErrorHandler'
        config.ErrorHandler.maxThreads = 30
        config.ErrorHandler.maxRetries = 10
        config.ErrorHandler.pollInterval = 10
        #RetryManager
        config.component_("RetryManager")
        config.RetryManager.logLevel = self.logLevel
        config.RetryManager.namespace = 'WMComponent.RetryManager.RetryManager'
        config.RetryManager.maxRetries = 10
        config.RetryManager.pollInterval = 10
        config.RetryManager.coolOffTime = {'create': 10, 'submit': 10, 'job': 10}
        config.RetryManager.pluginPath = 'WMComponent.RetryManager.PlugIns'
        config.RetryManager.pluginName = ''
        config.RetryManager.WMCoreBase = WMCore.WMInit.getWMBASE()
        #JobAccountant
        config.component_("JobAccountant")
        config.JobAccountant.logLevel = self.logLevel
        #config.JobAccountant.logLevel = 'SQLDEBUG'
        config.JobAccountant.pollInterval = 10
        config.JobAccountant.workerThreads = 1
        config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components/JobAccountant')
        #JobArchiver
        config.component_("JobArchiver")
        config.JobArchiver.pollInterval = 10
        config.JobArchiver.logLevel = self.logLevel
        #config.JobArchiver.logLevel = 'SQLDEBUG'
        config.JobArchiver.logDir = os.path.join(os.getcwd(), 'logs')
        return config
    def submitJobs(self, nJobs):
        """
        _submitJobs_
        Submit some broken jdls to the local condor submitter
        """
        submitFile = """
universe = globus
globusscheduler = thisisadummyname.fnal.gov/jobmanager-suck
should_transfer_executable = TRUE
transfer_output_files = FrameworkJobReport.xml
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
log_xml = True
notification = NEVER
Output = condor.$(Cluster).$(Process).out
Error = condor.$(Cluster).$(Process).err
Log = condor.$(Cluster).$(Process).log
Executable = /home/mnorman/WMCORE/test/python/WMCore_t/Operations_t/submit.sh
initialdir = /home/mnorman/WMCORE/test/python/WMCore_t/Operations_t/test/Basic/Crap/JobCollection_1/Job_%i
+WMAgent_JobName = \"65bf3894-d873-11de-9e40-0030482c2dd0-1\"
+WMAgent_JobID = %i
Queue 1
"""
        # Bug fixed: the loop used a literal range(10) and ignored the
        # nJobs argument; it now honours the requested count.
        for i in range(nJobs):
            f = open('submit/submit_%i.jdl' %(i), 'w')
            f.write(submitFile % (i, i))
            f.close()
            command = ["condor_submit", 'submit/submit_%i.jdl' %(i)]
            pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)
            pipe.wait()
    @attr('integration')
    def testPurge(self):
        """
        _testPurge_
        Test the purge function, which should remove all job objects
        """
        config = self.getConfig()
        self.submitJobs(self.nJobs)
        self.assertEqual(len(os.listdir('submit')), self.nJobs, 'Only found %i submit files' %(len(os.listdir('submit'))))
        self.assertEqual(len(os.listdir('logs')), 0, 'Please empty the logs directory')
        #Check that ten jobs were actually submitted
        jobCheckString = os.popen('condor_q %s' %os.getenv('USER')).readlines()[-1]
        self.assertEqual(jobCheckString, '%i jobs; %i idle, 0 running, 0 held\n' % (self.nJobs, self.nJobs))
        purgeJobs = PurgeJobs(config)
        purgeJobs.run()
        self.assertEqual(os.listdir('test'), [])
        self.assertEqual(len(os.listdir('logs')), self.nJobs, \
                         'Found %i tarballs instead of %i in logOut directory' \
                         %(len(os.listdir('logs')), self.nJobs) )
        self.assertEqual(os.listdir('submit'), [])
        #Check that jobs were actually removed
        jobCheckString = os.popen('condor_q %s' %os.getenv('USER')).readlines()[-1]
        self.assertEqual(jobCheckString, '0 jobs; 0 idle, 0 running, 0 held\n' )
        return
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python3
#
# Convert a test specification to command-line options
import pscheduler
from validate import spec_is_valid
from validate import MAX_SCHEMA
# Load the JSON test spec from stdin and refuse anything invalid.
spec = pscheduler.json_load(exit_on_error=True, max_schema=MAX_SCHEMA)
valid, message = spec_is_valid(spec)
if not valid:
    pscheduler.fail(message)

# Translate the spec's members into their command-line equivalents.
result = pscheduler.speccli_build_args(
    spec,
    strings=[
        # String-valued options
        ('count', 'count'),
        ('dest', 'dest'),
        ('flow-label', 'flow-label'),
        ('interval', 'interval'),
        ('ip-version', 'ip-version'),
        ('source', 'source'),
        ('source-node', 'source-node'),
        ('ip-tos', 'ip-tos'),
        ('length', 'length'),
        ('ttl', 'ttl'),
        ('deadline', 'deadline'),
        ('timeout', 'timeout'),
        ('port', 'port'),
        ('protocol', 'protocol'),
    ],
    bools=[
        # Boolean flags
        ('suppress-loopback', 'suppress-loopback'),
        ('fragment', 'fragment'),
        ('hostnames', 'hostnames'),
    ])

pscheduler.succeed_json(result)
|
import matplotlib.pyplot as plt

# Quarterly totals to visualise.
D = dict(
    Q1=210,
    Q2=260,
    Q3=250,
    Q4=230,
)
explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice

labels = list(D.keys())
sizes = list(D.values())
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels,
        autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
|
""" FTPServerUsingTCP.py
Yuqi Liu, Libin Feng
CISC-650 Fall 2014
Sep 26, 2014
This module will:
a. Creates a TCP socket and listens to connection request on port 12306;
b. Accepts request from client and receives the client's desired filename;
c. Searches the file under current directory. If not found, sends "no" bcak to the client, and closes the TCP connection; Other wise, sends "yes" back to the client and transmit the content of the file;
d. Closes the TCP connection.
"""
from socket import *
serverPort = 12306
filePort = 12307
# Creates TCP welcoming and file transfer socket
serverSocket = socket(AF_INET,SOCK_STREAM)
fileSocket = socket(AF_INET,SOCK_STREAM)
# Bind both ports up front.  A busy port is a fatal configuration error, so
# exit with a non-zero status (the original exited 0) and catch only OSError
# (a bare `except` also swallowed KeyboardInterrupt/SystemExit).
try:
    serverSocket.bind(("",serverPort))
except OSError:
    print ("***** FTPServerUsingTCP: error: Port 12306 is not available, quitting...")
    exit(1)
try:
    fileSocket.bind(("",filePort))
except OSError:
    print ("***** FTPServerUsingTCP: error: Port 12307 is not available, quitting...")
    exit(1)
# Server begins listening for incoming TCP requests
serverSocket.listen(1)
print ("The FTP Server running over TCP is listening on port %d ..." % serverPort)
fileSocket.listen(1)
print ("The FTP Server running over TCP is listening on port %d ... \n" % filePort)
while 1:
    # Waits for incoming requests; new socket created on return
    connectionSocket, addr = serverSocket.accept()
    print ("Connection established for client (IP, port) = %s" % str(addr))
    # Reads the filename from socket sent by the client.
    file_name = connectionSocket.recv(255)
    file_name = file_name.decode("utf-8").strip()
    # Opens the desired file.
    # On success send "yes" and proceed to the transfer; otherwise send "no",
    # close the control connection, and wait for the next client.
    try:
        file_handler = open(file_name, 'rb')
    except OSError:  # narrowed from a bare except: only I/O errors are expected
        connectionSocket.send(b"no")
        print ("***** Server log: file %s is not found, sent no to the client.\n" % file_name)
        connectionSocket.close()
        print("Connection to (IP, addr) = %s closed." % str(addr))
        continue
    connectionSocket.send(b"yes")
    connectionSocket.close()
    print("Connection to (IP, addr) = %s closed." % str(addr))
    # accepts the new file transfer connection
    transferSocket, addr = fileSocket.accept()
    print ("File Transfer connection established for client (IP, port) = %s" % str(addr))
    # Reads the content of the file
    file_content = file_handler.read()
    # Tries to send the file to the client over the transfer connection and
    # reports success or failure on the console.
    try:
        transferSocket.send(file_content)
        file_handler.close()
        print ("file \"%s\" sent successfully!" % file_name)
    except OSError:  # narrowed from a bare except
        print ("***** Server log: file \"%s\" sent FAILED!!!" % file_name)
    # Closes the TCP file transfer connection.
    transferSocket.close()
    print("Connection to (IP, addr) = %s closed.\n" % str(addr))
|
#!/Users/Ricko_Swuave/Documents/Python/refresh/refreshenv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
from django.http import HttpResponse
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from .models import Sensor
@csrf_exempt
@require_POST
def create_sensor(request):
    """Echo the raw POST body back to the caller (sensor-creation stub)."""
    payload = request.body.decode('utf-8')
    return HttpResponse(f"Creating sensor with params: {payload}")
@csrf_exempt
@require_GET
def get_sensor(request):
    """Return the five newest sensors, ordered by name descending."""
    sensors_list = Sensor.objects.order_by('-name')[:5]
    # NOTE(review): HttpResponse is handed a QuerySet, so the body is the
    # concatenated str() of each Sensor — confirm this is the intended format.
    return HttpResponse(sensors_list)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: youngmpjlt
# Dates and times: demo of the time, calendar and datetime modules (Python 2).
# Get the current timestamp (seconds since the epoch).
import time; # import the time module
ticks = time.time()
print '当前时间戳为:', ticks
localtime = time.localtime(time.time())
print '本地时间为:',localtime # current local time as a struct_time
localtime = time.asctime(time.localtime(time.time()))
print '本地时间为 :',localtime # human-readable time, e.g. Thu Apr 7 10:05:21 2016
# Format dates with strftime.
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) #2016-04-07 10:25:09
print time.strftime('%a %b %d %H:%M:%S',time.localtime()) #Thur Apr 07 10:25:09 2016
a = 'Sat Mar 28 22:24:24 2016'
print time.mktime(time.strptime(a,'%a %b %d %H:%M:%S %Y')) # parse back into a timestamp: 1459175064.0
import calendar
cal = calendar.month(2017,8)
print '以下输出2017年8月份的日历'
print cal;
import datetime
i = datetime.datetime.now()
print ('当前的日期和时间是 %s' % i)
print ('ISO格式的日期和时间是 %s' % i.isoformat())
print ('当前的年份是 %s' %i.year)
print ('当前的月份是 %s' %i.month)
print ('当前的日期是 %s' %i.day)
print ('dd/mm/yyyy 格式是 %s/%s/%s' %(i.day, i.month, i.year))
print ('当前小时是 %s' %i.hour)
print ('当前分钟是 %s' %i.minute)
print ('当前秒是 %s' %i.second)
|
# -*- coding: utf-8 -*-
import webbrowser
import config
from databasemanager import ImdbLocalDatabase
from htmlgenerator import HtmlGenerator
from moviecollection import OrderedMovieCollection
import sys
def main():
    """Refresh the local IMDb data, build the suggestion page, and open it."""
    if sys.version_info < (3, 5):
        class PythonVersionException(Exception):
            pass
        raise PythonVersionException("Fatal : must use python 3.5 or greater")
    ild = ImdbLocalDatabase(config.IMDB_DATABASE_URLS, config.JSON_LOCAL_DATABASE_NAME)
    ild.maybeUpdateDatabase()
    omc = OrderedMovieCollection()
    omc.build(config.JSON_LOCAL_DATABASE_NAME, config.IMDB_RATING_RSS)
    hg = HtmlGenerator(config.HTML_LOCAL_NAME)
    hg.generate(omc)
    # Open the page we just generated.  The name was previously hard-coded as
    # 'movie-suggestion.html', drifting from the config.HTML_LOCAL_NAME used
    # above to generate it.
    webbrowser.open_new_tab(config.HTML_LOCAL_NAME)


if __name__ == '__main__':
    main()
|
import os
# https://stackoverflow.com/questions/15297834/infinite-board-conways-game-of-life-python
def main():
    """Run the day-22 puzzle against the committed input file and print the answer."""
    # Resolve the input file relative to this script, not the caller's cwd.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    inp = open("day22_input.txt").read().splitlines()
    print(solve(inp))
def solve(inp, max_cycles=10000):
    """Advent of Code 2017, day 22 (part 1).

    Simulate a virus carrier on an infinite sparse grid seeded from `inp`
    (a list of equal-length rows of '#'/'.' centred on the origin) and
    return how many bursts *cause* an infection within `max_cycles`.
    """
    # Sparse grid: (x, y) -> infected?  Seeded so the map is centred on (0, 0).
    grid = {}
    dim = len(inp[0]) // 2
    for line, y in zip(inp, range(-dim, dim + 1)):
        for c, x in zip(line, range(-dim, dim + 1)):
            grid[(x, y)] = (c == '#')
    pos = [0, 0]
    facing = 0  # 0=up, 1=right, 2=down, 3=left
    infected = 0
    # (dx, dy) per facing — replaces the if/elif ladder executed every burst.
    moves = ((0, -1), (1, 0), (0, 1), (-1, 0))
    for _ in range(max_cycles):
        here = tuple(pos)
        currently_infected = grid[here]  # single lookup instead of three
        if currently_infected:
            facing = (facing + 1) % 4  # turn right on an infected node
        else:
            facing = (facing - 1) % 4  # turn left on a clean node
            infected += 1              # this burst infects the node
        grid[here] = not currently_infected
        pos[0] += moves[facing][0]
        pos[1] += moves[facing][1]
        # Materialise unseen nodes as clean (was `not in grid.keys()`).
        grid.setdefault(tuple(pos), False)
    return infected


if __name__ == '__main__':
    main()
|
# Greatest product of three numbers: print the triple of input values whose
# product is largest.  It is either the three largest values, or the two
# smallest (possibly both negative) together with the largest.
A = list(map(int, input().split()))
if len(A) == 3:
    print(*A)
else:
    # One sort replaces the original's repeated full scans with remove()
    # (O(n log n) instead of O(n^2)); output order is unchanged.
    s = sorted(A)
    Min1, Min2 = s[0], s[1]
    Max1, Max2, Max3 = s[-1], s[-2], s[-3]
    if Min1 * Min2 * Max1 > Max1 * Max2 * Max3:
        print(Min1, Min2, Max1)
    else:
        print(Max1, Max2, Max3)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.