# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import os
from bakery_lint.base import BakeryTestCase as TestCase
from bakery_cli.pifont import PiFont
from bakery_cli.utils import UpstreamDirectory
class TestDiacritic(TestCase):
""" These tests are using text file with contents of diacritics glyphs """
name = __name__
targets = ['upstream-repo']
tool = 'lint'
def setUp(self):
path = os.path.realpath(os.path.dirname(__file__))
content = open(os.path.join(path, 'diacritics.txt')).read()
self.diacriticglyphs = [x.strip() for x in content.split() if x.strip()]
self.directory = UpstreamDirectory(self.operator.path)
def is_diacritic(self, glyphname):
for diacriticglyph in self.diacriticglyphs:
if glyphname.find(diacriticglyph) >= 1:
return True
def filter_diacritics_glyphs(self):
diacritic_glyphs = []
for filepath in self.directory.UFO:
pifont = PiFont(os.path.join(self.operator.path, filepath))
for glyphcode, glyphname in pifont.get_glyphs():
if not self.is_diacritic(glyphname):
continue
diacritic_glyphs.append(pifont.get_glyph(glyphname))
return diacritic_glyphs
def test_diacritic_made_as_own_glyphs(self):
""" Check that diacritic glyph are made completely with flat method """
diacritic_glyphs = self.filter_diacritics_glyphs()
flatglyphs = 0
for glyph in diacritic_glyphs:
if glyph.contours and not glyph.components and not glyph.anchors:
flatglyphs += 1
if flatglyphs and len(diacritic_glyphs) != flatglyphs:
percentage = flatglyphs * 100. / len(diacritic_glyphs)
self.fail('%.2f%% are made by Flat' % percentage)
def test_diacritic_made_as_component(self):
""" Check that diacritic glyph are made completely with composite """
diacritic_glyphs = self.filter_diacritics_glyphs()
compositeglyphs = 0
for glyph in diacritic_glyphs:
if glyph.components:
compositeglyphs += 1
if compositeglyphs and len(diacritic_glyphs) != compositeglyphs:
percentage = compositeglyphs * 100. / len(diacritic_glyphs)
self.fail('%.2f%% are made by Composite' % percentage)
def test_diacritic_made_as_mark_to_mark(self):
""" Check that diacritic glyph are made completely with mark method """
diacritic_glyphs = self.filter_diacritics_glyphs()
markglyphs = 0
for glyph in diacritic_glyphs:
if glyph.anchors:
markglyphs += 1
if markglyphs and len(diacritic_glyphs) != markglyphs:
percentage = markglyphs * 100. / len(diacritic_glyphs)
self.fail('%.2f%% are made by Mark' % percentage)
|
# Find the position of a specific number in a list
# Input: list a, value x to search for
# Output: the position of the value if found, -1 if not found
def search_list(a,x):
n = len(a)
for i in range(0,n):
if x == a[i]:
return i,a[i]
return -1
v=[18,97,5,46,84,21,5948,491,545]
print(search_list(v,21))
print(search_list(v,10))
print(search_list(v,18))
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from image_cropping import ImageCroppingMixin
from base.admin import BaseArticleAdmin, BaseArticleSectionInline
from press import models
from snippets.admin.admin import ModelTranlsationFieldsetsMixin
class NewsSectionInline(BaseArticleSectionInline):
"""Секции новости"""
fields = models.NewsSection().collect_fields()
model = models.NewsSection
@admin.register(models.News)
class NewsAdmin(ImageCroppingMixin, ModelTranlsationFieldsetsMixin, BaseArticleAdmin):
"""Новости и события"""
inlines = (NewsSectionInline,)
class Media:
js = ('admin/js/translit.js',)
|
'''
@Description: In User Settings Edit
@Author: suzhan
@Date: 2019-07-14 15:09:08
@LastEditTime: 2019-07-22 16:50:12
@LastEditors: Please set LastEditors
'''
from model_fasttext.basic_model import Model
import tensorflow as tf
from multiply import ComplexMultiply
import numpy as np
class Fasttext(Model):
    def _get_embedding(self):
        with tf.name_scope("embedding"):
            # Word embeddings initialised from the pre-trained vectors.
            self.embedding_W = tf.Variable(np.array(self.embeddings), name="W", dtype="float32", trainable=self.trainable)
            # Position embeddings, one row per vocabulary entry.
            self.embedding_W_pos = tf.Variable(self.Position_Embedding(self.embedding_dim), name="W_pos", trainable=self.trainable)
            self.embedded_chars_q, self.embedding_chars_q_phase = self.concat_embedding(self.sentence, self.sentence_position)
            self.embedded_chars_q = tf.reduce_sum([self.embedded_chars_q, self.embedding_chars_q_phase], 0)
            # Average over the sentence dimension (fastText-style pooling).
            self.represent = tf.reduce_mean(self.embedded_chars_q, 1)
            print(self.represent)
    def concat_embedding(self, words_indice, position_indice):
        embedded_chars_q = tf.nn.embedding_lookup(self.embedding_W, words_indice)             # [batch, sen_len, embed_size]
        embedding_chars_q_phase = tf.nn.embedding_lookup(self.embedding_W_pos, words_indice)  # [batch, sen_len, embed_position_size]
        pos = tf.expand_dims(position_indice, 2)
        pos = tf.cast(pos, tf.float32)
        embedding_chars_q_phase = tf.multiply(pos, embedding_chars_q_phase)
        [embedded_chars_q, embedding_chars_q_phase] = ComplexMultiply()([embedding_chars_q_phase, embedded_chars_q])
        return embedded_chars_q, embedding_chars_q_phase
    def Position_Embedding(self, position_size):
        seq_len = self.vocab_size
        position_j = 1. / tf.pow(10000., 2 * tf.range(position_size, dtype=tf.float32) / position_size)
        position_j = tf.expand_dims(position_j, 0)
        position_i = tf.range(tf.cast(seq_len, tf.float32), dtype=tf.float32)
        position_i = tf.expand_dims(position_i, 1)
        position_ij = tf.matmul(position_i, position_j)
        position_embedding = position_ij
        return position_embedding
|
import logging
def get_logger(current_frame, name):
    """Return a logger named after the caller's module.
    current_frame -- frame object of the caller (e.g. from inspect.currentframe())
    name          -- fallback logger name if the caller's module cannot be determined
    """
    try:
        logger_name = current_frame.f_back.f_globals['__name__']
        logger_obj = logging.getLogger(logger_name)
    except (AttributeError, KeyError):
        logger_obj = logging.getLogger(name)
    return logger_obj
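# A minimal usage sketch (illustrative, not part of the original module): pass the
# caller's frame so the logger takes the calling module's name; the second argument
# is only a fallback.
if __name__ == "__main__":
    import inspect
    def demo():
        # Inside a function the frame has a caller, so the logger is named after
        # the calling module ("__main__" when run as a script).
        return get_logger(inspect.currentframe(), "fallback")
    logging.basicConfig(level=logging.INFO)
    demo().info("hello from the frame-derived logger")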
|
# -*- coding: utf-8 -*-
__author__ = 'yuvv'
import sys
import pygame
from pygame.locals import *
SCREEN_SIZE = (480, 390)
# STAGE = 1
MAP_LIST = []
BALL_LIST = []
MAP_SIZE = (13, 16)
PIC_SIZE = (30, 30)
MAN_POS = [SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2]
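# Tile codes used in MAP_LIST (inferred from the PIC_RES_LIST ordering and the
# checks in update_rect/check_state below):
#   0 = background, 1 = wall, 2 = floor, 3 = target spot ("ball"), 4 = box,
#   5 = box on target, 6 = man, 7 = man on target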
def update_rect(position, direction):
x, y = position
x1, y1 = x + direction[0], y + direction[1]
x2, y2 = x1 + direction[0], y1 + direction[1]
if MAP_LIST[x1][y1] == 4: # first block is box
if MAP_LIST[x2][y2] == 2: # second block is floor
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 6
MAP_LIST[x2][y2] = 4
elif MAP_LIST[x2][y2] == 3: # second block is ball
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 6
MAP_LIST[x2][y2] = 5
else:
return []
return [(x, y), (x1, y1), (x2, y2)]
elif MAP_LIST[x1][y1] == 5: # first block is box_ball
if MAP_LIST[x2][y2] == 2: # second block is floor
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 7
MAP_LIST[x2][y2] = 4
elif MAP_LIST[x2][y2] == 3: # second block is ball
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 7
MAP_LIST[x2][y2] = 5
else:
return []
return [(x, y), (x1, y1), (x2, y2)]
elif MAP_LIST[x1][y1] == 3:
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 7
return [(x, y), (x1, y1)]
elif MAP_LIST[x1][y1] == 2: # first block is floor
MAP_LIST[x][y] = 2 if MAP_LIST[x][y] == 6 else 3
MAP_LIST[x1][y1] = 6
return [(x, y), (x1, y1)]
else: # the man can't be moved
return []
def check_state():
for x, y in BALL_LIST:
if MAP_LIST[x][y] != 5:
return False
return True
def init_stage(stage=1):
try: # try open present stage map
with open('map/map_%d' % stage, 'r') as f:
for line in f:
this_line = []
for word in line:
if word == '\n':
continue
else:
this_line.append(int(word))
MAP_LIST.append(this_line)
except IOError:
sys.exit()
def main():
STAGE = 1
# initialize the window
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE, 0, 32)
pygame.display.set_caption('BOXING')
back = pygame.image.load('pic/0.bmp').convert()
wall = pygame.image.load('pic/1.bmp').convert()
floor = pygame.image.load('pic/2.bmp').convert()
ball = pygame.image.load('pic/3.bmp').convert()
box = pygame.image.load('pic/4.bmp').convert()
box_ball = pygame.image.load('pic/5.bmp').convert()
man = pygame.image.load('pic/6.bmp').convert()
man_ball = pygame.image.load('pic/7.bmp').convert()
PIC_RES_LIST = [back, wall, floor, ball, box, box_ball, man, man_ball] # the resource list
init_stage()
# initialize the map
    for x in range(MAP_SIZE[0]):
        for y in range(MAP_SIZE[1]):
            if MAP_LIST[x][y] == 6:
                MAN_POS = [x, y]  # record the man's position
            if MAP_LIST[x][y] in [3, 5, 7]:
                BALL_LIST.append((x, y))  # record the ball's position
            screen.blit(PIC_RES_LIST[MAP_LIST[x][y]], (PIC_SIZE[1] * y, PIC_SIZE[0] * x))
    # the main event loop
while True:
if check_state():
STAGE += 1
BALL_LIST.clear()
MAP_LIST.clear()
MAN_POS = []
init_stage(STAGE)
# initialize the map
for x in range(MAP_SIZE[0]):
for y in range(MAP_SIZE[1]):
                    if MAP_LIST[x][y] == 6:
                        MAN_POS = [x, y]  # record the man's position
                    if MAP_LIST[x][y] in [3, 5, 7]:
                        BALL_LIST.append((x, y))  # record the ball's position
screen.blit(PIC_RES_LIST[MAP_LIST[x][y]], (PIC_SIZE[1] * y, PIC_SIZE[0] * x))
direction = (0, 0)
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
direction = (-1, 0)
elif event.key == K_DOWN:
direction = (1, 0)
if event.key == K_LEFT:
direction = (0, -1)
elif event.key == K_RIGHT:
direction = (0, 1)
rect = update_rect(MAN_POS, direction)
for x, y in rect:
screen.blit(PIC_RES_LIST[MAP_LIST[x][y]], (PIC_SIZE[1] * y, PIC_SIZE[0] * x))
if MAP_LIST[x][y] == 6 or MAP_LIST[x][y] == 7:
MAN_POS = (x, y)
pygame.display.update()
if __name__ == '__main__':
main()
|
import math
from numpy import random
import time
from collections.abc import Iterable
import csv
def fitness(sequence, evaluator):
result = evaluator.run(sequence)
res = result[0][0]
return res
def softmax(lst):
result = []
lst = list(map(math.exp, lst))
sigma = sum(lst)
for value in lst:
result.append(value/sigma)
return result
def fitnesses(population, best_value, evaluator, fitness_step):
result = []
for sequence in population:
temp_value = fitness(sequence, evaluator)
result.append(temp_value)
if temp_value > best_value:
best_value = temp_value
fitness_step += 1
return result, fitness_step
def mutation(parameter, mutation_rate):
if isinstance(parameter, Iterable):
result = []
for i in parameter:
result.append(mutation(i, mutation_rate))
else:
if random.random() < mutation_rate:
result = random.randint(0, 20)
else:
result = parameter
return result
def step(population, fitnesses_result, population_size, mutation_rate):
numbers = len(population[0])
indexes = list(range(0, len(population)))
roulette = softmax(fitnesses_result)
new_population = []
for i in range(population_size):
parents_index1, parents_index2 = random.choice(indexes, p=roulette), random.choice(indexes, p=roulette)
parents_1, parents_2 = population[parents_index1], population[parents_index2]
crossover = random.randint(0, numbers + 1)
offspring = parents_1[:crossover] + parents_2[crossover:]
offspring = mutation(offspring, mutation_rate)
new_population.append(offspring)
return new_population
def ga(population, mutation_rate, evaluator, fitness_step):
start = time.time()
f = open('result.csv', 'w')
wr = csv.writer(f)
generation_step = 0
best_value = 0
population_size = len(population)
total_population_size = 0
fitnesses_result, fitness_step = fitnesses(population, best_value, evaluator, fitness_step)
best_index = fitnesses_result.index(max(fitnesses_result))
best_value = fitnesses_result[best_index]
best_input = population[best_index]
if best_value >= 1.0:
f.close()
return best_input, best_value, fitness_step, total_population_size
while time.time() - start <= 400:
# while population_size <= 80000:
population = step(population, fitnesses_result, population_size, mutation_rate)
total_population_size += population_size
fitnesses_result, fitness_step = fitnesses(population, best_value, evaluator, fitness_step)
best_index = fitnesses_result.index(max(fitnesses_result))
if fitnesses_result[best_index] > best_value:
best_value = fitnesses_result[best_index]
best_input = population[best_index]
if total_population_size % 200 == 0:
print('population size = {}, best_value = {}'.format(total_population_size, best_value))
# wr.writerow([total_population_size, best_value])
wr.writerow([time.time() - start, best_value])
if best_value >= 1.0:
f.close()
return best_input, best_value, fitness_step, total_population_size
generation_step += 1
f.close()
return best_input, best_value, fitness_step, total_population_size
def main(population, mutation_rate, evaluator):
start = time.time()
best_input, best_value, fitness_step, total_population_size= ga(population, mutation_rate, evaluator, 0)
running_time = time.time() - start
return best_input, best_value, fitness_step, total_population_size, running_time
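# Usage sketch (illustrative, not from the original file): the GA only assumes
# that `evaluator.run(sequence)` returns a nested result whose [0][0] entry is
# the fitness value; DummyEvaluator below is an assumed stand-in.
if __name__ == "__main__":
    class DummyEvaluator(object):
        target = [3, 1, 4, 1, 5, 9, 2, 6]
        def run(self, sequence):
            matches = sum(1 for a, b in zip(sequence, self.target) if a == b)
            return [[matches / float(len(self.target))]]
    population = [list(random.randint(0, 20, size=8)) for _ in range(20)]
    scores, _ = fitnesses(population, 0, DummyEvaluator(), 0)
    next_gen = step(population, scores, population_size=20, mutation_rate=0.05)
    print("best initial fitness:", max(scores), "- next generation size:", len(next_gen))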
|
"""
Advent of Code 2019
Day 4
"""
def check_adjacent_digits_present(password):
password = "".join(password)
results = dict()
digits = [str(i) for i in range(0, 10)]
for digit in digits:
chklist = [str(digit * x) for x in range(2, 7)]
for chk in chklist:
if chk in password:
results[digit] = chk
    chains = results.values()
    # A password qualifies only when some digit appears in a run of exactly two;
    # runs of three or more on their own do not count.
    for chain in chains:
        if len(chain) == 2:
            return True
    return False
def check_values_dont_decrease(password):
if password[0] <= password[1] <= password[2] <= password[3] <= password[4] <= password[5]:
return True
else:
return False
def main():
to_test = [list(str(password)) for password in range(108457, 562041)]
good_passwords = list()
for password in to_test:
if len(password) == 6 and check_adjacent_digits_present(password) and check_values_dont_decrease(password):
good_passwords.append(password)
else:
continue
print(len(good_passwords))
if __name__ == "__main__":
main()
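    # Quick sanity checks (illustrative; the sample values are the usual day-4
    # part-two examples): 112233 qualifies, 123444 does not (its only run is
    # three 4s), 111122 qualifies.
    for sample in ("112233", "123444", "111122"):
        print(sample, check_adjacent_digits_present(sample) and check_values_dont_decrease(sample))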
|
#!/usr/bin/env python
import os
import sys
import time
import xml.etree.ElementTree as et
import urllib2
import httplib
import psutil
HOST = os.environ.get('HOST', "127.0.0.1")
PORT = os.environ.get('PORT', "8053")
BINDSTATS_URL = "http://%s:%s" % (HOST, PORT)
PROCESS_NAME = "named"
Path_base = "bind/statistics"
Path_views = "bind/statistics/views/view"
GraphCategoryName = "bind_dns"
GraphConfig = (
('dns_queries_in',
dict(title='DNS Queries In',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Queries/sec',
location='server/queries-in/rdtype',
config=dict(type='DERIVE', min=0, draw='AREASTACK'))),
('dns_server_stats',
dict(title='DNS Server Stats',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Queries/sec',
location='server/nsstat',
fields=("Requestv4", "Requestv6", "ReqEdns0", "ReqTCP", "Response",
"TruncatedResp", "RespEDNS0", "QrySuccess", "QryAuthAns",
"QryNoauthAns", "QryReferral", "QryNxrrset", "QrySERVFAIL",
"QryFORMERR", "QryNXDOMAIN", "QryRecursion", "QryDuplicate",
"QryDropped", "QryFailure"),
config=dict(type='DERIVE', min=0))),
('dns_opcode_in',
dict(title='DNS Opcodes In',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Queries/sec',
location='server/requests/opcode',
config=dict(type='DERIVE', min=0, draw='AREASTACK'))),
('dns_queries_out',
dict(title='DNS Queries Out',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Count/sec',
view='_default',
location='rdtype',
config=dict(type='DERIVE', min=0, draw='AREASTACK'))),
('dns_cachedb',
dict(title='DNS CacheDB RRsets',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Count/sec',
view='_default',
location='cache/rrset',
config=dict(type='DERIVE', min=0))),
('dns_resolver_stats',
dict(title='DNS Resolver Stats',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Count/sec',
view='_default',
location='resstat',
config=dict(type='DERIVE', min=0))),
('dns_socket_stats',
dict(title='DNS Socket Stats',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Count/sec',
location='server/sockstat',
fields=("UDP4Open", "UDP6Open",
"TCP4Open", "TCP6Open",
"UDP4OpenFail", "UDP6OpenFail",
"TCP4OpenFail", "TCP6OpenFail",
"UDP4Close", "UDP6Close",
"TCP4Close", "TCP6Close",
"UDP4BindFail", "UDP6BindFail",
"TCP4BindFail", "TCP6BindFail",
"UDP4ConnFail", "UDP6ConnFail",
"TCP4ConnFail", "TCP6ConnFail",
"UDP4Conn", "UDP6Conn",
"TCP4Conn", "TCP6Conn",
"TCP4AcceptFail", "TCP6AcceptFail",
"TCP4Accept", "TCP6Accept",
"UDP4SendErr", "UDP6SendErr",
"TCP4SendErr", "TCP6SendErr",
"UDP4RecvErr", "UDP6RecvErr",
"TCP4RecvErr", "TCP6RecvErr"),
config=dict(type='DERIVE', min=0))),
('dns_zone_stats',
dict(title='DNS Zone Maintenance',
enable=True,
stattype='counter',
args='-l 0',
vlabel='Count/sec',
location='server/zonestat',
config=dict(type='DERIVE', min=0))),
('dns_memory_usage',
dict(title='DNS Memory Usage',
enable=True,
stattype='memory',
args='-l 0 --base 1024',
vlabel='Memory In-Use',
location='memory/summary',
config=dict(type='GAUGE', min=0))),
)
def bindprocess():
for process in psutil.process_iter():
if process.name() == PROCESS_NAME:
return True
return False
def getstatsversion(etree):
"""return version of BIND statistics"""
    return etree.find(Path_base).attrib['version']
def getkeyvals(path, location, stattype, getvals=False):
result = []
if stattype == 'memory':
statlist = path.find(location)
else:
statlist = path.findall(location)
for stat in statlist:
if stattype == 'memory':
key = stat.tag
else:
key = stat.findtext('name')
if getvals:
if stattype == 'memory':
value = stat.text
else:
value = stat.findtext('counter')
result.append((key,value))
else:
result.append(key)
return result
def getdata(graph, etree, getvals=False):
stattype = graph[1]['stattype']
location = graph[1]['location']
view = graph[1].get('view', None)
if view:
xmlpath = Path_views
for stat in etree.findall(xmlpath):
if stat.findtext('name') == view:
return getkeyvals(stat, location, stattype, getvals)
else:
xmlpath = "%s/%s" % (Path_base, location)
return getkeyvals(etree, xmlpath, stattype, getvals)
def validkey(graph, key):
fieldlist = graph[1].get('fields', None)
if fieldlist and (key not in fieldlist):
return False
else:
return True
def get_etree_root(url):
"""Return the root of an ElementTree structure populated by
parsing BIND9 statistics obtained at the given URL"""
data = urllib2.urlopen(url)
return et.parse(data).getroot()
def data(etree):
"""Generate data for the BIND stats plugin"""
metrics = []
for g in GraphConfig:
data = getdata(g, etree, getvals=True)
        if data is not None:
for (key, value) in data:
if validkey(g, key):
metrics.append("%s=%s;;;; " % (key, value))
return metrics
if bindprocess():
tree = get_etree_root(BINDSTATS_URL)
perf_data = data(tree)
output = "OK | "
for metric in perf_data:
output += metric
print output
sys.exit(0)
else:
print "Plugin Failed! Unable to connect to %s" % BINDSTATS_URL
sys.exit(2)
|
import threading
import time
number = 100
arr = [11,22]
def thread1():
global number
time.sleep(1)
number += 1
arr.append(33)
print("thread1:number++ is %d-%s" % (number,arr))
def thread2():
arr.append(44)
print("thread2:number is %d-%s" % (number,arr))
if __name__ == "__main__":
t1 = threading.Thread(target=thread1)
t2 = threading.Thread(target=thread2)
t1.start()
t2.start()
time.sleep(1)
print("main thread g_num is %d-%s" % (number,arr))
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
rListNode=ListNode(0)
tmpNode1=l1
tmp_numlist1=[]
tmp_num1=0
tmpNode2=l2
tmp_numlist2=[]
tmp_num2=0
while True:
tmp_numlist1.append(tmpNode1.val)
if tmpNode1.next is not None:
tmpNode1 = tmpNode1.next
else:
break
len_tmp = len(tmp_numlist1)
for i in range(1,len_tmp+1):
tmp_num1+=tmp_numlist1.pop()*(10**(len_tmp-i))
while True:
tmp_numlist2.append(tmpNode2.val)
if tmpNode2.next is not None:
tmpNode2 = tmpNode2.next
else:
break
len_tmp = len(tmp_numlist2)
for i in range(1,len_tmp+1):
tmp_num2+=tmp_numlist2.pop()*(10**(len_tmp-i))
sum = tmp_num1 + tmp_num2
count = 1
level = 0
while True:
count=count*10
level+=1
if sum<=count:
break
rtmpNode = rListNode
if sum==count:
rtmpNode.val=0
while count!=1:
if count == 10:
rtmpNode.next = ListNode(1)
count /= 10
else:
tmpNode=ListNode(0)
rtmpNode.next=tmpNode
rtmpNode=rtmpNode.next
count /= 10
return rListNode
else:
tmpList=[]
while count!=1:
count/=10
tmpList.append(sum/(count))
sum%=count
rtmpNode.val = tmpList.pop()
while level and tmpList:
tmpNode=ListNode(tmpList.pop())
rtmpNode.next=tmpNode
rtmpNode=rtmpNode.next
level-=1
return rListNode
|
from sound_utils import *
entries = GetEntryV1_1()
print len(entries)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
from sklearn.model_selection import train_test_split
import random
import math
import sys
class Preprocess:
''' Preprocess base (super) class for Composable Models '''
def __init__(self):
""" Constructor
"""
pass
###
# Preprocessing
###
def normalization(self, x_train, x_test=None, centered=False):
""" Normalize the input
x_train : training images
            x_test  : test images
"""
if x_train.dtype == np.uint8:
            if centered:
                # scale to [-1, 1]
                x_train = ((x_train - 127.5) / 127.5).astype(np.float32)
                if x_test is not None:
                    x_test = ((x_test - 127.5) / 127.5).astype(np.float32)
else:
x_train = (x_train / 255.0).astype(np.float32)
if x_test is not None:
x_test = (x_test / 255.0).astype(np.float32)
return x_train, x_test
def standardization(self, x_train, x_test=None):
""" Standardize the input
x_train : training images
x_test : test images
"""
self.mean = np.mean(x_train)
self.std = np.std(x_train)
x_train = ((x_train - self.mean) / self.std).astype(np.float32)
if x_test is not None:
x_test = ((x_test - self.mean) / self.std).astype(np.float32)
return x_train, x_test
def label_smoothing(self, y_train, n_classes, factor=0.1):
""" Convert a matrix of one-hot row-vector labels into smoothed versions.
y_train : training labels
n_classes: number of classes
factor : smoothing factor (between 0 and 1)
"""
if 0 <= factor <= 1:
# label smoothing ref: https://www.robots.ox.ac.uk/~vgg/rg/papers/reinception.pdf
y_train *= 1 - factor
y_train += factor / n_classes
else:
raise Exception('Invalid label smoothing factor: ' + str(factor))
return y_train
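# Usage sketch (illustrative, not from the original file): normalize a dummy
# uint8 image batch and smooth its one-hot labels.
if __name__ == "__main__":
    pre = Preprocess()
    x = np.random.randint(0, 256, size=(8, 32, 32, 3), dtype=np.uint8)
    y = to_categorical(np.random.randint(0, 10, size=(8,)), 10)
    x_norm, _ = pre.normalization(x)
    y_smooth = pre.label_smoothing(y, n_classes=10, factor=0.1)
    print(x_norm.min(), x_norm.max(), y_smooth[0])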
|
# reference: https://github.com/stan-dev/stan/releases/download/v2.14.0/stan-reference-2.14.0.pdf
# the stan functions are from reference 2.8.0. Functions from 2.14.0 are very different and they are updated in functions.py
# Mapping chain: distribution name -> specific (hidden) type -> high-level type
# distribution_type: specific (hidden) type -> high-level type (discrete / continuous)
distribution_type = {
"binary" : "discrete",
"bounded discrete": "discrete",
"unbounded discrete": "discrete",
"multivariate discrete": "discrete",
"unbounded continuous": "continuous",
"positive continuous": "continuous",
"non-negative continuous": "continuous",
"positive lower-bounded": "continuous",
"continuous on [0 1]": "continuous",
"circular": "continuous",
"bounded continuous": "continuous",
"unbounded vectors": "continuous",
"simplex": "continuous",
"correlation matrix": "continuous",
"covariance matrix": "continuous",
}
# distributions: distribution name -> specific (hidden) type
# key: sampling-statement name; value: specific type; trailing comment: available Stan function suffixes
distributions = {
"bernoulli": "binary", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"bernoulli_logit": "binary", # _log
"binomial": "bounded discrete", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"binomial_logit": "bounded discrete", # _log
"beta_binomial": "bounded discrete", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"hypergeometric": "bounded discrete", # _log, _rng
"categorical": "bounded discrete", # _log, _rng
"categorical_logit": "bounded discrete", # _log, _rng
"ordered_logistic": "bounded discrete", # _log, _rng,
"neg_binomial": "unbounded discrete", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"neg_binomial_2": "unbounded discrete", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"neg_binomial_2_log": "unbounded discrete", # _log, _log_rng,
"poisson": "unbounded discrete", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"poisson_log": "unbounded discrete", # _log, _rng
"multinomial": "multivariate discrete", # _log, _rng
"normal": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"exp_mod_normal": "unbounded continuous", # _log, _cdf, _ccdf_log, _rng
"skew_normal": "unbounded continuous", # _log, _cdf, _cdf_log, _rng
"student_t": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"cauchy": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"double_exponential": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"logistic": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"gumbel": "unbounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"lognormal": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"chi_square": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"inv_chi_square": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"scaled_inv_chi_square": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"exponential": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"gamma": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"inv_gamma": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"weibull": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"frechet": "positive continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"rayleigh": "non-negative continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"wiener": "non-negative continuous", # _log
"pareto": "positive lower-bounded", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"pareto_type_2": "positive lower-bounded",# _log, _cdf, _cdf_log, _ccdf_log, _rng
"beta": "continuous on [0 1]", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"von_mises": "circular", # _log, _rng
"uniform": "bounded continuous", # _log, _cdf, _cdf_log, _ccdf_log, _rng
"multi_normal": "unbounded vectors", # _log, _rng
"multi_normal_prec": "unbounded vectors", # _log
"multi_normal_cholesky": "unbounded vectors", # _log, _rng
"multi_gp": "unbounded vectors", # _log
"multi_gp_cholesky": "unbounded vectors", # _log
"multi_student_t": "unbounded vectors", #_log, _rng
"gaussian_dlm_obs": "unbounded vectors", # _log
"dirichlet": "simplex", # _log, _rng
"lkj_corr": "correlation matrix", # _log, _rng
"lkj_corr_cholesky": "correlation matrix", # _log, _rng
"wishart": "covariance matrix", # _log, _rng
"inv_wishart": "covariance matrix", # _log, _rng
}
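# Example lookup (illustrative): compose the two tables to go from a sampling
# statement to its high-level type.
if __name__ == "__main__":
    name = "normal"
    print(name, "->", distributions[name], "->", distribution_type[distributions[name]])
    # prints: normal -> unbounded continuous -> continuous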
|
#!/usr/bin/env python
# encoding: utf-8
from abc import abstractmethod, ABCMeta
import datetime
import random
import time
import unittest
class PRNG:
"""
Represents default PRNG (currently, wrapper class for random module).
"""
def __init__(self):
"""
Constructs PRNG instance
"""
# Default seed value to current datetime
self._seed = int(time.mktime(datetime.datetime.now().timetuple()))
# Initialize PRNG with the default seed value
random.seed(self._seed)
@property
def seed(self):
"""
Returns current seed value
"""
return self._seed
@seed.setter
def seed(self, seed):
"""
Sets new seed value
Arguments:
seed -- New seed value
"""
self._seed = seed
# Re-initialize PRNG with new seed value
random.seed(self._seed)
def randint(self, a, b):
"""
Returns a random integer N such that a <= N <= b
Arguments:
a -- Lower bound
b -- Upper bound
"""
return random.randint(a, b)
def uniform(self, a, b):
"""
Returns a random floating point number N such that
a <= N <= b for a <= b and b <= N <= a for b < a
Arguments:
a -- Lower bound
b -- Upper bound
"""
return random.uniform(a, b)
def expovariate(self, lambd):
"""
Returns a random floating point number N drawn
from an exponential distribution with parameter lambd (1 / E[X])
Arguments:
lambd -- Lambda parameter of exponential distribution (1 / E[X])
"""
return random.expovariate(lambd)
class Event:
"""
Represents an abstract event.
"""
def __init__(self, identifier, time, **kwargs):
"""
Constructs Event instance
Arguments:
identifier -- ID/type of this event
time -- Time of occurring of this event
Keyword arguments:
kwargs -- Optional keyword arguments
"""
self._identifier = identifier
self._time = time
self._kwargs = kwargs
@property
def identifier(self):
"""
Returns id of this event
"""
return self._identifier
@property
def time(self):
"""
Returns time of occurring
"""
return self._time
@property
def kwargs(self):
"""
Returns dictionary of optional arguments
"""
return self._kwargs
class EventHandler(metaclass=ABCMeta):
"""
Abstract base class for event handlers
"""
def __init__(self, simulation_engine):
"""
Constructs EventHandler object
Arguments:
simulation_engine = SimulationEngine instance
"""
# Connect with SimulationEngine
self._simulation_engine = simulation_engine
# Register callback functions:
# start of the simulation
self._simulation_engine.register_callback(self.handle_start, SimulationEngine.START_CALLBACK)
# stop of the simulation
self._simulation_engine.register_callback(self.handle_stop, SimulationEngine.STOP_CALLBACK)
# imminent event
self._simulation_engine.register_callback(self.handle_event, SimulationEngine.EVENT_CALLBACK)
@abstractmethod
def handle_start(self):
"""
Abstract method for handling start of the simulation
"""
pass
@abstractmethod
def handle_stop(self):
"""
Abstract method for handling stop of the simulation
"""
pass
@abstractmethod
def handle_event(self, event):
"""
Abstract method for handling imminent events
Arguments:
event -- Event to be handled
"""
pass
class SimulationEngine:
"""
Represents the main engine of a DES simulation platform
"""
# ID of the finishing event
END_EVENT = "End"
# Callback types
START_CALLBACK = "start"
STOP_CALLBACK = "stop"
EVENT_CALLBACK = "event"
def __init__(self):
"""
Constructs SimulationEngine instance
"""
# Create empty event list
self._event_list = []
# Initialize current simulation time
self.simulation_time = 0
# Initialize finish time
self._finish_time = 0
# Flag representing finishing event
self._finish_event_exists = False
# Initialize callback dictionary
self._callback_dict = {self.START_CALLBACK: [], self.STOP_CALLBACK: [], self.EVENT_CALLBACK: []}
# Initialize default PRNG
self.prng = PRNG()
# Initialize event handler
self.event_handler = None
def start(self):
"""
Starts simulation
"""
# Check whether an EventHandler is attached; if not, throw an error
if not self.event_handler:
raise Exception("No EventHandler attached!")
# Notify of the start of simulation; event handlers should
# generate first event
self._notify_start()
# Traverse the event list
while len(self._event_list) > 0:
# Remove the imminent event from the event list
imminent = self._event_list.pop()
# Advance clock to the imminent event
self.simulation_time = imminent.time
# Notify of the current event
self._notify_event(imminent)
# Notify of the end of the simulation
self._notify_stop()
def stop(self, finish_time):
"""
Schedules finishing event
Arguments:
finish_time -- Time of occurrence of the finishing event
"""
# Check if finishing event already scheduled
if not self._finish_event_exists:
# Set finish time
self._finish_time = finish_time
# Schedule finishing event
self._event_list += [Event(self.END_EVENT, self._finish_time)]
self._finish_event_exists = True
def schedule(self, event):
"""
Schedules event (adds it to the event list)
Arguments:
event -- Event to be scheduled
"""
        # Discard the new event if it happens after the finishing event
        if event.time < self._finish_time:
            # Add the event to the event list
            self._event_list += [event]
            # Keep the list sorted by decreasing time so pop() returns the earliest event
            self._event_list.sort(key=lambda x: x.time)
            self._event_list.reverse()
def register_callback(self, func, ttype):
"""
Register function for callback when simulation ends
Arguments:
func -- Function to call back
ttype -- Type of the callback
"""
self._callback_dict[ttype] += [func]
def _notify_start(self):
"""
Notifies of start of the simulation
"""
for func in self._callback_dict[self.START_CALLBACK]: func()
def _notify_stop(self):
"""
Notifies of stop of the simulation
"""
for func in self._callback_dict[self.STOP_CALLBACK]: func()
def _notify_event(self, event):
"""
Notifies of an imminent event
Arguments:
event -- The imminent event
"""
for func in self._callback_dict[self.EVENT_CALLBACK]: func(event)
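# A minimal usage sketch (illustrative, not part of the original module): a toy
# handler that keeps scheduling exponentially spaced "Arrival" events until the
# engine's finish time cuts scheduling off. The names ArrivalHandler and
# "Arrival" are assumptions made for this example only.
if __name__ == "__main__":
    class ArrivalHandler(EventHandler):
        def handle_start(self):
            # Set the finish time first, then seed the event list
            self._simulation_engine.stop(10.0)
            self._simulation_engine.schedule(Event("Arrival", 0.0))
        def handle_stop(self):
            print("simulation finished at t = %s" % self._simulation_engine.simulation_time)
        def handle_event(self, event):
            if event.identifier == "Arrival":
                delay = self._simulation_engine.prng.expovariate(1.0)
                self._simulation_engine.schedule(Event("Arrival", event.time + delay))
    engine = SimulationEngine()
    engine.event_handler = ArrivalHandler(engine)
    engine.start()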
|
import numpy as np
import glob
from . import radxfer as rxf
from . import convolve2aeri as c2a
import sys
from . import panel_file
sys.path.append('../')
from . import apodizer
from . import tape7_reader as t7r
import subprocess
from scipy import convolve
"""
Object for reading in LBLRTM data and performing calculations on the data.
Last time this was run: (October 2013?)
Written by: Greg Blumberg (OU/CIMMS)
Email: wblumberg@ou.edu, greg.blumberg@noaa.gov
TODO: Allow for upwelling and downwelling monochromatic calculations.
Support calculating total flux calculations
Perform convolution with boxcar filter function.
Support the three methods of reducing the RTM calculations:
1.) Convolve the raw optical depths before RTE
2.) Transform ODs to Transmission then convolve then back to ODs then RTE
3.) Compute layer to instrument transmission then convolve then back to ODs.
"""
def smoothAERI(a, dp, base_wnum, ods):
"""
smoothAERI()
smoothes the AERI spectra
"""
idxs = np.where((base_wnum > a - dp) & (base_wnum < a + dp))[0]
ans = np.mean(np.exp(ods[:,idxs]*-1.), axis=1)
return -1. * np.log(ans)
def read_and_interpODs(OD_dir):
"""
read_and_interpODs()
Takes in the output directory from lblrun that has the ODdeflt_*
files and reads in all of the optical depth files. Because
the LBLRTM TAPE5 writer writes the ODdeflt files out to different
wavenumber grids, this function interpolates all of those to the
file with the maximum resolution. This function returns
the 2D array of optical depths and the maximum resolution wavenumber
grid.
"""
switch = 1
files = np.sort(glob.glob(OD_dir + '/ODdeflt_*'))[::-1]
print("Reading in optical depth files...")
#This code loads in the highest OD layer into memory, the wnum grid
#for this layer becomes the standard wavenumber grid for all other layers
if switch == 0:
fd = panel_file.panel_file(files[0], do_load_data=True)
base_wnum = fd.v
print("\tReading in:",files[0])
ods = np.empty((len(files), len(base_wnum)))
ods[len(files)-1] = fd.data1
begin_idx = 1
else:
fd = panel_file.panel_file(files[-1], do_load_data=True)
base_wnum = fd.v
ods = np.empty((len(files), len(base_wnum)))
begin_idx = 0
#This code loads in the rest of the OD layers and interpolated them to the
#wavenumber grid that is of the highest layer
for f in np.arange(begin_idx,len(files),1):
print("\tReading in:",files[f])
fd = panel_file.panel_file(files[f], do_load_data=True)
ods[len(files)-1-f] = np.interp(base_wnum, fd.v, fd.data1)
return ods, base_wnum
def computeJacobian(spectra1, spectra2, deltaX):
return (spectra1 - spectra2)/deltaX
class LBLPkg:
def __init__(self, lblOUTdir):
self.lbl_datadir = lblOUTdir
ods, wnum = read_and_interpODs(lblOUTdir)
print("Reading in TAPE7...")
z, t = t7r.readTape(lblOUTdir + '/TAPE7')
print("LBLOUT files loaded.")
self.ods = ods #Read in ODs here
self.temp = t #Temperature profile
self.z = z #height profile
self.q = 3 #wv mixing ratio profile interpolated from the LBLRTM TAPE7 (not implemented yet, don't know how to do this).
self.base_wnum = wnum #base wnum for the ODs in cm-1
self.base_wlen = (1./wnum)* 10000 # Returns in micrometers
#self.aeri_wnums = np.load('/home/greg.blumberg/python_pkgs/aeri_wnumgrid.npy')
def getLBLdir(self):
# Returns the LBLRUN output data directory
return self.lbl_datadir
def trueJacobian(self, pert):
# Calculates the Jacobian
wnum, fx = self.radianceTrue()
print(self.getLBLdir() + "../OUT_TPERT/TAPE7")
try:
reg_z, pert_temp = t7r.readTape(self.getLBLdir()+"../OUT_TPERT/TAPE7")
# The below is commented because my TAPE7 reader doesn't read in the interpolated WVMR grid yet.
#reg_z, pert_mxr = t7r.readTape(self.getLBLdir()+"../OUT_QPERT/TAPE7")
except:
raise Exception("Perturbation TAPE7s not in the correct directories ../OUT_T/ & ../OUT_Q")
aeri_jacobian = np.empty((len(reg_z), len(wnum)))
true_fxprime = np.empty((len(reg_z), len(wnum)))
levels = np.arange(0, len(reg_z))
for t in range(len(levels)):
i = levels[t]
print("Modifying profile at height: " + str(reg_z[i]) + ' km')
temporary_ods = 1.*self.ods
temp_temp = 1.*self.temp
temp_temp[i] = pert_temp[i]
if i == 0:
fd = panel_file.panel_file(self.lbl_datadir + '/ODdeflt_001', do_load_data=True)
temporary_ods[0] = np.interp(self.base_wnum, fd.v, fd.data1)
print("Bottommost layer, changing only the bottommost OD layer and temp[0].")
elif i+1 == len(self.temp):
print(self.lbl_datadir + '/ODdeflt_' + str((3-len(str(i+1))) * '0' + str(i)))
fd = panel_file.panel_file(self.lbl_datadir + '/ODdeflt_' + str((3-len(str(i+1))) * '0' + str(i)), do_load_data=True)
temporary_ods[len(self.temp)-1-1] = np.interp(self.base_wnum, fd.v, fd.data1)
print("Top of the atmosphere: changing only the topmost OD layer and temp.")
else:
#Replace the optical depths for the layer below level i (makes it layer i).
fdbo = panel_file.panel_file(self.lbl_datadir + '/ODdeflt_' + str((3-len(str(i))) * '0' + str(i)), do_load_data=True)
temporary_ods[i-1] = np.interp(self.base_wnum, fdbo.v, fdbo.data1)
#Replace the optical depths for the layer above level i (makes it layer i+1)
fdup = panel_file.panel_file(self.lbl_datadir + '/ODdeflt_' + str((3-len(str(i+1))) * '0' + str(i+1)), do_load_data=True)
temporary_ods[i] = np.interp(self.base_wnum, fdup.v, fdup.data1)
#If the AERI observation suggests clouds are being viewed cloud optical depths may be added in here.
#Calculate the true way of doing the AERI Radiances
print("Computing perturbed AERI radiance w/o apodization.")
true_aeri_wnum, true_fxp = self.radianceTrue(self.base_wnum, temp_temp, temporary_ods)
print("Calculating Jacobian for height: ", str(reg_z[i]))
aeri_jacobian[i] = computeJacobian(fx, true_fxp, pert)
true_fxprime[i] = true_fxp
return aeri_jacobian, true_fxprime
def aeri_radiance(self, wnums=None, temp=None, ods=None):
"""
aeri_radiance()
Calculates the radiance values that would be observed by the AERI using the LBLRTM output data.
Unless the arguments above are specified, this function uses the optical depth and temperature data
loaded in through this LBLPkg object.
"""
if wnums is None:
wnums = self.base_wnum
if temp is None:
temp = self.temp
if ods is None:
ods = self.ods
rad = rxf.rt(wnums, temp, ods)
wnum, rad = c2a.convolve_to_aeri(wnums, rad)
rad = apodizer.apodizer(rad)
idxs = np.where((wnum >= self.aeri_wnums[0]) & (wnum <= (self.aeri_wnums[-1] + .1)))[0]
rad = rad[idxs]
return wnum[idxs], rad
def monoRadiance(self, zenith_angle=0, sfc_t=None, sfc_e=None, upwelling=False):
"""
monoRadiance()
Calculates the monochromatic radiance depending upon certain parameters.
By default, it calculates the downwelling monochromatic radiance values.
Parameters
----------
zenith_angle : zenith angle of the calculation (degrees; default=0)
sfc_t : surface temperature (Kelvin; default=None)
sfc_e : surface emissivity (unitless)
upwelling : switch to compute upwelling vs downwelling radiance
False - downwelling
True - upwelling
"""
wnums = self.base_wnum
temp = self.temp
ods = self.ods
rad = rxf.rt(wnums, temp, ods, zenith_angle=zenith_angle, sfc_t=sfc_t, sfc_e=sfc_e, upwelling=upwelling)
return wnums, rad
def filterSpectra(self, y, wnum, delta=5):
"""
filterSpectra
Convolves a spectrum with the boxcar function specified by the delta
argument. Returns the spectrum back to the user once done.
Arguments
---------
y - the spectrum requested (on a specific wavenumber grid)
wnum - the evenly spaced wavenumber grid (to determine the window of the boxcar)
delta - the size of the boxcar window (cm-1)
"""
        dv = np.diff(wnum)[0]
        N = int(round(delta / dv))  # number of spectral points per boxcar window
        r = convolve(y, np.ones(N) / N, mode='valid')
return r
def filterRadiance(self, delta=5, method=1, zenith_angle=0, sfc_t=None, sfc_e=None, upwelling=False, debug=False):
"""
filterRadiance()
        This function performs the radiative transfer calculations using the optical depth
files and TAPE7 data from the LBLRTM run. Instead of convolving the final
result using a boxcar filter, this function uses one of three methods to speed up the
radiative transfer calculations.
Methods 1-3:
1.) Convolve the boxcar filter with the optical depth spectra and then perform RT
2.) Transform optical depth spectra to transmission, convolve and then perform RT
3.) Convert optical depth spectra to layer-to-instrument transmission and then convolve.
Convert back into optical depth space and then perform RT (most accurate).
Arguments
---------
delta : size of the boxcar filter (cm-1; default = 5)
method : choose which method to perform RT (default = 1)
zenith_angle: the angle of the calculation (degrees; default = 0)
sfc_t : the surface temperature of the Earth (Kelvin; default=None)
sfc_e : the surface emissivity (unitless; default=None)
upwelling : switch to compute upwelling vs downwelling radiance
False - downwelling
True - upwelling (needs sfc_t and sfc_e)
"""
wnums = self.base_wnum
filtered_wnum = self.filterSpectra(wnums, wnums, delta)
filtered_ods = np.empty((len(self.ods), len(filtered_wnum)))
if method == 1:
# Do convolution in OD space.
for i in range(len(self.ods)):
if debug is True:
print("Convolving ODs @ LEVEL:", i)
print("Max:", np.max(self.ods[i,:]), "Min:", np.min(self.ods[i,:]))
filtered_ods[i,:] = self.filterSpectra(self.ods[i,:], wnums, delta)
elif method == 2:
# Do convolution in transmission space.
for i in range(len(self.ods)):
trans = self.od2t(self.ods[i,:])
if debug is True:
print("Convolving transmission @ level:",i)
print("Max:", np.max(trans), "Min:", np.min(trans))
trans_cov = self.filterSpectra(trans, wnums, delta)
filtered_ods[i,:] = self.t2od(trans_cov)
elif method == 3:
# Do the convolution in layer-to-instrument transmission.
# Convert ODs to transmission
trans = self.od2t(self.ods)
if upwelling is True:
trans = trans[::-1,:] # Reverse the transmissivity
new_l2i_t = np.empty((len(self.ods), len(filtered_wnum)))
new_trans = np.empty((len(self.ods), len(filtered_wnum)))
for i in range(len(new_trans)):
# Compute layer-to-instrument transmission.
l2i_t = np.prod(trans[:i,:], axis=0)
# Convolve that mother.
new_l2i_t[i,:] = self.filterSpectra(l2i_t, wnums, delta)
            # Set the first level equal to the layer-to-instrument transmissivity
new_trans[0,:] = new_l2i_t[0,:]
for i in range(1, len(new_trans)):
# Divide current layer L2I Transmissivity by the layer "below" that.
# e.g. (trans_1 * trans_2 * trans_3) / (trans_1 * trans_2) = trans_3
new_trans[i,:] = np.divide(new_l2i_t[i,:], new_l2i_t[i-1,:])
# Convert back to the optical depth space.
filtered_ods = self.t2od(new_trans)
if upwelling is True:
# If we're computing upwelling, reverse the optical depths again
filtered_ods = filtered_ods[::-1,:]
else:
print("Method not supported.")
return None, None
rad = rxf.rt(filtered_wnum, self.temp, filtered_ods, zenith_angle=zenith_angle, sfc_t=sfc_t, sfc_e=sfc_e, upwelling=upwelling)
return filtered_wnum, rad
def od2t(self, od=None):
if od is None:
od = self.ods
return np.exp(-od)
def t2od(self, t):
return -np.log(t)
def computeFlux(self, upwelling=False, v1=400, v2=2000, sfc_t=300, sfc_e=1, dtheta=30):
# if layer = 0
# compute TOA flux
# if layer = 1
# compute BOA flux
#
# integrate I_v(theta) sin(theta) cos(theta) d(theta) d(phi) d(wavenumber)
#wnum = self.base_wnum
print("Upwelling?:", upwelling)
range_of_thetas = np.arange(0,90 + dtheta,dtheta)
radiances = np.empty((len(range_of_thetas), len(self.base_wnum)))
for i in range(len(range_of_thetas)):
theta = range_of_thetas[i]
#print theta
#print self.monoRadiance(theta, sfc_t, sfc_e, upwelling)
#radiances[i,:] = self.monoRadiance(theta, sfc_t, sfc_e, upwelling)[0]# * np.sin(np.radians(theta)) * np.cos(np.radians(theta))
#plot(self.base_wnum, radiances[i,:])
radiances[i,:] = self.monoRadiance(theta, sfc_t, sfc_e, upwelling)[1] * np.sin(np.radians(theta)) * np.cos(np.radians(theta))
#plot(self.base_wnum, radiances[i,:])
        #show()  # leftover debug call (matplotlib is not imported here)
print(radiances.shape)
# After integrating over theta
integrated = np.trapz(radiances, range_of_thetas, dx=dtheta, axis=0)
print(integrated)
# After integrating over phi
integrated = integrated * (2*np.pi)
print(integrated)
print(integrated.shape)
#print integrate.quad(lambda x: np.interp(x, self.base_wnum, integrated), v1, v2)[0] * 0.001
def addCloud_computeRadiance(self, cloud_height=2, cloud_tau=0, zenith_angle=0, sfc_t=None, sfc_e=None, upwelling=False):
wnums = self.base_wnum
temp = self.temp
ods = self.ods.copy()
z = self.z
if np.max(z) < cloud_height or np.min(z) > cloud_height:
print("Can't add a cloud at the specified height.")
print("Need to use a cloud between: ", np.min(z), 'and', np.max(z), 'km')
return 0,0
else:
idx = np.argmin(np.abs(z - cloud_height))
ods[idx,:] = ods[idx,:] + cloud_tau
print("The temperature of the cloud you're adding is:", self.temp[idx])
rad = rxf.rt(wnums, temp, ods, zenith_angle=zenith_angle, sfc_t=sfc_t, sfc_e=sfc_e, upwelling=upwelling)
return wnums, rad
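# Standalone illustration (an assumption, not part of the original module) of the
# boxcar smoothing performed by LBLPkg.filterSpectra: a delta-wide wavenumber
# window is converted to a point count and convolved with the spectrum.
if __name__ == "__main__":
    wnum_grid = np.arange(400.0, 1400.0, 0.5)                      # 0.5 cm-1 spacing
    spectrum = np.sin(wnum_grid / 20.0) + 0.1 * np.random.randn(wnum_grid.size)
    npts = int(round(5.0 / np.diff(wnum_grid)[0]))                 # points in a 5 cm-1 boxcar
    smoothed = convolve(spectrum, np.ones(npts) / npts, mode='valid')
    print("input points:", wnum_grid.size, "smoothed points:", smoothed.size)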
|
# Structure this script entirely on your own.
# See Chapter 8: Strings Exercise 5 for the task.
# Please do provide function calls that test/demonstrate your
# function.
def rotate_word(word,shiftint):
word1=''
numa=ord('a')
numz=ord('z')
for s in word:
if shiftint>0 and ord(s)+shiftint>numz:
word1=word1+chr(ord(s)+shiftint-26)
elif shiftint<0 and ord(s)+shiftint<numa:
word1=word1+chr(ord(s)+shiftint+26)
else:
word1=word1+chr(ord(s)+shiftint)
return word1
print(rotate_word('Hello', 10))
print(rotate_word('World', -8))
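# Additional lowercase checks (the function assumes lowercase input); the expected
# outputs follow the classic examples for this exercise.
print(rotate_word('cheer', 7))    # -> jolly
print(rotate_word('melon', -10))  # -> cubed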
|
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# Copyright Thomas Gleixner <tglx@linutronix.de>
from argparse import ArgumentParser
from ply import lex, yacc
import locale
import traceback
import sys
import git
import re
import os
class ParserException(Exception):
def __init__(self, tok, txt):
self.tok = tok
self.txt = txt
class SPDXException(Exception):
def __init__(self, el, txt):
self.el = el
self.txt = txt
class SPDXdata(object):
def __init__(self):
self.license_files = 0
self.exception_files = 0
self.licenses = [ ]
self.exceptions = { }
# Read the spdx data from the LICENSES directory
def read_spdxdata(repo):
# The subdirectories of LICENSES in the kernel source
# Note: exceptions needs to be parsed as last directory.
license_dirs = [ "preferred", "dual", "deprecated", "exceptions" ]
lictree = repo.head.commit.tree['LICENSES']
spdx = SPDXdata()
for d in license_dirs:
for el in lictree[d].traverse():
if not os.path.isfile(el.path):
continue
exception = None
for l in open(el.path).readlines():
if l.startswith('Valid-License-Identifier:'):
lid = l.split(':')[1].strip().upper()
if lid in spdx.licenses:
raise SPDXException(el, 'Duplicate License Identifier: %s' %lid)
else:
spdx.licenses.append(lid)
elif l.startswith('SPDX-Exception-Identifier:'):
exception = l.split(':')[1].strip().upper()
spdx.exceptions[exception] = []
elif l.startswith('SPDX-Licenses:'):
for lic in l.split(':')[1].upper().strip().replace(' ', '').replace('\t', '').split(','):
if not lic in spdx.licenses:
raise SPDXException(None, 'Exception %s missing license %s' %(exception, lic))
spdx.exceptions[exception].append(lic)
elif l.startswith("License-Text:"):
if exception:
if not len(spdx.exceptions[exception]):
raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %exception)
spdx.exception_files += 1
else:
spdx.license_files += 1
break
return spdx
class id_parser(object):
reserved = [ 'AND', 'OR', 'WITH' ]
tokens = [ 'LPAR', 'RPAR', 'ID', 'EXC' ] + reserved
precedence = ( ('nonassoc', 'AND', 'OR'), )
t_ignore = ' \t'
def __init__(self, spdx):
self.spdx = spdx
self.lasttok = None
self.lastid = None
self.lexer = lex.lex(module = self, reflags = re.UNICODE)
# Initialize the parser. No debug file and no parser rules stored on disk
# The rules are small enough to be generated on the fly
self.parser = yacc.yacc(module = self, write_tables = False, debug = False)
self.lines_checked = 0
self.checked = 0
self.spdx_valid = 0
self.spdx_errors = 0
self.curline = 0
self.deepest = 0
# Validate License and Exception IDs
def validate(self, tok):
id = tok.value.upper()
if tok.type == 'ID':
if not id in self.spdx.licenses:
raise ParserException(tok, 'Invalid License ID')
self.lastid = id
elif tok.type == 'EXC':
if id not in self.spdx.exceptions:
raise ParserException(tok, 'Invalid Exception ID')
if self.lastid not in self.spdx.exceptions[id]:
raise ParserException(tok, 'Exception not valid for license %s' %self.lastid)
self.lastid = None
elif tok.type != 'WITH':
self.lastid = None
# Lexer functions
def t_RPAR(self, tok):
r'\)'
self.lasttok = tok.type
return tok
def t_LPAR(self, tok):
r'\('
self.lasttok = tok.type
return tok
def t_ID(self, tok):
r'[A-Za-z.0-9\-+]+'
if self.lasttok == 'EXC':
print(tok)
raise ParserException(tok, 'Missing parentheses')
tok.value = tok.value.strip()
val = tok.value.upper()
if val in self.reserved:
tok.type = val
elif self.lasttok == 'WITH':
tok.type = 'EXC'
self.lasttok = tok.type
self.validate(tok)
return tok
def t_error(self, tok):
raise ParserException(tok, 'Invalid token')
def p_expr(self, p):
'''expr : ID
| ID WITH EXC
| expr AND expr
| expr OR expr
| LPAR expr RPAR'''
pass
def p_error(self, p):
if not p:
raise ParserException(None, 'Unfinished license expression')
else:
raise ParserException(p, 'Syntax error')
def parse(self, expr):
self.lasttok = None
self.lastid = None
self.parser.parse(expr, lexer = self.lexer)
def parse_lines(self, fd, maxlines, fname):
self.checked += 1
self.curline = 0
try:
for line in fd:
line = line.decode(locale.getpreferredencoding(False), errors='ignore')
self.curline += 1
if self.curline > maxlines:
break
self.lines_checked += 1
if line.find("SPDX-License-Identifier:") < 0:
continue
expr = line.split(':')[1].strip()
# Remove trailing comment closure
if line.strip().endswith('*/'):
expr = expr.rstrip('*/').strip()
# Special case for SH magic boot code files
if line.startswith('LIST \"'):
expr = expr.rstrip('\"').strip()
self.parse(expr)
self.spdx_valid += 1
#
# Should we check for more SPDX ids in the same file and
# complain if there are any?
#
break
except ParserException as pe:
if pe.tok:
col = line.find(expr) + pe.tok.lexpos
tok = pe.tok.value
sys.stdout.write('%s: %d:%d %s: %s\n' %(fname, self.curline, col, pe.txt, tok))
else:
                sys.stdout.write('%s: %d:0 %s\n' %(fname, self.curline, pe.txt))
self.spdx_errors += 1
def scan_git_tree(tree):
for el in tree.traverse():
# Exclude stuff which would make pointless noise
# FIXME: Put this somewhere more sensible
if el.path.startswith("LICENSES"):
continue
if el.path.find("license-rules.rst") >= 0:
continue
if not os.path.isfile(el.path):
continue
with open(el.path, 'rb') as fd:
parser.parse_lines(fd, args.maxlines, el.path)
def scan_git_subtree(tree, path):
for p in path.strip('/').split('/'):
tree = tree[p]
scan_git_tree(tree)
if __name__ == '__main__':
ap = ArgumentParser(description='SPDX expression checker')
ap.add_argument('path', nargs='*', help='Check path or file. If not given full git tree scan. For stdin use "-"')
ap.add_argument('-m', '--maxlines', type=int, default=15,
help='Maximum number of lines to scan in a file. Default 15')
ap.add_argument('-v', '--verbose', action='store_true', help='Verbose statistics output')
args = ap.parse_args()
# Sanity check path arguments
if '-' in args.path and len(args.path) > 1:
sys.stderr.write('stdin input "-" must be the only path argument\n')
sys.exit(1)
try:
# Use git to get the valid license expressions
repo = git.Repo(os.getcwd())
assert not repo.bare
# Initialize SPDX data
spdx = read_spdxdata(repo)
        # Initialize the parser
parser = id_parser(spdx)
except SPDXException as se:
if se.el:
sys.stderr.write('%s: %s\n' %(se.el.path, se.txt))
else:
sys.stderr.write('%s\n' %se.txt)
sys.exit(1)
except Exception as ex:
sys.stderr.write('FAIL: %s\n' %ex)
sys.stderr.write('%s\n' %traceback.format_exc())
sys.exit(1)
try:
if len(args.path) and args.path[0] == '-':
stdin = os.fdopen(sys.stdin.fileno(), 'rb')
parser.parse_lines(stdin, args.maxlines, '-')
else:
if args.path:
for p in args.path:
if os.path.isfile(p):
parser.parse_lines(open(p, 'rb'), args.maxlines, p)
elif os.path.isdir(p):
scan_git_subtree(repo.head.reference.commit.tree, p)
else:
sys.stderr.write('path %s does not exist\n' %p)
sys.exit(1)
else:
# Full git tree scan
scan_git_tree(repo.head.commit.tree)
if args.verbose:
sys.stderr.write('\n')
sys.stderr.write('License files: %12d\n' %spdx.license_files)
sys.stderr.write('Exception files: %12d\n' %spdx.exception_files)
sys.stderr.write('License IDs %12d\n' %len(spdx.licenses))
sys.stderr.write('Exception IDs %12d\n' %len(spdx.exceptions))
sys.stderr.write('\n')
sys.stderr.write('Files checked: %12d\n' %parser.checked)
sys.stderr.write('Lines checked: %12d\n' %parser.lines_checked)
sys.stderr.write('Files with SPDX: %12d\n' %parser.spdx_valid)
sys.stderr.write('Files with errors: %12d\n' %parser.spdx_errors)
sys.exit(0)
except Exception as ex:
sys.stderr.write('FAIL: %s\n' %ex)
sys.stderr.write('%s\n' %traceback.format_exc())
sys.exit(1)
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import DAG
from airflow.operators import ArmadaOperator
from airflow.operators import DeckhandValidateSiteDesignOperator
from airflow.operators import DryDockOperator
# Location of shipyard.conf
# Note that the shipyard.conf file needs to be placed on a volume
# that can be accessed by the containers
config_path = '/usr/local/airflow/plugins/shipyard.conf'
def validate_site_design(parent_dag_name, child_dag_name, args):
'''
Subdag to delegate design verification to the UCP components
'''
dag = DAG(
'{}.{}'.format(parent_dag_name, child_dag_name),
default_args=args)
deckhand_validate_docs = DeckhandValidateSiteDesignOperator(
task_id='deckhand_validate_site_design',
shipyard_conf=config_path,
main_dag_name=parent_dag_name,
sub_dag_name=child_dag_name,
dag=dag)
drydock_validate_docs = DryDockOperator(
task_id='drydock_validate_site_design',
shipyard_conf=config_path,
action='validate_site_design',
main_dag_name=parent_dag_name,
sub_dag_name=child_dag_name,
retries=3,
dag=dag)
armada_validate_docs = ArmadaOperator(
task_id='armada_validate_site_design',
shipyard_conf=config_path,
action='validate_site_design',
main_dag_name=parent_dag_name,
sub_dag_name=child_dag_name,
retries=3,
dag=dag)
return dag
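# Usage sketch (illustrative, not part of the original file): this subdag factory
# is typically wired into a parent DAG with a SubDagOperator, e.g.
#
#   from airflow.operators.subdag_operator import SubDagOperator
#
#   validate_design = SubDagOperator(
#       task_id='validate_site_design',
#       subdag=validate_site_design('deploy_site', 'validate_site_design', default_args),
#       dag=parent_dag)
#
# where 'deploy_site', default_args and parent_dag are assumed to be defined by the caller.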
|
#!/usr/bin/python3
"""This module creates a class Amenity that inherits from BaseModel"""
from models.base_model import BaseModel
class Amenity(BaseModel):
"""
This is a Amenity class with the public class attributes:
- name: string - empty string
"""
name = ''
|
from __future__ import print_function
from __future__ import division
import string
import datetime
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
from keras.models import Model
from keras.optimizers import SGD
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
np.random.seed(123)
##### PARAMETERS #####
# 'options' is never defined in the original script; a minimal argparse setup is assumed
# here so the parameters below can actually be read from the command line.
import argparse
_parser = argparse.ArgumentParser()
for _arg in ('--maxlen', '--num_epoch', '--batch_size', '--z1', '--z2'):
    _parser.add_argument(_arg, type=int, required=True)
_parser.add_argument('--train', required=True)
_parser.add_argument('--test', required=True)
options = _parser.parse_args()
maxlen = options.maxlen
nb_epoch = options.num_epoch
batch_size = options.batch_size
z1 = options.z1
z2 = options.z2
train = options.train
test = options.test
######################
def makeClean(text, numWords):
line_split = text.split()
tokens_text = line_split[0:numWords]
tokens_text = [w.lower() for w in tokens_text]
return tokens_text
##################################
# LOAD DATA
#################################
print('Loading data...')
import csv
X_train = []
Y_train = []
with open('./data/'+train, 'r') as f:
reader = csv.reader(f)
for line in reader:
X_train.append(line[1].strip() + line[2].strip())
Y_train.append(int(line[0].strip()))
X_train = np.array(X_train,dtype=object)
Y_train = np.array(Y_train)
X_test = []
Y_test = []
with open('./data/'+test, 'r') as f:
reader = csv.reader(f)
for line in reader:
X_test.append(line[1].strip() + line[2].strip())
Y_test.append(int(line[0].strip()))
X_test = np.array(X_test,dtype=object)
Y_test = np.array(Y_test)
X_train_clean = []
for text in X_train:
X_train_clean.append(" ".join(makeClean(text,200)))
X_train = np.array(X_train_clean,dtype=object)
del X_train_clean
X_test_clean = []
for text in X_test:
X_test_clean.append(" ".join(makeClean(text,200)))
X_test = np.array(X_test_clean,dtype=object)
del X_test_clean
enc = OneHotEncoder()
Y_train = enc.fit_transform(Y_train[:, np.newaxis]).toarray()
# reuse the encoder fitted on the training labels so both sets share the same encoding
Y_test = enc.transform(Y_test[:, np.newaxis]).toarray()
##################################
# PROCESS DATA
#################################
print('Get characters...')
alphabet = (list(string.ascii_lowercase) + list(string.digits) + [' '] +
list(string.punctuation) + ['\n'])
chars = set(alphabet)
vocab_size = len(chars)
print('Vocab size:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
print('Vectorization...')
X_train_char = np.zeros((len(X_train), maxlen, len(chars)), dtype=np.bool)
for i, sentence in enumerate(X_train):
    for t, char in enumerate(sentence[:maxlen]):  # truncate to maxlen so we never index past the array
        if char in char_indices:                  # skip any character outside the alphabet
            X_train_char[i, t, char_indices[char]] = 1
X_test_char = np.zeros((len(X_test), maxlen, len(chars)), dtype=np.bool)
for i, sentence in enumerate(X_test):
    for t, char in enumerate(sentence[:maxlen]):
        if char in char_indices:
            X_test_char[i, t, char_indices[char]] = 1
print('train shape: ',X_train_char.shape)
print('test shape: ',X_test_char.shape)
##################################
# CNN SETUP
#################################
main_input = Input(shape=(maxlen,vocab_size), name='main_input')
conv = Convolution1D(nb_filter=256, filter_length=7,
border_mode='valid', activation='relu',
input_shape=(maxlen, vocab_size))(main_input)
conv = MaxPooling1D(pool_length=3)(conv)
conv1 = Convolution1D(nb_filter=256, filter_length=7,
border_mode='valid', activation='relu')(conv)
conv1 = MaxPooling1D(pool_length=3)(conv1)
conv2 = Convolution1D(nb_filter=256, filter_length=3,
border_mode='valid', activation='relu')(conv1)
conv3 = Convolution1D(nb_filter=256, filter_length=3,
border_mode='valid', activation='relu')(conv2)
conv4 = Convolution1D(nb_filter=256, filter_length=3,
border_mode='valid', activation='relu')(conv3)
conv5 = Convolution1D(nb_filter=256, filter_length=3,
border_mode='valid', activation='relu')(conv4)
conv5 = MaxPooling1D(pool_length=3)(conv5)
conv5 = Flatten()(conv5)
#Two dense layers with dropout of .5
z = Dropout(0.5)(Dense(z1, activation='relu')(conv5))
z = Dropout(0.5)(Dense(z2, activation='relu')(z))
out = Dense(Y_train.shape[1], activation='softmax')(z)
model = Model(input=main_input, output=out)
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
############################
# RUN MODEL
###########################
history = model.fit(X_train_char,Y_train,batch_size=batch_size,nb_epoch=nb_epoch,validation_data=(X_test_char, Y_test))
score, acc = model.evaluate(X_test_char, Y_test, batch_size=batch_size)
val_acc = history.history['val_acc']
val_acc_max = np.amax(val_acc)
val_acc_max_ep = np.argmax(val_acc)
print('Best epoch:', val_acc_max_ep+1)
print('Best test error:', 1-val_acc_max)
|
__author__ = 'Kostya'
|
import numpy as np
import pandas as pd
from dataset_loader import DatasetLoader
from keras.models import load_model
dataset = DatasetLoader()
x = dataset.load_test_data('./fgd_prediction/dataset/test.csv')
x /= 255
model = load_model('./fgd_prediction/model.h5')
pred = model.predict(x)
index = 0
for result in pred:
print('index:{}, class:{}'.format(index, np.argmax(result)))
index += 1
|
# Determine if a list is strictly increasing.
def IsAscending(A):
i = 1
maxN = A[0]
while i < len(A) and A[i] > maxN:
maxN = A[i]
i += 1
return i - len(A) == 0
A = list(map(int, input().split()))
if IsAscending(A):
    print('YES')
else:
    print('NO')
|
#!/usr/bin/env python
# Function:
# Filename:
# Custom exception whose name does not clash with any built-in exception
class AlexException(Exception):
def __init__(self, msg):
self.message = msg
    def __str__(self):  # the base class already implements this, so defining it is optional
return self.message
# return 'dfjdkj'
try:
    raise AlexException('Cannot connect to the database')
except AlexException as e:
print(e)
# The case below should not be attempted:
# a custom exception that reuses a built-in exception's name shadows it, so the real built-in error can no longer be caught
class IndexError(Exception):
def __init__(self, msg):
self.message = msg
    def __str__(self):  # the base class already implements this, so defining it is optional
return self.message
# return 'dfjdkj'
try:
    name = []
    print(name[3])
    raise IndexError('Cannot connect to the database')
except IndexError as e:
print(e)
|
import aiohttp
from aiohttp import web
from jinja2 import Environment, FileSystemLoader
import json
from .filegetter import DebufFileGetter
from .. import messagesToHtml
from ..utils import templates_list, js
from pathlib import Path
selfdir = Path(__file__).parent
module = selfdir.parent
loader = FileSystemLoader(module / "templates")
env = Environment(loader=loader, enable_async=True, cache_size=0)
filegetter = DebufFileGetter()
app = web.Application()
routes = web.RouteTableDef()
@routes.get("/")
async def debug(req: web.Request):
messages = []
with open(selfdir / "messages.json", encoding="utf-8") as f:
messages = json.loads(f.read())
css = open(module / "css/light.css").read()
css += "body {background-color: grey}"
rendered = await messagesToHtml(
messages, env, files=filegetter, templates=templates_list
)
html = (
f"""
    <!DOCTYPE html>
<html>
<head>
<title>TelegramImage debug</title>
</head>
<body>
<style>
{css}
</style>
{rendered}
</body>
</html>
"""
)
return web.Response(body=html, content_type="text/html")
app.add_routes(routes)
if __name__ == "__main__":
    web.run_app(app)  # 'debug' above is the route handler, not a module; run the module-level app
|
import random
from hand import rock
from hand import paper
from hand import scissors
game_images = [rock, paper, scissors]
user_input = input("What do you choose ? Type 0 for Rock, 1 for Paper or 2 for Scissors. ")
user_choice = int(user_input)
if user_choice > 2 or user_choice < 0:
    print("I couldn't understand, please replay")
else:
    print(game_images[user_choice])
    computer_choice = random.randint(0, 2)
    print(f"Computer chose: {computer_choice}")
    print(game_images[computer_choice])
    if user_choice == computer_choice:
        print("Nobody lost, nobody won, it's a draw")
    elif user_choice == 0 and computer_choice == 1:
        print("Computer wins")
    elif user_choice == 0 and computer_choice == 2:
        print("You win")
    elif user_choice == 1 and computer_choice == 0:
        print("You win")
    elif user_choice == 1 and computer_choice == 2:
        print("Computer wins")
    elif user_choice == 2 and computer_choice == 0:
        print("Computer wins")
    elif user_choice == 2 and computer_choice == 1:
        print("You win")
|
# multiplication table
def mul_table(n):
    for i in range(1, 9):
        print("%d * %d = %d" % (n, i, n*i))
n = int(input("Give me the Number "))
mul_table(n)
|
from pulp import LpMaximize, LpProblem, LpVariable, LpInteger, lpSum
def score_team(t, opt):
return {
'team_form': sum(float(p['form']) for p in t),
'team_price_change': sum(float(p['price_change']) for p in t),
'num_games': sum(float(p['next_gameweek']) for p in t),
'team_KPI': sum(float(p['KPI']) for p in t),
opt: sum(float(p[opt]) for p in t),
'total_cost': sum(float(p['sell_price']) for p in t)
}
class Opt:
max_players_per_position = {
'G': 2,
'D': 5,
'M': 5,
'F': 3
}
def __init__(self, opt_parameter, data, n_subs=None, budget=999, desired=None, remove=None):
# set instance variables
self.max_price = budget
self.prob = None
self.decision = None
self.opt_max_min = None
if n_subs:
self.wildcard = False
self.n_subs = n_subs
else:
self.wildcard = True
self.desired = desired
self.remove = remove
self.opt_parameter = opt_parameter
self.output = []
# split data parameters into data type
self.master_table = data.master_table
self.team_data = data.team_list
self.account_data = data.account_data
# use the number of games in the next gameweek to weight the optimisation parameter
# need to be VERY careful if this is a good idea or not
self.opt_list = []
for p in self.master_table:
if p[self.opt_parameter]:
self.opt_list.append(float(p[self.opt_parameter]) * p['next_gameweek'])
else:
self.opt_list.append(0)
# score current team
self.score_current_team()
# construct optimisation parameters lists
# if players to exclude have been specified then mark these
if self.remove:
self.mark_players_to_exclude()
self.remove_constraints = [p['remove'] for p in self.master_table]
# if players to include have been specified then mark these
if self.desired:
self.mark_players_to_include()
self.desired_constraints = [p['desired'] for p in self.master_table]
# if it is not a wildcard sim (i.e. a transfer sim) then mark currently owned players
if not self.wildcard:
self.mark_owned_players()
self.in_team_constraints = [p['in_team'] for p in self.master_table]
self.id_list = [p['id'] for p in self.master_table]
self.price_list = [float(item) for item in [p['sell_price'] for p in self.master_table]]
pos_lookup = ['G', 'D', 'M', 'F']
self.pos_constraints = self.create_constraint_switches_from_master(pos_lookup, 'position')
team_lookup = list(range(1, 21))
self.team_constraints = self.create_constraint_switches_from_master(team_lookup, 'team')
# get length of data
self.data_length = range(len(self.id_list))
# run simulation and post processing
self.squad_ids = self.run_optimisation()
self.squad = self.lookup_team_by_id()
@classmethod
def wildcard_simulation(cls, opt_param, data, budget, desired=None, remove=None):
n_subs = 0
return cls(opt_param, data, n_subs, budget, desired, remove)
@classmethod
def transfer_simulation(cls, opt_param, data, n_subs, budget, desired=None, remove=None):
return cls(opt_param, data, n_subs, budget, desired, remove)
def mark_players_to_exclude(self):
for p in self.master_table:
p['remove'] = 0
for rem in self.remove:
for p in self.master_table:
if rem == p['web_name']:
p['remove'] = 1
def mark_players_to_include(self):
for p in self.master_table:
p['desired'] = 0
for des in self.desired:
for p in self.master_table:
if des == p['web_name']:
p['desired'] = 1
def mark_owned_players(self):
# remove existing players from master table
for p in self.master_table:
p['in_team'] = 0
for player in self.team_data:
for idx, p in enumerate(self.master_table):
if player['id'] == p['id']:
p['in_team'] = 1
def calculate_improvement(self):
old_squad_score = sum(float(p[self.opt_parameter]) for p in self.team_data)
new_squad_score = sum(float(p[self.opt_parameter]) for p in self.squad)
return old_squad_score, new_squad_score
def extract_subs(self):
# get players that are being subbed out
existing_player_u = [i for i in self.team_data if i not in self.squad]
existing_player = sorted(existing_player_u, key=lambda k: k['position'], reverse=True)
# get players that are being subbed in
sub_u = [i for i in self.squad if i not in self.team_data]
sub = sorted(sub_u, key=lambda k: k['position'], reverse=True)
return existing_player, sub
def lookup_team_by_id(self):
team_list = []
for i in self.squad_ids:
for p in self.master_table:
if p['id'] == i:
team_list.append(p)
return team_list
def create_constraint_switches_from_master(self, lookup, attr):
"""
Creates a dictionary of 'switches' which can be used to generate constraints in optimisation problems.
This is fed with data from master_table.
:param lookup: a list of values to lookup in master_table i.e. ['G', 'D', 'M', 'F']
:param attr: the attribute in master_table to look for the lookup values i.e. 'position'
:return: A dictionary of lists, where each list corresponds to a value in lookup
"""
outlist = {}
for d in lookup:
list_result = []
for p in self.master_table:
if p[attr] == d:
list_result.append(1)
else:
list_result.append(0)
outlist[d] = list_result
return outlist
def score_current_team(self):
# score current or 'old' team
self.ots = score_team(self.team_data, self.opt_parameter)
self.old_team_score = {}
for key in self.ots:
self.old_team_score['old_' + key] = self.ots[key]
def run_optimisation(self):
# Declare problem instance, max/min problem
self.prob = LpProblem("Squad", LpMaximize)
# Declare decision variable - 1 if a player is part of the squad else 0
self.decision = LpVariable.matrix("decision", list(self.data_length), 0, 1, LpInteger)
# Objective function -> Maximize specified optimisation parameter
self.prob += lpSum(self.opt_list[i] * self.decision[i] for i in self.data_length)
# Constraint definition
self.add_constraints()
# solve problem
self.prob.solve()
# extract selected players and return
return [self.id_list[i] for i in self.data_length if self.decision[i].varValue]
def add_constraints(self):
# team constraints
# maximum of 3 players per team
for team in self.team_constraints:
self.prob += lpSum(self.team_constraints[team][i] * self.decision[i] for i in self.data_length) <= 3
# position constraints
        # constrains the team to have 2 GK, 5 DEF, 5 MID and 3 FW
for pos in self.pos_constraints:
self.prob += lpSum(self.pos_constraints[pos][i] * self.decision[i] for i in self.data_length) == \
self.max_players_per_position[pos]
# price constraint
# limits the overall price of the team
self.prob += lpSum(self.price_list[i] * self.decision[i] for i in self.data_length) <= self.max_price
# desired player constraint
if self.desired:
self.prob += lpSum(self.desired_constraints[i] * self.decision[i] for i in self.data_length) == len(
self.desired)
# players to remove constraint
if self.remove:
self.prob += lpSum(self.remove_constraints[i] * self.decision[i] for i in self.data_length) == 0
# initial squad constraint - ONLY USE IN TRANSFER SIMULATION
# ensures that the final team has (15 - number of subs) players from the initial team
if not self.wildcard:
self.prob += lpSum(
self.in_team_constraints[i] * self.decision[i] for i in self.data_length) == 15 - self.n_subs
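# A minimal, self-contained sketch (illustrative numbers, not real player data) of the same
# 0/1 decision-variable pattern used in run_optimisation/add_constraints above: maximise a
# score subject to a budget, with one binary decision per candidate.
if __name__ == '__main__':
    scores = [8.2, 6.5, 7.9, 5.1]
    prices = [5.5, 4.0, 6.0, 3.5]
    budget = 10.0
    prob = LpProblem("ExampleSquad", LpMaximize)
    decision = LpVariable.matrix("decision", list(range(len(scores))), 0, 1, LpInteger)
    prob += lpSum(scores[i] * decision[i] for i in range(len(scores)))            # objective
    prob += lpSum(prices[i] * decision[i] for i in range(len(scores))) <= budget  # budget constraint
    prob.solve()
    print([i for i in range(len(scores)) if decision[i].varValue])                # selected indices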
|
#!/usr/bin/env python
'''
Detects a word and changes it to "****"
'''
def censor(text, word):
words = text.split() # Split the text into a list of words
for i in range(len(words)): # Loop through words
if words[i] == word:
words[i] = "*" * len(word)
return " ".join(words)
text = 'Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been '\
'the industry\'s standard dummy text ever since the 1500s, when an unknown printer took a galley of '\
'type and scrambled it to make a type specimen book. It has survived not only five centuries, but also '\
'the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s '\
'with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop '\
'publishing software like Aldus PageMaker including versions of Lorem Ipsum.'
word = 'Lorem'
print(censor(text, word))
|
import tensorflow as tf
# 1. sigmoid
x = tf.linspace(-10., 10., 10)  # start (float), stop (float), number of elements
print(x)
with tf.GradientTape() as tape:
tape.watch(x)
y = tf.sigmoid(x)
print(y)
grads = tape.gradient(y, [x])
print(grads)
'''
x:
[-10. -7.7777777 -5.5555553 -3.333333 -1.1111107 1.1111116
3.333334 5.5555563 7.7777786 10. ]
y:
[4.5418739e-05 4.1875243e-04 3.8510859e-03 3.4445167e-02 2.4766389e-01 7.5233626e-01
9.6555483e-01 9.9614894e-01 9.9958128e-01 9.9995458e-01]
grads:
[4.5416677e-05, 4.1857705e-04, 3.8362551e-03, 3.3258699e-02, 1.8632649e-01, 1.8632641e-01,
3.3258699e-02, 3.8362255e-03, 4.1854731e-04, 4.5416677e-05]
When x = -3.333333, y = 3.4445167e-02 is already very close to 0, and the gradient 3.3258699e-02 is also close to 0.
When x = 3.333334, y = 9.6555483e-01 is already very close to 1, and the gradient 3.3258699e-02 is likewise close to 0.
'''
print("====================================================================")
# 2. tanh: tanh(x) = 2*sigmoid(2x) - 1
x = tf.linspace(-5., 5., 10)  # start (float), stop (float), number of elements
y = tf.tanh(x)  # values lie between -1 and 1
print(y)
print("====================================================================")
# 3. relu: the gradient is 0 for x < 0 and 1 for x > 0; it neither amplifies nor shrinks the signal, and its derivative is cheap to compute when searching for an optimum.
x = tf.linspace(-1., 1., 10)
y = tf.nn.relu(x)
print(y)
y = tf.nn.leaky_relu(x)
print(y)
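# A minimal sketch (same GradientTape pattern as in section 1) checking the gradients described
# in the comments above: d/dx tanh(x) = 1 - tanh(x)^2, and the relu gradient is 0 for x < 0 and 1 for x > 0.
x = tf.linspace(-1., 1., 10)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.tanh(x)
grads = tape.gradient(y, [x])
print(grads)  # elementwise equal to 1 - tf.tanh(x)**2
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.relu(x)
grads = tape.gradient(y, [x])
print(grads)  # 0 for the negative inputs, 1 for the positive inputs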
|
from .stage import PipelineStage
import matplotlib
matplotlib.use("agg")
matplotlib.rcParams['text.usetex'] = False
import matplotlib.pyplot as plt
import numpy as np
from chainconsumer import ChainConsumer
latex_names = {
"cosmological_parameters--omega_b" : r"\Omega_b",
"cosmological_parameters--omega_m" : r"\Omega_m",
"cosmological_parameters--omega_nu" : r"Omega_\nu",
"cosmological_parameters--ommh2" : r"\Omega_m h^2",
"cosmological_parameters--omch2" : r"\Omega_c h^2",
"cosmological_parameters--ombh2" : r"\Omega_b h^2",
"cosmological_parameters--omnuh2" : r"\Omega_\nu h^2",
"cosmological_parameters--h0" : r"h",
"cosmological_parameters--hubble" : r"H_0",
"cosmological_parameters--w" : r"w",
"cosmological_parameters--wa" : r"w_a",
"cosmological_parameters--omega_k" : r"\Omega_k",
"cosmological_parameters--omega_l" : r"\Omega_\Lambda",
"cosmological_parameters--tau" : r"\tau",
"cosmological_parameters--n_s" : r"n_s",
"cosmological_parameters--A_s" : r"A_s",
"cosmological_parameters--sigma_8" : r"\sigma_8",
"cosmological_parameters--sigma8_input" : r"\sigma_8",
"cosmological_parameters--r_t" : r"r_t",
"cosmological_parameters--yhe" : r"Y_\mathrm{He}",
"supernova_params--alpha" : r"\alpha",
"supernova_params--beta" : r"\beta",
"supernova_params--M0" : r"M_0",
"supernova_params--deltam" : r"\Delta M",
"planck--A_ps_100" : r"A^100_{\mathrm{PS}}",
"planck--A_ps_143" : r"A^143_{\mathrm{PS}}",
"planck--A_ps_217" : r"A^217_{\mathrm{PS}}",
"planck--A_cib_143" : r"A^143_{\mathrm{CIB}}",
"planck--A_cib_217" : r"A^217_{\mathrm{CIB}}",
"planck--A_sz" : r"A_{\mathrm{SZ}}",
"planck--r_ps" : r"r_{\mathrm{PS}}",
"planck--r_cib" : r"r_{\mathrm{CIB}}",
"planck--n_Dl_cib" : r"r_{\mathrm{PS}}",
"planck--cal_100" : r"{\cal C}_{100}",
"planck--cal_143" : r"{\cal C}_{143}",
"planck--cal_217" : r"{\cal C}_{217}",
"planck--xi_sz_cib" : r"\xi^{\mathrm{SZ}}_{\mathrm{CIB}}",
"planck--A_ksz" : r"A_\mathrm{KSZ}",
"planck--Bm_1_1" : r"B_{11}",
"planck--A" : r"A_{\mathrm{planck}}",
"bias_parameters--alpha" : r"\alpha",
"bias_parameters--b0" : r"b_0",
"bias_parameters--a" : r"a",
"bias_parameters--A" : r"A",
"bias_parameters--c" : r"c",
"post_friedmann_parameters--d_0" : r"D_0",
"post_friedmann_parameters--d_inf" : r"D_\infty",
"post_friedmann_parameters--q_0" : r"Q_0",
"post_friedmann_parameters--q_inf" : r"Q_\infty",
"post_friedmann_parameters--s" : r"s",
"post_friedmann_parameters--k_c" : r"k_c",
"clusters--M_max" : r"M_\mathrm{max}",
"intrinsic_alignment_parameters--A" : r"A",
}
class Plots(PipelineStage):
name = "plots"
inputs = {
"chain" : ("cosmology", "chain.txt") ,
}
outputs = {
"corner" : "corner.png",
}
    def run(self):
        data, names = self.read_chain()
        self.engine = ChainConsumer()
        # the original call passed an undefined posterior; only the chain and parameter names are added here
        self.engine.add_chain(data, name="1", parameters=names)
        self.engine.configure(sigmas=[0, 1, 2], kde=False)
        self.engine.plot()
    def read_chain(self):
        filename = self.input_path("chain")
        data = np.loadtxt(filename)
        names = open(filename).readline().strip("#").split()
        return data, names
def write(self):
pass
|
#!/usr/bin/python
prots='../list/ZINC_protein_index.tsv' #list of unique proteins with protein index
blast='../list/ZINC_blast_result.dat' #list of BLAST results for ZINC proteins
idx2prot={}
prot2idx={}
with open(prots,"r") as protline:
next(protline)
for line in protline:
line=line.strip().split("\t")
idx=str(line[0])
prot=str(line[1])
idx2prot[idx]=prot
prot2idx[prot]=idx
maxbitscore={}
with open(blast,"r") as bline:
for line in bline:
line=line.strip().split("\t")
query=str(line[0]).strip().split("|")[1] #Query gene ID
target=str(line[1]).strip().split("|")[1] #Target gene ID
bitscore=float(line[-1]) #bit score for query-target pair
if query == target:
maxbitscore[query]=bitscore #define self-query score
with open(blast,"r") as bline:
for line in bline:
line=line.strip().split("\t")
query=str(line[0]).strip().split("|")[1] #Query gene ID
target=str(line[1]).strip().split("|")[1] #Target gene ID
bitscore=float(line[-1]) #bit score for query-target pair
if query == target:
#skip self-queries
continue
else:
qIdx=prot2idx[query]
tIdx=prot2idx[target]
simscore=float(bitscore/maxbitscore[query])
print "%s, %s, %s"%(qIdx,tIdx,str(simscore))
|
# learning scheme part2
# tensor and torch.autograd
"""
tensor可以记住他们自己来自什么运算,以及,其起源的父张量,并且提供相对于输入的导数链,因此无需手动对模型求导
不管如何嵌套,只要给出前向传播表达式,pytorch都会自动提供该表达式相对于其参数的梯度
在定义tensor的时候,required_grad=True,表示,pytorch需要追踪在params上进行运算而产生的所有tensor,换句话说,任何以params为祖先的Tensor都可以访问从params到该tensor所调用的函数链,如果这些函数是可微的,如果这些函数是可微的(大多数pytorch的tensor运算都是可微的),则导数的值会自动存储在参数tensor的grad属性中,存储在哪个tensor的grad属性中???
一般来说,所有pytorch的tensor都有一个初始化为空的grad属性
一般需要做的就是将required_grad设置为True,然后调用模型,计算损失值,然后对损失tensor:loss调用backward
torch Module中的optim subModule,可以在其中找到实现不同优化算法的类
查看算法
import torch.optim as optim
dir(optim)
overfiting模型过拟合:
方式过拟合的方法:
假设有足够多的数据,则应确保能够拟合训练数据的模型在数据点之间尽可能正则化。
有几种方法实现此目标:
一种是在损失函数中添加所谓的惩罚项,以使模型的行为更平稳,变得更慢(到一定程度)
另一种方法是向输入样本添加噪声,在训练数据样本之间人为创建新的数据,并迫使模型也尝试拟合他们
处上述方法外,可选择正确大小(就参数而言)的神经网络模型基于两个步骤:增大模型大小直到成功拟合数据,然后逐渐缩小直到不再过拟合
(其中一种理论就是,在拟合模型时,在训练集评估一次损失,然后再验证集上评估一次损失(但不进行参数更新))
在拟合和过拟合之间的平衡,可以通过以相同的方式对t_u和t_c进行打乱,然后生成的数据随机分为两部分从而得到训练集和验证集
"""
# review the part1 in 'pytorch-officeTutorial2_3'
import torch
t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_c = torch.tensor(t_c)
t_u = torch.tensor(t_u)
# Define the model
def model(t_u, w, b):
return w * t_u + b
# Define the loss function
def loss_fn(t_p, t_c):
squared_diffs = (t_p - t_c) ** 2
return squared_diffs.mean()
# # Initialize the parameters
# params = torch.tensor([1.0, 0.0], requires_grad=True)
#
# # Compute the loss
# loss = loss_fn(model(t_u, *params), t_c)
# # Run backpropagation
# loss.backward()
# # Inspect the computed gradients
# print(params.grad)  # the grad attribute of params holds the derivative of the loss with respect to each element of params
"""
见文档,图解很详细
"""
"""
你可以将(包含任意数量的张量)的required_grad设置为True,以及组合任何函数。在这种情况下,pytorch会沿整个函数链(即计算图)计算损失的导数,并在这些张量(即计算图的叶节点)的grad属性中将这些导数值累积起来
注意,导数值是累积的,???
调用backward()函数会导致导数值在叶节点处累积,所以将其用于参数更新后,需要将梯度显式清零
重复调用backward会导致导数在叶节点处累积,因此,如果调用了backward,然后再次计算损失并再次调用backward()如在训练循环中一样,那么在每个叶节点上的梯度会被累积(求和)在前一次迭代计算出的那个叶节点上,导致梯度值得不正确,因此为防止这种情况发生,需要再每次迭代时将梯度显式清零,直接使用zero_()函数
if params.grad is not None:
params.grad.zero_()
"""
# # Define the training loop
# def training_loop(n_epochs, learning_rate, params, t_u, t_c):
#     for epoch in range(1, n_epochs + 1):
#         if params.grad is not None: # when first created, before the model (i.e. the forward pass) has run, grad is just an empty attribute
#             params.grad.zero_() # zero the gradients at every epoch
#
#         t_p = model(t_u, *params) # run the forward pass
#         loss = loss_fn(t_p, t_c) # compute the loss
#
#         # compute the gradients automatically
#         loss.backward() # run backpropagation; the gradient of the loss w.r.t. the parameters ends up in params.grad
#
#         # update the parameters by hand
#         params = (params - learning_rate * params.grad).detach().requires_grad_() # update the parameters
#
#         if epoch % 500 == 0:
#             print('Epoch%d,Loss%f' % (epoch, float(loss)))
#     return params
'''
.detach().requires_grad_()
p1 = (p0 - lr * p0.grad)
where p0 holds the random weights used to initialize the model, and p0.grad is computed from the loss function using p0 and the training data.
The second iteration is then p2 = (p1 - lr * p1.grad).
.detach() detaches the new params tensor from the computation graph associated with its update expression, so params loses the memory of the operations that generated it. You can then call .requires_grad_(), an in-place operation (note the trailing "_"),
to re-enable autograd on the tensor. Now the memory held by the old version of params can be released, and backpropagation only needs to go through the current weights.
'''
# # Start training
# t_un = 0.1 * t_u
# training_loop(
# n_epochs=5000,
# learning_rate=1e-2,
# params=torch.tensor([1.0, 0.0], requires_grad=True),
# t_u=t_un,
# t_c=t_c)
# Optimizers
"""
The purpose of an optimizer is to update the parameters: optimizer = optim.SomeOptimizer(param1, param2, ...)
The most commonly used one is SGD, stochastic gradient descent.
The momentum algorithm builds on SGD; simply passing the momentum argument to SGD enables it.
t_p = model(t_u, *params)
loss = loss_fn(t_p, t_c)
loss.backward()
optimizer.step()
After calling step, the params values are updated and there is no need to update them yourself. What happens on step is: the optimizer updates params by subtracting the product of learning_rate and grad from params, exactly the same update that was written by hand before.
"""
# Redefine the training loop using torch.optim
def training_loop(n_epochs, optimizer, params, t_u, t_c):
for epoch in range(1, n_epochs + 1):
t_p = model(t_u, params)
loss = loss_fn(t_p, t_c)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 500 == 0:
print('epoch%d,loss%f' % (epoch, float(loss)))
return params
params = torch.tensor([1.0, 0.0], requires_grad=True)
learning_rate = 1e-2
optimizer = torch.optim.SGD([params], lr=learning_rate)
# training_loop(n_epochs=5000, optimizer=optimizer, params=params, t_u=0.1 * t_u, t_c=t_c)
# Shuffling the elements of a tensor is equivalent to permuting its indices; use the randperm function
n_samples = t_u.shape[0]
n_val = int(0.2 * n_samples)
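# A minimal sketch of the shuffle-and-split described above: permute the sample indices with
# randperm, then carve off the last n_val indices as the validation set.
shuffled_indices = torch.randperm(n_samples)
train_indices = shuffled_indices[:-n_val]
val_indices = shuffled_indices[-n_val:]
train_t_u = t_u[train_indices]
train_t_c = t_c[train_indices]
val_t_u = t_u[val_indices]
val_t_c = t_c[val_indices]
print(train_indices, val_indices)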
|
from xml.etree import ElementTree
from xml.etree.ElementTree import SubElement
import adsk.core
import adsk.fusion
import traceback
def write_xml_param_state(root, new_state, design):
# Create a new State in the xml tree
state = SubElement(root, 'state', name=new_state)
user_params = design.userParameters
for param in user_params:
# Record parameter value state
if param is not None:
SubElement(state, 'parameter', value=str(param.value), name=param.name)
xml_string = ElementTree.tostring(root, encoding='unicode', method='xml')
return xml_string
def read_xml_param_state(root, state):
app = adsk.core.Application.get()
design = adsk.fusion.Design.cast(app.activeProduct)
# Get All parameters in design
all_params = design.allParameters
for param in all_params:
# Apply Saved dimension info
if param is not None:
test = root.find("state[@name='%s']/parameter[@name='%s']" % (state, param.name))
if test is not None:
param.value = float(test.get('value'))
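# Illustrative shape of the XML produced by write_xml_param_state (element and attribute names
# come from the code above; the root element and the parameter names depend on the design):
#
#   <root>
#     <state name="my_saved_state">
#       <parameter value="12.5" name="Length" />
#       <parameter value="3.0" name="Width" />
#     </state>
#   </root>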
|
# coding: utf-8
# ## Task 1
#
# ### 1. Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5, between 2000 and 3200 (both included). The numbers obtained should be printed in a comma-separated sequence on a single line.
# In[72]:
b=[]
for x in range(2000,3201):
if (x%7==0) and (x%5!=0):
b.append(str(x))
print (','.join(b))
#print(b)
len(b)
# ### 2. Write a Python program to accept the user's first and last name and then print them in reverse order with a space between the first name and last name.
# In[223]:
n = input("Whats ur name :: ")
m= input("Enter last name :: ")
print("Reverser order ::" + m+" "+ n)
# In[99]:
n = input("Whats ur name :: ")
for x in range(len(n)-1,-1,-1):
print(n[x])
# ### 3. Write a Python program to find the volume of a sphere with diameter 12 cm.
# ##### Formula: V = 4/3 * π * r³
# In[146]:
r = 6
v = (4/3)*3.14*r*r*r
v
# ## Task 2
#
# ### 1. Write a program which accepts a sequence of comma-separated numbers from console and generate a list.
#
# In[151]:
n=int(input("Enter number of elements you want to input :: "))
m=[]
for x in range(0, n):
e = int(input())
m.append(e)
print(m)
# ### 2. Create the below pattern using nested for loop in Python.
#
# In[187]:
n=5
for x in range(0,5):
for j in range(0, x+1):
print('* ', end="")
print()
for y in range(0,4):
for i in range(4,y,-1):
print('* ', end="")
print()
# ### 3. Write a Python program to reverse a word after accepting the input from the user.
# In[189]:
m = input("Whats ur name :: ")
n=[]
for x in range(len(m)-1,-1,-1):
n.append(str(m[x]))
print (''.join(n))
# ### 4. Write a Python Program to print the given string in the format specified in the sample output.
# WE, THE PEOPLE OF INDIA, having solemnly resolved to constitute India into a
# SOVEREIGN, SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC and to secure to all
# its citizens
# In[219]:
string = "WE, THE PEOPLE OF INDIA,\n having solemnly resolved to constitute India into a SOVEREIGN,!\n SOCIALIST, SECULAR, DEMOCRATIC REPUBLIC \n and to secure to all its citizens"
print(string)
|
from Login.NewContract import *
from register.RegisterPage import *
from Login.logger import *
class TestNewContract(unittest.TestCase, Page):
    '''Create a new contract'''
def setUp(self):
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(10)
        test_name = self._testMethodName + '>>>>>>>>>>>>>>test case started'
logger.info(test_name)
def test_newcontract(self):
        '''Create a contract without uploading the contract file'''
        driver = self.driver
        logger.info('Log in with the sales account')
        test_user_login(driver)
        logger.info('Create a new contract')
        test_contract(driver)
        logger.info('Do not upload the contract file')
        sleep(1)
        self.get_screenshot("screenshot - contract file not uploaded")
        logger.info('Take a screenshot')
        sleep(2)
        logger.info('Assert')
text = self.alert_text()
        self.assertEqual(text, "请先上传合同文件")  # application alert text: "Please upload the contract file first"
def test_newcontract2(self):
        '''Create a contract normally'''
        driver = self.driver
        logger.info('Log in with the sales account')
        test_user_login(driver)
        logger.info('Create a new contract')
        test_contract2(driver)
        sleep(1)
        self.get_screenshot("screenshot - contract file uploaded")
        logger.info('Take a screenshot')
sleep(2)
self.alert_accept()
def tearDown(self):
sleep(3)
for method_name, error in self._outcome.errors:
if error:
case_name = self._testMethodName
self.get_screenshot(case_name)
logger.error(method_name)
logger.error(error)
self.driver.quit()
        test_name = self._testMethodName + '>>>>>>>>>>>>>>test case finished'
logger.info(test_name)
if __name__ == '__main__':
unittest.main()
|
import cPickle as pickle
import numpy as np
import os, glob
"""
Store symmetry operations in .pickle format, with keys denoting the point group operation.
For each point group, the symmetry operations contain both the rotation and translational
operations; the final column corresponds to the translational element, and the zeroth key
corresponds to the identity operation.
Also store restricted phase information in .pickle format, with keys denoting the point
group operation and values as np.array([caxis_value, cslice_value]) in degrees. Central
axis and central slice reflections are expected to be integer multiples of these values.
Usage: python sym_ops.py
Output is saved to reference/sym_ops.pickle, reference/phase_restrictions.pickle, and reference/reindexing_ops.pickle.
"""
################################## USEFUL FUNCTIONS ##################################
def str_to_matrix(remarks):
"""
Convert a dictionary whose values are symmetry operations in string
format (as listed in REMARK 290 section of PDB header) to a dict of
matrices corresponding to the translational and rotational elements.
Inputs:
-------
remarks: dictionary of symmetry operations in string format
Outputs:
--------
sym_ops: dictionary of symmetry operations in matrix format
"""
sym_ops = dict()
for key in remarks:
m = np.zeros((3,4))
elems = remarks[key].split(",")
for row in range(3):
r_t = elems[row].split("+")
# handle rotational portion
if r_t[0][-1] == "X": m[row][0] = 1.0
if r_t[0][-1] == "Y": m[row][1] = 1.0
if r_t[0][-1] == "Z": m[row][2] = 1.0
if r_t[0][0] == "-": m[row] *= -1.0
# handle translational portion, if present
if len(r_t) > 1:
num, den = r_t[1].split("/")
m[row][-1] = float(num)/float(den)
sym_ops[key] = m
return sym_ops
def extract_remarks290(filename):
"""
Extract the symmetry operations listed in the REMARKS 290 section of
PDB header and store the string representations in output dictionary.
Inputs:
-------
filename: path to PDB file
Outputs:
--------
remarks: dictionary of symmetry operations in string format
"""
counter = 0
remarks = dict()
with open(filename, "r") as f:
        # only extract lines corresponding to symmetry operations
extract = False
for line in f:
if "REMARK 290 NNNMMM OPERATOR" in line:
extract = True
continue
elif line.split()[-1] == "290":
extract = False
continue
elif extract:
remarks[counter] = line.split()[-1]
counter += 1
return remarks
def extract_space_group(filename):
"""
Extract the space group symbol (Hermann-Mauguin notation) listed in the
CRYST1 record of the PDB header.
Inputs:
-------
filename: path to PDB file
Outputs:
--------
sg_symbol: space group symbol, string
"""
with open(filename, "r") as f:
for line in f:
if "CRYST1" in line:
sg_symbol = line[55:65].rstrip()
return sg_symbol
################################# SYMMETRY OPERATIONS #################################
sym_ops = dict()
# symmetry relationships from PDB files
filenames = glob.glob("reference/pdb_files/*.pdb")
for fname in filenames:
key = extract_space_group(fname)
as_str = extract_remarks290(fname)
sym_ops[key] = str_to_matrix(as_str)
# generate reference directory if it doesn't already exist
if not os.path.isdir("reference"):
os.mkdir("reference")
# save dictionary as reference/sym_ops.pickle
with open("reference/sym_ops.pickle", "wb") as handle:
pickle.dump(sym_ops, handle)
################################# RESTRICTED PHASES ###################################
res_phases = dict()
res_phases['P 21 21 21'] = np.array([180.0, 90.0])
res_phases['P 43 21 2'] = np.array([180.0, 45.0])
res_phases['F 4 3 2'] = np.array([180.0, 180.0])
# save dictionary as reference/restricted_phases.pickle
with open("reference/phase_restrictions.pickle", "wb") as handle:
pickle.dump(res_phases, handle)
############################## REINDEXING OPERATIONS #################################
reidx_ops = dict()
reidx_ops['P 43 21 2'] = dict()
reidx_ops['P 43 21 2'][0] = np.array([[1,0,0],[0,1,0],[0,0,1]]) # (h,k,l)
reidx_ops['P 43 21 2'][1] = np.array([[-1,0,0],[0,-1,0],[0,0,1]]) # (-h,-k,l)
reidx_ops['P 43 21 2'][2] = np.array([[0,1,0],[1,0,0],[0,0,-1]]) # (k,h,-l)
reidx_ops['P 43 21 2'][3] = np.array([[0,-1,0],[-1,0,0],[0,0,-1]]) # (-k,-h,-l)
# save dictionary as reference/reindexing_ops.pickle
with open("reference/reindexing_ops.pickle", "wb") as handle:
pickle.dump(reidx_ops, handle)
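#################################### USAGE SKETCH #####################################
# A minimal sketch (illustrative only) of applying one of the stored operations to a
# fractional coordinate: the first three columns are the rotation, the last the translation.
if sym_ops:
    sg = sorted(sym_ops)[0]
    op = sym_ops[sg][0]  # key 0 is the identity operation
    frac = np.array([0.1, 0.2, 0.3])
    transformed = np.dot(op[:, :3], frac) + op[:, 3]
    print "%s, identity op: %s -> %s" % (sg, frac, transformed)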
|
# -*- coding: utf-8 -*-
#Bézout identity
#For two positive integers a and b, find integers s and t such that s*a + t*b = (a,b)
def bezoutEquation(a=1, b=1):
    if a < b: #if a < b, solve for (b, a) and swap the coefficients back
        s, t = bezoutEquation(b, a)
        return t, s
    q = extendedEucrideanDivision(a, b) #extended Euclidean division: the list of partial quotients q
    s = coefficient_s(q) #coefficient s, which pairs with the smaller number b
    t = coefficient_t(q) #coefficient t, which pairs with the larger number a
    return t, s #ordered so that the first returned value multiplies a: t*a + s*b = (a,b)
def extendedEucrideanDivision(a, b, qSet=None):
    if qSet is None: qSet = [0] #fresh list on each call; a mutable default argument would leak state between calls
    q = a / b
    r = a % b
    if r == 0:
        return qSet #(r,0) = r
    else:
        qSet.append(q)
        return extendedEucrideanDivision(b, r, qSet) #(a,b) = (r_-2,r_-1) = (r_-1,r_0) = … = (r_n,r_n+1) = (r_n,0) = r_n
def coefficient_s(q_j, s_j1=0, s_j2=1, ctr=0):
try:
s = -1 * q_j[ctr] * s_j1 + s_j2 #s_j = (-q_j) * s_j-1 + s_j-2
except IndexError:
return s_j1
s_j2 = s_j1
s_j1 = s
ctr += 1
    return coefficient_s(q_j, s_j1, s_j2, ctr)
def coefficient_t(q_j, t_j1=1, t_j2=0, ctr=0):
try:
t = -1 * q_j[ctr] * t_j1 + t_j2 #t_j = (-q_j) * t_j-1 + t_j-2
except IndexError:
return t_j1
t_j2 = t_j1
t_j1 = t
ctr += 1
return coefficient_t(q_j, t_j1, t_j2, ctr)
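#Worked example (illustrative): for a = 7, b = 5 the quotient list is [0, 1, 2];
#coefficient_s returns 3 (which pairs with 5) and coefficient_t returns -2 (which pairs with 7),
#so bezoutEquation(7, 5) returns (-2, 3), and indeed (-2)*7 + 3*5 = 1 = (7,5)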
if __name__ == '__main__':
while True:
try:
a = int(raw_input('The first number is '))
if a <= 0:
print 'The number must be positive.'
continue
except ValueError:
print 'Invalid input.'
continue
break
while True:
try:
m = int(raw_input('The second number is '))
if m <= 0:
print 'The number must be positive.'
continue
except ValueError:
print 'Invalid input.'
continue
break
(s,t) = bezoutEquation(a,m)
print '%d*%d + %d*%d = (%d,%d)' %(s, a, t, m, a, m)
|
# Problem Statement : Accept number from user and check whether number is even or
# odd.
def EvenOdd(iNo):
if(iNo%2==0):
return True
else:
return False
def main():
iNo = int(input("Enter a number :\n"))
iRet = EvenOdd(iNo)
if(iRet==True):
print("{} is Even".format(iNo))
else:
print("{} is Odd".format(iNo))
if __name__ == '__main__':
main()
|
from random import random, sample, choice
from math import floor
from tqdm import tqdm
from numpy import array, dot, mean
from numpy.linalg import pinv
from sys import exit
#SST: the total error in a model, it is the sum of all deviations squared.
#SSR: a measure of the explained variation in SST
#COD: stands for ‘coefficient of determination’ which is basically a measure of how good a model is
#error: the average error, is an average of all deviations from expected values.
def multiple_linear_regression(inputs, outputs,coeffs):
X, Y = inputs, outputs #array(inputs), array(outputs)
X_t, Y_t = X.transpose(), Y.transpose()
#coeff = dot((pinv((dot(X_t, X)))), (dot(X_t, Y)))
Y_p = dot(X, coeffs)
Y_mean = mean(Y)
SST = array([(i - Y_mean) ** 2 for i in Y]).sum()
SSR = array([(i - j) ** 2 for i, j in zip(Y, Y_p)]).sum()
COD = (1 - (SSR / SST)) * 100.0
av_error = (SSR / len(Y))
#return {'COD': COD, 'coeff': coeff, 'error': av_error}
return COD
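# Example usage (illustrative data): fit the coefficients with the pseudo-inverse normal
# equation (the approach in the commented-out line above), then score them with the function.
if __name__ == '__main__':
    X = array([[1.0, x, x ** 2] for x in range(10)])                  # design matrix with an intercept column
    Y = array([1.0 + 2.0 * x - 0.5 * x ** 2 for x in range(10)])      # exact quadratic response
    fitted = dot(pinv(dot(X.transpose(), X)), dot(X.transpose(), Y))  # least-squares coefficients
    print(multiple_linear_regression(X, Y, fitted))                   # COD close to 100 for this exact fit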
|
def step_class(page, step):
return page.pyquery(f'[data-step="{step}"]').attr('class')
|
#!/usr/bin/env python
import rospy
import math
import json
import time
from utilities import PORT_RENFREW_LATLON, MAUI_LATLON
from std_msgs.msg import Float64
import local_pathfinding.msg as msg
from geopy.distance import distance
# Constants for this class
PUBLISH_PERIOD_SECONDS = 10.0 # Can keep high to simulate real global pathfinding
NEW_GLOBAL_PATH_PERIOD_SECONDS = 3600.0 * 24
AVG_WAYPOINT_DISTANCE_KM = 30 # TODO: Set this to match global pathfinding
# Global variables for tracking boat position
boatLat = None
boatLon = None
def gpsCallback(data):
global boatLat
global boatLon
boatLat = data.lat
boatLon = data.lon
# Global variable for speedup
speedup = 1.0
def speedupCallback(data):
global speedup
speedup = data.data
def create_path(init, goal):
path = []
# Insert the initial position
init_wp = msg.latlon()
init_wp.lat = init[0]
init_wp.lon = init[1]
path.append(init_wp)
# Just do some linear interpolation
total_distance_km = distance(init, goal).kilometers
num_global_waypoints = int(round(total_distance_km / AVG_WAYPOINT_DISTANCE_KM))
for i in range(1, num_global_waypoints):
coeff = float(i)/(num_global_waypoints)
lat = (1 - coeff)*init[0] + coeff*goal[0]
lon = (1 - coeff)*init[1] + coeff*goal[1]
print(lat, lon)
wp = msg.latlon()
wp.lat = lat
wp.lon = lon
path.append(wp)
# Insert the goal
last_wp = msg.latlon()
last_wp.lat = goal[0]
last_wp.lon = goal[1]
path.append(last_wp)
return path
def MOCK_global():
global boatLat
global boatLon
global speedup
rospy.init_node('MOCK_global_planner', anonymous=True)
pub = rospy.Publisher("globalPath", msg.path, queue_size=4)
r = rospy.Rate(float(1) / PUBLISH_PERIOD_SECONDS) # Hz
# Subscribe to GPS to publish new global paths based on boat position
rospy.Subscriber("GPS", msg.GPS, gpsCallback)
# Subscribe to speedup
rospy.Subscriber("speedup", Float64, speedupCallback)
# Wait to get boat position
while boatLat is None or boatLon is None:
rospy.loginfo("Waiting for boat GPS")
time.sleep(1)
rospy.loginfo("Received boat GPS")
init = [boatLat, boatLon]
# Create goal
goal_file = rospy.get_param('goal_file', default=None)
if goal_file:
with open(goal_file) as f:
record = json.loads(f.read())
lat = record[0]
lon = record[1]
goal = [lat, lon]
else:
goal = [MAUI_LATLON.lat, MAUI_LATLON.lon]
path = create_path(init, goal)
# Publish new global path periodically
# Publish new global path more often with speedup
republish_counter = 0
newGlobalPathPeriodSecondsSpeedup = NEW_GLOBAL_PATH_PERIOD_SECONDS / speedup
numPublishPeriodsPerUpdate = int(NEW_GLOBAL_PATH_PERIOD_SECONDS / PUBLISH_PERIOD_SECONDS)
while not rospy.is_shutdown():
# Send updated global path
if republish_counter >= numPublishPeriodsPerUpdate:
republish_counter = 0
init = [boatLat, boatLon]
path = create_path(init, goal)
else:
republish_counter += 1
pub.publish(msg.path(path))
r.sleep()
if __name__ == '__main__':
MOCK_global()
|
import sys
def splitInput(lines):
stack_data = []
moves = []
parsing_stack = True
for line in lines:
if not line:
parsing_stack = False
continue
if parsing_stack:
stack_data.append(line)
else:
moves.append(line)
stack_count = int(stack_data[-1].split()[-1])
return stack_count, stack_data[:-1], moves
def parseStacks(count, data):
stacks = [[] for _ in range(count)]
for row in data:
print(row)
for i, c in enumerate(range(1, len(row), 4)):
if row[c].strip():
stacks[i].append(row[c])
stacks = [stack[::-1] for stack in stacks]
return stacks
def parseMoves(moves):
for i in range(len(moves)):
words = moves[i].split()
move = [words[1], words[3], words[5]] # [count, from, to]
move = list(map(int, move))
move[1] -= 1 # Use 0 based indexing
move[2] -= 1
moves[i] = move
def execute(moves, stacks):
for (count, s, t) in moves:
for _ in range(count):
stacks[t].append(stacks[s][-1])
stacks[s] = stacks[s][:-1]
if __name__ == "__main__":
lines = [l[:-1] for l in sys.stdin]
stack_count, stack_data, moves = splitInput(lines)
stacks = parseStacks(stack_count, stack_data)
parseMoves(moves)
execute(moves, stacks)
answer = [" " for _ in range(stack_count)]
for i, stack in enumerate(stacks):
if stack:
answer[i] = stack[-1]
print("".join(answer))
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WRC2020_View
#
# Visualisers applied directly to `WRCEvent` data object.
# %load_ext autoreload
# %autoreload 2
from WRC_2020 import WRCEvent
# +
# !rm wrc2020swetest1.db
event = WRCEvent(dbname='wrc2020swetest1.db')
event.rallyslurper()
# -
event.getSplitTimes()[1][1566]
# TO DO - do we have startlists for each stage?
event.getStartlist()
event.stages
dir(event)
|
from pyspark.sql import SparkSession
def check_log_file():
sparkSession = SparkSession.builder.appName("example-pyspark-read-and-write").getOrCreate()
df_load = sparkSession.read.parquet('hdfs://192.168.23.200:9000/data/Parquet/AdnLog/*')
df_load.show()
return None
if __name__ == '__main__':
check_log_file()
|
import numpy as np
import sys
import unittest
sys.path.append('..')
from src import minimize
class testMinimize(unittest.TestCase):
def test_minimize(self):
n, p = 20, 4
A = np.random.rand(n, n)
A = (A + A.T)/2
def f1(y):
return np.sum(np.diag(np.dot(np.dot(y.T, A), y)))*1./2
def f1y(y):
return np.dot((A + A.T), y)*1./2
def f1yy(y):
B = np.zeros((n*p, n*p))
for j in range(p):
B[j*n:(j+1)*n, j*n:(j+1)*n] = A
return B
y0 = np.vstack([np.eye(p), np.zeros((n-p, p))])
opt_res = minimize(y0, f1, f1y, f1yy)
optval = np.sum(np.sort(np.linalg.eigvals(A))[:p])/2
self.assertTrue(np.isclose(opt_res['value'], optval))
if __name__ == '__main__':
unittest.main()
|
import os
# Converts the lines of the file into a list of tuples of the form
# [((left end of first interval, right end of first interval), (left end of second interval, right end of second interval), (letter))]
def zamien(plik):
przedzialy = []
for linia in plik:
a = linia.split('|')[0]
b = linia.split('|')[1]
litera = linia.split('|')[2]
a = a.replace(',', '.').replace(' ', '').replace('[', '').replace('(', '').replace(']', '').\
replace(')', '').split(';')
b = b.replace(',', '.').replace(' ', '').replace(']', '').replace(')', '').replace('[', '').\
replace('(', '').split(';')
a = float(a[0]), float(a[1])
b = float(b[0]), float(b[1])
litera = litera.strip()
przedzialy.append((a, b, litera))
return przedzialy
# The function walks through the records 30 at a time, printing to the console how many hits fall in the given intervals,
# and then picks the interval in which the point occurs most often
def sprawdz(przedzialy, x, y):
    znalezione = {
        'A': 0,
        'B': 0,
        'C': 0
    }
    iteracje = {29: 'first', 59: 'second', 89: 'third', 119: 'fourth', 149: 'fifth'}
    for numer_linii, przedzialy in enumerate(przedzialy):
        if (przedzialy[0][0] <= x[0] and x[1] < przedzialy[0][1]) and (przedzialy[1][0] <= y[0] and y[1] < przedzialy[1][1]):
            znalezione[przedzialy[2]] += 1
        if numer_linii in iteracje:
            print('After the %s iteration:' % iteracje[numer_linii])
            if max(znalezione.values()) > 0:
                print('Counts per interval: ', znalezione)
                print('Occurs most often in interval: ', max(znalezione, key=znalezione.get))
            else:
                print("Not found")
    if max(znalezione.values()) > 0:
        return max(znalezione, key=znalezione.get)
    else:
        return 'No numbers found in the intervals'
def main():
    # open the file and convert it into a list of tuples
    f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'learning.txt'), "r")
    przedzialy = zamien(f)
    f.close()
    print('Enter two intervals, confirming each with the Enter key and separating the endpoints with a semicolon (;)')
    x = input().split(';')
    y = input().split(';')
    # Convert the input strings to the floats needed for the calculations
    prop_x = [float(i) for i in x]
    prop_y = [float(i) for i in y]
    znalezione = sprawdz(przedzialy, prop_x, prop_y)
    print('Finally: ')
    print("These numbers mostly fall into interval: " + znalezione)
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 20:16:12 2020
@author: altsai
"""
import os
import sys
import shutil
import re
import numpy as np
import pandas as pd
#file_AA_idx='AA_idx.list'
file_AA_idx='AAA_idx.txt'
#file_AA='AA.list'
file_author='export-ads_A00.txt'
file_author='export-custom_author_all.txt'
file_affiliation='export-custom_aff_all.txt'
#file_all='export-custom_all_author_all_aff.txt'
file_all='export-custom_lFYTQu_tab.txt'
pd_all=pd.read_csv(file_all,sep='\t')
cmd_AA_idx="cat "+file_AA_idx
list_AA_idx=os.popen(cmd_AA_idx,"r").read().splitlines()
#sys.exit(0)
list_idx=[]
list_AA=[]
for i in list_AA_idx:
#print(i)
AA_idx=[i.split(' ',-1)][0]
# print(AA_idx)
AA=AA_idx[0]
# print(AA)
idx=AA_idx[1]
# print(AA,idx)
list_AA.append(AA)
list_idx.append(idx)
#print(list_AA)
#print(list_idx)
pd_AA=pd.read_csv(file_AA_idx,sep=' ',header=None)
idx_AA=pd_AA[1]
cmd_authors="cat "+file_author
list_authors=os.popen(cmd_authors,"r").read().splitlines()
#list_authors=[list_authors.replace('&','') for list_authors in list_authors]
for i in list_authors:
# print(i)
if "&" in i:
i=i.replace('& ','')
# print(i)
#author_per_paper=[i.split(';',-1)][0]
author_per_paper=[i.split('.,',-1)]
print(author_per_paper)
#sys.exit(0)
cmd_affiliation="cat "+file_affiliation
list_affiliation_per_paper=os.popen(cmd_affiliation,"r").read().splitlines()
list_NCU_AA=[]
list_NCU_idx=[]
NCU_key="Institute of Astronomy, National Central University"
idx_i=-1
for i in list_affiliation_per_paper:
#print(i)
idx_i=idx_i+1
affiliation_per_paper=[i.split(");",-1)][0]
# print(affiliation_per_paper)
# jj=0
list_author_per_paper=[]
list_NCU_AA_per_paper=[]
list_NCU_idx_per_paper=[]
for j in affiliation_per_paper:
# jj=jj+1
if NCU_key in j:
NCU_AA_in_paper=j.split("(",-1)[0][-3:]
NCU_AA_in_paper=NCU_AA_in_paper.replace(" ","")
# print(NCU_AA_in_paper)
list_NCU_AA_per_paper.append(NCU_AA_in_paper)
# NCU_idx=list_idx
NCU_idx_in_paper=idx_AA[idx_AA==NCU_AA_in_paper].index[0]
# print(NCU_idx_in_paper)
list_NCU_idx_per_paper.append(NCU_idx_in_paper)
# print(list_NCU_idx_per_paper)
list_author_per_paper=list_authors[idx_i].split('.,',-1)
# list_author_per_paper.replace('&','')
n_author_per_paper=len(list_author_per_paper)
print("# of authors = ", n_author_per_paper)
list_NCU_idx.append(list_NCU_idx_per_paper)
author1=list_author_per_paper[0]
if n_author_per_paper==1:
# print("1")
author=str(author1)
print(author)
elif n_author_per_paper==2:
# print("2")
author2=list_author_per_paper[1]
author=str(author1)+" and"+str(author2)
print(author)
elif n_author_per_paper==3:
# print("3")
author2=list_author_per_paper[1]
author3=list_author_per_paper[2]
author=str(author1)+","+str(author2)+", and"+str(author3)
print(author)
else:
# print(">3")
author_NCU=""
author2=list_author_per_paper[1]
author3=list_author_per_paper[2]
idx_k=-1
for k in list_NCU_idx_per_paper[:-1]:
if k>2:
idx_k=idx_k+1
n_NCU_idx_per_paper=len(list_NCU_idx_per_paper)
# author_k=list_author_per_paper[k]
# n_NCU_idx_per_paper=len(list_NCU_idx_per_paper)
# author_NCU=author_NCU+str(author_k)+', '
author_k1=list_author_per_paper[k]
if n_NCU_idx_per_paper==0:
author_NCU=""
elif n_NCU_idx_per_paper==1:
author_NCU=str(author_k1)
else:
author_k2=list_author_per_paper[k]
author_NCU=author_NCU+str(author_k2)+".,"
author4=author_NCU+" and"+str(list_author_per_paper[-1])
# author4=str(author_NCU)
author=str(author1)+","+str(author2)+".,"+str(author3)+"., et al. (including"+str(author4)+" from NCU)"
print(author)
# for k in list_NCU_idx_per_paper:
# if k=1:
# author=list_authors[idx_i][0]
# elif k=2
# if k>3:
# author3=list_authors[idx_i].split(';',-1)[0:4]
## print(author3)
# author4="et al. (including )"
## print(author4)
# author=author3+author4
# print(author)
# elif k<=3:
# print("including")
#print(list_NCU_idx)
|
#!/usr/bin/env python
# echo_client.py
import socket
host = socket.gethostname()
port = 12345 # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
print('Sending Hello, world')
s.sendall('Hello, world'.encode())
data = s.recv(1024)
s.close()
print('Received', repr(data))
|
from .core import ez
from .profiles import Profile
|
import regex as re
import csv
import time
from googleSearch import search_it
import numpy as np
from os.path import dirname, abspath ,join
d = dirname(dirname(abspath(__file__))) #set files directory path
import sys
# insert at position 1 in the path, as 0 is the path of this file.
sys.path.insert(1, d)
import Log
def create_splitted_file(queries_file="SearchQueries_Processed.txt", splitted_file='preprocessed_data_splitted.csv',
range_start=0,
range_end=10,
google_search='y',
write_columns=True,
file_write_type='a'):
f = open(join(d,"files\SearchQueries\processed",queries_file), "r",encoding='utf8')
queries_list_last =f.read().splitlines()
queries_list_last = list(dict.fromkeys(queries_list_last))
f.close()
#create splitted text csv file:
with open(join(d,"files",splitted_file), file_write_type, encoding='utf-8',newline='') as csvFile:
writer = csv.writer(csvFile)
if write_columns and range_start==0:
writer.writerows([['query', 'titles','links','descs']])
if range_end==-1:
range_finish = len(queries_list_last)
elif range_end<=len(queries_list_last):
range_finish = range_end
else:
range_finish = len(queries_list_last)
for i in range(range_start,range_finish):
query=queries_list_last[i]
print(i)
            # don't use Google search
if google_search=='n':
links_string=""
descripstions_string=""
titles_string=""
writer.writerows([[str(query),str(titles_string),str(links_string),str(descripstions_string)]])
csvFile.flush()
#get google results for query
else:
search_results = search_it(query)
while(search_results==None):
search_results = search_it(query)
links_string=""
descripstions_string=""
titles_string=""
if search_results!=None:
for item in search_results:
splitted=item.text.splitlines()
if(len(splitted)>=3):
titles_string=titles_string+splitted[0]+"\r" #one title in each line
links_string=links_string+splitted[1]+"\r" #one link in each line
#filter results and delete unwanted parts
# if("Translate this page" in splitted[2]):
# splitted[2]= splitted[2].replace("Translate this page", '')
# if("Rating: " in splitted[2]):
# splitted[2]= ''
if(len(splitted)>=4):
descripstions_string=descripstions_string+splitted[3]+"\r" #one description in each line
elif(len(splitted)==3):
descripstions_string=descripstions_string+splitted[2]+"\r" #one description in each line
else:
descripstions_string="\r"
writer.writerows([[str(query),str(titles_string),str(links_string),str(descripstions_string)]])
csvFile.flush()
time.sleep(np.random.randint(4,10))
# logging:
if google_search=='n':
Log.log(str(range_finish-range_start)+" data saved correctly to : "+splitted_file,"gatheringData.py")
elif google_search=='y':
Log.log(str(range_finish-range_start)+" data saved correctly (google search included) to : "+splitted_file,"gatheringData.py")
else:
Log.log(str(range_finish-range_start)+" data saved correctly (google search included) to : "+splitted_file,"gatheringData.py")
Log.warning("Google search not selected Correctly, using default","gatheringData.py")
csvFile.close()
'''
askForSearch = input("Search google for the queries?(y/n) : ")
if askForSearch=='n':
print("Ok! we don't use google search.")
else:
print("Ok! we use google search.")
print("starting...")
create_splitted_file(google_search=askForSearch)
'''
|
# -*- coding: utf-8 -*-
"""Module contains method that will be replaced by the plugin.
:author: Pawel Chomicki
"""
import os
import re
def pytest_runtest_logstart(self, nodeid, location):
"""Signal the start of running a single test item.
Hook has to be disabled because additional information may break output formatting.
"""
def pytest_runtest_logreport(self, report):
"""Process a test setup/call/teardown report relating to the respective phase of executing a test.
Hook changed to define SPECIFICATION like output format. This hook will overwrite also VERBOSE option.
"""
res = self.config.hook.pytest_report_teststatus(report=report)
cat, letter, word = res
self.stats.setdefault(cat, []).append(report)
if not letter and not word:
return
if not _is_nodeid_has_test(report.nodeid):
return
test_path = _get_test_path(report.nodeid, self.config.getini('spec_header_format'))
if test_path != self.currentfspath:
self.currentfspath = test_path
_print_class_information(self)
if not isinstance(word, tuple):
test_name = _get_test_name(report.nodeid)
markup, test_status = _format_results(report)
_print_test_result(self, test_name, test_status, markup)
def _is_nodeid_has_test(nodeid):
if len(nodeid.split("::")) >= 2:
return True
return False
def _get_test_path(nodeid, header):
levels = nodeid.split("::")
if len(levels) > 2:
class_name = levels[1]
test_case = _split_words(_remove_class_prefix(class_name))
else:
module_name = os.path.split(levels[0])[1]
class_name = ''
test_case = _capitalize_first_letter(_replace_underscores(
_remove_test_prefix(_remove_file_extension(module_name))))
return header.format(path=levels[0], class_name=class_name, test_case=test_case)
def _print_class_information(self):
if hasattr(self, '_first_triggered'):
self._tw.line()
self._tw.line()
self._tw.write(self.currentfspath)
self._first_triggered = True
def _remove_class_prefix(nodeid):
return re.sub("^Test", "", nodeid)
def _split_words(nodeid):
return re.sub(r"([A-Z])", r" \1", nodeid).strip()
def _remove_file_extension(nodeid):
return os.path.splitext(nodeid)[0]
def _remove_module_name(nodeid):
return nodeid.rsplit("::", 1)[1]
def _remove_test_prefix(nodeid):
return re.sub("^test_+", "", nodeid)
def _replace_underscores(nodeid):
return nodeid.replace("__", " ").strip().replace("_", " ").strip()
def _capitalize_first_letter(s):
return s[:1].capitalize() + s[1:]
def _get_test_name(nodeid):
test_name = _capitalize_first_letter(_replace_underscores(
_remove_test_prefix(_remove_module_name(nodeid))))
    if test_name[:1] == ' ':
test_name_parts = test_name.split(' ')
if len(test_name_parts) == 1:
return test_name.strip().capitalize()
return 'The ({0}) {1}'.format(test_name_parts[0][1:].replace(' ', '_'), test_name_parts[1])
return test_name
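# Illustrative examples (assumed node IDs and header format) of the helpers above:
#   _get_test_path("tests/test_login.py::TestUserLogin::test_ok", "{class_name}: {test_case}")
#       -> "TestUserLogin: User Login"
#   _get_test_name("tests/test_login.py::TestUserLogin::test_accepts_valid_password")
#       -> "Accepts valid password"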
def _format_results(report):
if report.passed:
return {'green': True}, 'PASS'
elif report.failed:
return {'red': True}, 'FAIL'
elif report.skipped:
return {'yellow': True}, 'SKIP'
def _print_test_result(self, test_name, test_status, markup):
self._tw.line()
self._tw.write(
" " + self.config.getini('spec_test_format').format(result=test_status, name=test_name), **markup)
def logstart_replacer(self, nodeid, location):
def wrapper():
return pytest_runtest_logstart(self, nodeid, location)
return wrapper()
def report_replacer(self, report):
def wrapper():
return pytest_runtest_logreport(self, report)
return wrapper()
def pytest_configure(config):
if config.option.spec:
import imp
import wooqi.pytest._pytest as _pytest
_pytest.terminal.TerminalReporter.pytest_runtest_logstart = logstart_replacer
_pytest.terminal.TerminalReporter.pytest_runtest_logreport = report_replacer
imp.reload(_pytest)
|
"""
Helper functions for my KENDA scripts
"""
import os
import sys
from datetime import datetime, timedelta
from subprocess import check_output
from git import Repo
from cosmo_utils.pywgrib import getfobj_ens, getfobj
from cosmo_utils.helpers import yyyymmddhhmmss_strtotime, ddhhmmss_strtotime, \
yymmddhhmm
from cosmo_utils.scores.SAL import compute_SAL
from config import * # Import config file
from cosmo_utils.pyncdf import getfobj_ncdf
import numpy as np
import matplotlib.pyplot as plt
from cosmo_utils.scores.probab import FSS
sys.path.append('/home/s/S.Rasp/repositories/enstools/')
from enstools.scores import crps_sample
from scipy.signal import convolve2d
from scipy.ndimage.filters import convolve
import pdb
np.seterr(invalid='ignore')
plt.rcParams['lines.linewidth'] = 1.7
def save_fig_and_log(fig, fig_name, plot_dir):
"""
Save the given figure along with a log file
"""
# Step 1: save figure
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
print('Saving figure: %s' % (plot_dir + '/' + fig_name + '.pdf'))
fig.savefig(plot_dir + '/' + fig_name + '.pdf')
# Step 2: Create and save log file
time_stamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    pwd = check_output(['pwd']).rstrip()  # Need to remove trailing \n
git_dir = pwd.rsplit('/', 1)[0]
git_hash = Repo(git_dir).heads[0].commit
exe_str = ' '.join(sys.argv)
s = check_output(['conda', 'env', 'list'])
for l in s.split('\n'):
if '*' in l:
py_env = l
    assert 'py_env' in locals(), 'No active conda environment found.'
log_str = ("""
Time: %s\n
Executed command:\n
python %s\n
In directory: %s\n
Git hash: %s\n
Anaconda environment: %s\n
""" % (time_stamp, exe_str, pwd, str(git_hash), py_env))
logf = open(plot_dir + '/' + fig_name + '.log', 'w+')
logf.write(log_str)
logf.close()
radarpref = 'raa01-rw_10000-'
radarsufx = '-dwd---bin.nc'
precsuf = '_15'
precsuf_da = '_prec'
gribpref = 'lfff'
gribpref_da = 'lff'
nens = 20
nens_da = 40
# Define loading functions
def load_det(datadir, date, t, return_array=False):
topdir = datadir + '/' + date + '/'
gribfn = gribpref + t + precsuf
detfn = topdir + 'det/' + gribfn
detfobj = getfobj(detfn, fieldn='PREC_PERHOUR')
# Minus one hour
# gribfnm1 = gribpref + ddhhmmss(ddhhmmss_strtotime(t) -
# timedelta(hours = 1)) + precsuf
# detfnm1 = topdir + 'det/' + gribfnm1
# detfobjm1 = getfobj(detfnm1, fieldn = 'TOT_PREC')
# detfobj.data = detfobj.data - detfobjm1.data
if return_array:
return detfobj.data
else:
return detfobj
def load_det_cape_cin(datadir, date, t, return_array=False):
topdir = datadir + '/' + date + '/'
gribfn = gribpref + t + precsuf
detfn = topdir + 'det/' + gribfn
capefobj = getfobj(detfn, fieldn='CAPE_ML_S')
cinfobj = getfobj(detfn, fieldn='CIN_ML_S')
# Minus one hour
# gribfnm1 = gribpref + ddhhmmss(ddhhmmss_strtotime(t) -
# timedelta(hours = 1)) + precsuf
# detfnm1 = topdir + 'det/' + gribfnm1
# detfobjm1 = getfobj(detfnm1, fieldn = 'TOT_PREC')
# detfobj.data = detfobj.data - detfobjm1.data
if return_array:
return capefobj.data, cinfobj.data
else:
return capefobj, cinfobj
def load_radar(date, t='00000000', return_array=False):
dateobj = (yyyymmddhhmmss_strtotime(date) + ddhhmmss_strtotime(t))
radardt = timedelta(minutes=10) # TODO Is this correct???
radardateobj = dateobj - radardt
radarfn = radardir + radarpref + yymmddhhmm(radardateobj) + radarsufx
radarfobj = getfobj_ncdf(radarfn, fieldn='pr', dwdradar=True)
if return_array:
return radarfobj.data
else:
return radarfobj
def load_ens(datadir, date, t, return_array=False):
topdir = datadir + '/' + date + '/'
gribfn = gribpref + t + precsuf
ensfobjlist = getfobj_ens(topdir, 'sub', mems=nens, gribfn=gribfn,
dir_prefix='ens', fieldn='PREC_PERHOUR',
para=4)
if return_array:
return [fobj.data for fobj in ensfobjlist]
else:
return ensfobjlist
def load_det_da(datadir, t, return_array=False):
detfn = datadir + gribpref_da + t + precsuf_da + '.det'
detfobj = getfobj(detfn, fieldn='TOT_PREC_S')
if return_array:
return detfobj.data
else:
return detfobj
def load_ens_da(datadir, t, return_array=False):
gribfn = gribpref_da + t + precsuf_da
ensfobjlist = getfobj_ens(datadir, 'same', mems=nens_da, gribfn=gribfn,
fieldn='TOT_PREC_S', para=4)
if return_array:
return [fobj.data for fobj in ensfobjlist]
else:
return ensfobjlist
def strip_expid(expid):
return expid.replace('DA_', '').replace('_ens', '').replace('v2', ''). \
replace('_2JUN', '')
def set_plot(ax, title, args, hourlist_plot, adjust=True):
plt.sca(ax)
ax.set_xlabel('Time [UTC]')
ax.legend(loc=0, fontsize=8, frameon=False)
ymax = np.ceil(ax.get_ylim()[1] * 10) / 10.
ax.set_ylim(0, ymax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position(('outward', 3))
ax.spines['bottom'].set_position(('outward', 3))
try:
ax.set_xticks(range(args.hint + 1)[::6])
ax.set_xticklabels(hourlist_plot[::6])
ax.set_xlim(0, 24)
except AttributeError: # Must be DA plots
ax.set_xticks(range(24)[::6])
ax.set_xlim(0, 24)
ax.set_title(title)
if adjust:
plt.subplots_adjust(bottom=0.18, left=0.18, right=0.97)
def set_panel(ax, no_xlim=False):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position(('outward', 3))
ax.spines['bottom'].set_position(('outward', 3))
if not no_xlim:
ax.set_xticks(range(24)[::6])
ax.set_xlim(0, 24)
# ax.set_xlabel('Time [UTC]')
def compute_det_stats(nanradar, nandet, convradar, convdet):
"""
    Compute RMSE and FSS for the deterministic forecast
"""
rmse = np.sqrt(np.nanmean((convradar - convdet) ** 2))
fss01 = FSS(0.1, 21, nanradar, nandet, python_core=True)
fss10 = FSS(1.0, 21, nanradar, nandet, python_core=True)
return rmse, fss01, fss10
def compute_prec_hist(nanfield, bin_edges):
"""
Compute precipitation histograms.
"""
a = np.ravel(nanfield)
a = a[np.isfinite(a)]
hist = np.histogram(a, bin_edges)[0]
return hist
def compute_ens_stats(convradar, convfieldlist, ens_norm_type,
norm_thresh=0.1, bs_threshold=1.):
"""
convfieldlist dimensions = (ens, x, y)
Compute spread and rmse of ensemble with given normalization type.
0: no normalization
1: grid point normalization
2: domain normalization
"""
meanfield = np.mean(convfieldlist, axis=0)
ensradarmean = 0.5 * (convradar + meanfield)
if ens_norm_type == 0: # No normalization
spread = np.nanmean(np.std(convfieldlist, axis=0, ddof=1))
rmv = np.sqrt(np.nanmean(np.var(convfieldlist, axis=0, ddof=1)))
rmse = np.sqrt(np.nanmean((convradar - meanfield) ** 2))
mean = np.nanmean(meanfield)
mean_std = np.std(np.nanmean(convfieldlist, axis=(1, 2)), ddof=1)
bs = compute_bs(convradar, convfieldlist, threshold=bs_threshold)
results = (rmse, rmv, bs, mean, mean_std)
elif ens_norm_type == 1: # Grid point normalization
spread = np.nanmean((np.std(convfieldlist, axis=0, ddof=1) /
meanfield)[meanfield >= norm_thresh])
rmse = np.sqrt(np.nanmean(((convradar - meanfield) ** 2 /
ensradarmean ** 2)[ensradarmean >= 0.1]))
results = (spread, rmse)
elif ens_norm_type == 2: # Domain normalization
spread = (np.nanmean(np.std(convfieldlist, axis=0, ddof=1)
[meanfield >= 0.1]) /
np.nanmean(meanfield[meanfield >= 0.1]))
rmse = (np.sqrt(np.nanmean(((convradar - meanfield) ** 2)
[ensradarmean >= 0.1])) /
np.nanmean(ensradarmean[ensradarmean >= 0.1]))
results = (spread, rmse)
else:
        raise Exception('Wrong ens_norm_type')
return results
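# Illustrative usage sketch (not part of the original module): how
# compute_ens_stats can be called for the three normalization modes with tiny
# synthetic fields. Array shapes follow the docstring above: convfieldlist is
# (ens, x, y).
def _demo_compute_ens_stats():
    demo_radar = np.array([[0.0, 1.2], [2.5, 0.3]])
    demo_ens = np.array([[[0.1, 1.0], [2.0, 0.2]],
                         [[0.0, 1.5], [3.0, 0.4]]])
    rmse, rmv, bs, mean, mean_std = compute_ens_stats(demo_radar, demo_ens,
                                                      ens_norm_type=0)
    spread_gp, rmse_gp = compute_ens_stats(demo_radar, demo_ens,
                                           ens_norm_type=1)
    spread_dom, rmse_dom = compute_ens_stats(demo_radar, demo_ens,
                                             ens_norm_type=2)
    return rmse, spread_gp, spread_dom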
def compute_crps(obs, enslist):
"""
    Compute the sample CRPS gridpoint-wise and then average
"""
obs_flat = np.ravel(obs)
mask = np.isfinite(obs_flat)
obs_flat = obs_flat[mask]
ens_flat = np.array([np.ravel(e)[mask] for e in enslist])
crps = crps_sample(obs_flat, ens_flat)
return np.mean(crps)
def compute_bs(obs, enslist, threshold):
"""
Compute the Brier score for a given threshold.
"""
    prob_fc = np.mean((enslist > threshold), axis=0)
bin_obs = obs > threshold
bs = np.nanmean((prob_fc - bin_obs) ** 2)
return bs
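# Worked example (illustrative, not part of the original module) for compute_bs:
# with two members, a 2x2 field and a 1 mm/h threshold, the forecast probability
# is the member fraction above the threshold and the Brier score is the mean
# squared difference to the binary observation.
def _demo_compute_bs():
    demo_obs = np.array([[2., 0.], [0., 3.]])
    demo_ens = np.array([[[2., 0.], [0., 3.]],
                         [[2., 2.], [0., 3.]]])
    # prob_fc = [[1.0, 0.5], [0.0, 1.0]], bin_obs = [[1, 0], [0, 1]]
    # -> BS = mean([0, 0.25, 0, 0]) = 0.0625
    return compute_bs(demo_obs, demo_ens, threshold=1.)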
def make_fig_fc_ens(args, x, rd, title, it=None):
"""
Plot ensemble panels:
1. Mean precipitation with ensemble error bars
2. RMSE and RMV
3. BS
"""
aspect = 0.4
# Set up figure
fig, axes = plt.subplots(1, 3, figsize=(pw, aspect * pw))
# Loop over experiments
for ie, expid in enumerate(args.expid):
if ie == 0:
if it is None:
it = np.ones(rd[expid]['radar'].shape[0], dtype=bool)
axes[0].plot(x, rd[expid]['radar'][it], c='k', lw=2, label='Radar')
# Plot mean with error bars in first panel
axes[0].errorbar(x, rd[expid]['ensmean'][it], yerr=rd[expid]['ensmean_std'], c=cdict[expid],
label=expid)
# Plot RMSE and RMV in second plot
axes[1].plot(x, rd[expid]['ensrmse'][it], lw=1.5, c=cdict[expid])
axes[1].plot(x, rd[expid]['ensrmv'][it], lw=1.5, c=cdict[expid], ls='--')
        # Plot Brier Score in third panel
axes[2].plot(x, rd[expid]['ensbs'][it], lw=1.5, c=cdict[expid])
# Define labels
axes[0].set_title('Mean precip pm std')
axes[0].set_ylabel('[mm/h]')
axes[1].set_title('RMSE and RMV')
axes[1].set_ylabel('[mm/h]')
axes[2].set_title('Brier Score')
# Adjust spines and x-label
for ax in axes:
set_panel(ax)
axes[0].legend(loc=0, fontsize=5, frameon=False)
fig.suptitle(title, fontsize=10)
plt.tight_layout(rect=(0, 0, 1, 0.95))
return fig
def make_fig_fc_det_rmse(args, x, rd, title, it=None):
"""
Plot deterministic panels.
1. Mean precipitation
2. RMSE
3. FSS
"""
aspect = 0.4
# Set up figure
fig, axes = plt.subplots(1, 3, figsize=(pw, aspect * pw))
# Loop over experiments
for ie, expid in enumerate(args.expid):
if ie == 0:
if it is None:
it = np.ones(rd[expid]['radar'].shape[0], dtype=bool)
axes[0].plot(x, rd[expid]['radar'][it], c='k', lw=2, label='Radar')
axes[0].plot(x, rd[expid]['detmean'][it], c=cdict[expid], label=expid)
axes[1].plot(x, rd[expid]['detrmse'][it], c=cdict[expid])
axes[2].plot(x, rd[expid]['fss10'][it], c=cdict[expid])
# Define labels
axes[0].set_title('Mean precip')
axes[0].set_ylabel('[mm/h]')
axes[1].set_title('RMSE')
axes[1].set_ylabel('[mm/h]')
axes[2].set_title('FSS')
axes[2].set_ylabel('[1 mm/h, 60 km]')
# Adjust spines and x-label
for ax in axes:
set_panel(ax)
axes[0].legend(loc=0, fontsize=5, frameon=False)
fig.suptitle(title, fontsize=10)
plt.tight_layout(rect=(0, 0, 1, 0.95))
return fig
def make_fig_fc_det_fss(args, x, rd, title, it=None):
"""
Plot deterministic panels.
1. Mean precipitation
2. FSS 0.1
3. FSS 1.0
"""
aspect = 0.4
# Set up figure
fig, axes = plt.subplots(1, 3, figsize=(pw, aspect * pw))
# Loop over experiments
for ie, expid in enumerate(args.expid):
if ie == 0:
if it is None:
it = np.ones(rd[expid]['radar'].shape[0], dtype=bool)
axes[0].plot(x, rd[expid]['radar'][it], c='k', lw=2, label='Radar')
axes[0].plot(x, rd[expid]['detmean'][it], c=cdict[expid], label=expid)
axes[1].plot(x, rd[expid]['fss01'][it], c=cdict[expid])
axes[2].plot(x, rd[expid]['fss10'][it], c=cdict[expid])
# Define labels
axes[0].set_title('Mean precip')
axes[0].set_ylabel('[mm/h]')
axes[1].set_title('FSS')
axes[1].set_ylabel('[0.1 mm/h, 60 km]')
axes[2].set_title('FSS')
axes[2].set_ylabel('[1 mm/h, 60 km]')
# Adjust spines and x-label
for ax in axes:
set_panel(ax)
axes[0].legend(loc=0, fontsize=5, frameon=False)
fig.suptitle(title, fontsize=10)
plt.tight_layout(rect=(0, 0, 1, 0.95))
return fig
def make_fig_hist(args, rd, title, it=None):
"""
Plot precipitation histogram of different experiments.
"""
x = np.arange(bin_edges.shape[0] - 1)
fig, ax = plt.subplots(1, 1, figsize=(pw/2, pw/2))
for ie, expid in enumerate(args.expid):
if ie == 0:
if it is None:
it = np.ones(rd[expid]['radar_hist'].shape[0], dtype=bool)
ax.bar(x, rd[expid]['radar_hist'][it], 0.1, color='k',
label='Radar')
ax.bar(x + 0.1*(ie+1), rd[expid]['hist'][it], 0.1,
color=cdict[expid], label=expid)
ax.set_xticks(range(bin_edges.shape[0]))
ax.set_xticklabels(['%.1f'%i for i in list(bin_edges)])
ax.set_yscale('log')
ax.set_xlabel('mm/h')
ax.legend(loc=0, fontsize=5)
plt.title(title)
plt.tight_layout()
return fig
def make_timelist(date_start, date_stop, hours_inc):
"""
Args:
date_start: yyyymmddhhmmss time string
date_stop: yyyymmddhhmmss time string
        hours_inc: increment in hours
Returns:
timelist: List with datetime objects
"""
dt_start = yyyymmddhhmmss_to_dt(date_start)
dt_stop = yyyymmddhhmmss_to_dt(date_stop)
td_inc = timedelta(hours=hours_inc)
timelist = []
t = dt_start
while t <= dt_stop:
timelist.append(t)
t += td_inc
return timelist
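# Example usage (illustrative, not part of the original module; the dates are
# made up): a 6-hourly list of datetime objects between two yyyymmddhhmmss
# strings.
def _demo_make_timelist():
    # -> [2016-05-29 00:00, 06:00, 12:00, 18:00, 2016-05-30 00:00]
    return make_timelist('20160529000000', '20160530000000', 6)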
def yyyymmddhhmmss_to_dt(yyyymmddhhmmss):
f = '%Y%m%d%H%M%S'
return datetime.strptime(yyyymmddhhmmss, f)
def dt_to_yyyymmddhhmmss(dt):
f = '%Y%m%d%H%M%S'
return datetime.strftime(dt, f)
def handle_nans(radar_data, fc_data, radar_thresh, combine_masks=None):
"""Handle NaNs on a daily basis.
Args:
radar_data: Radar data array with dimensions [time, x, y]
fc_data: Forecast data array with dimensions [time, x, y] or
[time, ens, x, y]
        radar_thresh: Threshold; grid points whose daily maximum exceeds it are set to NaN.
        combine_masks: Optional second field; if given, its daily mask is combined with the radar mask.
Returns:
radar_data, fc_data: Same arrays with NaNs
"""
mask = np.max(radar_data, axis=0) > radar_thresh
if combine_masks is not None:
mask = np.logical_or(mask, np.max(combine_masks, axis=0) > radar_thresh)
radar_data[:, mask] = np.nan
if fc_data.ndim == 3:
missing = np.isnan(np.sum(fc_data, axis=(1, 2)))
fc_data[:, mask] = np.nan
else:
missing = np.isnan(np.sum(fc_data, axis=(1, 2, 3)))
fc_data[:, :, mask] = np.nan
# Check if forecast data is missing
if missing.sum() > 0:
radar_data[missing, :] = np.nan
fc_data[missing, :] = np.nan
return radar_data, fc_data
def upscale_fields(data, scale):
"""Upscale fields.
Args:
data: data array with dimensions [time, x, y] or [time, ens, x, y]
scale: Kernel size in grid points
Returns:
up_data: Upscaled data with same size
"""
assert scale % 2 == 1, 'Kernel size must be odd.'
if data.ndim == 3:
kernel = np.ones((1, scale, scale)) / float((scale * scale))
elif data.ndim == 4:
kernel = np.ones((1, 1, scale, scale)) / float((scale * scale))
else:
raise ValueError('Wrong shape of data array.')
data = convolve(data, kernel, mode='constant')
return data
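# Illustrative sketch (not part of the original module): upscaling a [time, x, y]
# field with a 3x3 kernel replaces each point by its zero-padded neighborhood
# average; the array shape is unchanged.
def _demo_upscale_fields():
    demo = np.zeros((1, 5, 5))
    demo[0, 2, 2] = 9.
    # The central 3x3 block becomes 1.0 (= 9 / 9); everything else stays 0.
    return upscale_fields(demo, scale=3)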
# New compute_*metric* functions
# These work with input of dimension [hour, x, y] or [hour, ens, x, y]
def compute_det_rmse(radar_data, fc_data):
"""Compute deterministic rmse
Args:
radar_data: Radar data
fc_data: forecast data
Returns:
rmse: Numpy array with dimensions [hour]
"""
rmse = np.sqrt(np.nanmean((radar_data - fc_data) ** 2, axis=(1, 2)))
return rmse
def compute_det_sal(radar_data, fc_data, sal_thresh):
"""Compute deterministic SAL
Args:
radar_data: Radar data
fc_data: forecast data
sal_thresh: threshold for object identification
Returns:
        sal: Numpy array with dimensions [3 (S, A, L), hour]
"""
s = []
a = []
l = []
for i in range(radar_data.shape[0]):
r = radar_data[i]
f = fc_data[i]
r[np.logical_or(np.isnan(r), r < 0)] = 0
f[np.logical_or(np.isnan(f), f < 0)] = 0
out = compute_SAL(r, f, sal_thresh)
s.append(out[0])
a.append(out[1])
l.append(out[2])
return np.array([s, a, l])
def compute_det_domain_mean(data):
"""Compute deterministic mean
Args:
data: data
Returns:
mean: Numpy array with dimensions [hour]
"""
mean = np.nanmean(data, axis=(1, 2))
return mean
def compute_det_domain_median(data):
"""Compute deterministic median
Args:
data: data
Returns:
median: Numpy array with dimensions [hour]
"""
median = np.nanmedian(data, axis=(1, 2))
return median
def compute_det_fss(radar_data, fc_data, fss_thresh, fss_size):
"""Compute deterministic rmse
Args:
radar_data: Radar data
fc_data: forecast data
fss_thresh : Threshold value in mm/h
fss_size: Neighborhood size in grid points
Returns:
        fss: Numpy array with dimensions [hour]
"""
l = []
for i in range(radar_data.shape[0]):
l.append(FSS(fss_thresh, fss_size, radar_data[i], fc_data[i],
python_core=True))
return np.array(l)
def compute_ens_crps(radar_data, fc_data):
"""Compute Ensemble CRPS
Args:
radar_data: Radar data [hour, x, y]
fc_data: forecast data of ensemble [hour, ens, x, y]
Returns:
        crps: Numpy array with dimensions [hour]
"""
l = []
for i in range(radar_data.shape[0]):
radar_flat = np.ravel(radar_data[i])
mask = np.isfinite(radar_flat)
radar_flat = radar_flat[mask]
ens_flat = np.reshape(fc_data[i], (fc_data[i].shape[0], -1))
ens_flat = ens_flat[:, mask]
l.append(np.mean(crps_sample(radar_flat, ens_flat)))
return np.array(l)
def compute_ens_rmse(radar_data, fc_data):
"""Compute RMSE of ensemble mean
Args:
radar_data: Radar data [hour, x, y]
fc_data: forecast data of ensemble [hour, ens, x, y]
Returns:
rmse: Numpy array with dimensions [hour]
"""
ens_mean = np.mean(fc_data, axis=1)
rmse = np.sqrt(np.nanmean((radar_data - ens_mean) ** 2, axis=(1, 2)))
return rmse
def compute_ens_rmv(fc_data):
"""Compute Ensemble root mean variance
Args:
fc_data: forecast data of ensemble [hour, ens, x, y]
Returns:
rmv: Numpy array with dimensions [hour]
"""
rmv = np.sqrt(np.nanmean(np.var(fc_data, axis=1, ddof=1), axis=(1,2)))
return rmv
def compute_ens_bs(radar_data, fc_data, bs_thresh, bs_size=1):
"""Compute Ensemble Brier Score
Args:
radar_data: Radar data [hour, x, y]
fc_data: forecast data of ensemble [hour, ens, x, y]
bs_thresh : Threshold value in mm/h
bs_size: If given upscale the probability fields
Returns:
bs: Numpy array with dimensions [hour]
"""
mask = np.isnan(radar_data)
prob_fc = np.mean((fc_data > bs_thresh), axis=1)
bin_obs = np.array(radar_data > bs_thresh, dtype=float)
prob_fc[mask] = np.nan
bin_obs[mask] = np.nan
if bs_size > 1:
prob_fc = upscale_fields(prob_fc, bs_size)
bin_obs = upscale_fields(bin_obs, bs_size)
bs = np.nanmean((prob_fc - bin_obs) ** 2, axis=(1,2))
return bs
def compute_det_prec_hist(data):
"""Compute deterministic preciitation histogram
Args:
data: data
Returns:
        hist: Numpy array with dimensions [hour, bin]
"""
l = []
for i in range(data.shape[0]):
d = np.ravel(data[i])
d = d[np.isfinite(d)]
l.append(np.histogram(d, bin_edges)[0])
return np.array(l)
# Panel plotting functions
def plot_line(plot_list, exp_ids, metric, title):
"""Plot line plot panel
Args:
plot_list: List of metrics [exp_id][time, metric_dim]
exp_ids: List with exp id names
metric: name of metric
title: title string
Returns:
fig: Figure object
"""
fig, ax = plt.subplots(1, 1, figsize=(0.5 * pw, 0.5 * pw))
x = np.arange(1, 25)
for ie, e in enumerate(exp_ids):
ax.plot(x, plot_list[ie], c=cdict[e], label=e)
ax.set_xlabel('Forecast lead time [h]')
split_metric = metric.split('-')
if len(split_metric) == 1:
ax.set_ylabel(metric_dict[metric]['ylabel'])
else:
yl = metric_dict[split_metric[0]]['ylabel'] % tuple(split_metric[1:])
ax.set_ylabel(yl)
ax.set_title(title)
ax.legend(loc=0, fontsize=6)
set_panel(ax)
plt.tight_layout()
return fig
def plot_sal(plot_list, exp_ids, metric, title):
"""Plot SAL plot
Args:
plot_list: List of metrics [exp_id][time, metric_dim]
exp_ids: List with exp id names
metric: name of metric
title: title string
Returns:
fig: Figure object
"""
fig, ax = plt.subplots(1, 1, figsize=(0.5 * pw, 0.5 * pw))
x = np.arange(1, 25)
for ie, e in enumerate(exp_ids):
ax.plot(x, plot_list[ie][0], c=cdict[e], label=e)
ax.plot(x, plot_list[ie][1], c=cdict[e], linestyle='--')
ax.plot(x, plot_list[ie][2], c=cdict[e], linestyle=':')
ax.axhline(0, c='gray', zorder=0.1)
ax.set_xlabel('Forecast lead time [h]')
split_metric = metric.split('-')
if len(split_metric) == 1:
ax.set_ylabel(metric_dict[metric]['ylabel'])
else:
yl = metric_dict[split_metric[0]]['ylabel'] % tuple(split_metric[1:])
ax.set_ylabel(yl)
ax.set_title(title)
ax.legend(loc=0, fontsize=6)
set_panel(ax)
plt.tight_layout()
return fig
def plot_hist(plot_list, exp_ids, metric, title, normalize=False):
"""Plot histogram panel.
At the moment the first bin containing all the 0s is ignored.
Args:
plot_list: List of metrics [exp_id][time, metric_dim]
exp_ids: List with exp id names
metric: name of metric
title: title string
Returns:
fig: Figure object
"""
fig, ax = plt.subplots(1, 1, figsize=(0.5 * pw, 0.5 * pw))
x = np.arange(bin_edges[1:].shape[0] - 1)
for ie, e in enumerate(exp_ids):
p = np.mean(plot_list[ie], axis=0)[1:]
if normalize:
p = p / np.sum(p)
ax.bar(x + 0.1 * ie, p, 0.1, color=cdict[e], label=e)
ax.set_xticks(range(bin_edges[1:].shape[0]))
ax.set_xticklabels(['%.1f' % i for i in list(bin_edges[1:])],
fontsize=5)
ax.set_xlabel('mm/h')
ax.set_ylabel(metric_dict[metric]['ylabel'])
ax.set_title(title)
ax.legend(loc=0, fontsize=6)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position(('outward', 3))
ax.spines['bottom'].set_position(('outward', 3))
plt.tight_layout()
return fig
def plot_synop(plot_list, exp_ids, title, ylabel):
"""Plot SYNOP plot
Args:
plot_list: List of metrics [exp_id][time, metric_dim]
exp_ids: List with exp id names
title: title string
ylabel: y label
Returns:
fig: Figure object
"""
fig, ax = plt.subplots(1, 1, figsize=(0.5 * pw, 0.5 * pw))
x = np.arange(2, 25)
for ie, e in enumerate(exp_ids):
ax.plot(x, plot_list[ie][0], c=cdict[e], label=e)
ax.plot(x, plot_list[ie][1], c=cdict[e], linestyle='--')
ax.axhline(0, c='gray', zorder=0.1)
ax.set_xlabel('Forecast lead time [h]')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.legend(loc=0, fontsize=6)
set_panel(ax)
plt.tight_layout()
return fig
def plot_air(plot_list, exp_ids, title, ylabel, obs):
"""Plot TEMP/AIREP plot
Args:
plot_list: List of metrics [exp_id][height, metric_dim]
exp_ids: List with exp id names
title: title string
ylabel: y label
obs: TEMP or AIREP
Returns:
fig: Figure object
"""
fig, ax = plt.subplots(1, 1, figsize=(0.5 * pw, 0.5 * pw))
b = temp_bin_edges / 100. if obs == 'TEMP' else airep_bin_edges
z = np.mean([b[1:], b[:-1]], axis=0)
for ie, e in enumerate(exp_ids):
ax.plot(plot_list[ie][0], z, c=cdict[e], label=e)
ax.plot(plot_list[ie][1], z, c=cdict[e], linestyle='--')
ax.axvline(0, c='gray', zorder=0.1)
ax.set_xlabel(ylabel)
if obs == 'TEMP':
ax.set_ylabel('hPa')
ax.invert_yaxis()
else:
ax.set_ylabel('m')
ax.set_title(title)
ax.legend(loc=0, fontsize=6)
set_panel(ax, no_xlim=True)
plt.tight_layout()
return fig
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import numpy as np
import pandas as pd
import jieba as jb
import json
import word
exclude_re = re.compile(u"[,,【】<>{};??'\"]")
filepath = os.path.split(os.path.realpath(__file__))[0]
jb.load_userdict("{}/fenci.txt".format(filepath))
def save_file(data, fpath):
with open(fpath, "w") as pf:
json.dump(data, pf)
def load_task():
trainfile = "{}/data/pattern.csv".format(filepath)
traindf = pd.read_csv(trainfile, encoding="utf_8")
train = []
test = []
for index, row in traindf.iterrows():
train.append([row["question"], row["category"]])
return train, test
def cut2list(string):
result = []
string = exclude_re.sub("", string)
cutgen = jb.cut(string)
for i in cutgen:
if i != " ":
result.append(i)
return result
def vectorize_data(data):
wordaip = word.Word()
Q = []
C = []
Ans = []
maxsize = 0
for question, _ in data:
size = len(cut2list(question))
if size > maxsize:
maxsize = size
sentence_size = maxsize
for question, category in data:
quesl = cut2list(question)
lq = max(0, sentence_size - len(quesl))
q = []
for w in quesl:
vecjs = wordaip.word_vec(w)
vec = json.loads(vecjs)
while isinstance(vec, unicode):
vec = json.loads(vec)
q.append(vec)
q.extend([[0]*len(q[0])]*lq)
Q.append(q)
if category not in Ans:
Ans.append(category)
answer_size = len(Ans)
for _, category in data:
y = np.zeros(answer_size)
index = Ans.index(category)
y[index] = 1
C.append(y)
save_file(Ans, "./data/ans.json")
return np.array(Q), np.array(C), answer_size, sentence_size
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 16:08:50 2020
@author: ashwi
"""
from pyntcloud import PyntCloud
import numpy as np
import open3d as o3d
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
bin_pcd = np.fromfile('Mention the path/.bin', dtype=np.float32)
# Reshape and drop reflection values
points = bin_pcd.reshape((-1, 4))[:, 0:3]
# Convert to Open3D point cloud
o3d_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
# Save to whatever format you like
o3d.io.write_point_cloud('Mention the path/.ply', o3d_pcd)
cloud = PyntCloud.from_file('Path of the converted ply file')
cloud.points.keys()
x_points = []
y_points = []
z_points = []
x = cloud.points['x']
y = cloud.points['y']
z = cloud.points['z']
# For removal of unwanted points
"""
for i in range(len(x)):
#if z[i] > -0.5 and z[i] < 0.5:
info = x[i], y[i], z[i]
data.append(info)
"""
for i in range(len(x)):
#if z[i] > -2.5 and z[i] < 1.5:
x_points.append(x[i])
y_points.append(y[i])
z_points.append(z[i])
data = (x_points, y_points, z_points)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # the Axes3D import above registers the 3d projection
ax.scatter(data[0], data[1], data[2], s=3)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax.view_init(azim=200)
plt.show()
|
class Solution(object):
    def findTheDifference(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: str
        """
        # t is s with exactly one extra character; return the character whose
        # count in t exceeds its count in s.
        for char in set(t):
            if t.count(char) > s.count(char):
                return char
|
# Regular -- Parse tree node strategy for printing regular lists
import sys
from Special import Special
from Tree import *
class Regular(Special):
# TODO: Add fields and modify the constructor as needed.
def __init__(self):
pass
def print(self, t, n, p):
# TODO: Implement this function.
if p == False:
sys.stdout.write("(")
#Nil.getInstance()
t.getCar().print(0)
if not t.getCdr().isNull():
sys.stdout.write(" ")
t.getCdr().print(0, True)
|
# General-purpose decorator (without arguments): wraps the target function in an outer layer
def w1(func):
def func_in(*args,**kwargs):
return func(*args,**kwargs)
return func_in
@w1
def fun1(a,b):
print("%s-----%s"%(a,b))
# @w1
# def fun1(*args,**kwargs):
# print("%s-----%s"%(str(args),str(kwargs)))
fun1(44,55)
# General-purpose decorator (with an argument): wraps the target function in an outer layer
def w2(pre_arg):
def w1(func):
def func_in(*args,**kwargs):
print(pre_arg)
return func(*args,**kwargs)
return func_in
return w1
@w2("hahaha") #通过这个参数,可以让装饰器做不同的事情
def fun2(a,b):
print("%s-----%s"%(a,b))
fun2(55,66)
"""
变量的作用域遵循LEGB原则
L:local局部变量
E:enclosing闭包中常见
G:global全局变量
B:builtins系统内嵌
"""
|
import unittest
from pracownik import Pracownik
class testPracownik(unittest.TestCase):
pracownik = Pracownik("Jan", "Kowalski", "Nauczyciel stażysta", 2000)
def test_zwykly_awans(self):
self.pracownik.zwykly_awans()
self.assertEqual(self.pracownik.pensja, 2000*1.2)
def test_degradacja_kierownicza(self):
self.pracownik.degradacja_kierownicza()
self.assertEqual(self.pracownik.pensja, 2000*0.8)
|
#!/usr/bin/env python
"""Upload a FreeSurfer directory structure as RDF to a SPARQL triplestore
"""
# standard library
from datetime import datetime as dt
import hashlib
import os
import pwd
from socket import getfqdn
import uuid
from utils import (prov, foaf, dcterms, fs, nidm, niiri, obo, nif, crypto,
hash_infile, get_id)
# map FreeSurfer filename parts
fs_file_map = [('T1', [nif["nlx_inv_20090243"]]), # 3d T1 weighted scan
('lh', [(nidm["AnatomicalAnnotation"], obo["UBERON_0002812"])]), # left cerebral hemisphere
('rh', [(nidm["AnatomicalAnnotation"], obo["UBERON_0002813"])]), # right cerebral hemisphere
('BA.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0013529"])]), # Brodmann area
('BA1.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006099"])]), # Brodmann area 1
('BA2.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0013533"])]), # Brodmann area 2
('BA3a.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006100"]), # Brodmann area 3a
(nidm["AnatomicalAnnotation"], obo["FMA_74532"])]), # anterior
('BA3b.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006100"]), # Brodmann area 3b
(nidm["AnatomicalAnnotation"], obo["FMA_74533"])]), # posterior
('BA44.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006481"])]), # Brodmann area 44
('BA45.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006482"])]), # Brodmann area 45
('BA4a.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0013535"]), # Brodmann area 4a
(nidm["AnatomicalAnnotation"], obo["FMA_74532"])]), # anterior
('BA4p.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0013535"]), # Brodmann area 4p
(nidm["AnatomicalAnnotation"], obo["FMA_74533"])]), # posterior
('BA6.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006472"])]), # Brodmann area 6
('V1.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0002436"])]),
('V2.', [(nidm["AnatomicalAnnotation"], obo["UBERON_0006473"])]),
('MT', [(nidm["AnatomicalAnnotation"], fs["MT_area"])]),
('entorhinal', [(nidm["AnatomicalAnnotation"], obo["UBERON_0002728"])]),
('exvivo', [(nidm["AnnotationSource"], fs["exvivo"])]),
('label', [(fs["FileType"], fs["label_file"])]),
('annot', [(fs["FileType"], fs["annotation_file"])]),
('cortex', [(nidm["AnatomicalAnnotation"], obo["UBERON_0000956"])]),
('.stats', [(fs["FileType"], fs["statistic_file"])]),
('aparc.annot', [(nidm["AtlasName"], fs["default_parcellation"])]),
('aparc.a2009s', [(nidm["AtlasName"], fs["a2009s_parcellation"])]),
('.ctab', [(fs["FileType"], fs["color_table"])])
]
# files or directories that should be ignored
ignore_list = ['bak', 'src', 'tmp', 'trash', 'touch']
def create_entity(graph, fs_subject_id, filepath, hostname):
""" Create a PROV entity for a file in a FreeSurfer directory
"""
# identify FreeSurfer terms based on directory and file names
_, filename = os.path.split(filepath)
relpath = filepath.split(fs_subject_id)[1].lstrip(os.path.sep)
fstypes = relpath.split('/')[:-1]
additional_types = relpath.split('/')[-1].split('.')
file_md5_hash = hash_infile(filepath, crypto=hashlib.md5)
file_sha512_hash = hash_infile(filepath, crypto=hashlib.sha512)
if file_md5_hash is None:
print('Empty file: %s' % filepath)
#url = "file://%s%s" % (hostname, filepath)
url = filepath
url_get = prov.URIRef("http://localhost/file?file=%s" % filepath)
url_get = prov.URIRef("file://%s" % filepath)
obj_attr = [(prov.PROV["label"], filename),
(fs["relative_path"], "%s" % relpath),
(prov.PROV["location"], url_get),
(crypto["md5"], "%s" % file_md5_hash),
(crypto["sha"], "%s" % file_sha512_hash)
]
for key in fstypes:
obj_attr.append((nidm["tag"], key))
for key in additional_types:
obj_attr.append((nidm["tag"], key))
for key, uris in fs_file_map:
if key in filename:
if key.rstrip('.').lstrip('.') not in fstypes + additional_types:
obj_attr.append((nidm["tag"], key.rstrip('.').lstrip('.')))
for uri in uris:
if isinstance(uri, tuple):
obj_attr.append((uri[0], uri[1]))
else:
obj_attr.append((prov.PROV["type"], uri))
id = get_id()
return graph.entity(id, obj_attr)
def encode_fs_directory(g, basedir, subject_id, hostname=None,
n_items=100000):
""" Convert a FreeSurfer directory to a PROV graph
"""
# directory collection/catalog
collection_hash = get_id()
fsdir_collection = g.collection(collection_hash)
fsdir_collection.add_extra_attributes({prov.PROV['type']: fs['subject_directory'],
fs['subject_id']: subject_id})
directory_id = g.entity(get_id())
if hostname == None:
hostname = getfqdn()
url = "file://%s%s" % (hostname, os.path.abspath(basedir))
directory_id.add_extra_attributes({prov.PROV['location']: prov.URIRef(url)})
g.wasDerivedFrom(fsdir_collection, directory_id)
a0 = g.activity(get_id(), startTime=dt.isoformat(dt.utcnow()))
user_agent = g.agent(get_id(),
{prov.PROV["type"]: prov.PROV["Person"],
prov.PROV["label"]: pwd.getpwuid(os.geteuid()).pw_name,
foaf["name"]: pwd.getpwuid(os.geteuid()).pw_name})
g.wasAssociatedWith(a0, user_agent, None, None,
{prov.PROV["Role"]: nidm["LoggedInUser"]})
g.wasGeneratedBy(fsdir_collection, a0)
i = 0
for dirpath, dirnames, filenames in os.walk(os.path.realpath(basedir)):
for filename in sorted(filenames):
if filename.startswith('.'):
continue
i += 1
if i > n_items:
break
file2encode = os.path.realpath(os.path.join(dirpath, filename))
if not os.path.isfile(file2encode):
print "%s not a file" % file2encode
continue
ignore_key_found = False
for key in ignore_list:
if key in file2encode:
ignore_key_found = True
continue
if ignore_key_found:
continue
try:
entity = create_entity(g, subject_id, file2encode, hostname)
g.hadMember(fsdir_collection, entity.get_identifier())
except IOError, e:
print e
return g
def to_graph(subject_specific_dir):
# location of FreeSurfer $SUBJECTS_DIR
basedir = os.path.abspath(subject_specific_dir)
subject_id = basedir.rstrip(os.path.sep).split(os.path.sep)[-1]
# location of the ProvToolBox commandline conversion utility
graph = prov.ProvBundle(identifier=get_id())
graph.add_namespace(foaf)
graph.add_namespace(dcterms)
graph.add_namespace(fs)
graph.add_namespace(nidm)
graph.add_namespace(niiri)
graph.add_namespace(obo)
graph.add_namespace(nif)
graph.add_namespace(crypto)
graph = encode_fs_directory(graph, basedir, subject_id)
return graph
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(prog='fs_dir_to_graph.py',
description=__doc__)
parser.add_argument('-s', '--subject_dir', type=str, required=True,
help='Path to subject directory to upload')
parser.add_argument('-p', '--project_id', type=str, required=True,
help='Project tag to use for the subject directory.')
parser.add_argument('-e', '--endpoint', type=str,
help='SPARQL endpoint to use for update')
parser.add_argument('-g', '--graph_iri', type=str,
help='Graph IRI to store the triples')
parser.add_argument('-o', '--output_dir', type=str,
help='Output directory')
parser.add_argument('-n', '--hostname', type=str,
help='Hostname for file url')
args = parser.parse_args()
if args.output_dir is None:
args.output_dir = os.getcwd()
    graph = to_graph(args.subject_dir)
|
import json
import logging
from aws import helper
from aws.helper import DeveloperMode
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@DeveloperMode(True)
def lambda_handler(event, context):
input_json = json.loads(event["body"])
if not "refresh_token" in input_json:
return helper.build_response({"message": "Refesh token is required."}, 403)
if not "client_id" in input_json:
return helper.build_response({"message": "Client ID is required."}, 403)
refreshToken = input_json["refresh_token"]
client_id = input_json["client_id"]
username = None
if "username" in input_json:
username = input_json["username"]
resp, msg = helper.refresh_auth(
username=username, refreshToken=refreshToken, client_id=client_id
)
# error with cognito if msg is not None
    if msg is not None:
logging.info(msg)
return helper.build_response({"message": msg}, 403)
if resp.get("AuthenticationResult"):
res = resp["AuthenticationResult"]
return helper.build_response(
{
"id_token": res["IdToken"],
"access_token": res["AccessToken"],
"expires_in": res["ExpiresIn"],
"token_type": res["TokenType"],
},
200,
)
else: # this code block is relevant only when MFA is enabled
return helper.build_response(
{"error": True, "success": False, "data": None, "message": None}, 200
)
|
# Modified tissot function to take custom globe and draw multiple spots at once
# Also, adds spots to given axis and returns matplotlib paths
def tissot(rads_km=None, lons=None, lats=None, n_samples=80, globe=None, ax=None, draw=False, **kwargs):
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as feature
import shapely.geometry as sgeom
from cartopy import geodesic
    import cartopy.mpl.patch as cpatch
    import matplotlib.pyplot as plt  # needed below when draw=True and no axis is supplied
geod = geodesic.Geodesic(radius=globe.semimajor_axis, flattening=globe.flattening)
geoms = []
rads_km = np.asarray(rads_km)
lons = np.asarray(lons)
lats = np.asarray(lats)
for i in range(len(lons)):
circle = geod.circle(lons[i], lats[i], rads_km[i],
n_samples=n_samples)
geoms.append(sgeom.Polygon(circle))
polys = cpatch.geos_to_path(geoms)
if draw:
        if ax is None: ax = plt.gca()
f = feature.ShapelyFeature(geoms, ccrs.Geodetic(globe=globe),
**kwargs)
ax.add_feature(f)
return polys
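# Illustrative usage sketch (not part of the original code): build two circles
# on a custom globe without drawing them. The globe parameters and radii are
# made-up demo values; the radius values are forwarded unchanged to cartopy's
# geodesic circle.
def _demo_tissot():
    import cartopy.crs as ccrs
    demo_globe = ccrs.Globe(semimajor_axis=3396190.0, flattening=0.00589)
    return tissot(rads_km=[200e3, 500e3], lons=[0, 45], lats=[10, -20],
                  n_samples=40, globe=demo_globe, draw=False)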
def splitPoly(points, edge, **kwargs):
import numpy as np
import matplotlib.patches as patches
if (np.all(points[:,0] >= 0) and np.all(points[:,0] <= edge)) or (np.all(points[:,0] <= 0) and np.all(points[:,0] >= -edge)):
# All points between 0 and 180 or between -180 and 0
poly = patches.Polygon(points,closed=True, **kwargs)
return [poly]
else:
mask = np.ma.masked_inside(points[:,0],0,edge).mask
mask2 = np.ma.masked_inside(points[:,0],-edge,0).mask
vs1 = points[mask]
vs2 = points[mask2]
startx_pts1 = vs1[0,0]
startx_pts2 = vs2[0,0]
if startx_pts1 - startx_pts2 < edge:
poly = patches.Polygon(points,closed=True, **kwargs)
return [poly]
else:
poly1 = patches.Polygon(vs1,closed=True, **kwargs)
poly2 = patches.Polygon(vs2,closed=True, **kwargs)
return [poly1, poly2]
# Check if a value is inside an interval
def checkInBetween(val, interval):
if interval[0] > interval[1]:
return interval[1] <= val <= interval[0]
else:
return interval[0] <= val <= interval[1]
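# Small illustrative checks (not part of the original code): checkInBetween
# accepts the interval endpoints in either order.
def _demo_checkInBetween():
    assert checkInBetween(5, (0, 10))       # inside
    assert checkInBetween(5, (10, 0))       # inside, reversed interval
    assert not checkInBetween(-1, (0, 10))  # outside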
|
# interp_runge.py
from numpy import *
from pylab import plot, show, xlabel, ylabel, title, subplot
from poly import newtonCoeffs,evalPoly
def runge(x):
return 1.0/(1.0+25.0*x*x)
numnodes = int(input("Number of evenly spaced nodes: "))
x = linspace(-1.,1.,numnodes,endpoint=True)
y = runge(x)
c = newtonCoeffs(x,y)
xx = linspace(x[0],x[-1],2000)
poly = evalPoly(x,c,xx)
truerunge = runge(xx)
plot( x, y, 'bo')
plot( xx,poly , 'm')
plot( xx,truerunge, 'k')
xlabel('x')
title('Polynomial interpolation (magenta) of runge (black)')
show()
|
"""
Generate training data for Fast R-CNN
"""
import numpy as np
from config import Config
from utils.bbox_overlaps import bbox_overlaps
from utils.bbox_transform import bbox_transform
config = Config()
def proposal_target_layer(rpn_rois, gt_boxes, classes_num):
rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = \
_proposal_target_layer_py(rpn_rois, gt_boxes, classes_num)
bbox_targets = np.concatenate([bbox_targets, bbox_inside_weights, bbox_outside_weights], axis=-1)
labels = np.expand_dims(labels, axis=0)
bbox_targets = np.expand_dims(bbox_targets, axis=0)
return rois, labels, bbox_targets
def _proposal_target_layer_py(rpn_rois, gt_boxes, classes_num):
all_rois = rpn_rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
# Include ground-truth boxes in the set of candidate rois
    # (the ground truth is added to the proposals used for training)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :4])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
    # Set the number of positive/negative samples per image
num_images = 1
rois_per_image = config.train_batch_size // num_images
# 128 * 0.25 = 32
fg_rois_per_image = np.round(config.train_fg_fraction * rois_per_image).astype(np.int32)
# Sample rois with classification labels and bounding box regression
# targets
    # Generate the training labels and bounding-box regression targets
labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, classes_num)
rois = rois.reshape(-1, 5)
labels = labels.reshape(-1, 1)
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_inside_weights = bbox_inside_weights.reshape(-1, 4)
    # Weights for positive/negative samples
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):
"""
Generate a random sample of RoIs comprising foreground and background
examples.
"""
    # Put the arrays into contiguous memory and compute the IoU overlaps
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
    # Assign each RoI the class of the ground-truth box with which it has the highest IoU
labels = gt_boxes[gt_assignment, 4]
    # Set the numbers of positive and negative samples
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= config.train_fg_thresh)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
        # Random sampling without replacement
fg_inds = np.random.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < config.train_bg_thresh_hi) &
(max_overlaps >= config.train_bg_thresh_lo))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = np.random.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
    # The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
    # labels has size 128
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
    # The first 32 are positive samples, the rest are negatives; 0 means background
labels[fg_rois_per_this_image:] = 0
    # 128 RoIs in total
rois = all_rois[keep_inds]
    # Transform the proposals with the bounding-box regression formulas
bbox_target_data = _compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
    # Generate the training data for box regression:
    # expand N x 5 -> N x 4K (K is the number of classes)
bbox_targets, bbox_inside_weights = \
_get_bbox_regression_labels(bbox_target_data, num_classes)
return labels, rois, bbox_targets, bbox_inside_weights
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
bbox_targets[ind, :] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, :] = (1, 1, 1, 1)
return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if config.train_bbox_normalize_targets_precomputed:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(config.train_bbox_normalize_means))
/ np.array(config.train_bbox_normalize_stds))
    # Prepend the class label as the first column
return np.hstack((labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
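# Illustrative usage sketch (not part of the original module), showing the
# expected input shapes: rpn_rois is (N, 5) with a leading batch index of 0,
# gt_boxes is (M, 5) as [x1, y1, x2, y2, class]. The boxes below are made up;
# real proposals come from the RPN, and the sampling depends on the config.
def _demo_proposal_target_layer(classes_num=21):
    demo_rois = np.array([[0, 10, 10, 60, 60],
                          [0, 30, 30, 90, 90]], dtype=np.float32)
    demo_gt = np.array([[12, 12, 58, 58, 3]], dtype=np.float32)
    # labels and bbox_targets come back with a leading batch dimension added
    rois, labels, bbox_targets = proposal_target_layer(demo_rois, demo_gt,
                                                       classes_num)
    return rois, labels, bbox_targets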
|
#!/home/epicardi/bin/python27/bin/python
# Copyright (c) 2013-2014 Ernesto Picardi <ernesto.picardi@uniba.it>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, getopt, time
try: import pysam
except: sys.exit('Pysam module not found.')
#pid=str(os.getpid()+random.randint(0,999999999))
def usage():
print """
USAGE: python SearchInTable.py [options]
Options:
-i Sorted table file (first col=reference; second col=coordinate 1 based)
or tabix indexed table (ending with .gz)
-q Query (file or single positions: chr21:123456)
-C Sequence name column [1]
-S Start column [2]
-E End column; can be identical to '-S' [2]
-P Print to stdout found lines
-p Print position header (like a fasta header >chr21:123456)
-n Print "Not found"
-s Print simple statistics on standard error
-k Skip lines in the query file that start with the given string
-o Save found/not found positions on file
-h Print this help
"""
#-k skip first INT lines [0]
try:
opts, args = getopt.getopt(sys.argv[1:], "i:q:k:pso:hnC:S:E:O:P",["help"])
if len(opts)==0:
usage()
sys.exit(2)
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
tablefile=''
query=''
outfile=''
outfile2=''
pr,prn,prf=0,0,0
ps=0
sv,sv2=0,0
sk=0
ski=''
skil=0
scol,stcol,ecol=0,1,1
for o, a in opts:
if o in ("-h","--help"):
usage()
sys.exit()
elif o == "-i":
tablefile=a
if not os.path.exists(tablefile):
usage()
sys.exit('Table file not found')
elif o == "-q":
query=a
if query=='':
usage()
sys.exit('Query not found.')
elif o == "-p": pr=1
elif o == "-C": scol=int(a)-1
elif o == "-S": stcol=int(a)-1
elif o == "-E": ecol=int(a)-1
elif o == "-n": prn=1
elif o == "-P": prf=1
elif o == "-k":
ski=a
skil=1
elif o == "-s": ps=1
elif o == "-o":
outfile=a
sv=1
elif o == "-O":
outfile2=a
sv2=1
else:
assert False, "Unhandled Option"
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stdout.write("Script time --> START: %s\n"%(script_time))
if not os.path.exists(tablefile):
#sys.stderr.write('Compressing table file.\n')
#pysam.tabix_index(tablefile, tablefile+'.gz')
sys.stderr.write('Indexing table file.\n')
tablefile=pysam.tabix_index(tablefile, seq_col=scol, start_col=stcol, end_col=ecol)
#if tablefile.endswith('.gz') and not tablefile.endswith('.tbi'):
# tablefile=pysam.tabix_index(tablefile, seq_col=scol, start_col=stcol, end_col=ecol)
tabix=pysam.Tabixfile(tablefile)
allref=tabix.contigs
positions=[]
if os.path.exists(query):
f=open(query)
for i in f:
if i.strip()=='': continue
if i.startswith('#'): continue
if i.startswith('Region'): continue
if skil:
if i.startswith(ski): continue
l=(i.strip()).split()
positions.append((l[0],int(l[1])-1))
f.close()
elif query.count(":")==1:
l=(query.strip()).split(':')
positions.append((l[0],int(l[1])-1))
else: sys.exit('I cannot read the query.')
if sv:
outf=open(outfile+'_found','w')
outnf=open(outfile+'_notfound','w')
if sv2:
outf2=open(outfile2+'_foundInSortedTable','w')
xx=0
for pos in positions:
res=[]
if pos[0] in allref:
res=[kk for kk in tabix.fetch(reference=pos[0],start=pos[1],end=pos[1]+1)]
if pr: sys.stdout.write('>%s:%i\n' %(pos[0],pos[1]+1))
if len(res)==0:
if prn: sys.stdout.write('Not Found\n')
if sv: outnf.write('%s\t%i\n' %(pos[0],pos[1]+1))
else:
#if sv: outf.write(res[0]+'\n')
if sv: outf.write(res[0]+'\n')
if prf: sys.stdout.write(res[0]+'\n')
xx+=1
tabix.close()
if sv:
outf.close()
outnf.close()
if ps:
sys.stdout.write('Positions in query: %i\n' %(len(positions)))
sys.stdout.write('Positions found: %i\n' %(xx))
sys.stdout.write('Positions not found: %i\n' %(len(positions)-xx))
if sv:
sys.stdout.write('Found line(s) saved on: %s\n' %(outfile+'_found'))
sys.stdout.write('Not found line(s) saved on: %s\n' %(outfile+'_notfound'))
script_time=time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))
sys.stdout.write("Script time --> END: %s\n"%(script_time))
|
f = float(input("Enter a Decimal number: "))
x = float(input("Enter a Decimal number: "))
y = float(input("Enter a Decimal number: "))
print("Sum is: " + str(f + x + y))
print( 16 - 2 * 5 // 3 + 1)
print(2 ** 2 ** 3 * 3)
|
from networkz.algorithms.link_analysis.pagerank_alg import *
from networkz.algorithms.link_analysis.hits_alg import *
|
import time
import numpy as np
from gatheringData import create_splitted_file
def automate(sleep=500,start=0,end=1000,interval=100):
new_start=start
if new_start+interval <= end:
new_end=new_start+interval
else:
new_end=end
for i in range(0,(end-start)//interval):
create_splitted_file(queries_file="SearchQueries_all_extracted.txt", splitted_file='Dataset_Splitted_datas.csv',
range_start=new_start,
range_end=new_end,
google_search='y',
write_columns=False,
file_write_type='a')
timer=np.random.randint(sleep-30,sleep+30)
print("sleep for "+str(timer) + " seconds.")
time.sleep(timer)
new_start=new_end+1
if new_start+interval-1 <= end:
new_end=new_start+interval-1
else:
new_end=end
automate(500,6400,7400)
|
#!/usr/bin/python
class Node:
def __init__(self,data=None,nextNode=None):
self.data = data
self.nextNode = nextNode
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_at_head(self,i):
n = Node()
n.data = i
n.nextNode = None
if(self.head==None):
self.head = n
self.tail = n
else:
n.nextNode = self.head
self.head = n
def delete_at_head(self):
x = self.head.data
self.head = self.head.nextNode
if(self.head==None):
self.tail = None
return x
def insert_at_tail(self,i):
n = Node()
n.data = i
        n.nextNode = None
if (self.head==None):
self.head = n
self.tail = n
elif (self.head == self.tail):
            self.head.nextNode = n
self.tail = n
else:
self.tail.nextNode = n
self.tail = n
class Queue:
def __init__(self):
self.head = None
self.tail= None
self.N = 0
def put(self,i):
Nnew = Node()
Nnew.data = i
# null list
if (self.head == None):
self.head = Nnew
self.tail = Nnew
# one node
elif (self.head==self.tail):
self.head.nextNode = Nnew
self.tail = Nnew
# all other cases
else:
self.tail.nextNode = Nnew
self.tail = Nnew
self.N = self.N + 1
def get(self):
if(self.head == None):
raise Exception("Error: Empty Stack")
i = self.head.data
self.head = self.head.nextNode
self.N = self.N - 1
if self.N == 0:
self.head = None
self.tail = None
return i
def front(self):
if(self.head == None):
raise Exception("Error: Empty Stack")
i = self.head.data
return i
def size(self):
sizeQ = self.N
return sizeQ
def isEmpty(self):
if self.N == 0:
return True
else:
return False
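# Example usage (illustrative, not part of the original file): the Queue built
# on the linked nodes above is FIFO -- items come out in the order they were put.
def _demo_queue():
    q = Queue()
    for value in (1, 2, 3):
        q.put(value)
    first = q.get()            # 1
    current_front = q.front()  # 2 (peek without removing)
    remaining = q.size()       # 2
    return first, current_front, remaining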
|
import logging
import time
import sys
import lib.hwinfo.gpu as gpu
from lib.utils import *
logger = logging.getLogger(__name__)
__lastLoadedModel = ''
def loadModel(modelName, limit_memory_percentage = 85):
logger.debug('enter loadModel')
def checkUsageGpuMemory():
gpuUsage = gpu.gpuUsage()
if limit_memory_percentage < gpuUsage.memory:
logger.fatal("not enough memory %f" % gpuUsage.memory)
sys.exit()
global __lastLoadedModel
if __lastLoadedModel == modelName:
logger.info('Model Cached')
else:
with RedisLock('test'):
logger.info('Model Loading %r' % modelName)
__lastLoadedModel = modelName
checkUsageGpuMemory()
time.sleep(5)
def run(job):
logger.info('Received %r' % job)
time.sleep(1)
return True
|
#=========================================================================
# pisa_lui_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits, sext, zext
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
return """
lui r1, 0x0001
nop
nop
nop
nop
nop
nop
nop
nop
mtc0 r1, proc2mngr > 0x00010000
nop
nop
nop
nop
nop
nop
nop
nop
"""
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
return [
gen_imm_dest_byp_test( 5, "lui", 0x0001, 0x00010000 ),
gen_imm_dest_byp_test( 4, "lui", 0x0002, 0x00020000 ),
gen_imm_dest_byp_test( 3, "lui", 0x0003, 0x00030000 ),
gen_imm_dest_byp_test( 2, "lui", 0x0004, 0x00040000 ),
gen_imm_dest_byp_test( 1, "lui", 0x0005, 0x00050000 ),
]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
return [
gen_imm_value_test( "lui", 0x0000, 0x00000000 ),
gen_imm_value_test( "lui", 0xffff, 0xffff0000 ),
gen_imm_value_test( "lui", 0x7fff, 0x7fff0000 ),
gen_imm_value_test( "lui", 0x8000, 0x80000000 ),
]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
asm_code = []
for i in xrange(100):
imm = Bits( 16, random.randint(0,0xffff) )
dest = zext(imm,32) << 16
asm_code.append( gen_imm_value_test( "lui", imm.uint(), dest.uint() ) )
return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
asm_test( gen_basic_test ),
asm_test( gen_dest_byp_test ),
asm_test( gen_value_test ),
asm_test( gen_random_test ),
])
def test( name, test ):
sim = PisaSim( trace_en=True )
sim.load( pisa_encoding.assemble( test() ) )
sim.run()
|
import random
import re
def load_word():
'''
A function that reads a text file of words and randomly selects one to use as the secret word from the list.
Returns:
string: The secret word to be used in the spaceman guessing game
'''
with open('../data/words.txt', 'r') as f:
words_list = f.read().split(' ')
return random.choice(words_list)
def is_word_guessed(secret_word, letters_guessed):
'''
A function that checks if all the letters of the secret word have been guessed.
Args:
secret_word (string): the random word the user is trying to guess.
letters_guessed (list of strings): list of letters that have been guessed so far.
Returns:
bool: True only if all the letters of secret_word are in letters_guessed, False otherwise
'''
for letter in secret_word:
if letter not in letters_guessed:
return False
return True
def get_guessed_word(secret_word, letters_guessed):
'''
A function that is used to get a string showing the letters guessed so far in the secret word and underscores for letters that have not been guessed yet.
Args:
secret_word (string): the random word the user is trying to guess.
letters_guessed (list of strings): list of letters that have been guessed so far.
Returns:
string: letters and underscores. For letters in the word that the user has guessed correctly, the string should contain the letter at the correct position. For letters in the word that the user has not yet guessed, shown an _ (underscore) instead.
'''
return re.sub(f'[^{"".join(letters_guessed)}]', '_', secret_word)
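# Illustrative example (not part of the original file): the substitution above
# keeps guessed letters and masks every other character with an underscore.
def _demo_get_guessed_word():
    # 'spaceman' with ['a', 's'] guessed -> 's_a___a_'
    return get_guessed_word('spaceman', ['a', 's'])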
def is_guess_in_word(guess, secret_word):
'''
A function to check if the guessed letter is in the secret word
Args:
guess (string): The letter the player guessed this round
secret_word (string): The secret word
Returns:
bool: True if the guess is in the secret_word, False otherwise
'''
return guess in secret_word
def is_guess_valid(guess, letters_guessed):
'''
'''
if guess in letters_guessed:
print('You\'ve already guessed that!')
return False
return guess.isalpha() and len(guess) == 1
def spaceman(secret_word):
'''
A function that controls the game of spaceman. Will start spaceman in the command line.
Args:
secret_word (string): the secret word to guess.
'''
letters_guessed = []
num_fails = len(secret_word)
while not is_word_guessed(secret_word, letters_guessed):
# TODO: show the player information about the game according to the project spec
guess = input('Guess a letter! ')
if not is_guess_valid(guess, letters_guessed):
print('Invalid guess')
continue
else:
letters_guessed.append(guess)
if is_guess_in_word(guess, secret_word):
print(f'Good work! You still have {num_fails} remaining')
else:
num_fails -= 1
if num_fails <= 0:
break
print(f'That was incorrect, now you only have {num_fails} remaining')
print(f'The current progress is {get_guessed_word(secret_word, letters_guessed)}')
    if num_fails <= 0:
print('You lose!')
else:
print('You won!')
print(f'The word is: {secret_word}')
if __name__ == '__main__':
play = True
while play:
spaceman(load_word())
play = input('Do you want to play again? y/any').lower() == 'y'
|
# encoding: utf-8
import os, sys, io
import shutil
import tempfile
import datetime
import defs
class StaticDirectory(object):
def __init__(self, name, dirpath):
self.name = name
self.path = dirpath
class File(object):
def __init__(self, name):
self.name = name
f, self.path = tempfile.mkstemp()
os.close(f)
# delete the tempfile
def delete(self):
if os.path.exists(self.path):
os.remove(self.path)
# input data has to be unicode
def render(self, data):
if data is not None:
with open(self.path, 'w') as f:
f.write(data.encode('utf-8'))
class Archive(object):
def __init__(self, *kwds):
self.list = list(kwds)
self.path = tempfile.mkdtemp()
def __copy(self, file_or_dir):
output = os.path.join(self.path, file_or_dir.name)
copy = shutil.copytree if os.path.isdir(file_or_dir.path) else shutil.copyfile
copy(file_or_dir.path, output)
def save(self, archive_name):
if not self.list:
raise ValueError("nothing for archive")
for item in self.list:
self.__copy(item)
shutil.move(self.path, './')
os.rename(os.path.basename(self.path), archive_name)
# whether docs exist or not
def exists(self, archive_name):
dirname = os.path.abspath(os.path.dirname(__file__))
return os.path.exists(os.path.join(dirname, archive_name))
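# Example usage (illustrative, not part of the original module): render a piece
# of text into a temporary File and save it as an archive directory in the
# current working directory. The names are hypothetical.
def _demo_archive():
    report = File('report.txt')
    report.render(u'hello archive')
    Archive(report).save('demo_archive')
    report.delete()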
|
print("CSYK"[input()%2::2])
|
# Generated by Django 2.2.5 on 2019-12-11 15:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tem', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='copomapp',
old_name='CO_id',
new_name='COid',
),
migrations.AlterField(
model_name='examscheme',
name='Code_patt',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tem.Employee'),
),
migrations.AlterField(
model_name='teachingscheme',
name='Code_patt',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tem.Employee'),
),
migrations.AlterField(
model_name='weights',
name='Code_patt',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tem.Employee'),
),
]
|
#!/usr/bin/env python
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import io
import os
import time
import datetime as dt
import locale
from collections import namedtuple, deque
from pprint import pprint
import pg8000
from chameleon import PageTemplate
class html(str):
def __html__(self):
return str(self)
locale.setlocale(locale.LC_ALL, '')
Stock = namedtuple('Stock', ('symbol', 'name', 'price', 'change', 'volume'))
pg8000.paramstyle = 'qmark'
conn = pg8000.connect(database='stocks', unix_sock='/var/run/postgresql/.s.PGSQL.5432')
with io.open('template.html', 'rb') as f:
template = PageTemplate(f.read().decode('utf-8'))
def market_states():
current_date = None
state = {}
cur = conn.cursor()
cur.arraysize = 1000
cur.execute("""
SELECT date, symbol, name, adj_close, volume
FROM market_view
ORDER BY date, symbol
""")
while True:
row = cur.fetchone()
if row is None:
break
date, symbol, name, close, volume = row
if date != current_date:
if state:
yield current_date, state
current_date = date
state[symbol] = Stock(symbol, name, close, None, volume)
yield date, state
def back_fill(states):
blank_stock = Stock(None, None, None, 0.0, 0)
old_state = {}
for date, new_state in states:
filled_state = {
symbol: Stock(
s.symbol,
s.name,
old_price if s.price is None else s.price,
(
s.price - old_price if s.price is not None and old_price is not None else
0.0 if s.price is None and old_price is not None else
None
),
s.volume or 0)
for symbol, s in new_state.items()
for old_price in (old_state.get(symbol, blank_stock).price,)
}
yield date, filled_state
old_state = filled_state
def filter_blank(states):
for date, state in states:
if not all(s.price is None for s in state.values()):
yield date, {
symbol: stock for symbol, stock in state.items()
if stock.price is not None
}
def main():
for date, state in filter_blank(back_fill(market_states())):
print('Writing state for {date:%Y-%m-%d}'.format(date=date))
with io.open('market.html.new', 'wb') as f:
f.write(template(
date=date,
state=state,
sign=lambda n: (1 if n > 0 else -1) if n else 0,
locale=locale).encode('utf-8'))
os.rename('market.html.new', 'market.html')
time.sleep(2)
if __name__ == '__main__':
main()
|
# Wrapper to run the Random Spanning Tree Approximation algorithm in parallel on an interactive cluster, over multiple parameter combinations and datasets.
# The script uses the Python thread and queue packages.
# It implements a worker class and a queuing system:
# each parameter combination is treated as a job, and all jobs are pooled in a priority queue.
# A group of workers (computing nodes) is created, and each worker repeatedly takes and processes the first job from the queue.
# If a job is not completed by its worker, it is pushed back onto the queue and processed later.
# A minimal usage sketch of this pattern appears right after the Worker class below.
import math
import re
import Queue
from threading import ThreadError
from threading import Thread
import os
import sys
import commands
sys.path.append('/cs/taatto/group/urenzyme/workspace/netscripts/')
from get_free_nodes import get_free_nodes
import multiprocessing
import time
import logging
import random
logging.basicConfig(format='%(asctime)s %(filename)s %(funcName)s %(levelname)s:%(message)s', level=logging.INFO)
job_queue = Queue.PriorityQueue()
# Worker class
# job is a tuple of parameters
class Worker(Thread):
def __init__(self, job_queue, node):
Thread.__init__(self)
self.job_queue = job_queue
self.node = node
        self.penalty = 0  # penalty parameter which prevents a computing node with low computational resources from taking jobs from the job queue
pass # def
def run(self):
all_done = 0
while not all_done:
try:
time.sleep(random.randint(5000,6000) / 1000.0) # get some rest :-)
time.sleep(self.penalty*120) # bad worker will rest more
job = self.job_queue.get(0)
add_penalty = singleRSTA(self.node, job)
self.penalty += add_penalty
if self.penalty < 0:
self.penalty = 0
except Queue.Empty:
all_done = 1
pass # while
pass # def
pass # class
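# Illustrative sketch (not part of the original workflow and never called): how the
# Worker / priority-queue pattern described at the top of this file is typically wired
# together. Jobs are (priority, job_detail) tuples, one Worker thread is started per
# computing node, and a worker that fails a job re-queues it itself (see singleRSTA below).
def _example_worker_pool(nodes, jobs):
    for priority, job_detail in jobs:
        job_queue.put((priority, job_detail))
    workers = [Worker(job_queue, node) for node in nodes]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    pass # def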
global_rundir = ''
# function to check whether the result file already exists in the destination folder
def checkfile(filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method):
file_exist = 0
file_exist += os.path.isfile("%s/%s_%s_%s_f%s_l%s_k%s_c%s_s%s_n%s_RSTAs.log" % (global_rundir,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
file_exist += os.path.isfile("%s/%s/c%s/%s_%s_%s_f%s_l%s_k%s_c%s_s%s_n%s_RSTAs.log" % (global_rundir,filename,slack_c,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
if file_exist > 0:
return 1
else:
return 0
pass # checkfile
def singleRSTA(node, job):
(priority, job_detail) = job
(filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method) = job_detail
try:
if checkfile(filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method):
logging.info('\t--< (priority) %d (node)%s,(f)%s,(type)%s,(t)%s,(f)%s,(l)%s,(k)%s,(c)%s,(s)%s,(n)%s' % ( priority, node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
fail_penalty = 0
else:
logging.info('\t--> (priority) %d (node)%s,(f)%s,(type)%s,(t)%s,(f)%s,(l)%s,(k)%s,(c)%s,(s)%s,(n)%s' %( priority, node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
os.system(""" ssh -o StrictHostKeyChecking=no %s 'cd /cs/taatto/group/urenzyme/workspace/colt2014/experiments/L2RTA/inference_codes/; rm -rf /var/tmp/.matlab; export OMP_NUM_THREADS=32; nohup matlab -nodisplay -nosplash -r "run_RSTA '%s' '%s' '%s' '0' '%s' '%s' '%s' '%s' '%s' '%s' '/var/tmp' '%s'" > /var/tmp/tmp_%s_%s_%s_f%s_l%s_k%s_c%s_s%s_n%s_RSTAs' """ % (node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method,global_rundir,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method) )
logging.info('\t--| (priority) %d (node)%s,(f)%s,(type)%s,(t)%s,(f)%s,(l)%s,(k)%s,(c)%s,(s)%s,(n)%s' %( priority, node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
fail_penalty = -1
except Exception as excpt_msg:
print excpt_msg
job_queue.put((priority, job_detail))
logging.info('\t--= (priority) %d (node)%s,(f)%s,(type)%s,(t)%s,(f)%s,(l)%s,(k)%s,(c)%s,(s)%s,(n)%s' %( priority, node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
fail_penalty = 1
if not os.path.isfile("%s/%s_%s_%s_f%s_l%s_k%s_c%s_s%s_n%s_RSTAs.log" % (global_rundir,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method)):
job_queue.put((job))
logging.info('\t--x (priority) %d (node)%s,(f)%s,(type)%s,(t)%s,(f)%s,(l)%s,(k)%s,(c)%s,(s)%s,(n)%s' %( priority, node,filename,graph_type,t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method))
fail_penalty = 1
time.sleep(10)
return fail_penalty
pass # def
def run():
is_main_run_factor=5
#filenames=['toy10','toy50','emotions','medical','enron','yeast','scene','cal500','fp','cancer']
#filenames=['cancer']
filenames=['toy10','toy50','emotions','yeast','scene','enron','fp','medical']
n=0
# generate job_queue
logging.info('\t\tGenerating priority queue.')
for newton_method in ['1','0']:
for filename in filenames:
for slack_c in ['1', '10', '0.1']:
for t in [1, 5, 10, 20, 30]:
para_t="%d" % (t)
graph_type = 'tree'
for kappa in ['1','2','3','4','5','6','8','10','12','14','16']:
for l_norm in ['2']:
#for kth_fold in ['1','2','3','4','5']:
for kth_fold in ['1']:
for loss_scaling_factor in ['0.1','1']:
if checkfile(filename,graph_type,para_t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method):
continue
else:
n=n+1
job_queue.put( (n, (filename,graph_type,para_t,kth_fold,l_norm,kappa,slack_c,loss_scaling_factor,newton_method)) )
pass # for newton_method
pass # for loss_scaling_factor
pass # for slack_c
pass # for |T|
pass # for l
pass # for kappa
pass # for datasets
pass # for k fole
# get computing nodes
cluster = get_free_nodes()[0] # if you have access to some interactive computer cluster, get the list of hostnames of the cluster
#cluster = ['melkinkari'] # if you don't have access to any computer cluster, just use your machine as the only computing node
# running job_queue
job_size = job_queue.qsize()
logging.info( "\t\tProcessing %d job_queue" % (job_size))
threads = []
for i in range(len(cluster)):
if job_queue.empty():
break
t = Worker(job_queue, cluster[i])
time.sleep(is_main_run_factor)
try:
t.start()
threads.append(t)
except ThreadError:
logging.warning("\t\tError: thread error caught!")
pass
for t in threads:
t.join()
pass
pass # def
# It's actually not necessary to have '__name__' space, but whatever ...
if __name__ == "__main__":
global_rundir = sys.argv[1]
run()
pass
|
'''
Created on 2012-3-13
@author: 301645
'''
import os
import shutil
from common.pyruncmd import pyruncmd
class pywincmds(object):
    '''
    Wraps some commonly used Windows commands.
    '''
py_cmd = pyruncmd()
@staticmethod
def call_cmd(py_curcmd):
pywincmds.py_cmd.command_str = py_curcmd
pywincmds.py_cmd.is_cmd_succeeded()
@staticmethod
def makedirs(absolute_path):
os.makedirs(absolute_path, exist_ok=True)
@staticmethod
def copy_make_parentedirs(src, dst):
        #=======================================================================
        # Copy a file; if the parent directory of dst does not exist, create it first.
        # dst must include the destination file name.
        #=======================================================================
par_dir = os.path.dirname(dst).strip()
if not os.path.exists(par_dir):
pywincmds.makedirs(par_dir)
if os.path.exists(src):
shutil.copy2(src, dst)
@staticmethod
def robocopy(py_source, py_dest, pyfiles = "", py_exclude_dirs="", py_exclude_files=""):
"""排除文件目录可以就模式,如/XD .svn obj config Properties "Web References" /XF *.cs *.csproj *.pdb *.resx *.csproj.user"""
if not os.path.exists(py_source):
raise("源文件" + py_source + "不存在!")
files = xd = xf = ""
robocopy_path = os.getenv("WORKSPACE", r"D:\Documents and Settings\binliu\workspace") + os.sep + "sbp_2.0" + os.sep + "tools" + os.sep + "robocopy"
if pyfiles.strip() != "":
files = " " + pyfiles + " "
if py_exclude_dirs.strip() != "":
xd = " /XD " + py_exclude_dirs + " "
if py_exclude_files.strip()!="":
xf = " /XF " + py_exclude_files + " "
pywincmds.py_cmd.command_str = "\"" + robocopy_path + "\"" + " \"" + py_source + "\" \"" + py_dest + "\"" + files + xd + xf + " /E"
pywincmds.py_cmd.is_cmd_succeeded(7)
@staticmethod
def copy(py_source, py_dest):
        '''
        Copy a single file.
        '''
if not os.path.exists(py_source):
raise("源文件" + py_source + "不存在!")
pywincmds.py_cmd.command_str = "copy \"" + py_source + "\" \"" + py_dest + "\" /Y"
pywincmds.py_cmd.is_cmd_succeeded()
@staticmethod
def del_dir(py_dir):
        '''
        Delete the directory py_dir, even if it contains subdirectories and files.
        '''
try:
if os.path.isdir(py_dir):
shutil.rmtree(py_dir)
except Exception as e:
raise(e)
@staticmethod
def del_all_except_hidden_directories(py_dir_root):
"""删除除.svn的其他文件和目录"""
pywincmds.py_cmd.command_str = "dir /B \"" + py_dir_root + "\""
pywincmds.py_cmd.is_cmd_succeeded()
lists = pywincmds.py_cmd.get_stdout_lines()
for line in lists:
if os.path.isfile(py_dir_root + os.sep + line.strip()):
pywincmds.py_cmd.command_str = 'del /F /Q "' + py_dir_root + os.sep + line.strip() + '"'
else:
pywincmds.py_cmd.command_str = 'rmdir /S /Q "' + py_dir_root + os.sep + line.strip() + '"'
pywincmds.py_cmd.is_cmd_succeeded()
@staticmethod
def py_write_svn_message_to_file(py_message, py_file):
"""多行需要以\n分隔而不是\r\n"""
if os.path.exists(py_file):
os.system("del /F /Q \"" + py_file + "\"")
f = open(py_file, 'w')
f.write(py_message)
f.close()
@staticmethod
def restart_app_pool(cur_file_path, py_apppool):
import platform
if platform.release().find("2008Server") > -1:
pywincmds.py_cmd.command_str = r"C:\Windows\SysWOW64\inetsrv\appcmd.exe recycle apppool " + py_apppool
else:
pywincmds.py_cmd.command_str = "\"" + cur_file_path + os.sep + "tools" + os.sep + "cscript.exe\" \"" + cur_file_path + os.sep + "tools" + os.sep + "iisapp.vbs\" /a " + py_apppool + " /r"
pywincmds.py_cmd.is_cmd_succeeded()
@staticmethod
def web_check(py_url, py_keyword, py_time, py_decode):
        # py_time is the polling interval in seconds; the total wait is 60*py_time seconds.
        # py_url is the URL to check, py_keyword is the keyword to search for.
        # Returns True if the keyword is found within the wait period, otherwise False.
        # py_decode is the page encoding as a string, e.g. 'gb2312'.
import time
import urllib.request
for i in range(0,60):
time.sleep(float(py_time))
req = urllib.request.urlopen(py_url,timeout=180)
            content = req.read()
page=""
try:
page = str(content, py_decode)
except:
req = urllib.request.urlopen(py_url,timeout=180)
if py_decode == "utf-8":
page = str(content, "gb2312")
else:
page = str(content, "utf-8")
if page.find(py_keyword) > -1:
return(True)
return(False)
if __name__ == '__main__':
    pywincmds.robocopy(r'E:\sourcecode\51tao\WebUI', r'E:\sourcecode\51tao\WebUI1',
                       py_exclude_dirs=r'obj', py_exclude_files=r'*.dll')
|
# -*- coding: utf-8 -*-
"""Utility functions."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import sys
import os.path as op
from inspect import getargspec
from ..ext.six import string_types, exec_
from ..ext.six.moves import builtins, cPickle
#------------------------------------------------------------------------------
# Pickle utility functions
#------------------------------------------------------------------------------
def _load_pickle(path):
path = op.realpath(op.expanduser(path))
assert op.exists(path)
with open(path, 'rb') as f:
return cPickle.load(f)
def _save_pickle(path, data):
path = op.realpath(op.expanduser(path))
with open(path, 'wb') as f:
cPickle.dump(data, f, protocol=2)
#------------------------------------------------------------------------------
# Various Python utility functions
#------------------------------------------------------------------------------
def _read_python(path):
path = op.realpath(op.expanduser(path))
assert op.exists(path)
with open(path, 'r') as f:
contents = f.read()
metadata = {}
exec_(contents, {}, metadata)
metadata = {k.lower(): v for (k, v) in metadata.items()}
return metadata
def _fun_arg_count(f):
"""Return the number of arguments of a function.
WARNING: with methods, only works if the first argument is named 'self'.
"""
args = getargspec(f).args
if args and args[0] == 'self':
args = args[1:]
return len(args)
def _is_in_ipython():
return '__IPYTHON__' in dir(builtins)
def _is_interactive():
"""Determine whether the user has requested interactive mode."""
# The Python interpreter sets sys.flags correctly, so use them!
if sys.flags.interactive:
return True
    # IPython does not set sys.flags when -i is specified, so first
    # check whether it has already been imported.
if '__IPYTHON__' not in dir(builtins):
return False
# Then we check the application singleton and determine based on
# a variable it sets.
try:
from IPython.config.application import Application as App
return App.initialized() and App.instance().interact
except (ImportError, AttributeError):
return False
def _show_shortcut(shortcut):
if isinstance(shortcut, string_types):
return shortcut
elif isinstance(shortcut, tuple):
return ', '.join(shortcut)
def _show_shortcuts(shortcuts, name=''):
print()
if name:
name = ' for ' + name
print('Keyboard shortcuts' + name)
for name in sorted(shortcuts):
print('{0:<40}: {1:s}'.format(name, _show_shortcut(shortcuts[name])))
print()
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
from requests.compat import urljoin
from datetime import datetime
import re
def game_data():
print(str(datetime.now().time())[:8])
all_items = []
pg_ids = []
pg_links = []
    for pg in range(1, 21):
        soup_pg = browse_games(pg)
        pg_ids_tmp, pg_links_tmp = extract_game_ids(soup_pg)
        pg_ids += pg_ids_tmp
        pg_links += pg_links_tmp
        # Fetch stats XML for this page of ids (assumes the public BGG XML API2 'thing'
        # endpoint) and turn each <item> element into a dict of game attributes.
        ids_param = ','.join(pg_ids_tmp)
        xml_resp = requests.get(f'https://boardgamegeek.com/xmlapi2/thing?id={ids_param}&stats=1')
        soup_xml = BeautifulSoup(xml_resp.content, 'html.parser')
        all_items += extract_xml(soup_xml, pg_links_tmp)
    print(str(datetime.now().time())[:8])
    return all_items
def extract_xml(soup, game_links):
item_list = []
items = soup.find_all('item')
for idx, item in enumerate(items):
item_list.append(extract_item(item, game_links[idx]))
return item_list
def extract_item(game_item, game_url):
game_dict = {'name': game_item.find('name')['value'],
'game_id': game_item['id']}
values_int = ['yearpublished', 'minplayers', 'maxplayers', 'playingtime', 'minplaytime', 'maxplaytime', 'minage']
for vals in values_int:
game_dict[vals] = game_item.find(vals)['value']
link_categ = ['boardgamecategory', 'boardgamemechanic', 'boardgamefamily', 'boardgameexpansion', 'boardgameartist',
'boardgamecompilation', 'boardgameimplementation', 'boardgamedesigner', 'boardgamepublisher',
'boardgameintegration']
for categ in link_categ:
game_dict[categ] = [x['value'] for x in game_item.find_all('link', {'type': categ})]
stats_float = ['average', 'bayesaverage', 'stddev', 'median', 'averageweight']
for stat in stats_float:
game_dict[stat] = float(game_item.find(stat)['value'])
stats_int = ['usersrated', 'owned', 'trading', 'wanting', 'wishing', 'numcomments', 'numweights']
for stat in stats_int:
game_dict[stat] = int(game_item.find(stat)['value'])
for game_cat in game_item.find_all('rank'):
cat_name = re.sub('\W', '', game_cat['friendlyname'])
game_dict[cat_name] = int(game_cat['value'])
return game_dict
def browse_games(page_num):
bs_url = 'https://boardgamegeek.com/browse/boardgame/page/'
pg_url = f'{bs_url}{page_num}'
pg = requests.get(pg_url)
soup = BeautifulSoup(pg.content, 'html.parser')
return soup
def extract_game_ids(soup):
bs_pg = 'https://boardgamegeek.com/'
all_games = soup.find_all('td', {'class': 'collection_objectname'})
game_ids = [x.find('a')['href'].split('/')[-2] for x in all_games]
game_pages = [urljoin(bs_pg, x.find('a')['href']) for x in all_games]
return game_ids, game_pages
if __name__ == '__main__':
game_items = game_data()
# export_csv(game_items)
# print(5)
|
import network, machine, ssd1306, test, oled_ssd1306, time, menu_framework
import uasyncio as asyncio
loop = asyncio.get_event_loop()
board_station = network.WLAN(network.STA_IF)
board_AP = network.WLAN(network.AP_IF)
def reboot():
machine.reset()
async def ipcfg():
# allows easy setting of AP connection via terminal
ssid = board_AP.config('essid')
print('current connection AP setting: ' + ssid)
ssid = input('enter ssid of wifi connection')
pw = input('enter password of connection')
board_station.connect(ssid, pw)
print('connecting...')
await asyncio.sleep(10)
if board_station.isconnected():
print('connected!...rebooting')
board_AP.active(False)
time.sleep(2)
reboot()
else:
print('connection failed')
        await ipcfg()
async def wifi_status():
# ensures wifi connection, otherwise turns on AP so user can connect via term and set connection
x = 0
while not board_station.isconnected():
oled_ssd1306.scr.text('.', x, oled_ssd1306.active_line)
oled_ssd1306.scr.show()
x += 8
await asyncio.sleep_ms(500)
if x > 88:
oled_ssd1306.wipe()
oled_ssd1306.pt('no con. X\'ing AP')
time.sleep(2)
oled_ssd1306.wipe()
oled_ssd1306.pt('ssid: <ssid>')
oled_ssd1306.pt('pw: <password>')
oled_ssd1306.pt('connect to')
oled_ssd1306.pt('web REPL')
board_AP.active(True)
await asyncio.sleep(10)
loop.create_task(ipcfg())
if board_station.isconnected():
oled_ssd1306.wipe()
oled_ssd1306.pt('wifi connected')
time.sleep(1)
board_AP.active(False)
break
oled_ssd1306.pt('connecting wifi')
loop.create_task(wifi_status())
loop.run_forever()
while 1:
# loads the main menu once the board has connected to the wifi
if board_station.isconnected():
oled_ssd1306.wipe()
menu_framework.show_menu(menu_framework.main_menu)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0002_subscriber_name'),
]
operations = [
migrations.AddField(
model_name='subscriber',
name='token',
field=models.CharField(default='1bajhigh4783h', max_length=200),
preserve_default=False,
),
]
|
# coding: utf-8
import requests
import re
import os
import base64
def decode(s):
return str(base64.b64decode(s),'utf-8')
UN = decode(os.getenv('UN'))
UP = decode(os.getenv('UP'))
HOST = decode('d3d3LnYyZXguY29t')
def main():
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
'Referer': 'https://%s/signin'%(HOST,),
'Origin': 'https://%s'%(HOST,)
}
session = requests.Session()
session.headers.update(headers)
resp = session.get('https://%s/signin'%(HOST,))
u, p = re.findall(r'class="sl" name="([0-9A-Za-z]{64})"', resp.text)
once_code = re.search(r'value="(\d+)" name="once"', resp.text).group(1)
resp = session.post('https://%s/signin'%(HOST,), {u: UN, p: UP, 'once':once_code, 'next':'/'})
resp = session.get('https://%s/mission/daily'%(HOST,))
if u'已领取' in resp.text:
print('Have Done!')
else:
resp = session.get('https://%s'%(HOST,) + re.search(r'/mission/daily/redeem\?once=\d+', resp.text).group())
if resp.ok:
print('Done!')
if __name__ == '__main__':
main()
|
import sqlite3
def isLoginSuccess(user_id, password):
conn = sqlite3.connect('Database.db3')
cursor = conn.cursor()
cursor.execute("select * from User where user_id = '" + user_id + "' and password = '" + password + "'")
result = cursor.fetchall()
cursor.close()
conn.close()
if len(result) == 0:
return False
else :
return True
def setUserAccount(user_id, password, user_name):
conn = sqlite3.connect('Database.db3')
conn.execute("INSERT INTO User (user_id, password, user_name) VALUES ( '" + user_id + "', '" + password + "', '" + user_name + "')");
conn.commit()
conn.close()
return True
def changePassword(user_id, new_password):
conn = sqlite3.connect('Database.db3')
conn.execute("UPDATE User SET password = '" + new_password + "' WHERE user_id = '" + user_id + "'")
conn.commit()
conn.close()
return True
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
@login_required(login_url=settings.LOGIN)
def index(request):
return render(request, 'users/dashboard.html')
|
from two_stream_rgb_flow.model.AU_rcnn.utils.resize_bbox import resize_bbox
from two_stream_rgb_flow.model.AU_rcnn.utils.random_flip import random_flip
from two_stream_rgb_flow.model.AU_rcnn.utils.flip_bbox import flip_bbox
|
import random
def getN(num1, num2):
    """Return num2 distinct random integers between 1 and num1 (assumes num2 <= num1)."""
    a = []
    for i in range(num2):
        while True:
            num = random.randint(1, num1)
            if num not in a:
                a.append(num)
                break
    return a
# example values; the original called getN with undefined num1 and num2
a = getN(10, 5)
a.sort()
print(a)
|
import tensorflow as tf
import os
import logging
from utils.all_utils import get_timestamp
def get_VGG_16_model(input_shape,model_path):
model=tf.keras.applications.vgg16.VGG16(
input_shape=input_shape,
weights="imagenet",
include_top=False
)
model.save(model_path)
logging.info(f"VGG16 model saved at: {model_path}")
return model
def prepare_model(model,CLASSES,freeze_all,freeze_till,learning_rate):
if freeze_all:
for layer in model.layers:
layer.trainable=False
elif freeze_till is not None and freeze_till > 0:
for layer in model.layers[:-freeze_till]:
layer.trainable=False
flatten_in=tf.keras.layers.Flatten()(model.output)
prediction=tf.keras.layers.Dense(
units=CLASSES,activation='softmax'
)(flatten_in)
full_model=tf.keras.Model(
inputs=model.input,
outputs=prediction
)
full_model.compile(
optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate
),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=["accuracy"]
)
logging.info(f"custom model is compiled and ready to trained")
return full_model
def load_full_model(untrained_full_model_path):
model = tf.keras.models.load_model(untrained_full_model_path)
logging.info(f"untrained model is read from: {untrained_full_model_path}")
return model
def get_unique_path_to_save_model(trained_model_dir, model_name="model"):
timestamp = get_timestamp(model_name)
unique_model_name = f"{timestamp}_.h5"
unique_model_path = os.path.join(trained_model_dir, unique_model_name)
return unique_model_path
|
# encoding:utf-8
__author__ = 'hanzhao'
import sys
import requests
import urllib
import urllib2
import json
import re
# http://www.zhtimer.cn:2014/scramble/.json?=333*1
SCRAMBLE_URL = 'http://www.zhtimer.cn:2014/scramble/.json?='
def run(msg):
    print '[info] loading cube scramble helper module...'
    if '<br/>' in msg:  # group chat message
        [FromUser, msg] = msg.split('<br/>')
    else:  # private message
        pass
    # map trigger keywords to the scramble server's puzzle types
    puzzle_types = [
        (['.2', '222', '二阶打乱'], '222'),
        (['.3', '333', '三阶打乱'], '333'),
        (['.4', '444', '四阶打乱'], '444'),
        (['.5', '555', '五阶打乱'], '555'),
        (['.6', '666', '六阶打乱'], '666'),
        (['.7', '777', '七阶打乱'], '777'),
        (['.3bf', '333ni', '三盲打乱'], '333ni'),
        (['.4bf', '444ni', '四盲打乱'], '444ni'),
        (['.5bf', '555ni', '五盲打乱'], '555ni'),
        (['.minx', '五魔打乱'], 'minx'),
        (['.py', 'pyram', '金字塔打乱'], 'pyram'),
        (['.sq', '.sq1', 'square1', 'sq1打乱'], 'sq1'),
        (['.sk', 'skewb', '斜转打乱'], 'skewb'),
        (['.cl', 'clock', '魔表打乱'], 'clock'),
    ]
    for keywords, puzzle_type in puzzle_types:
        if msg in keywords:
            print '[tool] auto reply'
            scramble_api = SCRAMBLE_URL + puzzle_type
            scramble_raw = json.loads(requests.post(scramble_api).text)
            scramble_text = str(scramble_raw[0]['scrambles'][0])
            print scramble_text
            return scramble_text
    print '[tool] no matching keyword found'
    return None
|
l1 = [1 ,4 ,5, 6, 9]
index = [0, 2, 0, 4, 2, 4, 4, 0, 1, 3, 3]
l2 = []
for i in index:
l2.append(str(l1[i]))
tel = ''.join(l2)
print('WeChat And Tel:' + tel)
|
import os, sys, re;
import string;
def mkChanges(scheme, lttr, nodes):
#print(nodes[1]);
for node in nodes[1]:
print('---',node, scheme[node], scheme[node][0]);
scheme[node][0].remove(lttr);
del scheme[lttr];
print('***\n\n',scheme,'***\n\n');
return;
step1 = [];
step2 = [];
alpha = list(string.ascii_uppercase);
step = [];
scheme = dict();
for l in open('input.txt','r'):
lpart = re.search(r'Step (.) must be finished before step (.) can begin.',l);
#print(lpart, lpart.groups(), lpart.group(1));
start = lpart.group(1);
end = lpart.group(2);
if (start not in step1) :
step1.append(start);
if (end not in step2) :
step2.append(end);
if (not start in scheme): scheme[start]=([],[end]);
else: scheme[start][1].append(end);
if (not end in scheme): scheme[end] = ([start],[]);
else: scheme[end][0].append(start);
step.append((lpart.group(1), lpart.group(2)));
print('\n\n',scheme,'\n\n');
step1.sort();
step2.sort();
endNode = [item for item in alpha if item not in step1]
startNode = [item for item in alpha if item not in step2];
print(step1, endNode, startNode, step);
print(step2, len(step2));
isOut = True;
k = list(scheme.keys());
k.sort();
print(k, '\n\n',scheme,'\n\n');
part1 = '';
while (isOut) :
k = list(scheme.keys());
k.sort();
for lttr in k:
print(lttr, );
if (len(scheme[lttr][0])==0) :
part1 += lttr;
mkChanges(scheme, lttr, scheme[lttr]);
print(lttr, scheme);
break;
if (len(scheme)==0): isOut = False;
print('Part1: ',part1);
|
import bigbenclock
import customreply
from internal import const as C
host = '127.0.0.1'
port = 9876
super_user = 10000
scheduler_opt = {
'apscheduler.timezone': 'Asia/Shanghai'
}
bot_commands = {
# keyword: [callback_func, cooldown in secs, grp/priv, enabled groups, regex, msg, at_sender]
r'.*有人.+[吗嘛][\??]?': [customreply.guna, 0, C.GROUP, set(), C.REGEX, C.NO_MSG, C.NO_AT_SENDER],
r'^(\d+)刚[刚才]说了(啥|什么)[??]?$': [customreply.stalker, 0, C.GROUP, set(), C.REGEX, C.NO_MSG, C.AT_SENDER],
r'/echo': [customreply.echo_priv, 0, C.PRIVATE, {20000}, C.NOT_REGEX, C.MSG, C.NO_AT_SENDER],
r'\.(baidu|百度)': [customreply.cnm, 0, C.GROUP, set(), C.REGEX, C.MSG, C.AT_SENDER]
}
default_proc = [
# (callbacks, cooldown in secs, grp/priv, enabled groups)
(customreply.nature_of_human, 0, C.GROUP, set())
]
scheduled_tasks = [
# (callback_func, {cron typed time dict})
(bigbenclock.bigben, {'hour': '*'})
]
|
import redis
import msgpack
class UserCache(object):
_instance = None
SEEN_USERS_SET_KEY = 'seen_users'
@classmethod
def get_instance(cls):
if cls._instance is None:
# TODO set passwords, logical db and such
cls._instance = redis.StrictRedis()
return cls._instance
@classmethod
def user_parsed(cls, username):
return cls.get_instance().sismember(cls.SEEN_USERS_SET_KEY, username)
@classmethod
def add_to_parsed(cls, username):
cls.get_instance().sadd(cls.SEEN_USERS_SET_KEY, username)
@classmethod
def set_following(cls, user, followers_list):
cls.get_instance().set(user, msgpack.packb(followers_list))
    @classmethod
    def get_following(cls, user):
        data = cls.get_instance().get(user)
        if data is None:
            return None
        return msgpack.unpackb(data)
@classmethod
def remove_user(cls, user):
return cls.get_instance().delete(user)
@classmethod
def get_all_parsed_user_following(cls):
all_following = set()
all_users = cls.get_instance().keys()
for user in all_users:
if user != cls.SEEN_USERS_SET_KEY:
user_following = cls.get_following(user)
if user_following is not None:
for followee in user_following:
all_following.add(followee)
return all_following
|
# Copyright 2018 Nicholas Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import os
from galini_dashboard.API.Connection import Connection
class ConnectionManager:
class __ConnectionManager:
def __init__(self, staticPath):
self.connections = dict()
if not os.path.isdir(staticPath):
raise NotADirectoryError(staticPath + " : is not a valid path")
self.staticPath = staticPath
instance = None
def __init__(self, staticPath):
if ConnectionManager.instance is None:
ConnectionManager.instance = ConnectionManager.__ConnectionManager(staticPath)
def establishNewConnection(self):
userId = uuid.uuid4()
self.instance.connections[str(userId)] = Connection(self.instance.staticPath)
return str(userId)
def getConnection(self, id):
if id not in self.instance.connections:
return None
return self.instance.connections[id]
|
# -*- coding: utf-8 -*-
import nysol._nysolshell_core as n_core
from nysol.mcmd.nysollib.core import NysolMOD_CORE
from nysol.mcmd.nysollib import nysolutil as nutil
class Nysol_Mcat(NysolMOD_CORE):
_kwd,_inkwd,_outkwd = n_core.getparalist("mcat",3)
def __init__(self,*args, **kw_args) :
super(Nysol_Mcat,self).__init__("mcat",nutil.args2dict(args,kw_args,Nysol_Mcat._kwd))
def mcat(self,*args, **kw_args):
return Nysol_Mcat(nutil.args2dict(args,kw_args,Nysol_Mcat._kwd)).addPre(self)
setattr(NysolMOD_CORE, "mcat", mcat)
|
from os import path
from datetime import timedelta
from .general import get_run_date_times
def extract_water_levels(run_path, channel_cell_map, flood_plain_map):
HYCHAN_OUT_PATH = path.join(run_path, 'output', 'HYCHAN.OUT')
TIMDEP_OUT_PATH = path.join(run_path, 'output', 'TIMDEP.OUT')
base_dt, run_dt = get_run_date_times(run_path)
channel_tms_length = _get_timeseries_length(HYCHAN_OUT_PATH)
channel_tms = _get_channel_timeseries(HYCHAN_OUT_PATH, 'water-level', channel_tms_length, base_dt, channel_cell_map)
flood_plain_tms = _get_flood_plain_timeseries(TIMDEP_OUT_PATH, base_dt, flood_plain_map)
return _change_keys(channel_cell_map, channel_tms), _change_keys(flood_plain_map, flood_plain_tms)
def extract_water_discharge(run_path, channel_cell_map):
HYCHAN_OUT_PATH = path.join(run_path, 'output', 'HYCHAN.OUT')
channel_tms_length = _get_timeseries_length(HYCHAN_OUT_PATH)
base_dt, run_dt = get_run_date_times(run_path)
channel_tms = _get_channel_timeseries(HYCHAN_OUT_PATH, 'discharge', channel_tms_length, base_dt, channel_cell_map)
return _change_keys(channel_cell_map, channel_tms)
def _get_timeseries_length(hychan_file_path):
# Calculate the size of time series
bufsize = 65536
series_length = 0
with open(hychan_file_path) as infile:
is_water_level_lines = False
is_counting = False
count_series_size = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines or series_length:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
is_water_level_lines = True
                elif is_water_level_lines:
                    cols = line.split()
                    if len(cols) > 0 and cols[0].replace('.', '', 1).isdigit():
                        count_series_size += 1
                        is_counting = True
                    elif is_counting:
                        # the first non-numeric line after counting starts marks the end of one time series
                        series_length = count_series_size
                        break
return series_length
def _get_channel_timeseries(hychan_file_path, output_type, series_length, base_time, cell_map):
hychan_out_mapping = {
'water-level': 1,
'water-depth': 2,
'discharge': 4
}
# Extract Channel Water Level elevations from HYCHAN.OUT file
ELEMENT_NUMBERS = cell_map.keys()
MISSING_VALUE = -999
bufsize = 65536
waterLevelSeriesDict = dict.fromkeys(ELEMENT_NUMBERS, [])
with open(hychan_file_path) as infile:
is_water_level_lines = False
is_series_complete = False
waterLevelLines = []
seriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
seriesSize = 0
elementNo = line.split()[5]
if elementNo in ELEMENT_NUMBERS:
is_water_level_lines = True
waterLevelLines.append(line)
else:
is_water_level_lines = False
elif is_water_level_lines:
cols = line.split()
if len(cols) > 0 and isfloat(cols[0]):
seriesSize += 1
waterLevelLines.append(line)
if seriesSize == series_length:
is_series_complete = True
if is_series_complete:
timeseries = []
elementNo = waterLevelLines[0].split()[5]
print('Extracted Cell No', elementNo, cell_map[elementNo])
for ts in waterLevelLines[1:]:
v = ts.split()
if len(v) < 1:
continue
# Get flood level (Elevation)
value = v[hychan_out_mapping[output_type]]
# Get flood depth (Depth)
# value = v[2]
                        if not isfloat(value) or value == 'NaN':
                            continue  # skip missing or NaN values
timeStep = float(v[0])
currentStepTime = base_time + timedelta(hours=timeStep)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
timeseries.append([dateAndTime, value])
waterLevelSeriesDict[elementNo] = timeseries
is_water_level_lines = False
is_series_complete = False
waterLevelLines = []
return waterLevelSeriesDict
def _get_flood_plain_timeseries(timdep_file_path, base_time, cell_map):
# Extract Flood Plain water elevations from BASE.OUT file
bufsize = 65536
MISSING_VALUE = -999
ELEMENT_NUMBERS = cell_map.keys()
with open(timdep_file_path) as infile:
waterLevelLines = []
waterLevelSeriesDict = dict.fromkeys(ELEMENT_NUMBERS, [])
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if len(line.split()) == 1:
if len(waterLevelLines) > 0:
waterLevels = _get_water_level_of_channels(waterLevelLines, ELEMENT_NUMBERS)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[0].split()[0])
currentStepTime = base_time + timedelta(hours=ModelTime)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
for elementNo in ELEMENT_NUMBERS:
tmpTS = waterLevelSeriesDict[elementNo][:]
if elementNo in waterLevels:
tmpTS.append([dateAndTime, waterLevels[elementNo]])
else:
tmpTS.append([dateAndTime, MISSING_VALUE])
waterLevelSeriesDict[elementNo] = tmpTS
waterLevelLines = []
waterLevelLines.append(line)
return waterLevelSeriesDict
def _change_keys(key_map, dict_to_be_mapped):
dict_ = {}
for key in key_map.keys():
dict_[key_map[key]] = dict_to_be_mapped[key]
return dict_
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def _get_water_level_of_channels(lines, channels=None):
"""
Get Water Levels of given set of channels
:param lines:
:param channels:
:return:
"""
if channels is None:
channels = []
water_levels = {}
for line in lines[1:]:
if line == '\n':
break
v = line.split()
if v[0] in channels:
# Get flood level (Elevation)
water_levels[v[0]] = v[5]
# Get flood depth (Depth)
# water_levels[int(v[0])] = v[2]
return water_levels
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: Anna
def jijiji(is_cheap, buy_amount, good_price):
all_price = good_price * buy_amount
if is_cheap:
        print 'Mom wrote down a grocery expense of %d yuan in her little notebook' % (all_price)
def talktalktalk(is_cheap, buy_amount, good_price):
    if is_cheap:
        print 'Mom got home and told Dad: "Vegetables were cheap today, I bought %d jin."' % (buy_amount)
    else:
        print 'Mom got home and told Dad: "Vegetables were expensive today, I did not buy any."'
def maimaimai():
    who = "Xiao's mom"
    good_price = 1  # will vary
    reasonable_price = 5
    buy_amount = 2  # will vary
    good_description = 'Xishuangbanna napa cabbage'
    is_cheap = False
    print '%s went to the market and saw %s selling at %d yuan per jin' % (who, good_description, good_price)
if good_price <= reasonable_price:
print "她觉着便宜"
is_cheap = True
#5-2 4-3 3-4 2-4
buy_amount = 2 + (reasonable_price - good_price)
if buy_amount > 4:
buy_amount = 4
        print 'She bought %d jin' % (buy_amount)
else:
print "她认为贵了"
is_cheap = False
print "她没有买, 扬长而去"
return is_cheap, buy_amount, good_price
# run function
if __name__ == '__main__':
is_cheap, buy_amount, good_price = maimaimai()
talktalktalk(is_cheap, buy_amount, good_price)
jijiji(is_cheap, buy_amount, good_price)
|
import velocity
import sys
import atexit
import uuid
import datetime
DB_NAME = r'vscDatabase'
DB_USER = 'script'
DB_PASS = 'script'
# if workstation
DB_PATH = r'/Velocity/Databases/vscDatabase'
# if grid
DB_IP = '127.0.0.1'
DB_PORT = 57000
# requires "sixCBCT, AdaptiveMonitoring" data already imported
PATIENT_ID = 'AW3Y6TA684'
PRIMARY_UID = '1.3.12.2.1107.5.1.4.54841.30000011071412175920300003025'
SECONDARY_UID = '1.2.246.352.61.2.4621874044879001489.17114159699319862401'
REGISTRATION_NAME = 'CBCT MULTI'
RIGID_NAME = "RIGID"
e = velocity.VelocityEngine()
def orThrow(c, e=e):
if not c or (hasattr(c, 'isValid') and not c.isValid()):
raise RuntimeError(e.getErrorMessage())
#orThrow(e.loginToWorkstation(DB_USER, DB_PASS, DB_PATH, True))
orThrow(e.loginToGrid(DB_USER, DB_PASS, DB_IP, DB_PORT, DB_NAME))
atexit.register(e.logout)
orThrow(e.loadPatientByPatientId(PATIENT_ID))
print('Loaded patient: {}'.format(PATIENT_ID))
orThrow(e.loadPrimaryVolumeByUID(PRIMARY_UID))
print('Loaded primary volume: {}'.format(PRIMARY_UID))
orThrow(e.loadSecondaryVolumeByUID(SECONDARY_UID))
print('Loaded secondary volume: {}'.format(SECONDARY_UID))
orThrow(e.loadRegistrationByName(REGISTRATION_NAME))
print('Loaded registration: {}'.format(REGISTRATION_NAME))
rops = e.getRegistrationOperations()
sops = e.getStructureOperations()
primaryVol = e.getPrimaryVolume()
secondaryVol = e.getSecondaryVolume()
# find an external structure, throws StopIteration if not found
primarySet = next(s for s in primaryVol.getStructureSets() if s.getName() == 'Original SIM')
structure = next(s for s in primarySet.getStructures() if s.getName() == 'Mandible')
print('Using structure "{}" from structure set "{}"'.format(structure.getName(), primarySet.getName()))
# create a new structure set on the secondary volume
targetSetName = datetime.datetime.now().isoformat()[:16]
targetSet = sops.createStructureSet(targetSetName, False)
orThrow(targetSet, sops)
# copy the external to the new structure set
print('Copying structure to secondary...')
newStructures = sops.copyStructuresToSecondary([structure.getVelocityId()], targetSet.getVelocityId())
# IMPORTANT: call save after finishing a set of modifications to a structure set
targetSet = sops.saveStructureSet(targetSet.getVelocityId())
orThrow(len(newStructures) == 1, sops)
newStructureId = next(iter(newStructures))
newStructure = newStructures[newStructureId]
print('Structure copied to secondary, computing metrics...')
def metrics():
# compute metrics on the copied external structure
c = sops.conformality(structure.getVelocityId(), newStructure.getVelocityId())
print('Conformality: {}'.format(c))
if c < 0.0:
print('Error: {}'.format(sops.getErrorMessage()))
mets = sops.surfaceDistanceMetrics(structure.getVelocityId(), newStructure.getVelocityId())
if not mets.isValid:
print('Error: {}'.format(sops.getErrorMessage()))
else:
print('Metrics: Hausdorff={}, min={}, median={}, mean={}, stddev={}'.format(mets.hausdorffDistance, mets.min, mets.median, mets.mean, mets.standardDeviation))
# show metrics on registration used for copying
metrics()
# now on an alternative registration
orThrow(e.loadRegistrationByName(RIGID_NAME))
print('Loaded registration: {}'.format(RIGID_NAME))
metrics()
|
from typing import List
from .tok import Token
class LexicalTable:
    def __init__(self):
        self.tokenslist: List[Token] = []
def __len__(self):
return len(self.tokenslist)
def __getitem__(self, item):
return self.tokenslist[item]
def append(self, new: Token):
self.tokenslist.append(new)
def __str__(self):
return str([str(i) for i in self.tokenslist])
def __eq__(self, other):
if not isinstance(other, LexicalTable):
raise Exception("comparable object isn't LexicalTable")
return self.tokenslist == other.tokenslist
def __ne__(self, other):
return not self == other
|