text stringlengths 38 1.54M |
|---|
import csv
import time
from googleapiclient.discovery import build
from setup import Setup
from data import Data
class Update:
    """
    Update the existing spreadsheets with the latest grade reports.
    """

    def __init__(self, d):
        """Cache credentials and the parsed report from a Data-like object *d*."""
        self.credentials = d.credentials
        self.date = d.date
        self.semester = d.semester
        self.student_data = d.student_data
        self.time_stamp = self.date.strftime('%Y-%m-%d %H:%M:%S')

    def update_spreadsheets(self):
        """Sends a grade report to students and advisers."""
        print('Running grade reports...')
        # Column holding the current overall grade differs by semester.
        # (Was an immediately-invoked lambda whose parameter was ignored.)
        index = -13 if self.semester == 1 else -12
        semester_name = 'First' if self.semester == 1 else 'Second'
        self.new_students()  # Check for new students first
        # Read storage.csv and create a dictionary of existing students.
        # Row layout appears to be: email, <field>, spreadsheet id -- the [1]
        # lookup below selects the spreadsheet id; TODO confirm column order.
        existing_students = {}  # Dictionary of tuples.
        with open('storage.csv', 'r') as storage:
            for row in csv.reader(storage):
                existing_students[row[0]] = (row[1], row[2])
        service = build('sheets', 'v4', credentials=self.credentials)  # Call the Sheets API.
        # Update each student's sheet with current grade information.
        for s in self.student_data.index:
            time.sleep(3)  # throttle to stay under the Sheets API rate limit
            spreadsheet_id = existing_students[self.student_data.loc[s]['Student Email']][1]
            values = [[self.time_stamp] +
                      self.student_data.iloc[s, :3].tolist() +
                      [semester_name] +
                      [self.get_letter(self.student_data.iloc[s, index])] +
                      self.student_data.iloc[s, 8:].tolist()]
            body = {'values': values, 'majorDimension': 'rows'}
            try:
                result = service.spreadsheets().values().append(spreadsheetId=spreadsheet_id,
                                                                valueInputOption='RAW',
                                                                range='Sheet1!A1',
                                                                body=body).execute()
            except Exception as e:
                # Best-effort: report the failure and keep going with the rest.
                print('Not updated: {}'.format(self.student_data.loc[s]['Student Email']))
                print(e)
            else:
                print(values[0])  # Verify success.
                print('{} cells appended.'.format(result.get('updates').get('updatedCells')))

    def new_students(self):
        """Checks data for new students and sets up sheets for them."""
        print('Checking for new students...')
        # Creates list of existing students.
        existing_students = []
        with open('storage.csv', 'r') as storage:
            for row in csv.reader(storage):
                existing_students.append(row[0])
        # Reduces student_data DataFrame to new students only.
        new_data = self.student_data[~self.student_data['Student Email'].isin(existing_students)]
        new_students = Data(df=new_data)
        if len(new_students.student_data) > 0:
            print('New students found.')
            print(new_students.student_data)
            Setup(new_students)
        else:
            print('No new students found.')

    @staticmethod
    def get_letter(g):
        """Returns the letter equivalent of the current grade (None if >= 110)."""
        grade = int(g)
        letters = {range(90, 93): 'A-', range(93, 97): 'A', range(97, 110): 'A+',
                   range(80, 83): 'B-', range(83, 87): 'B', range(87, 90): 'B+',
                   range(70, 73): 'C-', range(73, 77): 'C', range(77, 80): 'C+',
                   # BUG FIX: D+ was range(68, 70), leaving grade 67 unmapped.
                   range(60, 63): 'D-', range(63, 67): 'D', range(67, 70): 'D+',
                   range(60): 'E'
                   }
        for scale, letter in letters.items():
            if grade in scale:
                return letter
        return None

    @staticmethod
    def remove_students():
        """Removes a student when they drop the course."""
        # TODO Remove sharing.
        # TODO Delete spreadsheet
        # TODO Delete student info from storage.csv
|
#! /usr/bin/env python
import time
import rospy
from std_msgs.msg import *
from geometry_msgs.msg import *
from mavros_msgs.msg import *
from mavros_msgs.srv import *
from geographic_msgs.msg import *
from trajectory_msgs.msg import *
from nav_msgs.msg import Odometry
import math
# Module-level state shared between the two subscriber callbacks:
# the latest pose (trans) and the latest commanded velocity (cmd_vel).
trans = Transform()
cmd_vel = Twist()
# Publisher for the combined pose+velocity trajectory message.
pub = rospy.Publisher('/hydrone_aerial_underwater/command/trajectory', MultiDOFJointTrajectory, queue_size=10)
def velocity_callback(data):
    """Store the most recent commanded Twist in the module-level cmd_vel."""
    global cmd_vel
    cmd_vel = data
def position_callback(data):
    """Odometry callback: mirror the current pose into a trajectory point and
    publish it together with the most recently commanded velocity."""
    #print(data.pose.pose.position.x)
    # Copy the odometry pose into the shared Transform.
    trans.translation.x = data.pose.pose.position.x
    trans.translation.y = data.pose.pose.position.y
    trans.translation.z = data.pose.pose.position.z
    trans.rotation.x = data.pose.pose.orientation.x
    trans.rotation.y = data.pose.pose.orientation.y
    trans.rotation.z = data.pose.pose.orientation.z
    trans.rotation.w = data.pose.pose.orientation.w
    # cmd_vel.linear.z = 0.1
    #cmd_vel.linear.x = 0.0
    #cmd_vel.angular.z = 0.5
    # Build a one-point MultiDOFJointTrajectory and publish it on every
    # odometry update.
    point = MultiDOFJointTrajectoryPoint()
    velocity = MultiDOFJointTrajectory()
    point.transforms.append(trans)
    point.velocities.append(cmd_vel)
    velocity.points.append(point)
    pub.publish(velocity)
if __name__ == "__main__":
    # Bridge node: odometry + cmd_vel in, trajectory command out.
    rospy.init_node("mission_planner_node", anonymous=False)
    rospy.Subscriber("/hydrone_aerial_underwater/ground_truth/odometry", Odometry, position_callback)
    rospy.Subscriber("/hydrone_aerial_underwater/cmd_vel", Twist, velocity_callback)
    rospy.spin()  # hand control to the ROS callback loop
# uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.2 (v3.6.2:5fd33b5, Jul 8 2017, 04:57:36) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: C:\Users\ZHANGDorisXStudent\Desktop\LightBlue_NLTK\src\ChatBotDesign\chatbot.py
# Compiled at: 2018-10-09 06:51:54
from memory import Memory
from understanding import *
import json
import time, csv
class Chatbot:
    """Pattern-association chatbot built on a long-term Memory and a per-turn
    WorkMemory.

    NOTE(review): this class was recovered from decompiled bytecode and the
    indentation here is reconstructed; control-flow ambiguities are flagged
    with NOTE(review) comments below.
    """

    def __init__(self):
        # Random session identifier for this bot instance.
        self.identity = generate_random_str()
        self.memory = Memory()
        self.workMemory = WorkMemory()
        # specialMode: 0 = normal, 1 = awaiting a definition for an unknown
        # word, 2 = previous turn was unanswerable ("Confused").
        self.specialMode = 0
        self.unknownWord = None
        self.state = 0
        self.lastInputSentence = ''

    #def reprJSON(self):
        #return dict(identity=self.identity, memory=self.memory, workMemory=self.workMemory, specialMode=self.specialMode, unknownWord=self.unknownWord, state=self.state, lastInputSentence=self.lastInputSentence)

    def copy(self, other):
        """Shallow-copy all conversational state from another Chatbot."""
        self.identity = other.identity
        self.memory = other.memory
        self.workMemory = other.workMemory
        self.specialMode = other.specialMode
        self.unknownWord = other.unknownWord
        self.state = other.state
        self.lastInputSentence = other.lastInputSentence

    def chat(self, inputSent, timeStamp):
        """Process one user utterance and return (reply, reply timestamp).

        Side effects: updates vocabulary/patterns in self.memory, appends a
        ChatItem to the chat history, and sets specialMode for the next turn.
        """
        write_memory = True
        mode = 'Retrieval'
        if self.specialMode == 1:
            # Previous turn asked the user to define an unknown word; treat
            # this input as the definition and retry the stored sentence.
            inputList = sentenceFilter(inputSent)
            oldPattern = conbineWords(inputList)
            self.memory.connectPatterns(self.unknownWord.index, oldPattern)
            inputSent = self.lastInputSentence
            self.workMemory.clean()
        if self.specialMode == 2:
            # Last turn was "Confused"; discard the stale activation state.
            self.workMemory.clean()
        #print("[may be change]: ", self.workMemory.action)
        if self.workMemory.action:
            if not self.specialMode:
                # Reinforce pattern->action connections from the last turn.
                self.memory.addConnections(self.workMemory.backward())
        self.lastInputSentence = inputSent
        inputList = sentenceFilter(inputSent)
        self.specialMode = 0
        print('[INPUT LIST]:', inputList)
        new_words = updateVocabulary(inputList, self.memory.vocabulary)
        print("[NEW WORDS]:", new_words)
        # Spread activation through known patterns; returns a word the bot
        # cannot ground, if any.
        self.unknownWord = self.workMemory.spikePatterns(inputList, self.memory.patterns, level=10)
        print("[UNKNOWNWORD]:", self.unknownWord)
        actionIndex = self.workMemory.getAction()
        print('[ACTIONINDEX]:', actionIndex)
        if actionIndex != None:
            for i in actionIndex:
                print('[POSSIBLE OUTPUT]', self.memory.actions[i])
        outputSent = ""
        if self.unknownWord:
            # Ask the user for a definition and retry next turn.
            self.specialMode = 1
            mode = 'Question'
            outputSent = 'What does' + self.unknownWord.index + 'mean?'
        else:
            if actionIndex == None or len(actionIndex) == 0:
                outputSent = "I don't know how to answer."
                self.specialMode = 2
                mode = 'Confused'
                print("pass1")
            else:
                if len(actionIndex) >= 1:  # always true here (len 0 handled above)
                    print("[CHATSTRENGTH]: ", self.workMemory.maxStrength)
                    if self.workMemory.maxStrength < 0.2:
                        print("[CHATSTRENGTH]: ", self.workMemory.maxStrength)
                        outputSent = "I don't know how to answer."
                        self.specialMode = 2
                        mode = 'Confused'
                    # NOTE(review): this unconditionally overwrites the
                    # low-strength "I don't know" reply set just above --
                    # looks like a bug or a decompilation artifact; confirm
                    # against the original source before changing.
                    outputSent = self.memory.actions[actionIndex[-1]]
                    # print("[hhda]: ", actionIndex[-1], " ", outputSent)
                    self.memory.action = actionIndex[-1]
                    # print("[mmda]: ", actionIndex[-1], " ", self.memory.action)
        outputTime = time.strftime('%d %b %H:%M:%S', time.localtime())
        #print("[may be change]: ", self.workMemory.action)
        if write_memory:
            self.memory.addHistory(ChatItem(inputSent, timeStamp, outputSent, outputTime, mode))
            print("pass2")
        #print("[may be change]: ", self.workMemory.action)
        # print(outputSent)
        return (outputSent, outputTime)

    def change(self, inputSent):
        """Replace the bot's last reply with a user-supplied correction,
        registering it as a (possibly new) action."""
        index = 0
        inputSent_l = inputSent.lower()
        for i in range(len(self.memory.actions)):
            if self.memory.actions[i].lower() == inputSent_l:
                index = i
                self.memory.actions[i] = inputSent
                break
        # NOTE(review): a match at position 0 leaves index == 0, so the
        # sentence is appended again as a duplicate action -- likely a bug.
        if not index:
            index = len(self.memory.actions)
            self.memory.actions.append(inputSent)
        self.workMemory.action = index
        self.memory.changeHistory(inputSent)
        self.specialMode = 0

    def like(self):
        """Positive feedback: reinforce the last reply's connections and mark
        the history entry as liked."""
        if self.workMemory.action:
            self.memory.addConnections(self.workMemory.backward(clean=False))
        self.memory.likeHistory()

    def currentDialogs(self):
        """Return up to the last 20 exchanges as [input, in-time, output, out-time] rows."""
        dialog = []
        l = len(self.memory.chatHistory)
        index = max(l - 20, 0)
        for item in self.memory.chatHistory[index:]:
            dialog.append([item.inputSent, item.inputTime, item.outputSent, item.outputTime])
        return dialog

    def getVocabulary(self):
        """Return the vocabulary as a list of (word, count-or-entry) tuples."""
        voca = []
        for word in self.memory.vocabulary:
            voca.append((word, self.memory.vocabulary[word]))
        return voca

    def getStatus(self):
        """Return (vocab size, pattern count, action count, connections)."""
        return (
            len(self.memory.vocabulary), len(self.memory.patterns), len(self.memory.actions), self.memory.connections)

    def analysis(self):
        """Explain, in a sentence, which patterns drove the last reply."""
        if not self.workMemory.action:
            return "I don't know at all the meaning of this sentence."
        else:
            patterns = self.workMemory.analysis()
            reason = 'Based on past experience, I think this is a suitable reply for the sentence containing:'
            for pattern in patterns:
                reason += ' '
                reason += "'"
                reason += pattern
                reason += "'"
            reason += '.'
            return reason

    def import_file(self, filename):
        """Train from a file of alternating question/answer lines (capped at
        ~10000 pairs); questions whose reply already matches are logged to
        similar.txt."""
        correct = 0
        f = open(filename)
        fi = open("similar.txt", "w")
        # NOTE: shadows the builtin `input` for the rest of this method.
        input = f.readline().strip()
        output = f.readline().strip()
        count = 0
        while input:
            if output:
                if count < 10001:
                    input = sentencePreprocessing(input)
                    output = sentencePreprocessing(output)
                    print("[Question]: " + input)
                    print("[Answer]: " + output)
                    timeStamp = time.strftime('%d %b %H:%M:%S', time.localtime())
                    answer, _ = self.chat(input, timeStamp)
                    if output == answer:
                        correct = correct + 1
                        fi.write(input + "\n")
                    # Teach the expected answer regardless.
                    self.change(output)
                    input = f.readline().strip()
                    output = f.readline().strip()
                    count += 1
                else:
                    print(count)
                    break
        f.close()
        fi.close()
        print("[CORRECT ANSWER]: ", correct)

    def export_history(self, filepath):
        """Dump chat history to CSV + TXT and the vocabulary to a second CSV.

        Any failure is swallowed and reported as 'Export failed.'"""
        try:
            csvout = open(filepath, 'w')
            txtout = open(filepath[:-3] + 'txt', 'w')
            csv_writer = csv.writer(csvout)
            temp = ['User', 'Chatbot', 'State', 'Liked']
            csv_writer.writerow(temp)
            for item in self.memory.chatHistory:
                csv_writer.writerow([item.inputSent, item.outputSent, item.mode, item.like])
                txtout.write(item.inputSent + '\n' + item.outputSent + '\n')
            csvout.close()
            txtout.close()
            vocout = open(filepath[:-4] + '(vocabulary)' + '.csv', 'w')
            csv_writer = csv.writer(vocout)
            csv_writer.writerow(['Word', 'Count'])
            for word in self.memory.vocabulary:
                csv_writer.writerow([word, self.memory.vocabulary[word]])
            vocout.close()
        except:
            # NOTE(review): bare except hides the real error; consider
            # logging the exception.
            print('Export failed.')
import pandas as pd
import numpy as np
def load_landsat_data(filename):
    '''
    Utility function to load the Landsat dataset.
    https://github.com/abarthakur/trepan_python/blob/master/run.py
    Landsat dataset : https://archive.ics.uci.edu/ml/datasets/Statlog+(Landsat+Satellite)
    num_classes = 7, but class 6 is empty.
    This function
    - Reads the space-separated data (no header; last column is the label)
    - Renames class 7 to 6
    - Generates one-hot vector labels
    Returns (features DataFrame, one-hot labels DataFrame).
    '''
    data = pd.read_csv(filename, sep=" ", header=None).values
    # Idiomatic slicing instead of fancy indexing with range()/arange().
    dataX = data[:, :-1]          # all feature columns
    labels = data[:, -1]          # raw class labels in {1..5, 7}
    num_classes = 6
    # Re-map labels to 0-based indices: class 7 -> 5 (class 6 is empty),
    # classes 1..5 -> 0..4.
    dataY = np.where(labels == 7, 5, labels - 1).astype(int)
    dataY_onehot = np.zeros([dataY.shape[0], num_classes])
    dataY_onehot[np.arange(dataY_onehot.shape[0]), dataY] = 1
    return pd.DataFrame(dataX), pd.DataFrame(dataY_onehot)
import collections
import os
import pickle
import shutil
import numpy
from pwdmodels.semantic_word2vec_optimal import SemanticModel, Struct
def combine_semantic_to_word2vec(semantic_model_dir, word2vec_model_dir, combine_model_dir):
    """Merge a semantic model's nonterminal word lists into a word2vec model's
    grammar, writing the combined grammar.pickle into combine_model_dir.

    Steps: copy the word2vec model's auxiliary files, count each clustered
    word's frequency in seg.txt, assign every semantic nonterminal file to the
    word2vec class it overlaps most, and fold its words (with their counts)
    into that class's grammar entry.
    """
    word2vec_cluster = {}
    # The combined model reuses the word2vec model's segmentation/structure/cluster files.
    shutil.copy(os.path.join(word2vec_model_dir, "seg.txt"), os.path.join(combine_model_dir, "seg.txt"))
    shutil.copy(os.path.join(word2vec_model_dir, "struct.pickle"), os.path.join(combine_model_dir, "struct.pickle"))
    shutil.copy(os.path.join(word2vec_model_dir, "cluster.txt"), os.path.join(combine_model_dir, "cluster.txt"))
    grammar_dict, _ = SemanticModel("", word2vec_model_dir, init=False).load_pickle()
    # cluster.txt: "<word> <class-id>" per line.
    with open(os.path.join(word2vec_model_dir, "cluster.txt"), "r") as fin_cluster:
        for line in fin_cluster:
            line = line.strip("\r\n")
            word, str_class = line.split(" ")
            word2vec_cluster[word] = {"class_id": int(str_class), "num": 0}
    # Count occurrences of each clustered word in the segmented corpus.
    with open(os.path.join(word2vec_model_dir, "seg.txt"), "r") as fin_seg:
        for line in fin_seg:
            line = line.strip("\r\n")
            words = line.split(" ")
            for word in words:
                if word in word2vec_cluster:
                    word2vec_cluster[word]["num"] += 1
    # One file per semantic nonterminal: "<word>\t<prob>" lines.
    for root, dirs, files in os.walk(os.path.join(semantic_model_dir, "nonterminals")):
        for file in files:
            overlap_with = collections.defaultdict(int)
            path = os.path.join(root, file)
            fin = open(path, "r")
            words = []
            for line in fin:
                word, prob = line.split("\t")
                words.append(word)
                # find one and break
                # NOTE(review): despite the comment above, there is no break --
                # every overlapping word is counted; confirm which was intended.
                if word in word2vec_cluster:
                    overlap_with[word2vec_cluster[word]["class_id"]] += 1
            fin.close()
            if len(overlap_with) == 0:
                continue
            # Assign this nonterminal to the most-overlapping word2vec class.
            class_id = max(overlap_with, key=overlap_with.get)
            for word in words:
                grammars, _, _ = grammar_dict[(Struct.letter, class_id)]
                if word not in grammars:
                    # NOTE(review): raises KeyError if `word` is absent from
                    # word2vec_cluster (possible, since words come from the
                    # semantic files) -- verify inputs guarantee membership.
                    grammars[word] = word2vec_cluster[word]["num"]
            # Rebuild the (dict, keys, cumulative-counts) triple for sampling.
            grammars, _, _ = grammar_dict[(Struct.letter, class_id)]
            grammar_dict[(Struct.letter, class_id)] = (
                grammars, list(grammars.keys()), numpy.array(list(grammars.values())).cumsum())
    fout_grammar = open(os.path.join(combine_model_dir, "grammar.pickle"), "wb")
    pickle.dump(grammar_dict, fout_grammar)
    fout_grammar.close()
    pass
# combine_semantic_to_word2vec(
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-semantic-01-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-word2vec-01-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-combine-01-255/semantic-to-word2vec")
# combine_word2vec_to_semantic(
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-semantic-14-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-word2vec-14-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-combine-14-255/word2vec-to-semantic"
# )
# combine_word2vec_to_semantic(
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-semantic-01-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-word2vec-01-255",
# "/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-combine-01-255/word2vec-to-semantic"
# )
# haha = SemanticModel("f",
# model_name="/home/cw/Codes/Python/Chaunecy/fudan-monte-carlo-pwd/models/rockyou-combine-14-255"
# "/semantic-to-word2vec",
# init=False)
# grammar_dict, struct_dict = haha.load_pickle()
# print(grammar_dict[(Struct.letter, 6)][0].get("craving"))
|
#-*- coding: utf-8 -*-
def printn(n):
    """Print every n-digit decimal string (with leading zeros), one per line.

    Does nothing for n <= 0. (BUG FIX: previously only n < 0 was rejected,
    so printn(0) crashed with an IndexError on the empty digit buffer.)
    """
    if n <= 0:
        return
    num = [0] * n
    for i in range(10):
        num[0] = str(i)   # fix the leading digit, recurse for the rest
        printrec(num)
def printrec(num, idx=0):
    """Recursively fill positions idx+1..end of `num` with every digit and
    print each completed combination."""
    last = len(num) - 1
    if idx == last:
        print(''.join(num))
        return
    for digit in '0123456789':
        num[idx + 1] = digit
        printrec(num, idx + 1)
printn(4)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_login import LoginManager, current_user
import flask_whooshalchemyplus
# Flask extension singletons, created unbound here and attached to the
# application inside create_app() (application-factory pattern).
login_manager = LoginManager()
login_manager.login_view = 'auth.login'  # endpoint to redirect to when login is required
login_manager.login_message_category = 'info'  # flash category for the login prompt
# expire_on_commit=False keeps ORM objects usable after the session commits.
db = SQLAlchemy(session_options={"expire_on_commit": False})
bootstrap = Bootstrap()
moment = Moment()
def create_app(config_name):
    """Application factory: build and configure a Flask app for the named
    config ('development', 'production', ...) and return it.

    Order matters: configuration is loaded before extensions are initialized,
    and blueprints are imported late to avoid circular imports.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind the module-level extension singletons to this app instance.
    login_manager.init_app(app)
    db.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    # NOTE(review): flask_whooshalchemyplus is imported at module level but
    # never initialized here -- confirm whether init_app was intended.
    if app.config['SSL_REDIRECT']:
        # Force HTTPS only when the config asks for it (e.g. production).
        from flask_sslify import SSLify
        sslify = SSLify(app)
    from .error import error as error_blueprint
    from .api import api as api_blueprint
    from .auth import auth as auth_blueprint
    from .main import main as main_blueprint
    app.register_blueprint(error_blueprint)
    app.register_blueprint(api_blueprint)
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    app.register_blueprint(main_blueprint)
    return app  # app instance
from app import models
|
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.urls import reverse, reverse_lazy
from .models import Blog, Comment
from .forms import CommentForm
class BlogListView(ListView):
    """Paginated index page listing all blog posts."""
    model = Blog
    template_name = 'blog/index.html'
    context_object_name = 'blogs'
    paginate_by = 5
class BlogDetailView(DetailView):
    """Detail page for a single blog post."""
    model = Blog
    template_name = 'blog/detail.html'
    context_object_name = 'blog'
class BloggerListView(ListView):
    """List every user allowed to author blogs."""
    template_name = 'blog/bloggers.html'
    context_object_name = 'bloggers'

    def get_queryset(self):
        """Users holding the blogging permission, plus superusers, deduplicated."""
        can_blog = Permission.objects.get(name='create update and delete blogs')
        authors = get_user_model().objects.filter(
            Q(user_permissions=can_blog) | Q(is_superuser=True)
        )
        return authors.distinct()
class BloggerDetailView(DetailView):
    """Profile page for a single blogger."""
    template_name = 'blog/blogger.html'
    context_object_name = 'blogger'

    def get_queryset(self):
        """Restrict the lookup to the requested pk among users who may blog."""
        can_blog = Permission.objects.get(name='create update and delete blogs')
        return get_user_model().objects.filter(
            Q(user_permissions=can_blog) | Q(is_superuser=True),
            pk=self.kwargs['pk'],
        )
class CreateCommentView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
    """Let an authenticated user comment on the blog identified by slug."""
    model = Comment
    form_class = CommentForm
    template_name = 'blog/comment_form.html'
    success_message = 'The comment has been created.'

    def get_success_url(self):
        """Return to the commented blog's detail page."""
        return reverse('blog:detail', args=[self.kwargs['slug']])

    def form_valid(self, form):
        """Attach the current user and the target blog before saving."""
        form.instance.user = self.request.user
        form.instance.blog = Blog.objects.get(slug=str(self.kwargs['slug']))
        return super().form_valid(form)
class UpdateCommentView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Allow a comment's author (and only the author) to edit it."""
    model = Comment
    template_name = 'blog/comment_form.html'
    form_class = CommentForm
    # BUG FIX: the message previously said 'The blog has been updated.'
    # although this view updates a comment.
    success_message = 'The comment has been updated.'

    def get_success_url(self):
        """Redirect back to the blog the comment belongs to."""
        comment = self.get_object()
        return comment.blog.get_absolute_url()

    def form_valid(self, form):
        """Re-attach the author and parent blog so they cannot be changed."""
        comment = self.get_object()
        form.instance.user = self.request.user
        form.instance.blog = comment.blog
        return super().form_valid(form)

    def dispatch(self, request, *args, **kwargs):
        """Reject any user other than the comment's author."""
        obj = self.get_object()
        if obj.user != self.request.user:
            raise PermissionDenied
        return super().dispatch(request, *args, **kwargs)
class DeleteCommentView(LoginRequiredMixin, DeleteView):
    """Allow a comment's author (and only the author) to delete it."""
    model = Comment
    template_name = 'blog/delete.html'

    def get_success_url(self):
        """Redirect back to the blog the comment belonged to."""
        comment = self.get_object()
        return comment.blog.get_absolute_url()

    def dispatch(self, request, *args, **kwargs):
        """Reject any user other than the comment's author."""
        obj = self.get_object()
        if obj.user != self.request.user:
            raise PermissionDenied
        return super().dispatch(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        """Flash a warning on deletion. (Fixed: the message was a needless
        f-string with no placeholders.)"""
        message = "The comment has been deleted."
        messages.warning(self.request, message)
        return super().delete(request, *args, **kwargs)
import collections, gzip, time
import numpy as np
import tensorflow as tf
import utils
import sys
OPTIMIZERS = ['sgd', 'adam', 'sgd_momentum']
class MediumConfig(object):
    """Medium config.

    Hyperparameters for the PTBModel LSTM language model below.
    """
    init_scale = 0.05     # uniform-init range for weights
    learning_rate = 0.25  # initial learning rate
    max_grad_norm = 20    # global-norm gradient clipping threshold
    num_layers = 3        # stacked LSTM layers
    num_steps = 50        # truncated-BPTT unroll length
    hidden_size = 1500    # LSTM hidden/embedding size
    max_epoch = 14        # epochs before LR decay kicks in
    max_max_epoch = 50    # total training epochs
    keep_prob = 0.3       # dropout keep probability during training
    # correction: for wsj model, we use 0.9.
    lr_decay = 0.9        # multiplicative LR decay per epoch after max_epoch
    batch_size = 20
    # If True, normalize the batch loss by token count instead of batch size.
    downscale_loss_by_num_steps = False
    optimizer = 'sgd'     # one of OPTIMIZERS
class PTBModel(object):
    """LSTM language model (legacy TF 0.x graph API).

    Builds the unrolled LSTM graph, a weighted per-position cross-entropy
    loss, and (when training) a gradient-clipped update op.
    """

    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size
        # Placeholders: input/target token ids plus a per-position loss mask.
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._weights = tf.placeholder(tf.float32, [batch_size, num_steps])
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=1.0,
                                                 state_is_tuple=True)
        # Dropout only while training.
        if is_training and config.keep_prob < 1:
            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
                lstm_cell, output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers,
                                           state_is_tuple=True)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        # Embedding lookup pinned to CPU.
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Unstack to a per-timestep list as required by tf.nn.rnn
        # (old tf.split signature: split dim first).
        inputs = [tf.squeeze(input_, [1])
                  for input_ in tf.split(1, num_steps, inputs)]
        self._inputs = inputs
        outputs, state = tf.nn.rnn(cell, inputs, initial_state=self._initial_state)
        self._outputs = outputs
        self._state = state
        # Flatten to (batch*steps, size) for the softmax projection.
        output = tf.reshape(tf.concat(1, outputs), [-1, size])
        self._output = output
        softmax_w = tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        self._logits = logits
        # Weighted per-position cross-entropy (vector of length batch*steps).
        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.reshape(self._weights, [-1])])
        self._loss = loss
        self._log_probs = tf.nn.log_softmax(logits)
        if config.downscale_loss_by_num_steps:
            print("batch loss will be normalized by number of tokens")
            cost = tf.reduce_sum(loss) / tf.reduce_sum(self._weights)
        else:
            print("batch loss will be normalized by batch size")
            cost = tf.reduce_sum(loss) / batch_size
        # NOTE(review): the scalar `cost` above is used only for gradients;
        # the `cost` property exposes the raw per-position loss vector, which
        # the run_epoch* helpers sum/reshape themselves. Confirm intentional.
        self._cost = loss
        self._final_state = state
        self.downscale_loss_by_num_steps = config.downscale_loss_by_num_steps
        if not is_training:
            return
        # Training ops: clipped gradients under the configured optimizer.
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        if config.optimizer == 'sgd':
            optimizer = tf.train.GradientDescentOptimizer(self.lr)
        elif config.optimizer == 'adam':
            optimizer = tf.train.AdamOptimizer(self.lr)
        elif config.optimizer == 'sgd_momentum':
            optimizer = tf.train.MomentumOptimizer(self.lr, 0.9)
        else:
            raise ValueError("invalid optimizer %s" % config.optimizer)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable within the given session."""
        session.run(tf.assign(self.lr, lr_value))

    # Read-only accessors for graph tensors.
    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def weights(self):
        return self._weights

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        # Per-position loss vector (see NOTE in __init__).
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def log_probs(self):
        return self._log_probs

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
def _build_vocab(filename):
    """Build a word -> id map for the file's tokens, ids assigned by
    descending frequency (ties broken alphabetically)."""
    tokens = _read_words(filename)
    freq = collections.Counter(tokens)
    ranked = sorted(freq.items(), key=lambda kv: (-kv[1], kv[0]))
    return {word: rank for rank, (word, _) in enumerate(ranked)}
def _read_words(filename):
    """Tokenize the file on whitespace, mapping newlines to '<eos>'."""
    with open_file(filename) as f:
        text = f.read()
    return text.replace('\n', '<eos>').split()
def chop(data, eos, prepend_eos=False):
    """Split a flat token sequence into per-sentence lists at each `eos`.

    Each sentence ends with its eos token; with prepend_eos=True it also
    starts with one. A trailing partial sentence (no closing eos) is dropped.
    """
    sentences = []
    current = [eos] if prepend_eos else []
    for token in data:
        current.append(token)
        if token == eos:
            sentences.append(current)
            current = [eos] if prepend_eos else []
    return sentences
def open_file(path):
    """Open `path` for reading text, transparently decompressing .gz files.

    BUG FIX: gzip files were opened in binary mode ('rb'), so callers such as
    _read_words that do str operations on the result crashed on bytes; open
    in text mode ('rt') to match the plain-file branch.
    """
    if path.endswith('.gz'):
        return gzip.open(path, 'rt')
    else:
        return open(path, 'r')
# iterator used for nbest data.
def ptb_iterator2(raw_data, batch_size, num_steps, idx2tree, eos):
dummy1 = 0
dummy2 = (-1, -1)
remainder = len(raw_data) % batch_size
if remainder != 0:
raw_data = raw_data + [dummy1 for x in range(batch_size - remainder)]
idx2tree = idx2tree + [dummy2 for x in range(batch_size - remainder)]
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
remainder = (data_len // batch_size) % num_steps
data = np.zeros([batch_size, batch_len + num_steps - remainder + 1],
dtype=np.int32)
for i in range(batch_size):
data[i, 1:batch_len+1] = raw_data[batch_len * i:batch_len * (i + 1)]
if i == 0:
data[i, 0] = eos
else:
# TODO: should be batch_len*i - 1
data[i, 0] = raw_data[batch_len - 1]
idx2tree = np.array(idx2tree, dtype=np.dtype('int, int'))
tree = np.zeros([batch_size, batch_len + num_steps - remainder],
dtype=np.dtype('int, int'))
for i in range(batch_size):
tree[i, :batch_len] = idx2tree[batch_len * i:batch_len * (i + 1)]
tree[i, batch_len:] = [dummy2 for x in range(num_steps - remainder)]
epoch_size = (batch_len + num_steps - remainder) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, i*num_steps+1:(i+1)*num_steps+1]
z = tree[:, i*num_steps:(i+1)*num_steps]
yield (x, y, z)
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = []
for c, h in m.initial_state: # initial_state: ((c1, m1), (c2, m2))
state.append((c.eval(), h.eval()))
weights = np.ones((m.batch_size, m.num_steps), dtype=np.float32)
for step, (x, y) in enumerate(ptb_iterator(data, m.batch_size,
m.num_steps)):
fetches = []
fetches.append(m.cost)
fetches.append(eval_op)
for c, h in m.final_state: # final_state: ((c1, m1), (c2, m2))
fetches.append(c)
fetches.append(h)
feed_dict = {}
feed_dict[m.input_data] = x
feed_dict[m.targets] = y
feed_dict[m.weights] = weights
for i, (c, h) in enumerate(m.initial_state):
feed_dict[c], feed_dict[h] = state[i]
res = session.run(fetches, feed_dict)
cost = res[0]
state_flat = res[2:] # [c1, m1, c2, m2]
state = [state_flat[i:i+2] for i in range(0, len(state_flat), 2)]
loss = np.sum(cost) / m.batch_size
costs += loss
if m.downscale_loss_by_num_steps:
loss /= m.num_steps
print("loss: %s" % loss)
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def run_epoch_separate_batched(session, model, data, eval_op, eos_index, verbose=False):
    """Runs the model on the given data.

    Unlike run_epoch, sentences (trees) are batched separately: the LSTM
    state is reset at each batch of trees instead of being threaded through
    the whole corpus. Returns the per-token perplexity.
    """
    costs = 0.0
    iters = 0
    trees_list = chop(data, eos_index, prepend_eos=True)
    epoch_size = len(trees_list) // model.batch_size
    start_time = time.time()
    for step, xyms in enumerate(utils.separate_trees_iterator(trees_list, eos_index, model.batch_size, model.num_steps)):
        # Fresh state for each batch of trees.
        state = []
        for c, h in model.initial_state:  # initial_state: ((c1, m1), (c2, m2))
            state.append((c.eval(), h.eval()))
        # A single batch can span several num_steps windows; `m` masks padding.
        for x, y, m in xyms:
            fetches = []
            fetches.append(model.cost)
            fetches.append(eval_op)
            for c, h in model.final_state:  # final_state: ((c1, m1), (c2, m2))
                fetches.append(c)
                fetches.append(h)
            feed_dict = {}
            feed_dict[model.input_data] = x
            feed_dict[model.targets] = y
            feed_dict[model.weights] = m
            for i, (c, h) in enumerate(model.initial_state):
                feed_dict[c], feed_dict[h] = state[i]
            res = session.run(fetches, feed_dict)
            cost = res[0]
            state_flat = res[2:]  # [c1, m1, c2, m2]
            state = [state_flat[i:i+2] for i in range(0, len(state_flat), 2)]
            # for a, b, c in zip(x, m, cost.reshape(model.batch_size, model.num_steps)):
            #     print("x", a)
            #     print("m", b)
            #     print("c", c)
            #     print
            #     print
            costs += np.sum(cost)
            iters += np.sum(m)  # count only unmasked (real) tokens
        # Sanity check: mask-weighted token count matches the trees consumed.
        num_tokens = sum(len(l) - 1 for l in trees_list[:(step+1) * model.batch_size])
        assert(num_tokens == iters)
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters / (time.time() - start_time)))
    # print("total steps", iters)
    return np.exp(costs / iters)
def run_epoch2(session, m, nbest, eval_op, eos, verbose=False):
    """Runs the model on the given data.

    Rescoring pass over nbest parses: accumulates a language-model loss per
    parse, picks the lowest-loss parse for each sentence, and returns
    (F1 in percent, number of fully-scored sentences).
    """
    # counts[i][j] tracks how many positions of sentence i's parse j remain
    # unscored; loss[i][j] accumulates that parse's total loss.
    counts = []
    loss = []
    prev = (-1, -1)
    for pair in nbest['idx2tree']:
        if pair[0] != prev[0]:
            # New sentence begins.
            counts.append([0])
            loss.append([0.])
        elif pair[1] == prev[1] + 1:
            # Next parse of the same sentence.
            counts[-1].append(0)
            loss[-1].append(0.)
        counts[-1][-1] += 1
        prev = pair
    data = nbest['data']
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = []
    weights = np.ones((m.batch_size, m.num_steps), dtype=np.float32)
    for c, h in m.initial_state:  # initial_state: ((c1, m1), (c2, m2))
        state.append((c.eval(), h.eval()))
    for step, (x, y, z) in enumerate(
            ptb_iterator2(data, m.batch_size, m.num_steps,
                          nbest['idx2tree'], eos)):
        fetches = []
        fetches.append(m.cost)
        fetches.append(eval_op)
        for c, h in m.final_state:  # final_state: ((c1, m1), (c2, m2))
            fetches.append(c)
            fetches.append(h)
        feed_dict = {}
        feed_dict[m.input_data] = x
        feed_dict[m.targets] = y
        feed_dict[m.weights] = weights
        for i, (c, h) in enumerate(m.initial_state):
            feed_dict[c], feed_dict[h] = state[i]
        res = session.run(fetches, feed_dict)
        cost = res[0]
        state_flat = res[2:]  # [c1, m1, c2, m2]
        state = [state_flat[i:i+2] for i in range(0, len(state_flat), 2)]
        costs += np.sum(cost) / m.batch_size
        iters += m.num_steps
        # Attribute each position's loss back to its (sentence, parse) pair.
        cost = cost.reshape((m.batch_size, m.num_steps))
        for idx, val in np.ndenumerate(cost):
            tree_idx = z[idx[0]][idx[1]]
            if tree_idx[0] == -1:  # dummy (padding) position
                continue
            counts[tree_idx[0]][tree_idx[1]] -= 1
            loss[tree_idx[0]][tree_idx[1]] += cost[idx[0]][idx[1]]
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    # Pick, per sentence, the parse with the lowest accumulated loss;
    # sentences with unscored positions (counts != 0) are reported as bad.
    scores = nbest['scores']
    num = 0
    gold, test, matched = 0, 0, 0
    bad = []
    for i in range(len(scores)):
        good = True
        ag = 0
        min_val = 10000000
        for j in range(len(scores[i])):
            if counts[i][j] != 0:
                bad.append(i)
                good = False
                break
            if loss[i][j] < min_val:
                min_val = loss[i][j]
                ag = j
        if good:
            num += 1
            gold += scores[i][ag]['gold']
            test += scores[i][ag]['test']
            matched += scores[i][ag]['matched']
    if bad:
        print('bad: %s' % ', '.join([str(x) for x in bad]))
    # 200 = 2 * 100: F1 = 2PR/(P+R) expressed directly in percent.
    return 200. * matched / (gold + test), num
def run_epoch2_separate_batched(session, m, nbest, eval_op, eos, verbose=False):
    """Runs the model on the given data.

    Scores each nbest parse with separate batching, picks the lowest-loss
    parse per sentence, and returns (F1 in percent, number of sentences).

    FIX: the docstring now precedes the `import score` statement, so it is
    actually bound to __doc__ (a string literal after another statement is
    just a no-op expression).
    """
    import score  # local import -- presumably avoids a circular dependency; TODO confirm
    data = nbest['data']
    scores = nbest['scores']
    split_nbest = chop(data, eos, prepend_eos=True)
    assert(len(split_nbest) == sum(len(s) for s in scores))
    losses = score.score_trees_separate_batching(session, m, split_nbest, eval_op, eos)
    assert(len(split_nbest) == len(losses))
    # Regroup the flat per-parse losses back into per-sentence lists.
    unflattened_losses = []
    counter = 0
    for sc in scores:
        next_counter = counter + len(sc)
        unflattened_losses.append(losses[counter:next_counter])
        counter = next_counter
    assert(len(unflattened_losses) == len(scores))
    num = len(unflattened_losses)
    gold, test, matched = 0, 0, 0
    for l, sc in zip(unflattened_losses, scores):
        # The parse with the lowest language-model loss wins.
        best_loss, best_score = min(zip(l, sc), key=lambda p: p[0])
        gold += best_score['gold']
        test += best_score['test']
        matched += best_score['matched']
    return 200. * matched / (gold + test), num
def unkify(ws):
    """Map an out-of-vocabulary word to a shape-based unknown token.

    Encodes capitalization ('c' prefix), numeric shape ('n'), and one of a
    fixed, order-sensitive list of affixes, e.g. 'Running' -> '<cunking>'.
    """
    uk = 'unk'
    sz = len(ws) - 1
    if ws[0].isupper():
        uk = 'c' + uk  # capitalized
    if ws[0].isdigit() and ws[sz].isdigit():
        uk = uk + 'n'  # starts and ends with a digit: numeric-like
    elif sz <= 2:
        pass  # too short for suffix cues
    elif ws.endswith('ing'):
        uk = uk + 'ing'
    elif ws.endswith('ed'):
        uk = uk + 'ed'
    elif ws.endswith('ly'):
        uk = uk + 'ly'
    elif ws.endswith('s'):
        uk = uk + 's'
    elif ws.endswith('est'):
        uk = uk + 'est'
    elif ws.endswith('er'):
        uk = uk + 'ER'
    elif ws.endswith('ion'):
        uk = uk + 'ion'
    elif ws.endswith('ory'):
        uk = uk + 'ory'
    elif ws.startswith('un'):
        uk = 'un' + uk
    elif ws.endswith('al'):
        uk = uk + 'al'
    else:
        # First internal '-' or '.' (last character excluded) wins.
        for ch in ws[:sz]:
            if ch == '-':
                uk = uk + '-'
                break
            elif ch == '.':
                uk = uk + '.'
                break
    return '<' + uk + '>'
def convert_to_ptb_format(id_to_token, indices, gold_tokens=None, gold_tags=None):
    """Convert a sequence of vocabulary ids into PTB-style bracket tokens.

    Leading/trailing '<eos>' markers are stripped. '(NT' tokens open a
    nonterminal, ')NT' tokens close the matching one, and every terminal is
    wrapped as '(TAG word)', using gold tokens/tags when supplied (tag
    defaults to 'XX').
    """
    ids = list(indices)
    if id_to_token[ids[0]] == '<eos>':
        ids.pop(0)
    if id_to_token[ids[-1]] == '<eos>':
        ids.pop()
    output = []
    open_nts = []       # stack of currently open nonterminal labels
    terminal_count = 0  # index into gold_tokens / gold_tags
    for ident in ids:
        tok = id_to_token[ident]
        if tok.startswith('('):
            open_nts.append(tok[1:])
            output.append(tok)
        elif tok.startswith(')'):
            # closing label must match the innermost open nonterminal
            assert(tok[1:] == open_nts[-1])
            open_nts.pop()
            output.append(')')
        else:
            # create pos tags above the terminal
            word = gold_tokens[terminal_count] if gold_tokens is not None else tok
            tag = gold_tags[terminal_count] if gold_tags is not None else "XX"
            output.append("(%s %s)" % (tag, word))
            terminal_count += 1
    assert(not open_nts)
    if gold_tokens:
        assert(terminal_count == len(gold_tokens))
    if gold_tags:
        assert(terminal_count == len(gold_tags))
    return output
def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield (inputs, targets) minibatches over the flat token stream.

    The stream is cut into `batch_size` contiguous rows; each yielded pair is
    two (batch_size, num_steps) windows where targets are inputs shifted by
    one position.
    """
    raw_data = np.array(raw_data, dtype=np.int32)
    batch_len = len(raw_data) // batch_size
    # each row of `data` is one contiguous slice of the stream (trailing
    # tokens that do not fill a full row are dropped)
    data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for step in range(epoch_size):
        lo = step * num_steps
        x = data[:, lo:lo + num_steps]
        y = data[:, lo + 1:lo + num_steps + 1]
        yield (x, y)
def separate_trees_iterator(separate_trees, eos_index, batch_size, num_steps):
    # given a list of lists of token indices (one list per parse), return an iterator over lists
    # (x, y, m), where x is inputs, y is targets, m is a mask, and all are dim (batch_size, num_steps)
    # we return lists of (x, y, m) in case some sentence within that batch is longer than num_steps
    # in that case, the hidden states should be passed between the lstm application to each tuple in the list
    # every parse must be bracketed by <eos> at both ends
    for tree in separate_trees:
        assert(tree[0] == eos_index)
        assert(tree[-1] == eos_index)
    for sent_offset in range(0, len(separate_trees), batch_size):
        batch = separate_trees[sent_offset:sent_offset+batch_size]
        # pad the final, short batch with empty sequences so shapes stay fixed
        if len(batch) < batch_size:
            batch += [[]] * (batch_size - len(batch))
        assert(len(batch) == batch_size)
        # get the smallest multiple of num_steps which is at least the length of the longest sentence minus one (since we will zip source and targets)
        width = ((max(len(x) - 1 for x in batch) + num_steps - 1) // num_steps) * num_steps
        # pad sequences; `padded` keeps one extra column so x and y can be read
        # as one-step-shifted views of the same array
        mask = np.zeros((batch_size, width), dtype=np.float32)
        padded = np.zeros((batch_size, width + 1), dtype=np.int32)
        for row, tree in enumerate(batch):
            # NOTE(review): for an empty padding row len(tree)-1 == -1, so
            # mask[row,:-1] = 1 marks nearly the whole row as valid — looks
            # unintended; confirm how downstream code treats padded rows.
            mask[row,:len(tree)-1] = 1
            padded[row,:len(tree)] = tree
        all_xym = []
        for j in range(0, width, num_steps):
            x = padded[:,j:j+num_steps]
            y = padded[:,j+1:j+num_steps+1]
            m = mask[:,j:j+num_steps]
            assert(x.shape == y.shape)
            assert(m.shape == y.shape)
            assert(m.shape == (batch_size, num_steps))
            all_xym.append((x,y,m))
        yield all_xym
|
#!/usr/bin/env python
# Emit a combined download list: every ESEA video plus a fixed-size random
# sample of the full video list. Each output line is "<source>#<dest-prefix>".
import sys, os, random
file_dir="video_list"
random.seed(0)  # fixed seed so the sampled subset is reproducible across runs
with open(file_dir + "/video_esea_2019-0409.txt") as f:
    for line in f:
        line = line.strip()
        if line:  # skip blank lines
            print(line + "#video_downsample/esea/")
MAX_ALL_QTY=5000  # cap on how many videos are taken from the full list
arr = []
with open(file_dir + "/video_all_2019-0409.txt") as f:
    for line in f:
        line = line.strip()
        if line:
            arr.append(line)
# shuffle then truncate == uniform random sample without replacement
random.shuffle(arr)
for line in arr[0:MAX_ALL_QTY]:
    print(line + "#video_downsample/all/")
|
from flask import Flask, request
from processing import calculate
# Application instance for the ASB loan calculator.
app = Flask(__name__)
app.config["DEBUG"] = True  # NOTE(review): debug mode — disable for production
@app.route("/", methods=["GET", "POST"])
def adder_page():
    """Render the loan-calculator form; on POST, validate the five inputs and
    show the maturity value computed by processing.calculate().

    Fields number1-number3 (amount, return %, interest %) are parsed as
    floats; number4-number5 (tenure and termination years) as ints. Every
    invalid field appends an error paragraph; the result page is returned
    only when all five values parse successfully.
    """
    errors = ""
    if request.method == "POST":
        values = []
        # was five copy-pasted try blocks with bare `except:`; a failed
        # numeric conversion is the only error this message describes
        for field, convert in (("number1", float), ("number2", float),
                               ("number3", float), ("number4", int),
                               ("number5", int)):
            try:
                values.append(convert(request.form[field]))
            except ValueError:
                errors += "<p>{!r} is not a number.</p>\n".format(request.form[field])
                values.append(None)
        if all(v is not None for v in values):
            balance = calculate(*values)
            return '''
                <html>
                    <body>
                        <p>Maturity Value : RM {result}</p>
                        <p><a href="/">Click here to calculate again</a>
                    </body>
                </html>
            '''.format(result=balance)
    return '''
        <html>
            <body>
                {errors}
                <p>Enter your numbers:</p>
                <form method="post" action=".">
                    <label>Loan amount (ex : 200,000) = </label>
                    <input type="text" name="number1" /><br>
                    <label>Asb return (ex : 8%) = </label>
                    <input type="text" name="number2" /><br>
                    <label>Loan interest (ex : 4.5%) = </label>
                    <input type="text" name="number3" /><br>
                    <label>Loan tenure (ex : 30 years) = </label>
                    <input type="text" name="number4" /><br>
                    <label>Termination year (ex : 10 years) = </label>
                    <input type="text" name="number5" /><br>
                    <p><input type="submit" value="Do calculation" /></p>
                </form>
            </body>
        </html>
    '''.format(errors=errors)
import libreria
def pedir_habitacion():
    # Prompt for a name via the shared library, then record a matrimonial room.
    libreria.pedir_nombre("ingrese matrimonial")
    print("se agrega matrimonial")
def pedir_suite():
    # Record a suite room.
    print("se agrega suite")
def pedir_doble():
    # Record a double room.
    print("se agrega doble")
def pedir_presidencial():
    # Record a presidential room.
    print("se agrega presidencial")
def pedir_extra():
    # Prompt for a name via the shared library, then record a jacuzzi extra.
    libreria.pedir_nombre("ingrese jacussi")
    print("se agrego jacussi")
def pedir_cama_agua():
    # Record a water-bed extra.
    print("se agrego cama agua")
def pedir_mini_bar():
    # Record a mini-bar extra.
    print("se agrego mini_bar")
def agregar_habitacion():
    # Pause for user confirmation, then record a room addition.
    input("agregar habitacion")
    print("se agrega habitacion")
def agregar_extras():
    # NOTE(review): this definition is immediately shadowed by the
    # menu-driven agregar_extras defined right below it — dead code.
    input("agregar extra")
    print("agregar extras")
def agregar_extras():
    """Show the room-type menu in a loop and dispatch the chosen action.

    Exits when the user picks option 5 ("salir"). The original exited on 4
    (presidencial), contradicting the printed menu — fixed to match option 5.
    """
    opc = 0
    salir = 5  # menu option that ends the loop ("5.salir")
    while opc != salir:
        print("#### EXTRAS ####")
        print("#1.matrimonial #")
        print("#2.suite #")
        print("#3.doble #")
        print("#4.presidencial #")
        print("#5.salir #")
        print("####################")
        opc = libreria.pedir_numero("ingrese opcion:", 1, 5)
        # Bug fix: the original dispatched to agregar_matrimonial()/
        # agregar_suite()/agregar_doble()/agregar_presidencial(), none of
        # which exist (NameError at runtime); use the pedir_* handlers
        # defined above, whose messages match the menu entries.
        if opc == 1:
            pedir_habitacion()
        if opc == 2:
            pedir_suite()
        if opc == 3:
            pedir_doble()
        if opc == 4:
            pedir_presidencial()
# Main menu loop: offer rooms and extras until the user picks "salir".
opc = 0
salir = 3  # "3.salir" ends the program (renamed from `max`, which shadowed the builtin)
while opc != salir:
    print("##### HOTEL ######")
    print("#1.Habitacion #")
    print("#2.extras #")
    print("#3.salir #")
    # NOTE(review): the accepted range is 1..4 although only 1..3 are shown;
    # kept as in the original to avoid changing libreria's validation contract.
    opc = libreria.pedir_numero("ingrese opcion", 1, 4)
    if opc == 1:
        agregar_habitacion()
    if opc == 2:
        agregar_extras()
# Bug fix: the original line was `print(''fin del programa'')`, a SyntaxError
# (two empty string literals around a bare identifier).
print("fin del programa")
#fin_menu
|
import pytest
from django.urls import NoReverseMatch
from django.urls.base import reverse
from model_bakery import baker
from bpp.models import Autor, Jednostka, Praca_Doktorska, Praca_Habilitacyjna, Zrodlo
from bpp.models.cache import Autorzy, Rekord
from bpp.models.patent import Patent, Patent_Autor
from bpp.models.wydawnictwo_ciagle import Wydawnictwo_Ciagle, Wydawnictwo_Ciagle_Autor
from bpp.models.wydawnictwo_zwarte import Wydawnictwo_Zwarte, Wydawnictwo_Zwarte_Autor
@pytest.mark.parametrize(
    "klass",
    [
        Wydawnictwo_Ciagle,
        Wydawnictwo_Zwarte,
        Patent,
        Praca_Doktorska,
        Praca_Habilitacyjna,
    ],
)
def test_safe_html_dwa_tytuly_DwaTytuly(
    klass,
    admin_app,
    typy_odpowiedzialnosci,
):
    """Make sure HTML sanitization (bleach) runs for both DwaTytuly title fields."""
    i = baker.make(klass, rok=2020)
    # some models need a related Zrodlo / promotor before the admin form saves
    if hasattr(i, "zrodlo"):
        z = baker.make(Zrodlo)
        i.zrodlo = z
        i.save()
    if hasattr(i, "promotor"):
        p = baker.make(Autor)
        i.promotor = p
        i.save()
    url = reverse(f"admin:bpp_{klass._meta.model_name}_change", args=(i.pk,))
    page = admin_app.get(url)
    # submit a script tag; after save only the inner text should survive
    page.forms[1]["tytul_oryginalny"].value = "<script>hi</script>"
    if hasattr(i, "tytul"):
        page.forms[1]["tytul"].value = "<script>hi</script>"
    page.forms[1].submit()
    i.refresh_from_db()
    assert i.tytul_oryginalny == "hi"
    if hasattr(i, "tytul"):
        assert i.tytul == "hi"
@pytest.mark.parametrize(
    "klass,autor_klass,name,url",
    [
        (
            Wydawnictwo_Ciagle,
            Wydawnictwo_Ciagle_Autor,
            "wydawnictwo_ciagle",
            "admin:bpp_wydawnictwo_ciagle_change",
        ),
        (
            Wydawnictwo_Zwarte,
            Wydawnictwo_Zwarte_Autor,
            "wydawnictwo_zwarte",
            "admin:bpp_wydawnictwo_zwarte_change",
        ),
        (Patent, Patent_Autor, "patent", "admin:bpp_patent_change"),
    ],
)
def test_zapisz_wydawnictwo_w_adminie(klass, autor_klass, name, url, admin_app):
    """Saving a record in the admin must persist an edited 'zapisany_jako' value."""
    # Wydawnictwo_Ciagle additionally requires a named Zrodlo
    if klass == Wydawnictwo_Ciagle:
        wc = baker.make(klass, zrodlo__nazwa="Kopara", rok=2020)
    else:
        wc = baker.make(klass, rok=2020)
    wca = baker.make(
        autor_klass,
        autor__imiona="Jan",
        autor__nazwisko="Kowalski",
        zapisany_jako="Jan Kowalski",
        rekord=wc,
    )
    url = reverse(url, args=(wc.pk,))
    res = admin_app.get(url)
    form = res.forms[name + "_form"]
    ZMIENIONE = "J[an] Kowalski"
    # the select widget does not contain the new value; add it client-side
    form["autorzy_set-0-zapisany_jako"].options.append((ZMIENIONE, False, ZMIENIONE))
    form["autorzy_set-0-zapisany_jako"].value = ZMIENIONE
    res2 = form.submit().maybe_follow()
    assert res2.status_code == 200
    # neither the English nor the Polish form-error banner may appear
    assert "Please correct the error" not in res2.text
    assert "Proszę, popraw poniższe błędy." not in res2.text
    wca.refresh_from_db()
    assert wca.zapisany_jako == ZMIENIONE
    # clear denormalized cache tables so later tests start from a clean slate
    Rekord.objects.all().delete()
    Autorzy.objects.all().delete()
from django.apps import apps
@pytest.mark.parametrize("model", apps.get_models())
@pytest.mark.django_db
def test_widok_admina(admin_client, model):
    """Visit the admin 'changelist' and 'add' pages for every model of the
    'bpp' application that is registered in the admin and that admin_client
    has permission for.

    This catches admin field-name errors that Django does not detect before
    the application actually runs.
    """
    # for model in apps.get_models():
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    if app_label != "bpp":
        return
    url_name = f"admin:{app_label}_{model_name}_changelist"
    try:
        url = reverse(url_name)
    except NoReverseMatch:
        # model is not registered in the admin — nothing to check
        return
    res = admin_client.get(url)
    assert res.status_code == 200, "changelist failed for %r" % model
    # the search box must not crash either
    res = admin_client.get(url + "?q=fafa")
    assert res.status_code == 200, "changelist query failed for %r" % model
    MODELS_WITHOUT_ADD = [("bpp", "bppmultiseekvisibility")]
    if (app_label, model_name) in MODELS_WITHOUT_ADD:
        return
    url_name = f"admin:{app_label}_{model_name}_add"
    url = reverse(url_name)
    res = admin_client.get(url)
    assert res.status_code == 200, "add failed for %r" % model
@pytest.mark.django_db
def test_admin_jednostka_sortowanie(uczelnia, admin_client):
    """Jednostka changelist renders under both alphabetical and manual ordering."""
    url_name = reverse("admin:bpp_jednostka_changelist")
    baker.make(Jednostka)
    baker.make(Jednostka)
    baker.make(Jednostka)
    uczelnia.sortuj_jednostki_alfabetycznie = True
    uczelnia.save()
    assert admin_client.get(url_name).status_code == 200
    uczelnia.sortuj_jednostki_alfabetycznie = False
    uczelnia.save()
    assert admin_client.get(url_name).status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", ["wydawnictwo_zwarte", "wydawnictwo_ciagle"])
def test_admin_zewnetrzna_baza_danych(admin_client, url):
    """The add form must include the external-databases ("z zewn. bazami") section."""
    url_name = reverse(f"admin:bpp_{url}_add")
    res = admin_client.get(url_name)
    assert "z zewn. bazami" in res.content.decode("utf-8")
@pytest.mark.django_db
def test_BppTemplateAdmin_zapis_dobrej_templatki(admin_app):
    """A syntactically valid dbtemplate saves with the success message."""
    url = reverse("admin:dbtemplates_template_add")
    res = admin_app.get(url)
    res.forms["template_form"]["content"] = "dobry content"
    res.forms["template_form"]["name"] = "nazwa.html"
    res = res.forms["template_form"].submit().maybe_follow()
    # Django's Polish "was added successfully" admin message
    res.mustcontain("został(a)(-ło) dodany(-na)(-ne) pomyślnie")
@pytest.mark.django_db
def test_BppTemplateAdmin_zapis_zlej_templatki(admin_app):
    """A template with a broken tag (unclosed {%if %}) must be rejected."""
    url = reverse("admin:dbtemplates_template_add")
    res = admin_app.get(url)
    res.forms["template_form"]["content"] = "dobry content{%if koparka %}"
    res.forms["template_form"]["name"] = "nazwa.html"
    res = res.forms["template_form"].submit().maybe_follow()
    # Polish "error while trying to parse" validation message
    res.mustcontain("Błąd przy próbie analizy")
|
# 50명 승객과 매칭 기회, 총 탑승 승객 수를 구하는 프로그램 작성
# 조건1 : 승객별 운행 소요 시간 5~50 사이의 난수
# 조건2 : 소요시간 5~15분 사이의 승객만 매칭
# #출력문 예제
# [0] 1번째 손님 (소요시간 : 15분)
# [ ] 2번째 손님 (소요시간 : 50분)
# [0] 3번째 손님 (소요시간 : 5분)
# ...
# [ ] 50번재 손님 (소요시간 : 16분)
# #총 탑승 승객 : 2분
from random import *
cnt = 0  # total number of matched (boarded) passengers
for i in range(1, 51):  # passengers 1..50
    time = randrange(5, 51)  # ride duration: random 5..50 minutes
    if 5 <= time <= 15:  # only rides taking 5-15 minutes are matched
        print ("[0] {0}번째 손님 (소요시간 : {1}분)".format(i, time))
        cnt += 1
    else :  # no match — do not count this passenger
        print ("[ ] {0}번째 손님 (소요시간 : {1}분)".format(i, time))
# NOTE(review): the summary uses the "분" (minutes) suffix for a passenger
# count; this mirrors the example output in the spec comments above.
print("총 탑승 승객 : {0}분".format(cnt))
|
# Minimal interactive password check demo.
password = "pass"
password_input = input("Introduzca su contraseña: ")
# NOTE(review): the comparison is case-insensitive and against a hard-coded
# plaintext value — acceptable for a demo, never for real authentication.
if password_input.lower() == password.lower():
    print(":)")
else:
    print(":(")
|
class Solution(object):
    def isValidSudoku(self, board):
        """Return True if no row, column, or 3x3 sub-grid of the 9x9 board
        contains a duplicate digit; empty cells ('.') are ignored.

        :type board: List[List[str]]
        :rtype: bool
        """
        rowset = [set() for _ in range(9)]
        colset = [set() for _ in range(9)]
        gridset = [set() for _ in range(9)]
        digits = {str(d) for d in range(1, 10)}  # '1'..'9'
        for i in range(len(board)):
            for j in range(len(board[0])):
                cell = board[i][j]
                if cell not in digits:
                    continue  # '.' or other filler — nothing to check
                # Bug fix: the original used `(i/3)*3+j/3`, which is float
                # division in Python 3 and raises TypeError as a list index;
                # integer floor division maps (i, j) to sub-grid 0..8.
                grid = (i // 3) * 3 + j // 3
                if cell in rowset[i] or cell in colset[j] or cell in gridset[grid]:
                    return False
                rowset[i].add(cell)
                colset[j].add(cell)
                gridset[grid].add(cell)
        return True
if __name__=="__main__":
    # Smoke test: a classic valid, partially-filled board should print True.
    a=Solution()
    board=[["5","3",".",".","7",".",".",".","."],["6",".",".","1","9","5",".",".","."],[".","9","8",".",".",".",".","6","."],["8",".",".",".","6",".",".",".","3"],["4",".",".","8",".","3",".",".","1"],["7",".",".",".","2",".",".",".","6"],[".","6",".",".",".",".","2","8","."],[".",".",".","4","1","9",".",".","5"],[".",".",".",".","8",".",".","7","9"]]
    print(a.isValidSudoku(board))
|
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# helpers
def map_values(fn, x):
    """Apply *fn* to every value of dict *x*; return a new dict, same keys."""
    return {key: fn(value) for key, value in x.items()}
def dict_chunk(x, chunks, dim):
    """Split every tensor in *x* along *dim* into two halves.

    Returns a pair of dicts (first halves, second halves) with the same keys.
    """
    first, second = {}, {}
    for key, tensor in x.items():
        first[key], second[key] = tensor.chunk(chunks, dim = dim)
    return first, second
def dict_sum(x, y):
    """Element-wise x[k] + y[k] over the keys of *x*; returns a new dict."""
    return {key: x[key] + y[key] for key in x.keys()}
def dict_subtract(x, y):
    """Element-wise x[k] - y[k] over the keys of *x*; returns a new dict."""
    return {key: x[key] - y[key] for key in x.keys()}
def dict_cat(x, y, dim):
    """Concatenate matching tensors of *x* and *y* along *dim* key by key."""
    return {key: torch.cat((x[key], y[key]), dim = dim) for key in x.keys()}
def dict_set_(x, key, value):
    """Set attribute *key* to *value* on every value of dict *x*, in place."""
    for obj in x.values():
        setattr(obj, key, value)
def dict_backwards_(outputs, grad_tensors):
    """Backpropagate each output with its matching gradient, keeping the graph
    alive so further backward calls through the same graph remain valid."""
    for key, out in outputs.items():
        torch.autograd.backward(out, grad_tensors[key], retain_graph = True)
def dict_del_(x):
    """Drop local references to the dict's values and the dict itself.

    Only local names are deleted — callers' references are unaffected; this
    merely allows earlier garbage collection of large tensors.
    """
    for _, value in x.items():
        del value
    del x
def values(d):
    """Return the dict's values as a list (insertion order)."""
    return list(d.values())
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
    """Wraps a module so its forward pass can be replayed with identical RNG.

    record_rng captures the CPU (and, if CUDA is active, GPU) RNG state
    during the forward pass; set_rng restores it so a recomputation (in the
    reversible backward pass) reproduces the same stochastic ops (dropout
    masks etc.). Follows the pattern in torch.utils.checkpoint:
    https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
    """
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None    # CPU RNG state captured by record_rng
        self.cuda_in_fwd = None  # True once GPU RNG states were captured
        self.gpu_devices = None
        self.gpu_states = None
    def record_rng(self, *args):
        self.cpu_state = torch.get_rng_state()
        # NOTE(review): torch.cuda._initialized is a private attribute —
        # verify it still exists when upgrading torch.
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)
    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        if record_rng:
            self.record_rng(*args)
        if not set_rng:
            return self.net(*args, **kwargs)
        # replay path: fork the global RNG, restore the recorded states,
        # run the net, then let fork_rng put everything back
        rng_devices = []
        if self.cuda_in_fwd:
            rng_devices = self.gpu_devices
        with torch.random.fork_rng(devices=rng_devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
    """One reversible layer pair: y1 = x1 + f(x2), y2 = x2 + g(y1).

    Inputs and outputs are dicts of tensors whose last dim holds the two
    halves (x1 | x2). backward_pass reconstructs the inputs from the outputs,
    so intermediate activations need not be stored.
    Heavily inspired by RevTorch's ReversibleBlock.
    """
    def __init__(self, f, g):
        super().__init__()
        # Deterministic wrappers record/replay RNG so f and g recompute
        # identically in backward_pass
        self.f = Deterministic(f)
        self.g = Deterministic(g)
    def forward(self, x, **kwargs):
        training = self.training
        x1, x2 = dict_chunk(x, 2, dim = -1)
        y1, y2 = None, None
        # no_grad: activations are recomputed in backward_pass, not stored
        with torch.no_grad():
            y1 = dict_sum(x1, self.f(x2, record_rng = training, **kwargs))
            y2 = dict_sum(x2, self.g(y1, record_rng = training))
        return dict_cat(y1, y2, dim = -1)
    def backward_pass(self, y, dy, **kwargs):
        # Reconstruct (x1, x2) and input grads (dx1, dx2) from outputs/grads.
        y1, y2 = dict_chunk(y, 2, dim = -1)
        dict_del_(y)
        dy1, dy2 = dict_chunk(dy, 2, dim = -1)
        dict_del_(dy)
        with torch.enable_grad():
            dict_set_(y1, 'requires_grad', True)
            # replay g with recorded RNG so gy1 matches the forward value
            gy1 = self.g(y1, set_rng = True)
            dict_backwards_(gy1, dy2)
        with torch.no_grad():
            x2 = dict_subtract(y2, gy1)  # invert y2 = x2 + g(y1)
            dict_del_(y2)
            dict_del_(gy1)
            # dL/dy1 accumulates the grad flowing through g
            dx1 = dict_sum(dy1, map_values(lambda t: t.grad, y1))
            dict_del_(dy1)
            dict_set_(y1, 'grad', None)
        with torch.enable_grad():
            dict_set_(x2, 'requires_grad', True)
            fx2 = self.f(x2, set_rng = True, **kwargs)
            dict_backwards_(fx2, dx1)
        with torch.no_grad():
            x1 = dict_subtract(y1, fx2)  # invert y1 = x1 + f(x2)
            dict_del_(y1)
            dict_del_(fx2)
            dx2 = dict_sum(dy2, map_values(lambda t: t.grad, x2))
            dict_del_(dy2)
            dict_set_(x2, 'grad', None)
            x2 = map_values(lambda t: t.detach(), x2)
        x = dict_cat(x1, x2, dim = -1)
        dx = dict_cat(dx1, dx2, dim = -1)
        return x, dx
class _ReversibleFunction(Function):
    """Custom autograd Function driving the reversible blocks.

    forward flattens/unflattens the named tensor dict around the blocks and
    keeps only the final (detached) outputs; backward walks the blocks in
    reverse, letting each block reconstruct its inputs and gradients.
    """
    @staticmethod
    def forward(ctx, x, blocks, kwargs):
        # split the flat tensor back into the named dict
        input_keys = kwargs.pop('input_keys')
        split_dims = kwargs.pop('split_dims')
        input_values = x.split(split_dims, dim = -1)
        x = dict(zip(input_keys, input_values))
        ctx.kwargs = kwargs
        ctx.split_dims = split_dims
        ctx.input_keys = input_keys
        for block in blocks:
            x = block(x, **kwargs)
        # only the final activations are stored; earlier ones are recomputed
        ctx.y = map_values(lambda t: t.detach(), x)
        ctx.blocks = blocks
        x = torch.cat(values(x), dim = -1)
        return x
    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        kwargs = ctx.kwargs
        input_keys = ctx.input_keys
        split_dims = ctx.split_dims
        dy = dy.split(split_dims, dim = -1)
        dy = dict(zip(input_keys, dy))
        # reverse pass: each block rebuilds its inputs and input grads
        for block in ctx.blocks[::-1]:
            y, dy = block.backward_pass(y, dy, **kwargs)
        dy = torch.cat(values(dy), dim = -1)
        # gradients for (x, blocks, kwargs): only x gets one
        return dy, None, None
class SequentialSequence(nn.Module):
    """Plain (non-reversible) execution of (attention, feed-forward) pairs.

    kwargs are forwarded to the attention layer only.
    """
    def __init__(self, blocks):
        super().__init__()
        self.blocks = blocks
    def forward(self, x, **kwargs):
        for attn_layer, ff_layer in self.blocks:
            x = ff_layer(attn_layer(x, **kwargs))
        return x
class ReversibleSequence(nn.Module):
    """Runs (f, g) layer pairs reversibly over a dict of named tensors.

    Each tensor is duplicated along its last dim (the x1/x2 halves), the
    dict is flattened into one tensor for the custom autograd function, and
    the two halves are averaged back together at the end.
    """
    def __init__(self, blocks):
        super().__init__()
        self.blocks = nn.ModuleList([ReversibleBlock(f, g) for (f, g) in blocks])
    def forward(self, x, **kwargs):
        blocks = self.blocks
        # duplicate each tensor to form the (x1 | x2) halves
        x = map_values(lambda t: torch.cat((t, t), dim = -1), x)
        input_keys = x.keys()
        split_dims = tuple(map(lambda t: t.shape[-1], x.values()))
        block_kwargs = {'input_keys': input_keys, 'split_dims': split_dims, **kwargs}
        x = torch.cat(values(x), dim = -1)
        x = _ReversibleFunction.apply(x, blocks, block_kwargs)
        # unflatten, then average the two halves back to the original width
        x = dict(zip(input_keys, x.split(split_dims, dim = -1)))
        x = map_values(lambda t: torch.stack(t.chunk(2, dim = -1)).mean(dim = 0), x)
        return x
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countSort function below.
def countSort(arr):
    """Counting sort of (key, value) string pairs by integer key (0..99).

    Stable within each key; values from the first half of the input are
    printed as '-'. Output is space-terminated items on one line, no newline.
    """
    half = len(arr) // 2
    buckets = [[] for _ in range(100)]
    for pos, row in enumerate(arr):
        buckets[int(row[0])].append('-' if pos < half else row[1])
    for bucket in buckets:
        for item in bucket:
            print(item, end=' ')
if __name__ == '__main__':
    # Read n, then n "<key> <value>" rows, and print the count-sorted result.
    n = int(input().strip())
    arr = []
    for _ in range(n):
        arr.append(input().rstrip().split())
    countSort(arr)
|
#Codechef
#https://www.codechef.com/ICL2019/problems/ICL1901
t=int(input())  # number of test cases
while t>0:
    # k: the input number whose distinct digits matter; n is read but unused
    k,n=list(map(int,input().split()))
    k=str(k)
    k=set(k)  # distinct digits of k
    # answer is 3^d for d distinct digits: 27, 8, or 1
    if len(k)==3:
        print(27)
    elif len(k)==2:
        print(8)
    else:
        print(1)
    t-=1
|
import requests
import simplejson
class TagMe:
    """
    A Python wrapper for the TagMe REST API, which provides a text annotation service:
    https://tagme.d4science.org/tagme/
    It is able to identify on-the-fly meaningful short-phrases (called "spots") in an unstructured text and link them
    to a pertinent Wikipedia page in a fast and effective way. English and Italian languages are supported.
    Methods:
    - query_tagme: post the proper query to the REST API service, and return the JSON reply.
    - get_entities: process the JSON reply from the REST API, discarding uncertain annotations according to a
    parametric threshold.
    Input:
    - token (mandatory): a string containing the user token for accessing the REST API service.
    """
    def __init__(self, token):
        self.api = "https://tagme.d4science.org/tagme/tag"  # REST endpoint
        self.token = token  # gcube-token sent with every request
    def query_tagme(self, text, lang):
        """
        Post the proper query to the REST API service, and return the JSON reply.
        Input:
        - text (mandatory): a string containing an unstructured text to be annotated;
        - lang (mandatory): a string identifier representing the text language. Must be one out of ["en", "it"].
        Output:
        - the JSON reply from the REST API, or None if the request did not return HTTP 200.
        """
        payload = {"text": text, "gcube-token": self.token, "lang": lang}
        # the service handles long documents differently; 3 presumably selects
        # a long-text processing mode — confirm against the TagMe API docs
        if len(text) > 4000:
            payload["long_text"] = 3
        r = requests.post(self.api, payload)
        if r.status_code != 200:
            # dump the error body for debugging and signal failure with None
            print(r.content)
            return None
        return simplejson.loads(r.content)
    @staticmethod
    def get_entities(tagme_response, min_rho):
        """
        Process the JSON reply from the REST API, discarding uncertain annotations according to a parametric threshold.
        Input:
        - tagme_response (mandatory): the JSON reply from the REST API.
        - min_rho: a float in [0, 1.0) interval. Annotation with rho <= min_rho are discarded.
        Output:
        - a list, containing the filtered annotations.
        """
        ann = tagme_response["annotations"]
        ann = filter(lambda d: float(d["rho"]) > min_rho, ann)
        # keep only the linked Wikipedia page titles
        return list(map(lambda d: d["title"], ann))
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join
# Global configuration: thread count and resampling threshold.
default_num_threads = 4
RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 4
# do not modify these unless you know what you are doing
my_output_identifier = "DeepLabV3+"
default_plans_identifier = "nnUNetPlansv2.1"
default_data_identifier = 'nnUNetData_plans_v2.1'
default_trainer = "nnUNetTrainer"
default_cascade_trainer = "nnUNetTrainerV2CascadeFullRes"
# nnUNet_raw_data_base='/data/nnUNetFrame/DATASET/nnUNet_raw'
# nnUNet_preprocessed='/data/nnUNetFrame/DATASET/nnUNet_preprocessed'
# RESULTS_FOLDER='/data/nnUNetFrame/DATASET/nnUNet_trained_models'
# NOTE(review): machine-specific absolute paths are hard-coded here; consider
# reading them from environment variables as upstream nnU-Net does.
nnUNet_raw_data_base='/mnt/lustre/luoxiangde.vendor/projects/nnUNetFrame/DATASET/nnUNet_raw'
nnUNet_preprocessed='/mnt/lustre/luoxiangde.vendor/projects/nnUNetFrame/DATASET/nnUNet_preprocessed'
RESULTS_FOLDER='/mnt/lustre/luoxiangde.vendor/projects/nnUNetFrame/DATASET/nnUNet_trained_models'
"""
PLEASE READ paths.md FOR INFORMATION TO HOW TO SET THIS UP
"""
base = nnUNet_raw_data_base
preprocessing_output_dir = nnUNet_preprocessed
network_training_output_dir_base = RESULTS_FOLDER
# Derive and create the working directories; each unset root disables the
# corresponding pipeline stage and prints an explanatory message instead.
if base is not None:
    nnUNet_raw_data = join(base, "nnUNet_raw_data")
    nnUNet_cropped_data = join(base, "nnUNet_cropped_data")
    maybe_mkdir_p(nnUNet_raw_data)
    maybe_mkdir_p(nnUNet_cropped_data)
else:
    print("nnUNet_raw_data_base is not defined and nnU-Net can only be used on data for which preprocessed files "
          "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like "
          "this. If this is not intended, please read documentation/setting_up_paths.md for information on how to set this up properly.")
    nnUNet_cropped_data = nnUNet_raw_data = None
if preprocessing_output_dir is not None:
    maybe_mkdir_p(preprocessing_output_dir)
else:
    print("nnUNet_preprocessed is not defined and nnU-Net can not be used for preprocessing "
          "or training. If this is not intended, please read documentation/setting_up_paths.md for information on how to set this up.")
    preprocessing_output_dir = None
if network_training_output_dir_base is not None:
    network_training_output_dir = join(network_training_output_dir_base, my_output_identifier)
    maybe_mkdir_p(network_training_output_dir)
else:
    print("RESULTS_FOLDER is not defined and nnU-Net cannot be used for training or "
          "inference. If this is not intended behavior, please read documentation/setting_up_paths.md for information on how to set this "
          "up.")
    network_training_output_dir = None
|
# Generated by Django 2.2.3 on 2021-10-20 12:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable 'apellido' (surname) CharField to the 'prueba' model."""
    dependencies = [
        ('superSu', '0010_auto_20211020_0743'),
    ]
    operations = [
        migrations.AddField(
            model_name='prueba',
            name='apellido',
            # null=True so existing rows need no default value
            field=models.CharField(max_length=50, null=True, verbose_name='Apellido'),
        ),
    ]
|
#!/usr/bin/env python
import argparse
import sys
def _lines(stream):
    """Yield lines from *stream* until EOF (readline returns the empty string)."""
    l = stream.readline()
    while l != "":
        yield l
        l = stream.readline()

def main(argv):
    """Read tabular data on stdin and echo each line with an extra column.

    The column value comes from the positional argument; --prepend puts it
    first, --header replaces it on the first line, and --increment grows a
    numeric value per line.
    """
    parser = argparse.ArgumentParser(
        description=("Add a value in every line so that you add a column in "
                     "tabular data")
    )
    parser.add_argument(
        "value",
        help="Which value to add"
    )
    parser.add_argument(
        "--prepend",
        action="store_true",
        help="Add the value as the first column instead of as the last"
    )
    parser.add_argument(
        "--header",
        default=None,
        help="Use this value as header for the column"
    )
    parser.add_argument(
        "--delimiter", "-d",
        default=" ",
        help="Use this delimiter to separate the columns"
    )
    parser.add_argument(
        "--increment",
        default=0.0,
        type=float,
        help="Add this value to the initial"
    )
    parser.add_argument(
        "--no_flush",
        dest="flush",
        action="store_false",
        help="Do not flush after every line"
    )
    args = parser.parse_args(argv)
    v = args.value
    for i, line in enumerate(_lines(sys.stdin)):
        if i == 0 and args.header:
            v = args.header
        # Bug fix: the original used Python 2 `print v + ...` statements,
        # which are SyntaxErrors under Python 3; print() works in both.
        if args.prepend:
            print(v + args.delimiter + line.strip())
        else:
            print(line.strip() + args.delimiter + v)
        if args.flush:
            sys.stdout.flush()
        if args.increment != 0:
            try:
                v = str(float(v) + args.increment)
            except ValueError:
                # non-numeric values (e.g. the header) stay unchanged
                pass
# Script entry point: process real stdin with the CLI arguments.
if __name__ == "__main__":
    main(sys.argv[1:])
|
import os.path
import json
import jsonpickle
data_path = "data/roomdata.json"
class Data(object):
    """Container for persistent room/game state, serialized with jsonpickle."""
    def __init__(self):
        # -1 / empty collections mark "not yet set"
        self.main_channel_id = -1   # presumably a chat-channel id — TODO confirm
        self.inventory = []         # collected items
        self.state = 0              # current progress/state index
        self.map_msg_id = -1        # id of the posted map message
        self.progress_msg_id = {}   # per-key progress message ids
        self.cooldown = {}          # per-key cooldown bookkeeping
        self.light_lvl = 0          # current light level
def read_json(filename):
    """Load a JSON file and return the parsed object.

    Uses a context manager so the handle is closed even if parsing raises
    (the original leaked the handle on a json.load failure).
    """
    with open(filename, 'r') as file:
        return json.load(file)
def read_saves():
    """Load saved state from data_path, creating a fresh save if none exists.

    Returns the decoded Data object. Context managers replace the manual
    open/close pairs so handles are released even if decode/encode raises.
    """
    if os.path.isfile(data_path):
        with open(data_path) as file:
            data = jsonpickle.decode(file.read())
        print("loaded old save")
    else:
        data = Data()
        with open(data_path, 'w+') as file:
            file.write(jsonpickle.encode(data))
        print("created new save")
    return data
def save(data):
    """Serialize *data* to data_path with jsonpickle, overwriting any old save.

    Context manager replaces the manual open/close so the handle is released
    even if encoding raises.
    """
    with open(data_path, 'w+') as file:
        file.write(jsonpickle.encode(data))
def reset():
    """Discard any existing save: write a fresh Data object and return it.

    Context manager replaces the manual open/close so the handle is released
    even if encoding raises.
    """
    data = Data()
    with open(data_path, 'w+') as file:
        file.write(jsonpickle.encode(data))
    print("created new save")
    return data
# -*- coding: utf-8 -*-
"""
Tests for all number generators
"""
from fauxfactory import FauxFactory
import sys
import unittest
class TestNumbers(unittest.TestCase):
    """
    Test number generators.

    NOTE(review): several tests reassign sys.maxsize to shrink the generator's
    bounds; the assertions imply fauxfactory reads sys.maxsize at call time —
    confirm, since reassigning sys.maxsize does not limit Python 3 ints.
    """
    @classmethod
    def setUpClass(cls):
        """
        Instantiate our factory object
        """
        cls.factory = FauxFactory()
    def test_generate_integer_1(self):
        """
        @Test: Create a random integer with no range limits
        @Feature: Numbers Generator
        @Assert: A random integer is created
        """
        result = self.factory.generate_integer()
        self.assertTrue(
            isinstance(result, int), "A valid integer was not generated.")
    def test_generate_integer_2(self):
        """
        @Test: Create a random integer with set minimum limit
        @Feature: Numbers Generator
        @Assert: Integer is created and greater than minimum
        """
        try:
            # Change system max int to a smaller number
            old_sys_maxsize = sys.maxsize
            sys.maxsize = 5
            for turn in range(10):
                result = self.factory.generate_integer(min_value=1)
                self.assertTrue(
                    result <= sys.maxsize, "Integer is greater than max_value"
                )
                self.assertTrue(
                    result >= 1, "Integer is less than specified minimum"
                )
        finally:
            # Reset system max int back to original value
            sys.maxsize = old_sys_maxsize
    def test_generate_integer_3(self):
        """
        @Test: Create a random integer with set maximum limit
        @Feature: Numbers Generator
        @Assert: Integer is created and less than maximum value
        """
        try:
            # Change system max int to a smaller number
            old_sys_maxsize = sys.maxsize
            sys.maxsize = 5
            # platform minimum mirrors CPython's int bounds convention
            min_value = - sys.maxsize - 1
            for turn in range(10):
                result = self.factory.generate_integer(max_value=1)
                self.assertTrue(
                    result >= min_value, "Integer is less than min_value"
                )
                self.assertTrue(
                    result <= 1, "Integer is greater than specified maximum"
                )
        finally:
            # Reset system max int back to original value
            sys.maxsize = old_sys_maxsize
    def test_generate_integer_4(self):
        """
        @Test: Create a random integer with set min/max limits
        @Feature: Numbers Generator
        @Assert: An integer is created and falls within the specified range
        """
        for turn in range(10):
            result = self.factory.generate_integer(
                min_value=1, max_value=3)
            self.assertTrue(
                result >= 1, "Integer is less than min_value"
            )
            self.assertTrue(
                result <= 3, "Integer is greater than specified maximum"
            )
    def test_generate_integer_5(self):
        """
        @Test: Create a random integer with disallowed minimum limit
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        # This is lower than allowed platform minimum
        low_min = - sys.maxsize - 2
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value=low_min)
    def test_generate_integer_6(self):
        """
        @Test: Create a random integer with disallowed maximum limit
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        # This is greater than allowed platform maximum
        high_max = sys.maxsize + 1
        with self.assertRaises(ValueError):
            self.factory.generate_integer(max_value=high_max)
    def test_generate_integer_7_0(self):
        """
        @Test: Create a random integer using empty strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value='')
    def test_generate_integer_7_1(self):
        """
        @Test: Create a random integer using empty strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(max_value='')
    def test_generate_integer_7_2(self):
        """
        @Test: Create a random integer using empty strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value='', max_value='')
    def test_generate_integer_8_0(self):
        """
        @Test: Create a random integer using whitespace as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value=' ')
    def test_generate_integer_8_1(self):
        """
        @Test: Create a random integer using whitespace as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(max_value=' ')
    def test_generate_integer_8_2(self):
        """
        @Test: Create a random integer using whitespace as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value=' ', max_value=' ')
    def test_generate_integer_9_0(self):
        """
        @Test: Create a random integer using alpha strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value='a')
    def test_generate_integer_9_1(self):
        """
        @Test: Create a random integer using alpha strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(max_value='a')
    def test_generate_integer_9_2(self):
        """
        @Test: Create a random integer using alpha strings as args
        @Feature: Numbers Generator
        @Assert: An integer number is not created due to value error
        """
        with self.assertRaises(ValueError):
            self.factory.generate_integer(min_value='a', max_value='b')
    def test_generate_positive_integer_1(self):
        """
        @Test: Create a random positive integer
        @Feature: Numbers Generator
        @Assert: A positive number is created
        """
        result = self.factory.generate_positive_integer()
        self.assertTrue(result >= 0, "Generated integer is not positive")
    def test_generate_negative_integer_1(self):
        """
        @Test: Create a random negative integer
        @Feature: Numbers Generator
        @Assert: A negative number is created
        """
        result = self.factory.generate_negative_integer()
        self.assertTrue(result <= 0, "Generated integer is not negative")
|
#!/usr/bin/env python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import codecs
import re
import os
from setuptools import setup, find_packages
# Matches the __version__ assignment anywhere in a file; re.S (DOTALL) lets
# the leading greedy .* span multiple lines.
VERSION_RE = re.compile(r""".*__version__ = ["'](.*?)['"]""", re.S)
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
def _load_readme():
    """Return the contents of README.md (UTF-8) for use as the long description."""
    readme = os.path.join(PROJECT_DIR, 'README.md')
    with codecs.open(readme, 'r', 'utf-8') as handle:
        return handle.read()
def _load_version():
    """Extract the package version from awsiot/__init__.py (single-sourced)."""
    init_path = os.path.join(PROJECT_DIR, 'awsiot', '__init__.py')
    with open(init_path) as source:
        contents = source.read()
    return VERSION_RE.match(contents).group(1)
# Package metadata for the AWS IoT Device SDK v2 (PyPI name: awsiotsdk).
setup(
    name='awsiotsdk',
    version=_load_version(),  # Single-sourced from awsiot/__init__.py.
    license='License :: OSI Approved :: Apache Software License',
    description='AWS IoT SDK based on the AWS Common Runtime',
    long_description=_load_readme(),
    long_description_content_type='text/markdown',
    author='AWS SDK Common Runtime Team',
    url='https://github.com/aws/aws-iot-device-sdk-python-v2',
    packages=find_packages(include=['awsiot*']),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    # Pinned exactly: awscrt's ABI/API moves quickly between releases.
    install_requires=[
        'awscrt==0.19.1',
    ],
    python_requires='>=3.7',
)
|
from collections import Counter
class Solution:
    def longestPalindrome(self, s: str) -> int:
        """Return the length of the longest palindrome buildable from the letters of s.

        Every even character count is fully usable; each odd count contributes
        count - 1, and one odd-count character may sit in the middle.
        """
        # Fixed: the class previously defined longestPalindrome TWICE; the
        # second definition shadowed this one and raised NameError at runtime
        # (it referenced `collections` while only `Counter` is imported).
        odd_counts = sum(count & 1 for count in Counter(s).values())
        return len(s) - odd_counts + 1 if odd_counts > 1 else len(s)
from __future__ import unicode_literals
from django.db import models
from django.contrib import messages
from django.contrib.messages import get_messages
import re
# Basic email sanity check: local-part@domain.tld (ASCII only).
# Fixed: stray "copy" text had been pasted into the domain character class
# ("[a-zA-copyZ0-9._-]"), which created an accidental 'A-c' range accepting
# characters such as '[', '\', ']' and '^' in the domain.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
    """Validation helpers for logging in and registering User records."""

    def login(self, request):
        """Validate login credentials from request.POST.

        Returns True on success; otherwise attaches an error message to the
        request and returns False.
        """
        # Fixed: was 'and', which let a request with one empty field through.
        if len(request.POST['email']) < 2 or len(request.POST['password']) < 2:
            messages.add_message(request, messages.ERROR, "please enter username and password")
            return False
        try:
            user = User.objects.get(email=request.POST['email'])
        except User.DoesNotExist:
            # Fixed: .get() raised an unhandled exception for unknown emails;
            # the old else-branch reporting "User Doesn't Exist" was unreachable.
            messages.add_message(request, messages.ERROR, "User Doesn't Exist")
            return False
        if user.password == request.POST['password']:
            return True
        # Fixed: a wrong password previously fell through and returned None
        # with no message at all.
        messages.add_message(request, messages.ERROR, "User Doesn't Exist")
        return False

    def register(self, request):
        """Validate registration fields; create the User when all checks pass.

        Returns True when the user was created, False when any check failed
        (each failure adds its own error message).
        """
        if len(request.POST['first_name']) < 2:
            messages.add_message(request, messages.ERROR, "First Name can't be less than 2 letters")
        if len(request.POST['last_name']) < 2:
            messages.add_message(request, messages.ERROR, "Last Name can't be less than 2 letters")
        if not EMAIL_REGEX.match(request.POST['email']):
            messages.add_message(request, messages.ERROR, "please use valid Email address")
        if User.objects.filter(email=request.POST['email']).count() > 0:
            # Fixed typo in the user-facing message ("registerd").
            messages.add_message(request, messages.ERROR, "Email already registered")
        if len(request.POST['password']) < 8:
            messages.add_message(request, messages.ERROR, "Password can't be less than 8 letters")
        if request.POST['password'] != request.POST['confirm_password']:
            messages.add_message(request, messages.ERROR, "Password do not match")
        # Any queued message means at least one validation failed.
        if len(get_messages(request)) > 0:
            return False
        # NOTE(review): password is stored in plain text — consider hashing.
        User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=request.POST['password'])
        return True
class User(models.Model):
    """Minimal user record managed by UserManager."""
    first_name = models.CharField(max_length = 255)
    last_name = models.CharField(max_length = 255)
    # NOTE(review): email has no unique constraint and password is stored as
    # plain text — uniqueness is only enforced in UserManager.register.
    email = models.CharField(max_length = 255)
    password = models.CharField(max_length = 255)
    objects=UserManager()
|
import argparse
import hashlib
import inspect
import os
import time
from typing import Callable, List
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import h5py
from matplotlib.backends.backend_pdf import PdfPages
import crack_detection as crack_detection
import crack_metrics as crack_metrics
import data_augmentation as data_augmentation
import data_loading as data_loading
import plotting as plotting
from Dataset import Dataset
from FiberCrackConfig import FiberCrackConfig
from PythonExtras.volume_tools import write_volume_to_datraw, write_volume_sequence
np.random.seed(13) # Fix the seed for reproducibility.
def slice_along_axis(index, axis, ndim):
    """
    Return a selector that takes a single-element slice (subtensor) of an nd-array
    along a certain axis (at a given index).
    The result would be an (n-1)-dimensional array. E.g. data[:, :, 5, :].
    The advantage of this function over subscript syntax is that you can
    specify the axis with a variable.
    """
    # Fixed: non-selected axes must use slice(None) (i.e. ':'), not None.
    # None is numpy.newaxis, so the old selector inserted extra axes and
    # applied `index` to the wrong axis, contradicting the docstring.
    return tuple(index if axis == i else slice(None) for i in range(ndim))
def load_data(config: FiberCrackConfig):
    """Load the dataset using the loader matching the configured data format."""
    loaders = {
        'csv': data_loading.load_csv_data,
        'tiff': data_loading.load_tiff_data,
    }
    dataFormat = config.dataConfig.dataFormat
    if dataFormat not in loaders:
        raise ValueError("Unknown data format: {}".format(dataFormat))
    return loaders[dataFormat](config.dataConfig)
def augment_data(dataset: 'Dataset', config: FiberCrackConfig):
    """
    Extends the raw data with some pre-processing.
    Doesn't compute any 'results', but rather information that can help compute the results.
    :param dataset:
    :param config:
    :return:
    """
    header = dataset.get_header()
    metaheader = dataset.get_metaheader()
    # 'imageShiftX'/'camera'/'matched' are columns this function adds; if they
    # are all present, augmentation already ran and the work can be skipped.
    if 'imageShiftX' in metaheader and 'camera' in header and 'matched' in header:
        print("Data already augmented, skipping.")
        return dataset
    # For now we require that all the data is present, or none of it.
    assert('imageShiftX' not in metaheader)
    assert('camera' not in header)
    assert('matched' not in header)
    # Add the data to image mapping to the dataset.
    data_augmentation.append_data_image_mapping(dataset)
    # Add the physical dimensions of the data (in millimeters).
    data_augmentation.append_physical_frame_size(dataset)
    # Add the image shift to the metadata.
    imageShift = data_augmentation.compute_avg_flow(dataset)
    dataset.create_or_update_metadata_column('imageShiftX', imageShift[..., 0])
    dataset.create_or_update_metadata_column('imageShiftY', imageShift[..., 1])
    print("Adding the camera images...")
    data_augmentation.append_camera_image(dataset, config.dataConfig)
    print("Adding the crack ground truth images...")
    data_augmentation.append_ground_truth_image(dataset, config.dataConfig)
    print("Adding the matched pixels...")
    data_augmentation.append_matched_pixels(dataset, config.dataConfig)
    print("Adding crack area ground truth...")
    data_augmentation.append_crack_area_ground_truth(dataset, config.dataConfig)
    print("Zeroing the pixels that lost tracking.")
    data_augmentation.zero_pixels_without_tracking(dataset)
    return dataset
def apply_function_if_code_changed(dataset: 'Dataset', config: FiberCrackConfig, func: Callable[..., None]):
    """
    Calls a function that computes and writes data to the dataset.
    Stores the hash of the function's source code as metadata.
    If the function has not changed, it isn't applied to the data.
    :param dataset:
    :param config:
    :param func:
    :return:
    """
    # Get a string containing the full function source.
    sourceLines = inspect.getsourcelines(func)
    functionSource = ''.join(sourceLines[0])
    functionName = func.__name__
    # Collect the config values whose names match the function's parameters:
    # a change in any of them must also invalidate the cached result.
    callSignature = inspect.signature(func)
    callArguments = {}
    for callParameter in callSignature.parameters:
        if callParameter in config.__dict__:
            callArguments[callParameter] = config.__dict__[callParameter]
    # Sorted so the serialized arguments (and thus the hash) are deterministic.
    callArgumentsString = ''.join([key + str(callArguments[key]) for key in sorted(callArguments)])
    attrName = '_functionHash_' + functionName
    # The cache key covers both the source code and the relevant config values.
    currentHash = hashlib.sha1((functionSource + callArgumentsString).encode('utf-8')).hexdigest()
    # NOTE(review): 'cameraImageVar' in the header appears to act as a proxy
    # for "results columns exist already" — confirm against where results are
    # first written.
    if 'cameraImageVar' in dataset.get_header() and not config.recomputeResults:
        oldHash = dataset.get_attr(attrName) if dataset.has_attr(attrName) else None
        if currentHash == oldHash:
            print("Function {} has not changed, skipping.".format(functionName))
            return
    print("Applying function {} to the dataset.".format(functionName))
    callArguments['dataset'] = dataset
    func(**callArguments)
    dataset.set_attr(attrName, currentHash)
def compute_and_append_results(dataset: 'Dataset', config: FiberCrackConfig):
    """Run all crack-detection passes over the dataset, skipping unchanged ones."""
    # Compute derived parameters.
    mappingMin, mappingMax, mappingStep = dataset.get_data_image_mapping()
    # Convert the DIC kernel size (in image pixels) into a radius in data cells.
    dicKernelRadius = int((config.dataConfig.dicKernelSize - 1) / 2 / mappingStep[0])
    textureKernelMultipliers = config.allTextureKernelMultipliers
    config.dicKernelRadius = dicKernelRadius
    config.textureKernelRadius = int(dicKernelRadius * config.textureKernelMultiplier)
    config.hybridKernelRadius = int(dicKernelRadius * config.hybridKernelMultiplier)
    config.allTextureKernelRadii = [int(dicKernelRadius * mult) for mult in textureKernelMultipliers]
    # Each pass is re-run only when its source code or relevant config changed.
    apply_function_if_code_changed(dataset, config, data_augmentation.append_texture_features)
    apply_function_if_code_changed(dataset, config, crack_detection.append_crack_from_tracking_loss)
    apply_function_if_code_changed(dataset, config, crack_detection.append_crack_from_unmatched_pixels)
    apply_function_if_code_changed(dataset, config, crack_detection.append_crack_from_variance)
    apply_function_if_code_changed(dataset, config, crack_detection.append_crack_from_entropy)
    apply_function_if_code_changed(dataset, config, crack_detection.append_crack_from_unmatched_and_entropy)
    apply_function_if_code_changed(dataset, config, crack_detection.append_reference_frame_crack)
    # todo this runs always, because there are too many dependencies.
    crack_metrics.append_estimated_crack_area(dataset)
def plot_frame_data_figures(dataset: 'Dataset', config: FiberCrackConfig, targetFrame=None):
    """Render each frame's plots as standalone image files.

    :param targetFrame: when given, only the frame with this frame-map label
                        is rendered; all others are skipped.
    """
    # figures = [plt.figure(dpi=300) for i in range(figureNumber)]
    # axes = [fig.add_subplot(1, 1, 1) for fig in figures]
    figures = []
    axes = []
    labels = []

    def axis_builder(label: str) -> plt.Axes:
        # Each requested plot gets its own single-axes figure; the label
        # later becomes part of the output filename.
        fig = plt.figure(dpi=300)
        ax = fig.add_subplot(1, 1, 1)
        figures.append(fig)
        axes.append(ax)
        labels.append(label)
        return ax

    for frame in range(dataset.get_frame_number()):
        # If the target frame is specified, skip the other frames.
        if targetFrame is not None and dataset.get_frame_map()[frame] != targetFrame:
            continue
        frameData = dataset.h5Data[frame, ...]
        frameLabel = dataset.get_frame_map()[frame]
        # Plot the data.
        plotting.plot_original_data_for_frame(axis_builder, frameData, dataset.get_header())
        plotting.plot_unmatched_cracks_for_frame(axis_builder, frameData, dataset.get_header(), config.magnifiedRegion)
        plotting.plot_image_cracks_for_frame(axis_builder, frameData, dataset.get_header(), config.magnifiedRegion)
        plotting.plot_reference_crack_for_frame(axis_builder, frameData, dataset.get_header())
        figuresDir = os.path.join(config.outDir, 'figures-{}'.format(config.dataConfig.metadataFilename))
        if not os.path.exists(figuresDir):
            os.makedirs(figuresDir)
        for figure, ax, label in zip(figures, axes, labels):
            # Skip unused axes.
            if label == '':
                continue
            # Configure the axes to cover the whole figure and render to an image file.
            ax.axis('off')
            ax.set_frame_on(False)
            ax.set_xticks([])
            ax.set_yticks([])
            plt.axis('off')
            figure.savefig(os.path.join(figuresDir, '{}-{}'.format(frameLabel, label)), bbox_inches='tight', pad_inches=0)
        for ax in axes:
            ax.clear()
    # Cleanup, since pyplot doesn't do it automatically.
    for fig in figures:
        plt.close(fig)
def plot_crack_area_figures(dataset: 'Dataset', config: FiberCrackConfig):
    """Render the crack-area chart (and its CSV dump) into the figures dir."""
    outputDir = os.path.join(config.outDir, 'figures-{}'.format(config.dataConfig.metadataFilename))
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    csvPath = os.path.join(outputDir, 'crack-area-data.csv')
    chart = plotting.plot_crack_area_chart(dataset, csvOutPath=csvPath)
    chart.savefig(os.path.join(outputDir, 'crack-area'), dpi=300)
def plot_figures(dataset: 'Dataset', config: FiberCrackConfig, frame=None):
    """Export both the crack-area chart and the per-frame figures."""
    plot_crack_area_figures(dataset, config)
    plot_frame_data_figures(dataset, config, targetFrame=frame)
def plot_to_pdf(dataset: 'Dataset', config: FiberCrackConfig,
                plotFrameFunction: Callable[[Callable[[str], plt.Axes], np.ndarray, List[str]], None]):
    """Render every frame, plus summary charts, into one multi-page PDF.

    :param plotFrameFunction: draws one frame's plots, requesting axes
                              through the axis-builder callback it is given.
    """
    h5Data, header, frameMap, *r = dataset.unpack_vars()
    # Prepare for plotting
    pdfPath = os.path.join(config.outDir, 'fiber-crack.pdf')
    print("Plotting to {}".format(pdfPath))
    pdf = PdfPages(pdfPath)
    # Prepare a figure with subfigures.
    fig = plt.figure(dpi=300)
    axes = []

    def axis_builder(label: str) -> plt.Axes:
        # Hands out subplots of the shared 6x6 grid, one per call.
        if len(axes) >= 6 * 6:
            raise RuntimeError("PDF layout doesn't have enough subplots.")
        ax = fig.add_subplot(6, 6, len(axes) + 1)
        axes.append(ax)
        ax.axis('off')
        return ax

    fig.subplots_adjust(hspace=0.025, wspace=0.025)
    # Draw the frame plots.
    for f in range(0, dataset.get_frame_number()):
        timeStart = time.time()
        frameIndex = frameMap[f]
        print("Plotting frame {}".format(frameIndex))
        fig.suptitle("Frame {}".format(frameIndex))
        frameData = h5Data[f, :, :, :]
        # The actual plotting is done by the provided function.
        plotFrameFunction(axis_builder, frameData, header)
        pdf.savefig(fig, bbox_inches='tight', dpi=300)
        # Reuse the same figure for the next frame: clear the used axes and
        # start handing out subplots from the top of the grid again.
        for a in axes:
            a.clear()
            a.axis('off')
        axes = []
        print("Rendered in {:.2f} s.".format(time.time() - timeStart))
    # Crack area figure.
    fig = plotting.plot_crack_area_chart(dataset)
    fig.suptitle('Crack area in the current frame')
    pdf.savefig(fig, bbox_inches='tight', dpi=300)
    # Print the data-to-camera mapping.
    fig = plotting.plot_data_mapping(dataset)
    pdf.savefig(fig, bbox_inches='tight', dpi=300)
    pdf.close()
    print("Finished plotting to {}".format(pdfPath))
def plot_crack_extraction_view(axisBuilder: Callable[[str], plt.Axes], frameData, header):
    """Draw the full crack-extraction overview for a single frame."""
    plotters = (
        plotting.plot_original_data_for_frame,
        plotting.plot_unmatched_cracks_for_frame,
        plotting.plot_image_cracks_for_frame,
        plotting.plot_reference_crack_for_frame,
        plotting.plot_feature_histograms_for_frame,
    )
    for plotter in plotters:
        plotter(axisBuilder, frameData, header)
def plot_crack_prediction_view(axisBuilder: Callable[[str], plt.Axes], frameData, header):
    """Draw the image-based cracks alongside the crack prediction for a frame."""
    for plotter in (plotting.plot_image_cracks_for_frame,
                    plotting.plot_crack_prediction_for_frame):
        plotter(axisBuilder, frameData, header)
def plot_optic_flow(dataset: 'Dataset'):
    """Show the optic flow for the frame labelled 1800 in an interactive window."""
    frame = dataset.get_frame_map().index(1800)
    figure = plt.figure()
    axis = figure.add_subplot(111)
    plotting.plot_optic_flow_for_frame(axis, dataset, frame)
    plt.show()
def export_crack_volume(dataset: 'Dataset', config: FiberCrackConfig):
    """
    Build a volume by concatenating crack areas from each frame,
    save to the disk in datraw format.
    :param dataset:
    :param config:
    :return:
    """
    frameSize = dataset.get_frame_size()
    framesToSkip = config.exportedVolumeSkippedFrames
    frameWidth = config.exportedVolumeTimestepWidth  # Slices along time per frame.
    frameNumber = dataset.get_frame_number() - framesToSkip
    # The volume should be exported in Z,Y,X,C with C-order.
    volume = np.zeros((frameNumber * frameWidth, frameSize[1], frameSize[0], 4), dtype=np.uint8)
    strain = dataset.get_metadata_column('Strain (%)')
    # Color is mapped onto a configured strain sub-range, not the full range.
    minStrain = config.exportedVolumeStrainMin
    maxStrain = config.exportedVolumeStrainMax
    colorMap = plt.get_cmap('plasma')
    for f in range(0, frameNumber):
        # Frames without a camera image leave their slab zeroed (transparent).
        if not dataset.get_metadata_val(f, 'hasCameraImage'):
            continue
        crackArea = dataset.get_column_at_frame(f, 'hybridCracks')
        crackAreaUint8 = np.zeros(crackArea.shape[0:2] + (4,), dtype=np.uint8)
        # Can be negative, since we map color not to the full strain range.
        t = max(0.0, (strain[f] - minStrain) / (maxStrain - minStrain))
        crackAreaUint8[crackArea == 1.0] = np.asarray(colorMap(t)) * 255
        # Fill this frame's slab of `frameWidth` slices along the time axis.
        volumeSlabSelector = slice_along_axis(slice(f * frameWidth, f * frameWidth + frameWidth), 0, volume.ndim)
        volume[volumeSlabSelector] = crackAreaUint8.swapaxes(0, 1)
    write_volume_to_datraw(np.flip(volume, 0), os.path.join(config.outDir, 'crack-volume.raw'))
    # Export the color mapping legend.
    fig = plt.figure(figsize=(4, 1))
    ax = fig.add_axes([0.05, 0.5, 0.9, 0.25])
    norm = mpl.colors.Normalize(vmin=minStrain, vmax=maxStrain)
    colorBar = mpl.colorbar.ColorbarBase(ax, cmap=colorMap, norm=norm, orientation='horizontal')
    colorBar.set_label('Strain (%)')
    fig.savefig(os.path.join(config.outDir, 'crack-volume-legend.png'))
def export_displacement_volume(dataset: 'Dataset', config: FiberCrackConfig):
    """Export per-frame displacement magnitude as a uint8 image sequence.

    Values are scaled into the upper half of the uint8 range against a fixed
    maximum so the mapping is consistent across datasets; pixels that lost
    tracking (sigma < 0) are set to zero.
    """
    frameSize = dataset.get_frame_size()
    frameNumber = dataset.get_frame_number()
    # The volume should be exported in Z,Y,X with C-order.
    # Fixed: np.float was a deprecated alias of builtin float and was removed
    # in NumPy 1.24; use the explicit float64 dtype (same semantics).
    volume = np.zeros((frameNumber, 1, frameSize[1], frameSize[0]), dtype=np.float64)
    for f in range(0, frameNumber):
        displacementX = dataset.get_column_at_frame(f, 'u')
        displacementY = dataset.get_column_at_frame(f, 'v')
        # Displacement magnitude, transposed into (y, x) layout.
        volume[f, 0, ...] = np.transpose(np.sqrt(np.square(displacementX) + np.square(displacementY)))
    maxValue = np.max(volume)
    meanValue = np.mean(volume)
    print("Max displacement value (mapped to 255): {}".format(maxValue))
    print("Mean displacement value: {}".format(meanValue))
    # Manually set the mapping range to make it consistent across different datasets.
    mappingMax = 700
    if mappingMax < maxValue:
        raise RuntimeError("Dataset values are getting clipped when mapping to volume values. Max value: {}"
                           .format(maxValue))
    for f in range(0, frameNumber):
        sigma = dataset.get_column_at_frame(f, 'sigma')
        # Map [0, mappingMax] onto [127, 254]; zero is reserved for "no tracking".
        volume[f, 0, ...] = volume[f, 0, ...] / mappingMax * 127 + 127
        for y in range(volume.shape[2]):
            volume[f, 0, y, :][sigma[:, y] < 0] = 0  # Set to zero areas with no tracking.
    volumeDir = os.path.join(config.outDir, os.path.basename(config.dataConfig.metadataFilename))
    write_volume_sequence(volumeDir, volume, clip=(0, 255), dtype=np.uint8)
def export_crack_propagation(dataset: 'Dataset', config: FiberCrackConfig):
    """
    Export data required for crack propagation comparison into an HDF file.
    :param dataset:
    :param config:
    :return:
    """
    # Create the output dir.
    crackDataDir = os.path.join(config.outDir, 'crack-propagation')
    if not os.path.exists(crackDataDir):
        os.makedirs(crackDataDir)
    # Remove the old file, if exists.
    crackDataFilePath = os.path.join(crackDataDir, 'crack-propagation_{}.hdf'.format(config.dataConfig.metadataFilename))
    if os.path.exists(crackDataFilePath):
        os.remove(crackDataFilePath)
    print("Exporting crack propagation data to {}.".format(crackDataFilePath))
    # Fixed: the file handle was closed manually and leaked if any write
    # raised; the context manager guarantees the file is closed.
    with h5py.File(crackDataFilePath, 'w') as h5File:
        header = dataset.get_header()
        crack = dataset.h5Data[..., header.index('sigmaSkeleton')]  # type: np.ndarray
        frameMap = dataset.get_frame_map()
        strain = dataset.get_metadata_column('Strain (%)')
        h5File.create_dataset('sigmaSkeleton', data=crack)
        h5File.create_dataset('frameMap', data=frameMap)
        h5File.create_dataset('strain', data=strain)
def fiber_crack_run(command: str, config: FiberCrackConfig, frame: int = None):
    """Load, augment and process the dataset, then execute `command`.

    :param command: one of the keys of `commandMap` below.
    :param frame: optional frame label; only used by 'export-figures'.
    """
    # Define which commands are possible.
    # (The lambdas close over `dataset`, which is assigned below before any
    # of them can be called.)
    commandMap = {
        'plot': lambda: plot_to_pdf(dataset, config, plot_crack_extraction_view),
        'export-crack-volume': lambda: export_crack_volume(dataset, config),
        'export-displacement-volume': lambda: export_displacement_volume(dataset, config),
        'optic-flow': lambda: plot_optic_flow(dataset),
        'export-figures': lambda: plot_figures(dataset, config, frame),
        'export-figures-only-area': lambda: plot_crack_area_figures(dataset, config),
        'export-crack-propagation': lambda: export_crack_propagation(dataset, config)
    }
    timeStart = time.time()
    print("Loading the data.")
    dataset = load_data(config)
    print("Data loaded in {:.3f} s. Shape: {} Columns: {}".format(time.time() - timeStart, dataset.h5Data.shape,
                                                                  dataset.get_header()))
    print("Data attributes: {}".format(dataset.get_all_attrs()))
    timeStart = time.time()
    print("Augmenting the data.")
    augment_data(dataset, config)
    print("Data augmented in {:.3f} s.".format(time.time() - timeStart))
    timeStart = time.time()
    compute_and_append_results(dataset, config)
    print("Results computed and appended in {:.3f} s.".format(time.time() - timeStart))
    timeStart = time.time()
    print("Executing command: {}".format(command))
    commandMap[command]()
    print("Command executed in {:.3f} s.".format(time.time() - timeStart))
def main():
    """CLI entry point: parse arguments, load the config, run the command."""
    commandList = [
        'plot',
        'export-crack-volume',
        'export-displacement-volume',
        'optic-flow',
        'export-figures',
        'export-figures-only-area',
        'export-crack-propagation'
    ]
    # Parse the arguments.
    parser = argparse.ArgumentParser('Fiber crack.')
    parser.add_argument('-d', '--data-path', required=True, type=str)
    parser.add_argument('-p', '--preloaded-path', required=True, type=str)
    parser.add_argument('-o', '--out-path', required=True, type=str)
    parser.add_argument('-c', '--command', default='plot', choices=commandList)
    parser.add_argument('-g', '--config', required=True, type=str)
    parser.add_argument('-f', '--frame', default=None, type=int)
    args = parser.parse_args()

    config = FiberCrackConfig()
    config.read_from_file(args.config)
    # Prepend the paths passed at runtime to the configuration file.
    config.dataConfig.basePath = os.path.join(args.data_path, config.dataConfig.basePath)
    config.dataConfig.preloadedDataDir = args.preloaded_path
    config.outDir = args.out_path

    # Fixed: the old `args.frame if 'frame' in args.__dict__ else None` was
    # dead code -- argparse always defines the attribute (default None).
    fiber_crack_run(args.command, config, args.frame)
if __name__ == '__main__':
main()
|
import argparse

# Look up a databricks job id by (case-insensitive) job name in the output of
# `databricks jobs list`; prints the id, or an empty string when not found.
parser = argparse.ArgumentParser()
parser.add_argument("--job_name", help="target job name")  # noqa: E501
parser.add_argument(
    "--job_list", help="list of existing jobs from databricks cli"
)  # noqa: E501
args = parser.parse_args()

job_name = args.job_name.lower()
jobs = args.job_list.splitlines()

target_id = ""
for job in jobs:
    # Each line is "<id> <name>". Fixed: blank or malformed lines used to
    # raise IndexError on kv[1]; skip them instead.
    parts = job.split(" ", 1)
    if len(parts) < 2:
        continue
    job_id = parts[0]  # renamed from `id`, which shadowed the builtin
    name = parts[1].strip()
    if name.lower() == job_name:
        if target_id == "":
            target_id = job_id
        else:
            # Two jobs with the same name make the lookup ambiguous.
            raise Exception(
                f"job with same name already exists: {target_id} and {job_id}"
            )  # noqa: E501
print(target_id)
|
import numpy as np
from three_wolves.deep_whole_body_controller.utility import trajectory, reward_utils, pc_reward
# Physical parameters of the manipulated cube: mass in kg, diagonal inertia
# tensor in kg*m^2 (uniform, so all three principal moments are equal).
CUBE_MASS = 0.094
CUBE_INERTIA = np.array([[0.00006619, 0, 0],
                         [0, 0.00006619, 0],
                         [0, 0, 0.00006619]])
class PositionController:
    """Trajectory-following position controller for a three-finger manipulator.

    Plans a Cartesian path for the grasped object towards the goal and turns
    desired fingertip positions into joint positions via inverse kinematics.
    """

    def __init__(self, kinematics, observer, step_size):
        # step_size: number of 1 ms simulation steps per control step (time
        # advances by 0.001 * step_size seconds each call).
        self.step_size = step_size
        self.kinematics = kinematics
        self.observer = observer
        self.t = 0  # Time along the current trajectory, seconds.
        self.tg = None  # Current trajectory generator: t -> (position, done).
        self.desired_contact_points = None  # Object-frame fingertip contacts.
        self.contact_face_ids = None
        self.reach_time = 4.0  # Time budget for the initial reach phase, s.
        self.complement = False  # True once the correction re-plan happened.

    def reset(self):
        pass

    def reset_tg(self, init_pos, tar_pos, desired_speed=0.1):
        """Plan a new path from init_pos to tar_pos at roughly desired_speed (m/s)."""
        obj_goal_dist = reward_utils.ComputeDist(init_pos, tar_pos)
        total_time = obj_goal_dist / desired_speed
        self.t = 0
        self.tg = trajectory.get_path_planner(init_pos=init_pos,
                                              tar_pos=tar_pos,
                                              start_time=0,
                                              reach_time=total_time)

    def update(self, contact_points, contact_face_ids):
        """Store the grasp definition and plan the object-to-goal trajectory."""
        self.contact_face_ids = contact_face_ids
        self.desired_contact_points = contact_points
        self.reset_tg(self.observer.dt['object_position'], self.observer.dt['goal_position'])
        self.complement = False

    def get_action(self):
        """Return the desired joint positions for the current control step."""
        # first trajectory: track the planned object path, offset by the
        # desired contact points.
        desired_position = self.tg(self.t)[0] + self.desired_contact_points
        desired_joint_position, _ = self.kinematics.inverse_kinematics(desired_position,
                                                                       self.observer.dt['joint_position'])
        # complement trajectory: once the first plan reports completion
        # (tg(t)[1]), re-plan a single slower (0.05 m/s) correction that
        # over-shoots the goal by the current residual error.
        if not self.complement and self.tg(self.t)[1]:
            goal_residual = self.observer.dt['goal_position'] - self.observer.dt['object_position']
            # self.desired_contact_points += goal_residual
            self.reset_tg(self.observer.dt['object_position'], self.observer.dt['goal_position'] + goal_residual, 0.05)
            self.complement = True
        self.t += 0.001 * self.step_size
        return desired_joint_position

    def _get_clip_yaw(self, c=np.pi / 4):
        # transfer to -pi/4 to pi/4
        theta = self.observer.dt['object_rpy'][2]
        if theta < -c or theta > c:
            n = (theta + c) // (2 * c)
            beta = theta - np.pi * n / 2
        else:
            beta = theta
        return beta

    def tips_reach(self, apply_action, tip_force_offset):
        """Move the fingertips through pre-grasp waypoints down onto the object.

        :param apply_action: callback that executes a joint-position action.
        :param tip_force_offset: list; the baseline tip force reading is
                                 appended once (just before descending).
        """
        s = 2
        # Scale pushing pre-grasp points away from the object along the axis
        # selected by the chosen contact face.
        pre_finger_scale = np.array([[1, s, 1],
                                     [s, 1, 1],
                                     [1, s, 1],
                                     [s, 1, 1]])[self.contact_face_ids]
        # Waypoints: P0 hover above current tips; P1 above the scaled
        # contacts; P2 scaled contacts; P3 the actual contact points.
        P0 = np.array([list(self.observer.dt[f'tip_{i}_position'][:2]) + [0.08] for i in range(3)])
        P1 = self.desired_contact_points * pre_finger_scale + [0, 0, 0.05]
        P2 = self.desired_contact_points * pre_finger_scale
        P3 = self.desired_contact_points
        key_points = [P0, P1, P2, P3]
        # Fractions of reach_time allotted to each leg.
        key_interval = np.array([0.2, 0.2, 0.3, 0.3]) * self.reach_time
        for points, interval in zip(key_points, key_interval):
            # Record the baseline tip force once, right before leg P1.
            if (points == P1).all() and tip_force_offset == []:
                tip_force_offset.append(self.observer.dt['tip_force'])
            # Rotate waypoints by the object's (wrapped) yaw, then express
            # them relative to the current object position.
            _clip_yaw = self._get_clip_yaw()
            rotated_key_pos = np.array([trajectory.Rotate([0, 0, _clip_yaw], points[i]) for i in range(3)])
            tar_tip_pos = self.observer.dt['object_position'] + rotated_key_pos
            self._to_point(apply_action, tar_tip_pos, interval)

    def _to_point(self, apply_action, tar_tip_pos, total_time):
        """Drive the fingertips to tar_tip_pos over total_time seconds."""
        init_tip_pos = np.hstack([self.observer.dt[f'tip_{i}_position'] for i in range(3)])
        # Plan to arrive slightly early (80% of the interval) and hold.
        tg = trajectory.get_path_planner(init_pos=init_tip_pos,
                                         tar_pos=tar_tip_pos.flatten(),
                                         start_time=0,
                                         reach_time=total_time * 0.8)
        t = 0
        while t < total_time:
            tg_tip_pos = tg(t)[0]
            arm_joi_pos = self.observer.dt['joint_position']
            to_goal_joints, _error = self.kinematics.inverse_kinematics(tg_tip_pos.reshape(3, 3),
                                                                        arm_joi_pos)
            apply_action(to_goal_joints)
            t += 0.001 * self.step_size

    def get_reward(self):
        """Return the (negative) trajectory-following penalty, weight -500."""
        goal_reward = pc_reward.TrajectoryFollowing(self.observer.dt, self.tg(self.t)[0], wei=-500)
        return goal_reward
|
from utils import load_pickle
import glob
import numpy as np
from sklearn.utils import shuffle
from preprocess import W2VTransformer
from utils import pad_with_vectors
from keras.utils.np_utils import to_categorical
from preprocess import get_int_representation_from_vocab
from utils import get_imdb_vocab
def load_imdb_tfidf():
    """Load and return the pre-computed IMDB tf-idf matrix from disk."""
    return load_pickle('data/imdb_tfidf.pkl')
def load_w2v_transformer(path="w2v_transformer.pkl"):
    """Unpickle and return the fitted W2VTransformer."""
    transformer = load_pickle(path)
    return transformer
def _file_gen(path):
for filename in glob.glob(path):
yield filename
def _imdb_filename_labeller_gen(parent_path,batch_size=32):
    """Endlessly yield (filenames, labels) batches for the IMDB review set.

    Positive reviews are labelled 1, negative 0; the order is shuffled once.
    NOTE(review): this is Python-2 code (`xrange`, true-division `/` used as
    integer division); under Python 3 `range(total_size/batch_size)` raises
    TypeError -- confirm the project's target interpreter.
    """
    files_pos = list(_file_gen(parent_path+"/pos/*"))
    files_neg = list(_file_gen(parent_path+"/neg/*"))
    # test_files_pos = list(file_gen("data/imdb/test/pos"))
    # test_files_neg = list(file_gen("data/imdb/test/neg"))
    total_files = np.array(files_pos + files_neg)
    labels = np.concatenate((np.ones(len(files_pos)),np.zeros(len(files_neg))))
    # Shuffle files and labels with the same permutation.
    inds = shuffle(np.arange(len(labels)))
    total_files = total_files[inds]
    labels = labels[inds]
    total_size = len(labels)
    n_batches = total_size/batch_size
    while(1):
        for i in xrange(n_batches):
            low = i*batch_size
            up = (i+1)*batch_size
            # NOTE(review): the else-branch yields total_files[up:], which
            # skips the current batch [low:up]; [low:] looks intended and the
            # threshold expression also looks off -- verify before relying on
            # the tail-batch behavior.
            if(up<total_size-total_size%batch_size - batch_size):
                yield (total_files[low:up],labels[low:up])
            else:
                yield (total_files[up:],labels[up:])
def imdb_gen(data='train',batch_size=32,max_words=500,mode='w2v'):
    """Endlessly yield (x_batch, y_batch) batches from the IMDB review files.

    :param data: sub-directory under data/imdb to read ('train' or 'test').
    :param max_words: pad/truncate length; None disables padding in w2v mode.
    :param mode: 'w2v' for padded word2vec vectors, 'int' for int sequences.
    """
    gen = _imdb_filename_labeller_gen("data/imdb/"+data,batch_size)
    w2v = load_w2v_transformer()
    vocab = get_imdb_vocab()
    while(1):
        # Fixed: use the builtin next() instead of the Python-2-only .next()
        # method (next() also works on Python 2.6+).
        x_batch_fnames, y_batch = next(gen)
        x = []
        for fname in x_batch_fnames:
            with open(fname) as file:
                content = file.read()
            x.append(content)
        # Fixed: strings were compared with 'is'/identity, which only works
        # by accident for interned literals; use equality.
        if mode == 'w2v':
            x_batch = w2v.transform(x)
            if max_words is not None:
                x_batch = pad_with_vectors(x_batch,max_words)
        elif mode == 'int':
            x_batch = get_int_representation_from_vocab(x,max_words=max_words,vocab=vocab)
        else:
            raise AttributeError("attribute mode should be one of 'w2v' or 'int'.")
        y_batch = to_categorical(y_batch,2)
        yield x_batch,y_batch
|
"""The "Ikeda map" is a discrete-time dynamical system of size 2.
Source: [Wiki](https://en.wikipedia.org/wiki/Ikeda_map) and Colin Grudzien.
See `demo` for more info.
"""
import numpy as np
from numpy import cos, sin
import dapper.mods as modelling
import dapper.tools.liveplotting as LP
# Constant 0.6 <= u <= 1.
u = 0.9

x0 = np.zeros(2)  # Default initial state of the 2D map.

Tplot = 10.0  # Plotting window length (presumably; consumed by the LP tooling).
@modelling.ens_compatible
def step(x, _t, _dt):
    """Advance the Ikeda map one step: (x, y) -> (1 + u*x1, u*y1)."""
    _s, _angle, x1, y1 = aux(x[0], x[1])
    return 1 + u * x1, u * y1
def aux(x, y):
    """Intermediate quantities shared by `step` and its Jacobian `dstep_dx`:
    the squared-norm term s, rotation angle t, and the rotated point (x1, y1).
    """
    s = 1 + x**2 + y**2
    t = 0.4 - 6 / s
    # x1= x*cos(t) + y*cos(t) # Colin's mod
    ct, st = cos(t), sin(t)
    x1 = x*ct - y*st
    y1 = x*st + y*ct
    return s, t, x1, y1
def dstep_dx(x, _t, _dt):
    """Jacobian of `step` with respect to the state x = (x, y)."""
    s, t, x1, y1 = aux(*x)
    x, y = x
    # Partials of the angle: t = 0.4 - 6/s with s = 1 + x^2 + y^2,
    # so dt/dx = 12*x/s^2 and dt/dy = 12*y/s^2.
    dt_x = 12/s**2 * x
    dt_y = 12/s**2 * y
    # Chain rule through the rotation of (x, y) by the state-dependent angle t.
    dx_x = -y1*dt_x + cos(t)
    dy_x = +x1*dt_x + sin(t)
    dx_y = -y1*dt_y - sin(t)
    dy_y = +x1*dt_y + cos(t)
    return u * np.array([[dx_x, dx_y], [dy_x, dy_y]])
# Liveplotting
params = dict(labels='xy')


# Default liveplotter setup: sliding marginals plus a 2D phase-space particle
# view. The shared `params` dict default is only read, never mutated here.
def LPs(jj=None, params=params): return [
    (1, LP.sliding_marginals(obs_inds=jj, zoomy=0.8, **params)),
    (1, LP.phase_particles(
        is_3d=False, obs_inds=jj, zoom=0.8, Tplot=0, **params)),
]
|
# Generated by Django 3.1.7 on 2021-04-20 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the nullable free-text `practice` field to Resume."""

    dependencies = [
        ('Job', '0016_auto_20210420_0845'),
    ]

    operations = [
        migrations.AddField(
            model_name='resume',
            name='practice',
            field=models.TextField(null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-05 19:26
from __future__ import unicode_literals
from django.db import migrations, models
import documents.models
class Migration(migrations.Migration):
    """Auto-generated: adds stored thumbnail dimensions to Document and wires
    them into the thumbnail ImageField via height_field/width_field, so the
    dimensions are cached in the DB instead of re-read from the image."""

    dependencies = [
        ('documents', '0012_auto_20160212_2053'),
    ]

    operations = [
        migrations.AddField(
            model_name='document',
            name='file_thumbnail_height',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='document',
            name='file_thumbnail_width',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='document',
            name='file_thumbnail',
            field=models.ImageField(blank=True, height_field='file_thumbnail_height', null=True, upload_to=documents.models.document_file_thumbnail_upload_path, width_field='file_thumbnail_width'),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
# Side-by-side comparison of original vs autoencoder-reconstructed ICA
# time-series: two subjects (30, 60) x two encoder variants x 50 components,
# one saved figure per component.
for person in [30, 60]:
    for sav_name in ['3layerAE', '3layerVAE']:
        orig_ts = np.load(f'encoder_comparisons/{sav_name}_1_0_{person}_orig.npy')
        recons_ts = np.load(f'encoder_comparisons/{sav_name}_1_0_{person}_recons.npy')
        for i in range(50):
            fig, ax = plt.subplots(nrows=2, ncols=1)
            ax[0].plot(orig_ts[i, :])
            ax[0].set_title(f'Original - Person {person}, ICA {i}')
            ax[1].plot(recons_ts[i, :])
            ax[1].set_title(f'Reconstructed using {sav_name} - Person {person}, ICA {i}')
            plt.tight_layout()
            plt.savefig(f'encoder_comparisons/fig_{sav_name}_{person}_{i}.png')
            # Close each figure so 200 open figures don't accumulate in memory.
            plt.close()
#!/bin/env python
# coding: utf-8
"""
asyncio support sqlite
"""
import os
import re
import sys
from setuptools import setup, find_packages
PY_VER = sys.version_info

# asyncio is in the stdlib from Python 3.4; on 3.3 it must come from PyPI.
INSTALL_REQUIRES = []

if PY_VER >= (3, 4):
    pass
elif PY_VER >= (3, 3):
    INSTALL_REQUIRES.append('asyncio')
else:
    # Fixed: the message named "aiomysql" (copy-paste from another project)
    # and misspelled "support"/"earlier".
    raise RuntimeError("aiosqlite3 doesn't support Python earlier than 3.3")
def find_version(*file_paths):
    """Scan the joined path for a `__version__ = '...'` line and return the value.

    Raises RuntimeError when no version assignment is found.
    """
    target = os.path.join(*file_paths)
    with open(target, 'r') as handle:
        for line in handle:
            match = re.search(
                r"^__version__ = ['\"]([^'\"]*)['\"]",
                line,
                re.M
            )
            if match:
                return match.group(1)
    raise RuntimeError('Unable to find version string.')
# Optional SQLAlchemy integration: install with `pip install aiosqlite3[sa]`.
EXTRAS_REQUIRE = {'sa': ['sqlalchemy>=0.9']}

setup(
    name='aiosqlite3',
    version=find_version('aiosqlite3', '__init__.py'),  # single-sourced version
    packages=find_packages(exclude=["tests", "tests.*"]),
    url='https://github.com/zeromake/aiosqlite3',
    license='MIT',
    author='zeromake',
    author_email='a390720046@gmail.com',
    description='sqlite3 support for asyncio.',
    platforms=['POSIX'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: POSIX',
        'Environment :: Web Environment',
        'Topic :: Database',
        'Framework :: AsyncIO',
    ],
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE
)
|
import re
from django import template
from django.template import Context, Template
from django.core.urlresolvers import resolve, Resolver404
from datetime import datetime
# FIX: removed `from datetime import datetime_delta` — no such name exists in
# the datetime module, so the import raised ImportError and the whole template
# library failed to load. The `datetime_delta` used in expires_in below is a
# plain local variable holding a timedelta; it needs no import.
register = template.Library()
@register.simple_tag
def expires_in(item):
    """Return the number of whole days until item.expiration_date as a
    string like "5 days" (negative once the date has passed)."""
    # A plain datetime at midnight of the expiration date is constructed so
    # the subtraction below yields a timedelta regardless of the field type
    # (the original author noted Django's datetime handling misbehaved here).
    datetime_expiration = datetime(item.expiration_date.year,
                                   item.expiration_date.month,
                                   item.expiration_date.day, 0, 0)
    datetime_current = datetime.now()
    datetime_delta = datetime_expiration - datetime_current
    days_til_expires = datetime_delta.days
    # FIX: the original wrote str(days_til_expires + ' days'), which raises
    # TypeError (int + str); convert the count to str before concatenating.
    return str(days_til_expires) + ' days'
|
def workbook(n, k, arr):
    """Count "special" problems in Lisa's workbook.

    n    -- number of chapters (== len(arr))
    k    -- maximum number of problems a page can hold
    arr  -- problems per chapter; problems are numbered from 1 within each
            chapter, and pages are numbered consecutively across the book.

    A problem is special when its number equals the number of the page it
    appears on. Returns the count of special problems.
    """
    page = 1
    special = 0
    for chapter in range(n):
        problems = arr[chapter]
        # Each page holds problems [first, last] of the current chapter.
        for first in range(1, problems + 1, k):
            last = min(first + k - 1, problems)
            if first <= page <= last:
                special += 1
            page += 1
    return special
# Sample case from the problem statement; expected output is 4.
print(workbook(5, 3, [4, 2, 6, 1, 10]))
|
# Generated by Django 3.0.8 on 2020-08-27 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Team model (members shown on the frontend's team page:
    # name, roll number, and a photo stored under assets/images).
    dependencies = [
        ('frontend', '0007_auto_20200711_1042'),
    ]
    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                # NOTE: explicit (non-auto) integer primary key — callers
                # must supply ids themselves.
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30)),
                ('roll_no', models.CharField(max_length=10)),
                ('image', models.ImageField(upload_to='assets/images')),
            ],
        ),
    ]
|
import sys
import logging
from .sentry import get_client as get_sentry_client
from .job_status import set_status
from .misc import get_http_log_path
from .config import config as teuth_config
from .exceptions import ConnectionLostError
from copy import deepcopy
log = logging.getLogger(__name__)
def import_task(name):
    """Resolve the task module *name*, looking first in the built-in
    teuthology.task package and then in the external 'tasks' package;
    raise ImportError when neither provides it."""
    for package in ('teuthology.task', 'tasks'):
        candidate = __import__(package, globals(), locals(), [name], 0)
        if hasattr(candidate, name):
            return getattr(candidate, name)
    raise ImportError("Could not find task '%s'" % name)
def run_one_task(taskname, **kwargs):
    """Look up and invoke the entry point for *taskname*.

    'mod.func' calls func in module mod; a bare name calls that module's
    'task' function. Remaining keyword args are passed straight through.
    """
    modname, _, funcname = taskname.rpartition('.')
    if not modname:
        modname, funcname = taskname, 'task'
    # Teuthology configs may refer to modules like ceph_deploy as ceph-deploy
    modname = modname.replace('-', '_')
    module = import_task(modname)
    try:
        entry = getattr(module, funcname)
    except AttributeError:
        log.error("No subtask of %s named %s was found", module, funcname)
        raise
    return entry(**kwargs)
def run_tasks(tasks, ctx):
    """Run each task in *tasks* (a list of single-entry {name: config}
    dicts), entering context-manager tasks as they start, and unwind them
    in reverse order on the way out.

    Failures are recorded in ctx.summary (status 'fail', or 'dead' for lost
    connections), reported to Sentry when configured, and may drop into
    interactive mode when ctx.config['interactive-on-error'] is set.
    Raises SystemExit(1) when an exception survives unwinding.
    """
    stack = []
    try:
        for taskdict in tasks:
            try:
                # Each task definition must be a single-entry mapping.
                # PY3 FIX: dict.iteritems() no longer exists — the old code
                # hit AttributeError here and mis-reported *every* task
                # definition as invalid on Python 3.
                ((taskname, config),) = taskdict.items()
            except (ValueError, AttributeError):
                raise RuntimeError('Invalid task definition: %s' % taskdict)
            log.info('Running task %s...', taskname)
            manager = run_one_task(taskname, ctx=ctx, config=config)
            if hasattr(manager, '__enter__'):
                manager.__enter__()
                stack.append((taskname, manager))
    except BaseException as e:
        if isinstance(e, ConnectionLostError):
            # Prevent connection issues being flagged as failures
            set_status(ctx.summary, 'dead')
        else:
            # the status may have been set to dead, leave it as-is if so
            if not ctx.summary.get('status', '') == 'dead':
                set_status(ctx.summary, 'fail')
            if 'failure_reason' not in ctx.summary:
                ctx.summary['failure_reason'] = str(e)
        log.exception('Saw exception from tasks.')
        sentry = get_sentry_client()
        if sentry:
            config = deepcopy(ctx.config)
            tags = {
                'task': taskname,
                'owner': ctx.owner,
            }
            if 'teuthology_branch' in config:
                tags['teuthology_branch'] = config['teuthology_branch']
            if 'branch' in config:
                tags['branch'] = config['branch']
            # Remove ssh keys from reported config
            if 'targets' in config:
                targets = config['targets']
                for host in targets.keys():
                    targets[host] = '<redacted>'
            job_id = ctx.config.get('job_id')
            archive_path = ctx.config.get('archive_path')
            extra = dict(config=config,
                         )
            if job_id:
                extra['logs'] = get_http_log_path(archive_path, job_id)
            exc_id = sentry.get_ident(sentry.captureException(
                tags=tags,
                extra=extra,
            ))
            event_url = "{server}/search?q={id}".format(
                server=teuth_config.sentry_server.strip('/'), id=exc_id)
            log.exception(" Sentry event: %s" % event_url)
            ctx.summary['sentry_event'] = event_url
        if ctx.config.get('interactive-on-error'):
            # One-shot: avoid re-entering interactive mode during cleanup.
            ctx.config['interactive-on-error'] = False
            from .task import interactive
            log.warning('Saw failure during task execution, going into interactive mode...')
            interactive.task(ctx=ctx, config=None)
        # Throughout teuthology, (x,) = y has been used to assign values
        # from yaml files where only one entry of type y is correct. This
        # causes failures with 'too many values to unpack.' We want to
        # fail as before, but with easier to understand error indicators.
        # PY3 FIX: exceptions no longer carry a .message attribute, and the
        # message now ends with '(expected N)', so match on the prefix.
        if isinstance(e, ValueError) and \
                str(e).startswith('too many values to unpack'):
            emsg = 'Possible configuration error in yaml file'
            log.error(emsg)
            ctx.summary['failure_info'] = emsg
    finally:
        try:
            exc_info = sys.exc_info()
            while stack:
                taskname, manager = stack.pop()
                log.debug('Unwinding manager %s', taskname)
                try:
                    suppress = manager.__exit__(*exc_info)
                except Exception as e:
                    if isinstance(e, ConnectionLostError):
                        # Prevent connection issues being flagged as failures
                        set_status(ctx.summary, 'dead')
                    else:
                        set_status(ctx.summary, 'fail')
                    if 'failure_reason' not in ctx.summary:
                        ctx.summary['failure_reason'] = str(e)
                    log.exception('Manager failed: %s', taskname)
                    if exc_info == (None, None, None):
                        # if first failure is in an __exit__, we don't
                        # have exc_info set yet
                        exc_info = sys.exc_info()
                    if ctx.config.get('interactive-on-error'):
                        from .task import interactive
                        log.warning(
                            'Saw failure during task cleanup, going into interactive mode...')
                        interactive.task(ctx=ctx, config=None)
                else:
                    if suppress:
                        # PY3 FIX: sys.exc_clear() was removed in Python 3;
                        # resetting our local exc_info is all that's needed.
                        exc_info = (None, None, None)
            if exc_info != (None, None, None):
                log.debug('Exception was not quenched, exiting: %s: %s',
                          exc_info[0].__name__, exc_info[1])
                raise SystemExit(1)
        finally:
            # be careful about cyclic references
            del exc_info
|
# Generated by Django 3.2.8 on 2021-10-12 04:58
import datetime
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for a food-donation app: scheduled donations, product
    # groups, products, organization units, and received donations.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A donation pledged by a user for later delivery.
        migrations.CreateModel(
            name='DoacaoAgendada',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dataAgendamento', models.DateField(auto_now_add=True, db_column='data_agendamento', verbose_name='Data Agendamento')),
                ('quantidade', models.PositiveSmallIntegerField(db_column='qtde_agendada', default=1, validators=[django.core.validators.MinValueValidator(1)], verbose_name='Quantidade')),
                ('status', models.CharField(choices=[('PEN', 'Pendente'), ('REC', 'Recebido'), ('CAN', 'Cancelado')], default='PEN', max_length=5, verbose_name='Status')),
                ('doador', models.ForeignKey(db_column='doador_user_id', on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Doador')),
            ],
        ),
        # Product category plus its packaging/basket-composition rules.
        migrations.CreateModel(
            name='GrupoProduto',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descricao', models.CharField(help_text='Informe a descrição do grupo', max_length=50, verbose_name='Descrição')),
                ('unidadeEmbalagem', models.CharField(choices=[('KG', 'Quilograma (kg)'), ('GR', 'Grama (g)'), ('MG', 'Miligrama (mg)'), ('LTR', 'Litro (l)'), ('ML', 'Mililitro (ml)'), ('PCT', 'Pacote (pct)'), ('UN', 'Unidade (und)')], db_column='unidade_embalagem', help_text='Informe a unidade de embalagem do grupo', max_length=5, verbose_name='Unidade Embalagem')),
                ('qtdeNaEmbalagem', models.DecimalField(db_column='qtde_na_embalagem', decimal_places=3, default=1, help_text='Informe a quantidade na embalagem', max_digits=8, validators=[django.core.validators.MinValueValidator(Decimal('0.001000000000000000020816681711721685132943093776702880859375'))], verbose_name='Qtde na Embalagem')),
                ('unidadesNaCesta', models.PositiveSmallIntegerField(db_column='unidades_na_cesta', default=1, help_text='Informe a quantidade que vai na cesta', validators=[django.core.validators.MinValueValidator(1)], verbose_name='Unidades na Cesta')),
                ('compoeCesta', models.BooleanField(db_column='compoe_cesta', default=True, verbose_name='Item Compõe a Cesta?')),
                ('diasValidadeMinima', models.PositiveSmallIntegerField(db_column='dias_validade_minima', default=10, help_text='Informe em dias a validade mínima para cesta', validators=[django.core.validators.MinValueValidator(1)], verbose_name='Validade Mínima')),
            ],
        ),
        # Organizational unit (e.g. a collection site) with basket targets.
        migrations.CreateModel(
            name='UnidadeOrganizacao',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=50)),
                ('ativo', models.BooleanField(default=True)),
                ('metaQtdeCestas', models.PositiveSmallIntegerField(db_column='meta_qtde_cestas', default=1, verbose_name='Meta Quantidade Cestas')),
                ('diasEsperaAgendadas', models.PositiveSmallIntegerField(db_column='dias_espera_agendadas', default=1, verbose_name='Agendadas: Dias Espera')),
            ],
        ),
        # Concrete product, attached to a group and an organization unit.
        migrations.CreateModel(
            name='Produto',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descricao', models.CharField(help_text='Informe a descrição do produto', max_length=50, verbose_name='Descrição')),
                ('qtdeNaEmbalagem', models.DecimalField(db_column='qtde_na_embalagem', decimal_places=3, default=1, help_text='Informe a quantidade na embalagem', max_digits=8, validators=[django.core.validators.MinValueValidator(Decimal('0.001000000000000000020816681711721685132943093776702880859375'))], verbose_name='Qtde na Embalagem')),
                ('aceitaDoacao', models.BooleanField(db_column='aceita_doacao', default=True, verbose_name='Aceita Doação?')),
                ('grupoProduto', models.ForeignKey(db_column='grupo_produto_id', help_text='Selecione o grupo do produto', on_delete=django.db.models.deletion.PROTECT, to='pages.grupoproduto', verbose_name='Grupo')),
                ('unidadeOrganizacao', models.ForeignKey(db_column='unidade_org_id', default=1, on_delete=django.db.models.deletion.PROTECT, to='pages.unidadeorganizacao', verbose_name='Unidade Organização')),
            ],
        ),
        # Added separately because UnidadeOrganizacao is created after GrupoProduto.
        migrations.AddField(
            model_name='grupoproduto',
            name='unidadeOrganizacao',
            field=models.ForeignKey(db_column='unidade_org_id', default=1, on_delete=django.db.models.deletion.PROTECT, to='pages.unidadeorganizacao', verbose_name='Unidade Organização'),
        ),
        # A donation actually received, optionally linked to its schedule.
        migrations.CreateModel(
            name='DoacaoRecebida',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dataRecebimento', models.DateField(db_column='data_recebimento', default=datetime.date.today, help_text='Informe uma data menor ou igual a data atual', validators=[django.core.validators.MaxValueValidator(limit_value=datetime.date.today)], verbose_name='Data Recebimento')),
                ('dataBaixa', models.DateField(blank=True, db_column='data_baixa', help_text='Informe uma data menor ou igual a data atual', null=True, validators=[django.core.validators.MaxValueValidator(limit_value=datetime.date.today)], verbose_name='Data da Baixa')),
                ('quantidade', models.PositiveSmallIntegerField(db_column='qtde_recebida', default=1, help_text='Informe o número de unidades doadas', validators=[django.core.validators.MinValueValidator(1)], verbose_name='Quantidade')),
                ('dataValidade', models.DateField(db_column='data_validade', help_text='Informe a data de validade do produto', validators=[django.core.validators.MinValueValidator(limit_value=datetime.date.today)], verbose_name='Data Validade')),
                ('status', models.CharField(choices=[('EST', 'Estoque'), ('ENT', 'Entregue'), ('PER', 'Perda')], default='EST', max_length=5, verbose_name='Status')),
                ('doacaoViaWeb', models.BooleanField(db_column='doacao_via_web', default=True, verbose_name='Doação via Web?')),
                ('saldoDeEntrega', models.BooleanField(db_column='saldo_de_entrega', default=False, verbose_name='Saldo de Entrega?')),
                ('doacaoAgendada', models.ForeignKey(blank=True, db_column='doacao_agendada_id', null=True, on_delete=django.db.models.deletion.PROTECT, to='pages.doacaoagendada', verbose_name='Doação Agendada')),
                ('doador', models.ForeignKey(blank=True, db_column='doador_user_id', help_text='Selecione o doador. Em branco se não identificado', null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Doador')),
                ('produto', models.ForeignKey(db_column='produto_id', help_text='Selecione o produto que foi doado', on_delete=django.db.models.deletion.PROTECT, to='pages.produto', verbose_name='Produto')),
                ('unidadeOrganizacao', models.ForeignKey(db_column='unidade_org_id', default=1, on_delete=django.db.models.deletion.PROTECT, to='pages.unidadeorganizacao', verbose_name='Unidade Organização')),
            ],
        ),
        # Back-fill FKs on DoacaoAgendada once Produto/UnidadeOrganizacao exist.
        migrations.AddField(
            model_name='doacaoagendada',
            name='produto',
            field=models.ForeignKey(db_column='produto_id', help_text='Selecione o produto que foi doado', on_delete=django.db.models.deletion.PROTECT, to='pages.produto', verbose_name='Produto'),
        ),
        migrations.AddField(
            model_name='doacaoagendada',
            name='unidadeOrganizacao',
            field=models.ForeignKey(db_column='unidade_org_id', default=1, on_delete=django.db.models.deletion.PROTECT, to='pages.unidadeorganizacao', verbose_name='Unidade Organização'),
        ),
    ]
|
import pulp
from LineupGenerator import LineupGenerator
class Nhl(LineupGenerator):
    """NHL daily-fantasy lineup generator.

    Builds integer-linear programs (via pulp) that pick 8 skaters + 1 goalie
    under a $55,000 salary cap, positional rules, team limits, optional line
    stacking, and overlap/exposure limits against previously generated
    lineups.
    """
    def __init__(self, sport, num_lineups, overlap, player_limit, teams_limit, stack, solver, correlation_file, players_file, defenses_goalies_file, output_file):
        # All data loading/derived attributes (num_players, positions,
        # players_teams, team_lines, ...) come from the LineupGenerator base.
        super().__init__(sport, num_lineups, overlap, player_limit, teams_limit, stack, solver, correlation_file, players_file, defenses_goalies_file, output_file)
        self.salary_cap = 55000
        # Output column order: 2 centers, 2 wings, 2 defensemen, 2 utility, goalie.
        self.header = ['C', 'C', 'W', 'W', 'D', 'D', 'Util', 'Util', 'G']
    def generate(self, lineups):
        """Solve for one new lineup distinct from *lineups*; return it as a
        0/1 vector over players then goalies, or None if infeasible."""
        prob = pulp.LpProblem('NHL', pulp.LpMaximize)
        # One binary decision variable per skater and per goalie.
        players_lineup = [pulp.LpVariable("player_{}".format(i+1), cat="Binary") for i in range(self.num_players)]
        goalies_lineup = [pulp.LpVariable("goalie_{}".format(i+1), cat="Binary") for i in range(self.num_goalies)]
        # sets player and defense limits for each lineup
        prob += (pulp.lpSum(players_lineup[i] for i in range(self.num_players)) == 8)
        prob += (pulp.lpSum(goalies_lineup[i] for i in range(self.num_goalies)) == 1)
        # sets positional limits for each lineup
        prob += (pulp.lpSum(self.positions['C'][i]*players_lineup[i] for i in range(self.num_players)) >= 2)
        prob += (pulp.lpSum(self.positions['W'][i]*players_lineup[i] for i in range(self.num_players)) >= 2)
        prob += (pulp.lpSum(self.positions['D'][i]*players_lineup[i] for i in range(self.num_players)) == 2)
        # C + W together must be exactly 6 (so the 2 Util slots go to forwards).
        prob += (pulp.lpSum(self.positions['C'][i]*players_lineup[i] for i in range(self.num_players))
                 + pulp.lpSum(self.positions['W'][i]*players_lineup[i] for i in range(self.num_players)) == 6)
        # sets max salary
        prob += ((pulp.lpSum(self.players.loc[i, 'Salary']*players_lineup[i] for i in range(self.num_players)) +
                  pulp.lpSum(self.goalies.loc[i, 'Salary']*goalies_lineup[i] for i in range(self.num_goalies))) <= self.salary_cap)
        # used_team variable used to keep track of which teams used for each lineup
        used_team = [pulp.LpVariable("u{}".format(i+1), cat="Binary") for i in range(self.num_teams)]
        used_team_players = [pulp.LpVariable("us{}".format(i+1), cat="Binary") for i in range(self.num_teams)]
        for i in range(self.num_teams):
            prob += (used_team[i] <= (pulp.lpSum(self.players_teams[k][i]*players_lineup[k] for k in range(self.num_players)) +
                     pulp.lpSum(self.goalies_teams[k][i]*goalies_lineup[k] for k in range(self.num_goalies))))
            prob += (used_team_players[i] <= (pulp.lpSum(self.players_teams[k][i]*players_lineup[k] for k in range(self.num_players))))
            # ensures that there are no more than 4 players and goalies from a single team
            prob += ((pulp.lpSum(self.players_teams[k][i]*players_lineup[k] for k in range(self.num_players)) +
                      pulp.lpSum(self.goalies_teams[k][i]*goalies_lineup[k] for k in range(self.num_goalies))) <= 4*used_team[i])
            prob += ((pulp.lpSum(self.players_teams[k][i]*players_lineup[k] for k in range(self.num_players))) <= 4*used_team_players[i])
        # only X teams can be used when generating a lineup
        prob += (pulp.lpSum(used_team_players[i] for i in range(self.num_teams)) == self.teams_limit)
        # no goalies against players constraint
        # (if a goalie is picked, all 6 "slots" are consumed unless no
        # opposing skaters are selected)
        for i in range(self.num_goalies):
            prob += (6*goalies_lineup[i] + pulp.lpSum(self.goalies_opponents[k][i]*players_lineup[k] for k in range(self.num_players)) <= 6)
        if self.stack == "3-2":
            # must have at least one complete line in each lineup
            line_stack_3 = [pulp.LpVariable("ls3{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
            for i in range(self.num_lines):
                prob += (3*line_stack_3[i] <= pulp.lpSum(self.team_lines[k][i]*players_lineup[k] for k in range(self.num_players)))
            prob += (pulp.lpSum(line_stack_3[i] for i in range(self.num_lines)) >= 1)
            # must have at least 2 lines with at least 2 players
            line_stack_2 = [pulp.LpVariable("ls2{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
            for i in range(self.num_lines):
                prob += (2*line_stack_2[i] <= pulp.lpSum(self.team_lines[k][i]*players_lineup[k] for k in range(self.num_players)))
            prob += (pulp.lpSum(line_stack_2[i] for i in range(self.num_lines)) >= 2)
        elif self.stack == "3-3":
            # must have at least 2 complete lines in each lineup
            line_stack_3 = [pulp.LpVariable("ls3{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
            for i in range(self.num_lines):
                prob += (3*line_stack_3[i] <= pulp.lpSum(self.team_lines[k][i]*players_lineup[k] for k in range(self.num_players)))
            prob += (pulp.lpSum(line_stack_3[i] for i in range(self.num_lines)) >= 2)
        elif self.stack == "2-2-2":
            # must have at least 3 lines with at least 2 players
            line_stack_2 = [pulp.LpVariable("ls2{}".format(i+1), cat="Binary") for i in range(self.num_lines)]
            for i in range(self.num_lines):
                prob += (2*line_stack_2[i] <= pulp.lpSum(self.team_lines[k][i]*players_lineup[k] for k in range(self.num_players)))
            prob += (pulp.lpSum(line_stack_2[i] for i in range(self.num_lines)) >= 3)
        # each new lineup can't have more than the overlap variable number of combinations of players in any previous lineups
        for i in range(len(lineups)):
            prob += ((pulp.lpSum(lineups[i][k]*players_lineup[k] for k in range(self.num_players)) +
                      pulp.lpSum(lineups[i][self.num_players+k]*goalies_lineup[k] for k in range(self.num_goalies))) <= self.overlap)
        # can't use the same player more times than set by player_limit variable
        for i in range(self.num_players):
            prob += ((pulp.lpSum(lineups[k][i]*players_lineup[i] for k in range(len(lineups)))) <= self.player_limit)
        for i in range(self.num_goalies):
            prob += ((pulp.lpSum(lineups[k][self.num_players+i]*goalies_lineup[i] for k in range(len(lineups)))) <= min(self.player_limit, 50))
        #add the objective: maximize total projected fantasy points
        prob += pulp.lpSum((pulp.lpSum(self.players.loc[i, 'Proj FP']*players_lineup[i] for i in range(self.num_players)) +
                            pulp.lpSum(self.goalies.loc[i, 'Proj FP']*goalies_lineup[i] for i in range(self.num_goalies))))
        #solve the problem
        status = prob.solve(self.solver)
        #check if the optimizer found an optimal solution
        if status != pulp.LpStatusOptimal:
            print('Only {} feasible lineups produced'.format(len(lineups)), '\n')
            return None
        # Puts the output of one lineup into a format that will be used later
        lineup_copy = []
        for i in range(self.num_players):
            if players_lineup[i].varValue == 1:
                lineup_copy.append(1)
            else:
                lineup_copy.append(0)
        for i in range(self.num_goalies):
            if goalies_lineup[i].varValue == 1:
                lineup_copy.append(1)
            else:
                lineup_copy.append(0)
        return lineup_copy
    def fill_lineups(self, lineups):
        """Convert 0/1 lineup vectors into rows of "Name,Team" strings in
        header order, appending total projected (and, when available,
        actual) fantasy points."""
        filled_lineups = []
        for lineup in lineups:
            # Slots: [C, C, W, W, D, D, Util, Util, G]; forwards overflow
            # into the Util slots (indices 6 and 7).
            a_lineup = ["", "", "", "", "", "", "", "", ""]
            players_lineup = lineup[:self.num_players]
            goalies_lineup = lineup[-1*self.num_goalies:]
            total_proj = 0
            if self.actuals:
                total_actual = 0
            for num, player in enumerate(players_lineup):
                if player == 1:
                    if self.positions['C'][num] == 1:
                        if a_lineup[0] == "":
                            a_lineup[0] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[1] == "":
                            a_lineup[1] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[6] == "":
                            a_lineup[6] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[7] == "":
                            a_lineup[7] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                    elif self.positions['W'][num] == 1:
                        if a_lineup[2] == "":
                            a_lineup[2] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[3] == "":
                            a_lineup[3] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[6] == "":
                            a_lineup[6] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[7] == "":
                            a_lineup[7] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                    elif self.positions['D'][num] == 1:
                        if a_lineup[4] == "":
                            a_lineup[4] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                        elif a_lineup[5] == "":
                            a_lineup[5] = self.players.loc[num, 'Player Name'] + ',' + self.players.loc[num, 'Team']
                    total_proj += self.players.loc[num, 'Proj FP']
                    if self.actuals:
                        total_actual += self.players.loc[num, 'Actual FP']
            for num, goalie in enumerate(goalies_lineup):
                if goalie == 1:
                    if a_lineup[8] == "":
                        a_lineup[8] = self.goalies.loc[num, 'Player Name'] + ',' + self.goalies.loc[num, 'Team']
                    total_proj += self.goalies.loc[num, 'Proj FP']
                    if self.actuals:
                        total_actual += self.goalies.loc[num, 'Actual FP']
            a_lineup.append(round(total_proj, 2))
            if self.actuals:
                a_lineup.append(round(total_actual, 2))
            filled_lineups.append(a_lineup)
        return filled_lineups
|
from flask import Flask

app = Flask(__name__)


@app.route('/')
def home():
    """Landing page: tell the visitor how to use the service."""
    return '<h3> Go to /[number] to see the number squared</h3>'


@app.route('/<int:number>')
def squared(number):
    """Render *number* squared as an HTML heading."""
    result = number * number
    return '<h1>' + str(result) + '</h1>'


if __name__ == "__main__":
    # Listen on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0', debug=True)
|
from sklearn.base import BaseEstimator
import config
class DistanceModel(BaseEstimator):
    """This model predicts always the class from which the anchor point is
    the closest to the cursor."""

    def __init__(self):
        pass

    def predict(self, X):
        """For each sample, return the target whose distance entry (among
        the first 7 features) is the smallest."""
        predictions = []
        for sample in X:
            distances = list(sample[:7])
            closest = distances.index(min(distances))
            predictions.append(config.targets[closest])
        return predictions

    def fit(self, X, y):
        """No-op: this model has nothing to learn."""
        return self
# Print every palindromic number in the user-supplied inclusive range.
start = int(input("Enter lower bound = "))
end = int(input("Enter upper bound = "))
for val in range(start, end + 1):
    text = str(val)
    if text == text[::-1]:
        print(val, end=" ")
print()
|
#!/usr/bin/env python3
"""
2D Controller Class to be used for the CARLA waypoint follower demo.
"""
import cutils
import numpy as np
from matplotlib import pyplot as plt
from NavigationLibrary.controllers.LongitudinalPID import LongitudinalPID
class Controller2D(object):
    """2D waypoint-following controller for the CARLA demo.

    Longitudinal control is always a PID on speed; lateral control is
    selected at construction time: "PurePursuit", "StanleyController",
    or "MPC" (default).
    """
    def __init__(self, waypoints, controller_type="MPC"):
        self.vars = cutils.CUtils()
        self._lookahead_distance = 3.0
        self._lookahead_time = 1.0
        self._current_x = 0
        self._current_y = 0
        self._current_yaw = 0
        self._current_speed = 0
        self._desired_speed = 0
        self._current_frame = 0
        self._current_timestamp = 0
        self._start_control_loop = False
        self._set_throttle = 0
        self._set_brake = 0
        self._set_steer = 0
        # Waypoints as [[x, y, v], ...] (x/y position, desired speed v).
        self._waypoints = waypoints
        # 180/70/pi ~= 1/1.22: maps a steering angle in radians onto the
        # simulator's normalized [-1, 1] command range (see set_steer).
        self._conv_rad_to_steer = 180.0 / 70.0 / np.pi
        self.longitudinal_controller = LongitudinalPID(self._current_speed,
                                                       Kp=1.0,
                                                       Kd=0.1,
                                                       Ki=0.1,
                                                       integrator_max=10.0,
                                                       integrator_min=-10.0)
        if controller_type == "PurePursuit":
            from NavigationLibrary.controllers.PurePursuit import PurePursuit
            self.lateral_controller = PurePursuit(self._current_x, self._current_y,
                                                  self._current_yaw, self._current_speed,
                                                  K=1.5)
        elif controller_type == "StanleyController":
            from NavigationLibrary.controllers.StanleyController import StanleyController
            self.lateral_controller = StanleyController(self._current_x, self._current_y,
                                                        self._current_yaw, self._current_speed,
                                                        K=1.0)
        elif controller_type == "MPC":
            from NavigationLibrary.controllers.MPC import MPC
            # State/input weight matrices: Q running state cost, R input
            # cost, Qf terminal state cost, Rd input-rate cost.
            Q = np.eye(4)
            R = 0.01*np.eye(2)
            Qf = 5*np.eye(4)
            Rd = np.eye(2)
            # NOTE(review): stored as self.controller, while the other two
            # branches use self.lateral_controller — looks intentional since
            # update_controls reads the matching attribute, but confirm.
            self.controller = MPC(x=self._current_x, y=self._current_y, yaw=self._current_yaw,
                                  v=self._current_speed, delta=0,
                                  L=2, Q=Q, R=R, Qf=Qf, Rd=Rd,
                                  len_horizon=10)
        self.controller_type = controller_type
    def update_values(self, x, y, yaw, speed, timestamp, frame):
        """Ingest the latest simulator feedback; enable the control loop
        once a non-zero frame has been seen."""
        self._current_x = x
        self._current_y = y
        self._current_yaw = yaw
        self._current_speed = speed
        self._current_timestamp = timestamp
        self._current_frame = frame
        if self._current_frame:
            self._start_control_loop = True
    def get_lookahead_index(self, lookahead_distance):
        """Return the index of the waypoint roughly *lookahead_distance*
        of path length ahead of the closest waypoint."""
        min_idx = 0
        min_dist = float("inf")
        # Closest waypoint to the current position.
        for i in range(len(self._waypoints)):
            dist = np.linalg.norm(np.array([
                self._waypoints[i][0] - self._current_x,
                self._waypoints[i][1] - self._current_y]))
            if dist < min_dist:
                min_dist = dist
                min_idx = i
        # Walk forward accumulating segment lengths until the lookahead
        # distance is covered.
        total_dist = min_dist
        lookahead_idx = min_idx
        for i in range(min_idx + 1, len(self._waypoints)):
            if total_dist >= lookahead_distance:
                break
            total_dist += np.linalg.norm(np.array([
                self._waypoints[i][0] - self._waypoints[i-1][0],
                self._waypoints[i][1] - self._waypoints[i-1][1]]))
            lookahead_idx = i
        return lookahead_idx
    def update_desired_speed(self):
        """Set the speed target to that of the nearest waypoint."""
        min_idx = 0
        min_dist = float("inf")
        desired_speed = 0
        for i in range(len(self._waypoints)):
            dist = np.linalg.norm(np.array([
                self._waypoints[i][0] - self._current_x,
                self._waypoints[i][1] - self._current_y]))
            if dist < min_dist:
                min_dist = dist
                min_idx = i
        self._desired_speed = self._waypoints[min_idx][2]
    def smooth_yaw(self, yaws):
        """Unwrap a yaw sequence in place so consecutive values never jump
        by more than ~pi/2; returns the same list."""
        for i in range(len(yaws) - 1):
            dyaw = yaws[i+1] - yaws[i]
            while dyaw >= np.pi/2.0:
                yaws[i+1] -= 2.0 * np.pi
                dyaw = yaws[i+1] - yaws[i]
            while dyaw <= -np.pi/2.0:
                yaws[i+1] += 2.0 * np.pi
                dyaw = yaws[i+1] - yaws[i]
        return yaws
    def update_waypoints(self, new_waypoints):
        """Replace the tracked waypoint list."""
        self._waypoints = new_waypoints
    def get_commands(self):
        """Return the most recent (throttle, steer, brake) commands."""
        return self._set_throttle, self._set_steer, self._set_brake
    def set_throttle(self, input_throttle):
        # Clamp the throttle command to valid bounds
        throttle = np.fmax(np.fmin(input_throttle, 1.0), 0.0)
        self._set_throttle = throttle
    def set_steer(self, input_steer_in_rad):
        # Convert radians to [-1, 1]
        input_steer = self._conv_rad_to_steer * input_steer_in_rad
        # Clamp the steering command to valid bounds
        steer = np.fmax(np.fmin(input_steer, 1.0), -1.0)
        self._set_steer = steer
    def set_brake(self, input_brake):
        # Clamp the brake command to valid bounds
        brake = np.fmax(np.fmin(input_brake, 1.0), 0.0)
        self._set_brake = brake
    def update_controls(self):
        """Compute throttle/steer/brake for the current state and store
        them via the set_* clamps."""
        ######################################################
        # RETRIEVE SIMULATOR FEEDBACK
        ######################################################
        x = self._current_x
        y = self._current_y
        yaw = self._current_yaw
        v = self._current_speed
        self.update_desired_speed()
        v_desired = self._desired_speed
        t = self._current_timestamp
        waypoints = self._waypoints
        throttle_output = 0
        steer_output = 0
        brake_output = 0
        # Registers t_prev once; subsequent calls keep the stored value.
        self.vars.create_var('t_prev', 0.0)
        # Skip the first frame to store previous values properly
        if self._start_control_loop:
            dt = t - self.vars.t_prev
            throttle_output = self.longitudinal_controller.get_throttle_input(
                v, dt, v_desired)
            if self.controller_type == "PurePursuit":
                # Speed-dependent lookahead target point.
                lookahead_distance = self._lookahead_distance + self._lookahead_time * v
                lookahead_idx = self.get_lookahead_index(lookahead_distance)
                target_wp = [self._waypoints[lookahead_idx]
                             [0], self._waypoints[lookahead_idx][1]]
                steer_output = self.lateral_controller.get_steer_input(x, y,
                                                                       yaw, v,
                                                                       target_wp)
            if self.controller_type == "StanleyController":
                wp = np.array(self._waypoints)
                steer_output = self.lateral_controller.get_steer_input(x, y, yaw, v,
                                                                       wp.T)
            if self.controller_type == "MPC":
                # Build the reference trajectory: per-segment headings,
                # positions and speed profile from the waypoint list.
                cyaw = [yaw]
                cx = []
                cy = []
                speed_profile = []
                for i in range(len(self._waypoints)-1):
                    cyaw.append(np.arctan2(self._waypoints[i+1][1] - self._waypoints[i][1],
                                           self._waypoints[i+1][0] - self._waypoints[i][0]))
                    cx.append(self._waypoints[i][0])
                    cy.append(self._waypoints[i][1])
                    speed_profile.append(self._waypoints[i][2])
                cyaw.append(cyaw[-1])
                cx.append(self._waypoints[-1][0])
                cy.append(self._waypoints[-1][1])
                speed_profile.append(self._waypoints[-1][2])
                # Zero curvature reference for every waypoint.
                ck = [0.0] * len(self._waypoints)
                # Seeding with the current yaw keeps unwrapping continuous;
                # the seed itself is discarded below.
                cyaw = self.smooth_yaw(cyaw)
                del cyaw[0]
                plt.figure(0)
                plt.cla()
                plt.plot(cx, cy, '-c')
                acceleration, steer_output, xs, ys, vs, yaws = \
                    self.controller.get_inputs(x, y, yaw,
                                               v, np.stack((cx, cy, cyaw, ck)),
                                               speed_profile,
                                               0.1)
            ######################################################
            # SET CONTROLS OUTPUT
            ######################################################
            self.set_throttle(throttle_output)  # in percent (0 to 1)
            self.set_steer(steer_output)        # in rad (-1.22 to 1.22)
            self.set_brake(brake_output)        # in percent (0 to 1)
        self.vars.t_prev = t
|
import FWCore.ParameterSet.Config as cms
# CMSSW job: run the HTXS Rivet classification (Higgs Template Cross
# Sections) over MINIAOD GluGluH->ZZ->4l events.
process = cms.Process("runRivetAnalysis")
process.options = cms.untracked.PSet(
    allowUnscheduled = cms.untracked.bool(False)
)
# Process only 10 events (test configuration).
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(
    # compare AOD and MINIAOD
    #'/store/mc/RunIISpring16MiniAODv2/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUgenV6_pythia8/MINIAODSIM/PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14-v1/10000/2C7F3153-393B-E611-9323-0CC47AA98A3A.root'
    #'/store/mc/RunIISpring16reHLT80/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUgenV6_pythia8/AODSIM/PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14-v1/10000/52E144D7-793A-E611-B70F-0025904A8ECC.root',
    #'/store/mc/RunIISpring16reHLT80/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUgenV6_pythia8/AODSIM/PUSpring16RAWAODSIM_reHLT_80X_mcRun2_asymptotic_v14-v1/10000/D650466F-A13A-E611-AA11-0CC47A13CD56.root'
    # just run some MINIAOD
    '/store/mc/RunIIAutumn18MiniAOD/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUGenV7011_pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/80000/6F4F411E-8111-684D-827D-B5962A0CB94F.root',
    '/store/mc/RunIIAutumn18MiniAOD/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUGenV7011_pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/80000/49CB36B2-E124-2249-A0F8-CE867CF4F8A6.root',
    '/store/mc/RunIIAutumn18MiniAOD/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUGenV7011_pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/80000/79F49EC1-42B4-3349-A268-59510E899BCC.root',
    '/store/mc/RunIIAutumn18MiniAOD/GluGluHToZZTo4L_M125_13TeV_powheg2_JHUGenV7011_pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/80000/D65A4D51-2E80-AD41-B50D-E4083BA2A668.root',
    ),
)
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
# HTXS classification from the HepMC record produced below.
process.rivetProducerHTXS = cms.EDProducer('HTXSRivetProducer',
    HepMCCollection = cms.InputTag('myGenerator','unsmeared'),
    LHERunInfo = cms.InputTag('externalLHEProducer'),
    #ProductionMode = cms.string('GGF'),
    ProductionMode = cms.string('AUTO'),
)
#MINIAOD
# Merge pruned+packed gen particles back into one collection, then convert
# to HepMC for Rivet.
process.mergedGenParticles = cms.EDProducer("MergedGenParticleProducer",
    inputPruned = cms.InputTag("prunedGenParticles"),
    inputPacked = cms.InputTag("packedGenParticles"),
)
process.myGenerator = cms.EDProducer("GenParticles2HepMCConverter",
    genParticles = cms.InputTag("mergedGenParticles"),
    genEventInfo = cms.InputTag("generator"),
    signalParticlePdgIds = cms.vint32(25), ## for the Higgs analysis
)
process.p = cms.Path(process.mergedGenParticles*process.myGenerator*process.rivetProducerHTXS)
# # AOD
#process.myGenerator = cms.EDProducer("GenParticles2HepMCConverterHTXS",
#    genParticles = cms.InputTag("genParticles"),
#    genEventInfo = cms.InputTag("generator"),
#)
#process.p = cms.Path(process.myGenerator*process.rivetProducerHTXS)
process.out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *','keep *_*_*_runRivetAnalysis','keep *_generator_*_*','keep *_externalLHEProducer_*_*'),
    fileName = cms.untracked.string('testHTXSRivet_ggH4l_MINIAOD_100k.root')
)
process.o = cms.EndPath( process.out )
|
import json
from autoprotocol.protocol import Protocol
from autoprotocol.container import WellGroup
from autoprotocol_utilities.resource_helpers import ref_kit_container

p = Protocol()
# Wells 1-4: pGLO + bacteria + arabinose + LB/amp
# Wells 5-8: same but no arabinose (control)
# Wells 9-12: no bacteria, no arabinose (control)
num_picks = 4
selected_well = 2
ara_vol = 2.5
grow_plate = p.ref("grow_plate", cont_type="96-flat", storage="cold_4")
p.fluorescence(grow_plate, grow_plate.wells_from(0, num_picks * 3),
               excitation="483:nanometer", emission="535:nanometer",
               dataref="grow_plate")
# BUG FIX: the original used the Python-2-only `print` statement, which is a
# SyntaxError under Python 3. A single-argument print() works under both.
print(json.dumps(p.as_dict(), indent=2))
|
# Rasterize the straight line connecting two endpoint coordinates
# (translated from the original Chinese comment).
from skimage.draw import line
import numpy as np

canvas = np.zeros((10, 10), dtype=np.uint8)
rows, cols = line(1, 1, 8, 8)
canvas[rows, cols] = 1
print(canvas)
|
from __future__ import annotations
from typing import TYPE_CHECKING
from flowchem.devices.flowchem_device import FlowchemDevice
from ...components.technical.power import PowerSwitch
if TYPE_CHECKING:
from .bubble_sensor import PhidgetBubbleSensor, PhidgetPowerSource5V
from flowchem.components.sensors.base_sensor import Sensor
class PhidgetBubbleSensorComponent(Sensor):
    """HTTP API component exposing a Phidget bubble sensor."""

    hw_device: PhidgetBubbleSensor  # just for typing

    def __init__(self, name: str, hw_device: FlowchemDevice):
        """Register the sensor's HTTP routes on top of the base Sensor component."""
        super().__init__(name, hw_device)
        # BUG FIX: this route was previously wired to self.power_on, so the data
        # interval could never actually be set through the API.
        self.add_api_route("/set-data-Interval", self.set_dataInterval, methods=["PUT"])
        self.add_api_route("/read-voltage", self.read_voltage, methods=["GET"])
        self.add_api_route("/acquire-signal", self.acquire_signal, methods=["GET"])

    async def power_on(self) -> bool:
        """Power the sensor on; always reports success."""
        self.hw_device.power_on()
        return True

    async def power_off(self) -> bool:
        """Power the sensor off; always reports success."""
        self.hw_device.power_off()
        return True

    async def read_voltage(self) -> float:
        """Read from sensor in Volt."""
        return self.hw_device.read_voltage()

    async def acquire_signal(self) -> float:
        """Transform the voltage from the sensor to be expressed in percentage (%)."""
        return self.hw_device.read_intensity()

    async def set_dataInterval(self, datainterval: int) -> bool:
        """Set the data interval; valid range is 20-60000 ms."""
        self.hw_device.set_dataInterval(datainterval)
        return True
class PhidgetBubbleSensorPowerComponent(PowerSwitch):
    """Power-switch component for the Phidget 5 V power source."""

    hw_device: PhidgetPowerSource5V  # just for typing

    async def power_on(self) -> bool:
        """Switch the 5 V power source on; always reports success."""
        self.hw_device.power_on()
        return True

    async def power_off(self) -> bool:
        """Switch the 5 V power source off; always reports success."""
        self.hw_device.power_off()
        return True
|
import ConfigParser, os, inspect
class Config:
    """Read-only view over the moodledata-fuse mount configuration.

    Loads /etc/moodledata-fuse/mount.cfg first, then a mount.cfg sitting next
    to this module (later files override earlier values for duplicate keys).
    """

    def __init__(self, section="main"):
        self.section = section
        self.parser = ConfigParser.ConfigParser()
        # Directory containing this module, resolved at runtime.
        base_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        local_config_path = os.path.join(base_path, 'mount.cfg')
        for config_path in ["/etc/moodledata-fuse/mount.cfg", local_config_path]:
            if os.path.isfile(config_path):
                # BUG FIX: the original passed open(...) directly to readfp and
                # leaked the file handle; 'with' closes it deterministically.
                with open(config_path) as config_file:
                    self.parser.readfp(config_file)

    def __getitem__(self, key):
        """Return the value of *key* in the configured section (raises if absent)."""
        return self.parser.get(self.section, key)

    def __contains__(self, key):
        """True if *key* exists in the configured section."""
        return self.parser.has_option(self.section, key)
|
import cv2
from classes.PlateFinder import PlateFinder
from classes.NeuralNetwork import NeuralNetwork

if __name__ == '__main__':
    findPlate = PlateFinder()
    # Initialize the Neural Network
    model = NeuralNetwork()
    # BUG FIX: the original opened cv2.VideoCapture('test/video.MOV') and never
    # released it; the entire video loop was commented out, so the capture was
    # dead weight. The dead commented-out code has been removed as well.
    img = cv2.imread('test/test2.jpeg')
    possible_plates = findPlate.find_possible_plates(img)
    print(possible_plates, 1)
    if possible_plates is not None:
        for i, p in enumerate(possible_plates):
            chars_on_plate = findPlate.char_on_plate[i]
            recognized_plate, _ = model.label_image_list(chars_on_plate, imageSizeOuput=128)
            cv2.imshow('plate', p)
            cv2.waitKey(0)
    cv2.destroyAllWindows()
|
clk_src.count = 29
sbclk_src.count = 27
clk_div.count = 0
clk_dfs_mode.count = 1
clk_dll_mode.count = 0
clk_mul.count = 7
clk_shift_stepsize = 8.594e-12
clock_period_external = 2.841441861258077e-09
clock_period_internal = 2.857142857142857e-09
p0_div_1kHz.count = 275
clk_88Hz_div_1kHz.count = 89100
hlc_div = 12
nsl_div = 48
p0fd2.count = 1
p0d2.count = 317
p0_shift.offset = -3.611472605659016e-06
psod3.offset = 7.68e-09
hlcnd.count = 1035645
hlcnd.offset = -0.010752607022907704
hlcad.offset = -0.010752607023
hlctd.offset = 1.1365768090935692e-08
ch1.PP_enabled = True
ch1.input.count = 0
ch1.description = 'X scope trig'
ch1.mnemonic = 'xosct'
ch1.special = ''
ch1.specout.count = 0
ch1.offset_HW = 6.803185418800114e-06
ch1.offset_sign = 1.0
ch1.pulse_length_HW = nan
ch1.offset_PP = nan
ch1.pulse_length_PP = nan
ch1.counter_enabled = 1
ch1.enable.count = 1
ch1.timed = 'probe'
ch1.gated = ''
ch1.override.count = 0
ch1.state.count = 0
ch2.PP_enabled = False
ch2.input.count = 0
ch2.description = 'HLC ext freq'
ch2.mnemonic = 'hlc'
ch2.special = ''
ch2.specout.count = 0
ch2.offset_HW = nan
ch2.offset_sign = 1.0
ch2.pulse_length_HW = nan
ch2.offset_PP = nan
ch2.pulse_length_PP = nan
ch2.counter_enabled = 0
ch2.enable.count = 0
ch2.timed = ''
ch2.gated = ''
ch2.override.count = 0
ch2.state.count = 0
ch3.PP_enabled = False
ch3.input.count = 1
ch3.description = 'HLC enc IN'
ch3.mnemonic = ''
ch3.special = ''
ch3.specout.count = 0
ch3.offset_HW = nan
ch3.offset_sign = 1.0
ch3.pulse_length_HW = nan
ch3.offset_PP = nan
ch3.pulse_length_PP = nan
ch3.counter_enabled = 0
ch3.enable.count = 0
ch3.timed = ''
ch3.gated = ''
ch3.override.count = 0
ch3.state.count = 0
ch4.PP_enabled = False
ch4.input.count = 0
ch4.description = 'HS chop'
ch4.mnemonic = 'hsc'
ch4.special = ''
ch4.specout.count = 0
ch4.offset_HW = -0.0009859719999999999
ch4.offset_sign = -1.0
ch4.pulse_length_HW = nan
ch4.offset_PP = nan
ch4.pulse_length_PP = nan
ch4.counter_enabled = 0
ch4.enable.count = 1
ch4.timed = ''
ch4.gated = ''
ch4.override.count = 0
ch4.state.count = 0
ch5.PP_enabled = False
ch5.input.count = 1
ch5.description = 'HS chop IN'
ch5.mnemonic = ''
ch5.special = ''
ch5.specout.count = 0
ch5.offset_HW = nan
ch5.offset_sign = 1.0
ch5.pulse_length_HW = nan
ch5.offset_PP = nan
ch5.pulse_length_PP = nan
ch5.counter_enabled = 0
ch5.enable.count = 0
ch5.timed = ''
ch5.gated = ''
ch5.override.count = 0
ch5.state.count = 0
ch6.PP_enabled = True
ch6.input.count = 0
ch6.description = 'ms shutter'
ch6.mnemonic = 'ms'
ch6.special = 'ms'
ch6.specout.count = 0
ch6.offset_HW = nan
ch6.offset_sign = 1.0
ch6.pulse_length_HW = nan
ch6.offset_PP = -16.0
ch6.pulse_length_PP = 3.0
ch6.counter_enabled = 0
ch6.enable.count = 0
ch6.timed = 'probe'
ch6.gated = 'probe'
ch6.override.count = 0
ch6.state.count = 0
ch7.PP_enabled = True
ch7.input.count = 0
ch7.description = 'X det trig'
ch7.mnemonic = 'xdet'
ch7.special = ''
ch7.specout.count = 0
ch7.offset_HW = nan
ch7.offset_sign = 1.0
ch7.pulse_length_HW = nan
ch7.offset_PP = -6.0
ch7.pulse_length_PP = 1.0
ch7.counter_enabled = 1
ch7.enable.count = 0
ch7.timed = 'period'
ch7.gated = 'detector'
ch7.override.count = 0
ch7.state.count = 0
ch8.PP_enabled = True
ch8.input.count = 0
ch8.description = 'L cam trig'
ch8.mnemonic = 'lcam'
ch8.special = ''
ch8.specout.count = 0
ch8.offset_HW = 6.822639999999999e-06
ch8.offset_sign = 1.0
ch8.pulse_length_HW = nan
ch8.offset_PP = nan
ch8.pulse_length_PP = nan
ch8.counter_enabled = 0
ch8.enable.count = 0
ch8.timed = 'pump'
ch8.gated = 'pump'
ch8.override.count = 0
ch8.state.count = 0
ch9.PP_enabled = True
ch9.input.count = 0
ch9.description = 'S cam shutter'
ch9.mnemonic = 's1'
ch9.special = ''
ch9.specout.count = 0
ch9.offset_HW = nan
ch9.offset_sign = 1.0
ch9.pulse_length_HW = nan
ch9.offset_PP = -15.0
ch9.pulse_length_PP = 15.0
ch9.counter_enabled = 0
ch9.enable.count = 0
ch9.timed = 'pump'
ch9.gated = 'pump'
ch9.override.count = 0
ch9.state.count = 1
ch10.PP_enabled = True
ch10.input.count = 0
ch10.description = 'S cam LED'
ch10.mnemonic = 'scl'
ch10.special = ''
ch10.specout.count = 0
ch10.offset_HW = nan
ch10.offset_sign = 1.0
ch10.pulse_length_HW = nan
ch10.offset_PP = 0.0
ch10.pulse_length_PP = 72.0
ch10.counter_enabled = 0
ch10.enable.count = 0
ch10.timed = 'period'
ch10.gated = ''
ch10.override.count = 1
ch10.state.count = 0
ch11.PP_enabled = True
ch11.input.count = 0
ch11.description = 'sample trans'
ch11.mnemonic = 'trans'
ch11.special = 'trans'
ch11.specout.count = 0
ch11.offset_HW = nan
ch11.offset_sign = 1.0
ch11.pulse_length_HW = nan
ch11.offset_PP = 0.0
ch11.pulse_length_PP = 3.0
ch11.counter_enabled = 0
ch11.enable.count = 0
ch11.timed = 'period'
ch11.gated = ''
ch11.override.count = 0
ch11.state.count = 0
ch12.PP_enabled = False
ch12.input.count = 0
ch12.description = 'Diagnostics 1'
ch12.mnemonic = ''
ch12.special = ''
ch12.specout.count = 2
ch12.offset_HW = nan
ch12.offset_sign = 1.0
ch12.pulse_length_HW = nan
ch12.offset_PP = nan
ch12.pulse_length_PP = nan
ch12.counter_enabled = 0
ch12.enable.count = 0
ch12.timed = ''
ch12.gated = ''
ch12.override.count = 0
ch12.state.count = 0
ch13.PP_enabled = True
ch13.input.count = 0
ch13.description = 'ps L oscill'
ch13.mnemonic = 'pso'
ch13.special = 'pso'
ch13.specout.count = 1
ch13.offset_HW = nan
ch13.offset_sign = 1.0
ch13.pulse_length_HW = nan
ch13.offset_PP = nan
ch13.pulse_length_PP = nan
ch13.counter_enabled = 0
ch13.enable.count = 0
ch13.timed = 'pump'
ch13.gated = ''
ch13.override.count = 0
ch13.state.count = 0
ch14.PP_enabled = True
ch14.input.count = 0
ch14.description = 'ps L trig'
ch14.mnemonic = 'pst'
ch14.special = ''
ch14.specout.count = 0
ch14.offset_HW = 2.3975699999999997e-06
ch14.offset_sign = 1.0
ch14.pulse_length_HW = nan
ch14.offset_PP = nan
ch14.pulse_length_PP = nan
ch14.counter_enabled = 0
ch14.enable.count = 0
ch14.timed = 'pump'
ch14.gated = 'pump'
ch14.override.count = 0
ch14.state.count = 0
ch15.PP_enabled = True
ch15.input.count = 0
ch15.description = ''
ch15.mnemonic = 'psg'
ch15.special = ''
ch15.specout.count = 0
ch15.offset_HW = nan
ch15.offset_sign = 1.0
ch15.pulse_length_HW = nan
ch15.offset_PP = nan
ch15.pulse_length_PP = nan
ch15.counter_enabled = 0
ch15.enable.count = 0
ch15.timed = 'pump'
ch15.gated = 'pump'
ch15.override.count = 0
ch15.state.count = 0
ch16.PP_enabled = True
ch16.input.count = 0
ch16.description = 'L scope trig'
ch16.mnemonic = 'losct'
ch16.special = ''
ch16.specout.count = 0
ch16.offset_HW = 5.89053e-06
ch16.offset_sign = 1.0
ch16.pulse_length_HW = nan
ch16.offset_PP = nan
ch16.pulse_length_PP = nan
ch16.counter_enabled = 1
ch16.enable.count = 0
ch16.timed = 'pump'
ch16.gated = ''
ch16.override.count = 0
ch16.state.count = 0
ch17.PP_enabled = True
ch17.input.count = 0
ch17.description = 'ns L flash'
ch17.mnemonic = 'nsf'
ch17.special = 'nsf'
ch17.specout.count = 0
ch17.offset_HW = -0.00062272
ch17.offset_sign = 1.0
ch17.pulse_length_HW = nan
ch17.offset_PP = nan
ch17.pulse_length_PP = nan
ch17.counter_enabled = 0
ch17.enable.count = 0
ch17.timed = 'pump'
ch17.gated = ''
ch17.override.count = 0
ch17.state.count = 0
ch18.PP_enabled = True
ch18.input.count = 0
ch18.description = 'ns L Q-sw'
ch18.mnemonic = 'nsq'
ch18.special = ''
ch18.specout.count = 0
ch18.offset_HW = 6.677895511828916e-06
ch18.offset_sign = 1.0
ch18.pulse_length_HW = nan
ch18.offset_PP = nan
ch18.pulse_length_PP = nan
ch18.counter_enabled = 0
ch18.enable.count = 0
ch18.timed = 'pump'
ch18.gated = 'pump'
ch18.override.count = 0
ch18.state.count = 0
ch19.PP_enabled = True
ch19.input.count = 0
ch19.description = 'ns L 2 flash'
ch19.mnemonic = ''
ch19.special = ''
ch19.specout.count = 0
ch19.offset_HW = nan
ch19.offset_sign = 1.0
ch19.pulse_length_HW = nan
ch19.offset_PP = nan
ch19.pulse_length_PP = nan
ch19.counter_enabled = 0
ch19.enable.count = 0
ch19.timed = 'period'
ch19.gated = ''
ch19.override.count = 0
ch19.state.count = 0
ch20.PP_enabled = True
ch20.input.count = 0
ch20.description = 'CW laser'
ch20.mnemonic = 'cwl'
ch20.special = ''
ch20.specout.count = 0
ch20.offset_HW = nan
ch20.offset_sign = 1.0
ch20.pulse_length_HW = nan
ch20.offset_PP = 35.0
ch20.pulse_length_PP = 2.0
ch20.counter_enabled = 0
ch20.enable.count = 0
ch20.timed = 'period'
ch20.gated = ''
ch20.override.count = 0
ch20.state.count = 0
ch21.PP_enabled = True
ch21.input.count = 0
ch21.description = ''
ch21.mnemonic = 's3'
ch21.special = ''
ch21.specout.count = 0
ch21.offset_HW = nan
ch21.offset_sign = 1.0
ch21.pulse_length_HW = nan
ch21.offset_PP = nan
ch21.pulse_length_PP = 2.0
ch21.counter_enabled = 0
ch21.enable.count = 0
ch21.timed = ''
ch21.gated = ''
ch21.override.count = 0
ch21.state.count = 0
ch22.PP_enabled = True
ch22.input.count = 0
ch22.description = ''
ch22.mnemonic = ''
ch22.special = ''
ch22.specout.count = 0
ch22.offset_HW = nan
ch22.offset_sign = 1.0
ch22.pulse_length_HW = nan
ch22.offset_PP = nan
ch22.pulse_length_PP = nan
ch22.counter_enabled = 0
ch22.enable.count = 0
ch22.timed = ''
ch22.gated = ''
ch22.override.count = 0
ch22.state.count = 0
ch23.PP_enabled = True
ch23.input.count = 0
ch23.description = 'S cam trig'
ch23.mnemonic = 'sct'
ch23.special = ''
ch23.specout.count = 0
ch23.offset_HW = nan
ch23.offset_sign = 1.0
ch23.pulse_length_HW = nan
ch23.offset_PP = 0.0
ch23.pulse_length_PP = 1.0
ch23.counter_enabled = 0
ch23.enable.count = 0
ch23.timed = 'period'
ch23.gated = ''
ch23.override.count = 0
ch23.state.count = 0
ch24.PP_enabled = False
ch24.input.count = 0
ch24.description = 'Diagnostics 2'
ch24.mnemonic = ''
ch24.special = ''
ch24.specout.count = 3
ch24.offset_HW = nan
ch24.offset_sign = 1.0
ch24.pulse_length_HW = nan
ch24.offset_PP = nan
ch24.pulse_length_PP = nan
ch24.counter_enabled = 0
ch24.enable.count = 0
ch24.timed = ''
ch24.gated = ''
ch24.override.count = 0
ch24.state.count = 0 |
# pylint # {{{
# vim: tw=100 foldmethod=indent
# pylint: disable=bad-continuation, invalid-name, superfluous-parens
# pylint: disable=bad-whitespace, mixed-indentation
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring, trailing-whitespace, trailing-newlines, too-few-public-methods
# pylint: disable=unused-argument
# }}}
import json
import logging
from mqtt_to_influx.config import CONFIG
from mqtt_to_influx.influx_client import influx_client
logger = logging.getLogger(__name__)
class Process_mqtt_message:
    """Parse a .../STATE MQTT message and forward POWER1/POWER2 readings to influx."""

    def __init__(self, mqtt_client, userdata, msg):
        configname = __name__.split('.')[1]
        logger.debug(configname)
        if int(CONFIG[configname].get('verbose', 0)) > 0:
            logger.info("processing: {: <30}{}".format(msg.topic, msg.payload.decode()))
        # Topic up to "/STATE" becomes the dotted measurement name.
        device_name = msg.topic.split("/STATE")[0].lstrip("/").replace("/",".")
        # make sure we have a json object
        try:
            payload_json = json.loads(msg.payload.decode())
        except json.decoder.JSONDecodeError:
            return None
        if int(CONFIG[configname].get('verbose', 0)) > 1:
            logger.info ("payload_json: ")
            logger.info(json.dumps(payload_json, sort_keys=True, indent=4, separators=(',', ': ')))
        # Convert numeric-looking values to float. BUG FIX: the try/except now
        # sits inside the loop, so one non-numeric value no longer aborts the
        # conversion of every remaining key.
        for (k, v) in payload_json.items():
            try:
                payload_json[k] = float(v)
            except (ValueError, TypeError):
                pass
        # copy over selected values into new json
        new_payload_json = {}
        for entry in ("POWER1", "POWER2"):
            try:
                new_payload_json[entry] = payload_json[entry]
            except KeyError:
                # BUG FIX: a missing dict key raises KeyError; the original
                # caught ValueError, so missing keys crashed __init__.
                pass
        # Create final json_body for writing into influxdb
        try:
            json_body = [
                {
                    "measurement": str(device_name),
                    "fields": new_payload_json
                }
            ]
            if CONFIG[configname].getboolean('do_write_to_influx'):
                influx_client.write_points(json_body)
            if int(CONFIG[configname].get('verbose', 0)) > 0:
                logger.info ("output json for storage in influx:")
                logger.info(json.dumps(json_body, sort_keys=True, indent=4, separators=(',', ': ')))
            if int(CONFIG[configname].get('verbose', 0)) > 0:
                logger.info("------\n")
        except Exception as e:
            logger.info (F"{e!r}")
        return None
# logger.info(F"{__name__} imported")
|
import random
import sys
import time
from cache import *
from cpath import data_path
from data_generator import tokenizer_wo_tf as tokenization
from misc_lib import TimeEstimator
from job_manager.marked_task_manager import MarkedTaskManager
from tlm.wiki import bert_training_data as btd
working_path ="/mnt/nfs/work3/youngwookim/data/bert_tf"
def parse_wiki(file_path):
    """Parse a line-oriented wiki dump into documents.

    Documents are separated by blank lines; each returned document is a list
    of raw lines (trailing newlines preserved).
    """
    documents = []
    doc = []
    # BUG FIX: 'with' closes the handle; the original leaked it.
    with open(file_path, "r") as f:
        for line in f:
            if line.strip():
                doc.append(line)
            else:
                documents.append(doc)
                doc = []
    # BUG FIX: keep the trailing document when the file does not end
    # with a blank line (the original silently dropped it).
    if doc:
        documents.append(doc)
    return documents
class Worker:
    """Generates BERT pre-training examples from tokenized Wikipedia documents."""

    def __init__(self, out_path):
        vocab_file = os.path.join(data_path, "bert_voca.txt")
        self.tokenizer = tokenization.FullTokenizer(
            vocab_file=vocab_file, do_lower_case=True)
        # Standard BERT data-generation hyper-parameters.
        self.masked_lm_prob = 0.15
        self.short_seq_prob = 0.1
        self.problem_per_job = 100 * 1000
        self.max_seq_length = 512
        self.max_predictions_per_seq = 20
        self.dupe_factor = 1
        self.out_dir = out_path
        seed = time.time()
        self.rng = random.Random(seed)
        print("Loading documents")
        self.documents = self.load_documents_from_pickle()
        print("Loading documents Done : ", len(self.documents))

    def load_documents_from_pickle(self):
        """Load 100 pickled token files from a randomly chosen segment."""
        seg_id = self.rng.randint(0, 9)
        file_path = "/mnt/nfs/work3/youngwookim/data/enwiki4bert/tokens/enwiki_train_tokens.{}"
        all_docs = []
        for j in range(100):
            full_id = seg_id * 100 + j
            # BUG FIX: 'with' closes each handle; the original leaked 100
            # file descriptors per call.
            with open(file_path.format(full_id), "rb") as f:
                all_docs.extend(pickle.load(f))
        return all_docs

    def load_documents(self):
        """Load and tokenize a random raw-text shard (slower alternative loader)."""
        i = self.rng.randint(0, 9)
        file_path = "/mnt/nfs/work3/youngwookim/data/enwiki4bert/enwiki_train.txt.line.{}".format(i)
        print(file_path)
        docs = parse_wiki(file_path)
        out_docs = []
        # Empty lines are used as document delimiters
        ticker = TimeEstimator(len(docs))
        for doc in docs:
            out_docs.append([])
            for line in doc:
                line = line.strip()
                tokens = self.tokenizer.tokenize(line)
                if tokens:
                    out_docs[-1].append(tokens)
            ticker.tick()
        # Sanity check: at least four non-trivial documents were produced.
        assert out_docs[3]
        return out_docs

    def work(self, job_id):
        """Create training instances from self.documents and write one TF record file."""
        output_file = os.path.join(self.out_dir, "{}".format(job_id))
        instances = btd.create_training_instances(
            self.documents, self.tokenizer, self.max_seq_length, self.dupe_factor,
            self.short_seq_prob, self.masked_lm_prob, self.max_predictions_per_seq,
            self.rng)
        btd.write_instance_to_example_files(instances, self.tokenizer, self.max_seq_length,
                                            self.max_predictions_per_seq, [output_file])
def main():
    """Poll the marked-task manager and process every assigned job id."""
    mark_path = os.path.join(working_path, "wiki_p2_mark")
    out_path = os.path.join(working_path, "tf")
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: mkdir` pattern.
    os.makedirs(out_path, exist_ok=True)
    mtm = MarkedTaskManager(100, mark_path, 1)
    worker = Worker(out_path)
    job_id = mtm.pool_job()
    print("Job id : ", job_id)
    while job_id is not None:
        worker.work(job_id)
        job_id = mtm.pool_job()
        print("Job id : ", job_id)
def simple():
    """Run exactly one job whose id is given as the first CLI argument."""
    target_dir = os.path.join(working_path, "tf")
    job_id = int(sys.argv[1])
    Worker(target_dir).work(job_id)
if __name__ == "__main__":
    main()  # polling mode; call simple() manually for a one-shot run
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration creating the Exam model.

    The \\u-escaped verbose names are Ukrainian labels ("Назва" = name,
    "Дата і час проведення" = date/time held, "Група" = group).
    """

    dependencies = [
        ('students', '0003_auto_20150324_2059'),
    ]

    operations = [
        migrations.CreateModel(
            name='Exam',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256, verbose_name='\u041d\u0430\u0437\u0432\u0430')),
                ('date', models.DateTimeField(null=True, verbose_name='\u0414\u0430\u0442\u0430 \u0456 \u0447\u0430\u0441 \u043f\u0440\u043e\u0432\u0435\u0434\u0435\u043d\u043d\u044f')),
                ('teacher', models.CharField(max_length=256, null=True, blank=True)),
                ('groups', models.ForeignKey(verbose_name=b'\xd0\x93\xd1\x80\xd1\x83\xd0\xbf\xd0\xb0', blank=True, to='students.Group', null=True)),
            ],
            options={
                'verbose_name': '\u0415\u043a\u0437\u0430\u043c\u0435\u043d',
                'verbose_name_plural': '\u0415\u043a\u0437\u0430\u043c\u0435\u043d\u0438',
            },
            bases=(models.Model,),
        ),
    ]
|
import time
from twilio.rest import Client
from random import *
def sleep():
    """Pause for a random 3-6 second interval (adds jitter between actions)."""
    delay = randint(3, 6)
    time.sleep(delay)
def text_me(message):
    """Send *message* as an SMS to Jamie via the module-level Twilio `client`.

    `client` is created at module level after this definition; it exists by
    the time this function is called.
    """
    twilio_number = '+19562720613'
    jamie_number = '+19568214550'
    #phone_number = '+1%s' % input('What is your phone number?')
    # Removed the unused `valeria_number` local from the original.
    client.messages.create(to=jamie_number,
                           from_=twilio_number,
                           body=message)
# SECURITY: hard-coded Twilio credentials checked into source — these should be
# rotated and loaded from an environment variable or config file instead.
client = Client('AC190d9ac5ae8e8d522ee14d55704ae686', 'cc9f66925040f499193c5cd92427b1a2') # For Twilio
error = 1
# Heartbeat loop: texts "still working" every hour; on failure it texts the
# error once and exits (the decrement makes the while condition false).
while error == 1:
    try:
        text_me('Python still working :)')
    except Exception as err:
        text_me('Python error :(' + repr(err))
        error -= 1
    time.sleep(3600)
# Comments -
from common.until import printf, logging_except, logging_sql
from db.DB_Redis import RedisClient
from project.Jijinwang import Spider_basic_list as MS, Spider_List
class Schedule(object):
    """Orchestrates the fund spiders: runs the basic-list spider when its queue
    is empty, then each spider in Spider_List whose Redis queue has work."""

    def __init__(self):
        # Alias kept so external callers may use .main as well as .main_spider.
        self.main = self.main_spider
        self.spider_list = Spider_List

    def main_spider(self):
        """Run the basic-list spider through its full lifecycle."""
        Main = MS()
        Main.init()
        Main.start()
        Main.close()

    def start(self):
        """Main scheduling loop; never returns."""
        logging_except(module='init')
        logging_sql()
        while True:
            # Re-seed the basic info whenever its Redis queue is empty.
            main_redis = RedisClient("info_basic_info")
            if not main_redis.get():
                self.main_spider()
            main_redis.close()
            for cls in self.spider_list:
                spider_name = cls.__name__
                # Map class name to its Redis task key: Spider_Foo -> info_Foo.
                redis_key = spider_name.replace("Spider", "info")
                redis_client = RedisClient(redis_key)
                if redis_client.get():
                    self.work(cls)
                redis_client.close()
            printf("Wait for all spider end!")

    def work(self, spider_cla):
        """Run one spider; log failures, always pop its task and close it."""
        spider = spider_cla()
        try:
            spider.start()
        except Exception as e:
            # NOTE(review): assumes the spider instance exposes .url — confirm.
            logging_except(e, cls_name=spider_cla.__name__+' '+spider.url)
            print(e)
        finally:
            spider.pop()
            spider.close()
if __name__ == "__main__":
    # Launch the scheduling loop.
    Schedule().start()
|
#
# Copyright (c) 2017-2023 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
import keyring
import six
from sqlalchemy.orm.exc import NoResultFound
from sysinv.common import constants
from sysinv.common import utils
from sysinv.common import exception
from sysinv.helm import common as helm_common
from sysinv.puppet import quoted_str
@six.add_metaclass(abc.ABCMeta)
class BasePuppet(object):
    """Base class to encapsulate puppet operations for hiera configuration"""

    CONFIG_WORKDIR = '/tmp/config'
    DEFAULT_REGION_NAME = 'RegionOne'
    DEFAULT_SERVICE_PROJECT_NAME = 'services'
    DEFAULT_KERNEL_OPTIONS = constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_DEFAULT_OPTS
    SYSTEM_CONTROLLER_SERVICES = [
        'keystone',
        'dcorch'
    ]

    def __init__(self, operator):
        self._operator = operator

    @property
    def dbapi(self):
        return self._operator.dbapi

    @property
    def config_uuid(self):
        return self._operator.config_uuid

    @property
    def context(self):
        return self._operator.context

    @property
    def config(self):
        return self._operator.config

    def get_static_config(self):
        return {}

    def get_secure_static_config(self):
        return {}

    def get_system_config(self):
        return {}

    def get_secure_system_config(self):
        return {}

    def get_host_config(self, host):
        return {}

    def get_host_config_upgrade(self, host):
        return {}

    @staticmethod
    def quoted_str(value):
        return quoted_str(value)

    @staticmethod
    def _generate_random_password(length=16):
        return utils.generate_random_password(length=length)

    def _get_database_password(self, service):
        # Passwords are cached in the shared context across puppet operators.
        passwords = self.context.setdefault('_database_passwords', {})
        if service not in passwords:
            passwords[service] = self._get_keyring_password(service,
                                                            'database')
        return passwords[service]

    def _get_database_username(self, service):
        return 'admin-%s' % service

    def _get_keyring_password(self, service, user):
        """Return the stored keyring password, generating and storing one if absent."""
        password = keyring.get_password(service, user)
        if not password:
            password = self._generate_random_password()
            keyring.set_password(service, user, password)
        return password

    def _get_system(self):
        # The system record is cached in the context after the first lookup.
        system = self.context.get('_system', None)
        if system is None:
            system = self.dbapi.isystem_get_one()
            self.context['_system'] = system
        return system

    def _sdn_enabled(self):
        if self.dbapi is None:
            return False
        system = self._get_system()
        return system.capabilities.get('sdn_enabled', False)

    def _https_enabled(self):
        if self.dbapi is None:
            return False
        system = self._get_system()
        return system.capabilities.get('https_enabled', False)

    def _region_config(self):
        if self.dbapi is None:
            return False
        system = self._get_system()
        return system.capabilities.get('region_config', False)

    def _vswitch_type(self):
        if self.dbapi is None:
            return False
        system = self._get_system()
        return system.capabilities.get('vswitch_type', None)

    def _distributed_cloud_role(self):
        if self.dbapi is None:
            return None
        system = self._get_system()
        return system.distributed_cloud_role

    def _region_name(self):
        """Returns the local region name of the system"""
        if self.dbapi is None:
            return self.DEFAULT_REGION_NAME
        system = self._get_system()
        return system.region_name

    def _get_service_project_name(self):
        if self.dbapi is None:
            return self.DEFAULT_SERVICE_PROJECT_NAME
        system = self._get_system()
        return system.service_project_name

    def _get_service(self, service_name):
        if self.dbapi is None:
            return None
        try:
            service = self.dbapi.service_get(service_name)
        except exception.ServiceNotFound:
            # service not configured
            return None
        return service

    def _get_shared_services(self):
        if self.dbapi is None:
            return []
        system = self._get_system()
        return system.capabilities.get('shared_services', [])

    def _get_address_by_name(self, name, networktype):
        """
        Retrieve an address entry by name and scoped by network type
        """
        addresses = self.context.setdefault('_address_names', {})
        address_name = utils.format_address_name(name, networktype)
        address = addresses.get(address_name)
        if address is None:
            address = self.dbapi.address_get_by_name(address_name)
            addresses[address_name] = address
        return address

    def _get_management_address(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_MGMT)
        return address.address

    def _get_pxeboot_address(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_PXEBOOT)
        return address.address

    def _get_oam_address(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_OAM)
        return address.address

    def _get_admin_address(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_ADMIN)
        return address.address

    def _get_cluster_host_address(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_CLUSTER_HOST)
        return address.address

    def _get_cluster_pod_subnet(self):
        address = self._get_address_by_name(
            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_CLUSTER_POD)
        # BUG FIX: prefix may be an integer; concatenating it directly to a
        # str raises TypeError. str() is a no-op if it is already a string.
        subnet = address.address + '/' + str(address.prefix)
        return subnet

    def _get_subcloud_endpoint_address(self):
        # Prefer the admin network address; fall back to management when
        # the admin address is not configured.
        try:
            address = self._format_url_address(self._get_admin_address())
        except exception.AddressNotFoundByName:
            address = self._format_url_address(self._get_management_address())
        return address

    def _get_host_cpu_list(self, host, function=None, threads=False):
        """
        Retrieve a list of CPUs for the host, filtered by function and thread
        siblings (if supplied)
        """
        cpus = []
        for c in self.dbapi.icpu_get_by_ihost(host.id):
            if c.thread != 0 and not threads:
                continue
            if c.allocated_function == function or not function:
                cpus.append(c)
        return cpus

    def _get_vswitch_cpu_list(self, host):
        cpus = self._get_host_cpu_list(host, constants.VSWITCH_FUNCTION)
        return sorted(cpus, key=lambda c: c.cpu)

    def _get_platform_cpu_list(self, host):
        cpus = self._get_host_cpu_list(host, constants.PLATFORM_FUNCTION)
        return sorted(cpus, key=lambda c: c.cpu)

    def _is_all_platform_cpu(self, host):
        """
        Check all cpus are for platform
        """
        for c in self.dbapi.icpu_get_by_ihost(host.id):
            if c.allocated_function != constants.PLATFORM_FUNCTION:
                return False
        return True

    def _get_hyperthreading_enabled(self, host):
        """
        Check if the Hyper-Threading feature is enabled on host
        """
        return self.dbapi.icpu_is_hyper_threading_enabled(host.id)

    def _get_service_parameters(self, service=None):
        service_parameters = []
        if self.dbapi is None:
            return service_parameters
        try:
            service_parameters = self.dbapi.service_parameter_get_all(
                service=service)
        # the service parameter has not been added
        except NoResultFound:
            pass
        return service_parameters

    def _get_security_feature(self):
        if self.dbapi is None:
            return self.DEFAULT_KERNEL_OPTIONS
        system = self._get_system()
        return system.security_feature

    @staticmethod
    def _service_parameter_lookup_one(service_parameters, section, name,
                                      default):
        for param in service_parameters:
            if param['section'] == section and param['name'] == name:
                return param['value']
        return default

    def _format_service_parameter(self, service_parameters, section, group, name):
        """Return {group+name: value} if the parameter is set, else an empty dict."""
        parameter = {}
        key = group + name
        value = self._service_parameter_lookup_one(service_parameters, section,
                                                   name, 'undef')
        if value != 'undef':
            parameter[key] = value
        return parameter

    @staticmethod
    def _format_url_address(address):
        return utils.format_url_address(address)

    # TODO (jgauld): Refactor to use utility has_openstack_compute(labels)
    def is_openstack_compute(self, host):
        if self.dbapi is None:
            return False
        for obj in self.dbapi.label_get_by_host(host.id):
            if helm_common.LABEL_COMPUTE_LABEL == obj.label_key:
                return True
        return False
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import pandas as pd
import argparse
def mail_to_list(email_list, info_list, gmail, password, subject, test_only=False):
    """Send an email to everyone in the provided csv.

    Args:
        email_list: iterable of recipient addresses.
        info_list: iterable of per-recipient data, zipped with email_list.
        gmail: sending gmail address.
        password: gmail password (used by the commented-out login call).
        subject: subject line for every message.
        test_only: if True, print what would be sent instead of sending.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    # SECURITY FIX: never print credentials; only show the sending account.
    print("GMAIL : {}".format(gmail))
    #server.login(gmail, password)
    try:
        # Count how many emails have been sent
        i = 0
        num_users = len(info_list)
        for email, info in zip(email_list, info_list):
            body = """PUT TEXT OF YOUR EMAIL HERE AND USE .format(info) to insert
            email-specific information into the email."""
            if test_only:  # If you want to do a test run first
                print(
                    "_______________________________________________________________\n"
                    "TEST ONLY: {}".format(test_only))
                print("TESTING: ROW {}".format(i))
                print("Email body: {} \n GMAIL (sending from): {} \n"
                      "Current email: {} \n Info: {} \n Subject: {}".format(
                          body, gmail, email, info, subject))
                print("_______________________________________________________________")
                i += 1
            else:  # Once happy with formatting, send the emails
                msg = MIMEMultipart()
                msg['From'] = gmail
                msg['To'] = email
                msg['Subject'] = subject
                msg.attach(MIMEText(body, 'plain'))
                text = msg.as_string()
                server.sendmail(gmail, email, text)
                print('Sent email %s out of %s' % ((i + 1), num_users))
                i += 1
    finally:
        # BUG FIX: the original never closed the SMTP session.
        server.quit()
    # Notify user that emails have finished sending
    print('Done sending emails.')
def parse_args():
    """Parse command-line options for the mailer.

    Returns:
        (gmail, password, subject, file, test) tuple.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("gmail", type=str,
                        help="Your gmail username, including @gmail.com")
    parser.add_argument("gmail_PW", type=str, help="Your gmail password.")
    parser.add_argument("subject", type=str, help="Email subject.")
    parser.add_argument("file", type=str, help="Read from Excel file")
    parser.add_argument("-t", "--test", action="store_true", required=False)
    results = vars(parser.parse_args())
    # Echo the selections back to the user.
    print("Your email management selections: \n {}".format(results))
    return (results["gmail"], results["gmail_PW"], results["subject"],
            results["file"], results["test"])
def main():
    """Entry point: parse CLI args, load the Excel sheet, send the mail."""
    gmail, pw, subject, file, test = parse_args()
    # Default form: spreadsheet headers are 'email' and 'info'
    # (info may span more than one column).
    sheet = pd.read_excel(file)
    mail_to_list(sheet['email'], sheet['info'], gmail, pw, subject,
                 test_only=test)


if __name__ == "__main__":
    main()
|
# SQL fixture: four small graphs with their nodes and edges.
# Apparent schema (inferred from the inserts): graphs(id, ?, flag),
# nodes(graph_id, node_id, label), edges(src_node, dst_node, label).
# NOTE(review): the negative node ids (-3, -6) look deliberate -
# confirm their meaning against the schema definition.
test_data="""
insert into graphs values (1, 1, 0);
insert into nodes values (1, 1, "A");
insert into nodes values (1, 2, "B");
insert into nodes values (1, 3, "C");
insert into nodes values (1, -3, "D");
insert into edges values (1, 2, NULL);
insert into edges values (1, 3, NULL);
insert into edges values (3, -3, NULL);
insert into graphs values (2, 1, 0);
insert into nodes values (2, 4, "A");
insert into nodes values (2, 5, "B");
insert into nodes values (2, 6, "C");
insert into nodes values (2, -6, "E");
insert into edges values (4, 5, NULL);
insert into edges values (4, 6, NULL);
insert into edges values (4, -6, NULL);
insert into nodes values (2, 7, "D");
insert into edges values (6, 7, NULL);
insert into graphs values(3, 1, 1);
insert into nodes values (3, 8, "A");
insert into nodes values (3, 9, "B");
insert into nodes values (3, 10, "X");
insert into edges values (8, 9, NULL);
insert into edges values (8, 10, NULL);
insert into graphs values (4,1, 0);
insert into nodes values (4, 11, "A");
insert into nodes values (4, 12, "X");
insert into nodes values (4, 13, "B");
insert into nodes values (4, 14, "D");
insert into edges values (11, 12, NULL);
insert into edges values (11, 13, NULL);
insert into edges values (12, 14, NULL);
"""
|
import unittest
import pytest
import json
from app.api.v1 import views
from app.api.app import create_app
class BaseTest(unittest.TestCase):
    """Shared fixtures for the v1 API test cases."""

    def setUp(self):
        # Fresh app per test, in the testing configuration.
        self.app = create_app(config_name="testing")
        self.client = self.app.test_client     # client factory (call it to get a client)
        self.client1 = self.app.test_client()  # ready-made client instance
        # Canned JSON request payloads used by the test methods.
        self.add_office = json.dumps({"type": "Government", "name": "Senate"})
        self.add_party = json.dumps(
            {"name": "Chaa chetu", "hqAddress": "Thika", "logoUrl": "images"}
        )

    def tearDown(self):
        self.app.testing = False
|
from statistics import mean
from math import sin, cos, atan, pi
def add_tuple(t1, t2):
    """Return the component-wise sum of two (x, y) pairs."""
    return (t1[0] + t2[0], t1[1] + t2[1])
def sub_tuple(t1, t2):
    """Return the component-wise difference t1 - t2 of two (x, y) pairs."""
    return (t1[0] - t2[0], t1[1] - t2[1])
def taxi_distance(coord1, coord2):
    """Manhattan (taxi-cab) distance between two (x, y) points."""
    dx = abs(coord1[0] - coord2[0])
    dy = abs(coord1[1] - coord2[1])
    return dx + dy
def in_masked(check, range_list):  # looks for values in a list of discrete values and range pairs
    """Return True if the angle `check` is blocked by the mask.

    range_list is the [bounded_flag, range_pairs, discrete_angles]
    structure built by add_to_mask: range_pairs are (low, high)
    intervals (with low > high meaning the interval wraps through 0)
    and discrete_angles are individually blocked values.
    """
    result = False
    if check in range_list[2]:
        result = True
    else:
        for pair in range_list[1]:
            if pair[0] < pair[1]:
                if pair[0] <= check <= pair[1]:
                    result = True
            elif pair[1] < pair[0]:  # Pair wraps through 0
                # BUG FIX: the original condition here was
                # `pair[1] > pair[0]`, which can never hold once the
                # branch above has failed, so wrapping pairs were
                # silently never matched.
                if pair[0] <= check:
                    result = True
                elif pair[1] >= check:
                    result = True
    return result
def add_to_mask(range_list, rad, short_angle):
    """Mark `short_angle` (a fraction of a full turn in [0, 1)) as blocked
    at Manhattan radius `rad`, merging it into the existing mask.

    range_list is [bounded_flag, range_pairs, discrete_angles]:
      - range_pairs: (low, high) blocked intervals; low > high means the
        interval wraps through zero.
      - discrete_angles: individually blocked angles not yet merged into a pair.
    Sets range_list[0] = True once the pairs cover > 90% of the circle.
    Mutates and returns range_list.
    """
    prev_rad = rad - 1
    # NOTE(review): `is not 0` relies on CPython small-int caching and is a
    # SyntaxWarning on Python 3.8+; should be `prev_rad != 0`.
    if prev_rad is not 0:
        # NOTE(review): prev_min_angle is computed but never used.
        prev_min_angle = (1/(prev_rad*4))
        # Brackets are half an angular step either side of the new angle,
        # wrapped back into [0, 1).
        bracket_high = short_angle + (1/(rad*4))
        bracket_low = short_angle - (1/(rad*4))
        if bracket_low < 0:
            bracket_low += 1
        if bracket_high >= 1:
            bracket_high -= 1
        mask_added = False
        # Simple case: we've got a value between two previous values so block them out as a range
        if (bracket_high in range_list[2]):
            range_list[2].remove(bracket_high)
            range_list[1].append((short_angle, bracket_high))
            mask_added = True
        elif (bracket_low in range_list[2]):
            range_list[2].remove(bracket_low)
            range_list[1].append((bracket_low, short_angle))
            mask_added = True
        else:  # we don't have matching discrete angles
            # NOTE(review): this loop removes from / appends to range_list[1]
            # while iterating over it - at most one branch fires per pass, but
            # the appended pair may or may not be revisited; confirm intended.
            for range_pair in range_list[1]:
                if range_pair[0] < range_pair[1]:  # need to check if the range includes zero
                    if bracket_high > bracket_low:  # need to check if the test angle includes zero
                        # Check if the lower bracket is captured within an existing masked range
                        if (range_pair[0] <= bracket_low <= range_pair[1]) and (range_pair[1] < short_angle):
                            range_list[1].remove(range_pair)
                            range_list[1].append((range_pair[0], short_angle))  # expand this range pair upwards
                            mask_added = True
                        # Check if the upper bracket is captured within an existing masked range
                        elif (range_pair[0] <= bracket_high <= range_pair[1]) and (range_pair[0] > short_angle):
                            range_list[1].remove(range_pair)
                            range_list[1].append((short_angle, range_pair[1]))  # expand this range pair downwards
                            mask_added = True
                    else:  # test range includes zero
                        # range_pair doens't include zero, bracket_low/high does include zero.
                        # bracket_low is larger than bracket high and short angle in 4th quadrant
                        if (range_pair[0] <= bracket_low <= range_pair[1]) and (range_pair[1] < short_angle) \
                                and (short_angle > 0.75):
                            range_list[1].remove(range_pair)
                            range_list[1].append((range_pair[0], short_angle))  # expand this range pair upwards
                            mask_added = True
                        # bracket_low is larger than bracket high and short angle in 1st quadrant
                        elif (range_pair[0] <= bracket_low <= range_pair[1]) and (range_pair[1] > short_angle) \
                                and (range_pair[0] > short_angle) and (short_angle < 0.25):
                            range_list[1].remove(range_pair)
                            range_list[1].append((range_pair[0], short_angle))  # expand this range pair upwards
                            mask_added = True
                        # bracket_low is larger than bracket high and short angle in 4th quadrant
                        elif (range_pair[0] <= bracket_high <= range_pair[1]) and (range_pair[0] < short_angle) \
                                and (short_angle > 0.75):
                            range_list[1].remove(range_pair)
                            range_list[1].append((short_angle, range_pair[1]))  # expand this range pair downwards
                            mask_added = True
                        # bracket_low is larger than bracket high and short angle in 1st quadrant
                        elif (range_pair[0] <= bracket_high <= range_pair[1]) and (range_pair[0] > short_angle) \
                                and (range_pair[1] > short_angle) and (short_angle < 0.25):
                            range_list[1].remove(range_pair)
                            range_list[1].append((short_angle, range_pair[1]))  # expand this range pair downwards
                            mask_added = True
                else:  # range pair includes zero (eg from the range from 0.75 to 0.25)
                    # NOTE(review): the `or` conditions below bind tighter than
                    # intended reading suggests (A or (B and C)); confirm the
                    # missing parentheses are deliberate.
                    if bracket_high > bracket_low:  # need to check if the test angle includes zero
                        # Check if the lower bracket is captured within an existing masked range
                        if (0 <= bracket_low <= range_pair[1]) or (range_pair[0] <= bracket_low <= 1) \
                                and (short_angle > range_pair[1]):
                            range_list[1].remove(range_pair)
                            range_list[1].append((range_pair[0], short_angle))  # expand this range pair upwards
                            mask_added = True
                        # Check if the upper bracket is captured within an existing masked range
                        elif (0 <= bracket_high <= range_pair[1]) or (range_pair[0] <= bracket_high <= 1) \
                                and (short_angle < range_pair[0]):
                            range_list[1].remove(range_pair)
                            range_list[1].append((short_angle, range_pair[1]))  # expand this range pair downwards
                            mask_added = True
                    else:  # bracket_pair also contain zero
                        # range_pair includes zero, bracket_low/high includes zero.
                        # bracket_low is larger than bracket high and short angle in 1st quadrant
                        if (range_pair[0] <= bracket_low <= 1) or (0 <= bracket_low <= range_pair[1]) \
                                and (range_pair[1] < short_angle) and (short_angle < 0.25):
                            range_list[1].remove(range_pair)
                            range_list[1].append((range_pair[0], short_angle))  # expand this range pair upwards
                            mask_added = True
                        # bracket_low is larger than bracket high and short angle in 4th quadrant
                        elif (range_pair[0] <= bracket_high <= 1) or (0 <= bracket_high <= range_pair[1]) \
                                and (range_pair[0] > short_angle) and (short_angle > 0.75):
                            range_list[1].remove(range_pair)
                            range_list[1].append((short_angle, range_pair[1]))  # expand this range pair downwards
                            mask_added = True
        if not mask_added:
            range_list[2].append(short_angle)
    else:  # this is one of the first blocked angles - a range can't be formed yet
        range_list[2].append(short_angle)
    if prev_rad is not 0:
        # NOTE(review): prev_min_angle recomputed here and again unused.
        prev_min_angle = (1/(prev_rad*4))
        # Need to check if this closes out the region
        # NOTE(review): new_pairs is built below but never assigned back to
        # range_list[1], so this whole merge pass is dead code.
        new_pairs = []
        for range_pair in range_list[1]:
            new_pair_added = False
            for range_pair2 in range_list[1]:
                # both monotonic contiguous pairs
                if range_pair[0] < range_pair[1]:
                    if range_pair2[0] < range_pair2[1]:
                        if range_pair2[0] <= range_pair[0] <= range_pair2[1] and range_pair[1] >= range_pair2[1]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair2[0] <= range_pair[1] <= range_pair2[1] and range_pair[0] <= range_pair2[0]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
                    # range_pair monotonic, range_pair2 contains 0
                    else:
                        if range_pair2[0] <= range_pair[0] and range_pair[1] >= range_pair2[1]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair[0] <= range_pair2[1] and range_pair[1] >= range_pair2[1]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair2[0] <= range_pair[1] and range_pair[0] <= range_pair2[1]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
                        elif range_pair[1] <= range_pair2[1] and range_pair[0] <= range_pair2[1]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
                else:
                    # range_pair contains 0, range_pair2 monotonic
                    if range_pair2[0] < range_pair2[1]:
                        if range_pair2[0] <= range_pair[0] <= range_pair2[1] and range_pair[0] <= range_pair2[0]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair2[0] <= range_pair[1] <= range_pair2[1] and range_pair[0] <= range_pair2[0]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
                    # both ranges contain 0
                    else:
                        if range_pair2[0] <= range_pair[0] and range_pair[1] >= range_pair2[1]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair[0] <= range_pair2[1] and range_pair[1] >= range_pair2[1]:
                            new_pairs.append((range_pair2[0], range_pair[1]))
                            new_pair_added = True
                        elif range_pair2[0] <= range_pair[1] and range_pair[0] <= range_pair2[1]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
                        elif range_pair[1] <= range_pair2[1] and range_pair[0] <= range_pair2[1]:
                            new_pairs.append((range_pair[0], range_pair2[1]))
                            new_pair_added = True
            if not new_pair_added:
                new_pairs.append(range_pair)
        # Total angular coverage of all pairs; > 0.90 is treated as "bounded".
        cumulate = 0
        for range_pair in range_list[1]:
            if range_pair[0] < range_pair[1]:
                cumulate += (range_pair[1] - range_pair[0])
            else:
                cumulate += (1 - range_pair[0]) + range_pair[1]
        # if cumulate > 1:
        #     print(range_pair)
        #     print('too much')
        #     exit()
        if cumulate > 0.90:
            range_list[0] = True
            # print('BOUNDED!!!')
            # print('pairs: %i' % len(range_list[1]))
            # print('disc: %i ' % len(range_list[2]))
            # if len(range_list[1]) > 0:
            #     print(range_list[1])
    return range_list
# Driver script: grow a region around each input point one Manhattan
# radius at a time, masking off directions that have collided with a
# neighbouring region, until every region is bounded; then report the
# largest region (looks like an Advent-of-Code day-6 style puzzle -
# TODO confirm).
raw = []
with open('Data/input006.txt', 'r') as file:
    for line in file:
        raw.append(line)
coords = []
for pair in raw:
    x, y = pair.split(',')
    coords.append((int(x), int(y)))
# Bounding box of the input, padded by `buf` cells on each side.
xmax = max(coords, key=lambda x: x[0])[0]
ymax = max(coords, key=lambda x: x[1])[1]
xmin = min(coords, key=lambda x: x[0])[0]
ymin = min(coords, key=lambda x: x[1])[1]
buf = 50
grid_xmax = xmax - xmin + buf
grid_ymax = ymax - ymin + buf
points_size = {}
norm_points = []
# Shift all points so they sit `buf` cells inside the grid origin.
for pair in coords:
    normed_point = sub_tuple(pair, (xmin, ymin))
    normed_point = add_tuple(normed_point, (buf, buf))
    norm_points.append(normed_point)
    points_size[normed_point] = [1, [False, [],[]]] # Assuming no duplicates - each space at contains its own origin
# points_size[normed_point][0] is the number of squares/coordinates associated with the region
# points_size[normed_point][1][0] indicates if the region is complete and bounded
# points_size[normed_point][1][1] is a list of range tuples that describe bound angles
# points_size[normed_point][1][2] is a list of discrete angles that have been bound
print('max %i, %i' % (grid_xmax, grid_ymax))
grid = [[None]*(grid_ymax + 1) for i in range(grid_xmax + 1)]
radius = 0
active_regions = True
while active_regions:
    if radius % 10 == 0:
        print(radius)  # progress indicator
    radius += 1
    active_regions = False
    for point in points_size:
        if not points_size[point][1][0]:  # region not yet bounded
            # NOTE(review): debug leftovers - dumps/aborts past radius 850/851.
            if radius > 850:
                print(point)
            if radius > 851:
                exit()
            active_regions = True
            # noinspection PyTypeChecker
            grid[point[0]][point[1]] = [point, 0] # This gets done over and over which isn't great, but shouldn't hurt.
            d = radius * 4
            for r in range(d): # We iterate over points on the Manhatten circle
                ratio = r/d
                if not in_masked(ratio, points_size[point][1]):
                    # Convert the angular fraction to a grid offset at this radius.
                    angle = 2 * pi * ratio
                    x_delta = round(cos(angle) * radius)
                    y_delta = round(sin(angle) * radius)
                    check_point = add_tuple(point, (x_delta, y_delta))
                    # NOTE(review): bounds test uses < grid_xmax/grid_ymax though the
                    # grid has grid_xmax+1 / grid_ymax+1 cells - last row/column is
                    # never filled; confirm intended.
                    if 0 <= check_point[0] < grid_xmax and 0 <= check_point[1] < grid_ymax:
                        # noinspection PyTypeChecker
                        if grid[check_point[0]][check_point[1]] is None:
                            # update grid and points list
                            grid[check_point[0]][check_point[1]] = [point, radius]
                            points_size[point][0] += 1
                        elif grid[check_point[0]][check_point[1]][1] > radius: # probably shouldn't happen...
                            # remove incorrect point
                            offending = grid[check_point[0]][check_point[1]][0]
                            points_size[offending][0] -= 1
                            # update grid and points list
                            grid[check_point[0]][check_point[1]] = [point, radius]
                            points_size[point][0] += 1
                        elif grid[check_point[0]][check_point[1]][1] == radius: # we've just bumped into another region
                            # noinspection PyTypeChecker
                            grid[check_point[0]][check_point[1]] = ['#', 0]
                            points_size[point][1] = add_to_mask(points_size[point][1], radius, ratio)
                        elif grid[check_point[0]][check_point[1]][1] < radius: # we've just crossed into another region
                            points_size[point][1] = add_to_mask(points_size[point][1], radius, ratio)
                    else: # we've hit a boundary
                        points_size[point][1] = add_to_mask(points_size[point][1], radius, ratio)
print('complete')
print(max(points_size.items(), key=lambda x: x[1][0]))
t = sorted(points_size.items(), key=lambda x: x[1][0])
# Recount region sizes directly from the grid as a cross-check against
# the incrementally-maintained points_size counters.
maxlist = {}
for row in grid:
    for item in row:
        if item is not None:
            if item[0] in maxlist:
                maxlist[item[0]] += 1
            else:
                maxlist[item[0]] = 1
print(max(maxlist.items(), key=lambda x: x[1]))
a = sorted(maxlist.items(), key=lambda x: x[1])
print(len(a))
for row in a:
    print(row)
|
import math
import backtrader as bt
class GoldenCrossStrategy(bt.Strategy):
    """Backtrader golden-cross strategy.

    Buys with `order_percentage` of available cash when the fast SMA
    crosses above the slow SMA, and closes the position when the fast
    SMA crosses back below the slow one.
    """

    # fast / slow: SMA look-back periods; order_percentage: fraction of
    # broker cash committed per entry; ticker: symbol used in log output.
    params = (('fast', 7), ('slow', 21), ('order_percentage', 0.80), ('ticker', 'AAPL'))

    def log(self, txt, dt=None):
        """Print `txt` prefixed with the given date (default: current bar's date)."""
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))

    def __init__(self):
        self.size = 0  # shares bought on the most recent entry
        self.dataclose = self.datas[0].close
        # NOTE(review): plot names hard-code 7/21 days and will be wrong
        # if the fast/slow params are overridden.
        self.fast_moving_average = bt.indicators.SMA(
            self.data.close, period=self.params.fast, plotname='7 day moving average'
        )
        self.slow_moving_average = bt.indicators.SMA(
            self.data.close, period=self.params.slow, plotname='21 day moving average'
        )
        # CrossOver is +1 when fast crosses above slow, -1 when it crosses below.
        self.crossover = bt.indicators.CrossOver(self.fast_moving_average, self.slow_moving_average)

    def next(self):
        """Per-bar logic: enter on a golden cross, exit on a death cross."""
        # print('next')
        self.log('Close, %.2f' % self.dataclose[0])
        if self.position.size == 0 and self.crossover > 0:
            # Size the order as a fixed fraction of available cash.
            amount_to_invest = (self.params.order_percentage * self.broker.cash)
            self.size = math.floor(amount_to_invest / self.data.close)
            print('BUY {} shares of {} at {}'.format(self.size, self.params.ticker, self.data.close[0]))
            self.buy(size=self.size)
        elif self.position.size > 0 > self.crossover:
            print('SELL {} shares of {} at {}'.format(self.size, self.params.ticker, self.data.close[0]))
            self.close()
|
from typing import List, Optional
from fibonacci.services import (
fibonacci_recursive_with_database,
fibonacci_up_to_index,
fibonacci_up_to_value,
)
def up_to_including_index(n: int) -> List[int]:
    """Return the Fibonacci sequence up to and including index *n*.

    Thin readability wrapper around fibonacci_up_to_index (the original
    docstring named a non-existent fibonacci_up_to_including_index_database).
    """
    return fibonacci_up_to_index(n)
def up_to_value(n: int) -> Optional[List[str]]:
    """Return the Fibonacci numbers up to the value *n*.

    Thin readability wrapper around fibonacci_up_to_value (the original
    docstring named a non-existent fibonacci_up_to_value_database).
    NOTE(review): the List[str] element type is taken from the original
    annotation - confirm the service really returns strings.
    """
    return fibonacci_up_to_value(n)
def for_index(n: int) -> str:
    """Return the *n*-th Fibonacci number, as a string, via the
    database-backed recursive service."""
    return str(fibonacci_recursive_with_database(n))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import re
from PyQt5 import QtCore, QtGui, QtWidgets
from client import Client
from user import User
from chat_window import Ui_ChatWindow
from chat_window import ChatWindow
import time
class Ui_LoginWindow(object):
    """Login window UI (originally pyuic5-generated, since hand-edited).

    Collects server address/port and user details, validates them, and on
    a successful connection hands off to the chat window.
    """

    def openChatWindow(self):
        """Hide the login window and open the chat window for the connected client."""
        self.window=ChatWindow(self.main_window,self.client)
        self.ui = Ui_ChatWindow(self.client,self.window)
        self.ui.setupUi(self.window)
        self.main_window.hide()
        self.window.show()

    def setupUi(self, LoginWindow):
        """Build all widgets and wire their signals (pyuic5-style layout code)."""
        self.main_window=LoginWindow
        LoginWindow.setObjectName("LoginWindow")
        LoginWindow.resize(425, 320)
        LoginWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.centralwidget = QtWidgets.QWidget(LoginWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Title and info labels.
        self.label_title = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(24)
        font.setItalic(True)
        self.label_title.setFont(font)
        self.label_title.setMouseTracking(False)
        self.label_title.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label_title.setAlignment(QtCore.Qt.AlignCenter)
        self.label_title.setObjectName("label_title")
        self.verticalLayout.addWidget(self.label_title)
        self.label_info = QtWidgets.QLabel(self.centralwidget)
        self.label_info.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.label_info.setAlignment(QtCore.Qt.AlignCenter)
        self.label_info.setObjectName("label_info")
        self.verticalLayout.addWidget(self.label_info)
        # Form fields: server IP, port, username, bio, colour.
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.label_serverIP = QtWidgets.QLabel(self.centralwidget)
        self.label_serverIP.setObjectName("label_serverIP")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_serverIP)
        self.text_ipaddr = QtWidgets.QLineEdit(self.centralwidget)
        self.text_ipaddr.setObjectName("text_ipaddr")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.text_ipaddr)
        self.label_port = QtWidgets.QLabel(self.centralwidget)
        self.label_port.setObjectName("label_port")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_port)
        self.text_port = QtWidgets.QLineEdit(self.centralwidget)
        self.text_port.setObjectName("text_port")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.text_port)
        self.label_username = QtWidgets.QLabel(self.centralwidget)
        self.label_username.setObjectName("label_username")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_username)
        self.text_username = QtWidgets.QLineEdit(self.centralwidget)
        self.text_username.setObjectName("text_username")
        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.text_username)
        self.label_bio = QtWidgets.QLabel(self.centralwidget)
        self.label_bio.setObjectName("label_bio")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_bio)
        self.text_bio = QtWidgets.QLineEdit(self.centralwidget)
        self.text_bio.setObjectName("text_bio")
        self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.text_bio)
        self.label_colour=QtWidgets.QLabel(self.centralwidget)
        self.label_colour.setObjectName("label_colour")
        self.formLayout.setWidget(7,QtWidgets.QFormLayout.LabelRole,self.label_colour)
        self.text_colour=QtWidgets.QLineEdit(self.centralwidget)
        self.text_colour.setObjectName("text_colour")
        self.formLayout.setWidget(7,QtWidgets.QFormLayout.FieldRole,self.text_colour)
        self.button_connect = QtWidgets.QPushButton(self.centralwidget)
        self.button_connect.setEnabled(True)
        self.button_connect.setObjectName("button_connect")
        self.button_connect.clicked.connect(self.startConnection)
        #Enter button pressed leads to connect
        self.text_ipaddr.returnPressed.connect(self.startConnection)
        self.text_bio.returnPressed.connect(self.startConnection)
        self.text_port.returnPressed.connect(self.startConnection)
        self.text_username.returnPressed.connect(self.startConnection)
        self.text_colour.returnPressed.connect(self.startConnection)
        self.formLayout.setWidget(8, QtWidgets.QFormLayout.SpanningRole, self.button_connect)
        self.verticalLayout.addLayout(self.formLayout)
        self.label_connection_info = QtWidgets.QLabel(self.centralwidget)
        self.label_connection_info.setObjectName("label_connection_info")
        self.verticalLayout.addWidget(self.label_connection_info)
        LoginWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(LoginWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 425, 22))
        self.menubar.setObjectName("menubar")
        LoginWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(LoginWindow)
        self.statusbar.setObjectName("statusbar")
        LoginWindow.setStatusBar(self.statusbar)
        self.retranslateUi(LoginWindow)
        QtCore.QMetaObject.connectSlotsByName(LoginWindow)

    def validateInputs(self):
        """Validate IP, port, username and colour fields; return (ok, message).

        NOTE(review): the regex patterns are plain (non-raw) string
        literals, which triggers DeprecationWarning for their escapes on
        newer Pythons. Also, this colour regex only accepts UPPERCASE hex
        digits, while startConnection later accepts both cases - so
        lowercase colours are rejected here. Confirm intent.
        """
        ipregex = re.compile("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
        if not ipregex.match(self.text_ipaddr.text()):
            return (False,"{} is not a valid IPv4 address, try again!".format(self.text_ipaddr.text()))
        portNumber = self.text_port.text()
        if not portNumber.isnumeric():
            return (False,"{} is not an integer port number, try again!".format(portNumber))
        usernameregex = re.compile("^\w{4,12}$")
        if not usernameregex.match(self.text_username.text()):
            return (False,"Your username must be between 4 and 12 characters long with no spaces!")
        colourregex=re.compile("^#[A-F0-9]{6}$")
        if not colourregex.match(self.text_colour.text()) and self.text_colour.text()!="":
            return (False,"Your colour must be in hexadecimal form (i.e #0A1B2C)")
        return (True,"OK")

    def userAccepted(self):
        """Server accepted the user: reset status and open the chat window."""
        self.updateStatusLabel('Status:Waiting for input...')
        self.enableButton(True)
        self.openChatWindow()

    def userDenied(self,msg):
        """Server rejected the user: show the reason and re-enable the form."""
        self.errBox('Connection error','Rejected from server:{}'.format(msg))
        self.enableButton(True)
        self.updateStatusLabel('Status:Waiting for input...')

    def errBox(self,title,msg):
        """Show a modal critical-error message box."""
        msgbox = QtWidgets.QMessageBox()
        msgbox.setWindowTitle(title)
        msgbox.setText(msg)
        msgbox.setIcon(QtWidgets.QMessageBox.Critical)
        msgbox.exec_()

    def updateStatusLabel(self,msg):
        """Set the status label text at the bottom of the form."""
        self.label_connection_info.setText(msg)

    def enableButton(self,status):
        """Enable/disable the Connect button."""
        self.button_connect.setEnabled(status)

    def initClient(self,user,addr,port):
        """Create the network client and hook up its accept/deny signals."""
        self.client=Client(user,addr,port)
        self.client.signal_user_accepted.connect(self.userAccepted)
        self.client.signal_user_denied.connect(self.userDenied)

    def startConnection(self):
        """Validate the form and, if valid, connect and send the user info."""
        result = self.validateInputs()
        if result[0]:
            # Disable the button while connecting to avoid double-clicks.
            self.button_connect.setEnabled(False)
            self.updateStatusLabel("Status:Attempting to connect to server...")
            colourregex=re.compile("^#[A-Fa-f0-9]{6}$")
            self.user = User(self.text_username.text(),self.text_bio.text())
            if colourregex.match(self.text_colour.text()):#If they have actually entered a colour
                self.user.colour=self.text_colour.text()
            self.initClient(self.user,self.text_ipaddr.text(),int(self.text_port.text()))
            res = self.client.connect_to_server()
            if res[0]:
                #Display new window here now, but just update status for now
                self.updateStatusLabel("Status:Connected! Sending user info...")
                self.client.send_user_info(self.user)
            else:
                self.errBox("Connection error",res[1])
                self.button_connect.setEnabled(True)
                self.updateStatusLabel("Status:Waiting for input...")
        else:
            self.errBox("Input error!",result[1])

    def retranslateUi(self, LoginWindow):
        """Set all user-visible strings (pyuic5-generated translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        LoginWindow.setWindowTitle(_translate("LoginWindow", "PyRC Login"))
        self.label_title.setText(_translate("LoginWindow", "PyRC Client"))
        self.label_info.setText(_translate("LoginWindow", "A Python chat application made by Charles Hampton-Evans"))
        self.label_serverIP.setText(_translate("LoginWindow", "Server IP:"))
        self.label_port.setText(_translate("LoginWindow", "Port:"))
        self.label_username.setText(_translate("LoginWindow", "Username:"))
        self.label_bio.setText(_translate("LoginWindow", "Bio:"))
        self.button_connect.setText(_translate("LoginWindow", "Connect"))
        self.label_connection_info.setText(_translate("LoginWindow", "Status:Waiting for input..."))
        self.label_colour.setText(_translate("LoginWindow","Colour:"))
if __name__ == "__main__":
    import sys

    # Stand-alone launch: build the login window and enter the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    login_ui = Ui_LoginWindow()
    login_ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
|
#Given a 32-bit signed integer, reverse digits of an integer.
# Example 1:
# Input: 123
# Output: 321
class Solution:
    def reverse(self, num):
        """Reverse the digits of a 32-bit signed integer.

        Returns 0 when the reversed value falls outside the 32-bit
        signed range [-2**31, 2**31 - 1].
        """
        revd = int(str(num)[::-1]) if num >= 0 else -int(str(num)[1:][::-1])
        # BUG FIX: the upper bound is 2**31 - 1; the original test
        # (revd > 2**31) wrongly accepted revd == 2**31.
        if revd > 2**31 - 1 or revd < -2**31:
            return 0
        else:
            return revd
print(reverse("", 213)) |
import pickle as pkl
import numpy as np
import json
from collections import Counter
import csv
import random
import matplotlib.pyplot as plt
# from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
#vector helpers
#note this code is adapted from Joel Grus's Excellent Data Science from scratch
#https://github.com/joelgrus/data-science-from-scratch
def vector_add(v,w):
    """Component-wise sum of two equal-length vectors."""
    return [a + b for a, b in zip(v, w)]
def vector_sum(vectors):
    """Component-wise sum of a sequence of equal-length vectors.

    Uses a zip(*) transpose instead of the original pairwise fold;
    also returns [] for an empty input (the original raised IndexError).
    """
    return [sum(components) for components in zip(*vectors)]
def vector_subtract(v, w):
    """Component-wise difference v - w of two equal-length vectors."""
    return [a - b for a, b in zip(v, w)]
def scalar_multiply(c, v):
    """Multiply every component of vector v by the scalar c."""
    return [c * component for component in v]
def vector_mean(vectors):
    """Component-wise mean of a non-empty list of vectors."""
    count = len(vectors)
    return scalar_multiply(1/count, vector_sum(vectors))
def dot(v, w):
    """Dot product of two equal-length vectors."""
    return sum(a * b for a, b in zip(v, w))
def sum_of_squares(v):
    """Return v . v, i.e. the sum of the squared components of v."""
    return dot(v,v)
def squared_distance(v,w):
    """Squared Euclidean distance between vectors v and w."""
    difference = vector_subtract(v, w)
    return sum_of_squares(difference)
class KMeans(object):
    """Performs k-means clustering (Lloyd's algorithm)."""

    def __init__(self, k):
        self.k = k          # number of clusters
        self.means = None   # cluster means; populated by train()

    def classify(self, input):
        """Return the index of the cluster whose mean is closest to input."""
        return min(range(self.k),
                   key=lambda i: squared_distance(input, self.means[i]))

    def train(self, inputs):
        """Alternate assignment and update steps until assignments stabilise."""
        # Seed the means with k randomly chosen input points.
        self.means = random.sample(inputs, self.k)
        assignments = None
        while True:
            new_assignments = [self.classify(point) for point in inputs]
            if new_assignments == assignments:
                # No point changed cluster: converged.
                return
            assignments = new_assignments
            for i in range(self.k):
                members = [p for p, a in zip(inputs, assignments) if a == i]
                # Skip empty clusters to avoid dividing by zero.
                if members:
                    self.means[i] = vector_mean(members)
def squared_clustering_errors(inputs, k):
    """Total squared error from k-means clustering `inputs` into k clusters."""
    clusterer = KMeans(k)
    clusterer.train(inputs)
    assignments = [clusterer.classify(point) for point in inputs]
    return sum(squared_distance(point, clusterer.means[cluster])
               for point, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(inputs):
    """Plot total squared clustering error against k = 1..19 (elbow plot)."""
    ks = range(1, 20)
    errors = []
    for k in ks:
        errors.append(squared_clustering_errors(inputs, k))
    plt.plot(ks, errors)
    plt.xticks(ks)
    plt.xlabel("k")
    plt.ylabel("total squared error")
    plt.show()
def rms(data, x_mean, y_mean, count):
    """
    Calculates the root mean squared error in the data. More here:
    http://statweb.stanford.edu/~susan/courses/s60/split/node60.html
    For now this is the metric we use to evaluate clustering; it may need
    to be improved to account for how well the points cluster around 'hubs'.

    data: array of datum dicts, each with a 'coordinates' [x, y] entry
    x_mean: mean of the x coordinates
    y_mean: mean of the y coordinates
    count: number of samples in the data array
    """
    total = sum((d['coordinates'][0] - x_mean) ** 2 +
                (d['coordinates'][1] - y_mean) ** 2
                for d in data)
    return (total / count) ** 0.5
def get_stop_words():
    """Load the stop-word set from 'stopwords.csv'.

    BUG FIX: the original rebound the set on every CSV row, so only the
    last row survived; all rows are now combined into one set.  Returns
    None when the file yields no words, preserving the original falsy
    contract for callers.
    """
    stop_word_set = set()
    with open('stopwords.csv', 'r') as f:
        for row in csv.reader(f):
            stop_word_set.update(row)
    if stop_word_set:
        return stop_word_set
    return None
def to_json(data_array):
    """Write the data points to 'geo_data.json' as a GeoJSON FeatureCollection."""
    # One GeoJSON Feature per datum: a Point geometry plus text/timestamp
    # properties, matching what the map front-end expects.
    features = [
        {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": d['coordinates']},
            "properties": {
                "text": d['text'],
                "created_at": d['created_at']
            }
        }
        for d in data_array
    ]
    geo_data = {
        "type": "FeatureCollection",
        "features": features
    }
    with open('geo_data.json', 'w') as fout:
        fout.write(json.dumps(geo_data, indent=4))
def main():
    """Load pickled tweets, extract their coordinates, and k-means cluster them."""
    stop_word_set = get_stop_words()  # NOTE(review): unused by the active code below
    # NOTE(review): hard-coded absolute path - parameterize for portability.
    f = open('/Users/calvin/Documents/Lehigh/English/Research/data/cap1.pkl', 'rb')
    unpickler = pkl.Unpickler(f)
    data_array = []
    count = 0
    x_sum = 0
    y_sum = 0
    #pull out the first 10000 tweets, note this is easy to change, but speed and space
    #concerns make this limited. I think that doing a random sample would be better
    # NOTE(review): the bound below is actually 100000, not 10000 as the
    # comment says; also pkl.load(f) is mixed with unpickler.load() in the
    # error branch - confirm both read the same stream position correctly.
    for x in range(0,100000):
        try:
            dd = pkl.load(f)
        except EOFError:
            break
        except Exception:
            # NOTE(review): broad except silently skips the bad record.
            print(count)
            count += 1
            unpickler.load()
            continue
        else:
            #right now we just take the first coordinate in the bounding box as the actual
            #we could average to find the middle, but this seems good enough for now
            # NOTE(review): `== None` should be `is None`.
            if dd['coordinates'] == None:
                if dd['place'] == None:
                    continue
                dd['coordinates'] = dd['place']['bounding_box']['coordinates'][0][0]
            else:
                #account for edge case where coordinates are wrapped
                dd['coordinates'] = dd['coordinates']['coordinates']
            #count how many samples we take
            count += 1
            # print(dd.keys())
            # print(dd)
            #sum up the coordinate values
            x_sum += dd['coordinates'][0]
            y_sum += dd['coordinates'][1]
            #append the data point to the data array
            data_array.append(dd)
    #todo make it average the bounding box
    # x1 =
    # x2 =
    # y1 =
    # y2 =
    #take the mean of the x coordinates and y coordinates
    # x_mean = x_sum / count
    # y_mean = y_sum / count
    # text_list = []
    # print(rms(data_array,x_mean,y_mean,count))
    # for d in data_array:
    #     tok = d['text'].split()
    #     for w in tok:
    #         l = w.lower()
    #         if l in stop_word_set:
    #             continue
    #         text_list.append(l)
    #
    #
    # counts = Counter(text_list)
    # print(counts.most_common(15))
    # Cluster the extracted coordinates.
    inputs = []
    for d in data_array:
        inputs.append(d['coordinates'])
    plot_squared_clustering_errors(inputs)
    cluster = KMeans(20)
    cluster.train(inputs)
    print(cluster.means)
    #
    # cluster_data = []
    # for d in data_array:
    #     if cluster.classify(d['coordinates']) == 0:
    #         cluster_data.append(d)
    # print(len(cluster_data))
    #
    # to_json(cluster_data)


if __name__ == '__main__':
    main()
|
def iterator():  # iterator basics
    """Demonstrate iter()/next() and a class-based iterable with a
    depth-first generator traversal."""
    test = [1, 2, 3, 4]
    test = iter(test)  # turn the list into an iterator
    print(next(test))  # prints 1
    print(next(test))  # prints 2

    class TestIter:
        def __init__(self, value):
            self._value = value
            self._children = []

        def __repr__(self):  # developer-facing representation; __str__ (used by print) is the user-facing one
            return 'test({!r})'.format(self._value)  # !r formats the value with repr()

        def add_child(self, num):
            self._children.append(num)

        def __iter__(self):
            return iter(self._children)  # must return an iterator

        def depth_first(self):  # yield this node itself, then recurse into each child
            yield self
            for c in self:
                yield from c.depth_first()  # delegate to the child's depth_first() generator
        '''def __str__(self): # 返回值为string形式
            return "value__" + str(self._value)'''
    root = TestIter(0)
    root.add_child(11)  # plain int child: iterating root yields 11
    root.add_child(TestIter(22))
    for ch in root:
        print(ch)  # children print via __repr__ (TestIter) or as plain ints
    print(root, '\n')  # would print value__0 if the commented-out __str__ were enabled
    root = TestIter(0)
    child1 = TestIter(1)
    child2 = TestIter(2)
    root.add_child(child1)
    child1.add_child(TestIter(3))
    child2.add_child(TestIter(4))
    root.add_child(child2)
    child2.add_child(TestIter(5))
    # Tree: root(0) has children child1(1) and child2(2); child1 contains 3;
    # child2 contains 4 and 5.
    for ch in root.depth_first():
        print(ch)
    # depth_first yields root(0) first, then recurses into child1 and child2 in order.
def new_iterator():
    """Demonstrate a generator function used like range() with float steps."""
    def frange(start, end, step):  # custom iterator (generator function)
        x = start
        while x < end:
            yield x
            x += step
    for n in frange(0, 4, 0.5):
        print(n)
    print(list(frange(1, 3, 0.5)))
def back_iter():  # reverse iteration
    """Demonstrate reversed() on a list and on a class defining __reversed__."""
    a = [1, 2, 3, 4]
    for i in reversed(a):  # reversed() needs a sequence or an object with __reversed__
        print(i)

    class Countdown:  # custom class supporting both directions of iteration
        def __init__(self, start):
            self.start = start

        def __iter__(self):  # forward iteration counts down: start, ..., 1
            n = self.start
            while n > 0:
                yield n
                n -= 1

        def __reversed__(self):  # reverse iteration counts up: 1, ..., start
            n = 1
            while n <= self.start:
                yield n
                n += 1
    for rr in reversed(Countdown(5)):  # __reversed__ counts up, so this prints 1..5
        print(rr)
    for rr in Countdown(5):  # __iter__ counts down, so this prints 5..1
        print(rr)
def interaction_iter():  # an iterable that also exposes state to outside code
    """Demonstrate __iter__ on a class that records recent history while iterating."""
    from collections import deque

    class LineHistory:
        def __init__(self, lines, histlen=3):
            self.lines = lines
            self.history = deque(maxlen=histlen)  # bounded queue of the last 3 lines

        def __iter__(self):
            for lineno, line in enumerate(self.lines, 1):  # numbering starts at 1
                self.history.append((lineno, line))  # record (number, line) tuples
                yield line  # hand the current line to the caller

        def clear(self):
            self.history.clear()
    f = ["I love python", "python is the best language", "python is beautiful", "lisp is too complex",
         "let us use python"]
    lines = LineHistory(f)  # __iter__ uses enumerate to pair each line with its number
    for line in lines:  # each step pushes into the bounded history deque
        if 'python' in line:  # when the current line matches, dump the history window
            for lineno, hline in lines.history:
                print('{}:{}'.format(lineno, hline), end='\n')
def iter_slice():  # slicing an (infinite) iterator
    import itertools
    def count(n):
        # Infinite counter starting at n.
        while 1:
            yield n
            n += 1
    test = count(0)
    for x in itertools.islice(test, 10, 20):  # takes items 10..19 of the stream
        print(x)
    text = ["I love python", "python is the best language", "python is beautiful", "lisp is too complex",
            "let us use python"]
    for line in itertools.dropwhile(lambda line: 'python' in line, text):
        # dropwhile skips only the leading run of items matching the predicate, then yields
        # everything that follows — including later items that match the predicate again
        print(line)  # prints "lisp is too complex" and "let us use python"
def permutation():  # permutations and combinations
    items1 = ['a', 'b', 'c']
    items2 = ('a', 'b', 'c')
    items3 = {'a': 1, 'b': 2}
    from itertools import permutations, combinations
    for x in permutations(items1, 2):  # input may be list/tuple/dict (dicts yield keys only);
        print(x)                       # the second argument limits results to that length
    for x in combinations(items2, 2):  # combinations (order-insensitive)
        print(x)
def serial_num():
    """Number items while iterating: enumerate() with a custom start index."""
    letters = ['a', 'b', 'c', 'd']
    for position, letter in enumerate(letters, start=1):  # numbering starts at 1
        print(position, letter)
def multiple_iter():  # iterating several iterables in lockstep
    x = [1, 2, 3, 4, 5, 6]
    y = [11, 22, 33, 44, 55]
    z = ['a', 'b', 'c', 'd', 'e']
    for a, b in zip(x, y):  # zip stops at the shortest input
        print(a, b)
    from itertools import zip_longest
    for a, b in zip_longest(x, y):  # zip_longest runs to the longest input (padding with None)
        print(a, b)
    print(dict(zip(y, z)))  # pack y and z into a dict
    # zip only builds an iterator; use list(zip(...)) if the pairs must be kept
    from itertools import chain
    for a in chain(x, y):  # chain iterates x then y as one combined sequence
        print(a)
def pipe_data():
    """Generator pipeline: find files -> open -> flatten lines -> grep."""
    import os
    import fnmatch
    import re
    def gen_find(filepat, top):
        # os.walk yields (root, dirs, files) for every directory under `top`.
        for path, dirlist, filelist in os.walk(top):
            for name in fnmatch.filter(filelist, filepat):  # keep names matching the glob pattern
                yield os.path.join(path, name)  # join directory and file name into a full path
    def gen_opener(filenames):  # open each file lazily, closing it once the consumer moves on
        for filename in filenames:
            f = open(filename, 'rt')
            yield f
            f.close()
    def gen_concatenate(iterators):
        for it in iterators:
            yield from it  # flatten: yield every item of each sub-iterator
    def gen_grep(lines):  # filter step of the pipeline
        for line in lines:
            if 'capture' in line:
                yield line
    lognames = gen_find('*.txt', 'C:\\Users\\Ronnie Yang\\PycharmProjects\\notes\\python_cookbook')
    files = gen_opener(lognames)
    lines = gen_concatenate(files)
    pylines = gen_grep(lines)
    for line in pylines:
        print(line)
def yield_from():  # 'yield from': delegate to a sub-generator (here: recursion)
    from collections.abc import Iterable
    def flatten(items, ignore_types=(str, bytes)):
        for x in items:
            if isinstance(x, Iterable) and not isinstance(x, ignore_types):
                # x is an iterable container (strings/bytes excluded): recurse into it
                yield from flatten(x)
            else:
                yield x  # atomic item: emit it directly
    items = [1, 2, [3, 4, ['five', 'six'], 7], 8]
    for x in flatten(items):
        print(x)
def merge():  # heapq.merge shines on long inputs: it never materialises the whole sequence
    import heapq
    a = [1, 3, 7, 8, 13]
    b = [2, 6, 10, 11]
    for c in heapq.merge(a, b):  # inputs must already be sorted
        print(c)  # prints 1,2,3,6,7,8,10,11,13
    c = [6, 4, 2]
    for c in heapq.merge(a, c):  # with an unsorted input the output is NOT fully sorted
        print(c)  # prints 1,3,6,4,2,7,8,13
|
from django.contrib import admin
from django.urls import path,include
from . import views
from django.views.generic.base import RedirectView
# URL routes for the student CRUD views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('delete/<int:id>/', views.delete_data, name='deletestudent'),
    # Fixed: the route name previously ended in a stray space ("updatedata "),
    # which makes reverse('updatedata') / {% url 'updatedata' %} fail.
    path('update/<int:id>/', views.update_data, name='updatedata'),
]
# Python has 3 loop-control statements:
# pass: a no-op placeholder, usable in conditionals, loops and functions
# break: exits the loop when its condition is true
# continue: skips the rest of the current iteration and returns to the top of the loop
# Collections are derived data types that store multiple values.
# There are five collection types:
# 1. lists
# 2. tuples
# 3. sets
# 4. ranges
# 5. dictionaries
# Lists can be indexed, sliced, concatenated and iterated.
# They are heterogeneous (elements may have different types),
# ordered (positions 1..n are preserved),
# and mutable data types that store multiple values.
# List syntax:
# <listname> = [<ele1>, <ele2>, ...]
# Stack vs queue:
# -> a stack is LIFO (last in, first out)
# -> a queue is FIFO (first in, first out)
# Simple ATM menu demo driven by stdin.
bal = 2000
pin = int(input('Please Enter You 4 Digit Pin: '))
if pin == 1234:
    print("1.savings")
    print("2.current")
    print("3.pin change")
    print("4.balance Enq")
    print("5.update ur account")
    val = int(input("enter correct option:"))
    if val == 1:
        money = int(input("enter amount to withdraw:"))
        # Fixed: check funds and keep the balance consistent with the withdrawal.
        if money <= bal:
            bal -= money
            print("collect your cash:%d" % (money))
        else:
            print("insufficient balance")
    elif val == 2:
        money = int(input("enter current account money to withdraw:"))
        if money <= bal:
            bal -= money
            print("collect your cash:%d" % (money))
        else:
            print("insufficient balance")
    elif val == 3:
        oldpin = int(input("enter your old pin:"))
        newpin = int(input("enter your new pin:"))
        conpin = int(input("Re-enter your new pin:"))
        # Fixed: confirm the old pin matches and both new entries agree
        # before reporting success.
        if oldpin == pin and newpin == conpin:
            pin = newpin
            print("your pin is change")
        else:
            print("pin change failed")
    elif val == 4:
        print("your balance is: %d" % (bal))
    elif val == 5:
        print("1.update ur pin")
        print("2.update ur contact")
        print("3.update ur profile")
    else:
        # Fixed: this branch handles an invalid menu option, not a bad pin.
        print("enter valid option")
else:
    # Fixed: a wrong pin previously fell through silently.
    print("enter valid pin")
from django.contrib.auth.backends import ModelBackend
import re
from .models import User
def get_user_by_accoutn(account):
    """Look up a User by account identifier.

    `account` may be either a Chinese mobile number or a username; a regex
    decides which field to match on.

    :param account: mobile number or username string
    :return: the matching User, or None when no user exists
    """
    try:
        # Raw string fixes the invalid-escape-sequence warning for \d.
        if re.match(r'^1[3-9]\d{9}$', account):
            # Looks like a mobile number: match the mobile field.
            user = User.objects.get(mobile=account)
        else:
            # Otherwise treat it as a username.
            user = User.objects.get(username=account)
    except User.DoesNotExist:
        return None
    else:
        return user
class UsernameMobileAuthBackend(ModelBackend):
    '''Custom authentication backend: accepts username OR mobile number.'''
    def authenticate(self ,request, username=None, password=None,**kwargs ):
        '''Override the default authentication to allow multi-account login.'''
        # `username` may be either a mobile number or an ordinary username.
        user = get_user_by_accoutn(username)
        # Return the user only when it exists and the password checks out;
        # otherwise fall through, implicitly returning None (auth failure).
        if user and user.check_password(password):
            return user
import sonnet as snt
import tensorflow as tf
# Smoke-test call of Sonnet's resampler with trivial constant inputs.
# NOTE(review): `snt._resampler` is an underscore-prefixed (non-public) name —
# confirm this is intentional rather than the public resampler API.
a = snt._resampler(tf.constant([0.]),tf.constant([0.]))
print(a)
import re
import numpy as np
import pandas as pd
import operator
# This is used to coalesce all the listings that are not in a cluster
# into the existing clusters: for each singleton listing it finds the
# best-matching named cluster (Jaccard similarity over title words)
# and reassigns the listing's cluster_id accordingly.
combined_listings = pd.read_csv("current_target_out.csv")

df = pd.DataFrame(columns=['cluster_id', 'cluster_name', 'title', 'total_price', 'condition', 'details', 'num_images', 'target'])

# Copy the relevant columns of every listing into a working frame.
for index, row in combined_listings.iterrows():
    current_cluster_id = row.iloc[0]
    current_title = row.iloc[2]
    current_total_price = row.iloc[3]
    current_cond = row.iloc[4]
    current_details = row.iloc[5]
    current_num_images = row.iloc[6]
    current_target = row.iloc[7]
    current_cluster_name = row.iloc[11]
    new_series = [current_cluster_id, current_cluster_name, current_title, current_total_price,
                  current_cond, current_details, current_num_images, current_target]
    df.loc[len(df) + 1] = new_series

# Fixed: DataFrame.sort() was removed from pandas (0.20+);
# sort_values(by=...) is the supported replacement.
df.sort_values(by="cluster_id", inplace=True)

# Largest cluster id (the frame is now sorted by cluster_id).
max_value = df.iloc[len(df) - 1, 0]

max_cluster_val = -1       # id of the last "real" (multi-listing) cluster
best_cluster_accuracy = 0  # best Jaccard score seen for the current singleton
best_cluster_name = ""
best_cluster_id = 0
running_count = 0          # row position within the sorted frame

# Marketplace boilerplate stripped from titles before word-set comparison.
# (The whitespace-cleanup regexes of the original were dropped: the later
# split() is whitespace-insensitive, so they had no effect on the result.)
NOISE_PATTERNS = [
    r'complete', r'cib', r'nib', r'complete\s+in\+box', r'sega saturn',
    r'manual', r'original case', r'amp', r'excellent condition',
    r'free.*shipping', r'in great condition', r'game', r'disc only',
    r'disk only', r'box', r'near mint', r'rare', r'\s+for\s+',
]

for i in range(max_value + 1):
    current_cluster = df[df["cluster_id"] == i]
    running_count += len(current_cluster)
    if len(current_cluster) == 1:
        if max_cluster_val == -1:
            # The first singleton marks the end of the real clusters.
            max_cluster_val = i - 1
        # Normalise the title: lowercase, drop special characters and
        # boilerplate, then treat it as a bag of words.
        current_name = current_cluster.iloc[0, 2].lower()
        current_name = re.sub(r'[^a-zA-Z0-9_\s]', ' ', current_name)
        for pattern in NOISE_PATTERNS:
            current_name = re.sub(pattern, ' ', current_name)
        current_name = set(current_name.split())
        # Score against every real cluster's name with Jaccard similarity.
        for j in range(max_cluster_val + 1):
            clusters = df[df["cluster_id"] == j]
            cluster_name = clusters.iloc[0, 1]
            cluster_name = re.sub(r'sega saturn', ' ', cluster_name)
            cluster_name = set(cluster_name.split())
            union_set = cluster_name.union(current_name)
            intersection_set = cluster_name.intersection(current_name)
            current_percent = float(len(intersection_set)) / float(len(union_set))
            # Jaccard distance: keep the best-overlapping cluster.
            if current_percent > best_cluster_accuracy:
                best_cluster_accuracy = current_percent
                best_cluster_name = cluster_name
                best_cluster_id = j
        # Reassign the singleton to the best-matching cluster and reset state.
        df.iloc[running_count - 1, 0] = best_cluster_id
        best_cluster_accuracy = 0
        best_cluster_name = ""
        best_cluster_id = 0

df.sort_values(by="cluster_id", inplace=True)
df.to_csv("current_cluster_fix.csv")
from rest_framework import serializers
from person.models import Permission
class PermissionSerializer(serializers.ModelSerializer):
    """Serializer for Permission objects exposing a computed ``object_id``."""
    object_id = serializers.SerializerMethodField()
    def get_object_id(self, obj):
        """
        Return the serialized object id for the given Permission.

        :param obj: the Permission instance being serialized
        :return: the value of ``obj.prepare_object_id()``
        """
        return obj.prepare_object_id()
    class Meta:
        model = Permission
        exclude = ("id", "user")
|
# Generated by Django 3.0.3 on 2020-02-11 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Viewable.enables -> enabled; set Screen.screen_pin default to 2257."""

    dependencies = [
        ('runner', '0009_auto_20200211_2104'),
    ]
    operations = [
        migrations.RenameField(
            model_name='viewable',
            old_name='enables',
            new_name='enabled',
        ),
        migrations.AlterField(
            model_name='screen',
            name='screen_pin',
            field=models.IntegerField(default=2257),
        ),
    ]
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from datetime import datetime as dt
# Keys of the flags to ignore when dumping options (auto-generated help flags).
ignore_keys = set(["h", "help", "helpfull", "helpshort"])
def get_options():
    """Define the command-line flags and return the parsed FLAGS object.

    Uses the TF1-era ``tf.app.flags`` registry; the flags are process-global,
    so this should be called only once per process.
    """
    tf.app.flags.DEFINE_string("save_dir", "saved",
                               "checkpoints,log,options save directory")
    tf.app.flags.DEFINE_float("learning_rate", 1e-4, "learning rate")
    tf.app.flags.DEFINE_integer("steps", 10 * (10**5), "training steps")
    tf.app.flags.DEFINE_integer("save_interval", 5000, "saving interval")
    tf.app.flags.DEFINE_integer("test_interval", 10000, "test interval")
    tf.app.flags.DEFINE_integer("batch_size", 10, "batch size")
    tf.app.flags.DEFINE_boolean("training", True, "whether to train or not")
    tf.app.flags.DEFINE_string("desc", "normal experiment", "experiment description")
    return tf.app.flags.FLAGS
def save_flags(flags):
    """Write every flag value to ``<save_dir>/options.txt``.

    The file starts with a timestamp comment, followed by one ``key=value``
    line per flag (sorted by key). Help-related pseudo-flags listed in
    ``ignore_keys`` are skipped.

    :param flags: parsed FLAGS object (as returned by get_options())
    """
    dic = flags.__flags
    lines = []
    # Record current time as a comment header.
    time_str = dt.now().strftime('# %Y-%m-%d %H:%M')
    lines.append(time_str + "\n")
    for key in sorted(dic.keys()):
        if key in ignore_keys:
            # Keys like "helpfull" are ignored.
            continue
        value = dic[key].value
        lines.append("{}={}\n".format(key, value))
    file_name = flags.save_dir + "/options.txt"
    # Fixed: use a context manager so the file is closed even if writing fails.
    with open(file_name, "w") as f:
        f.writelines(lines)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the single ir_target_pos FK with primary/secondary IR target
    position FKs, both pointing at auvsi_suas.GpsPosition."""

    dependencies = [('auvsi_suas', '0001_initial'), ]
    operations = [
        migrations.RemoveField(model_name='missionconfig',
                               name='ir_target_pos', ),
        migrations.AddField(
            model_name='missionconfig',
            name='ir_primary_target_pos',
            field=models.ForeignKey(related_name=
                                    'missionconfig_ir_primary_target_pos',
                                    to='auvsi_suas.GpsPosition'),
            preserve_default=False, ),
        migrations.AddField(
            model_name='missionconfig',
            name='ir_secondary_target_pos',
            field=models.ForeignKey(related_name=
                                    'missionconfig_ir_secondary_target_pos',
                                    to='auvsi_suas.GpsPosition'),
            preserve_default=False, ),
    ]
|
import mnist_input
import numpy as np
import tensorflow as tf

mnist = mnist_input.read_data_sets('MNIST_data', one_hot=True)

import mnist_noise_model
from importlib import reload
reload(mnist_noise_model)

checkpoint_path = "save_models/robust_model/"

x, y_ = mnist_noise_model.place_holders()
y_conv, keep_prob, variable_dict = mnist_noise_model.model(x)

################# Sess ##############################
# Clip predictions away from 0 to avoid 0*log(0) = NaN in the cross entropy.
cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Fixed: the Saver was commented out but saver.save() is called in the
# training loop below, which raised NameError on the first checkpoint.
saver = tf.train.Saver(variable_dict)

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

absolute_sums = []
for variable in variable_dict.values():
    absolute_sums.append(tf.reduce_sum(tf.abs(variable)))

################# Validation Accuracy ##############################
def print_test_accuracy(test_acc_list):
    """Evaluate accuracy over the whole test set in batches of 500;
    append the result to test_acc_list and print it."""
    idx = 0
    batch_size = 500
    num_correct = 0
    while idx < len(mnist.test.images):
        num_correct += np.sum(correct_prediction.eval(feed_dict={
            x: mnist.test.images[idx:idx+batch_size],
            y_: mnist.test.labels[idx:idx+batch_size], keep_prob: 1.0
        }))
        idx += batch_size
    test_acc = float(num_correct)/float(len(mnist.test.images))
    test_acc_list.append(test_acc)
    print("test accuracy: %f" % (test_acc))

################# Training ##############################
test_acc_list = []
train_acc_list = []
for i in range(7000):
    batch = mnist.train.next_batch(128)
    # Every 100 batches: save a checkpoint and record test/train accuracy.
    if i % 100 == 0:
        print_test_accuracy(test_acc_list)
        saver.save(sess, checkpoint_path)
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        train_acc_list.append(train_accuracy)
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

################# Save Model ##############################
import pickle
with open('test_acc_list.p', 'wb') as fp:
    pickle.dump(test_acc_list, fp)
# Fixed: the second dump previously re-saved test_acc_list to the same file;
# it now stores the training-accuracy history in its own file.
with open('train_acc_list.p', 'wb') as fp:
    pickle.dump(train_acc_list, fp)
|
import numpy as np
import pickle  # fixed: cPickle is Python 2 only
from scipy import ndimage

age = 1E8     # --- duration of constant SF / yr
n = 100000    # --- number of star particles
Z = 0.001     # --- metallicity

ages = age*np.random.random(n)   # --- assume ages are uniformly spread over the age of the galaxy
metallicities = Z*np.ones(n)     # --- assume they all have the same metallicity
mass = age/float(n)              # --- mass of each star particle to give SFR = 1 Msol/yr

grid = 'BPASSv2.1.binary_ModSalpeter_300'
f = 'FAKE.FAKE.1500'
print('--------', f)

# Fixed: pickle files must be opened in binary mode ('rb'), not text mode;
# the context manager also guarantees the grid file is closed.
with open('../'+grid+'/FILTERS/'+f+'.p', 'rb') as grid_file:
    L_grid = grid_file and pickle.load(grid_file)  # --- open grid

L = {}
for t in ['total', 'stellar', 'nebular', 'nebular_continuum']:
    L[t] = 0.0

for age, metallicity in zip(ages, metallicities):
    p = {'log10age': np.log10(age), 'log10Z': np.log10(metallicity)}
    # Fractional grid indices used in the interpolation below.
    params = [[np.interp(p[parameter], L_grid[parameter], range(len(L_grid[parameter])))]
              for parameter in ['log10age', 'log10Z']]
    for t in ['total', 'stellar', 'nebular', 'nebular_continuum']:
        L[t] += mass * ndimage.map_coordinates(L_grid[t], params, order=1)[0]  # interpolate grid

for t in ['total', 'stellar', 'nebular', 'nebular_continuum']:
    print(t, L[t])  # --- print luminosity from each component
print('-----')
print('ratio of nebular to total:', L['nebular']/L['total'])
# --- determine the approximate SFR calibration for the FUV. Based on Kennicutt & Evans 2012
# this should be around 10^43.35 for 100 Myr of constant SF.
print('FUV SFR calibration:', np.log10(L['total']*3E8/(1500E-10)))
from typing import Set, List
import numpy as np
import pandas
from Siamese.data_types import PatchDesc, TupleInt
from Siamese.feature_processing import EnsembleFeatureMetadata
def compute_query_ranking_metrics(supportFeatureNames: Set[str], patchesSorted: List[PatchDesc],
                                  metadataFiltered: EnsembleFeatureMetadata, patchShape: TupleInt,
                                  precisionRanks: List[int]):
    """Compute coverage and precision metrics for a ranked list of patches.

    Each ranked patch is labeled 'exact' (its feature set equals the support
    set), 'partial' (non-empty intersection with it) or 'false'. Precision is
    then reported at the fixed cutoffs in ``precisionRanks`` and at the
    coverage cutoffs returned by the metadata object.

    Returns a dict mapping metric name (e.g. 'CnE', 'P@10E') to its value.
    """
    # Computing feature overlap.
    featureCoverageExact, patchesForCoverageExact = metadataFiltered.get_total_feature_coverage(
        supportFeatureNames, patchShape, patchesSorted, isExactNotPartial=True
    )
    featureCoveragePartial, patchesForCoveragePartial = metadataFiltered.get_total_feature_coverage(
        supportFeatureNames, patchShape, patchesSorted, isExactNotPartial=False
    )
    # For all target patches (ranked matches), check if it matches the feature from the support set.
    matches = []
    for patch in patchesSorted:
        targetFeatureNames = metadataFiltered.get_patch_features(patch, patchShape)
        if targetFeatureNames == supportFeatureNames:
            matches.append('exact')
        elif len(targetFeatureNames & supportFeatureNames) > 0:
            matches.append('partial')
        else:
            matches.append('false')
    # Compute the precision and the recall at various ranking cutoff values (top 10, top 50, etc.).
    exactPrecisions, exactRecalls = [], []
    partialPrecisions, partialRecalls = [], []
    def _compute_precisions(rank):
        # Returns (exact%, partial%) within the top `rank` matches.
        # This can happen when we compute precision-at-coverage, with no exact coverage possible.
        if rank == 0:
            return 0, 0
        countExact = sum(1 for match in matches[:rank] if match == 'exact')
        countPartial = sum(1 for match in matches[:rank] if match in ['exact', 'partial'])
        exact = countExact / rank * 100
        partial = countPartial / rank * 100
        return exact, partial
    for precisionRank in precisionRanks:
        pExact, pPartial = _compute_precisions(precisionRank)
        exactPrecisions.append(pExact)
        partialPrecisions.append(pPartial)
    precisionAtCoverageExact, _ = _compute_precisions(patchesForCoverageExact)
    _, precisionAtCoveragePartial = _compute_precisions(patchesForCoveragePartial)
    return {
        'CnE': patchesForCoverageExact,
        'CnP': patchesForCoveragePartial,
        'C%E': featureCoverageExact,
        'C%P': featureCoveragePartial,
        'P@CnE': precisionAtCoverageExact,
        'P@CnP': precisionAtCoveragePartial,
        **dict(zip(['P@{}E'.format(p) for p in precisionRanks], exactPrecisions)),
        **dict(zip(['P@{}P'.format(p) for p in precisionRanks], partialPrecisions)),
    }
def aggregate_metrics_table(tableGrouped):
    """Collapse each group of a grouped metrics table into a single row.

    Non-numeric columns are taken from the group's first row (assumed
    identical within a group); every numeric column ``c`` is replaced by
    its mean (``c-m``) and standard deviation (``c-s``).
    """
    def _squash(group):
        # Text columns: copy from the first row of the group.
        head = group.select_dtypes(exclude=np.number).iloc[0]
        numeric = group.select_dtypes(include=np.number)
        avg = numeric.mean().rename(lambda col: col + '-m')
        spread = numeric.std().rename(lambda col: col + '-s')
        return pandas.concat((head, avg, spread))

    return pandas.DataFrame([_squash(group) for _, group in tableGrouped])
|
## Two trees are considered "leaf-similar" if the order of their leaves is the same from left to right.
## This function uses DFS to check if two given trees are "leaf-similar".
class Node:
    """Binary-tree node carrying a value and a DFS 'visited' flag."""

    def __init__(self, val, visited):
        # Children start absent; callers attach them after construction.
        self.left = None
        self.right = None
        self.val = val          # payload value
        self.visited = visited  # whether DFS has already processed this node
# Global list collecting leaf values across leafSimilar's recursive calls;
# reset it to [] between trees.
leaves = []
# This uses DFS instead of BFS since we're looking to match the leaves
# in exact left to right order.
def leafSimilar(root):
    """Append the leaf values of `root` to the global ``leaves`` list in
    left-to-right (pre-order DFS) order.

    Returns "" for an empty subtree (kept for backward compatibility);
    otherwise returns None and reports results through ``leaves``.
    """
    global leaves
    if root is None:  # idiom fix: identity test for None, not ==
        return ""
    stack = [root]
    root.visited = True
    if root.left is None and root.right is None:
        # No children: this node is a leaf, record its value.
        leaves.append(root.val)
    else:
        # Push left before right so leaves come out left-to-right.
        if root.left is not None:
            stack.append(root.left)
        if root.right is not None:
            stack.append(root.right)
    # Recurse into the unvisited children (stack[0] is root, already visited).
    for node in stack:
        if not node.visited:  # idiom fix: truthiness instead of == False
            node.visited = True
            leafSimilar(node)
if __name__ == "__main__":
    # Tree 1:
    #      3
    #     5 1
    #    6 2
    # leaf order: [6, 2, 1]
    rootOne = Node(3, False)
    rootOne.left = Node(5, False)
    rootOne.right = Node(1, False)
    rootOne.left.left = Node(6, False)
    rootOne.left.right = Node(2, False)
    leafSimilar(rootOne)
    # Keep a reference to the collected leaves, then rebind the global to a
    # fresh list for the next tree (the old list stays intact via leafOne).
    leafOne = leaves
    leaves = []
    # Tree 2:
    #      7
    #     2 1
    #    6 2
    # leaf order: [6, 2, 1]
    rootTwo = Node(7, False)
    rootTwo.left = Node(2, False)
    rootTwo.right = Node(1, False)
    rootTwo.left.left = Node(6, False)
    rootTwo.left.right = Node(2, False)
    leafSimilar(rootTwo)
    leafTwo = leaves
    leaves = []
    # Ternary-by-indexing trick: (false_value, true_value)[bool_test]
    print(("False", "True")[leafOne == leafTwo])
    print(leafOne, leafTwo)
import pickle
import sys
from src.configuration import Configuration
def pack_args(population, server_id, config: Configuration):
    """ Compiles a list of arguments for parallel training """
    pickled_config = pickle.dumps(config)
    devices = config.servers[server_id].devices
    # One argument bucket per device on this server:
    buckets = [[] for _ in devices]

    # Estimate the training cost (epoch count) of every individual:
    estimates = []
    for individual in population:
        if config.training.fixed_epochs:
            cost = config.training.epochs
        else:
            cost = int(individual.number_of_operations() * config.training.epochs)
        estimates.append(cost if cost > 0 else 1)
    total_epochs = sum(estimates)

    # Greedy balancing: hand the largest remaining job to the next device
    # in round-robin order, then mark it as taken.
    for slot in range(len(population)):
        device_index = slot % len(devices)
        heaviest = estimates.index(max(estimates))
        buckets[device_index].append((
            pickle.dumps(population[heaviest]),
            pickled_config,
            int(estimates[heaviest]),
            server_id,
            device_index,
            heaviest,
        ))
        estimates[heaviest] = -sys.maxsize  # assigned: never picked again

    return buckets, total_epochs
|
# In-place merge sort over the module-level list `a`.
a = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]

def merge(l, m, r):
    """Merge the sorted slices a[l:m+1] and a[m+1:r+1] back into a, in place."""
    global a
    x = a[l:m + 1]
    y = a[m + 1:r + 1]
    i = l
    while x and y:
        if x[0] < y[0]:
            a[i] = x.pop(0)
        else:
            a[i] = y.pop(0)
        i += 1
    # Drain whichever half still has elements.
    while x:
        a[i] = x.pop(0)
        i += 1
    while y:
        a[i] = y.pop(0)
        i += 1
    print(a)  # fixed: Python 3 print function (was a Python 2 print statement)

def merge_sort(l, r):
    """Recursively sort a[l:r+1] in place (inclusive bounds)."""
    global a
    if l >= r:
        return
    m = (l + r) // 2  # fixed: floor division keeps the midpoint an integer in Python 3
    merge_sort(l, m)
    merge_sort(m + 1, r)
    merge(l, m, r)

print(a)
merge_sort(0, len(a) - 1)
|
# File Name: dog_cat.py
# This class models dogs; each instance is one concrete dog.
class Dog(object):
    """A dog with a (conventionally) private name attribute.

    ``_name`` uses the single-underscore convention: outside callers are
    expected to use the accessors, though Python does not enforce it.
    """
    def __init__(self, name):
        self._name = name
    def get_name(self):
        """Return the dog's name."""
        return self._name
    def set_name(self, value):
        """Replace the dog's name."""
        self._name = value
    def say(self):
        """Print the dog's bark."""
        # Fixed: a space was missing between the name and the verb,
        # producing output like "Rexis making sound ...".
        print(self._name + ' is making sound wang wang wang...')
# This class models cats; each instance is one concrete cat.
class Cat(object):
    """A cat with a (conventionally) private name attribute."""
    def __init__(self, name):
        self._name = name
    def get_name(self):
        """Return the cat's name."""
        return self._name
    def set_name(self, value):
        """Replace the cat's name."""
        self._name = value
    def say(self):
        """Print the cat's meow."""
        # Fixed: a space was missing between the name and the verb.
        print(self._name + ' is making sound miu miu miu...')
from scipy import integrate, interpolate, fftpack
import numpy as np
import matplotlib.pyplot as plt
import pandas
import csv
class YAGTS_DataBrowser:
    """Browser/plotter for YAG Thomson-scattering oscilloscope shot data.

    Each shot is a Tektronix DPO4054B CSV export. Judging from the max/min
    handling below, CH1 carries a positive pulse and CH2-CH4 negative
    pulses — presumably laser-power reference vs scattering channels
    (TODO confirm with the experimenters).
    """
    def __init__(self, date, shotNo, shotSt):
        # date: measurement date; shotNo: shot number;
        # shotSt: first shot number (file-numbering offset).
        self.date = date
        self.shotNo = shotNo
        self.shotSt = shotSt
        #self.filepath = '/Volumes/share/DPO4054B/' + str(self.date) + '/tek' + str(self.shotNo-self.shotSt).zfill(4) + 'ALL.csv'
        self.filepath = '/Volumes/share/DPO4054B/tek' + str(self.shotNo-self.shotSt).zfill(4) + 'ALL.csv'
    def open_with_pandas(self):
        """Load the CSV via pandas; return (DataFrame, header list, values array)."""
        df = pandas.read_csv(self.filepath)
        header = df.columns.values.tolist()
        data = df.values
        return df, header, data
    def open_with_numpy(self):
        """Load the CSV as a plain array, skipping the 18-line scope header."""
        data = np.loadtxt(self.filepath, delimiter=',', skiprows=18)
        return data
    def show_graph(self, isIntegrate=False):
        """Plot the four baseline-corrected channels for this shot and save
        the figure as figure/YAGTS_<date>_<shotNo>.

        The baseline is estimated from the first 4000 samples (pre-trigger).
        NOTE(review): `isIntegrate` is currently unused.
        """
        data = self.open_with_numpy()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Column 0 is time in seconds (plotted in ns); CH1 is scaled by 5e-2.
        ax.plot(1e9*data[:, 0], 5.0e-2*(data[:, 1]-np.mean(data[:4000,1])), label='CH1')
        ax.plot(1e9*data[:, 0], data[:, 2]-np.mean(data[:4000,2]), label='CH2')
        ax.plot(1e9*data[:, 0], data[:, 3]-np.mean(data[:4000,3]), label='CH3')
        ax.plot(1e9*data[:, 0], data[:, 4]-np.mean(data[:4000,4]), label='CH4')
        ax.legend(loc='lower right')
        # Peak amplitudes in mV: CH1 via max (positive pulse), CH2-4 via min.
        max_ch1 = np.max(data[:, 1]-np.mean(data[:4000, 1]))*1.0e3
        min_ch2 = np.min(data[:, 2]-np.mean(data[:4000, 2]))*1.0e3
        min_ch3 = np.min(data[:, 3]-np.mean(data[:4000, 3]))*1.0e3
        min_ch4 = np.min(data[:, 4]-np.mean(data[:4000, 4]))*1.0e3
        ax.text(-1500, 5.0e-5*max_ch1, 'CH1: %.3f mV' % (max_ch1))
        ax.text(-1500, 1.0e-3*min_ch2, 'CH2: %.3f mV' % min_ch2)
        ax.text(-1500, 1.0e-3*min_ch3, 'CH3: %.3f mV' % min_ch3)
        ax.text(-1500, 1.0e-3*min_ch4, 'CH4: %.3f mV' % min_ch4)
        # Channel ratios, drawn in axes coordinates.
        ax.text(0.75, 0.45, 'CH2/CH3: %.3f' % (min_ch2/min_ch3), transform=ax.transAxes)
        ax.text(0.75, 0.4, 'CH2/CH4: %.3f' % (min_ch2/min_ch4), transform=ax.transAxes)
        ax.text(0.75, 0.35, 'CH3/CH4: %.3f' % (min_ch3/min_ch4), transform=ax.transAxes)
        plt.title("Date: %d, Shot No.: %d" % (self.date,self.shotNo), loc='right', fontsize=20, fontname="Times New Roman")
        print('CH1: %.5f V' % max_ch1)
        print('CH2: %.5f V' % min_ch2)
        print('CH3: %.5f V' % min_ch3)
        print('CH4: %.5f V' % min_ch4)
        #plt.plot(1e9*data[:, 0], data[:, 2])
        #plt.plot(1e9*data[:, 0], data[:, 3])
        #plt.plot(1e9*data[:, 0], data[:, 4])
        plt.xlim(-2000, 2000)
        plt.xlabel('Time [nsec]')
        plt.ylabel('Output[V]')
        filepath = "figure/"
        filename = "YAGTS_%d_%d" % (self.date, self.shotNo)
        plt.savefig(filepath + filename)
        plt.clf()
    def plot_shotlog(self, num_st, num_ed):
        """Plot channel-ratio trends from a cached shot log
        (YAGTS_log_<date>_<num_st>_<num_ed>.npz, written by make_shotlog).

        NOTE(review): the hand-entered pressure_mPa table below is currently
        unused by the active plotting code (only the commented-out variants
        referenced it).
        """
        plt.rcParams['xtick.direction'] = 'in'
        plt.rcParams['xtick.top'] = 'True'
        plt.rcParams['ytick.right'] = 'True'
        plt.rcParams['ytick.direction'] = 'in'
        filename = "YAGTS_log_%d_%d_%d.npz" % (self.date, num_st, num_ed)
        shot_log = np.load(filename)
        file_num = shot_log['file_num']
        max_ch1 = np.array(shot_log['max_ch1'])
        min_ch2 = np.array(shot_log['min_ch2'])
        min_ch3 = np.array(shot_log['min_ch3'])
        min_ch4 = np.array(shot_log['min_ch4'])
        #shot_list_1 = np.array([30, 53, 61, 64, 66, 68, 69, 72, 73, 75, 76, 78, 91, 92, 94, 96, 97, 106])
        #pressure_mPa_1 = np.array([3, 3, 3, 3, 3, 3, 3, 0.7, 0.7, 0.7, 0.7, 5, 7, 7, 7, 7, 7, 0.7])
        #shot_list_2 = np.arange(32, 56)
        #pressure_mPa_2 = np.zeros(56-32)
        #shot_list = np.r_[shot_list_1, shot_list_2]
        #pressure_mPa = np.r_[pressure_mPa_1, pressure_mPa_2]
        # Per-shot chamber pressures entered by hand (negative = unknown/bad shot?
        # TODO confirm semantics with the shot log book).
        pressure_mPa = np.zeros(120)
        pressure_mPa[2:11] = -2.0
        pressure_mPa[14:24] = 1.0
        pressure_mPa[24:27] = 2.0
        pressure_mPa[27:29] = 3.0
        pressure_mPa[30:32] = 3.0
        pressure_mPa[56:70] = 3.0
        pressure_mPa[70:77] = 0.7
        pressure_mPa[77:85] = 5.0
        pressure_mPa[85:104] = 7.0
        pressure_mPa[104:121] = 0.7
        pressure_mPa[98:100] = -1.0
        pressure_mPa[108:118] = -1.0
        pressure_mPa[101:103] = 0.0
        pressure_mPa[112] = -1.0
        pressure_mPa[0] = -1.0
        pressure_mPa[1] = -1.0
        pressure_mPa[12] = -1.0
        #shot_list = np.array([30, 53, 61, 66, 68, 72, 73, 75, 76, 78, 91, 92, 94, 96, 97, 106])
        #pressure_mPa = np.array([3, 3, 3, 3, 3, 3, 0.7, 0.7, 0.7, 0.7, 5, 7, 7, 7, 7, 7, 0.7])
        #shot_list = np.array([78, 91, 92, 96, 97, 106])
        #for i,x in enumerate(shot_list):
        #    #plt.plot(file_num[x], min_ch2[x]/min_ch3[x], "o", color='red')
        #    #plt.plot(file_num[x], min_ch2[x]/min_ch4[x], "x", color='blue')
        #    #plt.plot(file_num[x], min_ch3[x]/min_ch4[x], "^", color='green')
        #    if(min_ch2[i] < -15):
        #        plt.plot(pressure_mPa[i], min_ch2[x]/min_ch3[x], "o", color='red', label='ch2/ch3')
        #        plt.plot(pressure_mPa[i], min_ch2[x]/min_ch4[x], "x", color='blue', label='ch2/ch4')
        #        plt.plot(pressure_mPa[i], min_ch3[x]/min_ch4[x], "^", color='green', label='ch3/ch4')
        #threshold = -15
        #for i in range(120):
        #    if(min_ch2[i] < threshold):
        #        plt.plot(pressure_mPa[i], min_ch2[i]/min_ch3[i], "o", color='red', label='ch2/ch3')
        #        plt.plot(pressure_mPa[i], min_ch2[i]/min_ch4[i], "x", color='blue', label='ch2/ch4')
        #        plt.plot(pressure_mPa[i], min_ch3[i]/min_ch4[i], "^", color='green', label='ch3/ch4')
        #        #plt.plot(i, min_ch2[i]/min_ch3[i], "o", color='red', label='ch2/ch3')
        #        #plt.plot(i, min_ch2[i]/min_ch4[i], "x", color='blue', label='ch2/ch4')
        #        #plt.plot(i, min_ch3[i]/min_ch4[i], "^", color='green', label='ch3/ch4')
        #plt.plot(min_ch2, color='red', label='ch2')
        #plt.plot(min_ch3, color='blue', label='ch3')
        #plt.plot(min_ch4, color='green', label='ch4')
        # Active plot: channel-ratio trends versus shot number.
        plt.plot(min_ch2/min_ch3, color='red', label='ch2/ch3')
        plt.plot(min_ch2/min_ch4, color='blue', label='ch2/ch4')
        plt.plot(min_ch3/min_ch4, color='green', label='ch3/ch4')
        plt.xlabel("shot No.")
        #plt.title("ch2 < %d" % threshold, loc='right')
        #plt.xlabel("Pressure [mPa]")
        plt.ylabel("Ratio")
        #plt.ylabel("Signal [mV]")
        #plt.xlim(-0.5, 8)
        #plt.ylim(0, 8)
        plt.legend()
        plt.show()
def make_shotlog(date, num_st, num_ed):
    """Scan shots [num_st, num_ed) and cache their per-shot peak amplitudes
    (in mV) into YAGTS_log_<date>_<num_st>_<num_ed>.npz for later plotting."""
    file_num = []
    max_ch1 = []
    min_ch2 = []
    min_ch3 = []
    min_ch4 = []
    for i in range(num_st, num_ed):
        file_num.append(i)
        ytdb = YAGTS_DataBrowser(date=date, shotNo=i, shotSt=0)
        print('Load' + '/tek' + str(i).zfill(4))
        data = ytdb.open_with_numpy()
        # Baseline from the first 4000 (pre-trigger) samples; convert V -> mV.
        max_ch1.append(np.max(data[:, 1]-np.mean(data[:4000, 1]))*1.0e3)
        min_ch2.append(np.min(data[:, 2]-np.mean(data[:4000, 2]))*1.0e3)
        min_ch3.append(np.min(data[:, 3]-np.mean(data[:4000, 3]))*1.0e3)
        min_ch4.append(np.min(data[:, 4]-np.mean(data[:4000, 4]))*1.0e3)
    filename = "YAGTS_log_%d_%d_%d" % (date, num_st, num_ed)
    np.savez(filename, file_num=file_num, max_ch1=max_ch1, min_ch2=min_ch2, min_ch3=min_ch3, min_ch4=min_ch4)
def integrate_SL(date, num_st, num_ed, isSerial=True):
st_integrate = 4000
#shot_list = np.array([12, 15, 16, 17, 64])
shot_list = np.arange(65, 71)
#shot_list = np.r_[shot_list, np.arange(29, 32)]
#shot_list = np.arange(9, 12)
#shot_list = np.r_[shot_list, np.arange(18, 29)]
#shot_list = np.r_[shot_list, np.arange(32, 50)]
#shot_list = np.r_[np.arange(59, 61), np.arange(55, 59)]
#shot_list = np.arange(85, 88)
if isSerial == True:
for i in range(num_st, num_ed):
ytdb = YAGTS_DataBrowser(date=date, shotNo=i, shotSt=0)
print('Load' + ' tek' + str(i).zfill(4))
data = ytdb.open_with_numpy()
if i == num_st:
data_integrated = np.zeros(np.shape(data))
data_integrated += data
data = data_integrated/(num_ed-num_st)
elif isSerial == False:
for i,x in enumerate(shot_list):
ytdb = YAGTS_DataBrowser(date=date, shotNo=x, shotSt=0)
print('Load' + ' tek' + str(x).zfill(4))
data = ytdb.open_with_numpy()
if i == 0:
data_integrated = np.zeros(np.shape(data))
data_integrated += data
data = data_integrated/shot_list.size
data[:, 1:] -= np.mean(data[:st_integrate, 1:], axis=0)
#plt.plot(data_integrated)
#plt.show()
data_integrate_cumtrapz = integrate.cumtrapz(data[st_integrate:st_integrate+2**13+1, :], axis=0)
print("ch1[1500]: %.5f" % data_integrate_cumtrapz[5100-st_integrate, 1])
print("min. ch2: %.5f" % np.min(data_integrate_cumtrapz[:, 2]))
print("min. ch3: %.5f" % np.min(data_integrate_cumtrapz[:, 3]))
print("min. ch4: %.5f" % np.min(data_integrate_cumtrapz[:, 4]))
print("ch2/LP: %.5f" % (np.min(data_integrate_cumtrapz[:, 2])/data_integrate_cumtrapz[5100-st_integrate, 1]))
print("ch3/LP: %.5f" % (np.min(data_integrate_cumtrapz[:, 3])/data_integrate_cumtrapz[5100-st_integrate, 1]))
print("ch4/LP: %.5f" % (np.min(data_integrate_cumtrapz[:, 4])/data_integrate_cumtrapz[5100-st_integrate, 1]))
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['xtick.top'] = 'True'
plt.rcParams['ytick.right'] = 'True'
plt.rcParams['ytick.direction'] = 'in'
#plt.plot(1e9*data[st_integrate+1:st_integrate+2**13,0], data_integrate_cumtrapz/data_integrate_cumtrapz[5100-st_integrate, 1])
#plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(1e9*data[:, 0], 1.0e-3*(data[:, 1]), label='CH1')
ax.plot(1e9*data[:, 0], data[:, 2], label='CH2')
ax.plot(1e9*data[:, 0], data[:, 3], label='CH3')
ax.plot(1e9*data[:, 0], data[:, 4], label='CH4')
ax.legend(loc='lower right')
max_ch1 = np.max(data[:, 1])*1.0e3
min_ch2 = np.min(data[:, 2])*1.0e3
min_ch3 = np.min(data[:, 3])*1.0e3
min_ch4 = np.min(data[:, 4])*1.0e3
ax.text(-1500, 1.0e-6*max_ch1, 'CH1: %.3f mV' % (max_ch1))
ax.text(-1500, 1.0e-3*min_ch2, 'CH2: %.3f mV' % min_ch2)
ax.text(-1500, 1.0e-3*min_ch3, 'CH3: %.3f mV' % min_ch3)
ax.text(-1500, 1.0e-3*min_ch4, 'CH4: %.3f mV' % min_ch4)
ax.text(0.75, 0.90, 'CH2/LP: %.5f' % (min_ch2/max_ch1), transform=ax.transAxes)
ax.text(0.75, 0.85, 'CH3/LP: %.5f' % (min_ch3/max_ch1), transform=ax.transAxes)
ax.text(0.75, 0.80, 'CH4/LP: %.5f' % (min_ch4/max_ch1), transform=ax.transAxes)
ax.text(0.75, 0.75, 'CH2/CH3: %.3f' % (min_ch2/min_ch3), transform=ax.transAxes)
ax.text(0.75, 0.70, 'CH2/CH4: %.3f' % (min_ch2/min_ch4), transform=ax.transAxes)
ax.text(0.75, 0.65, 'CH3/CH4: %.3f' % (min_ch3/min_ch4), transform=ax.transAxes)
print('CH1: %.5f V' % max_ch1)
print('CH2: %.5f V' % min_ch2)
print('CH3: %.5f V' % min_ch3)
print('CH4: %.5f V' % min_ch4)
print('CH1/LP: %.5f V' % (max_ch1/max_ch1))
print('CH2/LP: %.5f V' % (min_ch2/max_ch1))
print('CH3/LP: %.5f V' % (min_ch3/max_ch1))
print('CH4/LP: %.5f V' % (min_ch4/max_ch1))
if isSerial == True:
plt.title("Date: %d, File No.: %d - %d" % (date, num_st, num_ed-1), loc='right', fontsize=20, fontname="Times New Roman")
filename = "YAGTS_integrated_%d_FileNo%dto%d" % (date, num_st, num_ed-1)
else:
plt.title("Date: %d, File No.: %d - %d" % (date, shot_list[0], shot_list[-1]), loc='right', fontsize=20, fontname="Times New Roman")
filename = "YAGTS_integrated_%d_FileNo%dto%d_discrete_%dshots" % (date, shot_list[0], shot_list[-1], shot_list.size)
plt.xlim(-2000, 2000)
plt.xlabel('Time [nsec]')
plt.ylabel('Output[V]')
fig.tight_layout()
#plt.show()
filepath = "figure/"
plt.savefig(filepath + filename)
plt.clf()
filename = "YAGTS_integrated_%d_FileNo%dto%d_discrete_%dshots.npz" % (date, shot_list[0], shot_list[-1], shot_list.size)
np.savez(filename, shot_list=shot_list, data=data)
def subtract_straylight():
    """Subtract a stray-light-only reference shot from a plasma shot.

    Loads two .npz files previously saved by the integration routine
    (one recorded without plasma = stray light only, one with plasma),
    normalises every channel by the CH1 (laser-pulse) integral,
    subtracts the stray contribution, estimates Te from the resulting
    channel ratios via get_Te(), and plots/saves the corrected signals.

    NOTE(review): relies on module-level imports (np, plt,
    scipy.integrate, scipy.fftpack) and on get_Te() defined later in
    this file; the input files must exist in the working directory.
    """
    # Sample-index window used for the cumulative integration.
    st_integrate = 4900
    ed_integrate = 5200
    filename_stray = "YAGTS_integrated_20180328_FileNo12to31_discrete_6shots.npz" #6Pa
    filename_plasma = "YAGTS_integrated_20180328_FileNo9to49_discrete_32shots.npz" #6Pa
    #filename_stray = "YAGTS_integrated_20180329_FileNo85to87_discrete_3shots.npz" # center target detached (orig. note: "heso hazushi")
    #filename_stray = "YAGTS_integrated_20180329_FileNo80to84_discrete_5shots.npz" # center target detached (orig. note: "heso hazushi")
    #filename_stray = "YAGTS_integrated_20180329_FileNo15to49_discrete_6shots.npz"
    #filename_stray = "YAGTS_integrated_20180329_FileNo47to49_discrete_3shots.npz"
    #filename_plasma = "YAGTS_integrated_20180329_FileNo18to46_discrete_29shots.npz"
    #filename_plasma = "YAGTS_integrated_20180329_FileNo50to79_discrete_30shots.npz"
    #filename_plasma = "YAGTS_integrated_20180328_FileNo81to95_discrete_8shots.npz" #6mPa, levitated
    stray = np.load(filename_stray)
    plasma = np.load(filename_plasma)
    data_stray = stray['data']
    data_plasma = plasma['data']
    # Cumulative trapezoidal integral over a 2**13-sample window; column 0
    # is time, columns 1-4 are CH1-CH4.
    data_stray_integrate_cumtrapz = integrate.cumtrapz(data_stray[st_integrate:st_integrate+2**13+1, :], axis=0)
    data_plasma_integrate_cumtrapz = integrate.cumtrapz(data_plasma[st_integrate:st_integrate+2**13+1, :], axis=0)
    # Plot every channel normalised by the CH1 integral sampled at index 5100
    # (solid = plasma, dashed = stray-light reference).
    plt.plot(1e9*data_plasma[st_integrate+1:st_integrate+2**13,0], data_plasma_integrate_cumtrapz[:, 1]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1], color='blue', label='CH1')
    plt.plot(1e9*data_plasma[st_integrate+1:st_integrate+2**13,0], data_plasma_integrate_cumtrapz[:, 2]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1], color='orange', label='CH2')
    plt.plot(1e9*data_plasma[st_integrate+1:st_integrate+2**13,0], data_plasma_integrate_cumtrapz[:, 3]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1], color='green', label='CH3')
    plt.plot(1e9*data_plasma[st_integrate+1:st_integrate+2**13,0], data_plasma_integrate_cumtrapz[:, 4]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1], color='red', label='CH4')
    plt.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_stray_integrate_cumtrapz[:, 1]/data_stray_integrate_cumtrapz[5100-st_integrate, 1], linestyle='dashed', color='blue', label='CH1(Stray)')
    plt.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_stray_integrate_cumtrapz[:, 2]/data_stray_integrate_cumtrapz[5100-st_integrate, 1], linestyle='dashed', color='orange', label='CH2(Stray)')
    plt.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_stray_integrate_cumtrapz[:, 3]/data_stray_integrate_cumtrapz[5100-st_integrate, 1], linestyle='dashed', color='green', label='CH3(Stray)')
    plt.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_stray_integrate_cumtrapz[:, 4]/data_stray_integrate_cumtrapz[5100-st_integrate, 1], linestyle='dashed', color='red', label='CH4(Stray)')
    plt.xlabel('Time [nsec]')
    plt.ylabel('Accumulation value [a.u.]')
    plt.legend(loc="lower right")
    plt.title(filename_plasma)
    plt.show()
    # Stray-light-subtracted, CH1-normalised integrals per channel.
    data_SL_integrated_ch1 = data_plasma_integrate_cumtrapz[:, 1]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1] - data_stray_integrate_cumtrapz[:, 1]/data_stray_integrate_cumtrapz[5100-st_integrate, 1]
    data_SL_integrated_ch2 = data_plasma_integrate_cumtrapz[:, 2]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1] - data_stray_integrate_cumtrapz[:, 2]/data_stray_integrate_cumtrapz[5100-st_integrate, 1]
    data_SL_integrated_ch3 = data_plasma_integrate_cumtrapz[:, 3]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1] - data_stray_integrate_cumtrapz[:, 3]/data_stray_integrate_cumtrapz[5100-st_integrate, 1]
    data_SL_integrated_ch4 = data_plasma_integrate_cumtrapz[:, 4]/data_plasma_integrate_cumtrapz[5100-st_integrate, 1] - data_stray_integrate_cumtrapz[:, 4]/data_stray_integrate_cumtrapz[5100-st_integrate, 1]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_SL_integrated_ch1, label='ch1')
    ax.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_SL_integrated_ch2, label='ch2')
    ax.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_SL_integrated_ch3, label='ch3')
    ax.plot(1e9*data_stray[st_integrate+1:st_integrate+2**13,0], data_SL_integrated_ch4, label='ch4')
    # NOTE(review): indexing a single element makes np.min a no-op here —
    # these read the value AT the end of the integration window (the
    # commented variants below take the true minimum instead).
    min_ch2 = np.min(data_SL_integrated_ch2[ed_integrate-st_integrate])
    min_ch3 = np.min(data_SL_integrated_ch3[ed_integrate-st_integrate])
    min_ch4 = np.min(data_SL_integrated_ch4[ed_integrate-st_integrate])
    #min_ch2 = np.min(data_SL_integrated_ch2)
    #min_ch3 = np.min(data_SL_integrated_ch3)
    #min_ch4 = np.min(data_SL_integrated_ch4)
    ax.text(0.75, 0.90, 'CH2/LP: %.5f' % (min_ch2), transform=ax.transAxes)
    ax.text(0.75, 0.85, 'CH3/LP: %.5f' % (min_ch3), transform=ax.transAxes)
    ax.text(0.75, 0.80, 'CH4/LP: %.5f' % (min_ch4), transform=ax.transAxes)
    ax.text(0.75, 0.75, 'CH2/CH3: %.3f' % (min_ch2/min_ch3), transform=ax.transAxes)
    ax.text(0.75, 0.70, 'CH2/CH4: %.3f' % (min_ch2/min_ch4), transform=ax.transAxes)
    ax.text(0.75, 0.65, 'CH3/CH4: %.3f' % (min_ch3/min_ch4), transform=ax.transAxes)
    # Mark the end of the integration window.
    ax.vlines(1e9*data_plasma[ed_integrate, 0], 0, min_ch2*2, linestyles='dashed')
    #plt.title("Date: %d, Shot No.: %d" % (self.date,self.shotNo), loc='right', fontsize=20, fontname="Times New Roman")
    print("Load stray: %s" % filename_stray)
    print("Load plasma: %s" % filename_plasma)
    print('CH2: %.5f V' % min_ch2)
    print('CH3: %.5f V' % min_ch3)
    print('CH4: %.5f V' % min_ch4)
    # Electron temperature from channel ratios (results printed inside get_Te).
    te_12, te_14, te_24 = get_Te(min_ch2/min_ch4, min_ch2/min_ch3, min_ch4/min_ch3)
    plt.title(filename_plasma)
    plt.xlabel('Time [nsec]')
    plt.ylabel('Integrated Value')
    plt.legend()
    plt.show()
    # --- Second approach: subtract raw (non-integrated) waveforms scaled
    # by each measurement's CH1 peak, after removing the pre-trigger
    # baseline (first 4000 samples).
    max_ch1_stray = np.max(data_stray[:, 1]-np.mean(data_stray[:4000, 1]))#*1.0e3
    max_ch1_plasma = np.max(data_plasma[:, 1]-np.mean(data_plasma[:4000, 1]))#*1.0e3
    data_ch2 = max_ch1_plasma*((data_plasma[:, 2]-np.mean(data_plasma[:4000,2]))/max_ch1_plasma - (data_stray[:, 2]-np.mean(data_stray[:4000,2]))/max_ch1_stray)
    #data_ch2 = (data_stray[:, 2]-np.mean(data_stray[:4000,2]))/max_ch1_stray
    data_ch3 = max_ch1_plasma*((data_plasma[:, 3]-np.mean(data_plasma[:4000,3]))/max_ch1_plasma - (data_stray[:, 3]-np.mean(data_stray[:4000,3]))/max_ch1_stray)
    data_ch4 = max_ch1_plasma*((data_plasma[:, 4]-np.mean(data_plasma[:4000,4]))/max_ch1_plasma - (data_stray[:, 4]-np.mean(data_stray[:4000,4]))/max_ch1_stray)
    # Whole-array version (includes the time column; no baseline removal).
    data = max_ch1_plasma*(data_plasma/max_ch1_plasma - data_stray/max_ch1_stray)
    plt.plot(data)
    plt.show()
    filename_data = filename_plasma[:-4] + "_woStray.txt"
    np.savetxt(filename_data, data, delimiter=',')
    #plt.plot(data_ch2)
    #plt.show()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(1e9*data_plasma[:, 0], data_ch2, label='CH2')
    ax.plot(1e9*data_plasma[:, 0], data_ch3, label='CH3')
    ax.plot(1e9*data_plasma[:, 0], data_ch4, label='CH4')
    ax.legend(loc='lower right')
    # Signal minima (scattered-light dips) in mV after baseline removal.
    min_ch2 = np.min(data_ch2-np.mean(data_ch2[:4000]))*1.0e3
    min_ch3 = np.min(data_ch3-np.mean(data_ch3[:4000]))*1.0e3
    min_ch4 = np.min(data_ch4-np.mean(data_ch4[:4000]))*1.0e3
    ax.text(-1500, 1.0e-3*min_ch2, 'CH2: %.3f mV' % min_ch2)
    ax.text(-1500, 1.0e-3*min_ch3, 'CH3: %.3f mV' % min_ch3)
    ax.text(-1500, 1.0e-3*min_ch4, 'CH4: %.3f mV' % min_ch4)
    ax.text(0.75, 0.45, 'CH2/CH3: %.3f' % (min_ch2/min_ch3), transform=ax.transAxes)
    ax.text(0.75, 0.4, 'CH2/CH4: %.3f' % (min_ch2/min_ch4), transform=ax.transAxes)
    ax.text(0.75, 0.35, 'CH3/CH4: %.3f' % (min_ch3/min_ch4), transform=ax.transAxes)
    #plt.title("Date: %d, Shot No.: %d" % (self.date,self.shotNo), loc='right', fontsize=20, fontname="Times New Roman")
    print('CH2: %.5f V' % min_ch2)
    print('CH3: %.5f V' % min_ch3)
    print('CH4: %.5f V' % min_ch4)
    #plt.plot(1e9*data[:, 0], data[:, 2])
    #plt.plot(1e9*data[:, 0], data[:, 3])
    #plt.plot(1e9*data[:, 0], data[:, 4])
    plt.xlim(-2000, 2000)
    plt.xlabel('Time [nsec]')
    plt.ylabel('Output[V]')
    plt.show()
    #filepath = "figure/"
    #filename = "YAGTS_woStray_integrated_20180328_FileNo9to49_discrete_32shots"
    #plt.savefig(filepath + filename)
    # Moving-average smoothing over num_convolve samples.
    num_convolve = 150
    v = np.ones(num_convolve)/num_convolve
    data_ch2_convolved = np.convolve(data_ch2, v, mode='same')
    data_ch3_convolved = np.convolve(data_ch3, v, mode='same')
    data_ch4_convolved = np.convolve(data_ch4, v, mode='same')
    plt.plot(1e9*data_plasma[:, 0], data_ch2_convolved, label='CH2')
    plt.plot(1e9*data_plasma[:, 0], data_ch3_convolved, label='CH3')
    plt.plot(1e9*data_plasma[:, 0], data_ch4_convolved, label='CH4')
    plt.show()
    # Low-pass filter CH2 in the frequency domain (cutoff fs = 40 MHz);
    # negative frequencies are also zeroed before the inverse FFT.
    n = data_ch2.__len__()
    dt = data_plasma[1, 0] - data_plasma[0, 0]
    yf = fftpack.fft(data_ch2)/(n/2)
    freq = fftpack.fftfreq(n, dt)
    fs = 4e7
    yf2 = np.copy(yf)
    yf2[(freq > fs)] = 0
    yf2[(freq < 0)] = 0
    y2 = np.real(fftpack.ifft((yf2)*n))
    plt.plot(1e9*data_plasma[:, 0], y2, label='CH2')
    plt.show()
    plt.clf()
def load_ratio_ptncnt():
    """Load the photon-count ratio calibration table from disk.

    Reads the tab-separated file (one header row skipped) and returns it
    as a 2-D array.  Column 0 appears to be the Te axis and the other
    columns channel ratios — confirm against the data file.
    """
    calibration_path = "Ratio_PtnCnt_P25.txt"
    table = np.loadtxt(calibration_path, delimiter='\t', skiprows=1)
    return table
def load_cofne():
    """Load the ne calibration coefficients for polychromator #25.

    The file holds num_pnts rows per polychromator; the computed skip
    count positions the reader at the block for this polychromator.
    """
    num_pol = 25
    num_pnts = 1000
    rows_to_skip = num_pol * num_pnts + (num_pol - 1) * num_pnts + 1
    coefficients = np.loadtxt("Cofne_Mar2018_HJPol25.txt",
                              delimiter='\t', skiprows=rows_to_skip)
    return coefficients
def get_Te(ratio_12, ratio_14, ratio_24):
    """Look up the electron temperature [eV] from channel-ratio curves.

    Interpolates the calibration ratio table over a fine Te grid and
    returns the Te whose tabulated ratio is nearest to each measured
    ratio, as a (Te_ch1/ch2, Te_ch1/ch4, Te_ch2/ch4) tuple.

    @param ratio_12: measured ch1/ch2 ratio
    @param ratio_14: measured ch1/ch4 ratio
    @param ratio_24: measured ch2/ch4 ratio
    """
    ratio_ptncnt = load_ratio_ptncnt()
    te = np.linspace(1, 9000, 90000-1)
    ratio_ptncnt_spline_1 = interpolate.interp1d(ratio_ptncnt[:, 0], ratio_ptncnt[:, 1])
    ratio_ptncnt_spline_3 = interpolate.interp1d(ratio_ptncnt[:, 0], ratio_ptncnt[:, 3])
    ratio_ptncnt_spline_6 = interpolate.interp1d(ratio_ptncnt[:, 0], ratio_ptncnt[:, 6])
    # The search skips the first 10 grid points, so every idx_* below is
    # an index into te[10:].
    idx_12 = getNearestValue(ratio_ptncnt_spline_1(te[10:]), ratio_12)
    idx_14 = getNearestValue(ratio_ptncnt_spline_3(te[10:]), ratio_14)
    idx_24 = getNearestValue(ratio_ptncnt_spline_6(te[10:]), ratio_24)
    print("Te(ch1/ch2) = %.2f [eV]" % te[10+idx_12])
    print("Te(ch1/ch4) = %.2f [eV]" % te[10+idx_14])
    print("Te(ch2/ch4) = %.2f [eV]" % te[10+idx_24])
    # BUG FIX: previously returned te[idx_*] while printing te[10+idx_*];
    # since the indices are relative to te[10:], the +10 offset is needed
    # for the returned values to match the printed (correct) ones.
    return te[10+idx_12], te[10+idx_14], te[10+idx_24]
def get_ne(Te1_eV, Te4_eV, Te2_eV, output1_mV, output4_mV, output2_mV):
    """Print the electron density estimated from the CH1 output and Te.

    Only the CH1 branch is active: density = output1_mV * coefficient(Te1_eV),
    printed in units of 1e16 m^-3.  The ch2/ch4 branches and the return
    statement are commented out, so the function returns None.

    NOTE(review): Te4_eV, Te2_eV, output4_mV, output2_mV and te_interp
    are currently unused (kept for the commented-out branches).
    """
    cofne = load_cofne()
    te = np.linspace(10, 8000, 1000)
    te_interp = np.linspace(10, 8000, 100000)
    cofne_spline_1 = interpolate.interp1d(te, cofne[:, 0])
    #cofne_spline_4 = interpolate.interp1d(te, cofne[:, 3])
    #cofne_spline_2 = interpolate.interp1d(te, cofne[:, 1])
    print("ne(ch1) = %.4f [x10^16 m^-3]" % (output1_mV*cofne_spline_1(Te1_eV)))
    #print("ne(ch4) = %.2f [x10^16 m^-3]" % (output4_mV*cofne_spline_4(Te4_eV)))
    #print("ne(ch2) = %.2f [x10^16 m^-3]" % (output2_mV*cofne_spline_2(Te2_eV)))
    #return output1_mV*cofne_spline_1[idx_1], output4_mV*cofne_spline_4[idx_4], output2_mV*cofne_spline_2[idx_2]
def getNearestValue(values, num):
    """Return the INDEX of the element of *values* closest to *num*.

    Despite the name, the index — not the value — is returned; callers in
    this file use it to index a parallel Te grid.

    @param values: array-like of numbers
    @param num: target value
    @return: index of the element nearest to num

    Cleanup: the parameter previously shadowed the builtin ``list``
    (renamed; all callers pass it positionally) and the docstring was
    translated from Japanese.
    """
    # Minimise the absolute difference between each element and the target.
    idx = np.abs(np.asarray(values) - num).argmin()
    return idx
if __name__ == "__main__":
    # Common tick styling shared by all figures produced below.
    # NOTE(review): the string 'True' (instead of the bool True) is
    # coerced by matplotlib, but bool True would be cleaner.
    plt.rcParams['xtick.direction'] = 'in'
    plt.rcParams['xtick.top'] = 'True'
    plt.rcParams['ytick.right'] = 'True'
    plt.rcParams['ytick.direction'] = 'in'
    #ytdb = YAGTS_DataBrowser(date=20180328, shotNo=15, shotSt=0)
    #ytdb.plot_shotlog(0, 120)
    #ytdb.open_with_pandas()
    #ytdb.show_graph()
    #make_shotlog(date=20180327, num_st=0, num_ed=120)
    # Average serial shots 34-41 of 2018-06-22 and plot/save the result.
    integrate_SL(date=20180622, num_st=34, num_ed=42, isSerial=True)
    #subtract_straylight()
    #get_Te()
    #get_ne(40.3, 40.3, 40.3, 0.6546303, 0.6546303, 0.6546303)
    #get_ne(124.79, 40.3, 40.3, (0.06300-0.04959)*143.243, 0.6546303, 0.6546303)
    #get_ne(21.3, 40.3, 40.3, (0.03693-0.03673)*144.98, 0.6546303, 0.6546303)
|
import matplotlib.pyplot as plt
import numpy

# Plot sin(x) for x in [0, 10) sampled every 0.1.
x = numpy.arange(0, 10, 0.1)
# BUG FIX: numpy has no function `sim`; `numpy.sin` was intended.
s = numpy.sin(x)
print(s)
plt.plot(s)
plt.show()
|
# -*-coding:Latin-1 -*
class TableauNoir:
    """Tutorial class demonstrating class attributes, classmethods,
    staticmethods, properties and attribute-access special methods."""

    objets_crees = 0  # class attribute, shared by every instance of the class

    def __init__(self):  # constructor; not strictly mandatory in Python
        # Special methods use double leading/trailing underscores; this
        # one defines the instance attributes.
        """Each time an object is created, the counter is incremented."""
        TableauNoir.objets_crees += 1
        self.surface = "\n salut! "  # instance attribute (set through self)
        self._lieu_residence = "Paris"
        # Convention: attributes starting with a single underscore are not
        # accessed from outside the class.

    def combien(cls):  # class method: receives the class, not an instance
        print("Jusqu'à présent, {} objets ont été créés.".format(cls.objets_crees))
    combien = classmethod(combien)
    # classmethod() is required (pre-decorator style) for Python to pass
    # the class as `cls` instead of an instance.

    def ecrire(self, message_a_ecrire):  # instance method (uses self)
        # Append the message to the board, on a new line if non-empty.
        if self.surface != "":
            self.surface += "\n"
        self.surface += message_a_ecrire

    def afficher():  # static method: no self, no cls; independent of any
        # instance or class state.
        print("On affiche la même chose.")
        print("peu importe les données de l'objet ou de la classe.")
    afficher = staticmethod(afficher)  # required for the declaration (pre-decorator style)

    # Accessor/mutator access happens either by calling the functions
    # directly or through the property, which dispatches to them.
    # If no getter or setter is defined, the attribute cannot be read/written
    # through the property.
    def _get_lieu_residence(self):
        print("On accède à l'attribut lieu_residence:")
        return self._lieu_residence

    def _set_lieu_residence(self, nouvelle_residence):
        # NOTE(review): the message below has an empty {} placeholder and
        # no .format() argument — it prints the braces literally.
        print("Attention, il semble que {} déménage ")
        self._lieu_residence = nouvelle_residence

    # Tell Python that the lieu_residence attribute is a property.
    lieu_residence = property(_get_lieu_residence, _set_lieu_residence)
    # lieu_residence = property(_get_lieu_residence, _set_lieu_residence,_effaceur_lieu_residence, _helperof_lieu_residence)
    # Property definition: the attribute lieu_residence (without the
    # leading underscore) dispatches, in order, to the getter (accessor)
    # and the setter (mutator).

    def __repr__(self):
        """Developer representation; called when the object is echoed or
        printed directly."""
        return "surface={},lieu_residence={} ".format(self.surface,self._lieu_residence)

    def __getattr__(self, nom):
        """Called only when Python cannot find attribute `nom` through
        normal lookup.  NOTE(review): returns None implicitly."""
        print("Alerte ! Il n'y a pas d'attribut {} ici !".format(nom))

    def __setattr__(self, nom_attr, val_attr):
        """Intercepts every `objet.nom_attr = val_attr`; delegates to the
        default object.__setattr__."""
        object.__setattr__(self, nom_attr, val_attr)

    def __hasattr__(self, nom):
        """NOTE(review): __hasattr__ is NOT a real Python special method —
        hasattr() never calls it, so this definition is dead code kept
        only for the tutorial's narrative."""
class ZDict:
    """Wrapper class around a dict, exposing the container protocol
    (item get/set/delete, ``in``, ``len`` and ``repr``)."""

    def __init__(self):
        """The class accepts no constructor parameters."""
        self._dictionnaire = {}

    def __setitem__(self, index, valeur):
        """``objet[index] = valeur`` stores valeur under index."""
        self._dictionnaire[index] = valeur

    def __getitem__(self, index):
        """``objet[index]`` returns the stored value, or an error string
        for an invalid key.

        BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
        SystemExit; only a missing key (KeyError) or an unhashable key
        (TypeError) should produce the fallback string.
        """
        try:
            return self._dictionnaire[index]
        except (KeyError, TypeError):
            return "Indexe non valide!"

    def __delitem__(self, index):
        """``del objet[index]``: announce and remove the stored value."""
        print("suppression de {}".format(self._dictionnaire[index]))
        del self._dictionnaire[index]

    def __repr__(self):
        """Called when the object is printed or echoed directly."""
        return "{}".format(self._dictionnaire)

    #***** ****#
    # ma_liste = [1, 2, 3, 4, 5]                                       #
    # 8 in ma_liste is the same as ma_liste.__contains__(8)            #
    #***** ****#
    def __contains__(self, value):
        """True if *value* is among the stored VALUES (not the keys)."""
        return value in self._dictionnaire.values()

    def __len__(self):
        """Number of stored entries."""
        return len(self._dictionnaire)
class Duree:
    """Duration stored as a number of minutes and seconds."""

    def __init__(self, min=0, sec=0):
        """Class constructor.

        NOTE(review): `min` shadows the builtin of the same name; renaming
        it would break keyword callers, so it is left as-is.
        """
        self.min = min  # number of minutes
        self.sec = sec  # number of seconds

    def __str__(self):
        """Prettier display of our objects: zero-padded MM:SS."""
        return "{0:02}:{1:02}".format(self.min, self.sec)

    # d1 + 4 is equivalent to d1.__add__(4); we redefine __add__:
    def __add__(self,nombre):
        """Add *nombre* seconds to this duration, carrying into minutes.

        Other arithmetic special methods:
        __sub__ : - ;   __mul__ : * ;   __truediv__ : / ;
        __floordiv__ : // (integer division) ;   __mod__ : % (modulo) ;
        __pow__ : ** (power) ;   __iadd__: +=   __isub__: -=
        __eq__ : ==   __ne__ : !=   __gt__ : >   __ge__ : >=
        __lt__ : <    __le__ : <=

        NOTE(review): this mutates self in place AND returns the formatted
        string rather than a Duree, so chained additions like (d + 1) + 1
        would fail; kept as-is to preserve the tutorial's behavior.
        """
        nbr = self.sec + nombre
        if (nbr >= 60):
            # Carry whole minutes out of the seconds total.
            self.min += nbr // 60
            self.sec = nbr % 60
        else:
            self.sec = nbr
        return "{0:02}:{1:02}".format(self.min, self.sec)

    def __radd__(self, objet_a_ajouter):
        """Called for ``objet1 + objet2`` when the left operand does not
        know how to add the right one; simply delegates to __add__."""
        return self + objet_a_ajouter
class Temp:
    """Class with several attributes, one of which is temporary.

    Either mechanism alone keeps `attribut_temporaire` out of pickled
    state; this class demonstrates both:
    - __getstate__ modifies the attribute dict BEFORE serialization, so
      the saved dict carries the temporary attribute as 0;
    - __setstate__ modifies the attribute dict AFTER deserialization,
      resetting whatever value was stored back to 0 before the object is
      handed to the caller.
    """

    def __init__(self):
        """Build the object with two real attributes and one temporary one."""
        self.attribut_1 = "une valeur"
        self.attribut_2 = "une autre valeur"
        self.attribut_temporaire = 5

    def __getstate__(self):
        """Return the ATTRIBUTE DICTIONARY to serialize (copy, with the
        temporary slot zeroed so the live object is untouched)."""
        dict_attr = dict(self.__dict__)
        dict_attr["attribut_temporaire"] = 0
        return dict_attr

    def __setstate__(self, dict_attr):
        """Called when the object is deserialized; reset the temporary
        attribute before installing the state."""
        dict_attr["attribut_temporaire"] = 0
        self.__dict__ = dict_attr
class Personne:
    """A person: a last name plus a default first name ("Martin")."""

    def __init__(self, nom):
        self.prenom = "Martin"
        self.nom = nom

    def __str__(self):
        """Display as '<first name> <last name>'."""
        template = "{0} {1}"
        return template.format(self.prenom, self.nom)
class AgentSpecial(Personne):
    """A Personne who additionally carries a service number (matricule)."""

    def __init__(self, nom, matricule):
        self.matricule = matricule
        Personne.__init__(self, nom)

    def __str__(self):
        """Display as 'Agent <last name>, matricule <number>'."""
        template = "Agent {0}, matricule {1}"
        return template.format(self.nom, self.matricule)
#Cree sa propre exception
class ErreurAnalyseFichier(Exception):
    """Custom exception raised when a file cannot be parsed.

    fichier -- name of the offending file
    ligne   -- number of the offending line
    message -- description of the actual problem
    """

    def __init__(self, fichier, ligne, message):
        self.message = message
        self.ligne = ligne
        self.fichier = fichier

    def __str__(self):
        """Human-readable rendering of the exception."""
        details = (self.fichier, self.ligne, self.message)
        return "[fichier {}, ligne {}]:{}".format(*details)
|
# test_util.py
"""Module to provide testing utility functions, objects, etc."""
from unittest.mock import MagicMock
class AsyncMock(MagicMock):
    """
    AsyncMock is the async version of a MagicMock.

    We use this class in place of MagicMock when we want to mock
    asynchronous callables.

    Source: https://stackoverflow.com/a/32498408
    """

    async def __call__(self, *args, **kwargs):
        """Allow MagicMock to work its magic too."""
        # Cleanup: zero-argument super() — the explicit
        # super(AsyncMock, self) form was a Python 2 holdover.
        return super().__call__(*args, **kwargs)
class ObjectLiteral:
    """
    ObjectLiteral transforms named arguments into object attributes.

    This is useful for creating object literals to be used as return
    values from mocked API calls.

    Source: https://stackoverflow.com/a/3335732
    """

    def __init__(self, **kwds):
        """Add attributes to ourself with the provided named arguments."""
        for name, value in kwds.items():
            setattr(self, name, value)
|
# perform face detection
# display detected face frame
# display FPS info in webcam video feed
# This is the official sample demo file described in the installer documentation
# Date: 2020 01 26
# Install OpenVINO toolkit for Raspbian* OS
# http://docs.openvinotoolkit.org/2019_R1/_docs_install_guides_installing_openvino_raspbian.html
import cv2
import time
import imutils

# Load the model (OpenVINO IR: .xml topology + .bin weights).
net = cv2.dnn.readNet('face-detection-adas-0001.xml',
                      'face-detection-adas-0001.bin')
# Specify target device.
# ERROR net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
# OK    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# ERROR net.setPreferableBackend(cv2.dnn.DNN_BACKEND_HALIDE)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

# open video frame
video_capture = cv2.VideoCapture(0)
while True:
    start_time = time.time()
    ret, frame = video_capture.read()
    if not ret:
        # BUG FIX: read() can fail (camera busy/unplugged), in which case
        # frame is None and imutils.resize crashed; exit the loop cleanly.
        break
    # frame resize to improve performance
    # NOTE(review): width=648 looks like a typo for 640 (the inference blob
    # below is 640x480); harmless since blobFromImage rescales anyway.
    frame = imutils.resize(frame, width=648, height=480)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Prepare input blob and perform an inference.
    blob = cv2.dnn.blobFromImage(rgb_frame, size=(640, 480), ddepth=cv2.CV_8U)
    net.setInput(blob)
    out = net.forward()
    # Draw detected faces on the frame.  Each detection row is
    # [image_id, label, confidence, xmin, ymin, xmax, ymax], coords normalized.
    for detection in out.reshape(-1, 7):
        confidence = float(detection[2])
        if confidence <= 0.5:
            continue  # skip weak detections before computing pixel coordinates
        xmin = int(detection[3] * frame.shape[1])
        ymin = int(detection[4] * frame.shape[0])
        xmax = int(detection[5] * frame.shape[1])
        ymax = int(detection[6] * frame.shape[0])
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
    # display FPS
    fpsInfo = "FPS: " + str(1.0 / (time.time() - start_time))  # FPS = 1 / time to process loop
    print(fpsInfo)
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(frame, fpsInfo, (10, 20), font, 0.4, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
import skimage
import skimage.measure
import skimage.color
import skimage.restoration
import skimage.io
import skimage.filters
import skimage.morphology
import skimage.segmentation
from nn import *
from q4 import *
# do not include any more libraries here!
# no opencv, no sklearn, etc!
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
def cluster(bboxes, K):
    """Group character bounding boxes into K text lines.

    Boxes are sorted by their bottom-row coordinate, clustered on that
    coordinate with KMeans, split whenever the cluster label changes, and
    each resulting line is sorted left-to-right (by column).  Returns a
    list of lines, each a list of bbox arrays.
    """
    from sklearn.cluster import KMeans

    order = np.argsort(bboxes[:, 2])
    bboxes = bboxes[order]
    rows = bboxes[:, 2].reshape(-1, 1)
    labels = KMeans(n_clusters=K, random_state=0).fit(rows).labels_

    grouped = []
    current = []
    previous = labels[0]
    for label, box in zip(labels, bboxes):
        if label == previous:
            current.append(box)
        else:
            # Label changed: close the current line, sorted left-to-right.
            grouped.append(sorted(current, key=lambda b: b[1]))
            current = [box]
            previous = label
    if current:
        grouped.append(sorted(current, key=lambda b: b[1]))
    return grouped
true_K = [8, 5, 3, 3] # ground truth lines of texts
# Class labels in the order produced by the network's output layer.
characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# Expected transcription of each test image (for accuracy reporting).
test_y = ["TODOLIST1MAKEATODOLIST2CHECKOFFTHEFIRSTTHINGONTODOLIST3REALIZEYOUHAVEALREADYCOMPLETED2THINGS4REWARDYOURSELFWITHANAP",
          "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890",
          "HAIKUSAREEASYBUTSOMETIMESTHEYDONTMAKESENSEREFRIGERATOR",
          "DEEPLEARNINGDEEPERLEARNINGDEEPESTLEARNING"]
for i, img in enumerate(os.listdir('../images')):
    im1 = skimage.img_as_float(skimage.io.imread(os.path.join('../images',img)))
    bboxes, bw = findLetters(im1)
    # Show the binarised image with one red rectangle per detected letter.
    plt.imshow(bw, cmap='gray')
    for bbox in bboxes:
        minr, minc, maxr, maxc = bbox
        rect = matplotlib.patches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                            fill=False, edgecolor='red', linewidth=2)
        plt.gca().add_patch(rect)
    plt.show()
    # find the rows using..RANSAC, counting, clustering, etc.
    lines = cluster(np.array(bboxes), true_K[i])
    # load the weights
    # run the crops through your neural network and print them out
    # NOTE(review): these imports re-execute on every image; harmless but
    # they belong at the top of the file.
    import pickle
    import string
    # NOTE(review): `letters` is unused — classification below indexes
    # the `characters` string instead.
    letters = np.array([_ for _ in string.ascii_uppercase[:26]] + [str(_) for _ in range(10)])
    params = pickle.load(open('q3_weights.pickle','rb'))
    # crop the bounding boxes
    # note.. before you flatten, transpose the image (that's how the dataset is!)
    # consider doing a square crop, and even using np.pad() to get your images looking more like the dataset
    classified_texts = ""
    pad_width = 5
    acc, j = 0, 0
    for line in lines:
        for bbox in line:
            y1, x1, y2, x2 = bbox
            # Square crop centred on the box, resized to 22x22 and padded
            # back to the network's 32x32 input.
            max_side = max(y2 - y1, x2 - x1)
            y_center, x_center = (y1 + y2) // 2, (x1 + x2) // 2
            image = bw[y_center - max_side // 2: y_center + max_side // 2, x_center - max_side // 2: x_center + max_side // 2]
            # NOTE(review): skimage.transform is not explicitly imported at
            # the top of the file — confirm it is available at runtime.
            image_cropped = skimage.transform.resize(image.astype(float), (32 - 2 * pad_width, 32 - 2 * pad_width))
            image_padded = np.pad(image_cropped, ((pad_width, pad_width), (pad_width, pad_width)), 'constant', constant_values=1)
            # Re-binarise after the interpolating resize.
            image_padded = (image_padded > 0.9).astype(int)
            # Transpose before flattening to match the training data layout.
            x = image_padded.transpose().reshape(1, -1)
            h1 = forward(x, params, 'layer1')
            probs = forward(h1, params, 'output', softmax)
            character = characters[np.argmax(probs)]
            acc += int(character == test_y[i][j])
            classified_texts += character
            j += 1
        classified_texts += '\n'
    print("-"*80 + "\n" + img + ":\n")
    print(classified_texts)
    print("Accuracy:", acc / len(test_y[i]), "\n\n")
|
from .deck import Deck
from .comp_dealer import CompDealer
class BlackJackGame:
    """Runs rounds of blackjack between a human player and a computer dealer.

    The player object must provide: current_score, bet(), take_hit(),
    add_card(), bust_check(), win_check() and update_money(); the dealer
    (CompDealer) provides the same playing interface.
    """

    def __init__(self, player1):
        self.player1 = player1
        self.dealer = CompDealer()
        self.player1.current_score = 0  # setting human player's score to zero
        self.bet_amount = 0
        self.deck = Deck()
        self.deck.shuffle()

    def deal_card(self, player, num_draw):
        """Deal certain number of cards to a player."""
        for _ in range(num_draw):
            card_drawn = self.deck.draw_card()
            player.add_card(card_drawn)

    def player_turn(self, currentplayer):
        """Lets the player take hit and returns their state: BUSTED, WIN, or PASS"""
        while True:
            print(f"Current player's score is: {currentplayer.current_score}")
            if currentplayer.take_hit():  # ask if player wants to take a hit
                self.deal_card(currentplayer, 1)  # deal a player a card
                if currentplayer.bust_check():  # check if the player bust
                    return "busted"
                if currentplayer.win_check():  # check if the player wins
                    return "win"
            else:
                return "pass"

    def player_win(self):
        """Pay the bet out to the human player."""
        # BUG FIX: message typo ("Congratulation" -> "Congratulations").
        print("Congratulations, you win!")
        self.player1.update_money(self.bet_amount)

    def player_lose(self):
        """Deduct the bet from the human player."""
        print("You lose!")
        self.player1.update_money(self.bet_amount * (-1))

    def play_game(self):
        """Play one full round: bet, initial deal, player turn, dealer turn."""
        self.bet_amount = self.player1.bet()
        self.deal_card(self.player1, 2)
        self.deal_card(self.dealer, 2)
        # HUMAN PLAYER'S TURN
        # Cleanup: was an f-string with no placeholders.
        print("This is your turn.")
        human_outcome = self.player_turn(self.player1)
        if human_outcome == "win":
            self.player_win()
        elif human_outcome == "busted":
            print("You busted.")
            self.player_lose()
        else:
            # Player stood ("pass"): the dealer now plays, then scores compare.
            # BUG FIX: message typo ("This the" -> "This is the").
            print("This is the dealer's turn.")
            dealer_outcome = self.player_turn(self.dealer)
            if dealer_outcome == "busted":
                print("The dealer busted.")
                self.player_win()
            elif dealer_outcome == "win":
                print("Dealer wins!")
                self.player_lose()
            # BUG FIX below: "You score" -> "Your score" (two messages).
            elif self.player1.current_score > self.dealer.current_score:
                print("Your score is greater than the dealer's.")
                self.player_win()
            elif self.player1.current_score < self.dealer.current_score:
                print("Your score is less than the dealer's.")
                self.player_lose()
            else:
                print("It is a tie!")

    def replay(self):
        """Determine if the users want to play game again.
        Returns boolean True if they do."""
        play_again = input('Do you want to play again? (Y/N):').upper()
        while play_again != 'Y' and play_again != 'N':
            # BUG FIX: the re-prompt did not normalise case, so a lowercase
            # 'y'/'n' answer kept the loop running forever.
            play_again = input("Please enter 'Y' or 'N': ").upper()
        return play_again == 'Y'
import pymysql

# NOTE(review): hard-coded credentials — move to config/env in real code.
db = pymysql.connect("localhost", "root", "12343249", "sparsh")
cursor = db.cursor()
sql = """INSERT INTO vidhi(ID,Name)
VALUES (1,'I love you')"""
try:
    cursor.execute(sql)
    db.commit()
except Exception:
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; roll the transaction back on any DB failure.
    db.rollback()
finally:
    # BUG FIX: close the connection even if rollback itself raises.
    db.close()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-25 15:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.3): redefine FilteredResource.status as
    a 15-char CharField with in_progress/done choices, defaulting to
    in_progress.  Generated migrations are normally left untouched."""

    dependencies = [
        ('filters', '0003_filteredresource_status'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filteredresource',
            name='status',
            field=models.CharField(choices=[('in_progress', 'In Progress'), ('done', 'Done')], default='in_progress', max_length=15, verbose_name='Status'),
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
import os.path
from datetime import timedelta
from decimal import Decimal
import flask
from flask.ext.script import Manager
from jinja2 import Markup, escape
import yaml
pages = flask.Blueprint('pages', __name__)
missing = object()
def q(value, places):
    """Quantize *value* (a Decimal) to *places* decimal places,
    using Decimal's default rounding."""
    quantum = 1 / Decimal(10 ** places)
    return value.quantize(quantum)
defaults = {
'name': None,
'local': False,
'details': "",
'vat_number': "",
'address': "",
'delegate': "",
'bank': "",
'account': None,
'accounts': {},
'exchange_rate': {},
}
class Company(object):
    """Read-only mapping view over a raw company dict; keys absent from
    the raw data fall back to the module-level `defaults` table."""

    def __init__(self, data, code):
        self._data = data
        self.code = code

    def __getitem__(self, key):
        # EAFP: take the raw value when present, otherwise the default.
        try:
            return self._data[key]
        except KeyError:
            return defaults.get(key)
class Supplier(Company):
    """Company that issues invoices; knows its invoice-number format."""

    def __init__(self, data, code):
        super(Supplier, self).__init__(data, code)
        fmt = data.get('invoice_number_format', '{}')
        self.invoice_number_format = fmt

    def format_invoice_number(self, number):
        """Render *number* through the configured format template."""
        template = self.invoice_number_format
        return template.format(number)
class Contract(object):
    """Contract data; keys absent from the contract fall back to the
    owning client (which itself falls back to module defaults)."""

    def __init__(self, data, code, client):
        self._data = data
        self.code = code
        self.client = client

    def __getitem__(self, key):
        # EAFP: contract value when present, otherwise the client's.
        try:
            return self._data[key]
        except KeyError:
            return self.client[key]
class Invoice(object):
    """A single invoice; settings resolve through invoice data ->
    contract -> client -> module defaults (via __getitem__)."""

    def __init__(self, data, supplier, contract):
        self._data = data
        self.supplier = supplier
        self.contract = contract
        # Invoice code combines the issue date and the sequence number.
        self.code = "{s[date]}-{s[number]}".format(s=self)
        self.number = supplier.format_invoice_number(self['number'])
        self.due_date = self['date'] + timedelta(days=self['due_days'])
        self.quantity = Decimal(self['quantity'])
        self.exchange_rate = {k: Decimal(v) for k, v in
                              self['exchange_rate'].items()}
        # price_per_unit comes as e.g. "12.5 EUR" -> (Decimal, currency).
        price_per_unit = self['price_per_unit']
        price_per_unit_str, currency = price_per_unit.split()
        self.price_per_unit = Decimal(price_per_unit_str)
        self.currency = currency
        # Local invoices are paid in RON regardless of the pricing currency.
        payment_currency = "RON" if self['local'] else currency
        self.payment_currency = payment_currency
        self.account = self['account'] or supplier['accounts'][payment_currency]
        if self['local']:
            if currency != payment_currency:
                exchange = self.exchange_rate[currency]
            else:
                exchange = 1
            # Convert the unit price to RON; quantize to 2 then 4 places
            # (the double q() fixes the exponent the totals are built on).
            self.price_per_unit = q(q(self.price_per_unit * exchange, 2), 4)
            self.total = q(self.price_per_unit * self.quantity, 2)
            self.total_ron = self.total
        else:
            self.total = q(self.price_per_unit * self.quantity, 2)
            # Foreign-currency invoices also record the RON equivalent.
            self.total_ron = q(self.total * self.exchange_rate[currency], 2)

    def __getitem__(self, key):
        # Missing keys fall through to the contract (then client/defaults).
        rv = self._data.get(key, missing)
        if rv is missing:
            rv = self.contract[key]
        return rv

    @property
    def client(self):
        # Convenience accessor: the client belongs to the contract.
        return self.contract.client

    def __str__(self):
        return u"{s[date]} #{s[number]} – {s.client.code}".format(s=self)
class Model(object):
    """Root object graph built from the parsed YAML data file."""

    def __init__(self, data):
        self.supplier = Supplier(data['supplier'], None)
        # Clients first, since contracts reference them by code.
        self.clients = {}
        for code, client_data in data['clients'].items():
            self.clients[code] = Company(client_data, code)
        self.contracts = {}
        for code, contract_data in data['contracts'].items():
            owner = self.clients[contract_data['client']]
            self.contracts[code] = Contract(contract_data, code, owner)
        self.invoices = [Invoice(entry, self.supplier,
                                 self.contracts[entry['contract']])
                         for entry in data['invoices']]
def read_model():
    """Parse the configured YAML data file into a Model object graph."""
    with open(flask.current_app.config['DATAFILE'], 'rb') as f:
        # safe_load: the data file needs only plain YAML types (mappings,
        # lists, strings, numbers, dates). yaml.load without an explicit
        # Loader can construct arbitrary Python objects from the file and
        # is deprecated since PyYAML 5.1.
        return Model(yaml.safe_load(f))
@pages.route('/')
def home():
    """Render the index page with the full invoice model."""
    model = read_model()
    return flask.render_template('home.html', model=model)
@pages.route('/invoice/<code>')
def invoice(code):
    """Render the invoice page for *code*, or 404 if no invoice matches."""
    model = read_model()
    match = None
    for candidate in model.invoices:
        if candidate.code == code:
            match = candidate
            break
    if match is None:
        flask.abort(404)
    if match['local']:
        flask.g.lang = 'ro'  # domestic invoices render in Romanian
    context = {
        'supplier': model.supplier,
        'invoice': match,
        'client': match.client,
        'n': flask.request.args.get('n', '1', type=int),
    }
    return flask.render_template('invoice_page.html', **context)
@pages.app_template_filter()
def nl2br(value):
    """HTML-escape *value*, converting newlines to markup-safe <br> tags."""
    escaped = escape(value)
    return escaped.replace('\n', Markup('<br>\n'))
@pages.app_template_filter('datefmt')
def datefmt(date):
    """Format a date as DD.MM.YYYY for display in templates."""
    # date.__format__ delegates to strftime, so this equals
    # date.strftime('%d.%m.%Y').
    return '{:%d.%m.%Y}'.format(date)
@pages.app_url_defaults
def bust_cache(endpoint, values):
    """Add a ?t=<mtime-key> cache-busting parameter to static file URLs."""
    if endpoint != 'static':
        return
    file_path = os.path.join(flask.current_app.static_folder,
                             values['filename'])
    if not os.path.exists(file_path):
        return
    # Last 6 hex digits of the mtime change whenever the file is edited.
    mtime = os.stat(file_path).st_mtime
    values['t'] = ('%x' % mtime)[-6:]
def translate(text_en, text_ro):
    """Pick the Romanian text when the per-request language is 'ro'."""
    if flask.g.get('lang') == 'ro':
        return text_ro
    return text_en
def create_app():
    """Application factory: configure Flask and register the blueprint."""
    app = flask.Flask(__name__)
    settings_path = os.path.join(app.root_path, 'settings.py')
    app.config.from_pyfile(settings_path)
    app.register_blueprint(pages)
    # Expose translate() to templates as the conventional _ helper.
    app.jinja_env.globals['_'] = translate
    return app
def create_manager(app):
    """Build a flask-script Manager exposing CLI commands for *app*.

    NOTE(review): relies on the legacy ``flask.ext.script`` import and a
    Python 2 ``print`` statement below, so this module is Python 2 only.
    """
    manager = Manager(app)
    @manager.command
    def dump():
        # Print date and RON total for every invoice in the data file.
        model = read_model()
        for invoice in model.invoices:
            print invoice['date'], invoice.total_ron
    return manager
|
from rest_framework import serializers
from Offers.models import Offers
class OfferSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing only the `url` field of Offers."""
    class Meta:
        model = Offers
        fields = ('url',)
    #url = serializers.URLField()
|
from django.contrib import admin
from .models import Cliente
class ClienteAdmin(admin.ModelAdmin):
    """Admin configuration for Cliente."""
    # Columns shown on the admin change-list page.
    list_display = ["nome", "sobrenome", "cpf", "telefone", "email"]
# Make Cliente manageable through the admin with the configuration above.
admin.site.register(Cliente, ClienteAdmin)
|
from domain.square import Square
class UI:
    """Console user interface for the game.

    Alternates moves between the human player ('X') and the computer
    ('O') until the board reports a win.
    """

    def __init__(self, g):
        # g: game object exposing board, setDifficulty, moveHuman,
        # moveComputer (see start()).
        self._game = g

    def _readMove(self):
        """Prompt until the player enters a valid move like "A 1".

        Returns the chosen Square (column letter -> y, 1-based row -> x).
        """
        while True:
            try:
                # split() (not split(' ')) tolerates repeated spaces.
                tokens = input("Enter move >> ").split()
                if len(tokens) != 2:
                    raise ValueError
                x = int(tokens[1]) - 1         # 1-based row -> 0-based index
                y = ord(tokens[0]) - ord('A')  # column letter -> 0-based index
                # Dry-run the move on a board copy so invalid squares are
                # rejected here without mutating the real board.
                self._game._board.copy().move(Square(x, y), 'X')
                # x and y are already ints; the original wrapped them in
                # redundant int() calls.
                return Square(x, y)
            except Exception:
                print("Invalid move!")

    def _getDifficulty(self):
        """Ask the player for a difficulty: 1 (Easy) or 2 (Hard)."""
        while True:
            print("Difficulty: \n1 - Easy \n2 - Hard")
            try:
                diff = int(input("Enter difficulty: "))
            except Exception as e:
                # print(e) is safe even for exceptions with empty args;
                # the original e.args[0] could itself raise IndexError.
                print(e)
                continue
            if diff in (1, 2):
                return diff
            print("Incorrect Input!")

    def start(self):
        """Run the game loop until the board reports a winner."""
        b = self._game.board
        playerSymbol = 'X'
        computerSymbol = 'O'
        self._game.setDifficulty(self._getDifficulty())
        playerMove = True
        while not b.isWon():
            print(b)
            if playerMove:
                move = self._readMove()
                try:
                    self._game.moveHuman(move, playerSymbol)
                except Exception as e:
                    print(e)
            else:
                self._game.moveComputer(computerSymbol, playerSymbol)
            playerMove = not playerMove
        print("Game over!")
        print(b)
        # playerMove was toggled after the winning move, so True here
        # means the computer moved last (and therefore won).
        if playerMove:
            print("Computer wins!")
        else:
            print("Player wins!")
# Created by MechAviv
# Quest ID :: 17613
# [Commerci Republic] The Minister's Son
# Dialogue: the player meets Leon Daniella (NPC 9390241).
# Boilerplate pattern used throughout this script:
#   setNpcOverrideBoxChat(npc) + sendNext/sendSay  -> the NPC speaks;
#   setSpeakerID/flipSpeaker/flipDialoguePlayerAsSpeaker/setBoxChat/
#   setColor(1) + sendSay                          -> the player speaks.
sm.setNpcOverrideBoxChat(9390241)
sm.sendNext("I'm... fine, you... meddling dumb-dumb!")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("#b(He's not very polite...)#k\r\nEr, you're Leon Daniella, right? You all right? Those cats really did a number on you.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("You idiot, I... said I was-- Hold up. You know my name? Are you that noble I sent money to via that foreign bank account? You promised me a present!")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("Um, no. I came from somewhere really far away... and that was just recently... ")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("But you knew my name.")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("We've never met before. Mayor Berry told me about you. I was actually just on my way to see you.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("Of course you're here to see me! You must be a trader. I'm sure you don't know since you came from so far away, but this is just an insignificant fishing village. If you're looking for real profits, #bSan Commerci#k is where you should trade. It's the capital, and a port, so there are lots of people there.")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("No, no. I'm not a trader, but I AM trying to go to San Commerci.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("You came from a far away place but you're not a trader, and you're here to see me? But why else would you come from so far away to see me?")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("Well, I didn't come here specifically to see you, it's more like #eI heard about you after I arrived#n... Err... It's easier to say that I came here to speak with your father, the Prime Minister, about a peace treaty, but I heard you were here.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("A peace treaty? Wait, are you from the #e#bHeaven Empire#k#n!? What do you want from us this time?")
# Player (internal monologue):
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("(What's the #e#bHeaven Empire#k#n? I've never heard of it, but they must not be on good terms with Commerci judging by Leon's reaction. I should clear things up before he gets mad.)")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("I don't know anything about that empire, but we have no ties to them. I'm here as a representative of Empress Cygnus to establish peaceful relations.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("So... you're some peace ambassador... But you're not even from the Heaven Empire? Well, then forget it.\r\nIn that case, you can be my best friend sidekick. Pat yourself on the back. Leon Daniella doesn't befriend just anyone! What's your name, anyway?")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("You can call me #h0#.")
# NPC:
sm.setNpcOverrideBoxChat(9390241)
sm.sendSay("#h0#? Okay. Now, you pat my back, and I'll pat yours. There, doesn't that feel nice?")
# Player:
sm.setSpeakerID(9390241)
sm.flipSpeaker()
sm.flipDialoguePlayerAsSpeaker()
sm.setBoxChat()
sm.setColor(1)
sm.sendSay("#b(Hahaha, what a loony, but I kinda like him.)#k\r\nAll right. Let's be friends.")
# Unhandled Message [INC_COMMITMENT_MESSAGE] Packet: 09 01 00 00 00 00
sm.completeQuest(17613)  # mark quest 17613 as finished for the player
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.