text
stringlengths 8
6.05M
|
|---|
'''
This is a script to split sample points into test & training sets,
then fit to linear regression.
INPUTS (in parameter file):
-samplePointsCsv
-targetField
-predictFields
-percentTraining
-outputPath
OUTPUTS:
-csv of results
'''
import sys, os, gdal
import numpy as np
from sklearn import linear_model
class Struct:
    """Lightweight namespace: exposes the given keyword arguments as attributes."""
    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
def getTxt(file):
    '''Read a parameter file and extract the run inputs.

    Lines are "title: value"; the first line is a header and lines starting
    with '#' are comments.  Returns a 5-tuple
    (inputPath, targetField, predictFields, percentTraining, outputPath);
    targetField is upper-cased and predictFields is a comma-split list.
    Missing entries come back as None instead of raising UnboundLocalError.
    '''
    inputPath = targetField = predictFields = percentTraining = outputPath = None
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(file, 'r') as txt:
        next(txt)  # skip the header line
        for line in txt:
            # Skip comments and lines without a "title: value" separator.
            if line.startswith('#') or ':' not in line:
                continue
            # Split only on the first ':' so Windows paths (C:\...) survive.
            title, var = line.split(':', 1)
            title = title.strip(' \n').lower()
            var = var.strip(' \n')
            if title == 'samplepointscsv':
                inputPath = var
            elif title == 'targetfield':
                targetField = var.upper()
            elif title == 'predictfields':
                predictFields = var.split(',')
            elif title == 'percenttraining':
                percentTraining = float(var)
            elif title == 'outputpath':
                outputPath = var
    return inputPath, targetField, predictFields, percentTraining, outputPath
def splitTrainingSets(inputData, targetField, predictFields, percentTraining):
    '''Split a structured array of sample points into training/testing sets.

    The first percentTraining% of rows become the training set, the rest the
    testing set (no shuffling is performed).  Each set is returned as a
    Struct with attributes: target, data (2-D predictor array), x, y.
    The duplicated train/test construction of the original is factored into
    one helper.
    '''
    totalPoints = len(inputData[targetField])
    numTraining = int((percentTraining / 100.) * totalPoints)

    def _makeSet(rows):
        # Build one Struct for a slice of the rows; the structured array's
        # tuple rows are flattened into a plain 2-D numeric array.
        predictors = inputData[list(predictFields)][rows]
        predictors = np.array([list(row) for row in predictors])
        return Struct(target=np.array(inputData[targetField][rows]),
                      data=predictors,
                      x=np.array(inputData['X'][rows]),
                      y=np.array(inputData['Y'][rows]))

    return (_makeSet(slice(None, numTraining)),
            _makeSet(slice(numTraining, None)))
def fitLinearRegression(x_train, y_train):
    """Fit and return an ordinary least-squares model for y_train ~ x_train."""
    # sklearn's fit() returns the estimator itself, so this chains safely.
    return linear_model.LinearRegression().fit(x_train, y_train)
def testLinearRegression(x_test, y_test, regr):
'''returns mean squared & R squared values'''
predictions = regr.predict(x_test)
mean_sq_error = np.mean((predictions-y_test)**2)
r_sq = regr.score(x_test, y_test)
return predictions, mean_sq_error, r_sq
def main(paramFile):
    '''Drive the full pipeline: read params, split sets, fit, test, save CSV.'''
    inputPath, targetField, predictFields, percentTraining, outputPath = getTxt(paramFile)
    #extract csv data
    print "\nExtracting Input CSV data..."
    inputFile = open(inputPath, 'rb')
    inputData = np.genfromtxt(inputFile, delimiter=',', names=True, case_sensitive=False, dtype=None) #structured array of strings
    inputFile.close()
    print "\nSplitting Sets..."
    trainSet, testSet = splitTrainingSets(inputData, targetField, predictFields, percentTraining)
    print "Training Set: ", len(trainSet.target), "pts; ", "Testing Set: ", len(testSet.target), "pts."
    print "\nTraining Model..."
    regressionModel = fitLinearRegression(trainSet.data, trainSet.target)
    print "\nPredicting with new Linear Regression Model..."
    predictions, mean_sq_error, r_sq = testLinearRegression(testSet.data, testSet.target, regressionModel)
    print "R Squared Value: ", r_sq
    #save results
    print "\nSaving results..."
    coeffs = regressionModel.coef_
    # One column per fitted coefficient, after the seven fixed columns.
    labels = ['X', 'Y', 'TARGET', 'PREDICTION', 'MEAN_SQ_ERROR', 'R_SQ', 'INTERCEPT'] + ['COEFF'+str(i+1) for i in range(len(coeffs))]
    model_array = np.zeros(len(testSet.target),dtype=[(l,'f8') for l in labels]) #structured array
    model_array['X'] = testSet.x
    model_array['Y'] = testSet.y
    model_array['TARGET'] = testSet.target
    model_array['PREDICTION'] = predictions
    # Scalar summary stats are stored only in the first row; remaining rows
    # keep the zero fill from np.zeros.
    model_array['MEAN_SQ_ERROR'][0] = mean_sq_error
    model_array['R_SQ'][0] = r_sq
    model_array['INTERCEPT'][0] = regressionModel.intercept_
    for ind,i in enumerate(labels[7:]):
        model_array[i][0] = coeffs[ind]
    np.savetxt(outputPath, model_array, delimiter=",", comments="", header=",".join(i for i in model_array.dtype.names), fmt='%s')
    print "\n\n Done!"
if __name__ == '__main__':
    # Usage: script.py <parameter_file>
    args = sys.argv
    if os.path.exists(args[1]):
        main(args[1])
    else:
        sys.exit('\nParameter File Not Found. Exiting.')
|
import argparse
import time
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from trading_session_gym.envs.trading_session_gym import TradingSession
MEAN_REWARD_BOUND = 0.95  # stop training once the 100-episode mean reward exceeds this
GAMMA = 0  # discount factor; 0 means only the immediate reward is learned
BATCH_SIZE = 20  # experiences per training batch
REPLAY_SIZE = 100000  # replay buffer capacity
LEARNING_RATE = 1e-4
SYNC_TARGET_STEPS = 1000  # steps between target-network syncs
REPLAY_START_SIZE = 10000  # min buffer fill before training begins
EPSILON_DECAY = 10**6  # steps over which epsilon decays linearly
EPSILON_START = 1.0
EPSILON_FINAL = 0.02
# One transition: (s, a, r, done, s').
Experience = collections.namedtuple('Experience', field_names=['state', 'action', 'reward', 'done', 'new_state'])
class ExperienceBuffer:
    """Fixed-capacity FIFO replay buffer of Experience tuples."""

    def __init__(self, capacity):
        self.buffer = collections.deque(maxlen=capacity)

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw batch_size distinct experiences, returned as column arrays."""
        picks = np.random.choice(len(self.buffer), batch_size, replace=False)
        chosen = [self.buffer[i] for i in picks]
        states, actions, rewards, dones, next_states = zip(*chosen)
        return (np.array(states), np.array(actions),
                np.array(rewards, dtype=np.float32),
                np.array(dones), np.array(next_states))
class DQN(nn.Module):
    """Three-layer fully-connected Q-network (hidden width = 4 * n_inputs)."""

    def __init__(self, n_inputs, n_outputs):
        super(DQN, self).__init__()
        hidden = 4 * n_inputs
        self.fc = nn.Sequential(
            nn.Linear(n_inputs, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_outputs),
        )

    def forward(self, x):
        # Inputs may arrive as non-float tensors; the linear layers need float32.
        return self.fc(x.float())
class Agent:
    """Plays epsilon-greedy steps in the environment and fills the replay buffer."""

    def __init__(self, env, exp_buffer):
        self.env = env
        self.exp_buffer = exp_buffer
        self.min_price = None  # lowest session price seen this episode
        self._reset()

    def _reset(self):
        """Start a fresh episode."""
        self.state = self.env.reset()
        self.total_reward = 0.0
        self.min_price = None

    def play_step(self, net, epsilon=0.0, device="cpu"):
        """Take one epsilon-greedy step.

        Returns done_reward / max_reward when the episode ends, else None.
        """
        done_reward = None
        max_reward = None
        # Track the lowest session price (used to scale the final reward).
        # Fixed: identity comparison with None instead of '== None'.
        if self.min_price is None or self.min_price > self.env.session_prices.min():
            self.min_price = self.env.session_prices.min()
        if np.random.random() < epsilon:
            action = self.env.action_space.sample()
        else:
            state_a = np.array([self.state], copy=False)
            state_v = torch.tensor(state_a).to(device)
            q_vals_v = net(state_v)
            _, act_v = torch.max(q_vals_v, dim=1)
            action = int(act_v.item())
        # do step in the environment
        new_state, reward, is_done, _ = self.env.step(action)
        self.total_reward += reward
        exp = Experience(self.state, action, reward, is_done, new_state)
        self.exp_buffer.append(exp)
        self.state = new_state
        if is_done:
            done_reward = self.total_reward
            # NOTE(review): heuristic upper bound on achievable reward —
            # confirm the 1e19 / min_price**10 scaling against the
            # TradingSession reward definition.
            max_reward = 1e19 * self.env.boundary / self.min_price ** 10
            print("Perf.: {}%".format(round(100 * done_reward / max_reward, 3)))
            self._reset()
            return done_reward / max_reward
        else:
            return None
def calc_loss(batch, net, tgt_net, device="cpu", cuda_async=False, gamma=0):
    """Compute the DQN temporal-difference MSE loss for one sampled batch.

    batch       -- tuple (states, actions, rewards, dones, next_states) arrays
    net         -- online Q-network (gradients flow through it)
    tgt_net     -- target Q-network (detached; provides bootstrap values)
    device      -- "cpu" or "cuda"
    cuda_async  -- use non-blocking transfers when device == "cuda"
    gamma       -- discount factor for the bootstrap term
    Returns a scalar loss tensor.
    """
    states, actions, rewards, dones, next_states = batch
    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    # Fixed: boolean mask instead of ByteTensor — uint8 mask indexing is
    # deprecated/removed in modern PyTorch.
    done_mask = torch.tensor(dones, dtype=torch.bool).to(device)
    if device == "cuda":
        states_v = states_v.cuda(non_blocking=cuda_async)
        next_states_v = next_states_v.cuda(non_blocking=cuda_async)
        actions_v = actions_v.cuda(non_blocking=cuda_async)
        rewards_v = rewards_v.cuda(non_blocking=cuda_async)
        done_mask = done_mask.cuda(non_blocking=cuda_async)
    # Q(s, a) for the actions actually taken.
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1).long()).squeeze(-1)
    # max_a' Q_tgt(s', a'); zero for terminal states, detached from the graph.
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0
    next_state_values = next_state_values.detach()
    expected_state_action_values = next_state_values * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
if __name__ == '__main__':
    writer = SummaryWriter(comment="-trading_session")
    # Prefer GPU when available.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("cuda available")
    else:
        device = torch.device("cpu")
        print("cuda not available")
    env = TradingSession(action_space_config = 'discrete')
    # Online network and periodically-synced target network.
    net = DQN(env.observation_space.shape[0], env.action_space.n).to(device)
    tgt_net = DQN(env.observation_space.shape[0], env.action_space.n).to(device)
    buffer = ExperienceBuffer(REPLAY_SIZE)
    agent = Agent(env, buffer)
    epsilon = EPSILON_START
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    total_rewards = []
    step_idx = 0
    ts_step = 0
    ts = time.time()
    best_mean_reward = None
    while True:
        step_idx += 1
        # Linear epsilon decay down to EPSILON_FINAL.
        epsilon = max(EPSILON_FINAL, EPSILON_START - step_idx / EPSILON_DECAY)
        reward = agent.play_step(net, epsilon, device=device)
        if reward is not None:
            # Episode finished: log speed and the 100-episode moving average.
            total_rewards.append(reward)
            speed = (step_idx - ts_step) / (time.time() - ts)
            ts_step = step_idx
            ts = time.time()
            mean_reward = np.mean(total_rewards[-100:])
            print("%d: done %d episodes, mean reward %.3f, eps %.2f, speed %.2f steps/s" % (step_idx, len(total_rewards), mean_reward, epsilon, speed))
            writer.add_scalar("epsilon", epsilon, step_idx)
            writer.add_scalar("speed", speed, step_idx)
            writer.add_scalar("reward_100", mean_reward, step_idx)
            writer.add_scalar("reward", reward, step_idx)
            # Checkpoint whenever the moving average improves.
            if best_mean_reward is None or best_mean_reward < mean_reward:
                torch.save(net.state_dict(), "model.dat")
                if best_mean_reward is not None:
                    print("Best mean reward updated %.3f -> %.3f, model saved" % (best_mean_reward, mean_reward))
                best_mean_reward = mean_reward
            if mean_reward > MEAN_REWARD_BOUND:
                print("Solved in %d steps!" % step_idx)
                break
        # Do not train until the replay buffer has enough samples.
        if len(buffer) < REPLAY_START_SIZE:
            continue
        # Periodically copy online weights into the target network.
        if step_idx % SYNC_TARGET_STEPS == 0:
            tgt_net.load_state_dict(net.state_dict())
        optimizer.zero_grad()
        batch = buffer.sample(BATCH_SIZE)
        loss_t = calc_loss(batch, net, tgt_net, device=device, cuda_async = False, gamma = GAMMA)
        loss_t.backward()
        optimizer.step()
    writer.close()
|
import datetime
import pickle
from flask import Flask, render_template, request, flash
import untangle
import requests
from requests import ConnectTimeout
app = Flask(__name__)
# NOTE(review): hard-coded secret key — move to an environment variable
# before deploying anywhere non-trivial.
app.secret_key = 'fjas7df98afh879sfh8'
file_name = 'todays-movie-id.txt'  # holds the currently selected movie id
history_file_name = 'history.txt'  # pickled list of {day, title} entries
section = 1  # Plex library section to list
http_port = 5000
plex_server_ip = '10.0.0.14'
plex_server_port = 32400
plex_server_identifier = '28cff6063535af7fd68b313ed816c6bde08d7e8d'
plex_client_ip = '10.0.0.27'
plex_client_port = 3005
plex_client_identifier = 'c3cf6e7d-46e2-4bf0-bf47-931b80d17a91'
def write_todays_movie(movie_id):
    """Persist today's selected movie id to the id file.

    Mode 'w' already truncates on open, so the explicit truncate() of the
    original was redundant; also avoid shadowing the builtin `file`.
    """
    with open(file_name, 'w') as fh:
        fh.write(movie_id)
def write_history(movies, todays_id, history):
    """Prepend the movie just shown to the pickled history file.

    The entry is dated to the previous day when called before 19:00 (the
    movie night rolls over in the evening).  A new entry is only added when
    the most recent history title differs.
    """
    last_movie = [x for x in movies if x['id'] == todays_id][0]
    today = datetime.datetime.now()
    if today.hour < 19:
        today = today - datetime.timedelta(days=1)
    if len(history) == 0 or history[0]['title'] != last_movie['title']:
        history.insert(0, {"day": today.date(), "title": last_movie['title']})
    # Fixed: the original passed an anonymous open(...) to pickle.dump and
    # leaked the handle; 'with' closes (and flushes) it deterministically.
    with open(history_file_name, "wb") as fh:
        pickle.dump(history, fh)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Show the movie picker; on POST, persist the selection and optionally
    start playback on the configured Plex client."""
    # Fixed: close the id file (the original leaked the handle).
    with open(file_name, 'r') as fh:
        todays_id = fh.read()
    movies = untangle.parse('http://{plex_server_ip}:{plex_server_port}/library/sections/{section}/all'.format(
        plex_server_ip=plex_server_ip, plex_server_port=plex_server_port, section=section))
    movies = [{'id': movie['ratingKey'], 'title': movie['title'], 'art': movie['thumb']} for movie in
              movies.MediaContainer.Video]
    try:
        # Fixed: close the history file; also tolerate a missing file, not
        # just an empty one.
        with open(history_file_name, "rb") as hf:
            history = pickle.load(hf)
    except (EOFError, FileNotFoundError):
        history = []
    if request.method == 'POST':
        write_todays_movie(request.form['movie'])
        write_history(movies, todays_id, history)
        todays_id = request.form['movie']
        todays_movie = [x for x in movies if x['id'] == todays_id][0]
        if 'play' in request.form:
            try:
                # Stop whatever is playing before starting the new movie.
                url = "http://{plex_client_ip}:{plex_client_port}/player/playback/stop" \
                    .format(plex_client_ip=plex_client_ip, plex_client_port=plex_client_port)
                requests.get(url, timeout=10)
                url = "http://{plex_client_ip}:{plex_client_port}/player/playback/playMedia?key=/library/metadata/{movie_id}" \
                      "&address={plex_server_ip}&port={plex_server_port}&X-Plex-Client-Identifier={plex_client_identifier}" \
                      "&machineIdentifier={plex_server_identifier}&protocol=http" \
                      "&path=http://{plex_server_ip}:{plex_server_port}/library/metadata/{movie_id}" \
                    .format(plex_client_ip=plex_client_ip, plex_client_port=plex_client_port,
                            plex_client_identifier=plex_client_identifier,
                            plex_server_ip=plex_server_ip, plex_server_port=plex_server_port,
                            plex_server_identifier=plex_server_identifier,
                            movie_id=todays_id)
                requests.get(url, timeout=10)
                flash('Started playback of movie ' + todays_movie['title'], 'info')
            except (ConnectionError, ConnectTimeout):
                flash('Could not connect to client or server', 'error')
        else:
            flash('Set next movie to ' + todays_movie['title'], 'info')
    context = {
        'movies': movies,
        'todays_id': todays_id,
        'history': history[:20],
        'plex_server_address': "{}:{}".format(plex_server_ip, plex_server_port)
    }
    return render_template('index.html', **context)
if __name__ == '__main__':
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network — disable debug outside local development.
    app.run(host='0.0.0.0', port=http_port, debug=True, threaded=True)
|
# a = ('val1', (1, 2 ), (2.5, 5.6, 8.2))
# print(a)
# print(type(a))
# print(a[1][1])
'''b = [['gmc', 'audi', 'bmw', 'ford'], ['saquib', 'ayush', 'dan', 'maniac'], [1, 2, 3, 4], [2.3, 4.6, 8.2, 3.4]]
print(b)
print(type(b))
print(b[0][-1:-3])'''
# Demonstrate list/tuple/set conversions and how set() removes duplicates.
lst = [1, 1, 2, 3, 3]
a = tuple(lst)
print(a)
b = list(a)
print(len(b))  # 5 — duplicates preserved
c = set(b)
print(len(c))  # 3 — duplicates removed
d = list(c)
print(len(d))
# range(1, 11, 1) lazily yields 1..10.
x = range(1, 11, 1)
for i in x:
    print(i)
e = list(x)
print(type(e))
|
import unittest
import decimal as d
import complex as c
import math
class ComplexDecimalTest(unittest.TestCase):
    """Unit tests for the project's ComplexDecimal type (module `complex`)."""

    def decimal_almost_equals(self, lhs, rhs, epsilon=7):
        """True if |lhs - rhs| < 10**-epsilon (epsilon = decimal places)."""
        epsilon = pow(10, -epsilon)
        if abs(d.Decimal(lhs - rhs)) < epsilon:
            return True
        return False

    def check_type(self, result):
        """True if both components of `result` are decimal.Decimal."""
        if isinstance(result.real, d.Decimal) and isinstance(result.imag, d.Decimal):
            return True
        return False

    def check_answer(self, comp, real, imag, epsilon=7):
        """True if comp's real/imag parts match within the given tolerance."""
        if self.decimal_almost_equals(comp.real, real, epsilon) is False:
            return False
        if self.decimal_almost_equals(comp.imag, imag, epsilon) is False:
            return False
        return True

    def test_add(self):
        a = c.ComplexDecimal(5, 6)
        b = c.ComplexDecimal(-3, 4)
        result = a + b
        self.assertTrue(self.check_answer(result, d.Decimal(2), d.Decimal(10)))
        self.assertTrue(self.check_type(result))

    def test_sub(self):
        a = c.ComplexDecimal(5, 6)
        b = c.ComplexDecimal(-3, 4)
        result = a - b
        self.assertTrue(self.check_answer(result, d.Decimal(8), d.Decimal(2)))
        self.assertTrue(self.check_type(result))

    def test_mult(self):
        a = c.ComplexDecimal(5, 6)
        b = c.ComplexDecimal(-3, 4)
        result = a * b
        self.assertTrue(
            self.check_answer(result, d.Decimal(-39), d.Decimal(2)))
        self.assertTrue(self.check_type(result))

    def test_div(self):
        a = c.ComplexDecimal(5, 6)
        b = c.ComplexDecimal(-3, 4)
        result = a / b
        self.assertTrue(
            self.check_answer(result, d.Decimal(0.36), d.Decimal(-1.52)))
        self.assertTrue(self.check_type(result))

    def test_conjugate(self):
        a = c.ComplexDecimal(5, 6)
        result = a.conjugate()
        self.assertTrue(self.check_answer(result, d.Decimal(5), d.Decimal(-6)))
        self.assertTrue(self.check_type(result))

    def test_sqrt(self):
        # NOTE(review): sqrt(5) is ~2.23607; the 2.23667 constants below pass
        # only because of the loose epsilon=1 tolerance — consider tightening.
        a = c.ComplexDecimal(5, 6)
        result = a.sqrt()
        self.assertTrue(
            self.check_answer(result, d.Decimal(2.5083), d.Decimal(1.18538), 1))
        a = c.ComplexDecimal(5, 0)
        result = a.sqrt()
        self.assertTrue(
            self.check_answer(result, d.Decimal(2.23667), d.Decimal(0), 1))
        a = c.ComplexDecimal(-5, 0)
        result = a.sqrt()
        self.assertTrue(
            self.check_answer(result, d.Decimal(0), d.Decimal(2.23667), 1))
        self.assertTrue(self.check_type(result))
        a = c.ComplexDecimal(0, -2)
        result = a.sqrt()
        self.assertTrue(
            self.check_answer(result, d.Decimal(1), d.Decimal(-1)))
        self.assertTrue(self.check_type(result))
        a = c.ComplexDecimal(0, 2)
        result = a.sqrt()
        self.assertTrue(
            self.check_answer(result, d.Decimal(1), d.Decimal(1)))
        self.assertTrue(self.check_type(result))

    def test_print(self):
        # str() formatting: omit zero components, render "a - bi" for
        # negative imaginary parts.
        a = c.ComplexDecimal(5, 6)
        ans = str(a)
        self.assertEqual(ans, "5 + 6i")
        a = c.ComplexDecimal(5, 0)
        ans = str(a)
        self.assertEqual(ans, "5")
        a = c.ComplexDecimal(0, 6)
        ans = str(a)
        self.assertEqual(ans, "6i")
        a = c.ComplexDecimal(5, -6)
        ans = str(a)
        self.assertEqual(ans, "5 - 6i")
if __name__ == '__main__':
    # Discover and run all ComplexDecimalTest cases.
    unittest.main()
|
import gdcm
import numpy
import sys
import os
from PIL import Image, ImageOps
def get_gdcm_to_numpy_typemap():
    """Returns the GDCM Pixel Format to numpy array type mapping."""
    return {
        gdcm.PixelFormat.UINT8:   numpy.uint8,
        gdcm.PixelFormat.INT8:    numpy.int8,
        gdcm.PixelFormat.UINT16:  numpy.uint16,
        gdcm.PixelFormat.INT16:   numpy.int16,
        gdcm.PixelFormat.UINT32:  numpy.uint32,
        gdcm.PixelFormat.INT32:   numpy.int32,
        gdcm.PixelFormat.FLOAT32: numpy.float32,
        gdcm.PixelFormat.FLOAT64: numpy.float64,
    }
def get_numpy_array_type(gdcm_pixel_format):
    """Returns a numpy array typecode given a GDCM Pixel Format.

    Raises KeyError for unsupported pixel formats.
    """
    typemap = get_gdcm_to_numpy_typemap()
    return typemap[gdcm_pixel_format]
def gdcm_to_numpy(image):
    """Converts a GDCM image to a numpy array.

    Applies a log-gamma adjustment plus a histogram stretch to 0..255 so the
    result can be saved as an 8-bit greyscale image.
    """
    pf = image.GetPixelFormat().GetScalarType()
    print 'pf', pf
    print image.GetPixelFormat().GetScalarTypeAsString()
    assert pf in get_gdcm_to_numpy_typemap().keys(), \
        "Unsupported array type %s"%pf
    d = image.GetDimension(0), image.GetDimension(1)
    print 'Image Size: %d x %d' % (d[0], d[1])
    dtype = get_numpy_array_type(pf)
    gdcm_array = image.GetBuffer()
    result = numpy.frombuffer(gdcm_array, dtype=dtype)
    maxV = float(result[result.argmax()])
    ## linear gamma adjust
    #result = result + .5*(maxV-result)
    ## log gamma
    result = numpy.log(result+50) ## 50 is apprx background level
    maxV = float(result[result.argmax()])
    result = result*(2.**8/maxV) ## histogram stretch to the 8-bit range
    result.shape = d
    return result
def readFile(filename):
    """Convert one DICOM file to an auto-contrasted greyscale JPEG.

    The JPEG is written next to the input as '<filename>.jpg'; exits the
    process with status 1 if the file cannot be read.
    """
    reader = gdcm.ImageReader()
    reader.SetFileName(filename)
    if not reader.Read():
        sys.exit(1)
    pixels = gdcm_to_numpy(reader.GetImage())
    # NOTE(review): PIL's size argument is (width, height); pixels.shape is
    # (dim0, dim1) — confirm the orientation for non-square images.
    pil_image = Image.frombuffer('L', pixels.shape,
                                 pixels.astype(numpy.uint8),
                                 'raw', 'L', 0, 1)
    pil_image = ImageOps.autocontrast(pil_image, cutoff=.1)
    pil_image.save(filename + '.jpg')
    print(filename + '.jpg')
def readFilesInDir(path):
    """Recursively convert every file under `path` via readFile()."""
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)  # build the absolute name
        if os.path.isfile(full_path):
            readFile(full_path)
        if os.path.isdir(full_path):
            readFilesInDir(full_path)
if __name__ == "__main__":
#readFile('75336.dcm')
#readFile('64118.dcm')
readFilesInDir('D:\Python\TestDicom\TEST_DB')
|
import random
def rohanmultiplication(number):
    """Return the 10-entry times table of `number` with exactly one entry
    corrupted by a small random offset at a random index in 1..9 (index 0,
    i.e. number*1, is never corrupted)."""
    corrupt_idx = random.randint(1, 9)
    table = [number * k for k in range(1, 11)]
    table[corrupt_idx] += random.randint(1, 8)
    return table
def iscorrect(table, number):
    """Return the 1-based multiplier whose table entry is wrong (printing the
    bad value first), or None if every entry is correct."""
    for multiplier in range(1, 11):
        expected = multiplier * number
        if table[multiplier - 1] != expected:
            print(table[multiplier - 1])
            return multiplier
if __name__ == "__main__":
number= int(input("Enter any no"))
mytable = rohanmultiplication(number)
print(mytable)
print(iscorrect(mytable,number))
|
# python 2.7.3
import sys
import math
from collections import deque
def first2second(n, magic):
    '''Read n black-cell coordinates from stdin and emit the region in BFS
    order as neighbour strings (R/T/L/B per step), one line per cell; the
    last line ends with "." instead of ",".'''
    # NOTE(review): assumes the black cells form one 4-connected region and
    # that (leftmost, lowest) — min x and min y taken independently — is
    # itself a black cell; confirm against the problem statement.
    isBlack = [[False for x in range(magic)] for y in range(magic)]
    leftmost, lowest = magic, magic
    for i in range(n):
        [x, y] = map(int, sys.stdin.readline().split())
        isBlack[x][y] = True
        if x < leftmost:
            leftmost = x
        if y < lowest:
            lowest = y
    dq = deque()
    dq.append((leftmost, lowest))
    isBlack[leftmost][lowest] = False  # clearing the flag marks "visited"
    info = 'RTLB'  # direction letters, parallel to Delta
    Delta = [(1, 0), (0, 1), (-1, 0), (0, -1)]
    print leftmost, lowest
    while True:
        current = dq.popleft()
        # print 'current is:', current
        neighbour = ''
        for delta in Delta:
            [xtemp, ytemp] = map(lambda a, b: a + b, current, delta)
            if isBlack[xtemp][ytemp] == True:
                neighbour += info[Delta.index(delta)]
                dq.append([xtemp, ytemp])
                isBlack[xtemp][ytemp] = False
        if len(dq) != 0:
            print neighbour + ','
        else:
            print '.'
            break
def seconde2first(data, magic):
    '''Inverse of first2second: start from the seed cell `data` and rebuild
    the black region from neighbour strings read line-by-line from stdin,
    then print the cell count followed by every cell's coordinates.'''
    isBlack = [[False for x in range(magic)] for y in range(magic)]
    isBlack[data[0]][data[1]] = True
    dq = deque()
    dq.append(tuple(data))
    info = 'RTLB'  # direction letters, parallel to Delta
    Delta = [(1, 0), (0, 1), (-1, 0), (0, -1)]
    while True:
        message = raw_input()
        current = dq.popleft()
        # All but the trailing ',' or '.' are neighbour direction letters.
        if message[:-1]:
            for ch in message[:-1]:
                delta = Delta[info.index(ch)]
                [xtemp, ytemp] = map(lambda a, b: a + b, current, delta)
                isBlack[xtemp][ytemp] = True
                dq.append((xtemp, ytemp))
        if message[-1] == '.':  # '.' terminates the encoding
            break
    cnt = 0
    for x in range(magic):
        for y in range(magic):
            if isBlack[x][y]:
                cnt += 1
    print cnt
    for x in range(magic):
        for y in range(magic):
            if isBlack[x][y]:
                print x, y
# Grid side length; all coordinates are assumed to lie in [0, magic).
magic = 15
# First stdin line: a single number selects encode mode (n cells follow);
# a coordinate pair selects decode mode.
data = map(int, sys.stdin.readline().split())
if len(data) == 1:
    first2second(data[0], magic)
else:
    seconde2first(data, magic)
|
# Generated by Django 2.2 on 2019-03-14 19:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: drops the two foreignkeyToUser fields,
    creates wikipostModel, and links relativeitemsmodel to it."""

    dependencies = [
        ('wikiApp', '0006_auto_20190314_1629'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='newusermodel',
            name='foreignkeyToUser',
        ),
        migrations.RemoveField(
            model_name='relativeitemsmodel',
            name='foreignkeyToUser',
        ),
        migrations.CreateModel(
            name='wikipostModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=200)),
                ('text', models.TextField(default='', max_length=200)),
                # NOTE(review): default='' on a DateField is not a valid date;
                # null=True makes it survive, but consider default=None.
                ('datecreated', models.DateField(default='', null=True)),
                ('lastupdate', models.DateField(default='', null=True)),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wikiApp.newuserModel')),
            ],
        ),
        migrations.AddField(
            model_name='relativeitemsmodel',
            name='links',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wikiApp.wikipostModel'),
        ),
    ]
|
import random
TOTAL = 1000000
RANGE = 100
def main():
    """Write roughly TOTAL random '0'/'1' characters to binary.bin, emitted
    in runs of up to RANGE identical digits."""
    written = 0
    with open("binary.bin", "w") as out:
        while written < TOTAL:
            # Run length is capped by both RANGE and the remaining budget.
            run_len = random.randrange(min(RANGE, TOTAL - written) + 1)
            out.write(str(random.randrange(2)) * run_len)
            written += run_len
if __name__ == "__main__":
main()
|
"""The Python implementation of the GRPC client."""
from __future__ import print_function
import logging
import sys
import os
from concurrent import futures
import grpc
from grpc_reflection.v1alpha import reflection
from keysearch.proto import keysearch_pb2
from typing import Text
from keysearch.proto.keysearch_pb2_grpc import KeywordSearchStub
def get_fileindex(stub: KeywordSearchStub, theword: str) -> keysearch_pb2.RepeatedResult:
    """Ask the keyword-search service which files contain `theword`."""
    query = keysearch_pb2.Query(word=theword)
    return stub.Whohas(query)
def anotherfunction(stub: KeywordSearchStub, sourcedId: str) -> keysearch_pb2.RepeatedResult:
    # Placeholder for a future RPC; no call is made and None is returned.
    return None
# Dispatch table: CLI service name -> client call taking (stub, argument).
getters: dict = {
    'whohas': get_fileindex,
    'anotherservice': anotherfunction
}
def run(getter: str, word: str):
    """Dispatch `getter` over a local insecure gRPC channel and print the reply."""
    # TLS deliberately disabled for local development.
    #credentials = grpc.ssl_channel_credentials()
    with grpc.insecure_channel('localhost:50051') as channel:
        client = KeywordSearchStub(channel)
        reply = getters[getter](client, word)
        print(f"response:\n{reply}")
#example : make client
if __name__ == '__main__':
    # Usage: client.py <service-name> <word>
    logging.basicConfig()
    print(sys.argv[1])
    run(sys.argv[1], sys.argv[2])
|
from yusuan_PO.BasePage.BasePage import *
from yusuan_PO.BasePage.logger import *
from selenium.webdriver.support.ui import Select
class NewProject(Page):
    """Page object for creating a new budget/quote project."""
    all_button_loc = (By.LINK_TEXT, '全部')
    project_list = (By.CLASS_NAME, 'list-over-effect')  # project list rows
    new_button_loc = (By.CLASS_NAME, 'text-effect')  # "New" button
    popup_loc = (By.XPATH, '//*[@id="q"]/div/div[2]/div[3]/div[1]/label/input')
    submit_button_loc = (By.XPATH, '//*[@id="q"]/div/div[3]/input[1]')
    offer_area_loc = (By.CLASS_NAME, 'ht_52_1')
    area_loc = (By.XPATH, '/html/body/div[1]/div[1]/div[2]/div[2]/div[7]/div[3]/div[1]/span')
    regular_project_new = (By.XPATH, '//*[@id="project"]/div[1]/div[3]/div[1]/div[2]/a')  # "add" for a regular project
    design_part_loc = (By.XPATH, '//*[@id="project"]/div[1]/div[3]/div[1]/div[3]/div[1]/span')  # design section
    live_decoration_loc = (By.XPATH, '//*[@id="project"]/div[1]/div[3]/div[1]/div[3]/div[2]/span')  # on-site decoration section
    showcase_part_loc = (By.XPATH, '//*[@id="project"]/div[1]/div[3]/div[1]/div[3]/div[3]/span')  # commercial showcase section
    design_part_new_loc = (By.XPATH, '//*[@id="project"]/div[2]/div/div[1]/div[3]/div[1]/div[2]/a')  # add within design section
    design_button_loc = (By.XPATH, '//*[@id="project"]/div[2]/div/div[1]/div[3]/div[1]/div[3]/div/span')  # "design"
    design_new_loc = (By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[1]/div[3]/a')  # add a design row
    save_loc = (By.ID, 'create_save')  # save
    submit_loc = (By.ID, 'create_submit_aduit')  # submit for approval
    assessor_loc = (By.ID, 'butget_assessor')  # choose the reviewer
    assessor_submit_loc = (By.ID, 'btnOk')  # confirm submission in the popup
    ceiling_engineering_button = (By.XPATH, '//*[contains(@class,third_new_add) and @create-item="7"]')
    # Click the "All" tab.
    def all_button(self):
        logger.info('点击全部')
        self.click_button(self.all_button_loc)
# 点击新建
def new_button(self, pid):
rows = self.find_elements(*self.project_list)
for row in rows:
projectId = row.find_element_by_xpath('.//div[contains(@class,"w_15")]').text
if pid == projectId:
global curr_row
curr_row = row
break
sleep(2)
logger.info('找到项目id,点击新建')
curr_row.find_element(By.CLASS_NAME,"text-effect").click() # 编辑按钮
    # In the popup, choose "create completely new".
    def popup(self):
        logger.info('点击弹窗完全新建')
        self.click_button(self.popup_loc)
    # Confirm the popup.
    def submit_button(self):
        logger.info('点击弹窗确认')
        self.click_button(self.submit_button_loc)
    # Select the quotation area: Chengdu.
    def offer_area(self):
        # Open the quotation-area dropdown first, then pick the city.
        logger.info('点击报价区域')
        self.click_button(self.offer_area_loc)
        time.sleep(1)
        logger.info('选择成都')
        self.click_button(self.area_loc)
    # Click "add" for a regular project.
    def regular_new(self):
        logger.info('点击常规项目新增')
        self.click_button(self.regular_project_new)
        sleep(1)
    # Open the design section.
    def design_part(self):
        logger.info('点击设计部分')
        self.click_button(self.design_part_loc)
    # Click "add" within the design section.
    def design_part_new(self):
        logger.info('点击设计部分新增')
        self.click_button(self.design_part_new_loc)
        sleep(1)
    # Click the "design" item.
    def design_button(self):
        logger.info('点击设计')
        self.click_button(self.design_button_loc)
    # Add two design rows.
    def design_new(self):
        for i in range(2):  # add two entries
            logger.info('点击设计新增,两条')
            self.click_button(self.design_new_loc)
            sleep(1)
    # Fill in the design details.
    def complete_design_information(self):  # populate both design rows
        logger.info('完善设计资料')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[1]/div[1]/input[1]')\
            .send_keys('设计1')  # first design name
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[1]/div[2]/input')\
            .send_keys('10')  # budgeted quantity
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[1]/div[3]/input')\
            .send_keys('10')  # quoted price
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[1]/div[4]/div[1]/input')\
        #     .send_keys('9')  # tax-inclusive price
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/div[1]/input[1]')\
            .send_keys('设计2')  # second design name
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/div[2]/input')\
            .send_keys('10')  # budgeted quantity
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/div[3]/input')\
            .send_keys('10')  # quoted price
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/div[4]/div[1]/input')\
        #     .send_keys('9')  # tax-inclusive price
    # Collapse the design module.
    def packup_design(self):
        logger.info('收起设计模块')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[1]/div[2]/div/div[1]/div[1]/span').click()
    # Open the on-site decoration section.
    def live_decoration(self):
        logger.info('点击现场装修部分')
        self.click_button(self.live_decoration_loc)
    # Click "add" within the on-site decoration section.
    def live_decoration_new(self):
        logger.info('点击现场装修部分新增')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[2]/a').click()
        print(self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[2]/a').text)
        sleep(2)
    # Click the ceiling-engineering item.
    def ceiling_engineering(self):
        logger.info('点击天棚工程')
        self.click_button(self.ceiling_engineering_button)
    # Add two ceiling-engineering rows.
    def ceiling_engineering_new(self):
        for i in range(2):
            logger.info('点击天棚工程新增,两条')
            self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[1]/div[3]/a').click()
            sleep(1)
    # Fill in the ceiling-engineering details.
    def complete_ceiling_engineering(self):
        logger.info('完善天棚工程')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
            .send_keys('天棚工程1')  # ceiling-work name
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]')\
            .click()  # unit: square metre
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[3]/input')\
            .send_keys('10')  # budgeted quantity
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input')\
        #     .send_keys('10')  # material quote (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input')\
            .send_keys('10')  # material quote (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[1]/input')\
        #     .send_keys('10')  # labour quote (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[2]/input')\
            .send_keys('10')  # labour quote (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input')\
        #     .send_keys('9')  # material cost (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input')\
            .send_keys('9')  # material cost (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[1]/input')\
        #     .send_keys('9')  # labour cost (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[2]/input')\
            .send_keys('9')  # labour cost (actual)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[1]/div[4]/div[1]/input')\
            .send_keys('这是天棚1备注信息')  # remark
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
            .send_keys('天棚工程2')  # ceiling-work name
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[3]')\
            .click()  # unit: m
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[3]/input')\
            .send_keys('10')  # budgeted quantity
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input')\
        #     .send_keys('10')  # material quote (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input')\
            .send_keys('10')  # material quote (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[1]/input')\
        #     .send_keys('10')  # labour quote (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[2]/input')\
            .send_keys('10')  # labour quote (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input')\
        #     .send_keys('9')  # material cost (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input')\
            .send_keys('9')  # material cost (actual)
        # self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[1]/input')\
        #     .send_keys('9')  # labour cost (standard)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/input')\
            .send_keys('9')  # labour cost (actual)
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div/div[2]/div[2]/div[2]/div[4]/div[1]/input')\
            .send_keys('这是天棚2备注信息')  # remark
    # Collapse the ceiling module.
    def packup_ceiling(self):
        logger.info('收起天棚工程')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[1]/div[1]/div[1]/span').click()
    # Click the flooring-work item.
    def flooring_work(self):
        logger.info('点击地面工程')
        self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[2]/span').click()
    # Add two flooring-work rows.
    def flooring_work_new(self):
        for i in range(2):
            logger.info('点击地面工程新增')
            self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/a').click()
            sleep(1)
# 完善地面工程的信息
def complete_flooring_work(self):
logger.info('完善地面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('地面工程1') # 地面工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]')\
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[4]/div[1]/input')\
.send_keys('这是地面1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('地面工程2') # 地面工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[3]')\
.click() # 单位:m
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[4]/div[1]/input')\
.send_keys('这是地面2备注信息') # 备注
# 收起地面模块
def packup_flooring(self):
logger.info('收起地面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/span').click()
# 点击墙面工程
def metope_engineering(self):
logger.info('点击墙面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[3]/span').click()
# 点击墙面工程新增
def metope_engineering_new(self):
for i in range(2):
logger.info('点击墙面工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[1]/div[3]/a').click()
sleep(1)
# 完善墙面工程信息
def complete_metope_engineering(self):
logger.info('完善墙面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('墙面工程1') # 墙面工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是墙面1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('墙面工程2') # 墙面工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('10') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('10') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是墙面2备注信息') # 备注
# 收起墙面模块
def packup_metope(self):
logger.info('收起墙面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[3]/div[1]/div[1]/span').click()
# 点击水电工程
def waterpower_engineering(self):
logger.info('点击水电工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[4]/span').click()
# 点击水电工程新增
def waterpower_engineering_new(self):
for i in range(2):
logger.info('点击水电工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[1]/div[3]/a').click()
sleep(1)
# 完善水电工程信息
def complete_waterpower_engineering(self):
logger.info('完善地面工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('水电工程1') # 水电工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]')\
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[1]/div[4]/div[1]/input')\
.send_keys('这是水电1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('水电工程2') # 水电工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]')\
.click() # 单位:m
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[2]/div[2]/div[2]/div[4]/div[1]/input')\
.send_keys('这是水电2备注信息') # 备注
# 收起水电模块
def packup_waterpower(self):
logger.info('收起水电工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[4]/div[1]/div[1]/span').click()
# 点击店招工程
def signage_engineering(self):
logger.info('点击店招工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[5]/span').click()
# 点击店招工程新增
def signage_engineering_new(self):
for i in range(2):
logger.info('点击店招工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[1]/div[3]/a').click()
sleep(1)
# 完善店招工程信息
def complete_signage_engineering(self):
logger.info('完善店招工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('店招工程1') # 店招工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]')\
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[1]/div[4]/div[1]/input')\
.send_keys('这是店招1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('店招工程2') # 店招工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]')\
.click() # 单位:m
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[1]/div[3]/input')\
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input')\
# .send_keys('10') # 材料报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input')\
.send_keys('10') # 材料报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[1]/input')\
# .send_keys('10') # 人工报价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[2]/div[2]/div/div[2]/input')\
.send_keys('10') # 人工报价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input')\
# .send_keys('9') # 材料成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input')\
.send_keys('9') # 材料成本实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[1]/input')\
# .send_keys('9') # 人工成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/input')\
.send_keys('9') # 人工成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[2]/div[2]/div[2]/div[4]/div[1]/input')\
.send_keys('这是店招2备注信息') # 备注
# 收起店招模块
def packup_signage(self):
logger.info('完善店招工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[5]/div[1]/div[1]/span').click()
# 点击拆除建渣工程
def demolition_engineering(self):
logger.info('点击拆除建渣工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[6]/span').click()
# 点击拆除建渣工程新增
def demolition_engineering_new(self):
for i in range(2):
logger.info('点击拆除建渣工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[1]/div[3]/a').click()
sleep(1)
# 完善拆除建渣工程资料
def complete_demolition_engineering(self):
logger.info('完善拆除建渣工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('拆除建渣工程1') # 拆除建渣工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是拆除建渣工程1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('拆除建渣工程2') # 拆除建渣工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是拆除建渣2备注信息') # 备注
# 收起拆除建渣
def packup_demolition(self):
logger.info('收起拆除建渣工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[6]/div[1]/div[1]/span').click()
# 其它工程
def other_projects(self):
logger.info('点击其它工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[7]/span').click()
# 其他工程新增
def other_projects_new(self):
for i in range(2):
logger.info('点击其它工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[1]/div[3]/a').click()
sleep(1)
# 完善其它工程资料
def complete_other_projects(self):
logger.info('完善其它工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('其它工程1') # 其它工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是其它工程1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('其它工程2') # 其它工程名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是其它工程2备注信息') # 备注
# 收起其它工程模块
def packup_live_other(self):
logger.info('完善其它工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[2]/div[7]/div[1]/div[1]/span').click()
# 收起现场装修模块
def packup_live(self):
logger.info('收起现场装修模块')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[2]/div[1]/div[1]/span').click()
# 点击商业展柜部分
def showcase_part(self):
logger.info('点击商业展柜部分')
self.click_button(self.showcase_part_loc)
# 点击商业展柜部分新增
def showcase_part_new(self):
logger.info('点击商业展柜部分新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[1]/div[3]/div[1]/div[2]/a').click()
sleep(1)
# 点击展柜制作
def under_production(self):
logger.info('点击商业展柜制作')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[1]/div[3]/div[1]/div[3]/div[1]/span').click()
# 点击展柜制作新增
def under_production_new(self):
for i in range(2):
logger.info('点击商业展柜制作新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[1]/div[3]/a').click()
sleep(1)
# 完善展柜制作资料
def complete_under_production(self):
logger.info('完善商业展柜')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('展柜制作1') # 展柜制作名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是展柜制作1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('展柜制作2') # 展柜制作名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是展柜制作2备注信息') # 备注
# 收起展柜制作模块
def packup_under_production(self):
logger.info('收起商业展柜')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[1]/div[1]/div[1]/span').click()
# 点击美工制作
def artists_create(self):
logger.info('点击美工制作')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[1]/div[3]/div[1]/div[3]/div[2]/span').click()
# 点击美工制作新增
def artists_create_new(self):
for i in range(2):
logger.info('点击美工制作新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[1]/div[3]/a').click()
sleep(1)
# 完善美工制作资料
def complete_artists_create(self):
logger.info('完善美工制作')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('美工制作1') # 美工制作名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是美工制作1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('美工制作2') # 美工制作名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是美工制作2备注信息') # 备注
# 收起美工制作模块
def packup_artists(self):
logger.info('收起美工制作')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/span').click()
# 点击商业展柜其它工程
def showcase_other(self):
logger.info('点击其它工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[1]/div[3]/div[1]/div[3]/div[3]/span').click()
# 点击商业展柜其它新增
def showcase_other_new(self):
for i in range(2):
logger.info('点击其它工程新增')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[1]/div[3]/a').click()
sleep(1)
# 完善商业展柜其它资料
def complete_showcase_other(self):
logger.info('完善其它工程')
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[1]/input[1]')\
.send_keys('商业展柜其它1') # 商业展柜其它名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[1]/div[4]/div[1]/input') \
.send_keys('这是商业展柜其它1备注信息') # 备注
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[1]/input[1]')\
.send_keys('商业展柜其它2') # 商业展柜其它名称
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[2]/select/option[2]') \
.click() # 单位:平方米
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[1]/div[3]/input') \
.send_keys('10') # 预算数量
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[1]/input') \
# .send_keys('10') # 单价标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[2]/div[1]/div/div[2]/input') \
.send_keys('10') # 单价实际
# self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[1]/input') \
# .send_keys('9') # 成本标准
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[3]/div[1]/div/div[2]/input') \
.send_keys('9') # 成本实际
self.find_element(By.XPATH, '//*[@id="project"]/div[2]/div[3]/div[2]/div[3]/div[2]/div[2]/div[2]/div[4]/div[1]/input') \
.send_keys('这是商业展柜其它2备注信息') # 备注
# 全部收起、展开、滑动查看
def all_open_close(self):
sleep(2)
self.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[2]/div[4]/div[1]/div[1]/span').click()
sleep(2)
js = "window.scrollTo(0,document.body.scrollHeight)"
self.driver.execute_script(js)
sleep(2)
js = "window.scrollTo(0,0)"
self.driver.execute_script(js)
sleep(2)
self.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[2]/div[4]/div[1]/div[2]/span').click()
    # Add a brand
    def add_brand(self):
        """Attach the '周大生' brand to the quote via the autocomplete picker,
        then fill in one brand-design line item (name, budget quantity,
        quoted price)."""
        self.find_element(By.XPATH, '/html/body/div[1]/div[1]/div[2]/div[4]/div[1]/div[3]/span').click()  # "add brand" button
        self.find_element(By.ID, 'brand_auto').send_keys('周大生')
        sleep(1)
        # Pick the first suggestion from the autocomplete dropdown.
        self.find_element(By.XPATH, '//*[@id="brand_list"]/div/div[1]').click()
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/div[1]/div[2]/a').click()  # brand: add row
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[1]/div[3]/div[1]/div[3]/div[1]/span').click()  # add design section
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[1]/div[3]/div[1]/div[2]/a').click()  # design section: add row
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[1]/div[3]/div[1]/div[3]/div/span').click()  # click "design"
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[2]/div/div[1]/div[3]/a').click()
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/div[1]/input[1]')\
            .send_keys('品牌设计')  # brand-design item name
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/div[2]/input')\
            .send_keys('10')  # budget quantity
        self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/div[3]/input')\
            .send_keys('10')  # quoted price
        # self.find_element(By.XPATH, '/html/body/div[1]/div[2]/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/div[4]/div[1]/input')\
        #     .send_keys('9')  # tax-included price
    # Save
    def save(self):
        """Click the save control and accept the confirmation alert."""
        self.find_element(*self.save_loc).click()
        self.alert_accept()
    # Submit for approval
    def submit(self, text):
        """Submit the quote for approval, choosing the approver whose visible
        label equals `text` from the approver dropdown."""
        self.find_element(*self.submit_loc).click()
        Select(self.find_element(*self.assessor_loc)).select_by_visible_text(text)
        self.find_element(*self.assessor_submit_loc).click()
def test_new_project_base(driver, pid):
    """Drive the full "new project quote" flow end to end: fill the design,
    on-site-decoration and showcase sections, then collapse/expand the form
    and attach a brand."""
    new_page = NewProject(driver)
    # Design section
    new_page.all_button()
    new_page.new_button(pid)
    new_page.popup()
    new_page.submit_button()
    new_page.offer_area()
    new_page.regular_new()
    new_page.design_part()
    new_page.design_part_new()
    new_page.design_button()
    new_page.design_new()
    new_page.complete_design_information()
    new_page.packup_design()
    new_page.regular_new()
    new_page.live_decoration()
    # Ceiling works
    new_page.live_decoration_new()
    new_page.ceiling_engineering()
    new_page.ceiling_engineering_new()
    new_page.complete_ceiling_engineering()
    new_page.packup_ceiling()
    # Flooring works
    new_page.live_decoration_new()
    new_page.flooring_work()
    new_page.flooring_work_new()
    new_page.complete_flooring_work()
    new_page.packup_flooring()
    # Wall works
    new_page.live_decoration_new()
    new_page.metope_engineering()
    new_page.metope_engineering_new()
    new_page.complete_metope_engineering()
    new_page.packup_metope()
    # Plumbing & electrical works
    new_page.live_decoration_new()
    new_page.waterpower_engineering()
    new_page.waterpower_engineering_new()
    new_page.complete_waterpower_engineering()
    new_page.packup_waterpower()
    # Shop-sign works
    new_page.live_decoration_new()
    new_page.signage_engineering()
    new_page.signage_engineering_new()
    new_page.complete_signage_engineering()
    new_page.packup_signage()
    # Demolition / construction-debris works
    new_page.live_decoration_new()
    new_page.demolition_engineering()
    new_page.demolition_engineering_new()
    new_page.complete_demolition_engineering()
    new_page.packup_demolition()
    # Other on-site works
    new_page.live_decoration_new()
    new_page.other_projects()
    new_page.other_projects_new()
    new_page.complete_other_projects()
    new_page.packup_live_other()
    new_page.packup_live()
    new_page.regular_new()
    new_page.showcase_part()
    # Showcase production
    new_page.showcase_part_new()
    new_page.under_production()
    new_page.under_production_new()
    new_page.complete_under_production()
    new_page.packup_under_production()
    # Art/graphics production
    new_page.showcase_part_new()
    new_page.artists_create()
    new_page.artists_create_new()
    new_page.complete_artists_create()
    new_page.packup_artists()
    # Other showcase items
    new_page.showcase_part_new()
    new_page.showcase_other()
    new_page.showcase_other_new()
    new_page.complete_showcase_other()
    new_page.all_open_close()
    new_page.add_brand()
# Save the quote as a draft
def test_new_project_save(driver, pid):
    """Fill in a complete quote, then save it."""
    new_page = NewProject(driver)
    test_new_project_base(driver, pid)
    # NOTE(review): test_new_project_base builds its own NewProject on the
    # same driver; this local instance is only used for the final save().
    new_page.save()
# Submit the quote for review
def test_new_project_submit(driver, pid, text='黑桃K'):
    """Fill in a complete quote, then submit it to the approver named `text`."""
    new_page = NewProject(driver)
    test_new_project_base(driver, pid)
    new_page.submit(text)
|
from django.urls import path, re_path, include
from . import views

# URL routes for the users app: dashboard, auth flows, contest registration
# and public user profiles.
urlpatterns = [
    path('', views.dashboard, name='dashboard'),
    #path('authmail/', views.auth_mail, name='authmail'),
    path('settings/', include('apps.users.settingsurls')),
    path('signup/', views.userSignup, name='usersignup'),
    path('fblogin/', views.fbUserLogin, name='fblogin'),
    path('login/', views.userLogin, name='userlogin'),
    path('logout/', views.userLogout, name='userlogout'),
    path('registercontest/', views.registerContest, name='registercontest'),
    # NOTE(review): no leading '^' anchor — Django matches this with
    # re.search, so URLs that merely end in 'u/<username>/' also match;
    # confirm '^u/...' was not intended.
    re_path('u/(?P<username>[a-z0-9_]{0,})/$', views.userProfile, name='userprofile'),
]
|
'''
This script is intended to iterate over a range of errors in natural frequency to determine the
residual vibration amplitude of a boom crane subject to luff
Created by: Daniel Newman
Date: 01-13-2017
'''
from timeit import default_timer as timer
import numpy as np
from matplotlib import pyplot as plt
import os
from scipy.optimize import minimize
import sys
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
import InputShaping as shaping
import Boom_Crane as BC
import Generate_Plots as genplt

start = timer()

# Use lab plot style
plt.style.use('Crawlab')

# define constants
DEG_TO_RAD = np.pi / 180
G = 9.81 # m / s**2
vtol = 1. * DEG_TO_RAD #radians

# t vector characteristics
tmax = 30
t_step = .01
t = np.arange(0, tmax, t_step)
#Startt = np.arange(0,2,.01)
Startt = np.array([0.])
# Index into t corresponding to the start time (used to trim transients).
Startt_step = np.round(Startt[0]/t_step).astype(int)

# Geometry of the system
mass = 10 # Mass of the payload in kg
r = 15 # length of the boom in meters

# Initial angular conditions
gamma_init = 40. # Luff angle
gamma_dot_init = 0.
phi_init = -5. #-0.0859 / DEG_TO_RAD # Radial swing angle
phi_dot_init = 0. #-0.0147 / DEG_TO_RAD
l_init = 10
l_dot_init = 0.

# Actuator constraints
Amax = 83.33 # deg / s^2
Vmax = 6.67 # deg / s

# State vector: [phi, phi_dot, gamma, gamma_dot, l, l_dot, flag].
X0 = np.array([phi_init,phi_dot_init,gamma_init, gamma_dot_init, 0,0,False])
null_guess = np.array([0.,0.,0.,0.])
X0 *= DEG_TO_RAD
# Cable length/velocity are lengths, not angles — overwrite after the
# degree-to-radian conversion above.
X0[-3] = l_init
X0[-2] = l_dot_init

# C = array of actuator constraints
C = np.array([Amax, Vmax])
C *= DEG_TO_RAD

# Desired final angles
gamma_fin = 70.
theta_fin = 0.
Distance = np.array([gamma_fin - gamma_init])
Distance *= DEG_TO_RAD

# Xf = array of final desired values
Xf = np.array([gamma_fin])
Xf *= DEG_TO_RAD

# Initial height of the payload above the luffing axis
h_init = r*np.sin(X0[2]) - l_init
# The final height of the payload is an input to the system
#h_fin = h_init
h_fin = r * np.sin(Xf[0]) - l_init

# L = array of initial and final cable lengths and heights
L = [l_init, h_init, h_fin]

# Check for height constraints
if h_init <= 0:
    print('Caution: initial height of the payload is at or below \
the luffing axis by {} meters.'.format(abs(h_init)))
if h_fin >= r:
    print('Error: final desired height greater than boom length.')
    exit()

disturbance = [0,0]

# Sweep of modelled-vs-actual natural-frequency ratios, and the residual
# amplitude arrays filled in per shaper below.
normalized_error = np.arange(0.4,1.6,0.1)
UMZV_Amp = np.zeros_like(normalized_error)
ZV_IC_Amp = np.zeros_like(normalized_error)
ZV_Amp = np.zeros_like(normalized_error)
Unshaped_Amp = np.zeros_like(normalized_error)

# p = packed parameters: constraints, cable length, boom length, start time,
# initial/final luff angles, time step, shaper label.
p = [C, l_init, r, Startt, gamma_init * DEG_TO_RAD,gamma_init * DEG_TO_RAD,t_step,'Asymmetric']
umzvd_test_response = BC.response(null_guess,X0, t, p,Distance)
umzvd_test_amp = BC.response(null_guess,X0, t[Startt_step:len(t)], p,
    Distance,False)[0,0]
print(umzvd_test_amp)

p = [C, l_init, r, Startt, gamma_init * DEG_TO_RAD,gamma_init * DEG_TO_RAD,t_step,'Asymmetric']
# ZV shaper optimised with initial-condition compensation.
zv_ic_phase = BC.optimize_ZV(X0,t,p,Distance)
zv_ic_response = BC.response(zv_ic_phase,X0, t, p,Distance)
zv_dup_response = BC.response(null_guess,X0, t, p,Distance,omega_n_error=1.)
for i in np.arange(0,len(normalized_error)):
    ZV_IC_Amp[i] = BC.response(zv_ic_phase,X0, t[Startt_step:len(t)], p,
        Distance,False,omega_n_error=normalized_error[i])[0,0]
    # BC.response mutates the cable-length entry of X0; restore it each pass.
    X0[-3] = l_init

p = [C, l_init, r, Startt, gamma_init * DEG_TO_RAD,gamma_init * DEG_TO_RAD,t_step,'UMZVIC']
umzv_phase = BC.optimize_UMZV(X0,t,p,Distance)
for i in np.arange(0,len(normalized_error)):
    UMZV_Amp[i] = BC.response(umzv_phase,X0, t[Startt_step:len(t)], p,
        Distance,False,omega_n_error=normalized_error[i])[0,0]
    X0[-3] = l_init
umzv_response = BC.response(umzv_phase,X0, t, p,Distance)
umzv_dup_response = BC.response(null_guess,X0, t, p,Distance,omega_n_error=1.)

# Pack relevant variables
p = [C, l_init, r, Startt, gamma_init * DEG_TO_RAD,gamma_init * DEG_TO_RAD,t_step,'Unshaped']
unshaped_response = BC.response(null_guess,X0, t, p,Distance,level_luffing=False)
for i in np.arange(0,len(normalized_error)):
    Unshaped_Amp[i] = BC.response(null_guess,X0, t, p,
        Distance,False,omega_n_error=normalized_error[i])[0,0]
    X0[-3] = l_init

# Pack relevant variables
p = [C, l_init, r, Startt, gamma_init * DEG_TO_RAD,gamma_fin * DEG_TO_RAD,t_step,'ZV Shaped']
# Call Boom crane response based on given values
zv_response = BC.response(null_guess,X0, t, p,Distance,level_luffing=False)
# NOTE(review): zv_dup_response was already assigned above (Asymmetric case)
# and is silently overwritten here — confirm which one the plots should use.
zv_dup_response = BC.response(null_guess,X0, t, p,Distance,omega_n_error=1.)
for i in np.arange(0,len(normalized_error)):
    ZV_Amp[i] = BC.response(null_guess,X0, t, p,
        Distance,False,omega_n_error=normalized_error[i])[0,0]

# Express residual vibration as a percentage of the unshaped amplitude.
# NOTE(review): the normalised ZV-IC amplitudes are stored in UMZV_IC_Amp,
# yet the raw ZV_IC_Amp is what gets plotted below — the two names look
# swapped; confirm which array the final plot should use.
UMZV_IC_Amp = np.divide(ZV_IC_Amp,Unshaped_Amp)*100
UMZV_Amp = np.divide(UMZV_Amp,Unshaped_Amp)*100
ZV_Amp = np.divide(ZV_Amp,Unshaped_Amp)*100

folder = 'Figures/Normalized Error/Crane/Luff_{}_{}'.format(gamma_init,gamma_fin)
genplt.compare_responses(t,zv_response[:,0],'ZV',umzv_response[:,0],'UMZV-IC',unshaped_response[:,0],'Unshaped',zv_ic_response[:,0],'ZV-IC',xlabel='Time (sec)',ylabel='Swing Angle (rad)',title='Swing Angle',folder=folder)
genplt.compare_responses(t,zv_dup_response[:,0],'ZV',umzv_dup_response[:,0],'UMZV-IC',unshaped_response[:,0],'Unshaped',zv_dup_response[:,0],'UMZVD-IC',xlabel='Time (sec)',ylabel='Swing Angle (rad)',title='Swing Angle Test',folder=folder)
genplt.compare_responses(normalized_error,UMZV_Amp,'UMZV',ZV_Amp,'ZV',title='Percent Vibration Vs. Normalized Error',xlabel=r'Normalized Error $(\frac{\omega}{\omega_m})$',ylabel='Percent Residual Vibration',folder=folder)
genplt.compare_responses(normalized_error,ZV_IC_Amp,'ZV-IC',UMZV_Amp,'UMZV',ZV_Amp,'ZV',title='Percent Vibration Vs. Normalized Error',xlabel=r'Normalized Error $(\frac{\omega}{\omega_m})$',ylabel='Percent Residual Vibration',folder=folder)
|
# 用于跟python解释器进行交互
import sys
# print(sys.argv)
# 执行:python sys模块.py post download
# 结果:['sys模块.py', 'post', 'download']
def post():
    """Handler for the 'post' command-line action: announce it on stdout."""
    sys.stdout.write('post' + '\n')
def download():
    """Handler for the 'download' command-line action: announce it on stdout."""
    sys.stdout.write('download' + '\n')
# Dispatch on the first command-line argument (disabled):
# if sys.argv[1] =='post':
#     post()
# elif sys.argv[1] == 'download':
#     download()
# sys.exit(0)  # 0 means a normal exit
import time  # NOTE(review): imported but never used in this demo
# Show the module search path and the platform name.
print(sys.path)
print(sys.platform) # returns the operating system platform name
# Echo one line read from stdin back to stdout.
val = sys.stdin.readline() # input
sys.stdout.write(val) # output
|
"""
The following function verifies if, given a 'board' setup, the cell (i, j) is safe - can take a queen which would not be attacked.
We should verify:
- There exists an integer c for which board[i][c] = 1 ;
- There exists an integer l for which board[l][j] = 1 ;
- There exists an integer k for which board[l +- k][j +- k] = 1 ;
If any of these conditions are verified, then the cell (i, j) is not safe.
@params: board is a 2D matrix representing the chess board, cells with
"""
def isSafe(board, i, j):
    """Return True when a queen placed at the 1-based cell (i, j) would not
    be attacked by any queen already on `board` (cells with value 1).

    Boards smaller than 3x3 are rejected outright (returns False), matching
    the original contract.

    Improvements over the original: the dead `_size` variable (marked
    `# ERASE`) is removed, and the set-flag-and-keep-scanning pattern is
    replaced by early returns — same result, less wasted work.
    """
    size = len(board)
    if size < 3:
        return False
    row, col = i - 1, j - 1  # convert to 0-based indices
    # Attack along the same row.
    for c in range(size):
        if board[row][c] == 1:
            return False
    # Attack along the same column.
    for l in range(size):
        if board[l][col] == 1:
            return False
    # Attack along any of the four diagonals radiating from (row, col).
    for k in range(size):
        # down-right
        if row + k < size and col + k < size and board[row + k][col + k] == 1:
            return False
        # up-right
        if row - k >= 0 and col + k < size and board[row - k][col + k] == 1:
            return False
        # up-left
        if row - k >= 0 and col - k >= 0 and board[row - k][col - k] == 1:
            return False
        # down-left
        if row + k < size and col - k >= 0 and board[row + k][col - k] == 1:
            return False
    return True
def examplePrinter(board, x, y):
    """Print whether the 1-based cell (x, y) of `board` is safe for a queen."""
    result = isSafe(board, x, y)
    print('isSafe ?', result)
## Example boards: each pair shows an attacking queen on / off the line of
## sight of the tested cell (3, 4).
# case of a queen attacking from the upper-left diagonal
board_up_left_unsafe = [[0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
board_up_left_safe = [[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
# examplePrinter(board_up_left_unsafe, 3, 4)
# examplePrinter(board_up_left_safe, 3, 4)
# case of a queen attacking from the upper-right diagonal
board_up_right_unsafe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
board_up_right_safe = [[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
# examplePrinter(board_up_left_unsafe, 3, 4)
# examplePrinter(board_up_left_safe, 3, 4)
# case of a queen attacking from the lower-left diagonal
board_low_left_unsafe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0]]
board_low_left_safe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]]
# examplePrinter(board_low_left_unsafe, 3, 4)
# examplePrinter(board_low_left_safe, 3, 4)
# case of a queen attacking from the lower-right diagonal
board_low_right_unsafe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]]
board_low_right_safe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
# examplePrinter(board_low_right_unsafe, 3, 4)
# examplePrinter(board_low_right_safe, 3, 4)
# case of same column
board_column_unsafe = [[0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
# examplePrinter(board_column_unsafe, 3, 4)
# case of same row
board_row_unsafe = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
# examplePrinter(board_row_unsafe, 3, 4)
# case of small board: boards under 3x3 are always reported unsafe
board_small = [[0, 0], [0, 0]]
examplePrinter(board_small, 2, 2)
|
from django.db import models
class BlogPost(models.Model):
    """A single blog post: a title, its body text and a creation timestamp."""
    title = models.CharField(max_length=200)
    text = models.TextField()
    date_added = models.DateTimeField(auto_now_add=True)  # set once, on creation

    def __str__(self):
        """Return a string representation of the model."""
        # NOTE(review): returns the body text rather than the title —
        # confirm that is the intended admin/list display.
        return self.text
|
print("SCSCSSSKYKYKKK"[input()%7::7])
|
import scrapy
class QuotesSpider(scrapy.Spider):
    """Spider that downloads the first two pages of quotes.toscrape.com and
    saves each raw response body to a local HTML file."""
    name = "quotes"

    # Scrapy schedules the Request objects returned by start_requests and
    # invokes the callback assigned to each request with the created
    # Response instance.
    def start_requests(self):
        urls = [
            'https://quotes.toscrape.com/page/1/',
            'https://quotes.toscrape.com/page/2/'
        ]
        for url in urls:
            # url : the URL to request
            # callback : invoked when the response arrives
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self,response):
        """Save the raw response body to quotes-<page>.html."""
        self.log("******response*********" )
        self.log(response.url)
        # Second-to-last path segment is the page number.
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename,'wb') as f:
            f.write(response.body)
        self.log('Saved file %s' % filename)
|
from django.shortcuts import render
from .models import *
from django.db.models import Q
# Create your views here.
def BootstrapFilterView(request):
    """Filter the Journal queryset from the GET parameters of the search
    form and render it together with all categories."""
    queryset = Journal.objects.all()
    categories = Category.objects.all()
    # Raw form inputs (None when absent, '' when submitted empty).
    title_contains_query = request.GET.get("title_contains")
    # NOTE(review): reads the "title_exact" form field but filters on `id`
    # below — confirm the field name / filter pairing is intended.
    id_exact_query = request.GET.get("title_exact")
    title_or_author_query = request.GET.get("title_or_author")
    view_count_min = request.GET.get("view_count_min")
    view_count_max = request.GET.get("view_count_max")
    publish_date_min = request.GET.get("publish_date_min")
    publish_date_max = request.GET.get("publish_date_max")
    category = request.GET.get("category")
    reviewed = request.GET.get("reviewed")
    not_reviewed = request.GET.get("not_reviewed")

    def is_valid_queryparam(param):
        # A parameter counts only when it was submitted and non-empty.
        return param != "" and param is not None

    # The three text filters are mutually exclusive (elif chain): only the
    # first one present is applied.
    if is_valid_queryparam(title_contains_query):
        queryset = queryset.filter(title__icontains=title_contains_query)
    elif is_valid_queryparam(id_exact_query):
        queryset = queryset.filter(id=id_exact_query)
    elif is_valid_queryparam(title_or_author_query):
        queryset = queryset.filter(Q(title__icontains=title_or_author_query) |
            Q(author__name__icontains=title_or_author_query)).distinct()
    # Range filters are strict (__gt/__lt), so the boundary values themselves
    # are excluded — NOTE(review): confirm __gte/__lte were not intended.
    if is_valid_queryparam(view_count_min):
        queryset = queryset.filter(views__gt=view_count_min)
    if is_valid_queryparam(view_count_max):
        queryset = queryset.filter(views__lt=view_count_max)
    if is_valid_queryparam(publish_date_min):
        queryset = queryset.filter(publish_date__gt=publish_date_min)
    if is_valid_queryparam(publish_date_max):
        queryset = queryset.filter(publish_date__lt=publish_date_max)
    # "Choose..." is the placeholder option of the category dropdown.
    if is_valid_queryparam(category) and category != "Choose...":
        queryset = queryset.filter(categories__name=category)
    if reviewed:
        queryset = queryset.filter(reviewed=True)
    elif not_reviewed:
        queryset = queryset.filter(reviewed=False)
    context = {
        "queryset": queryset,
        "categories": categories,
    }
    return render(request, "bootstrap_form.html", context )
|
from app import hello
from flask import Flask
from flask.helpers import flash
app = Flask(__name__)

# Show a user's post: both URL segments are converted (str/int) by Flask
# before the view is called.
@app.route('/home/user/<string:username>/posts/<int:id>')
def function(username,id):
    # NOTE(review): the function name 'function' is also the url_for()
    # endpoint name, so renaming it would break reverse lookups.
    return "Hello,"+username+" You are watching your post which id: "+str(id)
# NOTE(review): the route is named 'onlyget' yet accepts POST as well —
# confirm whether methods=['GET'] was intended.
@app.route('/onlyget',methods=['GET','POST'])
def get_req():
    return 'Hello All'

if __name__=="__main__":
    # Development server with the debugger/reloader enabled.
    app.run(debug=True)
|
#------------------------
# Romberg Method
#
# Lucas Motta Freire
#------------------------
import numpy as np
def trapz(a, b, f, t, i):
    """One refinement step of the composite trapezium rule.

    Parameters:
        a, b : end points of the integration interval
        f    : integrand (must accept numpy arrays)
        t    : previous value T(h_{i-1}); ignored when i == 0
        i    : refinement index
    Return: the refined value T(h_i).
    """
    if i:
        step = (b - a) / 2 ** i
        # Midpoints that are new at this refinement level.
        fresh = np.linspace(a + step, b - step, 2 ** (i - 1))
        return 0.5 * t + step * np.sum(f(fresh))
    # Base level: single trapezium over [a, b].
    return (b - a) / 2 * (f(a) + f(b))
def romberg(a, b, f, epsilon=1.0e-8, MaxIter=50):
    '''Parameters:
    a, b : Points Extrems; epsilon, MaxIter : Tolerance
    f : integral function
    Return: Romberg Table in array and a boolean value for the convergence of the method'''
    convergence = True
    # TR[i, k] is the k-th Richardson extrapolation of the level-i
    # trapezium estimate; only the lower triangle gets filled.
    TR = np.zeros((MaxIter, MaxIter), dtype=float)
    TR[0,0] = trapz(a, b, f, 0, 0)
    for i in range(1, MaxIter): # building the table in array format
        TR[i, 0] = trapz(a, b, f, TR[i-1,0], i)
        for k in range(1, i+1):
            TR[i, k] = (4**k * TR[i, k-1] - TR[i-1, k-1])/(4**k - 1)
        # Stop once the last two entries of the current row agree to the
        # requested relative tolerance.
        if np.abs(TR[i,k] - TR[i, k-1]) <= epsilon * np.abs(TR[i,k]): # Relative Variation
            break
    if np.abs(TR[i,k] - TR[i, k-1]) > epsilon * np.abs(TR[i,k]): # Maximum iterations reached
        convergence = False
    # Return only the filled portion of the table.
    return TR[:i+1,:k+1], convergence
def integral(a, b, f, epsilon=1.0e-8, MaxIter=50):
    '''Parameters:
    a, b : Points Extrems; epsilon, MaxIter : Tolerance
    f : integral function
    Return: Integral value obtained by the Romberg method, or None when the
    method did not converge (unchanged from the original contract).'''
    # Fix: run the Romberg iteration once and reuse both results — the
    # original called romberg() twice, doubling all the work.
    table, converged = romberg(a, b, f, epsilon, MaxIter)
    if converged:
        n = len(table)
        # Bottom-right entry is the most extrapolated estimate.
        return table[n-1 , n-1]
    return None
|
# my_str = "blablacarblablacar"
# my_symbol = "bla"
#
# my_symbol_count = my_str.count(my_symbol)
# print(my_symbol_count)
# res_message = f"{my_symbol}\n" * my_symbol_count
# print(res_message.strip())
# for _ in range(my_symbol_count):
# print(my_symbol)
# print(my_symbol * _ )
# my_str = "bla BLA car"
# my_str = my_str.lower()
# symbols_heap = []
# for symbol in my_str:
# if symbol not in symbols_heap:
# symbols_heap.append(symbol)
# res_len = (len(symbols_heap))
# print(res_len)
# my_str = "qwerty"
# my_list = []
# for index in range(len(my_str)):
# if not index % 2:
# symbol = my_str[index]
# my_list.append(symbol)
# print(my_list)
# for index, symbol in enumerate(my_str):
# if not index % 2:
# my_list.append(symbol)
# print(my_list)
# my_str = "qwerty"
# my_list = []
# str_index = [3, 2, 5, 5, 1, 0, 5, 0, 3, 2, 1]
#
# for index in str_index:
# symbol = my_str[index]
# my_list.append(symbol)
# print(my_list)
# my_number = 1234567890987654345678987654741052963
# digit_count = len(str(my_number))
# print(digit_count)new_number_str = number_str
# number_str = str(my_number)
# max_symbol = max(number_str)
# print(max_symbol)
#
# test_list = ["1", "2", "3", "4"]
# print(max(test_list))
# number_str = str(my_number)
# new_number_str = number_str[::-1]
# new_number = int(new_number_str)
# print(new_number)
#
# new_number = int(str(my_number)[::-1])
# print(new_number)
# my_list = [1,2,5,3,-8,4]
# my_str = 'qwerty'
# sorted_list = sorted(my_list)
# print(sorted_list)
# Rearrange the digits of a number into ascending order and print the result.
source_number = 123123
digits_ascending = sorted(str(source_number))
rebuilt_number = int(''.join(digits_ascending))
print(rebuilt_number)
|
from ConfigParser import SafeConfigParser  # Python 2 stdlib (configparser on Py3)

### global variables ###
# Shared parser instance plus the module-level settings that the accessor
# functions below read and write.
configReader = SafeConfigParser()
project_name = ''
infile = ''
gaincnv_path = ''      # CNV gain-regions file
losscnv_path = ''      # CNV loss-regions file
cancer_type = ''
spltbams_path = ''     # directory of split BAM files
het_path = ''
nonhet_path = ''
outbamfn = ''          # output BAM file name
results_path=''
java_path =''
beagle_path=''
samtools_path=''
bedtools_path=''
vcftools_path=''
def InitConfigReader(configFile):
    """Load the given config file into the shared parser.

    Fix: the original passed `open(configFile)` straight to readfp() and
    never closed the handle; the file is now closed deterministically via a
    context manager.
    """
    with open(configFile) as fh:
        configReader.readfp(fh)

def GetConfigReader():
    """Return the shared config parser instance."""
    return configReader
def GetResultsPath():
    """Return the directory where results are written."""
    return results_path

def SetResultsPath(path):
    """Record the results directory."""
    global results_path
    results_path= path

def GetSplitBamsPath():
    """Return the directory holding the split BAM files."""
    return spltbams_path

def SetSplitBamsPath(spltbams):
    """Record the split-BAM directory."""
    global spltbams_path
    spltbams_path= spltbams
def SetCancerType(can_type):
    """Record the cancer type being processed."""
    global cancer_type
    cancer_type = can_type

def GetCancerType():
    """Return the recorded cancer type."""
    return cancer_type

def SetGainCNV(cnv_gain):
    """Record the path of the CNV gain-regions file."""
    global gaincnv_path
    gaincnv_path = cnv_gain

def GetGainCNV():
    """Return the CNV gain-regions file path."""
    return gaincnv_path

def SetLossCNV(cnv_loss):
    """Record the path of the CNV loss-regions file."""
    global losscnv_path
    losscnv_path= cnv_loss

def GetLossCNV():
    """Return the CNV loss-regions file path."""
    return losscnv_path
def SetOutputFileName(out_bam_file):
    """Record the output BAM file name."""
    global outbamfn
    outbamfn = out_bam_file

def GetOutputFileName():
    """Return the output BAM file name."""
    return outbamfn

def SetLogPath(path):
    """Record the log directory.

    NOTE(review): log_path is not initialised with the other module globals
    above, so GetLogPath() raises NameError until SetLogPath() is called.
    """
    global log_path
    log_path = path

def GetLogPath():
    """Return the log directory (set via SetLogPath)."""
    return log_path
def SetHetPath(path):
    """Record the heterozygous-variants path."""
    global het_path
    het_path = path

def GetHetPath():
    """Return the heterozygous-variants path."""
    return het_path

def SetNonHetPath(path):
    """Record the non-heterozygous-variants path."""
    global nonhet_path
    nonhet_path = path

def GetNonHetPath():
    """Return the non-heterozygous-variants path."""
    return nonhet_path
def SetJavaPath(path):
    """Record the path to the Java executable."""
    global java_path
    java_path = path

def GetJavaPath():
    """Return the Java executable path."""
    return java_path
def SetBeaglePath(path):
    """Record the path to the Beagle jar/executable.

    Fix: the original assigned to `java_path` (copy-paste from SetJavaPath),
    so setting the Beagle path silently clobbered the Java path and the real
    beagle_path global was never written.
    """
    global beagle_path
    beagle_path = path

def GetBeaglePath():
    """Return the recorded Beagle path.

    Fix: the original returned `java_path`; it now returns beagle_path.
    """
    return beagle_path
def SetSoftwarePath(j_path, b_path, s_path, bd_path, v_path,sb_path):
    """Write all external-tool paths into the [SOFTWARE] section of the
    shared config parser."""
    configReader.set('SOFTWARE','java_path',str(j_path))
    configReader.set('SOFTWARE','beagle_path',str(b_path))
    configReader.set('SOFTWARE','samtools_path',str(s_path))
    configReader.set('SOFTWARE','bedtools_path',str(bd_path))
    configReader.set('SOFTWARE','vcftools_path',str(v_path))
    configReader.set('SOFTWARE','sambamba_path',str(sb_path))

def GetSoftwarePath():
    """Read the external-tool paths back from the [SOFTWARE] section.

    Returns a (java, beagle, samtools, bedtools, vcftools, sambamba) tuple.
    """
    java_path = configReader.get('SOFTWARE', 'java_path')
    beagle_path = configReader.get('SOFTWARE', 'beagle_path')
    samtools_path = configReader.get('SOFTWARE', 'samtools_path')
    bedtools_path = configReader.get('SOFTWARE', 'bedtools_path')
    vcftools_path = configReader.get('SOFTWARE', 'vcftools_path')
    sambamba_path = configReader.get('SOFTWARE', 'sambamba_path')
    return java_path, beagle_path,samtools_path, bedtools_path, vcftools_path,sambamba_path
|
import numpy as np
from collections import namedtuple
import os
import sys
# Run from the script's own directory so relative output paths land there.
os.chdir(sys.path[0])
import imgLibrary

# Optional command-line argument: path of the input PGM (P2) image.
# NOTE(review): when no argument is given, `name` is never bound and the
# readP2 call below raises NameError — confirm whether a default was intended.
if(len(sys.argv)>1):
    name=sys.argv[1]

# Canny-style pipeline: Gaussian smoothing -> gradient edge detection ->
# non-maximum suppression -> hysteresis thresholding.
image=imgLibrary.readP2(name)
smooth=imgLibrary.covolve2D2D(imgLibrary.gaussian2D(3,4),image)
edges=imgLibrary.detectEdge(smooth)
thin=imgLibrary.supressEdge(edges)
# Hysteresis thresholds at 6% / 16% of the maximum shade.
final=imgLibrary.supressNoise(thin,0.06*thin.max_shade,0.16*thin.max_shade)
imgLibrary.writeP2("smooth.pgm",smooth)
imgLibrary.writeP2("edges.pgm",edges)
imgLibrary.writeP2("thin.pgm",thin)
imgLibrary.writeP2("final.pgm",final)
|
import glob
import os
import numpy as np
import warnings
import matplotlib.pyplot as plt
from Image_Analysis import image_analysis, write_asymetry_to_file, write_maxima_to_file, write_maxima_to_file_2, write_detections
# from Image_Analysis import detect_star
from scipy.optimize import minimize
from utils import parallel_process
from tqdm import trange
from star_detection_parameters import Parameters
# imgs = glob.glob('/Users/Sahl/Desktop/University/Year_Summer_4/Summer_Project/Data/5*.fits')
# imgs = glob.glob('/shome/sahlr/summer_project_2017/Data/5*.fits')
# All FITS images to analyse (machine-specific path; alternatives above).
imgs = glob.glob('/disk1/ert/fits_images/*.fits')
def write_detect_output(detect_output, filename):
    """Write star-detection results to `filename` as CSV.

    Each item of detect_output is an iterable of three values
    (galaxy name, minimum 180-degree asymmetry flux, detection flag);
    one row is written per item under a fixed header.

    Fix: the file is now opened with a context manager, so the handle is
    closed even if formatting a row raises.
    """
    with open(filename, 'w') as out_file:
        out_file.write('Galaxy_name,Min_A_flux_180,detection\n')
        for dat in detect_output:
            out_file.write('{},{},{}\n'.format(*dat))
# Analyse the images in blocks of `step_size` to bound memory use; each
# block is processed in parallel across 11 workers.
step_size = 10000
nsteps = len(imgs)//step_size + 1
out = []
for k in trange(nsteps, desc="Blocks"):
    low_limit = k*step_size
    high_limit = (k+1)*step_size
    out += parallel_process(imgs[low_limit:high_limit], image_analysis, 11)

# Persist all accumulated results.
write_asymetry_to_file('Detections_best/asymmetry_267k.csv', out)
write_detections('Detections_best/detections_267k.csv', out)
write_maxima_to_file_2('maxima_alt_267k.csv', out)
write_maxima_to_file('maxima_267k.csv', out)
# step_size = 10004
# nsteps = len(out)//step_size + 1
# res = []
# with warnings.catch_warnings():
# warnings.simplefilter("ignore", category=RuntimeWarning)
# parameter = Parameters(bin_size=52, n_bins_avg=8,
# factor=1.72, data_file='Detections_best/asymmetry_2k.csv')
# for k in trange(nsteps, desc="Blocks"):
# low_limit = k*step_size
# high_limit = (k+1)*step_size
# res += parallel_process(out[low_limit:high_limit], parameter.star_detect,
# n_jobs=3)
# write_detect_output(res, 'Detections_best/{}_{}_{:.2f}.csv'.format(*parameter.get_params()))
# write_asymetry_to_file('asymetry2.txt', out)
# os.system('git add auto*.txt')
# os.system('git commit -m "Output auto upload"')
# os.system('git push')
|
from keras import Model, optimizers
from keras.layers import Input, Dense
from keras.utils import to_categorical
from keras.datasets import fashion_mnist
import matplotlib.pyplot as plt
# Parameters and hyperparameters.
img_shape = (28 * 28, )   # flattened 28x28 grayscale input
hidden_dim = 100
output_dim = 10           # ten clothing classes
batch_size = 128
learning_rate = 0.1
epochs = 15
def build_model():
    """Define the model: a 4-hidden-layer sigmoid MLP with softmax output."""
    _input = Input(shape=img_shape)
    _hidden = Dense(hidden_dim, activation='sigmoid')(_input)
    _hidden = Dense(hidden_dim, activation='sigmoid')(_hidden)
    _hidden = Dense(hidden_dim, activation='sigmoid')(_hidden)
    _hidden = Dense(hidden_dim, activation='sigmoid')(_hidden)
    _output = Dense(output_dim, activation='softmax')(_hidden)
    model = Model(inputs=_input, outputs=_output)
    return model
def load_data():
    """Load Fashion-MNIST, flatten the images to 784-vectors, scale them to
    [0, 1] and one-hot encode the labels."""
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float') / 255.
    x_test = x_test.astype('float') / 255.
    # Show the label shapes before/after one-hot encoding.
    print(f'Before: {y_train.shape}')
    print(f'y_train[0]: {y_train[0]}')
    y_train = to_categorical(y_train, num_classes=output_dim)
    print(f'After: {y_train.shape}')
    print(f'y_train[0]: {y_train[0]}')
    y_test = to_categorical(y_test, num_classes=output_dim)
    return x_train, y_train, x_test, y_test
def set_optimizers():
    """Define the optimization algorithms to compare, keyed by display name."""
    return {
        'SGD': optimizers.SGD(lr=learning_rate),
        'Adagrad': optimizers.Adagrad(lr=learning_rate),
        'Adadelta': optimizers.Adadelta(lr=learning_rate),
        'Adam': optimizers.Adam(),
        'Nadam': optimizers.Nadam(),
    }
def main():
    """Train the same MLP on Fashion-MNIST once per optimizer and plot the
    training-loss curves to loss.png."""
    x_train, y_train, x_test, y_test = load_data()
    optim = set_optimizers()
    results = {}
    for key in optim.keys():
        print(f'---Now running: {key} model---')
        # Build a fresh (untrained) model per optimizer so runs are comparable.
        model = build_model()
        model.compile(optimizer=optim[key], loss='categorical_crossentropy', metrics=['accuracy'])
        results[key] = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    plt.figure()
    for key in optim.keys():
        # Training loss per epoch for this optimizer.
        loss = results[key].history['loss']
        plt.plot(range(1, epochs+1), loss, marker='.', label=key)
    plt.legend(loc='best', fontsize=10)
    plt.grid()
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.savefig('loss.png')
# Script entry point.
if __name__ == '__main__':
    main()
|
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import sys
import argparse
try:
from PIL import Image
from PIL import ExifTags
from PIL.ExifTags import TAGS
except ImportError:
import Image
def sizeCanvas(imageHeight, imageWidth, canvasHeight, canvasWidth):
    """Shrink the canvas to the image's aspect ratio.

    Returns (height, width): the largest canvas that fits inside the given
    canvasHeight x canvasWidth while matching the image's proportions.
    """
    landscape = imageHeight < imageWidth
    # Compare short/long-side ratios of image and canvas in the same
    # orientation.
    if landscape:
        imageRatio = imageHeight / imageWidth
        canvasRatio = canvasHeight / canvasWidth
    else:
        imageRatio = imageWidth / imageHeight
        canvasRatio = canvasWidth / canvasHeight
    if imageRatio < canvasRatio:
        # Image is relatively thinner than the canvas: the long side fills.
        if landscape:
            return canvasWidth * imageRatio, canvasWidth
        return canvasHeight, canvasHeight * imageRatio
    # Otherwise the short side of the canvas is the limit.
    if landscape:
        return canvasHeight, canvasHeight / imageRatio
    return canvasWidth / imageRatio, canvasWidth
# references
# https://stackoverflow.com/questions/44816682/drawing-grid-lines-across-the-image-uisng-opencv-python
# https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
# https://stackoverflow.com/questions/12133612/using-pil-to-auto-rotate-picture-taken-with-cell-phone-and-accelorometer
def griddify(filename,canvasWidth,canvasHeight):
    """Overlay a painting grid on an image and report canvas dimensions.

    Saves the gridded figure to 'gridded_image.jpg' and prints the canvas
    size (cm) and major-grid spacing needed to reproduce the grid on a
    canvas of the given width/height.
    """
    # Open image file
    image = Image.open(filename)
    # Auto-rotate using the EXIF orientation tag (274), when present.
    try:
        exifdict = image._getexif()
        if exifdict[274] == 3 :
            # Fix: Image.transpose() takes a transpose method constant, not
            # an angle, and has no `expand` argument — the original call
            # raised TypeError (silently swallowed below), so upside-down
            # photos were never corrected. rotate(180) does the intended flip.
            image=image.rotate(180,expand=True)
        elif exifdict[274] == 6 :
            image=image.rotate(-90,expand=True)
        elif exifdict[274] == 8 :
            image=image.rotate(90,expand=True)
    except Exception:
        # No EXIF data / no orientation tag: keep the image as-is (the
        # original deliberately printed an empty line here; preserved).
        print("")
    my_dpi=200.
    # Set up figure sized 1:1 with the image pixels.
    imageWidth = image.size[0]
    imageHeight = image.size[1]
    print ("image height is %.2f and width is %.2f"%(imageHeight,imageWidth))
    fig=plt.figure(figsize=(float(imageWidth)/my_dpi,float(imageHeight)/my_dpi),dpi=my_dpi)
    ax=fig.add_subplot(111)
    # Remove whitespace from around the image
    fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
    # Gridding interval: 8 major rows for portrait images, 5 for landscape.
    myInterval=imageHeight/8.
    heightPieces = 8
    if (imageHeight < imageWidth):
        myInterval = imageHeight/5.
        heightPieces = 5
    loc = plticker.MultipleLocator(base=myInterval)
    loc2 = plticker.MultipleLocator(base=myInterval)
    locMinor = plticker.MultipleLocator(base=myInterval/2)
    locMinor2 = plticker.MultipleLocator(base=myInterval/2)
    ax.xaxis.set_major_locator(loc)
    ax.yaxis.set_major_locator(loc2)
    ax.xaxis.set_minor_locator(locMinor)
    ax.yaxis.set_minor_locator(locMinor2)
    # Add the grid: red dotted minor lines, green solid major lines.
    ax.grid(which='minor', axis='both', linestyle=':', linewidth=0.6,color='r')
    ax.grid(which='major', axis='both', linestyle='-', linewidth=1,color='g')
    # Add the image
    ax.imshow(image)
    # Swap canvas dimensions when canvas and image orientations disagree.
    if (not ((canvasWidth <= canvasHeight and imageWidth <=imageHeight) or (canvasWidth > canvasHeight and imageWidth > imageHeight))):
        heightHold = canvasHeight
        canvasHeight=canvasWidth
        canvasWidth=heightHold
    # Save the figure
    fig.savefig('gridded_image.jpg')
    newCanvasHeight,newCanvasWidth=sizeCanvas(imageHeight,imageWidth,canvasHeight,canvasWidth)
    print("Modify your canvas to have dimensions: %.2f by %.2f cm"%(newCanvasHeight,newCanvasWidth))
    print("Divide the canvas into major pieces of size %.2f cm"%(newCanvasHeight/heightPieces))
    # Cumulative positions (cm) of the major grid lines.
    for x in range(1,8):
        print(newCanvasHeight/heightPieces*x, end ="cm ")
if __name__ == "__main__":
    # CLI entry point: parse the image path and canvas dimensions, then grid it.
    cli = argparse.ArgumentParser()
    cli.add_argument('filename', nargs='?', default=None)
    cli.add_argument("-w", "--width", type=float, default=60,
                     help="enter the width of your canvas in cm")
    cli.add_argument("-t", "--height", type=float, default=45,
                     help="enter the height of your canvas in cm")
    opts = cli.parse_args()
    griddify(opts.filename, opts.width, opts.height)
|
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.lang import Compatibility
from twitter.common.log.tracer import Tracer, Trace
from twitter.common.testing.clock import ThreadedClock
def test_tracing_timed():
    """Nested Tracer.timed() blocks must build a parent/child Trace tree with
    the expected start/stop times, while an always-False verbosity predicate
    keeps the textual log empty."""
    sio = Compatibility.StringIO()
    clock = ThreadedClock()  # virtual clock so the timings are deterministic
    final_trace = []
    # Subclass hook: capture the finished root trace instead of printing it.
    class PrintTraceInterceptor(Tracer):
        def print_trace(self, *args, **kw):
            final_trace.append(self._local.parent)
    tracer = PrintTraceInterceptor(output=sio, clock=clock, predicate=lambda v: False)
    assert not hasattr(tracer._local, 'parent')
    with tracer.timed('hello'):
        clock.tick(1.0)
        with tracer.timed('world 1'):
            clock.tick(1.0)
        with tracer.timed('world 2'):
            clock.tick(1.0)
    # Only the outermost timed() reports a root trace.
    assert len(final_trace) == 1
    final_trace = final_trace[0]
    assert final_trace._start == 0
    assert final_trace._stop == 3
    assert final_trace.duration() == 3
    assert final_trace.msg == 'hello'
    # The two inner blocks become children, in creation order.
    assert len(final_trace.children) == 2
    child = final_trace.children[0]
    assert child._start == 1
    assert child._stop == 2
    assert child.parent is final_trace
    assert child.msg == 'world 1'
    child = final_trace.children[1]
    assert child._start == 2
    assert child._stop == 3
    assert child.parent is final_trace
    assert child.msg == 'world 2'
    # should not log if verbosity low
    assert sio.getvalue() == ''
def test_tracing_filter():
    """A Tracer emits a line only when its predicate accepts the verbosity."""
    # Default tracer: everything is logged.
    sink = Compatibility.StringIO()
    Tracer(output=sink).log('hello world')
    assert sink.getvalue() == 'hello world\n'

    # Threshold predicate: only V >= 1 passes.
    sink = Compatibility.StringIO()
    tracer = Tracer(output=sink, predicate=lambda v: v >= 1)
    tracer.log('hello world')
    assert sink.getvalue() == ''
    tracer.log('hello world', V=1)
    assert sink.getvalue() == 'hello world\n'
    tracer.log('ehrmagherd', V=2)
    assert sink.getvalue() == 'hello world\nehrmagherd\n'

    # Parity predicate: only even verbosity passes.
    sink = Compatibility.StringIO()
    tracer = Tracer(output=sink, predicate=lambda v: (v % 2 == 0))
    tracer.log('hello world', V=0)
    assert sink.getvalue() == 'hello world\n'
    tracer.log('morf gorf', V=1)
    assert sink.getvalue() == 'hello world\n'
    tracer.log('ehrmagherd', V=2)
    assert sink.getvalue() == 'hello world\nehrmagherd\n'
|
#!/usr/bin/env python
from setuptools import setup
import os
import springserve
# Directory containing this setup script.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = os.path.split(os.path.abspath(__file__))[0]
DESCRIPTION = "API Library for video.springserve.com"
LONG_DESCRIPTION = """Springserve is a video adserver, and this library allows you
to interface with its api to do read/write and reporting requests """
URL = 'http://www.springserve.com'
DOWNLOAD_URL = ''
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
EMAIL = ''
SETUP_ARGS = {}
# Runtime dependencies installed alongside the package.
REQUIRES = ['requests>=2.0.0', 'requests_oauthlib>=0.4.0',
'link>=0.3.1','xmltodict', 'pandas' ]
# write out the version file so we can keep track on what version the built
# package is
# call setup so it can build the package
# Package metadata (title/version/license/author) is read from the
# springserve package itself so it stays in one place.
setup(name=springserve.__title__,
version=springserve.__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=springserve.__license__,
maintainer_email=EMAIL,
maintainer=springserve.__author__,
url=URL,
packages=['springserve'],
install_requires = REQUIRES,
#data_files = DATA_FILES,
classifiers=CLASSIFIERS,
**SETUP_ARGS)
|
#!/usr/bin/python3
# Print the lowercase alphabet minus 'e' and 'q', with no trailing newline.
for first, last in (('a', 'd'), ('f', 'p'), ('r', 'z')):
    for code in range(ord(first), ord(last) + 1):
        print('{:c}'.format(code), end="")
|
# que https://www.hackerrank.com/challenges/string-validators/problem?isFullScreen=true
#solution
if __name__ == '__main__':
    s = input()
    # For each character class, report whether at least one character matches.
    print(any(c.isalnum() for c in s))
    print(any(c.isalpha() for c in s))
    print(any(c.isdigit() for c in s))
    print(any(c.islower() for c in s))
    print(any(c.isupper() for c in s))
|
import heapq
import sys
#import queue
import time
from collections import namedtuple
import time
import threading
import dummy_threading
import smtplib
num_workers = 10
class Empty(Exception):
    """Raised by PriorityQueue.get(block=0)/get_without_wait() on an empty queue."""
    pass
class Full(Exception):
    """Raised by PriorityQueue.put(block=0)/put_without_wait() on a full queue."""
    pass
# Public API of this module.
__all__ = ['event_scheduler']
# A scheduled task; tuple ordering means events sort by absolute execution
# time first, then priority.
Event = namedtuple('Event', 'time,priority,action,params')
class PriorityQueue:
    """Thread-safe priority queue that keeps tasks in execution order.

    Modelled on the stdlib Queue: one mutex shared by three condition
    variables (not_empty, not_full, all_tasks_completed) guards a
    heapq-backed list.  An unfinished-task counter lets join() block until
    every queued task has been reported done via task_completed().
    """

    def __init__(self, maxsize=0):
        """Create a queue; maxsize <= 0 means unbounded."""
        self.maxsize = maxsize
        self._init(maxsize)
        self.mutex = threading.Lock()
        # All conditions share one lock so state checks stay consistent.
        self.not_empty = threading.Condition(self.mutex)
        self.not_full = threading.Condition(self.mutex)
        self.all_tasks_completed = threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_completed(self):
        """Mark one task done; wake join() waiters when the count hits zero."""
        self.all_tasks_completed.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('tasks completed! this module is called too many times!')
                self.all_tasks_completed.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_completed.release()

    def join(self):
        """Block until every queued task has been marked completed."""
        self.all_tasks_completed.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_completed.wait()
        finally:
            self.all_tasks_completed.release()

    def size(self):
        """Return the number of queued items (thread-safe)."""
        with self.mutex:
            return self.pqsize()

    def full(self):
        """Return True when a bounded queue is at capacity."""
        with self.mutex:
            return 0 < self.maxsize == self.pqsize()

    def put(self, item, block=True, timeout=None):
        """Add an event, honouring the stdlib block/timeout contract.

        Raises Full when non-blocking (or timed out) on a bounded, full queue.
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self.pqsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self.pqsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("Timeout should be non-negative")
                else:
                    end = time.time() + timeout
                    while self.pqsize() == self.maxsize:
                        # BUGFIX: was `end - time()`, which called the module
                        # object and raised TypeError on every timed put.
                        remaining = end - time.time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_without_wait(self, item):
        """Non-blocking put; raises Full immediately when at capacity."""
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return the earliest event, stdlib block/timeout contract.

        Raises Empty when non-blocking (or timed out) on an empty queue.
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self.pqsize():
                    raise Empty
            elif timeout is None:
                while not self.pqsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("Timeout should be non-negative")
            else:
                end = time.time() + timeout
                while not self.pqsize():
                    remaining = end - time.time()
                    # Consistency with put(): an exactly-expired deadline is
                    # a timeout (was `< 0.0`).
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_without_wait(self):
        """Non-blocking get; raises Empty immediately when nothing is queued."""
        return self.get(False)

    def _init(self, maxsize):
        """Create the underlying heap storage."""
        self.queue = []

    def pqsize(self, len=len):
        """Return the element count; caller must hold the mutex for accuracy."""
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        """Push onto the heap, keeping earliest-execution order."""
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        """Pop the highest-priority (earliest) event from the heap."""
        return heappop(self.queue)

    def remove(self, event, block=None):
        """Remove a not-yet-executed event and restore the heap invariant.

        Raises Empty when the queue is empty, ValueError when the event is
        not present.
        """
        self.not_empty.acquire()
        try:
            if not self.pqsize():
                raise Empty
            try:
                # list.remove raises ValueError when the event is absent;
                # the old bare `except:` also swallowed KeyboardInterrupt.
                self.queue.remove(event)
                heapq.heapify(self.queue)
            except ValueError:
                raise ValueError("Event not present in queue!")
            self.not_full.notify()
            self.unfinished_tasks -= 1
        finally:
            self.not_empty.release()

    def execute(self, item):
        """Call the function associated with the task."""
        # print() call form keeps this runnable on both Python 2 and 3
        # (the original used Python 2 print statements).
        print("Doing task " + str(item.action))
        item.action(*(item.params))
        print("done")

    def work(self):
        """Worker loop: wait until an event's time arrives, then execute it."""
        while True:
            item = self.get()
            while (time.time() < item.time):
                time.sleep(1)
            self.execute(item)
            time.sleep(1)
            self.task_completed()
class event_scheduler:
    """User-facing scheduler: add/cancel events, run workers, report elapsed time."""

    def __init__(self, delay=0):
        """Create a scheduler backed by a priority queue of max size 1000."""
        self._queue = PriorityQueue(maxsize=1000)
        self.cur_time = time.time()  # start time; event times are relative to it
        # Optional compensation for time lost while adding tasks (currently unused).
        self.delay_time = delay

    def add_event(self, time, priority, action, params):
        """Schedule action(*params) to run `time` seconds after scheduler start.

        Returns the Event, which can later be passed to cancel_event().
        NOTE: the `time` parameter deliberately shadows the time module here;
        it is kept for backward compatibility with existing keyword callers.
        """
        time = self.cur_time + time
        event = Event(time, priority, action, params)
        self._queue.put(event)
        return event

    def cancel_event(self, event):
        """Remove a not-yet-executed event, identified by the object returned
        from add_event()."""
        self._queue.remove(event)

    def empty(self):
        """Return True when no events are pending."""
        # BUGFIX: `not self._queue` was always False because PriorityQueue
        # defines neither __bool__ nor __len__; ask the queue for its size.
        return self._queue.size() == 0

    def run(self):
        """Start daemon workers that execute queued events; block until all done."""
        for i in range(num_workers):
            worker = threading.Thread(target=self._queue.work)
            worker.setDaemon(True)
            worker.start()
        self._queue.join()

    def print_time(self):
        """Return the elapsed time (in seconds) since the scheduler was created."""
        time_in_secs = time.time() - self.cur_time
        return "Timer: " + str(time_in_secs)
def test_task1(*args):
    """Demo task: print a marker showing task 1 ran (args are ignored).

    Uses the print() call form so the file runs on both Python 2 and 3
    (the original Python 2 print statement is a syntax error under 3).
    """
    print("Executing Task 1")
def test_task2(*args):
    """Demo task: print a marker showing task 2 ran (args are ignored).

    print() call form for Python 2/3 portability.
    """
    print("Executing Task 2")
def test_task3(*args):
    """Demo task: print a marker showing task 3 ran (args are ignored).

    print() call form for Python 2/3 portability.
    """
    print("Executing Task 3")
def test_send_birthday_email(*args):
    """Send a birthday email through the local SMTP server.

    args: (sender, receiver, subject, body).  Prints a status line instead
    of raising; missing parameters abort early.
    """
    if len(args) < 4:
        print("Unsuccessful. Missing parameters!")
        return
    sender = args[0]
    receivers = [args[1]]
    message = "Subject:" + args[2] + "\n" + args[3]
    try:
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail(sender, receivers, message)
        print("Successfully sent email")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; SMTP and socket errors are reported best-effort.
        print("Error: unable to send email")
|
import torch
# Public names exported by this module.
__all__ = [
    "RNNTLoss",
    "rnnt_loss",
]
def _rnnt_loss_alphas(
    logits,
    targets,
    logit_lengths,
    target_lengths,
    blank=-1,
    clamp=-1,
):
    """Compute alphas for the RNN transducer loss.

    See the documentation for RNNTLoss.
    """
    # The kernel expects every tensor on the logits device, with all
    # integer tensors as int32.
    device = logits.device
    targets = targets.to(device=device).int()
    logit_lengths = logit_lengths.to(device=device).int()
    target_lengths = target_lengths.to(device=device).int()
    return torch.ops.torchaudio.rnnt_loss_alphas(
        logits,
        targets,
        logit_lengths,
        target_lengths,
        blank,
        clamp,
    )
def _rnnt_loss_betas(
    logits,
    targets,
    logit_lengths,
    target_lengths,
    blank=-1,
    clamp=-1,
):
    """Compute betas for the RNN transducer loss.

    See the documentation for RNNTLoss.
    """
    # The kernel expects every tensor on the logits device, with all
    # integer tensors as int32.
    device = logits.device
    targets = targets.to(device=device).int()
    logit_lengths = logit_lengths.to(device=device).int()
    target_lengths = target_lengths.to(device=device).int()
    return torch.ops.torchaudio.rnnt_loss_betas(
        logits,
        targets,
        logit_lengths,
        target_lengths,
        blank,
        clamp,
    )
class _RNNT(torch.autograd.Function):
    """Autograd wrapper around the torchaudio RNNT loss kernel.

    The kernel returns both per-sequence costs and the gradients w.r.t.
    logits in a single call; the gradients are stashed on ``ctx`` so
    backward() only has to scale them.
    """
    @staticmethod
    def forward(
        ctx,
        logits,
        targets,
        logit_lengths,
        target_lengths,
        blank=-1,
        clamp=-1,
        fused_log_softmax=True,
        reuse_logits_for_grads=True,
    ):
        """
        See documentation for RNNTLoss
        """
        # move everything to the same device.
        targets = targets.to(device=logits.device)
        logit_lengths = logit_lengths.to(device=logits.device)
        target_lengths = target_lengths.to(device=logits.device)
        # make sure all int tensors are of type int32.
        targets = targets.int()
        logit_lengths = logit_lengths.int()
        target_lengths = target_lengths.int()
        if blank < 0:  # reinterpret blank index if blank < 0.
            blank = logits.shape[-1] + blank
        # NOTE(review): `fused_log_smax` (not *_softmax) appears to be the
        # registered kwarg name of the C++ op — confirm before "fixing" it.
        costs, gradients = torch.ops.torchaudio.rnnt_loss(
            logits=logits,
            targets=targets,
            src_lengths=logit_lengths,
            tgt_lengths=target_lengths,
            blank=blank,
            clamp=clamp,
            fused_log_smax=fused_log_softmax,
            reuse_logits_for_grads=reuse_logits_for_grads,
        )
        # Gradients were already computed in forward; keep them for backward.
        ctx.grads = gradients
        return costs
    @staticmethod
    def backward(ctx, output_gradients):
        # Scale the stored logit gradients by the incoming per-sequence cost
        # gradients, broadcast over the (time, target, class) dimensions.
        output_gradients = output_gradients.view(-1, 1, 1, 1).to(ctx.grads)
        ctx.grads.mul_(output_gradients).to(ctx.grads)
        # One gradient slot per forward() argument (None for non-tensors).
        return (
            ctx.grads,  # logits
            None,  # targets
            None,  # logit_lengths
            None,  # target_lengths
            None,  # blank
            None,  # clamp
            None,  # fused_log_softmax
            None,  # reuse_logits_for_grads
        )
def rnnt_loss(
    logits,
    targets,
    logit_lengths,
    target_lengths,
    blank=-1,
    clamp=-1,
    fused_log_softmax=True,
    reuse_logits_for_grads=True,
):
    """Compute the RNN Transducer loss.

    The RNN Transducer loss (`Graves 2012 <https://arxiv.org/pdf/1211.3711.pdf>`__)
    extends the CTC loss by defining a distribution over output sequences of all
    lengths, and by jointly modelling both input-output and output-output
    dependencies.

    Args:
        logits (Tensor): (batch, time, target, class) output from the joiner
        targets (Tensor): (batch, max target length) zero-padded targets
        logit_lengths (Tensor): (batch,) encoder sequence lengths
        target_lengths (Tensor): (batch,) target lengths per sequence
        blank (int, opt): blank label (Default: ``-1``)
        clamp (float): clamp for gradients (Default: ``-1``)
        fused_log_softmax (bool): set to False if calling log_softmax outside
            the loss (Default: ``True``)
        reuse_logits_for_grads (bool): save memory by reusing the logits
            storage for gradients (Default: ``True``)
    """
    if not fused_log_softmax:
        # Pre-apply log_softmax; the kernel must then keep the original
        # logits values, so their storage cannot be reused for gradients.
        logits = torch.nn.functional.log_softmax(logits, dim=-1)
        reuse_logits_for_grads = False
    return _RNNT.apply(
        logits,
        targets,
        logit_lengths,
        target_lengths,
        blank,
        clamp,
        fused_log_softmax,
        reuse_logits_for_grads,
    )
class RNNTLoss(torch.nn.Module):
    """Module wrapper computing the RNN Transducer loss.

    The RNN Transducer loss (`Graves 2012 <https://arxiv.org/pdf/1211.3711.pdf>`__)
    extends the CTC loss by defining a distribution over output sequences of
    all lengths, and by jointly modelling both input-output and output-output
    dependencies.

    Args:
        blank (int, opt): blank label (Default: ``-1``)
        clamp (float): clamp for gradients (Default: ``-1``)
        fused_log_softmax (bool): set to False if calling log_softmax outside
            the loss (Default: ``True``)
        reuse_logits_for_grads (bool): save memory by reusing the logits
            storage for gradients (Default: ``True``)
    """

    def __init__(
        self,
        blank=-1,
        clamp=-1,
        fused_log_softmax=True,
        reuse_logits_for_grads=True,
    ):
        super().__init__()
        self.blank = blank
        self.clamp = clamp
        self.fused_log_softmax = fused_log_softmax
        self.reuse_logits_for_grads = reuse_logits_for_grads

    def forward(
        self,
        logits,
        targets,
        logit_lengths,
        target_lengths,
    ):
        """Apply the loss to one batch.

        Args:
            logits (Tensor): (batch, time, target, class) output from the joiner
            targets (Tensor): (batch, max target length) zero-padded targets
            logit_lengths (Tensor): (batch,) encoder sequence lengths
            target_lengths (Tensor): (batch,) target lengths per sequence
        """
        # Delegate to the functional form with this module's configuration.
        return rnnt_loss(
            logits,
            targets,
            logit_lengths,
            target_lengths,
            blank=self.blank,
            clamp=self.clamp,
            fused_log_softmax=self.fused_log_softmax,
            reuse_logits_for_grads=self.reuse_logits_for_grads,
        )
|
import asyncio
from Utils.Timer import Timer
from World.Region.RegionManager import RegionManager
from Server.Registry.QueuesRegistry import QueuesRegistry
class WorldManager(object):
    """Main world loop: each heartbeat tick drains the inter-subsystem queues
    (enter/exit world, movement, chat, name queries) into the region manager."""
    def __init__(self):
        self.heartbeat = 0.01       # seconds between scheduler ticks
        self.last_update = None     # ms timestamp of the most recent tick
        self.region_mgr = RegionManager()
    async def run(self):
        """Run forever, firing one batch of queue-processing tasks per tick."""
        while True:
            self.last_update = Timer.get_ms_time()
            self._register_tasks()
            await asyncio.sleep(self.heartbeat)
    def _register_tasks(self):
        """Schedule one processing task per queue for this tick."""
        # NOTE(review): the gather result is neither awaited nor stored, so
        # every tick fires a new batch without back-pressure — confirm intended.
        asyncio.gather(
            asyncio.ensure_future(self.process_player_enter_world()),
            asyncio.ensure_future(self.process_player_movement()),
            asyncio.ensure_future(self.process_player_exit_world()),
            asyncio.ensure_future(self.process_chat_message()),
            asyncio.ensure_future(self.process_name_query()),
        )
    async def process_player_enter_world(self):
        """Move at most one queued player into the world regions."""
        try:
            player = QueuesRegistry.players_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        else:
            self.region_mgr.add_player(player)
        finally:
            await asyncio.sleep(0.01)
    async def process_player_movement(self):
        """Apply at most one queued (player, opcode, packet) movement update."""
        try:
            player, opcode, packet = QueuesRegistry.movement_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        else:
            self.region_mgr.update_player_movement(player, opcode, packet)
        finally:
            await asyncio.sleep(0.01)
    async def process_player_exit_world(self):
        """Remove at most one queued player from the world regions."""
        try:
            player = QueuesRegistry.remove_player_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        else:
            self.region_mgr.remove_player(player)
        finally:
            await asyncio.sleep(0.01)
    async def process_chat_message(self):
        """Broadcast at most one queued chat message via the region manager."""
        try:
            sender, text_message_packet = QueuesRegistry.text_message_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        else:
            self.region_mgr.send_chat_message(sender, text_message_packet)
        finally:
            await asyncio.sleep(0.01)
    async def process_name_query(self):
        """Answer at most one queued (requester, target_guid) name query."""
        try:
            requester, target_guid = QueuesRegistry.name_query_queue.get_nowait()
        except asyncio.QueueEmpty:
            return
        else:
            self.region_mgr.send_name_query(requester, target_guid)
        finally:
            await asyncio.sleep(0.01)
|
pDCount = 9   # player: nine four-sided dice
pDSides = 4
cDCount = 6   # challenger: six six-sided dice
cDSides = 6

# Tally every possible 9d4 roll (faces counted 0..3, offset by the die count
# so totals land in 9..36), then normalise counts to probabilities.
pSums = {total: 0 for total in range(1, 37)}
for roll in range(pDSides**pDCount):
    faces = [roll // (pDSides**die) % pDSides for die in range(pDCount)]
    pSums[sum(faces) + pDCount] += 1
for total in pSums:
    pSums[total] /= pDSides**pDCount

# Same tally for the 6d6 rolls.
cSums = {total: 0 for total in range(1, 37)}
for roll in range(cDSides**cDCount):
    faces = [roll // (cDSides**die) % cDSides for die in range(cDCount)]
    cSums[sum(faces) + cDCount] += 1
# Results very similar to https://en.wikipedia.org/wiki/Centered_pentachoric_number
for total in cSums:
    cSums[total] /= cDSides**cDCount

# Probability that the 9d4 total strictly beats the 6d6 total.
chance = sum(pSums[i] * cSums[j] for i in range(2, 37) for j in range(1, i))
print(chance)
|
# Read integers until 0; print each number's digital root.
while True:
    n = int(input())
    if n == 0:
        break
    # Collapse to a single digit by repeatedly summing the decimal digits.
    while n > 9:
        n = sum(int(digit) for digit in str(n))
    print(n)
|
from omnipytent import *
from omnipytent.ext.idan import *
from omnipytent.completers import file_completer
# Plumbum-style command handles: the built app binary and dub in quiet mode.
exe = local['./app']
dub = local['dub']['-q']
@task
def compile(ctx):
    """Build the D project with dub, using the dmd compiler."""
    dub['build']['--compiler=dmd'] & ERUN.bang
@task
def run(ctx):
    """Build (if needed) and run the project via `dub run`."""
    dub['run'] & BANG
@task
def test(ctx):
    """Run the project's unit tests with dub, using the dmd compiler."""
    dub['test']['--compiler=dmd'] & BANG
@task
def debug(ctx):
    """Start a GDB debugging session on the built executable.

    NOTE(review): VBGstartGDBForD presumably comes from the Vebugger vim
    plugin — confirm it is available in the editing environment.
    """
    CMD.VBGstartGDBForD(exe)
def mkpath(path):
    """Create *path* and any missing ancestor directories (like mkdir -p)."""
    if path.exists():
        return
    mkpath(path.dirname)
    path.mkdir()
@task.complete(file_completer('/files/code/wekapp/weka'))
def copy_from_wekapp(ctx, *paths):
    """Symlink the given weka source files from the wekapp checkout into
    source/weka, creating intermediate directories as needed."""
    print(paths)
    for path in paths:
        source_file = local.path('/files/code/wekapp/weka') / path
        target_file = local.path('source/weka') / path
        if not target_file.exists():
            mkpath(target_file.dirname)
            local['ln']('-s', source_file, target_file)
@task
def copy_fake_weka(ctx):
    """Symlink every faked weka module from ../0zfake_libs into source/weka."""
    target_root = local.path('source/weka')
    # Refuse to run when source/weka is itself a symlink (a real checkout).
    assert not target_root.islink(), "%s is symlink" % target_root
    mkpath(target_root)
    for faked_weka_module in local.path('../0zfake_libs'):
        target = target_root / faked_weka_module.basename
        if target.exists():
            print('%s exists. %ssymlink' % (target, '' if target.islink() else 'not '))
        else:
            local['ln']('-s', faked_weka_module, target)
|
import numpy as np
import cv2
import cv2.cv as cv
# Webcam green-object tracker: segment green in HSV, detect circles with a
# Hough transform on the mask edges, and draw them on the live feed.
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
# NOTE(review): range_R/range_G/range_B are never read below — confirm dead.
range_R = np.arange(40,205)
range_G = np.arange(70,255)
range_B = np.arange(0,60)
# HSV bounds used for the green mask.
lower_green = np.array([30,120,120])
upper_green = np.array([100,255,255])
while True:
    ret, img = cap.read()
    # Blur to suppress sensor noise before the colour conversions.
    blur =cv2.GaussianBlur(img,(5,5),10)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask_rgb = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
    edges = cv2.Canny(mask,100,200)
    # Legacy cv2.cv constant (OpenCV 2.x); the 1000 px minimum distance
    # effectively limits detection to one circle per frame.
    circles = cv2.HoughCircles(edges, cv.CV_HOUGH_GRADIENT, 1.6, 1000, param1 = 100,param2=40)
    if circles is not None:
        circles = np.round(circles[0, :]).astype("int")
        for (x, y, r) in circles:
            cv2.circle(img, (x, y), r, (0, 255, 0), 4)
            cv2.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
            # Re-draw only when the circle centre lies inside the green mask.
            if mask_rgb[y][x][0] == 255:
                if mask_rgb[y][x][1] == 255:
                    if mask_rgb[y][x][2] == 255:
                        cv2.circle(img, (x, y), r, (0, 255, 0), 4)
                        cv2.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
    cv2.imshow("imagem", img)
    cv2.imshow("output",edges)
    # Exit when ESC (27) is pressed.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
from PIL import Image
import sys
def main():
    """Resize every image named on the command line to 200x200, in place."""
    for path in sys.argv[1:]:
        picture = Image.open(path)
        shrunk = picture.resize((200, 200), Image.ANTIALIAS)
        shrunk.save(path)

if __name__ == "__main__":
    main()
|
import sqlalchemy
from model import *
def test1():
    """Query the single ImageList row (raises if zero or multiple rows exist)."""
    session = getSession()
    print(ImageList)
    # .one() asserts exactly one row; the result is unused beyond that check.
    il = session.query(ImageList).one()
if __name__ == "__main__":
    # Initialise the schema, then fetch and delete the Simple row with id == 1.
    initDb()
    session : sqlalchemy.orm.session.Session = getSession()
    simple = session.query(Simple).filter(Simple.id == 1).one()
    print(simple.id)
    session.delete(simple)
    session.commit()
|
#-*- coding:utf-8 -*- # officially recommended form of the encoding declaration
'''#coding=utf-8'''#alternative encoding-cookie form; not recommended
print("the first hello world !")
# The two bare triple-quoted strings below are no-op expression statements
# used as comments (they read "this is a comment" / "this is also a comment").
'''这是注释'''
"""这也是注释"""
|
'''
File name: test_script.py
Author: Haoyuan(Steve) Zhang
Date created: 9/26/2017
'''
'''
File clarification:
Check the accuracy of your algorithm
'''
import numpy as np
# from est_tps import est_tps
# from obtain_morphed_tps import obtain_morphed_tps
# from morph_tps import morph_tps
from morph_tri import morph_tri
from PIL import Image
import imageio
from cpselect import cpselect
# Smoke test for the triangulation-based morphing routine.
def test_tri(im1, im2, im1_pts, im2_pts, warp_frac, dissolve_frac):
    """Validate morph_tri output shape: frame count, colour channels, size."""
    result = morph_tri(im1, im2, im1_pts, im2_pts, warp_frac, dissolve_frac)

    # One output frame per warp fraction (two in this test).
    if result.shape[0] != 2:
        print("The number of output image is wrong. \n")
        return False

    first_frame = result[0, :, :, :]
    if first_frame.shape[2] != 3:
        print("What happened to color channel? \n")
        return False

    if first_frame.shape[0] != 50 or first_frame.shape[1] != 50:
        print("Something wrong about the size of output image. \n")
        return False

    print("Triangulation Morphing Test Passed!")
    return True
# the main test code
def main():
    """Run the triangulation-morphing smoke test on tiny synthetic images."""
    white = np.ones((50, 50, 3))
    black = np.zeros((50, 50, 3))
    src_pts = np.array([[1, 1], [1, 50], [50, 1], [50, 50], [25, 25]])
    dst_pts = np.array([[1, 1], [1, 50], [50, 1], [50, 50], [20, 20]])
    warp_frac, dissolve_frac = np.array([0.2, 0.3]), np.array([0.1, 0.3])
    if not test_tri(white, black, src_pts, dst_pts, warp_frac, dissolve_frac):
        print("The Triangulation Morphing test failed. \n")
        return
    print("All tests passed! \n")
if __name__ == "__main__":
    # test triangulation morphing
    main()
    # Full pipeline on real photos: load, resize, hand-pick control points,
    # morph over 61 evenly spaced frames and write the result as a video.
    im1 = np.array(Image.open('3.jpeg').convert('RGB'))
    im2 = np.array(Image.open('4.jpeg').convert('RGB'))
    resize_img1 = np.array(Image.fromarray(im1).resize([300, 300]))
    resize_img2 = np.array(Image.fromarray(im2).resize([300, 300]))
    # NOTE(review): cpselect receives the full-size images but morph_tri the
    # resized ones — confirm the points are in the right coordinate space.
    im1_pts, im2_pts = cpselect(im1, im2)
    warp_frac = 1 / 60 * np.array(range(61))
    dissolve_frac = 1 / 60 * np.array(range(61))
    E = morph_tri(resize_img1, resize_img2, im1_pts, im2_pts, warp_frac, dissolve_frac)
    imageio.mimwrite('face_morph_test.avi', E, fps=15)
|
import httplib2
import pprint
import time
import os
import shutil
#import urllib2
#libraries for gdrive file upload
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from oauth2client.client import OAuth2WebServerFlow
from apiclient import errors
from apiclient import http
#libraries for web browsing
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
#libraries for onedrive file upload
#libraries for dropbox file upload
class file:
    """Base class for cloud-storage file handles (name shadows the old
    Python 2 builtin).  Concrete services override upload()/authorize()."""
    authorized = False        # has OAuth authorization taken place?
    listupdated = False       # is the cached remote file list fresh?
    downloadfilepath = None

    def __init__(self, location):
        """Remember the local filesystem path of the file."""
        self.address = location

    def upload(self):
        """Upload stub; subclasses implement the service-specific transfer."""
        pass

    @staticmethod
    def authorize():
        """Authorization stub; subclasses implement the OAuth flow."""
        pass
class gdrivefile(file):
    """Google Drive implementation of the base ``file`` handle.

    All state is class-level and shared: one authorized Drive service, one
    cached remote file list and the latest quota figures.  Python 2 only
    (old ``except X,e`` syntax and ``raw_input``).
    """
    drive_service=None   # authorized apiclient service, set by authorize()
    filelist=[]          # cached file-metadata dicts from updatefilelist()
    currentquota=None    # [quotaBytesTotal, quotaBytesUsed] from getquota()
    def upload(self):
        """Upload this file to Drive, running the OAuth flow on first use."""
        if gdrivefile.authorized==False :
            gdrivefile.authorize()
            gdrivefile.authorized=True
        FILENAME = self.address
        media_body = MediaFileUpload(FILENAME, mimetype='', resumable=True)
        body = {
            'title': FILENAME,
            'description': '',
            'mimeType': ''
        }
        try:
            # (local name `file` shadows the module-level base class here)
            file = gdrivefile.drive_service.files().insert(body=body, media_body=media_body).execute()
            # TODO: update the cached file list after a successful insert.
        except errors.HttpError,error :
            print("error in uploading file")
        #pprint.pprint(file)
    @staticmethod
    def authorize():
        """Run the OAuth2 installed-app flow, driving the browser consent
        page with Selenium, and build the Drive v2 service object."""
        CLIENT_ID = '268285193546-qpu3mbasinue8ofpiah50fu928lcf24b.apps.googleusercontent.com'
        CLIENT_SECRET = '0iyrUyCs-MhAIyOMeYKeeQO-'
        # Check https://developers.google.com/drive/scopes for all available scopes
        OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
        # Redirect URI for installed apps
        REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE,
                                   redirect_uri=REDIRECT_URI)
        authorize_url = flow.step1_get_authorize_url()
        #print 'Go to the following link in your browser: ' + authorize_url
        driver=webdriver.Firefox()#depends on your browser
        driver.get(authorize_url)
        #login=driver.find_element_by_name("signIn")
        #login.send_keys(Keys.RETURN)
        # Wait up to 60 s for the user to log in and the consent button to appear.
        accept= WebDriverWait(driver, 60).until(EC.element_to_be_clickable((By.ID, "submit_approve_access")))
        accept.send_keys(Keys.RETURN)
        #accept.click()
        # The verification code is displayed in an input with id="code".
        a=driver.find_element_by_id("code")
        code=a.get_attribute('value')
        driver.quit()
        #code = raw_input('Enter verification code: ').strip()#change here
        credentials = flow.step2_exchange(code)
        # Create an httplib2.Http object and authorize it with our credentials
        http = httplib2.Http()
        http = credentials.authorize(http)
        gdrivefile.drive_service = build('drive', 'v2', http=http)
    @staticmethod
    def updatefilelist():
        """Refresh the cached metadata for every file on the Drive,
        following pagination via nextPageToken."""
        if gdrivefile.authorized==False :
            gdrivefile.authorize()
            gdrivefile.authorized=True
        page_token = None
        while True:
            try:
                param={}
                if page_token:
                    param['pageToken']=page_token
                dfiles=gdrivefile.drive_service.files().list(**param).execute()
                gdrivefile.filelist.extend(dfiles['items'])
                page_token=dfiles.get('nextPageToken')
                gdrivefile.listupdated=True
                if not page_token:
                    break
            except errors.HttpError:
                print("error in udating list")
                break
    @staticmethod
    def getfile():
        """Prompt for a file name and return its metadata dict, or None.

        On no exact title match, prints partial-match suggestions instead.
        """
        if gdrivefile.listupdated==False:
            gdrivefile.updatefilelist()
        ref=[]
        sample=raw_input('enter the file name ').strip()
        for gfile in gdrivefile.filelist:
            if sample in gfile['title']:
                if sample==gfile['title']:
                    return gfile
                ref.append(gfile['title'])
        print("No match found.Following are the related files")
        for name in ref:
            print(name)
        return None
    @staticmethod
    def download():
        """Interactively pick a remote file and save it to the current directory."""
        file2download=gdrivefile.getfile()
        if file2download==None:
            return
        else:
            downloadedfile=open(file2download.get('title'),"wb")
            download_url=file2download.get('downloadUrl')
            if download_url:
                resp ,content=gdrivefile.drive_service._http.request(download_url)
                if resp.status==200:
                    #print('Status',resp)
                    downloadedfile.write(content)
                    #src=r"C:\\Users\\windows\\Downloads\\" + file2download.get('title')
                    #dest=os.getcwd()+r"\\" file2download.get('title')
                    #shutil.move(dest,src)
                    downloadedfile.close()
                    #os.rename(dest,src)
                else :
                    print("An error occured in downloading")
            else:
                print("No such file exists ")
    @staticmethod
    def getquota():
        """Fetch [total, used] quota bytes into gdrivefile.currentquota."""
        if gdrivefile.authorized==False :
            gdrivefile.authorize()
            gdrivefile.authorized=True
        about=gdrivefile.drive_service.about().get().execute()
        gdrivefile.currentquota=[about['quotaBytesTotal'],about['quotaBytesUsed']]
class odrivefile(file):
    """OneDrive file handle; upload/authorize are not implemented yet."""

    def upload(self):
        """TODO: implement the OneDrive upload."""
        pass

    @staticmethod
    def authorize():
        """TODO: implement the OneDrive authorization flow."""
        pass
class drobboxfile(file):
    """Dropbox file handle; upload/authorize are not implemented yet."""

    def upload(self):
        """TODO: implement the Dropbox upload."""
        pass

    @staticmethod
    def authorize():
        """TODO: implement the Dropbox authorization flow."""
        pass
#testing the new update
'''
add=raw_input("enter address of a file")
f1=gdrivefile(add)
f1.upload()
#f1.upload()
'''
#gdrivefile.download()
# Query the Drive quota and print the total/used byte counts.
gdrivefile.getquota()
a=gdrivefile.currentquota
for data in a:
    # BUGFIX: was `print(data)+' bytes'`, which only worked as a Python 2
    # print statement (and is a TypeError under Python 3).  This form prints
    # "<n> bytes" under both interpreters.
    print(data + ' bytes')
|
def main():
    """Read an integer and report whether any two adjacent digits are equal."""
    n = int(input("Insira um numero: "))
    adjacente = False
    # Compare neighbouring decimal digits, least significant pair first.
    while n > 0:
        digito = n % 10
        n = n // 10
        if digito == n % 10:
            adjacente = True
    print("sim" if adjacente else "nao")

#--------------------------------------------------
if __name__ == '__main__':
    main()
|
n = int(input("Digite seu número: "))
# Count proper divisors in [2, n); n is prime iff there are none and n >= 2.
nao_primo = 0
# BUGFIX: 0, 1 and negative numbers were reported as prime because the
# divisor loop below never runs for them.
if n < 2:
    nao_primo = 1
numero_atual = 2
while numero_atual < n:
    if n % numero_atual == 0:
        nao_primo += 1
    numero_atual += 1
if nao_primo == 0:
    print("É primo.")
else:
    print("Não é primo.")
|
import subprocess
import time
import os
from operator import itemgetter
# -- set update/round time (seconds)
period = 5
# -- set sorting order. up = most used first, use either "up" or "down"
order = "up"
# Elapsed-round counter, list of seen applications, and per-window usage
# rows (each row: [app, window_name, seconds]).
t, applist, winlist = 0, [], []
# Directory where the usage logs will be saved.
home = os.environ["HOME"]
logdir = home+"/.usagelogs"
# Metodo para ejecutar comandos y retornar la stdout||staderr
def get(command):
    """Run *command* and return its stripped stdout, or None on a non-zero exit."""
    try:
        raw = subprocess.check_output(command)
    except subprocess.CalledProcessError:
        return None
    return raw.decode("utf-8").strip()
def time_format(s):
    """Convert a duration in seconds to an h:m:s string."""
    h, remainder = divmod(s, 3600)
    m, sec = divmod(remainder, 60)
    return "%d:%02d:%02d" % (h, m, sec)
def currtime(tformat=None):
    """Current local time; underscore-separated (filename-safe) when
    tformat == "file", human-readable otherwise."""
    if tformat == "file":
        return time.strftime("%Y_%m_%d_%H_%M_%S")
    return time.strftime("%Y-%m-%d %H:%M:%S")
# Per-session log file (timestamped name) and the session start timestamp.
log = logdir+"/"+currtime("file")+".txt"; startt = currtime()
def summarize():
    """Rewrite the session log with a usage report.

    For each application: total focused time, its share of all tracked
    time, and a per-window breakdown, sorted according to the module-level
    `order` setting.  The whole file is rewritten on every call.
    """
    # Open the log file; "wt" = write text (truncates the previous report).
    with open(log, "wt") as report:
        totaltime = sum(it[2] for it in winlist)
        report.write("")
        alldata = []
        for app in applist:
            appdata, windata = [], []
            # Total seconds and percentage share for this application.
            apptime = sum([it[2] for it in winlist if it[0] == app])
            appperc = round(100*apptime/totaltime)
            for d in [app, apptime, appperc]:
                appdata.append(d)
            # Per-window rows for this application, each with its own share.
            wins = [r for r in winlist if r[0] == app]
            for w in wins:
                wperc = str(round(100*w[2]/totaltime))
                windata.append([w[1], w[2], wperc])
            windata = sorted(windata, key=itemgetter(1))
            windata = windata[::-1] if order == "up" else windata
            appdata.append(windata)
            alldata.append(appdata)
        # Sort applications by total time, direction per `order`.
        alldata = sorted(alldata, key = itemgetter(1))
        alldata = alldata[::-1] if order == "up" else alldata
        for item in alldata:
            app, apptime, appperc = item[0], item[1], item[2]
            report.write(
                ("-"*60) \
                +"\n" \
                +app \
                +"\n" \
                +time_format(apptime) \
                +" ("+str(appperc)+"%)\n" \
                +("-"*60) \
                +"\n"
                )
            for w in item[3]:
                # NOTE: `time` here shadows the time module within this loop.
                wname, time, perc = w[0], w[1], w[2]
                report.write(
                    " "+time_format(time)+" ("+perc+"%) " \
                    +(6-len(perc))*" "+wname+"\n"
                    )
        report.write(
            "\n"+"="*60+"\nStarted: "+startt+"\t"+"updated: " \
            +currtime()+"\n"+"="*60
            )
# Collect usage data forever.
while True:
    # Refresh interval.
    time.sleep(period)
    # PID of the focused window, e.g. `xdotool getactivewindow getwindowpid`.
    frpid = get(["xdotool", "getactivewindow", "getwindowpid"])
    # Title of the focused window, e.g. `xdotool getactivewindow getwindowname`.
    frname = get(["xdotool", "getactivewindow", "getwindowname"])
    # Resolve the PID to a process name, e.g. `ps -p 17359 o comm=`;
    # `get` returns None on failure, in which case the app is "Unknown".
    app = get(["ps", "-p", frpid, "o", "comm="]) if frpid is not None else "Unknown"
    # Robustness fix: `ps` itself can fail (window closed between the two
    # xdotool calls), making `get` return None and crashing `"bash" in app`.
    if app is None:
        app = "Unknown"
    # fix a few names
    if "bash" in app:
        app = "gnome-terminal-bash"
    elif app == "soffice.bin":
        app = "libreoffice"
    # Register the application only the first time it is seen.
    if app not in applist:
        applist.append(app)
    # Window names already recorded in winlist.
    checklist = [item[1] for item in winlist]
    # New window: start a record; known window: accumulate its time.
    if frname not in checklist:
        winlist.append([app, frname, 1*period])
    else:
        winlist[checklist.index(frname)][
            2] = winlist[checklist.index(frname)][2]+1*period
    # Rewrite the report roughly once a minute.
    if t == 60/period:
        summarize()
        t = 0
    else:
        t += 1
|
# -*- coding: utf-8 -*-
"""Automatic clustering algorithms."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
from collections import defaultdict
import numpy as np
from ..utils.array import (PartialArray, get_excerpts,
chunk_bounds, data_chunk,
_load_ndarray, _as_array,
)
from ..utils._types import Bunch
from ..utils.event import EventEmitter
from ..utils.logging import debug, info
from ..io.kwik.sparse_kk2 import sparsify_features_masks
from ..traces import (Filter, Thresholder, compute_threshold,
FloodFillDetector, WaveformExtractor, PCA,
)
#------------------------------------------------------------------------------
# Spike detection class
#------------------------------------------------------------------------------
def _keep_spikes(samples, bounds):
"""Only keep spikes within the bounds `bounds=(start, end)`."""
start, end = bounds
return (start <= samples) & (samples <= end)
def _split_spikes(groups, idx=None, **arrs):
    """Split spike data according to the channel group.

    Parameters
    ----------
    groups : array-like
        Channel group of every spike.
    idx : array-like, optional
        Boolean mask selecting which spikes to keep before splitting.
    **arrs
        Per-spike arrays to split (spike_samples, waveforms, masks, ...).

    Returns
    -------
    dict
        `{group: {name: array}}` for every unique group.
    """
    dtypes = {'spike_samples': np.float64,
              'waveforms': np.float32,
              'masks': np.float32,
              }
    groups = _as_array(groups)
    if idx is not None:
        n_spikes_chunk = np.sum(idx)
        # First, remove the overlapping bands.
        groups = groups[idx]
        arrs_bis = arrs.copy()
        for key, arr in arrs.items():
            arrs_bis[key] = arr[idx]
            assert len(arrs_bis[key]) == n_spikes_chunk
    else:
        # Bug fix: `arrs_bis` was previously only assigned inside the masked
        # branch, so calling this function with `idx=None` raised NameError.
        arrs_bis = arrs
    # Then, split along the group.
    groups_u = np.unique(groups)
    out = {}
    for group in groups_u:
        i = (groups == group)
        out[group] = {}
        for key, arr in arrs_bis.items():
            out[group][key] = _concat(arr[i], dtypes.get(key, None))
    return out
def _array_list(arrs):
out = np.empty((len(arrs),), dtype=np.object)
out[:] = arrs
return out
def _concat(arr, dtype=None):
return np.array([_[...] for _ in arr], dtype=dtype)
class SpikeCounts(object):
    """Read-only accessor for per-group, per-chunk spike counts.

    Wraps a `{group: {chunk: count}}` mapping; calling the instance sums
    over whichever of (group, chunk) is left unspecified.
    """

    def __init__(self, counts):
        self._counts = counts
        self._groups = sorted(counts)
        # All groups are assumed to share the same chunk keys.
        self._chunks = sorted(counts[self._groups[0]]) if self._groups else []

    def __call__(self, group=None, chunk=None):
        if group is not None and chunk is not None:
            per_group = self._counts.get(group, {})
            return per_group.get(chunk, 0)
        if group is not None:
            return sum(self(chunk=c, group=group) for c in self._chunks)
        if chunk is not None:
            return sum(self(chunk=chunk, group=g) for g in self._groups)
        # Neither specified: grand total over all chunks and groups.
        return sum(self(chunk=c) for c in self._chunks)
#------------------------------------------------------------------------------
# Spike detection class
#------------------------------------------------------------------------------
class SpikeDetekt(EventEmitter):
    """SpikeDetekt spike-detection pipeline.

    Filters raw traces, detects threshold crossings, extracts waveforms, and
    computes PCA features, chunk by chunk.  Intermediate per-chunk arrays are
    saved to flat files in `tempdir` between the passes.

    Fixes applied: `np.object` (removed in NumPy 1.24) replaced with the
    builtin `object`; `_load` no longer dereferences `shape[0]` before
    checking that `shape` is not None; a local named `filter` no longer
    shadows the builtin.
    """

    def __init__(self, tempdir=None, **kwargs):
        super(SpikeDetekt, self).__init__()
        # Directory used to store intermediate per-chunk arrays on disk.
        self._tempdir = tempdir
        self._kwargs = kwargs
        # Number of channels in each channel group.
        self._n_channels_per_group = {
            group: len(channels)
            for group, channels in self._kwargs['probe_channels'].items()
        }
        self._groups = sorted(self._n_channels_per_group)
        self._n_features = self._kwargs['nfeatures_per_channel']
        before = self._kwargs['extract_s_before']
        after = self._kwargs['extract_s_after']
        # Waveform length in samples (window around each spike).
        self._n_samples_waveforms = before + after

    # Processing objects creation
    # -------------------------------------------------------------------------

    def _create_filter(self):
        """Return the bandpass filter configured from the parameters."""
        rate = self._kwargs['sample_rate']
        low = self._kwargs['filter_low']
        high = self._kwargs['filter_high']
        order = self._kwargs['filter_butter_order']
        return Filter(rate=rate,
                      low=low,
                      high=high,
                      order=order,
                      )

    def _create_thresholder(self, thresholds=None):
        """Return the thresholder for the configured detection mode."""
        mode = self._kwargs['detect_spikes']
        return Thresholder(mode=mode, thresholds=thresholds)

    def _create_detector(self):
        """Return the connected-component (flood-fill) detector."""
        graph = self._kwargs['probe_adjacency_list']
        join_size = self._kwargs['connected_component_join_size']
        return FloodFillDetector(probe_adjacency_list=graph,
                                 join_size=join_size,
                                 )

    def _create_extractor(self, thresholds):
        """Return the waveform extractor."""
        before = self._kwargs['extract_s_before']
        after = self._kwargs['extract_s_after']
        weight_power = self._kwargs['weight_power']
        probe_channels = self._kwargs['probe_channels']
        return WaveformExtractor(extract_before=before,
                                 extract_after=after,
                                 weight_power=weight_power,
                                 channels_per_group=probe_channels,
                                 thresholds=thresholds,
                                 )

    def _create_pca(self):
        """Return the PCA estimator."""
        n_pcs = self._kwargs['nfeatures_per_channel']
        return PCA(n_pcs=n_pcs)

    # Misc functions
    # -------------------------------------------------------------------------

    def update_params(self, **kwargs):
        """Update the algorithm parameters."""
        self._kwargs.update(kwargs)

    # Processing functions
    # -------------------------------------------------------------------------

    def apply_filter(self, data):
        """Bandpass-filter `data` and return a float32 array."""
        # Local renamed from `filter` to avoid shadowing the builtin.
        filt = self._create_filter()
        return filt(data).astype(np.float32)

    def find_thresholds(self, traces):
        """Find weak and strong thresholds in filtered traces."""
        n_excerpts = self._kwargs['nexcerpts']
        excerpt_size = self._kwargs['excerpt_size']
        single = self._kwargs['use_single_threshold']
        strong_f = self._kwargs['threshold_strong_std_factor']
        weak_f = self._kwargs['threshold_weak_std_factor']
        # Estimate the thresholds on a few excerpts rather than on the whole
        # recording, for speed.
        excerpt = get_excerpts(traces,
                               n_excerpts=n_excerpts,
                               excerpt_size=excerpt_size)
        excerpt_f = self.apply_filter(excerpt)
        thresholds = compute_threshold(excerpt_f,
                                       single_threshold=single,
                                       std_factor=(weak_f, strong_f))
        return {'weak': thresholds[0],
                'strong': thresholds[1]}

    def detect(self, traces_f, thresholds=None):
        """Detect connected waveform components in filtered traces.

        Parameters
        ----------
        traces_f : array
            An `(n_samples, n_channels)` array with the filtered data.
        thresholds : dict
            The weak and strong thresholds.

        Returns
        -------
        components : list
            A list of `(n, 2)` arrays with `sample, channel` pairs.
        """
        # Threshold the data following the weak and strong thresholds.
        thresholder = self._create_thresholder(thresholds)
        # Transform the filtered data according to the detection mode.
        traces_t = thresholder.transform(traces_f)
        # Compute the threshold crossings.
        weak = thresholder.detect(traces_t, 'weak')
        strong = thresholder.detect(traces_t, 'strong')
        detector = self._create_detector()
        return detector(weak_crossings=weak,
                        strong_crossings=strong)

    def extract_spikes(self, components, traces_f, thresholds=None):
        """Extract spikes from connected components.

        Parameters
        ----------
        components : list
            List of connected components.
        traces_f : array
            Filtered data.
        thresholds : dict
            The weak and strong thresholds.

        Returns
        -------
        groups : array
            An `(n_spikes,)` array with the channel group of each spike.
        spike_samples : array
            An `(n_spikes,)` array with the spike samples.
        waveforms : array
            An `(n_spikes,)` object array of per-spike waveform arrays.
        masks : array
            An `(n_spikes,)` object array of per-spike mask arrays.
        """
        n_spikes = len(components)
        assert n_spikes > 0
        # Transform the filtered data according to the detection mode.
        thresholder = self._create_thresholder()
        traces_t = thresholder.transform(traces_f)
        # Extract all waveforms.
        extractor = self._create_extractor(thresholds)
        groups, samples, waveforms, masks = zip(*[extractor(component,
                                                            data=traces_f,
                                                            data_t=traces_t,
                                                            )
                                                  for component in components])
        # Create the return arrays.
        groups = np.array(groups, dtype=np.int32)
        assert groups.shape == (n_spikes,)
        assert groups.dtype == np.int32
        samples = np.array(samples, dtype=np.float64)
        assert samples.shape == (n_spikes,)
        assert samples.dtype == np.float64
        # These are lists of arrays of various shapes (because of various
        # groups).  `object` replaces the removed `np.object` alias.
        waveforms = _array_list(waveforms)
        assert waveforms.shape == (n_spikes,)
        assert waveforms.dtype == object
        masks = _array_list(masks)
        assert masks.dtype == object
        assert masks.shape == (n_spikes,)
        # Reorder the spikes by increasing sample time.
        idx = np.argsort(samples)
        groups = groups[idx]
        samples = samples[idx]
        waveforms = waveforms[idx]
        masks = masks[idx]
        return groups, samples, waveforms, masks

    def waveform_pcs(self, waveforms, masks):
        """Compute waveform principal components.

        Returns
        -------
        pcs : array
            An `(n_features, n_samples, n_channels)` array.
        """
        pca = self._create_pca()
        return pca.fit(waveforms, masks)

    def features(self, waveforms, pcs):
        """Extract features from waveforms.

        Returns
        -------
        features : array
            An `(n_spikes, n_channels, n_features)` array.
        """
        pca = self._create_pca()
        return pca.transform(waveforms, pcs=pcs)

    # Internal functions
    # -------------------------------------------------------------------------

    def _path(self, name, key=None, group=None):
        """Return the on-disk path of an intermediate array."""
        if self._tempdir is None:
            raise ValueError("The temporary directory must be specified.")
        assert key >= 0
        if group is None:
            path = op.join(self._tempdir, '{:s}-{:d}'.format(name, key))
        else:
            assert group >= 0
            fn = '{chunk:d}.{name:s}.{group:d}'.format(
                chunk=key, name=name, group=group)
            path = op.join(self._tempdir, fn)
        return path

    def _save(self, array, name, key=None, group=None):
        """Save an intermediate array as a raw flat file."""
        path = self._path(name, key=key, group=group)
        dtype = array.dtype
        # Object arrays cannot round-trip through tofile().
        # (`object` replaces the removed `np.object` alias.)
        assert dtype != object
        shape = array.shape
        debug("Save `{}` ({}, {}).".format(path, np.dtype(dtype).name, shape))
        return array.tofile(path)

    def _load(self, name, dtype, shape=None, key=None, group=None):
        """Load an intermediate array from disk, or zeros if absent/empty."""
        path = self._path(name, key=key, group=group)
        # `shape` is required: it is dereferenced just below and needed to
        # build the zero array.  (Previously the assert ran *after*
        # `shape[0]`, so a None shape raised TypeError instead.)
        assert shape is not None
        # Handle the case where the file does not exist or is empty.
        if not op.exists(path) or shape[0] == 0:
            return np.zeros(shape, dtype=dtype)
        debug("Load `{}` ({}, {}).".format(path, np.dtype(dtype).name, shape))
        with open(path, 'rb') as f:
            return _load_ndarray(f, dtype=dtype, shape=shape)

    def _load_data_chunks(self, name,
                          n_samples=None,
                          n_channels=None,
                          groups=None,
                          spike_counts=None,
                          ):
        """Load a named per-chunk array for every group and chunk.

        Returns a dict `{group: [array_per_chunk, ...]}`.
        """
        _, _, keys, _ = zip(*list(self.iter_chunks(n_samples, n_channels)))
        out = {}
        for group in groups:
            out[group] = []
            n_channels_group = self._n_channels_per_group[group]
            for key in keys:
                n_spikes = spike_counts(group=group, chunk=key)
                # Expected shape of each saved array kind.
                shape = {
                    'spike_samples': (n_spikes,),
                    'waveforms': (n_spikes,
                                  self._n_samples_waveforms,
                                  n_channels_group),
                    'masks': (n_spikes, n_channels_group),
                    'features': (n_spikes, n_channels_group, self._n_features),
                }[name]
                dtype = np.float64 if name == 'spike_samples' else np.float32
                w = self._load(name, dtype,
                               shape=shape,
                               key=key,
                               group=group)
                out[group].append(w)
        return out

    def _pca_subset(self, wm, n_spikes_chunk=None, n_spikes_total=None):
        """Subsample a chunk's (waveforms, masks) pair for the PCA pass."""
        waveforms, masks = wm
        n_waveforms_max = self._kwargs['pca_nwaveforms_max']
        # Stride so that roughly `pca_nwaveforms_max` waveforms are kept in
        # total across all chunks.
        p = n_spikes_chunk / float(n_spikes_total)
        k = int(n_spikes_chunk / float(p * n_waveforms_max))
        k = np.clip(k, 1, n_spikes_chunk)
        return (waveforms[::k, ...], masks[::k, ...])

    def iter_chunks(self, n_samples, n_channels):
        """Yield `(s_start, s_end, keep_start, keep_end)` chunk bounds."""
        chunk_size = self._kwargs['chunk_size']
        overlap = self._kwargs['chunk_overlap']
        for bounds in chunk_bounds(n_samples, chunk_size, overlap=overlap):
            yield bounds

    # Main steps
    # -------------------------------------------------------------------------

    def step_detect(self, bounds, chunk_data, chunk_data_keep,
                    thresholds=None):
        """Filter one chunk, save it, and detect its spike components."""
        # The chunk key is its `keep_start` sample.
        key = bounds[2]
        # Apply the filter.
        data_f = self.apply_filter(chunk_data)
        assert data_f.dtype == np.float32
        assert data_f.shape == chunk_data.shape
        # Save the filtered chunk.
        self._save(data_f, 'filtered', key=key)
        # Detect spikes in the filtered chunk.
        components = self.detect(data_f, thresholds=thresholds)
        # Return the list of components in the chunk.
        return components

    def step_extract(self, bounds, components,
                     n_spikes_total=None,
                     n_channels=None,
                     thresholds=None,
                     ):
        """Extract, split, and save one chunk's spikes.

        Returns `(wm, counts)` where `wm` maps each group to the
        (waveforms, masks) subset kept for PCA and `counts` maps each group
        to the number of spikes kept in the chunk.
        """
        assert len(components) > 0
        s_start, s_end, keep_start, keep_end = bounds
        key = keep_start
        n_samples = s_end - s_start
        # Get the filtered chunk.
        chunk_f = self._load('filtered', np.float32,
                             shape=(n_samples, n_channels), key=key)
        # Extract the spikes from the chunk.
        groups, spike_samples, waveforms, masks = self.extract_spikes(
            components, chunk_f, thresholds=thresholds)
        # Remove spikes in the overlapping bands.
        idx = _keep_spikes(spike_samples, (keep_start, keep_end))
        n_spikes_chunk = idx.sum()
        debug("In chunk {}, keep {} spikes out of {}.".format(
              key, n_spikes_chunk, len(spike_samples)))
        # Split the data according to the channel groups.
        split = _split_spikes(groups,
                              idx=idx,
                              spike_samples=spike_samples,
                              waveforms=waveforms,
                              masks=masks,
                              )
        # Save the split arrays: spike samples, waveforms, masks.
        for group, out in split.items():
            for name, arr in out.items():
                self._save(arr, name, key=key, group=group)
        # Keep some waveforms in memory in order to compute PCA.
        wm = {group: (split[group]['waveforms'], split[group]['masks'])
              for group in split.keys()}
        # Number of counts per group in that chunk.
        counts = {group: len(split[group]['waveforms'])
                  for group in split.keys()}
        assert sum(counts.values()) == n_spikes_chunk
        wm = {group: self._pca_subset(wm[group],
                                      n_spikes_chunk=n_spikes_chunk,
                                      n_spikes_total=n_spikes_total)
              for group in split.keys()}
        return wm, counts

    def step_pca(self, chunk_waveforms):
        """Compute the principal components from all chunk subsets.

        `chunk_waveforms` is a dict `{key: (waveforms, masks)}`.
        """
        if not chunk_waveforms:
            return
        # Concatenate all waveforms subsets from all chunks.
        waveforms_subset, masks_subset = zip(*chunk_waveforms.values())
        waveforms_subset = np.vstack(waveforms_subset)
        masks_subset = np.vstack(masks_subset)
        assert (waveforms_subset.shape[0],
                waveforms_subset.shape[2]) == masks_subset.shape
        # Perform PCA and return the components.
        pcs = self.waveform_pcs(waveforms_subset, masks_subset)
        return pcs

    def step_features(self, bounds, pcs_per_group, spike_counts):
        """Compute and save the features of one chunk, for every group."""
        s_start, s_end, keep_start, keep_end = bounds
        key = keep_start
        # Loop over the channel groups.
        for group, pcs in pcs_per_group.items():
            # Find the waveforms shape.
            n_spikes = spike_counts(group=group, chunk=key)
            n_channels = self._n_channels_per_group[group]
            shape = (n_spikes, self._n_samples_waveforms, n_channels)
            # Load the saved waveforms.
            waveforms = self._load('waveforms', np.float32,
                                   shape=shape,
                                   key=key, group=group)
            # No spikes in the chunk.
            if waveforms is None:
                continue
            # Compute the features.
            features = self.features(waveforms, pcs)
            if features is not None:
                assert features.dtype == np.float32
                # Save the features.
                self._save(features, 'features', key=key, group=group)

    def output_data(self,
                    n_samples,
                    n_channels,
                    groups=None,
                    spike_counts=None,
                    ):
        """Assemble the final output Bunch from the saved per-chunk files."""
        n_samples_per_chunk = {bounds[2]: (bounds[3] - bounds[2])
                               for bounds in self.iter_chunks(n_samples,
                                                              n_channels)}
        keys = sorted(n_samples_per_chunk.keys())
        traces_f = [self._load('filtered', np.float32,
                               shape=(n_samples_per_chunk[key], n_channels),
                               key=key) for key in keys]

        def _load(name):
            # Convenience wrapper around _load_data_chunks for this call.
            return self._load_data_chunks(name,
                                          n_samples=n_samples,
                                          n_channels=n_channels,
                                          groups=groups,
                                          spike_counts=spike_counts,
                                          )
        output = Bunch(n_chunks=len(keys),
                       groups=groups,
                       chunk_keys=keys,
                       traces_f=traces_f,
                       spike_samples=_load('spike_samples'),
                       waveforms=_load('waveforms'),
                       masks=_load('masks'),
                       features=_load('features'),
                       spike_counts=spike_counts,
                       n_spikes_total=spike_counts(),
                       n_spikes_per_group={group: spike_counts(group=group)
                                           for group in groups},
                       n_spikes_per_chunk={chunk: spike_counts(chunk=chunk)
                                           for chunk in keys},
                       )
        return output

    def run_serial(self, traces, interval_samples=None):
        """Run SpikeDetekt using one CPU."""
        n_samples, n_channels = traces.shape
        # Take a subset if necessary.
        if interval_samples is not None:
            start, end = interval_samples
            traces = traces[start:end, ...]
        else:
            start, end = 0, n_samples
        assert 0 <= start < end <= n_samples
        # Find the weak and strong thresholds.
        info("Finding the thresholds...")
        thresholds = self.find_thresholds(traces)
        debug("Thresholds: {}.".format(thresholds))
        self.emit('find_thresholds', thresholds)
        # Pass 1: find the connected components and count the spikes.
        info("Detecting spikes...")
        # Dictionary {chunk_key: components}.
        # Every chunk has a unique key: the `keep_start` integer.
        chunk_components = {}
        for bounds in self.iter_chunks(n_samples, n_channels):
            key = bounds[2]
            chunk_data = data_chunk(traces, bounds, with_overlap=True)
            chunk_data_keep = data_chunk(traces, bounds, with_overlap=False)
            components = self.step_detect(bounds,
                                          chunk_data,
                                          chunk_data_keep,
                                          thresholds=thresholds,
                                          )
            debug("Detected {} spikes in chunk {}.".format(
                  len(components), key))
            self.emit('detect_spikes', key=key, n_spikes=len(components))
            chunk_components[key] = components
        n_spikes_per_chunk = {key: len(val)
                              for key, val in chunk_components.items()}
        n_spikes_total = sum(n_spikes_per_chunk.values())
        info("{} spikes detected in total.".format(n_spikes_total))
        # Pass 2: extract the spikes and save some waveforms before PCA.
        info("Extracting all waveforms...")
        # This is a dict {group: {key: (waveforms, masks)}}.
        chunk_waveforms = defaultdict(dict)
        # This is a dict {group: {key: n_spikes}}.
        chunk_counts = defaultdict(dict)
        for bounds in self.iter_chunks(n_samples, n_channels):
            key = bounds[2]
            components = chunk_components[key]
            if len(components) == 0:
                continue
            # This is a dict {group: (waveforms, masks)}.
            wm, counts = self.step_extract(bounds,
                                           components,
                                           n_spikes_total=n_spikes_total,
                                           n_channels=n_channels,
                                           thresholds=thresholds,
                                           )
            debug("Extracted {} spikes from chunk {}.".format(
                  sum(counts.values()), key))
            self.emit('extract_spikes', key=key, counts=counts)
            # Reorganize the chunk waveforms subsets.
            for group, wm_group in wm.items():
                n_spikes_chunk = len(wm_group[0])
                assert len(wm_group[1]) == n_spikes_chunk
                chunk_waveforms[group][key] = wm_group
                chunk_counts[group][key] = counts[group]
        spike_counts = SpikeCounts(chunk_counts)
        info("{} waveforms extracted and saved.".format(spike_counts()))
        # Compute the PCs.
        info("Performing PCA...")
        pcs = {}
        for group in self._groups:
            pcs[group] = self.step_pca(chunk_waveforms[group])
            self.emit('compute_pca', group=group, pcs=pcs[group])
        info("Principal waveform components computed.")
        # Pass 3: compute the features.
        info("Computing the features of all spikes...")
        for bounds in self.iter_chunks(n_samples, n_channels):
            self.step_features(bounds, pcs, spike_counts)
            self.emit('compute_features', key=bounds[2])
        info("All features computed and saved.")
        # Return dictionary of memmapped data.
        return self.output_data(n_samples, n_channels,
                                self._groups, spike_counts)
#------------------------------------------------------------------------------
# Clustering class
#------------------------------------------------------------------------------
class KlustaKwik(object):
    """KlustaKwik automatic clustering algorithm."""
    def __init__(self, **kwargs):
        """Store the KK parameters; `num_starting_clusters` is required."""
        assert 'num_starting_clusters' in kwargs
        self._kwargs = kwargs
        # Expose every parameter as an instance attribute too.
        self.__dict__.update(kwargs)
        # Set the version.
        from klustakwik2 import __version__
        self.version = __version__
    def cluster(self,
                model=None,
                spike_ids=None,
                features=None,
                masks=None,
                ):
        """Run KlustaKwik2 and return the spike cluster assignments.

        Features and masks are taken from `model.features_masks` unless
        given explicitly; `spike_ids` optionally restricts the spikes.
        """
        # Get the features and masks.
        if model is not None:
            if features is None:
                features = PartialArray(model.features_masks, 0)
            if masks is None:
                masks = PartialArray(model.features_masks, 1)
        # Select some spikes if needed.
        if spike_ids is not None:
            features = features[spike_ids]
            masks = masks[spike_ids]
        # Convert the features and masks to the sparse structure used
        # by KK.
        data = sparsify_features_masks(features, masks)
        data = data.to_sparse_data()
        # Run KK.
        from klustakwik2 import KK
        # NOTE(review): `pop` mutates self._kwargs, so a second cluster()
        # call on the same instance would silently fall back to the default
        # of 100 starting clusters — confirm whether reuse is intended.
        num_starting_clusters = self._kwargs.pop('num_starting_clusters', 100)
        kk = KK(data, **self._kwargs)
        self.params = kk.all_params
        self.params['num_starting_clusters'] = num_starting_clusters
        kk.cluster_mask_starts(num_starting_clusters)
        spike_clusters = kk.clusters
        return spike_clusters
def cluster(model, algorithm='klustakwik', spike_ids=None, **kwargs):
    """Launch an automatic clustering algorithm on the model.

    Parameters
    ----------
    model : BaseModel
        A model.
    algorithm : str
        Only 'klustakwik' is supported currently.
    **kwargs
        Parameters for KK.
    """
    assert algorithm == 'klustakwik'
    return KlustaKwik(**kwargs).cluster(model=model, spike_ids=spike_ids)
|
# Read three integers and print them in descending order.
a = int(input('Escreva um número: '))
b = int(input('Escreva um número: '))
c = int(input('Escreva um número: '))
# A single stable sort replaces the original six-branch comparison chain;
# ties produce the same output order as the original branches.
maior, meio, menor = sorted((a, b, c), reverse=True)
print(f'A ordem decrescente é {maior} , {meio} e {menor}')
|
from setuptools import setup, find_packages
# Packaging metadata for the django-sabayon distribution.
setup(
    name='django-sabayon',
    version='0.1.1',
    packages=find_packages(),
    author='w0de',
    author_email='harry@sysop.ooo',
    description='Django support for https://github.com/dmathieu/sabayon'
)
|
#!/usr/bin/env
"""
-----------------------------------------------
Bardel
Written By: Colton Fetters
Version: 1.0
First release: 2017
-----------------------------------------------
"""
# Import module
import os
import re
import pymel.core as pm
import maya.cmds as cmds
import maya.mel as mel
# Studio module
import bd_lib.bd_config_lib as config
from bd_lay.bd_scene_info import SceneInfo
class Core(object):
    """Maya/V-Ray proxy helper.

    Creates V-Ray proxies for selected assets, reconnects their original
    diffuse maps for viewport display, and toggles proxy display modes.
    Written for Python 2 (Maya's embedded interpreter).
    """
    def __init__(self, path=None):
        super(Core, self).__init__()
        # Studio configuration and current-scene metadata.
        self._CONFIG = config.Config()
        self._MAYA_PATH = cmds.file(q=True, sceneName=True)
        self._SCENEINFO = SceneInfo(self._MAYA_PATH)
        # Sub-folder (under the scene root) where .vrmesh files are stored.
        self._PROXY = '/18_Proxy'
        # Fallback display color when no diffuse map is found.
        self._RED = (1, 0, 0)
        if not path:
            self._PATH = self.create_proxy_directory()
        else:
            self._PATH = path
    def create_proxy_directory(self):
        """Create (if needed) and return the proxy output folder."""
        rootPath = self._SCENEINFO.get_root_dir()
        folder = rootPath + self._PROXY
        if not os.path.exists(folder):
            os.makedirs(folder)
        return folder
    def create_vray_proxy(self, assetName, connectMap=True, smartReduce=True, smartAmount=.25, previewFaces=10000):
        """Create a V-Ray proxy of the selected assets.

        Exports the selection to a .vrmesh via the `vrayCreateProxy` MEL
        command and optionally reconnects the shader's diffuse map.

        Args:
            assetName: base name of the proxy node/file.
            connectMap: reconnect the original diffuse map when True.
            smartReduce: derive the preview face count from the selection.
            smartAmount: fraction of faces kept when smartReduce is on.
            previewFaces: explicit preview face count when smartReduce is off.
        """
        nodeName = '{}_Proxy'.format(assetName)
        fileName = '{}.vrmesh'.format(nodeName)
        vrayMesh = '{}_vraymesh'.format(nodeName)
        vrayMeshMaterial = '{}_vraymeshmtl'.format(nodeName)
        previewType = 'combined'
        if smartReduce:
            previewFaces = self.smart_reduce(value=smartAmount)
        else:
            previewFaces = previewFaces
        # Build and run the MEL export command.
        melCommand = 'vrayCreateProxy -exportType 1 -previewFaces {} -dir "{}" -fname "{}"'.format(
            previewFaces, self._PATH, fileName)
        options = ' -overwrite -ignoreHiddenObjects -createProxyNode -node "{}" -previewType "{}";'.format(
            nodeName, previewType)
        print(melCommand + options)
        mel.eval(melCommand + options)
        if connectMap:
            originalMap = self.find_orignal_diffuse_map(vrayMeshMaterial)
            self.connect_orignal_diffuse_map(vrayMesh, vrayMeshMaterial, originalMap)
    def find_orignal_diffuse_map(self, vrayMeshMaterial):
        """Return the first diffuse map feeding the proxy material.

        Walks the material's upstream connections, skipping vraymesh nodes;
        returns the first connected color map, or self._RED when the first
        non-vraymesh material has no map.

        NOTE(review): if every upstream node contains 'vraymesh' this falls
        through and implicitly returns None — verify callers handle that.
        """
        materialConnections = cmds.listConnections(
            vrayMeshMaterial, s=True, d=False)
        for eachMaterial in materialConnections:
            if 'vraymesh' not in eachMaterial:
                colorMaps = cmds.listConnections(eachMaterial, s=True, d=False)
                if colorMaps:
                    return colorMaps[0]
                else:
                    return self._RED
    def connect_orignal_diffuse_map(self, vrayMesh, vrayMeshMaterial, originalMap):
        """Connect the found map (or a flat color) to the proxy material.

        Args:
            vrayMesh: name of the proxy mesh node.
            vrayMeshMaterial: name of the proxy material node.
            originalMap: map node name, or the RGB fallback tuple.
        """
        # Heuristic: node names with an uppercase letter are treated as map
        # nodes; the RGB-tuple fallback stringifies without uppercase.
        search = re.search(r'[A-Z]', originalMap)
        if search:
            cmds.connectAttr('{}.outValue'.format(originalMap),
                             '{}.color'.format(vrayMeshMaterial))
        else:
            # NOTE(review): setAttr is given no component values here —
            # presumably this was meant to set self._RED; verify in Maya.
            cmds.setAttr('{}.color'.format(vrayMeshMaterial), type='double3')
    def toggle_view(self, displayType=3):
        """Change the display type of the selected (or all) V-Ray proxies.

        Assumes the user is selecting something related to a V-Ray proxy
        (mesh, preview, or the proxy node itself) and resolves the
        VRayMesh node from there.

        Args:
            displayType: geometry display option (default: 3).
        """
        curSelection = pm.ls(sl=True)
        if not curSelection:
            # Nothing selected: operate on every VRayMesh in the scene.
            curSelection = pm.ls(type='VRayMesh')
            for asset in curSelection:
                self.view_set(asset, displayType)
        if type(curSelection[0]) == pm.nodetypes.Mesh or type(curSelection[0]) == pm.nodetypes.VRayMeshPreview:
            print curSelection[0]
            assetConnections = pm.listConnections(str(curSelection[0]))
            for eachConnection in assetConnections:
                if self.type_check(eachConnection):
                    asset = eachConnection
                    self.view_set(asset, displayType)
        elif type(curSelection[0]) != pm.nodetypes.VRayMesh:
            print curSelection[0]
            assetConnections = pm.listConnections(str(curSelection[0].getShape()))
            for asset in assetConnections:
                if self.type_check(asset):
                    self.view_set(asset, displayType)
        else:
            asset = curSelection[0]
            self.view_set(asset, displayType)
    def type_check(self, asset):
        """Return True when `asset` is a VRayMesh node.

        Args:
            asset: PyMel node.
        """
        if type(asset) == pm.nodetypes.VRayMesh:
            return True
        else:
            return False
    def view_set(self, asset, displayType):
        """Set the proxy's geomType attribute if it differs."""
        if asset.geomType.get() != displayType:
            asset.geomType.set(displayType)
    def smart_reduce(self, value):
        """Return the selection's face count scaled by `value`."""
        sel = pm.ls(sl=True)
        faceNumber = pm.polyEvaluate(sel, face=True)
        smallerNumber = faceNumber * (value)
        return smallerNumber
    def find_selection_shader(self):
        """Return the materials assigned to the current selection."""
        curSelection = pm.ls(sl=True, dag=True, s=True)
        for each in curSelection:
            # NOTE(review): overwritten each iteration — only the last
            # shape's shading engine is kept; confirm this is intended.
            self.selectionsShader = pm.listConnections(each, type='shadingEngine')
        shaderList = pm.ls(pm.listConnections(self.selectionsShader), materials=1)
        return(shaderList)
    def generate_geo_list(self):
        """Collect the transforms sharing the selection's single shader."""
        shaderList = self.find_selection_shader()
        if len(shaderList) == 1:
            self.geoList = []
            objectList = pm.listConnections(self.selectionsShader)
            for each in objectList:
                if type(each) == pm.nodetypes.Transform:
                    self.geoList.append(each)
            return(self.geoList)
    def select_geo(self):
        """Add every transform that shares the selection's shader."""
        self.generate_geo_list()
        for each in self.geoList:
            pm.select(each, add=True)
def main():
    """Create a temporary proxy from the selection and set its display mode."""
    Core(path=None).create_vray_proxy(assetName='_Temp', connectMap=True)
    Core(path=None).toggle_view(displayType=3)
if __name__ == '__main__':
    main()
|
class PagSeguroPaymentParserData:
    """Value object holding the code and registration date parsed from a
    PagSeguro payment response."""

    def __init__(self, code=None, registrationDate=None):
        # Instance attributes (previously class-level defaults, which are
        # shared by every instance until first assignment).  The no-argument
        # construction used by existing callers still works.
        self.code = code
        self.registrationDate = registrationDate

    def getCode(self):
        """Return the payment code."""
        return self.code

    def setCode(self, code):
        """Set the payment code."""
        self.code = code

    def getRegistrationDate(self):
        """Return the registration date."""
        return self.registrationDate

    def setRegistrationDate(self, registrationDate):
        """Set the registration date."""
        self.registrationDate = registrationDate
|
#!/usr/bin/env python3
import gzip
import os
import subprocess
import sys
import tempfile
import collections
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# The AOSP checkout root must come from the build environment.
try:
    AOSP_DIR = os.environ['ANDROID_BUILD_TOP']
except KeyError:
    print('error: ANDROID_BUILD_TOP environment variable is not set.',
          file=sys.stderr)
    sys.exit(1)
# System/builtin header directories passed to the dumper via -isystem.
BUILTIN_HEADERS_DIR = (
    os.path.join(AOSP_DIR, 'bionic', 'libc', 'include'),
    os.path.join(AOSP_DIR, 'external', 'libcxx', 'include'),
    os.path.join(AOSP_DIR, 'prebuilts', 'clang-tools', 'linux-x86',
                 'clang-headers'),
)
# Test headers exported to the dumper via -I.
EXPORTED_HEADERS_DIR = (
    os.path.join(AOSP_DIR, 'development', 'vndk', 'tools', 'header-checker',
                 'tests'),
)
# File-extension conventions for shared libraries and their ABI dumps.
SO_EXT = '.so'
SOURCE_ABI_DUMP_EXT_END = '.lsdump'
SOURCE_ABI_DUMP_EXT = SO_EXT + SOURCE_ABI_DUMP_EXT_END
COMPRESSED_SOURCE_ABI_DUMP_EXT = SOURCE_ABI_DUMP_EXT + '.gz'
VENDOR_SUFFIX = '.vendor'
# Default compiler/tool flags.
DEFAULT_CPPFLAGS = ['-x', 'c++', '-std=c++11']
DEFAULT_CFLAGS = ['-std=gnu99']
DEFAULT_HEADER_FLAGS = ["-dump-function-declarations"]
DEFAULT_FORMAT = 'ProtobufTextFormat'
class Target(object):
    """Architecture configuration of a product build target."""

    def __init__(self, is_2nd, product):
        suffix = '_2ND' if is_2nd else ''
        queried = ['TARGET_ARCH',
                   'TARGET{}_ARCH'.format(suffix),
                   'TARGET{}_ARCH_VARIANT'.format(suffix),
                   'TARGET{}_CPU_VARIANT'.format(suffix)]
        values = get_build_vars_for_product(queried, product)
        (self.primary_arch, self.arch,
         self.arch_variant, self.cpu_variant) = values
        assert self.primary_arch != ''

    def get_arch_str(self):
        """Return a string that represents the architecture and the
        architecture variant.

        If TARGET_ARCH == TARGET_ARCH_VARIANT, soong makes targetArchVariant
        empty. This is the case for aosp_x86_64.
        """
        variant = self.arch_variant
        if not variant or variant == self.arch:
            return self.arch
        return self.arch + '_' + variant

    def get_arch_cpu_str(self):
        """Return a string that represents the architecture, the architecture
        variant, and the CPU variant."""
        cpu = self.cpu_variant
        if not cpu or cpu == 'generic':
            return self.get_arch_str()
        return self.get_arch_str() + '_' + cpu
def copy_reference_dump(lib_path, reference_dump_dir, compress):
    """Copy an ABI dump into the reference directory, stripping AOSP_DIR.

    Optionally gzip-compresses the copy.  Returns the destination path.
    """
    dump_path = os.path.join(reference_dump_dir, os.path.basename(lib_path))
    if compress:
        dump_path += '.gz'
    os.makedirs(os.path.dirname(dump_path), exist_ok=True)
    content = read_output_content(lib_path, AOSP_DIR)
    opener = gzip.open if compress else open
    with opener(dump_path, 'wb') as f:
        f.write(bytes(content, 'utf-8'))
    print('Created abi dump at', dump_path)
    return dump_path
def read_output_content(output_path, replace_str):
    """Read `output_path` and strip every occurrence of `replace_str`."""
    with open(output_path, 'r') as f:
        content = f.read()
    return content.replace(replace_str, '')
def run_header_abi_dumper(input_path, cflags=tuple(),
                          export_include_dirs=EXPORTED_HEADERS_DIR,
                          flags=tuple()):
    """Dump the ABI of `input_path` via header-abi-dumper and return it
    with AOSP_DIR stripped from all paths."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        dump_path = os.path.join(
            tmp_dir, os.path.basename(input_path)) + '.dump'
        run_header_abi_dumper_on_file(input_path, dump_path,
                                      export_include_dirs, cflags, flags)
        return read_output_content(dump_path, AOSP_DIR)
def run_header_abi_dumper_on_file(input_path, output_path,
                                  export_include_dirs=tuple(), cflags=tuple(),
                                  flags=tuple()):
    """Run header-abi-dumper to dump ABI from `input_path` and the output is
    written to `output_path`.

    C++ flags are used for .cpp/.cc/.h inputs, C flags otherwise.
    """
    input_ext = os.path.splitext(input_path)[1]
    cmd = ['header-abi-dumper', '-o', output_path, input_path]
    # Loop variables renamed from `dir` to avoid shadowing the builtin.
    for include_dir in export_include_dirs:
        cmd += ['-I', include_dir]
    cmd += flags
    if '-output-format' not in flags:
        cmd += ['-output-format', DEFAULT_FORMAT]
    if input_ext == ".h":
        # Bare headers additionally need the declaration-dumping flags.
        cmd += DEFAULT_HEADER_FLAGS
    cmd += ['--']
    cmd += cflags
    if input_ext in ('.cpp', '.cc', '.h'):
        cmd += DEFAULT_CPPFLAGS
    else:
        cmd += DEFAULT_CFLAGS
    for system_dir in BUILTIN_HEADERS_DIR:
        cmd += ['-isystem', system_dir]
    # The export include dirs imply local include dirs.
    for include_dir in export_include_dirs:
        cmd += ['-I', include_dir]
    subprocess.check_call(cmd)
def run_header_abi_linker(output_path, inputs, version_script, api, arch,
                          flags=tuple()):
    """Link inputs, taking version_script into account."""
    cmd = ['header-abi-linker', '-o', output_path, '-v', version_script,
           '-api', api, '-arch', arch]
    cmd.extend(flags)
    # Default the I/O formats unless the caller overrode them.
    for fmt_flag in ('-input-format', '-output-format'):
        if fmt_flag not in flags:
            cmd.extend([fmt_flag, DEFAULT_FORMAT])
    cmd.extend(inputs)
    subprocess.check_call(cmd)
    return read_output_content(output_path, AOSP_DIR)
def make_targets(product, variant, targets):
    """Invoke soong to build `targets` for the given product and variant."""
    cmd = ['build/soong/soong_ui.bash', '--make-mode', '-j',
           'TARGET_PRODUCT=' + product, 'TARGET_BUILD_VARIANT=' + variant]
    cmd.extend(targets)
    subprocess.check_call(cmd, cwd=AOSP_DIR)
def make_tree(product, variant):
    """Build every lsdump file in the tree via the `findlsdumps` goal."""
    targets = ['findlsdumps']
    return make_targets(product, variant, targets)
def make_libraries(product, variant, vndk_version, targets, libs):
    """Build the lsdump files for the specific libraries in `libs`."""
    lsdump_paths = read_lsdump_paths(product, variant, vndk_version, targets,
                                     build=True)
    # Collect the lsdump paths of every variant of every requested library.
    make_target_paths = []
    for lib_name in libs:
        for tag, path in lsdump_paths[lib_name].values():
            make_target_paths.append(path)
    make_targets(product, variant, make_target_paths)
def get_lsdump_paths_file_path(product, variant):
    """Get the path to lsdump_paths.txt."""
    build_vars = get_build_vars_for_product(['PRODUCT_OUT'], product, variant)
    return os.path.join(build_vars[0], 'lsdump_paths.txt')
def _is_sanitizer_variation(variation):
"""Check whether the variation is introduced by a sanitizer."""
return variation in {'asan', 'hwasan', 'tsan', 'intOverflow', 'cfi', 'scs'}
def _get_module_variant_dir_name(tag, vndk_version, arch_cpu_str):
"""Return the module variant directory name.
For example, android_x86_shared, android_vendor.R_arm_armv7-a-neon_shared.
"""
if tag in ('LLNDK', 'NDK', 'PLATFORM'):
return 'android_%s_shared' % arch_cpu_str
if tag.startswith('VNDK'):
return 'android_vendor.%s_%s_shared' % (vndk_version, arch_cpu_str)
raise ValueError(tag + ' is not a known tag.')
def _read_lsdump_paths(lsdump_paths_file_path, vndk_version, targets):
    """Read lsdump paths from lsdump_paths.txt for each libname and variant.
    This function returns a dictionary, {lib_name: {arch_cpu: (tag, path)}}.
    For example,
    {
      "libc": {
        "x86_x86_64": (
          "NDK",
          "path/to/libc.so.lsdump"
        )
      }
    }
    """
    lsdump_paths = collections.defaultdict(dict)
    # Remember the chosen variant suffix per (libname, arch_cpu) so that a
    # later line only replaces an earlier one when its suffix sorts higher.
    suffixes = collections.defaultdict(dict)
    with open(lsdump_paths_file_path, 'r') as lsdump_paths_file:
        for line in lsdump_paths_file:
            # Each line looks like "TAG:dir/.../variant_dir/libfoo.so.lsdump".
            tag, path = (x.strip() for x in line.split(':', 1))
            if not path:
                continue
            dirname, filename = os.path.split(path)
            if not filename.endswith(SOURCE_ABI_DUMP_EXT):
                continue
            libname = filename[:-len(SOURCE_ABI_DUMP_EXT)]
            if not libname:
                continue
            # The parent directory name encodes the module variant.
            variant = os.path.basename(dirname)
            if not variant:
                continue
            for target in targets:
                arch_cpu = target.get_arch_cpu_str()
                prefix = _get_module_variant_dir_name(tag, vndk_version,
                                                      arch_cpu)
                if not variant.startswith(prefix):
                    continue
                new_suffix = variant[len(prefix):]
                # Skip if the suffix contains APEX variations.
                new_variations = [x for x in new_suffix.split('_') if x]
                if new_variations and not all(_is_sanitizer_variation(x)
                                              for x in new_variations):
                    continue
                # Keep the lexicographically largest sanitizer suffix seen
                # so far for this (libname, arch_cpu) pair.
                old_suffix = suffixes[libname].get(arch_cpu)
                if not old_suffix or new_suffix > old_suffix:
                    lsdump_paths[libname][arch_cpu] = (tag, path)
                    suffixes[libname][arch_cpu] = new_suffix
    return lsdump_paths
def read_lsdump_paths(product, variant, vndk_version, targets, build=True):
    """Build lsdump_paths.txt (optionally) and read the paths from it."""
    paths_file = get_lsdump_paths_file_path(product, variant)
    if build:
        # lsdump_paths.txt is itself a build target.
        make_targets(product, variant, [paths_file])
    return _read_lsdump_paths(os.path.join(AOSP_DIR, paths_file),
                              vndk_version, targets)
def find_lib_lsdumps(lsdump_paths, libs, target):
    """Find the lsdump corresponding to libs for the given target.
    This function returns a list of (tag, absolute_path).
    For example,
    [
      (
        "NDK",
        "/path/to/libc.so.lsdump"
      )
    ]
    """
    arch_cpu = target.get_arch_cpu_str()
    if libs:
        selected = [lsdump_paths[lib_name][arch_cpu] for lib_name in libs]
    else:
        # No explicit selection: take every library for this target.
        selected = [paths[arch_cpu] for paths in lsdump_paths.values()]
    return [(tag, os.path.join(AOSP_DIR, path)) for tag, path in selected]
def run_abi_diff(old_test_dump_path, new_test_dump_path, arch, lib_name,
                 flags=tuple()):
    """Diff two ABI dumps with header-abi-diff and return its exit status
    (0 on no incompatible change)."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        cmd = ['header-abi-diff', '-new', new_test_dump_path, '-old',
               old_test_dump_path, '-arch', arch, '-lib', lib_name,
               '-o', os.path.join(tmp_dir, lib_name) + '.abidiff']
        cmd += flags
        # Default both input formats unless the caller overrode them.
        for format_flag in ('-input-format-old', '-input-format-new'):
            if format_flag not in flags:
                cmd += [format_flag, DEFAULT_FORMAT]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as err:
            return err.returncode
        return 0
def get_build_vars_for_product(names, product=None, variant=None):
    """Query Soong for build variables; return their values in the order of
    `names`, or None when no target is launched or the query fails."""
    if product is None and 'ANDROID_PRODUCT_OUT' not in os.environ:
        return None

    env = dict(os.environ)
    if product:
        env['TARGET_PRODUCT'] = product
    if variant:
        env['TARGET_BUILD_VARIANT'] = variant

    cmd = [
        os.path.join('build', 'soong', 'soong_ui.bash'),
        '--dumpvars-mode', '-vars', ' '.join(names),
    ]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=AOSP_DIR, env=env)
    out, err = proc.communicate()
    if proc.returncode != 0:
        print("error: %s" % err.decode('utf-8'), file=sys.stderr)
        return None

    # Each output line looks like NAME='value'; keep only the value.
    values = []
    for line in out.decode('utf-8').strip().splitlines():
        values.append(line.partition('=')[2].replace("'", ''))
    return values
|
'''
Created on Sep 15, 2015
@author: Jonathan Yu
'''
def repeat(word, number):
    """Repeat the leading consonant cluster (up to the first vowel, at most
    the first three letters) of `word` `number` times.

    Words that start with a vowel, single-letter words, and words whose
    inspected letters contain no vowel are returned unchanged.  The case of
    the original first letter is preserved; the repeated part is lowercase.
    """
    vowels = ('a', 'e', 'i', 'o', 'u')
    lowered = word.lower()
    # Vowel-initial or one-letter words are left alone.
    if lowered[0] in vowels or len(lowered) == 1:
        return word
    if len(lowered) == 2:
        if lowered[1] in vowels:
            doubled = lowered * number
            return word[0] + doubled[1:]
        return word
    if lowered[2] in vowels:
        # Repeat the three-letter prefix ending in the vowel.
        if len(lowered) == 3:
            doubled = lowered[:3] * number
        else:
            doubled = lowered[:3] * number + lowered[3:]
        return word[0] + doubled[1:]
    if lowered[1] in vowels:
        # Repeat the two-letter prefix ending in the vowel.
        doubled = lowered[:2] * number + lowered[2:]
        return word[0] + doubled[1:]
    return word
if __name__ == '__main__':
    # No script behavior; `repeat` is intended to be imported elsewhere.
    pass
|
from functools import cached_property
from onegov.core.utils import normalize_for_url
from onegov.election_day import _
from onegov.election_day.layouts.detail import DetailLayout
from onegov.election_day.utils import pdf_filename
from onegov.election_day.utils import svg_filename
class VoteLayout(DetailLayout):
    """Layout for the detail views of a vote.

    The vote is rendered as a set of tabs: simple votes have one set of
    entities/districts/statistics tabs, complex votes (with a counter
    proposal) have one set per ballot plus a tie-breaker, and both end
    with a downloads ('data') tab.
    """
    def __init__(self, model, request, tab='entities'):
        super().__init__(model, request)
        # Name of the currently active tab; drives titles, menu and links.
        self.tab = tab
    # Tabs whose views embed a results table (see `table_link`).
    tabs_with_embedded_tables = (
        'entities',
        'districts',
        'statistics',
        'proposal-entities',
        'proposal-districts',
        'proposal-statistics',
        'counter-proposal-entities',
        'counter-proposal-districts',
        'counter-proposal-statistics',
        'tie-breaker-entities',
        'tie-breaker-districts',
        'tie-breaker-statistics',
    )
    @cached_property
    def all_tabs(self):
        """Return all tabs. Ordering is important for the main view."""
        return (
            'entities',
            'districts',
            'statistics',
            'proposal-entities',
            'proposal-districts',
            'proposal-statistics',
            'counter-proposal-entities',
            'counter-proposal-districts',
            'counter-proposal-statistics',
            'tie-breaker-entities',
            'tie-breaker-districts',
            'tie-breaker-statistics',
            'data'
        )
    def title(self, tab=None):
        """Return the (translatable) title of the given or current tab."""
        tab = (self.tab if tab is None else tab) or ''
        if tab == 'entities':
            return self.principal.label('entities')
        if tab == 'districts':
            # NOTE(review): this branch goes through `self.app.principal`
            # while the one above uses `self.principal` — confirm both
            # resolve to the same principal.
            return self.app.principal.label('districts')
        if tab == 'statistics':
            return _("Statistics")
        if tab.startswith('proposal'):
            return _("Proposal")
        if tab.startswith('counter-proposal'):
            return _("Counter Proposal")
        if tab.startswith('tie-breaker'):
            return _("Tie-Breaker")
        if tab == 'data':
            return _("Downloads")
        return ''
    def subtitle(self, tab=None):
        """Return the subtitle (entities/districts/statistics part) of a
        ballot-prefixed tab, or '' for tabs without one."""
        tab = (self.tab if tab is None else tab) or ''
        if tab.endswith('-entities'):
            return self.principal.label('entities')
        if tab.endswith('-districts'):
            # NOTE(review): `self.app.principal` vs `self.principal` — see
            # the same asymmetry in `title`.
            return self.app.principal.label('districts')
        if tab.endswith('-statistics'):
            return _("Statistics")
        return ''
    def tab_visible(self, tab):
        """Decide whether a tab is shown: unprefixed tabs belong to simple
        votes, ballot-prefixed tabs to complex votes, district tabs only
        when the principal has districts."""
        if self.hide_tab(tab):
            return False
        if not self.has_results:
            return False
        if tab == 'entities':
            return self.type == 'simple'
        if tab == 'proposal-entities':
            return self.type == 'complex'
        if tab == 'counter-proposal-entities':
            return self.type == 'complex'
        if tab == 'tie-breaker-entities':
            return self.type == 'complex'
        if tab == 'districts':
            return self.has_districts and self.type == 'simple'
        if tab == 'proposal-districts':
            return self.has_districts and self.type == 'complex'
        if tab == 'counter-proposal-districts':
            return self.has_districts and self.type == 'complex'
        if tab == 'tie-breaker-districts':
            return self.has_districts and self.type == 'complex'
        if tab == 'statistics':
            return self.type == 'simple'
        if tab == 'proposal-statistics':
            return self.type == 'complex'
        if tab == 'counter-proposal-statistics':
            return self.type == 'complex'
        if tab == 'tie-breaker-statistics':
            return self.type == 'complex'
        return True
    @cached_property
    def visible(self):
        # Whether the currently selected tab is visible.
        return self.tab_visible(self.tab)
    @cached_property
    def type(self):
        # 'simple' or 'complex' (with counter proposal), from the model.
        return self.model.type
    @cached_property
    def scope(self):
        """Return 'entities' or 'districts' depending on the current tab;
        None for statistics/data tabs."""
        if 'entities' in self.tab:
            return 'entities'
        if 'district' in self.tab:
            return 'districts'
    @cached_property
    def ballot(self):
        """Return the ballot the current tab refers to (counter proposal or
        tie-breaker for complex votes, otherwise the proposal)."""
        if self.type == 'complex' and 'counter' in self.tab:
            return self.model.counter_proposal
        if self.type == 'complex' and 'tie-breaker' in self.tab:
            return self.model.tie_breaker
        return self.model.proposal
    @cached_property
    def map_link(self):
        """Return the link to the map view matching the current scope;
        implicitly None when the scope is neither entities nor districts."""
        if self.scope == 'entities':
            return self.request.link(
                self.model,
                f'{self.ballot.type}-by-entities-map',
                query_params={'locale': self.request.locale}
            )
        if self.scope == 'districts':
            return self.request.link(
                self.model,
                f'{self.ballot.type}-by-districts-map',
                query_params={'locale': self.request.locale}
            )
    def table_link(self, query_params=None):
        """Return the link to the embedded table view for the current tab,
        or None if this tab has no embedded table."""
        query_params = query_params or {}
        if self.tab not in self.tabs_with_embedded_tables:
            return None
        query_params['locale'] = self.request.locale
        if self.scope == 'entities':
            return self.request.link(
                self.model,
                f'{self.ballot.type}-by-entities-table',
                query_params=query_params
            )
        if self.scope == 'districts':
            return self.request.link(
                self.model,
                f'{self.ballot.type}-by-districts-table',
                query_params=query_params
            )
        # Statistics tabs fall through to the statistics table.
        return self.request.link(
            self.model,
            f'{self.ballot.type}-statistics-table',
            query_params=query_params
        )
    @cached_property
    def widget_link(self):
        # Link to the embeddable vote header widget.
        return self.request.link(
            self.model, name='vote-header-widget'
        )
    @cached_property
    def summarize(self):
        # True when there is more than one result row to summarize.
        return self.ballot.results.count() != 1
    @cached_property
    def main_view(self):
        """Return the link of the first tab to show by default."""
        if self.type == 'complex':
            return self.request.link(self.model, 'proposal-entities')
        # First non-hidden tab wins; `all_tabs` ordering matters here.
        for tab in self.all_tabs:
            if not self.hide_tab(tab):
                return self.request.link(self.model, tab)
        return self.request.link(self.model, 'entities')
    @cached_property
    def answer(self):
        # The vote's answer (e.g. accepted/rejected), from the model.
        return self.model.answer
    @cached_property
    def menu(self):
        """Return the navigation menu as (title, link, active, submenu)
        tuples; complex votes get one submenu per ballot."""
        if self.type == 'complex':
            result = []
            for title, prefix in (
                (_("Proposal"), 'proposal'),
                (_("Counter Proposal"), 'counter-proposal'),
                (_("Tie-Breaker"), 'tie-breaker')
            ):
                submenu = [
                    (
                        self.subtitle(tab),
                        self.request.link(self.model, tab),
                        self.tab == tab,
                        []
                    ) for tab in (
                        f'{prefix}-entities',
                        f'{prefix}-districts',
                        f'{prefix}-statistics'
                    ) if self.tab_visible(tab)
                ]
                if submenu:
                    result.append((
                        title,
                        '',
                        self.tab.startswith(prefix),
                        submenu
                    ))
            if self.tab_visible('data'):
                result.append((
                    self.title('data'),
                    self.request.link(self.model, 'data'),
                    self.tab == 'data',
                    []
                ))
            return result
        # Simple votes: flat menu over all visible tabs.
        return [
            (
                self.title(tab),
                self.request.link(self.model, tab),
                self.tab == tab,
                []
            ) for tab in self.all_tabs if self.tab_visible(tab)
        ]
    @cached_property
    def pdf_path(self):
        """ Returns the path to the PDF file or None, if it is not available.
        """
        path = 'pdf/{}'.format(
            pdf_filename(
                self.model,
                self.request.locale,
                last_modified=self.last_modified
            )
        )
        if self.request.app.filestorage.exists(path):
            return path
        return None
    @cached_property
    def svg_prefix(self):
        # Which pre-rendered map to use for the current tab.
        return 'districts-map' if 'districts' in self.tab else 'entities-map'
    @cached_property
    def svg_path(self):
        """ Returns the path to the SVG file or None, if it is not available.
        """
        if not self.ballot:
            return None
        path = 'svg/{}'.format(
            svg_filename(
                self.ballot,
                self.svg_prefix,
                self.request.locale,
                last_modified=self.last_modified
            )
        )
        if self.request.app.filestorage.exists(path):
            return path
        return None
    @cached_property
    def svg_link(self):
        """ Returns a link to the SVG download view. """
        return self.request.link(
            self.ballot, name='{}-svg'.format(self.svg_prefix)
        )
    @cached_property
    def svg_name(self):
        """ Returns a nice to read SVG filename. """
        return '{}.svg'.format(
            normalize_for_url(
                '{}-{}-{}'.format(
                    self.model.id,
                    self.request.translate(self.title() or ''),
                    self.request.translate(self.subtitle() or '')
                ).rstrip('-')
            )
        )
|
from config import db
import random
import web
class Question():
    """A quiz question whose answer (Busted/Plausible/Confirmed) can be
    obfuscated as a run of dots whose length depends on the description."""

    def __init__(self, description, answer):
        self.description = description
        self.answer = answer

    def get_obf_dot_number(self):
        """Base dot count, derived from the description length."""
        return ((len(self.description) % 10) + 2) * 2

    def get_obf_busted(self):
        # 'Busted' is one dot longer than the base count.
        return '.' * (self.get_obf_dot_number() + 1)

    def get_obf_plausible(self):
        # 'Plausible' is exactly the base count.
        return '.' * self.get_obf_dot_number()

    def get_obf_confirmed(self):
        # 'Confirmed' is two dots longer than the base count.
        return '.' * (self.get_obf_dot_number() + 2)

    def get_obf_answer(self):
        """Return the obfuscated dots for this question's answer code
        ('B', 'P' or 'C'); None for any other code."""
        dispatch = {
            'B': self.get_obf_busted,
            'P': self.get_obf_plausible,
            'C': self.get_obf_confirmed,
        }
        method = dispatch.get(self.answer)
        return method() if method else None
class SpecialQuestion():
    """A special question grouping several ordinary questions under one
    shared description."""

    def __init__(self, description, question_list):
        self.description = description
        self.question_list = question_list
def shuffle_question_list():
    """Return the IDs of all ordinary (non-special) questions, shuffled."""
    rows = db.select('questions', what='ID_question',
                     where='ID_special_question is NULL')
    ids = [row['ID_question'] for row in rows]
    random.shuffle(ids)
    return ids
def shuffle_special_question_list():
    """Return the IDs of all special questions, shuffled."""
    rows = db.select('special_questions', what='ID_special_question')
    ids = [row['ID_special_question'] for row in rows]
    random.shuffle(ids)
    return ids
def get_question(ID_question):
    """Load a single question by ID; return a Question, or None if the ID
    does not exist."""
    rows = list(db.select('questions', dict(ID_question=ID_question),
                          where='ID_question=$ID_question'))
    row = web.listget(rows, 0, default=None)
    if row is None:
        return None
    return Question(row['Description'], row['Answer'])
def get_special_question(ID_special_question):
    """Load a special question and its sub-questions by ID; return a
    SpecialQuestion, or None if the ID does not exist."""
    rows = list(db.select('special_questions',
                          dict(ID_special_question=ID_special_question),
                          where='ID_special_question=$ID_special_question'))
    row = web.listget(rows, 0, default=None)
    if row is None:
        return None
    # Fetch all questions attached to this special question.
    sub_rows = db.select(
        'questions',
        dict(ID_special_question=row['ID_special_question']),
        where='ID_special_question=$ID_special_question')
    questions = [Question(r['Description'], r['Answer']) for r in sub_rows]
    return SpecialQuestion(row['Description'], questions)
|
import setuptools
# The PyPI long description is the README, verbatim.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata; sources live under src/ (src-layout).
setuptools.setup(
    name="magic_config",
    version="0.1.0",
    author="mogaiskii",
    author_email="sani.mog@gmail.com",
    description="declarative settings with multiple backends",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mogaiskii/magic_config",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.6",
    license="MIT",
    keywords=["config", "settings", "configuration"],
    # Runtime dependency for the YAML backend.
    install_requires=[
        'pyyaml~=5.4.1',
    ]
)
|
import torch
import torch.nn as nn
import numpy as np
'''Q Network'''
class ImageConv(nn.Module):
    """Five stride-2 conv layers that encode an image into a flat feature
    vector; the output width is inferred and exposed as `self.out_dim`."""
    def _initialize_weights(self):
        # Orthogonal init (ReLU gain) for conv/linear weights, zero biases.
        for module in self.modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight, nn.init.calculate_gain('relu'))
                nn.init.constant_(module.bias, 0)
    def __init__(self, image_shape: tuple) -> None:
        '''
        image_shape:(channel, w, h), such as: (3, 200, 240).
        '''
        assert len(image_shape) == 3, ValueError("wrong image shape.")
        super(ImageConv, self).__init__()
        channels, w, h = image_shape
        filters = 32
        # Each stride-2 conv roughly halves the spatial resolution.
        self.net = nn.Sequential(
            nn.Conv2d(channels, filters, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(filters, filters, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(filters, filters, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(filters, filters, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(filters, filters, 3, stride=2, padding=1), nn.ReLU(),
            nn.Flatten(),
        )
        # Infer the flattened output size with a dummy forward pass.
        with torch.no_grad():
            tmp = torch.rand((1, channels, w, h))
            self.out_dim = self.net(tmp).shape[1]
        self._initialize_weights()
    def forward(self, x):
        # x: (batch, channels, w, h) -> (batch, out_dim)
        return self.net(x)
class QNet(nn.Module):  # nn.Module is a standard PyTorch Network
    """Plain MLP Q-network: state -> one Q value per discrete action."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(QNet, self).__init__()
        # Input layer, two hidden layers, linear output head.
        layers = [nn.Linear(state_dim, mid_dim), nn.ReLU()]
        for _ in range(2):
            layers += [nn.Linear(mid_dim, mid_dim), nn.ReLU()]
        layers.append(nn.Linear(mid_dim, action_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, state):
        """Return the Q value for every action."""
        return self.net(state)
class QNetDuel(nn.Module):  # Dueling DQN
    """Dueling DQN: a shared trunk feeds a state-value head and an
    advantage head; the two are combined with mean-centred advantages."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(QNetDuel, self).__init__()
        self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                       nn.Linear(mid_dim, mid_dim), nn.ReLU())
        self.net_val = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, 1))  # state value V(s)
        self.net_adv = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, action_dim))  # advantages A(s, a)

    def forward(self, state):
        """Return the dueling Q values Q = V + A - mean(A)."""
        features = self.net_state(state)
        value = self.net_val(features)
        advantage = self.net_adv(features)
        # Centre the advantages so the value stream is identifiable.
        return value + advantage - advantage.mean(dim=1, keepdim=True)
class QNetTwin(nn.Module):  # Double DQN
    """Double DQN: a shared trunk with two independent Q heads."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(QNetTwin, self).__init__()
        self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                       nn.Linear(mid_dim, mid_dim), nn.ReLU())  # shared trunk
        self.net_q1 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                    nn.Linear(mid_dim, action_dim))  # q1 head
        self.net_q2 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                    nn.Linear(mid_dim, action_dim))  # q2 head

    def forward(self, state):
        """Return the Q values of the first head only."""
        return self.net_q1(self.net_state(state))

    def get_q1_q2(self, state):
        """Return the Q values of both heads (for double-Q targets)."""
        features = self.net_state(state)
        return self.net_q1(features), self.net_q2(features)
class QNetTwinDuel(nn.Module):  # D3QN: Dueling Double DQN
    """Dueling Double DQN: a shared trunk with two (value, advantage)
    head pairs, each aggregated with mean-centred advantages."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(QNetTwinDuel, self).__init__()
        self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                       nn.Linear(mid_dim, mid_dim), nn.ReLU())
        self.net_val1 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                      nn.Linear(mid_dim, 1))  # value head 1
        self.net_val2 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                      nn.Linear(mid_dim, 1))  # value head 2
        self.net_adv1 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                      nn.Linear(mid_dim, action_dim))  # advantage head 1
        self.net_adv2 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                      nn.Linear(mid_dim, action_dim))  # advantage head 2

    @staticmethod
    def _dueling(value, advantage):
        # Centre the advantages so the value stream is identifiable.
        return value + advantage - advantage.mean(dim=1, keepdim=True)

    def forward(self, state):
        """Return the dueling Q values of the first head pair."""
        features = self.net_state(state)
        return self._dueling(self.net_val1(features), self.net_adv1(features))

    def get_q1_q2(self, state):
        """Return the dueling Q values of both head pairs."""
        features = self.net_state(state)
        q1 = self._dueling(self.net_val1(features), self.net_adv1(features))
        q2 = self._dueling(self.net_val2(features), self.net_adv2(features))
        return q1, q2
'''Policy Network (Actor)'''
class Actor(nn.Module):  # DPG: Deterministic Policy Gradient
    """Deterministic policy: maps a state to an action in [-1, 1]."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(Actor, self).__init__()
        self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, action_dim))

    def forward(self, state):  # state: [batch_size, state_dim], e.g. [254, 3]
        # tanh squashes the raw output into the action range [-1, 1].
        return torch.tanh(self.net(state))

    def get_action(self, state, action_std):
        """Deterministic action plus clipped Gaussian exploration noise."""
        action = torch.tanh(self.net(state))
        noise = (torch.randn_like(action) * action_std).clamp(-0.5, 0.5)
        return (action + noise).clamp(-1.0, 1.0)
### Hongying: add RNN into Reinforcement.
class ActorRNN(nn.Module):  # DPG: Deterministic Policy Gradient
    """Deterministic policy with an LSTM on top of an MLP feature trunk."""
    def __init__(self, mid_dim, state_dim, action_dim):
        super(ActorRNN, self).__init__()
        # MLP feature extractor applied to the raw state before the LSTM.
        self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU()
                                 )
        self.hidden_dim = mid_dim
        self.num_layers = 1
        self.rnn = nn.LSTM(input_size=mid_dim, hidden_size=mid_dim, num_layers=self.num_layers)
        self.action = nn.Linear(mid_dim, action_dim)
    def init_hidden(self, batch_size):
        # TODO: think through how the LSTM should be integrated into the network.
        # Zero (h, c) tensors created on the same device/dtype as the weights.
        weight = next(self.parameters())
        return (
            weight.new_zeros(self.num_layers, batch_size, self.hidden_dim),
            weight.new_zeros(self.num_layers, batch_size, self.hidden_dim)
        )
    def forward(self, state, hidden):
        # NOTE(review): detach_() cuts gradients through the MLP trunk here —
        # confirm this is intentional rather than a leftover.
        out = self.net(state).detach_()
        out = out.view(1, state.shape[0], -1)  # (seq_len, batch, input_size)
        # Initialize hidden state with zeros
        out, hidden = self.rnn(out, hidden)
        # NOTE(review): squeeze() collapses the batch dim when batch_size == 1.
        out = out.squeeze()
        return self.action(out).tanh(), hidden  # action.tanh()
    def get_action(self, state, hidden, action_std):
        # Policy action plus clipped Gaussian exploration noise, no grad.
        with torch.no_grad():
            action, hidden = self.forward(state, hidden)
            noise = (torch.randn_like(action) * action_std).clamp(-0.5, 0.5)
            return (action + noise).clamp(-1.0, 1.0), hidden
    def get_full_state_and_action(self, state, hidden, action_std):
        # Augment state and action with the flattened LSTM (h, c) so a
        # critic can condition on the recurrent memory.
        action, hidden = self.get_action(state, hidden, action_std)
        h, c = hidden
        h, c = h.squeeze(), c.squeeze()
        new_state = torch.cat([state, h, c], dim=1)
        new_action = torch.cat([action, h, c], dim=1)
        return new_state, new_action
class Critic(nn.Module):
    """Q-network for continuous control: (state, action) -> scalar Q."""

    def __init__(self, mid_dim, state_dim, action_dim):
        super(Critic, self).__init__()
        self.net = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, 1))

    def forward(self, state, action):
        """Return the Q value of the concatenated state-action pair."""
        state_action = torch.cat((state, action), dim=1)
        return self.net(state_action)
class CriticAdv(nn.Module):
    """State-value network V(s) for advantage-based methods: an MLP for
    vector states, a small CNN when state_dim is an image shape tuple."""
    def __init__(self, state_dim, mid_dim):
        super(CriticAdv, self).__init__()
        if isinstance(state_dim, int):
            # Vector observation: plain MLP.
            self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                     nn.Linear(mid_dim, 1))
        else:
            # Image observation: state_dim is a shape tuple, e.g. (4, 96, 96).
            def set_dim(i):
                # Channel count grows by a factor 1.5 per conv layer.
                return int(12 * 1.5 ** i)
            self.net = nn.Sequential(NnnReshape(*state_dim),  # -> [batch_size, 4, 96, 96]
                                     nn.Conv2d(state_dim[0], set_dim(0), 4, 2, bias=True), nn.LeakyReLU(),
                                     nn.Conv2d(set_dim(0), set_dim(1), 3, 2, bias=False), nn.ReLU(),
                                     nn.Conv2d(set_dim(1), set_dim(2), 3, 2, bias=False), nn.ReLU(),
                                     nn.Conv2d(set_dim(2), set_dim(3), 3, 2, bias=True), nn.ReLU(),
                                     nn.Conv2d(set_dim(3), set_dim(4), 3, 1, bias=True), nn.ReLU(),
                                     nn.Conv2d(set_dim(4), set_dim(5), 3, 1, bias=True), nn.ReLU(),
                                     NnnReshape(-1),
                                     nn.Linear(set_dim(5), mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, 1))
        layer_norm(self.net[-1], std=0.5)  # output layer for Q value
    def forward(self, state):
        return self.net(state)  # Q value
class CriticTwin(nn.Module):
    """Twin Q-network Q(s, a) with a shared state-action trunk; the twin
    heads support clipped-double-Q targets (TD3/SAC style)."""
    def __init__(self, mid_dim, state_dim, action_dim, if_use_dn=False):
        super().__init__()
        if if_use_dn:  # use DenseNet (DenseNet has both shallow and deep linear layer)
            nn_dense = DenseNet(mid_dim)
            lay_dim = nn_dense.out_dim
            self.net_sa = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                        nn_dense, )  # state-action value function
        else:  # use a simple network for actor. Deeper network does not mean better performance in RL.
            lay_dim = mid_dim
            self.net_sa = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                        nn.Linear(mid_dim, lay_dim), nn.ReLU())
        # Two independent linear Q heads on top of the shared trunk.
        self.net_q1 = nn.Linear(lay_dim, 1)
        self.net_q2 = nn.Linear(lay_dim, 1)
        layer_norm(self.net_q1, std=0.1)
        layer_norm(self.net_q2, std=0.1)
    def forward(self, state, action):
        tmp = self.net_sa(torch.cat((state, action), dim=1))
        return self.net_q1(tmp)  # one Q value
    def get_q1_q2(self, state, action):
        tmp = self.net_sa(torch.cat((state, action), dim=1))
        return self.net_q1(tmp), self.net_q2(tmp)  # two Q values
'''Integrated Network (Parameter sharing)'''
class InterDPG(nn.Module):  # DPG means deterministic policy gradient
    """Actor and critic sharing encoders and a DenseNet trunk (parameter
    sharing): `forward` is the actor, `critic` the Q function."""
    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        # Separate encoders for state and action feed the shared trunk.
        self.enc_s = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                   nn.Linear(mid_dim, mid_dim))
        self.enc_a = nn.Sequential(nn.Linear(action_dim, mid_dim), nn.ReLU(),
                                   nn.Linear(mid_dim, mid_dim))
        self.net = DenseNet(mid_dim)
        net_out_dim = self.net.out_dim
        # Decoders: tanh-bounded action head and spectral-normed Q head.
        self.dec_a = nn.Sequential(nn.Linear(net_out_dim, mid_dim), nn.Hardswish(),
                                   nn.Linear(mid_dim, action_dim), nn.Tanh())
        self.dec_q = nn.Sequential(nn.Linear(net_out_dim, mid_dim), nn.Hardswish(),
                                   nn.utils.spectral_norm(nn.Linear(mid_dim, 1)))
    @staticmethod
    def add_noise(a, noise_std):
        # Gaussian-perturb `a`; replace out-of-range samples with uniform
        # noise instead of clipping.
        # NOTE(review): hard-codes .cuda() — breaks on CPU-only runs.
        a_temp = torch.normal(a, noise_std)
        mask = torch.tensor((a_temp < -1.0) + (a_temp > 1.0), dtype=torch.float32).cuda()
        noise_uniform = torch.rand_like(a)
        a_noise = noise_uniform * mask + a_temp * (-mask + 1)
        return a_noise
    def forward(self, s, noise_std=0.0):  # actor
        s_ = self.enc_s(s)
        a_ = self.net(s_)
        a = self.dec_a(a_)
        return a if noise_std == 0.0 else self.add_noise(a, noise_std)
    def critic(self, s, a):
        # Q(s, a): the two encodings are summed before the shared trunk.
        s_ = self.enc_s(s)
        a_ = self.enc_a(a)
        q_ = self.net(s_ + a_)
        q = self.dec_q(q_)
        return q
    def next_q_action(self, s, s_next, noise_std):
        # Policy action for `s`, plus a smoothed Q target for `s_next`.
        s_ = self.enc_s(s)
        a_ = self.net(s_)
        a = self.dec_a(a_)
        '''q_target (without noise)'''
        a_ = self.enc_a(a)
        s_next_ = self.enc_s(s_next)
        q_target0_ = self.net(s_next_ + a_)
        q_target0 = self.dec_q(q_target0_)
        '''q_target (with noise)'''
        a_noise = self.add_noise(a, noise_std)
        a_noise_ = self.enc_a(a_noise)
        q_target1_ = self.net(s_next_ + a_noise_)
        q_target1 = self.dec_q(q_target1_)
        # Average the clean and the noise-smoothed targets.
        q_target = (q_target0 + q_target1) * 0.5
        return q_target, a
class NnnReshape(nn.Module):
    """Reshape layer: keeps the batch dimension and views the remaining
    elements with the shape given at construction time."""

    def __init__(self, *args):
        super().__init__()
        self.args = args

    def forward(self, x):
        batch_size = x.size(0)
        return x.view((batch_size,) + self.args)
class DenseNet(nn.Module):  # plan to hyper-param: layer_number
    """Two dense (concatenative) blocks: each block appends its output to
    its input, growing the feature width from mid_dim to out_dim."""

    def __init__(self, mid_dim):
        super().__init__()
        # The width grows by a factor 3/2 per block, so mid_dim must be
        # divisible by 2**3 for the intermediate sizes to stay integral.
        assert (mid_dim / (2 ** 3)) % 1 == 0

        def set_dim(i):
            return int((3 / 2) ** i * mid_dim)

        self.dense1 = nn.Sequential(nn.Linear(set_dim(0), set_dim(0) // 2), nn.Hardswish())
        self.dense2 = nn.Sequential(nn.Linear(set_dim(1), set_dim(1) // 2), nn.Hardswish())
        self.out_dim = set_dim(2)
        layer_norm(self.dense1[0], std=1.0)
        layer_norm(self.dense2[0], std=1.0)

    def forward(self, x1):
        # Concatenate each block's output onto the running feature vector.
        x2 = torch.cat((x1, self.dense1(x1)), dim=1)
        return torch.cat((x2, self.dense2(x2)), dim=1)
def layer_norm(layer, std=1.0, bias_const=1e-6):
    """Orthogonally initialise `layer.weight` with gain `std` and set
    `layer.bias` to the small constant `bias_const` (in place)."""
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)
def demo_conv2d_state():
    # Manual smoke test of the CNN topology used for image states.
    state_dim = (3, 96, 96)
    batch_size = 3
    def set_dim(i):
        # Channel count grows by a factor 1.5 per conv layer.
        return int(12 * 1.5 ** i)
    mid_dim = 128
    net = nn.Sequential(NnnReshape(*state_dim),  # -> [batch_size, 4, 96, 96]
                        nn.Conv2d(state_dim[0], set_dim(0), 4, 2, bias=True), nn.LeakyReLU(),
                        nn.Conv2d(set_dim(0), set_dim(1), 3, 2, bias=False), nn.ReLU(),
                        nn.Conv2d(set_dim(1), set_dim(2), 3, 2, bias=False), nn.ReLU(),
                        nn.Conv2d(set_dim(2), set_dim(3), 3, 2, bias=True), nn.ReLU(),
                        nn.Conv2d(set_dim(3), set_dim(4), 3, 1, bias=True), nn.ReLU(),
                        nn.Conv2d(set_dim(4), set_dim(5), 3, 1, bias=True), nn.ReLU(),
                        NnnReshape(-1),
                        nn.Linear(set_dim(5), mid_dim), nn.ReLU())
    inp_shape = list(state_dim)
    inp_shape.insert(0, batch_size)
    inp = torch.ones(inp_shape, dtype=torch.float32)
    # Flatten to (batch, -1); the NnnReshape inside `net` restores the
    # image shape, so a flat state vector is a valid input.
    inp = inp.view(3, -1)
    print(inp.shape)
    out = net(inp)
    print(out.shape)
    # NOTE(review): exit() kills the whole process — debug-only helper.
    exit()
if __name__ == '__main__':
    # Smoke test: run ActorRNN and feed its recurrent full state/action
    # into a Critic sized for the augmented dimensions.
    state_dim = 16
    mid_dim = 128
    action_dim = 2
    batch_size = 64
    actor = ActorRNN(mid_dim=mid_dim, state_dim=state_dim, action_dim=action_dim)
    observation = torch.FloatTensor(torch.rand(size=(batch_size, state_dim)))
    ho = actor.init_hidden(batch_size)
    act, hidden = actor(observation, ho)
    act, hidden = actor.get_action(observation, hidden, 1)
    h_0, c_0 = hidden
    print(f'state dim:{(batch_size, state_dim)}')
    print(f'action shape:{act.shape}')
    print(f'hidden state:{h_0.shape}')
    newstate, newaction = actor.get_full_state_and_action(observation, hidden, 1)
    # The critic consumes state/action augmented with the LSTM (h, c).
    critic = Critic(mid_dim=mid_dim, state_dim=mid_dim * 2 + state_dim, action_dim=mid_dim * 2 + action_dim)
    print(critic)
    print("newstate:", newstate.shape)
    print("newaction:",newaction.shape)
    critic.forward(newstate, newaction)
|
from julia.core import which
pytest_plugins = ["pytester"]
def test__using_default_setup(testdir, request):
    """Verify that tests marked `pyjulia__using_default_setup` run under
    the default setup but are skipped when a custom Julia runtime or
    inline mode is configured."""
    if request.config.getoption("runpytest") != "subprocess":
        raise ValueError("Need `-p pytester --runpytest=subprocess` options.")
    # create a temporary conftest.py file
    testdir.makeini(
        """
        [pytest]
        addopts =
            -p julia.pytestplugin
        """
    )
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.pyjulia__using_default_setup
        def test():
            pass
        """
    )
    args = ("-p", "julia.pytestplugin", "--no-julia")
    # Default setup: the marked test passes.
    r0 = testdir.runpytest(*args)
    r0.assert_outcomes(passed=1)
    # Explicit runtime or inline mode: the marked test must be skipped.
    r1 = testdir.runpytest("--julia-runtime", which("julia"), *args)
    r1.assert_outcomes(skipped=1)
    r2 = testdir.runpytest("--julia-inline=yes", *args)
    r2.assert_outcomes(skipped=1)
|
from flask.ext.mail import Message
from loghub import mail
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a plain-text + HTML message and send it via the app mailer."""
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    mail.send(msg)
|
# -*- coding: utf-8 -*-
import urllib, urllib2, cookielib, time, socket,gzip,StringIO,zlib,inspect,sys
from datetime import datetime, timedelta,tzinfo
try:
import xml.etree.cElementTree as ElementTree
except:
from xml.etree import ElementTree
from epg import *
# Channel-list XML sources, one per country.  Commented-out entries are
# kept for reference but are not offered in the menu.
BASE=[
	u'http://supertv.3owl.com/USA.xml',
	u'http://supertv.3owl.com/United%20Kingdom.xml',
	u'http://supertv.3owl.com/Deutschland.xml',
	#u'http://supertv.3owl.com/Espana.xml',
	u'http://supertv.3owl.com/France.xml',
	u'http://supertv.3owl.com/Italia.xml',
	#u'http://supertv.3owl.com/Oesterreich.xml',
	#u'http://supertv.3owl.com/Portugal.xml',
	#u'http://supertv.3owl.com/Svizzera%20Schweiz%20Suisse.xml',
	#u'http://supertv.3owl.com/Viet%20Nam.xml',
	#u'http://apps.ohlulz.com/rtmpgui/list.xml',
	#u'http://home.no/chj191/LiveTV.xml',
	#'http://home.no/chj191/xxx.xml',
	]
TITLE = 'SuperTV'
ICON = 'icon-default.png'
# Cache fetched channel lists for five minutes (Plex framework global).
HTTP.CacheTime = 60*5
def Start():
    """Plex plug-in entry point: register view groups and set defaults."""
    # Initialize the plug-in
    Plugin.AddViewGroup("InfoList", viewMode="InfoList", mediaType="items")
    Plugin.AddViewGroup("List", viewMode="List", mediaType="items")
    # Setup the default attributes for the ObjectContainer
    ObjectContainer.title1 = TITLE
    ObjectContainer.view_group = 'List'
    # Setup the default attributes for the other objects
    DirectoryObject.thumb = R(ICON)
    VideoClipObject.thumb = R(ICON)
@handler('/video/SuperTV', 'SuperTV')
def Main():
    """Top-level menu: one directory per source country, or jump straight
    to the language list when there is only one source."""
    objs=[]
    if len(BASE) < 2:
        return ListLanguages(0)
    for b in BASE:
        objs.append(DirectoryObject(
            key = Callback(ListLanguages, src=BASE.index(b)),
            # Display name derived from the XML filename, restoring the
            # accented country names.
            title = urllib.unquote(b.split('/')[-1][:-4]).replace('Espana','España').replace('Viet Nam','Việt Nam').replace('Oesterreich','Österreich')
        ))
    oc = ObjectContainer(objects=objs)
    return oc
def ListLanguages(src):
    # Fetch the channel XML for the selected source and group its streams
    # by language; jumps straight to the videos when only one language.
    xml=getURL(BASE[src],None)
    tree = ElementTree.XML(xml)
    if len(tree.findall('channel')) > 0:
        # Channel-style XML takes a different path.
        # NOTE(review): listChannels is not defined in this file chunk —
        # presumably provided by `from epg import *`; confirm.
        return listChannels(src)
    streams = tree.findall('stream')
    languages = []
    for stream in streams:
        language = stream.findtext('language').strip().capitalize()
        # Skip duplicates and streams flagged as down.
        if not language in languages and language.find('Link down') == -1:
            languages.append(language)
    if len(languages) < 2:
        return listVideos(src=src,lang=languages[0])
    languages = list(set(languages))
    languages.sort()
    Log(languages)
    objs = []
    for l in languages:
        objs.append(DirectoryObject(
            key = Callback(listVideos,src=src, lang=l),
            title = l
        ))
    oc = ObjectContainer(objects=objs)
    return oc
def listVideos(src=0, lang=0):
    """Build the video list for playlist BASE[src], filtered to one language.

    Each matching stream with an HTTP link becomes a VideoClipObject; when
    the stream carries an EPG id known to EPGs, the current programme is
    appended to the title and the schedule becomes the summary.
    """
    boldStart = ''
    boldEnd = ''
    #if Client.Platform == ClientPlatform.MacOSX:
    #    boldStart = '[B]'
    #    boldEnd = '[/B]'
    xml = getURL(BASE[src], None)
    tree = ElementTree.XML(xml)
    # Some playlists are channel-based rather than stream-based.
    if len(tree.findall('channel')) > 0:
        return listChannels(src)
    streams = tree.findall('stream')
    oc = ObjectContainer(view_group='InfoList', title1='SuperTV', title2=urllib.unquote(BASE[src].split('/')[-1][:-4]).replace('Espana','España').replace('Viet Nam','Việt Nam').replace('Oesterreich','Österreich'))
    for stream in streams:
        language = stream.findtext('language').strip().capitalize()
        if language != lang or language.find('Link down') != -1:
            continue
        title = boldStart + stream.findtext('title') + boldEnd
        epgid = stream.findtext('epgid', default=None)
        rtmplink = stream.findtext('link')
        # Fall back to the first HTTP backup link when the primary link
        # is not HTTP; skip the stream entirely if none is found.
        if rtmplink[:4] != "http":
            for backup in stream.findall('backup'):
                if backup.findtext('link')[:4] == "http":
                    rtmplink = backup.findtext('link')
        if rtmplink[:4] != "http":
            continue
        # desc must be reset for every stream: the original assigned it only
        # inside the EPG branch, so a stream without an epgid reused the
        # previous stream's description (or raised NameError on the first).
        desc = ''
        if epgid:
            ep = epgid.split(":")
            if ep[0] in EPGs.keys():
                # The original reused the name `e` for both the EPG object
                # and the loop variable; keep them distinct here.
                epg = EPGs[ep[0]](ep[1]).getEntries()
                for entry in epg:
                    desc += entry[1].strftime("%I:%M") + '-' + entry[2].strftime("%I:%M") + ":\n" + entry[0] + u"\n\n"
                if len(epg) > 0:
                    # Append the currently running programme to the title.
                    title += ' - ' + epg[0][0]
        vco = VideoClipObject(title = title, summary = desc, thumb = stream.findtext('logourl',''), url = rtmplink)
        vco.add(MediaObject(
            container = Container.MP4,
            video_codec = VideoCodec.H264,
            audio_codec = AudioCodec.AAC,
            audio_channels = 2,
            optimized_for_streaming = True,
            parts = [PartObject(key = rtmplink)]))
        oc.add(vco)
    return oc
|
#!/usr/bin/env python3
#
# Convert a test specification to command-line options

import pscheduler

from validate import spec_is_valid

# Read the JSON spec from stdin, accepting at most schema version 1.
spec = pscheduler.json_load(exit_on_error=True, max_schema=1)

valid, message = spec_is_valid(spec)
if not valid:
    pscheduler.fail(message)

# Each spec string field maps onto the identically-named CLI option.
STRING_FIELDS = [
    ('duration', 'duration'),
    ('host', 'host'),
    ('interval', 'interval'),
    ('starting-comment', 'starting-comment'),
    ('parting-comment', 'parting-comment'),
]

result = pscheduler.speccli_build_args(spec, strings=STRING_FIELDS)

pscheduler.succeed_json(result)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from telegram.ext import Updater, CommandHandler
from telegram.utils.helpers import escape_markdown
from telegram import ParseMode
import telegram.constants
import configparser
import logging
import requests
import strings
import feedparser
from sqlalchemy import create_engine, Column, Integer, UnicodeText, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
# initializing config file
config = configparser.ConfigParser()
config.read("properties.ini")
# getting bot owner ID from config file
owner_id = int(config.get("OWNER", "owner_id"))
def start(bot, update):
    """Handle /start: reply with the bot's introductory help text."""
    message = update.effective_message
    message.reply_text(strings.stringHelp)
def help_message(bot, update):
    """Handle /help: concatenate every per-command help string and reply."""
    sections = [
        strings.help_message,
        strings.help_url,
        strings.help_list,
        strings.help_add,
        strings.help_remove,
    ]
    update.effective_message.reply_text(text="".join(sections))
def test(bot, update):
    """Handle /test: trivial liveness check."""
    status_text = "Bot status: Online, duh"
    update.effective_message.reply_text(status_text)
def server_ip(bot, update):
    """Handle /ip: reply with the server's public IP address.

    Restricted to the bot owner (owner_id from properties.ini); anyone
    else gets a polite refusal.
    """
    user = update.message.from_user
    tg_user_id = update.effective_user.id
    # Guard clause: only the configured owner may see the server IP.
    if tg_user_id != owner_id:
        update.effective_message.reply_text("I'm sorry " + user.first_name + ", but I can't let you do that.")
        return
    # ipinfo.io returns the caller's public IP as plain text.
    ip = requests.get("http://ipinfo.io/ip").text
    update.effective_message.reply_text("Server IP: " + ip)
def show_url(bot, update, args):
    """Handle /url: parse the given feed URL and reply with a Markdown
    summary of the feed's and its newest entry's title/description/link.

    Args:
        bot: telegram Bot used to send the reply.
        update: incoming telegram Update.
        args: command arguments; args[0] is expected to be the feed URL.
    """
    # Chat ID may equal the user ID when the command arrives via PM.
    tg_chat_id = str(update.effective_chat.id)
    # Guard against a missing or too-short argument: the original indexed
    # args[0] unconditionally and raised IndexError on a bare /url.
    if not args or len(args[0]) < 3:
        update.effective_message.reply_text(strings.stringInvalidURL)
        return
    tg_feed_link = args[0]
    link_processed = feedparser.parse(tg_feed_link)
    feed_title = link_processed.feed.title
    feed_description = link_processed.feed.description
    feed_link = link_processed.feed.link
    entry_title = link_processed.entries[0].title
    entry_description = link_processed.entries[0].description
    entry_link = link_processed.entries[0].link
    # Strip trailing '*' so our own Markdown bold markers are not broken.
    # BUGFIX: the original called rstrip('*') and discarded the result —
    # str methods return a new string, so the sanitising never happened.
    if '*' in feed_title:
        print("# Found * in feed title. Parsing...")
        feed_title = feed_title.rstrip('*')
    else:
        print("# No * found in feed title. Proceeding...")
    if '*' in entry_title:
        print("# Found * in entry title. Parsing...")
        entry_title = entry_title.rstrip('*')
    else:
        print("# No * found in entry title. Proceeding...")
    final_message = "feed title: " + "*" + feed_title + "*" + "\n\n" + "feed description: " + escape_markdown(feed_description) + "\n\n" + "feed link: " + escape_markdown(feed_link) + "\n\n" + "entry title: " + "*" + entry_title + "*" + "\n\n" + "entry description: " + escape_markdown(entry_description) + "\n\n" + "entry link: " + escape_markdown(entry_link)
    bot.send_message(chat_id=tg_chat_id, text=final_message, parse_mode=ParseMode.MARKDOWN)
def list_urls(bot, update):
    """Handle /list: show every feed link this user subscribed in this chat."""
    # Chat ID may equal the user ID when the command arrives via PM.
    tg_chat_id = str(update.effective_chat.id)
    tg_user_id = update.effective_user.id
    # All subscriptions matching both the sender and the current chat.
    user_data = SESSION.query(RSS_Feed).filter(RSS_Feed.user_id == tg_user_id, RSS_Feed.chat_id == tg_chat_id).all()
    links_list = [row.feed_link for row in user_data]
    # One blank line between links keeps the reply readable.
    final_content = "\n\n".join(links_list)
    # Telegram rejects messages above MAX_MESSAGE_LENGTH; warn instead.
    if len(final_content) <= telegram.constants.MAX_MESSAGE_LENGTH:
        bot.send_message(chat_id=tg_chat_id, text= "This chat is subscribed to the following links:" + "\n" + final_content)
    else:
        bot.send_message(chat_id=tg_chat_id, text="*Warning: *" + strings.errorMsgLong)
        print("\n" + "# Message too long for chat " + str(tg_chat_id))
def add_url(bot, update, args):
    """Handle /add: subscribe the current chat to an RSS feed link.

    Only chat administrators, the chat creator, or the bot owner may
    subscribe. The link is validated with feedparser before being stored,
    and duplicate subscriptions (same user, chat and link) are rejected.
    """
    # Guard against a missing or too-short argument: the original indexed
    # args[0] unconditionally and raised IndexError on a bare /add.
    if not args or len(args[0]) < 3:
        update.effective_message.reply_text(strings.stringInvalidURL)
        return
    chat = update.effective_chat
    tg_user_id = update.effective_user.id
    # Chat ID may equal the user ID when the command arrives via PM.
    tg_chat_id = str(update.effective_chat.id)
    tg_feed_link = args[0]
    # BUGFIX: in this python-telegram-bot API generation the chat owner's
    # status string is 'creator', not 'owner' — comparing against 'owner'
    # never matched, locking the chat creator out of the command.
    member_status = chat.get_member(tg_user_id).status
    if member_status not in ('administrator', 'creator') and tg_user_id != owner_id:
        update.effective_message.reply_text(strings.errorAdmin)
        return
    link_processed = feedparser.parse(tg_feed_link)
    # feedparser sets bozo=1 when the document is not a well-formed feed.
    if link_processed.bozo == 1:
        update.effective_message.reply_text(strings.stringInvalidURLbozo)
        return
    # Remember the newest entry so rss_update() can detect future updates.
    tg_old_entry_link = link_processed.entries[0].link
    # Reject an existing subscription for the same user, chat and link.
    row = SESSION.query(RSS_Feed).filter(RSS_Feed.user_id == tg_user_id, RSS_Feed.feed_link == tg_feed_link, RSS_Feed.chat_id == tg_chat_id).all()
    if row:
        update.effective_message.reply_text(strings.stringURLalreadyAdded)
        return
    SESSION.add(RSS_Feed(tg_user_id, tg_chat_id, tg_feed_link, tg_old_entry_link))
    SESSION.commit()
    update.effective_message.reply_text(strings.stringURLadded)
    print("\n" + "# New subscription for user " + str(tg_user_id) + " with link " + tg_feed_link + "\n")
def remove_url(bot, update, args):
    """Handle /remove: unsubscribe the current chat from an RSS feed link.

    Only chat administrators, the chat creator, or the bot owner may
    unsubscribe. Every matching row (including duplicates) is deleted.
    """
    # Guard against a missing or too-short argument: the original indexed
    # args[0] unconditionally and raised IndexError on a bare /remove.
    if not args or len(args[0]) < 3:
        update.effective_message.reply_text(strings.stringInvalidURL)
        return
    chat = update.effective_chat
    tg_user_id = update.effective_user.id
    # Chat ID may equal the user ID when the command arrives via PM.
    tg_chat_id = str(update.effective_chat.id)
    tg_feed_link = args[0]
    # BUGFIX: in this python-telegram-bot API generation the chat owner's
    # status string is 'creator', not 'owner' — comparing against 'owner'
    # never matched, locking the chat creator out of the command.
    member_status = chat.get_member(tg_user_id).status
    if member_status not in ('administrator', 'creator') and tg_user_id != owner_id:
        update.effective_message.reply_text(strings.errorAdmin)
        return
    link_processed = feedparser.parse(tg_feed_link)
    # feedparser sets bozo=1 when the document is not a well-formed feed.
    if link_processed.bozo == 1:
        update.effective_message.reply_text(strings.stringInvalidURLbozo)
        return
    # Gather every row for this chat and link (duplicates included).
    user_data = SESSION.query(RSS_Feed).filter(RSS_Feed.chat_id == tg_chat_id, RSS_Feed.feed_link == tg_feed_link).all()
    if user_data:
        for entry in user_data:
            SESSION.delete(entry)
        SESSION.commit()
        update.effective_message.reply_text(strings.stringURLremoved)
    else:
        update.effective_message.reply_text(strings.stringURLalreadyRemoved)
def rss_update(bot, job):
    """Periodic job: check every stored feed and push new entries.

    For each subscription row, feed entries are walked newest-first until
    the previously announced entry link is reached; the new entries are
    then sent oldest-first so the chat reads chronologically, and the
    stored old_entry_link high-water mark is advanced.
    """
    user_data = SESSION.query(RSS_Feed).all()
    for row in user_data:
        tg_chat_id = row.chat_id
        tg_feed_link = row.feed_link
        feed_processed = feedparser.parse(tg_feed_link)
        # Last entry announced for this subscription.
        tg_old_entry_link = row.old_entry_link
        new_entry_links = []
        new_entry_titles = []
        # Feeds list entries newest-first: collect until the already-seen
        # link shows up, then stop.
        for entry in feed_processed.entries:
            if entry.link == tg_old_entry_link:
                break
            new_entry_links.append(entry.link)
            new_entry_titles.append(entry.title)
        if new_entry_links:
            # The newest collected entry becomes the new high-water mark.
            row.old_entry_link = new_entry_links[0]
            SESSION.commit()
        else:
            print("\n" + "# No new updates for chat " + str(tg_chat_id) + " with link " + tg_feed_link + "\n")
        # Send oldest-first so updates arrive in chronological order.
        for link, title in zip(reversed(new_entry_links), reversed(new_entry_titles)):
            print("\n" + "# New entry from " + title + " with link " + link)
            # Strip trailing '*' so our own Markdown bold markers survive.
            # BUGFIX: the original called rstrip('*') and discarded the
            # result — str methods return a new string, so the sanitising
            # never happened.
            if '*' in title:
                print("# Found * in title. Parsing...")
                title = title.rstrip('*')
            else:
                print("# No * found in title. Proceeding...")
            # Layout: "<rss_feed_title>\n\n<rss_feed_link>"
            final_message = "*" + title + "*" + "\n\n" + escape_markdown(link)
            # Telegram rejects messages above MAX_MESSAGE_LENGTH; warn instead.
            if len(final_message) <= telegram.constants.MAX_MESSAGE_LENGTH:
                print("\n" + "# Check passed. Message doesn't exceed Telegram limit " + "\n")
                bot.send_message(chat_id=tg_chat_id, text=final_message, parse_mode=ParseMode.MARKDOWN)
            else:
                print("\n" + "# Message too long for entry link " + link)
                bot.send_message(chat_id=tg_chat_id, text="*Warning: *" + strings.errorMsgLong)
# --- Database wiring -------------------------------------------------------
# Declarative base that the RSS_Feed model below derives from.
BASE = declarative_base()
# Connection URL comes from properties.ini; client_encoding is passed to the
# DB driver.
engine = create_engine(config.get("DB", "db_url"), client_encoding="utf8")
BASE.metadata.bind = engine
# NOTE(review): this create_all() runs BEFORE RSS_Feed is defined, so no
# tables exist in the metadata yet — the create_all() after the class
# definition is the one that actually creates the table.
BASE.metadata.create_all(engine)
# Thread-local session factory shared by all handlers.
SESSION = scoped_session(sessionmaker(bind=engine, autoflush=False))
class RSS_Feed(BASE):
    """SQLAlchemy model: one RSS subscription (user + chat + feed link).

    old_entry_link stores the most recently announced entry link so the
    periodic rss_update() job can detect newer entries.
    """
    __tablename__ = "RSS_Feed"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)      # telegram user who subscribed
    chat_id = Column(UnicodeText, nullable=False)  # chat the feed is posted to (stored as text)
    feed_link = Column(UnicodeText)                # the RSS feed URL
    old_entry_link = Column(UnicodeText)           # last announced entry link (high-water mark)
    def __init__(self, user_id, chat_id, feed_link, old_entry_link):
        self.user_id = user_id
        self.chat_id = chat_id
        self.feed_link = feed_link
        self.old_entry_link = old_entry_link
    def __repr__(self):
        return "<RSS_Feed for {} with chatID {} at feed_link {} with old entry {}>".format(self.user_id, self.chat_id, self.feed_link, self.old_entry_link)
# This call actually creates the RSS_Feed table: it runs after the model is
# defined (the earlier create_all ran before any model existed) and uses the
# engine bound to the metadata above.
BASE.metadata.create_all()
def main():
    """Configure logging, register command handlers, start the RSS job
    queue, and poll Telegram until interrupted."""
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    updater = Updater(config.get("KEY", "tg_API_token"))
    job = updater.job_queue
    # Run rss_update every update_interval seconds, first run immediately.
    job_minute = job.run_repeating(rss_update, int(config.get("UPDATE", "update_interval")), first=0)
    job_minute.enabled = True
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("start", start))
    dispatcher.add_handler(CommandHandler("help", help_message))
    dispatcher.add_handler(CommandHandler("ip", server_ip))
    dispatcher.add_handler(CommandHandler("url", show_url, pass_args=True))
    dispatcher.add_handler(CommandHandler("list", list_urls))
    # NOTE(review): rss_update expects (bot, job) but a CommandHandler calls
    # it as (bot, update) — /feed passes an Update where a Job is expected;
    # verify this is intentional.
    dispatcher.add_handler(CommandHandler("feed", rss_update))
    dispatcher.add_handler(CommandHandler("test", test))
    dispatcher.add_handler(CommandHandler("add", add_url, pass_args=True))
    dispatcher.add_handler(CommandHandler("remove", remove_url, pass_args=True))
    updater.start_polling()
    # Block until Ctrl-C / SIGTERM so polling keeps running.
    updater.idle()
if __name__ == '__main__':
main()
|
import unittest
from mytodo.database import User, Todo, Base, dbdriver
from datetime import date
from mytodo.mytodo import default_time
class DbTestCase(unittest.TestCase):
    """Integration tests for the mytodo DB driver's get_todos/add_todo.

    setUpClass wipes and recreates the schema, then seeds three users and a
    fixed set of todos; the assertions below depend on exactly that fixture.
    """
    def test_get_todos_month(self):
        """get_todos must filter by user token and inclusive date range."""
        # No todos exist in June for test_user1.
        result = self.dbdriver.get_todos("test_user1", date(2021, 6, 11), date(2021, 6, 12))
        self.assertEqual(len(result), 0)
        # Fixture seeds exactly 7 todos for user 1 on 2021-07-15.
        result = self.dbdriver.get_todos("test_user1", date(2021, 7, 15), date(2021, 7, 15))
        print(result)
        self.assertEqual(len(result), 7)
        # User 1 has one todo per day 07-16..07-21 (6 in total).
        result = self.dbdriver.get_todos("test_user1", date(2021, 7, 16), date(2021, 7, 22))
        self.assertEqual(len(result), 6)
        # User 2's todos are all on 07-16, so 07-15 must be empty.
        result = self.dbdriver.get_todos("test_user2", date(2021, 7, 15), date(2021, 7, 15))
        self.assertEqual(len(result), 0)
        # User 3 has no todos at all.
        result = self.dbdriver.get_todos("test_user3", date(2021, 1, 1), date(2021, 12, 31))
        self.assertEqual(len(result), 0)
    def test_add_todo(self):
        """add_todo must persist a todo retrievable by get_todos with the
        same date, title and description."""
        self.dbdriver.add_todo(1, "add1", "add1", date(2021,8,31),default_time,default_time)
        result = self.dbdriver.get_todos("test_user1", date(2021, 8, 31), date(2021, 8, 31))
        self.assertEqual(len(result), 1)
        result = result[0]
        # get_todos rows expose dict-style 'date'/'title'/'description' keys.
        self.assertEqual(result['date'], date(2021, 8, 31))
        self.assertEqual(result['title'], 'add1')
        self.assertEqual(result['description'], "add1")
    @classmethod
    def setUpClass(cls) -> None:
        """Reset the schema and seed the users/todos the tests rely on."""
        cls.dbdriver = dbdriver
        # clear database
        Base.metadata.drop_all(bind=cls.dbdriver.eng)
        Base.metadata.create_all(bind=cls.dbdriver.eng)
        # Tokens double as the user identifiers passed to get_todos.
        users = [User(user_id=1, token="test_user1"),
                 User(user_id=2, token="test_user2"),
                 User(user_id=3, token="test_user3")]
        session = cls.dbdriver.get_session()
        session.add_all(users)
        session.commit()
        # Fixture: 7 todos for user 1 on 07-15, 7 for user 2 on 07-16,
        # one per day for user 1 on 07-16..07-21, plus one on 08-15.
        todos = [
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title1", description="desc1"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title2", description="desc2"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title3", description="desc3"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title4", description="desc4"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title5", description="desc5"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title6", description="desc6"),
            Todo(user_id=1, date=date(2021,7,15), start="", end="", title="title7", description="desc7"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title1", description="desc1"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title2", description="desc2"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title3", description="desc3"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title4", description="desc4"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title5", description="desc5"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title6", description="desc6"),
            Todo(user_id=2, date=date(2021,7,16), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,16), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,17), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,18), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,19), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,20), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,7,21), start="", end="", title="title7", description="desc7"),
            Todo(user_id=1, date=date(2021,8,15), start="", end="", title="title7", description="desc7"),
        ]
        session.add_all(todos)
        session.commit()
if __name__ == '__main__':
unittest.main()
|
from django import forms
from books.models import ClubHouseRoom, Book
class ClubHouseRoomForm(forms.ModelForm):
    """ModelForm for creating/editing a ClubHouseRoom (room_name only)."""

    class Meta:
        model = ClubHouseRoom
        fields = ['room_name']
class BookForm(forms.ModelForm):
    """ModelForm for creating/editing a Book (author, title, cover)."""

    class Meta:
        model = Book
        fields = ['author', 'title', 'cover']
|
#!/usr/bin/python
# Setup and connect the bot to the server, pass along strings to docbot_core
from ircutils3 import bot
from docbot_core import response
# Settings
BOT_NAME = "DR"
NETWORK = "irc.gamesurge.net"
CHANNELS = ["#thefuture",]
class DocBot(bot.SimpleBot):
    """IRC bot: joins the configured channels and relays docbot_core
    responses to channel messages."""

    def on_welcome(self, event):
        """Join every configured channel once the server accepts us."""
        for chan in CHANNELS:
            self.join(chan)

    def on_join(self, event):
        """No action on join."""
        pass

    def on_message(self, event):
        """Pass the message to docbot_core and send back its reply, if any."""
        payload = response(event)
        # Best-effort send: response() may return None (TypeError on
        # subscript) or a dict without 'message' (KeyError); stay silent in
        # those cases. The original bare `except: pass` also swallowed
        # unrelated errors (network failures, typos) — keep the catch narrow.
        try:
            self.send_message(event.target, payload['message'])
        except (KeyError, TypeError):
            pass
if __name__ == "__main__":
    # Create the bot, connect to the network, and run its event loop
    # (start() blocks until disconnected).
    docbot = DocBot(BOT_NAME)
    docbot.connect(NETWORK)
    docbot.start()
|
'''This module extends PTP for Nikon devices.
Use it in a master module that determines the vendor and automatically uses its
extension. This is why inheritance is not explicit.
'''
from ..util import _main_thread_alive
from construct import (
Container, PrefixedArray, Struct,
)
from contextlib import contextmanager
from six.moves.queue import Queue
from threading import Thread, Event
from time import sleep
import atexit
import logging
logger = logging.getLogger(__name__)
__all__ = ('Nikon',)
class Nikon(object):
'''This class implements Nikon's PTP operations.'''
def __init__(self, *args, **kwargs):
logger.debug('Init Nikon')
super(Nikon, self).__init__(*args, **kwargs)
# TODO: expose the choice to poll or not Nikon events
self.__no_polling = False
self.__nikon_event_shutdown = Event()
self.__nikon_event_proc = None
@contextmanager
def session(self):
'''
Manage Nikon session with context manager.
'''
# When raw device, do not perform
if self.__no_polling:
with super(Nikon, self).session():
yield
return
# Within a normal PTP session
with super(Nikon, self).session():
# launch a polling thread
self.__event_queue = Queue()
self.__nikon_event_proc = Thread(
name='NikonEvtPolling',
target=self.__nikon_poll_events
)
self.__nikon_event_proc.daemon = False
atexit.register(self._nikon_shutdown)
self.__nikon_event_proc.start()
try:
yield
finally:
self._nikon_shutdown()
def _shutdown(self):
self._nikon_shutdown()
super(Nikon, self)._shutdown()
def _nikon_shutdown(self):
logger.debug('Shutdown Nikon events')
self.__nikon_event_shutdown.set()
# Only join a running thread.
if self.__nikon_event_proc and self.__nikon_event_proc.is_alive():
self.__nikon_event_proc.join(2)
def _PropertyCode(self, **product_properties):
props = {
'ShootingBank': 0xD010,
'ShootingBankNameA': 0xD011,
'ShootingBankNameB': 0xD012,
'ShootingBankNameC': 0xD013,
'ShootingBankNameD': 0xD014,
'ResetBank0': 0xD015,
'RawCompression': 0xD016,
'WhiteBalanceAutoBias': 0xD017,
'WhiteBalanceTungstenBias': 0xD018,
'WhiteBalanceFluorescentBias': 0xD019,
'WhiteBalanceDaylightBias': 0xD01A,
'WhiteBalanceFlashBias': 0xD01B,
'WhiteBalanceCloudyBias': 0xD01C,
'WhiteBalanceShadeBias': 0xD01D,
'WhiteBalanceColorTemperature': 0xD01E,
'WhiteBalancePresetNo': 0xD01F,
'WhiteBalancePresetName0': 0xD020,
'WhiteBalancePresetName1': 0xD021,
'WhiteBalancePresetName2': 0xD022,
'WhiteBalancePresetName3': 0xD023,
'WhiteBalancePresetName4': 0xD024,
'WhiteBalancePresetVal0': 0xD025,
'WhiteBalancePresetVal1': 0xD026,
'WhiteBalancePresetVal2': 0xD027,
'WhiteBalancePresetVal3': 0xD028,
'WhiteBalancePresetVal4': 0xD029,
'ImageSharpening': 0xD02A,
'ToneCompensation': 0xD02B,
'ColorModel': 0xD02C,
'HueAdjustment': 0xD02D,
'NonCPULensDataFocalLength': 0xD02E,
'NonCPULensDataMaximumAperture': 0xD02F,
'ShootingMode': 0xD030,
'JPEGCompressionPolicy': 0xD031,
'ColorSpace': 0xD032,
'AutoDXCrop': 0xD033,
'FlickerReduction': 0xD034,
'RemoteMode': 0xD035,
'VideoMode': 0xD036,
'NikonEffectMode': 0xD037,
'Mode': 0xD038,
'CSMMenuBankSelect': 0xD040,
'MenuBankNameA': 0xD041,
'MenuBankNameB': 0xD042,
'MenuBankNameC': 0xD043,
'MenuBankNameD': 0xD044,
'ResetBank': 0xD045,
'A1AFCModePriority': 0xD048,
'A2AFSModePriority': 0xD049,
'A3GroupDynamicAF': 0xD04A,
'A4AFActivation': 0xD04B,
'FocusAreaIllumManualFocus': 0xD04C,
'FocusAreaIllumContinuous': 0xD04D,
'FocusAreaIllumWhenSelected': 0xD04E,
'FocusAreaWrap': 0xD04F,
'VerticalAFON': 0xD050,
'AFLockOn': 0xD051,
'FocusAreaZone': 0xD052,
'EnableCopyright': 0xD053,
'ISOAuto': 0xD054,
'EVISOStep': 0xD055,
'EVStep': 0xD056,
'EVStepExposureComp': 0xD057,
'ExposureCompensation': 0xD058,
'CenterWeightArea': 0xD059,
'ExposureBaseMatrix': 0xD05A,
'ExposureBaseCenter': 0xD05B,
'ExposureBaseSpot': 0xD05C,
'LiveViewAFArea': 0xD05D,
'AELockMode': 0xD05E,
'AELAFLMode': 0xD05F,
'LiveViewAFFocus': 0xD061,
'MeterOff': 0xD062,
'SelfTimer': 0xD063,
'MonitorOff': 0xD064,
'ImgConfTime': 0xD065,
'AutoOffTimers': 0xD066,
'AngleLevel': 0xD067,
'D1ShootingSpeed': 0xD068,
'D2MaximumShots': 0xD069,
'ExposureDelayMode': 0xD06A,
'LongExposureNoiseReduction': 0xD06B,
'FileNumberSequence': 0xD06C,
'ControlPanelFinderRearControl': 0xD06D,
'ControlPanelFinderViewfinder': 0xD06E,
'D7Illumination': 0xD06F,
'NrHighISO': 0xD070,
'SHSetCHGUIDDisp': 0xD071,
'ArtistName': 0xD072,
'NikonCopyrightInfo': 0xD073,
'FlashSyncSpeed': 0xD074,
'FlashShutterSpeed': 0xD075,
'E3AAFlashMode': 0xD076,
'E4ModelingFlash': 0xD077,
'BracketSet': 0xD078,
'E6ManualModeBracketing': 0xD079,
'BracketOrder': 0xD07A,
'E8AutoBracketSelection': 0xD07B,
'BracketingSet': 0xD07C,
'F1CenterButtonShootingMode': 0xD080,
'CenterButtonPlaybackMode': 0xD081,
'F2Multiselector': 0xD082,
'F3PhotoInfoPlayback': 0xD083,
'F4AssignFuncButton': 0xD084,
'F5CustomizeCommDials': 0xD085,
'ReverseCommandDial': 0xD086,
'ApertureSetting': 0xD087,
'MenusAndPlayback': 0xD088,
'F6ButtonsAndDials': 0xD089,
'NoCFCard': 0xD08A,
'CenterButtonZoomRatio': 0xD08B,
'FunctionButton2': 0xD08C,
'AFAreaPoint': 0xD08D,
'NormalAFOn': 0xD08E,
'CleanImageSensor': 0xD08F,
'ImageCommentString': 0xD090,
'ImageCommentEnable': 0xD091,
'ImageRotation': 0xD092,
'ManualSetLensNo': 0xD093,
'MovScreenSize': 0xD0A0,
'MovVoice': 0xD0A1,
'MovMicrophone': 0xD0A2,
'MovFileSlot': 0xD0A3,
'MovRecProhibitCondition': 0xD0A4,
'ManualMovieSetting': 0xD0A6,
'MovQuality': 0xD0A7,
'LiveViewScreenDisplaySetting': 0xD0B2,
'MonitorOffDelay': 0xD0B3,
'Bracketing': 0xD0C0,
'AutoExposureBracketStep': 0xD0C1,
'AutoExposureBracketProgram': 0xD0C2,
'AutoExposureBracketCount': 0xD0C3,
'WhiteBalanceBracketStep': 0xD0C4,
'WhiteBalanceBracketProgram': 0xD0C5,
'LensID': 0xD0E0,
'LensSort': 0xD0E1,
'LensType': 0xD0E2,
'FocalLengthMin': 0xD0E3,
'FocalLengthMax': 0xD0E4,
'MaxApAtMinFocalLength': 0xD0E5,
'MaxApAtMaxFocalLength': 0xD0E6,
'FinderISODisp': 0xD0F0,
'AutoOffPhoto': 0xD0F2,
'AutoOffMenu': 0xD0F3,
'AutoOffInfo': 0xD0F4,
'SelfTimerShootNum': 0xD0F5,
'VignetteCtrl': 0xD0F7,
'AutoDistortionControl': 0xD0F8,
'SceneMode': 0xD0F9,
'SceneMode2': 0xD0FD,
'SelfTimerInterval': 0xD0FE,
'NikonExposureTime': 0xD100,
'ACPower': 0xD101,
'WarningStatus': 0xD102,
'MaximumShots': 0xD103,
'AFLockStatus': 0xD104,
'AELockStatus': 0xD105,
'FVLockStatus': 0xD106,
'AutofocusLCDTopMode2': 0xD107,
'AutofocusArea': 0xD108,
'FlexibleProgram': 0xD109,
'LightMeter': 0xD10A,
'RecordingMedia': 0xD10B,
'USBSpeed': 0xD10C,
'CCDNumber': 0xD10D,
'CameraOrientation': 0xD10E,
'GroupPtnType': 0xD10F,
'FNumberLock': 0xD110,
'ExposureApertureLock': 0xD111,
'TVLockSetting': 0xD112,
'AVLockSetting': 0xD113,
'IllumSetting': 0xD114,
'FocusPointBright': 0xD115,
'ExternalFlashAttached': 0xD120,
'ExternalFlashStatus': 0xD121,
'ExternalFlashSort': 0xD122,
'ExternalFlashMode': 0xD123,
'ExternalFlashCompensation': 0xD124,
'NewExternalFlashMode': 0xD125,
'FlashExposureCompensation': 0xD126,
'HDRMode': 0xD130,
'HDRHighDynamic': 0xD131,
'HDRSmoothing': 0xD132,
'OptimizeImage': 0xD140,
'Saturation': 0xD142,
'BWFillerEffect': 0xD143,
'BWSharpness': 0xD144,
'BWContrast': 0xD145,
'BWSettingType': 0xD146,
'Slot2SaveMode': 0xD148,
'RawBitMode': 0xD149,
'ActiveDLighting': 0xD14E,
'FlourescentType': 0xD14F,
'TuneColourTemperature': 0xD150,
'TunePreset0': 0xD151,
'TunePreset1': 0xD152,
'TunePreset2': 0xD153,
'TunePreset3': 0xD154,
'TunePreset4': 0xD155,
'BeepOff': 0xD160,
'AutofocusMode': 0xD161,
'AFAssist': 0xD163,
'PADVPMode': 0xD164,
'ImageReview': 0xD165,
'AFAreaIllumination': 0xD166,
'NikonFlashMode': 0xD167,
'FlashCommanderMode': 0xD168,
'FlashSign': 0xD169,
'_ISOAuto': 0xD16A,
'RemoteTimeout': 0xD16B,
'GridDisplay': 0xD16C,
'FlashModeManualPower': 0xD16D,
'FlashModeCommanderPower': 0xD16E,
'AutoFP': 0xD16F,
'DateImprintSetting': 0xD170,
'DateCounterSelect': 0xD171,
'DateCountData': 0xD172,
'DateCountDisplaySetting': 0xD173,
'RangeFinderSetting': 0xD174,
'CSMMenu': 0xD180,
'WarningDisplay': 0xD181,
'BatteryCellKind': 0xD182,
'ISOAutoHiLimit': 0xD183,
'DynamicAFArea': 0xD184,
'ContinuousSpeedHigh': 0xD186,
'InfoDispSetting': 0xD187,
'PreviewButton': 0xD189,
'PreviewButton2': 0xD18A,
'AEAFLockButton2': 0xD18B,
'IndicatorDisp': 0xD18D,
'CellKindPriority': 0xD18E,
'BracketingFramesAndSteps': 0xD190,
'LiveViewMode': 0xD1A0,
'LiveViewDriveMode': 0xD1A1,
'LiveViewStatus': 0xD1A2,
'LiveViewImageZoomRatio': 0xD1A3,
'LiveViewProhibitCondition': 0xD1A4,
'MovieShutterSpeed': 0xD1A8,
'MovieFNumber': 0xD1A9,
'MovieISO': 0xD1AA,
'LiveViewMovieMode': 0xD1AC,
'ExposureDisplayStatus': 0xD1B0,
'ExposureIndicateStatus': 0xD1B1,
'InfoDispErrStatus': 0xD1B2,
'ExposureIndicateLightup': 0xD1B3,
'FlashOpen': 0xD1C0,
'FlashCharged': 0xD1C1,
'FlashMRepeatValue': 0xD1D0,
'FlashMRepeatCount': 0xD1D1,
'FlashMRepeatInterval': 0xD1D2,
'FlashCommandChannel': 0xD1D3,
'FlashCommandSelfMode': 0xD1D4,
'FlashCommandSelfCompensation': 0xD1D5,
'FlashCommandSelfValue': 0xD1D6,
'FlashCommandAMode': 0xD1D7,
'FlashCommandACompensation': 0xD1D8,
'FlashCommandAValue': 0xD1D9,
'FlashCommandBMode': 0xD1DA,
'FlashCommandBCompensation': 0xD1DB,
'FlashCommandBValue': 0xD1DC,
'ApplicationMode': 0xD1F0,
'ActiveSlot': 0xD1F2,
'ActivePicCtrlItem': 0xD200,
'ChangePicCtrlItem': 0xD201,
'MovieNrHighISO': 0xD236,
'D241': 0xD241,
'D244': 0xD244,
'D247': 0xD247,
'GUID': 0xD24F,
'D250': 0xD250,
'D251': 0xD251,
'ISO': 0xF002,
'ImageCompression': 0xF009,
'NikonImageSize': 0xF00A,
'NikonWhiteBalance': 0xF00C,
# TODO: Are these redundant? Or product-specific?
'_LongExposureNoiseReduction': 0xF00D,
'HiISONoiseReduction': 0xF00E,
'_ActiveDLighting': 0xF00F,
'_MovQuality': 0xF01C,
}
product_properties.update(props)
return super(Nikon, self)._PropertyCode(
**product_properties
)
def _OperationCode(self, **product_operations):
return super(Nikon, self)._OperationCode(
GetProfileAllData=0x9006,
SendProfileData=0x9007,
DeleteProfile=0x9008,
SetProfileData=0x9009,
AdvancedTransfer=0x9010,
GetFileInfoInBlock=0x9011,
Capture=0x90C0,
AFDrive=0x90C1,
SetControlMode=0x90C2,
DelImageSDRAM=0x90C3,
GetLargeThumb=0x90C4,
CurveDownload=0x90C5,
CurveUpload=0x90C6,
CheckEvents=0x90C7,
DeviceReady=0x90C8,
SetPreWBData=0x90C9,
GetVendorPropCodes=0x90CA,
AFCaptureSDRAM=0x90CB,
GetPictCtrlData=0x90CC,
SetPictCtrlData=0x90CD,
DelCstPicCtrl=0x90CE,
GetPicCtrlCapability=0x90CF,
GetPreviewImg=0x9200,
StartLiveView=0x9201,
EndLiveView=0x9202,
GetLiveViewImg=0x9203,
MfDrive=0x9204,
ChangeAFArea=0x9205,
AFDriveCancel=0x9206,
InitiateCaptureRecInMedia=0x9207,
GetVendorStorageIDs=0x9209,
StartMovieRecInCard=0x920A,
EndMovieRec=0x920B,
TerminateCapture=0x920C,
GetDevicePTPIPInfo=0x90E0,
GetPartialObjectHiSpeed=0x9400,
GetDevicePropEx=0x9504,
**product_operations
)
def _ResponseCode(self, **product_responses):
return super(Nikon, self)._ResponseCode(
HardwareError=0xA001,
OutOfFocus=0xA002,
ChangeCameraModeFailed=0xA003,
InvalidStatus=0xA004,
SetPropertyNotSupported=0xA005,
WbResetError=0xA006,
DustReferenceError=0xA007,
ShutterSpeedBulb=0xA008,
MirrorUpSequence=0xA009,
CameraModeNotAdjustFNumber=0xA00A,
NotLiveView=0xA00B,
MfDriveStepEnd=0xA00C,
MfDriveStepInsufficiency=0xA00E,
AdvancedTransferCancel=0xA022,
**product_responses
)
def _EventCode(self, **product_events):
return super(Nikon, self)._EventCode(
ObjectAddedInSDRAM=0xC101,
CaptureCompleteRecInSdram=0xC102,
AdvancedTransfer=0xC103,
PreviewImageAdded=0xC104,
**product_events
)
    def _FilesystemType(self, **product_filesystem_types):
        '''Nikon adds no filesystem types; forward product codes unchanged.'''
        return super(Nikon, self)._FilesystemType(
            **product_filesystem_types
        )
    def _NikonEvent(self):
        # Payload layout of a CheckEvents response: a UInt16-counted array
        # of (EventCode, UInt32 parameter) records.
        return PrefixedArray(
            self._UInt16,
            Struct(
                'EventCode' / self._EventCode,
                'Parameter' / self._UInt32,
            )
        )
    def _set_endian(self, endian):
        '''Propagate endianness, then rebuild the Nikon event parser.'''
        logger.debug('Set Nikon endianness')
        super(Nikon, self)._set_endian(endian)
        # Rebuild now that the base scalar types carry the right byte order.
        # Note this replaces the bound method with the construct it returns.
        self._NikonEvent = self._NikonEvent()
# TODO: Add event queue over all transports and extensions.
    def check_events(self):
        '''Check Nikon specific event

        Issues the vendor CheckEvents operation and parses the response
        data phase as an array of (EventCode, Parameter) records.
        '''
        ptp = Container(
            OperationCode='CheckEvents',
            SessionID=self._session,
            TransactionID=self._transaction,
            Parameter=[]
        )
        response = self.recv(ptp)
        return self._parse_if_data(response, self._NikonEvent)
# TODO: Provide a single camera agnostic command that will trigger a camera
def capture(self):
'''Nikon specific capture'''
ptp = Container(
OperationCode='Capture',
SessionID=self._session,
TransactionID=self._transaction,
Parameter=[]
)
return self.mesg(ptp)
def af_capture_sdram(self):
'''Nikon specific autofocus and capture to SDRAM'''
ptp = Container(
OperationCode='AFCaptureSDRAM',
SessionID=self._session,
TransactionID=self._transaction,
Parameter=[]
)
return self.mesg(ptp)
    def event(self, wait=False):
        '''Check Nikon or PTP events

        If `wait` this function is blocking. Otherwise it may return None.
        '''
        # TODO: Do something reasonable on wait=True
        evt = None
        timeout = None if wait else 0.001
        # TODO: Join queues to preserve order of Nikon and PTP events.
        # Prefer vendor events gathered by the polling thread; fall back to
        # standard PTP events from the base class.
        if not self.__event_queue.empty():
            evt = self.__event_queue.get(block=not wait, timeout=timeout)
        else:
            evt = super(Nikon, self).event(wait=wait)
        return evt
    def __nikon_poll_events(self):
        '''Poll events, adding them to a queue.

        Runs on a background thread; exits when shutdown is requested or
        the main thread is no longer alive.
        '''
        while (not self.__nikon_event_shutdown.is_set() and
                _main_thread_alive()):
            try:
                evts = self.check_events()
                if evts:
                    for evt in evts:
                        logger.debug('Event queued')
                        self.__event_queue.put(evt)
            except Exception as e:
                # Best-effort polling: log and keep the thread alive.
                logger.error(e)
            sleep(3)
        self.__nikon_event_shutdown.clear()
|
# import libs
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)  # physical pin numbering
GPIO.setup(40, GPIO.OUT)
# Initialize PWM on pin 40 at 50 Hz (standard hobby-servo frame rate)
servo = GPIO.PWM(40, 50)
servo.start(0)
while True:
    print("0")
    servo.ChangeDutyCycle(2.5)   # 2.5% duty -> ~0 degrees (typical servo mapping; confirm for your servo)
    time.sleep(1)
    print("180")
    servo.ChangeDutyCycle(12.5)  # 12.5% duty -> ~180 degrees
    time.sleep(1)
    servo.stop()
    GPIO.cleanup()
    break  # NOTE(review): unconditional break — the loop body runs exactly once
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 21:06:25 2013
@author: miguel
"""
class Simbolo(object):
    """A symbol with an occurrence probability, for prefix-code construction.

    `codigo` accumulates code characters one at a time via setCodigo();
    `sucesor` links to the next symbol in a chain.
    """

    def __init__(self, dato, p):
        self.simbolo = dato    # the symbol itself
        self.probabilidad = p  # its probability
        self.codigo = ""       # code string, built incrementally
        self.sucesor = None    # successor link

    def getSimbolo(self):
        return self.simbolo

    def getProbabilidad(self):
        return self.probabilidad

    def getSucesor(self):
        return self.sucesor

    def setSucesor(self, nuevo):
        self.sucesor = nuevo

    def getCodigo(self):
        return self.codigo

    def setCodigo(self, c):
        # Append, never overwrite.
        self.codigo = self.codigo + c
|
from django.db import models
from account_app.models import Account
from product_mgr_app.models import Product, Rating
class Recommend(models.Model):
    """A recommendation of a product made by an account."""
    account = models.ForeignKey(Account, on_delete=models.CASCADE)  # who recommends
    product = models.ForeignKey(Product, on_delete=models.CASCADE)  # what is recommended
    date = models.DateTimeField('date published')
|
# Odometer puzzle: find a 6-digit reading whose last four digits are
# palindromic, where one mile later the last five are palindromic, one more
# mile the middle four are palindromic, and one more mile all six are.
odometer_seq = []

def odometer_speed(x):
    """Populate odometer_seq with readings 100001..999996 as 6-digit strings.

    NOTE(review): the argument is ignored — the original overwrote it with
    100000; the parameter is kept for backward compatibility.
    """
    x = 100000
    while x < 999996:
        x += 1
        odometer_seq.append(str(x))

odometer_speed(1000)

four_digit_palindromic = []
five_digit_palindromic = []
middle_four_palindromic = []
six_digit_palindromic = []

def four_digit(zz):
    """Collect readings whose last four digits form a palindrome."""
    for i in zz:
        tail = i[2:]  # hoist the slice out of the comparisons
        if len(tail) == 4 and tail == tail[::-1]:
            four_digit_palindromic.append(int(i))

four_digit(odometer_seq)

def five_digit(z1):
    """Collect readings whose last five digits form a palindrome."""
    for j in z1:
        tail = j[1:]
        if len(tail) == 5 and tail == tail[::-1]:
            five_digit_palindromic.append(int(j))

five_digit(odometer_seq)

def middle_four(z2):
    """Collect readings whose middle four digits form a palindrome."""
    for k in z2:
        mid = k[1:-1]
        if len(mid) == 4 and mid == mid[::-1]:
            middle_four_palindromic.append(int(k))

middle_four(odometer_seq)

def six_digit(z3):
    """Collect readings that are palindromic across all six digits."""
    for h in z3:
        if len(h) == 6 and h == h[::-1]:
            six_digit_palindromic.append(int(h))

six_digit(odometer_seq)

def checker(list_a, list_b, list_c, list_d):
    """Print and return every reading r+3 such that r is in list_a,
    r+1 in list_b, r+2 in list_c and r+3 in list_d.

    Fix: the original returned None, so the final print emitted 'None'.
    Sets replace the O(n) list-membership tests.
    """
    found = []
    set_b, set_c, set_d = set(list_b), set(list_c), set(list_d)
    for r in list_a:
        if r + 1 in set_b and r + 2 in set_c and r + 3 in set_d:
            print(r + 3)
            found.append(r + 3)
    return found

Final_result = checker(four_digit_palindromic, five_digit_palindromic, middle_four_palindromic, six_digit_palindromic)
print(Final_result)
|
from __future__ import print_function
import markovify
#import tweepy
import random
import datetime
#from keys import keys
from unidecode import unidecode
import textwrap
from Adafruit_Thermal import *
# Starts the api and auth
#consumer_key = keys['consumer_key']
#consumer_secret = keys['consumer_secret']
#access_token = keys['access_token']
#access_token_secret = keys['access_token_secret']
#auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_token, access_token_secret)
#api = tweepy.API(auth)
# Creates the post and logs to a file
# Creates the post and logs to a file
def generate_post():
    """Generate a short Markov-chain 'mash note', print it on the thermal
    printer, log it to history.txt, and return the would-be tweet text.
    """
    with open('cleaned.txt') as f:
        text = f.read()
    # Second-order Markov model built from the source corpus.
    text_model = markovify.Text(text, state_size=2)
    mash_text = text_model.make_short_sentence(129) # was 140
    # NOTE(review): make_short_sentence can return None if no sentence fits;
    # textwrap.fill would then raise — unhandled here.
    wrapped_text = textwrap.fill(mash_text, 32)  # 32-column printer width
    output_text = "@acoluthon " + mash_text
    printer = Adafruit_Thermal("/dev/ttyAMA0", 19200, timeout=5)
    printer.justify('L')
    printer.feed(3)
    printer.doubleHeightOn()
    printer.println("Mash Note")
    printer.doubleHeightOff()
    printer.feed(1)
    printer.println(wrapped_text)
    printer.feed(3)
    # Write the status to a file, for debugging
    with open('history.txt', 'a') as f:
        f.write('mashed: ' + mash_text + ' | tweeted: ' + output_text + '\n')
    return output_text
generate_post()
# Post the status to Twitter
#api.update_status(status=generate_post())
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy instance; bound to the app inside create_app().
db = SQLAlchemy()

def create_app():
    """Application factory: build, configure and return the Flask app."""
    # Instantiate the Flask app; static files come from ../fronted and are
    # served under the /html URL path.
    app = Flask(__name__,
                static_folder="../fronted",
                static_url_path="/html",
                )
    # Load configuration
    app.config.from_object("config.Config")
    # Initialize the database
    db.init_app(app)
    # Routes
    @app.route('/')
    def hello_world():
        return 'Hello World!'
    # Blueprint registration
    # Front-office user login
    from myapp.front.auth import front_auth
    app.register_blueprint(front_auth, url_prefix="/front_auth/")
    # Front-office Blueprint
    from myapp.front.view import front
    app.register_blueprint(front, url_prefix="/front")
    # Back-office authentication
    from myapp.admin.auth import admin_auth
    app.register_blueprint(admin_auth, url_prefix="/admin_auth/")
    # Back-office Blueprint
    from myapp.admin.view import admin
    app.register_blueprint(admin, url_prefix="/admin/")
    # File upload and download
    from myapp.api.file import file
    app.register_blueprint(file, url_prefix="/file/")
    # api
    from myapp.api.api import api
    app.register_blueprint(api, url_prefix="/api/")
    return app
|
# Classify the user's pocket money into one of three tiers.
pocketMoney = int(input('Enter your pocket money:'))
if pocketMoney > 500:
    print('Ew stitnky rich kid')
elif pocketMoney > 100:
    print('I live life')
else:
    print("im purr ")
|
#!/usr/bin/python3
"""select states module"""
from sys import argv
import MySQLdb
if __name__ == "__main__":
    # Command-line arguments: argv[1]=user, argv[2]=password, argv[3]=database.
    db = MySQLdb.connect(host="localhost",
                         port=3306,
                         user=argv[1],
                         password=argv[2],
                         database=argv[3])
    cursor = db.cursor()
    cursor.execute("SELECT * FROM `states` ORDER BY `id` ASC;")
    # Print every row; the comprehension is used only for its side effect.
    [print(state) for state in cursor.fetchall()]
    db.close()
|
# Given a list of words and then a query word, decide whether the query is at
# edit distance exactly one from any stored word. Build a dict mapping each
# word to every variant with one position replaced by '_'; generate the same
# variants for the query and check whether any appears under a key that is
# not equal to the query itself.
class MagicDictionary:
    """Answers whether a query word is at edit distance exactly one
    (single-character substitution) from any stored word.

    Each stored word maps to all of its one-hole masks, e.g. 'ab' ->
    ['_b', 'a_']; a query matches when one of its own masks appears under
    a key different from the query itself.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.magic = {}

    def buildDict(self, words):
        """
        Build a dictionary through a list of words
        :type words: List[str]
        :rtype: void
        """
        for word in words:
            self.magic[word] = [
                word[:pos] + '_' + word[pos + 1:] for pos in range(len(word))
            ]

    def search(self, word):
        """
        Returns if there is any word in the trie that equals to the given word after modifying exactly one character
        :type word: str
        :rtype: bool
        """
        masks = [word[:pos] + '_' + word[pos + 1:] for pos in range(len(word))]
        return any(
            word != key and mask in stored
            for key, stored in self.magic.items()
            for mask in masks
        )
# Your MagicDictionary object will be instantiated and called as such:
# 'ba' is not one substitution away from 'a', 'b' or 'ab', so this prints False.
obj = MagicDictionary()
obj.buildDict(['a', 'b', 'ab'])
param_2 = obj.search('ba')
print(param_2)
|
class WindowRendererStratgey:
    """Maps world (tile) coordinates onto screen (pixel) coordinates.

    reset() must be called before any conversion: it picks the largest
    whole pixels-per-unit that fits the tile grid into the window, and the
    offset properties centre (letterbox) the grid within the screen.
    """

    def __init__(self):
        # Geometry is unknown until reset() is called.
        self.pixels_per_unit = None
        self.x_tiles = None
        self.y_tiles = None
        self.screen_width = None
        self.screen_height = None

    def reset(self, x_tiles, y_tiles, screen_width, screen_height):
        """Record grid/window geometry and derive the scale factor."""
        self.x_tiles = x_tiles
        self.y_tiles = y_tiles
        self.screen_width = screen_width
        self.screen_height = screen_height
        # The limiting axis determines the scale.
        self.pixels_per_unit = min(int(screen_width / x_tiles),
                                   int(screen_height / y_tiles))

    def world_cords_to_screen_cords(self, x_pos, y_pos, width, height):
        """Scale a world-space rectangle and shift it by the centring offsets."""
        scale = self.pixels_per_unit
        return (self.x_offset + x_pos * scale,
                self.y_offset + y_pos * scale,
                width * scale,
                height * scale)

    @property
    def x_offset(self):
        """Horizontal letterbox margin, in pixels (may be fractional)."""
        return (self.screen_width - self.x_tiles * self.pixels_per_unit) / 2

    @property
    def y_offset(self):
        """Vertical letterbox margin, in pixels (may be fractional)."""
        return (self.screen_height - self.y_tiles * self.pixels_per_unit) / 2
|
from pennclubs.settings.base import * # noqa: F401, F403
# Emit JUnit-style XML test reports (for CI consumption).
TEST_RUNNER = "xmlrunner.extra.djangotestrunner.XMLTestRunner"
TEST_OUTPUT_VERBOSE = 2
TEST_OUTPUT_DIR = "test-results"
# Use dummy cache for testing
CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}
|
#should REALLY get this commented up
import math
ffac = 0  # first factor of a*c found by the grouping search
sfac = 0  # its co-factor (ac / ffac)
a = int(input("Input the a of the polynomial: "))
b = int(input("Input the b of the polynomial: "))
c = int(input("Input the c of the polynomial: "))
ac = a * c  # product used when factoring by grouping
radical = 1
def findgcf(a, b):
    """Greatest common factor of a and b (Euclid's algorithm).

    The result carries the sign of b, matching Python's modulo semantics.
    """
    while b != 0:
        a, b = b, a % b
    return int(a)
def radicalreduce(radical):
    """Return `radical` reduced to the string form x*sqrt(y).

    Examples: 8 -> '2√2', 5 -> '(√5)', -8 -> '2i√2', 9 -> '3', -4 -> '2i'.

    Fixes over the original:
    - A perfect square now returns immediately ('3' instead of the garbled
      '3.0(√9)' the fall-through produced), and 0/1 are handled.
    - math.isqrt replaces the hard range(1, 100) brute force, so radicals
      >= 10000 are reduced correctly and no float formatting leaks in.
    """
    prefix = ""
    if radical < 0:  # divide out an i if applicable
        prefix = "i"
        radical = abs(radical)
    root = math.isqrt(radical)
    if root * root == radical:
        # Perfect square (includes 0 and 1): no radical sign needed.
        return str(root) + prefix
    # Largest perfect-square divisor of the radicand.
    gf = 1
    for k in range(2, math.isqrt(radical) + 1):
        if radical % (k * k) == 0:
            gf = k * k
    if gf == 1:
        # Nothing to pull out: leave the radicand under the root sign.
        return prefix + "(" + u'\u221a' + str(radical) + ")"
    # Pull sqrt(gf) out in front of the remaining radicand.
    return str(math.isqrt(gf)) + prefix + u'\u221a' + str(radical // gf)
def quadsolve(x, y, z): #prints the result of the quadratic formula in radical form + decimal form if real
    """Print the roots of x*t^2 + y*t + z via the quadratic formula."""
    # Reduce sqrt(discriminant) to radical string form.
    radreturn = radicalreduce((y**2) - (4 * x * z))
    if (4 * x * z) > (y**2):
        # Negative discriminant: complex roots, radical form only.
        print("(" + str(-y) + " " + u"\u00b1" + " " + str(radreturn) + ")/" + str(2 * x))
    else:
        # Real roots: also print the decimal values.
        quadpos = (-y + math.sqrt((y**2) - (4 * x * z))) / (2 * x)
        quadneg = (-y - math.sqrt((y**2) - (4 * x * z))) / (2 * x)
        print("(" + str(-y) + u" \u00b1 " + str(radreturn) + ")/" + str(2 * x))
        print(str(quadpos) + " or " + str(quadneg)) #this only outputs raw values, should adjust to output stuff w/sqrts as well
def polyfactor(a, b, c): #take the three given inputs
    """Try to factor ax^2 + bx + c by grouping; fall back to quadsolve().

    NOTE(review): relies on the module-level global `ac` (= a*c computed at
    input time) rather than recomputing from the arguments — confirm before
    calling with other values of a and c.
    """
    for i in range (-100, 100): #brute force check
        if (i == 0):
            continue
        ffac = (ac / i) #divide the ac by i for the check
        sfac = (ac / ffac) #take the co-factor of ac
        if (ffac + sfac) == b and (ac % ffac) == 0: #if the two factors also add up to the b, continue. simulates factoring by grouping
            #ffac and a become the two terms of the first grouping, sfac and c become the two terms of second grouping
            firstgcf = int(findgcf(int(ffac), a)) #simulate factoring by grouping by finding gcf of two grouped terms and recording it
            secgcf = int(findgcf(c, int(sfac)))
            fpodfterm = a / firstgcf
            fpodsterm = ffac / firstgcf
            spodfterm = firstgcf
            spodsterm = secgcf
            fpodgcf = abs(findgcf(fpodfterm, fpodsterm)) #find the gcf of each pod, only positive ints
            spodgcf = abs(findgcf(spodfterm, spodsterm))
            fpodfterm = int(fpodfterm / fpodgcf) #divide through by gcf of the respective pod
            fpodsterm = int(fpodsterm / fpodgcf)
            spodfterm = int(spodfterm / spodgcf)
            spodsterm = int(spodsterm / spodgcf)
            if(fpodgcf == 1) or (fpodgcf == -1): #if the gcf is 1, null the variable so it doesn't show up
                fpodgcf = ""
            if(spodgcf == 1) or (spodgcf == -1):
                spodgcf = ""
            print(str(fpodgcf) + str(spodgcf) + "(" + str(int(fpodfterm)) + "x + " + str(int(fpodsterm)) + ")(" + str(spodfterm) + "x + " + str(spodsterm) + ")")
            #print("firstgcf = " + str(firstgcf))
            #print("secgcf = " + str(secgcf))
            #print("ffac = " + str(ffac))
            #print("sfac = " + str(sfac))
            break
        if (i == 99): #if no soln is found, send the data to the quadsolve function
            quadsolve(a, b, c)
polyfactor(a, b, c)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
from sklearn.model_selection import train_test_split
import random
import math
import sys
from layers_c import Layers
from preprocess_c import Preprocess
from pretraining_c import Pretraining
from hypertune_c import HyperTune
class Composable(Layers, Preprocess, Pretraining, HyperTune):
    ''' Composable base (super) class for Models '''

    def __init__(self, init_weights=None, reg=None, relu=None, bias=True):
        """ Constructor
            init_weights : kernel initializer
            reg          : kernel regularizer
            relu         : clip value for ReLU
            bias         : whether to use bias
        """
        Layers.__init__(self, init_weights, reg, relu, bias)
        Preprocess.__init__(self)
        Pretraining.__init__(self)
        HyperTune.__init__(self)

        # Feature maps encoding at the bottleneck layer in classifier (high dimensionality)
        self._encoding = None
        # Pooled and flattened encodings at the bottleneck layer (low dimensionality)
        self._embedding = None
        # Pre-activation conditional probabilities for classifier
        self._probabilities = None
        # Post-activation conditional probabilities for classifier
        self._softmax = None
        self._model = None

    @property
    def model(self):
        return self._model

    @model.setter
    def model(self, _model):
        self._model = _model

    @property
    def encoding(self):
        return self._encoding

    @encoding.setter
    def encoding(self, layer):
        self._encoding = layer

    @property
    def embedding(self):
        return self._embedding

    @embedding.setter
    def embedding(self, layer):
        self._embedding = layer

    @property
    def probabilities(self):
        return self._probabilities

    @probabilities.setter
    def probabilities(self, layer):
        self._probabilities = layer

    ###
    # Training
    ###

    def compile(self, loss='categorical_crossentropy', optimizer=Adam(lr=0.001, decay=1e-5), metrics=['acc']):
        """ Compile the model for training
            loss     : the loss function
            optimizer: the optimizer
            metrics  : metrics to report
        """
        # NOTE(review): the Adam default is created once at class-definition
        # time and the metrics default list is shared between calls; both
        # kept as-is for interface compatibility.
        self.model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    # training variables
    hidden_dropout = None  # hidden dropout in classifier
    i_lr = 0               # initial rate during full training
    e_decay = 0            # weight decay (method, rate) during full training
    e_steps = 0            # number of steps (batches) in an epoch
    t_steps = 0            # total number of steps in training job

    def time_decay(self, epoch, lr):
        """ Time-based Decay: lr / (1 + rate * epoch) """
        return lr * (1. / (1. + self.e_decay[1] * epoch))

    def step_decay(self, epoch, lr):
        """ Step-based (geometric) decay: i_lr * rate**epoch """
        return self.i_lr * self.e_decay[1]**(epoch)

    def exp_decay(self, epoch, lr):
        """ Exponential Decay: i_lr * exp(-rate * epoch) """
        return self.i_lr * math.exp(-self.e_decay[1] * epoch)

    def cosine_decay(self, epoch, lr, alpha=0.0):
        """ Cosine Decay
            alpha : floor for the decay, as a fraction of lr
        """
        cosine_decay = 0.5 * (1 + np.cos(np.pi * (self.e_steps * epoch) / self.t_steps))
        decayed = (1 - alpha) * cosine_decay + alpha
        return lr * decayed

    def training_scheduler(self, epoch, lr):
        """ Learning Rate scheduler for full-training
            epoch : epoch number
            lr    : current learning rate
        """
        # First epoch (not started) - do nothing
        if epoch == 0:
            return lr

        # Hidden dropout unit in classifier: adapt the rate to the
        # train/validation accuracy gap (crude overfitting detector).
        if self.hidden_dropout is not None:
            # If training accuracy and validation accuracy more than 3% apart
            if self.model.history.history['acc'][epoch-1] > self.model.history.history['val_acc'][epoch-1] + 0.03:
                if self.hidden_dropout.rate == 0.0:
                    self.hidden_dropout.rate = 0.5
                elif self.hidden_dropout.rate < 0.75:
                    self.hidden_dropout.rate *= 1.1
                print("*** Overfitting, set dropout to", self.hidden_dropout.rate)
            else:
                if self.hidden_dropout.rate != 0.0:
                    print("*** Turning off dropout")
                    self.hidden_dropout.rate = 0.0

        # No decay method configured
        if self.e_decay[0] is None:
            return lr

        # Decay the learning rate
        if self.e_decay[0] == 'time':
            lr = self.time_decay(epoch, lr)
        elif self.e_decay[0] == 'step':
            lr = self.step_decay(epoch, lr)
        elif self.e_decay[0] == 'exp':
            lr = self.exp_decay(epoch, lr)
        else:
            lr = self.cosine_decay(epoch, lr)
        return lr

    def training(self, x_train, y_train, epochs=10, batch_size=32, lr=0.001, decay=(None, 0),
                 split=0.1, loss='categorical_crossentropy', metrics=['acc']):
        """ Full Training of the Model
            x_train    : training images
            y_train    : training labels
            epochs     : number of epochs
            batch_size : size of batch
            lr         : learning rate
            decay      : learning rate decay as (method, value)
            split      : percent to use as validation data
            loss       : loss function
            metrics    : metrics to report during training
        """
        print("*** Full Training")

        # Check for hidden dropout layer in classifier
        for layer in self.model.layers:
            if isinstance(layer, Dropout):
                self.hidden_dropout = layer
                break

        # Normalize decay to the (method, value) tuple form.
        # FIX: the original tested `decay is None or 0`, which parses as
        # `(decay is None) or 0` — the `or 0` branch can never trigger, so
        # passing decay=0 fell through and raised an exception.
        if decay is None or decay == 0:
            decay = (None, 0)
        elif isinstance(decay, float):
            decay = ('time', decay)
        elif not isinstance(decay, tuple):
            raise Exception("Training: decay must be (time, value)")
        elif decay[0] not in [None, 'time', 'step', 'exp', 'cosine']:
            raise Exception("Training: invalid method for decay")

        self.i_lr = lr
        self.e_decay = decay
        self.e_steps = x_train.shape[0] // batch_size
        self.t_steps = self.e_steps * epochs
        self.compile(optimizer=Adam(lr=lr, decay=decay[1]), loss=loss, metrics=metrics)

        lrate = LearningRateScheduler(self.training_scheduler, verbose=1)
        self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=split, verbose=1,
                       callbacks=[lrate])

    def evaluate(self, x_test, y_test):
        """ Call underlying evaluate() method """
        return self._model.evaluate(x_test, y_test)

    def cifar10(self, epochs=10, decay=('cosine', 0)):
        """ Train on CIFAR-10
            epochs : number of epochs for full training
        """
        from tensorflow.keras.datasets import cifar10
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        x_train, x_test = self.standardization(x_train, x_test)
        y_train = to_categorical(y_train, 10)
        y_test = to_categorical(y_test, 10)
        y_train = self.label_smoothing(y_train, 10, 0.1)

        # compile the model
        self.compile(loss='categorical_crossentropy', metrics=['acc'])

        self.warmup(x_train, y_train)

        lr, batch_size = self.random_search(x_train, y_train, x_test, y_test)

        self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
                      lr=lr, decay=decay)
        self.evaluate(x_test, y_test)

    def cifar100(self, epochs=20, decay=('cosine', 0)):
        """ Train on CIFAR-100
            epochs : number of epochs for full training
        """
        from tensorflow.keras.datasets import cifar100
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train, x_test = self.normalization(x_train, x_test)
        y_train = to_categorical(y_train, 100)
        y_test = to_categorical(y_test, 100)
        # FIX: was label_smoothing(y_train, 10, 0.1) — the CIFAR-10 class
        # count copied over; CIFAR-100 has 100 classes.
        y_train = self.label_smoothing(y_train, 100, 0.1)

        self.compile(loss='categorical_crossentropy', metrics=['acc'])

        self.warmup(x_train, y_train)

        lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)

        self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
                      lr=lr, decay=decay)
        self.evaluate(x_test, y_test)

    def coil100(self, epochs=20, decay=('cosine', 0)):
        """ Train on COIL-100 (via tensorflow_datasets)
            epochs : number of epochs for full training
        """
        # Get TF.dataset generator for COIL100
        train, info = tfds.load('coil100', split='train', shuffle_files=True, with_info=True, as_supervised=True)
        n_classes = info.features['label'].num_classes
        n_images = info.splits['train'].num_examples
        input_shape = info.features['image'].shape

        # Get the dataset into memory
        train = train.shuffle(n_images).batch(n_images)
        for images, labels in train.take(1):
            pass

        images = np.asarray(images)
        images, _ = self.standardization(images, None)
        labels = to_categorical(np.asarray(labels), n_classes)

        # split the dataset into train/test
        x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)

        self.compile(loss='categorical_crossentropy', metrics=['acc'])

        self.warmup(x_train, y_train)

        lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)

        self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
                      lr=lr, decay=decay)
        self.evaluate(x_test, y_test)
|
def main():
    """Append a line of text to out.txt.

    Mode 'at' appends text; 'wt' would (over)write, 'xt' is exclusive and
    fails if the file already exists.
    """
    # `with` guarantees the file is closed even if print() raises —
    # the original relied on a manual close() call.
    with open('out.txt', 'at') as fout:
        # Switch print's output context so it writes to the file object.
        print('here is some text', file=fout)
def my_read():
    """Read out.txt in text mode ('rt') and echo its contents to stdout."""
    # Context manager replaces the manual open/close pair.
    with open('out.txt', 'rt') as fin:
        received = fin.read()
    print(received)
if __name__ == '__main__':
    # Round-trip demo: append a line, then read the whole file back.
    main()      # write some text
    my_read()   # read it back!
|
#!/usr/bin/env python
import neutronclient.v2_0.client as ntclient
from.credentials import get_neutron_credits
__author__ = 'Yuvv'
# Build a Neutron v2.0 client from credentials supplied by the local
# credentials module.
cred = get_neutron_credits()
neutron = ntclient.Client(**cred)
# TODO:
'''
>>> ops.neutron.list_networks()
{'networks': [{u'status': u'ACTIVE',
u'subnets': [u'0050c144-91fa-494d-89e7-fb4c7a76e40c'],
u'name': u'public', u'provider:physical_network': u'public',
u'admin_state_up': True,
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'mtu': 0, u'router:external': False,
u'port_security_enabled': True, u'shared': True,
u'provider:network_type': u'flat',
u'id': u'111e3af7-71d0-4dcb-b0f7-a068eaf20e7e',
u'provider:segmentation_id': None}]}
>>> ops.neutron.list_agents()
{u'agents': [{u'binary': u'neutron-dhcp-agent',
u'description': None,
u'admin_state_up': True,
u'heartbeat_timestamp': u'2016-05-25 01:18:58',
u'alive': True,
u'id': u'2131b624-cd64-4139-9f05-2578b44157c5',
u'topic': u'dhcp_agent',
u'host': u'controller',
u'agent_type': u'DHCP agent',
u'started_at': u'2016-05-25 00:53:28',
u'created_at': u'2016-04-26 06:49:13',
u'configurations': {u'subnets': 1,
u'use_namespaces': True,
u'dhcp_lease_duration': 86400,
u'dhcp_driver': u'neutron.agent.linux.dhcp.Dnsmasq',
u'ports': 2, u'log_agent_heartbeats': False,
u'networks': 1}},
{u'binary': u'neutron-linuxbridge-agent',
u'description': None, u'admin_state_up': True,
u'heartbeat_timestamp': u'2016-05-25 01:18:58',
u'alive': True, u'id': u'26c1648e-6e5c-4a9f-bf71-e2f8465859c4',
u'topic': u'N/A',
u'host': u'controller',
u'agent_type': u'Linux bridge agent',
u'started_at': u'2016-05-25 00:53:28',
u'created_at': u'2016-04-26 06:49:13',
u'configurations': {u'interface_mappings': {u'public': u'eth1'},
u'bridge_mappings': {}, u'devices': 1}},
{u'binary': u'neutron-metadata-agent',
u'description': None, u'admin_state_up': True,
u'heartbeat_timestamp': u'2016-05-25 01:18:59',
u'alive': True, u'id': u'91bd88af-2e88-4298-9f9f-5a6752bed2f9',
u'topic': u'N/A', u'host': u'controller',
u'agent_type': u'Metadata agent',
u'started_at': u'2016-05-25 00:53:29',
u'created_at': u'2016-04-26 06:49:13',
u'configurations': {u'log_agent_heartbeats': False,
u'nova_metadata_ip': u'controller',
u'nova_metadata_port': 8775,
u'metadata_proxy_socket': u'/var/lib/neutron/metadata_proxy'}},
{u'binary': u'neutron-linuxbridge-agent',
u'description': None, u'admin_state_up': True,
u'heartbeat_timestamp': u'2016-05-25 01:19:05',
u'alive': True, u'id': u'cc8252c8-5967-4e1a-8388-297bde87346c',
u'topic': u'N/A', u'host': u'compute',
u'agent_type': u'Linux bridge agent',
u'started_at': u'2016-05-25 00:53:36',
u'created_at': u'2016-04-26 06:55:51',
u'configurations': {u'interface_mappings': {u'public': u'eth1'},
u'bridge_mappings': {}, u'devices': 0}}]}
>>> ops.neutron.list_subnets()
{'subnets': [{u'name': u'public',
u'enable_dhcp': True,
u'network_id': u'111e3af7-71d0-4dcb-b0f7-a068eaf20e7e',
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'dns_nameservers': [u'192.168.23.2'],
u'ipv6_ra_mode': None,
u'allocation_pools': [{u'start': u'192.168.23.10', u'end': u'192.168.23.99'}],
u'gateway_ip': u'192.168.23.1',
u'ipv6_address_mode': None,
u'ip_version': 4,
u'host_routes': [],
u'cidr': u'192.168.23.0/24',
u'id': u'0050c144-91fa-494d-89e7-fb4c7a76e40c',
u'subnetpool_id': None}]}
>>> ops.neutron.list_security_groups()
{'security_groups': [{u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'name': u'default', u'description': u'Default security group',
u'security_group_rules': [{u'remote_group_id': None,
u'direction': u'ingress',
u'remote_ip_prefix': u'0.0.0.0/0',
u'protocol': u'icmp',
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': None,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': None,
u'ethertype': u'IPv4',
u'id': u'0c7c5f57-6d64-4549-afd2-465abd7f5d74'},
{u'remote_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'direction': u'ingress',
u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': None,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': None,
u'ethertype': u'IPv4',
u'id': u'13b58bce-9029-4fb0-8130-9d3f8dfd5c54'},
{u'remote_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'direction': u'ingress',
u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': None,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': None,
u'ethertype': u'IPv6',
u'id': u'77eeabcd-24a7-49a7-b504-5109f1f32255'},
{u'remote_group_id': None,
u'direction': u'egress',
u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': None,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': None, u'ethertype': u'IPv4',
u'id': u'0ebc3d23-19c3-4286-ae74-0ff8b8ac3f21'},
{u'remote_group_id': None, u'direction': u'ingress',
u'remote_ip_prefix': u'0.0.0.0/0',
u'protocol': u'tcp',
u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': 22,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': 22,
u'ethertype': u'IPv4',
u'id': u'a9e648e9-087d-4175-b44b-4fe7c2be335e'},
{u'remote_group_id': None,
u'direction': u'egress', u'remote_ip_prefix': None,
u'protocol': None, u'tenant_id': u'1e5cd29a7daf4a6eaa8bd981d2215521',
u'port_range_max': None,
u'security_group_id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6',
u'port_range_min': None,
u'ethertype': u'IPv6',
u'id': u'9eaa2679-74ee-45e3-b37a-51712bd33813'}],
u'id': u'447f5fd8-f7f2-4938-8fb2-413c42b3afc6'},
{u'tenant_id': u'01715308e32e4f6c9cfeff963e01ee71',
u'name': u'default',
u'description': u'Default security group',
u'security_group_rules': [{u'remote_group_id': None,
u'direction': u'egress', u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'01715308e32e4f6c9cfeff963e01ee71',
u'port_range_max': None,
u'security_group_id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'port_range_min': None,
u'ethertype': u'IPv4',
u'id': u'c4d14382-7d19-442f-8f6a-4818fa5cbd15'},
{u'remote_group_id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'direction': u'ingress',
u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'01715308e32e4f6c9cfeff963e01ee71',
u'port_range_max': None,
u'security_group_id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'port_range_min': None,
u'ethertype': u'IPv4',
u'id': u'6cdbd10b-1594-4b6d-b5bb-1c11860e2fc1'},
{u'remote_group_id': None,
u'direction': u'egress',
u'remote_ip_prefix': None, u'protocol': None,
u'tenant_id': u'01715308e32e4f6c9cfeff963e01ee71',
u'port_range_max': None, u'security_group_id':
u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'port_range_min': None,
u'ethertype': u'IPv6',
u'id': u'0e355680-e9ae-4643-aebd-ca5c6e66916f'},
{u'remote_group_id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'direction': u'ingress',
u'remote_ip_prefix': None,
u'protocol': None,
u'tenant_id': u'01715308e32e4f6c9cfeff963e01ee71',
u'port_range_max': None,
u'security_group_id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5',
u'port_range_min': None,
u'ethertype': u'IPv6',
u'id': u'2610cc74-f264-4980-8d04-79ad8b24fcdd'}],
u'id': u'6e4436d5-327d-478f-867e-f8780ee0d6a5'}]}
'''
|
# Extract vertex ('v ') records from an OBJ model, scale by 50 and shift
# (x - 17, y - 7, z), then dump them as a JS-style object literal.
vertices = []
with open('./obj/people.obj', 'r', encoding='utf-8') as f:
    # Iterate the file lazily instead of materializing readlines().
    for line in f:
        if line.startswith('v '):
            value = line.replace('\n', '').split(' ')
            vertices.append(str(round(float(value[1]) * 50, 2) - 17))
            vertices.append(str(round(float(value[2]) * 50, 2) - 7))
            vertices.append(str(round(float(value[3]) * 50, 2)))

# Write the output file; fix: the original used a bare open()/close() pair
# (reusing the name f), which leaks the handle if write() raises.
with open('output.txt', 'w', encoding='utf-8') as out:
    outputData = '{name:"test", vertices: ' + str(vertices) + '}'
    out.write(outputData)
# print(vertices)
|
# A flaw of reference counting: cyclic references
class Person(object):
    """Doubly-linked node used to demonstrate reference-counting cycles."""

    def __init__(self, name):
        self.name = name
        self.next = None  # forward link
        self.pre = None   # backward link

    def __del__(self):
        # Announce finalization so cycle collection can be observed.
        # (The bare super() call has no effect; preserved from the original.)
        super()
        print("%s执行了del函数" % self.name)
# Repeatedly build and drop a reference cycle; pauses for input each pass
# so memory behaviour can be observed externally. Runs forever.
while True:
    p1 = Person('p1')
    p2 = Person('p2')
    # Cyclic reference: each object keeps the other's refcount above zero,
    # so plain reference counting alone cannot reclaim them on `del`.
    p1.next = p2
    p2.pre = p1
    del p1
    del p2
    a = input("*************")
|
#find the sum of all multiples of 3 or 5 below 1000.
import sys;
def threefivesum():
    """Sum of all multiples of 3 or 5 below 1000.

    Uses inclusion-exclusion: add the multiples of 3 and of 5, then
    subtract the multiples of 15 counted twice.
    """
    total = 0
    for m in range(3, 1000, 3):
        total += m
    for m in range(5, 1000, 5):
        total += m
    for m in range(15, 1000, 15):
        total -= m
    return total
def evenfibsum():
    """Sum of the even Fibonacci numbers below 4 million.

    FIX: the original loop condition was `fib1 < limit & fib2 < limit`;
    `&` binds tighter than `<`, so it evaluated as the chained comparison
    `fib1 < (limit & fib2) < limit` — a bitwise AND, not the intended
    logical one. Replaced with `and`.
    """
    fib1 = 1
    fib2 = 2
    counter = 0
    answer = 0
    limit = 4000000
    # Even Fibonacci numbers occur every third term (2, 8, 34, ...),
    # so the larger of the pair is added when counter % 3 == 0.
    while fib1 < limit and fib2 < limit:
        if counter % 3 == 0:
            if fib1 > fib2:
                answer += fib1
            else:
                answer += fib2
        # Advance the smaller of the two running terms.
        if fib1 < fib2:
            fib1 += fib2
        else:
            fib2 += fib1
        counter += 1
    return answer
# FIX: parenthesized print works identically with one argument on both
# Python 2 and 3, and the last line previously computed
# `int + str`, which raises TypeError; str() makes the concatenation valid.
print("Two Project Euler Problems")
print("Sum of all multiples of 3 and 5 below 1000")
print(threefivesum())
print("Sum of even Fibonacci numbers below 4 million")
print(str(evenfibsum()) + " coudn't get this one to work")
|
from chess.pieces import Pawn
from chess.board import Board
import pytest
def test_pawn_init():
    """A freshly built black pawn exposes the expected default state."""
    pawn = Pawn(1, 0, False)  # row 1, col 0, black
    assert pawn.row == 1
    assert pawn.col == 0
    assert pawn.is_white is False
    assert pawn.first_move is True
    assert pawn.unit == 'p'
    assert pawn.name == 'bp'
    assert pawn.enpassantable is False
    rep = pawn.__repr__()
    assert rep == "Pawn(1, 0, is_white=False)"
def test_pawn_init_not_first_move():
    """The optional fourth argument marks the pawn as already moved."""
    pawn = Pawn(3, 1, False, False)
    assert pawn.first_move is False
@pytest.fixture
def board():
    """Board fixture covering first moves, blocked pawns and captures."""
    arr = [
        ["br", "bn", "bb", "bq", "bk", "bb", "wp", "br"],
        ["--", "--", "--", "bp", "--", "bp", "--", "--"],
        ["--", "--", "bp", "--", "bp", "--", "--", "--"],
        ["bp", "bp", "--", "--", "--", "--", "--", "bp"],
        ["wp", "wp", "--", "--", "--", "--", "--", "wp"],
        ["--", "--", "wp", "--", "wp", "--", "--", "--"],
        ["--", "wp", "--", "wp", "--", "wp", "--", "--"],
        ["wr", "wn", "wb", "wq", "wk", "wb", "bp", "wr"]
    ]
    return Board(array=arr)
@pytest.mark.parametrize(
    "coord, piece_name, moves", [
        ((6, 3), 'wp', [(5, 3), (4, 3)]),  # white first move
        ((1, 3), 'bp', [(2, 3), (3, 3)]),  # black first move
        ((5, 4), 'wp', [(4, 4)]),          # white second move
        ((2, 4), 'bp', [(3, 4)]),          # black second move
        ((4, 7), 'wp', []),                # white blocked
        ((3, 7), 'bp', []),                # black blocked
        ((4, 1), 'wp', [(3, 0)]),          # white capture
        ((3, 1), 'bp', [(4, 0)]),          # black capture
        ((4, 0), 'wp', [(3, 1)]),          # white capture on board edge
        ((3, 0), 'bp', [(4, 1)])           # black capture on board edge
    ]
)
def test_pawn_get_moves(board, coord, piece_name, moves):
    """Each pawn on the fixture board reports exactly the expected moves."""
    pawn = board[coord]
    # Sanity check the fixture: the right piece is at the coordinate.
    assert pawn.name == piece_name
    assert pawn.get_moves(board) == moves
@pytest.mark.parametrize(
    "move_from, capt_coord, move_to, enpass", [
        ((3, 1), (3, 0), (2, 0), True),   # enpassantable, white on black
        ((4, 6), (4, 7), (5, 7), False)   # not enpassantable, black on white
    ]
)
def test_enpassant(enpassant_board, move_from, capt_coord, move_to, enpass):
    """The en-passant capture square appears iff the target pawn is flagged."""
    move_pawn = enpassant_board[move_from]
    capt_pawn = enpassant_board[capt_coord]
    capt_pawn.enpassantable = enpass
    result = move_to in move_pawn.get_moves(enpassant_board)
    assert result is enpass

@pytest.fixture
def enpassant_board():
    """Board with pawns adjacent on ranks 3 and 4 for en-passant scenarios."""
    arr = [
        ["br", "bn", "bb", "bq", "bk", "bb", "wp", "br"],
        ["--", "--", "--", "bp", "--", "bp", "--", "--"],
        ["--", "--", "bp", "--", "bp", "--", "--", "--"],
        ["bp", "wp", "--", "--", "--", "--", "--", "bp"],
        ["wp", "--", "--", "--", "--", "--", "bp", "wp"],
        ["--", "--", "wp", "--", "wp", "--", "--", "--"],
        ["--", "--", "--", "wp", "--", "wp", "--", "--"],
        ["wr", "wn", "wb", "wq", "wk", "wb", "bp", "wr"]
    ]
    return Board(array=arr)
|
#find if a node is connected or not
#l = [[1, 3], [3, 4], [2, 4], [1, 2], [2, 3], [5, 6] , [5 , 7]]
# Directed edge list: each pair is [source, destination].
l = [[1, 2], [3 , 4] , [5 , 6]]
def isReachable(l, m, s, d):
    """BFS over the directed edge list `l`: True iff `d` is reachable from `s`.

    `m` is the node list; visited flags are kept positionally via m.index().
    """
    seen = [False] * len(m)
    queue = [s]
    seen[m.index(s)] = True
    while queue:
        node = queue.pop(0)
        if node == d:
            return True
        # Only edges leaving `node` are followed (edges are directed).
        for edge in l:
            if edge[0] == node and not seen[m.index(edge[1])]:
                queue.append(edge[1])
                seen[m.index(edge[1])] = True
    return False
m = [1, 2, 3, 4 , 5 , 6]  # node set
re = []  # nodes that appear in some reachable pair
nr = []  # destination nodes of unreachable pairs
# Test every ordered pair (i, j) with j > i and bucket the results.
for i in m:
    for j in range(i + 1 , len(m) + 1):
        print(i , j , isReachable(l , m , i , j))
        if isReachable(l , m , i , j):
            re.append(i)
            re.append(j)
        else:
            nr.append(j)
print(len(l))
print(list(set(re)))
print(list(set(nr)))
|
from onegov.chat import Message
from onegov.org.models.message import TicketMessageMixin
class AgencyMutationMessage(Message, TicketMessageMixin):
    """Ticket message recorded for an agency mutation request.

    Stored polymorphically as 'agency_mutation'.
    """
    __mapper_args__ = {
        'polymorphic_identity': 'agency_mutation'
    }

    @classmethod
    def create(cls, ticket, request, change):
        # Forward `change` as a keyword so the mixin persists it with
        # the message payload.
        return super().create(ticket, request, change=change)

class PersonMutationMessage(Message, TicketMessageMixin):
    """Ticket message recorded for a person mutation request.

    Stored polymorphically as 'person_mutation'.
    """
    __mapper_args__ = {
        'polymorphic_identity': 'person_mutation'
    }

    @classmethod
    def create(cls, ticket, request, change):
        return super().create(ticket, request, change=change)
|
"""
Definitions for model that classifies dispatch messages into 24 categories.
"""
from functools import partial
import re
from autocorrect import spell, word
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from xgboost import XGBClassifier
import nltk
def preprocess(msg, sub_dict, correct=True):
    """
    Preprocess a dispatch message:
    1. Lower-case.
    2. Expand the abbreviations in sub_dict.
    3. Keep only the comments, type, problem and responder script sections.
    4. Strip non-alphabetic characters.
    5. Optionally spell-correct each word.
    """
    msg = msg.lower()
    # The pattern consumes the non-word characters around the abbreviation,
    # so the replacement re-inserts surrounding spaces.
    for abbr, expansion in sub_dict.items():
        msg = re.sub(r'(?:[^\w]|^){}(?:[^\w]|$)'.format(abbr),
                     ' {} '.format(expansion), msg)
    # Pull the wanted sections, in this fixed order; each section's text runs
    # lazily up to the next ",<label>:" header.
    sections = []
    for header in ('comments?', 'type?', 'problem?', 'responder script?'):
        sections += re.findall(
            r'(?:{}):(?P<info>.*?)(?:(?:,[\w\s]*:))'.format(header), msg)
    msg = ' '.join(sections)
    msg = re.sub(r'[^a-z]', ' ', msg)
    tokens = msg.split()
    if correct:
        return ' '.join([(w if w in word.KNOWN_WORDS else spell(w))
                         for w in tokens])
    return ' '.join(tokens)
class DispatchPreprocessor(BaseEstimator, TransformerMixin):
    """Sklearn transformer applying `preprocess` to every dispatch message."""

    def __init__(self, sub_dict):
        # Abbreviation -> expansion map handed straight to `preprocess`.
        self.sub_dict = sub_dict

    def fit(self, X, y=None):
        """No-op fit; nothing is learned."""
        return self

    def transform(self, X):
        """Run the preprocessing function over each element of X."""
        return X.apply(lambda message: preprocess(message, sub_dict=self.sub_dict))
class TextSelector(BaseEstimator, TransformerMixin):
    """Transformer that picks a single named column out of its input."""

    def __init__(self, field):
        # Column/key to select in transform().
        self.field = field

    def fit(self, X, y=None):
        """No-op fit; nothing is learned."""
        return self

    def transform(self, X):
        """Return the configured column of X."""
        return X[self.field]
def tokenizer(str_input):
    """Normalize a raw string, split on whitespace, and Porter-stem tokens."""
    stemmer = nltk.PorterStemmer()
    # Keep letters, digits and hyphens; everything else becomes a separator.
    cleaned = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower()
    return [stemmer.stem(token) for token in cleaned.split()]
# Abbreviation -> expansion map applied by DispatchPreprocessor (via
# `preprocess`) before vectorization.
SUB_DICT = {
    'edp': 'emotionally disturbed person',
    'unk': 'unknown',
    'als': 'advanced life support',
    'bls': 'basic life support',
    'ams': 'altered mental state',
    'intox': 'intoxicated',
    'cath': 'catheter',
    'poss': 'possible'
}
class IncidentClassifier(BaseEstimator, TransformerMixin):
    """
    Incident classifier pipeline: select the 'Message' column, preprocess,
    tf-idf vectorize, reduce with truncated SVD, classify with XGBoost.
    """
    def __init__(self, stop_words=None):
        # BUG FIX: the default was `stop_words=nltk.corpus.stopwords.words()`,
        # which runs at class-definition (import) time — it fails if the NLTK
        # corpus is not downloaded, and the resulting list is shared by every
        # instance. Resolve the default lazily instead.
        if stop_words is None:
            stop_words = nltk.corpus.stopwords.words()
        self.clf_ = Pipeline([
            ('colext', TextSelector('Message')),
            ('preprocessor', DispatchPreprocessor(SUB_DICT)),
            ('tfidf', TfidfVectorizer(tokenizer=tokenizer, stop_words=stop_words,
                                      min_df=.0025, max_df=0.25, ngram_range=(1, 3))),
            ('svd', TruncatedSVD(algorithm='randomized', n_components=300)),
            ('clf', XGBClassifier(max_depth=3, n_estimators=300, learning_rate=0.1))
        ])

    def fit(self, X, y=None):
        """Fit the underlying pipeline."""
        self.clf_.fit(X, y)
        return self

    def predict(self, X):
        """Predict class labels via the underlying pipeline."""
        return self.clf_.predict(X)

    def predict_proba(self, X):
        """Predict class probabilities via the underlying pipeline."""
        return self.clf_.predict_proba(X)
|
import random
# Conclusion: not a great game, because the player can always enter the
# biggest number (or something that is not a number at all, or a number
# larger than the computer would ever pick).
n1 = random.randint(1, 10)
answer = input('Enter some integer: ')
answer = int(answer)  # ValueError if the input is not an integer
print(f'You choose {answer}, computer {n1}')
if n1 > answer:
    print(f'{n1} Bigger {answer}')
elif n1 < answer:
    print(f'{n1} Less {answer}')
else:
    print(f'{n1} Equal {answer}')
|
#!/usr/bin/python2
# coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
# Python 2 only: force the default string encoding to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
# Shared mechanize browser: ignore robots.txt, follow meta-refresh quickly,
# and present an Opera Mini user agent.
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
    # Print the exit banner and terminate the process.
    print "\033[1;96m[!] \x1b[1;91mExit"
    os.sys.exit()
def acak(x):
    # Prefix every character of x with a random "!<letter>" color token,
    # then hand the tokenized string to cetak() for rendering.
    palette = 'mhkbpcP'
    tokenized = ''
    for ch in x:
        tokenized += '!' + palette[random.randint(0, len(palette) - 1)] + ch
    return cetak(tokenized)
def cetak(x):
    # Replace "!<letter>" tokens with bright ANSI color escapes (colors
    # 31..37 map to the palette letters in order) and print the line.
    w = 'mhkbpcP'
    for i in w:
        j = w.index(i)
        x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
    # Reset the color at end of line; "!0" is an explicit reset token.
    x += '\033[0m'
    x = x.replace('!0','\033[0m')
    sys.stdout.write(x+'\n')
def jalan(z):
    # Typewriter effect: emit z one character at a time, 50 ms apart.
    for e in z + '\n':
        sys.stdout.write(e)
        sys.stdout.flush()
        time.sleep(0.05)
# ANSI-colored ASCII-art banner shown at startup.
logo = """ \x1b[1;93m______ \x1b[1;92m_______ \x1b[1;94m______ \x1b[1;91m___ _\n \x1b[1;93m| | \x1b[1;92m| _ |\x1b[1;94m| _ | \x1b[1;91m| | | |\n \x1b[1;93m| _ |\x1b[1;92m| |_| |\x1b[1;94m| | || \x1b[1;91m| |_| |\n \x1b[1;93m| | | |\x1b[1;92m| |\x1b[1;94m| |_||_ \x1b[1;91m| _|\n \x1b[1;93m| |_| |\x1b[1;92m| |\x1b[1;94m| __ |\x1b[1;91m| |_ \n \x1b[1;93m| |\x1b[1;92m| _ |\x1b[1;94m| | | |\x1b[1;91m| _ |\n \x1b[1;93m|______| \x1b[1;92m|__| |__|\x1b[1;94m|___| |_|\x1b[1;91m|___| |_| \x1b[1;96mFB\n\n \x1b[1;95m●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●\n ✫╬─ \x1b[1;92mReCode \x1b[1;91m: \x1b[1;93mRidwan58 \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mFB \x1b[1;92m \x1b[1;91m: \x1b[1;96mFacebook.com/Ridwankechil \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mGitHub \x1b[1;91m: \x1b[1;94mGithub.com/RidwanKechil \x1b[1;95m─╬✫\n ●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●
"""
def tik():
    # Animated "logging in" ("Sedang masuk") dots, one second per frame.
    titik = ['. ','.. ','... ']
    for o in titik:
        print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
# Shared mutable state for the worker threads.
back = 0
threads = []
berhasil = []   # presumably successful logins ("berhasil" = success) — TODO confirm
cekpoint = []   # presumably accounts hitting a checkpoint — TODO confirm
oks = []
id = []         # NOTE(review): shadows the builtin `id`
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
def siapa():
    # Ask for the user's name; re-prompt (recursively) on empty input,
    # then greet the user and continue to the SC login screen.
    os.system('clear')
    nama = raw_input("\033[1;97mSiapa nama kamu ? \033[1;91m: \033[1;92m")
    if nama =="":
        print"\033[1;96m[!] \033[1;91mIsi yang benar"
        time.sleep(1)
        siapa()
    else:
        os.system('clear')
        jalan("\033[1;97mSelamat datang \033[1;92m" +nama+ "\n\033[1;97mTerimakasih telah menggunakan tools ini !!")
        time.sleep(1)
        loginSC()
def loginSC():
os.system('clear')
print"\033[1;97mSilahkan login SC nya dulu bosque\n"
username = raw_input("\033[1;96m[*] \033[1;97mUsername \033[1;91m: \033[1;92m")
password = raw_input("\033[1;96m[*] \033[1;97mPassword \033[1;91m: \033[1;92m")
if username =="Ridwan" and password =="Kechil":
print"\033[1;96m[✓] \033[1;92mLogin success"
time.sleep(1)
login()
else:
print"\033[1;96m[!] \033[1;91mSalah!!"
time.sleep(1)
LoginSC()
|
from launch import LaunchDescription
import launch_ros.actions
def generate_launch_description():
    """ROS 2 launch entry point: start simple_sub_node with screen output."""
    return LaunchDescription([
        launch_ros.actions.Node(
            package='topic_subscriber_pkg', executable='simple_sub_node', output='screen'),
    ])
|
from rest_api.app import setup_app
from rest_api.config import DebugConfig, ProductionConfig
# Module-level app uses the production config (for WSGI servers importing `app`).
app = setup_app(config=ProductionConfig)
if __name__ == "__main__":
    # When run directly, rebuild with the debug config and serve locally.
    app = setup_app(config=DebugConfig)
    app.run(host="localhost", port=5000)
|
import sys
import threading
from functools import wraps
from typing import Union, Tuple, Callable, Optional, List, Type
from bugsnag.configuration import Configuration, RequestConfiguration
from bugsnag.event import Event
from bugsnag.handlers import BugsnagHandler
from bugsnag.sessiontracker import SessionTracker
import bugsnag
__all__ = ('Client',)
class Client:
    """
    A Bugsnag monitoring and reporting client.
    >>> client = Client(api_key='...') # doctest: +SKIP
    """
    def __init__(self, configuration: Optional[Configuration] = None,
                 install_sys_hook=True, **kwargs):
        self.configuration = configuration or Configuration() # type: Configuration # noqa: E501
        self.session_tracker = SessionTracker(self.configuration)
        self.configuration.configure(**kwargs)
        if install_sys_hook:
            self.install_sys_hook()

    def capture(self,
                exceptions: Union[Tuple[Type, ...], Callable, None] = None,
                **options):
        """
        Run a block of code within the clients context.
        Any exception raised will be reported to bugsnag.
        >>> with client.capture(): # doctest: +SKIP
        ...     raise Exception('an exception passed to bugsnag then reraised')
        The context can optionally include specific types to capture.
        >>> with client.capture((TypeError,)): # doctest: +SKIP
        ...     raise Exception('an exception which does get captured')
        Alternately, functions can be decorated to capture any
        exceptions thrown during execution and reraised.
        >>> @client.capture # doctest: +SKIP
        ... def foo():
        ...     raise Exception('an exception passed to bugsnag then reraised')
        The decoration can optionally include specific types to capture.
        >>> @client.capture((TypeError,)) # doctest: +SKIP
        ... def foo():
        ...     raise Exception('an exception which does not get captured')
        """
        # Bare-decorator form: `exceptions` is actually the decorated function.
        if callable(exceptions):
            return ClientContext(self, (Exception,))(exceptions)
        return ClientContext(self, exceptions, **options)

    def notify(self, exception: BaseException, asynchronous=None, **options):
        """
        Notify bugsnag of an exception.
        >>> client.notify(Exception('Example')) # doctest: +SKIP
        """
        event = Event(exception, self.configuration,
                      RequestConfiguration.get_instance(), **options)
        self.deliver(event, asynchronous=asynchronous)

    def notify_exc_info(self, exc_type, exc_value, traceback,
                        asynchronous=None, **options):
        """
        Notify bugsnag of an exception via exc_info.
        >>> client.notify_exc_info(*sys.exc_info()) # doctest: +SKIP
        """
        exception = exc_value
        options['traceback'] = traceback
        event = Event(exception, self.configuration,
                      RequestConfiguration.get_instance(), **options)
        self.deliver(event, asynchronous=asynchronous)

    def excepthook(self, exc_type, exc_value, traceback):
        # Used as the (chained) sys/threading excepthook: report uncaught
        # exceptions as unhandled errors, if auto_notify is enabled.
        if self.configuration.auto_notify:
            self.notify_exc_info(
                exc_type, exc_value, traceback,
                severity='error',
                unhandled=True,
                severity_reason={
                    'type': 'unhandledException'
                })

    def install_sys_hook(self):
        # Save and chain the previous hooks so existing behaviour is kept;
        # tag the new hooks with this client so uninstall can verify ownership.
        self.sys_excepthook = sys.excepthook

        def excepthook(*exc_info):
            self.excepthook(*exc_info)
            if self.sys_excepthook:
                self.sys_excepthook(*exc_info)
        sys.excepthook = excepthook
        sys.excepthook.bugsnag_client = self

        # threading.excepthook exists on Python 3.8+ only.
        if hasattr(threading, 'excepthook'):
            self.threading_excepthook = threading.excepthook

            def threadhook(args):
                self.excepthook(args[0], args[1], args[2])
                if self.threading_excepthook:
                    self.threading_excepthook(args)
            threading.excepthook = threadhook
            threading.excepthook.bugsnag_client = self

    def uninstall_sys_hook(self):
        # Restore the saved hooks only if the installed ones are still ours.
        client = getattr(sys.excepthook, 'bugsnag_client', None)
        if client is self:
            sys.excepthook = self.sys_excepthook
            self.sys_excepthook = None
        if hasattr(threading, 'excepthook'):
            client = getattr(threading.excepthook, 'bugsnag_client', None)
            if client is self:
                threading.excepthook = self.threading_excepthook
                self.threading_excepthook = None

    def deliver(self, event: Event,
                asynchronous: Optional[bool] = None):
        """
        Deliver the exception event to Bugsnag.
        """
        if not self.should_deliver(event):
            return

        def run_middleware():
            # Snapshot severity so a user callback changing it is detectable
            # in send_payload below.
            initial_severity = event.severity
            initial_reason = event.severity_reason.copy()

            def send_payload():
                if asynchronous is None:
                    options = {}
                else:
                    options = {'asynchronous': asynchronous}
                if event.api_key is None:
                    bugsnag.logger.warning(
                        "No API key configured, couldn't notify")
                    return
                if initial_severity != event.severity:
                    event.severity_reason = {
                        'type': 'userCallbackSetSeverity'
                    }
                else:
                    event.severity_reason = initial_reason
                payload = event._payload()
                try:
                    self.configuration.delivery.deliver(self.configuration,
                                                        payload, options)
                except Exception as e:
                    bugsnag.logger.exception('Notifying Bugsnag failed %s', e)
                # Trigger session delivery
                self.session_tracker.send_sessions()
            self.configuration.middleware.run(event, send_payload)
        self.configuration.internal_middleware.run(event, run_middleware)

    def should_deliver(self, event: Event) -> bool:
        # Return early if we shouldn't notify for current release stage
        if not self.configuration.should_notify():
            return False
        # Return early if we should ignore exceptions of this type
        if self.configuration.should_ignore(event.exception):
            return False
        return True

    def log_handler(self, extra_fields: Optional[List[str]] = None) -> BugsnagHandler:
        """Build a logging handler that reports log records to this client."""
        return BugsnagHandler(client=self, extra_fields=extra_fields)
class ClientContext:
    """Context manager / decorator that reports matching exceptions to the
    client and re-raises them (used by Client.capture)."""
    def __init__(self, client,
                 exception_types: Optional[Tuple[Type, ...]] = None,
                 **options):
        self.client = client
        self.options = options
        # A user-chosen severity gets its own severity_reason marker.
        if 'severity' in options:
            options['severity_reason'] = dict(type='userContextSetSeverity')
        self.exception_types = exception_types or (Exception,)

    def __call__(self, function: Callable):
        # Decorator form: notify on matching exceptions, then re-raise.
        @wraps(function)
        def decorate(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except self.exception_types as e:
                self.client.notify(e, source_func=function, **self.options)
                raise
        return decorate

    def __enter__(self):
        # Intentionally returns None: the with-block has no use for the
        # context object itself.
        pass

    def __exit__(self, *exc_info):
        if any(exc_info):
            if any(isinstance(exc_info[1], e) for e in self.exception_types):
                self.client.notify_exc_info(*exc_info, **self.options)
        # Returning False re-raises the exception after reporting it.
        return False
|
#coding=gbk
import zipfile
import os
def zip_files(files, zip_name):
    """Compress each path in `files` into the archive `zip_name` (DEFLATE).

    :param files: iterable of file paths to add
    :param zip_name: path of the zip archive to create
    """
    # BUG FIX: use a context manager so the archive is closed (and its
    # central directory written) even if write() raises; also stop shadowing
    # the builtin `zip`.
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as archive:
        for path in files:
            print('compressing', path)
            archive.write(path)
    print('compressing finished')
if __name__=='__main__':
    # Demo: zip three files from the current directory into all.zip.
    files = ['a.txt','b.txt','file1.txt']
    zip_file = 'all.zip'
    zip_files(files, zip_file)
|
# Setup in PyCharm
# Smoke-test script: prints a greeting to verify the interpreter runs.
print("Hello World")
|
# -*- coding:utf-8 -*-
"""
一些工具函数
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
# Signature keys used in the exported SavedModel's serving signature.
input_key = 'input_img'
output_key = 'pred'
def save_model(sess,
               inp_tensor,
               out_tensor,
               output_path):
    '''
    Save the model as a SavedModel directory (TF1 SavedModelBuilder API).
    :param sess: Tensorflow session
    :param inp_tensor: the network's input tensor
    :param out_tensor: the network's output tensor
    :param output_path: directory to write the exported model to
    '''
    inputs = {input_key: inp_tensor}
    outputs = {output_key: out_tensor}
    signature_def_map = {
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            tf.saved_model.signature_def_utils.predict_signature_def(inputs, outputs)
    }
    b = tf.saved_model.builder.SavedModelBuilder(output_path)
    b.add_meta_graph_and_variables(
        sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map=signature_def_map,
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
    b.save()
    return
def write_pb(sess, graph, output_path, pb_fname):
    ''' Freeze the graph and write it as a binary GraphDef (.pb) file.
    :param sess: Tensorflow session
    :param graph: Tensorflow graph
    :param output_path: directory for the output file
    :param pb_fname: output file name
    '''
    origin_def = graph.as_graph_def()
    # NOTE(review): the output node name 'output' is hard-coded — the graph
    # must actually contain a node with that name.
    graph_def = tf.graph_util.convert_variables_to_constants(sess, origin_def, ['output'])
    graph_def = tf.graph_util.remove_training_nodes(graph_def)
    tf.train.write_graph(graph_def, output_path, pb_fname, as_text=False)
    return
def mkdir_p(path):
    """Create `path` like `mkdir -p`: no error if it already exists as a dir."""
    try:
        os.makedirs(path)
    except OSError:
        # Swallow the error only when the path now exists as a directory
        # (already-existed or lost a creation race); otherwise re-raise.
        if not os.path.isdir(path):
            raise
if __name__ == '__main__':
    # Library module: nothing to run directly.
    pass
|
import bottle
import os
import hashlib
from model import Uporabnik, Dnevnik, seznam_zvrsti, seznam_ocen
from datetime import date
imenik_s_podatki = 'uporabniki'  # directory holding one JSON file per user
uporabniki = {}  # username -> Uporabnik, populated at startup below
# NOTE(review): cookie-signing secret is hard-coded in source — move to
# configuration for any real deployment.
skrivnost = 'TO JE ENA HUDA SKRIVNOST'
if not os.path.isdir(imenik_s_podatki):
    os.mkdir(imenik_s_podatki)
# Load every persisted user into memory.
for ime_datoteke in os.listdir(imenik_s_podatki):
    uporabnik = Uporabnik.nalozi_stanje(os.path.join(imenik_s_podatki, ime_datoteke))
    uporabniki[uporabnik.uporabnisko_ime] = uporabnik
def trenutni_uporabnik():
    """Return the logged-in Uporabnik, or redirect to the login page."""
    uporabnisko_ime = bottle.request.get_cookie('uporabnisko_ime', secret=skrivnost)
    if uporabnisko_ime is None:
        # bottle.redirect raises, so the return below is not reached.
        bottle.redirect('/prijava/')
    return uporabniki[uporabnisko_ime]

def dnevnik_uporabnika():
    """Return the music diary of the currently logged-in user."""
    return trenutni_uporabnik().dnevnik
def shrani_trenutnega_uporabnika():
    """Persist the logged-in user's state to its JSON file."""
    uporabnik = trenutni_uporabnik()
    # CONSISTENCY FIX: use the imenik_s_podatki constant instead of the
    # hard-coded 'uporabniki' literal, so the data directory is defined once.
    uporabnik.shrani_stanje(os.path.join(imenik_s_podatki, f'{uporabnik.uporabnisko_ime}.json'))
# POMOŽNE FUNKCIJE
def _preveri_leto_izdaje(niz):
try:
int(niz)
except:
raise ValueError(f'Pri letu izdaje morate vnesti število!')
def _preveri_select(izbira):
if izbira == None:
raise ValueError('Morate izbrati eno izmed možnosti')
pomozni_slovar = {}  # stores the chosen artist or genre under the key 'spremenljivka'
# GET DEKORATORJI
@bottle.get('/prijava/')
def prijava_get():
    """Render the login page."""
    return bottle.template('prijava.html')

@bottle.get('/')
def osnovna_stran():
    """Root URL redirects to the diary view."""
    bottle.redirect('/dnevnik/')

@bottle.get('/dnevnik/')
def zacetna_stran():
    """Diary in insertion order, newest first."""
    dnevnik = dnevnik_uporabnika()
    return bottle.template(
        'glasbeni_dnevnik.html',
        dnevnik=dnevnik,
        zvrsti=seznam_zvrsti,
        ocene=seznam_ocen,
        albumi=dnevnik.seznam_albumov[::-1]
    )

@bottle.get('/dnevnik-po-abecedi/')
def dnevnik_po_abecedi():
    """Diary sorted alphabetically."""
    dnevnik = dnevnik_uporabnika()
    return bottle.template(
        'glasbeni_dnevnik.html',
        dnevnik=dnevnik,
        zvrsti=seznam_zvrsti,
        ocene=seznam_ocen,
        albumi=dnevnik.sortiraj_po_abecedi()
    )

@bottle.get('/dnevnik-po-letu/')
def dnevnik_po_letu():
    """Diary sorted by release year."""
    dnevnik = dnevnik_uporabnika()
    return bottle.template(
        'glasbeni_dnevnik.html',
        dnevnik=dnevnik,
        zvrsti=seznam_zvrsti,
        ocene=seznam_ocen,
        albumi=dnevnik.sortiraj_po_letu()
    )

@bottle.get('/dnevnik-po-izvajalcu/')
def dnevnik_po_izvajalcu():
    """Diary filtered/sorted by the artist previously stored via POST."""
    dnevnik = dnevnik_uporabnika()
    # No artist chosen yet (e.g. direct navigation): fall back to the default view.
    if len(pomozni_slovar) == 0:
        bottle.redirect('/')
    return bottle.template(
        'glasbeni_dnevnik.html',
        dnevnik=dnevnik,
        zvrsti=seznam_zvrsti,
        ocene=seznam_ocen,
        albumi=dnevnik.sortiraj_po_izvajalcu(pomozni_slovar['spremenljivka'])
    )

@bottle.get('/dnevnik-po-zvrsti/')
def dnevnik_po_zvrsti():
    """Diary filtered/sorted by the genre previously stored via POST."""
    dnevnik = dnevnik_uporabnika()
    if len(pomozni_slovar) == 0:
        bottle.redirect('/')
    return bottle.template(
        'glasbeni_dnevnik.html',
        dnevnik=dnevnik,
        zvrsti=seznam_zvrsti,
        ocene=seznam_ocen,
        albumi=dnevnik.sortiraj_po_zvrsti(pomozni_slovar['spremenljivka'])
    )

@bottle.get('/info/')
def info():
    """Render the static info page."""
    return bottle.template('info.html')
# POST DEKORATORJI
@bottle.post('/prijava/')
def prijava_post():
    """Handle login/registration: hash the password, set the session cookie."""
    uporabnisko_ime = bottle.request.forms.getunicode('uporabnisko_ime')
    geslo = bottle.request.forms.getunicode('geslo')
    h = hashlib.blake2b()
    h.update(geslo.encode(encoding='utf-8'))
    zasifrirano_geslo = h.hexdigest()
    # NOTE(review): if 'nov_racun' is checked but the username is taken, this
    # silently falls through to a normal login attempt — confirm intended.
    if 'nov_racun' in bottle.request.forms and uporabnisko_ime not in uporabniki:
        uporabnik = Uporabnik(
            uporabnisko_ime,
            zasifrirano_geslo,
            Dnevnik()
        )
        uporabniki[uporabnisko_ime] = uporabnik
    else:
        uporabnik = uporabniki[uporabnisko_ime]
        uporabnik.preveri_geslo(zasifrirano_geslo)
    bottle.response.set_cookie('uporabnisko_ime', uporabnik.uporabnisko_ime, path='/', secret=skrivnost)
    bottle.redirect('/')

@bottle.post('/odjava/')
def odjava():
    """Log out: drop the session cookie and return to the start page."""
    bottle.response.delete_cookie('uporabnisko_ime', path='/')
    bottle.redirect('/')
@bottle.post('/dodaj-album/')
def nov_album():
    """Validate the new-album form, add the album, persist the user."""
    dnevnik = dnevnik_uporabnika()
    # Validate first; these raise ValueError on bad input.
    # CLEANUP: the validator's return value (always None) was assigned to
    # leto_izdaje and then overwritten below — the dead assignment is removed.
    _preveri_leto_izdaje(bottle.request.forms['leto izdaje'])
    _preveri_select(bottle.request.forms['zvrst'])
    _preveri_select(bottle.request.forms['ocena'])
    naslov = bottle.request.forms.getunicode('naslov')
    izvajalec = bottle.request.forms.getunicode('izvajalec')
    datum = date.today()
    leto_izdaje = int(bottle.request.forms['leto izdaje'])
    zvrst = bottle.request.forms['zvrst']
    ocena = int(bottle.request.forms['ocena'])
    opis = bottle.request.forms.getunicode('opis')
    dnevnik.nov_album(naslov, izvajalec, datum, leto_izdaje, zvrst, ocena, opis)
    shrani_trenutnega_uporabnika()
    bottle.redirect('/')
@bottle.post('/sortiraj-po-datumu/')
def po_datumu():
    """Date order is the default view — just go back to it."""
    bottle.redirect('/')

@bottle.post('/sortiraj-po-abecedi/')
def po_abecedi():
    bottle.redirect('/dnevnik-po-abecedi/')

@bottle.post('/sortiraj-po-letu/')
def po_letu():
    bottle.redirect('/dnevnik-po-letu/')

@bottle.post('/sortiraj-po-izvajalcu/')
def po_izvajalcu():
    """Remember the chosen artist, then show the artist-filtered view."""
    izvajalec = bottle.request.forms.getunicode('izvajalec')
    _preveri_select(izvajalec)
    pomozni_slovar['spremenljivka'] = izvajalec
    bottle.redirect('/dnevnik-po-izvajalcu/')

@bottle.post('/sortiraj-po-zvrsti/')
def po_zvrsti():
    """Remember the chosen genre, then show the genre-filtered view."""
    zvrst = bottle.request.forms.getunicode('zvrst')
    _preveri_select(zvrst)
    pomozni_slovar['spremenljivka'] = zvrst
    bottle.redirect('/dnevnik-po-zvrsti/')

# Development server; reloader restarts on file changes.
bottle.run(debug=True, reloader=True)
|
import networkx as nx
if __name__ == "__main__":
    # Build a small 4-node demo graph and print its node view.
    G = nx.Graph()
    G.add_node(1)
    G.add_nodes_from([2, 3])
    G.add_nodes_from([
        (4, {"color": "red"})  # node plus an attribute dict
    ])
    G.add_edge(1, 2)
    G.add_edges_from([(2, 3), (1, 3)])
    print(G.nodes)
|
# Author: Zequn Yu
# PID: A14712777
from __future__ import division
#import pandas as pd
import numpy
import math
# Helper: parse whitespace-separated rows from an open file into `data`.
def add_data(line, data, file):
    """Append line.split() for `line` and every remaining line of `file`."""
    current = line
    while current:
        data.append(current.split())
        current = file.readline()
# read data from files
# containers for the parsed rows
train_data = []
test_data = []
# open the input files
train_file = open("pa5train.txt", "r")
test_file = open("pa5test.txt", "r")
# the first line of each file primes the reader loop
train_line = train_file.readline()
test_line = test_file.readline()
# read the remaining rows
add_data(train_line, train_data, train_file)
add_data(test_line, test_data, test_file)
# BUG FIX: label_idx was computed before add_data() ran, while train_data
# was still empty, so train_data[0] raised IndexError. Compute it after
# the data is loaded.
label_idx = len(train_data[0]) - 1
# test read
# print("train: ", train_data)
# print("test:", test_data
# Weak learners from the assignment:
#   h_{i,+}(x) = 1 if word i occurs in email x, else -1
#   h_{i,-}(x) = the flipped version
def classifier_h(data, idx, sign):
    """Evaluate the weak learner for feature `idx` on sample `data`.

    sign "+": +1 when data[idx] == 1, else -1.
    any other sign: +1 when data[idx] == 0, else -1.
    """
    if sign == "+":
        fires = data[idx] == 1
    else:
        fires = data[idx] == 0
    return 1 if fires else -1
# function to get error
def cal_error(list):
    # NOTE(review): despite the name, this counts rows where the stored label
    # EQUALS the weak learner's output (a match count, not an error count),
    # and it always evaluates the learner on the label column itself
    # (label_idx) with sign "+". Verify the intended comparison and feature.
    tol_err = 0
    # check each e-mail feature read in
    for email in list:
        # check each feature
        # for feature in range(0, label_idx):
        train_l = email[label_idx]
        should_l = classifier_h(email, label_idx, "+")
        if(train_l == should_l):
            tol_err = tol_err + 1
    return tol_err
# function to compute the boosting weight (alpha) for the best weak learner
def update_alpha(b_err):
    """Return [(alpha, h, word)] for the best learner described by `b_err`.

    :param b_err: object with attributes e (weighted error), h (hypothesis)
                  and word (feature index).
    """
    # BUG FIX: c_list was initialised as a tuple `()`, which has no .append()
    # and raised AttributeError on every call; it must be a list.
    c_list = []
    alpha = .5 * numpy.log((1 - b_err.e) / b_err.e)
    c_list.append((alpha, b_err.h, b_err.word))
    return c_list
# the main part of bossting
def boost(data):
    # NOTE(review): this function appears unfinished and cannot run as-is:
    #  - cal_error(r) is called with an int, but cal_error iterates emails;
    #  - update_alpha(cur_err) receives a float, but accesses .e/.h/.word;
    #  - `y` and `x` below are undefined names;
    #  - `d` is rebuilt each iteration, then reset to 0 before sum(d);
    #  - the return is commented out, so the function returns None;
    #  - it ends by calling print_stat, which itself calls boost (mutual
    #    recursion). Each point needs confirming against the author's intent.
    class_tuple = ()
    # boosting round counts to evaluate
    boost = [3, 7, 10, 15, 20]
    # check different boost
    for b in boost:
        # print("Now boost is: ", b )
        cur_err = 100.0
        cur_feature = -1
        cur_label = 2
        weight = [1/len(data)] * len(data)
        # run for rounds of range
        # update feature, label and error
        rounds = len(data[0]) - 1
        for r in range(rounds):
            # calculate the number of error
            tmp_err = cal_error(r)
            # check if need to update error
            if(tmp_err < cur_err):
                cur_err = tmp_err
                cur_feature = r
                cur_label = 1
            # check if flip label
            elif(1 - tmp_err < cur_err):
                cur_err = 1 - tmp_err
                cur_feature = 1
                cur_label = -1
            # update alpha
            class_tuple = update_alpha(cur_err)
            # update
            for i in range(len(data)):
                d = [weight[i] * numpy.e ** (-class_tuple[0] * y[i] * cur_err.h(x.iloc[i, :], cur_err.word))]
            # set normalize
            d = 0
            sum_d = sum(d)
            update_d = [(i / sum_d) for i in d]
    #return class_tuple
    print_stat(train_data, test_data, tuple)
# Weighted vote of the boosted weak learners for one sample.
def cal_label(feature, list):
    """Return +1.0 or -1.0: the sign of the alpha-weighted learner votes.

    Each entry of `list` is (feature index, polarity, alpha); polarity 1
    means the learner fires on value 1, anything else fires on value 0.
    Raises ZeroDivisionError on a perfectly tied vote (as the original did).
    """
    score = 0
    for learner in list:
        idx, polarity, alpha = learner[0], learner[1], learner[2]
        expected = 1 if polarity == 1 else 0
        if feature[idx] == expected:
            score += alpha
        else:
            score -= alpha
    return (score / math.fabs(score))
# predict
def predict (data, classif):
    # Apply the boosted classifier row-wise; `data` is expected to support
    # .apply(..., axis=1) (a pandas DataFrame).
    pred = data.apply(lambda x: classify(x, classif), axis=1)
    return pred
# Final strong classifier: sign of the alpha-weighted learner outputs.
def classify(x, classifiers):
    """Return numpy.sign of the weighted vote of (alpha, h, word) triples."""
    weighted_sum = 0
    for alpha, h, word in classifiers:
        weighted_sum += alpha * h(x, word)
    return numpy.sign(weighted_sum)
# cal train error
def t_err(t_data, tuple):
    """Fraction of rows in t_data misclassified by the boosted classifier."""
    err = 0
    for t in t_data:
        currLabel = cal_label(t, tuple)
        if currLabel != t[-1]:
            err += 1
    # BUG FIX: the original divided by len(train_data) — a module-level
    # global — instead of the data actually scored; mirror e_err and use
    # the argument's length.
    err = (err / float(len(t_data)))
    return err
# cal test error
def e_err(e_data, tuple):
    # Fraction of rows in e_data misclassified by the boosted classifier.
    # NOTE(review): the parameter name `tuple` shadows the builtin.
    err = 0
    for t in e_data:
        currLabel = cal_label(t, tuple)
        if currLabel != t[-1]:
            err += 1
    err = err / float(len(e_data))
    return err
def print_stat(t_data, e_data, tuple):
    # NOTE(review): boost() currently returns None (its return is commented
    # out) and itself calls print_stat, so this pair mutually recurses;
    # the parameter name `tuple` also shadows the builtin. Needs rework.
    # print("start training data:")
    t = boost(t_data)
    # print("run predict: ")
    p = predict(t_data, t)
    tr_err = t_err(t_data, tuple)
    #print("Now the data err is: ", tr_err)
    e = boost(e_data)
    # peinr("run predict: ")
    p2 = predict(e_data, e)
    te_err = e_err(e_data, tuple)
# NOTE(review): dangling module-level string below looks like commented-out
# scratch code kept for reference.
'''
rounds = [3,4,7,10,15,20]
for r in rounds:
    for t in dataList:
        curr_l = calLabel(t, tuple)
        if curLabel != t[-1]:
            errorCount += 1
readFile("..\data\pa5test.txt")
errorCount = 0
for t in dataList:
    curr_l = calLabel(t, tuple)
    if curr_l != t[-1]:
        errorCount += 1
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.