text stringlengths 8 6.05M |
|---|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import seaborn as sns
from sklearn.preprocessing import scale
import sklearn.linear_model as skl_lm
from sklearn.metrics import mean_squared_error, r2_score
import statsmodels.api as sm
import statsmodels.formula.api as smf
# using tensorflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
import scipy as sp
# get advertising data: sales (in thousands of units) as a function of
# advertising budgets (in thousands of dollars) for TV, radio, newspaper media
# Load the ISLR datasets: Advertising (sales in thousands of units vs.
# TV/radio/newspaper ad budgets in thousands of dollars) and Credit.
# usecols drops the CSV's leading index column.
advertising = pd.read_csv('./../Data/Advertising.csv',usecols=[1,2,3,4])
credit = pd.read_csv('./../Data/Credit.csv',usecols=list(range(1,12)))
#advertising.info()
#print(advertising.head(3))
#credit.info()
#print(credit.Student)
#print(credit.head(3))
### map credit.Student no-yes vector to 0-1 vector
###credit['Student2'] = credit.Student.map({'No': 0, 'Yes':1})
###print(credit.head(3))
### read 'Auto.csv' data, equate '?' with N/A and drop those rows with N/A
##auto = pd.read_csv('./../Data/Auto.csv',na_values='?').dropna()
###print(len(auto))
#
# Perform linear regression with seaborn.regplot.  Parameters used:
#
# order : int, optional
#     If order is greater than 1, numpy.polyfit is used to estimate a
#     polynomial regression.  order = 1 gives a plain linear fit.
#
# ci : int in [0, 100] or None, optional
#     Size of the bootstrap confidence interval drawn as translucent bands
#     around the regression line.  None skips the (expensive) bootstrap.
#
# {scatter,line}_kws : dictionaries
#     Extra keyword arguments forwarded to plt.scatter / plt.plot;
#     {'color':'r', 's':119} --> red markers of size 119.
#
# NOTE(review): seaborn >= 0.12 removed positional data arguments for
# regplot (must be x=..., y=...) -- confirm the pinned seaborn version.
p = sns.regplot(advertising.TV,advertising.sales,order=1,ci=None,scatter_kws={'color':'r', 's':119})
plt.xlim(-10,310)
# NOTE(review): 'ymin' was renamed to 'bottom' in matplotlib 3.0; this call
# fails on modern matplotlib -- confirm the pinned version.
plt.ylim(ymin=0)
plt.show()
# =================================================================
# Regression coefficients recovered from the points seaborn drew, via
# sp.stats.linregress on the fitted line's own (x, y) data.
#print(p.get_lines()[0].get_xdata())
#p.get_lines()[0].get_ydata()
slope, intercept, r_value, p_value, std_err = sp.stats.linregress(x=p.get_lines()[0].get_xdata(),y=p.get_lines()[0].get_ydata())
print("intercept from sp.stats.linregress = ",intercept)
print("slope from sp.stats.linregress = ",slope)
# =================================================================
# Regression coefficients fitted directly with scikit-learn.
regr = skl_lm.LinearRegression()
### X = scale(advertising.TV,with_mean=False,with_std=False).reshape(-1,1) # reshape to (200,1), Note: csv data has already been centered
# sklearn expects a 2-D design matrix, hence the reshape to (n_samples, 1).
X = (advertising.TV).values.reshape(-1,1)
y = advertising.sales
regr.fit(X,y)
print("intercept from skl_lm.LinearRegression() = ",regr.intercept_)
print("slope from skl_lm.LinearRegression() = ",regr.coef_)
# plot scale: RSS values are divided by 1000 so axis labels stay small
plot_scale = 1000.
# create grid coordinates for plotting: a 50x50 grid of (beta0, beta1)
# candidates centered on the fitted coefficients
B0 = np.linspace(regr.intercept_-2, regr.intercept_+2,50)
# NOTE(review): regr.coef_ is a length-1 array, so B1 has shape (50, 1)
# rather than (50,) -- meshgrid/indexing below appears to tolerate it, but
# verify against the installed numpy version.
B1 = np.linspace(regr.coef_-0.02,regr.coef_+0.02,50)
b0_mesh,b1_mesh = np.meshgrid(B0,B1,indexing='xy')
Z = np.zeros((B0.size,B1.size))
# calculate Z-values (residual sum-of-squares) at every grid point
for (i,j),v in np.ndenumerate(Z):
    Z[i,j] = ((y-(b0_mesh[i,j]+X.ravel()*b1_mesh[i,j]))**2.0).sum()/plot_scale # X.ravel() reshapes back to original so that it is the same shape as y
# Minimized RSS at the fitted (intercept, slope)
min_RSS_label = r'$\beta_0$, $\beta_1$ for minimized RSS'
min_rss = np.sum((regr.intercept_+regr.coef_*X - y.values.reshape(-1,1))**2)/plot_scale
print(min_rss)
fig = plt.figure(figsize=(15,6))
fig.suptitle('RSS - Regression coefficients', fontsize=20)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
# Left plot: 2-D contour of the RSS surface with the minimum marked
CS = ax1.contour(b0_mesh, b1_mesh, Z, cmap=plt.cm.Set1, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax1.scatter(regr.intercept_, regr.coef_[0], c='r', label=min_RSS_label)
ax1.clabel(CS, inline=True, fontsize=10, fmt='%1.1f')
# Right plot: 3-D RSS surface plus a contour projected onto the floor
ax2.plot_surface(b0_mesh, b1_mesh, Z, rstride=3, cstride=3, alpha=0.3)
ax2.contour(b0_mesh, b1_mesh, Z, zdir='z', offset=Z.min(), cmap=plt.cm.Set1,
            alpha=0.4, levels=[2.15, 2.2, 2.3, 2.5, 3])
ax2.scatter3D(regr.intercept_, regr.coef_[0], min_rss, c='r', label=min_RSS_label)
ax2.set_zlabel(r'RSS ($\times 10^3$)')
ax2.set_zlim(Z.min(),Z.max())
ax2.set_ylim(0.02,0.07)
# settings common to both plots
for ax in fig.axes:
    ax.set_xlabel(r'$\beta_0$', fontsize=17)
    ax.set_ylabel(r'$\beta_1$', fontsize=17)
    ax.set_yticks([0.03,0.04,0.05,0.06])
    ax.legend()
|
from json import loads
from pathlib import Path
from typing import Union
from django.contrib.auth.models import ( # type: ignore
Permission,
Group,
)
from django.contrib.contenttypes.models import ContentType # type: ignore
from django.core.management.base import BaseCommand # type: ignore
from toolz import pluck # type: ignore
Json = Union[dict, list]
class Command(BaseCommand):
    help = "Imports state permissions, including hard-coding their ids."

    def handle(self, *args, **options):  # pylint:disable=no-self-use
        """Create per-state section permissions and the groups that use them.

        Reads the state fixture for the list of state codes, ensures each
        state has add/change/delete/view permissions on the ``section``
        content type, then builds the per-state edit/view groups and the
        admin / CO / business-owner groups.
        """
        print(args, options)
        fd = Path("fixtures")
        state_models = _load_json(fd / "states.json")
        # Each fixture entry looks like {"fields": {"code": ...}, ...}.
        codes = [*pluck("code", pluck("fields", state_models))]
        section_model = ContentType.objects.get(
            app_label="carts_api", model="section"
        )
        section_model_id = section_model.id
        for code in codes:
            for perm in _create_permissions_for_state(code, section_model_id):
                # FIX: get_or_create is already idempotent and persists the
                # row itself; the original filter().count() == 0 pre-check
                # and the extra save() were redundant round trips.
                Permission.objects.get_or_create(**perm)
        for code in codes:
            _create_change_view_group_for_state(code)
        _create_permissions_for_admins()
        _create_permissions_for_co_users()
        _create_permissions_for_business_owners()
def _create_permissions_for_state(code: str, content_type: int) -> list:
def generate_perm(term: str, ctype: int, state: str) -> dict:
return {
"content_type_id": ctype,
"codename": f"{term}_state_{state.lower()}",
"name": f"Can {term} {state.upper()} sections",
}
verbs = ("add", "change", "delete", "view")
perms = []
for verb in verbs:
perms.append(generate_perm(verb, content_type, code))
return perms
def _create_change_view_group_for_state(code: str) -> None:
    """Create the edit/view group for one state, unless it already exists."""
    group_name = f"Users who can edit and view {code.upper()} sections"
    if Group.objects.filter(name=group_name).exists():
        # Already provisioned; nothing to do.
        return
    lowered = code.lower()
    change_perm = Permission.objects.get(codename=f"change_state_{lowered}")
    view_perm = Permission.objects.get(codename=f"view_state_{lowered}")
    assert change_perm and view_perm
    group = Group.objects.create(name=group_name)
    group.permissions.set([change_perm, view_perm])
    group.save()
def _create_permissions_for_admins() -> None:
    """Grant the "Admin users" group full CRUD on the admin-facing models."""
    group, _ = Group.objects.get_or_create(name="Admin users")
    models = (
        "appuser",
        "group",
        "logentry",
        "permission",
        "rolefromjobcode",
        "rolesfromjobcode",
        "rolefromusername",
        "session",
        "state",
        "statesfromusername",
        "user",
    )
    # One permission per (model, verb) pair, in the same order as before.
    assigned = [
        Permission.objects.get(codename=f"{verb}_{model}")
        for model in models
        for verb in ("add", "change", "delete", "view")
    ]
    group.permissions.set(assigned)
    group.save()
def _create_permissions_for_co_users() -> None:
    """Grant the "CO users" group full CRUD on the report-content models."""
    group, _ = Group.objects.get_or_create(name="CO users")
    models = (
        "acs",
        "fmap",
        "section",
        "sectionbase",
        "sectionschema",
        "state",
    )
    # One permission per (model, verb) pair, in the same order as before.
    assigned = [
        Permission.objects.get(codename=f"{verb}_{model}")
        for model in models
        for verb in ("add", "change", "delete", "view")
    ]
    group.permissions.set(assigned)
    group.save()
def _create_permissions_for_business_owners() -> None:
    """Grant the "Business owner users" group full CRUD on content models."""
    group, _ = Group.objects.get_or_create(name="Business owner users")
    models = (
        "acs",
        "fmap",
        "section",
        "sectionbase",
        "sectionschema",
        "state",
    )
    # One permission per (model, verb) pair, in the same order as before.
    assigned = [
        Permission.objects.get(codename=f"{verb}_{model}")
        for model in models
        for verb in ("add", "change", "delete", "view")
    ]
    group.permissions.set(assigned)
    group.save()
def _load_json(path: Path) -> Json:
return loads(path.read_text())
|
"""Fetch several URLs concurrently and print a preview of each response.

FIXES: the original was Python 2 only -- ``print`` statements and
``urllib.urlopen`` (moved to ``urllib.request.urlopen`` in Python 3) --
and spawned network threads at import time; a main guard now prevents that.
"""
from threading import Thread
from urllib.request import urlopen


def th(ur):
    """Download *ur* and print the first 100 bytes of the response body."""
    htmltext = urlopen(ur).read()
    print(htmltext[0:100])


if __name__ == '__main__':
    urls = "http://google.com http://cnn.com http://yahoo.com".split()
    # Keep a reference to every thread so we can join them all afterwards.
    threadlist = []
    for u in urls:
        # args must be a tuple, hence the trailing comma.
        t = Thread(target=th, args=(u,))
        t.start()
        threadlist.append(t)
    # join() blocks until each worker finishes, so the script does not exit
    # before all downloads complete.
    for b in threadlist:
        b.join()
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
class Category(models.Model):
    """A product category; categories nest one level via ``parent``."""

    # Self-referential FK: a null/blank parent marks a top-level category.
    # NOTE(review): no on_delete argument -- Django < 2.0 style; required on
    # modern Django.
    parent = models.ForeignKey('self', blank=True, null=True,
                               related_name='children')
    name = models.CharField(max_length=300)
    slug = models.SlugField(max_length=150, unique=True)
    description = models.TextField(blank=True)

    def __unicode__(self):
        # Python 2 display name: "Parent - Child" when nested, else the name.
        if self.parent:
            return u'%s - %s' % (self.parent.name,
                                 self.name)
        return self.name

    @property
    def sorted_children(self):
        """Child categories ordered alphabetically for display."""
        return self.children.order_by('name')
class Product(models.Model):
    """A sellable item, priced in dollars and linked to categories."""

    code = models.CharField(max_length=20)
    name = models.CharField(max_length=300)
    slug = models.SlugField(max_length=150, unique=True)
    description = models.TextField()
    size = models.TextField()
    # Photo stored as a plain path/URL string (see the commented-out
    # ImageField alternative below).
    photo = models.CharField(max_length=300)
    # max_digits=4 / decimal_places=2 caps the price at 99.99.
    price_in_dollars = models.DecimalField(max_digits=4,
                                           decimal_places=2)
    categories = models.ManyToManyField(Category)
    stock = models.PositiveIntegerField(default=0)
    items_sold = models.PositiveIntegerField(default=0)

    def __unicode__(self):
        return u'%s - %s' % (self.name, self.slug)

    @property
    def unit_price_str(self):
        """Price formatted for display, e.g. ``$9.99``."""
        return "$%s" % self.price_in_dollars

    # photo = models.ImageField(upload_to='product_photo',
    #                           blank=True)
class Cart(models.Model):
    """A shopping cart; line items live in the related ``CartItem`` rows."""

    def add_product(self, product, quantity=1):
        """Add *quantity* of *product*, merging into an existing line item.

        Returns the created or updated CartItem.
        """
        items = CartItem.objects.filter(cart=self, product=product)
        if items.exists():
            # Product already in the cart: bump the existing line's quantity.
            cart_item = items[0]
            cart_item.quantity = cart_item.quantity + int(quantity)
            cart_item.save()
        else:
            cart_item = CartItem(cart=self, quantity=quantity, product=product)
            cart_item.save()
        return cart_item

    def update_quantity(self, cart_item_id, quantity):
        """Set a line item's quantity; 0 deletes it, excess clamps to stock."""
        cart_item = self.items.get(pk=cart_item_id)
        if quantity == 0:
            cart_item.delete()
        else:
            if quantity <= cart_item.product.stock:
                cart_item.quantity = quantity
            else:
                # Never allow more units than are in stock.
                cart_item.quantity = cart_item.product.stock
            cart_item.save()
        self.save()
        return cart_item

    def delete_item(self, cart_item_id):
        """Remove one line item from the cart."""
        cart_item = self.items.get(pk=cart_item_id)
        cart_item.delete()
        self.save()

    def empty(self):
        """Delete every line item and then the cart row itself."""
        if self.pk:
            # Only persisted carts have related rows to clean up.
            self.items.all().delete()
            self.delete()

    @property
    def total_quantity(self):
        """Total number of units across all line items."""
        return sum([ci.quantity for ci in self.items.all()])

    @property
    def cart_total(self):
        """Sum of every line item's total price."""
        return sum([ci.item_total for ci in self.items.all()])

    @property
    def cart_total_str(self):
        """Cart total formatted for display, e.g. ``$42.50``."""
        return "$%s" % sum([ci.item_total for ci in self.items.all()])

    @property
    def is_not_valid(self):
        """Return the first unavailable item, or False when all fit stock."""
        for item in self.items.all():
            if not item.is_available:
                return item
        return False
class CartItem(models.Model):
    """One product line inside a cart."""

    # NOTE(review): no on_delete argument -- Django < 2.0 style FKs.
    cart = models.ForeignKey('Cart', related_name="items")
    quantity = models.IntegerField()
    product = models.ForeignKey('Product')

    def __unicode__(self):
        return u'%s - %s' % (self.product.name, self.quantity)

    @property
    def item_total(self):
        """Line total (quantity x unit price)."""
        return self.quantity * self.product.price_in_dollars

    @property
    def item_total_str(self):
        """Line total formatted for display, e.g. ``$19.98``."""
        return "$%s" % (self.quantity * self.product.price_in_dollars)

    @property
    def is_available(self):
        """True when the requested quantity can be covered by stock."""
        return self.quantity <= self.product.stock
class Address(models.Model):
    """A shipping/contact address captured at checkout."""

    # user = models.ForeignKey(User, blank=True, null=True, related_name="shipping_address")
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField()
    phone = models.CharField(max_length=20)
    address = models.CharField(max_length=255)
    # Optional second address line -- NOTE(review): not blank=True, so forms
    # will require it; confirm that is intended.
    address2 = models.CharField(max_length=255)
    city = models.CharField(max_length=20)
    zip_code = models.CharField(max_length=20)
    state = models.CharField(max_length=255)
    country = models.CharField(max_length=255)

    def __unicode__(self):
        #return '%s %s (%s, %s)' % (self.first_name, self.last_name, self.zip_code, self.city)
        return self.as_text

    @property
    def as_text(self):
        """Multi-line, human-readable rendering of the full address."""
        return """
First Name: %(first_name)s,
Last Name: %(last_name)s,
Address: %(address)s,
Zip-Code: %(zipcode)s,
City: %(city)s,
State: %(state)s,
Country: %(country)s
""" % {
            'first_name': self.first_name,
            'last_name': self.last_name,
            'address': '%s\n%s' % (self.address, self.address2),
            'zipcode': self.zip_code,
            'city': self.city,
            'state': self.state,
            'country': self.country,
        }
class Payment(models.Model):
    """Raw card details captured at checkout.

    SECURITY NOTE(review): the card number, expiry date and card code are
    stored in plaintext; this violates PCI-DSS and should be replaced with
    a payment-gateway token.
    """

    name_on_card = models.CharField(max_length=50)
    card_number = models.CharField(max_length=50)
    expiry_date = models.DateField()
    card_code = models.CharField(max_length=50)

    def __unicode__(self):
        return u'%s - %s' % (self.name_on_card, self.card_number)
class Order(models.Model):
    """A placed order with its payment/shipping lifecycle status."""

    # Legacy status names (translated from Romanian):
    # SUBMITTED / TAKEN / PROCESSED / DELIVERY / COMPLETE
    PROCESSING = 1 # New order, addresses and shipping/payment methods chosen (user is in the shipping backend)
    CONFIRMING = 2 # The order is pending confirmation (user is on the confirm view)
    CONFIRMED = 3 # The order was confirmed (user is in the payment backend)
    COMPLETED = 4 # Payment backend successfully completed
    SHIPPED = 5 # The order was shipped to client
    CANCELLED = 6 # The order was cancelled
    STATUS_CODES = (
        (PROCESSING, 'Processing'),
        (CONFIRMING, 'Confirming'),
        (CONFIRMED, 'Confirmed'),
        (COMPLETED, 'Completed'),
        (SHIPPED, 'Shipped'),
        (CANCELLED, 'Cancelled'),
    )
    user = models.ForeignKey(User, blank=True, null=True, related_name="orders")
    address = models.ForeignKey(Address)
    payment = models.ForeignKey(Payment)
    order_total = models.PositiveIntegerField()
    status = models.PositiveIntegerField(choices=STATUS_CODES, default=PROCESSING)
    created_at = models.DateTimeField(auto_now_add = True)

    def add_item(self, product, quantity=1):
        """Attach *product* to the order and move *quantity* units from
        stock to items_sold.

        NOTE(review): stock is decremented without checking it stays >= 0;
        presumably the PositiveIntegerField rejects a negative result at
        save time -- confirm before relying on it.
        """
        order_item = OrderItem(order=self, quantity=quantity, product=product)
        order_item.save()
        product.stock = product.stock - quantity
        product.items_sold = product.items_sold + quantity
        product.save()

    def __unicode__(self):
        return u'%s - %s' % (self.user, self.created_at)
class OrderItem(models.Model):
    """One product line inside a placed order (mirrors CartItem)."""

    # NOTE(review): no on_delete argument -- Django < 2.0 style FKs.
    order = models.ForeignKey('Order', related_name="items")
    quantity = models.IntegerField()
    product = models.ForeignKey('Product')

    def __unicode__(self):
        return u'%s - %s' % (self.product.name, self.quantity)

    @property
    def item_total(self):
        """Line total (quantity x unit price)."""
        return self.quantity * self.product.price_in_dollars

    @property
    def item_total_str(self):
        """Line total formatted for display, e.g. ``$19.98``."""
        return "$%s" % (self.quantity * self.product.price_in_dollars)
class Advertisement(models.Model):
    """A third-party banner ad shown on the site."""

    owner = models.CharField(max_length=250)
    # Banner image is referenced by URL, not uploaded.
    image = models.URLField()
    slogan = models.CharField(max_length=250)
    website = models.URLField()
    # Toggles visibility without deleting the record.
    is_displayed = models.BooleanField(default=False)
    # Display ordering slot; lower numbers presumably shown first -- TODO
    # confirm against the template that renders these.
    position = models.PositiveIntegerField(default=1)

    def __unicode__(self):
        return u'%s' % (self.owner)
# class ProductDetail(models.Model):
# '''
# The ``ProductDetail`` model represents information unique to a
# specific product. This is a generic design that can be used
# to extend the information contained in the ``Product`` model with
# specific, extra details.
# '''
# product = models.ForeignKey('Product',
# related_name='details')
# attribute = models.ForeignKey('ProductAttribute')
# value = models.CharField(max_length=500)
# description = models.TextField(blank=True)
# def __unicode__(self):
# return u'%s: %s - %s' % (self.product,
# self.attribute,
# self.value)
# class ProductAttribute(models.Model):
# '''
# The "ProductAttribute" model represents a class of feature found
# across a set of products. It does not store any data values
# related to the attribute, but only describes what kind of a
# product feature we are trying to capture. Possible attributes
# include things such as materials, colors, sizes, and many, many
# more.
# '''
# name = models.CharField(max_length=300)
# description = models.TextField(blank=True)
# def __unicode__(self):
# return u'%s' % self.name
|
"""Randomly split a CSV file into train/test files.

The header row (when present) is copied to both outputs; every other row
goes to the test file with probability *test_fraction*, otherwise to the
train file.
"""
import random


def split_csv(input_file_name, test_fraction=0.3, headers=True):
    """Split *input_file_name* into ``<stem>_train.csv`` / ``<stem>_test.csv``.

    FIXES from the original script: the split probability is an actual
    parameter (the original defined ``p = 0.3`` but then hard-coded the
    literal ``0.3``), all three files are closed via context managers even
    on error, and the useless line copy was removed.
    """
    train_name = input_file_name[:-4] + '_train.csv'
    test_name = input_file_name[:-4] + '_test.csv'
    with open(input_file_name, 'r') as src, \
            open(train_name, 'w') as train_out, \
            open(test_name, 'w') as test_out:
        for nrows, line in enumerate(src):
            if headers and nrows == 0:
                # Header row is duplicated into both outputs.
                train_out.write(line)
                test_out.write(line)
            elif random.random() > test_fraction:
                train_out.write(line)
            else:
                test_out.write(line)


if __name__ == '__main__':
    split_csv('data.csv', test_fraction=0.3, headers=True)
|
"""Print the contents of the file named on the command line."""
from sys import argv


def read_file(filename):
    """Return the full text of *filename*.

    FIX: the original left the file handle open; the context manager
    guarantees it is closed.
    """
    with open(filename) as handle:
        return handle.read()


if __name__ == '__main__':
    # Unpack argv only when run as a script, so importing this module
    # no longer crashes on the two-element unpack.
    script, filename = argv
    print(read_file(filename))
from . import Anime
from . import Games
from . import Members
from . import Pets
from . import Utils
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import todos
from .forms import listform
# Create your views here.
def index(request):
    """List all todos; on POST, validate and save a new todo first."""
    if request.method == "POST":
        form = listform(request.POST or None)
        # BUG FIX: the original tested ``form.is_valid`` (the bound method
        # object, which is always truthy) instead of calling it, so invalid
        # forms were saved anyway.
        if form.is_valid():
            form.save()
        todo_list = todos.objects.all()
        return render(request, "todo_app/index.html", {'todo_list': todo_list})
    else:
        todo_list = todos.objects.all()
        return render(request, "todo_app/index.html", {'todo_list': todo_list})
def about(request):
    """Render the static about page."""
    template_name = "todo_app/about.html"
    return render(request, template_name)
def create(request):
    """Show the creation page; on POST, validate and save a new todo."""
    if request.method == "POST":
        form = listform(request.POST or None)
        # BUG FIX: call is_valid() -- the uncalled method reference was
        # always truthy, so invalid forms were saved.
        if form.is_valid():
            form.save()
        todo_list = todos.objects.all()
        return render(request, "todo_app/create.html", {'todo_list': todo_list})
    else:
        todo_list = todos.objects.all()
        return render(request, "todo_app/create.html", {'todo_list': todo_list})
def delete(request, todos_id):
    """Delete the todo with primary key *todos_id*, then return to the list."""
    doomed = todos.objects.get(pk=todos_id)
    doomed.delete()
    return redirect("index")
def update(request, todos_id):
    """Edit an existing todo: GET shows the form, POST saves the changes."""
    if request.method == "POST":
        todo_list = todos.objects.get(pk=todos_id)
        form = listform(request.POST or None, instance=todo_list)
        # BUG FIX: call is_valid() -- the uncalled method reference was
        # always truthy, so invalid submissions were saved.
        if form.is_valid():
            form.save()
        return redirect("index")
    else:
        todo_item = todos.objects.get(pk=todos_id)
        return render(request, "todo_app/update.html", {'todo_item': todo_item})
def finish(request, todos_id):
    """Set the todo's ``finished`` flag to False, then return to the list.

    NOTE(review): despite the name, this sets ``finished = False`` while
    ``no_finish`` sets it True -- the two handlers look swapped; confirm
    against the template/URL wiring before changing.
    """
    todo=todos.objects.get(pk=todos_id)
    todo.finished=False
    todo.save()
    return redirect("index")
def no_finish(request, todos_id):
    """Set the todo's ``finished`` flag to True, then return to the list.

    NOTE(review): despite the name, this marks the todo as finished while
    ``finish`` un-marks it -- the two handlers look swapped; confirm
    against the template/URL wiring before changing.
    """
    todo=todos.objects.get(pk=todos_id)
    todo.finished=True
    todo.save()
    return redirect("index")
|
import Fortuna as rng
class Weapon:
    """A pirate weapon with a randomly drawn name.

    The class-level ``weapons`` callable is a Fortuna TruffleShuffle over a
    fixed pool of names; each instance draws one name at construction.
    """

    weapons = rng.TruffleShuffle([
        'Cutlass', 'Hook', 'Steam Powered Flint Lock', 'Knife', 'Cannon',
        'Musket', 'Black-powder Blunderbuss', 'Dagger', 'Scimitar',
        'Boarding Axe',
    ])

    def __init__(self):
        # Calling the shuffler yields the next shuffled weapon name.
        self.name = self.weapons()

    def __str__(self):
        return self.name


if __name__ == '__main__':
    demo_weapon = Weapon()
    print(demo_weapon)
|
# -*- coding: utf-8 -*-
def add(a, b):
    """Announce the operation, then return the sum of *a* and *b*."""
    total = a + b
    print("ADDING %d +%d" % (a, b))
    return total
def subtract(a, b):
    """Announce the operation, then return *a* minus *b*."""
    difference = a - b
    print("SUBTRACTING %d -%d" % (a, b))
    return difference
def multiply(a, b):
    """Announce the operation, then return the product of *a* and *b*."""
    product = a * b
    print("MULTIPLYING %d * %d" % (a, b))
    return product
def divide(a, b):
    """Announce the operation, then return *a* / *b* (true division)."""
    quotient = a / b
    print("DIVIDING %d /%d" % (a, b))
    return quotient
print("Let's do some math with just functions!")

# Each call prints what it is doing and hands back the result.
age = add(15, 12)
height = subtract(100, 48)
weight = multiply(7, 8)
iq = divide(100, 2)
print("Age:%d,Height:%d,weight:%d,IQ:%d" % (age, height, weight, iq))

# Extra-credit puzzle: the nested calls evaluate inside-out.
print("Here is a puzzle")
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print("That becomes:", what, "can you do it by hand?")
|
from time import sleep
from unittest import mock
from gitrack import config
from .helpers import repo_data_dir, ProviderForTesting
class TestStart:
    """End-to-end tests for the gitrack ``start`` CLI command."""

    def test_basic(self, cmd):
        # Starting in a fresh repo must mark it running and record a
        # positive start timestamp in the repo's status file.
        result, repo_dir = cmd('start')
        assert result.exit_code == 0
        store = config.Store.get_for_repo(repo_dir)
        assert store['running'] is True
        status_file = repo_data_dir(repo_dir) / 'status'
        assert status_file.exists()
        start_timestamp = int(status_file.read_text())
        assert start_timestamp > 0

    def test_dont_restart_already_running_repo(self, cmd):
        # A second 'start' in the same repo must keep the original timestamp.
        result, repo_dir = cmd('start')
        assert result.exit_code == 0
        status_file = repo_data_dir(repo_dir) / 'status'
        assert status_file.exists()
        original_start_timestamp = int(status_file.read_text())
        assert original_start_timestamp > 0
        # Sleep so a (wrong) restart would produce a visibly newer
        # second-resolution timestamp.
        sleep(2)
        result, repo_dir_new = cmd('start')
        assert result.exit_code == 0
        assert repo_dir == repo_dir_new
        status_file = repo_data_dir(repo_dir) / 'status'
        start_timestamp = int(status_file.read_text())
        assert original_start_timestamp == start_timestamp

    def test_project(self, cmd, mocker):
        # The provider must receive the configured project id on start.
        mocker.spy(ProviderForTesting, 'stop')
        mocker.spy(ProviderForTesting, 'start')
        result, _ = cmd('start', config='project.config')
        assert result.exit_code == 0
        ProviderForTesting.start.assert_called_once_with(mock.ANY, project=123, force=False)
|
import os
from setuptools import setup
import re
import sys
MIN_PYTHON_VERSION = (2, 5)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'requestflow'))
from version import VERSION
if __name__ == "__main__":
    # Refuse to install on interpreters older than the supported floor.
    if sys.version_info < MIN_PYTHON_VERSION:
        # BUG FIX: the original used the Python 2 ``raise Exception, msg``
        # statement (a SyntaxError on Python 3) and referenced an undefined
        # ``NAME`` constant; use the package name directly instead.
        args = ("requestflow", VERSION,
                ".".join(str(x) for x in MIN_PYTHON_VERSION))
        raise Exception("%s-%s requires Python %s or higher." % args)
    setup(name="requestflow",
          version=VERSION,
          description="Tracking the id ",
          author="Hulu",
          author_email="the-core@hulu.com",
          packages=['requestflow'],
          install_requires=['python-json-logger==0.1.4'],
          classifiers=[
              'Development Status :: 4 - Beta',
              'Intended Audience :: Developers',
              'Programming Language :: Python',
              'Topic :: Software Development :: Libraries'])
|
import FWCore.ParameterSet.Config as cms
source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_100_1_PyU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_101_1_icZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_102_1_YtI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_103_1_9bS.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_104_1_fgf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_105_1_r5d.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_106_1_HNU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_107_1_usM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_108_1_J7s.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_109_1_ZfM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_10_1_4qz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_110_1_OW0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_111_1_yGP.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_112_1_e8J.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_113_1_iql.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_114_1_RE6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_115_1_m67.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_116_1_T8n.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_117_1_9WK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_118_1_hZz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_119_1_t97.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_11_1_rFx.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_120_1_Euh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_121_1_Fhf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_122_1_b4R.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_123_1_1k8.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_124_1_sdg.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_125_1_elX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_126_1_fSD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_127_1_TJf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_128_1_zhf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_129_1_8uG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_12_1_LD1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_130_1_Ewd.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_131_1_3pD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_132_1_Dif.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_133_1_0eu.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_134_1_kAz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_135_1_amD.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_136_1_LYW.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_137_1_XWQ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_138_1_iw1.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_139_1_AUg.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_13_1_Tsh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_140_1_FaX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_141_1_szC.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_142_1_AQl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_143_1_L7R.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_144_1_RFs.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_145_1_rel.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_146_1_FU7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_147_1_JuY.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_148_1_oYK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_149_1_1dN.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_14_1_gUR.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_150_1_uEL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_151_1_o11.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_152_1_jtk.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_153_1_yhT.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_154_1_3XX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_155_1_fgK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_156_1_PEB.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_157_1_qJ2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_158_1_XND.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_159_1_qb5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_15_1_OBB.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_160_1_FIR.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_161_1_4PI.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_162_1_QNr.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_163_1_5sO.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_164_1_mQv.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_165_1_Y7M.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_166_1_oes.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_167_1_HgF.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_168_1_Go9.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_169_1_Ily.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_16_1_eYi.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_170_1_9aj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_171_1_jjf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_172_1_6mY.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_173_1_naN.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_174_1_oeE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_175_1_3lp.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_176_1_zVs.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_177_1_xxg.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_178_1_5bO.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_179_1_17L.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_17_1_o5T.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_180_1_vUA.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_181_1_tjW.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_182_1_rdT.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_183_1_BLc.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_184_1_kdf.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_185_1_urZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_186_1_2kl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_187_1_SyU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_188_1_4Pg.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_189_1_BrM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_18_1_Vxj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_190_1_MBo.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_191_1_O10.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_192_1_IOK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_193_1_1g7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_194_1_tHl.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_195_1_nQF.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_196_1_mqZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_197_1_1mv.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_198_1_xb5.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_199_1_bhz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_19_1_fzP.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_1_1_ejM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_200_1_Szc.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_20_1_lwH.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_21_1_klG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_22_1_Kd8.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_23_1_StR.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_24_1_Gfv.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_25_1_JSs.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_26_1_Pj2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_27_1_dl6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_28_1_gtX.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_29_1_5Wt.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_2_1_chA.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_30_1_b59.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_31_1_gaK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_32_1_3ju.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_33_1_n6N.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_34_1_guK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_35_1_EL0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_36_1_gtd.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_37_1_0rU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_38_1_ZV0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_39_1_M9h.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_3_1_hYM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_40_1_Vxb.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_41_1_JHt.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_42_1_2h9.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_43_1_9XJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_44_1_c3w.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_45_1_vnj.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_46_1_7gn.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_47_1_8kL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_48_1_xKm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_49_1_x4A.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_4_1_ezP.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_50_1_bLL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_51_1_zSL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_52_1_8IP.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_53_1_2Qz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_54_1_J0U.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_55_1_bPb.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_56_1_W9z.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_57_1_GyK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_58_1_dkO.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_59_1_IYi.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_5_1_22I.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_60_1_cHc.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_61_1_0KJ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_62_1_XXM.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_63_1_9Oh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_64_1_9ya.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_65_1_QFh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_66_1_IeZ.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_67_1_FVm.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_68_1_Qvk.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_69_1_hOx.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_6_1_oGh.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_70_1_S4z.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_71_1_Uk4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_72_1_gkG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_73_1_JOU.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_74_1_xg4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_75_1_km6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_76_1_wpK.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_77_1_bjk.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_78_1_Hw7.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_79_1_aPe.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_7_1_WpL.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_80_1_I2P.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_81_1_uVG.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_82_1_8gF.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_83_1_XrA.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_84_1_238.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_85_1_sb4.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_86_1_bW0.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_87_1_Fwd.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_88_1_AZE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_89_1_tWE.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_8_1_fub.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_90_1_wqB.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_91_1_zGV.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_92_1_869.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_93_1_Fuz.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_94_1_qT6.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_95_1_6ih.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_96_1_nZn.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_97_1_K8B.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_98_1_oVy.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_99_1_gf2.root',
'/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R4_HISTATS/outfile14TeVSKIM_9_1_DHE.root',
)
)
|
# standard imports
import os
# RootTools
from RootTools.core.standard import *
# Logging
import logging
logger = logging.getLogger(__name__)

# Base directory that holds the Delphes sample trees.
# (Renamed from `dir`, which shadowed the built-in `dir` function.)
sample_directory = "/scratch/rschoefbeck/"

# ttZ sample with 200 pile-up interactions.
TTZ_200PU = Sample.fromDirectory("TTZ_200PU", texName = "ttZ (200PU)", directory = [os.path.join( sample_directory, "TTZ", "200PU")], treeName = 'Delphes')
TTZ_200PU.xsec = 0.7152  # cross-section; presumably pb -- confirm with sample producer
# ttZ sample at nominal pile-up.
TTZ_nominal = Sample.fromDirectory("TTZ_nominal", texName = "ttZ (nominal)", directory = [os.path.join( sample_directory, "TTZ", "nominal")], treeName = 'Delphes')
TTZ_nominal.xsec = 0.7152  # same process as TTZ_200PU, so same cross-section
# WZ background sample.
WZ = Sample.fromDirectory("WZ", texName = "WZ", directory = [os.path.join( sample_directory, "WZ", )], treeName = 'Delphes')
WZ.xsec = 3.125  # cross-section; presumably pb -- confirm with sample producer
|
class ResponseError(Exception):
    """Exception raised for response errors (see raise sites for exact semantics)."""
class ResponseCodeError(Exception):
    """Exception raised for response-code errors (see raise sites for exact semantics)."""
class AutoCodeConfigError(Exception):
    """Exception raised for autocode configuration errors (see raise sites for exact semantics)."""
|
from tkinter import *
import tkinter as Tk
import tkinter as ttk
import matriz
from tkinter import messagebox
# Shared board model: a single matriz instance used by every Interfaz method below.
matr = matriz.matriz()
class Interfaz:
    """Tkinter front-end for the "game of life"-style simulation backed by the
    module-level `matr` model: a main-menu window, a settings window and a
    25x25 grid view of the board."""
    def __init__(self):
        # Root window; all secondary windows are Toplevels of it.
        self.root = Tk.Tk()
        # 25x25 grid of Entry widgets, populated in juego().
        self.entries = []
    def ejecutarInicioMatriz(self):
        # Ask the model to build the initial board.
        matr.crearMatriz()
    def NumerosaLetras(self, numero):
        # Translate a cell's numeric state (1-8) into its display code;
        # any other value renders as "-".
        salida = ""
        if numero == 1:
            salida = "HvS"
        elif numero == 2:
            salida = "MvS"
        elif numero == 3:
            salida = "HmS"
        elif numero == 4:
            salida = "MmS"
        elif numero == 5:
            salida = "HmE"
        elif numero == 6:
            salida = "MmE"
        elif numero == 7:
            salida = "HvE"
        elif numero == 8:
            salida = "MvE"
        else:
            salida = "-"
        return salida
    # Here lives the condition of 5 turns without a birth, but it is very hard
    # to trigger -- nearly impossible, since there are many births.
    def AccionSiguiente(self):
        # Advance the simulation one step and refresh the grid; once the model
        # says the game is over, show a dialog and close the application.
        if matr.condicionSiContinua() == True:
            matr.logica_Siguiente()
            for r in range(25):
                for c in range(25):
                    # walk the previously created grid of entities
                    self.entries[r][c].delete(0,"end")
                    self.entries[r][c].insert(0, '{}'.format(self.NumerosaLetras(matr.lista[r][c])))
        elif matr.condicionSiContinua() == False:
            resultado = messagebox.showinfo("Juego", "Juego terminado") # end-of-game pop-up
            if resultado == "Ok":
                self.root.destroy()
    def enfermar(self):
        # Last matriz method: randomly picks a new entity to infect,
        # then redraws the whole grid.
        matr.enfermarEnte()
        for r in range(25):
            for c in range(25):
                self.entries[r][c].delete(0,"end")
                self.entries[r][c].insert(0, '{}'.format(self.NumerosaLetras(matr.lista[r][c])))
    def menu(self):
        # Build and run the main-menu window (blocks in mainloop until quit).
        self.root.title("JUEGO DE LA VIDA")
        self.root.geometry("702x432")
        ## Main menu background image
        self.img1 = PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Juegodelavida.1.png")
        self.img = Label(self.root,image=self.img1).place(x=0,y=0)
        ## Menu buttons
        self.img1btn1= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\jugar.png")
        self.img1btn2= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Salir.png")
        self.img1btn3= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\AjustesBG.png")
        self.boton = Button(self.root,image=self.img1btn1,height=41, width=177,borderwidth=0,command=self.juego).place(x=260,y=215)# Play button
        self.boton = Button(self.root,image=self.img1btn2,height=41, width=177,borderwidth=0,command=self.root.destroy).place(x=260,y=265)# Quit button
        self.boton = Button(self.root,image=self.img1btn3,height=43, width=44,borderwidth=0,command=self.ajuste).place(x=0,y=15)# Settings button
        self.root.resizable(0,0)
        self.root.quit()
        self.root.deiconify()
        self.root.mainloop()
    def ajuste(self):
        ## Settings window: background image plus two sliders.
        self.ajustes = Tk.Toplevel(self.root)
        self.ajustes.title("AJUSTES")
        self.ajustes.geometry("500x471")
        self.img2 = PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Ajustes.png")
        self.fnd2 = Label(self.ajustes,image=self.img2).place(x=0,y=0)
        self.ajustes.resizable(0,0)
        # X button
        self.Brillo = Scale(self.ajustes,from_=0,to=100, orient=HORIZONTAL,length=250).place(x=200,y=130)
        self.volumen = Scale(self.ajustes,from_=0,to=100, orient=HORIZONTAL,length=250).place(x=200,y=220)
    def aniquilar(self):
        # Confirm with the user before tearing down the root window.
        resultado = messagebox.askquestion("Aniquilar", "¿Desea detener el juego?")
        if resultado == "yes":
            self.root.destroy()
    def juego(self):
        ## Game window: hide the menu, build the 25x25 grid and the action buttons.
        self.root.withdraw()
        self.juegos = Tk.Toplevel(self.root)
        self.ejecutarInicioMatriz()
        self.juegos.title("JUEGO")
        self.juegos.geometry("1300x475")
        self.img3 = PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Juego.png")
        self.fnd3 = Label(self.juegos,image=self.img3).place(x=0,y=0)
        # Fill the grid from the model's current board state.
        for r in range(25):
            self.entries.append([])
            for c in range(25):
                self.entries[r].append(Entry(self.juegos, width=5))
                self.entries[r][c].grid(row=r, column=c)
                self.entries[r][c].insert(0, '{}'.format(self.NumerosaLetras(matr.lista[r][c])))
        self.juegos.resizable(0,0)
        ## BUTTONS
        self.img3btn1= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\AjustesBG.png")
        self.img3btn2= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Aniquilar.png")
        self.img3btn3= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Enfermar.png")
        self.img3btn4= PhotoImage(file="Proyecto_Progra2\ProyectoFinal\Imagenes\Siguiente.png")
        boton = Button(self.juegos,image=self.img3btn1,height=43, width=44,borderwidth=0,command=self.ajuste).place(x=850,y=10)# Settings button
        boton = Button(self.juegos,image=self.img3btn2,height=41, width=141,borderwidth=0, command=self.aniquilar).place(x=1010,y=110)# Annihilate button
        boton = Button(self.juegos,image=self.img3btn3,height=41, width=141,borderwidth=0,command=self.enfermar).place(x=1010,y=210)# Infect button
        boton = Button(self.juegos,image=self.img3btn4,height=41, width=141,borderwidth=0,command=self.AccionSiguiente).place(x=1010,y=310)# Next button
# Launch the application: build the controller and enter the main-menu loop.
a = Interfaz()
a.menu()
#coding: utf8
import csv
from ftplib import FTP
import os
import os.path
import re
from urlparse import urlparse
from invoke import run, task
import requests
# Licence URLs that permit open redistribution; directories under these
# licences must ship a LICENSE.txt matching a licence-specific template.
open_data_licenses = [
    'http://data.gc.ca/eng/open-government-licence-canada',
    'http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/',
    'http://donnees.ville.quebec.qc.ca/licence.aspx',
    'http://opendata.peelregion.ca/terms-of-use.aspx',
    'http://ottawa.ca/en/mobile-apps-and-open-data/open-data-terms-use',
    'http://www.citywindsor.ca/opendata/Documents/OpenDataTermsofUse.pdf',
    'http://www.countygp.ab.ca/EN/main/community/maps-gis/open-data/open-data-licence.html',
    'http://www.edmonton.ca/city_government/initiatives_innovation/open-data-terms-of-use.aspx',
    'http://www.electionspei.ca/apilicense',
    'http://www.fredericton.ca/en/citygovernment/TermsOfUse.asp',
    'http://www.london.ca/d.aspx?s=/Open_Data/Open_Data_Terms_Use.htm',
    'http://www5.mississauga.ca/research_catalogue/CityofMississauga_TermsofUse.pdf',
    'http://www.regina.ca/residents/open-government/data/terms/',
    'http://www.regionofwaterloo.ca/en/regionalGovernment/OpenDataLicence.asp',
    'http://www1.toronto.ca/wps/portal/contentonly?vgnextoid=4a37e03bb8d1e310VgnVCM10000071d60f89RCRD&vgnextfmt=default',
    'https://cityonline.calgary.ca/Pages/PdcTermsOfUse.aspx',
]
# Licence URLs carrying restrictions (e.g. no commercial redistribution).
some_rights_reserved_licenses = [
    'http://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php', # per CIPPIC
    'https://mli2.gov.mb.ca/app/register/app/index.php', # no commercial redistribution
    'https://www.geosask.ca/Portal/jsp/terms_popup.jsp', # per CIPPIC
]
# Licence URLs under which data may only be redistributed with explicit
# permission; their LICENSE.txt must match the "all rights reserved" template.
all_rights_reserved_licenses = [
    'http://opendata-saskatoon.cloudapp.net/TermsOfUse/TermsOfUse', # open data license pending
    'http://www.altalis.com/agreement.html', # per CIPPIC
    'http://www.elections.on.ca/en-CA/Tools/ElectoralDistricts/LimitedUseDataProductLicenceAgreement.htm', # per CIPPIC
]
# Returns the directory in which a shapefile exists.
def dirname(path):
    """Return the directory containing *path*, or *path* itself when it is
    already a directory. A leading "./" is stripped first because GitPython
    can't handle paths starting with it."""
    if path.startswith('./'):
        path = path[2:]
    return path if os.path.isdir(path) else os.path.dirname(path)
# Reads `definition.py` files.
def registry(base='.'):
    """Autodiscover boundary definitions under *base* and return the populated
    ``boundaries.registry`` mapping (slug -> config; iterated with .items()
    by the tasks below)."""
    import boundaries
    boundaries.autodiscover(base)
    return boundaries.registry
# Reads a remote CSV file.
def csv_reader(url):
    """Download *url* and return a ``csv.reader`` over its body."""
    from StringIO import StringIO
    payload = requests.get(url).content
    return csv.reader(StringIO(payload))
# Maps Standard Geographical Classification codes to the OCD identifiers of provinces and territories.
ocd_codes_memo = {}
def ocd_codes():
    """Return the SGC-code -> OCD-identifier mapping, fetched once and memoized."""
    if not ocd_codes_memo:
        # '01' is the country itself; the CSV supplies provinces and territories.
        ocd_codes_memo['01'] = 'ocd-division/country:ca'
        rows = csv_reader('https://raw.github.com/opencivicdata/ocd-division-ids/master/mappings/country-ca-sgc/ca_provinces_and_territories.csv')
        for record in rows:
            ocd_codes_memo[record[1]] = record[0]
    return ocd_codes_memo
# Maps OCD identifiers and Standard Geographical Classification codes to names.
ocd_names_memo = {}
def ocd_names():
    """Return the OCD-identifier -> name mapping, fetched once and memoized."""
    if not ocd_names_memo:
        sources = (
            'https://raw.github.com/opencivicdata/ocd-division-ids/master/identifiers/country-ca/ca_manual.csv',
            'https://raw.github.com/opencivicdata/ocd-division-ids/master/identifiers/country-ca/ca_provinces_and_territories.csv',
            'https://raw.github.com/opencivicdata/ocd-division-ids/master/identifiers/country-ca/ca_census_divisions.csv',
            'https://raw.github.com/opencivicdata/ocd-division-ids/master/identifiers/country-ca/ca_census_subdivisions.csv',
            'https://raw.github.com/jpmckinney/ocd-division-ids/ca/identifiers/country-ca/census_subdivision-montreal-arrondissements.csv', # @todo switch repository and branch
        )
        for source in sources:
            for record in csv_reader(source):
                ocd_names_memo[record[0].decode('utf8')] = record[1].decode('utf8')
    return ocd_names_memo
# Returns the Open Civic Data division identifier and Standard Geographical Classification code.
def get_ocd_division(slug, config):
    """Derive ``[ocd_division, geographic_code]`` from a definition's metadata.

    At most one of ``ocd_division`` / ``geographic_code`` may be set. A
    geographic code is mapped to an OCD identifier by its length: 2 digits is
    a province or territory, 4 a census division, 7 a census subdivision.
    """
    metadata = config['metadata']
    ocd_division = metadata.get('ocd_division')
    geographic_code = metadata.get('geographic_code')
    if ocd_division and geographic_code:
        raise Exception('%s: Set ocd_division or geographic_code' % slug)
    if not ocd_division and geographic_code:
        length = len(geographic_code)
        if length == 2:
            ocd_division = ocd_codes()[geographic_code]
        elif length == 4:
            ocd_division = 'ocd-division/country:ca/cd:%s' % geographic_code
        elif length == 7:
            ocd_division = 'ocd-division/country:ca/csd:%s' % geographic_code
        else:
            raise Exception('%s: Unrecognized geographic code %s' % (slug, geographic_code))
    return [ocd_division, geographic_code]
# Check that all data directories contain a `LICENSE.txt`.
@task
def licenses(base='.'):
for (dirpath, dirnames, filenames) in os.walk(base, followlinks=True):
if '.git' in dirnames:
dirnames.remove('.git')
if '.DS_Store' in filenames:
filenames.remove('.DS_Store')
if filenames and 'LICENSE.txt' not in filenames:
print '%s No LICENSE.txt' % dirpath
# Check that all `definition.py` files are valid.
@task
def definitions(base='.'):
    """Validate every registered definition: LICENSE.txt contents against the
    licence templates, config keys/values, and the slug/domain/authority
    naming conventions derived from each division's OCD identifier.
    Problems are printed, one line per finding."""
    import lxml.html
    # Every key a definition config may legally contain.
    valid_keys = set([
        # Added by boundaries.register.
        'file',
        # Used by represent-boundaries.
        'name',
        'singular',
        'domain',
        'last_updated',
        'slug_func',
        'name_func',
        'id_func',
        'is_valid_func',
        'authority',
        'source_url',
        'licence_url',
        'data_url',
        'metadata',
        'notes',
        'encoding',
        # Used by this script. Not validated.
        'ogr2ogr',
        'prj',
    ])
    valid_metadata_keys = set([
        'geographic_code',
        'ocd_division',
    ])
    # Exact LICENSE.txt text expected for each licence URL.
    terms = {
        # @see https://www.cippic.ca/sites/default/files/CIPPIC%20-%20How%20to%20Redistribute%20Open%20Data.pdf
        'http://ottawa.ca/en/mobile-apps-and-open-data/open-data-terms-use': 'I. Terms of Use. This work is provided under the terms of “City of Ottawa – Terms of Use” (http://ottawa.ca/en/mobile-apps-and-open-data/open-data-terms-use). Any use of the work other than as authorized under these terms is strictly prohibited.',
        'http://www.citywindsor.ca/opendata/Documents/OpenDataTermsofUse.pdf': 'I. Terms of Use. This work is provided under the terms of “City of Windsor – Terms of Use” (http://www.citywindsor.ca/opendata/Documents/OpenDataTermsofUse.pdf). Any use of the work other than as authorized under these terms is strictly prohibited.',
        'http://www.edmonton.ca/city_government/initiatives_innovation/open-data-terms-of-use.aspx': 'I. Terms of Use. This work is provided under the terms of City of Edmonton Open Data Terms of Use (http://www.edmonton.ca/city_government/initiatives_innovation/open-data-terms-of-use.aspx). Any use of the work other than as authorized under these terms is strictly prohibited.',
        'http://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php': """Attribution: This data is provided by the Directeur général des élections du Québec (http://www.electionsquebec.qc.ca), reproduced according to the terms of the "Conditions d'utilisation de notre site Web" (http://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php). Copyright in the work belongs to the Government of Quebec.""",
        'http://www.london.ca/d.aspx?s=/Open_Data/Open_Data_Terms_Use.htm': 'I. Terms of Use. This work is provided under the terms of “Open Data London – Terms of Use” (http://www.london.ca/d.aspx?s=/Open_Data/Open_Data_Terms_Use.htm). Any use of the work other than as authorized under these terms is strictly prohibited.',
        'http://www5.mississauga.ca/research_catalogue/CityofMississauga_TermsofUse.pdf': 'I. Terms of Use. This work is provided under the terms of “City of Mississauga – Terms of Use” (http://www.mississauga.ca/file/COM/CityOfMississaugaTermsOfUse.pdf). Any use of the work other than as authorized under these terms is strictly prohibited.',
        'https://cityonline.calgary.ca/Pages/PdcTermsOfUse.aspx': 'I. Terms of Use. This data is provided by the City of Calgary and is made available under the Open Data Catalogue Terms of Use (https://cityonline.calgary.ca/Pages/PdcTermsOfUse.aspx).',
        # Open Government Licence.
        'http://data.gc.ca/eng/open-government-licence-canada': 'I. Terms of Use. Contains information licensed under the Open Government Licence – Canada (http://data.gc.ca/eng/open-government-licence-canada).',
        'http://www.countygp.ab.ca/EN/main/community/maps-gis/open-data/open-data-licence.html': 'I. Terms of Use. Contains information licensed under the Open Government Licence – County of Grande Prairie (http://www.countygp.ab.ca/EN/main/community/maps-gis/open-data/open-data-licence.html).',
        'http://www.nanaimo.ca/EN/main/departments/106/DataCatalogue/Licence.html': 'I. Terms of Use. Contains information licensed under the Open Government Licence - Nanaimo (http://www.nanaimo.ca/EN/main/departments/106/DataCatalogue/Licence.html).',
        'http://www1.toronto.ca/wps/portal/contentonly?vgnextoid=4a37e03bb8d1e310VgnVCM10000071d60f89RCRD&vgnextfmt=default': 'I. Terms of Use. Contains information licensed under the Open Government Licence – Toronto (http://www1.toronto.ca/wps/portal/contentonly?vgnextoid=4a37e03bb8d1e310VgnVCM10000071d60f89RCRD&vgnextfmt=default).',
        # Text provided by license.
        'http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/': "I. Termes d'utilisation. Contient des données reproduites, modifiées, traduites ou distribuées « telles quelles » avec la permission de la Ville de Montréal (http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/).",
        'http://donnees.ville.quebec.qc.ca/licence.aspx': "I. Conditions d'utilisation. Contient des données reproduites et distribuées « telles quelles » avec la permission de la Ville de Québec (http://donnees.ville.quebec.qc.ca/licence.aspx).",
        'http://opendata.peelregion.ca/terms-of-use.aspx': "I. Terms of Use. Contains public sector Information made available under The Regional Municipality of Peel's Open Data Licence - Version 1.0 (http://opendata.peelregion.ca/terms-of-use.aspx).",
        'http://www.regionofwaterloo.ca/en/regionalGovernment/OpenDataLicence.asp': 'I. Terms of Use. Contains information provided by the Regional Municipality of Waterloo under licence (http://www.regionofwaterloo.ca/en/regionalGovernment/OpenDataLicence.asp).',
        # Kent Mewhort email (2012-02-10).
        'https://mli2.gov.mb.ca/app/register/app/index.php': '© 2001 Her Majesty the Queen in Right of Manitoba, as represented by the Minister of Conservation. All rights reserved. Distributed under the terms of the Manitoba Land Initiative Terms and Conditions of Use (https://mli2.gov.mb.ca//app/register/app/index.php).',
    }
    # LICENSE.txt patterns for licences whose text varies per dataset.
    terms_re = {
        # @see https://www.cippic.ca/sites/default/files/CIPPIC%20-%20How%20to%20Redistribute%20Open%20Data.pdf
        'https://www.geosask.ca/Portal/jsp/terms_popup.jsp': re.compile("\AAttribution: (Source|Adapted from): Her Majesty In Right Of Saskatchewan or Information Services Corporation of Saskatchewan, [^.]+\. The incorporation of data sourced from Her Majesty In Right Of Saskatchewan and/or Information Services Corporation of Saskatchewan, within this product shall not be construed as constituting an endorsement by Her Majesty In Right Of Saskatchewan or Information Services Corporation of Saskatchewan of such product\.\Z"),
    }
    # Pattern every "all rights reserved" LICENSE.txt must begin with.
    all_rights_reserved_terms_re = re.compile('\ADistributed with permission from .+?. Please direct licensing inquiries and requests to:\n\n')
    # Authorities accepted even when they don't match the derived "<type> <preposition> <name>" form.
    authorities = [
        u'Elections Prince Edward Island',
        u'Regional Municipality of Peel',
        u'Regional Municipality of Waterloo',
        u'Ville de Montréal',
    ]
    # Map census type codes to names.
    census_division_type_names = {}
    document = lxml.html.fromstring(requests.get('http://www12.statcan.gc.ca/census-recensement/2011/ref/dict/table-tableau/table-tableau-4-eng.cfm').content)
    for abbr in document.xpath('//table/tbody/tr/th[1]/abbr'):
        census_division_type_names[abbr.text_content()] = re.sub(' /.+\Z', '', abbr.attrib['title'])
    census_subdivision_type_names = {}
    document = lxml.html.fromstring(requests.get('http://www12.statcan.gc.ca/census-recensement/2011/ref/dict/table-tableau/table-tableau-5-eng.cfm').content)
    for abbr in document.xpath('//table/tbody/tr/th[1]/abbr'):
        census_subdivision_type_names[abbr.text_content()] = re.sub(' /.+\Z', '', abbr.attrib['title'])
    # Map OCD identifiers to census subdivision types.
    types = {}
    reader = csv_reader('https://raw.github.com/jpmckinney/ocd-division-ids/ca/mappings/country-ca-types/ca_census_divisions.csv') # @todo switch repository and branch
    for row in reader:
        types[row[0]] = census_division_type_names[row[1].decode('utf8')]
    reader = csv_reader('https://raw.github.com/opencivicdata/ocd-division-ids/master/mappings/country-ca-types/ca_census_subdivisions.csv')
    for row in reader:
        types[row[0]] = census_subdivision_type_names[row[1].decode('utf8')]
    codes = ocd_codes()
    names = ocd_names()
    # OCD identifiers seen so far, to detect duplicates across definitions.
    ocd_divisions = set()
    for slug, config in registry(base).items():
        directory = dirname(config['file'])
        # Validate LICENSE.txt.
        license_path = os.path.join(directory, 'LICENSE.txt')
        if os.path.exists(license_path):
            with open(license_path) as f:
                license_text = f.read().rstrip('\n')
            if config.get('licence_url'):
                licence_url = config['licence_url']
                if licence_url in open_data_licenses or licence_url in some_rights_reserved_licenses:
                    if not terms.get(licence_url) and not terms_re.get(licence_url):
                        print "%-50s No LICENSE.txt template for License URL %s" % (slug, licence_url)
                    elif terms.get(licence_url) and license_text != terms[licence_url] or terms_re.get(licence_url) and not terms_re[licence_url].search(license_text):
                        print "%-50s Expected LICENSE.txt to match license-specific template" % slug
                elif licence_url in all_rights_reserved_licenses:
                    if not all_rights_reserved_terms_re.search(license_text):
                        print '%-50s Expected LICENSE.txt to match "all rights reserved" template' % slug
                else:
                    print '%-50s Unrecognized License URL %s' % (slug, licence_url)
            elif not all_rights_reserved_terms_re.search(license_text):
                print '%-50s Expected LICENSE.txt to match "all rights reserved" template' % slug
        # Check for invalid keys, non-unique or empty values.
        invalid_keys = set(config.keys()) - valid_keys
        if invalid_keys:
            print "%-50s Unrecognized key: %s" % (slug, ', '.join(invalid_keys))
        values = [value for key, value in config.items() if key != 'metadata']
        if len(values) > len(set(values)):
            print "%-50s Non-unique values" % slug
        for key, value in config.items():
            if not value:
                print "%-50s Empty value for %s" % (slug, key)
        # Check for missing required keys.
        for key in ('domain', 'last_updated', 'name_func', 'authority', 'encoding'):
            if not config.get(key):
                print "%-50s Missing %s" % (slug, key)
        if not config.get('source_url') and (config.get('licence_url') or config.get('data_url')):
            print "%-50s Missing source_url" % slug
        if config.get('source_url') and not config.get('licence_url') and not config.get('data_url'):
            print "%-50s Missing licence_url or data_url" % slug
        # Validate fields.
        for key in ('name', 'singular'):
            if config.get(key):
                print "%-50s Expected %s to be missing" % (slug, key)
        if config.get('encoding') and config['encoding'] != 'iso-8859-1':
            print "%-50s Expected encoding to be iso-8859-1 not %s" % (slug, config['encoding'])
        if slug not in ('Census divisions', 'Census subdivisions'):
            if config.get('metadata'):
                # Check for invalid keys or empty values.
                invalid_keys = set(config['metadata'].keys()) - valid_metadata_keys
                if invalid_keys:
                    print "%-50s Unrecognized key: %s" % (slug, ', '.join(invalid_keys))
                for key, value in config['metadata'].items():
                    if not value:
                        print "%-50s Empty value for %s" % (slug, key)
                ocd_division, geographic_code = get_ocd_division(slug, config)
                if ocd_division:
                    # Ensure ocd_division is unique.
                    if ocd_division in ocd_divisions:
                        raise Exception('%s: Duplicate ocd_division %s' % (slug, ocd_division))
                    else:
                        ocd_divisions.add(ocd_division)
                    sections = ocd_division.split('/')
                    ocd_type, ocd_type_id = sections[-1].split(':')
                    # Validate domain.
                    name = names[ocd_division]
                    if ocd_type == 'country':
                        expected = 'Federal electoral districts'
                        if slug != expected:
                            print "%-50s Expected slug to be %s" % (slug, expected)
                        if config['domain'] != name:
                            print "%-50s Expected domain to be %s not %s" % (slug, name, config['domain'])
                        expected = 'Her Majesty the Queen in Right of Canada'
                        if config['authority'] != expected:
                            print "%-50s Expected authority to be %s not %s" % (slug, expected, config['authority'])
                    elif ocd_type in ('province', 'territory'):
                        expected = '%s electoral districts' % name
                        if slug != expected:
                            print "%-50s Expected slug to be %s" % (slug, expected)
                        if config['domain'] != name:
                            print "%-50s Expected domain to be %s not %s" % (slug, name, config['domain'])
                        expected = 'Her Majesty the Queen in Right of %s' % name
                        if config['authority'] != expected:
                            print "%-50s Expected authority to be %s not %s" % (slug, expected, config['authority'])
                    elif ocd_type in ('cd', 'csd'):
                        province_or_territory_code = ocd_type_id[:2]
                        province_or_territory_abbreviation = codes[province_or_territory_code].split(':')[-1].upper()
                        # '24' is Quebec, whose divisions use French terminology.
                        if province_or_territory_code == '24':
                            expected = re.compile('\A%s (boroughs|districts)\Z' % name)
                        else:
                            expected = re.compile('\A%s (districts|divisions|wards)\Z' % name)
                        if not expected.search(slug):
                            print "%-50s Expected slug to match %s" % (slug, expected.pattern)
                        expected = '%s, %s' % (name, province_or_territory_abbreviation)
                        if config['domain'] != expected:
                            print "%-50s Expected domain to be %s not %s" % (slug, expected, config['domain'])
                        if province_or_territory_code == '24':
                            preposition = 'de'
                        else:
                            preposition = 'of'
                        expected = '%s %s %s' % (types[ocd_division], preposition, name)
                        if config['authority'] != expected and config['authority'] not in authorities:
                            print "%-50s Expected authority to be %s not %s" % (slug, expected, config['authority'])
                    elif ocd_type == 'arrondissement':
                        census_subdivision_ocd_division = '/'.join(sections[:-1])
                        census_subdivision_name = names[census_subdivision_ocd_division]
                        province_or_territory_code = census_subdivision_ocd_division.split(':')[-1][:2]
                        province_or_territory_abbreviation = codes[province_or_territory_code].split(':')[-1].upper()
                        expected = '%s districts' % name
                        if slug != expected:
                            print "%-50s Expected slug to be %s" % (slug, expected)
                        expected = '%s, %s, %s' % (name, census_subdivision_name, province_or_territory_abbreviation)
                        if config['domain'] != expected:
                            print "%-50s Expected domain to be %s not %s" % (slug, expected, config['domain'])
                        if province_or_territory_code == '24':
                            preposition = 'de'
                        else:
                            preposition = 'of'
                        expected = '%s %s %s' % (types[census_subdivision_ocd_division], preposition, census_subdivision_name)
                        if config['authority'] != expected:
                            print "%-50s Expected authority to be %s not %s" % (slug, expected, config['authority'])
                    else:
                        raise Exception('%s: Unrecognized OCD type %s' % (slug, ocd_type))
            else:
                print "%-50s Missing metadata" % slug
# Check that the source, data and license URLs work.
@task
def urls(base='.'):
    """HEAD-check every source_url, licence_url and data_url in the registry,
    printing the status code of anything that doesn't answer 200; FTP URLs
    are checked with a directory listing instead."""
    for slug, config in registry(base).items():
        for key in ('source_url', 'licence_url', 'data_url'):
            if config.get(key):
                url = config[key]
                result = urlparse(url)
                if result.scheme == 'ftp':
                    ftp = FTP(result.hostname)
                    ftp.login(result.username, result.password)
                    ftp.cwd(os.path.dirname(result.path))
                    if os.path.basename(result.path) not in ftp.nlst():
                        print '404 %s' % url
                    ftp.quit()
                else:
                    try:
                        arguments = {}
                        if result.username:
                            # Strip inline credentials from the URL and pass them separately.
                            url = '%s://%s%s' % (result.scheme, result.hostname, result.path)
                            arguments['auth'] = (result.username, result.password)
                        response = requests.head(url, **arguments)
                        status_code = response.status_code
                        if status_code != 200:
                            print '%d %s' % (status_code, url)
                    except requests.exceptions.ConnectionError:
                        print '404 %s' % url
# Update any out-of-date shapefiles.
@task
def shapefiles(base='.'):
    """Download and process boundary files that are newer than our copies.

    For each registered definition with a ``data_url``: compare the remote
    last-modified timestamp to ``config['last_updated']`` and, if the remote is
    newer, download the file and hand it to ``process``. Side effects: deletes
    and rewrites files under each definition's directory, stages changes in
    git, and shells out to ogrinfo/ogr2ogr. Python 2 only (print statements).
    """
    def process(slug, config, url, data_file_path):
        # Converts a downloaded KML/KMZ/ZIP into a 2D ESRI shapefile in the
        # definition's directory and stages the result in git.
        # NOTE: reads `last_updated` from the enclosing scope at call time
        # (set by the download loop below before `process` is invoked).
        from glob import glob
        from zipfile import ZipFile, BadZipfile
        from git import Repo

        # We can only process KML, KMZ and ZIP files.
        extension = os.path.splitext(data_file_path)[1]
        if extension in ('.kml', '.kmz', '.zip'):
            repo = Repo('.')
            index = repo.index
            directory = dirname(config['file'])

            # Remove old files (everything except metadata and raw downloads).
            for basename in os.listdir(directory):
                if basename not in ('.DS_Store', 'definition.py', 'LICENSE.txt', 'data.kml', 'data.kmz', 'data.zip'):
                    os.unlink(os.path.join(directory, basename))
                    index.remove([os.path.join(directory, basename)])

            files_to_add = []

            # Unzip any zip file.
            error_thrown = False
            if extension == '.zip':
                try:
                    zip_file = ZipFile(data_file_path)
                    for name in zip_file.namelist():
                        # Flatten the zip file hierarchy.
                        extension = os.path.splitext(name)[1]
                        if extension in ('.kml', '.kmz'):
                            basename = 'data%s' % extension  # assumes one KML or KMZ file per archive
                        else:
                            basename = os.path.basename(name)  # assumes no collisions across hierarchy
                        with open(os.path.join(directory, basename), 'wb') as f:
                            f.write(zip_file.read(name))
                        if extension not in ('.kml', '.kmz'):
                            files_to_add.append(os.path.join(directory, basename))
                except BadZipfile:
                    error_thrown = True
                    print 'Bad ZIP file %s\n' % url
                finally:
                    # The archive itself is never kept, even on error.
                    os.unlink(data_file_path)

            # Unzip any KMZ file.
            kmz_file_path = os.path.join(directory, 'data.kmz')
            if not error_thrown and os.path.exists(kmz_file_path):
                try:
                    zip_file = ZipFile(kmz_file_path)
                    for name in zip_file.namelist():
                        # A KMZ file contains a single KML file and other supporting files.
                        # @see https://developers.google.com/kml/documentation/kmzarchives
                        if os.path.splitext(name)[1] == '.kml':
                            with open(os.path.join(directory, 'data.kml'), 'wb') as f:
                                f.write(zip_file.read(name))
                except BadZipfile:
                    error_thrown = True
                    print 'Bad KMZ file %s\n' % url
                finally:
                    os.unlink(kmz_file_path)

            if not error_thrown:
                # Convert any KML to shapefile.
                kml_file_path = os.path.join(directory, 'data.kml')
                if os.path.exists(kml_file_path):
                    result = run('ogrinfo -q %s | grep -v "3D Point"' % kml_file_path, hide='out').stdout
                    if result.count('\n') > 1:
                        print 'Too many layers %s' % url
                    else:
                        layer = re.search('\A\d+: (\S+)', result).group(1)
                        run('ogr2ogr -f "ESRI Shapefile" %s %s -nlt POLYGON %s' % (directory, kml_file_path, layer), echo=True)
                        # Stage the generated .dbf/.prj/.shp/.shx parts.
                        for name in glob(os.path.join(directory, '*.[dps][bhr][fjpx]')):
                            files_to_add.append(name)
                        os.unlink(kml_file_path)

                # Merge multiple shapefiles into one.
                names = glob(os.path.join(directory, '*.shp'))
                if len(names) > 1:
                    for name in names:
                        run('ogr2ogr -f "ESRI Shapefile" %s %s -update -append -nln Boundaries' % (directory, name), echo=True)
                        basename = os.path.splitext(os.path.basename(name))[0]
                        # Remove the merged-in source shapefile's parts.
                        for name in glob(os.path.join(directory, '%s.[dps][bhr][fjnpx]' % basename)):
                            files_to_add.remove(name)
                            os.unlink(name)

                # Convert any 3D shapefile into 2D.
                shp_file_path = glob(os.path.join(directory, '*.shp'))
                if shp_file_path:
                    shp_file_path = shp_file_path[0]
                if shp_file_path and os.path.exists(shp_file_path):
                    result = run('ogrinfo -q %s' % shp_file_path, hide='out').stdout
                    if result.count('\n') > 1:
                        print 'Too many layers %s' % url
                    elif re.search('3D Polygon', result):
                        run('ogr2ogr -f "ESRI Shapefile" %s %s -nlt POLYGON -overwrite' % (directory, shp_file_path), echo=True)

                    # Replace "Double_Stereographic" with "Oblique_Stereographic",
                    # or fetch a replacement PRJ if the definition supplies one.
                    prj_file_path = os.path.splitext(shp_file_path)[0] + '.prj'
                    if prj_file_path and os.path.exists(prj_file_path):
                        with open(prj_file_path) as f:
                            prj = f.read()
                        if 'Double_Stereographic' in prj:
                            with open(prj_file_path, 'w') as f:
                                f.write(prj.replace('Double_Stereographic', 'Oblique_Stereographic'))
                    elif config.get('prj'):
                        with open(prj_file_path, 'w') as f:
                            f.write(requests.get(config['prj']).content)
                        files_to_add.append(prj_file_path)
                    else:
                        print 'No PRJ file %s' % url

                    # Run any additional commands on the shapefile.
                    if config.get('ogr2ogr'):
                        run('ogr2ogr -f "ESRI Shapefile" -overwrite %s %s %s' % (directory, shp_file_path, config['ogr2ogr']), echo=True)

                # Drop staged paths that no longer exist on disk.
                for name in list(files_to_add):
                    if not os.path.exists(name):
                        files_to_add.remove(name)

                # Add files to git.
                index.add(files_to_add)

                # Update last updated timestamp in definition.py.
                definition_path = os.path.join(directory, 'definition.py')
                with open(definition_path) as f:
                    definition = f.read()
                with open(definition_path, 'w') as f:
                    f.write(re.sub('(?<=last_updated=date\()[\d, ]+', last_updated.strftime('%Y, %-m, %-d'), definition))

                # Print notes.
                notes = []
                if config.get('notes'):
                    notes.append(config['notes'])
                if notes:
                    print '%s\n%s\n' % (slug, '\n'.join(notes))
        else:
            print 'Unrecognized extension %s\n' % url

    from datetime import datetime
    from rfc6266 import parse_headers

    # Retrieve shapefiles.
    for slug, config in registry(base).items():
        if config.get('data_url'):
            url = config['data_url']
            result = urlparse(url)
            if result.scheme == 'ftp':
                # Get the last modified timestamp.
                ftp = FTP(result.hostname)
                ftp.login(result.username, result.password)
                last_modified = ftp.sendcmd('MDTM %s' % result.path)
                # Parse the timestamp as a date (skip the "213 " reply code).
                last_updated = datetime.strptime(last_modified[4:], '%Y%m%d%H%M%S').date()
                if config['last_updated'] < last_updated:
                    # Determine the file extension.
                    extension = os.path.splitext(url)[1]
                    # Set the new file's name.
                    data_file_path = os.path.join(dirname(config['file']), 'data%s' % extension)
                    # Download new file.
                    ftp.retrbinary('RETR %s' % result.path, open(data_file_path, 'wb').write)
                    ftp.quit()
                    process(slug, config, url, data_file_path)
            else:
                # Get the last modified timestamp.
                arguments = {'allow_redirects': True}
                if result.username:
                    # Strip credentials from the URL; pass via basic auth.
                    url = '%s://%s%s' % (result.scheme, result.hostname, result.path)
                    arguments['auth'] = (result.username, result.password)
                response = requests.head(url, **arguments)
                last_modified = response.headers.get('last-modified')
                # Parse the timestamp as a date; assume "now" if absent.
                if last_modified:
                    last_updated = datetime.strptime(last_modified, '%a, %d %b %Y %H:%M:%S GMT')
                else:
                    last_updated = datetime.now()
                last_updated = last_updated.date()

                if config['last_updated'] > last_updated:
                    print '%s are more recent than the source\n' % slug
                elif config['last_updated'] < last_updated:
                    # Determine the file extension from Content-Disposition
                    # when present, otherwise from the URL.
                    if response.headers.get('content-disposition'):
                        filename = parse_headers(response.headers['content-disposition']).filename_unsafe
                    else:
                        filename = url
                    extension = os.path.splitext(filename)[1].lower()
                    # Set the new file's name.
                    data_file_path = os.path.join(dirname(config['file']), 'data%s' % extension)
                    # Download new file.
                    arguments['stream'] = True
                    response = requests.get(url, **arguments)
                    with open(data_file_path, 'wb') as f:
                        for chunk in response.iter_content():
                            f.write(chunk)
                    process(slug, config, url, data_file_path)
# Check that all ScraperWiki scrapers are in Represent.
@task
def scraperwiki():
    """Cross-check ScraperWiki scrapers against the Represent API.

    Scrapes the "cdnpoli" tag listing and the ScraperWiki user API, fetches
    the Represent representative-set and candidate listings, and prints the
    slugs present on one side but not the other. Python 2 only (print
    statements). No return value.
    """
    import lxml.html

    ignore_slugs = set([
        # Not relevant to Represent.
        'canadian_federal_bills_wip',
        'seao',
        'seao_details',
        'trial_sg_company_numbers',
        'trial_us_nv_company_numbers',
        'trial_us_wy_company_numbers_1',
        # Past elections.
        'bc_2013_candidates_1',
        # Obsolete scrapers.
        'halifax_city_councillors',
        'ottawa-mayor-and-councillors',
        'quebec_council',
        'sherbrooke',
        'winnipeg_city_council',
    ])

    # Collect the slugs of ScraperWiki scrapers tagged with "cdnpoli",
    # following the "next" pagination links until exhausted.
    tagged_slugs = set()
    query_string = ''
    while True:
        document = lxml.html.fromstring(requests.get('https://classic.scraperwiki.com/tags/cdnpoli%s' % query_string).content)
        tagged_slugs.update(href.split('/')[2] for href in document.xpath('//*[@class="screenshot"]/@href'))
        href = document.xpath('//*[@class="next"]/@href')
        if href:
            query_string = href[0]
        else:
            break

    # Slugs of all scrapers the user has a role on.
    response = requests.get('https://api.scraperwiki.com/api/1.0/scraper/getuserinfo?format=jsondict&username=jpmckinney').json()[0]
    scraperwiki_slugs = set(item for sublist in response['coderoles'].values() for item in sublist)
    # Slugs referenced by Represent (derived from each scraperwiki_url).
    response = requests.get('http://represent.opennorth.ca/representative-sets/?limit=0').json()['objects']
    represent_slugs = set(os.path.basename(os.path.dirname(item['scraperwiki_url'])) for item in response)
    # Candidate scrapers are expected mismatches; ignore them.
    response = requests.get('http://represent.opennorth.ca/candidates/?limit=0').json()['objects']
    ignore_slugs.update(os.path.basename(os.path.dirname(item['scraperwiki_url'])) for item in response)

    messages = {
        'On ScraperWiki but not on Represent': scraperwiki_slugs - represent_slugs - ignore_slugs,
        'On Represent but not on ScraperWiki': represent_slugs - scraperwiki_slugs,
        'Tagged "cdnpoli" on ScraperWiki but not on Represent': tagged_slugs - scraperwiki_slugs - represent_slugs - ignore_slugs,
        'On Represent but not tagged "cdnpoli" on ScraperWiki': represent_slugs - tagged_slugs,
    }
    for message, slugs in messages.items():
        if slugs:
            print '%s:' % message
            for slug in slugs:
                print 'https://classic.scraperwiki.com/scrapers/%s/' % slug
            print
# Update the spreadsheet for tracking progress on data collection.
@task
def spreadsheet(base='.', private_data_base='../represent-canada-private-data'):
import sys
codes = ocd_codes()
names = ocd_names()
rows = {}
# Boundary sets
# @todo loop over private_data_base as well (private = 'N')
private = 'Y'
for slug, config in registry(base).items():
if config.get('metadata'):
ocd_division, geographic_code = get_ocd_division(slug, config)
if ocd_division:
sections = ocd_division.split('/')
ocd_type, ocd_type_id = sections[-1].split(':')
# Determine province or territory.
if ocd_type == 'country':
province_or_territory = None
elif ocd_type in ('province', 'territory'):
province_or_territory = ocd_division
elif ocd_type in ('cd', 'csd'):
province_or_territory = codes[ocd_type_id[:2]]
elif ocd_type == 'arrondissement':
province_or_territory = codes[sections[-2].split(':')[-1][:2]]
if province_or_territory:
province_or_territory = province_or_territory.split(':')[-1].upper()
row = {
'OCD': ocd_division,
'Geographic code': geographic_code,
'Geographic name': names[ocd_division],
'Province or territory': province_or_territory,
'Shapefile?': 'Y',
# Columns used once requested.
'Contact': None, # manual
'Highrise URL': None, # manual
'Request notes': '', # manual if not received
# Columns used once received.
'Last boundary': config.get('last_updated'),
'Next boundary': None, # manual
'Permission to distribute': permission,
'Received via': None, # manual if MFIPPA
'License URL': config.get('licence_url'),
'Denial notes': None, # manual
}
if config.get('data_url'):
row['Contact'] = 'N/A'
row['Received via'] = 'online'
else:
# @todo reconstruct contact from LICENSE.txt
row['Received via'] = 'email' # @todo MFIPPA
directory = dirname(config['file'])
with open(os.path.join(directory, 'LICENSE.txt')) as f:
license_text = f.read().rstrip('\n')
if config.get('licence_url'):
licence_url = config['licence_url']
if licence_url in open_data_licenses:
row['Type of license'] = 'Open'
elif licence_url in some_rights_reserved_licenses:
row['Type of license'] = 'Most rights reserved'
elif licence_url in all_rights_reserved_licenses:
row['Type of license'] = 'All rights reserved'
else:
# @todo License agreements
row['Type of license'] = 'Unlicensed'
rows.append(row)
else:
print 'No OCD division for %s' % slug
# Representative sets
# reader = csv_reader('https://raw.github.com/opencivicdata/ocd-division-ids/master/mappings/country-ca-abbr/ca_provinces_and_territories.csv')
# abbreviations = [row[1] for row in reader]
# Map Standard Geographical Classification codes to ScraperWiki URLs.
# @see https://github.com/opennorth/represent-canada/issues/60
# scraperwiki_urls = {}
# for representative_set in requests.get('http://represent.opennorth.ca/representative-sets/?limit=0').json()['objects']:
# boundary_set_url = representative_set['related']['boundary_set_url']
# if boundary_set_url:
# boundary_set = requests.get('http://represent.opennorth.ca%s' % boundary_set_url).json()
# if boundary_set.get('metadata') and boundary_set['metadata'].get('geographic_code'):
# scraperwiki_urls[boundary_set['metadata']['geographic_code']] = representative_set['scraperwiki_url']
# else:
# print '%-65s No metadata' % boundary_set_url
# else:
# print "%-65s No boundary_set_url" % representative_set['url']
# @todo track if scraped in bulk via Represent API?
# Map to Pupa modules.
# @todo check for pupa scrapers
# geographic_name_re = re.compile('\A(.+) \((.+)\)\Z')
# reader = csv_reader('http://www12.statcan.gc.ca/census-recensement/2011/dp-pd/hlt-fst/pd-pl/FullFile.cfm?T=301&LANG=Eng&OFT=CSV&OFN=98-310-XWE2011002-301.CSV')
# writer = csv.writer(sys.stdout)
# reader.next() # title
# reader.next() # headers
# rows = {}
# for row in reader:
# if row:
# result = geographic_name_re.search(row[1])
# if result:
# name = result.group(1)
# province_or_territory = result.group(2)
# if province_or_territory not in abbreviations:
# raise Exception('Unrecognized province or territory "%s" in "%s"' % (province_or_territory, row[1]))
# elif row[1] == 'Canada':
# name = 'Canada'
# province_or_territory = 'Canada'
# else:
# raise Exception('Unrecognized name "%s"' % row[1])
# writer.writerow([
# row[0],
# name,
# province_or_territory,
# row[4],
# scraperwiki_urls.get(row[0]),
# ])
# else:
# break
# @todo compare against live spreadsheet, log any conflicts
# Reads the spreadsheet for tracking progress on data collection.
# reader = csv_reader('https://docs.google.com/spreadsheet/pub?key=0AtzgYYy0ZABtdGpJdVBrbWtUaEV0THNUd2JIZ1JqM2c&single=true&gid=18&output=csv')
# reader.next() # headers
# live = dict((row[0], row[1:]) for row in reader)
# if no conflicts, update live spreadsheet?
# https://code.google.com/p/gdata-python-client/
|
from datetime import datetime
from haystack import indexes
from zhihu.news.models import News
from zhihu.articles.models import Article
from zhihu.qa.models import Question
from django.contrib.auth import get_user_model
from taggit.models import Tag
class ArticleIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for the Article model."""

    text = indexes.CharField(
        document=True,
        use_template=True,
        template_name='search/articles_text.txt',
    )

    def get_model(self):
        """Return the model class this index covers."""
        return Article

    def index_queryset(self, using=None):
        """Queryset used when the index is (re)built.

        Only published articles ("P") that are not dated in the future.
        NOTE(review): datetime.now() is naive — confirm USE_TZ setting.
        """
        qs = self.get_model().objects
        return qs.filter(status="P", updated_at__lte=datetime.now())
class NewsIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for the News model."""

    text = indexes.CharField(
        document=True,
        use_template=True,
        template_name='search/news_text.txt',
    )

    def get_model(self):
        """Return the model class this index covers."""
        return News

    def index_queryset(self, using=None):
        """Queryset used when the index is (re)built.

        Top-level news only (replies excluded), not dated in the future.
        NOTE(review): datetime.now() is naive — confirm USE_TZ setting.
        """
        qs = self.get_model().objects
        return qs.filter(reply=False, updated_at__lte=datetime.now())
class QuestionIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for the Question model."""

    text = indexes.CharField(
        document=True,
        use_template=True,
        template_name='search/questions_text.txt',
    )

    def get_model(self):
        """Return the model class this index covers."""
        return Question

    def index_queryset(self, using=None):
        """Queryset used when the index is (re)built (no future-dated rows)."""
        return self.get_model().objects.filter(updated_at__lte=datetime.now())
class TagsIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for taggit's Tag model."""

    text = indexes.CharField(
        document=True,
        use_template=True,
        template_name='search/tags_text.txt',
    )

    def get_model(self):
        """Return the model class this index covers."""
        return Tag

    def index_queryset(self, using=None):
        """Queryset used when the index is (re)built: every tag."""
        return self.get_model().objects.all()
|
import py
import pytest
import io
from atsim.potentials.config import Configuration
from atsim.potentials.config._config_parser import ConfigParser
from .._runlammps import needsLAMMPS, extractLAMMPSEnergy, runLAMMPS, lammps_run_fluorite_fixture, lammps_run_fixture
from .._rundlpoly import needsDLPOLY, runDLPoly, extractDLPOLYEnergy
from ._common import _get_dlpoly_resource_dir, _get_lammps_resource_dir
def test_configuration_setfl_synonyms():
    """'lammps_eam_alloy' must be accepted as a synonym of the 'setfl' target."""
    aspot_path = _get_lammps_resource_dir().join("CRG_U_Th.aspot")
    with io.open(aspot_path.strpath, encoding="utf8") as config_file:
        assert ConfigParser(config_file).tabulation.target == u"setfl"

    # Rewrite the [Tabulation] target as the synonym and round-trip through
    # an in-memory ini file.
    import configparser
    ini = configparser.ConfigParser()
    ini.read(u"{}".format(aspot_path.strpath))
    ini[u"Tabulation"][u'target'] = u"lammps_eam_alloy"
    buf = io.StringIO()
    ini.write(buf)

    # Sanity check: the synonym really is what the modified file contains.
    buf.seek(0)
    reread = configparser.ConfigParser()
    reread.read_file(buf)
    assert reread[u"Tabulation"][u'target'] == u"lammps_eam_alloy"

    # The parser must still normalise the synonym to "setfl".
    buf.seek(0)
    assert ConfigParser(buf).tabulation.target == u"setfl"
@needsLAMMPS
def test_lammps_setfl_crg_tabulate_ThO2(lammps_run_fluorite_fixture):
    """Tabulate the CRG Th/O model to setfl and check the LAMMPS fluorite energy."""
    tmpdir = lammps_run_fluorite_fixture
    cfgobj = Configuration()
    # Fix: close the .aspot file deterministically (handle was leaked before).
    with io.open(_get_lammps_resource_dir().join("CRG_U_Th.aspot").strpath, encoding="utf8") as config_file:
        tabulation = cfgobj.read(config_file)

    with tmpdir.join("table.eam.alloy").open("w") as outfile:
        tabulation.write(outfile)

    with lammps_run_fluorite_fixture.join("potentials.lmpinc").open('w') as potfile:
        potfile.write(u"""variable O equal 1
set type 1 charge -1.1104
set type 2 charge 2.2208
kspace_style pppm 1.0e-6
pair_style hybrid/overlay coul/long 10.0 eam/alloy
pair_coeff * * coul/long
pair_coeff * * eam/alloy table.eam.alloy O Th
""")

    runLAMMPS(cwd=tmpdir.strpath)
    energy = extractLAMMPSEnergy(cwd=tmpdir.strpath)
    expect = -157.552359260862
    assert pytest.approx(expect, rel=1e-3) == energy
@needsLAMMPS
def test_lammps_setfl_crg_tabulate_UO2(lammps_run_fluorite_fixture):
    """Tabulate the CRG U/O model to setfl and check the LAMMPS fluorite energy."""
    tmpdir = lammps_run_fluorite_fixture
    cfgobj = Configuration()
    # Fix: close the .aspot file deterministically (handle was leaked before).
    with io.open(_get_lammps_resource_dir().join("CRG_U_Th.aspot").strpath, encoding="utf8") as config_file:
        tabulation = cfgobj.read(config_file)

    with tmpdir.join("table.eam.alloy").open("w") as outfile:
        tabulation.write(outfile)

    with lammps_run_fluorite_fixture.join("potentials.lmpinc").open('w') as potfile:
        potfile.write(u"""variable O equal 1
set type 1 charge -1.1104
set type 2 charge 2.2208
kspace_style pppm 1.0e-6
pair_style hybrid/overlay coul/long 10.0 eam/alloy
pair_coeff * * coul/long
pair_coeff * * eam/alloy table.eam.alloy O U
""")

    runLAMMPS(cwd=tmpdir.strpath)
    energy = extractLAMMPSEnergy(cwd=tmpdir.strpath)
    expect = -163.072240194504
    assert pytest.approx(expect) == energy
@needsDLPOLY
def test_dlpoly_TABEAM_tabulate_CeO2(tmpdir):
    """Tabulate the CRG Ce/O model to TABEAM and check the DL_POLY energy."""
    # Copy the canned DL_POLY inputs into the tmpdir.
    rd = _get_dlpoly_resource_dir()
    files = [
        ("CONFIG_CeO2", "CONFIG"),
        ("CONTROL_CeO2", "CONTROL"),
        ("FIELD_CeO2", "FIELD")
    ]
    for src, dest in files:
        src = rd.join(src)
        dest = tmpdir.join(dest)
        src.copy(dest)

    # Tabulate the TABEAM potential.
    cfgobj = Configuration()
    # Fix: close the .aspot file deterministically (handle was leaked before).
    with io.open(rd.join("CRG_Ce.aspot").strpath, encoding="utf8") as config_file:
        tabulation = cfgobj.read(config_file)
    with tmpdir.join("TABEAM").open("w") as outfile:
        tabulation.write(outfile)

    runDLPoly(cwd=tmpdir.strpath)
    actual = extractDLPOLYEnergy(cwd=tmpdir.strpath)
    expect = -532.6778
    assert pytest.approx(expect) == actual
@needsLAMMPS
def test_lammps_EAM_FS_tabulate_AlFe(lammps_run_fixture):
    """Our eam/fs tabulation must reproduce the energy of the reference table."""
    tmpdir = lammps_run_fixture
    cfgobj = Configuration()
    # Fix: close the .aspot file deterministically (handle was leaked before).
    with io.open(_get_lammps_resource_dir().join("AlFe_setfl_fs.aspot").strpath, encoding="utf8") as config_file:
        tabulation = cfgobj.read(config_file)

    _get_lammps_resource_dir().join("random_Al_Fe.lmpstruct").copy(tmpdir.join("structure.lmpstruct"))
    _get_lammps_resource_dir().join("AlFe_mm.eam.fs").copy(tmpdir.join("table.eam.fs"))

    with tmpdir.join("potentials.lmpinc").open("w") as potfile:
        potfile.write(u"pair_style eam/fs\n")
        potfile.write(u"pair_coeff * * table.eam.fs Al Fe\n")

    # Reference energy from the published AlFe table...
    runLAMMPS(cwd=tmpdir.strpath)
    expect = extractLAMMPSEnergy(cwd=tmpdir.strpath)

    # ...must match the energy from our freshly tabulated file.
    with tmpdir.join("table.eam.fs").open("w") as outfile:
        tabulation.write(outfile)
    runLAMMPS(cwd=tmpdir.strpath)
    actual = extractLAMMPSEnergy(cwd=tmpdir.strpath)
    assert pytest.approx(expect) == actual
@needsDLPOLY
def test_dlpoly_EAM_FS_tabulate_AlFe(tmpdir):
    """Tabulate the AlFe Finnis-Sinclair model for DL_POLY and check the energy."""
    aspot = _get_lammps_resource_dir().join("AlFe_setfl_fs.aspot")
    from atsim.potentials.config._config_parser import _RawConfigParser

    # Retarget the tabulation at DL_POLY's EAM-FS format via an in-memory ini.
    raw_ini = _RawConfigParser()
    raw_ini.read(u"{}".format(aspot.strpath))
    raw_ini[u"Tabulation"][u'target'] = u"DL_POLY_EAM_fs"
    buf = io.StringIO()
    raw_ini.write(buf)
    buf.seek(0)

    tabulation = Configuration().read(buf)
    with tmpdir.join("TABEAM").open("w") as outfile:
        tabulation.write(outfile)

    # Canned DL_POLY inputs for the random Al/Fe structure.
    dl_rd = _get_dlpoly_resource_dir()
    for name in ("CONTROL", "CONFIG", "FIELD"):
        dl_rd.join("{}_random_Al_Fe".format(name)).copy(tmpdir.join(name))

    runDLPoly(cwd=tmpdir.strpath)
    actual = extractDLPOLYEnergy(cwd=tmpdir.strpath)
    assert pytest.approx(-31.769632) == actual
def test_custom_species_data():
    """[Species] overrides must reach the tabulated EAM potential objects."""
    aspot = _get_lammps_resource_dir().join("CRG_U_Th.aspot")
    from atsim.potentials.config._config_parser import _RawConfigParser

    # Inject a [Species] section with per-species overrides.
    raw_ini = _RawConfigParser()
    raw_ini.read(u"{}".format(aspot.strpath))
    raw_ini.add_section(u'Species')
    raw_ini[u"Species"][u"U.atomic_mass"] = u"235"
    raw_ini[u"Species"][u"U.lattice_constant"] = u"5.678"
    raw_ini[u"Species"][u"Th.lattice_type"] = u"bcc"
    buf = io.StringIO()
    raw_ini.write(buf)
    buf.seek(0)

    tabulation = Configuration().read(buf)
    by_species = {p.species: p for p in tabulation.eam_potentials}

    upot = by_species[u"U"]
    assert pytest.approx(235.0) == upot.mass
    assert pytest.approx(5.678) == upot.latticeConstant
    assert by_species[u"Th"].latticeType == u'bcc'
|
import tensorflow as tf

# tf.concat: every dimension except the concatenation axis must match.
x = tf.ones([4, 35, 8])
y = tf.ones([2, 35, 8])
joined = tf.concat([x, y], axis=0)  # axis 0 merged; axes 1 (35) and 2 (8) must agree
print(joined.shape)

# In[]:
x = tf.ones([4, 35, 8])
y = tf.ones([4, 35, 8])
along_students = tf.concat([x, y], axis=1)  # axes 0 (4) and 2 (8) must agree
print(along_students.shape)
along_scores = tf.concat([x, y], axis=2)  # axes 0 (4) and 1 (35) must agree
print(along_scores.shape)

# In[]:
# School1: [classes, students, scores]
# School2: [classes, students, scores]
# stacked: [schools, classes, students, scores]
# tf.stack requires ALL dimensions of its inputs to match.
x = tf.ones([4, 35, 8])
y = tf.ones([4, 35, 8])
front = tf.stack([x, y], axis=0)  # new leading axis, then merge
print(front.shape)
back = tf.stack([x, y], axis=3)  # new trailing axis, then merge
print(back.shape)

# In[]:
# tf.unstack: fully splits the chosen axis into one tensor per slice.
x = tf.ones([4, 35, 8])
y = tf.ones([4, 35, 8])
stacked = tf.stack([x, y], axis=0)  # (2, 4, 35, 8)
print(stacked.shape)
aa, bb = tf.unstack(stacked, axis=0)  # axis 0 has size 2 -> two (4, 35, 8) tensors
print(aa.shape, bb.shape)
parts = tf.unstack(stacked, axis=1)  # axis 1 has size 4 -> four (2, 35, 8) tensors
print(parts[0].shape, parts[1].shape, parts[3].shape, len(parts))
parts = tf.unstack(stacked, axis=2)  # axis 2 has size 35 -> 35 (2, 4, 8) tensors
print(parts[0].shape, parts[1].shape, parts[34].shape, len(parts))
parts = tf.unstack(stacked, axis=3)  # axis 3 has size 8 -> eight (2, 4, 35) tensors
print(parts[0].shape, parts[1].shape, parts[7].shape, len(parts))

# In[]:
x = tf.ones([4, 35, 8])
y = tf.ones([4, 35, 8])
stacked = tf.stack([x, y], axis=0)  # (2, 4, 35, 8)
print(stacked.shape)
parts = tf.unstack(stacked, axis=3)  # eight (2, 4, 35) tensors
print(parts[0].shape, parts[1].shape, parts[7].shape, len(parts))
print("--------------------------------------------------------------------")
# tf.split is more flexible: the split sizes can be chosen.
halves = tf.split(stacked, axis=3, num_or_size_splits=2)  # 8 split evenly -> two (2, 4, 35, 4)
for piece in halves:
    print(piece.shape)
print("--------------------------------------------------------------------")
# Uneven split of axis 3: sizes [2, 2, 4] -> (2,4,35,2) (2,4,35,2) (2,4,35,4)
pieces = tf.split(stacked, axis=3, num_or_size_splits=[2, 2, 4])
for piece in pieces:
    print(piece.shape)
|
# -*- coding: utf-8 -*-
import scrapy
import os
from iachina.items import IachinaItem
class IachinaSpiderSpider(scrapy.Spider):
    """Crawls old.iachina.cn insurance clauses.

    Drill-down: company list -> product types -> product names -> clause
    names -> clause body. Each level forwards its accumulated names via
    ``Request.meta`` and saves the final clause HTML under ./data/.
    """
    name = "iachina_spider"
    allowed_domains = ["iachina.cn"]
    start_urls = (
        'http://old.iachina.cn/product.php?action=company&ttype=2',
    )
    headers = {
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9,en;q=0.8",
        'upgrade-insecure-requests': "1",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'referer': "http://old.iachina.cn/product.php?action=company&ttype=2",
        'cookie': "PHPSESSID=201ed6740a7231fd0670a7a5be2af784; Hm_lvt_3ea51289017d00e5265ee3f2f37be0a8=1511334288; Hm_lpvt_3ea51289017d00e5265ee3f2f37be0a8=1511336092",
        'proxy-connection': "keep-alive",
        'cache-control': "no-cache",
    }

    def parse(self, response):
        """Company listing page: follow each company and the pagination link."""
        companies = response.css('div.prolist ul li a::text').extract()
        company_url = response.css('div.prolist ul li a::attr(href)').extract()
        for company_name, url in zip(companies, company_url):
            yield scrapy.Request(response.urljoin(url), headers=self.headers,
                                 meta={'company_name': company_name},
                                 callback=self.parse_product_type)
        next_page = response.css('div.cutpage a::attr(href)').extract()
        # The last pagination anchor is 'javascript:;' on the final page.
        if next_page and next_page[-1] != 'javascript:;':
            yield scrapy.Request(response.urljoin(next_page[-1]),
                                 headers=self.headers, callback=self.parse)

    def parse_product_type(self, response):
        """Product-type listing for one company."""
        # Fix: read meta once, BEFORE the loop. Previously company_name was
        # bound only inside the for body, so a page with a next-page link but
        # no item links raised NameError.
        company_name = response.meta['company_name']
        product_types = response.css('div.prolist ul li a::text').extract()
        product_type_url = response.css('div.prolist ul li a::attr(href)').extract()
        for product_type, url in zip(product_types, product_type_url):
            yield scrapy.Request(response.urljoin(url), headers=self.headers,
                                 meta={"company_name": company_name, "product_type": product_type},
                                 callback=self.parse_product_name)
        next_page = response.css('div.cutpage a::attr(href)').extract()
        if next_page and next_page[-1] != 'javascript:;':
            yield scrapy.Request(response.urljoin(next_page[-1]), headers=self.headers,
                                 meta={"company_name": company_name},
                                 callback=self.parse_product_type)

    def parse_product_name(self, response):
        """Product-name listing for one product type."""
        # Fix: read meta before the loop (same NameError hazard as above).
        company_name = response.meta['company_name']
        product_type = response.meta['product_type']
        product_names = response.css('div.prolist ul li a::text').extract()
        product_name_url = response.css('div.prolist ul li a::attr(href)').extract()
        for product_name, url in zip(product_names, product_name_url):
            yield scrapy.Request(response.urljoin(url), headers=self.headers,
                                 meta={"company_name": company_name, "product_type": product_type, "product_name": product_name},
                                 callback=self.parse_clause_name)
        next_page = response.css('div.cutpage a::attr(href)').extract()
        if next_page and next_page[-1] != 'javascript:;':
            yield scrapy.Request(response.urljoin(next_page[-1]), headers=self.headers,
                                 meta={"company_name": company_name, "product_type": product_type},
                                 callback=self.parse_product_name)

    def parse_clause_name(self, response):
        """Clause listing for one product."""
        # Fix: read meta before the loop (same NameError hazard as above).
        company_name = response.meta['company_name']
        product_type = response.meta['product_type']
        product_name = response.meta['product_name']
        clause_names = response.css('div.prolist tr td a::text').extract()
        clauses_urls = response.css('div.prolist tr td a::attr(href)').extract()
        for clause_name, url in zip(clause_names, clauses_urls):
            yield scrapy.Request(response.urljoin(url), headers=self.headers,
                                 meta={"company_name": company_name, "product_type": product_type, "product_name": product_name, "clause_name": clause_name},
                                 callback=self.parse_content)
        next_page = response.css('div.cutpage a::attr(href)').extract()
        if next_page and next_page[-1] != 'javascript:;':
            yield scrapy.Request(response.urljoin(next_page[-1]), headers=self.headers,
                                 meta={"company_name": company_name, "product_type": product_type, "product_name": product_name},
                                 callback=self.parse_clause_name)

    def parse_content(self, response):
        """Final clause page: save the body to disk and emit an item."""
        # NOTE(review): pages appear to be GB2312-encoded; GBK (used for
        # writing below) is a superset, so re-encoding is safe. decode() may
        # still fail for pages that are actually GBK — confirm upstream.
        content = response.body.decode('gb2312')
        item = IachinaItem()
        item['company_name'] = response.meta['company_name']
        item['product_name'] = response.meta['product_name']
        item['product_type'] = response.meta['product_type']
        item['clause_name'] = response.meta['clause_name']
        item['clause_url'] = response.urljoin(response.url)
        item['content'] = content
        filedir = ('./data/{0}/{1}/{2}/{3}/'.format(item['company_name'], item['product_type'], item['product_name'], item['clause_name']))
        print(filedir)
        if not os.path.exists(filedir):
            os.makedirs(filedir)
        # NOTE(review): the saved content is HTML despite the .docx extension.
        filename = filedir + item['clause_name'] + '.docx'
        print(filename)
        # Fix: use a context manager so the file handle is always closed.
        with open(filename, 'w', encoding='gbk') as html:
            html.write(content)
        yield item
|
import logging
from collections import Counter
from antlr4 import *
from tptp_grammar.cnf_formulaLexer import cnf_formulaLexer as Lexer
from tptp_grammar.cnf_formulaParser import cnf_formulaParser as Parser
from tptp_grammar.cnf_formulaListener import cnf_formulaListener as Listener
from questions.utils import timer
log = logging.getLogger(__name__)
def token_counts(c, max_terminals=None):
    """Parse the CNF formula string `c` and return a dict of token statistics.

    Keys: 'literal', 'not', 'equality', 'inequality', 'variable' (occurrence
    counts, descending), 'number', 'symbol' (functor Counter), 'terminals',
    'time'. Raises ValueError on syntax errors; MyListener raises
    MaxTerminalsError if `max_terminals` is exceeded during the parse.
    """
    lexer = Lexer(InputStream(c))
    stream = CommonTokenStream(lexer)
    parser = Parser(stream)
    parser.buildParseTrees = False
    listener = MyListener(max_terminals=max_terminals)
    parser.addParseListener(listener)
    with timer() as t:
        parser.cnf_formula()
    if parser.getNumberOfSyntaxErrors() > 0:
        raise ValueError(f'{parser.getNumberOfSyntaxErrors()} syntax errors occurred while parsing \"{c}\".')
    # Sanity check: the listener visited every on-channel token.
    assert stream.index == stream.getNumberOfOnChannelTokens() - 1 == listener.all_terminals
    return {
        'literal': listener.literals,
        'not': listener.terminals[Parser.Not],
        'equality': listener.terminals[Parser.Infix_equality],
        'inequality': listener.terminals[Parser.Infix_inequality],
        'variable': sorted(listener.variables.values(), reverse=True),
        'number': listener.numbers,
        'symbol': listener.functors,
        'terminals': stream.index,
        'time': t.elapsed,
    }
class MyListener(Listener):
    """Parse listener that tallies terminals, functors, variables, literals
    and numbers, aborting once `max_terminals` terminals have been seen."""

    # Terminal token types counted individually in `self.terminals`.
    tracked_terminal_types = (
        Parser.Not,
        Parser.Infix_equality,
        Parser.Infix_inequality,
    )

    def __init__(self, max_terminals=None):
        super().__init__()
        self.max_terminals = max_terminals
        self.terminals = Counter()
        self.functors = Counter()
        self.variables = Counter()
        self.literals = 0
        self.numbers = 0
        self.all_terminals = 0

    def visitTerminal(self, node: TerminalNode):
        """Count every terminal; track special tokens and functor names."""
        self.all_terminals += 1
        budget = self.max_terminals
        if budget is not None and self.all_terminals > budget:
            raise MaxTerminalsError(budget)
        tok_type = node.symbol.type
        if tok_type in self.tracked_terminal_types:
            self.terminals[tok_type] += 1
        # A functor is an atomic word whose grandparent context is a functor.
        parent = node.parentCtx
        if not (isinstance(parent, Parser.Atomic_wordContext)
                and isinstance(parent.parentCtx, Parser.FunctorContext)):
            return
        text = node.symbol.text
        if tok_type == Parser.Lower_word:
            self.functors[text] += 1
        elif tok_type == Parser.Single_quoted:
            # Strip the surrounding single quotes.
            assert text[0] == '\'' and text[-1] == '\''
            self.functors[text[1:-1]] += 1
        else:
            raise ValueError(f'Unsupported type of functor token: {tok_type}')

    def enterVariable(self, ctx):
        self.variables[ctx.start.text] += 1

    def enterCnf_literal(self, ctx):
        self.literals += 1

    def enterNumber(self, ctx):
        self.numbers += 1
class MaxTerminalsError(ValueError):
    """Raised when a formula exceeds the configured terminal budget."""

    def __init__(self, max_terminals):
        message = f'The formula has more than {max_terminals} terminals.'
        super().__init__(message)
|
# -*- coding: utf-8 -*
import numpy as np
import matplotlib.pyplot as plt
#
# NOTE: requires Python 2.7 (the print statements below use Python 2 syntax).
def calcilate_parametrs(result):
    """Compute confusion-matrix metrics for a binary classifier output.

    The first half of ``result`` is assumed to be ground-truth negative
    (footballers, label 0) and the second half ground-truth positive
    (basketball players, label 1).

    Fixes over the original implementation:
    * a prediction of 1 on a negative sample is a false POSITIVE (the
      original counted it as a false negative, and vice versa);
    * metrics with a zero denominator are reported as 0.0 instead of
      raising ZeroDivisionError;
    * ``len(result)`` is used instead of ``result.size`` so plain lists
      work as well as numpy arrays.

    :param result: sequence of predicted labels (0 / 1).
    :return: dict of counts and derived metrics (original key spelling kept).
    """
    def safe_div(num, den):
        # An undefined ratio (empty class) is reported as 0.0.
        return float(num) / den if den else 0.0

    tp = tn = fp = fn = 0
    half = len(result) // 2
    for i in range(half):                   # ground truth: negative
        if result[i] == 0:
            tn += 1                         # predicted negative -> true negative
        else:
            fp += 1                         # predicted positive -> false positive
    for i in range(half, len(result)):      # ground truth: positive
        if result[i] == 1:
            tp += 1                         # predicted positive -> true positive
        else:
            fn += 1                         # predicted negative -> false negative
    precision = safe_div(tp, tp + fp)
    recal = safe_div(tp, tp + fn)
    params = {
        'TP': tp,
        'TN': tn,
        'FP': fp,
        'FN': fn,
        'alfa': safe_div(fp, tn + fp),      # false-positive rate
        'betta': safe_div(fn, tp + fn),     # false-negative rate
        'accuracy': safe_div(tp + tn, len(result)),
        'precision': precision,
        'recal': recal,
        'f(1-score)': safe_div(2. * precision * recal, precision + recal)}
    return params
def classifier_1(mens):
    """Baseline "classifier": a uniformly random label (0 or 1) per sample.

    Footballers are the negative class, basketball players the positive one.
    """
    return np.random.randint(0, 2, mens.size)
def classifier_2(mens, thresh=190):
    """Threshold classifier: label 1 (basketball player) for heights at or
    above ``thresh`` centimetres, otherwise 0 (footballer)."""
    labels = [0 if height < thresh else 1 for height in mens]
    return np.array(labels)
def main():
    """Generate synthetic height data and compare the two classifiers.

    Converted from Python 2 ``print`` statements (syntax errors under
    Python 3) to the ``print()`` function; the unused ``alfs``/``betts``
    accumulators were dropped.
    """
    mens = np.ones((1000))
    mens[0:500] *= 180      # footballers
    mens[500:1000] *= 200   # basketball players
    noise = np.random.randn(mens.size) * 15
    mens += noise
    res1 = classifier_1(mens)
    print('res1=', calcilate_parametrs(res1))
    print()
    res2 = classifier_2(mens)
    print('res2=', calcilate_parametrs(res2))
    print()
    # threshold = 185
    res3 = classifier_2(mens, 185)
    print('Thresh=185', calcilate_parametrs(res3))
    print()
    # threshold = 195
    res4 = classifier_2(mens, 195)
    print('Thresh=195', calcilate_parametrs(res4))
    # Sweep the threshold over the plausible height range.
    for i in np.arange(180, 201, 1):
        res = classifier_2(mens, i)
        par = calcilate_parametrs(res)
        print('Thresh=', i)
        print(par)
        print()

if __name__ == "__main__":
    main()
import os
import shutil

# Recursively delete Django "migrations" and "__pycache__" directories under
# ./apps/.  Implemented with os.walk/shutil instead of shelling out to
# ``find`` so the script also works where ``find`` is unavailable (Windows).
for root, dirs, _files in os.walk('./apps/', topdown=True):
    for name in list(dirs):
        if name in ('migrations', '__pycache__'):
            shutil.rmtree(os.path.join(root, name), ignore_errors=True)
            dirs.remove(name)  # do not descend into the removed directory
from rest_framework.response import Response
from rest_framework import (generics,
viewsets,
mixins,
filters)
from rest_framework.permissions import (IsAuthenticatedOrReadOnly,
IsAuthenticated)
from rest_framework.generics import get_object_or_404
from rest_framework.parsers import (MultiPartParser,
FormParser,
FileUploadParser)
from rest_framework.decorators import api_view
from apps.adverts.api.permissions import *
from apps.adverts.models import Advert
from apps.users.models import CustomUser
from apps.adverts.api.serializers import *
from apps.users.api.serializers import CustomUserSerializer
from apps.core.utils import generate_unique_slug
@api_view(['GET'])
def general_search(request):
    """General search endpoint: matches the ``search`` query parameter
    against usernames and advert names and returns both result sets."""
    query = request.GET.get("search", None)
    if not query:
        return Response(
            {
                'success': False,
                'message': "You need to pass a query param"
            })
    serializer_context = {"request": request}
    matching_users = CustomUser.objects.all().filter(username__icontains=query)
    matching_adverts = Advert.objects.all().filter(name__icontains=query)
    user_data = CustomUserSerializer(
        matching_users, many=True, context=serializer_context).data
    advert_data = AdvertSerializer(
        matching_adverts, many=True, context=serializer_context).data
    return Response({"users": user_data, "adverts": advert_data})
class AdvertViewSet(mixins.ListModelMixin,
                    mixins.CreateModelMixin,
                    mixins.RetrieveModelMixin,
                    mixins.UpdateModelMixin,
                    mixins.DestroyModelMixin,
                    viewsets.GenericViewSet):
    """CRUD endpoints for adverts, looked up by slug and ordered newest first."""

    queryset = Advert.objects.all().order_by("-date_created")
    lookup_field = 'slug'
    serializer_class = AdvertSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsUserOrReadOnly]
    parser_classes = [MultiPartParser, FormParser]

    def perform_update(self, serializer):
        # Resolve the category by name; 404 when it does not exist.
        category_name = self.request.data["category"]
        serializer.save(
            category=get_object_or_404(Category, name=category_name))

    def perform_create(self, serializer):
        # Attach a unique slug, the requesting user and the resolved category.
        category_name = self.request.data["category"]
        serializer.save(
            slug=generate_unique_slug(self.request.data["name"]),
            user=self.request.user,
            category=get_object_or_404(Category, name=category_name))
class CategoryListAPIView(generics.ListAPIView):
    """List the adverts of one product category, newest first."""

    serializer_class = AdvertSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        category_name = self.kwargs.get("category")
        queryset = Advert.objects.filter(category__name=category_name)
        return queryset.order_by("-date_created")
class SellerShopAPIView(generics.ListAPIView):
    """List every advert created by a given user (their "shop"), newest first."""

    serializer_class = AdvertSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        seller = get_object_or_404(CustomUser, id=self.kwargs.get("user_id"))
        return seller.adverts.all().order_by("-date_created")
from django.db import models
# Create your models here.
from movie_app.models import DateBaseModel
class AllUser(DateBaseModel):
    """Basic person/contact record.

    Inherits from ``movie_app.models.DateBaseModel`` — presumably
    created/updated bookkeeping; confirm in that module.
    """
    first_name = models.CharField(max_length=32)
    # Optional contact details; may be left blank.
    last_name = models.CharField(max_length=32, blank=True, null=True)
    phone = models.CharField(max_length=20, blank=True, null=True)
    mobile = models.CharField(max_length=20, blank=True, null=True)
    # Flag presumably checked for administrative privileges — confirm callers.
    is_admin = models.BooleanField(default=False)
|
import sys
import re
import requests
import spacy
def main(args):
    """Print every GPE (geo-political entity) found in the pages at the
    given URLs.

    Fix: extract_text() returns None on HTTP errors; such URLs are now
    skipped instead of crashing nlp() with a None argument.

    :param args: list of URLs to fetch and scan.
    """
    if len(args) == 0:
        print("usage: \n" + sys.argv[0] + " <url> [url...]")
        exit(0)
    nlp = spacy.load('en')
    for url in args:
        text = extract_text(url)
        if text is None:
            # Faulty HTTP status: nothing to analyse for this URL.
            continue
        doc = nlp(text)
        for ent in doc.ents:
            if ent.label_ == "GPE":
                print(ent.text)
def extract_text(url):
    """Fetch ``url`` and return its visible text with HTML stripped.

    Returns None when the response status indicates an error (>= 300).

    Fixes:
    * status_code is an int — it is now str()'d before concatenation
      (the original raised TypeError on the error branch);
    * the trailing ``(?s)`` inline flags are moved to the front of the
      patterns (global inline flags not at the start are an ``re.error``
      on Python 3.11+).
    """
    print(url)
    response = requests.get(url)
    if response.status_code >= 300:
        print("request returned a faulty status code: " + str(response.status_code))
        return
    text = response.text
    # Drop <script>...</script> blocks and HTML comments (DOTALL so the
    # pattern spans newlines), then all remaining tags and entities.
    text = re.sub(r'(?s)<(script).*?</\1>', ' ', text)
    text = re.sub(r'(?s)<!(--).*?\1>', ' ', text)
    text = re.sub('<.*?>', ' ', text)
    text = re.sub(r'&#\d+;', ' ', text)
    text = re.sub(r'[\s]+', ' ', text)
    return text
if __name__ == '__main__':
    # Guarded entry point: importing this module no longer triggers a run.
    main(sys.argv[1:])
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Hello World
Make Cozmo say 'Hello World' in this simple Cozmo SDK example program.
'''
import cozmo
from cozmo.util import degrees, distance_mm, speed_mmps
import time
def cozmo_program(robot: cozmo.robot.Robot):
    """Scripted "weather report" sketch: Cozmo announces a recording pause,
    then delivers a short forecast with head/lift gestures and hands back."""
    robot.say_text("recording ", voice_pitch=0, duration_scalar=0.7).wait_for_completed()
    # Pause — presumably to let an external recording start; confirm.
    time.sleep(10)
    robot.say_text("Hi Cozmo, things are looking good.", voice_pitch=0, duration_scalar=0.7).wait_for_completed()
    # Turn and raise head/lift to "gesture" at the forecast.
    robot.turn_in_place(degrees(30)).wait_for_completed()
    robot.move_head(5)
    robot.move_lift(5)
    robot.say_text("As you can see, Temperatures this week are going to hit a high of 84 on Monday and then taper down to the mid seventies by Friday. We will be mostly sunny with the occasional cloud or two and some rain on Thursday.", voice_pitch=0, duration_scalar=0.7).wait_for_completed()
    # Return to the starting pose.
    robot.turn_in_place(degrees(-30)).wait_for_completed()
    robot.move_lift(-5)
    robot.move_head(-5)
    robot.say_text(" Back to you Cozmo.", voice_pitch=0, duration_scalar=0.7).wait_for_completed()

# Hand control to the SDK event loop with the program above.
cozmo.run_program(cozmo_program)
|
# PROBLEM 3
#
# Modify the below functions acceleration and
# ship_trajectory to plot the trajectory of a
# spacecraft with the given initial position
# and velocity. Use the Forward Euler Method
# to accomplish this.
import numpy
import matplotlib.pyplot
h = 5.0  # integration time step, s
EARTH_MASS = 5.97e24  # kg
GRAVITATIONAL_CONSTANT = 6.67e-11  # N * m^2 / kg^2
MOON_DISTANCE = 384e6  # m
# Precomputed standard gravitational parameter GM of the Earth.
_earth_gravitational = GRAVITATIONAL_CONSTANT * EARTH_MASS

def acceleration(spaceship_position):
    """Gravitational acceleration (m/s^2) on the ship at a 2-D position
    relative to the Earth's centre.

    Equivalent to GM * (-r) / |r|^3: a vector of magnitude GM / |r|^2
    pointing towards the Earth.
    """
    to_earth = -spaceship_position
    distance = numpy.linalg.norm(to_earth)
    return to_earth * _earth_gravitational / distance ** 3
def ship_trajectory():
    """Integrate the ship's motion with the Forward Euler method.

    Returns (x, v): arrays of shape (num_steps + 1, 2) holding position (m)
    and velocity (m/s) at every step of size ``h`` seconds.
    """
    # Two simulated days at the 5 s step.
    num_steps = 60 * 60 * 24 * 2
    x = numpy.zeros([num_steps + 1, 2])  # m
    v = numpy.zeros([num_steps + 1, 2])  # m / s
    x[0] = 15e6, 1e6
    v[0] = 3.14e3, 6.41e3  # initial velocity; alternatively 3.192e3, 6.384e3
    for k in range(num_steps):
        x[k + 1] = x[k] + h * v[k]
        v[k + 1] = v[k] + h * acceleration(x[k])
    return x, v
def moon_orbit():
    """Sample the Moon's idealised circular orbit at 51 evenly spaced points."""
    num_steps = 50
    orbit = numpy.zeros([num_steps + 1, 2])
    full_circle = 2. * numpy.pi
    for i in range(num_steps + 1):
        angle = full_circle * i / num_steps
        orbit[i, 0] = MOON_DISTANCE * numpy.cos(angle)
        orbit[i, 1] = MOON_DISTANCE * numpy.sin(angle)
    return orbit
# Precompute both trajectories once at import time for plot_me().
mo = moon_orbit()
x, v = ship_trajectory()

def plot_me():
    """Plot the ship trajectory, an initial segment of it, the moon's
    orbit, and markers for the Earth and the Moon."""
    matplotlib.pyplot.plot(x[:, 0], x[:, 1], ':')  # ship trajectory
    matplotlib.pyplot.plot(x[:500, 0], x[:500, 1])  # first few minutes of ship
    matplotlib.pyplot.plot(mo[:, 0], mo[:, 1], '--')  # moon orbit
    matplotlib.pyplot.scatter(0, 0)  # earth
    matplotlib.pyplot.scatter(*mo[17], s=10)  # moon
    matplotlib.pyplot.axis('equal')  # equal scaling keeps the orbit circular
    axes = matplotlib.pyplot.gca()
    axes.set_xlabel('Longitudinal position in m')
    axes.set_ylabel('Lateral position in m')
    matplotlib.pyplot.show()

plot_me()
|
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list-in-reverse/problem
# sol1
# using an array
def reversePrint(head):
    """Print a singly linked list's values in reverse order, one per line.

    Collects the values into a list and walks it backwards.  Prints
    nothing for an empty list.
    """
    if head is None:
        return
    if head.next is None:
        print(head.data)
        return
    values = []
    node = head
    while node is not None:
        values.append(node.data)
        node = node.next
    for value in reversed(values):
        print(value)
# sol 2
# using recursion
def reversePrint(head):
    """Recursively print a linked list's values in reverse order:
    recurse to the tail first, then print the current node's value."""
    if head is not None:
        reversePrint(head.next)
        print(head.data)
|
from random import randint
from ai import tah_pocitace, tah
def vyhodnot(pole):
    """Evaluate the 1-D board: 'x' or 'o' for a winner (three in a row),
    '!' for a full board (draw), '-' while the game is still running."""
    for symbol in ('x', 'o'):
        if symbol * 3 in pole:
            return symbol
    # "'-' not in pole" is preferred over "not '-' in pole" for readability.
    if '-' not in pole:
        return '!'
    return '-'
def tah_hrace(pole):
    """Ask the player where to move and return the board with the player's
    'o' recorded.  Re-prompts until the input is a valid index of an
    empty cell."""
    limit = len(pole) - 1
    while True:
        entry = input('Kam chceš hrát? (0..{})'.format(limit))
        try:
            pozice = int(entry)
        except ValueError:
            print('To neni číslo!')
            continue
        if pozice < 0 or pozice > limit:
            print('Nemůžeš hrát venku z pole!')
        elif pole[pozice] != '-':
            print('Tam není volno!')
        else:
            return tah(pole, pozice, 'o')
def piskvorky1d():
    """Run one game of 1-D tic-tac-toe: the player ('o') against the
    computer ('x') on a 20-cell board, alternating moves until decided.

    Improvement: the board is evaluated once per turn instead of calling
    vyhodnot() up to four times with the same argument.
    """
    pole = '-' * 20
    i = 0
    while True:
        if i % 2 == 0:
            pole = tah_hrace(pole)
        else:
            pole = tah_pocitace(pole, "x")
        print(pole)
        stav = vyhodnot(pole)
        if stav == 'o':
            print('Vyhrál hráč.')
        elif stav == 'x':
            print('Vyhrál počítač.')
        elif stav == '!':
            print('Remíza!')
        if stav != '-':
            break
        i += 1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
from django.contrib.auth.decorators import login_required
# URL routes for the coffee app.
app_name = 'coffee'
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Listing requires an authenticated user.
    url(r'^list_all/',
        login_required(views.list_all),
        name='list_all'),
    # NOTE(review): several patterns here ('list_all', 'contact', the
    # edit/delete routes) have no trailing '$' anchor, so they match any
    # URL beginning with that prefix — confirm this is intended.
    url(r'^contact/', views.contact, name='contact'),
    # Provider CRUD (class-based views).
    url(r'^provider_add/$',
        views.CoffeeProviderCreate.as_view(), name='provider_add'),
    url(r'^provider_edit/(?P<pk>\d+)/',
        views.CoffeeProviderUpdate.as_view(), name='provider_edit'),
    url(r'^provider_delete/(?P<pk>\d+)/',
        views.CoffeeProviderDelete.as_view(), name='provider_delete'),
    # Offer CRUD (class-based views).
    url(r'^offer_add/$',
        views.CoffeeOfferCreate.as_view(), name='offer_add'),
    url(r'^offer_edit/(?P<pk>\d+)/',
        views.CoffeeOfferUpdate.as_view(), name='offer_edit'),
    url(r'^offer_delete/(?P<pk>\d+)/',
        views.CoffeeOfferDelete.as_view(), name='offer_delete'),
]
|
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from scapy.all import *
# Flask application; uploaded pcap files are saved to the working directory.
app = Flask(__name__)
# app.config['UPLOAD_FOLDER'] = '/uploads'
@app.route('/uploader', methods = ['POST', 'GET'])
def uploader():
    """Accept a pcap upload and redirect to its packet view.

    Fix: ``flash`` was called without ever being imported, so both error
    branches raised NameError; it is imported here.
    """
    from flask import flash
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No file selected')
            return redirect(request.url)
        # Sanitize the client-supplied filename before writing to disk.
        filename = secure_filename(file.filename)
        file.save(filename)
        return redirect(url_for('view', filename = filename))
def _packet_summary(index, ip_frame, tcp_frame):
    """Build the per-packet dict rendered by view.html.  TCP fields are
    None for packets without a TCP layer."""
    return {"frame": index,
            "source": ip_frame.src,
            "source-port": tcp_frame.sport if tcp_frame else None,
            "destination": ip_frame.dst,
            "destination-port": tcp_frame.dport if tcp_frame else None,
            "seq-no": tcp_frame.seq if tcp_frame else None,
            "ack-no": tcp_frame.ack if tcp_frame else None,
            "protocol": ip_frame.proto}

@app.route('/view/<filename>', methods = ['POST', 'GET'])
def view(filename):
    """Render the packets of ``filename``, optionally filtered by the
    source and/or destination IP given in the POST form.

    Fixes over the original:
    * the combined src+dst filter was tested after ``if src:`` and was
      therefore unreachable; it is now checked first;
    * packets without a TCP layer no longer raise AttributeError (their
      TCP fields are reported as None);
    * the request form is parsed once instead of once per packet.
    """
    packets = rdpcap(filename)
    src = ""
    dst = ""
    if request.method == 'POST':
        src = request.form.get("src") or ""
        dst = request.form.get("dst") or ""
    rows = []
    for i in range(len(packets)):
        ipFrame = packets[i].getlayer(IP)
        if not ipFrame:
            # Non-IP packets are not shown.
            continue
        tcpFrame = packets[i].getlayer(TCP)
        if src and dst:
            if ipFrame.src == src and ipFrame.dst == dst:
                rows.append(_packet_summary(i, ipFrame, tcpFrame))
        elif src:
            if ipFrame.src == src:
                rows.append(_packet_summary(i, ipFrame, tcpFrame))
        elif dst:
            if ipFrame.dst == dst:
                rows.append(_packet_summary(i, ipFrame, tcpFrame))
        else:
            rows.append(_packet_summary(i, ipFrame, tcpFrame))
    return render_template('view.html', data = rows)
@app.route('/')
def index():
    """Serve the upload landing page."""
    return render_template('index.html')
# Run Flask's development server (debug mode reloads on code changes).
if __name__=='__main__':
    app.run(debug = True)
|
from random import choice, sample
from flask import Flask, render_template, request
# "__name__" is a special Python variable for the name of the current module
# Flask wants to know this to know what any imported things are relative to.
app = Flask(__name__)
# Pool of adjectives from which random compliments are sampled.
AWESOMENESS = [
    'awesome', 'terrific', 'fantastic', 'neato', 'fantabulous', 'wowza', 'oh-so-not-meh',
    'brilliant', 'ducky', 'coolio', 'incredible', 'wonderful', 'smashing', 'lovely']
@app.route('/')
def start_here():
    """Homepage: a plain-text greeting."""
    message = "Hi! This is the home page."
    return message
@app.route('/hello')
def say_hello():
    """Render the hello page (hello.html)."""
    return render_template("hello.html")
@app.route('/greet')
def greet_person():
    """Greet the named user with three randomly sampled compliments."""
    name = request.args.get("person")
    chosen = sample(AWESOMENESS, 3)
    return render_template("compliment.html",
                           person=name,
                           compliments=chosen)
@app.route('/game')
def show_game_form():
    """Show the game form, or say goodbye if the user declined to play."""
    if request.args.get("play-game") == "no":
        return "<h2> Goodbye! We'll miss you! </h2>"
    return render_template("game.html")
@app.route('/madlib')
def mad_lib():
    """Render the madlib result from the submitted form fields.

    Generalized: any number of selected animals is joined into a natural
    English list ("a", "a and b", "a, b and c", ...); the original only
    handled one to three selections and passed the raw list otherwise.
    """
    input_animals = request.args.getlist("animals")
    if not input_animals:
        animals = ""
    elif len(input_animals) == 1:
        animals = input_animals[0]
    else:
        # Oxford-comma-free join: "a, b and c" for three, "a and b" for two.
        animals = ", ".join(input_animals[:-1]) + " and " + input_animals[-1]
    return render_template("madlib.html",
                           person=request.args.get("person"),
                           color=request.args.get("color"),
                           noun=request.args.get("noun"),
                           adjective=request.args.get("adjective"),
                           number=request.args.get("number"),
                           adjective2=request.args.get("adjective2"),
                           animals=animals)
# Started directly (not imported): launch Flask's development server.
if __name__ == '__main__':
    # debug=True gives us error messages in the browser and also "reloads" our web app
    # if we change the code.
    app.run(debug=True)
|
#!/usr/local/bin/python
import sys
import pycurl
from string import maketrans
import cStringIO
import re
import urllib
import glob
import os
# Set Your MC directory
minecraftHome = "/home/minecraft/"
# Module-level scratch state shared by the functions below.
projectHome = []       # candidate project URLs (reset per plugin lookup)
plugin_versions = []   # discovered version URLs (reset per plugin lookup)
new_plugin = ""
plugsList = []         # jar files found in the plugins directory
plugsName = []         # plugin names parsed from each plugin.yml
plugsHome = []
plugSearch = []        # names queued for a Bukkit lookup
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets colouring
    def disable(self):
        # Blank every code so subsequent prints are colourless.
        self.HEADER = ''
        self.OKBLUE = ''
        self.OKGREEN = ''
        self.WARNING = ''
        self.FAIL = ''
        self.ENDC = ''
def usage():
    """Print a short help text with usage examples."""
    help_lines = [
        "Install a plugin: Crafty 'plugin-name'",
        "Update plugins and CraftBukkit just run Crafty with no options",
        "Examples:",
        "---------",
        "Crafty Downloads and installs all updates",
        'Crafty "worldedit" Downloads and installs the WorldEdit plugin',
        'Crafty "tim the enchanter" Downloads and installs Tim the Enchanter plugin',
    ]
    for help_line in help_lines:
        print(help_line)
def getPlugs():
    """Scan the server's plugins directory, extract each jar's plugin name
    from its plugin.yml, and queue the names for a Bukkit lookup."""
    print ("Determining installed Plugins")
    # NOTE: this local assignment shadows the module-level ``plugsList``.
    plugsList = glob.glob(minecraftHome+"plugins/*.jar")
    for item in plugsList:
        # Pipe plugin.yml's "name:" line out of the jar via unzip/grep/awk.
        p = os.popen("unzip -c " + item + " plugin.yml | grep '^name:' | uniq| awk '{print $2}'","r")
        while 1:
            line = p.readline()
            if not line: break
            # Strip the newline, the "name:" prefix and all spaces.
            line = re.sub('\n',"",line)
            line = re.sub('\r',"",line)
            line = re.sub("name:","",line)
            line = re.sub(" ","",line)
            plugsName.append(line)
            print("Found: "+line)
            plugSearch.append(line)
    # Look up the project page for every discovered plugin.
    findPlugsHome(plugSearch)
def findPlugsHome(plugs):
    """Look up the Bukkit project page for every installed plugin name.

    Fixes: ``failedPlugs`` was re-created on every loop iteration, so it
    could never accumulate more than the current failure; the bare
    ``except`` is narrowed to Exception so KeyboardInterrupt still aborts.
    """
    failedPlugs = []
    for item in plugs:
        # Reset the module-level scratch lists used by search().
        projectHome[:] = []
        plugin_versions[:] = []
        try:
            search(item)
        except Exception:
            print("Can not stat "+item)
            failedPlugs.append(item)
def checkPlugsVersion():
    """Placeholder: plugin version checking is not implemented yet."""
    return 0
def updateBukkit():
    """Placeholder: announce the CraftBukkit check and report success (0)."""
    print("Checking Craftbukkit:")
    return 0
def search(plugin):
    """Locate a plugin's dev.bukkit.org project page via a Bing query,
    scrape the latest version and download link, and print project facts.

    Python 2 only: uses cStringIO, string.maketrans and one ``print``
    statement.  Relies on scraping Bing/Bukkit HTML, so it is fragile by
    design (as the original author notes below).
    """
    # This is by no means perfect but it works surprisingly well!
    print("")
    print(bcolors.OKGREEN + "Looking up "+plugin+" on Bukkit.org"+ bcolors.ENDC)
    print("")
    # user input formatting: spaces become '+' for the query string
    buf = cStringIO.StringIO()
    intab = " "
    outtab = "+"
    trantab = maketrans(intab, outtab)
    # NOTE: 'str' shadows the builtin of the same name.
    str = plugin
    # Use Bing query to guess the project's bukkit uri
    c = pycurl.Curl()
    c.setopt(pycurl.USERAGENT, "Mozilla")
    c.setopt(c.URL, "http://www.bing.com/search?q=site:dev.bukkit.org+"+str.translate(trantab)+"&&format=rss")
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.perform()
    buffer = buf.getvalue()
    # Crude RSS clean-up before splitting into whitespace-separated tokens.
    parsed_buffer = re.sub('<[^<]> ', " ", buffer)
    parsed_buffer = re.sub('&'," ", parsed_buffer)
    parsed_buffer = re.sub("\d+", " ", parsed_buffer)
    parsed_buffer = re.sub("</link><description>Curse"," ",parsed_buffer)
    parsed_buffer = re.sub("</link>", " ",parsed_buffer)
    search_results = parsed_buffer.split()
    buf.close()
    for item in search_results:
        # NOTE(review): the first operand is a non-empty string constant and
        # is therefore always truthy — only the "files/" test is effective.
        if "url?q=http://dev.bukkit.org/bukkit-plugins/" and "files/" in item:
            item = item.replace('Bukkit</title><link>', '')
            item = item.replace('files/', '')
            projectHome1 = item
    # projectHome1 is unbound when no result matched; the NameError is
    # caught here and treated as "project not found".
    try:
        print("Found project @ "+projectHome1)
    except:
        print("Can not determine project home")
        return
    # Find the latest version
    buf = cStringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.USERAGENT, "Mozilla")
    c.setopt(c.URL, projectHome1)
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.perform()
    buffer = buf.getvalue()
    buffer = re.sub('<[^<]> ', " ", buffer)
    version_results = buffer.split()
    for item in version_results:
        if "files" not in item:
            item = ""
        if "<dt>Downloads</dt>" in item:
            item = ""
        if "span" in item:
            item = ""
        if "Download" in item:
            item = item.replace('href="', '')
            item = item.replace('">Download</a>', '')
            item = item.replace('</link><description>', '')
            projectHome2 = "http://dev.bukkit.org"+item
    print("Found Version @ "+ projectHome2)
    # Find file
    try:
        buf = cStringIO.StringIO()
        c = pycurl.Curl()
        c.setopt(pycurl.USERAGENT, "Mozilla")
        c.setopt(c.URL, projectHome2)
        c.setopt(c.WRITEFUNCTION, buf.write)
        c.perform()
        buffer = buf.getvalue()
        buffer = re.sub('<[^<]> ', " ", buffer)
        version_results = buffer.split()
        for item in version_results:
            if "Download" not in item:
                item = ""
            elif "<dt>" in item:
                item = ""
            # NOTE(review): '".jar" or ...' is always truthy — only ".gz" is
            # actually tested against item; the else branch is unreachable.
            elif ".jar" or ".tar" or ".zip" or ".rar" or ".tgz" or ".gz" in item:
                item = item.replace('href="', '')
                item = item.replace('">Download</a>', '')
                item = item.replace('</span></li>', '')
                projectHome3 = item
            else:
                # Python 2 print statement — keeps this module py2-only.
                print "Can not determine project files"
                return
    except:
        print("Sorry,could not determine which file to download")
    print("Found file @ "+projectHome3)
    print(bcolors.WARNING + "")
    print("-[Project Facts]-")
    print("" + bcolors.ENDC)
    # Get project facts
    buf = cStringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.USERAGENT, "Mozilla")
    c.setopt(c.URL, projectHome1)
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.perform()
    buffer = buf.getvalue()
    facts_storage = []
    facts_results = buffer.split('>')
    for item in facts_results:
        # data-shortdate attributes hold the created/updated dates.
        if 'data-shortdate' in item:
            if 'data-prefix' not in item:
                formatter = item.split('"')
                facts_storage.append(formatter[3])
    print(bcolors.OKBLUE +'[Date Created] '+ bcolors.ENDC +facts_storage[0])
    print(bcolors.OKBLUE +'[Last Updated] '+ bcolors.ENDC +facts_storage[1])
    return
    # NOTE(review): unreachable — placed after the return.
    buf.close()
if __name__ == '__main__':
    # Explicit argument checks replace the original bare ``except``, which
    # silently swallowed *any* error — including errors raised inside
    # search() itself — and then ran getPlugs() as a fallback.
    if len(sys.argv) >= 3 and sys.argv[1] == "-s":
        search(sys.argv[2])
    elif len(sys.argv) == 1:
        getPlugs()
    else:
        usage()
|
import serial
import serial.tools.list_ports
import sys
from datetime import datetime
import os
class SerialWrapper:
    """Opens a serial telemetry link and mirrors everything read into a
    timestamped backup file under telemetry_data/."""

    def __init__(self):
        self.ser = serial.Serial()
        # get file name
        now = datetime.now()
        # NOTE(review): ':' makes this file name invalid on Windows —
        # confirm this only ever runs on POSIX systems.
        time = now.strftime("%Y-%m-%d-%H:%M:%S")
        fileName = "tel-" + time
        # create file and directory
        try:
            dirName = "telemetry_data/"
            os.mkdir(dirName)
        except:
            # NOTE(review): bare except also hides permission errors, not
            # just "directory already exists".
            pass
        try:
            self.backup = open(dirName + fileName, "w")
        except:
            print("could not create the file: " + time)
            sys.exit()

    # read x bytes as an interger, little endian
    def readBytes(self, amount):
        # NOTE: 'bytes' shadows the builtin of the same name.
        bytes = self.ser.read(amount)
        self.backup.write(str(bytes));
        total = 0
        count = 0
        # Accumulate least-significant byte first (little endian).
        for v in bytes:
            total += v << (count * 8)
            count += 1
        return total

    # tries to initialize a device
    def initDevice(self, ser):
        # Handshake: send b'aaa', expect b'bbb' back; 1 on success, 0 otherwise.
        self.ser.write(b'aaa')
        if (ser.read(3) == b'bbb'):
            print("yay")
            return 1
        print("nay")
        return 0

    # tries to find and initialize the correct port
    def openSerial(self, baud):
        ser = self.ser
        ser.baudrate = baud
        ports = self.getSafeDevices()
        for v in ports:
            ser.port = v.device
            ser.open()
            print("Testing" + str(v))
            if self.initDevice(ser):
                print("Succesfully connected")
                return 0
            ser.close()
        else:
            # for/else: no port completed the handshake — report failure (1).
            return 1

    # finds devices that are safe to communicate with
    def getSafeDevices(self):
        # Accept only ports whose description mentions a known-safe adapter.
        safeStrings = [
            "usb",
            "arduino",
            "ch340"
        ]
        safe_devices = []
        devices = serial.tools.list_ports.comports()
        for d in devices:
            flag = False
            for substring in safeStrings:
                if substring in d.description.lower():
                    flag = True
            if flag:
                safe_devices.append(d)
        return safe_devices
def a(x, y):
    """Return the product of x and y.

    Was an assigned lambda; PEP 8 (E731) recommends a def so tracebacks
    and repr() carry a useful name.
    """
    return x * y

print(a(4, 3))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import handin6
# Parse both FASTA files — presumably into {header: sequence} dicts;
# confirm against handin6.fasta_to_dict.
test1 = handin6.fasta_to_dict("test1.fasta")
test2 = handin6.fasta_to_dict("test2.fasta")
# Report the keys present in test1 but missing from test2.
# Fixes: the Python 2 ``print item1`` statement (a syntax error on
# Python 3) becomes print(item1), and ``not x in y`` becomes the
# idiomatic ``x not in y`` — both also valid under Python 2.
for item1 in test1:
    if item1 not in test2:
        print(item1)
from operator import itemgetter
class playerSort(object):
    """Organise a list of player dicts by position.

    Players are ranked by projected points (descending).  Two views are
    kept per position:

    * ``byPosition``       -- every player at that position, ranked;
    * ``prunedByPosition`` -- an "efficient frontier": walking down the
      ranking, a player is kept only when he is strictly cheaper than the
      last kept player and has a positive projection.
    """

    def __init__(self, playerList):
        ranked = sorted(playerList, key=itemgetter('projection'), reverse=True)
        self.positionList = {p['position'] for p in ranked}
        self.byPosition = {}
        self.prunedByPosition = {}
        for pos in self.positionList:
            at_pos = [p for p in ranked if p['position'] == pos]
            self.byPosition[pos] = at_pos
            for rank, player in enumerate(at_pos):
                if rank == 0:
                    # The top projection is always kept.
                    self.prunedByPosition[pos] = [player]
                    cheapest_kept = player['salary']
                elif player['salary'] < cheapest_kept and player['projection'] > 0:
                    self.prunedByPosition[pos].append(player)
                    cheapest_kept = player['salary']
#·······························································#
#· UNIVERSIDAD NACIONAL SAN ANTONIO ABAD DEL CUSCO #
#· Escuela Profesional de Ingenieria Informatica y de Sistemas#
#· Robotica y Procesamiento de Señales #
#· Reconocimiento de objetos por medio del histograma o filtro#
# segun los colores del objeto #
#·······························································#
# Libreiras necesarias
import cv2
import numpy as np
import urllib
import urllib.request
# Procedure that draws contours and finds each object's coordinates.
def dibujar(mask, color):
    """Find external contours of ``mask``; for each blob larger than 500 px
    draw its convex hull on the module-level ``image`` in ``color`` and mark
    the centroid with a dot and an "x,y" label (module-level ``font``)."""
    # NOTE(review): the 2-value unpacking matches OpenCV 4.x; OpenCV 3.x
    # returned 3 values from findContours — confirm the installed version.
    contornos, nul = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Walk every contour found
    for c in contornos:
        # Contour area in pixels
        area = cv2.contourArea(c)
        # Only draw blobs above the size threshold
        if area > 500:
            # Image moments give the centroid
            M = cv2.moments(c)
            # Avoid a zero denominator when m00 is 0
            if (M["m00"] == 0):
                M["m00"] = 1
            # Centroid coordinates
            x = int(M["m10"] / M["m00"])
            y = int(M['m01'] / M['m00'])
            # Convex hull smooths the contour for display
            nuevoContorno = cv2.convexHull(c)
            # 7 px dot at the object's centre
            cv2.circle(image, (x, y), 7, (0, 255, 0), -1)
            # Coordinate label next to the centre
            cv2.putText(image, '{},{}'.format(x, y), (x + 10, y), font, 0.75, (0, 255, 0), 1, cv2.LINE_AA)
            # Outline the object
            cv2.drawContours(image, [nuevoContorno], 0, color, 3)
# IP-camera snapshot endpoint used as the video source.
url = 'http://192.168.1.153:8080/shot.jpg'
# HSV range for blue
low_blue = np.array([100, 100, 20], np.uint8)
high_blue = np.array([125, 255, 255], np.uint8)
# HSV range for yellow
low_yellow = np.array([20, 100, 20], np.uint8)
high_yellow = np.array([45, 255, 255], np.uint8)
# HSV ranges for red (hue wraps around 0, so two intervals are needed)
low_red_1 = np.array([0, 100, 20], np.uint8)
high_red_1 = np.array([5, 255, 255], np.uint8)
low_red_2 = np.array([175, 100, 20], np.uint8)
high_red_2 = np.array([179, 255, 255], np.uint8)
# HSV range for orange
low_orange = np.array([11, 100, 20], np.uint8)
high_orange = np.array([19, 255, 255], np.uint8)
# HSV range for green
low_green = np.array([36, 100, 20], np.uint8)
high_green = np.array([70, 255, 255], np.uint8)
# HSV range for violet
low_violet = np.array([130, 100, 20], np.uint8)
high_violet = np.array([145, 255, 255], np.uint8)
# HSV range for pink
low_pink = np.array([146, 100, 20], np.uint8)
high_pink = np.array([170, 255, 255], np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
# Main loop: grab a frame, build one binary mask per colour, draw detections.
while True:
    # Fetch one snapshot from the IP camera
    imgResp = urllib.request.urlopen(url)
    imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    # Decode the JPEG bytes into a BGR image
    image = cv2.imdecode(imgNp, -1)
    # Convert BGR -> HSV for colour thresholding
    frameHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # COLOUR MASKS (binary images, one per colour range)
    maskAzul = cv2.inRange(frameHSV, low_blue, high_blue)
    maskAmarillo = cv2.inRange(frameHSV, low_yellow, high_yellow)
    maskRed1 = cv2.inRange(frameHSV, low_red_1, high_red_1)
    maskRed2 = cv2.inRange(frameHSV, low_red_2, high_red_2)
    # Combine both red hue intervals
    maskRed = cv2.add(maskRed1, maskRed2)
    maskNaranja = cv2.inRange(frameHSV, low_orange, high_orange)
    maskVerde = cv2.inRange(frameHSV, low_green, high_green)
    maskVioleta = cv2.inRange(frameHSV, low_violet, high_violet)
    maskRosa = cv2.inRange(frameHSV, low_pink, high_pink)
    # Draw the detected contours in a representative BGR colour
    dibujar(maskAzul, (255, 0, 0))
    dibujar(maskAmarillo, (0, 255, 255))
    dibujar(maskRed, (0, 0, 255))
    dibujar(maskNaranja, (26, 127, 239))
    dibujar(maskVerde, (0, 255, 0))
    dibujar(maskVioleta, (120, 40, 140))
    dibujar(maskRosa, (255, 0, 255))
    # Show the annotated frame; press 's' to quit
    cv2.imshow('frame', image)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        break
|
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from onegov.core.upgrade import upgrade_task
from onegov.core.utils import linkify
from onegov.org.models import Organisation
from onegov.people import Agency
@upgrade_task("Add default values for page breaks of PDFs")
def add_default_value_for_pagebreak_pdf(context):
    """ Sets the default PDF page-break levels (root and organisation)
    on every organisation's meta.

    (The previous docstring, "Adds the elected candidates to the archived
    results", was copied from an unrelated task and did not match the code.)
    """
    session = context.session
    if context.has_column('organisations', 'meta'):
        for org in session.query(Organisation).all():
            org.meta['page_break_on_level_root_pdf'] = 1
            org.meta['page_break_on_level_org_pdf'] = 1
@upgrade_task("Convert Agency.portrait to a html")
def convert_agency_portrait_to_html(context):
    """Wrap every agency portrait in a paragraph, linkifying its URLs and
    turning newlines into <br> tags."""
    session = context.session
    if not context.has_column('agencies', 'portrait'):
        return
    for agency in session.query(Agency).all():
        html_body = linkify(agency.portrait).replace('\n', '<br>')
        agency.portrait = '<p>{}</p>'.format(html_body)
@upgrade_task("Replace person.address in Agency.export_fields")
def replace_removed_export_fields(context):
    """Swap the removed shared 'person.address' export field for the four
    new split address fields, keeping its position in the list."""
    split_fields = [
        'person.location_address',
        'person.location_code_city',
        'person.postal_address',
        'person.postal_code_city',
    ]
    session = context.session
    if not context.has_column('agencies', 'meta'):
        return
    for agency in session.query(Agency).all():
        export_fields = agency.meta.get('export_fields', [])
        if 'person.address' not in export_fields:
            continue
        idx = export_fields.index('person.address')
        agency.meta['export_fields'] = (
            export_fields[:idx] + split_fields + export_fields[idx + 1:])
|
# -*- coding: utf-8 -*-
import logging
from openerp import pooler
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
from openerp.osv import osv, fields
from openerp import netsvc
class rel_ifrs_tributario(osv.osv):
    """Link table pairing an IFRS asset with its 'tributario' (tax)
    counterpart; both columns reference account.asset.asset."""
    _name = 'rel.ifrs.tributario'
    _columns = {
        # NOTE(review): both labels read 'Activo'; presumably one should be
        # the IFRS asset and the other the tax asset — confirm the labels.
        'ifrs_id': fields.many2one('account.asset.asset', string='Activo'),
        'tributario_id': fields.many2one('account.asset.asset', string='Activo'),
    }
def buscar_tipo_categoria(self, cr, uid, id):
    """Return the ``accounting_type`` of asset category ``id``
    ('ifrs' / 'tributario'), or False when no id is given."""
    if not id:
        return False
    category = self.pool.get('account.asset.category').browse(cr, uid, id)
    return category.accounting_type
def relacionar_ifrs_tributario(self, cr, uid, ids, context=None):
    """Pair assets sharing the same code: for every code the newest asset
    is joined to its same-code sibling, and when one side's category is
    'ifrs' and the other's 'tributario' a link row is created."""
    # Newest asset per code joined against its same-code sibling.
    sql="""SELECT tab1.id, tab1.category_id, tab2.id, tab2.category_id FROM account_asset_asset tab1, account_asset_asset tab2
    WHERE tab1.code=tab2.code
    AND tab1.id<>tab2.id
    AND tab1.id=(SELECT MAX(id) FROM account_asset_asset tab
    WHERE tab.code=tab1.code)
    """
    cr.execute(sql)
    respuesta=cr.fetchall()
    if respuesta and respuesta[0]:
        for asset in respuesta:
            # Pick whichever side of the pair has an 'ifrs' category ...
            ifrs = asset[0] if self.buscar_tipo_categoria(cr, uid, asset[1]) == 'ifrs' else asset[2] if self.buscar_tipo_categoria(cr, uid, asset[3]) == 'ifrs' else False
            # ... and whichever has a 'tributario' (tax) category.
            tributario = asset[0] if self.buscar_tipo_categoria(cr, uid, asset[1]) == 'tributario' else asset[2] if self.buscar_tipo_categoria(cr, uid, asset[3]) == 'tributario' else False
            if ifrs and tributario:
                self.create(cr,uid,{'ifrs_id':ifrs,'tributario_id':tributario}, context)
    return True
def copiar_configuracion_activo(self, cr, uid, ids, context=None):
    """For every linked pair, copy the tax asset's depreciation settings
    onto its IFRS counterpart as an account.asset.config.plan of type
    'tributario'."""
    ids_cop = self.search(cr,uid,[('ifrs_id','!=', False),('tributario_id','!=',False)],{})
    ids_to_copy = []
    ids_rel = {}
    for rel in self.browse(cr,uid,ids_cop):
        ids_to_copy.append(rel.tributario_id.id)
        # Map tax asset id -> IFRS asset id.
        ids_rel[rel.tributario_id.id] = rel.ifrs_id.id
    values = self.pool.get('account.asset.asset').read(cr, uid, ids_to_copy,['prorata','method','method_time','method_number','method_period','method_progress_factor','method_end','category_id','purchase_value'])
    for d in values:
        d['tipo_plan'] = 'tributario'
        # Attach the plan to the IFRS asset linked to this tax asset.
        d['asset_id'] = ids_rel[d['id']]
        # read() returns many2one as (id, name); keep only the id.
        d['category_id'] = d['category_id'][0] if d['category_id'] else False
        d['valor_bruto_activo'] = d['purchase_value']
        del d['id']
        del d['purchase_value']
    for vls in values:
        try:
            self.pool.get('account.asset.config.plan').create(cr,uid,vls)
        # NOTE(review): the bare except silently skips plans that fail to
        # create — consider logging the error.
        except: continue
    return True
def actualizar_lineas_depreciacion(self, cr, uid, ids, context=None):
ids_cop = self.search(cr,uid,[('ifrs_id','!=', False),('tributario_id','!=',False)],{})
ids_to_copy = []
ids_rel = {}
for rel in self.browse(cr,uid,ids_cop):
ids_to_copy.append(rel.tributario_id.id)
ids_rel[rel.tributario_id.id] = rel.ifrs_id.id
for id in ids_to_copy:
try:
sql = """
update account_asset_depreciation_line set tipo_plan = 'tributario', asset_id = %d where asset_id = %d
""" % (ids_rel[id],id)
cr.execute(sql)
except:continue
return True
def borrar_activos_tributarios(self, cr, uid, ids, context=None):
ids_cop = self.search(cr,uid,[('ifrs_id','!=', False),('tributario_id','!=',False)],{})
ids_to_copy = []
ids_rel = {}
for rel in self.browse(cr,uid,ids_cop):
ids_to_copy.append(rel.tributario_id.id)
ids_rel[rel.tributario_id.id] = rel.ifrs_id.id
sql = """
delete from account_asset_asset where id in %s
""" % str(tuple(ids_to_copy))
cr.execute(sql)
return True
def traspaso_tributarios_sin_relacion(self, cr, uid, ids, context=None):
sql="""select id from account_asset_asset where category_id in (select id from account_asset_category where accounting_type = 'tributario')
and id not in (select tributario_id from rel_ifrs_tributario
where tributario_id is not null)"""
cr.execute(sql)
respuesta=cr.fetchall()
if respuesta and respuesta[0]:
ids_to_copy = map(lambda x:x[0],respuesta)
values = self.pool.get('account.asset.asset').read(cr, uid, ids_to_copy,['prorata','method','method_time','method_number','method_period','method_progress_factor','method_end','category_id','purchase_value'])
for d in values:
d['tipo_plan'] = 'tributario'
d['asset_id'] = d['id']
d['category_id'] = d['category_id'][0] if d['category_id'] else False
d['valor_bruto_activo'] = d['purchase_value']
del d['id']
del d['purchase_value']
for vls in values:
try:
self.pool.get('account.asset.config.plan').create(cr,uid,vls)
except: continue
for id in ids_to_copy:
try:
sql = """
update account_asset_depreciation_line set tipo_plan = 'tributario' where asset_id = %d
""" % id
cr.execute(sql)
except:continue
return True
def traspaso_ifrs_sin_relacion(self, cr, uid, ids, context=None):
sql="""
select id from account_asset_asset where category_id in (select id from account_asset_category where accounting_type = 'ifrs')
"""
cr.execute(sql)
respuesta=cr.fetchall()
if respuesta and respuesta[0]:
ids_to_copy = map(lambda x:x[0],respuesta)
values = self.pool.get('account.asset.asset').read(cr, uid, ids_to_copy,['prorata','method','method_time','method_number','method_period','method_progress_factor','method_end','category_id','purchase_value'])
for d in values:
d['tipo_plan'] = 'ifrs'
d['asset_id'] = d['id']
d['category_id'] = d['category_id'][0] if d['category_id'] else False
d['valor_bruto_activo'] = d['purchase_value']
del d['id']
del d['purchase_value']
for vls in values:
try:
self.pool.get('account.asset.config.plan').create(cr,uid,vls)
except: continue
for id in ids_to_copy:
try:
sql = """
update account_asset_depreciation_line set tipo_plan = 'ifrs' where asset_id = %d
""" % id
cr.execute(sql)
except:continue
return True
rel_ifrs_tributario()

# The reference SQL below was previously stored as two dead (no-op) string
# literals at module level; kept as comments for documentation instead.
#
# insert into account_asset_config_plan (prorata, method, method_time,
#     method_number, method_period, method_progress_factor, method_end,
#     category_id, valor_bruto_activo, asset_id, tipo_plan,
#     create_date, write_date, create_uid, write_uid)
# select prorata, method, method_time, method_number, method_period,
#     method_progress_factor, method_end, category_id, purchase_value, id,
#     'tributario', CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, 1, 1
# from account_asset_asset
# where category_id in (select id from account_asset_category
#                       where accounting_type = 'tributario')
#   and id not in (select tributario_id from rel_ifrs_tributario
#                  where tributario_id is not null)
#
# update account_asset_depreciation_line set tipo_plan = 'tributario'
# where asset_id in (
#     select id from account_asset_asset where accounting_type = 'tributario'
#       and id not in (select tributario_id from rel_ifrs_tributario
#                      where tributario_id is not null)
# )
def merge_sorted_list(arr1, arr2):
    """Merge sorted arr2 into sorted arr1 in place, keeping len(arr1) fixed.

    arr1 is assumed to end with len(arr2) placeholder slots (e.g. zeros), so
    its first len(arr1) - len(arr2) entries hold the real sorted data.

    BUG FIX: the original insert/pop walk could read arr1[i+1] past the end
    and mishandled duplicates; this rewrite merges from the back in O(m+n)
    with no insert/pop shifting.  Prints and returns arr1 like the original.
    """
    n = len(arr2)
    m = len(arr1) - n  # number of real elements in arr1
    i, j, k = m - 1, n - 1, len(arr1) - 1
    while j >= 0:
        if i >= 0 and arr1[i] > arr2[j]:
            arr1[k] = arr1[i]
            i -= 1
        else:
            arr1[k] = arr2[j]
            j -= 1
        k -= 1
    print(arr1)  # kept: the original printed the merged result too
    return arr1
def merge_sorted_list_v2(nums1, m, nums2, n):
    """Merge nums2[:n] into nums1 in place (LeetCode 88 style) and return it.

    nums1 holds m sorted values followed by at least n placeholder slots.

    BUG FIX: the original blanked the tail with "" strings and later compared
    ints against "" (TypeError on Python 3), and rebound nums1 locally when
    m == 0 (no effect for the caller).  This rewrite fills nums1 from the
    back using only index arithmetic, so no placeholders are needed.
    """
    i, j, k = m - 1, n - 1, m + n - 1
    while j >= 0:
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
        k -= 1
    return nums1
# Ad-hoc manual checks for the merge helpers above.
a = [1, 2, 3, 0, 0, 0]  # buffer with 3 trailing placeholder slots (unused below)
b = [2, 5, 6]           # values to merge in (unused below)
nums1 = [-1, 0, 0, 3, 3, 3, 0, 0, 0]  # 6 real values + 3 placeholder slots
m = 6
nums2 = [1, 2, 2]
n = 3
result = merge_sorted_list_v2(nums1, 6, nums2, 3)
print("result: ", result)
|
# User input DNA sequence
try:
    DNA_sequence = input("Please enter the DNA sequence below: \n")
except Exception:
    # BUG FIX: the original left DNA_sequence undefined here, causing a
    # NameError on the next line; fall back to an empty sequence.
    print("Invalid, please enter a valid DNA sequence.")
    DNA_sequence = ""
# DNA sequence to uppercases
DNA = DNA_sequence.upper()
# Count A/T bases; every other character counts toward G/C, matching the
# original loop's behaviour (it treated any non-A/T character as G/C).
count_AT = sum(1 for nuc in DNA if nuc in ("A", "T"))
count_GC = len(DNA) - count_AT
total_nuc = count_AT + count_GC  # total number of bases
if total_nuc == 0:
    # BUG FIX: the original divided by zero on an empty sequence.
    print("Empty DNA sequence; nothing to analyse.")
else:
    proportion_AT = count_AT / total_nuc  # proportion AT nucleotides
    proportion_GC = count_GC / total_nuc  # proportion GC nucleotides
    # Proportions rounded to two decimal points.
    pro_AT = float("{0:.2f}".format(proportion_AT))
    pro_GC = float("{0:.2f}".format(proportion_GC))
    print("AT bases " + str(count_AT) + ", GC bases " + str(count_GC) +
          "; Proportion AT " + str(pro_AT) + ", proportion GC " + str(pro_GC))
"""
Div-conforming B-spline discretization of 3D Taylor--Green flow, using the
method of subgrid vortices.
"""
from tIGAr import *
from tIGAr.compatibleSplines import *
from tIGAr.BSplines import *
from tIGAr.timeIntegration import *
import math
import ufl
# Re-ordering of DoFs causes FunctionSpace creation to slow down dramatically
# in larger problems. These parameters partially alleviate the issue.
parameters['reorder_dofs_serial'] = False
parameters['dof_ordering_library'] = 'random'
# Suppress warnings about Krylov solver non-convergence:
set_log_level(40)
# Use TSFC representation, due to complicated forms:
parameters["form_compiler"]["representation"] = "tsfc"
import sys
# Deeply-nested UFL expression trees can exceed the default recursion limit.
sys.setrecursionlimit(10000)
from os import path
####### Parameters #######
# Arguments are parsed from the command line, with some hard-coded defaults
# here. See help messages for descriptions of parameters.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--Nel',dest='Nel',default=32,
                    help='Number of elements in each direction.')
parser.add_argument('--N_STEPS_over_Nel',
                    dest='N_STEPS_over_Nel',default=8,
                    help='Ratio of number of time steps to Nel.')
parser.add_argument('--kPrime',dest='kPrime',default=1,
                    help='Degree up to which velocity is complete.')
parser.add_argument('--Re',dest='Re',default=1600.0,
                    help='Reynolds number.')
parser.add_argument('--T',dest='T',default=10.0,
                    help='Length of time interval to consider.')
parser.add_argument('--QUAD_REDUCE',dest='QUAD_REDUCE',default=0,
                    help='Degree by which to reduce quadrature accuracy.')
parser.add_argument('--VIZ',dest='VIZ',action='store_true',
                    help='Include option to output visualization files.')
# BUG FIX: the help text below was copy-pasted from --VIZ; --GALERKIN is used
# later to drop the subgrid (VMS) terms from the residual.
parser.add_argument('--GALERKIN',dest='GALERKIN',action='store_true',
                    help='Use a pure Galerkin formulation (no subgrid model).')
parser.add_argument('--MAX_KSP_IT',dest='MAX_KSP_IT',default=1000,
                    help='Maximum number of Krylov iterations.')
parser.add_argument('--LINEAR_TOL',dest='LINEAR_TOL',default=1e-3,
                    help='Relative tolerance for Krylov solves.')
parser.add_argument('--NONLIN_TOL',dest='NONLIN_TOL',default=1e-3,
                    help='Relative tolerance for nonlinear solves.')
parser.add_argument('--penalty',dest='penalty',default=1e4,
                    help='Dimensionless penalty for iterated penalty solver.')
parser.add_argument('--RHO_INF',dest='RHO_INF',default=0.5,
                    help='Spectral radius of generalized-alpha integrator.')
parser.add_argument('--OUT_SKIP',dest='OUT_SKIP',default=1,
                    help='Number of steps to skip between writing files.')
parser.add_argument('--SCRATCH',dest='SCRATCH',default=".",
                    help='Directory in which to put large output files.')
args = parser.parse_args()
# Convert parsed strings to their working types:
Nel = int(args.Nel)
kPrime = int(args.kPrime)
Re = Constant(float(args.Re))
TIME_INTERVAL = float(args.T)
VIZ = bool(args.VIZ)
GALERKIN = bool(args.GALERKIN)
MAX_KSP_IT = int(args.MAX_KSP_IT)
LINEAR_TOL = float(args.LINEAR_TOL)
NONLIN_TOL = float(args.NONLIN_TOL)
penalty = float(args.penalty)
N_STEPS_over_Nel = int(args.N_STEPS_over_Nel)
RHO_INF = float(args.RHO_INF)
OUT_SKIP = int(args.OUT_SKIP)
QUAD_REDUCE = int(args.QUAD_REDUCE)
SCRATCH = str(args.SCRATCH)
# Check if restarting: t.dat holds "<step> <time>", written at checkpoints
# in the time-stepping loop below.
if(path.exists("t.dat")):
    tfile = open('t.dat','r')
    fs = tfile.read()
    tfile.close()
    tokens = fs.split()
    startStep = int(tokens[0])
    startTime = float(tokens[1])
else:
    startStep = 0
    startTime = 0.0
####### Preprocessing #######
if(mpirank==0):
    print("Generating extraction data...")
    sys.stdout.flush()
# Polynomial degree in each direction.
degs = 3*[kPrime,]
# Knot vectors for defining the control mesh (domain [0, pi]^3).
kvecs = [uniformKnots(degs[i],0.0,math.pi,Nel,False) for i in range(0,3)]
# Define a trivial mapping from parametric to physical space, via explicit
# B-spline.
controlMesh = ExplicitBSplineControlMesh(degs,kvecs)
# Define the spaces for RT-type compatible splines on this geometry.
fieldList = generateFieldsCompat(controlMesh,"RT",degs)
# Include an extra scalar field for the fine-scale pressure.
fieldList += [BSpline(degs,kvecs),]
splineGenerator = FieldListSpline(controlMesh,fieldList)
# Apply strong BCs in parametric normal directions (zero normal velocity on
# each pair of opposite sides).
for field in range(0,3):
    scalarSpline = splineGenerator.getFieldSpline(field)
    for side in range(0,2):
        sideDofs = scalarSpline.getSideDofs(field,side)
        splineGenerator.addZeroDofs(field,sideDofs)
####### Analysis #######
if(mpirank==0):
    print("Setting up extracted spline...")
# Important to use sufficient quadrature:
QUAD_DEG = 2*(max(degs)+1)-QUAD_REDUCE
spline = ExtractedSpline(splineGenerator,QUAD_DEG)
if(mpirank==0):
    print("Starting analysis...")
# Parameters of the time discretization:
N_STEPS = N_STEPS_over_Nel*Nel
DELTA_T = Constant(TIME_INTERVAL/float(N_STEPS))
# Define the viscosity based on the desired Reynolds number.
nu = Constant(1.0/Re)
# The initial condition for the flow (Taylor--Green vortex velocity, per the
# module docstring):
x = spline.spatialCoordinates()
soln0 = sin(x[0])*cos(x[1])*cos(x[2])
soln1 = -cos(x[0])*sin(x[1])*cos(x[2])
soln = as_vector([soln0,soln1,Constant(0.0)])
# For 3D computations, use an iterative solver.
spline.linearSolver = PETScKrylovSolver("gmres","jacobi")
spline.linearSolver.parameters["relative_tolerance"] = LINEAR_TOL
# Linear solver sometimes fails to converge, but convergence of nonlinear
# iteration is still enforced to within spline.relativeTolerance.
spline.linearSolver.parameters["error_on_nonconvergence"] = False
spline.linearSolver.parameters["maximum_iterations"] = MAX_KSP_IT
spline.linearSolver.ksp().setGMRESRestart(MAX_KSP_IT)
spline.relativeTolerance = NONLIN_TOL
# "Un-pack" a velocity--pressure pair from spline.V, which is just a mixed
# space with four scalar fields.
def unpack(up):
    """Split a mixed-space function into (parametric velocity, pressure)."""
    dim = spline.mesh.geometry().dim()
    velocity = as_vector([up[k] for k in range(dim)])
    pressure = up[-1]
    return velocity, pressure
# The unknown parametric velocity and fine-scale pressure:
up_hat = Function(spline.V)
u_hat, p_hat = unpack(up_hat)
# Parametric velocity at the old time level:
up_old_hat = Function(spline.V)
# Parametric $\partial_t u$ at the old time level. (Note that the suffix "dot"
# is a convention coming from the origins of generalized-$\alpha$ in structural
# problems, and does not refer to a material time derivative here.)
updot_old_hat = Function(spline.V)
# Create a generalized-alpha time integrator.
timeInt = GeneralizedAlphaIntegrator(RHO_INF,DELTA_T,up_hat,
                                     (up_old_hat, updot_old_hat),
                                     t=startTime)
# The alpha-level parametric velocity and its partial derivative w.r.t. time:
up_hat_alpha = timeInt.x_alpha()
updot_hat_alpha = timeInt.xdot_alpha()
# A helper function to take the symmetric gradient:
def eps(u):
    return 0.5*(spline.grad(u) + spline.grad(u).T)
# The physical velocity and its temporal partial derivative (RT pushforward
# preserves the divergence structure; W pushforward is for scalars):
u = cartesianPushforwardRT(unpack(up_hat_alpha)[0],spline.F)
udot = cartesianPushforwardRT(unpack(updot_hat_alpha)[0],spline.F)
p = cartesianPushforwardW(p_hat,spline.F)
# The parametric and physical test functions:
vq_hat = TestFunction(spline.V)
v_hat, q_hat = unpack(vq_hat)
v = cartesianPushforwardRT(v_hat,spline.F)
q = cartesianPushforwardW(q_hat,spline.F)
# The material time derivative of the velocity:
Du_Dt = udot + spline.grad(u)*u
# The viscous part of the Cauchy stress:
sigmaVisc = 2.0*nu*eps(u)
# Contribution to the weak problem for given test function; plugging u into
# this as the test function will be considered the "resolved dissipation".
def resVisc(v):
    return inner(sigmaVisc,eps(v))*spline.dx
# The problem is posed on a solenoidal subspace, as enforced by the iterative
# penalty solver; no pressure terms are necessary in the weak form.
resGalerkin = inner(Du_Dt,v)*spline.dx + resVisc(v)
# Extra term associated with SUPG stabilization. This technically leaves
# an un-determined hydrostatic mode in the fine-scale
# pressure, but we can let the iterative solver choose it for us with no
# effect on the velocity solution.
resStrong = Du_Dt - spline.div(sigmaVisc) + spline.grad(p)
# Advective/pressure operator applied to a test pair (used by SUPG below):
def Ladv(v,q):
    return spline.grad(v)*u + spline.grad(q)
# Mesh size information:
dxi_dxiHat = 0.5*ufl.Jacobian(spline.mesh)
dx_dxi = spline.parametricGrad(spline.F)
dx_dxiHat = dx_dxi*dxi_dxiHat
Ginv = dx_dxiHat*dx_dxiHat.T
G = inv(Ginv)
# Defining the stabilization constants:
C_I = Constant(3.0*max(degs)**2)
tau_M = 1.0/sqrt(dot(u,G*u) + 4.0/DELTA_T**2 + ((C_I*nu)**2)*inner(G,G))
tau_C = 1.0/(tau_M*tr(G))
# Fine scale velocity using the quasi-static subscale model:
uPrime = -tau_M*resStrong
# We need to be able to plug in the velocity solution as a test function
# later, to compute model dissipation.
def resSUPG(v,q,uPrime):
    return inner(uPrime,-Ladv(v,q))*spline.dx
def resVMS(v,q,uPrime):
    return inner(v,spline.grad(u)*uPrime)*spline.dx \
        - inner(spline.grad(v),outer(uPrime,uPrime))*spline.dx
def resModel(v,q,uPrime):
    return resSUPG(v,q,uPrime) + resVMS(v,q,uPrime)
# Residual of the full formulation and consistent linearization:
res = resGalerkin
if(not GALERKIN):
    res += resModel(v,q,uPrime)
else:
    res += inner(p,q)*spline.dx # (Pin down redundant field.)
Dres = derivative(res, up_hat)
# Divergence of the velocity field, given a function in the mixed space.
# It is weighted to make the penalty dimensionless and independent of position,
# as needed for the update step of the iterated
# penalty solver.
divOp = lambda up : sqrt(tau_C)*spline.div(cartesianPushforwardRT
                                           (unpack(up)[0],spline.F))
# Auxiliary Function to re-use during the iterated penalty solves:
w = Function(spline.V)
# Projection of initial condition; need to specify how to get the (parametric)
# velocity vector field from a function in the mixed space.
if(startStep==0):
    if(mpirank==0):
        print("Projecting velocity IC...")
    up_old_hat.assign(divFreeProject(soln,spline,
                                     getVelocity=lambda up : unpack(up)[0]))
else:
    # If restarting, load the initial condition from the appropriate restart
    # file.
    if(mpirank==0):
        print("Loading initial condition from step "+str(startStep)+" ...")
    f = HDF5File(worldcomm,
                 SCRATCH+"/restarts/restart."+str(startStep)+".h5", 'r')
    f.read(up_old_hat,'/up_old_hat')
    f.read(updot_old_hat,'/updot_old_hat')
    # (While not mathematically-necessary as tolerances go to zero, storing
    # the initial guess for the pressure in the iterated penalty solver
    # helps with reproducibility when restarting at finite solver tolerances.)
    f.read(w,'/w')
    f.close()
# Predictor:
up_hat.assign(up_old_hat)
# Files for optional ParaView output:
if(VIZ):
    uFile = File(SCRATCH+"/results/ux.pvd")
    vFile = File(SCRATCH+"/results/uy.pvd")
    wFile = File(SCRATCH+"/results/uz.pvd")
# Time stepping loop:
for i in range(startStep,N_STEPS):
    if(mpirank == 0):
        print("\n------- Time step "+str(i+1)+"/"+str(N_STEPS)
              +" , t = "+str(timeInt.t)+" -------\n")
        sys.stdout.flush()
    # Output checkpoint data and, optionally, vizualization files.
    if(i%OUT_SKIP==0):
        if(i != startStep):
            f = HDF5File(worldcomm,
                         SCRATCH+"/restarts/restart."+str(i)+".h5", 'w')
            f.write(up_old_hat,'/up_old_hat')
            f.write(updot_old_hat,'/updot_old_hat')
            f.write(w,'/w')
            f.close()
            if(mpirank==0):
                # Record the last fully-completed step/time for restarts.
                tfile = open('t.dat','w')
                tfile.write(str(i)+" "
                            +str(timeInt.t - float(timeInt.DELTA_T)))
                tfile.close()
        if(VIZ):
            # Take advantage of explicit B-spline geometry to simplify
            # visualization.
            # BUG FIX: was `up_hat_old.split()` -- a NameError; the Function
            # defined above is `up_old_hat`.
            ux, uy, uz, _ = up_old_hat.split()
            ux.rename("u","u")
            uy.rename("v","v")
            uz.rename("w","w")
            uFile << ux
            # BUG FIX: the y-component file previously received ux again.
            vFile << uy
            wFile << uz
    # Solve for velocity in a solenoidal subspace of the RT-type
    # B-spline space, where divergence acts on the velocity unknowns in the
    # parametric domain.
    iteratedDivFreeSolve(res,up_hat,vq_hat,spline,
                         divOp=divOp,
                         penalty=Constant(penalty),
                         J=Dres,w=w)
    # Assemble the dissipation rates, and append them to a file that can be
    # straightforwardly plotted as a function of time using gnuplot.
    dissipationScale = (1.0/pi**3)
    resolvedDissipationRate = assemble(dissipationScale*resVisc(u))
    if(GALERKIN):
        modelDissipationRate = 0.0
    else:
        modelDissipationRate = assemble(dissipationScale
                                        *resModel(u,Constant(0.0)*p,uPrime))
    dissipationRate = resolvedDissipationRate + modelDissipationRate
    # Because the algebraic problem is solved only approximately, there is a
    # nonzero divergence to the velocity field. If the tolerances are set
    # small enough and/or penalty set high enough, this can be driven down
    # to machine precision.
    divError = math.sqrt(assemble((spline.div(u)**2)*spline.dx))
    if(mpirank==0):
        print("Divergence error ($L^2$): "+str(divError))
        mode = "a"
        if(i==0):
            mode = "w"
        outFile = open("dissipationRate.dat",mode)
        outFile.write(str(timeInt.t)+" "+str(dissipationRate)+" "
                      +str(modelDissipationRate)+"\n")
        outFile.close()
    # Move to the next time step.
    timeInt.advance()
|
# File_name: stop_eks.py
# Purpose: Stop load balancers that are running
# Problem: botocore.exceptions.ClientError: An error occurred (AccessDeniedException) when calling the ListClusters operation: Account 015670528421 is not authorized to use this service
# Author: Søren Wandrup-Bendixen
# Email: soren.wandrup-Bendixen@cybercom.com
# Created: 2019-07-01
# Called from lambda_function.py
import boto3
# instance_type = 'eks'
def delete_clusters(instance_type,region_name_,RunningInstances) :
    """Delete every cluster of the given service in one region.

    Appends "<service> <region> running <name>" to RunningInstances for each
    cluster found, then deletes it.  Best-effort: any API failure is logged
    and swallowed so the caller can continue with other regions/services.
    """
    client = boto3.client(instance_type, region_name=region_name_)
    try:
        cluster_names = client.list_clusters()['clusters']
        for cluster_name in cluster_names:
            RunningInstances.append(instance_type + ' ' + region_name_ + ' running ' + cluster_name)
            client.delete_cluster(name=cluster_name)
    except Exception as err:
        # BUG FIX: the old message claimed "does not support clusters" for
        # ANY failure, masking e.g. the AccessDeniedException noted in the
        # file header; report the actual error instead.
        print(instance_type + ' ' + region_name_ + ' cluster listing/deletion failed: ' + str(err))
    return
|
from mesh_server import *
from boundarycondition_server import *
from tqdm import tqdm # status bar
import os
import subprocess
import numpy as np
import matplotlib as mpl
# Fall back to matplotlib's non-interactive Agg backend when no X display is
# available (e.g. headless runs); must happen before pyplot is imported.
if os.environ.get('DISPLAY','') == '':
    print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')
import matplotlib.pyplot as plt
# global variables, beware of namespace collision
T = 1 # final time
num_steps = 2000 # number of time steps # must satisfy CFL condition
dt = T / num_steps  # time-step size
mu = 0.03 # dynamic viscosity, poise
rho = 1 # density, g/cm3
# windkessel (two-element outflow model parameters)
c = 1 #1.6e-5 distant capacitance
Rd = 1e5 #6001.2 distant resistance
Rp = 5e4 #7501.5 proximal resistance
p_windkessel_1 = 1.06e5 # init val, large number could lead to overflow
p_windkessel_2 = 1.06e5 # init val
a=.5 # vessel shrink length = 2a
b=.0 # vessel shrink intensity = b
u0=1. # init amplitude
#### Define symmetric gradient
def epsilon(u):
    # Symmetric part of the velocity gradient.
    return sym(nabla_grad(u))
#### Define stress tensor
def sigma(u, p):
    # Newtonian Cauchy stress: viscous part minus pressure.
    return 2*mu*epsilon(u) - p*Identity(len(u))
# post computation: define diagnosis section
x0=1.
x1=1.9
x2=1.9
x3=1.
nn=10 # number of monte carlo samples
tol=1e-6 # tolerance from boundary
# diagnosis on the trunk
# NOTE(review): D0 and d are presumably supplied by the star import from
# mesh_server -- verify.
xx0=-x0*np.ones(nn)
yy0=np.linspace(-D0 +tol,D0 -tol,nn)
# diagnosis on branch 1
xx1=np.linspace(x1-d +tol,x1+d -tol,nn)
yy1=np.linspace(x1+d -tol,x1-d +tol,nn)
# diagnosis on branch 2
xx2=np.linspace(x2-d +tol,x2+d -tol,nn)
yy2=np.linspace(-x2-d+tol,-x2+d-tol,nn)
# diagnosis on illness part
xx3=np.linspace(x2-d +tol,x2+d -b-tol,nn)
# NOTE(review): `y2` is not defined in this file (likely a typo for `x2`,
# cf. yy2 above, unless a star import provides it) -- verify before running.
yy3=np.linspace(-y2-d+tol,-x2+d-b-tol,nn)
def slice(xx,yy):
    """Pair up x- and y-coordinate ranges into a list of 2D sample points.

    Returns a list (not a zip iterator, which could only be consumed once)
    of numpy arrays.  NOTE: shadows the builtin ``slice``.
    """
    return [np.array(point) for point in zip(xx, yy)]
def average_over_line(u_or_p,grid):
    """Evaluate a (scalar or vector) field at each grid point and return the
    componentwise mean of the samples."""
    samples = [u_or_p(point) for point in grid]
    return np.mean(samples, axis=0)
# Pre-computed sample grids for the four diagnosis sections defined above.
xy0=slice(xx0,yy0)
xy1=slice(xx1,yy1)
xy2=slice(xx2,yy2)
xy3=slice(xx3,yy3)
# NOTE(review): B comes from a star import; section length assumed to be 2*B.
section_len = 2*B
def compute_NSsolution(mesh, V, Q,
                       T = T,
                       num_steps = num_steps,
                       mu = mu,
                       rho = rho,
                       c = c,
                       Rd = Rd,
                       Rp = Rp,
                       p_windkessel_1 =p_windkessel_1,
                       p_windkessel_2 =p_windkessel_2,
                       u0=u0,
                       s=s,
                       inflow_expr=inflow_expr,
                       inflow_str=inflow_str,
                       heartfun=heartfun,
                       xy0=xy0,
                       xy1=xy1,
                       xy2=xy2,
                       xy3=xy3,
                       ):
    """Solve incompressible Navier--Stokes with an IPCS splitting scheme.

    When the (star-imported) flag_dynamic is set, two-element windkessel
    outflow pressures are updated each step from the section-averaged
    velocity.  Section-averaged pressures are recorded each step for
    diagnostics, and frames are saved when flag_movie is set.

    BUG FIXES vs. the original:
    * returns (u_, p_) -- the __main__ block unpacks two values, but the
      function previously returned None;
    * removed the per-step ``xy0=slice(x0)`` etc. -- the local slice(xx, yy)
      helper takes two arguments, so those calls raised TypeError; the grids
      passed in as arguments are used directly;
    * the loop index no longer shadows the FacetNormal ``n``.
    """
    #### Define trial and test functions
    u = TrialFunction(V)
    v = TestFunction(V)
    p = TrialFunction(Q)
    q = TestFunction(Q)
    #### Define functions for solutions at previous and current time steps
    u_n = Function(V)
    u_ = Function(V)
    p_n = Function(Q)
    p_ = Function(Q)
    #### Define expressions used in variational forms
    U = 0.5*(u_n + u)           # Crank--Nicolson midpoint velocity
    normal = FacetNormal(mesh)  # renamed from `n` (was shadowed by the loop)
    f = Constant((0, 0))        # body force
    k = Constant(dt)
    mu = Constant(mu)
    rho = Constant(rho)
    def epsilon(u):
        # Symmetric gradient.
        return sym(nabla_grad(u))
    #### Define stress tensor
    def sigma(u, p):
        return 2*mu*epsilon(u) - p*Identity(len(u))
    #### Define variational problem for step 1 (tentative velocity)
    F1 = rho*dot((u - u_n) / k, v)*dx \
       + rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
       + inner(sigma(U, p_n), epsilon(v))*dx \
       + dot(p_n*normal, v)*ds - dot(mu*nabla_grad(U)*normal, v)*ds \
       - dot(f, v)*dx
    a1 = lhs(F1)
    L1 = rhs(F1)
    #### Define variational problem for step 2 (pressure correction)
    a2 = dot(nabla_grad(p), nabla_grad(q))*dx
    L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
    #### Define variational problem for step 3 (velocity correction)
    a3 = dot(u, v)*dx
    L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
    #### Assemble the time-independent matrices once
    A1 = assemble(a1)
    A2 = assemble(a2)
    A3 = assemble(a3)
    # Progress bar over simulated time
    pbar = tqdm(total=T)
    ### Time-stepping
    files = []
    t = 0.0
    # init windkessel pressure
    p_bdry_1 = 0.0
    p_bdry_2 = 0.0
    # init diagnostic profile
    t_grid = []
    p_at_bd_0 = []
    p_at_bd_1 = []
    p_at_bd_2 = []
    p_at_bd_3 = []
    for step in range(num_steps):
        # Update current time
        t += dt
        if flag_dynamic == True:
            # Section-averaged flux through each branch (dot with [1, 1]
            # projects onto the branch direction; rough estimate).
            u_avg_1 = np.dot(average_over_line(u_,xy1),[1,1])*section_len
            u_avg_2 = np.dot(average_over_line(u_,xy2),[1,1])*section_len
            # Explicit-Euler update of the two-element windkessel ODE.
            p_windkessel_1 += dt/c*(-p_windkessel_1/Rd+u_avg_1)
            p_windkessel_2 += dt/c*(-p_windkessel_2/Rd+u_avg_2)
            p_bdry_1 = p_windkessel_1 + Rp * u_avg_1
            p_bdry_2 = p_windkessel_2 + Rp * u_avg_2
        bcu,bcp = compute_bc(V,Q,t,
                             p_bdry_1 =p_bdry_1,
                             p_bdry_2 =p_bdry_2,
                             u0=u0,
                             s=s,
                             inflow_expr=inflow_expr,
                             inflow_str=inflow_str,
                             heartfun=heartfun,)
        #### Apply boundary conditions to matrices
        [bc.apply(A1) for bc in bcu]
        [bc.apply(A2) for bc in bcp]
        # Step 1: Tentative velocity step
        b1 = assemble(L1)
        [bc.apply(b1) for bc in bcu]
        solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
        # Step 2: Pressure correction step
        b2 = assemble(L2)
        [bc.apply(b2) for bc in bcp]
        solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
        # Step 3: Velocity correction step
        b3 = assemble(L3)
        solve(A3, u_.vector(), b3, 'cg', 'sor')
        # Update previous solution
        p_n.assign(p_)
        u_n.assign(u_)
        # Section-averaged pressures at the four diagnosis sections.
        diag0 = np.mean([p_(np.array(i)) for i in xy0])
        diag1 = np.mean([p_(np.array(i)) for i in xy1])
        diag2 = np.mean([p_(np.array(i)) for i in xy2])
        diag3 = np.mean([p_(np.array(i)) for i in xy3])
        t_grid.append(t)
        p_at_bd_0.append(diag0)
        p_at_bd_1.append(diag1)
        p_at_bd_2.append(diag2)
        p_at_bd_3.append(diag3)
        if flag_movie:
            # Save one frame per step for later assembly into a movie.
            plt.cla()
            plot(p_)
            plot(u_, title = "t = %.4f" % t + 'u_max:%.2f, ' % u_.vector().vec().max()[1] + 'p_max:%.2f ' % p_.vector().vec().max()[1])
            fname = '_tmp%03d.png' % step
            plt.savefig(fname)
            files.append(fname)
        # Update progress bar
        pbar.update(dt)
        pbar.set_description("t = %.4f" % t + 'u_max:%.2f, ' % u_.vector().vec().max()[1] + 'p_max:%.2f ' % p_.vector().vec().max()[1])
    pbar.close()
    # BUG FIX: previously returned None while __main__ unpacks two values.
    return u_, p_
if __name__ == '__main__':
    mesh, V, Q = compute_mesh()
    # BUG FIX: the original re-derived xy0..xy3 via slice(x0) etc., but the
    # local slice(xx, yy) helper needs two arguments (TypeError); the
    # module-level xy0..xy3 grids are used directly instead.  The result is
    # unpacked defensively because older revisions returned None.
    result = compute_NSsolution(mesh, V, Q)
    if result is not None:
        u_, p_ = result
        average_over_line(p_, xy0)
        average_over_line(p_, xy1)
        average_over_line(p_, xy2)
        average_over_line(p_, xy3)
    print("test completed.")
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Project, Task, Comment
from .forms import ProjectForm, UserForm, UserProfileForm, TaskForm, CommentForm
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
# Create your views here.
def project_list(request):
    """Render every project created up to now, ordered oldest-first."""
    visible = Project.objects.filter(created_at__lte=timezone.now())
    ordered = visible.order_by('created_at')
    return render(request, 'taskmanagerapp/project_list.html',
                  {'projects': ordered})
@login_required
def project_detail(request, pk):
    """Show a single project; 404 when the pk does not exist."""
    project = get_object_or_404(Project, pk=pk)
    context = {'proj': project}
    return render(request, 'taskmanagerapp/project_detail.html', context)
@login_required
def task_detail(request, pk):
    """Show a single task; 404 when the pk does not exist."""
    requested_task = get_object_or_404(Task, pk=pk)
    context = {'task': requested_task}
    return render(request, 'taskmanagerapp/task_detail.html', context)
@login_required
def new_project(request):
    """Create a new project authored by the current user.

    GET renders an empty form; POST validates, saves and redirects to the
    project's detail page (re-rendering the bound form when invalid).
    """
    if request.method == "POST":
        form = ProjectForm(request.POST)
        if form.is_valid():
            project = form.save(commit=False)
            project.author = request.user
            project.save()
            # BUG FIX: after save(commit=False) the m2m data must be saved
            # explicitly (as project_edit already does); without this, m2m
            # selections made on the form were silently dropped.
            form.save_m2m()
            return redirect('taskmanagerapp.views.project_detail',pk=project.pk)
    else:
        form = ProjectForm()
    return render(request, 'taskmanagerapp/project_edit.html', {'form': form})
@login_required
def new_task(request):
    """Create a new task authored by the current user.

    GET renders an empty form; POST validates, saves and redirects to the
    project list (re-rendering the bound form when invalid).
    """
    if request.method == "POST":
        form = TaskForm(request.POST)
        if form.is_valid():
            task = form.save(commit=False)
            task.author = request.user
            task.save()
            # BUG FIX: after save(commit=False) the m2m data must be saved
            # explicitly (as task_edit already does); without this, m2m
            # selections made on the form were silently dropped.
            form.save_m2m()
            return redirect('taskmanagerapp.views.project_list')
    else:
        form = TaskForm()
    return render(request, 'taskmanagerapp/task_edit.html', {'form': form})
@login_required
def add_comment_to_project(request, pk):
    """Attach a new comment, authored by the current user, to a project."""
    project = get_object_or_404(Project, pk=pk)
    if request.method != "POST":
        return render(request, 'taskmanagerapp/add_comment_to_project.html',
                      {'form': CommentForm()})
    form = CommentForm(request.POST)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.author = request.user
        comment.project = project
        comment.save()
        return redirect('taskmanagerapp.views.project_detail', pk=project.pk)
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'taskmanagerapp/add_comment_to_project.html',
                  {'form': form})
@login_required
def project_edit(request, pk):
    """Edit an existing project; re-render the form on invalid input."""
    instance = get_object_or_404(Project, pk=pk)
    if request.method != "POST":
        unbound = ProjectForm(instance=instance)
        return render(request, 'taskmanagerapp/project_edit.html',
                      {'form': unbound})
    form = ProjectForm(request.POST, instance=instance)
    if not form.is_valid():
        return render(request, 'taskmanagerapp/project_edit.html',
                      {'form': form})
    project = form.save(commit=False)
    project.author = request.user
    project.save()
    form.save_m2m()  # persist many-to-many selections
    return redirect('taskmanagerapp.views.project_detail', pk=project.pk)
@login_required
def task_edit(request, pk):
    """Edit an existing task; re-render the form on invalid input."""
    instance = get_object_or_404(Task, pk=pk)
    if request.method != "POST":
        unbound = TaskForm(instance=instance)
        return render(request, 'taskmanagerapp/task_edit.html',
                      {'form': unbound})
    form = TaskForm(request.POST, instance=instance)
    if not form.is_valid():
        return render(request, 'taskmanagerapp/task_edit.html',
                      {'form': form})
    task = form.save(commit=False)
    task.author = request.user
    task.save()
    form.save_m2m()  # persist many-to-many selections
    return redirect('taskmanagerapp.views.task_detail', pk=task.pk)
def register(request):
    """Create a new User plus its linked UserProfile in one step.

    Both forms are validated unconditionally (no short-circuit) so that each
    can display its own errors when the page is re-rendered.
    """
    if request.method == "POST":
        uf = UserForm(request.POST, prefix='user')
        upf = UserProfileForm(request.POST, prefix='userprofile')
        # FIX: was `uf.is_valid() * upf.is_valid()` -- arithmetic on booleans;
        # validate both explicitly, then combine with boolean `and`.
        user_valid = uf.is_valid()
        profile_valid = upf.is_valid()
        if user_valid and profile_valid:
            user = uf.save()
            userprofile = upf.save(commit=False)
            userprofile.user = user
            userprofile.save()
            return redirect('taskmanagerapp.views.project_list')
    else:
        uf = UserForm()
        upf = UserProfileForm()
    return render(request, 'taskmanagerapp/register.html', {'uf': uf, 'upf': upf})
|
# Python 2 script: print n, then n random bits on one line (the trailing
# comma in the print statement suppresses the newline).
import random
n = 10
print n
for _ in range(n):
    print random.choice([0,1]),
|
# Write a program that finds all numbers between j and k (k > j) that are
# divisible by both 3 and 7. (Translated from the original German comment.)
j=int(input("Please select min:"))
k=int(input("Please select max:"))
def finder(j, k):
    """Return a list of all integers in [j, k] divisible by both 3 and 7.

    BUG FIX: the original walked the range but never recorded any matches
    (both branches only did ``j += 1``) and returned the meaningless pair
    (k+1, k); it now collects and returns the qualifying numbers.
    """
    results = []
    while j <= k:
        if j % 3 == 0 and j % 7 == 0:
            results.append(j)
        j += 1
    return results
print("Results:",finder(j,k))  # show what finder found between j and k
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Category(models.Model):
    """Blog post category (a single human-readable name)."""
    # verbose_name is Chinese for "category".
    name = models.CharField(max_length=100, verbose_name='分类')
    def __str__(self):
        return self.name
class Tag(models.Model):
    """Blog post tag (a single human-readable name)."""
    # verbose_name is Chinese for "tag".
    name = models.CharField(max_length=100, verbose_name='标签')
    def __str__(self):
        return self.name
class Post(models.Model):
    """Blog post: title/body plus category, tags, author and a view counter.

    verbose_name strings are Chinese labels (title, body, created time,
    modified time, excerpt, category, tag, author).
    """
    title = models.CharField(max_length=70, verbose_name='标题')
    body = models.TextField(verbose_name='正文')
    created_time = models.DateTimeField(verbose_name='创建时间')
    modified_time = models.DateTimeField(verbose_name='修改时间')
    excerpt = models.CharField(max_length=200, blank=True, verbose_name='摘要')
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='分类')
    tags = models.ManyToManyField(Tag, blank=True, verbose_name='标签')
    author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者')
    views = models.PositiveIntegerField(default=0)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Canonical URL for this post (named route 'blog:detail').
        return reverse('blog:detail', kwargs={'pk': self.pk})
    class Meta:
        # Newest posts first by default.
        ordering = ['-created_time']
    def increase_views(self):
        # Bump the view counter, persisting only that column.
        # NOTE(review): read-modify-write is race-prone under concurrent
        # requests; an F('views') + 1 expression would be atomic -- confirm
        # before changing.
        self.views += 1
        self.save(update_fields=['views'])
|
#!/usr/bin/python
import linkedlist
# Smoke-test driver for the project-local LinkedList implementation:
# exercises head insertion/deletion and tail insertion, printing the head or
# tail value after each operation.
L = linkedlist.LinkedList()
L.insert_at_head(1)
print("The value of the head is {0}".format(L.head.data))
L.insert_at_head(2)
print("The value of the head is now {0}".format(L.head.data))
L.insert_at_head(3)
print("The value of the head is now {0}".format(L.head.data))
a = L.delete_at_head()
print("The value at the head which was deleted was {0}".format(a))
L.insert_at_tail(40)
print("The value of the tail is now {0}".format(L.tail.data))
L.insert_at_tail(50)
print("The value of the tail is now {0}".format(L.tail.data))
L.insert_at_tail(60)
print("The value of the tail is now {0}".format(L.tail.data))
|
from stdmodandoption import *
import cameron_functions as CF
import collections
def kslaw(ssdict):
    """Assemble a plot dictionary for a Kennicutt-Schmidt relation plot:
    SFR surface density versus gas surface density on an x-y grid.

    ssdict -- settings dict (run id, snapshot range, grid extent, labels).
    Returns a nested defaultdict keyed by ssdict['wanted'] holding the data
    arrays, axis labels and output filename for the plotting stage.

    NOTE(review): relies on names from the `stdmodandoption` star import
    (SSF, wrap, plotloc, np) -- TODO confirm; `cameron_functions` (CF) is
    imported above but not used here. Python 2 syntax (print statements).
    """
    # Arbitrarily nested dict: plotdict[a][b] = ... works without setup.
    nested_dict = lambda: collections.defaultdict(nested_dict)
    plotdict = nested_dict()
    runtodo=ssdict['runtodo']
    wanted=ssdict['wanted']
    print 'wanted', wanted
    startno=ssdict['startno']
    Nsnap=ssdict['Nsnap']
    snapsep=ssdict['snapsep']
    the_prefix=ssdict['the_prefix']   # NOTE(review): unused below
    the_suffix=ssdict['the_suffix']   # NOTE(review): unused below
    fmeat=ssdict['fmeat']
    maxlength=ssdict['maxlength']
    withinr=ssdict['withinr']
    withoutr=ssdict['withoutr']
    nogrid=ssdict['nogrid']
    title='MW'
    titleneed=title
    dclabelneed=1
    useM1=1
    # NOTE(review): the ssdict value of nogrid is overwritten by this
    # hard-coded 10x10 grid -- confirm intentional.
    nogrid=10
    rotface=1
    loccen=1
    newlabelneed=1
    print 'runtodo', runtodo
    numoftimes=0
    for i in range(startno,Nsnap,snapsep):
        snaplist=[]
        # Per-snapshot run metadata (directories, labels, physics flags).
        info=SSF.outdirname(runtodo, i)
        rundir=info['rundir']
        runtitle=info['runtitle']
        slabel=info['slabel']
        snlabel=info['snlabel']
        dclabel=info['dclabel']
        resolabel=info['resolabel']
        the_snapdir=info['the_snapdir']
        Nsnapstring=info['Nsnapstring']
        havecr=info['havecr']
        Fcal=info['Fcal']
        iavesfr=info['iavesfr']
        timestep=info['timestep']
        cosmo=info['cosmo']
        maindir=info['maindir']
        color=info['color']
        haveB=info['haveB']
        M1speed=info['M1speed']
        newlabel=info['newlabel']
        snumadd=info['snumadd']
        usepep=info['usepep']
        halostr=info['halostr']
        # Human-readable panel title per run type.
        ptitle=title
        if runtitle=='SMC':
            ptitle='Dwarf'
        elif runtitle=='SBC':
            ptitle='Starburst'
        elif runtitle=='MW':
            ptitle=r'$L\star$ Galaxy'
        labelneed=dclabel
        if newlabelneed==1:
            # Wrap long legend labels to 17 characters per line.
            labelneed="\n".join(wrap(newlabel,17))
        if cosmo==1:
            h0=1
        else:
            h0=0
        agecount=10 #Myr
        # NOTE(review): this re-reads and re-unpacks the same metadata as
        # above; the second block is redundant.
        info=SSF.outdirname(runtodo, i)
        rundir=info['rundir']
        runtitle=info['runtitle']
        slabel=info['slabel']
        snlabel=info['snlabel']
        dclabel=info['dclabel']
        resolabel=info['resolabel']
        the_snapdir=info['the_snapdir']
        Nsnapstring=info['Nsnapstring']
        havecr=info['havecr']
        haveB=info['haveB']
        Fcal=info['Fcal']
        iavesfr=info['iavesfr']
        timestep=info['timestep']
        color=info['color']
        highres = info['highres']
        ptitle=title
        if runtitle=='SMC':
            ptitle='Dwarf'
        elif runtitle=='SBC':
            ptitle='Starburst'
        elif runtitle=='MW':
            ptitle=r'$L\star$ Galaxy'
        # Gas (type 0) and star (type 4) snapshots, rotated face-on and
        # centered; the star read re-uses the gas centering/angular momentum.
        # NOTE(review): uses Nsnap, not the loop variable i -- every
        # iteration reads the final snapshot; confirm intended.
        G=SSF.readsnapfromrun(runtodo,Nsnap,0,rotface=1,loccen=1)
        cenin = G['cen']; vcenin = G['vcen']; angLin = G['angL'];
        S = SSF.readsnapfromrun(runtodo,Nsnap,4,rotface=1,loccen=1,importLcen=1,angLin=angLin,cenin=cenin,vcenin=vcenin)
        # Square grid spanning [withoutr, withinr] in both x and y.
        xlist=ylist=np.linspace(withoutr,withinr,num=nogrid)
        # Gas surface density per cell [Msun/pc^2].
        gasdenlist = SSF.calsurdenxy(G,xlist,ylist,maxlength)
        # SFR from young stars (10 Myr window) within 20 kpc.
        Sdata = SSF.calsfr(S,tintval=0.01,cosmo=1,withinr=20)
        sfrl = Sdata['sfrl']; Sxl = Sdata['Sxl']; Syl = Sdata['Syl']; Szl = Sdata['Szl']
        # SFR surface density per cell [Msun/yr/kpc^2].
        SFRdenlist = SSF.calSFRsurdenxy(sfrl,Sxl,Syl,Szl,xlist,ylist,maxlength)
        plotdict[wanted]['xlab'] = r'${\rm \Sigma_{gas}\;[M_{\odot}/pc^2]}$'
        plotdict[wanted]['xnl'] = np.ravel(gasdenlist)
        plotdict[wanted]['ylab'] = r'${\rm \dot{\Sigma}_\star}\;[{\rm M_{\odot}/yr/kpc^2}]$'
        plotdict[wanted]['ynl'] = np.ravel(SFRdenlist)
        plotdict[wanted]['runtodo'] = runtodo
        plotdict[wanted]['labelneed'] = labelneed
        plotdict[wanted]['lsn'] = 'None'
        plotdict[wanted]['lw'] = 2
        plotdict[wanted]['marker'] = 's'
        plotdict[wanted]['color'] = color
        plotdict[wanted]['runtitle'] = runtitle
        plotdict[wanted]['ptitle'] = ptitle
        # plotloc comes from the star import -- TODO confirm.
        filename=plotloc+'CRplot/SFRsurden/kslaw_'+fmeat+'.pdf'
        plotdict[wanted]['filename'] = filename
    return plotdict
|
from django.conf.urls.defaults import patterns, include
from api.resources import CurrencyItemResource
from tastypie.api import Api
# Register the tastypie v1 API and route everything under /api/ to it.
# NOTE(review): django.conf.urls.defaults and patterns() were removed in
# Django 1.8+; this module targets a legacy Django/Tastypie stack.
v1_api = Api(api_name='v1')
v1_api.register(CurrencyItemResource())
urlpatterns = patterns('',
    (r'^api/', include(v1_api.urls)),
)
from flask import Flask , request , render_template
from flask_cors import CORS ,cross_origin
from Log_Writer.logger import App_Logger
from Raw_Data_Formatter.data_formatter import formatter
from Data_Validator.data_validator import Validator
from Preprocessing.preprocessor import Preprocessor
from Get_Model_for_Cluster.model_finder import Find_model
import sys
# Flask application with CORS enabled for cross-origin requests; a shared
# App_Logger instance records request-handling events.
app=Flask(__name__)
CORS(app)
log_writer=App_Logger()
@app.route('/',methods=['GET','POST'])
@cross_origin()
def homePage():
    """Serve the landing page (index.html) and log the visit."""
    log_writer.log(log_message="Rendered Home Page")
    return render_template("index.html")
@app.route('/Prediction',methods=['POST'])
@cross_origin()
def index():
    """Run the fraud-prediction pipeline on a POSTed form and render the verdict.

    Pipeline: format raw request data -> validate -> preprocess -> look up
    the cluster-specific model -> predict -> render results.html.
    """
    try:
        if request.method=='POST':
            # Gathering the data and converting it to dataframe
            data=formatter().format_data()
            # Validating the input data
            err = Validator().validate(data)
            if err > 0:
                # BUG FIX: was sys.exit(), which raised SystemExit and
                # aborted the worker instead of answering the client.
                log_writer.log(log_message="Input validation failed")
                return render_template("index.html")
            # Preprocessing the data
            data = Preprocessor().preprocess(data)
            # Finding the cluster to which data point belongs
            # and importing the model trained for that cluster
            data,model=Find_model().get_model(data)
            pred = model.predict(data)[0]
            # BUG FIX: two independent `if`s left `statement` undefined
            # (NameError) for any prediction outside {0, 1}.
            if pred == 0:
                statement = "is not Fraudulent \n\n .The Customer is Innocent"
            elif pred == 1:
                statement = "is Fraudulent \n\n .The Customer is a crook."
            else:
                statement = "could not be classified"
            return render_template("results.html",prediction=statement)
        else:
            return render_template("index.html")
    except Exception as e:
        # BUG FIX: `return print(e)` returned None, which Flask rejects as a
        # response; log the failure and return the error text with HTTP 500.
        log_writer.log(log_message="\nError Occured in index page route\n")
        return str(e), 500
# Run the development server locally (debug mode; not for production use).
if __name__=="__main__":
    app.run(debug=True,host="127.0.0.1",port=8001)
import os
from pyFG import FortiOS
from cloudify.decorators import operation
from cloudify.state import ctx_parameters as inputs
# Blueprint resource names and the placeholder tokens that portConfig()
# substitutes into the template before pushing it to the FortiOS device.
TEMPLATE_CONFIG_FILE = 'portConfig.conf'
CONFIG_FILE = 'portConfig'
FIREWALL_FILE = 'firewall.conf'
TMP_CONFIG_FILE = '/tmp/portConfig'
portIdToSearch = 'portX'        # replaced with the port id from inputs
portIpToSearch = 'PORTIP'       # replaced with the port IP address
portMaskToSearch = 'PORTMASK'   # replaced with the netmask
portAliasToSearch = 'ALIASNAME' # replaced with the port alias
@operation
def portConfig(ctx, **kwargs):
    """Render the port configuration template with the blueprint inputs and
    execute it on the FortiOS device.

    Reads fortinet host/credentials and port settings from the Cloudify
    ``inputs`` proxy, substitutes the placeholder tokens in the downloaded
    template in place, then pushes the result over a FortiOS session.
    """
    ctx.logger.info('Start port config task....')
    templatePortConfig = ctx.get_resource(TEMPLATE_CONFIG_FILE)
    ctx.download_resource(templatePortConfig, TMP_CONFIG_FILE)
    forti_host = inputs['fortinet_host']
    forti_username = inputs['fortinet_user']
    forti_password = inputs['fortinet_password']
    portnum = inputs['test_port_number']
    portip = inputs['test_ce_port']
    portmask = '255.255.255.0'
    aliasname = 'Internal'
    # BUG FIX: the file was read to EOF and then written without seeking,
    # which *appended* the substituted text after the original template
    # instead of replacing it, and the handle was never closed. Rewrite the
    # file in place under a context manager.
    with open(TMP_CONFIG_FILE, 'r+') as f:
        clean = f.read().replace(portIdToSearch, portnum).replace(portIpToSearch, portip).replace(portMaskToSearch, portmask).replace(portAliasToSearch, aliasname)
        f.seek(0)
        f.write(clean)
        f.truncate()
    portConf = ctx.get_resource(TMP_CONFIG_FILE)
    ctx.logger.info('New file to execute {0} '.format(portConf))
    ctx.logger.info('Open connection to host {0} '.format(forti_host))
    conn = FortiOS(forti_host, username=forti_username, password=forti_password)
    conn.open()
    execCmd(ctx, conn, portConf)
# @operation
# def configFireWall(ctx, **kwargs):
#
# ctx.logger.info('Start config firewall task....')
#
# firewallFile=ctx.get_resource(FIREWALL_FILE)
#
# forti_host = inputs['fortinet_host']
# forti_username = inputs['fortinet_user']
# forti_password = inputs['fortinet_password']
#
# ctx.logger.info('Open connection to host {0} '.format(forti_host))
# conn = FortiOS(forti_host, username=forti_username, password=forti_password)
# conn.open()
#
# execCmd(ctx,conn,firewallFile)
def execCmd(ctx,conn,configfile):
    """Execute *configfile* on the FortiOS device over *conn*, then close
    the session."""
    ctx.logger.info('Execute command: {0}'.format(configfile))
    conn.execute_command(configfile)
    conn.close()
|
import brownie
import pytest
# Far-future unix timestamp so test transactions never hit the deadline.
DEADLINE = 99999999999
# Creation bytecode for a minimal Storage contract: one uint256 slot
# initialised to 5, exposing get() and set(uint256).
storage_bytecode = "0x6080604052600560005534801561001557600080fd5b5060ac806100246000" \
                   "396000f3fe6080604052348015600f57600080fd5b5060043610603257600035" \
                   "60e01c806360fe47b11460375780636d4ce63c146053575b600080fd5b605160" \
                   "048036036020811015604b57600080fd5b5035606b565b005b60596070565b60" \
                   "408051918252519081900360200190f35b600055565b6000549056fea2646970" \
                   "667358221220da99a6a9d4cea3f86897beaabcc36a956a9de39ec09abb36fa08" \
                   "6b5e25243df164736f6c63430006070033"
# Matching ABI: view get() -> uint256, nonpayable set(uint256).
storage_abi = [
    {
        "inputs": [],
        "name": "get",
        "outputs": [
            {
                "internalType": "uint256",
                "name": "",
                "type": "uint256"
            }
        ],
        "stateMutability": "view",
        "type": "function"
    },
    {
        "inputs": [
            {
                "internalType": "uint256",
                "name": "_x",
                "type": "uint256"
            }
        ],
        "name": "set",
        "outputs": [],
        "stateMutability": "nonpayable",
        "type": "function"
    }
]
@pytest.fixture(scope="module")
def liquid_lgt(lgt, accounts):
    """Fund the LGT pool from two accounts so the swap tests have liquidity.

    Consistency fix: use the shared DEADLINE constant (same value) instead
    of repeating the 99999999999 literal.
    """
    lgt.mint(50, {'from': accounts[0]})
    lgt.addLiquidity(1, 51, DEADLINE, {'from': accounts[0], 'value': "0.05 ether"})
    lgt.mint(80, {'from': accounts[1]})
    lgt.addLiquidity(1, 50, DEADLINE, {'from': accounts[1], 'value': "0.049 ether"})
    yield lgt
def test_deploy(liquid_lgt, accounts, Contract):
    """Deploying through the pool burns 5 LGT and yields a working contract."""
    initial_tokens = liquid_lgt.poolTokenReserves()
    price = liquid_lgt.getEthToTokenOutputPrice(5)
    tx = liquid_lgt.deploy(5, DEADLINE, storage_bytecode, {'from': accounts[0], 'value': price})
    address = tx.return_value
    # Wrap the freshly deployed address with the Storage ABI to call it.
    contract = Contract.from_abi(name="Storage", address=address, abi=storage_abi, owner=accounts[0])
    # 5 is the initial value baked into the Storage bytecode.
    assert contract.get() == 5
    contract.set(10)
    assert contract.get() == 10
    assert initial_tokens - 5 == liquid_lgt.poolTokenReserves()
def test_deploy_refund(liquid_lgt, accounts):
    """Overpaying for a deploy refunds the excess ETH to the sender."""
    initial_balance = accounts[0].balance()
    price = liquid_lgt.getEthToTokenOutputPrice(5)
    # Send double the price; only `price` should actually be spent.
    liquid_lgt.deploy(5, DEADLINE, storage_bytecode, {'from': accounts[0], 'value': price * 2})
    assert initial_balance - price == accounts[0].balance()
def test_deploy_deadline_reverts(liquid_lgt, accounts):
    """A deploy with an already-passed deadline (1) must revert."""
    with brownie.reverts("dev: deadline passed"):
        liquid_lgt.deploy(5, 1, storage_bytecode, {'from': accounts[0], 'value': "1 ether"})
def test_create2(liquid_lgt, accounts, Contract):
    """create2 deployment burns 4 LGT and yields a working contract."""
    initial_tokens = liquid_lgt.poolTokenReserves()
    price = liquid_lgt.getEthToTokenOutputPrice(4)
    # "0xabc" is the create2 salt.
    tx = liquid_lgt.create2(4, DEADLINE, "0xabc", storage_bytecode, {'from': accounts[0], 'value': price})
    address = tx.return_value
    contract = Contract.from_abi(name="Storage", address=address, abi=storage_abi, owner=accounts[0])
    # 5 is the initial value baked into the Storage bytecode.
    assert contract.get() == 5
    contract.set(10)
    assert contract.get() == 10
    assert initial_tokens - 4 == liquid_lgt.poolTokenReserves()
def test_create2_refund(liquid_lgt, accounts):
    """Overpaying for a create2 deploy refunds the excess ETH."""
    initial_balance = accounts[0].balance()
    price = liquid_lgt.getEthToTokenOutputPrice(3)
    liquid_lgt.create2(3, DEADLINE, "0xabc", storage_bytecode, {'from': accounts[0], 'value': price * 2})
    assert initial_balance - price == accounts[0].balance()
def test_create2_deadline_reverts(liquid_lgt, accounts):
    """A create2 call with an already-passed deadline (1) must revert."""
    with brownie.reverts("dev: deadline passed"):
        liquid_lgt.create2(4, 1, "0xabc", storage_bytecode, {'from': accounts[0], 'value': "1 ether"})
|
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression # 분류
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# 1. Data: the iris dataset (150 samples, 4 features, 3 classes)
dataset = load_iris()
x = dataset.data
y = dataset.target
print(x.shape, y.shape) # (150, 4) ,(150,)
# 5-fold cross-validation with shuffled splits
kfold = KFold(n_splits=5, shuffle=True)
# 2. Model
model = LinearSVC()
# cross_val_score performs fit and scoring for every fold internally
scores = cross_val_score(model, x, y, cv=kfold)
print('scores :', scores)
'''
# 훈련
model.fit(x_train,y_train)
y_pred = model.predict(x_test)
# print('y_test :', y_test)
# print('y_pred :', y_pred)
result = model.score(x_test,y_test)
print(i.__name__ + '\'s score(acc) :', result)
acc = accuracy_score(y_test, y_pred)
print(i.__name__ + '\'s accuracy_score :', acc)
'''
|
"""This program will print out the input as output method"""
def reverseinput(value=None):
    """Print an integer and four values derived from it.

    Prints, in order: the number itself, value+5, value-17, value*32 and
    the quadratic 5*x**2 + 50*x + 3.

    value -- optional int; when None (the default) the number is read from
             standard input, preserving the original behaviour.
    """
    var_x = int(input()) if value is None else int(value)
    print(var_x)
    print(var_x + 5)
    print(var_x - 17)
    print(var_x * 32)
    print((5 * var_x ** 2) + (10 * 5 * var_x) + 3)

# BUG FIX: the demo call ran unconditionally at import time and blocked on
# stdin; guard it so importing this module has no side effects.
if __name__ == "__main__":
    reverseinput()
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'wjq'
from peewee import *
from playhouse.db_url import connect
import datetime
import json
import bson
# MySQL connection for the cms_dev schema.
# NOTE(review): credentials are hard-coded in source -- move to config/env.
db = connect('mysql://root:123456@localhost:3306/cms_dev')
class Info(Model):
    """Peewee model: one sampled Redis server info record stored in MySQL."""
    id = PrimaryKeyField()
    rhost = CharField()      # Redis host address
    rport = IntegerField()   # Redis port
    rdb = IntegerField()     # Redis database index
    date = DateTimeField()   # sample timestamp
    info = CharField()       # serialized info payload
    class Meta:
        database = db
    def __repr__(self):
        # JSON form; DatetimeEncoder (defined below) handles datetimes.
        return json.dumps(self.__dict__ , cls=DatetimeEncoder)
    def __str__(self):
        return '{id:%d , rhost:%s , rport:%d , rdb:%d , date:%s , info:%s }' % (self.id , self.rhost , self.rport , self.rdb , self.date , self.info)
class JsonInfo():
    """Plain container for a subset of Redis memory statistics, serialized
    to JSON via __repr__."""
    def __init__(self , id ,date , used_memory , used_memory_human , used_memory_rss , used_memory_peak , used_memory_peak_human , mem_fragmentation_ratio ):
        self.id =id
        self.date = date
        self.used_memory = used_memory
        self.used_memory_human = used_memory_human
        self.used_memory_rss = used_memory_rss
        self.used_memory_peak = used_memory_peak
        self.used_memory_peak_human = used_memory_peak_human
        self.mem_fragmentation_ratio = mem_fragmentation_ratio
    def __repr__(self):
        # Serialize all attributes; DatetimeEncoder covers datetime values.
        return json.dumps(self.__dict__ , cls=DatetimeEncoder)
class DatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes (ISO-8601 'Z' form), dates
    (YYYY-MM-DD) and sets (as lists); everything else falls through to the
    base encoder, which raises TypeError for unsupported types."""
    def default(self, obj):
        # datetime must be checked before date: a datetime is also a date.
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        if isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        if isinstance(obj, set):
            return list(obj)
        return super().default(obj)
if __name__ == '__main__':
    # Smoke test: insert a single sample row into the Info table.
    Info.create(date=datetime.datetime.now() , info='{test:1234}' , rhost='10.100.14.93' , rport=6379 , rdb=0)
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import random
# Calibrate camera
def calibrate(img, objpoints, imgpoints):
    """Compute the camera matrix and distortion coefficients.

    img -- RGB chessboard calibration image; only its size is used by
           calibrateCamera.
    objpoints/imgpoints -- previously collected 3D object / 2D image point
           correspondences.
    Returns (mtx, dist).
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # NOTE(review): ret/corners are computed but never used or appended to
    # imgpoints here -- presumably the caller collects them; confirm.
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # Calibrate camera from the supplied correspondences
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
    return mtx, dist
# Return the undistorted image
def calc_undistort(img, mtx, dist):
    """Correct *img* for lens distortion using camera matrix *mtx* and
    distortion coefficients *dist*."""
    return cv2.undistort(img, mtx, dist, None, mtx)
# Unwarp an image
def unwarp(img, nx, ny, objpoints, imgpoints, offset):
    """Undistort a chessboard image and warp it to a head-on view.

    nx, ny -- chessboard inner-corner counts; offset -- border margin in px.
    Returns (warped, M); both are None when no corners are found.
    """
    # NOTE(review): calc_undistort expects (img, mtx, dist); objpoints and
    # imgpoints are forwarded here -- confirm the caller actually passes the
    # camera matrix and distortion coefficients under these names.
    undist = calc_undistort(img, objpoints, imgpoints)
    # Convert to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_RGB2GRAY)
    # Search for corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret == True:
        # Draw corners
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        img_size = (gray.shape[1], gray.shape[0])
        # Outer four detected corners are the source points
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # Destination rectangle inset by `offset` from the image border
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Calculate the perspective transform matrix and warp
        M = cv2.getPerspectiveTransform(src, dst)
        warped = cv2.warpPerspective(undist, M, img_size, flags=cv2.INTER_LINEAR)
    else:
        # BUG FIX: `warped, M = None` raised TypeError (cannot unpack None);
        # assign both names explicitly.
        warped, M = None, None
    return warped, M
# Function to plot and save images
def plot_imgs(imgs, titles, figsize=(24, 9), cmap='gray', save=False):
    """Show *imgs* side by side with *titles*; optionally save the figure.

    The last entry of *titles* doubles as the output filename when save=True.
    """
    num_imgs = len(imgs)
    f, axes = plt.subplots(1, num_imgs, figsize=figsize)
    f.tight_layout()
    # BUG FIX: with a single image plt.subplots returns a bare Axes object,
    # not an array, so axes[i] failed; normalize to a 1-D array.
    axes = np.atleast_1d(axes)
    for i in range(num_imgs):
        axes[i].imshow(imgs[i], cmap=cmap)
        axes[i].set_title(titles[i], fontsize=50)
    if save == True:
        plt.savefig('./output_images/' + titles[-1] + '.png', bbox_inches='tight')
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Gradient threshold
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose scaled |Sobel| gradient along *orient*
    ('x' or 'y') lies within *thresh* (inclusive)."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # BUG FIX: sobel_kernel was accepted but never passed to cv2.Sobel, so
    # the parameter had no effect; forward it as ksize (default 3 preserved).
    sobel = (cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel) if orient == 'x'
             else cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    absolute = np.absolute(sobel)
    # Scale to 0-255 so the threshold range is kernel-independent
    scaled = np.uint8(255*absolute/np.max(absolute))
    grad_binary = np.zeros_like(scaled)
    grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return grad_binary
# Magnitude of the gradient
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose scaled gradient magnitude lies within
    *mag_thresh* (inclusive).

    NOTE(review): the parameter `mag_thresh` shadows the function's own
    name inside the body; harmless here, but confusing.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean magnitude of the x/y gradients
    abs_sobelxy = np.sqrt(sobelx**2 + sobely**2)
    # Scale to 0-255 so the threshold range is kernel-independent
    scaled = np.uint8(255*abs_sobelxy/np.max(abs_sobelxy))
    mag_binary = np.zeros_like(scaled)
    mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1
    return mag_binary
# Direction of the gradient
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction (angle from the
    horizontal, computed from absolute gradients) lies within *thresh*."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Angle of the (|gy|, |gx|) vector, in [0, pi/2]
    direction = np.arctan2(np.sqrt(grad_y**2), np.sqrt(grad_x**2))
    dir_binary = np.zeros_like(direction)
    in_range = (direction >= thresh[0]) & (direction <= thresh[1])
    dir_binary[in_range] = 1
    return dir_binary
# Function to threshold the S channel
def S_select(img, thresh=(0, 255)):
    """Binary mask of pixels whose HLS saturation channel is strictly above
    thresh[0] and at most thresh[1]."""
    saturation = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    binary_output = np.zeros_like(saturation)
    binary_output[(saturation > thresh[0]) & (saturation <= thresh[1])] = 1
    return binary_output
# Final pipeline for thresholding images
def threshold_pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    """Combine an x-gradient threshold on the HLS lightness channel with a
    color threshold on the saturation channel into one binary image."""
    img = np.copy(img)
    # Convert to HLS color space (the original comment said HSV, but
    # COLOR_RGB2HLS is used: channel 1 = lightness, channel 2 = saturation).
    # BUG FIX: np.float was removed from NumPy (1.24); builtin float is the
    # documented equivalent (float64).
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(float)
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x on lightness: accentuates near-vertical lane edges
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Union of both thresholds
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
# Perspective-transform corner points shared by warp() and the pipelines:
# a trapezoid on the road (src) mapped to a rectangle (dst); coordinates
# assume 1280x720 frames.
src = np.float32([[600, 448], [686, 448], [1120, 720], [202, 720]])
dst = np.float32([[350, 0], [900, 0], [900, 720], [350, 720]])
# Warp an image
def warp(img, src_points=None, dst_points=None, reverse=False):
    """Perspective-warp *img* between road view and top-down view.

    src_points/dst_points default to the module-level src/dst quadrilaterals.
    BUG FIX: other functions in this file call warp(img, src, dst, ...) /
    warp(img, src, dst, reverse=True), which raised TypeError against the
    old (img, reverse) signature; the optional parameters make those calls
    work while keeping warp(img) and warp(img, reverse=...) unchanged.
    Set reverse=True to map the top-down view back to the road view.
    """
    if src_points is None:
        src_points = src
    if dst_points is None:
        dst_points = dst
    img_size = (img.shape[1], img.shape[0])
    # Compute perspective transform and its inverse
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    Minv = cv2.getPerspectiveTransform(dst_points, src_points)
    if reverse:
        return cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_LINEAR)
    return cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
def find_lane_lines(img):
    """Locate left/right lane pixels in a binary top-down image with a
    sliding-window histogram search and fit a 2nd-order polynomial to each.

    Returns (out_img, ploty, left_fitx, right_fitx, left_fit, right_fit):
    an annotated visualization image, the y plotting range, fitted x values
    for both lines, and the raw polynomial coefficients.

    BUG FIX: np.int was removed from NumPy (1.24); builtin int() is the
    documented replacement and truncates identically here.
    """
    # Histogram of the bottom half: column sums peak at the lane-line bases
    histogram = np.sum(img[int(img.shape[0]/2):,:], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((img, img, img))*255
    # Peaks of the left/right halves are the starting points for each line
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9                                # number of sliding windows
    window_height = int(img.shape[0]/nwindows)  # height of each window
    margin = 100                                # window half-width
    minpix = 50    # min pixels found to recenter the next window
    # x/y positions of all nonzero pixels in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions, updated per window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        win_y_low = img.shape[0] - (window+1)*window_height
        win_y_high = img.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the search windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
        # Nonzero pixels inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels found
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Color lane pixels red (left) and blue (right)
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Generate x and y values for plotting
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    return out_img, ploty, left_fitx, right_fitx, left_fit, right_fit
# Search for lane lines within a margin around the lines from the previous frame
def find_more_lane_lines(img, left_fit, right_fit):
    """Refit the lane polynomials using only pixels near last frame's fits.

    img -- binary top-down image; left_fit/right_fit -- previous 2nd-order
    polynomial coefficients.
    Returns (ploty, left_fitx, right_fitx, left_fit, right_fit), or five
    Nones when too few pixels lie within the search margin.
    """
    # Find candidate pixels in the new frame
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    # Keep pixels within +/- margin of each previous polynomial
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # BUG FIX: the failure path returned SEVEN Nones while the success path
    # returns five values, so `a,b,c,d,e = find_more_lane_lines(...)` raised
    # ValueError exactly when detection failed. Return five Nones.
    if lefty.shape[0] < 10 or righty.shape[0] < 10:
        return None, None, None, None, None
    # Fit a second order polynomial to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    return ploty, left_fitx, right_fitx, left_fit, right_fit
# Alternative to sliding window search
def window_mask(width, height, img_ref, center, level):
    """Binary mask, same shape as img_ref, set to 1 inside one search
    window: vertical band `level` (counted up from the image bottom),
    horizontally centered at `center` and clipped to the image."""
    rows, cols = img_ref.shape[0], img_ref.shape[1]
    top = int(rows - (level + 1) * height)
    bottom = int(rows - level * height)
    left = max(0, int(center - width / 2))
    right = min(int(center + width / 2), cols)
    mask = np.zeros_like(img_ref)
    mask[top:bottom, left:right] = 1
    return mask
def find_window_centroids(img, window_width, window_height, margin):
    """Find (left, right) window centroid x-positions per vertical layer by
    convolving column sums with a box window.

    img -- binary top-down image; window_width/height -- search window size;
    margin -- max horizontal slide between consecutive layers.
    Returns a list of (l_center, r_center) pairs, bottom layer first.
    Centers are floats (window_width/2 offsets) -- callers index with them
    only after int conversion elsewhere.
    """
    window_centroids = [] # Store the (left,right) window centroid positions per level
    window = np.ones(window_width) # Create our window template that we will use for convolutions
    # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
    # and then np.convolve the vertical image slice with the window template
    # Sum quarter bottom of image to get slice, could use a different ratio
    l_sum = np.sum(img[int(3*img.shape[0]/4):,:int(img.shape[1]/2)], axis=0)
    l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
    r_sum = np.sum(img[int(3*img.shape[0]/4):,int(img.shape[1]/2):], axis=0)
    r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(img.shape[1]/2)
    # Add what we found for the first layer
    window_centroids.append((l_center,r_center))
    # Go through each layer looking for max pixel locations
    for level in range(1,(int)(img.shape[0]/window_height)):
        # convolve the window into the vertical slice of the image
        image_layer = np.sum(img[int(img.shape[0]-(level+1)*window_height):int(img.shape[0]-level*window_height),:], axis=0)
        conv_signal = np.convolve(window, image_layer)
        # Find the best left centroid by using past left center as a reference
        # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
        offset = window_width/2
        l_min_index = int(max(l_center+offset-margin,0))
        l_max_index = int(min(l_center+offset+margin,img.shape[1]))
        l_center = np.argmax(conv_signal[l_min_index:l_max_index])+l_min_index-offset
        # Find the best right centroid by using past right center as a reference
        r_min_index = int(max(r_center+offset-margin,0))
        r_max_index = int(min(r_center+offset+margin,img.shape[1]))
        r_center = np.argmax(conv_signal[r_min_index:r_max_index])+r_min_index-offset
        # Add what we found for that layer
        window_centroids.append((l_center,r_center))
    return window_centroids
def draw_lines(img):
    """Visualize the centroid-based window search: overlay the found search
    windows in green on a 3-channel copy of the binary image *img*."""
    # window settings
    window_width = 50
    window_height = 80 # Break image into 9 vertical layers since image height is 720
    margin = 100 # How much to slide left and right for searching
    # BUG FIX: this called find_window_centroids(warped, ...) where `warped`
    # is not defined in this scope (NameError); use the `img` parameter.
    window_centroids = find_window_centroids(img, window_width, window_height, margin)
    # If we found any window centers
    if len(window_centroids) > 0:
        # Points used to draw all the left and right windows
        l_points = np.zeros_like(img)
        r_points = np.zeros_like(img)
        # Go through each level and draw the windows
        for level in range(0,len(window_centroids)):
            # Window_mask is a function to draw window areas
            l_mask = window_mask(window_width, window_height, img, window_centroids[level][0], level)
            r_mask = window_mask(window_width, window_height, img, window_centroids[level][1], level)
            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
            r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
        # Draw the results
        template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
        zero_channel = np.zeros_like(template) # create a zero color channle
        template = np.array(cv2.merge((zero_channel,template,zero_channel)),np.uint8) # make window pixels green
        warpage = np.array(cv2.merge((img,img,img)),np.uint8) # making the original road pixels 3 color channels
        output = cv2.addWeighted(warpage, 1, template, 0.5, 0.0) # overlay the orignal road image with window results
    # If no window centers found, just display orginal road image
    else:
        output = np.array(cv2.merge((img,img,img)),np.uint8)
    return output
# Measure radius of curve in pixels
def measure_curve_pixels(ploty, left_fitx, right_fitx):
    """Radius of curvature (in pixel units) of both lane fits, evaluated at
    the bottom of the image (largest y value)."""
    y_eval = np.max(ploty)
    radii = []
    for fitx in (left_fitx, right_fitx):
        # Refit a 2nd-order polynomial and apply the curvature formula
        # R = (1 + (2Ay + B)^2)^(3/2) / |2A| at y = y_eval.
        a, b, _ = np.polyfit(ploty, fitx, 2)
        radii.append(((1 + (2 * a * y_eval + b) ** 2) ** 1.5) / np.absolute(2 * a))
    return (radii[0], radii[1])
# Measure radius of curve in meters
def measure_curve(ploty, left_fit, right_fit):
    """Radius of curvature (meters) of both lane fits at the image bottom.

    ploty -- y sample positions in pixels; left_fit/right_fit -- 2nd-order
    polynomial coefficients in pixel space.
    Returns (left_curverad, right_curverad) in meters.
    """
    # Conversions from pixel space to meters
    ym_per_pix = 27/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    # Evaluate both polynomials in pixel space
    leftx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    # BUG FIX: `rightx` was a bare expression statement (NameError when the
    # fit below was computed); evaluate the right polynomial like the left.
    rightx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # BUG FIX: y_eval was never defined; evaluate at the image bottom.
    y_eval = np.max(ploty)
    # Refit in world-space (meter) coordinates
    left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    # Radius of curvature R = (1 + (2Ay + B)^2)^(3/2) / |2A|, in meters
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    return (left_curverad, right_curverad)
def distance_from_center(image_width, pts):
    """Signed offset (meters) of the camera from the lane center.

    pts -- (row, col) pixel coordinates of lane-overlay pixels (np.argwhere
    order); only pixels with row > 700 (near the bottom of the frame) are
    considered. A positive result means the camera sits right of center.
    """
    # The camera is assumed to sit at the horizontal middle of the frame.
    position = image_width / 2
    near_bottom = pts[:, 0] > 700
    xs = pts[:, 1]
    # Innermost lane-pixel columns on each side of the camera position
    left_base = np.min(xs[(xs < position) & near_bottom])
    right_base = np.max(xs[(xs > position) & near_bottom])
    # Expected lane center: halfway between the two base columns
    lane_center = (left_base + right_base) / 2
    xm_per_pix = 3.7 / 550  # meters per pixel in the x dimension
    return (position - lane_center) * xm_per_pix
# Fill in the lane with color and convert back to the original perspective
def draw_on_road(undist, warped, ploty, left_fitx, right_fitx, radius_left, radius_right):
    """Paint the detected lane green on the undistorted frame and annotate
    it with the curvature radii and the distance from the lane center."""
    # Blank 3-channel canvas in the warped (top-down) space
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_(pts), (0, 255, 0))
    # BUG FIX: warp() took no positional (src, dst) arguments, so
    # warp(color_warp, src, dst, reverse=True) raised TypeError; warp uses
    # the module-level src/dst points anyway.
    new_warp = warp(color_warp, reverse=True)
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 0.7, new_warp, 0.3, 0)
    # Calculate distance from center in meters
    imwidth = undist.shape[1]
    pts = np.argwhere(new_warp[:, :, 1])
    distance = distance_from_center(imwidth, pts)
    # Write curve radii on the image
    result = cv2.putText(result,
                         "Left Radius: {0} m, Right Radius: {1} m".format(int(radius_left), int(radius_right)),
                         (5,40), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
    # Write distance from center on the image
    result = cv2.putText(result,
                         "Distance from center: {0} m".format(str(distance)),
                         (5,70), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
    return result
# First attempt at pipeline
def pipeline1(img):
    """First-draft lane pipeline: warp, threshold, find lanes, measure the
    curve and paint the lane back onto the frame.

    Returns (annotated_image, (radius_left, radius_right)).
    """
    # Warp to top-down view (warp uses the module-level src/dst points)
    warped = warp(img, reverse=False)
    # Create the binary thresholded warped image
    binary_warped = threshold_pipeline(warped,
                                       s_thresh=(170, 255),
                                       sx_thresh=(20, 100))
    # BUG FIX: find_lane_lines returns SIX values; the old 4-name unpacking
    # raised ValueError, and radius_left/radius_right were never defined.
    out_img, ploty, left_fitx, right_fitx, left_fit, right_fit = find_lane_lines(binary_warped)
    # BUG FIX: measure_curve expects polynomial coefficients, not the fitted
    # x arrays that were being passed.
    radius_left, radius_right = measure_curve(ploty, left_fit, right_fit)
    curve = (radius_left, radius_right)
    # Fill in the lane green on the original frame
    new_img = draw_on_road(img, binary_warped, ploty, left_fitx, right_fitx, radius_left, radius_right)
    return new_img, curve
# Line class for keeping track of recent measurements
class Line():
    """Tracks polynomial fits for one lane line across video frames,
    smoothing the coefficients over the last ``n`` detections."""
    def __init__(self):
        self.detected = False        # was the line found in the last frame?
        self.n = 3                   # smoothing window length
        self.current = [0, 0, 0]     # newest fit coefficients
        self.diffs = [0, 0, 0]       # newest fit minus rolling averages
        self.A = []                  # histories of the three coefficients
        self.B = []
        self.C = []
        self.avg_A = 0               # rolling averages over the histories
        self.avg_B = 0
        self.avg_C = 0
        self.rad_of_curve = None     # most recent radius of curvature

    def add_avg_n_coefficients(self, A, B, C, radius):
        """Record a new fit (A, B, C) and its radius; return the updated
        averaged coefficients (avg_A, avg_B, avg_C)."""
        self.current = [A, B, C]
        # Deviation of the new fit from the previous rolling averages
        self.diffs = [A - self.avg_A, B - self.avg_B, C - self.avg_C]
        # Append to each history, keeping at most the n newest entries
        for history, value in ((self.A, A), (self.B, B), (self.C, C)):
            history.append(value)
            if len(history) > self.n:
                history.pop(0)
        self.avg_A = np.mean(self.A)
        self.avg_B = np.mean(self.B)
        self.avg_C = np.mean(self.C)
        self.rad_of_curve = radius
        return (self.avg_A, self.avg_B, self.avg_C)

    def get_coefficients(self):
        """Return (avg_A, avg_B, avg_C, diffs, radius_of_curvature)."""
        return (self.avg_A, self.avg_B, self.avg_C, self.diffs, self.rad_of_curve)
# Shared smoothing state that persists across the video frames fed to pipeline()
left_line = Line()
right_line = Line()
# Final pipeline
def pipeline(img):
    """Full lane-detection pipeline with frame-to-frame smoothing via Line objects.

    Returns the annotated frame (uint8) with the lane drawn and radii printed.
    """
    # Undistort images using the calibration points computed elsewhere in the file
    undist = calc_undistort(img, objpoints, imgpoints)
    # Source and destination points for the perspective transform
    src = np.float32([[600, 448], [686, 448], [1120, 720], [202, 720]])
    dst = np.float32([[350, 0], [900, 0], [900, 720], [350, 720]])
    # Warp images
    warped = warp(undist, src, dst)
    # Create binary warped images
    binary_warped = threshold_pipeline(warped)
    # If a line was not detected in the previous frame, search from scratch
    if left_line.detected == False:
        out_img, ploty, left_fitx, right_fitx, left_fit, right_fit = find_lane_lines(binary_warped)
        # Measure the radius of the curve of the road
        radius = measure_curve(ploty, left_fitx, right_fitx)
        radius_left = radius[0]
        radius_right = radius[1]
        # Add coefficients and radii to lines
        left_coeff = left_line.add_avg_n_coefficients(left_fit[0], left_fit[1], left_fit[2], radius_left)
        right_coeff = right_line.add_avg_n_coefficients(right_fit[0], right_fit[1], right_fit[2], radius_right)
        left_line.detected = True
    else:
        # Get last (smoothed) coefficients and search near them
        a_left, b_left, c_left, diffs_left, radius_left = left_line.get_coefficients()
        a_right, b_right, c_right, diffs_right, radius_right = right_line.get_coefficients()
        left_coeff = [a_left, b_left, c_left]
        right_coeff = [a_right, b_right, c_right]
        out_image, window_img, ploty, left_fitx, right_fitx, left_fit, right_fit = find_more_lane_lines(binary_warped, left_coeff, right_coeff)
        # Calculate radius of each line
        radius = measure_curve(ploty, left_fitx, right_fitx)
        radius_left = radius[0]
        radius_right = radius[1]
        # Bug fix: `left_fit != None` compares element-wise on numpy arrays and
        # raises/ambiguates in a boolean context; identity check is correct here.
        # Accept the fit only when the two radii are reasonably close (near-parallel lines).
        if (left_fit is not None) and ((radius_right < radius_left + 50) and (radius_right > radius_left - 50)):
            left_coeff = left_line.add_avg_n_coefficients(left_fit[0], left_fit[1], left_fit[2], radius_left)
            right_coeff = right_line.add_avg_n_coefficients(right_fit[0], right_fit[1], right_fit[2], radius_right)
            left_fitx = left_coeff[0]*ploty**2 + left_coeff[1]*ploty + left_coeff[2]
            right_fitx = right_coeff[0]*ploty**2 + right_coeff[1]*ploty + right_coeff[2]
        else:
            # Fit rejected: fall back to averages from the last n frames and
            # force a blind search on the next frame
            left_fitx = left_line.avg_A*ploty**2 + left_line.avg_B*ploty + left_line.avg_C
            right_fitx = right_line.avg_A*ploty**2 + right_line.avg_B*ploty + right_line.avg_C
            left_line.detected = False
    # Fill in lane green and undistort
    new_img = draw_on_road(img, binary_warped, ploty, left_fitx, right_fitx, radius_left, radius_right).astype(np.uint8)
    # Write the radii on the image (near the top-left corner)
    new_img = cv2.putText(new_img,
                          "Left: {0} Right: {1}".format(str(radius_left), str(radius_right)),
                          (5,5), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
    return new_img
#!/usr/bin/env python
import sys
import rospy
import math
from std_msgs.msg import Float64
from local_pathfinding.msg import AISMsg, GPS, path, latlon, windSensor
from utilities import *
from Sailbot import *
from matplotlib import pyplot as plt
from matplotlib import patches
import time
# Constants
VISUALIZER_UPDATE_PERIOD_SECONDS = 0.1
LATLON_TEXT_DECIMAL_PLACES = 3
# Globals for callbacks (filled in by the ROS subscribers below; None until
# the first message of each type arrives)
localPath = None
nextLocalWaypoint = None
nextGlobalWaypoint = None

# ROS subscribe callbacks
def localPathCallback(data):
    """Cache the waypoint list from the latest 'localPath' message."""
    global localPath
    localPath = data.waypoints
def nextLocalWaypointCallback(data):
    """Cache the latest 'nextLocalWaypoint' latlon message."""
    global nextLocalWaypoint
    nextLocalWaypoint = data
def nextGlobalWaypointCallback(data):
    """Cache the latest 'nextGlobalWaypoint' latlon message."""
    global nextGlobalWaypoint
    nextGlobalWaypoint = data
# Global variable for speedup (simulation time multiplier shown in the title)
speedup = 1.0

def speedupCallback(data):
    """Cache the latest simulation speedup factor from the 'speedup' topic."""
    global speedup
    speedup = data.data
# Set xy for figure
def getXYLimits(xy0, xy1):
    """Return plot limits (xPLim, xNLim, yPLim, yNLim) that cover both points.

    The narrower axis' limits are scaled so both axes span the same width,
    then every side is padded by half of the resulting x-range.
    """
    xHi = max(xy0[0], xy1[0])
    xLo = min(xy0[0], xy1[0])
    yHi = max(xy0[1], xy1[1])
    yLo = min(xy0[1], xy1[1])
    # Equalize the two spans by scaling the narrower axis' limits
    spanX = xHi - xLo
    spanY = yHi - yLo
    if spanX > spanY:
        ratio = spanX / spanY
        yHi *= ratio
        yLo *= ratio
    else:
        ratio = spanY / spanX
        xHi *= ratio
        xLo *= ratio
    # Pad every side by half of the (post-scaling) x-range for extra space
    pad = 0.5 * (xHi - xLo)
    return xHi + pad, xLo - pad, yHi + pad, yLo - pad
# Check when figure needs resizing
def needAxesResized(positionXY, nextGlobalWaypointXY, xPLim, xNLim, yPLim, yNLim):
    """Return True when the axes should be recomputed.

    Triggers when the boat or the goal leaves the current view, or when the
    boat-to-goal span has shrunk below 30% of the view (too zoomed out).
    """
    def _outside(pt):
        return not (xNLim <= pt[0] <= xPLim and yNLim <= pt[1] <= yPLim)

    # Boat or goal out of the current view
    if _outside(positionXY) or _outside(nextGlobalWaypointXY):
        return True

    # View far larger than the boat-to-goal extent
    spanX = math.fabs(nextGlobalWaypointXY[0] - positionXY[0])
    spanY = math.fabs(nextGlobalWaypointXY[1] - positionXY[1])
    if max(spanX / (xPLim - xNLim), spanY / (yPLim - yNLim)) < 0.3:
        return True
    return False
if __name__ == '__main__':
    # Entry point: subscribe to the path/waypoint/speedup topics and animate
    # the boat, local path, AIS ships and wind arrow on a matplotlib figure.
    sailbot = Sailbot(nodeName='localPathVisualizer')
    rospy.Subscriber("localPath", path, localPathCallback)
    rospy.Subscriber("nextLocalWaypoint", latlon, nextLocalWaypointCallback)
    rospy.Subscriber("nextGlobalWaypoint", latlon, nextGlobalWaypointCallback)
    rospy.Subscriber("speedup", Float64, speedupCallback)
    r = rospy.Rate(1.0 / VISUALIZER_UPDATE_PERIOD_SECONDS)

    # Wait for first messages
    while localPath is None or nextLocalWaypoint is None or nextGlobalWaypoint is None:
        # Exit if shutdown
        if rospy.is_shutdown():
            rospy.loginfo("rospy.is_shutdown() is True. Exiting")
            sys.exit()
        else:
            rospy.loginfo("Waiting to receive first ROS messages")
            time.sleep(1)
    rospy.loginfo("ROS message received. Starting visualization")

    # Convert values from latlon to XY, relative to the referenceLatlon
    state = sailbot.getCurrentState()
    referenceLatlon = nextGlobalWaypoint  # Ensure that this matches createLocalPathSS referenceLatlon for best results
    positionXY = latlonToXY(state.position, referenceLatlon)
    nextGlobalWaypointXY = latlonToXY(nextGlobalWaypoint, referenceLatlon)
    nextLocalWaypointXY = latlonToXY(nextLocalWaypoint, referenceLatlon)
    localPathXY = [latlonToXY(localWaypoint, referenceLatlon) for localWaypoint in localPath]
    localPathX = [xy[0] for xy in localPathXY]
    localPathY = [xy[1] for xy in localPathXY]

    # Create plot with waypoints and boat
    xPLim, xNLim, yPLim, yNLim = getXYLimits(positionXY, nextGlobalWaypointXY)
    # Marker size scales with the plot extent so icons stay visible after resizes
    markersize = min(xPLim - xNLim, yPLim - yNLim) / 2
    axes = plt.gca()
    localPathPlot, = axes.plot(localPathX, localPathY, marker='.', color='g', markersize=markersize / 2, linewidth=2)  # Small green dots
    nextGlobalWaypointPlot, = axes.plot(nextGlobalWaypointXY[0], nextGlobalWaypointXY[1], marker='*', color='y', markersize=markersize)  # Yellow star
    nextLocalWaypointPlot, = axes.plot(nextLocalWaypointXY[0], nextLocalWaypointXY[1], marker='X', color='g', markersize=markersize)  # Green X
    positionPlot, = axes.plot(positionXY[0], positionXY[1], marker=(3,0,state.headingDegrees - 90), color='r', markersize=markersize)  # Red triangle with correct heading. The (-90) is because the triangle default heading 0 points North, but this heading has 0 be East.

    # Setup plot xy limits and labels
    axes.set_xlim(xNLim, xPLim)
    axes.set_ylim(yNLim, yPLim)
    plt.grid(True)
    axes.set_xlabel('X distance to next global waypoint (km)')
    axes.set_ylabel('Y distance to next global waypoint (km)')
    axes.set_title('Local Path Visualizer (speedup = {})'.format(speedup))
    axes.set_aspect(aspect=1)

    # Show wind speed text and position text
    arrowLength = min(xPLim - xNLim, yPLim - yNLim) / 15
    arrowCenter = (xNLim + 1.5*arrowLength, yPLim - 1.5*arrowLength)
    globalWindSpeedKmph, globalWindDirectionDegrees = measuredWindToGlobalWind(state.measuredWindSpeedKmph, state.measuredWindDirectionDegrees, state.speedKmph, state.headingDegrees)
    windSpeedText = axes.text(arrowCenter[0], arrowCenter[1] + 1.5*arrowLength, "Global Wind Speed Kmph: {}".format(globalWindSpeedKmph), ha='center')
    positionLatlonText = axes.text(positionXY[0], positionXY[1] + 0.5*arrowLength, "(Lat: {}, Lon: {})".format(round(state.position.lat, LATLON_TEXT_DECIMAL_PLACES), round(state.position.lon, LATLON_TEXT_DECIMAL_PLACES)), ha='center')
    nextGlobalWaypointLatlonText = axes.text(nextGlobalWaypointXY[0], nextGlobalWaypointXY[1] + 0.5*arrowLength, "(Lat: {}, Lon: {})".format(round(nextGlobalWaypoint.lat, LATLON_TEXT_DECIMAL_PLACES), round(nextGlobalWaypoint.lon, LATLON_TEXT_DECIMAL_PLACES)), ha='center')

    # Main refresh loop: re-read state, update the artists, redraw, sleep
    while not rospy.is_shutdown():
        state = sailbot.getCurrentState()
        referenceLatlon = nextGlobalWaypoint  # Ensure that this matches createLocalPathSS referenceLatlon for best results

        # Convert values from latlon to XY, relative to the referenceLatlon
        positionXY = latlonToXY(state.position, referenceLatlon)
        nextGlobalWaypointXY = latlonToXY(nextGlobalWaypoint, referenceLatlon)
        nextLocalWaypointXY = latlonToXY(nextLocalWaypoint, referenceLatlon)
        localPathXY = [latlonToXY(localWaypoint, referenceLatlon) for localWaypoint in localPath]
        localPathX = [xy[0] for xy in localPathXY]
        localPathY = [xy[1] for xy in localPathXY]
        shipsXY = getObstacles(state.AISData.ships, state.position, state.speedKmph, referenceLatlon)

        # Update plots
        localPathPlot.set_xdata(localPathX)
        localPathPlot.set_ydata(localPathY)
        nextGlobalWaypointPlot.set_xdata(nextGlobalWaypointXY[0])
        nextGlobalWaypointPlot.set_ydata(nextGlobalWaypointXY[1])
        nextLocalWaypointPlot.set_xdata(nextLocalWaypointXY[0])
        nextLocalWaypointPlot.set_ydata(nextLocalWaypointXY[1])
        positionPlot.set_xdata(positionXY[0])
        positionPlot.set_ydata(positionXY[1])
        positionPlot.set_marker((3, 0, state.headingDegrees-90))  # Creates a triangle with correct 'heading'

        # Resize axes if needed
        if needAxesResized(positionXY, nextGlobalWaypointXY, xPLim, xNLim, yPLim, yNLim):
            xPLim, xNLim, yPLim, yNLim = getXYLimits(positionXY, nextGlobalWaypointXY)
            axes.set_xlim(xNLim, xPLim)
            axes.set_ylim(yNLim, yPLim)

        # Update wind speed text
        arrowLength = min(xPLim - xNLim, yPLim - yNLim) / 15
        arrowCenter = (xNLim + 1.5*arrowLength, yPLim - 1.5*arrowLength)
        globalWindSpeedKmph, globalWindDirectionDegrees = measuredWindToGlobalWind(state.measuredWindSpeedKmph, state.measuredWindDirectionDegrees, state.speedKmph, state.headingDegrees)
        windSpeedText.set_position((arrowCenter[0], arrowCenter[1] + 1.5*arrowLength))
        windSpeedText.set_text("Wind Speed Kmph: {}".format(globalWindSpeedKmph))
        positionLatlonText.set_position((positionXY[0], positionXY[1] + 0.5*arrowLength))
        positionLatlonText.set_text("(Lat: {}, Lon: {})".format(round(state.position.lat, LATLON_TEXT_DECIMAL_PLACES), round(state.position.lon, LATLON_TEXT_DECIMAL_PLACES)))
        nextGlobalWaypointLatlonText.set_position((nextGlobalWaypointXY[0], nextGlobalWaypointXY[1] + 0.5*arrowLength))
        nextGlobalWaypointLatlonText.set_text("(Lat: {}, Lon: {})".format(round(nextGlobalWaypoint.lat, LATLON_TEXT_DECIMAL_PLACES), round(nextGlobalWaypoint.lon, LATLON_TEXT_DECIMAL_PLACES)))

        # Update speedup text
        axes.set_title('Local Path Visualizer (speedup = {})'.format(speedup))

        # Add boats and wind speed arrow (these patches are removed again below)
        for ship in shipsXY:
            ship.addPatch(axes)
        arrowStart = (arrowCenter[0] - 0.5*arrowLength*math.cos(math.radians(globalWindDirectionDegrees)), arrowCenter[1] - 0.5*arrowLength*math.sin(math.radians(globalWindDirectionDegrees)))
        windDirection = patches.FancyArrow(arrowStart[0], arrowStart[1], arrowLength*math.cos(math.radians(globalWindDirectionDegrees)), arrowLength*math.sin(math.radians(globalWindDirectionDegrees)), width=arrowLength/4)
        axes.add_patch(windDirection)

        # Draw then sleep
        plt.draw()
        plt.pause(0.001)
        r.sleep()

        # Removes all ships and wind arrow
        for p in axes.patches:
            p.remove()
|
from ED6ScenarioHelper import *
def main():
    # Bose (柏斯) — decompiled ED6 scenario header for map C1211.
    CreateScenaFile(
        FileName = 'C1211_1 ._SN',
        MapName = 'Bose',
        Location = 'C1211.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60010",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        # No sub-scenarios are included by this file
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    BuildStringList(
        '@FileName', # 8
    )

    ScpFunction(
        "Function_0_66", # 00, 0
    )
# Decompiled event script body. The opcodes (OP_*, ChrTalk, ...) are defined
# by ED6ScenarioHelper; dialogue strings are game data and must not be edited.
# NOTE(review): appears to be a cutscene where the party arrives at a tower
# and overhears voices — inferred from the opcodes only; confirm in-game.
def Function_0_66(): pass

label("Function_0_66")

EventBegin(0x0)
SetMapFlags(0x400000)
OP_6D(-10, 0, -7120, 0)
# Place the three party members near the entrance
SetChrPos(0x101, 280, 0, -20580, 0)
SetChrPos(0x102, 1080, 0, -21780, 0)
SetChrPos(0x103, -780, 0, -21880, 0)

def lambda_B7():
    OP_90(0x101, 0x0, 0x0, 0xFA0, 0x7D0, 0x0)
    ExitThread()

QueueWorkItem(0x101, 1, lambda_B7)
Sleep(100)

def lambda_D7():
    OP_90(0x102, 0x0, 0x0, 0xFA0, 0x7D0, 0x0)
    ExitThread()

QueueWorkItem(0x102, 1, lambda_D7)
Sleep(100)

def lambda_F7():
    OP_90(0x103, 0x0, 0x0, 0xFA0, 0x7D0, 0x0)
    ExitThread()

QueueWorkItem(0x103, 1, lambda_F7)
FadeToBright(1000, 0)
OP_6D(480, 0, -17620, 3500)
OP_0D()
WaitChrThread(0x103, 0x1)
Sleep(400)

ChrTalk(
    0x101,
    (
        "#000F嗯~…………\x01",
        "这座塔叫什么名字来着?\x02",
    )
)

CloseMessageWindow()

ChrTalk(
    0x102,
    (
        "#010F『琥珀之塔』。\x02\x03",
        "是被世人称作『四轮之塔』的\x01",
        "古代遗迹的其中一座。\x02",
    )
)

CloseMessageWindow()
TurnDirection(0x101, 0x102, 400)

ChrTalk(
    0x101,
    (
        "#000F啊,是吗。\x01",
        "怪不得和『翡翠之塔』很像呢。\x02\x03",
        "虽然总体的色调完全不同,\x01",
        "不过气氛是相同的……………………\x02",
    )
)

CloseMessageWindow()
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
OP_8C(0x101, 0, 400)
Sleep(400)
TurnDirection(0x102, 0x101, 400)

ChrTalk(
    0x102,
    "#014F…………怎么了?\x02",
)

CloseMessageWindow()

ChrTalk(
    0x101,
    (
        "#505F嗯,没什么…………\x01",
        "好像听到了说话的声音……\x02",
    )
)

CloseMessageWindow()
# Reveal two NPCs (0x8, 0x9) in the distance and pan the camera toward them
SetChrPos(0x8, 19240, 0, 33890, 42)
SetChrPos(0x9, 10000, 0, 25000, 45)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
OP_8C(0x102, 0, 0)
OP_6D(170, 0, -8790, 3000)

ChrTalk(
    0x8,
    "……………………。\x02",
)

CloseMessageWindow()

ChrTalk(
    0x8,
    (
        "………………,\x01",
        "……………………。\x02",
    )
)

CloseMessageWindow()
Sleep(400)

ChrTalk(
    0x9,
    (
        "………………!?\x01",
        "……………………。\x02",
    )
)

CloseMessageWindow()
Sleep(400)
Fade(1000)
OP_6D(480, 0, -17620, 0)
OP_8C(0x102, 0, 0)
OP_0D()
Sleep(400)

ChrTalk(
    0x102,
    (
        "#012F…………果然,\x01",
        "好像有谁在里面。\x02",
    )
)

CloseMessageWindow()
TurnDirection(0x101, 0x103, 400)

ChrTalk(
    0x101,
    "#002F……啊,难道是……\x02",
)

CloseMessageWindow()
TurnDirection(0x102, 0x103, 400)

ChrTalk(
    0x103,
    (
        "#027F呵呵,\x01",
        "偶尔绕绕远路也是不错的嘛。\x02\x03",
        "说不定\x01",
        "我们无意中中了头彩哦。\x02\x03",
        "有必要调查一番呢。\x01",
        "…………不过一定要慎重行事。\x02",
    )
)

CloseMessageWindow()

ChrTalk(
    0x101,
    "#002F嗯。\x02",
)

CloseMessageWindow()
# Hide the NPCs again and record quest-log progress flags
SetChrFlags(0x8, 0x80)
SetChrFlags(0x9, 0x80)
OP_28(0xF, 0x4, 0x2)
OP_28(0xF, 0x4, 0x4)
OP_28(0xF, 0x1, 0x1)
OP_28(0xF, 0x1, 0x2)
OP_28(0xF, 0x1, 0x8000)
ClearMapFlags(0x400000)
EventEnd(0x0)
Return()

# Function_0_66 end

SaveToFile()

Try(main)
|
# Tuples resemble lists & dictionaries BUT they are IMMUTABLE:
# once an element is inside a tuple, it CANNOT be reassigned.
# Tuples are written with parentheses (or just commas).
t = 1, 2, 3  # parentheses are optional; the commas make the tuple
print(type(t))
# <class 'tuple'>

my_list = [1, 2, 3]
print(type(my_list))
# <class 'list'>

# Tuples can mix element types
t = ('one', 2)
print(t)
# ('one', 2)

# Indexing works exactly like lists, including negative indices
print(t[0])
# one
print(t[-1])
# 2

# INDEX METHOD: position of the FIRST occurrence of the argument
t = ('a', 'a', 'b')
print(t.index('a'))
# 0
print(t.index('b'))
# 2

# COUNT METHOD: how many times a value appears
print(t.count('a'))
# 2

# WHAT MAKES A TUPLE DIFFERENT (IMMUTABILITY)
print(my_list)
# [1, 2, 3]
my_list[0] = 'NEW'   # lists support item assignment...
print(my_list)
# ['NEW', 2, 3]
# ...tuples do not:
# t[0] = 'NEW'
# TypeError: 'tuple' object does not support item assignment

# WHY SHOULD I USE A TUPLE?
# Benefit: pass objects around with a guarantee they won't be changed.
|
# coding: utf-8

# In[4]:

from http.server import HTTPServer, SimpleHTTPRequestHandler
import ssl

# Serve the current directory over HTTPS on localhost:8888.
httpd = HTTPServer(('localhost', 8888), SimpleHTTPRequestHandler)
# Bug fix: ssl.wrap_socket() was deprecated in 3.7 and removed in Python 3.12.
# Use an SSLContext configured for server-side TLS instead.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile='cert.pem', keyfile='key.pem')
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
|
from typing import List
from fastapi import APIRouter, Depends, FastAPI, File, UploadFile, BackgroundTasks
from sqlalchemy.orm import Session
from starlette.requests import Request
from Scripts.fastapp.common.consts import UPLOAD_DIRECTORY, USING_MODEL_PATH
from Scripts.fastapp.database.conn import db
from Scripts.fastapp import models as m
from Scripts.fastapp.errors import exceptions as ex
from inspect import currentframe as frame
from Scripts.fastapp.database.schema import LeaderBoard
from Scripts.fastapp.common.config import get_logger
import os
router = APIRouter(prefix='/toy')
@router.get('/getLeaderBoard')
async def get_leader_board(request: Request):
    """
    no params\n
    :return\n
        Inzent NB Competition Leader Board (all LeaderBoard rows)
    """
    # Record the current frame for the project's request-inspection middleware
    request.state.inspect = frame()
    # id__gt='0' effectively selects every row
    result = LeaderBoard.filter(id__gt='0').all()
    print("##RESULT##", result)
    # return dict(id=result[0].id, reg_count=result[0].reg_count)
    return result
@router.post('/pushData')
async def input_data(request: Request, session: Session = Depends(db.session)):
    """Input Data

    Inserts a hard-coded sample row (team "cwh", score 0.01) tagged with the
    caller's IP address. Returns nothing (HTTP 200 with null body).
    """
    LeaderBoard.create(session, auto_commit=True, team_name="cwh", score=0.01, ip_add= request.state.ip )
@router.post("/uploadFile")
async def upload_submmision(request: Request, background_tasks: BackgroundTasks, team: str, files: List[UploadFile] = File(...) ,session: Session = Depends(db.session)):
    """
    Save each uploaded submission file into UPLOAD_DIRECTORY, record the team
    on the leader board, and return the full leader board.

    params: team name and one or more uploaded files \n
    return: all LeaderBoard rows \n
    raises: XedmUploadFailEx when no files were sent
    """
    if not files:
        raise ex.XedmUploadFailEx()
    for file in files:
        contents = await file.read()
        print(os.path.join('./', file.filename))
        # Security fix: file.filename is client-controlled; strip directory
        # components so a name like "../../etc/x" cannot escape the upload
        # directory (path traversal), and join paths portably.
        safe_name = os.path.basename(file.filename)
        with open(os.path.join(UPLOAD_DIRECTORY, safe_name), "wb") as fp:
            fp.write(contents)
    LeaderBoard.create(session, auto_commit=True, team_name=team, score=0.01, ip_add= request.state.ip )
    # background_tasks.add_task(cal_score, request = request, team = team, session = session, file=file)
    result = LeaderBoard.filter(id__gt='0').all()
    return result
def cal_score(request, team, session, file):
    """Placeholder for asynchronous score calculation (not implemented yet)."""
    pass
# Build FizzBuzz output for 1..100 two different ways and verify they agree.
ans = ""
for i in range(1, 101):
    out = ""
    if (i % 3 == 0):
        out += "Fizz"
    if (i % 5 == 0):
        out += "Buzz"
    if (out == ""):
        out += str(i)
    ans += out + "\n"

# One-liner variant: "Fizz"/"Buzz" multiplied by booleans, falling back to i
# via `or` when both multiplications yield the empty string.
correct = ""
for i in range(1, 101):
    correct += str(("Fizz" * (i % 3 == 0) + "Buzz" * (i % 5 == 0) or i)) + "\n"

# Bug fix: `print correct == ans` is Python 2 statement syntax and is a
# SyntaxError on Python 3; use the print() function.
print(correct == ans)
|
#!/usr/bin/env python
import sys
sys.path.insert(0, '..')
import models.model as model
import gaModel.gaModel_Yuri as ga
import numpy as np
def execGaModel(year, region, qntYears=5, times=1):
    """
    Creates the GAModel with JMA catalog

    :param year: first year of the observation window
    :param region: region prefix used to look up the '<region>jmaData' models
    :param qntYears: number of consecutive years loaded as observations
    :param times: number of independent GA runs to execute
    """
    observations = list()
    means = list()
    for i in range(qntYears):
        observation = model.loadModelDB(region + 'jmaData', year + i)
        # Convert bins to a plain list so they can be averaged below
        observation.bins = observation.bins.tolist()
        observations.append(observation)
        means.append(observation.bins)
        del observation
    # Element-wise mean over the qntYears observation vectors
    mean = np.mean(means, axis=0)
    for i in range(times):
        model_ = ga.gaModel(
            NGEN=100,
            CXPB=0.9,
            MUTPB=0.1,
            modelOmega=observations,
            year=year +
            qntYears,
            region=region,
            mean=mean,
            tournsize=2,
            n_aval=50000)
        model_.executionNumber = i
        model_.year = year + qntYears
        model_.modelName = region + 'GAModel'
        # NOTE(review): persistence of the trained model is entirely commented
        # out, so each run's result is currently discarded — confirm intended.
        # model.saveModelDB(model_)
        # model.saveModelToFile(model_,
        # '../../Zona4/GAModel/tournsize=2' + region +'GAModel' + str(year+qntYears) + '_' + str(i) + '.txt')
        # with open("../../Zona4/GAModel/tournsize=2" + region +"GAModel" + str(year+qntYears) + "_loglikelihood.txt", 'a') as f:
        #     f.write(str(model_.loglikelihood))
        #     f.write("\n")
        #     f.close()
        # with open("../../Zona4/GAModel/tournsize=2" + region +"GAModel" + str(year+qntYears) + '_' + str(i) + "logbook.txt", 'w') as f:
        #     f.write(str(model_.logbook))
        #     f.write("\n")
        #     f.close()
        # f.close()
def callGAModel(region):
    """
    It is a wrapper to the function that generates the GAModel with JMA data
    It cover the years of 2000 to 2005, and the models are from 2005 to 2010

    NOTE(review): the year loop is commented out, so only year 2000 actually
    runs despite what this docstring says — confirm which is intended.
    """
    year = 2000
    # while(year <= 2006):
    execGaModel(year, region)
    year += 1
def main():
    """
    Create the environment needed to generate both the GAModel and the List
    Model with the JMA catalog for the regions EastJapan, Kanto, Kansai and
    Tohoku, using observations from 2000 to 2005 to create models for
    2005 to 2010.
    """
    # Only Kanto is currently enabled; the other regions are commented out.
    region = 'Kanto'
    callGAModel(region)
    # region = 'EastJapan'
    # callGAModel(region)
    # region = 'Tohoku'
    # callGAModel(region)
    # region = 'Kansai'
    # callGAModel(region)


if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
def inherits_from(obj, a_class):
    """Return True when obj is an instance of a STRICT subclass of a_class.

    An object whose type is exactly a_class does not count as inheriting.
    """
    cls = type(obj)
    return cls is not a_class and issubclass(cls, a_class)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
File Name: hello_async.py
Description:
Created_Time: 2016-09-27 11:13:06
Last modified: 2016-09-27 11时29分27秒
'''
# event loop 是核心,主要有一下的作用。
# 1. 注册,执行,取消执行以及延时调用。
# 2. 为服务端和客户端提供transport
# 3. 为子进程和其他进程通信提供transports
# 4. 线程池函数调用授权
# 例子,简单调用
_author = 'arron'
_email = 'fsxchen@gmail.com'

import asyncio


def floop1(loop):
    """Callback scheduled with call_soon: runs on the next loop iteration."""
    print('loop1')


def floop2(loop):
    """Callback scheduled with call_later: runs after a one-second delay."""
    print('loop2')

# NOTE: asyncio.get_event_loop() returns the SAME loop object both times in
# the main thread, so loop1 and loop2 are one loop. The original code then
# called run_forever() without ever calling stop(), so the script hung and
# the second run_forever()/close() were unreachable.
loop1 = asyncio.get_event_loop()
loop2 = asyncio.get_event_loop()
loop1.call_soon(floop1, loop1)
loop2.call_later(1, floop2, loop2)
# Bug fix: stop the loop shortly after the delayed callback fires so the
# demo actually terminates.
loop1.call_later(1.1, loop1.stop)
loop1.run_forever()
loop1.close()
|
from shorthand.utils.config import CONFIG_FILE_LOCATION
from shorthand.web.app import create_app

# Module-level WSGI application built from the default config file location,
# suitable for `gunicorn module:default_app`-style deployment.
default_app = create_app(CONFIG_FILE_LOCATION)
|
from django.contrib import admin
from .models import mileStone

# Expose the mileStone model in the Django admin site with default options.
admin.site.register(mileStone)
from .main import RateLimit as RateLimit
from .redis import RedisInterface as RedisInterface, redisinterface as redisinterface
__all__ = ("RateLimit", "RedisInterface", "redisinterface")
__author__ = "PredaaA"
__version__ = "0.1.23"
|
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the news index page."""
    return render(request, 'news/index.html')
def search(request):
    """Render the news search page."""
    return render(request, 'news/search.html')
|
from heapq import heappush, heappop, heapify
def solution(q, k):
    """Return the minimum number of mixes until every value in q is >= k.

    Each mix removes the two smallest values a <= b and inserts a + 2*b.
    Returns -1 when the goal is unreachable. Mutates q in place (heapified).
    """
    heapify(q)
    mixes = 0
    while len(q) > 1:
        mildest = heappop(q)
        if mildest >= k:
            # Smallest element already meets the threshold — done.
            return mixes
        heappush(q, mildest + 2 * heappop(q))
        mixes += 1
    return mixes if q[0] >= k else -1

print(solution([1, 2, 3, 9, 10, 12], 7))
|
import datetime
import requests
import configparser
from bot_handler import BotHandler
# The Telegram token lives in config.ini so it stays out of source control
config = configparser.ConfigParser()
config.read('config.ini')
myTelegramToken = config['Data']['telegram_token']
bot = BotHandler(myTelegramToken)

# This tuple contains greeting keywords
user_greetings = ('hello', 'hi', 'whats up', 'good afternoon')
bot_greetings = ('Good morning', 'Good afternoon', 'Good evening', 'Hello night owl!')

# NOTE(review): captured once at import time, so the hour never changes while
# the bot keeps running — looks like a bug; confirm before relying on it.
currentTime = datetime.datetime.now()

# TODO: Make class for Youtube API
def video_search(request_text):
    """Return the final URL of a YouTube results page for the given query text."""
    base_url = 'https://www.youtube.com/results?search_query='
    # Collapse whitespace into '+' separators for the query string
    query = '+'.join(request_text.split())
    return requests.get(base_url + query).url
def answer(mess_text, user, hour):
    """Reply to a message: time-appropriate greeting, YouTube search, or fallback.

    mess_text: raw message text; user: display name; hour: current hour (0-23).
    """
    # Normalize once instead of recomputing `mess_text.lower().strip()` per branch
    text = mess_text.lower().strip()
    if text in user_greetings:
        if 6 <= hour < 12:
            greeting = bot_greetings[0]
        elif 12 <= hour < 16:
            greeting = bot_greetings[1]
        elif 16 <= hour < 24:
            greeting = bot_greetings[2]
        else:
            greeting = bot_greetings[3]
        # Bug fix: the night-owl branch previously joined the name with ''
        # (no space) while every other branch used ' '.
        bot.send_message(greeting + ' ' + user)
    elif 'search' in text:
        # "search <query>" -> reply with a YouTube results link.
        # NOTE(review): a bare "search" message makes search_text[1] raise
        # IndexError, matching the original behavior — confirm if intended.
        search_text = mess_text.split(' ', 1)
        bot.send_message('Your search result \n' + video_search(search_text[1]))
    else:
        bot.send_message('Sorry but I can\'t answer your message')
def main():
    """Poll Telegram for updates forever and answer the newest message."""
    new_offset = None
    while True:
        # Bug fix: the hour was previously read from a module-level timestamp
        # captured once at import, so greetings ignored the actual time of day
        # for a long-running bot. Read the current hour on every iteration.
        current_hour = datetime.datetime.now().hour
        bot.get_updates(new_offset)
        bot.get_last_update()
        last_update_id = bot.update_id
        last_chat_user = bot.user_name
        last_chat_message = bot.user_message
        answer(last_chat_message, last_chat_user, current_hour)
        # Acknowledge the processed update so it is not delivered again
        new_offset = last_update_id + 1


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        exit()
|
import requests
from lxml import etree
def fetch_links(url):
    """Return the body text of an HTTP GET request to *url*."""
    response = requests.get(url)
    return response.text
def in_page(url):
    """Fetch *url* and return its gallery image URLs.

    Returns a list of 'http:'-prefixed image src URLs found under
    div.x-loaded elements.
    """
    html = requests.get(url).text
    tree = etree.HTML(html)
    img_urls = tree.xpath('//div[@class="x-loaded"]/img/@src')
    # Bug fix: the original did `data = data.append(...)` — list.append
    # returns None, so `data` became None after the first iteration and the
    # function could never return more than an empty/broken result.
    return ['http:%s' % img_url for img_url in img_urls]
if __name__=='__main__':
    # Scratch crawler: collect thread links from an Autohome gallery listing
    # page, then fetch the first thread using a desktop browser User-Agent.
    start_url = 'https://club.autohome.com.cn/jingxuan/104/1#pvareaid=3311563'
    r=fetch_links(start_url)
    selector=etree.HTML(r)
    links=selector.xpath('//div[@class="pic-box"]/a/@href')
    # NOTE(review): `list` shadows the builtin — rename if this script grows.
    list=[str(link) for link in links]
    #
    # for url in list:
    #
    # Only the first thread link is fetched for now
    url='https:%s' % list[0]
    print(url)
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
         'AppleWebKit/537.36 (KHTML, like Gecko) ' \
         'Chrome/75.0.3770.142 Safari/537.36'
    headers = {
        'User-Agent': ua
    }
    r=requests.get(url,headers=headers)
    print(r.text)
    # #print(in_page(link))
    # r = requests.get(link).text
    # print(link)
    # re = etree.HTML(r)
    # img_urls = re.xpath('//div[@class="x-loaded"]/img/@src')
    # print(img_urls)
|
import numpy as np
# Find which named colour is nearest to chosen_pixel by Euclidean RGB distance.
chosen_pixel = [127, 255, 0]
available_pixels = {'red':[255,0,0], 'green':[0,255,0], 'blue':[0,0,255], 'magenta':[255,0,255],
                    'tomato':[255, 99, 71], 'lawn green':[124,252,0], 'steel blue':[70,130,180]}

distances = []
target = np.asarray(chosen_pixel)
for name, rgb in available_pixels.items():
    dist = np.linalg.norm(np.asarray(rgb) - target)
    distances.append(dist)
    # A new minimum (or a tie with it) moves the label to this colour,
    # i.e. a "<= best so far" scan.
    if dist == min(distances):
        curr_key = name

print(curr_key)
|
# -*-coding:utf-8-*-
from flask import g, current_app
from flask_restful import reqparse
from albumy.common.restful import RestfulBase, success_response, raise_400_response, raise_404_response
from albumy.extensions import login_required
from albumy.models import User
from albumy.utils.tokens import generate_confirm_token, parse_confirm_token
from albumy.utils.validate import validate_password, validate_email_or_mobile
from albumy.celery_tasks import send_email
class Password(RestfulBase):
    """
    Password management endpoints.

    Logic overview: password changes fall into two cases — an active change
    and a forgotten-password reset.
    An active change is performed while the user is logged in.
    A forgotten-password reset works while logged out: the user receives a
    time-limited, single-use URL and resets the password through it.
    """

    @login_required
    def __modify_password(self):
        # Active change: verify the old password, then store the new one.
        req = reqparse.RequestParser()
        req.add_argument("old_password", type=validate_password, default="", location="form")
        req.add_argument("new_password", type=validate_password, default="", location="form")
        args = req.parse_args()
        user = g.current_user
        if not user.check_password(args["old_password"]):
            # Message: "wrong password"
            return raise_400_response(message="密码错误")
        fields = dict(
            password=args["new_password"]
        )
        user.update(**fields)
        return success_response()

    def __forget_password(self, token):
        # Reset via emailed token: parse it, validate the form, set the password.
        data = parse_confirm_token(token)
        user_id = data.get("confirm")
        if (not user_id) and data.get("msg"):
            # Token expired or otherwise invalid ("link expired")
            return success_response(message="连接过时")
        if user_id:
            req = reqparse.RequestParser()
            req.add_argument("password", type=validate_password, default="", location="form")
            req.add_argument("repeat_password", type=validate_password, default="", location="form")
            args = req.parse_args()
            if args["password"] != args["repeat_password"]:
                # Message: "the two passwords do not match"
                return success_response(status_code=0, message="两次密码不一致")
            user = User.get_by_id(user_id)
            fields = {
                'password': args["password"]
            }
            user.update(**fields)
            # Message: "password reset succeeded"
            return success_response(message="密码重置成功")

    def post(self, token=None):
        # Without a token this is an active change; with one it is a reset.
        if not token:
            return self.__modify_password()
        return self.__forget_password(token)

    def get(self):
        # Request a reset link by email address or mobile number.
        req = reqparse.RequestParser()
        req.add_argument("email_or_mobile", type=validate_email_or_mobile, default="", location="form")
        args = req.parse_args()
        email_or_mobile = args.get("email_or_mobile")
        # If the input is a mobile number, look the user up by mobile;
        # otherwise treat it as an email address.
        if email_or_mobile.isnumeric():
            _user = User.query.filter_by(mobile=email_or_mobile).first()
        else:
            _user = User.query.filter_by(email=email_or_mobile).first()
        if not _user:
            # Message: "this account is not registered"
            return raise_404_response(message=u"此账号未注册")
        token = generate_confirm_token(_user.id).decode("utf-8")
        try:
            # Email delivery is best-effort; the reset URL is also returned below
            send_email.delay("RESET_PASSWORD", _user.email,
                             body="{}/api/user/password/{}".format(current_app.config["DOMAIN"], token))
        except Exception as e:
            print(e)
        data = {
            "url": "{}/api/user/password/{}".format(current_app.config["DOMAIN"], token)
        }
        return success_response(data=data)
|
# Generated by Django 2.2.11 on 2020-06-01 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax ProductInBasketModel.product to a nullable CharField."""

    dependencies = [
        ('order', '0010_auto_20200406_1707'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productinbasketmodel',
            name='product',
            field=models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Продукт'),
        ),
    ]
|
#
# Kiwi - An open source application framework
# Copyright (C) 2012-Today Thibaut DIRLIK <thibaut.dirlik@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import logging
import json
import zlib
from kiwi.common import Command
from kiwi.protocols import BaseProtocolHandler, ProtocolError
logger = logging.getLogger('kiwi.protocols.jsonrpc')
class ProtocolHandler(BaseProtocolHandler):
    """ A json-based protocol.

    This is not a pure JsonRPC implementation, but it's almost the same. The data must be compressed with
    zlib (the original docstring said "bzip", but the code uses zlib). The first 4 bytes of each message
    must be an unsigned integer in network byte order: the size of the following message.

    The data is then read, decompressed and parsed by json. The Json data is finally converted to a command object.

    The :meth:`handle` method is a generator which yields :class:`Command` objects when available.
    """

    def __init__(self, maximum_buffer_size=1024*1024*5): # 5MB, maximum message size
        self.maximum_buffer_size = maximum_buffer_size
        # Size of the message currently being received; None until the 4-byte prefix is read
        self.size = None
        # Bytes received so far for the current message
        self.buffer = b''

    def handle(self, data):
        """ This method must be called by servers when data is available.

        It returns a generator that yields :class:`Command` objects when they are available.

        :raise ProtocolError: If an announced message exceeds maximum_buffer_size.
        """
        # Bug fix: the original required len(data) > 4 before reading the size
        # prefix and silently DROPPED shorter fragments (including exactly-4-byte
        # ones). Buffer everything first, then extract the prefix once at least
        # 4 bytes have accumulated.
        self.buffer += data
        if self.size is None:
            if len(self.buffer) < 4:
                return # Not enough data yet to read the size prefix.
            self.size = struct.unpack('!I', self.buffer[:4])[0]
            self.buffer = self.buffer[4:]
            # Enforce the configured cap (previously stored but never checked).
            if self.size > self.maximum_buffer_size:
                raise ProtocolError('Announced message size %d exceeds maximum buffer size %d'
                                    % (self.size, self.maximum_buffer_size))
        if len(self.buffer) >= self.size:
            message = self.buffer[:self.size]
            message_rest = self.buffer[self.size:]
            message_json = None
            try:
                message = zlib.decompress(message)
                message = message.decode('utf-8')
                message_json = json.loads(message)
            except UnicodeDecodeError:
                logger.error('JSON RPC Must use UTF-8 encoding, message will be skipped.')
            except zlib.error:
                logger.error('Invalid compressed data, message will be skipped.')
            except Exception:
                # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
                logger.exception('An error occured while parsing JSON, message will be skipped.')
            finally:
                # After handling a message, reset the framing state.
                self.buffer = b''
                self.size = None
            if message_json is not None:
                yield Command.from_json_dict(message_json)
            # We then recursively call handle() to handle the rest of the message.
            if message_rest:
                for command in self.handle(message_rest):
                    yield command

    def command_to_bytes(self, command):
        """ Converts the command to JSON, compress it and returns the result.

        The first 4 bytes is an unsigned integer in network byte order containing
        the size of the following message. Next bytes are the compressed
        JSON representation of the command.

        This allow theorically a message of 2^32 bytes, which is equal to
        4294967295 bytes (~4GB). It should be enought ;)
        """
        command_as_json = json.dumps(command.to_json_dict()).encode('utf-8')
        command_compressed = zlib.compress(command_as_json)
        return struct.pack('!I', len(command_compressed)) + command_compressed
|
"""
We will use this script to learn Python to absolute beginners
The script is an example of BMI_Calculator implemented in Python
The BMI_Calculator:
# Get the weight(Kg) of the user
# Get the height(m) of the user
# Caculate the BMI using the formula
BMI=weight in kg/height in meters*height in meters
Exercise 2:
Write a program to calculate the BMI by accepting user input from the keyboard and check whether the user
comes in underweight ,normal weight overweight or obesity.
i)Get user weight in kg
ii)Get user height in meter
iii) Use this formula to calculate the bmi
BMI = weight_of_the_user/(height_of_the_user * height_of_the_user)
iv)Use this level to check user category
#)Less than or equal to 18.5 is represents underweight
#)Between 18.5 -24.9 indicate normal weight
#)Between 25 -29.9 denotes over weight
#)Greater than 30 denotes obese
"""
print("Enter the weight of the user in Kg's")
# input() returns a string; convert to float so the arithmetic below works
# (the original code divided strings, which raises TypeError in Python 3).
weight_of_the_user = float(input())
# Get the height of the user through keyboard
print("Enter the height of the user in meters")
height_of_the_user = float(input())
# Calculate the BMI of the user according to height and weight:
# BMI = weight (kg) / height (m)^2
bmi = weight_of_the_user/(height_of_the_user * height_of_the_user)
print("BMI of the user is :",bmi)
# Classify: <=18.5 underweight, <25 normal, <30 overweight, otherwise obese.
# The original boundaries left gaps (values in [24.9, 25) and [29.9, 30)
# matched no category at all).
if bmi <= 18.5:
    print("The user is considered as underweight")
elif bmi < 25:
    print("The user is considered as normal weight")
elif bmi < 30:
    print("The user is considered as overweight")
else:
    print("The user is considered as obese")
#coding:utf8
from collections import OrderedDict
from goods.models import GoodsChannel
def get_categories():
    """Build the ordered category structure used by the storefront.

    Returns an ``OrderedDict`` keyed by channel ``group_id``; each value is a
    dict with ``'channels'`` (first-level category entries with id/name/url)
    and ``'sub_cats'`` (second-level categories, each carrying its own
    ``sub_cats`` list of third-level categories).
    """
    result = OrderedDict()
    # Channels ordered by group then sequence define the first level.
    for channel in GoodsChannel.objects.order_by('group_id', 'sequence'):
        gid = channel.group_id
        # Lazily create the container for a group we have not seen yet.
        if gid not in result:
            result[gid] = {'channels': [], 'sub_cats': []}
        cat_one = channel.category
        # First-level entry for this channel.
        result[gid]['channels'].append({
            'id': cat_one.id,
            'name': cat_one.name,
            'url': channel.url,
        })
        # Second level: attach each third-level category to its parent,
        # then collect the parent under the group.
        for cat_two in cat_one.goodscategory_set.all():
            cat_two.sub_cats = [cat_three for cat_three in cat_two.goodscategory_set.all()]
            result[gid]['sub_cats'].append(cat_two)
    return result
|
from rest_framework import serializers
from kratos.apps.configuration.models import Configuration
from kratos.apps.app.serializers import AppSerializer
class ConfigurationSerializer(serializers.ModelSerializer):
    """Serializer for Configuration objects.

    The related app is accepted on input via ``app`` (write-only, optional)
    and exposed on output as the nested, read-only ``appinfo``.
    """
    # Read-only nested representation of the related app (reads the 'app' field).
    appinfo = AppSerializer(read_only=True, source='app')

    class Meta:
        model = Configuration
        fields = ('id', 'version', 'name', 'path', 'content', 'app', 'appinfo', 'created_at', 'updated_at')
        # 'app' is not echoed back in responses; 'appinfo' carries it instead.
        extra_kwargs = {'app': {'write_only': True, 'required': False}}
|
import ixcom
import time
import sys
import struct
import argparse
import socket
import io
import os
class TextFileParser(ixcom.parser.MessageParser):
    """Renders parsed ixcom messages as indented, human-readable text.

    The parser subscribes to itself, so every message decoded by the inherited
    message searcher is delivered to :meth:`handle_message`.
    """
    def __init__(self, outputfile, skip_parameter=list(), print_request = True):
        """
        :param outputfile: writable text file that receives the rendered output
        :param skip_parameter: parameter IDs to suppress entirely
        :param print_request: if False, parameter requests are printed as a
            single summary line instead of a full dump
        """
        # NOTE(review): skip_parameter uses a mutable default argument; it is
        # never mutated here, but this is fragile.
        super().__init__()
        self.ignore_output = False               # when True, write_output() is a no-op
        self.outputfile = outputfile
        self.print_request = print_request
        self.messageSearcher.disableCRC = False  # keep CRC checking enabled
        self._indent_level = 0                   # current indentation depth (tabs)
        self.add_subscriber(self)                # receive our own parsed messages
        self.skipParameter = skip_parameter
        self.parameterList = list()              # collected (parameter_id, message) tuples

    def __handle_loglist2(self, message):
        """Pretty-print a PARXCOM_LOGLIST2 payload: one entry per configured log."""
        self.write_output(f"Channel: {message.payload.data['reserved_paramheader']}\n")
        for log in message.payload.data['loglist']:
            msgid = log['msgid']
            if msgid in ixcom.data.MessagePayloadDictionary:
                message_class = ixcom.data.MessagePayloadDictionary[msgid]
                zeile = "msgid: %s\n" % (message_class.get_name())
            else:
                # Unknown message ID: fall back to the numeric value.
                zeile = f'msgid: {msgid}\n'
            self.write_divider()
            self.write_output(zeile)
            self.write_output(f'divider: {log["divider"]}\n')
            self.write_output(f'running: {log["running"]}\n')
        self.write_output('\n\n')

    def handle_message(self, message, from_device):
        """Subscriber callback: render one parsed message to the output file."""
        zeile = "Header Time: %.4f\n" % (message.header.get_time())
        if message.header.msgID == ixcom.data.MessageID.PARAMETER:
            # NOTE(review): the ID is read from message.data while the rest of
            # this class reads message.payload.data -- confirm both views exist.
            parameter_id = message.data['parameterID']
            # Skip unknown parameters and explicitly blacklisted ones.
            if not (parameter_id in ixcom.data.ParameterPayloadDictionary) or \
                    parameter_id in self.skipParameter:
                return
            self.write_output(zeile)
            self.parameterList.append((parameter_id, message))
            if message.payload.data["action"] == 0:
                zeile = "Parameter: %s\n" % message.payload.get_name()
            else:
                zeile = "Parameter request: %s\n" % message.payload.get_name()
                if not self.print_request:
                    # Requests are summarised as a single line when full
                    # request printing is disabled.
                    self.write_output(zeile+'\n')
                    return
        elif message.header.msgID == ixcom.data.MessageID.COMMAND:
            zeile = "Command: %s\n" % message.payload.get_name()
        self.write_output(zeile)
        if isinstance(message.payload, ixcom.data.PARXCOM_LOGLIST2_Payload):
            self.__handle_loglist2(message)
        else:
            self.__convert_dict(message.payload.data)
        zeile = "\n\n"
        self.write_output(zeile)

    def write_parameter(self):
        """Emit every collected parameter message, sorted by parameter ID."""
        self.parameterList.sort(key=lambda messagetuple: messagetuple[0])
        for par in self.parameterList:
            message = par[1]
            if message.payload.data["action"] == 0:
                zeile = "Parameter: %s\n" % message.payload.get_name()
            else:
                zeile = "Parameter request: %s\n" % message.payload.get_name()
            self.write_output(zeile)
            if isinstance(message.payload, ixcom.data.PARXCOM_LOGLIST2_Payload):
                self.__handle_loglist2(message)
            else:
                self.__convert_dict(message.payload.data)
            zeile = "\n\n"
            self.write_output(zeile)

    def write_divider(self):
        """Write a short horizontal divider line."""
        self.write_output('-'*10+'\n')

    def __convert_dict(self, d):
        """Recursively render a payload dict; lists of dicts increase the indent."""
        for key in d:
            if isinstance(d[key], list) and isinstance(d[key][0], dict):
                self.write_output(f'{key}:\n')
                self._indent_level += 1
                for new_d in d[key]:
                    self.write_divider()
                    self.__convert_dict(new_d)
                self._indent_level -= 1
            else:
                self.__convert_key(key, d)

    def __convert_key(self, key, d):
        """Render a single key/value pair, with special formatting for IPs and bytes."""
        if key in ['ip', 'subnetmask', 'gateway', 'defaultAddress', 'serverAddress', 'ipAddress', 'destAddr', 'udpAddr']:
            # These fields hold an IPv4 address packed into a 32-bit integer.
            ipbinary = d[key]
            zeile = "%s: %s\n" % (key, socket.inet_ntoa(struct.pack('!L', ipbinary)))
        elif isinstance(d[key], bytes):
            try:
                # Treat bytes as a NUL-terminated ASCII string when possible.
                tmp = d[key].split(b'\0')[0]
                tmp = tmp.decode('ascii')
                zeile = "%s: %s\n" % (key, tmp)
            except:
                # Non-ASCII bytes: fall back to the raw representation.
                zeile = "%s: %s\n" % (key, d[key])
        else:
            zeile = "%s: %s\n" % (key, d[key])
        self.write_output(zeile)

    def write_output(self, line):
        """Write one line at the current indent level, unless output is muted."""
        if not self.ignore_output:
            self.outputfile.write('\t'*self._indent_level+line)

    def parse_file(self, inputfile):
        """Stream the whole input file through the message searcher in 1 KiB chunks."""
        while True:
            tmpBuffer = inputfile.read(1024)
            if not tmpBuffer:
                break
            self.messageSearcher.process_bytes(tmpBuffer)
def xcom_lookup(argv = None):
    """Discover XCOM servers on the local network via UDP broadcast.

    Sends a "hello" broadcast on the given port and, for each responding host,
    connects an ixcom client to read system name, IMU type and firmware
    version, printing ssh/ftp URLs for real devices (IMU type != 255).
    Terminates with exit code 0 when no more answers arrive.

    :param argv: optional argument vector for argparse; ``None`` uses sys.argv.
    """
    parser = argparse.ArgumentParser(description='Searches for XCOM servers on the network')
    parser.add_argument('-p', type=int, nargs='?', help='Port', default = 4000)
    # Fix: honor the argv parameter instead of always parsing sys.argv.
    args = parser.parse_args(args=argv)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
    s.settimeout(0.2)
    s.sendto("hello".encode('utf-8'), ("<broadcast>", args.p))
    time.sleep(0.1)
    while True:
        try:
            data, (ip, _) = s.recvfrom(1024) # buffer size is 1024 bytes
            try:
                client = ixcom.Client(ip, 3000)
                client.open_last_free_channel()
                sysname = client.get_parameter(19).payload.data['str'].decode('utf-8').split('\0')[0]
                imutype = client.get_parameter(107).payload.data["type"]
                fwversion = client.get_parameter(5).payload.data['str'].decode('utf-8').split('\0')[0]
                if imutype != 255:
                    print("%s (%s, FW %s): ssh://root@%s, ftp://%s" % (data[:-1].decode('utf-8'), sysname, fwversion, ip, ip))
                client.close_channel()
            except Exception:
                # Best effort: a host that answers the broadcast but fails the
                # XCOM handshake is simply skipped (was a bare except).
                pass
        except (socket.timeout, OSError):
            # No more answers: clean up the socket and terminate the tool.
            try:
                s.close()
            except OSError:
                pass
            sys.exit(0)
def configdump2txt(argv=None):
    """Render a binary XCOM config dump file as human-readable text.

    :param argv: optional argument vector for argparse; ``None`` uses sys.argv.
    """
    arg_parser = argparse.ArgumentParser(description='Converts xcom binary config dump files to other representations')
    arg_parser.add_argument('input_file', metavar='', type=argparse.FileType('rb'), nargs='?', help='Name of the binary file', default='config.dump')
    arg_parser.add_argument('-o', '--output', metavar='output_filename', type=argparse.FileType('wt'), help='Filename of the output file', default=sys.stdout)
    options = arg_parser.parse_args(args=argv)
    # skip parxcom_loglist2(917)
    converter = TextFileParser(options.output, skip_parameter=[917])
    # First pass collects parameters silently; they are then emitted sorted by ID.
    converter.ignore_output = True
    converter.parse_file(options.input_file)
    converter.ignore_output = False
    converter.write_parameter()
def monitor2xcom(argv = None):
    """Convert raw "Dump Frame" lines from a monitor log file to readable text.

    :param argv: optional argument vector for argparse; ``None`` uses sys.argv.
    """
    import re
    import binascii
    arg_parser = argparse.ArgumentParser(description='Converts raw messages in monitor log files to readable text')
    arg_parser.add_argument('input_file', metavar='', type=argparse.FileType('rt'), nargs='?',
                            help='Name of the monitor file', default = 'monitor.log')
    arg_parser.add_argument('-o', '--output', metavar='output_filename', type=argparse.FileType('wt'),
                            help='Filename of the output file', default = sys.stdout)
    options = arg_parser.parse_args(args = argv)
    converter = TextFileParser(options.output, print_request=False)
    for raw_line in options.input_file:
        # Only lines containing a dumped frame are of interest.
        if not re.search("Dump Frame", raw_line):
            continue
        options.output.write("System Time: %s\n" % (raw_line.split(":"))[0])
        frame = (raw_line.split("--> "))[1].replace("\n", "")
        options.output.write("Frame: %s\n" % frame)
        # Strip spacing and "0x" prefixes so the frame can be unhexlified.
        hex_text = frame.replace(" ", "").replace("0x", "").lower()
        frame_bin = binascii.unhexlify(hex_text)
        try:
            converter.messageSearcher.process_bytes(frame_bin)
        except ixcom.data.ParseError as e:
            frame_name = str(e).split('convert ')[1]
            options.output.write(f'Corrupt {frame_name} frame\n\n')
def split_config(argv = None):
    """Copy only the selected parameter messages from a config.dump file.

    Every raw message whose parameter ID is in ``parameter_ids`` is written
    unchanged to the output; everything else is dropped. Exits with 0 on
    success, 1 on any error.

    :param argv: optional argument vector for argparse; ``None`` uses sys.argv.
    """
    parser = argparse.ArgumentParser(description='Filters out certain parameters from config.dump file')
    parser.add_argument('inputfile', metavar='inputfile', type=argparse.FileType('rb'), nargs='?',
                        help='Name of the binary file', default = 'config.dump')
    parser.add_argument('-o', '--output', metavar='output_filename', type=argparse.FileType(mode='wb'),
                        help='Filename of the output file', default = sys.stdout.buffer)
    parser.add_argument('parameter_ids', metavar = 'ID', type=int, nargs = '+', help = 'Parameter IDs to pass through')
    # Fix: honor the argv parameter instead of always parsing sys.argv.
    args = parser.parse_args(args=argv)
    xcomparser = ixcom.parser.MessageSearcher()
    try:
        def callback(msg_bytes):
            # The parameter ID lives in bytes 16..19 of the raw message.
            message = ixcom.data.ProtocolMessage()
            message.payload = ixcom.data.DefaultParameterPayload()
            message.payload.from_bytes(msg_bytes[16:20])
            parameterID = message.payload.data['parameterID']
            if parameterID in args.parameter_ids:
                args.output.write(msg_bytes)
        xcomparser.add_callback(callback)
        xcomparser.process_bytes(args.inputfile.read())
        sys.exit(0)
    except Exception as ex:
        print(ex)
        sys.exit(1)
def remove_partial_msgs(argv = None):
    """Rewrite an XCOMStream file, dropping partial/corrupt messages.

    Every message that passes the CRC check is re-emitted unchanged; bytes
    belonging to incomplete frames are discarded.

    :param argv: optional argument vector for argparse; ``None`` uses sys.argv.
    """
    parser = argparse.ArgumentParser(description='Removes Partial Messages from XCOMStream')
    parser.add_argument('inputfile', metavar='inputfile', type=argparse.FileType('rb'), nargs='?',
                        help='Name of the binary XCOMStream file', default = 'XCOMStream.bin')
    parser.add_argument('-o', '--output', metavar='output_filename', type=argparse.FileType(mode='wb'),
                        help='Filename of the output file', default = 'XCOMStream.clean.bin')
    # Fix: honor the argv parameter instead of always parsing sys.argv.
    args = parser.parse_args(args=argv)
    xcomparser = ixcom.parser.MessageSearcher(disable_crc = False)
    iob = io.BytesIO(b'')
    def r_callback(in_bytes):
        # Collect every complete, CRC-valid message.
        iob.write(in_bytes)
    xcomparser.add_callback(r_callback)
    xcomparser.process_bytes(args.inputfile.read())
    # Rewind the collected clean stream and dump it to the output file.
    iob.seek(0, os.SEEK_SET)
    args.output.write(iob.read())
import pygame
class Following:
    """A small 13x13 coloured dot drawn as an ellipse."""

    def __init__(self, top_left_corner, color):
        side = 13  # square bounding box, so the ellipse renders as a circle
        self.color = color
        self.top_left_corner = top_left_corner
        self.rect = pygame.Rect(top_left_corner, (side, side))

    def render(self, screen):
        """Draw the dot onto the given surface."""
        pygame.draw.ellipse(screen, self.color, self.rect)
|
# modules
import webbrowser
from win10toast_click import ToastNotifier
# function
page_url = 'http://github.com/'


def open_url():
    """Open the module-level ``page_url`` in a new browser window."""
    try:
        webbrowser.open_new(page_url)
        print('Opening URL...')
    except webbrowser.Error:
        # The old bare ``except`` hid every failure behind a misleading
        # "Unsupported variable type" message; only browser-launch errors
        # are expected here.
        print('Failed to open URL.')
# initialize the notifier (win10toast_click wraps the Windows 10 toast API)
toaster = ToastNotifier()
# showcase: a clickable toast that runs open_url when clicked
toaster.show_toast(
    "Example two", # title
    "Click to open URL! >>", # message
    icon_path=None, # 'icon_path'
    duration=5, # for how many seconds toast should be visible; None = leave notification in Notification Center
    threaded=True, # True = run other code in parallel; False = code execution will wait till notification disappears
    callback_on_click=open_url # click notification to run function
)
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Appears to solve the classic task: among positive integers <= n, print one
# with the maximum digit sum, preferring n itself on ties -- TODO confirm
# against the original problem statement.
n=input()
l=[int(i) for i in n]   # individual digits of n
ans=[sum(l),n]          # [best digit sum so far, that number as a string]
# Candidates: decrement digit i by one and replace everything after it with
# 9s (e.g. 523 -> 499); these are the only numbers that can beat n itself.
for i in range(len(n)-1):
    t0=0    # digit sum of the current candidate
    t1=""   # candidate as a string
    for j in range(len(n)-1):
        if i==j:
            t0+=l[j]-1
            t1+=str(l[j]-1)
            # all remaining positions become 9
            t0+=9*(len(n)-j-1)
            t1+="9"*(len(n)-j-1)
            break
        else:
            # copy the unchanged prefix digits
            t0+=l[j]
            t1+=str(l[j])
    if t0>=ans[0]:
        ans[0]=t0
        ans[1]=t1
# If no candidate strictly beats n's own digit sum, keep n itself.
if ans[0]==sum(l):
    ans[1]=n
# int() strips a leading zero produced by decrementing a leading 1.
print(int(ans[1]))
|
from django.contrib import admin
from . models import Brand, Accordion, ProductOrder, Cart
class AccordionAdmin(admin.ModelAdmin):
    # Columns shown in the admin change list for accordions.
    list_display = ('model_name', 'brand', 'price')


class ProductCartAdmin(admin.ModelAdmin):
    # NOTE(review): class is named ProductCartAdmin but is registered for the
    # ProductOrder model below -- confirm the intended name.
    list_display = ('product', 'quantity')


class CartAdmin(admin.ModelAdmin):
    # Columns shown in the admin change list for carts.
    list_display = ('user', 'active', 'order_date')


# Register the shop models with their admin configurations.
admin.site.register(Brand)
admin.site.register(Accordion, AccordionAdmin)
admin.site.register(ProductOrder, ProductCartAdmin)
admin.site.register(Cart, CartAdmin)
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the study-hours dataset: all but the last column are features,
# the second column is the target.
dataset=pd.read_csv("hours.csv")
X=dataset.iloc[:,:-1].values
y=dataset.iloc[:,1].values
dataset.head()

from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(X,y)

regressor.coef_
regressor.intercept_
# R^2 score expressed as a percentage (user-facing typo "Acurracy" fixed).
print ("Accuracy: ",regressor.score(X,y) * 100)
# Prediction for 8 hours.
y_pred=regressor.predict([[8]])
print(y_pred)
hours=int(input("Enter number of hours: "))
# Evaluate the fitted line y = coef*hours + intercept by hand.
eq=regressor.coef_*hours+regressor.intercept_
# Prediction at 0 hours (the intercept).
y_pred = regressor.predict([[0]])
print(y_pred)
print('y = %f*%f+%f' %(regressor.coef_,hours,regressor.intercept_))
print('Risk Score: ',eq[0])
# Scatter plot of the data with the fitted regression line.
plt.plot(X,y,'o')
plt.plot(X, regressor.predict(X));
plt.show()
|
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Hash import RIPEMD
import random
import binascii
import sys
# Copyright (C) 2011 Sam Rushing
# Copyright (C) 2013-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
#https://python-bitcoinlib.readthedocs.io/en/latest/_modules/bitcoin/base58.html
# Base58 alphabet (Bitcoin variant: no 0, O, I, l).
b58_digits = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def encode58(b):
    """Encode bytes to a base58-encoded string.

    :param b: bytes to encode (big-endian interpretation)
    :return: base58 string; each leading zero byte becomes a leading '1'
    """
    # Convert big-endian bytes to an integer; the extra '0' nibble keeps the
    # literal valid even for empty input.
    n = int('0x0' + binascii.hexlify(b).decode('utf8'), 16)
    # Repeated divmod yields base-58 digits, least significant first.
    res = []
    while n > 0:
        n, r = divmod(n, 58)
        res.append(b58_digits[r])
    res = ''.join(res[::-1])
    # Encode leading zero bytes as base58 zeros ('1').
    czero = b'\x00'
    if sys.version_info[0] >= 3:
        # In Python3 indexing a bytes returns numbers, not characters.
        # (The old check `sys.version > '3'` compared version STRINGS
        # lexically, which is fragile; version_info compares correctly.)
        czero = 0
    pad = 0
    for c in b:
        if c == czero:
            pad += 1
        else:
            break
    return b58_digits[0] * pad + res
def calculateAdresse(number):
    """Derive a Bitcoin-style base58check address from a small integer.

    The integer is packed into 2 bytes and hashed with SHA-256 then
    RIPEMD-160 ("hash160"); a 4-byte checksum (first bytes of
    SHA256(SHA256(0x00 + hash160))) is appended, and the whole payload
    (0x00 version byte + hash160 + checksum) is base58-encoded.

    :param number: integer in [0, 65535] -- it must fit in two bytes.
    """
    # hash160 = RIPEMD160(SHA256(number))
    sha256 = SHA256.new()
    number_in_bytes = number.to_bytes(2,byteorder='big')
    sha256.update(number_in_bytes)
    hash160 = RIPEMD.new(sha256.digest())
    # Checksum: first 4 bytes of SHA256(SHA256(0x00 + hash160))
    sha256_2 = SHA256.new()
    sha256_3 = SHA256.new()
    sha256_2.update(bytes(1)+hash160.digest())
    sha256_3.update(sha256_2.digest())
    # Base58-encode: version byte + hash160 + 4-byte checksum
    return encode58(bytes(1)+hash160.digest()+sha256_3.digest()[:4])


# Fix: randint's upper bound is INCLUSIVE, and 2**16 = 65536 does not fit in
# the two bytes used by to_bytes(2, ...) -- it raised OverflowError roughly
# once in 65537 runs. The largest valid value is 2**16 - 1.
print(calculateAdresse(random.randint(0, 2**16 - 1)))
|
from ophyd.controls import EpicsMotor, PVPositioner, EpicsSignal
# M1A
# M1A mirror: shared move/stop/busy PVs for all six axes of the mirror.
kwargs = {'act': 'XF:23IDA-OP:1{Mir:1}MOVE_CMD.PROC',
          'act_val': 1,
          'stop': 'XF:23IDA-OP:1{Mir:1}STOP_CMD.PROC',
          'stop_val': 1,
          'done': 'XF:23IDA-OP:1{Mir:1}BUSY_STS',
          'done_val': 0}
m1a_z = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:Z}Mtr_POS_SP',
                     readback='XF:23IDA-OP:1{Mir:1-Ax:Z}Mtr_MON',
                     name='m1a_z', **kwargs)
m1a_y = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:Y}Mtr_POS_SP',
                     readback='XF:23IDA-OP:1{Mir:1-Ax:Y}Mtr_MON',
                     name='m1a_y', **kwargs)
m1a_x = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:X}Mtr_POS_SP',
                     readback='XF:23IDA-OP:1{Mir:1-Ax:X}Mtr_MON',
                     name='m1a_x', **kwargs)
m1a_pit = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:Pit}Mtr_POS_SP',
                       readback='XF:23IDA-OP:1{Mir:1-Ax:Pit}Mtr_MON',
                       name='m1a_pit', **kwargs)
m1a_yaw = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:Yaw}Mtr_POS_SP',
                       readback='XF:23IDA-OP:1{Mir:1-Ax:Yaw}Mtr_MON',
                       name='m1a_yaw', **kwargs)
m1a_rol = PVPositioner('XF:23IDA-OP:1{Mir:1-Ax:Rol}Mtr_POS_SP',
                       readback='XF:23IDA-OP:1{Mir:1-Ax:Rol}Mtr_MON',
                       name='m1a_rol', **kwargs)
# Convenience list of all M1A axes.
m1a = [m1a_z, m1a_y, m1a_x, m1a_pit, m1a_yaw, m1a_rol]
# M1B1 mirror: same six-axis pattern with its own command/status PVs.
kwargs = {'act': 'XF:23IDA-OP:2{Mir:1A}MOVE_CMD.PROC',
          'act_val': 1,
          'stop': 'XF:23IDA-OP:2{Mir:1A}STOP_CMD.PROC',
          'stop_val': 1,
          'done': 'XF:23IDA-OP:2{Mir:1A}BUSY_STS',
          'done_val': 0}
m1b1_z = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:Z}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1A-Ax:Z}Mtr_MON',
                      name='m1b1_z', **kwargs)
m1b1_y = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:Y}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1A-Ax:Y}Mtr_MON',
                      name='m1b1_y', **kwargs)
m1b1_x = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:X}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1A-Ax:X}Mtr_MON',
                      name='m1b1_x', **kwargs)
m1b1_pit = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:Pit}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1A-Ax:Pit}Mtr_MON',
                        name='m1b1_pit', **kwargs)
m1b1_yaw = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:Yaw}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1A-Ax:Yaw}Mtr_MON',
                        name='m1b1_yaw', **kwargs)
m1b1_rol = PVPositioner('XF:23IDA-OP:2{Mir:1A-Ax:Rol}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1A-Ax:Rol}Mtr_MON',
                        name='m1b1_rol', **kwargs)
# Convenience list of all M1B1 axes.
m1b1 = [m1b1_z, m1b1_y, m1b1_x, m1b1_pit, m1b1_yaw, m1b1_rol]
# M1B2 mirror: same six-axis pattern with its own command/status PVs.
kwargs = {'act': 'XF:23IDA-OP:2{Mir:1B}MOVE_CMD.PROC',
          'act_val': 1,
          'stop': 'XF:23IDA-OP:2{Mir:1B}STOP_CMD.PROC',
          'stop_val': 1,
          'done': 'XF:23IDA-OP:2{Mir:1B}BUSY_STS',
          'done_val': 0}
m1b2_z = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:Z}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1B-Ax:Z}Mtr_MON',
                      name='m1b2_z', **kwargs)
m1b2_y = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:Y}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1B-Ax:Y}Mtr_MON',
                      name='m1b2_y', **kwargs)
m1b2_x = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:X}Mtr_POS_SP',
                      readback='XF:23IDA-OP:2{Mir:1B-Ax:X}Mtr_MON',
                      name='m1b2_x', **kwargs)
m1b2_pit = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:Pit}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1B-Ax:Pit}Mtr_MON',
                        name='m1b2_pit', **kwargs)
m1b2_yaw = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:Yaw}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1B-Ax:Yaw}Mtr_MON',
                        name='m1b2_yaw', **kwargs)
m1b2_rol = PVPositioner('XF:23IDA-OP:2{Mir:1B-Ax:Rol}Mtr_POS_SP',
                        readback='XF:23IDA-OP:2{Mir:1B-Ax:Rol}Mtr_MON',
                        name='m1b2_rol', **kwargs)
# Convenience list of all M1B2 axes.
m1b2 = [m1b2_z, m1b2_y, m1b2_x, m1b2_pit, m1b2_yaw, m1b2_rol]
# VLS-PGM monochromator: energy positioner with soft limits of 200-2200 eV.
pgm_energy = PVPositioner('XF:23ID1-OP{Mono}Enrgy-SP',
                          readback='XF:23ID1-OP{Mono}Enrgy-I',
                          stop='XF:23ID1-OP{Mono}Cmd:Stop-Cmd',
                          stop_val=1, put_complete=True,
                          name='pgm_energy',
                          limits=(200,2200))
# Monochromator mirror/grating motors.
pgm_mir_pit = EpicsMotor('XF:23ID1-OP{Mono-Ax:MirP}Mtr', name='pgm_mir_pit')
pgm_grt_pit = EpicsMotor('XF:23ID1-OP{Mono-Ax:GrtP}Mtr', name='pgm_grt_pit')
pgm_mir_x = EpicsMotor('XF:23ID1-OP{Mono-Ax:MirX}Mtr', name='pgm_mir_x')
pgm_grt_x = EpicsMotor('XF:23ID1-OP{Mono-Ax:GrtX}Mtr', name='pgm_grt_x')
# Direct access to the energy setpoint/readback signals.
pgm_energy_sp = EpicsSignal('XF:23ID1-OP{Mono}Enrgy-SP', name='pgm_energy_sp')
pgm_energy_i = EpicsSignal('XF:23ID1-OP{Mono}Enrgy-I', name='pgm_energy_i')
# M3A Mirror
m3a_x = EpicsMotor('XF:23ID1-OP{Mir:3-Ax:XAvg}Mtr', name='m3a_x')
m3a_pit = EpicsMotor('XF:23ID1-OP{Mir:3-Ax:P}Mtr', name='m3a_pit')
m3a_bdr = EpicsMotor('XF:23ID1-OP{Mir:3-Ax:Bdr}Mtr', name='m3a_bdr')
# Fast CCD Shutter
sh_y = EpicsMotor('XF:23ID1-OP{Sh:Fast-Ax:Y}Mtr', name='sh_y')
sh_x = EpicsMotor('XF:23ID1-OP{Sh:Fast-Ax:X}Mtr', name='sh_x')
# Slits (gap/center for slits 1-2, x/y for slit 3)
slt1_xg = EpicsMotor('XF:23ID1-OP{Slt:1-Ax:XGap}Mtr', name='slt1_xg')
slt1_xc = EpicsMotor('XF:23ID1-OP{Slt:1-Ax:XCtr}Mtr', name='slt1_xc')
slt1_yg = EpicsMotor('XF:23ID1-OP{Slt:1-Ax:YGap}Mtr', name='slt1_yg')
slt1_yc = EpicsMotor('XF:23ID1-OP{Slt:1-Ax:YCtr}Mtr', name='slt1_yc')
slt2_xg = EpicsMotor('XF:23ID1-OP{Slt:2-Ax:XGap}Mtr', name='slt2_xg')
slt2_xc = EpicsMotor('XF:23ID1-OP{Slt:2-Ax:XCtr}Mtr', name='slt2_xc')
slt2_yg = EpicsMotor('XF:23ID1-OP{Slt:2-Ax:YGap}Mtr', name='slt2_yg')
slt2_yc = EpicsMotor('XF:23ID1-OP{Slt:2-Ax:YCtr}Mtr', name='slt2_yc')
slt3_x = EpicsMotor('XF:23ID1-OP{Slt:3-Ax:X}Mtr', name='slt3_x')
slt3_y = EpicsMotor('XF:23ID1-OP{Slt:3-Ax:Y}Mtr', name='slt3_y')
# Diagnostic Manipulators
diag2_y = EpicsMotor('XF:23ID1-BI{Diag:2-Ax:Y}Mtr', name='diag2_y')
diag3_y = EpicsMotor('XF:23ID1-BI{Diag:3-Ax:Y}Mtr', name='diag3_y')
diag5_y = EpicsMotor('XF:23ID1-BI{Diag:5-Ax:Y}Mtr', name='diag5_y')
diag6_y = EpicsMotor('XF:23ID1-BI{Diag:6-Ax:Y}Mtr', name='diag6_y')
# Setpoint for PID loop
# NOTE(review): this kwargs dict (identical to the M1B2 one above) is defined
# but never passed to diag6_pid below -- confirm whether **kwargs was intended.
kwargs = {'act': 'XF:23IDA-OP:2{Mir:1B}MOVE_CMD.PROC',
          'act_val': 1,
          'stop': 'XF:23IDA-OP:2{Mir:1B}STOP_CMD.PROC',
          'stop_val': 1,
          'done': 'XF:23IDA-OP:2{Mir:1B}BUSY_STS',
          'done_val': 0}
diag6_pid = PVPositioner('XF:23ID1-OP{FBck}PID-SP',
                         readback='XF:23ID1-OP{FBck}PID-RB',
                         put_complete=True,
                         name='diag6_pid')
|
#-------------------------------------------------------------------------------
# Name: Flappy Mario v1.4
# Purpose:
#
# Author: Gabriel
#
# Created: 12/07/2014
# Copyright: (c) Gabriel 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import csv
import datetime
import os
import random
import sys

import pygame
from pygame.locals import *
pygame.init()

# Window and gameplay constants.
WIDTH = 1000
HEIGHT = 600
WHITE = (255,255,255)
BLACK = (0,0,0)
GND_HEIGHT = 80 # height of the ground strip
FPS = 30 # maximum frames per second
GRAVITY = 500 # gravity value (px/s^2)
# create the clock
clock = pygame.time.Clock()
# initialize the screen
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Super Flappy Mario v1.4")
# load images and fonts
background = pygame.image.load("images/parallax.png")
ground = pygame.image.load("images/ground.png")
score_img = pygame.image.load("images/mario_score.png")
game_title = pygame.image.load("images/game_title.png")
mario_hammer = pygame.image.load("images/mario_hammer.png")
crush = pygame.image.load("images/crush.png")
get_ready = pygame.image.load("images/get_ready.png")
score_gameover = pygame.image.load("images/score_game_over.png")
medals = pygame.image.load("images/medals.png")
mario_sprite = pygame.image.load("images/mario_sprite.png")
signature = pygame.image.load("images/signature.png")
pipe_img = pygame.image.load("images/pipe.png")
level_img = pygame.image.load("images/level.png")
cursor_img = pygame.image.load("images/cursor.png")
play_again = pygame.image.load("images/play_again.png")
large_font = pygame.font.Font("fonts/Super_Mario_World.ttf", 32)
small_font = pygame.font.Font("fonts/Super_Mario_World.ttf", 20)
def change_music(new_music, loops=-1):
    """Stop the current track and play ``sound/<new_music>``.

    :param new_music: file name inside the sound/ directory
    :param loops: repeat count for pygame; -1 loops forever
    """
    pygame.mixer.music.stop()
    track = "sound/{}".format(new_music)
    pygame.mixer.music.load(track)
    pygame.mixer.music.play(loops)
class Game:
    """Flappy-Bird style game with four game modes:

    0  title screen / level selection;
    1  preparation for the game (get ready);
    2  game running;
    3  game over (score board and medal).
    """
    def __init__(self):
        self.pipe_list = []              # [top_pipe, bottom_pipe, gap] rect triplets
        self.game_mode = 0
        self.score = 0                   # points for the current round
        self.best_score = 0
        self.ground_pos = 0              # horizontal scroll offset of the ground
        self.mario_rect = pygame.Rect(50,200,40,40)
        self.mario_speed = 0             # vertical speed in px/s (positive = falling)
        self.level = 0                   # difficulty chosen on the title screen (0..3)
        self.scroll_speed = -100         # world scroll speed in px/s
        # Create the log file with a CSV header on first run
        # (relies on `import os` at the top of the file).
        if not os.path.isfile("logfile.csv"):
            with open("logfile.csv", "w") as logfile:
                logwriter = csv.writer(logfile)
                logwriter.writerow(["start", "end", "level", "score", "time"])

    # creates the rects of a new pipe pair
    # note: modulates the gaps (vertical and horizontal) by difficulty level
    def create_pipe(self):
        if self.level == 0:
            self.gap_width = 400
            gap_height = 200
        elif self.level == 1:
            self.gap_width = 300
            gap_height = 140
        elif self.level == 2:
            self.gap_width = random.randrange(300, 401, 100)
            gap_height = 140
        elif self.level == 3:
            self.gap_width = random.randrange(250, 351, 50)
            gap_height = random.randrange(120, 201, 40)
        width = pipe_img.get_width()
        height = random.randrange(20, HEIGHT - gap_height, 20)
        up_pipe = pygame.Rect(WIDTH, 0, width, height)
        down_pipe = pygame.Rect(WIDTH, height + gap_height, width, HEIGHT - height - gap_height - GND_HEIGHT)
        gap_rect = pygame.Rect(WIDTH, height, width, gap_height)
        self.pipe_list.append([up_pipe, down_pipe, gap_rect])

    # selects the scrolling speed of the pipes
    # note: modulates speed by difficulty level
    def set_scroll_speed(self):
        if self.level == 0 or self.level == 1:
            self.scroll_speed = -160
        elif self.level == 2:
            self.scroll_speed = -200
        elif self.level == 3:
            self.scroll_speed = -250

    # moves the pipes
    def scroll_pipe(self, seconds):
        for pipe in self.pipe_list:
            pipe[0].x += self.scroll_speed * seconds
            pipe[1].x += self.scroll_speed * seconds
            pipe[2].x += self.scroll_speed * seconds #gap

    # draws the pipes
    def draw_pipe(self):
        width = pipe_img.get_width()
        height = pipe_img.get_height()
        for pipe in self.pipe_list:
            # top pipe is the image flipped vertically, clipped to the rect height
            screen.blit(pygame.transform.flip(pipe_img, False, True), pipe[0].topleft, (0, height - pipe[0].height, width, pipe[0].height))
            screen.blit(pipe_img, pipe[1].topleft, (0, 0, width, pipe[1].height))

    # detects collision with the first pipe pair in the list
    def collision_detect(self):
        # collides with a pipe (top, bottom) or the ground
        return self.mario_rect.colliderect(self.pipe_list[0][0]) or \
               self.mario_rect.colliderect(self.pipe_list[0][1]) or \
               self.mario_rect.bottom >= (HEIGHT - GND_HEIGHT)

    # updates the points
    # NOTE(review): dead code -- start() re-implements this inline (and
    # correctly). As written, `passing_gap` is an unbound local on the first
    # miss, `last_state` is never used, and the inner branch sets True where
    # the inline version sets False. Confirm before reviving this method.
    def update_score(self, last_state):
        if self.mario_rect.colliderect(self.pipe_list[0][2]): #managed to pass the pipe
            passing_gap = True
        else:
            if passing_gap:
                passing_gap = True
                self.score += 1

    # draws the score
    def draw_score(self):
        screen.blit(score_img, (WIDTH/2 - 50, 10))
        texto = large_font.render(str(self.score), True, WHITE)
        screen.blit(texto, (WIDTH/2 - 20, 28))

    # scrolls the ground and parallax
    def scroll_ground(self, seconds):
        self.ground_pos += self.scroll_speed * seconds
        if self.ground_pos < -WIDTH:
            self.ground_pos = 0

    # draws the ground
    def draw_ground(self):
        screen.blit(ground, (self.ground_pos, 460))
        screen.blit(ground, (WIDTH + self.ground_pos, 460))

    # draws the proper mario sprite, switching frames according to the rect's speed
    def draw_mario(self, live=True):
        if live:
            if self.mario_speed <= 0:
                screen.blit(mario_sprite, (self.mario_rect.left, self.mario_rect.top-20), (0,0,40,60))
            if 0 < self.mario_speed <= 200:
                screen.blit(mario_sprite, (self.mario_rect.left, self.mario_rect.top-20), (40,0,40,60))
            if 200 < self.mario_speed <= 480:
                screen.blit(mario_sprite, (self.mario_rect.left, self.mario_rect.top-20), (80,0,40,60))
            if self.mario_speed > 480:
                screen.blit(mario_sprite, (self.mario_rect.left, self.mario_rect.top-20), (120,0,40,60))
        else:
            # dead sprite
            screen.blit(mario_sprite, (self.mario_rect.left, self.mario_rect.top-20), (160,0,40,60))

    # draws the parallax background
    def draw_parallax(self):
        screen.blit(background, (0, 0))

    # resets the per-round parameters
    def reset(self):
        self.mario_rect.y = 200
        self.ground_pos = 0
        self.pipe_list = []
        self.score = 0
        self.mario_speed = 0

    def start(self):
        """Main loop: dispatches input events and renders the current game mode."""
        logfile = open("logfile.csv", "a")
        logwriter = csv.writer(logfile)
        game_mode_list = ["easy", "moderate", "hard", "veryhard"]
        hammer_counter = 0   # animation frame on the get-ready screen
        cursor1_pos = 0      # level-select cursor position
        cursor2_pos = 0      # play-again cursor position
        playtime = 0         # time played in the current round
        change_music("Title Theme.ogg")
        while True:
            milliseconds = clock.tick(FPS) # milliseconds since the last frame
            seconds = milliseconds/1000.0  # elapsed time in seconds
            screen.fill(WHITE)
            for event in pygame.event.get():
                if event.type == QUIT:
                    logfile.close()
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    # handles the game-mode transitions when space is pressed
                    if event.key == K_SPACE:
                        if self.game_mode == 1:
                            start_datetime = "{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now())
                            passing_gap = False # is mario inside the gap?
                            playtime = 0
                            self.game_mode = 2
                        elif self.game_mode == 2:
                            self.mario_speed -= 250
                    # handles the game-mode transitions when enter is pressed
                    if event.key == K_RETURN or event.key == K_KP_ENTER :
                        if self.game_mode == 0:
                            change_music("Overworld Theme.ogg")
                            self.level = cursor1_pos
                            self.set_scroll_speed()
                            self.game_mode = 1
                        elif self.game_mode == 3:
                            if cursor2_pos == 0: #PLAY AGAIN? <YES>
                                change_music("Overworld Theme.ogg")
                                self.game_mode = 1
                            else: #PLAY AGAIN? <NO>
                                change_music("Title Theme.ogg")
                                cursor2_pos = 0
                                self.game_mode = 0
                            playtime = 0
                            self.reset()
                    if event.key == K_UP:
                        if self.game_mode == 0 and cursor1_pos > 0:
                            cursor1_pos -= 1
                        if self.game_mode == 3 and cursor2_pos > 0:
                            cursor2_pos -= 1
                    if event.key == K_DOWN:
                        if self.game_mode == 0 and cursor1_pos < 3:
                            cursor1_pos += 1
                        if self.game_mode == 3 and cursor2_pos < 1:
                            cursor2_pos += 1
            #-----------------------------------------------------------------------
            # GAME MODE DISPATCH
            # 0 title screen; 1 get ready; 2 game running; 3 game over
            #-----------------------------------------------------------------------
            # GAME MODE 0: TITLE SCREEN
            if self.game_mode == 0:
                self.draw_parallax()
                self.draw_ground()
                self.scroll_ground(seconds)
                # draws the title, level menu and signature
                screen.blit(game_title, (50, 50))
                screen.blit(level_img, (400,300))
                screen.blit(signature, (250, 550))
                screen.blit(cursor_img, (370, 300 + 25*cursor1_pos))
            # GAME MODE 1: PREPARATION (GET READY)
            elif self.game_mode == 1:
                self.draw_parallax()
                self.draw_ground()
                self.draw_mario()
                self.scroll_ground(seconds)
                # draws the instructions
                screen.blit(get_ready, (300, 100))
                # draws mario's hammer animation (frame advances every 0.1 s)
                screen.blit(mario_hammer, (420, 390), (60*hammer_counter, 0, 60, 100))
                if playtime > .1:
                    hammer_counter += 1
                    playtime = 0
                if hammer_counter == 5:
                    screen.blit(crush, (430, 370))
                    hammer_counter = 0
                playtime += seconds
            # GAME MODE 2: RUNNING THE GAME ITSELF
            elif self.game_mode == 2:
                # spawns obstacles
                if len(self.pipe_list) == 0 or self.pipe_list[-1][0].x < (WIDTH - self.gap_width):
                    self.create_pipe()
                # removes obstacles that left the screen
                if self.pipe_list[0][0].x < -pipe_img.get_width():
                    self.pipe_list.remove(self.pipe_list[0])
                self.draw_parallax()
                self.draw_pipe()
                self.draw_ground()
                self.draw_mario()
                # draws the score board
                screen.blit(score_img, (WIDTH/2 - 50, 10))
                text = large_font.render(str(self.score), True, WHITE)
                screen.blit(text, (WIDTH/2 - 20, 28))
                self.scroll_ground(seconds)
                self.scroll_pipe(seconds)
                # updates the score: one point each time mario leaves a gap
                if self.pipe_list[0][2].contains(self.mario_rect):
                    passing_gap = True
                else:
                    if passing_gap:
                        passing_gap = False
                        self.score += 1
                # upper screen-border restriction
                if self.mario_rect.top <= 0 and self.mario_speed < 0:
                    self.mario_speed = 0
                # updates mario's speed and position
                self.mario_speed += GRAVITY * seconds
                self.mario_rect.top += self.mario_speed * seconds
                if self.collision_detect():
                    # logs one CSV row with the round's data
                    end_datetime = "{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now())
                    logwriter.writerow([start_datetime, end_datetime, game_mode_list[self.level], self.score, round(playtime, 3)])
                    change_music("Life Lost.ogg", 0)
                    self.game_mode = 3
                playtime += seconds
            # GAME MODE 3: SCORE SCREEN AND GAME OVER
            elif self.game_mode == 3:
                self.draw_parallax()
                self.draw_pipe()
                self.draw_ground()
                self.draw_mario(False)
                # once the falling animation ends, draw game over and the score
                if self.mario_rect.bottom >= HEIGHT - GND_HEIGHT:
                    # pins the character to the ground
                    self.mario_speed = 0
                    self.mario_rect.bottom = HEIGHT - GND_HEIGHT
                    # draws the score board
                    screen.blit(score_gameover, (WIDTH/2 - 200, 100))
                    # shows the score and the best score
                    text = large_font.render(str(self.score), True, BLACK)
                    screen.blit(text, (600, 210))
                    if self.score > self.best_score:
                        self.best_score = self.score
                    text = large_font.render(str(self.best_score), True, BLACK)
                    screen.blit(text, (600, 290))
                    # shows the matching medal: 20, 40, 60, 80 points
                    if self.score > 20 : #bronze
                        screen.blit(medals, (340, 225), (0,0,80,100))
                    if self.score > 40: #silver
                        screen.blit(medals, (340, 225), (80,0,80,100))
                    if self.score > 60: #gold
                        screen.blit(medals, (340, 225), (160,0,80,100))
                    if self.score > 80: #diamond
                        screen.blit(medals, (340, 225), (240,0,80,100))
                    # draws the play-again menu
                    screen.blit(play_again, (300, 400))
                    screen.blit(cursor_img, (580, 405 + 30*cursor2_pos))
                else:
                    # keep falling until the ground is reached
                    self.mario_speed += GRAVITY * seconds
                    self.mario_rect.top += self.mario_speed * seconds
            pygame.display.update()
if __name__ == "__main__":
    # Script entry point: build the game object and run the main loop.
    game = Game()
    game.start()
|
# Hint: You may not need all of these. Remove the unused functions.
from hashtables import (HashTable,
hash_table_insert,
hash_table_remove,
hash_table_retrieve,
hash_table_resize)
class Ticket:
    """A single flight leg from a source airport to a destination airport.

    The source code ``"NONE"`` marks the first leg of an itinerary
    (see ``reconstruct_trip``).
    """

    def __init__(self, source, destination):
        # Airport codes, e.g. "PDX".
        self.source = source
        self.destination = destination

    def __repr__(self):
        # Added for debuggability: reconstructed routes get printed, and the
        # default <object at 0x...> repr is useless there.
        return "Ticket({!r} -> {!r})".format(self.source, self.destination)
def reconstruct_trip(tickets, length):
    """Rebuild an ordered itinerary from an unordered pile of tickets.

    Each ticket maps a source airport to a destination airport; the ticket
    whose source is "NONE" marks the start of the trip.  Returns a list of
    ``length`` airport codes in travel order.
    """
    hashtable = HashTable(length)
    route = [None] * length

    # Index every ticket by its source airport so each hop is an O(1) lookup.
    for ticket in tickets:
        hash_table_insert(hashtable, ticket.source, ticket.destination)

    # The trip begins at the airport nothing departs from, keyed as "NONE".
    stop = hash_table_retrieve(hashtable, "NONE")

    # Walk the chain: each stop's table entry names the next airport.
    for position in range(length):
        route[position] = stop
        stop = hash_table_retrieve(hashtable, stop)

    # Printed on purpose -- the project README asks for it.
    print(route)
    return route
|
from enum import Enum
class Strategy(Enum):
    """Closed set of private-equity investment strategies.

    Values are explicit (and start at 0) so they stay stable if members are
    ever reordered.
    """

    DISTRESSED = 0
    GROWTH = 1
    INDUSTRY_FOCUSED = 2
    VENTURE_CAPITAL = 3
    MIDDLE_BUYOUT = 4
    LARGE_BUYOUT = 5
|
#!/usr/bin/env python3
""" Logistic regression for prediction of satisfying assignments """
import os
import os.path
from dataset import load_dataset
import tensorflow as tf
# Per-literal statistics computed for every sign/formula pair.  The resulting
# column names (and their order) must match the columns load_dataset emits.
# 'neut' is intentionally omitted, as in the original feature set.
_STATS = ['num', 'avg', 'max', 'min', 'jw', 'neg', 'pos']
features = [
    '{}({}, {})'.format(stat, sign, phi)
    for sign in ["x", "-x"]
    for phi in ["phi_1", "phi_2", "phi_3", "phi_4", "phi_horn", "phi_cohorn", "phi"]
    for stat in _STATS
]
# Load the vmpc_30 sample set; demean=True centers each feature column.
# assumes `inputs` is a 2-D array (rows = samples, cols = features) -- TODO
# confirm against load_dataset's contract.
inputs, outputs = load_dataset(['samples/vmpc_30'], demean=True)
inputs = dict(zip(features, inputs.T.tolist())) # repack as {feature name -> column list}, the dict form tf.data expects
def iterator(randomize):
    """Build a one-shot iterator over the module-level (inputs, outputs).

    With ``randomize`` true the examples are shuffled, batched (16) and
    repeated for two passes; otherwise they are served once, in order and
    unbatched.
    """
    source = tf.data.Dataset.from_tensor_slices((inputs, outputs))
    pipeline = source.shuffle(1024).batch(16).repeat(2) if randomize else source
    return pipeline.make_one_shot_iterator()
# Linear (logistic) classifier over the hand-crafted features, trained with
# FTRL and L1 regularization (encourages sparse weights).  Uses the TF 1.x
# estimator API; checkpoints are written under models/.
model = tf.estimator.LinearClassifier(
    model_dir='models/',
    # NOTE(review): map() yields a single-use iterator; fine only if the
    # estimator materializes it exactly once -- confirm, else wrap in list().
    feature_columns=map(tf.feature_column.numeric_column, features),
    optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.1,
        #l2_regularization_strength=1.0,
    ),
)
# Train on the shuffled/batched stream, then evaluate on the ordered one.
model.train(input_fn=iterator(True).get_next)
results = model.evaluate(input_fn=iterator(False).get_next)
# Dump evaluation results next to the ground-truth labels.
# NOTE(review): Estimator.evaluate returns a dict of metrics keyed by metric
# name, so indexing `outputs` with those keys looks suspect -- confirm intent.
print("PREDICT\tACTUAL")
for point in results:
    # BUG FIX: the original called .format() on a %-style template
    # ("%s\t%s".format(...) returns the template unchanged because it has no
    # {} placeholders), so every row printed the literal "%s\t%s".
    print("{}\t{}".format(results[point], outputs[point]))
|
import os
import pandas as pd
import cv2
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch import optim, nn
from efficientnet_pytorch import EfficientNet
from tqdm import tqdm
from sklearn.model_selection import train_test_split
class AptosDataset(Dataset):
    """APTOS retinopathy images exposed as a torch Dataset.

    NOTE: ``__init__`` adds a 'path' column to the caller's DataFrame *in
    place*; downstream code relies on that column existing afterwards.
    """

    def __init__(self, df, img_dir, mode='train'):
        # Resolve every id_code to its image file path up front.
        df['path'] = df['id_code'].map(lambda code: os.path.join(img_dir, '{}.png'.format(code)))
        self.df = df
        self.mode = mode

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        # uint8 HWC -> float32 CHW in [0, 1], fixed 512x512.
        image = cv2.resize(cv2.imread(row['path']), (512, 512))
        image = image.astype('float32').transpose(2, 0, 1) / 255.0
        if self.mode == 'train':
            return image, row['diagnosis']
        return image, row['id_code']
class AptosNet(nn.Module):
    """EfficientNet-B0 backbone with a small 5-class classification head."""

    def __init__(self):
        super(AptosNet, self).__init__()
        # Pretrained weights shipped as a Kaggle input dataset (offline kernel).
        snapshot = torch.load('../input/efficientnet-pytorch/efficientnet-b0-08094119.pth')
        self.base_model = EfficientNet.from_name('efficientnet-b0')
        self.base_model.load_state_dict(snapshot)
        # fc1 takes 256 inputs -- presumably the flattened 16x16 spatial map
        # left after the channel-max in forward() for 512x512 inputs; other
        # image sizes would break the view() below.  TODO confirm.
        self.fc1 = nn.Linear(256, 256)
        self.fc2 = nn.Linear(256, 5)

    def forward(self, x):
        x = self.base_model.extract_features(x)
        # Collapse the channel dimension by taking its max, keeping the
        # spatial map ([0] selects values from torch.max's (values, indices)).
        x = torch.max(x, axis=1)[0]
        # Flatten per sample before the fully-connected head.
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
def train_epoch(data_loader, model, criterion, optimizer, epoch):
    """Run one optimization pass over `data_loader` and print the mean loss."""
    model.train()
    total_loss = 0
    for batch, labels in tqdm(data_loader):
        batch, labels = batch.cuda(), labels.cuda()
        predictions = model(batch)
        batch_loss = criterion(predictions, labels)
        # Standard step: clear stale grads, backprop, update weights.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        total_loss += batch_loss.item()
    print('train_loss = ', total_loss / len(data_loader))
def val_epoch(data_loader, model, criterion, epoch):
    """Evaluate `model` on `data_loader` and print the mean validation loss.

    Fix: inference now runs under torch.no_grad(), so no autograd graph is
    built for validation batches -- same printed numbers, far less memory.
    """
    model.eval()
    loss_sum = 0
    with torch.no_grad():
        for data, target in tqdm(data_loader):
            data = data.cuda()
            target = target.cuda()
            output = model(data)
            loss = criterion(output, target)
            loss_sum += loss.item()
    loss_epoch = loss_sum / len(data_loader)
    print('val_loss = ', loss_epoch)
def train(data_dir):
    """Train AptosNet on the labelled images for two epochs; returns the model."""
    n_epochs = 2
    labels = pd.read_csv(os.path.join(data_dir, 'train.csv'))
    # Hold out 10% for validation, fixed seed for reproducibility.
    train_df, val_df = train_test_split(labels, test_size=0.1, random_state=42)

    images_dir = os.path.join(data_dir, 'train_images/')
    train_loader = DataLoader(dataset=AptosDataset(df=train_df, img_dir=images_dir),
                              batch_size=4, shuffle=True, num_workers=4)
    val_loader = DataLoader(dataset=AptosDataset(df=val_df, img_dir=images_dir),
                            batch_size=4, shuffle=False, num_workers=4)

    model = AptosNet().cuda()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(n_epochs):
        train_epoch(train_loader, model, criterion, optimizer, epoch)
        val_epoch(val_loader, model, criterion, epoch)
    return model
def test(data_dir, model):
    """Predict a diagnosis for every test image and write submission.csv.

    Fixes: force eval mode so dropout/batch-norm behave deterministically
    even if the caller left the model in train mode, and run inference under
    torch.no_grad() to avoid building autograd graphs per image.
    """
    df = pd.read_csv(os.path.join(data_dir, 'test.csv'))
    # Placeholder prediction column; filled in below, row by row.
    df = df.assign(diagnosis=0)
    test_dataset = AptosDataset(df, img_dir=os.path.join(data_dir, 'test_images/'), mode='test')
    test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=1)
    df = df.set_index('id_code')
    model.eval()
    with torch.no_grad():
        for data, ids in tqdm(test_loader):
            data = data.cuda()
            output = model(data)
            # argmax over class logits; batch_size is 1, so ids[0] keys the row.
            _, preds = torch.max(output, axis=1)
            df.at[ids[0], 'diagnosis'] = preds.cpu().item()
    # 'path' was added to df in place by AptosDataset; drop it from the output.
    df = df.drop(columns=['path'])
    df.to_csv('submission.csv')
if __name__ == '__main__':
    # Kaggle kernel entry point: train on the labelled split, then write
    # predictions for the test split to submission.csv.
    data_root = '../input/aptos2019-blindness-detection'
    test(data_root, train(data_root))
# excel sheet column number
import math
def solution(s):
    """Convert an Excel column label to its 1-based column number.

    'A' -> 1, 'Z' -> 26, 'AA' -> 27 -- bijective base-26 with no zero digit.

    Fix: use pure integer arithmetic (Horner's rule) instead of math.pow,
    whose float result loses precision once labels get long enough that the
    value exceeds 2**53.
    """
    result = 0
    for ch in s:
        # ord('A') == 65, so ord(ch) - 64 maps 'A'..'Z' to 1..26.
        result = result * 26 + (ord(ch) - 64)
    return result
if __name__ == '__main__':
    # Quick smoke check: 'AA' should print 27.
    print(solution('AA'))
from typing import Callable

def partial(func: Callable, *args, **kwargs):
    '''
    partial

    Simple implementation for false currying.

    Fix: `Callable` was annotated but never imported anywhere in this file,
    so the original `def` line raised NameError at import time; the import
    above resolves it.

    :: func :: Function to curry
    :: *args :: Positional arguments
    :: **kwargs :: Keyword arguments
    '''
    def p_func(*p_args, **p_kwargs):
        # Pre-bound arguments come first; call-time arguments follow.
        return func(*args, *p_args, **kwargs, **p_kwargs)
    return p_func
|
from django.db import models
class Movie(models.Model):
    """Film metadata record (TMDB-backed, per the tmdb_id field -- confirm)."""
    title = models.CharField(max_length=200)
    # Defaults to 0 when unspecified.
    year = models.IntegerField(default=0)
    # NOTE(review): named *_id but declared as a URLField -- presumably holds
    # the trailer's URL rather than a bare id; verify against callers.
    trailer_id = models.URLField(max_length=200)
    poster_url = models.URLField(max_length=200)
    # NOTE(review): IntegerField caps at ~2.1e9; blockbuster budgets/revenues
    # can exceed that. BigIntegerField would be safer but needs a migration.
    budget = models.IntegerField(default=0)
    rating = models.FloatField(default=0.0)
    revenue = models.IntegerField(default=0)
    # Runtime -- presumably minutes; confirm against the data source.
    runtime = models.IntegerField(default=0)
    # External TMDB identifier stored as text.
    tmdb_id = models.CharField(max_length=200)
class Queue(models.Model):
    """A named collection of movies (many-to-many with Movie)."""
    name = models.CharField(max_length=200)
    movies = models.ManyToManyField(Movie)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.